/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
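
/*
 * Illustrative userspace usage (a minimal sketch, not compiled as part of
 * this file; the object name "/myshm" is arbitrary):
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	...
 *	shm_unlink("/myshm");
 */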

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
struct shm_mapping {
        char            *sm_path;
        Fnv32_t         sm_fnv;
        struct shmfd    *sm_shmfd;
        LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

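/*
 * Map the FNV-32 hash of a path to its dictionary bucket; shm_hash is
 * the bucket-index mask returned by hashinit() in shm_init() below.
 */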
#define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])

static void     shm_init(void *arg);
static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static void     shm_doremove(struct shm_mapping *map);
static int      shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);

static fo_rdwr_t        shm_read;
static fo_rdwr_t        shm_write;
static fo_truncate_t    shm_truncate;
static fo_ioctl_t       shm_ioctl;
static fo_stat_t        shm_stat;
static fo_close_t       shm_close;
static fo_chmod_t       shm_chmod;
static fo_chown_t       shm_chown;
static fo_seek_t        shm_seek;
static fo_fill_kinfo_t  shm_fill_kinfo;
static fo_mmap_t        shm_mmap;
static fo_get_seals_t   shm_get_seals;
static fo_add_seals_t   shm_add_seals;
static fo_fallocate_t   shm_fallocate;

/* File descriptor operations. */
struct fileops shm_ops = {
        .fo_read = shm_read,
        .fo_write = shm_write,
        .fo_truncate = shm_truncate,
        .fo_ioctl = shm_ioctl,
        .fo_poll = invfo_poll,
        .fo_kqfilter = invfo_kqfilter,
        .fo_stat = shm_stat,
        .fo_close = shm_close,
        .fo_chmod = shm_chmod,
        .fo_chown = shm_chown,
        .fo_sendfile = vn_sendfile,
        .fo_seek = shm_seek,
        .fo_fill_kinfo = shm_fill_kinfo,
        .fo_mmap = shm_mmap,
        .fo_get_seals = shm_get_seals,
        .fo_add_seals = shm_add_seals,
        .fo_fallocate = shm_fallocate,
        .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
};

FEATURE(posix_shm, "POSIX shared memory");

static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "");

static int largepage_reclaim_tries = 1;
SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
    CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
    "Number of contig reclaims before giving up for default alloc policy");

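/*
 * Copy at most one page of data between the uio and the object at the
 * uio's current offset.  Reads from a hole (no resident page and no
 * swap page) are satisfied from the shared zero_region instead of
 * instantiating a new page.
 */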
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
        vm_page_t m;
        vm_pindex_t idx;
        size_t tlen;
        int error, offset, rv;

        idx = OFF_TO_IDX(uio->uio_offset);
        offset = uio->uio_offset & PAGE_MASK;
        tlen = MIN(PAGE_SIZE - offset, len);

        rv = vm_page_grab_valid_unlocked(&m, obj, idx,
            VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
        if (rv == VM_PAGER_OK)
                goto found;

        /*
         * Read I/O without either a corresponding resident page or swap
         * page: use zero_region.  This is intended to avoid instantiating
         * pages on read from a sparse region.
         */
        VM_OBJECT_WLOCK(obj);
        m = vm_page_lookup(obj, idx);
        if (uio->uio_rw == UIO_READ && m == NULL &&
            !vm_pager_has_page(obj, idx, NULL, NULL)) {
                VM_OBJECT_WUNLOCK(obj);
                return (uiomove(__DECONST(void *, zero_region), tlen, uio));
        }

        /*
         * Although the tmpfs vnode lock is held here, it is
         * nonetheless safe to sleep waiting for a free page.  The
         * pageout daemon does not need to acquire the tmpfs vnode
         * lock to page out obj's pages because obj is an OBJT_SWAP
         * type object.
         */
        rv = vm_page_grab_valid(&m, obj, idx,
            VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
        if (rv != VM_PAGER_OK) {
                VM_OBJECT_WUNLOCK(obj);
                if (bootverbose) {
                        printf("uiomove_object: vm_obj %p idx %jd "
                            "pager error %d\n", obj, idx, rv);
                }
                return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
        }
        VM_OBJECT_WUNLOCK(obj);

found:
        error = uiomove_fromphys(&m, offset, tlen, uio);
        if (uio->uio_rw == UIO_WRITE && error == 0)
                vm_page_set_dirty(m);
        vm_page_activate(m);
        vm_page_sunbusy(m);

        return (error);
}

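/*
 * Move data between the uio and the VM object backing a shmfd, one page
 * at a time, stopping at obj_size or when uiomove_object_page() makes
 * no progress.
 */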
int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
        ssize_t resid;
        size_t len;
        int error;

        error = 0;
        while ((resid = uio->uio_resid) > 0) {
                if (obj_size <= uio->uio_offset)
                        break;
                len = MIN(obj_size - uio->uio_offset, resid);
                if (len == 0)
                        break;
                error = uiomove_object_page(obj, len, uio);
                if (error != 0 || resid == uio->uio_resid)
                        break;
        }
        return (error);
}

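/*
 * Count of allocated largepages, indexed by page-size index (psind);
 * exported read-only through the vm.largepages.* sysctls created in
 * shm_init() below.
 */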
static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
        vm_page_t m __diagused;
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind == 0 || pidx >= object->size)
                return (VM_PAGER_FAIL);
        *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

        /*
         * We only busy the first page in the superpage run.  It is
         * useless to busy the whole run since we only remove full
         * superpages, and it takes too long to busy e.g. 512 * 512 ==
         * 262144 pages constituting a 1G amd64 superpage.
         */
        m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
        MPASS(m != NULL);

        *last = *first + atop(pagesizes[psind]) - 1;
        return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind == 0 || pindex >= object->size)
                return (FALSE);
        if (before != NULL) {
                *before = pindex - rounddown2(pindex, pagesizes[psind] /
                    PAGE_SIZE);
        }
        if (after != NULL) {
                *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
                    pindex;
        }
        return (TRUE);
}

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind != 0) {
                atomic_subtract_long(&count_largepages[psind],
                    object->size / (pagesizes[psind] / PAGE_SIZE));
                vm_wire_sub(object->size);
        } else {
                KASSERT(object->size == 0,
                    ("largepage phys obj %p not initialized but size %#jx > 0",
                    object, (uintmax_t)object->size));
        }
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
        .phys_pg_populate =     shm_largepage_phys_populate,
        .phys_pg_haspage =      shm_largepage_phys_haspage,
        .phys_pg_ctor =         shm_largepage_phys_ctor,
        .phys_pg_dtor =         shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
        return (shmfd->shm_object->type == OBJT_PHYS);
}

static void
shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
{
        struct shmfd *shm;
        vm_size_t c;

        swap_pager_freespace(obj, start, size, &c);
        if (c == 0)
                return;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        KASSERT(shm->shm_pages >= c,
            ("shm %p pages %jd free %jd", shm,
            (uintmax_t)shm->shm_pages, (uintmax_t)c));
        shm->shm_pages -= c;
}

static void
shm_page_inserted(vm_object_t obj, vm_page_t m)
{
        struct shmfd *shm;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
                shm->shm_pages += 1;
}

static void
shm_page_removed(vm_object_t obj, vm_page_t m)
{
        struct shmfd *shm;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
                KASSERT(shm->shm_pages >= 1,
                    ("shm %p pages %jd free 1", shm,
                    (uintmax_t)shm->shm_pages));
                shm->shm_pages -= 1;
        }
}

static struct pagerops shm_swap_pager_ops = {
        .pgo_kvme_type = KVME_TYPE_SWAP,
        .pgo_freespace = shm_pager_freespace,
        .pgo_page_inserted = shm_page_inserted,
        .pgo_page_removed = shm_page_removed,
};
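
/*
 * Dynamically registered pager type for shmfd-backed swap objects;
 * assigned by vm_pager_alloc_dyn_type() in shm_init() below.
 */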
static int shmfd_pager_type = -1;

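/*
 * L_SET, L_INCR and L_XTND are the historical <sys/unistd.h> spellings
 * of SEEK_SET, SEEK_CUR and SEEK_END.
 */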
static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
        struct shmfd *shmfd;
        off_t foffset;
        int error;

        shmfd = fp->f_data;
        foffset = foffset_lock(fp, 0);
        error = 0;
        switch (whence) {
        case L_INCR:
                if (foffset < 0 ||
                    (offset > 0 && foffset > OFF_MAX - offset)) {
                        error = EOVERFLOW;
                        break;
                }
                offset += foffset;
                break;
        case L_XTND:
                if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
                        error = EOVERFLOW;
                        break;
                }
                offset += shmfd->shm_size;
                break;
        case L_SET:
                break;
        default:
                error = EINVAL;
        }
        if (error == 0) {
                if (offset < 0 || offset > shmfd->shm_size)
                        error = EINVAL;
                else
                        td->td_uretoff.tdu_off = offset;
        }
        foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
        return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
            uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;
        off_t size;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
                return (EINVAL);
        foffset_lock_uio(fp, uio, flags);
        if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
                /*
                 * Overflow is only an error if we're supposed to expand on
                 * write.  Otherwise, we'll just truncate the write to the
                 * size of the file, which can only grow up to OFF_MAX.
                 */
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
                        foffset_unlock_uio(fp, uio, flags);
                        return (EFBIG);
                }

                size = shmfd->shm_size;
        } else {
                size = uio->uio_offset + uio->uio_resid;
        }
        if ((flags & FOF_OFFSET) == 0) {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
        } else {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
                    size, &shmfd->shm_mtx);
        }
        if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
                error = EPERM;
        } else {
                error = 0;
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
                    size > shmfd->shm_size) {
                        error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
                }
                if (error == 0)
                        error = uiomove_object(shmfd->shm_object,
                            shmfd->shm_size, uio);
        }
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        struct shm_largepage_conf *conf;
        void *rl_cookie;

        shmfd = fp->f_data;
        switch (com) {
        case FIONBIO:
        case FIOASYNC:
                /*
                 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
                 * just like it would on an unlinked regular file.
                 */
                return (0);
        case FIOSSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
                if (shmfd->shm_lp_psind != 0 &&
                    conf->psind != shmfd->shm_lp_psind)
                        return (EINVAL);
                if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
                    pagesizes[conf->psind] == 0)
                        return (EINVAL);
                if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
                        return (EINVAL);

                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
                shmfd->shm_lp_psind = conf->psind;
                shmfd->shm_lp_alloc_policy = conf->alloc_policy;
                shmfd->shm_object->un_pager.phys.data_val = conf->psind;
                rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
                return (0);
        case FIOGSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
                rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
                conf->psind = shmfd->shm_lp_psind;
                conf->alloc_policy = shmfd->shm_lp_alloc_policy;
                rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
                return (0);
        default:
                return (ENOTTY);
        }
}
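
/*
 * Illustrative userspace configuration of a largepage shm object (a
 * minimal sketch, not compiled here; assumes a 2M page size at psind 1
 * on the running machine):
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);
 *
 * shm_create_largepage(3) wraps this sequence for most consumers.
 */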

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;

#ifdef MAC
        error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif

        /*
         * Attempt to return sane-ish values for fstat() on a memory file
         * descriptor.
         */
        bzero(sb, sizeof(*sb));
        sb->st_blksize = PAGE_SIZE;
        sb->st_size = shmfd->shm_size;
        mtx_lock(&shm_timestamp_lock);
        sb->st_atim = shmfd->shm_atime;
        sb->st_ctim = shmfd->shm_ctime;
        sb->st_mtim = shmfd->shm_mtime;
        sb->st_birthtim = shmfd->shm_birthtime;
        sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
        sb->st_uid = shmfd->shm_uid;
        sb->st_gid = shmfd->shm_gid;
        mtx_unlock(&shm_timestamp_lock);
        sb->st_dev = shm_dev_ino;
        sb->st_ino = shmfd->shm_ino;
        sb->st_nlink = shmfd->shm_object->ref_count;
        if (shm_largepage(shmfd)) {
                sb->st_blocks = shmfd->shm_object->size /
                    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
        } else {
                sb->st_blocks = shmfd->shm_pages;
        }

        return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
        struct shmfd *shmfd;

        shmfd = fp->f_data;
        fp->f_data = NULL;
        shm_drop(shmfd);

        return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
        int error;
        char *path;
        const char *pr_path;
        size_t pr_pathlen;

        path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
        pr_path = td->td_ucred->cr_prison->pr_path;

        /* Construct a full pathname for jailed callers. */
        pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
            strlcpy(path, pr_path, MAXPATHLEN);
        error = copyinstr(userpath_in, path + pr_pathlen,
            MAXPATHLEN - pr_pathlen, NULL);
        if (error != 0)
                goto out;

#ifdef KTRACE
        if (KTRPOINT(curthread, KTR_NAMEI))
                ktrnamei(path);
#endif

        /* Require paths to start with a '/' character. */
        if (path[pr_pathlen] != '/') {
                error = EINVAL;
                goto out;
        }

        *path_out = path;

out:
        if (error != 0)
                free(path, M_SHMFD);

        return (error);
}

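/*
 * Resize the object backing a shmfd.  When shrinking, the tail of the
 * new last page is zeroed (pulling the page in from swap if needed) so
 * that a later extension reads zeros, pages beyond the new size are
 * removed, and the swap reservation is released; when growing, only
 * additional swap space is reserved.  Callers must hold the object
 * write lock and a write range lock.
 */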
static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t idx, nobjsize;
        vm_ooffset_t delta;
        int base, rv;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
        if (length == shmfd->shm_size)
                return (0);
        nobjsize = OFF_TO_IDX(length + PAGE_MASK);

        /* Are we shrinking?  If so, trim the end. */
        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);

                /*
                 * Disallow any requests to shrink the size if this
                 * object is mapped into the kernel.
                 */
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);

                /*
                 * Zero the truncated part of the last page.
                 */
                base = length & PAGE_MASK;
                if (base != 0) {
                        idx = OFF_TO_IDX(length);
retry:
                        m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
                        if (m != NULL) {
                                MPASS(vm_page_all_valid(m));
                        } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
                                m = vm_page_alloc(object, idx,
                                    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
                                if (m == NULL)
                                        goto retry;
                                vm_object_pip_add(object, 1);
                                VM_OBJECT_WUNLOCK(object);
                                rv = vm_pager_get_pages(object, &m, 1, NULL,
                                    NULL);
                                VM_OBJECT_WLOCK(object);
                                vm_object_pip_wakeup(object);
                                if (rv == VM_PAGER_OK) {
                                        /*
                                         * Since the page was not resident,
                                         * and therefore not recently
                                         * accessed, immediately enqueue it
                                         * for asynchronous laundering.  The
                                         * current operation is not regarded
                                         * as an access.
                                         */
                                        vm_page_launder(m);
                                } else {
                                        vm_page_free(m);
                                        VM_OBJECT_WUNLOCK(object);
                                        return (EIO);
                                }
                        }
                        if (m != NULL) {
                                pmap_zero_page_area(m, base, PAGE_SIZE - base);
                                KASSERT(vm_page_all_valid(m),
                                    ("shm_dotruncate: page %p is invalid", m));
                                vm_page_set_dirty(m);
                                vm_page_xunbusy(m);
                        }
                }
                delta = IDX_TO_OFF(object->size - nobjsize);

                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);

                /* Free the swap accounted for the shm. */
                swap_release_by_cred(delta, object->cred);
                object->charge -= delta;
        } else {
                if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                        return (EPERM);

                /* Try to reserve additional swap space. */
                delta = IDX_TO_OFF(nobjsize - object->size);
                if (!swap_reserve_by_cred(delta, object->cred))
                        return (ENOMEM);
                object->charge += delta;
        }
        shmfd->shm_size = length;
        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_ctime);
        shmfd->shm_mtime = shmfd->shm_ctime;
        mtx_unlock(&shm_timestamp_lock);
        object->size = nobjsize;
        return (0);
}

static int
shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t newobjsz;
        vm_pindex_t oldobjsz __unused;
        int aflags, error, i, psind, try;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);

        oldobjsz = object->size;
        newobjsz = OFF_TO_IDX(length);
        if (length == shmfd->shm_size)
                return (0);
        psind = shmfd->shm_lp_psind;
        if (psind == 0 && length != 0)
                return (EINVAL);
        if ((length & (pagesizes[psind] - 1)) != 0)
                return (EINVAL);

        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);
                return (ENOTSUP);       /* Pages are unmanaged. */
#if 0
                vm_object_page_remove(object, newobjsz, oldobjsz, 0);
                object->size = newobjsz;
                shmfd->shm_size = length;
                return (0);
#endif
        }

        if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                return (EPERM);

        aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
        if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
                aflags |= VM_ALLOC_WAITFAIL;
        try = 0;

        /*
         * Extend the shmfd and object, keeping all already fully
         * allocated large pages intact even on error, because the
         * dropped object lock might have allowed them to be mapped.
         */
        while (object->size < newobjsz) {
                m = vm_page_alloc_contig(object, object->size, aflags,
                    pagesizes[psind] / PAGE_SIZE, 0, ~0,
                    pagesizes[psind], 0,
                    VM_MEMATTR_DEFAULT);
                if (m == NULL) {
                        VM_OBJECT_WUNLOCK(object);
                        if (shmfd->shm_lp_alloc_policy ==
                            SHM_LARGEPAGE_ALLOC_NOWAIT ||
                            (shmfd->shm_lp_alloc_policy ==
                            SHM_LARGEPAGE_ALLOC_DEFAULT &&
                            try >= largepage_reclaim_tries)) {
                                VM_OBJECT_WLOCK(object);
                                return (ENOMEM);
                        }
                        error = vm_page_reclaim_contig(aflags,
                            pagesizes[psind] / PAGE_SIZE, 0, ~0,
                            pagesizes[psind], 0) ? 0 :
                            vm_wait_intr(object);
                        if (error != 0) {
                                VM_OBJECT_WLOCK(object);
                                return (error);
                        }
                        try++;
                        VM_OBJECT_WLOCK(object);
                        continue;
                }
                try = 0;
                for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
                        if ((m[i].flags & PG_ZERO) == 0)
                                pmap_zero_page(&m[i]);
                        vm_page_valid(&m[i]);
                        vm_page_xunbusy(&m[i]);
                }
                object->size += OFF_TO_IDX(pagesizes[psind]);
                shmfd->shm_size += pagesizes[psind];
                atomic_add_long(&count_largepages[psind], 1);
                vm_wire_add(atop(pagesizes[psind]));
        }
        return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        int error;

        VM_OBJECT_WLOCK(shmfd->shm_object);
        error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
            length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
            rl_cookie);
        VM_OBJECT_WUNLOCK(shmfd->shm_object);
        return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
        void *rl_cookie;
        int error;

        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
            &shmfd->shm_mtx);
        error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
{
        struct shmfd *shmfd;
        vm_object_t obj;

        shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
        shmfd->shm_size = 0;
        shmfd->shm_uid = ucred->cr_uid;
        shmfd->shm_gid = ucred->cr_gid;
        shmfd->shm_mode = mode;
        if (largepage) {
                shmfd->shm_object = phys_pager_allocate(NULL,
                    &shm_largepage_phys_ops, NULL, shmfd->shm_size,
                    VM_PROT_DEFAULT, 0, ucred);
                shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
        } else {
                obj = vm_pager_allocate(shmfd_pager_type, NULL,
                    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
                VM_OBJECT_WLOCK(obj);
                obj->un_pager.swp.swp_priv = shmfd;
                VM_OBJECT_WUNLOCK(obj);
                shmfd->shm_object = obj;
        }
        KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
        vfs_timestamp(&shmfd->shm_birthtime);
        shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
            shmfd->shm_birthtime;
        shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
        refcount_init(&shmfd->shm_refs, 1);
        mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
        rangelock_init(&shmfd->shm_rl);
#ifdef MAC
        mac_posixshm_init(shmfd);
        mac_posixshm_create(ucred, shmfd);
#endif

        return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{
        refcount_acquire(&shmfd->shm_refs);
        return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{
        vm_object_t obj;

        if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
                mac_posixshm_destroy(shmfd);
#endif
                rangelock_destroy(&shmfd->shm_rl);
                mtx_destroy(&shmfd->shm_mtx);
                obj = shmfd->shm_object;
                if (!shm_largepage(shmfd)) {
                        VM_OBJECT_WLOCK(obj);
                        obj->un_pager.swp.swp_priv = NULL;
                        VM_OBJECT_WUNLOCK(obj);
                }
                vm_object_deallocate(obj);
                free(shmfd, M_SHMFD);
        }
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
        accmode_t accmode;
        int error;

        accmode = 0;
        if (flags & FREAD)
                accmode |= VREAD;
        if (flags & FWRITE)
                accmode |= VWRITE;
        mtx_lock(&shm_timestamp_lock);
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
            accmode, ucred);
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

static void
shm_init(void *arg)
{
        char name[32];
        int i;

        mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
        sx_init(&shm_dict_lock, "shm dictionary");
        shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
        new_unrhdr64(&shm_ino_unr, 1);
        shm_dev_ino = devfs_alloc_cdp_inode();
        KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
        shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
            OBJT_SWAP);
        MPASS(shmfd_pager_type != -1);

        for (i = 1; i < MAXPAGESIZES; i++) {
                if (pagesizes[i] == 0)
                        break;
#define M       (1024 * 1024)
#define G       (1024 * M)
                if (pagesizes[i] >= G)
                        snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
                else if (pagesizes[i] >= M)
                        snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
                else
                        snprintf(name, sizeof(name), "%lu", pagesizes[i]);
#undef G
#undef M
                SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
                    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
                    "number of non-transient largepages allocated");
        }
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

/*
 * Remove all shared memory objects that belong to a prison.
 */
void
shm_remove_prison(struct prison *pr)
{
        struct shm_mapping *shmm, *tshmm;
        u_long i;

        sx_xlock(&shm_dict_lock);
        for (i = 0; i < shm_hash + 1; i++) {
                LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
                        if (shmm->sm_shmfd->shm_object->cred &&
                            shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
                                shm_doremove(shmm);
                }
        }
        sx_xunlock(&shm_dict_lock);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
        struct shm_mapping *map;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0)
                        return (map->sm_shmfd);
        }

        return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
        struct shm_mapping *map;

        map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
        map->sm_path = path;
        map->sm_fnv = fnv;
        map->sm_shmfd = shm_hold(shmfd);
        shmfd->shm_path = path;
        LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
        struct shm_mapping *map;
        int error;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
                        error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
                        if (error)
                                return (error);
#endif
                        error = shm_access(map->sm_shmfd, ucred,
                            FREAD | FWRITE);
                        if (error)
                                return (error);
                        shm_doremove(map);
                        return (0);
                }
        }

        return (ENOENT);
}

static void
shm_doremove(struct shm_mapping *map)
{
        map->sm_shmfd->shm_path = NULL;
        LIST_REMOVE(map, sm_link);
        shm_drop(map->sm_shmfd);
        free(map->sm_path, M_SHMFD);
        free(map, M_SHMFD);
}

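/*
 * Common implementation for the shm_open(2) family of system calls:
 * resolves the path (or SHM_ANON), creates or looks up the shmfd,
 * applies seals and O_TRUNC, and installs the file descriptor.
 */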
int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
        struct pwddesc *pdp;
        struct shmfd *shmfd;
        struct file *fp;
        char *path;
        void *rl_cookie;
        Fnv32_t fnv;
        mode_t cmode;
        int error, fd, initial_seals;
        bool largepage;

        if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
            SHM_LARGEPAGE)) != 0)
                return (EINVAL);

        initial_seals = F_SEAL_SEAL;
        if ((shmflags & SHM_ALLOW_SEALING) != 0)
                initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
        /*
         * shm_open(2) is only allowed for anonymous objects.
         */
        if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
                return (ECAPMODE);
#endif

        AUDIT_ARG_FFLAGS(flags);
        AUDIT_ARG_MODE(mode);

        if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
                return (EINVAL);

        if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC |
            O_CLOEXEC)) != 0)
                return (EINVAL);

        largepage = (shmflags & SHM_LARGEPAGE) != 0;
        if (largepage && !PMAP_HAS_LARGEPAGES)
                return (ENOTTY);

        /*
         * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
         * If the decision is made later to allow additional seals, care must be
         * taken below to ensure that the seals are properly set if the shmfd
         * already existed -- this currently assumes that only F_SEAL_SEAL can
         * be set and doesn't take further precautions to ensure the validity of
         * the seals being added with respect to current mappings.
         */
        if ((initial_seals & ~F_SEAL_SEAL) != 0)
                return (EINVAL);

        pdp = td->td_proc->p_pd;
        cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;

        /*
         * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
         * by POSIX.  We allow it to be unset here so that an in-kernel
         * interface may be written as a thin layer around shm, optionally not
         * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
         * in sys_shm_open() to keep this implementation compliant.
         */
        error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
        if (error)
                return (error);

        /* A SHM_ANON path pointer creates an anonymous object. */
        if (userpath == SHM_ANON) {
                /* A read-only anonymous object is pointless. */
                if ((flags & O_ACCMODE) == O_RDONLY) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (EINVAL);
                }
                shmfd = shm_alloc(td->td_ucred, cmode, largepage);
                shmfd->shm_seals = initial_seals;
                shmfd->shm_flags = shmflags;
        } else {
                error = shm_copyin_path(td, userpath, &path);
                if (error != 0) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }

                AUDIT_ARG_UPATH1_CANON(path);
                fnv = fnv_32_str(path, FNV1_32_INIT);
                sx_xlock(&shm_dict_lock);
                shmfd = shm_lookup(path, fnv);
                if (shmfd == NULL) {
                        /* Object does not yet exist, create it if requested. */
                        if (flags & O_CREAT) {
#ifdef MAC
                                error = mac_posixshm_check_create(td->td_ucred,
                                    path);
                                if (error == 0) {
#endif
                                        shmfd = shm_alloc(td->td_ucred, cmode,
                                            largepage);
                                        shmfd->shm_seals = initial_seals;
                                        shmfd->shm_flags = shmflags;
                                        shm_insert(path, fnv, shmfd);
#ifdef MAC
                                }
#endif
                        } else {
                                free(path, M_SHMFD);
                                error = ENOENT;
                        }
                } else {
                        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                            &shmfd->shm_mtx);

                        /*
                         * kern_shm_open() likely shouldn't ever error out on
                         * trying to set a seal that already exists, unlike
                         * F_ADD_SEALS.  This would break terribly as
                         * shm_open(2) actually sets F_SEAL_SEAL to maintain
                         * historical behavior where the underlying file could
                         * not be sealed.
                         */
                        initial_seals &= ~shmfd->shm_seals;

                        /*
                         * Object already exists, obtain a new
                         * reference if requested and permitted.
                         */
                        free(path, M_SHMFD);

                        /*
                         * initial_seals can't set additional seals if
                         * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is
                         * set, then we've already removed it from
                         * initial_seals.  This is currently redundant as we
                         * only allow setting F_SEAL_SEAL at creation time, but
                         * it's cheap to check and decreases the effort required
                         * to allow additional seals.
                         */
                        if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
                            initial_seals != 0)
                                error = EPERM;
                        else if ((flags & (O_CREAT | O_EXCL)) ==
                            (O_CREAT | O_EXCL))
                                error = EEXIST;
                        else if (shmflags != 0 && shmflags != shmfd->shm_flags)
                                error = EINVAL;
                        else {
#ifdef MAC
                                error = mac_posixshm_check_open(td->td_ucred,
                                    shmfd, FFLAGS(flags & O_ACCMODE));
                                if (error == 0)
#endif
                                error = shm_access(shmfd, td->td_ucred,
                                    FFLAGS(flags & O_ACCMODE));
                        }

                        /*
                         * Truncate the file back to zero length if
                         * O_TRUNC was specified and the object was
                         * opened with read/write.
                         */
                        if (error == 0 &&
                            (flags & (O_ACCMODE | O_TRUNC)) ==
                            (O_RDWR | O_TRUNC)) {
                                VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
                                error = mac_posixshm_check_truncate(
                                        td->td_ucred, fp->f_cred, shmfd);
                                if (error == 0)
#endif
                                        error = shm_dotruncate_locked(shmfd, 0,
                                            rl_cookie);
                                VM_OBJECT_WUNLOCK(shmfd->shm_object);
                        }
                        if (error == 0) {
                                /*
                                 * Currently we only allow F_SEAL_SEAL to be
                                 * set initially.  As noted above, this would
                                 * need to be reworked should that change.
                                 */
                                shmfd->shm_seals |= initial_seals;
                                shm_hold(shmfd);
                        }
                        rangelock_unlock(&shmfd->shm_rl, rl_cookie,
                            &shmfd->shm_mtx);
                }
                sx_xunlock(&shm_dict_lock);

                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }
        }

        finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

        td->td_retval[0] = fd;
        fdrop(fp, td);

        return (0);
}

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{
        return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
            uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
        char *path;
        Fnv32_t fnv;
        int error;

        error = shm_copyin_path(td, uap->path, &path);
        if (error != 0)
                return (error);

        AUDIT_ARG_UPATH1_CANON(path);
        fnv = fnv_32_str(path, FNV1_32_INIT);
        sx_xlock(&shm_dict_lock);
        error = shm_remove(path, fnv, td->td_ucred);
        sx_xunlock(&shm_dict_lock);
        free(path, M_SHMFD);

        return (error);
}
1367
1368 int
1369 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1370 {
1371         char *path_from = NULL, *path_to = NULL;
1372         Fnv32_t fnv_from, fnv_to;
1373         struct shmfd *fd_from;
1374         struct shmfd *fd_to;
1375         int error;
1376         int flags;
1377
1378         flags = uap->flags;
1379         AUDIT_ARG_FFLAGS(flags);
1380
1381         /*
1382          * Make sure the user passed only valid flags.
1383          * If you add a new flag, please add a new term here.
1384          */
1385         if ((flags & ~(
1386             SHM_RENAME_NOREPLACE |
1387             SHM_RENAME_EXCHANGE
1388             )) != 0) {
1389                 error = EINVAL;
1390                 goto out;
1391         }
1392
1393         /*
1394          * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1395          * force the user to choose one or the other.
1396          */
1397         if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1398             (flags & SHM_RENAME_EXCHANGE) != 0) {
1399                 error = EINVAL;
1400                 goto out;
1401         }
1402
1403         /* Renaming to or from anonymous makes no sense */
1404         if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1405                 error = EINVAL;
1406                 goto out;
1407         }
1408
1409         error = shm_copyin_path(td, uap->path_from, &path_from);
1410         if (error != 0)
1411                 goto out;
1412
1413         error = shm_copyin_path(td, uap->path_to, &path_to);
1414         if (error != 0)
1415                 goto out;
1416
1417         AUDIT_ARG_UPATH1_CANON(path_from);
1418         AUDIT_ARG_UPATH2_CANON(path_to);
1419
1420         /* Rename with from/to equal is a no-op */
1421         if (strcmp(path_from, path_to) == 0)
1422                 goto out;
1423
1424         fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1425         fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1426
1427         sx_xlock(&shm_dict_lock);
1428
1429         fd_from = shm_lookup(path_from, fnv_from);
1430         if (fd_from == NULL) {
1431                 error = ENOENT;
1432                 goto out_locked;
1433         }
1434
1435         fd_to = shm_lookup(path_to, fnv_to);
1436         if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1437                 error = EEXIST;
1438                 goto out_locked;
1439         }
1440
1441         /*
1442          * Unconditionally prevents shm_remove from invalidating the 'from'
1443          * shm's state.
1444          */
1445         shm_hold(fd_from);
1446         error = shm_remove(path_from, fnv_from, td->td_ucred);
1447
1448         /*
1449          * One of my assumptions failed if ENOENT (e.g. locking didn't
1450          * protect us)
1451          */
1452         KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1453             path_from));
1454         if (error != 0) {
1455                 shm_drop(fd_from);
1456                 goto out_locked;
1457         }
1458
1459         /*
1460          * If we are exchanging, we need to ensure the shm_remove below
1461          * doesn't invalidate the dest shm's state.
1462          */
1463         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1464                 shm_hold(fd_to);
1465
1466         /*
1467          * NOTE: if path_to is not already in the hash, c'est la vie;
1468          * it simply means we have nothing already at path_to to unlink.
1469          * That is the ENOENT case.
1470          *
1471          * If we somehow don't have access to unlink this guy, but
1472          * did for the shm at path_from, then relink the shm to path_from
1473          * and abort with EACCES.
1474          *
1475          * All other errors: that is weird; let's relink and abort the
1476          * operation.
1477          */
1478         error = shm_remove(path_to, fnv_to, td->td_ucred);
1479         if (error != 0 && error != ENOENT) {
1480                 shm_insert(path_from, fnv_from, fd_from);
1481                 shm_drop(fd_from);
1482                 /* Don't free path_from now, since the hash references it */
1483                 path_from = NULL;
1484                 goto out_locked;
1485         }
1486
1487         error = 0;
1488
1489         shm_insert(path_to, fnv_to, fd_from);
1490
1491         /* Don't free path_to now, since the hash references it */
1492         path_to = NULL;
1493
1494         /* We kept a ref when we removed, and incremented again in insert */
1495         shm_drop(fd_from);
1496         KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1497             fd_from->shm_refs));
1498
1499         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1500                 shm_insert(path_from, fnv_from, fd_to);
1501                 path_from = NULL;
1502                 shm_drop(fd_to);
1503                 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1504                     fd_to->shm_refs));
1505         }
1506
1507 out_locked:
1508         sx_xunlock(&shm_dict_lock);
1509
1510 out:
1511         free(path_from, M_SHMFD);
1512         free(path_to, M_SHMFD);
1513         return (error);
1514 }
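
/*
 * For reference, this operation is exposed to userspace as shm_rename(2).
 * A minimal sketch of the replace case follows; the "/new" and "/cur"
 * paths are illustrative only.  Passing SHM_RENAME_NOREPLACE instead
 * would fail with EEXIST if "/cur" already exists, and
 * SHM_RENAME_EXCHANGE would atomically swap the two objects; as checked
 * above, the two flags are mutually exclusive.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>

static void
publish(void)
{
        int fd;

        fd = shm_open("/new", O_RDWR | O_CREAT, 0600);
        if (fd == -1)
                err(1, "shm_open");
        /* ... fill in the new object's contents ... */
        if (shm_rename("/new", "/cur", 0) == -1)
                err(1, "shm_rename");
        /* fd still refers to the object, now reachable as "/cur". */
}
#endif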
1515
1516 static int
1517 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1518     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1519     vm_ooffset_t foff, struct thread *td)
1520 {
1521         struct vmspace *vms;
1522         vm_map_entry_t next_entry, prev_entry;
1523         vm_offset_t align, mask, maxaddr;
1524         int docow, error, rv, try;
1525         bool curmap;
1526
1527         if (shmfd->shm_lp_psind == 0)
1528                 return (EINVAL);
1529
1530         /* MAP_PRIVATE is disabled */
1531         if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1532             MAP_NOCORE |
1533 #ifdef MAP_32BIT
1534             MAP_32BIT |
1535 #endif
1536             MAP_ALIGNMENT_MASK)) != 0)
1537                 return (EINVAL);
1538
1539         vms = td->td_proc->p_vmspace;
1540         curmap = map == &vms->vm_map;
1541         if (curmap) {
1542                 error = kern_mmap_racct_check(td, map, size);
1543                 if (error != 0)
1544                         return (error);
1545         }
1546
1547         docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1548         docow |= MAP_INHERIT_SHARE;
1549         if ((flags & MAP_NOCORE) != 0)
1550                 docow |= MAP_DISABLE_COREDUMP;
1551
1552         mask = pagesizes[shmfd->shm_lp_psind] - 1;
1553         if ((foff & mask) != 0)
1554                 return (EINVAL);
1555         maxaddr = vm_map_max(map);
1556 #ifdef MAP_32BIT
1557         if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1558                 maxaddr = MAP_32BIT_MAX_ADDR;
1559 #endif
1560         if (size == 0 || (size & mask) != 0 ||
1561             (*addr != 0 && ((*addr & mask) != 0 ||
1562             *addr + size < *addr || *addr + size > maxaddr)))
1563                 return (EINVAL);
1564
1565         align = flags & MAP_ALIGNMENT_MASK;
1566         if (align == 0) {
1567                 align = pagesizes[shmfd->shm_lp_psind];
1568         } else if (align == MAP_ALIGNED_SUPER) {
1569                 if (shmfd->shm_lp_psind != 1)
1570                         return (EINVAL);
1571                 align = pagesizes[1];
1572         } else {
1573                 align >>= MAP_ALIGNMENT_SHIFT;
1574                 align = 1ULL << align;
1575                 /* Also handles overflow. */
1576                 if (align < pagesizes[shmfd->shm_lp_psind])
1577                         return (EINVAL);
1578         }
1579
1580         vm_map_lock(map);
1581         if ((flags & MAP_FIXED) == 0) {
1582                 try = 1;
1583                 if (curmap && (*addr == 0 ||
1584                     (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1585                     *addr < round_page((vm_offset_t)vms->vm_daddr +
1586                     lim_max(td, RLIMIT_DATA))))) {
1587                         *addr = roundup2((vm_offset_t)vms->vm_daddr +
1588                             lim_max(td, RLIMIT_DATA),
1589                             pagesizes[shmfd->shm_lp_psind]);
1590                 }
1591 again:
1592                 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1593                 if (rv != KERN_SUCCESS) {
1594                         if (try == 1) {
1595                                 try = 2;
1596                                 *addr = vm_map_min(map);
1597                                 if ((*addr & mask) != 0)
1598                                         *addr = (*addr + mask) & ~mask;
1599                                 goto again;
1600                         }
1601                         goto fail1;
1602                 }
1603         } else if ((flags & MAP_EXCL) == 0) {
1604                 rv = vm_map_delete(map, *addr, *addr + size);
1605                 if (rv != KERN_SUCCESS)
1606                         goto fail1;
1607         } else {
1608                 error = ENOSPC;
1609                 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1610                         goto fail;
1611                 next_entry = vm_map_entry_succ(prev_entry);
1612                 if (next_entry->start < *addr + size)
1613                         goto fail;
1614         }
1615
1616         rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1617             prot, max_prot, docow);
1618 fail1:
1619         error = vm_mmap_to_errno(rv);
1620 fail:
1621         vm_map_unlock(map);
1622         return (error);
1623 }
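
/*
 * A minimal userspace sketch of exercising the largepage path above via
 * shm_create_largepage(3), its documented front end.  The "/lp-demo"
 * name and psind 1 (pagesizes[1], 2M on amd64) are illustrative; the
 * object size must be a multiple of the selected page size, exactly as
 * validated above, and no MAP_ALIGNED flag is needed because the code
 * above defaults the alignment to pagesizes[psind].
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

static void *
map_superpage(void)
{
        size_t ps[4];
        void *p;
        int fd;

        if (getpagesizes(ps, 4) < 2)
                errx(1, "no superpage sizes configured");
        fd = shm_create_largepage("/lp-demo", O_CREAT | O_RDWR, 1,
            SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
        if (fd == -1)
                err(1, "shm_create_largepage");
        /* Largepage objects must be sized before they can be mapped. */
        if (ftruncate(fd, (off_t)ps[1]) == -1)
                err(1, "ftruncate");
        p = mmap(NULL, ps[1], PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                err(1, "mmap");
        return (p);
}
#endif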
1624
1625 static int
1626 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1627     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1628     vm_ooffset_t foff, struct thread *td)
1629 {
1630         struct shmfd *shmfd;
1631         vm_prot_t maxprot;
1632         int error;
1633         bool writecnt;
1634         void *rl_cookie;
1635
1636         shmfd = fp->f_data;
1637         maxprot = VM_PROT_NONE;
1638
1639         rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1640             &shmfd->shm_mtx);
1641         /* FREAD should always be set. */
1642         if ((fp->f_flag & FREAD) != 0)
1643                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1644
1645         /*
1646          * If FWRITE is set, we can allow VM_PROT_WRITE unless it is a
1647          * shared mapping with a write seal applied.  Private mappings
1648          * are always writable.
1649          */
1650         if ((flags & MAP_SHARED) == 0) {
1651                 cap_maxprot |= VM_PROT_WRITE;
1652                 maxprot |= VM_PROT_WRITE;
1653                 writecnt = false;
1654         } else {
1655                 if ((fp->f_flag & FWRITE) != 0 &&
1656                     (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1657                         maxprot |= VM_PROT_WRITE;
1658
1659                 /*
1660                  * Any mappings from a writable descriptor may be upgraded to
1661                  * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1662                  * applied between the open and subsequent mmap(2).  We want to
1663                  * reject application of a write seal as long as any such
1664                  * mapping exists so that the seal cannot be trivially bypassed.
1665                  */
1666                 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1667                 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1668                         error = EACCES;
1669                         goto out;
1670                 }
1671         }
1672         maxprot &= cap_maxprot;
1673
1674         /* See comment in vn_mmap(). */
1675         if (
1676 #ifdef _LP64
1677             objsize > OFF_MAX ||
1678 #endif
1679             foff > OFF_MAX - objsize) {
1680                 error = EINVAL;
1681                 goto out;
1682         }
1683
1684 #ifdef MAC
1685         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1686         if (error != 0)
1687                 goto out;
1688 #endif
1689
1690         mtx_lock(&shm_timestamp_lock);
1691         vfs_timestamp(&shmfd->shm_atime);
1692         mtx_unlock(&shm_timestamp_lock);
1693         vm_object_reference(shmfd->shm_object);
1694
1695         if (shm_largepage(shmfd)) {
1696                 writecnt = false;
1697                 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1698                     maxprot, flags, foff, td);
1699         } else {
1700                 if (writecnt) {
1701                         vm_pager_update_writecount(shmfd->shm_object, 0,
1702                             objsize);
1703                 }
1704                 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1705                     shmfd->shm_object, foff, writecnt, td);
1706         }
1707         if (error != 0) {
1708                 if (writecnt)
1709                         vm_pager_release_writecount(shmfd->shm_object, 0,
1710                             objsize);
1711                 vm_object_deallocate(shmfd->shm_object);
1712         }
1713 out:
1714         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1715         return (error);
1716 }
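
/*
 * The write-seal check above is visible from userspace.  A minimal
 * sketch, assuming a sealable object from memfd_create(3): once
 * F_SEAL_WRITE is applied, new shared writable mappings are refused
 * with EACCES, while private (copy-on-write) mappings stay writable.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

static void
seal_then_map(void)
{
        void *p;
        int fd;

        fd = memfd_create("seal-demo", MFD_ALLOW_SEALING);
        if (fd == -1)
                err(1, "memfd_create");
        if (ftruncate(fd, getpagesize()) == -1)
                err(1, "ftruncate");
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) == -1)
                err(1, "F_ADD_SEALS");
        p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED,
            fd, 0);
        if (p == MAP_FAILED)
                warn("MAP_SHARED");     /* EACCES expected */
        p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_PRIVATE,
            fd, 0);
        if (p == MAP_FAILED)
                err(1, "MAP_PRIVATE");  /* should succeed */
}
#endif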
1717
1718 static int
1719 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1720     struct thread *td)
1721 {
1722         struct shmfd *shmfd;
1723         int error;
1724
1725         error = 0;
1726         shmfd = fp->f_data;
1727         mtx_lock(&shm_timestamp_lock);
1728         /*
1729          * SUSv4 says that x bits of permission need not be affected.
1730          * Be consistent with our shm_open there.
1731          */
1732 #ifdef MAC
1733         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1734         if (error != 0)
1735                 goto out;
1736 #endif
1737         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1738             VADMIN, active_cred);
1739         if (error != 0)
1740                 goto out;
1741         shmfd->shm_mode = mode & ACCESSPERMS;
1742 out:
1743         mtx_unlock(&shm_timestamp_lock);
1744         return (error);
1745 }
1746
1747 static int
1748 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1749     struct thread *td)
1750 {
1751         struct shmfd *shmfd;
1752         int error;
1753
1754         error = 0;
1755         shmfd = fp->f_data;
1756         mtx_lock(&shm_timestamp_lock);
1757 #ifdef MAC
1758         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1759         if (error != 0)
1760                 goto out;
1761 #endif
1762         if (uid == (uid_t)-1)
1763                 uid = shmfd->shm_uid;
1764         if (gid == (gid_t)-1)
1765                 gid = shmfd->shm_gid;
1766         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1767             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1768             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1769                 goto out;
1770         shmfd->shm_uid = uid;
1771         shmfd->shm_gid = gid;
1772 out:
1773         mtx_unlock(&shm_timestamp_lock);
1774         return (error);
1775 }
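
/*
 * fchmod(2) and fchown(2) on a shm descriptor are routed to the two
 * handlers above.  A short hypothetical sketch (the fd and gid are
 * assumed to come from the caller): the mode is masked with
 * ACCESSPERMS, and passing (uid_t)-1 leaves the owner unchanged while
 * the group is updated, mirroring chown(2) semantics.
 */
#if 0
#include <sys/types.h>
#include <sys/stat.h>

#include <err.h>
#include <unistd.h>

static void
adjust(int fd, gid_t gid)
{
        if (fchmod(fd, 0640) == -1)
                err(1, "fchmod");
        if (fchown(fd, (uid_t)-1, gid) == -1)
                err(1, "fchown");
}
#endif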
1776
1777 /*
1778  * Helper routines to allow the backing object of a shared memory file
1779  * descriptor to be mapped in the kernel.
1780  */
1781 int
1782 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1783 {
1784         struct shmfd *shmfd;
1785         vm_offset_t kva, ofs;
1786         vm_object_t obj;
1787         int rv;
1788
1789         if (fp->f_type != DTYPE_SHM)
1790                 return (EINVAL);
1791         shmfd = fp->f_data;
1792         obj = shmfd->shm_object;
1793         VM_OBJECT_WLOCK(obj);
1794         /*
1795          * XXXRW: This validation is probably insufficient, and subject to
1796          * sign errors.  It should be fixed.
1797          */
1798         if (offset >= shmfd->shm_size ||
1799             offset + size > round_page(shmfd->shm_size)) {
1800                 VM_OBJECT_WUNLOCK(obj);
1801                 return (EINVAL);
1802         }
1803
1804         shmfd->shm_kmappings++;
1805         vm_object_reference_locked(obj);
1806         VM_OBJECT_WUNLOCK(obj);
1807
1808         /* Map the object into the kernel_map and wire it. */
1809         kva = vm_map_min(kernel_map);
1810         ofs = offset & PAGE_MASK;
1811         offset = trunc_page(offset);
1812         size = round_page(size + ofs);
1813         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1814             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1815             VM_PROT_READ | VM_PROT_WRITE, 0);
1816         if (rv == KERN_SUCCESS) {
1817                 rv = vm_map_wire(kernel_map, kva, kva + size,
1818                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1819                 if (rv == KERN_SUCCESS) {
1820                         *memp = (void *)(kva + ofs);
1821                         return (0);
1822                 }
1823                 vm_map_remove(kernel_map, kva, kva + size);
1824         } else
1825                 vm_object_deallocate(obj);
1826
1827         /* On failure, drop our mapping reference. */
1828         VM_OBJECT_WLOCK(obj);
1829         shmfd->shm_kmappings--;
1830         VM_OBJECT_WUNLOCK(obj);
1831
1832         return (vm_mmap_to_errno(rv));
1833 }
1834
1835 /*
1836  * We require the caller to unmap the entire entry.  This allows us to
1837  * safely decrement shm_kmappings when a mapping is removed.
1838  */
1839 int
1840 shm_unmap(struct file *fp, void *mem, size_t size)
1841 {
1842         struct shmfd *shmfd;
1843         vm_map_entry_t entry;
1844         vm_offset_t kva, ofs;
1845         vm_object_t obj;
1846         vm_pindex_t pindex;
1847         vm_prot_t prot;
1848         boolean_t wired;
1849         vm_map_t map;
1850         int rv;
1851
1852         if (fp->f_type != DTYPE_SHM)
1853                 return (EINVAL);
1854         shmfd = fp->f_data;
1855         kva = (vm_offset_t)mem;
1856         ofs = kva & PAGE_MASK;
1857         kva = trunc_page(kva);
1858         size = round_page(size + ofs);
1859         map = kernel_map;
1860         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1861             &obj, &pindex, &prot, &wired);
1862         if (rv != KERN_SUCCESS)
1863                 return (EINVAL);
1864         if (entry->start != kva || entry->end != kva + size) {
1865                 vm_map_lookup_done(map, entry);
1866                 return (EINVAL);
1867         }
1868         vm_map_lookup_done(map, entry);
1869         if (obj != shmfd->shm_object)
1870                 return (EINVAL);
1871         vm_map_remove(map, kva, kva + size);
1872         VM_OBJECT_WLOCK(obj);
1873         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1874         shmfd->shm_kmappings--;
1875         VM_OBJECT_WUNLOCK(obj);
1876         return (0);
1877 }
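
/*
 * A hypothetical in-kernel consumer of the two helpers above: map a
 * range of the object into the kernel map, use it, then unmap.  Note
 * that shm_unmap() must be passed the same pointer and size, since it
 * requires the unmap to cover the entire map entry.
 */
#if 0
static int
shm_use_wired(struct file *fp, off_t off, size_t len)
{
        void *mem;
        int error;

        error = shm_map(fp, len, off, &mem);
        if (error != 0)
                return (error);
        /* ... access len bytes at mem; the pages are wired ... */
        return (shm_unmap(fp, mem, len));
}
#endif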
1878
1879 static int
1880 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1881 {
1882         const char *path, *pr_path;
1883         size_t pr_pathlen;
1884         bool visible;
1885
1886         sx_assert(&shm_dict_lock, SA_LOCKED);
1887         kif->kf_type = KF_TYPE_SHM;
1888         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1889         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1890         if (shmfd->shm_path != NULL) {
1891                 path = shmfd->shm_path;
1892                 pr_path = curthread->td_ucred->cr_prison->pr_path;
1893                 if (strcmp(pr_path, "/") != 0) {
1894                         /* Return the jail-rooted pathname. */
1895                         pr_pathlen = strlen(pr_path);
1896                         visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1897                             path[pr_pathlen] == '/';
1898                         if (list && !visible)
1899                                 return (EPERM);
1900                         if (visible)
1901                                 path += pr_pathlen;
1902                 }
1903                 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1904         }
1907         return (0);
1908 }
1909
1910 static int
1911 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1912     struct filedesc *fdp __unused)
1913 {
1914         int res;
1915
1916         sx_slock(&shm_dict_lock);
1917         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1918         sx_sunlock(&shm_dict_lock);
1919         return (res);
1920 }
1921
1922 static int
1923 shm_add_seals(struct file *fp, int seals)
1924 {
1925         struct shmfd *shmfd;
1926         void *rl_cookie;
1927         vm_ooffset_t writemappings;
1928         int error, nseals;
1929
1930         error = 0;
1931         shmfd = fp->f_data;
1932         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1933             &shmfd->shm_mtx);
1934
1935         /* Even already-set seals should result in EPERM. */
1936         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1937                 error = EPERM;
1938                 goto out;
1939         }
1940         nseals = seals & ~shmfd->shm_seals;
1941         if ((nseals & F_SEAL_WRITE) != 0) {
1942                 if (shm_largepage(shmfd)) {
1943                         error = ENOTSUP;
1944                         goto out;
1945                 }
1946
1947                 /*
1948                  * The rangelock above prevents writable mappings from being
1949                  * added after we've started applying seals.  The RLOCK here
1950                  * is to avoid torn reads on ILP32 arches as unmapping/reducing
1951                  * writemappings will be done without a rangelock.
1952                  */
1953                 VM_OBJECT_RLOCK(shmfd->shm_object);
1954                 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1955                 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1956                 /* kmappings are also writable */
1957                 if (writemappings > 0) {
1958                         error = EBUSY;
1959                         goto out;
1960                 }
1961         }
1962         shmfd->shm_seals |= nseals;
1963 out:
1964         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1965         return (error);
1966 }
1967
1968 static int
1969 shm_get_seals(struct file *fp, int *seals)
1970 {
1971         struct shmfd *shmfd;
1972
1973         shmfd = fp->f_data;
1974         *seals = shmfd->shm_seals;
1975         return (0);
1976 }
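
/*
 * The seal-state transitions above, as seen through fcntl(2).  A sketch
 * assuming a sealable memfd_create(3) object: F_SEAL_WRITE is refused
 * with EBUSY while a shared writable mapping exists, and after
 * F_SEAL_SEAL any further F_ADD_SEALS fails with EPERM.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
seal_order(void)
{
        void *p;
        int fd;

        fd = memfd_create("seal-order", MFD_ALLOW_SEALING);
        if (fd == -1)
                err(1, "memfd_create");
        if (ftruncate(fd, getpagesize()) == -1)
                err(1, "ftruncate");
        p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED,
            fd, 0);
        if (p == MAP_FAILED)
                err(1, "mmap");
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) == -1)
                warn("F_ADD_SEALS");    /* EBUSY expected */
        if (munmap(p, getpagesize()) == -1)
                err(1, "munmap");
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE | F_SEAL_SEAL) == -1)
                err(1, "F_ADD_SEALS");
        printf("seals: %#x\n", fcntl(fd, F_GET_SEALS));
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW) == -1)
                warn("F_ADD_SEALS");    /* EPERM expected */
}
#endif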
1977
1978 static int
1979 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
1980 {
1981         void *rl_cookie;
1982         struct shmfd *shmfd;
1983         size_t size;
1984         int error;
1985
1986         /* This assumes that the caller already checked for overflow. */
1987         error = 0;
1988         shmfd = fp->f_data;
1989         size = offset + len;
1990
1991         /*
1992          * Just grab the rangelock for the range that we may be attempting to
1993          * grow, rather than blocking read/write for regions we won't be
1994          * touching while this (potential) resize is in progress.  Other
1995          * attempts to resize the shmfd will have to take a write lock from 0 to
1996          * OFF_MAX, so this being potentially beyond the current usable range of
1997          * the shmfd is not necessarily a concern.  If other mechanisms are
1998          * added to grow a shmfd, this may need to be re-evaluated.
1999          */
2000         rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
2001             &shmfd->shm_mtx);
2002         if (size > shmfd->shm_size)
2003                 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2004         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2005         /* Translate to posix_fallocate(2) return value as needed. */
2006         if (error == ENOMEM)
2007                 error = ENOSPC;
2008         return (error);
2009 }
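
/*
 * shm_fallocate() backs posix_fallocate(2) on shm descriptors; it can
 * only grow the object, and allocation failure is reported as ENOSPC
 * as POSIX requires.  A minimal sketch (fd and len from the caller):
 */
#if 0
#include <sys/types.h>

#include <err.h>
#include <fcntl.h>

static void
reserve(int fd, off_t len)
{
        int error;

        /* posix_fallocate() returns the error number, not -1/errno. */
        error = posix_fallocate(fd, 0, len);
        if (error != 0)
                errc(1, error, "posix_fallocate");
}
#endif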
2010
2011 static int
2012 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2013 {
2014         struct shm_mapping *shmm;
2015         struct sbuf sb;
2016         struct kinfo_file kif;
2017         u_long i;
2018         ssize_t curlen;
2019         int error, error2;
2020
2021         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2022         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2023         curlen = 0;
2024         error = 0;
2025         sx_slock(&shm_dict_lock);
2026         for (i = 0; i < shm_hash + 1; i++) {
2027                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2028                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2029                             &kif, true);
2030                         if (error == EPERM) {
2031                                 error = 0;
2032                                 continue;
2033                         }
2034                         if (error != 0)
2035                                 break;
2036                         pack_kinfo(&kif);
2037                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2038                             0 : ENOMEM;
2039                         if (error != 0)
2040                                 break;
2041                         curlen += kif.kf_structsize;
2042                 }
2043         }
2044         sx_sunlock(&shm_dict_lock);
2045         error2 = sbuf_finish(&sb);
2046         sbuf_delete(&sb);
2047         return (error != 0 ? error : error2);
2048 }
2049
2050 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2051     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2052     NULL, 0, sysctl_posix_shm_list, "",
2053     "POSIX SHM list");
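
/*
 * A sketch of consuming this sysctl the way posixshmcontrol(1) does:
 * the buffer is a packed sequence of variable-length struct kinfo_file
 * records, with kf_structsize giving each record's length.  Sizing and
 * error handling are simplified for illustration.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void
list_posix_shm(void)
{
        struct kinfo_file *kif;
        char *buf, *bp, *ep;
        size_t len;

        if (sysctlbyname("kern.ipc.posix_shm_list", NULL, &len,
            NULL, 0) != 0)
                err(1, "sysctl size");
        len *= 2;               /* slack in case the list grows */
        if ((buf = malloc(len)) == NULL)
                err(1, "malloc");
        if (sysctlbyname("kern.ipc.posix_shm_list", buf, &len,
            NULL, 0) != 0)
                err(1, "sysctl");
        for (bp = buf, ep = buf + len; bp < ep; bp += kif->kf_structsize) {
                kif = (struct kinfo_file *)(void *)bp;
                if (kif->kf_structsize == 0)
                        break;
                printf("%s mode %o size %jd\n", kif->kf_path,
                    kif->kf_un.kf_file.kf_file_mode,
                    (intmax_t)kif->kf_un.kf_file.kf_file_size);
        }
        free(buf);
}
#endif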
2054
2055 int
2056 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2057     struct filecaps *caps)
2058 {
2059
2060         return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2061 }
2062
2063 /*
2064  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2065  * caller, and libc will enforce it for the traditional shm_open() call.  This
2066  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
2067  * interface also includes a 'name' argument that is currently unused, but could
2068  * potentially be exported later via some interface for debugging purposes.
2069  * From the kernel's perspective, it is optional.  Individual consumers like
2070  * memfd_create() may require it in order to be compatible with other systems
2071  * implementing the same function.
2072  */
2073 int
2074 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2075 {
2076
2077         return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2078             uap->shmflags, NULL, uap->name));
2079 }
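
/*
 * For comparison, libc's memfd_create(3) is one such consumer: it calls
 * shm_open2() with SHM_ANON, forwards the name for debugging, and
 * requests SHM_ALLOW_SEALING when the caller passes MFD_ALLOW_SEALING.
 * A minimal userspace sketch:
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

#include <err.h>
#include <unistd.h>

static int
make_sealable_buf(size_t len)
{
        int fd;

        fd = memfd_create("buf", MFD_CLOEXEC | MFD_ALLOW_SEALING);
        if (fd == -1)
                err(1, "memfd_create");
        if (ftruncate(fd, (off_t)len) == -1)
                err(1, "ftruncate");
        return (fd);
}
#endif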