/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
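
/*
 * For orientation, a minimal userspace sketch of the interface this
 * file implements (the "/myshm" path and the 4096-byte size are only
 * examples; error handling elided):
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	...
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/myshm");
 *
 * Passing SHM_ANON instead of a path yields an anonymous object with
 * no name to unlink.
 */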

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
        char            *sm_path;
        Fnv32_t         sm_fnv;
        struct shmfd    *sm_shmfd;
        LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])

static void     shm_init(void *arg);
static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static void     shm_doremove(struct shm_mapping *map);
static int      shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);
static int      shm_deallocate(struct shmfd *shmfd, off_t *offset,
    off_t *length, int flags);

static fo_rdwr_t        shm_read;
static fo_rdwr_t        shm_write;
static fo_truncate_t    shm_truncate;
static fo_ioctl_t       shm_ioctl;
static fo_stat_t        shm_stat;
static fo_close_t       shm_close;
static fo_chmod_t       shm_chmod;
static fo_chown_t       shm_chown;
static fo_seek_t        shm_seek;
static fo_fill_kinfo_t  shm_fill_kinfo;
static fo_mmap_t        shm_mmap;
static fo_get_seals_t   shm_get_seals;
static fo_add_seals_t   shm_add_seals;
static fo_fallocate_t   shm_fallocate;
static fo_fspacectl_t   shm_fspacectl;

/* File descriptor operations. */
struct fileops shm_ops = {
        .fo_read = shm_read,
        .fo_write = shm_write,
        .fo_truncate = shm_truncate,
        .fo_ioctl = shm_ioctl,
        .fo_poll = invfo_poll,
        .fo_kqfilter = invfo_kqfilter,
        .fo_stat = shm_stat,
        .fo_close = shm_close,
        .fo_chmod = shm_chmod,
        .fo_chown = shm_chown,
        .fo_sendfile = vn_sendfile,
        .fo_seek = shm_seek,
        .fo_fill_kinfo = shm_fill_kinfo,
        .fo_mmap = shm_mmap,
        .fo_get_seals = shm_get_seals,
        .fo_add_seals = shm_add_seals,
        .fo_fallocate = shm_fallocate,
        .fo_fspacectl = shm_fspacectl,
        .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
};

FEATURE(posix_shm, "POSIX shared memory");

static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "");

static int largepage_reclaim_tries = 1;
SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
    CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
    "Number of contig reclaims before giving up for default alloc policy");
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
        vm_page_t m;
        vm_pindex_t idx;
        size_t tlen;
        int error, offset, rv;

        idx = OFF_TO_IDX(uio->uio_offset);
        offset = uio->uio_offset & PAGE_MASK;
        tlen = MIN(PAGE_SIZE - offset, len);

        rv = vm_page_grab_valid_unlocked(&m, obj, idx,
            VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
        if (rv == VM_PAGER_OK)
                goto found;

        /*
         * Read I/O without either a corresponding resident page or swap
         * page: use zero_region.  This is intended to avoid instantiating
         * pages on read from a sparse region.
         */
        VM_OBJECT_WLOCK(obj);
        m = vm_page_lookup(obj, idx);
        if (uio->uio_rw == UIO_READ && m == NULL &&
            !vm_pager_has_page(obj, idx, NULL, NULL)) {
                VM_OBJECT_WUNLOCK(obj);
                return (uiomove(__DECONST(void *, zero_region), tlen, uio));
        }

        /*
         * Although the tmpfs vnode lock is held here, it is
         * nonetheless safe to sleep waiting for a free page.  The
         * pageout daemon does not need to acquire the tmpfs vnode
         * lock to page out obj's pages because obj is an OBJT_SWAP
         * type object.
         */
        rv = vm_page_grab_valid(&m, obj, idx,
            VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
        if (rv != VM_PAGER_OK) {
                VM_OBJECT_WUNLOCK(obj);
                if (bootverbose) {
                        printf("uiomove_object: vm_obj %p idx %jd "
                            "pager error %d\n", obj, idx, rv);
                }
                return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
        }
        VM_OBJECT_WUNLOCK(obj);

found:
        error = uiomove_fromphys(&m, offset, tlen, uio);
        if (uio->uio_rw == UIO_WRITE && error == 0)
                vm_page_set_dirty(m);
        vm_page_activate(m);
        vm_page_sunbusy(m);

        return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
        ssize_t resid;
        size_t len;
        int error;

        error = 0;
        while ((resid = uio->uio_resid) > 0) {
                if (obj_size <= uio->uio_offset)
                        break;
                len = MIN(obj_size - uio->uio_offset, resid);
                if (len == 0)
                        break;
                error = uiomove_object_page(obj, len, uio);
                if (error != 0 || resid == uio->uio_resid)
                        break;
        }
        return (error);
}
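
/*
 * A worked example of the chunking above (illustrative numbers, not
 * from the source): with PAGE_SIZE == 4096, a 10000-byte transfer
 * starting at offset 100 is carried out by uiomove_object_page() as
 * three calls moving 3996 bytes (the rest of the first page), 4096
 * bytes (a full page), and 1908 bytes (the head of the last page).
 * The loop also stops when uio_resid fails to shrink, so a transfer
 * that cannot make progress does not spin.
 */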

static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
        vm_page_t m __diagused;
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind == 0 || pidx >= object->size)
                return (VM_PAGER_FAIL);
        *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

        /*
         * We only busy the first page in the superpage run.  It is
         * useless to busy the whole run since we only remove a full
         * superpage, and it takes too long to busy e.g. 512 * 512 ==
         * 262144 pages constituting a 1G amd64 superpage.
         */
        m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
        MPASS(m != NULL);

        *last = *first + atop(pagesizes[psind]) - 1;
        return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind == 0 || pindex >= object->size)
                return (FALSE);
        if (before != NULL) {
                *before = pindex - rounddown2(pindex, pagesizes[psind] /
                    PAGE_SIZE);
        }
        if (after != NULL) {
                *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
                    pindex;
        }
        return (TRUE);
}
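
/*
 * Worked example (illustrative; assumes pagesizes[psind] / PAGE_SIZE ==
 * 512, i.e. 2M superpages with 4K base pages): for pindex 1000,
 * rounddown2(1000, 512) == 512 and roundup2(1000, 512) == 1024, so
 * *before == 488 and *after == 24, the distances to the start and end
 * of the enclosing superpage run.
 */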

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind != 0) {
                atomic_subtract_long(&count_largepages[psind],
                    object->size / (pagesizes[psind] / PAGE_SIZE));
                vm_wire_sub(object->size);
        } else {
                KASSERT(object->size == 0,
                    ("largepage phys obj %p not initialized but size %#jx > 0",
                    object, (uintmax_t)object->size));
        }
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
        .phys_pg_populate =     shm_largepage_phys_populate,
        .phys_pg_haspage =      shm_largepage_phys_haspage,
        .phys_pg_ctor =         shm_largepage_phys_ctor,
        .phys_pg_dtor =         shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
        return (shmfd->shm_object->type == OBJT_PHYS);
}

static void
shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
{
        struct shmfd *shm;
        vm_size_t c;

        swap_pager_freespace(obj, start, size, &c);
        if (c == 0)
                return;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        KASSERT(shm->shm_pages >= c,
            ("shm %p pages %jd free %jd", shm,
            (uintmax_t)shm->shm_pages, (uintmax_t)c));
        shm->shm_pages -= c;
}

static void
shm_page_inserted(vm_object_t obj, vm_page_t m)
{
        struct shmfd *shm;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
                shm->shm_pages += 1;
}

static void
shm_page_removed(vm_object_t obj, vm_page_t m)
{
        struct shmfd *shm;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
                KASSERT(shm->shm_pages >= 1,
                    ("shm %p pages %jd free 1", shm,
                    (uintmax_t)shm->shm_pages));
                shm->shm_pages -= 1;
        }
}

static struct pagerops shm_swap_pager_ops = {
        .pgo_kvme_type = KVME_TYPE_SWAP,
        .pgo_freespace = shm_pager_freespace,
        .pgo_page_inserted = shm_page_inserted,
        .pgo_page_removed = shm_page_removed,
};
static int shmfd_pager_type = -1;

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
        struct shmfd *shmfd;
        off_t foffset;
        int error;

        shmfd = fp->f_data;
        foffset = foffset_lock(fp, 0);
        error = 0;
        switch (whence) {
        case L_INCR:
                if (foffset < 0 ||
                    (offset > 0 && foffset > OFF_MAX - offset)) {
                        error = EOVERFLOW;
                        break;
                }
                offset += foffset;
                break;
        case L_XTND:
                if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
                        error = EOVERFLOW;
                        break;
                }
                offset += shmfd->shm_size;
                break;
        case L_SET:
                break;
        default:
                error = EINVAL;
        }
        if (error == 0) {
                if (offset < 0 || offset > shmfd->shm_size)
                        error = EINVAL;
                else
                        td->td_uretoff.tdu_off = offset;
        }
        foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
        return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
            uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;
        off_t size;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
                return (EINVAL);
        foffset_lock_uio(fp, uio, flags);
        if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
                /*
                 * Overflow is only an error if we're supposed to expand on
                 * write.  Otherwise, we'll just truncate the write to the
                 * size of the file, which can only grow up to OFF_MAX.
                 */
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
                        foffset_unlock_uio(fp, uio, flags);
                        return (EFBIG);
                }

                size = shmfd->shm_size;
        } else {
                size = uio->uio_offset + uio->uio_resid;
        }
        if ((flags & FOF_OFFSET) == 0) {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
        } else {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
                    size, &shmfd->shm_mtx);
        }
        if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
                error = EPERM;
        } else {
                error = 0;
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
                    size > shmfd->shm_size) {
                        error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
                }
                if (error == 0)
                        error = uiomove_object(shmfd->shm_object,
                            shmfd->shm_size, uio);
        }
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        struct shm_largepage_conf *conf;
        void *rl_cookie;

        shmfd = fp->f_data;
        switch (com) {
        case FIONBIO:
        case FIOASYNC:
                /*
                 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, just like
                 * it would on an unlinked regular file.
                 */
                return (0);
        case FIOSSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
                if (shmfd->shm_lp_psind != 0 &&
                    conf->psind != shmfd->shm_lp_psind)
                        return (EINVAL);
                if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
                    pagesizes[conf->psind] == 0)
                        return (EINVAL);
                if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
                        return (EINVAL);

                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
                shmfd->shm_lp_psind = conf->psind;
                shmfd->shm_lp_alloc_policy = conf->alloc_policy;
                shmfd->shm_object->un_pager.phys.data_val = conf->psind;
                rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
                return (0);
        case FIOGSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
                rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
                conf->psind = shmfd->shm_lp_psind;
                conf->alloc_policy = shmfd->shm_lp_alloc_policy;
                rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
                return (0);
        default:
                return (ENOTTY);
        }
}
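
/*
 * Userspace sketch of the largepage ioctls above (illustrative; assumes
 * psind 1 is a valid superpage size, e.g. 2M on amd64; error handling
 * elided):
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	int fd = shm_open2(SHM_ANON, O_RDWR, 0600, SHM_LARGEPAGE, NULL);
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);
 *	ftruncate(fd, 2 * 1024 * 1024);
 *
 * The shm_create_largepage(3) library routine wraps a sequence like
 * this one.
 */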

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;

#ifdef MAC
        error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif

        /*
         * Attempt to return sane-ish values for fstat() on a memory file
         * descriptor.
         */
        bzero(sb, sizeof(*sb));
        sb->st_blksize = PAGE_SIZE;
        sb->st_size = shmfd->shm_size;
        mtx_lock(&shm_timestamp_lock);
        sb->st_atim = shmfd->shm_atime;
        sb->st_ctim = shmfd->shm_ctime;
        sb->st_mtim = shmfd->shm_mtime;
        sb->st_birthtim = shmfd->shm_birthtime;
        sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
        sb->st_uid = shmfd->shm_uid;
        sb->st_gid = shmfd->shm_gid;
        mtx_unlock(&shm_timestamp_lock);
        sb->st_dev = shm_dev_ino;
        sb->st_ino = shmfd->shm_ino;
        sb->st_nlink = shmfd->shm_object->ref_count;
        if (shm_largepage(shmfd)) {
                sb->st_blocks = shmfd->shm_object->size /
                    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
        } else {
                sb->st_blocks = shmfd->shm_pages;
        }

        return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
        struct shmfd *shmfd;

        shmfd = fp->f_data;
        fp->f_data = NULL;
        shm_drop(shmfd);

        return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
        int error;
        char *path;
        const char *pr_path;
        size_t pr_pathlen;

        path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
        pr_path = td->td_ucred->cr_prison->pr_path;

        /* Construct a full pathname for jailed callers. */
        pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
            strlcpy(path, pr_path, MAXPATHLEN);
        error = copyinstr(userpath_in, path + pr_pathlen,
            MAXPATHLEN - pr_pathlen, NULL);
        if (error != 0)
                goto out;

#ifdef KTRACE
        if (KTRPOINT(curthread, KTR_NAMEI))
                ktrnamei(path);
#endif

        /* Require paths to start with a '/' character. */
        if (path[pr_pathlen] != '/') {
                error = EINVAL;
                goto out;
        }

        *path_out = path;

out:
        if (error != 0)
                free(path, M_SHMFD);

        return (error);
}

static int
shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
    int end)
{
        vm_page_t m;
        int rv;

        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(base >= 0, ("%s: base %d", __func__, base));
        KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
            end));

retry:
        m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
        if (m != NULL) {
                MPASS(vm_page_all_valid(m));
        } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
                m = vm_page_alloc(object, idx,
                    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
                if (m == NULL)
                        goto retry;
                vm_object_pip_add(object, 1);
                VM_OBJECT_WUNLOCK(object);
                rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
                VM_OBJECT_WLOCK(object);
                vm_object_pip_wakeup(object);
                if (rv == VM_PAGER_OK) {
                        /*
                         * Since the page was not resident, and therefore not
                         * recently accessed, immediately enqueue it for
                         * asynchronous laundering.  The current operation is
                         * not regarded as an access.
                         */
                        vm_page_launder(m);
                } else {
                        vm_page_free(m);
                        VM_OBJECT_WUNLOCK(object);
                        return (EIO);
                }
        }
        if (m != NULL) {
                pmap_zero_page_area(m, base, end - base);
                KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
                    __func__, m));
                vm_page_set_dirty(m);
                vm_page_xunbusy(m);
        }

        return (0);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_pindex_t nobjsize;
        vm_ooffset_t delta;
        int base, error;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
        if (length == shmfd->shm_size)
                return (0);
        nobjsize = OFF_TO_IDX(length + PAGE_MASK);

        /* Are we shrinking?  If so, trim the end. */
        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);

                /*
                 * Disallow any requests to shrink the size if this
                 * object is mapped into the kernel.
                 */
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);

                /*
                 * Zero the truncated part of the last page.
                 */
                base = length & PAGE_MASK;
                if (base != 0) {
                        error = shm_partial_page_invalidate(object,
                            OFF_TO_IDX(length), base, PAGE_SIZE);
                        if (error)
                                return (error);
                }
                delta = IDX_TO_OFF(object->size - nobjsize);

                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);

                /* Free the swap accounted for shm */
                swap_release_by_cred(delta, object->cred);
                object->charge -= delta;
        } else {
                if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                        return (EPERM);

                /* Try to reserve additional swap space. */
                delta = IDX_TO_OFF(nobjsize - object->size);
                if (!swap_reserve_by_cred(delta, object->cred))
                        return (ENOMEM);
                object->charge += delta;
        }
        shmfd->shm_size = length;
        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_ctime);
        shmfd->shm_mtime = shmfd->shm_ctime;
        mtx_unlock(&shm_timestamp_lock);
        object->size = nobjsize;
        return (0);
}

static int
shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t newobjsz;
        vm_pindex_t oldobjsz __unused;
        int aflags, error, i, psind, try;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);

        oldobjsz = object->size;
        newobjsz = OFF_TO_IDX(length);
        if (length == shmfd->shm_size)
                return (0);
        psind = shmfd->shm_lp_psind;
        if (psind == 0 && length != 0)
                return (EINVAL);
        if ((length & (pagesizes[psind] - 1)) != 0)
                return (EINVAL);

        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);
                return (ENOTSUP);       /* Pages are unmanaged. */
#if 0
                vm_object_page_remove(object, newobjsz, oldobjsz, 0);
                object->size = newobjsz;
                shmfd->shm_size = length;
                return (0);
#endif
        }

        if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                return (EPERM);

        aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
        if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
                aflags |= VM_ALLOC_WAITFAIL;
        try = 0;

        /*
         * Extend shmfd and object, keeping all already fully
         * allocated large pages intact even on error, because a
         * dropped object lock might have allowed them to be mapped.
         */
        while (object->size < newobjsz) {
                m = vm_page_alloc_contig(object, object->size, aflags,
                    pagesizes[psind] / PAGE_SIZE, 0, ~0,
                    pagesizes[psind], 0,
                    VM_MEMATTR_DEFAULT);
                if (m == NULL) {
                        VM_OBJECT_WUNLOCK(object);
                        if (shmfd->shm_lp_alloc_policy ==
                            SHM_LARGEPAGE_ALLOC_NOWAIT ||
                            (shmfd->shm_lp_alloc_policy ==
                            SHM_LARGEPAGE_ALLOC_DEFAULT &&
                            try >= largepage_reclaim_tries)) {
                                VM_OBJECT_WLOCK(object);
                                return (ENOMEM);
                        }
                        error = vm_page_reclaim_contig(aflags,
                            pagesizes[psind] / PAGE_SIZE, 0, ~0,
                            pagesizes[psind], 0) ? 0 :
                            vm_wait_intr(object);
                        if (error != 0) {
                                VM_OBJECT_WLOCK(object);
                                return (error);
                        }
                        try++;
                        VM_OBJECT_WLOCK(object);
                        continue;
                }
                try = 0;
                for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
                        if ((m[i].flags & PG_ZERO) == 0)
                                pmap_zero_page(&m[i]);
                        vm_page_valid(&m[i]);
                        vm_page_xunbusy(&m[i]);
                }
                object->size += OFF_TO_IDX(pagesizes[psind]);
                shmfd->shm_size += pagesizes[psind];
                atomic_add_long(&count_largepages[psind], 1);
                vm_wire_add(atop(pagesizes[psind]));
        }
        return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        int error;

        VM_OBJECT_WLOCK(shmfd->shm_object);
        error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
            length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
            rl_cookie);
        VM_OBJECT_WUNLOCK(shmfd->shm_object);
        return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
        void *rl_cookie;
        int error;

        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
            &shmfd->shm_mtx);
        error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
{
        struct shmfd *shmfd;
        vm_object_t obj;

        shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
        shmfd->shm_size = 0;
        shmfd->shm_uid = ucred->cr_uid;
        shmfd->shm_gid = ucred->cr_gid;
        shmfd->shm_mode = mode;
        if (largepage) {
                shmfd->shm_object = phys_pager_allocate(NULL,
                    &shm_largepage_phys_ops, NULL, shmfd->shm_size,
                    VM_PROT_DEFAULT, 0, ucred);
                shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
        } else {
                obj = vm_pager_allocate(shmfd_pager_type, NULL,
                    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
                VM_OBJECT_WLOCK(obj);
                obj->un_pager.swp.swp_priv = shmfd;
                VM_OBJECT_WUNLOCK(obj);
                shmfd->shm_object = obj;
        }
        KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
        vfs_timestamp(&shmfd->shm_birthtime);
        shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
            shmfd->shm_birthtime;
        shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
        refcount_init(&shmfd->shm_refs, 1);
        mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
        rangelock_init(&shmfd->shm_rl);
#ifdef MAC
        mac_posixshm_init(shmfd);
        mac_posixshm_create(ucred, shmfd);
#endif

        return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

        refcount_acquire(&shmfd->shm_refs);
        return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{
        vm_object_t obj;

        if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
                mac_posixshm_destroy(shmfd);
#endif
                rangelock_destroy(&shmfd->shm_rl);
                mtx_destroy(&shmfd->shm_mtx);
                obj = shmfd->shm_object;
                if (!shm_largepage(shmfd)) {
                        VM_OBJECT_WLOCK(obj);
                        obj->un_pager.swp.swp_priv = NULL;
                        VM_OBJECT_WUNLOCK(obj);
                }
                vm_object_deallocate(obj);
                free(shmfd, M_SHMFD);
        }
}
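
/*
 * Reference counting sketch, summarizing the two routines above:
 * shm_alloc() returns an object with shm_refs == 1; the dictionary
 * (via shm_insert()) and every consumer take additional references
 * with shm_hold(); shm_drop() releases one reference and tears the
 * object down when the last reference goes away.  For example:
 *
 *	shmfd = shm_alloc(ucred, mode, false);	shm_refs == 1
 *	shm_insert(path, fnv, shmfd);		shm_refs == 2
 *	shm_drop(shmfd);			shm_refs == 1, dictionary
 *						reference remains
 */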

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
        accmode_t accmode;
        int error;

        accmode = 0;
        if (flags & FREAD)
                accmode |= VREAD;
        if (flags & FWRITE)
                accmode |= VWRITE;
        mtx_lock(&shm_timestamp_lock);
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
            accmode, ucred);
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

static void
shm_init(void *arg)
{
        char name[32];
        int i;

        mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
        sx_init(&shm_dict_lock, "shm dictionary");
        shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
        new_unrhdr64(&shm_ino_unr, 1);
        shm_dev_ino = devfs_alloc_cdp_inode();
        KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
        shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
            OBJT_SWAP);
        MPASS(shmfd_pager_type != -1);

        for (i = 1; i < MAXPAGESIZES; i++) {
                if (pagesizes[i] == 0)
                        break;
#define M       (1024 * 1024)
#define G       (1024 * M)
                if (pagesizes[i] >= G)
                        snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
                else if (pagesizes[i] >= M)
                        snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
                else
                        snprintf(name, sizeof(name), "%lu", pagesizes[i]);
#undef G
#undef M
                SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
                    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
                    "number of non-transient largepages allocated");
        }
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
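
/*
 * The loop above exports one read-only counter per supported superpage
 * size, named after the size.  On a machine with 2M superpages the
 * result might look like this (hypothetical output):
 *
 *	$ sysctl vm.largepages
 *	vm.largepages.2M: 0
 */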

/*
 * Remove all shared memory objects that belong to a prison.
 */
void
shm_remove_prison(struct prison *pr)
{
        struct shm_mapping *shmm, *tshmm;
        u_long i;

        sx_xlock(&shm_dict_lock);
        for (i = 0; i < shm_hash + 1; i++) {
                LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
                        if (shmm->sm_shmfd->shm_object->cred &&
                            shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
                                shm_doremove(shmm);
                }
        }
        sx_xunlock(&shm_dict_lock);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
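
/*
 * For example (sketch; "/myshm" is an invented name): a lookup of
 * "/myshm" computes fnv = fnv_32_str("/myshm", FNV1_32_INIT) and scans
 * only the bucket SHM_HASH(fnv), i.e. &shm_dictionary[fnv & shm_hash];
 * entries compare the cached hash first and fall back to strcmp() to
 * resolve collisions, as shm_lookup() below shows.
 */
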
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
        struct shm_mapping *map;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0)
                        return (map->sm_shmfd);
        }

        return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
        struct shm_mapping *map;

        map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
        map->sm_path = path;
        map->sm_fnv = fnv;
        map->sm_shmfd = shm_hold(shmfd);
        shmfd->shm_path = path;
        LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
        struct shm_mapping *map;
        int error;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
                        error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
                        if (error)
                                return (error);
#endif
                        error = shm_access(map->sm_shmfd, ucred,
                            FREAD | FWRITE);
                        if (error)
                                return (error);
                        shm_doremove(map);
                        return (0);
                }
        }

        return (ENOENT);
}

static void
shm_doremove(struct shm_mapping *map)
{
        map->sm_shmfd->shm_path = NULL;
        LIST_REMOVE(map, sm_link);
        shm_drop(map->sm_shmfd);
        free(map->sm_path, M_SHMFD);
        free(map, M_SHMFD);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
        struct pwddesc *pdp;
        struct shmfd *shmfd;
        struct file *fp;
        char *path;
        void *rl_cookie;
        Fnv32_t fnv;
        mode_t cmode;
        int error, fd, initial_seals;
        bool largepage;

        if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
            SHM_LARGEPAGE)) != 0)
                return (EINVAL);

        initial_seals = F_SEAL_SEAL;
        if ((shmflags & SHM_ALLOW_SEALING) != 0)
                initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
        /*
         * shm_open(2) is only allowed for anonymous objects.
         */
        if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
                return (ECAPMODE);
#endif

        AUDIT_ARG_FFLAGS(flags);
        AUDIT_ARG_MODE(mode);

        if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
                return (EINVAL);

        if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
                return (EINVAL);

        largepage = (shmflags & SHM_LARGEPAGE) != 0;
        if (largepage && !PMAP_HAS_LARGEPAGES)
                return (ENOTTY);

        /*
         * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
         * If the decision is made later to allow additional seals, care must be
         * taken below to ensure that the seals are properly set if the shmfd
         * already existed -- this currently assumes that only F_SEAL_SEAL can
         * be set and doesn't take further precautions to ensure the validity of
         * the seals being added with respect to current mappings.
         */
        if ((initial_seals & ~F_SEAL_SEAL) != 0)
                return (EINVAL);

        pdp = td->td_proc->p_pd;
        cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;

        /*
         * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
         * by POSIX.  We allow it to be unset here so that an in-kernel
         * interface may be written as a thin layer around shm, optionally not
         * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
         * in sys_shm_open() to keep this implementation compliant.
         */
        error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
        if (error)
                return (error);

        /* A SHM_ANON path pointer creates an anonymous object. */
        if (userpath == SHM_ANON) {
                /* A read-only anonymous object is pointless. */
                if ((flags & O_ACCMODE) == O_RDONLY) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (EINVAL);
                }
                shmfd = shm_alloc(td->td_ucred, cmode, largepage);
                shmfd->shm_seals = initial_seals;
                shmfd->shm_flags = shmflags;
        } else {
                error = shm_copyin_path(td, userpath, &path);
                if (error != 0) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }

                AUDIT_ARG_UPATH1_CANON(path);
                fnv = fnv_32_str(path, FNV1_32_INIT);
                sx_xlock(&shm_dict_lock);
                shmfd = shm_lookup(path, fnv);
                if (shmfd == NULL) {
                        /* Object does not yet exist, create it if requested. */
                        if (flags & O_CREAT) {
#ifdef MAC
                                error = mac_posixshm_check_create(td->td_ucred,
                                    path);
                                if (error == 0) {
#endif
                                        shmfd = shm_alloc(td->td_ucred, cmode,
                                            largepage);
                                        shmfd->shm_seals = initial_seals;
                                        shmfd->shm_flags = shmflags;
                                        shm_insert(path, fnv, shmfd);
#ifdef MAC
                                }
#endif
                        } else {
                                free(path, M_SHMFD);
                                error = ENOENT;
                        }
                } else {
                        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                            &shmfd->shm_mtx);

                        /*
                         * kern_shm_open() likely shouldn't ever error out on
                         * trying to set a seal that already exists, unlike
                         * F_ADD_SEALS.  This would break terribly as
                         * shm_open(2) actually sets F_SEAL_SEAL to maintain
                         * historical behavior where the underlying file could
                         * not be sealed.
                         */
                        initial_seals &= ~shmfd->shm_seals;

                        /*
                         * Object already exists, obtain a new
                         * reference if requested and permitted.
                         */
                        free(path, M_SHMFD);

                        /*
                         * initial_seals can't set additional seals if
                         * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is
                         * set, then we've already removed that one from
                         * initial_seals.  This is currently redundant as we
                         * only allow setting F_SEAL_SEAL at creation time, but
                         * it's cheap to check and decreases the effort required
                         * to allow additional seals.
                         */
                        if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
                            initial_seals != 0)
                                error = EPERM;
                        else if ((flags & (O_CREAT | O_EXCL)) ==
                            (O_CREAT | O_EXCL))
                                error = EEXIST;
                        else if (shmflags != 0 && shmflags != shmfd->shm_flags)
                                error = EINVAL;
                        else {
#ifdef MAC
                                error = mac_posixshm_check_open(td->td_ucred,
                                    shmfd, FFLAGS(flags & O_ACCMODE));
                                if (error == 0)
#endif
                                error = shm_access(shmfd, td->td_ucred,
                                    FFLAGS(flags & O_ACCMODE));
                        }

                        /*
                         * Truncate the file back to zero length if
                         * O_TRUNC was specified and the object was
                         * opened with read/write.
                         */
                        if (error == 0 &&
                            (flags & (O_ACCMODE | O_TRUNC)) ==
                            (O_RDWR | O_TRUNC)) {
                                VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
                                error = mac_posixshm_check_truncate(
                                        td->td_ucred, fp->f_cred, shmfd);
                                if (error == 0)
#endif
                                        error = shm_dotruncate_locked(shmfd, 0,
                                            rl_cookie);
                                VM_OBJECT_WUNLOCK(shmfd->shm_object);
                        }
                        if (error == 0) {
                                /*
                                 * Currently we only allow F_SEAL_SEAL to be
                                 * set initially.  As noted above, this would
                                 * need to be reworked should that change.
                                 */
                                shmfd->shm_seals |= initial_seals;
                                shm_hold(shmfd);
                        }
                        rangelock_unlock(&shmfd->shm_rl, rl_cookie,
                            &shmfd->shm_mtx);
                }
                sx_xunlock(&shm_dict_lock);

                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }
        }

        finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

        td->td_retval[0] = fd;
        fdrop(fp, td);

        return (0);
}
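
/*
 * Userspace sketch of the sealing behavior above (assumes the
 * shm_open2(2) syscall interface that this function backs; error
 * handling elided):
 *
 *	int fd = shm_open2(SHM_ANON, O_RDWR, 0600, SHM_ALLOW_SEALING,
 *	    NULL);
 *	ftruncate(fd, len);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *
 * Without SHM_ALLOW_SEALING the object starts with F_SEAL_SEAL
 * applied, so later F_ADD_SEALS requests are refused.
 */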

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

        return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
            uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
        char *path;
        Fnv32_t fnv;
        int error;

        error = shm_copyin_path(td, uap->path, &path);
        if (error != 0)
                return (error);

        AUDIT_ARG_UPATH1_CANON(path);
        fnv = fnv_32_str(path, FNV1_32_INIT);
        sx_xlock(&shm_dict_lock);
        error = shm_remove(path, fnv, td->td_ucred);
        sx_xunlock(&shm_dict_lock);
        free(path, M_SHMFD);

        return (error);
}
1387
1388 int
1389 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1390 {
1391         char *path_from = NULL, *path_to = NULL;
1392         Fnv32_t fnv_from, fnv_to;
1393         struct shmfd *fd_from;
1394         struct shmfd *fd_to;
1395         int error;
1396         int flags;
1397
1398         flags = uap->flags;
1399         AUDIT_ARG_FFLAGS(flags);
1400
1401         /*
1402          * Make sure the user passed only valid flags.
1403          * If you add a new flag, please add a new term here.
1404          */
1405         if ((flags & ~(
1406             SHM_RENAME_NOREPLACE |
1407             SHM_RENAME_EXCHANGE
1408             )) != 0) {
1409                 error = EINVAL;
1410                 goto out;
1411         }
1412
1413         /*
1414          * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1415          * force the user to choose one or the other.
1416          */
1417         if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1418             (flags & SHM_RENAME_EXCHANGE) != 0) {
1419                 error = EINVAL;
1420                 goto out;
1421         }
1422
1423         /* Renaming to or from anonymous makes no sense */
1424         if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1425                 error = EINVAL;
1426                 goto out;
1427         }
1428
1429         error = shm_copyin_path(td, uap->path_from, &path_from);
1430         if (error != 0)
1431                 goto out;
1432
1433         error = shm_copyin_path(td, uap->path_to, &path_to);
1434         if (error != 0)
1435                 goto out;
1436
1437         AUDIT_ARG_UPATH1_CANON(path_from);
1438         AUDIT_ARG_UPATH2_CANON(path_to);
1439
1440         /* Rename with from/to equal is a no-op */
1441         if (strcmp(path_from, path_to) == 0)
1442                 goto out;
1443
1444         fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1445         fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1446
1447         sx_xlock(&shm_dict_lock);
1448
1449         fd_from = shm_lookup(path_from, fnv_from);
1450         if (fd_from == NULL) {
1451                 error = ENOENT;
1452                 goto out_locked;
1453         }
1454
1455         fd_to = shm_lookup(path_to, fnv_to);
1456         if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1457                 error = EEXIST;
1458                 goto out_locked;
1459         }
1460
1461         /*
1462          * Unconditionally prevents shm_remove from invalidating the 'from'
1463          * shm's state.
1464          */
1465         shm_hold(fd_from);
1466         error = shm_remove(path_from, fnv_from, td->td_ucred);
1467
1468         /*
1469          * One of my assumptions failed if ENOENT (e.g. locking didn't
1470          * protect us)
1471          */
1472         KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1473             path_from));
1474         if (error != 0) {
1475                 shm_drop(fd_from);
1476                 goto out_locked;
1477         }
1478
1479         /*
1480          * If we are exchanging, we need to ensure the shm_remove below
1481          * doesn't invalidate the dest shm's state.
1482          */
1483         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1484                 shm_hold(fd_to);
1485
1486         /*
1487          * NOTE: if path_to is not already in the hash, c'est la vie;
1488          * it simply means we have nothing already at path_to to unlink.
1489          * That is the ENOENT case.
1490          *
1491          * If we somehow don't have access to unlink this guy, but
1492          * did for the shm at path_from, then relink the shm to path_from
1493          * and abort with EACCES.
1494          *
1495          * All other errors: that is weird; let's relink and abort the
1496          * operation.
1497          */
1498         error = shm_remove(path_to, fnv_to, td->td_ucred);
1499         if (error != 0 && error != ENOENT) {
1500                 shm_insert(path_from, fnv_from, fd_from);
1501                 shm_drop(fd_from);
1502                 /* Don't free path_from now, since the hash references it */
1503                 path_from = NULL;
1504                 goto out_locked;
1505         }
1506
1507         error = 0;
1508
1509         shm_insert(path_to, fnv_to, fd_from);
1510
1511         /* Don't free path_to now, since the hash references it */
1512         path_to = NULL;
1513
1514         /* We kept a ref when we removed, and incremented again in insert */
1515         shm_drop(fd_from);
1516         KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1517             fd_from->shm_refs));
1518
1519         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1520                 shm_insert(path_from, fnv_from, fd_to);
1521                 path_from = NULL;
1522                 shm_drop(fd_to);
1523                 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1524                     fd_to->shm_refs));
1525         }
1526
1527 out_locked:
1528         sx_xunlock(&shm_dict_lock);
1529
1530 out:
1531         free(path_from, M_SHMFD);
1532         free(path_to, M_SHMFD);
1533         return (error);
1534 }
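
/*
 * Usage sketch (illustrative only; not part of this file's code): the
 * flag handling above corresponds to the userland shm_rename(2)
 * interface roughly as follows.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *
 *	// Atomically rename; fails with EEXIST if "/bar" already exists.
 *	if (shm_rename("/foo", "/bar", SHM_RENAME_NOREPLACE) == -1)
 *		err(1, "shm_rename");
 *
 *	// Or atomically exchange the two objects; passing both flags at
 *	// once is rejected with EINVAL, as checked above.
 *	if (shm_rename("/foo", "/bar", SHM_RENAME_EXCHANGE) == -1)
 *		err(1, "shm_rename");
 */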
1535
1536 static int
1537 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1538     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1539     vm_ooffset_t foff, struct thread *td)
1540 {
1541         struct vmspace *vms;
1542         vm_map_entry_t next_entry, prev_entry;
1543         vm_offset_t align, mask, maxaddr;
1544         int docow, error, rv, try;
1545         bool curmap;
1546
1547         if (shmfd->shm_lp_psind == 0)
1548                 return (EINVAL);
1549
1550         /* MAP_PRIVATE mappings are not supported for large pages. */
1551         if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1552             MAP_NOCORE |
1553 #ifdef MAP_32BIT
1554             MAP_32BIT |
1555 #endif
1556             MAP_ALIGNMENT_MASK)) != 0)
1557                 return (EINVAL);
1558
1559         vms = td->td_proc->p_vmspace;
1560         curmap = map == &vms->vm_map;
1561         if (curmap) {
1562                 error = kern_mmap_racct_check(td, map, size);
1563                 if (error != 0)
1564                         return (error);
1565         }
1566
1567         docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1568         docow |= MAP_INHERIT_SHARE;
1569         if ((flags & MAP_NOCORE) != 0)
1570                 docow |= MAP_DISABLE_COREDUMP;
1571
1572         mask = pagesizes[shmfd->shm_lp_psind] - 1;
1573         if ((foff & mask) != 0)
1574                 return (EINVAL);
1575         maxaddr = vm_map_max(map);
1576 #ifdef MAP_32BIT
1577         if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1578                 maxaddr = MAP_32BIT_MAX_ADDR;
1579 #endif
1580         if (size == 0 || (size & mask) != 0 ||
1581             (*addr != 0 && ((*addr & mask) != 0 ||
1582             *addr + size < *addr || *addr + size > maxaddr)))
1583                 return (EINVAL);
1584
1585         align = flags & MAP_ALIGNMENT_MASK;
1586         if (align == 0) {
1587                 align = pagesizes[shmfd->shm_lp_psind];
1588         } else if (align == MAP_ALIGNED_SUPER) {
1589                 if (shmfd->shm_lp_psind != 1)
1590                         return (EINVAL);
1591                 align = pagesizes[1];
1592         } else {
1593                 align >>= MAP_ALIGNMENT_SHIFT;
1594                 align = 1ULL << align;
1595                 /* Also handles overflow. */
1596                 if (align < pagesizes[shmfd->shm_lp_psind])
1597                         return (EINVAL);
1598         }
1599
1600         vm_map_lock(map);
1601         if ((flags & MAP_FIXED) == 0) {
1602                 try = 1;
1603                 if (curmap && (*addr == 0 ||
1604                     (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1605                     *addr < round_page((vm_offset_t)vms->vm_daddr +
1606                     lim_max(td, RLIMIT_DATA))))) {
1607                         *addr = roundup2((vm_offset_t)vms->vm_daddr +
1608                             lim_max(td, RLIMIT_DATA),
1609                             pagesizes[shmfd->shm_lp_psind]);
1610                 }
1611 again:
1612                 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1613                 if (rv != KERN_SUCCESS) {
1614                         if (try == 1) {
1615                                 try = 2;
1616                                 *addr = vm_map_min(map);
1617                                 if ((*addr & mask) != 0)
1618                                         *addr = (*addr + mask) & ~mask;
1619                                 goto again;
1620                         }
1621                         goto fail1;
1622                 }
1623         } else if ((flags & MAP_EXCL) == 0) {
1624                 rv = vm_map_delete(map, *addr, *addr + size);
1625                 if (rv != KERN_SUCCESS)
1626                         goto fail1;
1627         } else {
1628                 error = ENOSPC;
1629                 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1630                         goto fail;
1631                 next_entry = vm_map_entry_succ(prev_entry);
1632                 if (next_entry->start < *addr + size)
1633                         goto fail;
1634         }
1635
1636         rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1637             prot, max_prot, docow);
1638 fail1:
1639         error = vm_mmap_to_errno(rv);
1640 fail:
1641         vm_map_unlock(map);
1642         return (error);
1643 }
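
/*
 * Usage sketch (illustrative only; not part of this file's code):
 * creating and mapping a largepage object from userland, assuming the
 * shm_create_largepage(3) wrapper and the SHM_LARGEPAGE_ALLOC_DEFAULT
 * policy available in newer libc.  Size and file offset must be
 * multiples of the selected page size, as enforced above.
 *
 *	size_t ps[4];
 *
 *	(void)getpagesizes(ps, 4);		// psind 1: superpage size
 *	int fd = shm_create_largepage("/lp", O_CREAT | O_RDWR, 1,
 *	    SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
 *	(void)ftruncate(fd, ps[1]);
 *	void *p = mmap(NULL, ps[1], PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_ALIGNED_SUPER, fd, 0);
 */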
1644
1645 static int
1646 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1647     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1648     vm_ooffset_t foff, struct thread *td)
1649 {
1650         struct shmfd *shmfd;
1651         vm_prot_t maxprot;
1652         int error;
1653         bool writecnt;
1654         void *rl_cookie;
1655
1656         shmfd = fp->f_data;
1657         maxprot = VM_PROT_NONE;
1658
1659         rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1660             &shmfd->shm_mtx);
1661         /* FREAD should always be set. */
1662         if ((fp->f_flag & FREAD) != 0)
1663                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1664
1665         /*
1666          * If FWRITE is set, we can allow VM_PROT_WRITE unless this is a
1667          * shared mapping with a write seal applied.  Private mappings are
1668          * always writable.
1669          */
1670         if ((flags & MAP_SHARED) == 0) {
1671                 cap_maxprot |= VM_PROT_WRITE;
1672                 maxprot |= VM_PROT_WRITE;
1673                 writecnt = false;
1674         } else {
1675                 if ((fp->f_flag & FWRITE) != 0 &&
1676                     (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1677                         maxprot |= VM_PROT_WRITE;
1678
1679                 /*
1680                  * Any mappings from a writable descriptor may be upgraded to
1681                  * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1682                  * applied between the open and subsequent mmap(2).  We want to
1683                  * reject application of a write seal as long as any such
1684                  * mapping exists so that the seal cannot be trivially bypassed.
1685                  */
1686                 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1687                 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1688                         error = EACCES;
1689                         goto out;
1690                 }
1691         }
1692         maxprot &= cap_maxprot;
1693
1694         /* See comment in vn_mmap(). */
1695         if (
1696 #ifdef _LP64
1697             objsize > OFF_MAX ||
1698 #endif
1699             foff > OFF_MAX - objsize) {
1700                 error = EINVAL;
1701                 goto out;
1702         }
1703
1704 #ifdef MAC
1705         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1706         if (error != 0)
1707                 goto out;
1708 #endif
1709
1710         mtx_lock(&shm_timestamp_lock);
1711         vfs_timestamp(&shmfd->shm_atime);
1712         mtx_unlock(&shm_timestamp_lock);
1713         vm_object_reference(shmfd->shm_object);
1714
1715         if (shm_largepage(shmfd)) {
1716                 writecnt = false;
1717                 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1718                     maxprot, flags, foff, td);
1719         } else {
1720                 if (writecnt) {
1721                         vm_pager_update_writecount(shmfd->shm_object, 0,
1722                             objsize);
1723                 }
1724                 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1725                     shmfd->shm_object, foff, writecnt, td);
1726         }
1727         if (error != 0) {
1728                 if (writecnt)
1729                         vm_pager_release_writecount(shmfd->shm_object, 0,
1730                             objsize);
1731                 vm_object_deallocate(shmfd->shm_object);
1732         }
1733 out:
1734         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1735         return (error);
1736 }
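
/*
 * Usage sketch (illustrative only; not part of this file's code): the
 * protection rules above mean a read-only descriptor cannot back a
 * writable shared mapping, while private (copy-on-write) mappings stay
 * writable.  "/obj" is assumed to exist and to be one page long.
 *
 *	int fd = shm_open("/obj", O_RDONLY, 0);
 *	// Fails with EACCES: MAP_SHARED + PROT_WRITE on a read-only fd.
 *	void *p1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	// Succeeds: writes land in a private copy, not in the object.
 *	void *p2 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE,
 *	    fd, 0);
 */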
1737
1738 static int
1739 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1740     struct thread *td)
1741 {
1742         struct shmfd *shmfd;
1743         int error;
1744
1745         error = 0;
1746         shmfd = fp->f_data;
1747         mtx_lock(&shm_timestamp_lock);
1748         /*
1749          * SUSv4 says that x bits of permission need not be affected.
1750          * Be consistent with our shm_open there.
1751          */
1752 #ifdef MAC
1753         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1754         if (error != 0)
1755                 goto out;
1756 #endif
1757         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1758             VADMIN, active_cred);
1759         if (error != 0)
1760                 goto out;
1761         shmfd->shm_mode = mode & ACCESSPERMS;
1762 out:
1763         mtx_unlock(&shm_timestamp_lock);
1764         return (error);
1765 }
1766
1767 static int
1768 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1769     struct thread *td)
1770 {
1771         struct shmfd *shmfd;
1772         int error;
1773
1774         error = 0;
1775         shmfd = fp->f_data;
1776         mtx_lock(&shm_timestamp_lock);
1777 #ifdef MAC
1778         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1779         if (error != 0)
1780                 goto out;
1781 #endif
1782         if (uid == (uid_t)-1)
1783                 uid = shmfd->shm_uid;
1784         if (gid == (gid_t)-1)
1785                 gid = shmfd->shm_gid;
1786         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1787             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1788             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)) != 0)
1789                 goto out;
1790         shmfd->shm_uid = uid;
1791         shmfd->shm_gid = gid;
1792 out:
1793         mtx_unlock(&shm_timestamp_lock);
1794         return (error);
1795 }
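
/*
 * Usage sketch (illustrative only; not part of this file's code): the
 * two handlers above back fchmod(2) and fchown(2) on shm descriptors.
 *
 *	int fd = shm_open("/obj", O_CREAT | O_RDWR, 0600);
 *	(void)fchmod(fd, 0640);	// masked with ACCESSPERMS: no suid/sticky
 *	(void)fchown(fd, getuid(), getgid());	// else needs PRIV_VFS_CHOWN
 */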
1796
1797 /*
1798  * Helper routines to allow the backing object of a shared memory file
1799  * descriptor to be mapped in the kernel.
1800  */
1801 int
1802 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1803 {
1804         struct shmfd *shmfd;
1805         vm_offset_t kva, ofs;
1806         vm_object_t obj;
1807         int rv;
1808
1809         if (fp->f_type != DTYPE_SHM)
1810                 return (EINVAL);
1811         shmfd = fp->f_data;
1812         obj = shmfd->shm_object;
1813         VM_OBJECT_WLOCK(obj);
1814         /*
1815          * XXXRW: This validation is probably insufficient, and subject to
1816          * sign errors.  It should be fixed.
1817          */
1818         if (offset >= shmfd->shm_size ||
1819             offset + size > round_page(shmfd->shm_size)) {
1820                 VM_OBJECT_WUNLOCK(obj);
1821                 return (EINVAL);
1822         }
1823
1824         shmfd->shm_kmappings++;
1825         vm_object_reference_locked(obj);
1826         VM_OBJECT_WUNLOCK(obj);
1827
1828         /* Map the object into the kernel_map and wire it. */
1829         kva = vm_map_min(kernel_map);
1830         ofs = offset & PAGE_MASK;
1831         offset = trunc_page(offset);
1832         size = round_page(size + ofs);
1833         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1834             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1835             VM_PROT_READ | VM_PROT_WRITE, 0);
1836         if (rv == KERN_SUCCESS) {
1837                 rv = vm_map_wire(kernel_map, kva, kva + size,
1838                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1839                 if (rv == KERN_SUCCESS) {
1840                         *memp = (void *)(kva + ofs);
1841                         return (0);
1842                 }
1843                 vm_map_remove(kernel_map, kva, kva + size);
1844         } else
1845                 vm_object_deallocate(obj);
1846
1847         /* On failure, drop our mapping reference. */
1848         VM_OBJECT_WLOCK(obj);
1849         shmfd->shm_kmappings--;
1850         VM_OBJECT_WUNLOCK(obj);
1851
1852         return (vm_mmap_to_errno(rv));
1853 }
1854
1855 /*
1856  * We require the caller to unmap the entire entry.  This allows us to
1857  * safely decrement shm_kmappings when a mapping is removed.
1858  */
1859 int
1860 shm_unmap(struct file *fp, void *mem, size_t size)
1861 {
1862         struct shmfd *shmfd;
1863         vm_map_entry_t entry;
1864         vm_offset_t kva, ofs;
1865         vm_object_t obj;
1866         vm_pindex_t pindex;
1867         vm_prot_t prot;
1868         boolean_t wired;
1869         vm_map_t map;
1870         int rv;
1871
1872         if (fp->f_type != DTYPE_SHM)
1873                 return (EINVAL);
1874         shmfd = fp->f_data;
1875         kva = (vm_offset_t)mem;
1876         ofs = kva & PAGE_MASK;
1877         kva = trunc_page(kva);
1878         size = round_page(size + ofs);
1879         map = kernel_map;
1880         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1881             &obj, &pindex, &prot, &wired);
1882         if (rv != KERN_SUCCESS)
1883                 return (EINVAL);
1884         if (entry->start != kva || entry->end != kva + size) {
1885                 vm_map_lookup_done(map, entry);
1886                 return (EINVAL);
1887         }
1888         vm_map_lookup_done(map, entry);
1889         if (obj != shmfd->shm_object)
1890                 return (EINVAL);
1891         vm_map_remove(map, kva, kva + size);
1892         VM_OBJECT_WLOCK(obj);
1893         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1894         shmfd->shm_kmappings--;
1895         VM_OBJECT_WUNLOCK(obj);
1896         return (0);
1897 }
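
/*
 * Sketch of a hypothetical in-kernel consumer of the two helpers above
 * (no such caller exists in this file): map the first page of a shm
 * descriptor into the kernel, touch it, then unmap exactly the range
 * that shm_map() returned.
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */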
1898
1899 static int
1900 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1901 {
1902         const char *path, *pr_path;
1903         size_t pr_pathlen;
1904         bool visible;
1905
1906         sx_assert(&shm_dict_lock, SA_LOCKED);
1907         kif->kf_type = KF_TYPE_SHM;
1908         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1909         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1910         if (shmfd->shm_path != NULL) {
1911                 path = shmfd->shm_path;
1912                 pr_path = curthread->td_ucred->cr_prison->pr_path;
1913                 if (strcmp(pr_path, "/") != 0) {
1914                         /* Return the jail-rooted pathname. */
1915                         pr_pathlen = strlen(pr_path);
1916                         visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1917                             path[pr_pathlen] == '/';
1918                         if (list && !visible)
1919                                 return (EPERM);
1920                         if (visible)
1921                                 path += pr_pathlen;
1922                 }
1923                 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1924         }
1927         return (0);
1928 }
1929
1930 static int
1931 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1932     struct filedesc *fdp __unused)
1933 {
1934         int res;
1935
1936         sx_slock(&shm_dict_lock);
1937         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1938         sx_sunlock(&shm_dict_lock);
1939         return (res);
1940 }
1941
1942 static int
1943 shm_add_seals(struct file *fp, int seals)
1944 {
1945         struct shmfd *shmfd;
1946         void *rl_cookie;
1947         vm_ooffset_t writemappings;
1948         int error, nseals;
1949
1950         error = 0;
1951         shmfd = fp->f_data;
1952         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1953             &shmfd->shm_mtx);
1954
1955         /* Even already-set seals should result in EPERM. */
1956         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1957                 error = EPERM;
1958                 goto out;
1959         }
1960         nseals = seals & ~shmfd->shm_seals;
1961         if ((nseals & F_SEAL_WRITE) != 0) {
1962                 if (shm_largepage(shmfd)) {
1963                         error = ENOTSUP;
1964                         goto out;
1965                 }
1966
1967                 /*
1968                  * The rangelock above prevents writable mappings from being
1969                  * added after we've started applying seals.  The RLOCK here
1970                  * is to avoid torn reads on ILP32 arches as unmapping/reducing
1971                  * writemappings will be done without a rangelock.
1972                  */
1973                 VM_OBJECT_RLOCK(shmfd->shm_object);
1974                 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1975                 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1976                 /* kmappings are also writable */
1977                 if (writemappings > 0) {
1978                         error = EBUSY;
1979                         goto out;
1980                 }
1981         }
1982         shmfd->shm_seals |= nseals;
1983 out:
1984         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1985         return (error);
1986 }
1987
1988 static int
1989 shm_get_seals(struct file *fp, int *seals)
1990 {
1991         struct shmfd *shmfd;
1992
1993         shmfd = fp->f_data;
1994         *seals = shmfd->shm_seals;
1995         return (0);
1996 }
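
/*
 * Usage sketch (illustrative only; not part of this file's code): seals
 * are driven from userland through fcntl(2).  As implemented above,
 * F_SEAL_WRITE is refused with EBUSY while a writable mapping exists,
 * and F_SEAL_SEAL freezes the seal set.
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	(void)ftruncate(fd, 4096);
 *	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) == -1)
 *		err(1, "F_ADD_SEALS");
 *	int seals = fcntl(fd, F_GET_SEALS);
 */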
1997
1998 static int
1999 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
2000 {
2001         vm_object_t object;
2002         vm_pindex_t pistart, pi, piend;
2003         vm_ooffset_t off, len;
2004         int startofs, endofs, end;
2005         int error;
2006
2007         off = *offset;
2008         len = *length;
2009         KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2010         if (off + len > shmfd->shm_size)
2011                 len = shmfd->shm_size - off;
2012         object = shmfd->shm_object;
2013         startofs = off & PAGE_MASK;
2014         endofs = (off + len) & PAGE_MASK;
2015         pistart = OFF_TO_IDX(off);
2016         piend = OFF_TO_IDX(off + len);
2017         pi = OFF_TO_IDX(off + PAGE_MASK);
2018         error = 0;
2019
2020         /* Handle the case when offset is on or beyond shm size. */
2021         if ((off_t)len <= 0) {
2022                 *length = 0;
2023                 return (0);
2024         }
2025
2026         VM_OBJECT_WLOCK(object);
2027
2028         if (startofs != 0) {
2029                 end = pistart != piend ? PAGE_SIZE : endofs;
2030                 error = shm_partial_page_invalidate(object, pistart, startofs,
2031                     end);
2032                 if (error)
2033                         goto out;
2034                 off += end - startofs;
2035                 len -= end - startofs;
2036         }
2037
2038         if (pi < piend) {
2039                 vm_object_page_remove(object, pi, piend, 0);
2040                 off += IDX_TO_OFF(piend - pi);
2041                 len -= IDX_TO_OFF(piend - pi);
2042         }
2043
2044         if (endofs != 0 && pistart != piend) {
2045                 error = shm_partial_page_invalidate(object, piend, 0, endofs);
2046                 if (error)
2047                         goto out;
2048                 off += endofs;
2049                 len -= endofs;
2050         }
2051
2052 out:
2053         VM_OBJECT_WUNLOCK(object);
2054         *offset = off;
2055         *length = len;
2056         return (error);
2057 }
2058
2059 static int
2060 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
2061     struct ucred *active_cred, struct thread *td)
2062 {
2063         void *rl_cookie;
2064         struct shmfd *shmfd;
2065         off_t off, len;
2066         int error;
2067
2068         KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
2069         KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
2070             ("shm_fspacectl: non-zero flags"));
2071         KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
2072             ("shm_fspacectl: offset/length overflow or underflow"));
2073         error = EINVAL;
2074         shmfd = fp->f_data;
2075         off = *offset;
2076         len = *length;
2077
2078         rl_cookie = rangelock_wlock(&shmfd->shm_rl, off, off + len,
2079             &shmfd->shm_mtx);
2080         switch (cmd) {
2081         case SPACECTL_DEALLOC:
2082                 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2083                         error = EPERM;
2084                         break;
2085                 }
2086                 error = shm_deallocate(shmfd, &off, &len, flags);
2087                 *offset = off;
2088                 *length = len;
2089                 break;
2090         default:
2091                 __assert_unreachable();
2092         }
2093         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2094         return (error);
2095 }
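
/*
 * Usage sketch (illustrative only; not part of this file's code): hole
 * punching from userland goes through fspacectl(2); the range struct is
 * updated with whatever part of the request remains unprocessed.
 *
 *	struct spacectl_range rqsr = { .r_offset = 0, .r_len = 4096 };
 *
 *	if (fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, &rqsr) == -1)
 *		err(1, "fspacectl");
 */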
2096
2098 static int
2099 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2100 {
2101         void *rl_cookie;
2102         struct shmfd *shmfd;
2103         size_t size;
2104         int error;
2105
2106         /* This assumes that the caller already checked for overflow. */
2107         error = 0;
2108         shmfd = fp->f_data;
2109         size = offset + len;
2110
2111         /*
2112          * Just grab the rangelock for the range that we may be attempting to
2113          * grow, rather than blocking read/write for regions we won't be
2114          * touching while this (potential) resize is in progress.  Other
2115          * attempts to resize the shmfd will have to take a write lock from 0 to
2116          * OFF_MAX, so this being potentially beyond the current usable range of
2117          * the shmfd is not necessarily a concern.  If other mechanisms are
2118          * added to grow a shmfd, this may need to be re-evaluated.
2119          */
2120         rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
2121             &shmfd->shm_mtx);
2122         if (size > shmfd->shm_size)
2123                 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2124         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2125         /* Translate to posix_fallocate(2) return value as needed. */
2126         if (error == ENOMEM)
2127                 error = ENOSPC;
2128         return (error);
2129 }
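
/*
 * Usage sketch (illustrative only; not part of this file's code):
 * shm_fallocate() backs posix_fallocate(2) on shm descriptors, which
 * returns an error number instead of setting errno; ENOMEM from the
 * grow is reported as ENOSPC, per the translation above.
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	int error = posix_fallocate(fd, 0, 1024 * 1024);
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");
 */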
2130
2131 static int
2132 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2133 {
2134         struct shm_mapping *shmm;
2135         struct sbuf sb;
2136         struct kinfo_file kif;
2137         u_long i;
2138         int error, error2;
2139
2140         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2141         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2142         error = 0;
2143         sx_slock(&shm_dict_lock);
2144         for (i = 0; i < shm_hash + 1; i++) {
2145                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2146                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2147                             &kif, true);
2148                         if (error == EPERM) {
2149                                 error = 0;
2150                                 continue;
2151                         }
2152                         if (error != 0)
2153                                 break;
2154                         pack_kinfo(&kif);
2155                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2156                             0 : ENOMEM;
2157                         if (error != 0)
2158                                 break;
2159                 }
2160         }
2161         sx_sunlock(&shm_dict_lock);
2162         error2 = sbuf_finish(&sb);
2163         sbuf_delete(&sb);
2164         return (error != 0 ? error : error2);
2165 }
2166
2167 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2168     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2169     NULL, 0, sysctl_posix_shm_list, "",
2170     "POSIX SHM list");
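
/*
 * Reader sketch (illustrative only; not part of this file's code):
 * posixshmcontrol(1) consumes this sysctl.  The buffer is a sequence of
 * variable-length struct kinfo_file records, walked via kf_structsize.
 *
 *	size_t olen;
 *
 *	(void)sysctlbyname("kern.ipc.posix_shm_list", NULL, &olen, NULL, 0);
 *	char *buf = malloc(olen);
 *	(void)sysctlbyname("kern.ipc.posix_shm_list", buf, &olen, NULL, 0);
 *	for (char *p = buf; p < buf + olen;
 *	    p += ((struct kinfo_file *)p)->kf_structsize)
 *		puts(((struct kinfo_file *)p)->kf_path);
 */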
2171
2172 int
2173 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2174     struct filecaps *caps)
2175 {
2176
2177         return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2178 }
2179
2180 /*
2181  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2182  * caller, and libc will enforce it for the traditional shm_open() call.  This
2183  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
2184  * interface also includes a 'name' argument that is currently unused, but could
2185  * potentially be exported later via some interface for debugging purposes.
2186  * From the kernel's perspective, it is optional.  Individual consumers like
2187  * memfd_create() may require it in order to be compatible with other systems
2188  * implementing the same function.
2189  */
2190 int
2191 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2192 {
2193
2194         return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2195             uap->shmflags, NULL, uap->name));
2196 }
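
/*
 * Usage sketch (illustrative only; not part of this file's code): two
 * userland consumers of this entry point.  libc's shm_open() supplies
 * O_CLOEXEC itself, while memfd_create() opts in explicitly and passes
 * its name for debugging only.
 *
 *	int a = shm_open("/obj", O_CREAT | O_RDWR, 0600);
 *	int b = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 */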