/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
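
/*
 * Example (a minimal userland sketch, kept under "#if 0" and not part
 * of the build; the function name example_shm_use is hypothetical):
 * the typical life cycle serviced by this file is shm_open(2) ->
 * ftruncate(2) -> mmap(2) -> shm_unlink(2).
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_shm_use(void)
{
	void *p;
	int fd;

	/* Served by kern_shm_open2() below; creates the object if absent. */
	fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
	if (fd == -1)
		return (-1);
	/* Sizing goes through shm_dotruncate(). */
	if (ftruncate(fd, 4096) == -1) {
		close(fd);
		return (-1);
	}
	/* Mapping goes through shm_mmap(). */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return (-1);
	}
	/* Drops the name; the object persists until last unmap and close. */
	shm_unlink("/example");
	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif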

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static void	shm_doremove(struct shm_mapping *map);
static int	shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);
static int	shm_deallocate(struct shmfd *shmfd, off_t *offset,
    off_t *length, int flags);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;
static fo_get_seals_t	shm_get_seals;
static fo_add_seals_t	shm_add_seals;
static fo_fallocate_t	shm_fallocate;
static fo_fspacectl_t	shm_fspacectl;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_fallocate = shm_fallocate,
	.fo_fspacectl = shm_fspacectl,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
};

FEATURE(posix_shm, "POSIX shared memory");

static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "");

static int largepage_reclaim_tries = 1;
SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
    CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
    "Number of contig reclaims before giving up for default alloc policy");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
	if (rv == VM_PAGER_OK)
		goto found;

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	VM_OBJECT_WLOCK(obj);
	m = vm_page_lookup(obj, idx);
	if (uio->uio_rw == UIO_READ && m == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out obj's pages because obj is an OBJT_SWAP
	 * type object.
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
		    obj, idx, rv);
		return (EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

found:
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0)
		vm_page_set_dirty(m);
	vm_page_activate(m);
	vm_page_sunbusy(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	vm_page_t m __diagused;
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pidx >= object->size)
		return (VM_PAGER_FAIL);
	*first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

	/*
	 * We only busy the first page in the superpage run.  It is
	 * useless to busy the whole run since we only remove a full
	 * superpage, and it takes too long to busy e.g. 512 * 512 ==
	 * 262144 pages constituting a 1G amd64 superpage.
	 */
	m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
	MPASS(m != NULL);

	*last = *first + atop(pagesizes[psind]) - 1;
	return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pindex >= object->size)
		return (FALSE);
	if (before != NULL) {
		*before = pindex - rounddown2(pindex, pagesizes[psind] /
		    PAGE_SIZE);
	}
	if (after != NULL) {
		*after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
		    pindex;
	}
	return (TRUE);
}
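
/*
 * Worked example for the rounding above, assuming 2M superpages on
 * amd64 (pagesizes[psind] / PAGE_SIZE == 512): for pindex 600,
 * rounddown2(600, 512) == 512 and roundup2(600, 512) == 1024, so
 * *before = 88 and *after = 424, the distances to the boundaries of
 * the enclosing superpage run.
 */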

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind != 0) {
		atomic_subtract_long(&count_largepages[psind],
		    object->size / (pagesizes[psind] / PAGE_SIZE));
		vm_wire_sub(object->size);
	} else {
		KASSERT(object->size == 0,
		    ("largepage phys obj %p not initialized but size %#jx > 0",
		    object, (uintmax_t)object->size));
	}
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
	.phys_pg_populate =	shm_largepage_phys_populate,
	.phys_pg_haspage =	shm_largepage_phys_haspage,
	.phys_pg_ctor =		shm_largepage_phys_ctor,
	.phys_pg_dtor =		shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
	return (shmfd->shm_object->type == OBJT_PHYS);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;
	off_t size;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
		return (EINVAL);
	foffset_lock_uio(fp, uio, flags);
	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
		/*
		 * Overflow is only an error if we're supposed to expand on
		 * write.  Otherwise, we'll just truncate the write to the
		 * size of the file, which can only grow up to OFF_MAX.
		 */
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
			foffset_unlock_uio(fp, uio, flags);
			return (EFBIG);
		}

		size = shmfd->shm_size;
	} else {
		size = uio->uio_offset + uio->uio_resid;
	}
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    size, &shmfd->shm_mtx);
	}
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
		error = EPERM;
	} else {
		error = 0;
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
		    size > shmfd->shm_size) {
			error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
		}
		if (error == 0)
			error = uiomove_object(shmfd->shm_object,
			    shmfd->shm_size, uio);
	}
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	struct shm_largepage_conf *conf;
	void *rl_cookie;

	shmfd = fp->f_data;
	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, just like it
		 * would on an unlinked regular file.
		 */
		return (0);
	case FIOSSHMLPGCNF:
		if (!shm_largepage(shmfd))
			return (ENOTTY);
		conf = data;
		if (shmfd->shm_lp_psind != 0 &&
		    conf->psind != shmfd->shm_lp_psind)
			return (EINVAL);
		if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
		    pagesizes[conf->psind] == 0)
			return (EINVAL);
		if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
			return (EINVAL);

		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
		shmfd->shm_lp_psind = conf->psind;
		shmfd->shm_lp_alloc_policy = conf->alloc_policy;
		shmfd->shm_object->un_pager.phys.data_val = conf->psind;
		rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
		return (0);
	case FIOGSHMLPGCNF:
		if (!shm_largepage(shmfd))
			return (ENOTTY);
		conf = data;
		rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
		conf->psind = shmfd->shm_lp_psind;
		conf->alloc_policy = shmfd->shm_lp_alloc_policy;
		rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
		return (0);
	default:
		return (ENOTTY);
	}
}
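
/*
 * Example of driving the ioctls above from userland (a sketch; the
 * function name example_largepage_conf is hypothetical, and psind == 1
 * assumes pagesizes[1] is the 2M superpage on amd64).  The descriptor
 * must come from shm_open2(2) with SHM_LARGEPAGE.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <string.h>

static int
example_largepage_conf(int fd)
{
	struct shm_largepage_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.psind = 1;		/* pagesizes[1] */
	conf.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
	/* Served by the FIOSSHMLPGCNF case in shm_ioctl() above. */
	return (ioctl(fd, FIOSSHMLPGCNF, &conf));
}
#endif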

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane-ish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;
	if (shm_largepage(shmfd)) {
		sb->st_blocks = shmfd->shm_object->size /
		    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
	}

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	int error;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
	    strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}

static int
shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
    int end)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(base >= 0, ("%s: base %d", __func__, base));
	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
	    end));

retry:
	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
	if (m != NULL) {
		MPASS(vm_page_all_valid(m));
	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
		m = vm_page_alloc(object, idx,
		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
		if (m == NULL)
			goto retry;
		vm_object_pip_add(object, 1);
		VM_OBJECT_WUNLOCK(object);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		VM_OBJECT_WLOCK(object);
		vm_object_pip_wakeup(object);
		if (rv == VM_PAGER_OK) {
			/*
			 * Since the page was not resident, and therefore not
			 * recently accessed, immediately enqueue it for
			 * asynchronous laundering.  The current operation is
			 * not regarded as an access.
			 */
			vm_page_launder(m);
		} else {
			vm_page_free(m);
			VM_OBJECT_WUNLOCK(object);
			return (EIO);
		}
	}
	if (m != NULL) {
		pmap_zero_page_area(m, base, end - base);
		KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
		    __func__, m));
		vm_page_set_dirty(m);
		vm_page_xunbusy(m);
	}

	return (0);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_pindex_t nobjsize;
	vm_ooffset_t delta;
	int base, error;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			error = shm_partial_page_invalidate(object,
			    OFF_TO_IDX(length), base, PAGE_SIZE);
			if (error)
				return (error);
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Free the swap accounted for the shm. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}

static int
shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t newobjsz;
	vm_pindex_t oldobjsz __unused;
	int aflags, error, i, psind, try;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);

	oldobjsz = object->size;
	newobjsz = OFF_TO_IDX(length);
	if (length == shmfd->shm_size)
		return (0);
	psind = shmfd->shm_lp_psind;
	if (psind == 0 && length != 0)
		return (EINVAL);
	if ((length & (pagesizes[psind] - 1)) != 0)
		return (EINVAL);

	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);
		return (ENOTSUP);	/* Pages are unmanaged. */
#if 0
		vm_object_page_remove(object, newobjsz, oldobjsz, 0);
		object->size = newobjsz;
		shmfd->shm_size = length;
		return (0);
#endif
	}

	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
		return (EPERM);

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
		aflags |= VM_ALLOC_WAITFAIL;
	try = 0;

	/*
	 * Extend the shmfd and object, keeping all already fully
	 * allocated large pages intact even on error, because the
	 * dropped object lock might have allowed them to be mapped.
	 */
	while (object->size < newobjsz) {
		m = vm_page_alloc_contig(object, object->size, aflags,
		    pagesizes[psind] / PAGE_SIZE, 0, ~0,
		    pagesizes[psind], 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_NOWAIT ||
			    (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_DEFAULT &&
			    try >= largepage_reclaim_tries)) {
				VM_OBJECT_WLOCK(object);
				return (ENOMEM);
			}
			error = vm_page_reclaim_contig(aflags,
			    pagesizes[psind] / PAGE_SIZE, 0, ~0,
			    pagesizes[psind], 0) ? 0 :
			    vm_wait_intr(object);
			if (error != 0) {
				VM_OBJECT_WLOCK(object);
				return (error);
			}
			try++;
			VM_OBJECT_WLOCK(object);
			continue;
		}
		try = 0;
		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
			if ((m[i].flags & PG_ZERO) == 0)
				pmap_zero_page(&m[i]);
			vm_page_valid(&m[i]);
			vm_page_xunbusy(&m[i]);
		}
		object->size += OFF_TO_IDX(pagesizes[psind]);
		shmfd->shm_size += pagesizes[psind];
		atomic_add_long(&count_largepages[psind], 1);
		vm_wire_add(atop(pagesizes[psind]));
	}
	return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	int error;

	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
	    length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
	    rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);
	error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}
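
/*
 * Note that both ftruncate(2) on a shm descriptor (via shm_truncate()
 * above) and the O_TRUNC handling in kern_shm_open2() below resolve to
 * these truncation helpers, so the F_SEAL_SHRINK/F_SEAL_GROW and
 * kernel-mapping checks apply on every resize path.
 */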

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	if (largepage) {
		shmfd->shm_object = phys_pager_allocate(NULL,
		    &shm_largepage_phys_ops, NULL, shmfd->shm_size,
		    VM_PROT_DEFAULT, 0, ucred);
		shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
	} else {
		shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
		    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	}
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static void
shm_init(void *arg)
{
	char name[32];
	int i;

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));

	for (i = 1; i < MAXPAGESIZES; i++) {
		if (pagesizes[i] == 0)
			break;
#define	M	(1024 * 1024)
#define	G	(1024 * M)
		if (pagesizes[i] >= G)
			snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
		else if (pagesizes[i] >= M)
			snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
		else
			snprintf(name, sizeof(name), "%lu", pagesizes[i]);
#undef G
#undef M
		SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
		    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
		    "number of non-transient largepages allocated");
	}
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
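
/*
 * For example, on an amd64 machine with 2M and 1G superpages enabled,
 * the loop above registers read-only counters named vm.largepages.2M
 * and vm.largepages.1G.
 */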

/*
 * Remove all shared memory objects that belong to a prison.
 */
void
shm_remove_prison(struct prison *pr)
{
	struct shm_mapping *shmm, *tshmm;
	u_long i;

	sx_xlock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
			if (shmm->sm_shmfd->shm_object->cred &&
			    shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
				shm_doremove(shmm);
		}
	}
	sx_xunlock(&shm_dict_lock);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			shm_doremove(map);
			return (0);
		}
	}

	return (ENOENT);
}

static void
shm_doremove(struct shm_mapping *map)
{
	map->sm_shmfd->shm_path = NULL;
	LIST_REMOVE(map, sm_link);
	shm_drop(map->sm_shmfd);
	free(map->sm_path, M_SHMFD);
	free(map, M_SHMFD);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
	struct pwddesc *pdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	void *rl_cookie;
	Fnv32_t fnv;
	mode_t cmode;
	int error, fd, initial_seals;
	bool largepage;

	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
	    SHM_LARGEPAGE)) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	largepage = (shmflags & SHM_LARGEPAGE) != 0;
	if (largepage && !PMAP_HAS_LARGEPAGES)
		return (ENOTTY);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
	 * If the decision is made later to allow additional seals, care must be
	 * taken below to ensure that the seals are properly set if the shmfd
	 * already existed -- this currently assumes that only F_SEAL_SEAL can
	 * be set and doesn't take further precautions to ensure the validity of
	 * the seals being added with respect to current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	pdp = td->td_proc->p_pd;
	cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;

	/*
	 * A shm created by shm_open(2) should always have O_CLOEXEC set, as
	 * mandated by POSIX.  We allow it to be unset here so that an
	 * in-kernel interface may be written as a thin layer around shm,
	 * optionally not setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set
	 * unconditionally in sys_shm_open() to keep this implementation
	 * compliant.
	 */
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode, largepage);
		shmfd->shm_seals = initial_seals;
		shmfd->shm_flags = shmflags;
	} else {
		error = shm_copyin_path(td, userpath, &path);
		if (error != 0) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}

		AUDIT_ARG_UPATH1_CANON(path);
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode,
					    largepage);
					shmfd->shm_seals = initial_seals;
					shmfd->shm_flags = shmflags;
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
			    &shmfd->shm_mtx);

			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * not be sealed.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);

			/*
			 * initial_seals can't set additional seals if
			 * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is
			 * set, then we've already removed that one from
			 * initial_seals.  This is currently redundant as we
			 * only allow setting F_SEAL_SEAL at creation time, but
			 * it's cheap to check and decreases the effort required
			 * to allow additional seals.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else if (shmflags != 0 && shmflags != shmfd->shm_flags)
				error = EINVAL;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
			    &shmfd->shm_mtx);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}
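
/*
 * Example of reaching kern_shm_open2() from userland (a sketch; the
 * function name example_sealed_anon is hypothetical): an anonymous
 * object created with SHM_ALLOW_SEALING, so that seals other than
 * F_SEAL_SEAL may be added later with fcntl(2).
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_sealed_anon(void)
{
	int fd;

	fd = shm_open2(SHM_ANON, O_RDWR, 0600, SHM_ALLOW_SEALING, NULL);
	if (fd == -1)
		return (-1);
	/* Handled by shm_dotruncate() and shm_add_seals(), respectively. */
	if (ftruncate(fd, 4096) == -1 ||
	    fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) == -1) {
		close(fd);
		return (-1);
	}
	return (fd);
}
#endif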

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
	    uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	error = shm_copyin_path(td, uap->path, &path);
	if (error != 0)
		return (error);

	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_SHMFD);

	return (error);
}

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
	char *path_from = NULL, *path_to = NULL;
	Fnv32_t fnv_from, fnv_to;
	struct shmfd *fd_from;
	struct shmfd *fd_to;
	int error;
	int flags;

	flags = uap->flags;
	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Make sure the user passed only valid flags.
	 * If you add a new flag, please add a new term here.
	 */
	if ((flags & ~(
	    SHM_RENAME_NOREPLACE |
	    SHM_RENAME_EXCHANGE
	    )) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * EXCHANGE and NOREPLACE don't quite make sense together.  Let's
	 * force the user to choose one or the other.
	 */
	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
	    (flags & SHM_RENAME_EXCHANGE) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Renaming to or from anonymous makes no sense. */
	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
		error = EINVAL;
		goto out;
	}

	error = shm_copyin_path(td, uap->path_from, &path_from);
	if (error != 0)
		goto out;

	error = shm_copyin_path(td, uap->path_to, &path_to);
	if (error != 0)
		goto out;

	AUDIT_ARG_UPATH1_CANON(path_from);
	AUDIT_ARG_UPATH2_CANON(path_to);

	/* Rename with from/to equal is a no-op. */
	if (strcmp(path_from, path_to) == 0)
		goto out;

	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

	sx_xlock(&shm_dict_lock);

	fd_from = shm_lookup(path_from, fnv_from);
	if (fd_from == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	fd_to = shm_lookup(path_to, fnv_to);
	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
		error = EEXIST;
		goto out_locked;
	}

	/*
	 * Unconditionally prevents shm_remove from invalidating the 'from'
	 * shm's state.
	 */
	shm_hold(fd_from);
	error = shm_remove(path_from, fnv_from, td->td_ucred);

	/*
	 * If we got ENOENT, one of our assumptions failed (e.g., the lock
	 * did not protect us).
	 */
	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
	    path_from));
	if (error != 0) {
		shm_drop(fd_from);
		goto out_locked;
	}

	/*
	 * If we are exchanging, we need to ensure the shm_remove below
	 * doesn't invalidate the dest shm's state.
	 */
	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
		shm_hold(fd_to);

	/*
	 * NOTE: if path_to is not already in the hash, c'est la vie;
	 * it simply means we have nothing already at path_to to unlink.
	 * That is the ENOENT case.
	 *
	 * If we somehow don't have access to unlink this guy, but
	 * did for the shm at path_from, then relink the shm to path_from
	 * and abort with EACCES.
	 *
	 * All other errors: that is weird; let's relink and abort the
	 * operation.
	 */
	error = shm_remove(path_to, fnv_to, td->td_ucred);
	if (error != 0 && error != ENOENT) {
		shm_insert(path_from, fnv_from, fd_from);
		shm_drop(fd_from);
		/* Don't free path_from now, since the hash references it. */
		path_from = NULL;
		goto out_locked;
	}

	error = 0;

	shm_insert(path_to, fnv_to, fd_from);

	/* Don't free path_to now, since the hash references it. */
	path_to = NULL;

	/* We kept a ref when we removed, and incremented again in insert. */
	shm_drop(fd_from);
	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
	    fd_from->shm_refs));

	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
		shm_insert(path_from, fnv_from, fd_to);
		path_from = NULL;
		shm_drop(fd_to);
		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
		    fd_to->shm_refs));
	}

out_locked:
	sx_xunlock(&shm_dict_lock);

out:
	free(path_from, M_SHMFD);
	free(path_to, M_SHMFD);
	return (error);
}
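
/*
 * Example of the rename interface above (a userland sketch; the path
 * names are illustrative): atomically move one name onto another,
 * refusing to clobber an existing destination.
 */
#if 0
#include <sys/mman.h>

static int
example_shm_rename(void)
{
	/* Fails with EEXIST if "/new" already exists (NOREPLACE). */
	return (shm_rename("/old", "/new", SHM_RENAME_NOREPLACE));
}
#endif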

static int
shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
    vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct vmspace *vms;
	vm_map_entry_t next_entry, prev_entry;
	vm_offset_t align, mask, maxaddr;
	int docow, error, rv, try;
	bool curmap;

	if (shmfd->shm_lp_psind == 0)
		return (EINVAL);

	/* MAP_PRIVATE is disabled. */
	if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
	    MAP_NOCORE |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);

	vms = td->td_proc->p_vmspace;
	curmap = map == &vms->vm_map;
	if (curmap) {
		error = kern_mmap_racct_check(td, map, size);
		if (error != 0)
			return (error);
	}

	docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
	docow |= MAP_INHERIT_SHARE;
	if ((flags & MAP_NOCORE) != 0)
		docow |= MAP_DISABLE_COREDUMP;

	mask = pagesizes[shmfd->shm_lp_psind] - 1;
	if ((foff & mask) != 0)
		return (EINVAL);
	maxaddr = vm_map_max(map);
#ifdef MAP_32BIT
	if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
		maxaddr = MAP_32BIT_MAX_ADDR;
#endif
	if (size == 0 || (size & mask) != 0 ||
	    (*addr != 0 && ((*addr & mask) != 0 ||
	    *addr + size < *addr || *addr + size > maxaddr)))
		return (EINVAL);

	align = flags & MAP_ALIGNMENT_MASK;
	if (align == 0) {
		align = pagesizes[shmfd->shm_lp_psind];
	} else if (align == MAP_ALIGNED_SUPER) {
		if (shmfd->shm_lp_psind != 1)
			return (EINVAL);
		align = pagesizes[1];
	} else {
		align >>= MAP_ALIGNMENT_SHIFT;
		align = 1ULL << align;
		/* Also handles overflow. */
		if (align < pagesizes[shmfd->shm_lp_psind])
			return (EINVAL);
	}

	vm_map_lock(map);
	if ((flags & MAP_FIXED) == 0) {
		try = 1;
		if (curmap && (*addr == 0 ||
		    (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    *addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))) {
			*addr = roundup2((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA),
			    pagesizes[shmfd->shm_lp_psind]);
		}
again:
		rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
		if (rv != KERN_SUCCESS) {
			if (try == 1) {
				try = 2;
1541                                 *addr = vm_map_min(map);
1542                                 if ((*addr & mask) != 0)
1543                                         *addr = (*addr + mask) & ~mask;
1544                                 goto again;
1545                         }
1546                         goto fail1;
1547                 }
1548         } else if ((flags & MAP_EXCL) == 0) {
1549                 rv = vm_map_delete(map, *addr, *addr + size);
1550                 if (rv != KERN_SUCCESS)
1551                         goto fail1;
1552         } else {
1553                 error = ENOSPC;
1554                 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1555                         goto fail;
1556                 next_entry = vm_map_entry_succ(prev_entry);
1557                 if (next_entry->start < *addr + size)
1558                         goto fail;
1559         }
1560
1561         rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1562             prot, max_prot, docow);
1563 fail1:
1564         error = vm_mmap_to_errno(rv);
1565 fail:
1566         vm_map_unlock(map);
1567         return (error);
1568 }
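/*
 * Illustrative userspace sketch (editor's example, not compiled here) of how
 * shm_mmap_large() is typically reached, assuming the shm_create_largepage(3)
 * libc wrapper and a 2M superpage at pagesizes[1] (amd64).  The length,
 * offset, and any address hint must be multiples of the selected page size,
 * or the checks above return EINVAL:
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	size_t len = 2 * 1024 * 1024;
 *	void *p;
 *	int fd;
 *
 *	fd = shm_create_largepage("/lp", O_CREAT | O_RDWR, 1,
 *	    SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
 *	if (fd < 0)
 *		err(1, "shm_create_largepage");
 *	if (ftruncate(fd, len) != 0)
 *		err(1, "ftruncate");
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 */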
1569
1570 static int
1571 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1572     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1573     vm_ooffset_t foff, struct thread *td)
1574 {
1575         struct shmfd *shmfd;
1576         vm_prot_t maxprot;
1577         int error;
1578         bool writecnt;
1579         void *rl_cookie;
1580
1581         shmfd = fp->f_data;
1582         maxprot = VM_PROT_NONE;
1583
1584         rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1585             &shmfd->shm_mtx);
1586         /* FREAD should always be set. */
1587         if ((fp->f_flag & FREAD) != 0)
1588                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1589
1590         /*
1591          * If FWRITE is set, we can allow VM_PROT_WRITE unless it is a
1592          * shared mapping with a write seal applied.  Private mappings
1593          * are always writable.
1594          */
1595         if ((flags & MAP_SHARED) == 0) {
1596                 cap_maxprot |= VM_PROT_WRITE;
1597                 maxprot |= VM_PROT_WRITE;
1598                 writecnt = false;
1599         } else {
1600                 if ((fp->f_flag & FWRITE) != 0 &&
1601                     (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1602                         maxprot |= VM_PROT_WRITE;
1603
1604                 /*
1605                  * Any mappings from a writable descriptor may be upgraded to
1606                  * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1607                  * applied between the open and subsequent mmap(2).  We want to
1608                  * reject application of a write seal as long as any such
1609                  * mapping exists so that the seal cannot be trivially bypassed.
1610                  */
1611                 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1612                 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1613                         error = EACCES;
1614                         goto out;
1615                 }
1616         }
1617         maxprot &= cap_maxprot;
1618
1619         /* See comment in vn_mmap(). */
1620         if (
1621 #ifdef _LP64
1622             objsize > OFF_MAX ||
1623 #endif
1624             foff > OFF_MAX - objsize) {
1625                 error = EINVAL;
1626                 goto out;
1627         }
1628
1629 #ifdef MAC
1630         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1631         if (error != 0)
1632                 goto out;
1633 #endif
1634
1635         mtx_lock(&shm_timestamp_lock);
1636         vfs_timestamp(&shmfd->shm_atime);
1637         mtx_unlock(&shm_timestamp_lock);
1638         vm_object_reference(shmfd->shm_object);
1639
1640         if (shm_largepage(shmfd)) {
1641                 writecnt = false;
1642                 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1643                     maxprot, flags, foff, td);
1644         } else {
1645                 if (writecnt) {
1646                         vm_pager_update_writecount(shmfd->shm_object, 0,
1647                             objsize);
1648                 }
1649                 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1650                     shmfd->shm_object, foff, writecnt, td);
1651         }
1652         if (error != 0) {
1653                 if (writecnt)
1654                         vm_pager_release_writecount(shmfd->shm_object, 0,
1655                             objsize);
1656                 vm_object_deallocate(shmfd->shm_object);
1657         }
1658 out:
1659         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1660         return (error);
1661 }
1662
1663 static int
1664 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1665     struct thread *td)
1666 {
1667         struct shmfd *shmfd;
1668         int error;
1669
1670         error = 0;
1671         shmfd = fp->f_data;
1672         mtx_lock(&shm_timestamp_lock);
1673         /*
1674          * SUSv4 says that x bits of permission need not be affected.
1675          * Be consistent with our shm_open there.
1676          */
1677 #ifdef MAC
1678         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1679         if (error != 0)
1680                 goto out;
1681 #endif
1682         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1683             VADMIN, active_cred);
1684         if (error != 0)
1685                 goto out;
1686         shmfd->shm_mode = mode & ACCESSPERMS;
1687 out:
1688         mtx_unlock(&shm_timestamp_lock);
1689         return (error);
1690 }
1691
1692 static int
1693 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1694     struct thread *td)
1695 {
1696         struct shmfd *shmfd;
1697         int error;
1698
1699         error = 0;
1700         shmfd = fp->f_data;
1701         mtx_lock(&shm_timestamp_lock);
1702 #ifdef MAC
1703         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1704         if (error != 0)
1705                 goto out;
1706 #endif
1707         if (uid == (uid_t)-1)
1708                 uid = shmfd->shm_uid;
1709         if (gid == (gid_t)-1)
1710                 gid = shmfd->shm_gid;
1711         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1712             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1713             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1714                 goto out;
1715         shmfd->shm_uid = uid;
1716         shmfd->shm_gid = gid;
1717 out:
1718         mtx_unlock(&shm_timestamp_lock);
1719         return (error);
1720 }
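/*
 * Illustrative userspace sketch (editor's example): the two handlers above
 * back fchmod(2) and fchown(2) on shm descriptors, so metadata can be
 * changed without a path:
 *
 *	if (fchmod(fd, 0640) != 0)
 *		err(1, "fchmod");
 *	if (fchown(fd, uid, gid) != 0)
 *		err(1, "fchown");
 */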
1721
1722 /*
1723  * Helper routines to allow the backing object of a shared memory file
1724  * descriptor to be mapped in the kernel.
1725  */
1726 int
1727 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1728 {
1729         struct shmfd *shmfd;
1730         vm_offset_t kva, ofs;
1731         vm_object_t obj;
1732         int rv;
1733
1734         if (fp->f_type != DTYPE_SHM)
1735                 return (EINVAL);
1736         shmfd = fp->f_data;
1737         obj = shmfd->shm_object;
1738         VM_OBJECT_WLOCK(obj);
1739         /*
1740          * XXXRW: This validation is probably insufficient, and subject to
1741          * sign errors.  It should be fixed.
1742          */
1743         if (offset >= shmfd->shm_size ||
1744             offset + size > round_page(shmfd->shm_size)) {
1745                 VM_OBJECT_WUNLOCK(obj);
1746                 return (EINVAL);
1747         }
1748
1749         shmfd->shm_kmappings++;
1750         vm_object_reference_locked(obj);
1751         VM_OBJECT_WUNLOCK(obj);
1752
1753         /* Map the object into the kernel_map and wire it. */
1754         kva = vm_map_min(kernel_map);
1755         ofs = offset & PAGE_MASK;
1756         offset = trunc_page(offset);
1757         size = round_page(size + ofs);
1758         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1759             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1760             VM_PROT_READ | VM_PROT_WRITE, 0);
1761         if (rv == KERN_SUCCESS) {
1762                 rv = vm_map_wire(kernel_map, kva, kva + size,
1763                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1764                 if (rv == KERN_SUCCESS) {
1765                         *memp = (void *)(kva + ofs);
1766                         return (0);
1767                 }
1768                 vm_map_remove(kernel_map, kva, kva + size);
1769         } else
1770                 vm_object_deallocate(obj);
1771
1772         /* On failure, drop our mapping reference. */
1773         VM_OBJECT_WLOCK(obj);
1774         shmfd->shm_kmappings--;
1775         VM_OBJECT_WUNLOCK(obj);
1776
1777         return (vm_mmap_to_errno(rv));
1778 }
1779
1780 /*
1781  * We require the caller to unmap the entire entry.  This allows us to
1782  * safely decrement shm_kmappings when a mapping is removed.
1783  */
1784 int
1785 shm_unmap(struct file *fp, void *mem, size_t size)
1786 {
1787         struct shmfd *shmfd;
1788         vm_map_entry_t entry;
1789         vm_offset_t kva, ofs;
1790         vm_object_t obj;
1791         vm_pindex_t pindex;
1792         vm_prot_t prot;
1793         boolean_t wired;
1794         vm_map_t map;
1795         int rv;
1796
1797         if (fp->f_type != DTYPE_SHM)
1798                 return (EINVAL);
1799         shmfd = fp->f_data;
1800         kva = (vm_offset_t)mem;
1801         ofs = kva & PAGE_MASK;
1802         kva = trunc_page(kva);
1803         size = round_page(size + ofs);
1804         map = kernel_map;
1805         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1806             &obj, &pindex, &prot, &wired);
1807         if (rv != KERN_SUCCESS)
1808                 return (EINVAL);
1809         if (entry->start != kva || entry->end != kva + size) {
1810                 vm_map_lookup_done(map, entry);
1811                 return (EINVAL);
1812         }
1813         vm_map_lookup_done(map, entry);
1814         if (obj != shmfd->shm_object)
1815                 return (EINVAL);
1816         vm_map_remove(map, kva, kva + size);
1817         VM_OBJECT_WLOCK(obj);
1818         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1819         shmfd->shm_kmappings--;
1820         VM_OBJECT_WUNLOCK(obj);
1821         return (0);
1822 }
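/*
 * Illustrative in-kernel sketch (editor's example): a hypothetical consumer
 * reading one word out of a shm object.  Note that shm_unmap() must be given
 * exactly the address and size returned by the matching shm_map() call, per
 * the whole-entry requirement above; "fp" is assumed to be a held DTYPE_SHM
 * file:
 *
 *	static int
 *	example_peek(struct file *fp, off_t off, uint32_t *out)
 *	{
 *		void *mem;
 *		int error;
 *
 *		error = shm_map(fp, sizeof(uint32_t), off, &mem);
 *		if (error != 0)
 *			return (error);
 *		*out = *(uint32_t *)mem;
 *		// Unmap the entire region established by shm_map().
 *		return (shm_unmap(fp, mem, sizeof(uint32_t)));
 *	}
 */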
1823
1824 static int
1825 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1826 {
1827         const char *path, *pr_path;
1828         size_t pr_pathlen;
1829         bool visible;
1830
1831         sx_assert(&shm_dict_lock, SA_LOCKED);
1832         kif->kf_type = KF_TYPE_SHM;
1833         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1834         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1835         if (shmfd->shm_path != NULL) {
1837                 path = shmfd->shm_path;
1838                 pr_path = curthread->td_ucred->cr_prison->pr_path;
1839                 if (strcmp(pr_path, "/") != 0) {
1840                         /* Return the jail-rooted pathname. */
1841                         pr_pathlen = strlen(pr_path);
1842                         visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1843                             path[pr_pathlen] == '/';
1844                         if (list && !visible)
1845                                 return (EPERM);
1846                         if (visible)
1847                                 path += pr_pathlen;
1848                 }
1849                 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1851         }
1852         return (0);
1853 }
1854
1855 static int
1856 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1857     struct filedesc *fdp __unused)
1858 {
1859         int res;
1860
1861         sx_slock(&shm_dict_lock);
1862         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1863         sx_sunlock(&shm_dict_lock);
1864         return (res);
1865 }
1866
1867 static int
1868 shm_add_seals(struct file *fp, int seals)
1869 {
1870         struct shmfd *shmfd;
1871         void *rl_cookie;
1872         vm_ooffset_t writemappings;
1873         int error, nseals;
1874
1875         error = 0;
1876         shmfd = fp->f_data;
1877         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1878             &shmfd->shm_mtx);
1879
1880         /* Even already-set seals should result in EPERM. */
1881         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1882                 error = EPERM;
1883                 goto out;
1884         }
1885         nseals = seals & ~shmfd->shm_seals;
1886         if ((nseals & F_SEAL_WRITE) != 0) {
1887                 if (shm_largepage(shmfd)) {
1888                         error = ENOTSUP;
1889                         goto out;
1890                 }
1891
1892                 /*
1893                  * The rangelock above prevents writable mappings from being
1894                  * added after we've started applying seals.  The RLOCK here
1895                  * is to avoid torn reads on ILP32 arches as unmapping/reducing
1896                  * writemappings will be done without a rangelock.
1897                  */
1898                 VM_OBJECT_RLOCK(shmfd->shm_object);
1899                 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1900                 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1901                 /* kmappings are also writable */
1902                 if (writemappings > 0) {
1903                         error = EBUSY;
1904                         goto out;
1905                 }
1906         }
1907         shmfd->shm_seals |= nseals;
1908 out:
1909         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1910         return (error);
1911 }
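/*
 * Illustrative userspace sketch (editor's example): seals are applied with
 * fcntl(2).  Sealing must be permitted at creation time (here via
 * memfd_create(3) with MFD_ALLOW_SEALING, since objects otherwise start
 * with F_SEAL_SEAL set), and F_SEAL_WRITE fails with EBUSY while a writable
 * shared mapping still exists, per the writemappings check above:
 *
 *	int fd, seals;
 *
 *	fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	if (fd < 0)
 *		err(1, "memfd_create");
 *	if (ftruncate(fd, 4096) != 0)
 *		err(1, "ftruncate");
 *	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_WRITE) == -1)
 *		err(1, "F_ADD_SEALS");
 *	seals = fcntl(fd, F_GET_SEALS);
 */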
1912
1913 static int
1914 shm_get_seals(struct file *fp, int *seals)
1915 {
1916         struct shmfd *shmfd;
1917
1918         shmfd = fp->f_data;
1919         *seals = shmfd->shm_seals;
1920         return (0);
1921 }
1922
1923 static int
1924 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1925 {
1926         vm_object_t object;
1927         vm_pindex_t pistart, pi, piend;
1928         vm_ooffset_t off, len;
1929         int startofs, endofs, end;
1930         int error;
1931
1932         off = *offset;
1933         len = *length;
1934         KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
1935         if (off + len > shmfd->shm_size)
1936                 len = shmfd->shm_size - off;
1937         object = shmfd->shm_object;
1938         startofs = off & PAGE_MASK;
1939         endofs = (off + len) & PAGE_MASK;
1940         pistart = OFF_TO_IDX(off);
1941         piend = OFF_TO_IDX(off + len);
1942         pi = OFF_TO_IDX(off + PAGE_MASK);
1943         error = 0;
1944
1945         /* Handle the case when offset is on or beyond shm size. */
1946         if ((off_t)len <= 0) {
1947                 *length = 0;
1948                 return (0);
1949         }
1950
1951         VM_OBJECT_WLOCK(object);
1952
1953         if (startofs != 0) {
1954                 end = pistart != piend ? PAGE_SIZE : endofs;
1955                 error = shm_partial_page_invalidate(object, pistart, startofs,
1956                     end);
1957                 if (error)
1958                         goto out;
1959                 off += end - startofs;
1960                 len -= end - startofs;
1961         }
1962
1963         if (pi < piend) {
1964                 vm_object_page_remove(object, pi, piend, 0);
1965                 off += IDX_TO_OFF(piend - pi);
1966                 len -= IDX_TO_OFF(piend - pi);
1967         }
1968
1969         if (endofs != 0 && pistart != piend) {
1970                 error = shm_partial_page_invalidate(object, piend, 0, endofs);
1971                 if (error)
1972                         goto out;
1973                 off += endofs;
1974                 len -= endofs;
1975         }
1976
1977 out:
1978         VM_OBJECT_WUNLOCK(shmfd->shm_object);
1979         *offset = off;
1980         *length = len;
1981         return (error);
1982 }
1983
1984 static int
1985 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
1986     struct ucred *active_cred, struct thread *td)
1987 {
1988         void *rl_cookie;
1989         struct shmfd *shmfd;
1990         off_t off, len;
1991         int error;
1992
1993         KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
1994         KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
1995             ("shm_fspacectl: non-zero flags"));
1996         KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
1997             ("shm_fspacectl: offset/length overflow or underflow"));
1998         error = EINVAL;
1999         shmfd = fp->f_data;
2000         off = *offset;
2001         len = *length;
2002
2003         rl_cookie = rangelock_wlock(&shmfd->shm_rl, off, off + len,
2004             &shmfd->shm_mtx);
2005         switch (cmd) {
2006         case SPACECTL_DEALLOC:
2007                 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2008                         error = EPERM;
2009                         break;
2010                 }
2011                 error = shm_deallocate(shmfd, &off, &len, flags);
2012                 *offset = off;
2013                 *length = len;
2014                 break;
2015         default:
2016                 __assert_unreachable();
2017         }
2018         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2019         return (error);
2020 }
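/*
 * Illustrative userspace sketch (editor's example): the handler above backs
 * fspacectl(2) with SPACECTL_DEALLOC, which punches a hole in the object:
 *
 *	struct spacectl_range range = { .r_offset = 0, .r_len = 65536 };
 *
 *	if (fspacectl(fd, SPACECTL_DEALLOC, &range, 0, &range) == -1)
 *		err(1, "fspacectl");
 *	// On return, range describes any unprocessed remainder.
 */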
2021
2023 static int
2024 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2025 {
2026         void *rl_cookie;
2027         struct shmfd *shmfd;
2028         size_t size;
2029         int error;
2030
2031         /* This assumes that the caller already checked for overflow. */
2032         error = 0;
2033         shmfd = fp->f_data;
2034         size = offset + len;
2035
2036         /*
2037          * Just grab the rangelock for the range that we may be attempting to
2038          * grow, rather than blocking read/write for regions we won't be
2039          * touching while this (potential) resize is in progress.  Other
2040          * attempts to resize the shmfd will have to take a write lock from 0 to
2041          * OFF_MAX, so this being potentially beyond the current usable range of
2042          * the shmfd is not necessarily a concern.  If other mechanisms are
2043          * added to grow a shmfd, this may need to be re-evaluated.
2044          */
2045         rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
2046             &shmfd->shm_mtx);
2047         if (size > shmfd->shm_size)
2048                 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2049         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2050         /* Translate to posix_fallocate(2) return value as needed. */
2051         if (error == ENOMEM)
2052                 error = ENOSPC;
2053         return (error);
2054 }
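/*
 * Illustrative userspace sketch (editor's example): this handler services
 * posix_fallocate(2) on shm descriptors; note the ENOMEM to ENOSPC
 * translation above.  posix_fallocate() returns an error number rather
 * than setting errno:
 *
 *	int error;
 *
 *	error = posix_fallocate(fd, 0, 1024 * 1024);
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");
 */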
2055
2056 static int
2057 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2058 {
2059         struct shm_mapping *shmm;
2060         struct sbuf sb;
2061         struct kinfo_file kif;
2062         u_long i;
2063         int error, error2;
2064
2065         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2066         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2067         error = 0;
2068         sx_slock(&shm_dict_lock);
2069         for (i = 0; i < shm_hash + 1; i++) {
2070                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2071                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2072                             &kif, true);
2073                         if (error == EPERM) {
2074                                 error = 0;
2075                                 continue;
2076                         }
2077                         if (error != 0)
2078                                 break;
2079                         pack_kinfo(&kif);
2080                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2081                             0 : ENOMEM;
2082                         if (error != 0)
2083                                 break;
2084                 }
2085         }
2086         sx_sunlock(&shm_dict_lock);
2087         error2 = sbuf_finish(&sb);
2088         sbuf_delete(&sb);
2089         return (error != 0 ? error : error2);
2090 }
2091
2092 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2093     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2094     NULL, 0, sysctl_posix_shm_list, "",
2095     "POSIX SHM list");
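/*
 * Illustrative userspace sketch (editor's example): the sysctl exports a
 * stream of variable-length kinfo_file records, each carrying its own size
 * in kf_structsize:
 *
 *	struct kinfo_file *kif;
 *	char *buf, *bp;
 *	size_t len;
 *
 *	if (sysctlbyname("kern.ipc.posix_shm_list", NULL, &len, NULL, 0) != 0)
 *		err(1, "sysctl");
 *	if ((buf = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctlbyname("kern.ipc.posix_shm_list", buf, &len, NULL, 0) != 0)
 *		err(1, "sysctl");
 *	for (bp = buf; bp < buf + len; bp += kif->kf_structsize) {
 *		kif = (struct kinfo_file *)(void *)bp;
 *		printf("%s size %jd\n", kif->kf_path,
 *		    (intmax_t)kif->kf_un.kf_file.kf_file_size);
 *	}
 */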
2096
2097 int
2098 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2099     struct filecaps *caps)
2100 {
2101
2102         return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2103 }
2104
2105 /*
2106  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2107  * caller, and libc will enforce it for the traditional shm_open() call.  This
2108  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
2109  * interface also includes a 'name' argument that is currently unused, but could
2110  * potentially be exported later via some interface for debugging purposes.
2111  * From the kernel's perspective, it is optional.  Individual consumers like
2112  * memfd_create() may require it in order to be compatible with other systems
2113  * implementing the same function.
2114  */
2115 int
2116 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2117 {
2118
2119         return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2120             uap->shmflags, NULL, uap->name));
2121 }
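/*
 * Illustrative userspace sketch (editor's example): memfd_create(3) is one
 * libc consumer of this entry point; it is believed to pass the name and
 * grow-on-write semantics through the shmflags/name arguments, though the
 * wrapper internals are not shown here:
 *
 *	int fd;
 *
 *	fd = memfd_create("scratch", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *	if (fd < 0)
 *		err(1, "memfd_create");
 */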