/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * the mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */

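/*
 * Illustrative userland usage (editorial sketch, not part of this file):
 * the typical consumer lifecycle of the interfaces implemented below,
 * using a hypothetical object name "/myshm".
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0) {
 *		(mmap(2) the descriptor and use the object here)
 *	}
 *	close(fd);
 *	shm_unlink("/myshm");
 */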
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
        char            *sm_path;
        Fnv32_t         sm_fnv;
        struct shmfd    *sm_shmfd;
        LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])

static void     shm_init(void *arg);
static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int      shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);

static fo_rdwr_t        shm_read;
static fo_rdwr_t        shm_write;
static fo_truncate_t    shm_truncate;
static fo_ioctl_t       shm_ioctl;
static fo_stat_t        shm_stat;
static fo_close_t       shm_close;
static fo_chmod_t       shm_chmod;
static fo_chown_t       shm_chown;
static fo_seek_t        shm_seek;
static fo_fill_kinfo_t  shm_fill_kinfo;
static fo_mmap_t        shm_mmap;
static fo_get_seals_t   shm_get_seals;
static fo_add_seals_t   shm_add_seals;
static fo_fallocate_t   shm_fallocate;

/* File descriptor operations. */
struct fileops shm_ops = {
        .fo_read = shm_read,
        .fo_write = shm_write,
        .fo_truncate = shm_truncate,
        .fo_ioctl = shm_ioctl,
        .fo_poll = invfo_poll,
        .fo_kqfilter = invfo_kqfilter,
        .fo_stat = shm_stat,
        .fo_close = shm_close,
        .fo_chmod = shm_chmod,
        .fo_chown = shm_chown,
        .fo_sendfile = vn_sendfile,
        .fo_seek = shm_seek,
        .fo_fill_kinfo = shm_fill_kinfo,
        .fo_mmap = shm_mmap,
        .fo_get_seals = shm_get_seals,
        .fo_add_seals = shm_add_seals,
        .fo_fallocate = shm_fallocate,
        .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
        vm_page_t m;
        vm_pindex_t idx;
        size_t tlen;
        int error, offset, rv;

        idx = OFF_TO_IDX(uio->uio_offset);
        offset = uio->uio_offset & PAGE_MASK;
        tlen = MIN(PAGE_SIZE - offset, len);

        rv = vm_page_grab_valid_unlocked(&m, obj, idx,
            VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
        if (rv == VM_PAGER_OK)
                goto found;

        /*
         * Read I/O without either a corresponding resident page or swap
         * page: use zero_region.  This is intended to avoid instantiating
         * pages on read from a sparse region.
         */
        VM_OBJECT_WLOCK(obj);
        m = vm_page_lookup(obj, idx);
        if (uio->uio_rw == UIO_READ && m == NULL &&
            !vm_pager_has_page(obj, idx, NULL, NULL)) {
                VM_OBJECT_WUNLOCK(obj);
                return (uiomove(__DECONST(void *, zero_region), tlen, uio));
        }

        /*
         * Although the tmpfs vnode lock is held here, it is
         * nonetheless safe to sleep waiting for a free page.  The
         * pageout daemon does not need to acquire the tmpfs vnode
         * lock to page out the object's pages because it is an
         * OBJT_SWAP type object.
         */
        rv = vm_page_grab_valid(&m, obj, idx,
            VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
        if (rv != VM_PAGER_OK) {
                VM_OBJECT_WUNLOCK(obj);
                printf("uiomove_object: vm_obj %p idx %ju pager error %d\n",
                    obj, (uintmax_t)idx, rv);
                return (EIO);
        }
        VM_OBJECT_WUNLOCK(obj);

found:
        error = uiomove_fromphys(&m, offset, tlen, uio);
        if (uio->uio_rw == UIO_WRITE && error == 0)
                vm_page_set_dirty(m);
        vm_page_activate(m);
        vm_page_sunbusy(m);

        return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
        ssize_t resid;
        size_t len;
        int error;

        error = 0;
        while ((resid = uio->uio_resid) > 0) {
                if (obj_size <= uio->uio_offset)
                        break;
                len = MIN(obj_size - uio->uio_offset, resid);
                if (len == 0)
                        break;
                error = uiomove_object_page(obj, len, uio);
                if (error != 0 || resid == uio->uio_resid)
                        break;
        }
        return (error);
}
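
/*
 * Sketch of a kernel-side caller (editorial, illustrative; "buf", "len",
 * "off" and "obj" are hypothetical).  uiomove_object() consumes a struct
 * uio built in the usual way:
 *
 *	struct iovec iov;
 *	struct uio uio;
 *	int error;
 *
 *	iov.iov_base = buf;
 *	iov.iov_len = len;
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = off;
 *	uio.uio_resid = len;
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	uio.uio_rw = UIO_READ;
 *	uio.uio_td = curthread;
 *	error = uiomove_object(obj, obj_size, &uio);
 */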

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
        struct shmfd *shmfd;
        off_t foffset;
        int error;

        shmfd = fp->f_data;
        foffset = foffset_lock(fp, 0);
        error = 0;
        switch (whence) {
        case L_INCR:
                if (foffset < 0 ||
                    (offset > 0 && foffset > OFF_MAX - offset)) {
                        error = EOVERFLOW;
                        break;
                }
                offset += foffset;
                break;
        case L_XTND:
                if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
                        error = EOVERFLOW;
                        break;
                }
                offset += shmfd->shm_size;
                break;
        case L_SET:
                break;
        default:
                error = EINVAL;
        }
        if (error == 0) {
                if (offset < 0 || offset > shmfd->shm_size)
                        error = EINVAL;
                else
                        td->td_uretoff.tdu_off = offset;
        }
        foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
        return (error);
}
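
/*
 * Example (userland, illustrative): because shm descriptors are seekable,
 * the usual lseek(2) idiom reports the current object size:
 *
 *	off_t size;
 *
 *	size = lseek(fd, 0, SEEK_END);	(equals shm_size on success)
 *
 * Unlike a regular file, seeking beyond shm_size fails with EINVAL,
 * per the bounds check above.
 */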

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
            uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;
        off_t size;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
                /*
                 * Overflow is only an error if we're supposed to expand on
                 * write.  Otherwise, we'll just truncate the write to the
                 * size of the file, which can only grow up to OFF_MAX.
                 */
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
                        foffset_unlock_uio(fp, uio, flags);
                        return (EFBIG);
                }

                size = shmfd->shm_size;
        } else {
                size = uio->uio_offset + uio->uio_resid;
        }
        if ((flags & FOF_OFFSET) == 0) {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
        } else {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
                    size, &shmfd->shm_mtx);
        }
        if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
                error = EPERM;
        } else {
                error = 0;
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
                    size > shmfd->shm_size) {
                        VM_OBJECT_WLOCK(shmfd->shm_object);
                        error = shm_dotruncate_locked(shmfd, size, rl_cookie);
                        VM_OBJECT_WUNLOCK(shmfd->shm_object);
                }
                if (error == 0)
                        error = uiomove_object(shmfd->shm_object,
                            shmfd->shm_size, uio);
        }
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}
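
/*
 * Example (userland, illustrative): SHM_GROW_ON_WRITE is how
 * memfd_create()-style descriptors extend on write(2); a plain
 * shm_open(2) descriptor instead truncates writes to the current size:
 *
 *	int fd = memfd_create("demo", 0);
 *	char c = 'x';
 *
 *	write(fd, &c, 1);	(grows the object to 1 byte)
 */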

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{

        switch (com) {
        case FIONBIO:
        case FIOASYNC:
                /*
                 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, just like
                 * it would on an unlinked regular file.
                 */
                return (0);
        default:
                return (ENOTTY);
        }
}
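
/*
 * Example (userland, illustrative): the two ioctls accepted above make
 * the common non-blocking idiom a silent success rather than ENOTTY:
 *
 *	int on = 1;
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);	(succeeds; routed via FIONBIO)
 *	ioctl(fd, FIOASYNC, &on);	(returns 0, has no effect)
 */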

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;

#ifdef MAC
        error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif

        /*
         * Attempt to return sane-ish values for fstat() on a memory file
         * descriptor.
         */
        bzero(sb, sizeof(*sb));
        sb->st_blksize = PAGE_SIZE;
        sb->st_size = shmfd->shm_size;
        sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
        mtx_lock(&shm_timestamp_lock);
        sb->st_atim = shmfd->shm_atime;
        sb->st_ctim = shmfd->shm_ctime;
        sb->st_mtim = shmfd->shm_mtime;
        sb->st_birthtim = shmfd->shm_birthtime;
        sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
        sb->st_uid = shmfd->shm_uid;
        sb->st_gid = shmfd->shm_gid;
        mtx_unlock(&shm_timestamp_lock);
        sb->st_dev = shm_dev_ino;
        sb->st_ino = shmfd->shm_ino;
        sb->st_nlink = shmfd->shm_object->ref_count;

        return (0);
}
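
/*
 * Example (userland, illustrative): what a caller can rely on from
 * fstat(2) on a shm descriptor, per the code above:
 *
 *	struct stat sb;
 *
 *	fstat(fd, &sb);
 *	(S_ISREG(sb.st_mode) is true; the object is reported as a
 *	 regular file, and sb.st_size is the current object size)
 */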

static int
shm_close(struct file *fp, struct thread *td)
{
        struct shmfd *shmfd;

        shmfd = fp->f_data;
        fp->f_data = NULL;
        shm_drop(shmfd);

        return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
        int error;
        char *path;
        const char *pr_path;
        size_t pr_pathlen;

        path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
        pr_path = td->td_ucred->cr_prison->pr_path;

        /* Construct a full pathname for jailed callers. */
        pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
            strlcpy(path, pr_path, MAXPATHLEN);
        error = copyinstr(userpath_in, path + pr_pathlen,
            MAXPATHLEN - pr_pathlen, NULL);
        if (error != 0)
                goto out;

#ifdef KTRACE
        if (KTRPOINT(curthread, KTR_NAMEI))
                ktrnamei(path);
#endif

        /* Require paths to start with a '/' character. */
        if (path[pr_pathlen] != '/') {
                error = EINVAL;
                goto out;
        }

        *path_out = path;

out:
        if (error != 0)
                free(path, M_SHMFD);

        return (error);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t idx, nobjsize;
        vm_ooffset_t delta;
        int base, rv;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
        if (length == shmfd->shm_size)
                return (0);
        nobjsize = OFF_TO_IDX(length + PAGE_MASK);

        /* Are we shrinking?  If so, trim the end. */
        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);

                /*
                 * Disallow any requests to shrink the size if this
                 * object is mapped into the kernel.
                 */
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);

                /*
                 * Zero the truncated part of the last page.
                 */
                base = length & PAGE_MASK;
                if (base != 0) {
                        idx = OFF_TO_IDX(length);
retry:
                        m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
                        if (m != NULL) {
                                MPASS(vm_page_all_valid(m));
                        } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
                                m = vm_page_alloc(object, idx,
                                    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
                                if (m == NULL)
                                        goto retry;
                                vm_object_pip_add(object, 1);
                                VM_OBJECT_WUNLOCK(object);
                                rv = vm_pager_get_pages(object, &m, 1, NULL,
                                    NULL);
                                VM_OBJECT_WLOCK(object);
                                vm_object_pip_wakeup(object);
                                if (rv == VM_PAGER_OK) {
                                        /*
                                         * Since the page was not resident,
                                         * and therefore not recently
                                         * accessed, immediately enqueue it
                                         * for asynchronous laundering.  The
                                         * current operation is not regarded
                                         * as an access.
                                         */
                                        vm_page_launder(m);
                                } else {
                                        vm_page_free(m);
                                        VM_OBJECT_WUNLOCK(object);
                                        return (EIO);
                                }
                        }
                        if (m != NULL) {
                                pmap_zero_page_area(m, base, PAGE_SIZE - base);
                                KASSERT(vm_page_all_valid(m),
                                    ("shm_dotruncate: page %p is invalid", m));
                                vm_page_set_dirty(m);
                                vm_page_xunbusy(m);
                        }
                }
                delta = IDX_TO_OFF(object->size - nobjsize);

                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);

                /* Free the swap accounted for shm */
                swap_release_by_cred(delta, object->cred);
                object->charge -= delta;
        } else {
                if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                        return (EPERM);

                /* Try to reserve additional swap space. */
                delta = IDX_TO_OFF(nobjsize - object->size);
                if (!swap_reserve_by_cred(delta, object->cred))
                        return (ENOMEM);
                object->charge += delta;
        }
        shmfd->shm_size = length;
        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_ctime);
        shmfd->shm_mtime = shmfd->shm_ctime;
        mtx_unlock(&shm_timestamp_lock);
        object->size = nobjsize;
        return (0);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
        void *rl_cookie;
        int error;

        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
            &shmfd->shm_mtx);
        VM_OBJECT_WLOCK(shmfd->shm_object);
        error = shm_dotruncate_locked(shmfd, length, rl_cookie);
        VM_OBJECT_WUNLOCK(shmfd->shm_object);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        return (error);
}
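
/*
 * Example (userland, illustrative): resizing lands in shm_dotruncate()
 * via ftruncate(2).  With F_SEAL_SHRINK applied, shrinking fails with
 * EPERM; growing fails with ENOMEM when the swap reservation cannot be
 * met:
 *
 *	ftruncate(fd, 2 * 1024 * 1024);	(grow: reserves swap)
 *	ftruncate(fd, 4096);		(shrink: zeroes the tail page)
 */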

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
        struct shmfd *shmfd;

        shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
        shmfd->shm_size = 0;
        shmfd->shm_uid = ucred->cr_uid;
        shmfd->shm_gid = ucred->cr_gid;
        shmfd->shm_mode = mode;
        shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
            shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
        KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
        vfs_timestamp(&shmfd->shm_birthtime);
        shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
            shmfd->shm_birthtime;
        shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
        refcount_init(&shmfd->shm_refs, 1);
        mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
        rangelock_init(&shmfd->shm_rl);
#ifdef MAC
        mac_posixshm_init(shmfd);
        mac_posixshm_create(ucred, shmfd);
#endif

        return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

        refcount_acquire(&shmfd->shm_refs);
        return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

        if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
                mac_posixshm_destroy(shmfd);
#endif
                rangelock_destroy(&shmfd->shm_rl);
                mtx_destroy(&shmfd->shm_mtx);
                vm_object_deallocate(shmfd->shm_object);
                free(shmfd, M_SHMFD);
        }
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
        accmode_t accmode;
        int error;

        accmode = 0;
        if (flags & FREAD)
                accmode |= VREAD;
        if (flags & FWRITE)
                accmode |= VWRITE;
        mtx_lock(&shm_timestamp_lock);
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
            accmode, ucred, NULL);
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

        mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
        sx_init(&shm_dict_lock, "shm dictionary");
        shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
        new_unrhdr64(&shm_ino_unr, 1);
        shm_dev_ino = devfs_alloc_cdp_inode();
        KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
        struct shm_mapping *map;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0)
                        return (map->sm_shmfd);
        }

        return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
        struct shm_mapping *map;

        map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
        map->sm_path = path;
        map->sm_fnv = fnv;
        map->sm_shmfd = shm_hold(shmfd);
        shmfd->shm_path = path;
        LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
        struct shm_mapping *map;
        int error;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
                        error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
                        if (error)
                                return (error);
#endif
                        error = shm_access(map->sm_shmfd, ucred,
                            FREAD | FWRITE);
                        if (error)
                                return (error);
                        map->sm_shmfd->shm_path = NULL;
                        LIST_REMOVE(map, sm_link);
                        shm_drop(map->sm_shmfd);
                        free(map->sm_path, M_SHMFD);
                        free(map, M_SHMFD);
                        return (0);
                }
        }

        return (ENOENT);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
        struct filedesc *fdp;
        struct shmfd *shmfd;
        struct file *fp;
        char *path;
        void *rl_cookie;
        Fnv32_t fnv;
        mode_t cmode;
        int error, fd, initial_seals;

        if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE)) != 0)
                return (EINVAL);

        initial_seals = F_SEAL_SEAL;
        if ((shmflags & SHM_ALLOW_SEALING) != 0)
                initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
        /*
         * shm_open(2) is only allowed for anonymous objects.
         */
        if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
                return (ECAPMODE);
#endif

        AUDIT_ARG_FFLAGS(flags);
        AUDIT_ARG_MODE(mode);

        if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
                return (EINVAL);

        if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
                return (EINVAL);

        /*
         * Currently only F_SEAL_SEAL may be set when creating or opening a
         * shmfd.  If the decision is made later to allow additional seals,
         * care must be taken below to ensure that the seals are properly set
         * if the shmfd already existed -- this currently assumes that only
         * F_SEAL_SEAL can be set and doesn't take further precautions to
         * ensure the validity of the seals being added with respect to
         * current mappings.
         */
        if ((initial_seals & ~F_SEAL_SEAL) != 0)
                return (EINVAL);

        fdp = td->td_proc->p_fd;
        cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

        /*
         * A shm created via shm_open(2) should always have O_CLOEXEC set, as
         * mandated by POSIX.  We allow it to be unset here so that an
         * in-kernel interface may be written as a thin layer around shm,
         * optionally not setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set
         * unconditionally in sys_shm_open() to keep this implementation
         * compliant.
         */
        error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
        if (error)
                return (error);

        /* A SHM_ANON path pointer creates an anonymous object. */
        if (userpath == SHM_ANON) {
                /* A read-only anonymous object is pointless. */
                if ((flags & O_ACCMODE) == O_RDONLY) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (EINVAL);
                }
                shmfd = shm_alloc(td->td_ucred, cmode);
                shmfd->shm_seals = initial_seals;
        } else {
                error = shm_copyin_path(td, userpath, &path);
                if (error != 0) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }

                AUDIT_ARG_UPATH1_CANON(path);
                fnv = fnv_32_str(path, FNV1_32_INIT);
                sx_xlock(&shm_dict_lock);
                shmfd = shm_lookup(path, fnv);
                if (shmfd == NULL) {
                        /* Object does not yet exist, create it if requested. */
                        if (flags & O_CREAT) {
#ifdef MAC
                                error = mac_posixshm_check_create(td->td_ucred,
                                    path);
                                if (error == 0) {
#endif
                                        shmfd = shm_alloc(td->td_ucred, cmode);
                                        shmfd->shm_seals = initial_seals;
                                        shm_insert(path, fnv, shmfd);
#ifdef MAC
                                }
#endif
                        } else {
                                free(path, M_SHMFD);
                                error = ENOENT;
                        }
                } else {
                        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                            &shmfd->shm_mtx);

                        /*
                         * kern_shm_open() likely shouldn't ever error out on
                         * trying to set a seal that already exists, unlike
                         * F_ADD_SEALS.  This would break terribly as
                         * shm_open(2) actually sets F_SEAL_SEAL to maintain
                         * historical behavior where the underlying file could
                         * not be sealed.
                         */
                        initial_seals &= ~shmfd->shm_seals;

                        /*
                         * Object already exists, obtain a new
                         * reference if requested and permitted.
                         */
                        free(path, M_SHMFD);

                        /*
                         * initial_seals cannot set additional seals if
                         * F_SEAL_SEAL is already set.  If F_SEAL_SEAL is set,
                         * then we have already removed that one from
                         * initial_seals.  This is currently redundant as we
                         * only allow setting F_SEAL_SEAL at creation time, but
                         * it's cheap to check and decreases the effort required
                         * to allow additional seals.
                         */
                        if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
                            initial_seals != 0)
                                error = EPERM;
                        else if ((flags & (O_CREAT | O_EXCL)) ==
                            (O_CREAT | O_EXCL))
                                error = EEXIST;
                        else {
#ifdef MAC
                                error = mac_posixshm_check_open(td->td_ucred,
                                    shmfd, FFLAGS(flags & O_ACCMODE));
                                if (error == 0)
#endif
                                error = shm_access(shmfd, td->td_ucred,
                                    FFLAGS(flags & O_ACCMODE));
                        }

                        /*
                         * Truncate the file back to zero length if
                         * O_TRUNC was specified and the object was
                         * opened with read/write.
                         */
                        if (error == 0 &&
                            (flags & (O_ACCMODE | O_TRUNC)) ==
                            (O_RDWR | O_TRUNC)) {
                                VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
                                error = mac_posixshm_check_truncate(
                                        td->td_ucred, fp->f_cred, shmfd);
                                if (error == 0)
#endif
                                        error = shm_dotruncate_locked(shmfd, 0,
                                            rl_cookie);
                                VM_OBJECT_WUNLOCK(shmfd->shm_object);
                        }
                        if (error == 0) {
                                /*
                                 * Currently we only allow F_SEAL_SEAL to be
                                 * set initially.  As noted above, this would
                                 * need to be reworked should that change.
                                 */
                                shmfd->shm_seals |= initial_seals;
                                shm_hold(shmfd);
                        }
                        rangelock_unlock(&shmfd->shm_rl, rl_cookie,
                            &shmfd->shm_mtx);
                }
                sx_xunlock(&shm_dict_lock);

                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }
        }

        shmfd->shm_flags = shmflags;
        finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

        td->td_retval[0] = fd;
        fdrop(fp, td);

        return (0);
}
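
/*
 * Example (userland, illustrative): kern_shm_open2() is the backend for
 * both shm_open(2), which never passes SHM_ALLOW_SEALING and so creates
 * objects with F_SEAL_SEAL applied, and memfd_create(), where
 * MFD_ALLOW_SEALING translates to SHM_ALLOW_SEALING:
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	(fd starts with no seals; seals may be added later via
 *	 fcntl(2) F_ADD_SEALS)
 */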

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

        return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
            uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
        char *path;
        Fnv32_t fnv;
        int error;

        error = shm_copyin_path(td, uap->path, &path);
        if (error != 0)
                return (error);

        AUDIT_ARG_UPATH1_CANON(path);
        fnv = fnv_32_str(path, FNV1_32_INIT);
        sx_xlock(&shm_dict_lock);
        error = shm_remove(path, fnv, td->td_ucred);
        sx_xunlock(&shm_dict_lock);
        free(path, M_SHMFD);

        return (error);
}

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
        char *path_from = NULL, *path_to = NULL;
        Fnv32_t fnv_from, fnv_to;
        struct shmfd *fd_from;
        struct shmfd *fd_to;
        int error;
        int flags;

        flags = uap->flags;
        AUDIT_ARG_FFLAGS(flags);

        /*
         * Make sure the user passed only valid flags.
         * If you add a new flag, please add a new term here.
         */
        if ((flags & ~(
            SHM_RENAME_NOREPLACE |
            SHM_RENAME_EXCHANGE
            )) != 0) {
                error = EINVAL;
                goto out;
        }

        /*
         * EXCHANGE and NOREPLACE don't quite make sense together.  Let's
         * force the user to choose one or the other.
         */
        if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
            (flags & SHM_RENAME_EXCHANGE) != 0) {
                error = EINVAL;
                goto out;
        }

        /* Renaming to or from anonymous makes no sense. */
        if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
                error = EINVAL;
                goto out;
        }

        error = shm_copyin_path(td, uap->path_from, &path_from);
        if (error != 0)
                goto out;

        error = shm_copyin_path(td, uap->path_to, &path_to);
        if (error != 0)
                goto out;

        AUDIT_ARG_UPATH1_CANON(path_from);
        AUDIT_ARG_UPATH2_CANON(path_to);

        /* Rename with from/to equal is a no-op. */
        if (strcmp(path_from, path_to) == 0)
                goto out;

        fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
        fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

        sx_xlock(&shm_dict_lock);

        fd_from = shm_lookup(path_from, fnv_from);
        if (fd_from == NULL) {
                error = ENOENT;
                goto out_locked;
        }

        fd_to = shm_lookup(path_to, fnv_to);
        if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
                error = EEXIST;
                goto out_locked;
        }

        /*
         * Unconditionally prevents shm_remove from invalidating the 'from'
         * shm's state.
         */
        shm_hold(fd_from);
        error = shm_remove(path_from, fnv_from, td->td_ucred);

        /*
         * If we get ENOENT here, one of our assumptions has failed
         * (e.g. the lock did not protect us).
         */
        KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
            path_from));
        if (error != 0) {
                shm_drop(fd_from);
                goto out_locked;
        }

        /*
         * If we are exchanging, we need to ensure the shm_remove below
         * doesn't invalidate the dest shm's state.
         */
        if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
                shm_hold(fd_to);

        /*
         * NOTE: if path_to is not already in the hash, c'est la vie;
         * it simply means we have nothing already at path_to to unlink.
         * That is the ENOENT case.
         *
         * If we somehow don't have access to unlink this guy, but
         * did for the shm at path_from, then relink the shm to path_from
         * and abort with EACCES.
         *
         * All other errors: that is weird; let's relink and abort the
         * operation.
         */
        error = shm_remove(path_to, fnv_to, td->td_ucred);
        if (error != 0 && error != ENOENT) {
                shm_insert(path_from, fnv_from, fd_from);
                shm_drop(fd_from);
                /* Don't free path_from now, since the hash references it */
                path_from = NULL;
                goto out_locked;
        }

        error = 0;

        shm_insert(path_to, fnv_to, fd_from);

        /* Don't free path_to now, since the hash references it */
        path_to = NULL;

        /* We kept a ref when we removed, and incremented again in insert */
        shm_drop(fd_from);
        KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
            fd_from->shm_refs));

        if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
                shm_insert(path_from, fnv_from, fd_to);
                path_from = NULL;
                shm_drop(fd_to);
                KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
                    fd_to->shm_refs));
        }

out_locked:
        sx_xunlock(&shm_dict_lock);

out:
        free(path_from, M_SHMFD);
        free(path_to, M_SHMFD);
        return (error);
}
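
/*
 * Example (userland, illustrative): the three rename behaviors, assuming
 * hypothetical object names "/old" and "/new":
 *
 *	shm_rename("/old", "/new", 0);
 *	    (atomically replaces any object at "/new")
 *	shm_rename("/old", "/new", SHM_RENAME_NOREPLACE);
 *	    (fails with EEXIST if "/new" exists)
 *	shm_rename("/old", "/new", SHM_RENAME_EXCHANGE);
 *	    (swaps the objects at the two names)
 */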

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
        struct shmfd *shmfd;
        vm_prot_t maxprot;
        int error;
        bool writecnt;
        void *rl_cookie;

        shmfd = fp->f_data;
        maxprot = VM_PROT_NONE;

        rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
            &shmfd->shm_mtx);
        /* FREAD should always be set. */
        if ((fp->f_flag & FREAD) != 0)
                maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;

        /*
         * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
         * mapping with a write seal applied.  Private mappings are always
         * writeable.
         */
        if ((flags & MAP_SHARED) == 0) {
                cap_maxprot |= VM_PROT_WRITE;
                maxprot |= VM_PROT_WRITE;
                writecnt = false;
        } else {
                if ((fp->f_flag & FWRITE) != 0 &&
                    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
                        maxprot |= VM_PROT_WRITE;

                /*
                 * Any mappings from a writable descriptor may be upgraded to
                 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
                 * applied between the open and subsequent mmap(2).  We want to
                 * reject application of a write seal as long as any such
                 * mapping exists so that the seal cannot be trivially bypassed.
                 */
                writecnt = (maxprot & VM_PROT_WRITE) != 0;
                if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
                        error = EACCES;
                        goto out;
                }
        }
        maxprot &= cap_maxprot;

        /* See comment in vn_mmap(). */
        if (
#ifdef _LP64
            objsize > OFF_MAX ||
#endif
            foff < 0 || foff > OFF_MAX - objsize) {
                error = EINVAL;
                goto out;
        }

#ifdef MAC
        error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
        if (error != 0)
                goto out;
#endif

        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_atime);
        mtx_unlock(&shm_timestamp_lock);
        vm_object_reference(shmfd->shm_object);

        if (writecnt)
                vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
        error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
            shmfd->shm_object, foff, writecnt, td);
        if (error != 0) {
                if (writecnt)
                        vm_pager_release_writecount(shmfd->shm_object, 0,
                            objsize);
                vm_object_deallocate(shmfd->shm_object);
        }
out:
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        return (error);
}
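
/*
 * Example (userland, illustrative): how the checks above surface.  After
 * F_SEAL_WRITE is applied, a writable shared mapping is refused, while a
 * private (copy-on-write) mapping still succeeds:
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	    (fails with EACCES on a write-sealed object)
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 *	    (succeeds; writes go to anonymous copies)
 */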

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        int error;

        error = 0;
        shmfd = fp->f_data;
        mtx_lock(&shm_timestamp_lock);
        /*
         * SUSv4 says that x bits of permission need not be affected.
         * Be consistent with our shm_open there.
         */
#ifdef MAC
        error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
        if (error != 0)
                goto out;
#endif
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
            shmfd->shm_gid, VADMIN, active_cred, NULL);
        if (error != 0)
                goto out;
        shmfd->shm_mode = mode & ACCESSPERMS;
out:
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        int error;

        error = 0;
        shmfd = fp->f_data;
        mtx_lock(&shm_timestamp_lock);
#ifdef MAC
        error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
        if (error != 0)
                goto out;
#endif
        if (uid == (uid_t)-1)
                uid = shmfd->shm_uid;
        if (gid == (gid_t)-1)
                gid = shmfd->shm_gid;
        if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
            (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
            (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
                goto out;
        shmfd->shm_uid = uid;
        shmfd->shm_gid = gid;
out:
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
        struct shmfd *shmfd;
        vm_offset_t kva, ofs;
        vm_object_t obj;
        int rv;

        if (fp->f_type != DTYPE_SHM)
                return (EINVAL);
        shmfd = fp->f_data;
        obj = shmfd->shm_object;
        VM_OBJECT_WLOCK(obj);
        /*
         * XXXRW: This validation is probably insufficient, and subject to
         * sign errors.  It should be fixed.
         */
        if (offset >= shmfd->shm_size ||
            offset + size > round_page(shmfd->shm_size)) {
                VM_OBJECT_WUNLOCK(obj);
                return (EINVAL);
        }

        shmfd->shm_kmappings++;
        vm_object_reference_locked(obj);
        VM_OBJECT_WUNLOCK(obj);

        /* Map the object into the kernel_map and wire it. */
        kva = vm_map_min(kernel_map);
        ofs = offset & PAGE_MASK;
        offset = trunc_page(offset);
        size = round_page(size + ofs);
        rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
            VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_READ | VM_PROT_WRITE, 0);
        if (rv == KERN_SUCCESS) {
                rv = vm_map_wire(kernel_map, kva, kva + size,
                    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
                if (rv == KERN_SUCCESS) {
                        *memp = (void *)(kva + ofs);
                        return (0);
                }
                vm_map_remove(kernel_map, kva, kva + size);
        } else
                vm_object_deallocate(obj);

        /* On failure, drop our mapping reference. */
        VM_OBJECT_WLOCK(obj);
        shmfd->shm_kmappings--;
        VM_OBJECT_WUNLOCK(obj);

        return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
        struct shmfd *shmfd;
        vm_map_entry_t entry;
        vm_offset_t kva, ofs;
        vm_object_t obj;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        vm_map_t map;
        int rv;

        if (fp->f_type != DTYPE_SHM)
                return (EINVAL);
        shmfd = fp->f_data;
        kva = (vm_offset_t)mem;
        ofs = kva & PAGE_MASK;
        kva = trunc_page(kva);
        size = round_page(size + ofs);
        map = kernel_map;
        rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
            &obj, &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (EINVAL);
        if (entry->start != kva || entry->end != kva + size) {
                vm_map_lookup_done(map, entry);
                return (EINVAL);
        }
        vm_map_lookup_done(map, entry);
        if (obj != shmfd->shm_object)
                return (EINVAL);
        vm_map_remove(map, kva, kva + size);
        VM_OBJECT_WLOCK(obj);
        KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
        shmfd->shm_kmappings--;
        VM_OBJECT_WUNLOCK(obj);
        return (0);
}
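
/*
 * Sketch of an in-kernel consumer (editorial, illustrative; "fp" is a
 * hypothetical held shm file).  The caller must unmap exactly the range
 * it mapped:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, size, 0, &mem);
 *	if (error == 0) {
 *		memset(mem, 0, size);	(the pages are wired)
 *		error = shm_unmap(fp, mem, size);
 *	}
 */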

static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
        const char *path, *pr_path;
        size_t pr_pathlen;
        bool visible;

        sx_assert(&shm_dict_lock, SA_LOCKED);
        kif->kf_type = KF_TYPE_SHM;
        kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
        kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
        if (shmfd->shm_path != NULL) {
                path = shmfd->shm_path;
                pr_path = curthread->td_ucred->cr_prison->pr_path;
                if (strcmp(pr_path, "/") != 0) {
                        /* Return the jail-rooted pathname. */
                        pr_pathlen = strlen(pr_path);
                        visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
                            path[pr_pathlen] == '/';
                        if (list && !visible)
                                return (EPERM);
                        if (visible)
                                path += pr_pathlen;
                }
                strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
        }
        return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
        int res;

        sx_slock(&shm_dict_lock);
        res = shm_fill_kinfo_locked(fp->f_data, kif, false);
        sx_sunlock(&shm_dict_lock);
        return (res);
}

static int
shm_add_seals(struct file *fp, int seals)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        vm_ooffset_t writemappings;
        int error, nseals;

        error = 0;
        shmfd = fp->f_data;
        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
            &shmfd->shm_mtx);

        /* Even already-set seals should result in EPERM. */
        if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
                error = EPERM;
                goto out;
        }
        nseals = seals & ~shmfd->shm_seals;
        if ((nseals & F_SEAL_WRITE) != 0) {
                /*
                 * The rangelock above prevents writable mappings from being
                 * added after we've started applying seals.  The RLOCK here
                 * is to avoid torn reads on ILP32 arches as unmapping/reducing
                 * writemappings will be done without a rangelock.
                 */
                VM_OBJECT_RLOCK(shmfd->shm_object);
                writemappings = shmfd->shm_object->un_pager.swp.writemappings;
                VM_OBJECT_RUNLOCK(shmfd->shm_object);
                /* kmappings are also writable */
                if (writemappings > 0) {
                        error = EBUSY;
                        goto out;
                }
        }
        shmfd->shm_seals |= nseals;
out:
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        return (error);
}

static int
shm_get_seals(struct file *fp, int *seals)
{
        struct shmfd *shmfd;

        shmfd = fp->f_data;
        *seals = shmfd->shm_seals;
        return (0);
}
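
/*
 * Example (userland, illustrative): adding seals with fcntl(2).  Order
 * matters: F_SEAL_SEAL forbids all later additions, and F_SEAL_WRITE is
 * refused with EBUSY while writable mappings exist:
 *
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL);	(freeze the seal set)
 *	fcntl(fd, F_GET_SEALS);			(returns the seal bitmask)
 */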

static int
shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
{
        void *rl_cookie;
        struct shmfd *shmfd;
        size_t size;
        int error;

        /* This assumes that the caller already checked for overflow. */
        error = 0;
        shmfd = fp->f_data;
        size = offset + len;

        /*
         * Just grab the rangelock for the range that we may be attempting to
         * grow, rather than blocking read/write for regions we won't be
         * touching while this (potential) resize is in progress.  Other
         * attempts to resize the shmfd will have to take a write lock from 0 to
         * OFF_MAX, so this being potentially beyond the current usable range of
         * the shmfd is not necessarily a concern.  If other mechanisms are
         * added to grow a shmfd, this may need to be re-evaluated.
         */
        rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
            &shmfd->shm_mtx);
        if (size > shmfd->shm_size) {
                VM_OBJECT_WLOCK(shmfd->shm_object);
                error = shm_dotruncate_locked(shmfd, size, rl_cookie);
                VM_OBJECT_WUNLOCK(shmfd->shm_object);
        }
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        /* Translate to posix_fallocate(2) return value as needed. */
        if (error == ENOMEM)
                error = ENOSPC;
        return (error);
}
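
/*
 * Example (userland, illustrative): posix_fallocate(2) preallocates, and
 * may grow, the object; per the translation above, a failed swap
 * reservation is reported as ENOSPC:
 *
 *	error = posix_fallocate(fd, 0, 1024 * 1024);
 *	(0 on success; ENOSPC if swap cannot be reserved)
 */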

static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
        struct shm_mapping *shmm;
        struct sbuf sb;
        struct kinfo_file kif;
        u_long i;
        ssize_t curlen;
        int error, error2;

        sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
        sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
        curlen = 0;
        error = 0;
        sx_slock(&shm_dict_lock);
        for (i = 0; i < shm_hash + 1; i++) {
                LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
                        error = shm_fill_kinfo_locked(shmm->sm_shmfd,
                            &kif, true);
                        if (error == EPERM) {
                                /* Skip entries not visible in this jail. */
                                error = 0;
                                continue;
                        }
                        if (error != 0)
                                break;
                        pack_kinfo(&kif);
                        if (req->oldptr != NULL &&
                            kif.kf_structsize + curlen > req->oldlen)
                                break;
                        error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
                            0 : ENOMEM;
                        if (error != 0)
                                break;
                        curlen += kif.kf_structsize;
                }
        }
        sx_sunlock(&shm_dict_lock);
        error2 = sbuf_finish(&sb);
        sbuf_delete(&sb);
        return (error != 0 ? error : error2);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");

int
kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
    struct filecaps *caps)
{

        return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
}

/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
 * caller, and libc will enforce it for the traditional shm_open() call.  This
 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
 * interface also includes a 'name' argument that is currently unused, but
 * could potentially be exported later via some interface for debugging
 * purposes.  From the kernel's perspective, it is optional.  Individual
 * consumers like memfd_create() may require it in order to be compatible
 * with other systems implementing the same function.
 */
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

        return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
            uap->shmflags, NULL, uap->name));
}