/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).  Most of the
 * implementation lives here; vm_mmap.c contains the changes to the
 * mapping logic.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
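
/*
 * Illustrative userspace sketch (not part of this file; a minimal
 * example of the interface implemented below).  A consumer creates an
 * anonymous object, sizes it with ftruncate(2), and maps it:
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0) {
 *		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *	}
 */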

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
        char            *sm_path;
        Fnv32_t         sm_fnv;
        struct shmfd    *sm_shmfd;
        LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])

static void     shm_init(void *arg);
static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static void     shm_doremove(struct shm_mapping *map);
static int      shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);
static int      shm_deallocate(struct shmfd *shmfd, off_t *offset,
    off_t *length, int flags);

static fo_rdwr_t        shm_read;
static fo_rdwr_t        shm_write;
static fo_truncate_t    shm_truncate;
static fo_ioctl_t       shm_ioctl;
static fo_stat_t        shm_stat;
static fo_close_t       shm_close;
static fo_chmod_t       shm_chmod;
static fo_chown_t       shm_chown;
static fo_seek_t        shm_seek;
static fo_fill_kinfo_t  shm_fill_kinfo;
static fo_mmap_t        shm_mmap;
static fo_get_seals_t   shm_get_seals;
static fo_add_seals_t   shm_add_seals;
static fo_fallocate_t   shm_fallocate;
static fo_fspacectl_t   shm_fspacectl;

/* File descriptor operations. */
struct fileops shm_ops = {
        .fo_read = shm_read,
        .fo_write = shm_write,
        .fo_truncate = shm_truncate,
        .fo_ioctl = shm_ioctl,
        .fo_poll = invfo_poll,
        .fo_kqfilter = invfo_kqfilter,
        .fo_stat = shm_stat,
        .fo_close = shm_close,
        .fo_chmod = shm_chmod,
        .fo_chown = shm_chown,
        .fo_sendfile = vn_sendfile,
        .fo_seek = shm_seek,
        .fo_fill_kinfo = shm_fill_kinfo,
        .fo_mmap = shm_mmap,
        .fo_get_seals = shm_get_seals,
        .fo_add_seals = shm_add_seals,
        .fo_fallocate = shm_fallocate,
        .fo_fspacectl = shm_fspacectl,
        .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
};

FEATURE(posix_shm, "POSIX shared memory");

static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "");

static int largepage_reclaim_tries = 1;
SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
    CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
    "Number of contig reclaims before giving up for default alloc policy");

#define shm_rangelock_unlock(shmfd, cookie)                             \
        rangelock_unlock(&(shmfd)->shm_rl, (cookie), &(shmfd)->shm_mtx)
#define shm_rangelock_rlock(shmfd, start, end)                          \
        rangelock_rlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
#define shm_rangelock_tryrlock(shmfd, start, end)                       \
        rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
#define shm_rangelock_wlock(shmfd, start, end)                          \
        rangelock_wlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
        vm_page_t m;
        vm_pindex_t idx;
        size_t tlen;
        int error, offset, rv;

        idx = OFF_TO_IDX(uio->uio_offset);
        offset = uio->uio_offset & PAGE_MASK;
        tlen = MIN(PAGE_SIZE - offset, len);

        rv = vm_page_grab_valid_unlocked(&m, obj, idx,
            VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
        if (rv == VM_PAGER_OK)
                goto found;

        /*
         * Read I/O without either a corresponding resident page or swap
         * page: use zero_region.  This is intended to avoid instantiating
         * pages on read from a sparse region.
         */
        VM_OBJECT_WLOCK(obj);
        m = vm_page_lookup(obj, idx);
        if (uio->uio_rw == UIO_READ && m == NULL &&
            !vm_pager_has_page(obj, idx, NULL, NULL)) {
                VM_OBJECT_WUNLOCK(obj);
                return (uiomove(__DECONST(void *, zero_region), tlen, uio));
        }

        /*
         * Although the tmpfs vnode lock may be held here (this routine
         * is shared with tmpfs), it is nonetheless safe to sleep
         * waiting for a free page.  The pageout daemon does not need to
         * acquire the tmpfs vnode lock to page out obj's pages, because
         * obj is an OBJT_SWAP type object.
         */
        rv = vm_page_grab_valid(&m, obj, idx,
            VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
        if (rv != VM_PAGER_OK) {
                VM_OBJECT_WUNLOCK(obj);
                if (bootverbose) {
                        printf("uiomove_object: vm_obj %p idx %ju "
                            "pager error %d\n", obj, (uintmax_t)idx, rv);
                }
                return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
        }
        VM_OBJECT_WUNLOCK(obj);

found:
        error = uiomove_fromphys(&m, offset, tlen, uio);
        if (uio->uio_rw == UIO_WRITE && error == 0)
                vm_page_set_dirty(m);
        vm_page_activate(m);
        vm_page_sunbusy(m);

        return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
        ssize_t resid;
        size_t len;
        int error;

        error = 0;
        while ((resid = uio->uio_resid) > 0) {
                if (obj_size <= uio->uio_offset)
                        break;
                len = MIN(obj_size - uio->uio_offset, resid);
                if (len == 0)
                        break;
                error = uiomove_object_page(obj, len, uio);
                if (error != 0 || resid == uio->uio_resid)
                        break;
        }
        return (error);
}

static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
        vm_page_t m __diagused;
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind == 0 || pidx >= object->size)
                return (VM_PAGER_FAIL);
        *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

        /*
         * We only busy the first page in the superpage run.  It is
         * useless to busy the whole run, since we only remove full
         * superpages, and it would take too long to busy e.g. the
         * 512 * 512 == 262144 pages constituting a 1G amd64 superpage.
         */
        m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
        MPASS(m != NULL);

        *last = *first + atop(pagesizes[psind]) - 1;
        return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind == 0 || pindex >= object->size)
                return (FALSE);
        if (before != NULL) {
                *before = pindex - rounddown2(pindex, pagesizes[psind] /
                    PAGE_SIZE);
        }
        if (after != NULL) {
                *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
                    pindex;
        }
        return (TRUE);
}
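
/*
 * Worked example (illustrative, assuming a 2M superpage, i.e.
 * pagesizes[psind] / PAGE_SIZE == 512 base pages per run): for
 * pindex 700, rounddown2(700, 512) == 512 gives *before == 188, and
 * roundup2(700, 512) == 1024 gives *after == 324, describing where
 * pindex sits within the run of base pages forming its superpage.
 */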

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind != 0) {
                atomic_subtract_long(&count_largepages[psind],
                    object->size / (pagesizes[psind] / PAGE_SIZE));
                vm_wire_sub(object->size);
        } else {
                KASSERT(object->size == 0,
                    ("largepage phys obj %p not initialized but size %#jx > 0",
                    object, (uintmax_t)object->size));
        }
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
        .phys_pg_populate =     shm_largepage_phys_populate,
        .phys_pg_haspage =      shm_largepage_phys_haspage,
        .phys_pg_ctor =         shm_largepage_phys_ctor,
        .phys_pg_dtor =         shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
        return (shmfd->shm_object->type == OBJT_PHYS);
}

static void
shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
{
        struct shmfd *shm;
        vm_size_t c;

        swap_pager_freespace(obj, start, size, &c);
        if (c == 0)
                return;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        KASSERT(shm->shm_pages >= c,
            ("shm %p pages %jd free %jd", shm,
            (uintmax_t)shm->shm_pages, (uintmax_t)c));
        shm->shm_pages -= c;
}

static void
shm_page_inserted(vm_object_t obj, vm_page_t m)
{
        struct shmfd *shm;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
                shm->shm_pages += 1;
}

static void
shm_page_removed(vm_object_t obj, vm_page_t m)
{
        struct shmfd *shm;

        shm = obj->un_pager.swp.swp_priv;
        if (shm == NULL)
                return;
        if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
                KASSERT(shm->shm_pages >= 1,
                    ("shm %p pages %jd free 1", shm,
                    (uintmax_t)shm->shm_pages));
                shm->shm_pages -= 1;
        }
}

static struct pagerops shm_swap_pager_ops = {
        .pgo_kvme_type = KVME_TYPE_SWAP,
        .pgo_freespace = shm_pager_freespace,
        .pgo_page_inserted = shm_page_inserted,
        .pgo_page_removed = shm_page_removed,
};
static int shmfd_pager_type = -1;

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
        struct shmfd *shmfd;
        off_t foffset;
        int error;

        shmfd = fp->f_data;
        foffset = foffset_lock(fp, 0);
        error = 0;
        switch (whence) {
        case L_INCR:
                if (foffset < 0 ||
                    (offset > 0 && foffset > OFF_MAX - offset)) {
                        error = EOVERFLOW;
                        break;
                }
                offset += foffset;
                break;
        case L_XTND:
                if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
                        error = EOVERFLOW;
                        break;
                }
                offset += shmfd->shm_size;
                break;
        case L_SET:
                break;
        default:
                error = EINVAL;
        }
        if (error == 0) {
                if (offset < 0 || offset > shmfd->shm_size)
                        error = EINVAL;
                else
                        td->td_uretoff.tdu_off = offset;
        }
        foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
        return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
            uio->uio_offset + uio->uio_resid);
        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        shm_rangelock_unlock(shmfd, rl_cookie);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;
        off_t size;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
                return (EINVAL);
        foffset_lock_uio(fp, uio, flags);
        if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
                /*
                 * Overflow is only an error if we're supposed to expand on
                 * write.  Otherwise, we'll just truncate the write to the
                 * size of the file, which can only grow up to OFF_MAX.
                 */
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
                        foffset_unlock_uio(fp, uio, flags);
                        return (EFBIG);
                }

                size = shmfd->shm_size;
        } else {
                size = uio->uio_offset + uio->uio_resid;
        }
        if ((flags & FOF_OFFSET) == 0)
                rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
        else
                rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
        if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
                error = EPERM;
        } else {
                error = 0;
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
                    size > shmfd->shm_size) {
                        error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
                }
                if (error == 0)
                        error = uiomove_object(shmfd->shm_object,
                            shmfd->shm_size, uio);
        }
        shm_rangelock_unlock(shmfd, rl_cookie);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        struct shm_largepage_conf *conf;
        void *rl_cookie;

        shmfd = fp->f_data;
        switch (com) {
        case FIONBIO:
        case FIOASYNC:
                /*
                 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
                 * just like it would on an unlinked regular file.
                 */
                return (0);
        case FIOSSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
                if (shmfd->shm_lp_psind != 0 &&
                    conf->psind != shmfd->shm_lp_psind)
                        return (EINVAL);
                if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
                    pagesizes[conf->psind] == 0)
                        return (EINVAL);
                if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
                        return (EINVAL);

                rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
                shmfd->shm_lp_psind = conf->psind;
                shmfd->shm_lp_alloc_policy = conf->alloc_policy;
                shmfd->shm_object->un_pager.phys.data_val = conf->psind;
                shm_rangelock_unlock(shmfd, rl_cookie);
                return (0);
        case FIOGSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
                rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
                conf->psind = shmfd->shm_lp_psind;
                conf->alloc_policy = shmfd->shm_lp_alloc_policy;
                shm_rangelock_unlock(shmfd, rl_cookie);
                return (0);
        default:
                return (ENOTTY);
        }
}
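
/*
 * Illustrative userspace sketch for the largepage ioctls above
 * (psind 1 is assumed to name the 2M page size on amd64; error
 * handling elided):
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);
 *
 * and FIOGSHMLPGCNF reads the current configuration back into conf.
 */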

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;

#ifdef MAC
        error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif

        /*
         * Attempt to return sane-ish values for fstat() on a memory file
         * descriptor.
         */
        bzero(sb, sizeof(*sb));
        sb->st_blksize = PAGE_SIZE;
        sb->st_size = shmfd->shm_size;
        mtx_lock(&shm_timestamp_lock);
        sb->st_atim = shmfd->shm_atime;
        sb->st_ctim = shmfd->shm_ctime;
        sb->st_mtim = shmfd->shm_mtime;
        sb->st_birthtim = shmfd->shm_birthtime;
        sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
        sb->st_uid = shmfd->shm_uid;
        sb->st_gid = shmfd->shm_gid;
        mtx_unlock(&shm_timestamp_lock);
        sb->st_dev = shm_dev_ino;
        sb->st_ino = shmfd->shm_ino;
        sb->st_nlink = shmfd->shm_object->ref_count;
        if (shm_largepage(shmfd)) {
                sb->st_blocks = shmfd->shm_object->size /
                    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
        } else {
                sb->st_blocks = shmfd->shm_pages;
        }

        return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
        struct shmfd *shmfd;

        shmfd = fp->f_data;
        fp->f_data = NULL;
        shm_drop(shmfd);

        return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
        int error;
        char *path;
        const char *pr_path;
        size_t pr_pathlen;

        path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
        pr_path = td->td_ucred->cr_prison->pr_path;

        /* Construct a full pathname for jailed callers. */
        pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
            strlcpy(path, pr_path, MAXPATHLEN);
        error = copyinstr(userpath_in, path + pr_pathlen,
            MAXPATHLEN - pr_pathlen, NULL);
        if (error != 0)
                goto out;

#ifdef KTRACE
        if (KTRPOINT(curthread, KTR_NAMEI))
                ktrnamei(path);
#endif

        /* Require paths to start with a '/' character. */
        if (path[pr_pathlen] != '/') {
                error = EINVAL;
                goto out;
        }

        *path_out = path;

out:
        if (error != 0)
                free(path, M_SHMFD);

        return (error);
}

static int
shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
    int end)
{
        vm_page_t m;
        int rv;

        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(base >= 0, ("%s: base %d", __func__, base));
        KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
            end));

retry:
        m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
        if (m != NULL) {
                MPASS(vm_page_all_valid(m));
        } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
                m = vm_page_alloc(object, idx,
                    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
                if (m == NULL)
                        goto retry;
                vm_object_pip_add(object, 1);
                VM_OBJECT_WUNLOCK(object);
                rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
                VM_OBJECT_WLOCK(object);
                vm_object_pip_wakeup(object);
                if (rv == VM_PAGER_OK) {
                        /*
                         * Since the page was not resident, and therefore not
                         * recently accessed, immediately enqueue it for
                         * asynchronous laundering.  The current operation is
                         * not regarded as an access.
                         */
                        vm_page_launder(m);
                } else {
                        vm_page_free(m);
                        VM_OBJECT_WUNLOCK(object);
                        return (EIO);
                }
        }
        if (m != NULL) {
                pmap_zero_page_area(m, base, end - base);
                KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
                    __func__, m));
                vm_page_set_dirty(m);
                vm_page_xunbusy(m);
        }

        return (0);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_pindex_t nobjsize;
        vm_ooffset_t delta;
        int base, error;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
        if (length == shmfd->shm_size)
                return (0);
        nobjsize = OFF_TO_IDX(length + PAGE_MASK);

        /* Are we shrinking?  If so, trim the end. */
        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);

                /*
                 * Disallow any requests to shrink the size if this
                 * object is mapped into the kernel.
                 */
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);

                /*
                 * Zero the truncated part of the last page.
                 */
                base = length & PAGE_MASK;
                if (base != 0) {
                        error = shm_partial_page_invalidate(object,
                            OFF_TO_IDX(length), base, PAGE_SIZE);
                        if (error)
                                return (error);
                }
                delta = IDX_TO_OFF(object->size - nobjsize);

                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);

                /* Free the swap accounted for shm */
                swap_release_by_cred(delta, object->cred);
                object->charge -= delta;
        } else {
                if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                        return (EPERM);

                /* Try to reserve additional swap space. */
                delta = IDX_TO_OFF(nobjsize - object->size);
                if (!swap_reserve_by_cred(delta, object->cred))
                        return (ENOMEM);
                object->charge += delta;
        }
        shmfd->shm_size = length;
        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_ctime);
        shmfd->shm_mtime = shmfd->shm_ctime;
        mtx_unlock(&shm_timestamp_lock);
        object->size = nobjsize;
        return (0);
}

static int
shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t newobjsz;
        vm_pindex_t oldobjsz __unused;
        int aflags, error, i, psind, try;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);

        oldobjsz = object->size;
        newobjsz = OFF_TO_IDX(length);
        if (length == shmfd->shm_size)
                return (0);
        psind = shmfd->shm_lp_psind;
        if (psind == 0 && length != 0)
                return (EINVAL);
        if ((length & (pagesizes[psind] - 1)) != 0)
                return (EINVAL);

        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);
                return (ENOTSUP);       /* Pages are unmanaged. */
#if 0
                vm_object_page_remove(object, newobjsz, oldobjsz, 0);
                object->size = newobjsz;
                shmfd->shm_size = length;
                return (0);
#endif
        }

        if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                return (EPERM);

        aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
        if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
                aflags |= VM_ALLOC_WAITFAIL;
        try = 0;

        /*
         * Extend shmfd and object, keeping all already fully
         * allocated large pages intact even on error, because the
         * dropped object lock might have allowed them to be mapped.
         */
        while (object->size < newobjsz) {
                m = vm_page_alloc_contig(object, object->size, aflags,
                    pagesizes[psind] / PAGE_SIZE, 0, ~0,
                    pagesizes[psind], 0,
                    VM_MEMATTR_DEFAULT);
                if (m == NULL) {
                        VM_OBJECT_WUNLOCK(object);
                        if (shmfd->shm_lp_alloc_policy ==
                            SHM_LARGEPAGE_ALLOC_NOWAIT ||
                            (shmfd->shm_lp_alloc_policy ==
                            SHM_LARGEPAGE_ALLOC_DEFAULT &&
                            try >= largepage_reclaim_tries)) {
                                VM_OBJECT_WLOCK(object);
                                return (ENOMEM);
                        }
                        error = vm_page_reclaim_contig(aflags,
                            pagesizes[psind] / PAGE_SIZE, 0, ~0,
                            pagesizes[psind], 0);
                        if (error == ENOMEM)
                                error = vm_wait_intr(object);
                        if (error != 0) {
                                VM_OBJECT_WLOCK(object);
                                return (error);
                        }
                        try++;
                        VM_OBJECT_WLOCK(object);
                        continue;
                }
                try = 0;
                for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
                        if ((m[i].flags & PG_ZERO) == 0)
                                pmap_zero_page(&m[i]);
                        vm_page_valid(&m[i]);
                        vm_page_xunbusy(&m[i]);
                }
                object->size += OFF_TO_IDX(pagesizes[psind]);
                shmfd->shm_size += pagesizes[psind];
                atomic_add_long(&count_largepages[psind], 1);
                vm_wire_add(atop(pagesizes[psind]));
        }
        return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        int error;

        VM_OBJECT_WLOCK(shmfd->shm_object);
        error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
            length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
            rl_cookie);
        VM_OBJECT_WUNLOCK(shmfd->shm_object);
        return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
        void *rl_cookie;
        int error;

        rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
        error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
        shm_rangelock_unlock(shmfd, rl_cookie);
        return (error);
}
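
/*
 * Illustrative note: from userspace these truncation paths are
 * reached through ftruncate(2) on a shm descriptor, e.g. (sizes
 * hypothetical):
 *
 *	ftruncate(fd, 2 * 1024 * 1024);	grow: reserves swap for the
 *					new range
 *	ftruncate(fd, 4096);		shrink: zeroes the partial last
 *					page and frees the rest
 *
 * Shrinking fails with EPERM under F_SEAL_SHRINK and growing fails
 * with EPERM under F_SEAL_GROW, as implemented above.
 */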

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
{
        struct shmfd *shmfd;
        vm_object_t obj;

        shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
        shmfd->shm_size = 0;
        shmfd->shm_uid = ucred->cr_uid;
        shmfd->shm_gid = ucred->cr_gid;
        shmfd->shm_mode = mode;
        if (largepage) {
                shmfd->shm_object = phys_pager_allocate(NULL,
                    &shm_largepage_phys_ops, NULL, shmfd->shm_size,
                    VM_PROT_DEFAULT, 0, ucred);
                shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
        } else {
                obj = vm_pager_allocate(shmfd_pager_type, NULL,
                    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
                VM_OBJECT_WLOCK(obj);
                obj->un_pager.swp.swp_priv = shmfd;
                VM_OBJECT_WUNLOCK(obj);
                shmfd->shm_object = obj;
        }
        KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
        vfs_timestamp(&shmfd->shm_birthtime);
        shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
            shmfd->shm_birthtime;
        shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
        refcount_init(&shmfd->shm_refs, 1);
        mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
        rangelock_init(&shmfd->shm_rl);
#ifdef MAC
        mac_posixshm_init(shmfd);
        mac_posixshm_create(ucred, shmfd);
#endif

        return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

        refcount_acquire(&shmfd->shm_refs);
        return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{
        vm_object_t obj;

        if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
                mac_posixshm_destroy(shmfd);
#endif
                rangelock_destroy(&shmfd->shm_rl);
                mtx_destroy(&shmfd->shm_mtx);
                obj = shmfd->shm_object;
                if (!shm_largepage(shmfd)) {
                        VM_OBJECT_WLOCK(obj);
                        obj->un_pager.swp.swp_priv = NULL;
                        VM_OBJECT_WUNLOCK(obj);
                }
                vm_object_deallocate(obj);
                free(shmfd, M_SHMFD);
        }
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
        accmode_t accmode;
        int error;

        accmode = 0;
        if (flags & FREAD)
                accmode |= VREAD;
        if (flags & FWRITE)
                accmode |= VWRITE;
        mtx_lock(&shm_timestamp_lock);
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
            accmode, ucred);
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

static void
shm_init(void *arg)
{
        char name[32];
        int i;

        mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
        sx_init(&shm_dict_lock, "shm dictionary");
        shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
        new_unrhdr64(&shm_ino_unr, 1);
        shm_dev_ino = devfs_alloc_cdp_inode();
        KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
        shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
            OBJT_SWAP);
        MPASS(shmfd_pager_type != -1);

        for (i = 1; i < MAXPAGESIZES; i++) {
                if (pagesizes[i] == 0)
                        break;
#define M       (1024 * 1024)
#define G       (1024 * M)
                if (pagesizes[i] >= G)
                        snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
                else if (pagesizes[i] >= M)
                        snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
                else
                        snprintf(name, sizeof(name), "%lu", pagesizes[i]);
#undef G
#undef M
                SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
                    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
                    "number of non-transient largepages allocated");
        }
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

/*
 * Remove all shared memory objects that belong to a prison.
 */
void
shm_remove_prison(struct prison *pr)
{
        struct shm_mapping *shmm, *tshmm;
        u_long i;

        sx_xlock(&shm_dict_lock);
        for (i = 0; i < shm_hash + 1; i++) {
                LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
                        if (shmm->sm_shmfd->shm_object->cred &&
                            shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
                                shm_doremove(shmm);
                }
        }
        sx_xunlock(&shm_dict_lock);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
        struct shm_mapping *map;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0)
                        return (map->sm_shmfd);
        }

        return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
        struct shm_mapping *map;

        map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
        map->sm_path = path;
        map->sm_fnv = fnv;
        map->sm_shmfd = shm_hold(shmfd);
        shmfd->shm_path = path;
        LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
        struct shm_mapping *map;
        int error;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
                        error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
                        if (error)
                                return (error);
#endif
                        error = shm_access(map->sm_shmfd, ucred,
                            FREAD | FWRITE);
                        if (error)
                                return (error);
                        shm_doremove(map);
                        return (0);
                }
        }

        return (ENOENT);
}

static void
shm_doremove(struct shm_mapping *map)
{
        map->sm_shmfd->shm_path = NULL;
        LIST_REMOVE(map, sm_link);
        shm_drop(map->sm_shmfd);
        free(map->sm_path, M_SHMFD);
        free(map, M_SHMFD);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
        struct pwddesc *pdp;
        struct shmfd *shmfd;
        struct file *fp;
        char *path;
        void *rl_cookie;
        Fnv32_t fnv;
        mode_t cmode;
        int error, fd, initial_seals;
        bool largepage;

        if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
            SHM_LARGEPAGE)) != 0)
                return (EINVAL);

        initial_seals = F_SEAL_SEAL;
        if ((shmflags & SHM_ALLOW_SEALING) != 0)
                initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
        /*
         * shm_open(2) is only allowed for anonymous objects.
         */
        if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
                return (ECAPMODE);
#endif

        AUDIT_ARG_FFLAGS(flags);
        AUDIT_ARG_MODE(mode);

        if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
                return (EINVAL);

        if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
                return (EINVAL);

        largepage = (shmflags & SHM_LARGEPAGE) != 0;
        if (largepage && !PMAP_HAS_LARGEPAGES)
                return (ENOTTY);

        /*
         * Currently only F_SEAL_SEAL may be set when creating or opening a
         * shmfd.  If the decision is made later to allow additional seals,
         * care must be taken below to ensure that the seals are properly set
         * if the shmfd already existed -- this currently assumes that only
         * F_SEAL_SEAL can be set and doesn't take further precautions to
         * ensure the validity of the seals being added with respect to
         * current mappings.
         */
        if ((initial_seals & ~F_SEAL_SEAL) != 0)
                return (EINVAL);

        pdp = td->td_proc->p_pd;
        cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;

        /*
         * A shm created by shm_open(2) should always have O_CLOEXEC set, as
         * mandated by POSIX.  We allow it to be unset here so that an
         * in-kernel interface may be written as a thin layer around shm,
         * optionally not setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set
         * unconditionally in sys_shm_open() to keep this implementation
         * compliant.
         */
        error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
        if (error)
                return (error);

        /* A SHM_ANON path pointer creates an anonymous object. */
        if (userpath == SHM_ANON) {
                /* A read-only anonymous object is pointless. */
                if ((flags & O_ACCMODE) == O_RDONLY) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (EINVAL);
                }
                shmfd = shm_alloc(td->td_ucred, cmode, largepage);
                shmfd->shm_seals = initial_seals;
                shmfd->shm_flags = shmflags;
        } else {
                error = shm_copyin_path(td, userpath, &path);
                if (error != 0) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }

                AUDIT_ARG_UPATH1_CANON(path);
                fnv = fnv_32_str(path, FNV1_32_INIT);
                sx_xlock(&shm_dict_lock);
                shmfd = shm_lookup(path, fnv);
                if (shmfd == NULL) {
                        /* Object does not yet exist, create it if requested. */
                        if (flags & O_CREAT) {
#ifdef MAC
                                error = mac_posixshm_check_create(td->td_ucred,
                                    path);
                                if (error == 0) {
#endif
                                        shmfd = shm_alloc(td->td_ucred, cmode,
                                            largepage);
                                        shmfd->shm_seals = initial_seals;
                                        shmfd->shm_flags = shmflags;
                                        shm_insert(path, fnv, shmfd);
#ifdef MAC
                                }
#endif
                        } else {
                                free(path, M_SHMFD);
                                error = ENOENT;
                        }
                } else {
                        rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);

                        /*
                         * kern_shm_open() likely shouldn't ever error out on
                         * trying to set a seal that already exists, unlike
                         * F_ADD_SEALS.  This would break terribly as
                         * shm_open(2) actually sets F_SEAL_SEAL to maintain
                         * historical behavior where the underlying file could
                         * not be sealed.
                         */
                        initial_seals &= ~shmfd->shm_seals;

                        /*
                         * Object already exists, obtain a new
                         * reference if requested and permitted.
                         */
                        free(path, M_SHMFD);

                        /*
                         * initial_seals cannot set additional seals if
                         * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is
                         * set, then we have already removed it from
                         * initial_seals.  This is currently redundant, as we
                         * only allow setting F_SEAL_SEAL at creation time, but
                         * it's cheap to check and decreases the effort
                         * required to allow additional seals.
                         */
                        if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
                            initial_seals != 0)
                                error = EPERM;
                        else if ((flags & (O_CREAT | O_EXCL)) ==
                            (O_CREAT | O_EXCL))
                                error = EEXIST;
                        else if (shmflags != 0 && shmflags != shmfd->shm_flags)
                                error = EINVAL;
                        else {
#ifdef MAC
                                error = mac_posixshm_check_open(td->td_ucred,
                                    shmfd, FFLAGS(flags & O_ACCMODE));
                                if (error == 0)
#endif
                                error = shm_access(shmfd, td->td_ucred,
                                    FFLAGS(flags & O_ACCMODE));
                        }

                        /*
                         * Truncate the file back to zero length if
                         * O_TRUNC was specified and the object was
                         * opened with read/write.
                         */
                        if (error == 0 &&
                            (flags & (O_ACCMODE | O_TRUNC)) ==
                            (O_RDWR | O_TRUNC)) {
                                VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
                                error = mac_posixshm_check_truncate(
                                        td->td_ucred, fp->f_cred, shmfd);
                                if (error == 0)
#endif
                                        error = shm_dotruncate_locked(shmfd, 0,
                                            rl_cookie);
                                VM_OBJECT_WUNLOCK(shmfd->shm_object);
                        }
                        if (error == 0) {
                                /*
                                 * Currently we only allow F_SEAL_SEAL to be
                                 * set initially.  As noted above, this would
                                 * need to be reworked should that change.
                                 */
                                shmfd->shm_seals |= initial_seals;
                                shm_hold(shmfd);
                        }
                        shm_rangelock_unlock(shmfd, rl_cookie);
                }
                sx_xunlock(&shm_dict_lock);

                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }
        }

        finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

        td->td_retval[0] = fd;
        fdrop(fp, td);

        return (0);
}
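
/*
 * Illustrative userspace sketch (a hypothetical named object with
 * sealing enabled; assumes the shm_open2(2) interface is exported to
 * userspace):
 *
 *	int fd = shm_open2("/myshm", O_RDWR | O_CREAT, 0600,
 *	    SHM_ALLOW_SEALING, NULL);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 */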

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

        return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
            uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
        char *path;
        Fnv32_t fnv;
        int error;

        error = shm_copyin_path(td, uap->path, &path);
        if (error != 0)
                return (error);

        AUDIT_ARG_UPATH1_CANON(path);
        fnv = fnv_32_str(path, FNV1_32_INIT);
        sx_xlock(&shm_dict_lock);
        error = shm_remove(path, fnv, td->td_ucred);
        sx_xunlock(&shm_dict_lock);
        free(path, M_SHMFD);

        return (error);
}

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
        char *path_from = NULL, *path_to = NULL;
        Fnv32_t fnv_from, fnv_to;
        struct shmfd *fd_from;
        struct shmfd *fd_to;
        int error;
        int flags;

        flags = uap->flags;
        AUDIT_ARG_FFLAGS(flags);

        /*
         * Make sure the user passed only valid flags.
         * If you add a new flag, please add a new term here.
         */
        if ((flags & ~(
            SHM_RENAME_NOREPLACE |
            SHM_RENAME_EXCHANGE
            )) != 0) {
                error = EINVAL;
                goto out;
        }

        /*
         * EXCHANGE and NOREPLACE don't quite make sense together.  Let's
         * force the user to choose one or the other.
         */
        if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
            (flags & SHM_RENAME_EXCHANGE) != 0) {
                error = EINVAL;
                goto out;
        }

        /* Renaming to or from anonymous makes no sense. */
        if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
                error = EINVAL;
                goto out;
        }

        error = shm_copyin_path(td, uap->path_from, &path_from);
        if (error != 0)
                goto out;

        error = shm_copyin_path(td, uap->path_to, &path_to);
        if (error != 0)
                goto out;

        AUDIT_ARG_UPATH1_CANON(path_from);
        AUDIT_ARG_UPATH2_CANON(path_to);

        /* Rename with from/to equal is a no-op. */
        if (strcmp(path_from, path_to) == 0)
                goto out;

        fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
        fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

        sx_xlock(&shm_dict_lock);

        fd_from = shm_lookup(path_from, fnv_from);
        if (fd_from == NULL) {
                error = ENOENT;
                goto out_locked;
        }

        fd_to = shm_lookup(path_to, fnv_to);
        if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
                error = EEXIST;
                goto out_locked;
        }

        /*
         * Unconditionally prevents shm_remove from invalidating the 'from'
         * shm's state.
         */
        shm_hold(fd_from);
        error = shm_remove(path_from, fnv_from, td->td_ucred);

        /*
         * If we got ENOENT here, one of our assumptions failed (e.g. the
         * lock did not protect us), so assert that it cannot happen.
         */
        KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
            path_from));
        if (error != 0) {
                shm_drop(fd_from);
                goto out_locked;
        }

        /*
         * If we are exchanging, we need to ensure the shm_remove below
         * doesn't invalidate the dest shm's state.
         */
        if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
                shm_hold(fd_to);

        /*
         * NOTE: if path_to is not already in the hash, c'est la vie;
         * it simply means we have nothing already at path_to to unlink.
         * That is the ENOENT case.
         *
         * If we somehow lack access to unlink the shm at path_to, even
         * though we could unlink the one at path_from, then relink the
         * shm to path_from and abort with EACCES.
         *
         * All other errors: that is weird; let's relink and abort the
         * operation.
         */
        error = shm_remove(path_to, fnv_to, td->td_ucred);
        if (error != 0 && error != ENOENT) {
                shm_insert(path_from, fnv_from, fd_from);
                shm_drop(fd_from);
                /* Don't free path_from now, since the hash references it. */
                path_from = NULL;
                goto out_locked;
        }

        error = 0;

        shm_insert(path_to, fnv_to, fd_from);

        /* Don't free path_to now, since the hash references it. */
        path_to = NULL;

        /* We kept a ref when we removed, and incremented again in insert. */
        shm_drop(fd_from);
        KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
            fd_from->shm_refs));

        if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
                shm_insert(path_from, fnv_from, fd_to);
                path_from = NULL;
                shm_drop(fd_to);
                KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
                    fd_to->shm_refs));
        }

out_locked:
        sx_xunlock(&shm_dict_lock);

out:
        free(path_from, M_SHMFD);
        free(path_to, M_SHMFD);
        return (error);
}
1535
1536 static int
1537 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1538     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1539     vm_ooffset_t foff, struct thread *td)
1540 {
1541         struct vmspace *vms;
1542         vm_map_entry_t next_entry, prev_entry;
1543         vm_offset_t align, mask, maxaddr;
1544         int docow, error, rv, try;
1545         bool curmap;
1546
1547         if (shmfd->shm_lp_psind == 0)
1548                 return (EINVAL);
1549
1550         /* MAP_PRIVATE is disabled */
1551         if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1552             MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
1553                 return (EINVAL);
1554
1555         vms = td->td_proc->p_vmspace;
1556         curmap = map == &vms->vm_map;
1557         if (curmap) {
1558                 error = kern_mmap_racct_check(td, map, size);
1559                 if (error != 0)
1560                         return (error);
1561         }
1562
1563         docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1564         docow |= MAP_INHERIT_SHARE;
1565         if ((flags & MAP_NOCORE) != 0)
1566                 docow |= MAP_DISABLE_COREDUMP;
1567
1568         mask = pagesizes[shmfd->shm_lp_psind] - 1;
1569         if ((foff & mask) != 0)
1570                 return (EINVAL);
1571         maxaddr = vm_map_max(map);
1572         if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1573                 maxaddr = MAP_32BIT_MAX_ADDR;
1574         if (size == 0 || (size & mask) != 0 ||
1575             (*addr != 0 && ((*addr & mask) != 0 ||
1576             *addr + size < *addr || *addr + size > maxaddr)))
1577                 return (EINVAL);
1578
1579         align = flags & MAP_ALIGNMENT_MASK;
1580         if (align == 0) {
1581                 align = pagesizes[shmfd->shm_lp_psind];
1582         } else if (align == MAP_ALIGNED_SUPER) {
1583                 if (shmfd->shm_lp_psind != 1)
1584                         return (EINVAL);
1585                 align = pagesizes[1];
1586         } else {
1587                 align >>= MAP_ALIGNMENT_SHIFT;
1588                 align = 1ULL << align;
1589                 /* Also handles overflow. */
1590                 if (align < pagesizes[shmfd->shm_lp_psind])
1591                         return (EINVAL);
1592         }
1593
1594         vm_map_lock(map);
1595         if ((flags & MAP_FIXED) == 0) {
1596                 try = 1;
1597                 if (curmap && (*addr == 0 ||
1598                     (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1599                     *addr < round_page((vm_offset_t)vms->vm_daddr +
1600                     lim_max(td, RLIMIT_DATA))))) {
1601                         *addr = roundup2((vm_offset_t)vms->vm_daddr +
1602                             lim_max(td, RLIMIT_DATA),
1603                             pagesizes[shmfd->shm_lp_psind]);
1604                 }
1605 again:
1606                 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1607                 if (rv != KERN_SUCCESS) {
1608                         if (try == 1) {
1609                                 try = 2;
1610                                 *addr = vm_map_min(map);
1611                                 if ((*addr & mask) != 0)
1612                                         *addr = (*addr + mask) & ~mask;
1613                                 goto again;
1614                         }
1615                         goto fail1;
1616                 }
1617         } else if ((flags & MAP_EXCL) == 0) {
1618                 rv = vm_map_delete(map, *addr, *addr + size);
1619                 if (rv != KERN_SUCCESS)
1620                         goto fail1;
1621         } else {
1622                 error = ENOSPC;
1623                 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1624                         goto fail;
1625                 next_entry = vm_map_entry_succ(prev_entry);
1626                 if (next_entry->start < *addr + size)
1627                         goto fail;
1628         }
1629
1630         rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1631             prot, max_prot, docow);
1632 fail1:
1633         error = vm_mmap_to_errno(rv);
1634 fail:
1635         vm_map_unlock(map);
1636         return (error);
1637 }
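/*
 * Illustrative userspace sketch of reaching shm_mmap_large(): assumes fd
 * names a SHM_LARGEPAGE object already configured for 2M pages (e.g. via
 * the FIOSSHMLPGCNF ioctl on amd64).  The size, offset, and alignment
 * must all match the configured page size:
 *
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_ALIGNED_SUPER, fd, 0);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 */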
1638
1639 static int
1640 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1641     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1642     vm_ooffset_t foff, struct thread *td)
1643 {
1644         struct shmfd *shmfd;
1645         vm_prot_t maxprot;
1646         int error;
1647         bool writecnt;
1648         void *rl_cookie;
1649
1650         shmfd = fp->f_data;
1651         maxprot = VM_PROT_NONE;
1652
1653         rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
1654         /* FREAD should always be set. */
1655         if ((fp->f_flag & FREAD) != 0)
1656                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1657
1658         /*
1659          * If FWRITE is set, we can allow VM_PROT_WRITE unless it is a
1660          * shared mapping with a write seal applied.  Private mappings
1661          * are always writable.
1662          */
1663         if ((flags & MAP_SHARED) == 0) {
1664                 cap_maxprot |= VM_PROT_WRITE;
1665                 maxprot |= VM_PROT_WRITE;
1666                 writecnt = false;
1667         } else {
1668                 if ((fp->f_flag & FWRITE) != 0 &&
1669                     (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1670                         maxprot |= VM_PROT_WRITE;
1671
1672                 /*
1673                  * Any mappings from a writable descriptor may be upgraded to
1674                  * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1675                  * applied between the open and subsequent mmap(2).  We want to
1676                  * reject application of a write seal as long as any such
1677                  * mapping exists so that the seal cannot be trivially bypassed.
1678                  */
1679                 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1680                 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1681                         error = EACCES;
1682                         goto out;
1683                 }
1684         }
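         /*
          * Illustrative sketch of the case rejected above (hypothetical
          * sealed_fd with F_SEAL_WRITE already applied): a writable shared
          * mapping must fail up front, since it could not be revoked later.
          *
          *	// fails with EACCES once F_SEAL_WRITE is set:
          *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
          *	    sealed_fd, 0);
          */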
1685         maxprot &= cap_maxprot;
1686
1687         /* See comment in vn_mmap(). */
1688         if (
1689 #ifdef _LP64
1690             objsize > OFF_MAX ||
1691 #endif
1692             foff > OFF_MAX - objsize) {
1693                 error = EINVAL;
1694                 goto out;
1695         }
1696
1697 #ifdef MAC
1698         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1699         if (error != 0)
1700                 goto out;
1701 #endif
1702
1703         mtx_lock(&shm_timestamp_lock);
1704         vfs_timestamp(&shmfd->shm_atime);
1705         mtx_unlock(&shm_timestamp_lock);
1706         vm_object_reference(shmfd->shm_object);
1707
1708         if (shm_largepage(shmfd)) {
1709                 writecnt = false;
1710                 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1711                     maxprot, flags, foff, td);
1712         } else {
1713                 if (writecnt) {
1714                         vm_pager_update_writecount(shmfd->shm_object, 0,
1715                             objsize);
1716                 }
1717                 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1718                     shmfd->shm_object, foff, writecnt, td);
1719         }
1720         if (error != 0) {
1721                 if (writecnt)
1722                         vm_pager_release_writecount(shmfd->shm_object, 0,
1723                             objsize);
1724                 vm_object_deallocate(shmfd->shm_object);
1725         }
1726 out:
1727         shm_rangelock_unlock(shmfd, rl_cookie);
1728         return (error);
1729 }
1730
1731 static int
1732 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1733     struct thread *td)
1734 {
1735         struct shmfd *shmfd;
1736         int error;
1737
1738         error = 0;
1739         shmfd = fp->f_data;
1740         mtx_lock(&shm_timestamp_lock);
1741         /*
1742          * SUSv4 says that the execute bits of the permission need not
1743          * be affected; be consistent with our shm_open() there.
1744          */
1745 #ifdef MAC
1746         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1747         if (error != 0)
1748                 goto out;
1749 #endif
1750         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1751             VADMIN, active_cred);
1752         if (error != 0)
1753                 goto out;
1754         shmfd->shm_mode = mode & ACCESSPERMS;
1755 out:
1756         mtx_unlock(&shm_timestamp_lock);
1757         return (error);
1758 }
1759
1760 static int
1761 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1762     struct thread *td)
1763 {
1764         struct shmfd *shmfd;
1765         int error;
1766
1767         error = 0;
1768         shmfd = fp->f_data;
1769         mtx_lock(&shm_timestamp_lock);
1770 #ifdef MAC
1771         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1772         if (error != 0)
1773                 goto out;
1774 #endif
1775         if (uid == (uid_t)-1)
1776                 uid = shmfd->shm_uid;
1777         if (gid == (gid_t)-1)
1778                 gid = shmfd->shm_gid;
1779         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1780             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1781             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1782                 goto out;
1783         shmfd->shm_uid = uid;
1784         shmfd->shm_gid = gid;
1785 out:
1786         mtx_unlock(&shm_timestamp_lock);
1787         return (error);
1788 }
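/*
 * Both handlers above back the generic descriptor operations; an
 * illustrative userspace sketch (hypothetical fd from shm_open(2)):
 *
 *	fchmod(fd, 0640);	// shm_chmod(): mode masked with ACCESSPERMS
 *	fchown(fd, uid, -1);	// shm_chown(): -1 leaves the gid unchanged
 */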
1789
1790 /*
1791  * Helper routines to allow the backing object of a shared memory file
1792  * descriptor to be mapped in the kernel.
1793  */
1794 int
1795 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1796 {
1797         struct shmfd *shmfd;
1798         vm_offset_t kva, ofs;
1799         vm_object_t obj;
1800         int rv;
1801
1802         if (fp->f_type != DTYPE_SHM)
1803                 return (EINVAL);
1804         shmfd = fp->f_data;
1805         obj = shmfd->shm_object;
1806         VM_OBJECT_WLOCK(obj);
1807         /*
1808          * XXXRW: This validation is probably insufficient, and subject to
1809          * sign errors.  It should be fixed.
1810          */
1811         if (offset >= shmfd->shm_size ||
1812             offset + size > round_page(shmfd->shm_size)) {
1813                 VM_OBJECT_WUNLOCK(obj);
1814                 return (EINVAL);
1815         }
1816
1817         shmfd->shm_kmappings++;
1818         vm_object_reference_locked(obj);
1819         VM_OBJECT_WUNLOCK(obj);
1820
1821         /* Map the object into the kernel_map and wire it. */
1822         kva = vm_map_min(kernel_map);
1823         ofs = offset & PAGE_MASK;
1824         offset = trunc_page(offset);
1825         size = round_page(size + ofs);
1826         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1827             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1828             VM_PROT_READ | VM_PROT_WRITE, 0);
1829         if (rv == KERN_SUCCESS) {
1830                 rv = vm_map_wire(kernel_map, kva, kva + size,
1831                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1832                 if (rv == KERN_SUCCESS) {
1833                         *memp = (void *)(kva + ofs);
1834                         return (0);
1835                 }
1836                 vm_map_remove(kernel_map, kva, kva + size);
1837         } else
1838                 vm_object_deallocate(obj);
1839
1840         /* On failure, drop our mapping reference. */
1841         VM_OBJECT_WLOCK(obj);
1842         shmfd->shm_kmappings--;
1843         VM_OBJECT_WUNLOCK(obj);
1844
1845         return (vm_mmap_to_errno(rv));
1846 }
1847
1848 /*
1849  * We require the caller to unmap the entire entry.  This allows us to
1850  * safely decrement shm_kmappings when a mapping is removed.
1851  */
1852 int
1853 shm_unmap(struct file *fp, void *mem, size_t size)
1854 {
1855         struct shmfd *shmfd;
1856         vm_map_entry_t entry;
1857         vm_offset_t kva, ofs;
1858         vm_object_t obj;
1859         vm_pindex_t pindex;
1860         vm_prot_t prot;
1861         boolean_t wired;
1862         vm_map_t map;
1863         int rv;
1864
1865         if (fp->f_type != DTYPE_SHM)
1866                 return (EINVAL);
1867         shmfd = fp->f_data;
1868         kva = (vm_offset_t)mem;
1869         ofs = kva & PAGE_MASK;
1870         kva = trunc_page(kva);
1871         size = round_page(size + ofs);
1872         map = kernel_map;
1873         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1874             &obj, &pindex, &prot, &wired);
1875         if (rv != KERN_SUCCESS)
1876                 return (EINVAL);
1877         if (entry->start != kva || entry->end != kva + size) {
1878                 vm_map_lookup_done(map, entry);
1879                 return (EINVAL);
1880         }
1881         vm_map_lookup_done(map, entry);
1882         if (obj != shmfd->shm_object)
1883                 return (EINVAL);
1884         vm_map_remove(map, kva, kva + size);
1885         VM_OBJECT_WLOCK(obj);
1886         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1887         shmfd->shm_kmappings--;
1888         VM_OBJECT_WUNLOCK(obj);
1889         return (0);
1890 }
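/*
 * Illustrative in-kernel sketch of the pair above (hypothetical consumer
 * holding a DTYPE_SHM struct file *fp); the unmap must cover exactly the
 * range that was mapped:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, off, &mem);
 *	if (error == 0) {
 *		// ... access the wired window at 'mem' ...
 *		error = shm_unmap(fp, mem, len);
 *	}
 */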
1891
1892 static int
1893 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1894 {
1895         const char *path, *pr_path;
1896         size_t pr_pathlen;
1897         bool visible;
1898
1899         sx_assert(&shm_dict_lock, SA_LOCKED);
1900         kif->kf_type = KF_TYPE_SHM;
1901         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1902         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1903         if (shmfd->shm_path != NULL) {
1904                 path = shmfd->shm_path;
1905                 pr_path = curthread->td_ucred->cr_prison->pr_path;
1906                 if (strcmp(pr_path, "/") != 0) {
1907                         /* Return the jail-rooted pathname. */
1908                         pr_pathlen = strlen(pr_path);
1909                         visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1910                             path[pr_pathlen] == '/';
1911                         if (list && !visible)
1912                                 return (EPERM);
1913                         if (visible)
1914                                 path += pr_pathlen;
1915                 }
1916                 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1917         }
1920         return (0);
1921 }
1922
1923 static int
1924 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1925     struct filedesc *fdp __unused)
1926 {
1927         int res;
1928
1929         sx_slock(&shm_dict_lock);
1930         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1931         sx_sunlock(&shm_dict_lock);
1932         return (res);
1933 }
1934
1935 static int
1936 shm_add_seals(struct file *fp, int seals)
1937 {
1938         struct shmfd *shmfd;
1939         void *rl_cookie;
1940         vm_ooffset_t writemappings;
1941         int error, nseals;
1942
1943         error = 0;
1944         shmfd = fp->f_data;
1945         rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1946
1947         /* Even already-set seals should result in EPERM. */
1948         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1949                 error = EPERM;
1950                 goto out;
1951         }
1952         nseals = seals & ~shmfd->shm_seals;
1953         if ((nseals & F_SEAL_WRITE) != 0) {
1954                 if (shm_largepage(shmfd)) {
1955                         error = ENOTSUP;
1956                         goto out;
1957                 }
1958
1959                 /*
1960                  * The rangelock above prevents writable mappings from being
1961                  * added after we've started applying seals.  The RLOCK here
1962                  * is to avoid torn reads on ILP32 arches as unmapping/reducing
1963                  * writemappings will be done without a rangelock.
1964                  */
1965                 VM_OBJECT_RLOCK(shmfd->shm_object);
1966                 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1967                 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1968                 /* kmappings are also writable */
1969                 if (writemappings > 0) {
1970                         error = EBUSY;
1971                         goto out;
1972                 }
1973         }
1974         shmfd->shm_seals |= nseals;
1975 out:
1976         shm_rangelock_unlock(shmfd, rl_cookie);
1977         return (error);
1978 }
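/*
 * Illustrative userspace sketch of the F_SEAL_WRITE checks above,
 * assuming the standard fcntl(2) sealing commands:
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	ftruncate(fd, len);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	// EBUSY: writable fd mapped
 *	munmap(p, len);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	// now succeeds
 */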
1979
1980 static int
1981 shm_get_seals(struct file *fp, int *seals)
1982 {
1983         struct shmfd *shmfd;
1984
1985         shmfd = fp->f_data;
1986         *seals = shmfd->shm_seals;
1987         return (0);
1988 }
1989
1990 static int
1991 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1992 {
1993         vm_object_t object;
1994         vm_pindex_t pistart, pi, piend;
1995         vm_ooffset_t off, len;
1996         int startofs, endofs, end;
1997         int error;
1998
1999         off = *offset;
2000         len = *length;
2001         KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2002         if (off + len > shmfd->shm_size)
2003                 len = shmfd->shm_size - off;
2004         object = shmfd->shm_object;
2005         startofs = off & PAGE_MASK;
2006         endofs = (off + len) & PAGE_MASK;
2007         pistart = OFF_TO_IDX(off);
2008         piend = OFF_TO_IDX(off + len);
2009         pi = OFF_TO_IDX(off + PAGE_MASK);
2010         error = 0;
2011
2012         /* Handle the case when offset is on or beyond shm size. */
2013         if ((off_t)len <= 0) {
2014                 *length = 0;
2015                 return (0);
2016         }
2017
2018         VM_OBJECT_WLOCK(object);
2019
2020         if (startofs != 0) {
2021                 end = pistart != piend ? PAGE_SIZE : endofs;
2022                 error = shm_partial_page_invalidate(object, pistart, startofs,
2023                     end);
2024                 if (error)
2025                         goto out;
2026                 off += end - startofs;
2027                 len -= end - startofs;
2028         }
2029
2030         if (pi < piend) {
2031                 vm_object_page_remove(object, pi, piend, 0);
2032                 off += IDX_TO_OFF(piend - pi);
2033                 len -= IDX_TO_OFF(piend - pi);
2034         }
2035
2036         if (endofs != 0 && pistart != piend) {
2037                 error = shm_partial_page_invalidate(object, piend, 0, endofs);
2038                 if (error)
2039                         goto out;
2040                 off += endofs;
2041                 len -= endofs;
2042         }
2043
2044 out:
2045         VM_OBJECT_WUNLOCK(object);
2046         *offset = off;
2047         *length = len;
2048         return (error);
2049 }
2050
2051 static int
2052 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
2053     struct ucred *active_cred, struct thread *td)
2054 {
2055         void *rl_cookie;
2056         struct shmfd *shmfd;
2057         off_t off, len;
2058         int error;
2059
2060         KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
2061         KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
2062             ("shm_fspacectl: non-zero flags"));
2063         KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
2064             ("shm_fspacectl: offset/length overflow or underflow"));
2065         error = EINVAL;
2066         shmfd = fp->f_data;
2067         off = *offset;
2068         len = *length;
2069
2070         rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
2071         switch (cmd) {
2072         case SPACECTL_DEALLOC:
2073                 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2074                         error = EPERM;
2075                         break;
2076                 }
2077                 error = shm_deallocate(shmfd, &off, &len, flags);
2078                 *offset = off;
2079                 *length = len;
2080                 break;
2081         default:
2082                 __assert_unreachable();
2083         }
2084         shm_rangelock_unlock(shmfd, rl_cookie);
2085         return (error);
2086 }
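/*
 * Illustrative userspace sketch: fspacectl(2) is the entry point for the
 * handler above, e.g. to punch a hole in an unsealed object:
 *
 *	struct spacectl_range rqsr = { .r_offset = off, .r_len = len };
 *
 *	if (fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, &rqsr) == -1)
 *		err(1, "fspacectl");
 */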
2087
2089 static int
2090 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2091 {
2092         void *rl_cookie;
2093         struct shmfd *shmfd;
2094         size_t size;
2095         int error;
2096
2097         /* This assumes that the caller already checked for overflow. */
2098         error = 0;
2099         shmfd = fp->f_data;
2100         size = offset + len;
2101
2102         /*
2103          * Just grab the rangelock for the range that we may be attempting to
2104          * grow, rather than blocking read/write for regions we won't be
2105          * touching while this (potential) resize is in progress.  Other
2106          * attempts to resize the shmfd will have to take a write lock from 0 to
2107          * OFF_MAX, so this being potentially beyond the current usable range of
2108          * the shmfd is not necessarily a concern.  If other mechanisms are
2109          * added to grow a shmfd, this may need to be re-evaluated.
2110          */
2111         rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
2112         if (size > shmfd->shm_size)
2113                 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2114         shm_rangelock_unlock(shmfd, rl_cookie);
2115         /* Translate to posix_fallocate(2) return value as needed. */
2116         if (error == ENOMEM)
2117                 error = ENOSPC;
2118         return (error);
2119 }
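/*
 * Illustrative userspace sketch: posix_fallocate(2) drives the handler
 * above and, per the translation at its end, reports ENOSPC rather than
 * ENOMEM on a failed grow.  It returns the error instead of setting errno:
 *
 *	int error = posix_fallocate(fd, 0, 1 << 20);
 *
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");
 */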
2120
2121 static int
2122 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2123 {
2124         struct shm_mapping *shmm;
2125         struct sbuf sb;
2126         struct kinfo_file kif;
2127         u_long i;
2128         int error, error2;
2129
2130         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2131         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2132         error = 0;
2133         sx_slock(&shm_dict_lock);
2134         for (i = 0; i < shm_hash + 1; i++) {
2135                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2136                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2137                             &kif, true);
2138                         if (error == EPERM) {
2139                                 error = 0;
2140                                 continue;
2141                         }
2142                         if (error != 0)
2143                                 break;
2144                         pack_kinfo(&kif);
2145                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2146                             0 : ENOMEM;
2147                         if (error != 0)
2148                                 break;
2149                 }
2150         }
2151         sx_sunlock(&shm_dict_lock);
2152         error2 = sbuf_finish(&sb);
2153         sbuf_delete(&sb);
2154         return (error != 0 ? error : error2);
2155 }
2156
2157 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2158     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2159     NULL, 0, sysctl_posix_shm_list, "",
2160     "POSIX SHM list");
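/*
 * The opaque list is a packed array of struct kinfo_file records;
 * posixshmcontrol(1) is the primary consumer.  An illustrative reader
 * sketch using the usual two-call sysctl pattern:
 *
 *	size_t len;
 *
 *	if (sysctlbyname("kern.ipc.posix_shm_list", NULL, &len,
 *	    NULL, 0) == 0) {
 *		void *buf = malloc(len);
 *		if (buf != NULL &&
 *		    sysctlbyname("kern.ipc.posix_shm_list", buf, &len,
 *		    NULL, 0) == 0) {
 *			// decode len bytes of packed records in buf
 *		}
 *	}
 */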
2161
2162 int
2163 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2164     struct filecaps *caps)
2165 {
2166
2167         return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2168 }
2169
2170 /*
2171  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2172  * caller, and libc will enforce it for the traditional shm_open() call.  This
2173  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
2174  * interface also includes a 'name' argument that is currently unused, but could
2175  * potentially be exported later via some interface for debugging purposes.
2176  * From the kernel's perspective, it is optional.  Individual consumers like
2177  * memfd_create() may require it in order to be compatible with other systems
2178  * implementing the same function.
2179  */
2180 int
2181 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2182 {
2183
2184         return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2185             uap->shmflags, NULL, uap->name));
2186 }
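/*
 * Illustrative userspace sketch: memfd_create(3) in libc is one such
 * consumer, layering on shm_open2() with an anonymous backing object and
 * the caller-supplied debugging name:
 *
 *	int fd = memfd_create("scratch", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *	if (fd == -1)
 *		err(1, "memfd_create");
 */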