1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5  * Copyright 2020 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by BAE Systems, the University of
9  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11  * Computing (TC) research program.
12  *
13  * Portions of this software were developed by Konstantin Belousov
14  * under sponsorship from the FreeBSD Foundation.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37
38 /*
39  * Support for shared swap-backed anonymous memory objects via
40  * shm_open(2), shm_rename(2), and shm_unlink(2).
41  * Most of the implementation lives here; vm_mmap.c contains the
42  * changes to the mapping logic.
43  *
44  * posixshmcontrol(1) allows users to inspect the state of the memory
45  * objects.  The per-uid swap resource limit controls the total
46  * amount of memory that a user can consume for anonymous objects,
47  * including shared ones.
48  */
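/*
 * Illustrative userspace usage, as a minimal sketch (error handling
 * omitted; the "/example" name is only an example): a named object is
 * created with shm_open(2), sized with ftruncate(2) and mapped with
 * mmap(2).  SHM_ANON may be passed in place of a path to obtain an
 * anonymous object instead.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	p[0] = 1;
 *	shm_unlink("/example");
 */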
49
50 #include <sys/cdefs.h>
51 #include "opt_capsicum.h"
52 #include "opt_ktrace.h"
53
54 #include <sys/param.h>
55 #include <sys/capsicum.h>
56 #include <sys/conf.h>
57 #include <sys/fcntl.h>
58 #include <sys/file.h>
59 #include <sys/filedesc.h>
60 #include <sys/filio.h>
61 #include <sys/fnv_hash.h>
62 #include <sys/kernel.h>
63 #include <sys/limits.h>
64 #include <sys/uio.h>
65 #include <sys/signal.h>
66 #include <sys/jail.h>
67 #include <sys/ktrace.h>
68 #include <sys/lock.h>
69 #include <sys/malloc.h>
70 #include <sys/mman.h>
71 #include <sys/mutex.h>
72 #include <sys/priv.h>
73 #include <sys/proc.h>
74 #include <sys/refcount.h>
75 #include <sys/resourcevar.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/stat.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysctl.h>
81 #include <sys/sysproto.h>
82 #include <sys/systm.h>
83 #include <sys/sx.h>
84 #include <sys/time.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/user.h>
89
90 #include <security/audit/audit.h>
91 #include <security/mac/mac_framework.h>
92
93 #include <vm/vm.h>
94 #include <vm/vm_param.h>
95 #include <vm/pmap.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_kern.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_pageout.h>
102 #include <vm/vm_pager.h>
103 #include <vm/swap_pager.h>
104
105 struct shm_mapping {
106         char            *sm_path;
107         Fnv32_t         sm_fnv;
108         struct shmfd    *sm_shmfd;
109         LIST_ENTRY(shm_mapping) sm_link;
110 };
111
112 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
113 static LIST_HEAD(, shm_mapping) *shm_dictionary;
114 static struct sx shm_dict_lock;
115 static struct mtx shm_timestamp_lock;
116 static u_long shm_hash;
117 static struct unrhdr64 shm_ino_unr;
118 static dev_t shm_dev_ino;
119
120 #define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])
121
122 static void     shm_init(void *arg);
123 static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
124 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
125 static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
126 static void     shm_doremove(struct shm_mapping *map);
127 static int      shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
128     void *rl_cookie);
129 static int      shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
130     void *rl_cookie);
131 static int      shm_copyin_path(struct thread *td, const char *userpath_in,
132     char **path_out);
133 static int      shm_deallocate(struct shmfd *shmfd, off_t *offset,
134     off_t *length, int flags);
135
136 static fo_rdwr_t        shm_read;
137 static fo_rdwr_t        shm_write;
138 static fo_truncate_t    shm_truncate;
139 static fo_ioctl_t       shm_ioctl;
140 static fo_stat_t        shm_stat;
141 static fo_close_t       shm_close;
142 static fo_chmod_t       shm_chmod;
143 static fo_chown_t       shm_chown;
144 static fo_seek_t        shm_seek;
145 static fo_fill_kinfo_t  shm_fill_kinfo;
146 static fo_mmap_t        shm_mmap;
147 static fo_get_seals_t   shm_get_seals;
148 static fo_add_seals_t   shm_add_seals;
149 static fo_fallocate_t   shm_fallocate;
150 static fo_fspacectl_t   shm_fspacectl;
151
152 /* File descriptor operations. */
153 struct fileops shm_ops = {
154         .fo_read = shm_read,
155         .fo_write = shm_write,
156         .fo_truncate = shm_truncate,
157         .fo_ioctl = shm_ioctl,
158         .fo_poll = invfo_poll,
159         .fo_kqfilter = invfo_kqfilter,
160         .fo_stat = shm_stat,
161         .fo_close = shm_close,
162         .fo_chmod = shm_chmod,
163         .fo_chown = shm_chown,
164         .fo_sendfile = vn_sendfile,
165         .fo_seek = shm_seek,
166         .fo_fill_kinfo = shm_fill_kinfo,
167         .fo_mmap = shm_mmap,
168         .fo_get_seals = shm_get_seals,
169         .fo_add_seals = shm_add_seals,
170         .fo_fallocate = shm_fallocate,
171         .fo_fspacectl = shm_fspacectl,
172         .fo_cmp = file_kcmp_generic,
173         .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
174 };
175
176 FEATURE(posix_shm, "POSIX shared memory");
177
178 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
179     "");
180
181 static int largepage_reclaim_tries = 1;
182 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
183     CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
184     "Number of contig reclaims before giving up for default alloc policy");
185
186 #define shm_rangelock_unlock(shmfd, cookie)                             \
187         rangelock_unlock(&(shmfd)->shm_rl, (cookie), &(shmfd)->shm_mtx)
188 #define shm_rangelock_rlock(shmfd, start, end)                          \
189         rangelock_rlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
190 #define shm_rangelock_tryrlock(shmfd, start, end)                       \
191         rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
192 #define shm_rangelock_wlock(shmfd, start, end)                          \
193         rangelock_wlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
194
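/*
 * The wrappers above pair the range lock with shmfd->shm_mtx as its
 * interlock.  The usual calling pattern is sketched below;
 * shm_dotruncate() later in this file is a real instance of it.
 *
 *	void *rl_cookie;
 *
 *	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
 *	... operate on the write-locked byte range ...
 *	shm_rangelock_unlock(shmfd, rl_cookie);
 */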
195 static int
196 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
197 {
198         vm_page_t m;
199         vm_pindex_t idx;
200         size_t tlen;
201         int error, offset, rv;
202
203         idx = OFF_TO_IDX(uio->uio_offset);
204         offset = uio->uio_offset & PAGE_MASK;
205         tlen = MIN(PAGE_SIZE - offset, len);
206
207         rv = vm_page_grab_valid_unlocked(&m, obj, idx,
208             VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
209         if (rv == VM_PAGER_OK)
210                 goto found;
211
212         /*
213          * Read I/O without either a corresponding resident page or swap
214          * page: use zero_region.  This is intended to avoid instantiating
215          * pages on read from a sparse region.
216          */
217         VM_OBJECT_WLOCK(obj);
218         m = vm_page_lookup(obj, idx);
219         if (uio->uio_rw == UIO_READ && m == NULL &&
220             !vm_pager_has_page(obj, idx, NULL, NULL)) {
221                 VM_OBJECT_WUNLOCK(obj);
222                 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
223         }
224
225         /*
226          * Although the tmpfs vnode lock is held here, it is
227          * nonetheless safe to sleep waiting for a free page.  The
228          * pageout daemon does not need to acquire the tmpfs vnode
229          * lock to page out tobj's pages because tobj is an OBJT_SWAP
230          * type object.
231          */
232         rv = vm_page_grab_valid(&m, obj, idx,
233             VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
234         if (rv != VM_PAGER_OK) {
235                 VM_OBJECT_WUNLOCK(obj);
236                 if (bootverbose) {
237                         printf("uiomove_object: vm_obj %p idx %jd "
238                             "pager error %d\n", obj, idx, rv);
239                 }
240                 return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
241         }
242         VM_OBJECT_WUNLOCK(obj);
243
244 found:
245         error = uiomove_fromphys(&m, offset, tlen, uio);
246         if (uio->uio_rw == UIO_WRITE && error == 0)
247                 vm_page_set_dirty(m);
248         vm_page_activate(m);
249         vm_page_sunbusy(m);
250
251         return (error);
252 }
253
254 int
255 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
256 {
257         ssize_t resid;
258         size_t len;
259         int error;
260
261         error = 0;
262         while ((resid = uio->uio_resid) > 0) {
263                 if (obj_size <= uio->uio_offset)
264                         break;
265                 len = MIN(obj_size - uio->uio_offset, resid);
266                 if (len == 0)
267                         break;
268                 error = uiomove_object_page(obj, len, uio);
269                 if (error != 0 || resid == uio->uio_resid)
270                         break;
271         }
272         return (error);
273 }
274
275 static u_long count_largepages[MAXPAGESIZES];
276
277 static int
278 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
279     int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
280 {
281         vm_page_t m __diagused;
282         int psind;
283
284         psind = object->un_pager.phys.data_val;
285         if (psind == 0 || pidx >= object->size)
286                 return (VM_PAGER_FAIL);
287         *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
288
289         /*
290          * We only busy the first page in the superpage run.  It is
291          * useless to busy the whole run since we only remove full
292          * superpages, and it takes too long to busy e.g. 512 * 512 ==
293          * 262144 pages constituting a 1G amd64 superpage.
294          */
295         m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
296         MPASS(m != NULL);
297
298         *last = *first + atop(pagesizes[psind]) - 1;
299         return (VM_PAGER_OK);
300 }
301
302 static boolean_t
303 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
304     int *before, int *after)
305 {
306         int psind;
307
308         psind = object->un_pager.phys.data_val;
309         if (psind == 0 || pindex >= object->size)
310                 return (FALSE);
311         if (before != NULL) {
312                 *before = pindex - rounddown2(pindex, pagesizes[psind] /
313                     PAGE_SIZE);
314         }
315         if (after != NULL) {
316                 *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
317                     pindex;
318         }
319         return (TRUE);
320 }
321
322 static void
323 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
324     vm_ooffset_t foff, struct ucred *cred)
325 {
326 }
327
328 static void
329 shm_largepage_phys_dtor(vm_object_t object)
330 {
331         int psind;
332
333         psind = object->un_pager.phys.data_val;
334         if (psind != 0) {
335                 atomic_subtract_long(&count_largepages[psind],
336                     object->size / (pagesizes[psind] / PAGE_SIZE));
337                 vm_wire_sub(object->size);
338         } else {
339                 KASSERT(object->size == 0,
340                     ("largepage phys obj %p not initialized but size %#jx > 0",
341                     object, (uintmax_t)object->size));
342         }
343 }
344
345 static const struct phys_pager_ops shm_largepage_phys_ops = {
346         .phys_pg_populate =     shm_largepage_phys_populate,
347         .phys_pg_haspage =      shm_largepage_phys_haspage,
348         .phys_pg_ctor =         shm_largepage_phys_ctor,
349         .phys_pg_dtor =         shm_largepage_phys_dtor,
350 };
351
352 bool
353 shm_largepage(struct shmfd *shmfd)
354 {
355         return (shmfd->shm_object->type == OBJT_PHYS);
356 }
357
358 static void
359 shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
360 {
361         struct shmfd *shm;
362         vm_size_t c;
363
364         swap_pager_freespace(obj, start, size, &c);
365         if (c == 0)
366                 return;
367
368         shm = obj->un_pager.swp.swp_priv;
369         if (shm == NULL)
370                 return;
371         KASSERT(shm->shm_pages >= c,
372             ("shm %p pages %jd free %jd", shm,
373             (uintmax_t)shm->shm_pages, (uintmax_t)c));
374         shm->shm_pages -= c;
375 }
376
377 static void
378 shm_page_inserted(vm_object_t obj, vm_page_t m)
379 {
380         struct shmfd *shm;
381
382         shm = obj->un_pager.swp.swp_priv;
383         if (shm == NULL)
384                 return;
385         if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
386                 shm->shm_pages += 1;
387 }
388
389 static void
390 shm_page_removed(vm_object_t obj, vm_page_t m)
391 {
392         struct shmfd *shm;
393
394         shm = obj->un_pager.swp.swp_priv;
395         if (shm == NULL)
396                 return;
397         if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
398                 KASSERT(shm->shm_pages >= 1,
399                     ("shm %p pages %jd free 1", shm,
400                     (uintmax_t)shm->shm_pages));
401                 shm->shm_pages -= 1;
402         }
403 }
404
405 static struct pagerops shm_swap_pager_ops = {
406         .pgo_kvme_type = KVME_TYPE_SWAP,
407         .pgo_freespace = shm_pager_freespace,
408         .pgo_page_inserted = shm_page_inserted,
409         .pgo_page_removed = shm_page_removed,
410 };
411 static int shmfd_pager_type = -1;
412
413 static int
414 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
415 {
416         struct shmfd *shmfd;
417         off_t foffset;
418         int error;
419
420         shmfd = fp->f_data;
421         foffset = foffset_lock(fp, 0);
422         error = 0;
423         switch (whence) {
424         case L_INCR:
425                 if (foffset < 0 ||
426                     (offset > 0 && foffset > OFF_MAX - offset)) {
427                         error = EOVERFLOW;
428                         break;
429                 }
430                 offset += foffset;
431                 break;
432         case L_XTND:
433                 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
434                         error = EOVERFLOW;
435                         break;
436                 }
437                 offset += shmfd->shm_size;
438                 break;
439         case L_SET:
440                 break;
441         default:
442                 error = EINVAL;
443         }
444         if (error == 0) {
445                 if (offset < 0 || offset > shmfd->shm_size)
446                         error = EINVAL;
447                 else
448                         td->td_uretoff.tdu_off = offset;
449         }
450         foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
451         return (error);
452 }
453
454 static int
455 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
456     int flags, struct thread *td)
457 {
458         struct shmfd *shmfd;
459         void *rl_cookie;
460         int error;
461
462         shmfd = fp->f_data;
463 #ifdef MAC
464         error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
465         if (error)
466                 return (error);
467 #endif
468         foffset_lock_uio(fp, uio, flags);
469         rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
470             uio->uio_offset + uio->uio_resid);
471         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
472         shm_rangelock_unlock(shmfd, rl_cookie);
473         foffset_unlock_uio(fp, uio, flags);
474         return (error);
475 }
476
477 static int
478 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
479     int flags, struct thread *td)
480 {
481         struct shmfd *shmfd;
482         void *rl_cookie;
483         int error;
484         off_t size;
485
486         shmfd = fp->f_data;
487 #ifdef MAC
488         error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
489         if (error)
490                 return (error);
491 #endif
492         if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
493                 return (EINVAL);
494         foffset_lock_uio(fp, uio, flags);
495         if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
496                 /*
497                  * Overflow is only an error if we're supposed to expand on
498                  * write.  Otherwise, we'll just truncate the write to the
499                  * size of the file, which can only grow up to OFF_MAX.
500                  */
501                 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
502                         foffset_unlock_uio(fp, uio, flags);
503                         return (EFBIG);
504                 }
505
506                 size = shmfd->shm_size;
507         } else {
508                 size = uio->uio_offset + uio->uio_resid;
509         }
510         if ((flags & FOF_OFFSET) == 0)
511                 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
512         else
513                 rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
514         if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
515                 error = EPERM;
516         } else {
517                 error = 0;
518                 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
519                     size > shmfd->shm_size) {
520                         error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
521                 }
522                 if (error == 0)
523                         error = uiomove_object(shmfd->shm_object,
524                             shmfd->shm_size, uio);
525         }
526         shm_rangelock_unlock(shmfd, rl_cookie);
527         foffset_unlock_uio(fp, uio, flags);
528         return (error);
529 }
530
531 static int
532 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
533     struct thread *td)
534 {
535         struct shmfd *shmfd;
536 #ifdef MAC
537         int error;
538 #endif
539
540         shmfd = fp->f_data;
541 #ifdef MAC
542         error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
543         if (error)
544                 return (error);
545 #endif
546         return (shm_dotruncate(shmfd, length));
547 }
548
549 int
550 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
551     struct thread *td)
552 {
553         struct shmfd *shmfd;
554         struct shm_largepage_conf *conf;
555         void *rl_cookie;
556
557         shmfd = fp->f_data;
558         switch (com) {
559         case FIONBIO:
560         case FIOASYNC:
561                 /*
562                  * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
563                  * just like it would on an unlinked regular file
564                  */
565                 return (0);
566         case FIOSSHMLPGCNF:
567                 if (!shm_largepage(shmfd))
568                         return (ENOTTY);
569                 conf = data;
570                 if (shmfd->shm_lp_psind != 0 &&
571                     conf->psind != shmfd->shm_lp_psind)
572                         return (EINVAL);
573                 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
574                     pagesizes[conf->psind] == 0)
575                         return (EINVAL);
576                 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
577                     conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
578                     conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
579                         return (EINVAL);
580
581                 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
582                 shmfd->shm_lp_psind = conf->psind;
583                 shmfd->shm_lp_alloc_policy = conf->alloc_policy;
584                 shmfd->shm_object->un_pager.phys.data_val = conf->psind;
585                 shm_rangelock_unlock(shmfd, rl_cookie);
586                 return (0);
587         case FIOGSHMLPGCNF:
588                 if (!shm_largepage(shmfd))
589                         return (ENOTTY);
590                 conf = data;
591                 rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
592                 conf->psind = shmfd->shm_lp_psind;
593                 conf->alloc_policy = shmfd->shm_lp_alloc_policy;
594                 shm_rangelock_unlock(shmfd, rl_cookie);
595                 return (0);
596         default:
597                 return (ENOTTY);
598         }
599 }
600
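/*
 * Illustrative userspace sketch of the largepage ioctls handled above
 * (error handling omitted).  It assumes "fd" already refers to a
 * SHM_LARGEPAGE object and that the headers providing
 * struct shm_largepage_conf and FIOSSHMLPGCNF/FIOGSHMLPGCNF are pulled
 * in as shown; psind selects an entry of the page sizes reported by
 * getpagesizes(3).
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <sys/filio.h>
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);
 *	ioctl(fd, FIOGSHMLPGCNF, &conf);
 */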
601 static int
602 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
603 {
604         struct shmfd *shmfd;
605 #ifdef MAC
606         int error;
607 #endif
608
609         shmfd = fp->f_data;
610
611 #ifdef MAC
612         error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
613         if (error)
614                 return (error);
615 #endif
616
617         /*
618          * Attempt to return sane-ish values for fstat() on a memory file
619          * descriptor.
620          */
621         bzero(sb, sizeof(*sb));
622         sb->st_blksize = PAGE_SIZE;
623         sb->st_size = shmfd->shm_size;
624         mtx_lock(&shm_timestamp_lock);
625         sb->st_atim = shmfd->shm_atime;
626         sb->st_ctim = shmfd->shm_ctime;
627         sb->st_mtim = shmfd->shm_mtime;
628         sb->st_birthtim = shmfd->shm_birthtime;
629         sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
630         sb->st_uid = shmfd->shm_uid;
631         sb->st_gid = shmfd->shm_gid;
632         mtx_unlock(&shm_timestamp_lock);
633         sb->st_dev = shm_dev_ino;
634         sb->st_ino = shmfd->shm_ino;
635         sb->st_nlink = shmfd->shm_object->ref_count;
636         if (shm_largepage(shmfd)) {
637                 sb->st_blocks = shmfd->shm_object->size /
638                     (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
639         } else {
640                 sb->st_blocks = shmfd->shm_pages;
641         }
642
643         return (0);
644 }
645
646 static int
647 shm_close(struct file *fp, struct thread *td)
648 {
649         struct shmfd *shmfd;
650
651         shmfd = fp->f_data;
652         fp->f_data = NULL;
653         shm_drop(shmfd);
654
655         return (0);
656 }
657
658 static int
659 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) {
660         int error;
661         char *path;
662         const char *pr_path;
663         size_t pr_pathlen;
664
665         path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
666         pr_path = td->td_ucred->cr_prison->pr_path;
667
668         /* Construct a full pathname for jailed callers. */
669         pr_pathlen = strcmp(pr_path, "/") ==
670             0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
671         error = copyinstr(userpath_in, path + pr_pathlen,
672             MAXPATHLEN - pr_pathlen, NULL);
673         if (error != 0)
674                 goto out;
675
676 #ifdef KTRACE
677         if (KTRPOINT(curthread, KTR_NAMEI))
678                 ktrnamei(path);
679 #endif
680
681         /* Require paths to start with a '/' character. */
682         if (path[pr_pathlen] != '/') {
683                 error = EINVAL;
684                 goto out;
685         }
686
687         *path_out = path;
688
689 out:
690         if (error != 0)
691                 free(path, M_SHMFD);
692
693         return (error);
694 }
695
696 static int
697 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
698     int end)
699 {
700         vm_page_t m;
701         int rv;
702
703         VM_OBJECT_ASSERT_WLOCKED(object);
704         KASSERT(base >= 0, ("%s: base %d", __func__, base));
705         KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
706             end));
707
708 retry:
709         m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
710         if (m != NULL) {
711                 MPASS(vm_page_all_valid(m));
712         } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
713                 m = vm_page_alloc(object, idx,
714                     VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
715                 if (m == NULL)
716                         goto retry;
717                 vm_object_pip_add(object, 1);
718                 VM_OBJECT_WUNLOCK(object);
719                 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
720                 VM_OBJECT_WLOCK(object);
721                 vm_object_pip_wakeup(object);
722                 if (rv == VM_PAGER_OK) {
723                         /*
724                          * Since the page was not resident, and therefore not
725                          * recently accessed, immediately enqueue it for
726                          * asynchronous laundering.  The current operation is
727                          * not regarded as an access.
728                          */
729                         vm_page_launder(m);
730                 } else {
731                         vm_page_free(m);
732                         VM_OBJECT_WUNLOCK(object);
733                         return (EIO);
734                 }
735         }
736         if (m != NULL) {
737                 pmap_zero_page_area(m, base, end - base);
738                 KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
739                     __func__, m));
740                 vm_page_set_dirty(m);
741                 vm_page_xunbusy(m);
742         }
743
744         return (0);
745 }
746
747 static int
748 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
749 {
750         vm_object_t object;
751         vm_pindex_t nobjsize;
752         vm_ooffset_t delta;
753         int base, error;
754
755         KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
756         object = shmfd->shm_object;
757         VM_OBJECT_ASSERT_WLOCKED(object);
758         rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
759         if (length == shmfd->shm_size)
760                 return (0);
761         nobjsize = OFF_TO_IDX(length + PAGE_MASK);
762
763         /* Are we shrinking?  If so, trim the end. */
764         if (length < shmfd->shm_size) {
765                 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
766                         return (EPERM);
767
768                 /*
769                  * Disallow any requests to shrink the size if this
770                  * object is mapped into the kernel.
771                  */
772                 if (shmfd->shm_kmappings > 0)
773                         return (EBUSY);
774
775                 /*
776                  * Zero the truncated part of the last page.
777                  */
778                 base = length & PAGE_MASK;
779                 if (base != 0) {
780                         error = shm_partial_page_invalidate(object,
781                             OFF_TO_IDX(length), base, PAGE_SIZE);
782                         if (error)
783                                 return (error);
784                 }
785                 delta = IDX_TO_OFF(object->size - nobjsize);
786
787                 if (nobjsize < object->size)
788                         vm_object_page_remove(object, nobjsize, object->size,
789                             0);
790
791                 /* Free the swap accounted for shm */
792                 swap_release_by_cred(delta, object->cred);
793                 object->charge -= delta;
794         } else {
795                 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
796                         return (EPERM);
797
798                 /* Try to reserve additional swap space. */
799                 delta = IDX_TO_OFF(nobjsize - object->size);
800                 if (!swap_reserve_by_cred(delta, object->cred))
801                         return (ENOMEM);
802                 object->charge += delta;
803         }
804         shmfd->shm_size = length;
805         mtx_lock(&shm_timestamp_lock);
806         vfs_timestamp(&shmfd->shm_ctime);
807         shmfd->shm_mtime = shmfd->shm_ctime;
808         mtx_unlock(&shm_timestamp_lock);
809         object->size = nobjsize;
810         return (0);
811 }
812
813 static int
814 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
815 {
816         vm_object_t object;
817         vm_page_t m;
818         vm_pindex_t newobjsz;
819         vm_pindex_t oldobjsz __unused;
820         int aflags, error, i, psind, try;
821
822         KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
823         object = shmfd->shm_object;
824         VM_OBJECT_ASSERT_WLOCKED(object);
825         rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
826
827         oldobjsz = object->size;
828         newobjsz = OFF_TO_IDX(length);
829         if (length == shmfd->shm_size)
830                 return (0);
831         psind = shmfd->shm_lp_psind;
832         if (psind == 0 && length != 0)
833                 return (EINVAL);
834         if ((length & (pagesizes[psind] - 1)) != 0)
835                 return (EINVAL);
836
837         if (length < shmfd->shm_size) {
838                 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
839                         return (EPERM);
840                 if (shmfd->shm_kmappings > 0)
841                         return (EBUSY);
842                 return (ENOTSUP);       /* Pages are unmanaged. */
843 #if 0
844                 vm_object_page_remove(object, newobjsz, oldobjsz, 0);
845                 object->size = newobjsz;
846                 shmfd->shm_size = length;
847                 return (0);
848 #endif
849         }
850
851         if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
852                 return (EPERM);
853
854         aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
855         if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
856                 aflags |= VM_ALLOC_WAITFAIL;
857         try = 0;
858
859         /*
860          * Extend shmfd and object, keeping all already fully
861          * allocated large pages intact even on error, because the
862          * dropped object lock might have allowed them to be mapped.
863          */
864         while (object->size < newobjsz) {
865                 m = vm_page_alloc_contig(object, object->size, aflags,
866                     pagesizes[psind] / PAGE_SIZE, 0, ~0,
867                     pagesizes[psind], 0,
868                     VM_MEMATTR_DEFAULT);
869                 if (m == NULL) {
870                         VM_OBJECT_WUNLOCK(object);
871                         if (shmfd->shm_lp_alloc_policy ==
872                             SHM_LARGEPAGE_ALLOC_NOWAIT ||
873                             (shmfd->shm_lp_alloc_policy ==
874                             SHM_LARGEPAGE_ALLOC_DEFAULT &&
875                             try >= largepage_reclaim_tries)) {
876                                 VM_OBJECT_WLOCK(object);
877                                 return (ENOMEM);
878                         }
879                         error = vm_page_reclaim_contig(aflags,
880                             pagesizes[psind] / PAGE_SIZE, 0, ~0,
881                             pagesizes[psind], 0);
882                         if (error == ENOMEM)
883                                 error = vm_wait_intr(object);
884                         if (error != 0) {
885                                 VM_OBJECT_WLOCK(object);
886                                 return (error);
887                         }
888                         try++;
889                         VM_OBJECT_WLOCK(object);
890                         continue;
891                 }
892                 try = 0;
893                 for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
894                         if ((m[i].flags & PG_ZERO) == 0)
895                                 pmap_zero_page(&m[i]);
896                         vm_page_valid(&m[i]);
897                         vm_page_xunbusy(&m[i]);
898                 }
899                 object->size += OFF_TO_IDX(pagesizes[psind]);
900                 shmfd->shm_size += pagesizes[psind];
901                 atomic_add_long(&count_largepages[psind], 1);
902                 vm_wire_add(atop(pagesizes[psind]));
903         }
904         return (0);
905 }
906
907 static int
908 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
909 {
910         int error;
911
912         VM_OBJECT_WLOCK(shmfd->shm_object);
913         error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
914             length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
915             rl_cookie);
916         VM_OBJECT_WUNLOCK(shmfd->shm_object);
917         return (error);
918 }
919
920 int
921 shm_dotruncate(struct shmfd *shmfd, off_t length)
922 {
923         void *rl_cookie;
924         int error;
925
926         rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
927         error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
928         shm_rangelock_unlock(shmfd, rl_cookie);
929         return (error);
930 }
931
932 /*
933  * shmfd object management including creation and reference counting
934  * routines.
935  */
936 struct shmfd *
937 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
938 {
939         struct shmfd *shmfd;
940         vm_object_t obj;
941
942         shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
943         shmfd->shm_size = 0;
944         shmfd->shm_uid = ucred->cr_uid;
945         shmfd->shm_gid = ucred->cr_gid;
946         shmfd->shm_mode = mode;
947         if (largepage) {
948                 shmfd->shm_object = phys_pager_allocate(NULL,
949                     &shm_largepage_phys_ops, NULL, shmfd->shm_size,
950                     VM_PROT_DEFAULT, 0, ucred);
951                 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
952         } else {
953                 obj = vm_pager_allocate(shmfd_pager_type, NULL,
954                     shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
955                 VM_OBJECT_WLOCK(obj);
956                 obj->un_pager.swp.swp_priv = shmfd;
957                 VM_OBJECT_WUNLOCK(obj);
958                 shmfd->shm_object = obj;
959         }
960         KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
961         vfs_timestamp(&shmfd->shm_birthtime);
962         shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
963             shmfd->shm_birthtime;
964         shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
965         refcount_init(&shmfd->shm_refs, 1);
966         mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
967         rangelock_init(&shmfd->shm_rl);
968 #ifdef MAC
969         mac_posixshm_init(shmfd);
970         mac_posixshm_create(ucred, shmfd);
971 #endif
972
973         return (shmfd);
974 }
975
976 struct shmfd *
977 shm_hold(struct shmfd *shmfd)
978 {
979
980         refcount_acquire(&shmfd->shm_refs);
981         return (shmfd);
982 }
983
984 void
985 shm_drop(struct shmfd *shmfd)
986 {
987         vm_object_t obj;
988
989         if (refcount_release(&shmfd->shm_refs)) {
990 #ifdef MAC
991                 mac_posixshm_destroy(shmfd);
992 #endif
993                 rangelock_destroy(&shmfd->shm_rl);
994                 mtx_destroy(&shmfd->shm_mtx);
995                 obj = shmfd->shm_object;
996                 if (!shm_largepage(shmfd)) {
997                         VM_OBJECT_WLOCK(obj);
998                         obj->un_pager.swp.swp_priv = NULL;
999                         VM_OBJECT_WUNLOCK(obj);
1000                 }
1001                 vm_object_deallocate(obj);
1002                 free(shmfd, M_SHMFD);
1003         }
1004 }
1005
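/*
 * Reference lifecycle sketch: shm_alloc() returns an object holding a
 * single reference, shm_hold() acquires an additional one (e.g. when the
 * object is inserted into the path dictionary or opened again), and the
 * object is destroyed by the shm_drop() that releases the last reference.
 *
 *	shmfd = shm_alloc(ucred, mode, false);		shm_refs == 1
 *	shm_hold(shmfd);				shm_refs == 2
 *	shm_drop(shmfd);				shm_refs == 1
 *	shm_drop(shmfd);				object freed
 */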
1006 /*
1007  * Determine if the credentials have sufficient permissions for a
1008  * specified combination of FREAD and FWRITE.
1009  */
1010 int
1011 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
1012 {
1013         accmode_t accmode;
1014         int error;
1015
1016         accmode = 0;
1017         if (flags & FREAD)
1018                 accmode |= VREAD;
1019         if (flags & FWRITE)
1020                 accmode |= VWRITE;
1021         mtx_lock(&shm_timestamp_lock);
1022         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1023             accmode, ucred);
1024         mtx_unlock(&shm_timestamp_lock);
1025         return (error);
1026 }
1027
1028 static void
1029 shm_init(void *arg)
1030 {
1031         char name[32];
1032         int i;
1033
1034         mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
1035         sx_init(&shm_dict_lock, "shm dictionary");
1036         shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
1037         new_unrhdr64(&shm_ino_unr, 1);
1038         shm_dev_ino = devfs_alloc_cdp_inode();
1039         KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
1040         shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
1041             OBJT_SWAP);
1042         MPASS(shmfd_pager_type != -1);
1043
1044         for (i = 1; i < MAXPAGESIZES; i++) {
1045                 if (pagesizes[i] == 0)
1046                         break;
1047 #define M       (1024 * 1024)
1048 #define G       (1024 * M)
1049                 if (pagesizes[i] >= G)
1050                         snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
1051                 else if (pagesizes[i] >= M)
1052                         snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
1053                 else
1054                         snprintf(name, sizeof(name), "%lu", pagesizes[i]);
1055 #undef G
1056 #undef M
1057                 SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
1058                     OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
1059                     "number of non-transient largepages allocated");
1060         }
1061 }
1062 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
1063
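/*
 * The counters registered above show up as read-only sysctls under
 * vm.largepages, e.g. "vm.largepages.2M" on amd64.  A minimal userspace
 * sketch for reading one of them (the OID name depends on the page sizes
 * the machine supports):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	u_long count;
 *	size_t len = sizeof(count);
 *	if (sysctlbyname("vm.largepages.2M", &count, &len, NULL, 0) == 0)
 *		printf("%lu largepages allocated\n", count);
 */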
1064 /*
1065  * Remove all shared memory objects that belong to a prison.
1066  */
1067 void
1068 shm_remove_prison(struct prison *pr)
1069 {
1070         struct shm_mapping *shmm, *tshmm;
1071         u_long i;
1072
1073         sx_xlock(&shm_dict_lock);
1074         for (i = 0; i < shm_hash + 1; i++) {
1075                 LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
1076                         if (shmm->sm_shmfd->shm_object->cred &&
1077                             shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
1078                                 shm_doremove(shmm);
1079                 }
1080         }
1081         sx_xunlock(&shm_dict_lock);
1082 }
1083
1084 /*
1085  * Dictionary management.  We maintain an in-kernel dictionary to map
1086  * paths to shmfd objects.  We use the FNV hash on the path to store
1087  * the mappings in a hash table.
1088  */
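/*
 * All dictionary lookups and updates are serialized by shm_dict_lock.
 * The caller-side pattern is sketched below; kern_shm_open2() and
 * sys_shm_unlink() are real instances of it.
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	... create via shm_alloc()/shm_insert(), or take a reference ...
 *	sx_xunlock(&shm_dict_lock);
 */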
1089 static struct shmfd *
1090 shm_lookup(char *path, Fnv32_t fnv)
1091 {
1092         struct shm_mapping *map;
1093
1094         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1095                 if (map->sm_fnv != fnv)
1096                         continue;
1097                 if (strcmp(map->sm_path, path) == 0)
1098                         return (map->sm_shmfd);
1099         }
1100
1101         return (NULL);
1102 }
1103
1104 static void
1105 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1106 {
1107         struct shm_mapping *map;
1108
1109         map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1110         map->sm_path = path;
1111         map->sm_fnv = fnv;
1112         map->sm_shmfd = shm_hold(shmfd);
1113         shmfd->shm_path = path;
1114         LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1115 }
1116
1117 static int
1118 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1119 {
1120         struct shm_mapping *map;
1121         int error;
1122
1123         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1124                 if (map->sm_fnv != fnv)
1125                         continue;
1126                 if (strcmp(map->sm_path, path) == 0) {
1127 #ifdef MAC
1128                         error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1129                         if (error)
1130                                 return (error);
1131 #endif
1132                         error = shm_access(map->sm_shmfd, ucred,
1133                             FREAD | FWRITE);
1134                         if (error)
1135                                 return (error);
1136                         shm_doremove(map);
1137                         return (0);
1138                 }
1139         }
1140
1141         return (ENOENT);
1142 }
1143
1144 static void
1145 shm_doremove(struct shm_mapping *map)
1146 {
1147         map->sm_shmfd->shm_path = NULL;
1148         LIST_REMOVE(map, sm_link);
1149         shm_drop(map->sm_shmfd);
1150         free(map->sm_path, M_SHMFD);
1151         free(map, M_SHMFD);
1152 }
1153
1154 int
1155 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1156     int shmflags, struct filecaps *fcaps, const char *name __unused)
1157 {
1158         struct pwddesc *pdp;
1159         struct shmfd *shmfd;
1160         struct file *fp;
1161         char *path;
1162         void *rl_cookie;
1163         Fnv32_t fnv;
1164         mode_t cmode;
1165         int error, fd, initial_seals;
1166         bool largepage;
1167
1168         if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1169             SHM_LARGEPAGE)) != 0)
1170                 return (EINVAL);
1171
1172         initial_seals = F_SEAL_SEAL;
1173         if ((shmflags & SHM_ALLOW_SEALING) != 0)
1174                 initial_seals &= ~F_SEAL_SEAL;
1175
1176 #ifdef CAPABILITY_MODE
1177         /*
1178          * shm_open(2) is only allowed for anonymous objects.
1179          */
1180         if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
1181                 return (ECAPMODE);
1182 #endif
1183
1184         AUDIT_ARG_FFLAGS(flags);
1185         AUDIT_ARG_MODE(mode);
1186
1187         if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1188                 return (EINVAL);
1189
1190         if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1191                 return (EINVAL);
1192
1193         largepage = (shmflags & SHM_LARGEPAGE) != 0;
1194         if (largepage && !PMAP_HAS_LARGEPAGES)
1195                 return (ENOTTY);
1196
1197         /*
1198          * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1199          * If the decision is made later to allow additional seals, care must be
1200          * taken below to ensure that the seals are properly set if the shmfd
1201          * already existed -- this currently assumes that only F_SEAL_SEAL can
1202          * be set and doesn't take further precautions to ensure the validity of
1203          * the seals being added with respect to current mappings.
1204          */
1205         if ((initial_seals & ~F_SEAL_SEAL) != 0)
1206                 return (EINVAL);
1207
1208         pdp = td->td_proc->p_pd;
1209         cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1210
1211         /*
1212          * A shm created by shm_open(2) should always have O_CLOEXEC set, as mandated
1213          * by POSIX.  We allow it to be unset here so that an in-kernel
1214          * interface may be written as a thin layer around shm, optionally not
1215          * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
1216          * in sys_shm_open() to keep this implementation compliant.
1217          */
1218         error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1219         if (error)
1220                 return (error);
1221
1222         /* A SHM_ANON path pointer creates an anonymous object. */
1223         if (userpath == SHM_ANON) {
1224                 /* A read-only anonymous object is pointless. */
1225                 if ((flags & O_ACCMODE) == O_RDONLY) {
1226                         fdclose(td, fp, fd);
1227                         fdrop(fp, td);
1228                         return (EINVAL);
1229                 }
1230                 shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1231                 shmfd->shm_seals = initial_seals;
1232                 shmfd->shm_flags = shmflags;
1233         } else {
1234                 error = shm_copyin_path(td, userpath, &path);
1235                 if (error != 0) {
1236                         fdclose(td, fp, fd);
1237                         fdrop(fp, td);
1238                         return (error);
1239                 }
1240
1241                 AUDIT_ARG_UPATH1_CANON(path);
1242                 fnv = fnv_32_str(path, FNV1_32_INIT);
1243                 sx_xlock(&shm_dict_lock);
1244                 shmfd = shm_lookup(path, fnv);
1245                 if (shmfd == NULL) {
1246                         /* Object does not yet exist, create it if requested. */
1247                         if (flags & O_CREAT) {
1248 #ifdef MAC
1249                                 error = mac_posixshm_check_create(td->td_ucred,
1250                                     path);
1251                                 if (error == 0) {
1252 #endif
1253                                         shmfd = shm_alloc(td->td_ucred, cmode,
1254                                             largepage);
1255                                         shmfd->shm_seals = initial_seals;
1256                                         shmfd->shm_flags = shmflags;
1257                                         shm_insert(path, fnv, shmfd);
1258 #ifdef MAC
1259                                 }
1260 #endif
1261                         } else {
1262                                 free(path, M_SHMFD);
1263                                 error = ENOENT;
1264                         }
1265                 } else {
1266                         rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1267
1268                         /*
1269                          * kern_shm_open() likely shouldn't ever error out on
1270                          * trying to set a seal that already exists, unlike
1271                          * F_ADD_SEALS.  This would break terribly as
1272                          * shm_open(2) actually sets F_SEAL_SEAL to maintain
1273                          * historical behavior where the underlying file could
1274                          * not be sealed.
1275                          */
1276                         initial_seals &= ~shmfd->shm_seals;
1277
1278                         /*
1279                          * Object already exists, obtain a new
1280                          * reference if requested and permitted.
1281                          */
1282                         free(path, M_SHMFD);
1283
1284                         /*
1285                          * initial_seals can't add seals if F_SEAL_SEAL has
1286                          * already been set.  If F_SEAL_SEAL is set,
1287                          * then we've already removed that one from
1288                          * initial_seals.  This is currently redundant as we
1289                          * only allow setting F_SEAL_SEAL at creation time, but
1290                          * it's cheap to check and decreases the effort required
1291                          * to allow additional seals.
1292                          */
1293                         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1294                             initial_seals != 0)
1295                                 error = EPERM;
1296                         else if ((flags & (O_CREAT | O_EXCL)) ==
1297                             (O_CREAT | O_EXCL))
1298                                 error = EEXIST;
1299                         else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1300                                 error = EINVAL;
1301                         else {
1302 #ifdef MAC
1303                                 error = mac_posixshm_check_open(td->td_ucred,
1304                                     shmfd, FFLAGS(flags & O_ACCMODE));
1305                                 if (error == 0)
1306 #endif
1307                                 error = shm_access(shmfd, td->td_ucred,
1308                                     FFLAGS(flags & O_ACCMODE));
1309                         }
1310
1311                         /*
1312                          * Truncate the file back to zero length if
1313                          * O_TRUNC was specified and the object was
1314                          * opened with read/write.
1315                          */
1316                         if (error == 0 &&
1317                             (flags & (O_ACCMODE | O_TRUNC)) ==
1318                             (O_RDWR | O_TRUNC)) {
1319                                 VM_OBJECT_WLOCK(shmfd->shm_object);
1320 #ifdef MAC
1321                                 error = mac_posixshm_check_truncate(
1322                                         td->td_ucred, fp->f_cred, shmfd);
1323                                 if (error == 0)
1324 #endif
1325                                         error = shm_dotruncate_locked(shmfd, 0,
1326                                             rl_cookie);
1327                                 VM_OBJECT_WUNLOCK(shmfd->shm_object);
1328                         }
1329                         if (error == 0) {
1330                                 /*
1331                                  * Currently we only allow F_SEAL_SEAL to be
1332                                  * set initially.  As noted above, this would
1333                                  * need to be reworked should that change.
1334                                  */
1335                                 shmfd->shm_seals |= initial_seals;
1336                                 shm_hold(shmfd);
1337                         }
1338                         shm_rangelock_unlock(shmfd, rl_cookie);
1339                 }
1340                 sx_xunlock(&shm_dict_lock);
1341
1342                 if (error) {
1343                         fdclose(td, fp, fd);
1344                         fdrop(fp, td);
1345                         return (error);
1346                 }
1347         }
1348
1349         finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1350
1351         td->td_retval[0] = fd;
1352         fdrop(fp, td);
1353
1354         return (0);
1355 }
1356
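/*
 * Sealing, enabled above via SHM_ALLOW_SEALING, is driven from userspace
 * with fcntl(2).  A minimal sketch, assuming a descriptor created with
 * sealing allowed (memfd_create(3) with MFD_ALLOW_SEALING is one way to
 * obtain one; error handling omitted):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	ftruncate(fd, 8192);		now returns EPERM (F_SEAL_GROW)
 */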
1357 /* System calls. */
1358 #ifdef COMPAT_FREEBSD12
1359 int
1360 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1361 {
1362
1363         return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1364             uap->mode, NULL));
1365 }
1366 #endif
1367
1368 int
1369 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1370 {
1371         char *path;
1372         Fnv32_t fnv;
1373         int error;
1374
1375         error = shm_copyin_path(td, uap->path, &path);
1376         if (error != 0)
1377                 return (error);
1378
1379         AUDIT_ARG_UPATH1_CANON(path);
1380         fnv = fnv_32_str(path, FNV1_32_INIT);
1381         sx_xlock(&shm_dict_lock);
1382         error = shm_remove(path, fnv, td->td_ucred);
1383         sx_xunlock(&shm_dict_lock);
1384         free(path, M_SHMFD);
1385
1386         return (error);
1387 }
1388
1389 int
1390 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1391 {
1392         char *path_from = NULL, *path_to = NULL;
1393         Fnv32_t fnv_from, fnv_to;
1394         struct shmfd *fd_from;
1395         struct shmfd *fd_to;
1396         int error;
1397         int flags;
1398
1399         flags = uap->flags;
1400         AUDIT_ARG_FFLAGS(flags);
1401
1402         /*
1403          * Make sure the user passed only valid flags.
1404          * If you add a new flag, please add a new term here.
1405          */
1406         if ((flags & ~(
1407             SHM_RENAME_NOREPLACE |
1408             SHM_RENAME_EXCHANGE
1409             )) != 0) {
1410                 error = EINVAL;
1411                 goto out;
1412         }
1413
1414         /*
1415          * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1416          * force the user to choose one or the other.
1417          */
1418         if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1419             (flags & SHM_RENAME_EXCHANGE) != 0) {
1420                 error = EINVAL;
1421                 goto out;
1422         }
1423
1424         /* Renaming to or from anonymous makes no sense */
1425         if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1426                 error = EINVAL;
1427                 goto out;
1428         }
1429
1430         error = shm_copyin_path(td, uap->path_from, &path_from);
1431         if (error != 0)
1432                 goto out;
1433
1434         error = shm_copyin_path(td, uap->path_to, &path_to);
1435         if (error != 0)
1436                 goto out;
1437
1438         AUDIT_ARG_UPATH1_CANON(path_from);
1439         AUDIT_ARG_UPATH2_CANON(path_to);
1440
1441         /* Rename with from/to equal is a no-op */
1442         if (strcmp(path_from, path_to) == 0)
1443                 goto out;
1444
1445         fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1446         fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1447
1448         sx_xlock(&shm_dict_lock);
1449
1450         fd_from = shm_lookup(path_from, fnv_from);
1451         if (fd_from == NULL) {
1452                 error = ENOENT;
1453                 goto out_locked;
1454         }
1455
1456         fd_to = shm_lookup(path_to, fnv_to);
1457         if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1458                 error = EEXIST;
1459                 goto out_locked;
1460         }
1461
1462         /*
1463          * Take a reference unconditionally so that shm_remove cannot
1464          * invalidate the 'from' shm's state.
1465          */
1466         shm_hold(fd_from);
1467         error = shm_remove(path_from, fnv_from, td->td_ucred);
1468
1469         /*
1470          * ENOENT here would mean one of our assumptions failed
1471          * (e.g., the locking did not protect us).
1472          */
1473         KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1474             path_from));
1475         if (error != 0) {
1476                 shm_drop(fd_from);
1477                 goto out_locked;
1478         }
1479
1480         /*
1481          * If we are exchanging, we need to ensure the shm_remove below
1482          * doesn't invalidate the dest shm's state.
1483          */
1484         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1485                 shm_hold(fd_to);
1486
1487         /*
1488          * NOTE: if path_to is not already in the hash, c'est la vie;
1489          * it simply means we have nothing already at path_to to unlink.
1490          * That is the ENOENT case.
1491          *
1492          * If we somehow don't have access to unlink the shm at path_to,
1493          * but did for the shm at path_from, then relink the shm to
1494          * path_from and abort with EACCES.
1495          *
1496          * For all other errors, relink the shm to path_from and abort
1497          * the operation.
1498          */
1499         error = shm_remove(path_to, fnv_to, td->td_ucred);
1500         if (error != 0 && error != ENOENT) {
1501                 shm_insert(path_from, fnv_from, fd_from);
1502                 shm_drop(fd_from);
1503                 /* Don't free path_from now, since the hash references it */
1504                 path_from = NULL;
1505                 goto out_locked;
1506         }
1507
1508         error = 0;
1509
1510         shm_insert(path_to, fnv_to, fd_from);
1511
1512         /* Don't free path_to now, since the hash references it */
1513         path_to = NULL;
1514
1515         /* We kept a ref when we removed, and incremented again in insert */
1516         shm_drop(fd_from);
1517         KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1518             fd_from->shm_refs));
1519
1520         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1521                 shm_insert(path_from, fnv_from, fd_to);
1522                 path_from = NULL;
1523                 shm_drop(fd_to);
1524                 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1525                     fd_to->shm_refs));
1526         }
1527
1528 out_locked:
1529         sx_xunlock(&shm_dict_lock);
1530
1531 out:
1532         free(path_from, M_SHMFD);
1533         free(path_to, M_SHMFD);
1534         return (error);
1535 }
1536
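/*
 * Illustrative userland sketch (guarded out, not built as part of this file)
 * of the flag handling above: SHM_RENAME_NOREPLACE refuses to replace an
 * existing destination, and combining it with SHM_RENAME_EXCHANGE fails with
 * EINVAL.  The object names are made up for the example.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	fd = shm_open("/tmpshm_a", O_RDWR | O_CREAT | O_EXCL, 0600);
	if (fd == -1)
		err(1, "shm_open");
	if (ftruncate(fd, 4096) == -1)
		err(1, "ftruncate");

	/* Fails with EEXIST if /tmpshm_b already exists. */
	if (shm_rename("/tmpshm_a", "/tmpshm_b", SHM_RENAME_NOREPLACE) == -1)
		err(1, "shm_rename");

	/* Passing both rename flags at once would fail with EINVAL. */
	shm_unlink("/tmpshm_b");
	close(fd);
	return (0);
}
#endif
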
1537 static int
1538 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1539     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1540     vm_ooffset_t foff, struct thread *td)
1541 {
1542         struct vmspace *vms;
1543         vm_map_entry_t next_entry, prev_entry;
1544         vm_offset_t align, mask, maxaddr;
1545         int docow, error, rv, try;
1546         bool curmap;
1547
1548         if (shmfd->shm_lp_psind == 0)
1549                 return (EINVAL);
1550
1551         /* MAP_PRIVATE is disabled */
1552         if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1553             MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
1554                 return (EINVAL);
1555
1556         vms = td->td_proc->p_vmspace;
1557         curmap = map == &vms->vm_map;
1558         if (curmap) {
1559                 error = kern_mmap_racct_check(td, map, size);
1560                 if (error != 0)
1561                         return (error);
1562         }
1563
1564         docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1565         docow |= MAP_INHERIT_SHARE;
1566         if ((flags & MAP_NOCORE) != 0)
1567                 docow |= MAP_DISABLE_COREDUMP;
1568
1569         mask = pagesizes[shmfd->shm_lp_psind] - 1;
1570         if ((foff & mask) != 0)
1571                 return (EINVAL);
1572         maxaddr = vm_map_max(map);
1573         if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1574                 maxaddr = MAP_32BIT_MAX_ADDR;
1575         if (size == 0 || (size & mask) != 0 ||
1576             (*addr != 0 && ((*addr & mask) != 0 ||
1577             *addr + size < *addr || *addr + size > maxaddr)))
1578                 return (EINVAL);
1579
1580         align = flags & MAP_ALIGNMENT_MASK;
1581         if (align == 0) {
1582                 align = pagesizes[shmfd->shm_lp_psind];
1583         } else if (align == MAP_ALIGNED_SUPER) {
1584                 if (shmfd->shm_lp_psind != 1)
1585                         return (EINVAL);
1586                 align = pagesizes[1];
1587         } else {
1588                 align >>= MAP_ALIGNMENT_SHIFT;
1589                 align = 1ULL << align;
1590                 /* Also handles overflow. */
1591                 if (align < pagesizes[shmfd->shm_lp_psind])
1592                         return (EINVAL);
1593         }
1594
1595         vm_map_lock(map);
1596         if ((flags & MAP_FIXED) == 0) {
1597                 try = 1;
1598                 if (curmap && (*addr == 0 ||
1599                     (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1600                     *addr < round_page((vm_offset_t)vms->vm_daddr +
1601                     lim_max(td, RLIMIT_DATA))))) {
1602                         *addr = roundup2((vm_offset_t)vms->vm_daddr +
1603                             lim_max(td, RLIMIT_DATA),
1604                             pagesizes[shmfd->shm_lp_psind]);
1605                 }
1606 again:
1607                 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1608                 if (rv != KERN_SUCCESS) {
1609                         if (try == 1) {
1610                                 try = 2;
1611                                 *addr = vm_map_min(map);
1612                                 if ((*addr & mask) != 0)
1613                                         *addr = (*addr + mask) & ~mask;
1614                                 goto again;
1615                         }
1616                         goto fail1;
1617                 }
1618         } else if ((flags & MAP_EXCL) == 0) {
1619                 rv = vm_map_delete(map, *addr, *addr + size);
1620                 if (rv != KERN_SUCCESS)
1621                         goto fail1;
1622         } else {
1623                 error = ENOSPC;
1624                 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1625                         goto fail;
1626                 next_entry = vm_map_entry_succ(prev_entry);
1627                 if (next_entry->start < *addr + size)
1628                         goto fail;
1629         }
1630
1631         rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1632             prot, max_prot, docow);
1633 fail1:
1634         error = vm_mmap_to_errno(rv);
1635 fail:
1636         vm_map_unlock(map);
1637         return (error);
1638 }
1639
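/*
 * Illustrative userland sketch (guarded out, not built here) of creating and
 * mapping a large-page object that satisfies the checks in shm_mmap_large():
 * the length must be a multiple of pagesizes[psind] and only MAP_SHARED is
 * accepted.  shm_create_largepage(3) and getpagesizes(3) are assumed as
 * documented; the object name and psind value are example choices.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	size_t ps[16];
	void *p;
	int fd, n;

	n = getpagesizes(ps, 16);
	if (n < 2)
		errx(1, "no superpage sizes configured");

	/* psind 1 selects pagesizes[1], e.g. 2M on amd64. */
	fd = shm_create_largepage("/lp_example", O_CREAT | O_RDWR | O_EXCL, 1,
	    SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
	if (fd == -1)
		err(1, "shm_create_largepage");
	if (ftruncate(fd, ps[1]) == -1)
		err(1, "ftruncate");

	/* Length and offset must be multiples of ps[1]; MAP_SHARED only. */
	p = mmap(NULL, ps[1], PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	shm_unlink("/lp_example");
	return (0);
}
#endif
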
1640 static int
1641 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1642     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1643     vm_ooffset_t foff, struct thread *td)
1644 {
1645         struct shmfd *shmfd;
1646         vm_prot_t maxprot;
1647         int error;
1648         bool writecnt;
1649         void *rl_cookie;
1650
1651         shmfd = fp->f_data;
1652         maxprot = VM_PROT_NONE;
1653
1654         rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
1655         /* FREAD should always be set. */
1656         if ((fp->f_flag & FREAD) != 0)
1657                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1658
1659         /*
1660          * If FWRITE is set, we can allow VM_PROT_WRITE unless it is a
1661          * shared mapping with a write seal applied.  Private mappings are
1662          * always writable.
1663          */
1664         if ((flags & MAP_SHARED) == 0) {
1665                 cap_maxprot |= VM_PROT_WRITE;
1666                 maxprot |= VM_PROT_WRITE;
1667                 writecnt = false;
1668         } else {
1669                 if ((fp->f_flag & FWRITE) != 0 &&
1670                     (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1671                         maxprot |= VM_PROT_WRITE;
1672
1673                 /*
1674                  * Any mappings from a writable descriptor may be upgraded to
1675                  * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1676                  * applied between the open and subsequent mmap(2).  We want to
1677                  * reject application of a write seal as long as any such
1678                  * mapping exists so that the seal cannot be trivially bypassed.
1679                  */
1680                 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1681                 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1682                         error = EACCES;
1683                         goto out;
1684                 }
1685         }
1686         maxprot &= cap_maxprot;
1687
1688         /* See comment in vn_mmap(). */
1689         if (
1690 #ifdef _LP64
1691             objsize > OFF_MAX ||
1692 #endif
1693             foff > OFF_MAX - objsize) {
1694                 error = EINVAL;
1695                 goto out;
1696         }
1697
1698 #ifdef MAC
1699         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1700         if (error != 0)
1701                 goto out;
1702 #endif
1703
1704         mtx_lock(&shm_timestamp_lock);
1705         vfs_timestamp(&shmfd->shm_atime);
1706         mtx_unlock(&shm_timestamp_lock);
1707         vm_object_reference(shmfd->shm_object);
1708
1709         if (shm_largepage(shmfd)) {
1710                 writecnt = false;
1711                 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1712                     maxprot, flags, foff, td);
1713         } else {
1714                 if (writecnt) {
1715                         vm_pager_update_writecount(shmfd->shm_object, 0,
1716                             objsize);
1717                 }
1718                 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1719                     shmfd->shm_object, foff, writecnt, td);
1720         }
1721         if (error != 0) {
1722                 if (writecnt)
1723                         vm_pager_release_writecount(shmfd->shm_object, 0,
1724                             objsize);
1725                 vm_object_deallocate(shmfd->shm_object);
1726         }
1727 out:
1728         shm_rangelock_unlock(shmfd, rl_cookie);
1729         return (error);
1730 }
1731
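/*
 * Illustrative userland sketch (guarded out, not built here) of the
 * protection rules above: a descriptor opened read-only cannot back a
 * writable MAP_SHARED mapping, while a MAP_PRIVATE (copy-on-write) mapping
 * stays writable.  The object name is made up for the example.
 */
#if 0
#include <sys/mman.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	void *p;
	int fdro, fdrw;

	fdrw = shm_open("/prot_example", O_RDWR | O_CREAT | O_EXCL, 0600);
	if (fdrw == -1)
		err(1, "shm_open");
	if (ftruncate(fdrw, 4096) == -1)
		err(1, "ftruncate");
	fdro = shm_open("/prot_example", O_RDONLY, 0);
	if (fdro == -1)
		err(1, "shm_open ro");

	/* A writable shared mapping of a read-only fd fails with EACCES. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fdro, 0);
	assert(p == MAP_FAILED && errno == EACCES);

	/* A private mapping may still be written. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fdro, 0);
	assert(p != MAP_FAILED);

	shm_unlink("/prot_example");
	return (0);
}
#endif
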
1732 static int
1733 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1734     struct thread *td)
1735 {
1736         struct shmfd *shmfd;
1737         int error;
1738
1739         error = 0;
1740         shmfd = fp->f_data;
1741         mtx_lock(&shm_timestamp_lock);
1742         /*
1743          * SUSv4 says that the execute bits of the permission need not be
1744          * affected.  Be consistent with our shm_open() implementation.
1745          */
1746 #ifdef MAC
1747         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1748         if (error != 0)
1749                 goto out;
1750 #endif
1751         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1752             VADMIN, active_cred);
1753         if (error != 0)
1754                 goto out;
1755         shmfd->shm_mode = mode & ACCESSPERMS;
1756 out:
1757         mtx_unlock(&shm_timestamp_lock);
1758         return (error);
1759 }
1760
1761 static int
1762 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1763     struct thread *td)
1764 {
1765         struct shmfd *shmfd;
1766         int error;
1767
1768         error = 0;
1769         shmfd = fp->f_data;
1770         mtx_lock(&shm_timestamp_lock);
1771 #ifdef MAC
1772         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1773         if (error != 0)
1774                 goto out;
1775 #endif
1776         if (uid == (uid_t)-1)
1777                 uid = shmfd->shm_uid;
1778         if (gid == (gid_t)-1)
1779                 gid = shmfd->shm_gid;
1780         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1781             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1782             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1783                 goto out;
1784         shmfd->shm_uid = uid;
1785         shmfd->shm_gid = gid;
1786 out:
1787         mtx_unlock(&shm_timestamp_lock);
1788         return (error);
1789 }
1790
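/*
 * Illustrative userland sketch (guarded out, not built here): mode and
 * ownership changes reach shm_chmod() and shm_chown() through the ordinary
 * fchmod(2)/fchown(2) descriptor operations, and only the ACCESSPERMS bits
 * are retained.  The object name is made up for the example.
 */
#if 0
#include <sys/mman.h>
#include <sys/stat.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	fd = shm_open("/mode_example", O_RDWR | O_CREAT | O_EXCL, 0600);
	if (fd == -1)
		err(1, "shm_open");
	if (fchmod(fd, 0644) == -1)			/* shm_chmod() */
		err(1, "fchmod");
	if (fchown(fd, getuid(), getgid()) == -1)	/* shm_chown() */
		err(1, "fchown");
	shm_unlink("/mode_example");
	close(fd);
	return (0);
}
#endif
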
1791 /*
1792  * Helper routines to allow the backing object of a shared memory file
1793  * descriptor to be mapped in the kernel.
1794  */
1795 int
1796 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1797 {
1798         struct shmfd *shmfd;
1799         vm_offset_t kva, ofs;
1800         vm_object_t obj;
1801         int rv;
1802
1803         if (fp->f_type != DTYPE_SHM)
1804                 return (EINVAL);
1805         shmfd = fp->f_data;
1806         obj = shmfd->shm_object;
1807         VM_OBJECT_WLOCK(obj);
1808         /*
1809          * XXXRW: This validation is probably insufficient, and subject to
1810          * sign errors.  It should be fixed.
1811          */
1812         if (offset >= shmfd->shm_size ||
1813             offset + size > round_page(shmfd->shm_size)) {
1814                 VM_OBJECT_WUNLOCK(obj);
1815                 return (EINVAL);
1816         }
1817
1818         shmfd->shm_kmappings++;
1819         vm_object_reference_locked(obj);
1820         VM_OBJECT_WUNLOCK(obj);
1821
1822         /* Map the object into the kernel_map and wire it. */
1823         kva = vm_map_min(kernel_map);
1824         ofs = offset & PAGE_MASK;
1825         offset = trunc_page(offset);
1826         size = round_page(size + ofs);
1827         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1828             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1829             VM_PROT_READ | VM_PROT_WRITE, 0);
1830         if (rv == KERN_SUCCESS) {
1831                 rv = vm_map_wire(kernel_map, kva, kva + size,
1832                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1833                 if (rv == KERN_SUCCESS) {
1834                         *memp = (void *)(kva + ofs);
1835                         return (0);
1836                 }
1837                 vm_map_remove(kernel_map, kva, kva + size);
1838         } else
1839                 vm_object_deallocate(obj);
1840
1841         /* On failure, drop our mapping reference. */
1842         VM_OBJECT_WLOCK(obj);
1843         shmfd->shm_kmappings--;
1844         VM_OBJECT_WUNLOCK(obj);
1845
1846         return (vm_mmap_to_errno(rv));
1847 }
1848
1849 /*
1850  * We require the caller to unmap the entire entry.  This allows us to
1851  * safely decrement shm_kmappings when a mapping is removed.
1852  */
1853 int
1854 shm_unmap(struct file *fp, void *mem, size_t size)
1855 {
1856         struct shmfd *shmfd;
1857         vm_map_entry_t entry;
1858         vm_offset_t kva, ofs;
1859         vm_object_t obj;
1860         vm_pindex_t pindex;
1861         vm_prot_t prot;
1862         boolean_t wired;
1863         vm_map_t map;
1864         int rv;
1865
1866         if (fp->f_type != DTYPE_SHM)
1867                 return (EINVAL);
1868         shmfd = fp->f_data;
1869         kva = (vm_offset_t)mem;
1870         ofs = kva & PAGE_MASK;
1871         kva = trunc_page(kva);
1872         size = round_page(size + ofs);
1873         map = kernel_map;
1874         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1875             &obj, &pindex, &prot, &wired);
1876         if (rv != KERN_SUCCESS)
1877                 return (EINVAL);
1878         if (entry->start != kva || entry->end != kva + size) {
1879                 vm_map_lookup_done(map, entry);
1880                 return (EINVAL);
1881         }
1882         vm_map_lookup_done(map, entry);
1883         if (obj != shmfd->shm_object)
1884                 return (EINVAL);
1885         vm_map_remove(map, kva, kva + size);
1886         VM_OBJECT_WLOCK(obj);
1887         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1888         shmfd->shm_kmappings--;
1889         VM_OBJECT_WUNLOCK(obj);
1890         return (0);
1891 }
1892
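/*
 * Minimal in-kernel sketch (guarded out, not part of this file's build) of
 * the shm_map()/shm_unmap() pairing above.  example_peek_shm() and the way
 * 'fp' is obtained are hypothetical; the descriptor must be of type
 * DTYPE_SHM and the object at least one page long.
 */
#if 0
static int
example_peek_shm(struct file *fp)
{
	void *mem;
	int error;

	/* Map and wire the first page of the backing object. */
	error = shm_map(fp, PAGE_SIZE, 0, &mem);
	if (error != 0)
		return (error);
	*(char *)mem = 1;	/* pages are wired; access them directly */
	/* The entire mapping must be torn down in a single call. */
	error = shm_unmap(fp, mem, PAGE_SIZE);
	return (error);
}
#endif
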
1893 static int
1894 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1895 {
1896         const char *path, *pr_path;
1897         size_t pr_pathlen;
1898         bool visible;
1899
1900         sx_assert(&shm_dict_lock, SA_LOCKED);
1901         kif->kf_type = KF_TYPE_SHM;
1902         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1903         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1904         if (shmfd->shm_path != NULL) {
1906                 path = shmfd->shm_path;
1907                 pr_path = curthread->td_ucred->cr_prison->pr_path;
1908                 if (strcmp(pr_path, "/") != 0) {
1909                         /* Return the jail-rooted pathname. */
1910                         pr_pathlen = strlen(pr_path);
1911                         visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1912                             path[pr_pathlen] == '/';
1913                         if (list && !visible)
1914                                 return (EPERM);
1915                         if (visible)
1916                                 path += pr_pathlen;
1917                 }
1918                 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1919         }
1921         return (0);
1922 }
1923
1924 static int
1925 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1926     struct filedesc *fdp __unused)
1927 {
1928         int res;
1929
1930         sx_slock(&shm_dict_lock);
1931         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1932         sx_sunlock(&shm_dict_lock);
1933         return (res);
1934 }
1935
1936 static int
1937 shm_add_seals(struct file *fp, int seals)
1938 {
1939         struct shmfd *shmfd;
1940         void *rl_cookie;
1941         vm_ooffset_t writemappings;
1942         int error, nseals;
1943
1944         error = 0;
1945         shmfd = fp->f_data;
1946         rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1947
1948         /* Even already-set seals should result in EPERM. */
1949         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1950                 error = EPERM;
1951                 goto out;
1952         }
1953         nseals = seals & ~shmfd->shm_seals;
1954         if ((nseals & F_SEAL_WRITE) != 0) {
1955                 if (shm_largepage(shmfd)) {
1956                         error = ENOTSUP;
1957                         goto out;
1958                 }
1959
1960                 /*
1961                  * The rangelock above prevents writable mappings from being
1962                  * added after we've started applying seals.  The RLOCK here
1963                  * is to avoid torn reads on ILP32 arches as unmapping/reducing
1964                  * writemappings will be done without a rangelock.
1965                  */
1966                 VM_OBJECT_RLOCK(shmfd->shm_object);
1967                 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1968                 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1969                 /* kmappings are also writable */
1970                 if (writemappings > 0) {
1971                         error = EBUSY;
1972                         goto out;
1973                 }
1974         }
1975         shmfd->shm_seals |= nseals;
1976 out:
1977         shm_rangelock_unlock(shmfd, rl_cookie);
1978         return (error);
1979 }
1980
1981 static int
1982 shm_get_seals(struct file *fp, int *seals)
1983 {
1984         struct shmfd *shmfd;
1985
1986         shmfd = fp->f_data;
1987         *seals = shmfd->shm_seals;
1988         return (0);
1989 }
1990
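/*
 * Illustrative userland sketch (guarded out, not built here) of applying
 * seals: F_SEAL_WRITE is refused with EBUSY while a writable shared mapping
 * exists, so the mapping is dropped first.  memfd_create(3) with
 * MFD_ALLOW_SEALING is assumed as documented.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	void *p;
	int fd;

	fd = memfd_create("seal_example", MFD_ALLOW_SEALING);
	if (fd == -1)
		err(1, "memfd_create");
	if (ftruncate(fd, 4096) == -1)
		err(1, "ftruncate");

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	munmap(p, 4096);	/* drop the writable mapping before sealing */

	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE | F_SEAL_SEAL) == -1)
		err(1, "F_ADD_SEALS");
	close(fd);
	return (0);
}
#endif
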
1991 static int
1992 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1993 {
1994         vm_object_t object;
1995         vm_pindex_t pistart, pi, piend;
1996         vm_ooffset_t off, len;
1997         int startofs, endofs, end;
1998         int error;
1999
2000         off = *offset;
2001         len = *length;
2002         KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2003         if (off + len > shmfd->shm_size)
2004                 len = shmfd->shm_size - off;
2005         object = shmfd->shm_object;
2006         startofs = off & PAGE_MASK;
2007         endofs = (off + len) & PAGE_MASK;
2008         pistart = OFF_TO_IDX(off);
2009         piend = OFF_TO_IDX(off + len);
2010         pi = OFF_TO_IDX(off + PAGE_MASK);
2011         error = 0;
2012
2013         /* Handle the case when offset is on or beyond shm size. */
2014         if ((off_t)len <= 0) {
2015                 *length = 0;
2016                 return (0);
2017         }
2018
2019         VM_OBJECT_WLOCK(object);
2020
2021         if (startofs != 0) {
2022                 end = pistart != piend ? PAGE_SIZE : endofs;
2023                 error = shm_partial_page_invalidate(object, pistart, startofs,
2024                     end);
2025                 if (error)
2026                         goto out;
2027                 off += end - startofs;
2028                 len -= end - startofs;
2029         }
2030
2031         if (pi < piend) {
2032                 vm_object_page_remove(object, pi, piend, 0);
2033                 off += IDX_TO_OFF(piend - pi);
2034                 len -= IDX_TO_OFF(piend - pi);
2035         }
2036
2037         if (endofs != 0 && pistart != piend) {
2038                 error = shm_partial_page_invalidate(object, piend, 0, endofs);
2039                 if (error)
2040                         goto out;
2041                 off += endofs;
2042                 len -= endofs;
2043         }
2044
2045 out:
2046         VM_OBJECT_WUNLOCK(shmfd->shm_object);
2047         *offset = off;
2048         *length = len;
2049         return (error);
2050 }
2051
2052 static int
2053 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
2054     struct ucred *active_cred, struct thread *td)
2055 {
2056         void *rl_cookie;
2057         struct shmfd *shmfd;
2058         off_t off, len;
2059         int error;
2060
2061         KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
2062         KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
2063             ("shm_fspacectl: non-zero flags"));
2064         KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
2065             ("shm_fspacectl: offset/length overflow or underflow"));
2066         error = EINVAL;
2067         shmfd = fp->f_data;
2068         off = *offset;
2069         len = *length;
2070
2071         rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
2072         switch (cmd) {
2073         case SPACECTL_DEALLOC:
2074                 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2075                         error = EPERM;
2076                         break;
2077                 }
2078                 error = shm_deallocate(shmfd, &off, &len, flags);
2079                 *offset = off;
2080                 *length = len;
2081                 break;
2082         default:
2083                 __assert_unreachable();
2084         }
2085         shm_rangelock_unlock(shmfd, rl_cookie);
2086         return (error);
2087 }
2088
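/*
 * Illustrative userland sketch (guarded out, not built here) of hole
 * punching through fspacectl(2) with SPACECTL_DEALLOC, which lands in
 * shm_fspacectl() above.  The spacectl_range layout is assumed as described
 * in fspacectl(2); the offsets and lengths are arbitrary page multiples.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	struct spacectl_range rqsr = { .r_offset = 4096, .r_len = 4096 };
	struct spacectl_range rmsr;
	int fd;

	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	if (fd == -1)
		err(1, "shm_open");
	if (ftruncate(fd, 3 * 4096) == -1)
		err(1, "ftruncate");
	/* Fails with EPERM once F_SEAL_WRITE has been applied. */
	if (fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, &rmsr) == -1)
		err(1, "fspacectl");
	close(fd);
	return (0);
}
#endif
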
2090 static int
2091 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2092 {
2093         void *rl_cookie;
2094         struct shmfd *shmfd;
2095         size_t size;
2096         int error;
2097
2098         /* This assumes that the caller already checked for overflow. */
2099         error = 0;
2100         shmfd = fp->f_data;
2101         size = offset + len;
2102
2103         /*
2104          * Just grab the rangelock for the range that we may be attempting to
2105          * grow, rather than blocking read/write for regions we won't be
2106          * touching while this (potential) resize is in progress.  Other
2107          * attempts to resize the shmfd will have to take a write lock from 0 to
2108          * OFF_MAX, so this being potentially beyond the current usable range of
2109          * the shmfd is not necessarily a concern.  If other mechanisms are
2110          * added to grow a shmfd, this may need to be re-evaluated.
2111          */
2112         rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
2113         if (size > shmfd->shm_size)
2114                 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2115         shm_rangelock_unlock(shmfd, rl_cookie);
2116         /* Translate to posix_fallocate(2) return value as needed. */
2117         if (error == ENOMEM)
2118                 error = ENOSPC;
2119         return (error);
2120 }
2121
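/*
 * Illustrative userland sketch (guarded out, not built here):
 * posix_fallocate(2) on a shm descriptor only ever grows the object; if the
 * requested range already fits, the call is a no-op, and a kernel ENOMEM is
 * reported as ENOSPC per the translation above.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int error, fd;

	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	if (fd == -1)
		err(1, "shm_open");
	error = posix_fallocate(fd, 0, 1 << 20);	/* grow to 1 MiB */
	if (error != 0)
		errc(1, error, "posix_fallocate");
	close(fd);
	return (0);
}
#endif
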
2122 static int
2123 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2124 {
2125         struct shm_mapping *shmm;
2126         struct sbuf sb;
2127         struct kinfo_file kif;
2128         u_long i;
2129         int error, error2;
2130
2131         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2132         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2133         error = 0;
2134         sx_slock(&shm_dict_lock);
2135         for (i = 0; i < shm_hash + 1; i++) {
2136                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2137                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2138                             &kif, true);
2139                         if (error == EPERM) {
2140                                 error = 0;
2141                                 continue;
2142                         }
2143                         if (error != 0)
2144                                 break;
2145                         pack_kinfo(&kif);
2146                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2147                             0 : ENOMEM;
2148                         if (error != 0)
2149                                 break;
2150                 }
2151         }
2152         sx_sunlock(&shm_dict_lock);
2153         error2 = sbuf_finish(&sb);
2154         sbuf_delete(&sb);
2155         return (error != 0 ? error : error2);
2156 }
2157
2158 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2159     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2160     NULL, 0, sysctl_posix_shm_list, "",
2161     "POSIX SHM list");
2162
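/*
 * Illustrative userland sketch (guarded out, not built here) of consuming
 * the kern.ipc.posix_shm_list sysctl exported above: the buffer is a packed
 * sequence of variable-length kinfo_file records, much like what
 * posixshmcontrol(1) reads.  The buffer sizing headroom is a rough guess.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct kinfo_file *kif;
	char *buf, *bp, *ep;
	size_t len;

	if (sysctlbyname("kern.ipc.posix_shm_list", NULL, &len, NULL, 0) != 0)
		err(1, "sysctl size");
	len = len * 4 / 3 + sizeof(*kif);	/* headroom for growth */
	buf = malloc(len);
	if (buf == NULL)
		err(1, "malloc");
	if (sysctlbyname("kern.ipc.posix_shm_list", buf, &len, NULL, 0) != 0)
		err(1, "sysctl");
	for (bp = buf, ep = buf + len; bp < ep; bp += kif->kf_structsize) {
		kif = (struct kinfo_file *)(void *)bp;
		if (kif->kf_structsize <= 0)
			break;
		printf("%s size %jd\n", kif->kf_path,
		    (intmax_t)kif->kf_un.kf_file.kf_file_size);
	}
	free(buf);
	return (0);
}
#endif
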
2163 int
2164 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2165     struct filecaps *caps)
2166 {
2167
2168         return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2169 }
2170
2171 /*
2172  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2173  * caller, and libc will enforce it for the traditional shm_open() call.  This
2174  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
2175  * interface also includes a 'name' argument that is currently unused, but could
2176  * potentially be exported later via some interface for debugging purposes.
2177  * From the kernel's perspective, it is optional.  Individual consumers like
2178  * memfd_create() may require it in order to be compatible with other systems
2179  * implementing the same function.
2180  */
2181 int
2182 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2183 {
2184
2185         return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2186             uap->shmflags, NULL, uap->name));
2187 }
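
/*
 * Illustrative userland sketch (guarded out, not built here): memfd_create(3)
 * is the main consumer of this interface.  Its use of shm_open2() flags such
 * as SHM_GROW_ON_WRITE is an assumption based on the libc wrapper's
 * documentation, under which a write(2) past the end extends the object.
 */
#if 0
#include <sys/mman.h>

#include <err.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	if (fd == -1)
		err(1, "memfd_create");
	if (write(fd, "hi", 2) != 2)	/* expected to grow the object */
		err(1, "write");
	close(fd);
	return (0);
}
#endif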