/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).  While most of the
 * implementation is here, vm_mmap.c contains the mapping logic
 * changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
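
/*
 * Illustrative sketch, not part of the original file: a minimal
 * userspace consumer of the interfaces implemented below.  The object
 * name "/example" and the 4096-byte size are arbitrary assumptions,
 * and error handling is omitted for brevity.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd;
 *		char *p;
 *
 *		fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *		ftruncate(fd, 4096);		/* size the swap-backed object */
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		strcpy(p, "hello");
 *		munmap(p, 4096);
 *		close(fd);
 *		shm_unlink("/example");		/* remove the name */
 *		return (0);
 *	}
 */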

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
        char            *sm_path;
        Fnv32_t         sm_fnv;
        struct shmfd    *sm_shmfd;
        LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])

static void     shm_init(void *arg);
static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int      shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int      shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);

static fo_rdwr_t        shm_read;
static fo_rdwr_t        shm_write;
static fo_truncate_t    shm_truncate;
static fo_ioctl_t       shm_ioctl;
static fo_stat_t        shm_stat;
static fo_close_t       shm_close;
static fo_chmod_t       shm_chmod;
static fo_chown_t       shm_chown;
static fo_seek_t        shm_seek;
static fo_fill_kinfo_t  shm_fill_kinfo;
static fo_mmap_t        shm_mmap;
static fo_get_seals_t   shm_get_seals;
static fo_add_seals_t   shm_add_seals;
static fo_fallocate_t   shm_fallocate;

/* File descriptor operations. */
struct fileops shm_ops = {
        .fo_read = shm_read,
        .fo_write = shm_write,
        .fo_truncate = shm_truncate,
        .fo_ioctl = shm_ioctl,
        .fo_poll = invfo_poll,
        .fo_kqfilter = invfo_kqfilter,
        .fo_stat = shm_stat,
        .fo_close = shm_close,
        .fo_chmod = shm_chmod,
        .fo_chown = shm_chown,
        .fo_sendfile = vn_sendfile,
        .fo_seek = shm_seek,
        .fo_fill_kinfo = shm_fill_kinfo,
        .fo_mmap = shm_mmap,
        .fo_get_seals = shm_get_seals,
        .fo_add_seals = shm_add_seals,
        .fo_fallocate = shm_fallocate,
        .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
};

FEATURE(posix_shm, "POSIX shared memory");

static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "");

static int largepage_reclaim_tries = 1;
SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
    CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
    "Number of contig reclaims before giving up for default alloc policy");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
        vm_page_t m;
        vm_pindex_t idx;
        size_t tlen;
        int error, offset, rv;

        idx = OFF_TO_IDX(uio->uio_offset);
        offset = uio->uio_offset & PAGE_MASK;
        tlen = MIN(PAGE_SIZE - offset, len);

        rv = vm_page_grab_valid_unlocked(&m, obj, idx,
            VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
        if (rv == VM_PAGER_OK)
                goto found;

        /*
         * Read I/O without either a corresponding resident page or swap
         * page: use zero_region.  This is intended to avoid instantiating
         * pages on read from a sparse region.
         */
        VM_OBJECT_WLOCK(obj);
        m = vm_page_lookup(obj, idx);
        if (uio->uio_rw == UIO_READ && m == NULL &&
            !vm_pager_has_page(obj, idx, NULL, NULL)) {
                VM_OBJECT_WUNLOCK(obj);
                return (uiomove(__DECONST(void *, zero_region), tlen, uio));
        }

        /*
         * Although the tmpfs vnode lock is held here, it is
         * nonetheless safe to sleep waiting for a free page.  The
         * pageout daemon does not need to acquire the tmpfs vnode
         * lock to page out obj's pages because obj is an OBJT_SWAP
         * type object.
         */
        rv = vm_page_grab_valid(&m, obj, idx,
            VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
        if (rv != VM_PAGER_OK) {
                VM_OBJECT_WUNLOCK(obj);
                printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
                    obj, idx, rv);
                return (EIO);
        }
        VM_OBJECT_WUNLOCK(obj);

found:
        error = uiomove_fromphys(&m, offset, tlen, uio);
        if (uio->uio_rw == UIO_WRITE && error == 0)
                vm_page_set_dirty(m);
        vm_page_activate(m);
        vm_page_sunbusy(m);

        return (error);
}

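/*
 * Copy data between the VM object and the request described by the
 * uio, one page at a time, stopping at obj_size or when an iteration
 * makes no progress.
 */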
int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
        ssize_t resid;
        size_t len;
        int error;

        error = 0;
        while ((resid = uio->uio_resid) > 0) {
                if (obj_size <= uio->uio_offset)
                        break;
                len = MIN(obj_size - uio->uio_offset, resid);
                if (len == 0)
                        break;
                error = uiomove_object_page(obj, len, uio);
                if (error != 0 || resid == uio->uio_resid)
                        break;
        }
        return (error);
}

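/*
 * Per-pagesize-index counters of allocated largepages, exported as
 * read-only sysctls under vm.largepages by shm_init() below.
 */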
static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
        vm_page_t m __diagused;
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind == 0 || pidx >= object->size)
                return (VM_PAGER_FAIL);
        *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

        /*
         * We only busy the first page in the superpage run.  It is
         * useless to busy the whole run since we only remove a full
         * superpage, and it takes too long to busy e.g. 512 * 512 ==
         * 262144 pages constituting a 1G amd64 superpage.
         */
        m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
        MPASS(m != NULL);

        *last = *first + atop(pagesizes[psind]) - 1;
        return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind == 0 || pindex >= object->size)
                return (FALSE);
        if (before != NULL) {
                *before = pindex - rounddown2(pindex, pagesizes[psind] /
                    PAGE_SIZE);
        }
        if (after != NULL) {
                *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
                    pindex;
        }
        return (TRUE);
}

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
        int psind;

        psind = object->un_pager.phys.data_val;
        if (psind != 0) {
                atomic_subtract_long(&count_largepages[psind],
                    object->size / (pagesizes[psind] / PAGE_SIZE));
                vm_wire_sub(object->size);
        } else {
                KASSERT(object->size == 0,
                    ("largepage phys obj %p not initialized but size %#jx > 0",
                    object, (uintmax_t)object->size));
        }
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
        .phys_pg_populate =     shm_largepage_phys_populate,
        .phys_pg_haspage =      shm_largepage_phys_haspage,
        .phys_pg_ctor =         shm_largepage_phys_ctor,
        .phys_pg_dtor =         shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
        return (shmfd->shm_object->type == OBJT_PHYS);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
        struct shmfd *shmfd;
        off_t foffset;
        int error;

        shmfd = fp->f_data;
        foffset = foffset_lock(fp, 0);
        error = 0;
        switch (whence) {
        case L_INCR:
                if (foffset < 0 ||
                    (offset > 0 && foffset > OFF_MAX - offset)) {
                        error = EOVERFLOW;
                        break;
                }
                offset += foffset;
                break;
        case L_XTND:
                if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
                        error = EOVERFLOW;
                        break;
                }
                offset += shmfd->shm_size;
                break;
        case L_SET:
                break;
        default:
                error = EINVAL;
        }
        if (error == 0) {
                if (offset < 0 || offset > shmfd->shm_size)
                        error = EINVAL;
                else
                        td->td_uretoff.tdu_off = offset;
        }
        foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
        return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
            uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;
        off_t size;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
                return (EINVAL);
        foffset_lock_uio(fp, uio, flags);
        if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
                /*
                 * Overflow is only an error if we're supposed to expand on
                 * write.  Otherwise, we'll just truncate the write to the
                 * size of the file, which can only grow up to OFF_MAX.
                 */
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
                        foffset_unlock_uio(fp, uio, flags);
                        return (EFBIG);
                }

                size = shmfd->shm_size;
        } else {
                size = uio->uio_offset + uio->uio_resid;
        }
        if ((flags & FOF_OFFSET) == 0) {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
        } else {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
                    size, &shmfd->shm_mtx);
        }
        if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
                error = EPERM;
        } else {
                error = 0;
                if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
                    size > shmfd->shm_size) {
                        error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
                }
                if (error == 0)
                        error = uiomove_object(shmfd->shm_object,
                            shmfd->shm_size, uio);
        }
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        return (shm_dotruncate(shmfd, length));
}

int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        struct shm_largepage_conf *conf;
        void *rl_cookie;

        shmfd = fp->f_data;
        switch (com) {
        case FIONBIO:
        case FIOASYNC:
                /*
                 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
                 * just like it would on an unlinked regular file.
                 */
                return (0);
        case FIOSSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
                if (shmfd->shm_lp_psind != 0 &&
                    conf->psind != shmfd->shm_lp_psind)
                        return (EINVAL);
                if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
                    pagesizes[conf->psind] == 0)
                        return (EINVAL);
                if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
                    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
                        return (EINVAL);

                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
                shmfd->shm_lp_psind = conf->psind;
                shmfd->shm_lp_alloc_policy = conf->alloc_policy;
                shmfd->shm_object->un_pager.phys.data_val = conf->psind;
                rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
                return (0);
        case FIOGSHMLPGCNF:
                if (!shm_largepage(shmfd))
                        return (ENOTTY);
                conf = data;
                rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
                conf->psind = shmfd->shm_lp_psind;
                conf->alloc_policy = shmfd->shm_lp_alloc_policy;
                rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
                return (0);
        default:
                return (ENOTTY);
        }
}

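/*
 * Illustrative sketch, not part of the original file: configuring a
 * largepage object from userspace via the ioctls handled above.  It
 * assumes fd already refers to a SHM_LARGEPAGE object (e.g. one
 * created with shm_create_largepage(3)) and that page size index 1
 * exists (2M pages on amd64); error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <sys/filio.h>
 *	#include <string.h>
 *
 *	static void
 *	set_largepage_conf(int fd)
 *	{
 *		struct shm_largepage_conf slc;
 *
 *		memset(&slc, 0, sizeof(slc));
 *		slc.psind = 1;
 *		slc.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
 *		ioctl(fd, FIOSSHMLPGCNF, &slc);
 *	}
 */
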
static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;

#ifdef MAC
        error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif

        /*
         * Attempt to return sane-ish values for fstat() on a memory file
         * descriptor.
         */
        bzero(sb, sizeof(*sb));
        sb->st_blksize = PAGE_SIZE;
        sb->st_size = shmfd->shm_size;
        sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
        mtx_lock(&shm_timestamp_lock);
        sb->st_atim = shmfd->shm_atime;
        sb->st_ctim = shmfd->shm_ctime;
        sb->st_mtim = shmfd->shm_mtime;
        sb->st_birthtim = shmfd->shm_birthtime;
        sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
        sb->st_uid = shmfd->shm_uid;
        sb->st_gid = shmfd->shm_gid;
        mtx_unlock(&shm_timestamp_lock);
        sb->st_dev = shm_dev_ino;
        sb->st_ino = shmfd->shm_ino;
        sb->st_nlink = shmfd->shm_object->ref_count;
        sb->st_blocks = shmfd->shm_object->size /
            (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);

        return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
        struct shmfd *shmfd;

        shmfd = fp->f_data;
        fp->f_data = NULL;
        shm_drop(shmfd);

        return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
        int error;
        char *path;
        const char *pr_path;
        size_t pr_pathlen;

        path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
        pr_path = td->td_ucred->cr_prison->pr_path;

        /* Construct a full pathname for jailed callers. */
        pr_pathlen = strcmp(pr_path, "/") ==
            0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
        error = copyinstr(userpath_in, path + pr_pathlen,
            MAXPATHLEN - pr_pathlen, NULL);
        if (error != 0)
                goto out;

#ifdef KTRACE
        if (KTRPOINT(curthread, KTR_NAMEI))
                ktrnamei(path);
#endif

        /* Require paths to start with a '/' character. */
        if (path[pr_pathlen] != '/') {
                error = EINVAL;
                goto out;
        }

        *path_out = path;

out:
        if (error != 0)
                free(path, M_SHMFD);

        return (error);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t idx, nobjsize;
        vm_ooffset_t delta;
        int base, rv;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
        if (length == shmfd->shm_size)
                return (0);
        nobjsize = OFF_TO_IDX(length + PAGE_MASK);

        /* Are we shrinking?  If so, trim the end. */
        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);

                /*
                 * Disallow any requests to shrink the size if this
                 * object is mapped into the kernel.
                 */
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);

                /*
                 * Zero the truncated part of the last page.
                 */
                base = length & PAGE_MASK;
                if (base != 0) {
                        idx = OFF_TO_IDX(length);
retry:
                        m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
                        if (m != NULL) {
                                MPASS(vm_page_all_valid(m));
                        } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
                                m = vm_page_alloc(object, idx,
                                    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
                                if (m == NULL)
                                        goto retry;
                                vm_object_pip_add(object, 1);
                                VM_OBJECT_WUNLOCK(object);
                                rv = vm_pager_get_pages(object, &m, 1, NULL,
                                    NULL);
                                VM_OBJECT_WLOCK(object);
                                vm_object_pip_wakeup(object);
                                if (rv == VM_PAGER_OK) {
                                        /*
                                         * Since the page was not resident,
                                         * and therefore not recently
                                         * accessed, immediately enqueue it
                                         * for asynchronous laundering.  The
                                         * current operation is not regarded
                                         * as an access.
                                         */
                                        vm_page_launder(m);
                                } else {
                                        vm_page_free(m);
                                        VM_OBJECT_WUNLOCK(object);
                                        return (EIO);
                                }
                        }
                        if (m != NULL) {
                                pmap_zero_page_area(m, base, PAGE_SIZE - base);
                                KASSERT(vm_page_all_valid(m),
                                    ("shm_dotruncate: page %p is invalid", m));
                                vm_page_set_dirty(m);
                                vm_page_xunbusy(m);
                        }
                }
                delta = IDX_TO_OFF(object->size - nobjsize);

                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);

                /* Free the swap accounted for shm */
                swap_release_by_cred(delta, object->cred);
                object->charge -= delta;
        } else {
                if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                        return (EPERM);

                /* Try to reserve additional swap space. */
                delta = IDX_TO_OFF(nobjsize - object->size);
                if (!swap_reserve_by_cred(delta, object->cred))
                        return (ENOMEM);
                object->charge += delta;
        }
        shmfd->shm_size = length;
        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_ctime);
        shmfd->shm_mtime = shmfd->shm_ctime;
        mtx_unlock(&shm_timestamp_lock);
        object->size = nobjsize;
        return (0);
}

static int
shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t newobjsz;
        vm_pindex_t oldobjsz __unused;
        int aflags, error, i, psind, try;

        KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
        object = shmfd->shm_object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        rangelock_cookie_assert(rl_cookie, RA_WLOCKED);

        oldobjsz = object->size;
        newobjsz = OFF_TO_IDX(length);
        if (length == shmfd->shm_size)
                return (0);
        psind = shmfd->shm_lp_psind;
        if (psind == 0 && length != 0)
                return (EINVAL);
        if ((length & (pagesizes[psind] - 1)) != 0)
                return (EINVAL);

        if (length < shmfd->shm_size) {
                if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
                        return (EPERM);
                if (shmfd->shm_kmappings > 0)
                        return (EBUSY);
                return (ENOTSUP);       /* Pages are unmanaged. */
#if 0
                vm_object_page_remove(object, newobjsz, oldobjsz, 0);
                object->size = newobjsz;
                shmfd->shm_size = length;
                return (0);
#endif
        }

        if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
                return (EPERM);

        aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
        if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
                aflags |= VM_ALLOC_WAITFAIL;
        try = 0;

        /*
         * Extend shmfd and object, keeping all already fully
         * allocated large pages intact even on error, because the
         * dropped object lock might have allowed them to be mapped.
         */
        while (object->size < newobjsz) {
                m = vm_page_alloc_contig(object, object->size, aflags,
                    pagesizes[psind] / PAGE_SIZE, 0, ~0,
                    pagesizes[psind], 0,
                    VM_MEMATTR_DEFAULT);
                if (m == NULL) {
                        VM_OBJECT_WUNLOCK(object);
                        if (shmfd->shm_lp_alloc_policy ==
                            SHM_LARGEPAGE_ALLOC_NOWAIT ||
                            (shmfd->shm_lp_alloc_policy ==
                            SHM_LARGEPAGE_ALLOC_DEFAULT &&
                            try >= largepage_reclaim_tries)) {
                                VM_OBJECT_WLOCK(object);
                                return (ENOMEM);
                        }
                        error = vm_page_reclaim_contig(aflags,
                            pagesizes[psind] / PAGE_SIZE, 0, ~0,
                            pagesizes[psind], 0) ? 0 :
                            vm_wait_intr(object);
                        if (error != 0) {
                                VM_OBJECT_WLOCK(object);
                                return (error);
                        }
                        try++;
                        VM_OBJECT_WLOCK(object);
                        continue;
                }
                try = 0;
                for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
                        if ((m[i].flags & PG_ZERO) == 0)
                                pmap_zero_page(&m[i]);
                        vm_page_valid(&m[i]);
                        vm_page_xunbusy(&m[i]);
                }
                object->size += OFF_TO_IDX(pagesizes[psind]);
                shmfd->shm_size += pagesizes[psind];
                atomic_add_long(&count_largepages[psind], 1);
                vm_wire_add(atop(pagesizes[psind]));
        }
        return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
        int error;

        VM_OBJECT_WLOCK(shmfd->shm_object);
        error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
            length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
            rl_cookie);
        VM_OBJECT_WUNLOCK(shmfd->shm_object);
        return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
        void *rl_cookie;
        int error;

        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
            &shmfd->shm_mtx);
        error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
{
        struct shmfd *shmfd;

        shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
        shmfd->shm_size = 0;
        shmfd->shm_uid = ucred->cr_uid;
        shmfd->shm_gid = ucred->cr_gid;
        shmfd->shm_mode = mode;
        if (largepage) {
                shmfd->shm_object = phys_pager_allocate(NULL,
                    &shm_largepage_phys_ops, NULL, shmfd->shm_size,
                    VM_PROT_DEFAULT, 0, ucred);
                shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
        } else {
                shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
                    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
        }
        KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
        vfs_timestamp(&shmfd->shm_birthtime);
        shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
            shmfd->shm_birthtime;
        shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
        refcount_init(&shmfd->shm_refs, 1);
        mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
        rangelock_init(&shmfd->shm_rl);
#ifdef MAC
        mac_posixshm_init(shmfd);
        mac_posixshm_create(ucred, shmfd);
#endif

        return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

        refcount_acquire(&shmfd->shm_refs);
        return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

        if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
                mac_posixshm_destroy(shmfd);
#endif
                rangelock_destroy(&shmfd->shm_rl);
                mtx_destroy(&shmfd->shm_mtx);
                vm_object_deallocate(shmfd->shm_object);
                free(shmfd, M_SHMFD);
        }
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
        accmode_t accmode;
        int error;

        accmode = 0;
        if (flags & FREAD)
                accmode |= VREAD;
        if (flags & FWRITE)
                accmode |= VWRITE;
        mtx_lock(&shm_timestamp_lock);
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
            accmode, ucred);
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

static void
shm_init(void *arg)
{
        char name[32];
        int i;

        mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
        sx_init(&shm_dict_lock, "shm dictionary");
        shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
        new_unrhdr64(&shm_ino_unr, 1);
        shm_dev_ino = devfs_alloc_cdp_inode();
        KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));

        for (i = 1; i < MAXPAGESIZES; i++) {
                if (pagesizes[i] == 0)
                        break;
#define M       (1024 * 1024)
#define G       (1024 * M)
                if (pagesizes[i] >= G)
                        snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
                else if (pagesizes[i] >= M)
                        snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
                else
                        snprintf(name, sizeof(name), "%lu", pagesizes[i]);
#undef G
#undef M
                SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
                    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
                    "number of non-transient largepages allocated");
        }
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
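
/*
 * As a hedged illustration: on an amd64 machine with 2M and 1G
 * superpages enabled, the loop in shm_init() above typically creates
 * the read-only counters vm.largepages.2M and vm.largepages.1G; the
 * exact names depend on the contents of pagesizes[].
 */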

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
        struct shm_mapping *map;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0)
                        return (map->sm_shmfd);
        }

        return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
        struct shm_mapping *map;

        map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
        map->sm_path = path;
        map->sm_fnv = fnv;
        map->sm_shmfd = shm_hold(shmfd);
        shmfd->shm_path = path;
        LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
        struct shm_mapping *map;
        int error;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
                        error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
                        if (error)
                                return (error);
#endif
                        error = shm_access(map->sm_shmfd, ucred,
                            FREAD | FWRITE);
                        if (error)
                                return (error);
                        map->sm_shmfd->shm_path = NULL;
                        LIST_REMOVE(map, sm_link);
                        shm_drop(map->sm_shmfd);
                        free(map->sm_path, M_SHMFD);
                        free(map, M_SHMFD);
                        return (0);
                }
        }

        return (ENOENT);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
        struct pwddesc *pdp;
        struct shmfd *shmfd;
        struct file *fp;
        char *path;
        void *rl_cookie;
        Fnv32_t fnv;
        mode_t cmode;
        int error, fd, initial_seals;
        bool largepage;

        if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
            SHM_LARGEPAGE)) != 0)
                return (EINVAL);

        initial_seals = F_SEAL_SEAL;
        if ((shmflags & SHM_ALLOW_SEALING) != 0)
                initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
        /*
         * shm_open(2) is only allowed for anonymous objects.
         */
        if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
                return (ECAPMODE);
#endif

        AUDIT_ARG_FFLAGS(flags);
        AUDIT_ARG_MODE(mode);

        if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
                return (EINVAL);

        if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
                return (EINVAL);

        largepage = (shmflags & SHM_LARGEPAGE) != 0;
        if (largepage && !PMAP_HAS_LARGEPAGES)
                return (ENOTTY);

        /*
         * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
         * If the decision is made later to allow additional seals, care must be
         * taken below to ensure that the seals are properly set if the shmfd
         * already existed -- this currently assumes that only F_SEAL_SEAL can
         * be set and doesn't take further precautions to ensure the validity of
         * the seals being added with respect to current mappings.
         */
        if ((initial_seals & ~F_SEAL_SEAL) != 0)
                return (EINVAL);

        pdp = td->td_proc->p_pd;
        cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;

        /*
         * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
         * by POSIX.  We allow it to be unset here so that an in-kernel
         * interface may be written as a thin layer around shm, optionally not
         * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
         * in sys_shm_open() to keep this implementation compliant.
         */
        error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
        if (error)
                return (error);

        /* A SHM_ANON path pointer creates an anonymous object. */
        if (userpath == SHM_ANON) {
                /* A read-only anonymous object is pointless. */
                if ((flags & O_ACCMODE) == O_RDONLY) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (EINVAL);
                }
                shmfd = shm_alloc(td->td_ucred, cmode, largepage);
                shmfd->shm_seals = initial_seals;
                shmfd->shm_flags = shmflags;
        } else {
                error = shm_copyin_path(td, userpath, &path);
                if (error != 0) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }

                AUDIT_ARG_UPATH1_CANON(path);
                fnv = fnv_32_str(path, FNV1_32_INIT);
                sx_xlock(&shm_dict_lock);
                shmfd = shm_lookup(path, fnv);
                if (shmfd == NULL) {
                        /* Object does not yet exist, create it if requested. */
                        if (flags & O_CREAT) {
#ifdef MAC
                                error = mac_posixshm_check_create(td->td_ucred,
                                    path);
                                if (error == 0) {
#endif
                                        shmfd = shm_alloc(td->td_ucred, cmode,
                                            largepage);
                                        shmfd->shm_seals = initial_seals;
                                        shmfd->shm_flags = shmflags;
                                        shm_insert(path, fnv, shmfd);
#ifdef MAC
                                }
#endif
                        } else {
                                free(path, M_SHMFD);
                                error = ENOENT;
                        }
                } else {
                        rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                            &shmfd->shm_mtx);

                        /*
                         * kern_shm_open() likely shouldn't ever error out on
                         * trying to set a seal that already exists, unlike
                         * F_ADD_SEALS.  This would break terribly as
                         * shm_open(2) actually sets F_SEAL_SEAL to maintain
                         * historical behavior where the underlying file could
                         * not be sealed.
                         */
                        initial_seals &= ~shmfd->shm_seals;

                        /*
                         * Object already exists, obtain a new
                         * reference if requested and permitted.
                         */
                        free(path, M_SHMFD);

                        /*
                         * initial_seals cannot set additional seals if
                         * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is
                         * set, then we have already removed that one from
                         * initial_seals.  This is currently redundant as we
                         * only allow setting F_SEAL_SEAL at creation time, but
                         * it's cheap to check and decreases the effort required
                         * to allow additional seals.
                         */
                        if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
                            initial_seals != 0)
                                error = EPERM;
                        else if ((flags & (O_CREAT | O_EXCL)) ==
                            (O_CREAT | O_EXCL))
                                error = EEXIST;
                        else if (shmflags != 0 && shmflags != shmfd->shm_flags)
                                error = EINVAL;
                        else {
#ifdef MAC
                                error = mac_posixshm_check_open(td->td_ucred,
                                    shmfd, FFLAGS(flags & O_ACCMODE));
                                if (error == 0)
#endif
                                error = shm_access(shmfd, td->td_ucred,
                                    FFLAGS(flags & O_ACCMODE));
                        }

                        /*
                         * Truncate the file back to zero length if
                         * O_TRUNC was specified and the object was
                         * opened with read/write.
                         */
                        if (error == 0 &&
                            (flags & (O_ACCMODE | O_TRUNC)) ==
                            (O_RDWR | O_TRUNC)) {
                                VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
                                error = mac_posixshm_check_truncate(
                                        td->td_ucred, fp->f_cred, shmfd);
                                if (error == 0)
#endif
                                        error = shm_dotruncate_locked(shmfd, 0,
                                            rl_cookie);
                                VM_OBJECT_WUNLOCK(shmfd->shm_object);
                        }
                        if (error == 0) {
                                /*
                                 * Currently we only allow F_SEAL_SEAL to be
                                 * set initially.  As noted above, this would
                                 * need to be reworked should that change.
                                 */
                                shmfd->shm_seals |= initial_seals;
                                shm_hold(shmfd);
                        }
                        rangelock_unlock(&shmfd->shm_rl, rl_cookie,
                            &shmfd->shm_mtx);
                }
                sx_xunlock(&shm_dict_lock);

                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }
        }

        finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

        td->td_retval[0] = fd;
        fdrop(fp, td);

        return (0);
}

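/*
 * Illustrative sketch, not part of the original file: one userspace
 * path into kern_shm_open2() with sealing enabled.  On FreeBSD,
 * memfd_create(3) is built on top of this interface; the name and
 * size below are arbitrary and error handling is omitted.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 65536);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 */
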
/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

        return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
            uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
        char *path;
        Fnv32_t fnv;
        int error;

        error = shm_copyin_path(td, uap->path, &path);
        if (error != 0)
                return (error);

        AUDIT_ARG_UPATH1_CANON(path);
        fnv = fnv_32_str(path, FNV1_32_INIT);
        sx_xlock(&shm_dict_lock);
        error = shm_remove(path, fnv, td->td_ucred);
        sx_xunlock(&shm_dict_lock);
        free(path, M_SHMFD);

        return (error);
}

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
        char *path_from = NULL, *path_to = NULL;
        Fnv32_t fnv_from, fnv_to;
        struct shmfd *fd_from;
        struct shmfd *fd_to;
        int error;
        int flags;

        flags = uap->flags;
        AUDIT_ARG_FFLAGS(flags);

        /*
         * Make sure the user passed only valid flags.
         * If you add a new flag, please add a new term here.
         */
        if ((flags & ~(
            SHM_RENAME_NOREPLACE |
            SHM_RENAME_EXCHANGE
            )) != 0) {
                error = EINVAL;
                goto out;
        }

        /*
         * EXCHANGE and NOREPLACE don't quite make sense together. Let's
         * force the user to choose one or the other.
         */
        if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
            (flags & SHM_RENAME_EXCHANGE) != 0) {
                error = EINVAL;
                goto out;
        }

        /* Renaming to or from anonymous makes no sense */
        if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
                error = EINVAL;
                goto out;
        }

        error = shm_copyin_path(td, uap->path_from, &path_from);
        if (error != 0)
                goto out;

        error = shm_copyin_path(td, uap->path_to, &path_to);
        if (error != 0)
                goto out;

        AUDIT_ARG_UPATH1_CANON(path_from);
        AUDIT_ARG_UPATH2_CANON(path_to);

        /* Rename with from/to equal is a no-op */
        if (strcmp(path_from, path_to) == 0)
                goto out;

        fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
        fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

        sx_xlock(&shm_dict_lock);

        fd_from = shm_lookup(path_from, fnv_from);
        if (fd_from == NULL) {
                error = ENOENT;
                goto out_locked;
        }

        fd_to = shm_lookup(path_to, fnv_to);
        if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
                error = EEXIST;
                goto out_locked;
        }

        /*
         * Unconditionally prevents shm_remove from invalidating the 'from'
         * shm's state.
         */
        shm_hold(fd_from);
        error = shm_remove(path_from, fnv_from, td->td_ucred);

        /*
         * If this returns ENOENT, one of our assumptions failed (e.g.,
         * the lock did not protect us).
         */
        KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
            path_from));
        if (error != 0) {
                shm_drop(fd_from);
                goto out_locked;
        }

        /*
         * If we are exchanging, we need to ensure the shm_remove below
         * doesn't invalidate the dest shm's state.
         */
        if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
                shm_hold(fd_to);

        /*
         * NOTE: if path_to is not already in the hash, c'est la vie;
         * it simply means we have nothing already at path_to to unlink.
         * That is the ENOENT case.
         *
         * If we somehow don't have access to unlink this object, but
         * did for the shm at path_from, then relink the shm to path_from
         * and abort with EACCES.
         *
         * All other errors: that is weird; let's relink and abort the
         * operation.
         */
        error = shm_remove(path_to, fnv_to, td->td_ucred);
        if (error != 0 && error != ENOENT) {
                shm_insert(path_from, fnv_from, fd_from);
                shm_drop(fd_from);
                /* Don't free path_from now, since the hash references it */
                path_from = NULL;
                goto out_locked;
        }

        error = 0;

        shm_insert(path_to, fnv_to, fd_from);

        /* Don't free path_to now, since the hash references it */
        path_to = NULL;

        /* We kept a ref when we removed, and incremented again in insert */
        shm_drop(fd_from);
        KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
            fd_from->shm_refs));

        if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
                shm_insert(path_from, fnv_from, fd_to);
                path_from = NULL;
                shm_drop(fd_to);
                KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
                    fd_to->shm_refs));
        }

out_locked:
        sx_xunlock(&shm_dict_lock);

out:
        free(path_from, M_SHMFD);
        free(path_to, M_SHMFD);
        return (error);
}

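/*
 * Illustrative sketch, not part of the original file: how the rename
 * flags validated above look from userspace.  The object names are
 * arbitrary and error handling is omitted.
 *
 *	#include <sys/mman.h>
 *
 *	shm_rename("/old", "/new", 0);			   // replace "/new"
 *	shm_rename("/old", "/new", SHM_RENAME_NOREPLACE);  // EEXIST if "/new" exists
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);	   // atomic swap
 */
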
1416 static int
1417 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1418     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1419     vm_ooffset_t foff, struct thread *td)
1420 {
1421         struct vmspace *vms;
1422         vm_map_entry_t next_entry, prev_entry;
1423         vm_offset_t align, mask, maxaddr;
1424         int docow, error, rv, try;
1425         bool curmap;
1426
1427         if (shmfd->shm_lp_psind == 0)
1428                 return (EINVAL);
1429
1430         /* MAP_PRIVATE is disabled */
1431         if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1432             MAP_NOCORE |
1433 #ifdef MAP_32BIT
1434             MAP_32BIT |
1435 #endif
1436             MAP_ALIGNMENT_MASK)) != 0)
1437                 return (EINVAL);
1438
1439         vms = td->td_proc->p_vmspace;
1440         curmap = map == &vms->vm_map;
1441         if (curmap) {
1442                 error = kern_mmap_racct_check(td, map, size);
1443                 if (error != 0)
1444                         return (error);
1445         }
1446
1447         docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1448         docow |= MAP_INHERIT_SHARE;
1449         if ((flags & MAP_NOCORE) != 0)
1450                 docow |= MAP_DISABLE_COREDUMP;
1451
1452         mask = pagesizes[shmfd->shm_lp_psind] - 1;
1453         if ((foff & mask) != 0)
1454                 return (EINVAL);
1455         maxaddr = vm_map_max(map);
1456 #ifdef MAP_32BIT
1457         if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1458                 maxaddr = MAP_32BIT_MAX_ADDR;
1459 #endif
1460         if (size == 0 || (size & mask) != 0 ||
1461             (*addr != 0 && ((*addr & mask) != 0 ||
1462             *addr + size < *addr || *addr + size > maxaddr)))
1463                 return (EINVAL);
1464
1465         align = flags & MAP_ALIGNMENT_MASK;
1466         if (align == 0) {
1467                 align = pagesizes[shmfd->shm_lp_psind];
1468         } else if (align == MAP_ALIGNED_SUPER) {
1469                 if (shmfd->shm_lp_psind != 1)
1470                         return (EINVAL);
1471                 align = pagesizes[1];
1472         } else {
1473                 align >>= MAP_ALIGNMENT_SHIFT;
1474                 align = 1ULL << align;
1475                 /* Also handles overflow. */
1476                 if (align < pagesizes[shmfd->shm_lp_psind])
1477                         return (EINVAL);
1478         }
1479
1480         vm_map_lock(map);
1481         if ((flags & MAP_FIXED) == 0) {
1482                 try = 1;
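                /*
                 * Pass 1 searches from the caller's hint (or from just
                 * above the data segment); on failure, pass 2 retries
                 * from the bottom of the map.
                 */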
1483                 if (curmap && (*addr == 0 ||
1484                     (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1485                     *addr < round_page((vm_offset_t)vms->vm_daddr +
1486                     lim_max(td, RLIMIT_DATA))))) {
1487                         *addr = roundup2((vm_offset_t)vms->vm_daddr +
1488                             lim_max(td, RLIMIT_DATA),
1489                             pagesizes[shmfd->shm_lp_psind]);
1490                 }
1491 again:
1492                 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1493                 if (rv != KERN_SUCCESS) {
1494                         if (try == 1) {
1495                                 try = 2;
1496                                 *addr = vm_map_min(map);
1497                                 if ((*addr & mask) != 0)
1498                         *addr = (*addr + mask) & ~mask;
1499                                 goto again;
1500                         }
1501                         goto fail1;
1502                 }
1503         } else if ((flags & MAP_EXCL) == 0) {
1504                 rv = vm_map_delete(map, *addr, *addr + size);
1505                 if (rv != KERN_SUCCESS)
1506                         goto fail1;
1507         } else {
1508                 error = ENOSPC;
1509                 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1510                         goto fail;
1511                 next_entry = vm_map_entry_succ(prev_entry);
1512                 if (next_entry->start < *addr + size)
1513                         goto fail;
1514         }
1515
1516         rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1517             prot, max_prot, docow);
1518 fail1:
1519         error = vm_mmap_to_errno(rv);
1520 fail:
1521         vm_map_unlock(map);
1522         return (error);
1523 }
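
/*
 * A minimal userspace sketch that satisfies the size and alignment
 * checks above, assuming the shm_create_largepage(3) wrapper, a machine
 * with superpages (psind 1), and an illustrative object name:
 *
 *	#include <sys/param.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	size_t ps[MAXPAGESIZES];
 *	getpagesizes(ps, MAXPAGESIZES);	// ps[1] is the superpage size
 *	int fd = shm_create_largepage("/lp_obj", O_CREAT | O_RDWR, 1,
 *	    SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
 *	ftruncate(fd, ps[1]);		// must be a multiple of ps[1]
 *	void *p = mmap(NULL, ps[1], PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);			// aligned to ps[1] by default
 */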
1524
1525 static int
1526 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1527     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1528     vm_ooffset_t foff, struct thread *td)
1529 {
1530         struct shmfd *shmfd;
1531         vm_prot_t maxprot;
1532         int error;
1533         bool writecnt;
1534         void *rl_cookie;
1535
1536         shmfd = fp->f_data;
1537         maxprot = VM_PROT_NONE;
1538
1539         rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1540             &shmfd->shm_mtx);
1541         /* FREAD should always be set. */
1542         if ((fp->f_flag & FREAD) != 0)
1543                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1544
1545         /*
1546          * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
1547          * mapping with a write seal applied.  Private mappings are always
1548          * writeable.
1549          */
1550         if ((flags & MAP_SHARED) == 0) {
1551                 cap_maxprot |= VM_PROT_WRITE;
1552                 maxprot |= VM_PROT_WRITE;
1553                 writecnt = false;
1554         } else {
1555                 if ((fp->f_flag & FWRITE) != 0 &&
1556                     (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1557                         maxprot |= VM_PROT_WRITE;
1558
1559                 /*
1560                  * Any mappings from a writable descriptor may be upgraded to
1561                  * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1562                  * applied between the open and subsequent mmap(2).  We want to
1563                  * reject application of a write seal as long as any such
1564                  * mapping exists so that the seal cannot be trivially bypassed.
1565                  */
1566                 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1567                 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1568                         error = EACCES;
1569                         goto out;
1570                 }
1571         }
1572         maxprot &= cap_maxprot;
1573
1574         /* See comment in vn_mmap(). */
1575         if (
1576 #ifdef _LP64
1577             objsize > OFF_MAX ||
1578 #endif
1579             foff > OFF_MAX - objsize) {
1580                 error = EINVAL;
1581                 goto out;
1582         }
1583
1584 #ifdef MAC
1585         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1586         if (error != 0)
1587                 goto out;
1588 #endif
1589
1590         mtx_lock(&shm_timestamp_lock);
1591         vfs_timestamp(&shmfd->shm_atime);
1592         mtx_unlock(&shm_timestamp_lock);
1593         vm_object_reference(shmfd->shm_object);
1594
1595         if (shm_largepage(shmfd)) {
1596                 writecnt = false;
1597                 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1598                     maxprot, flags, foff, td);
1599         } else {
1600                 if (writecnt) {
1601                         vm_pager_update_writecount(shmfd->shm_object, 0,
1602                             objsize);
1603                 }
1604                 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1605                     shmfd->shm_object, foff, writecnt, td);
1606         }
1607         if (error != 0) {
1608                 if (writecnt)
1609                         vm_pager_release_writecount(shmfd->shm_object, 0,
1610                             objsize);
1611                 vm_object_deallocate(shmfd->shm_object);
1612         }
1613 out:
1614         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1615         return (error);
1616 }
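
/*
 * A minimal userspace sketch of the seal check above: once F_SEAL_WRITE
 * is applied, a shared writable mapping is refused with EACCES, while a
 * private mapping still succeeds:
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *					// fails with EACCES
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 *					// succeeds
 */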
1617
1618 static int
1619 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1620     struct thread *td)
1621 {
1622         struct shmfd *shmfd;
1623         int error;
1624
1625         error = 0;
1626         shmfd = fp->f_data;
1627         mtx_lock(&shm_timestamp_lock);
1628         /*
1629          * SUSv4 says that x bits of permission need not be affected.
1630          * Be consistent with our shm_open there.
1631          */
1632 #ifdef MAC
1633         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1634         if (error != 0)
1635                 goto out;
1636 #endif
1637         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1638             VADMIN, active_cred);
1639         if (error != 0)
1640                 goto out;
1641         shmfd->shm_mode = mode & ACCESSPERMS;
1642 out:
1643         mtx_unlock(&shm_timestamp_lock);
1644         return (error);
1645 }
1646
1647 static int
1648 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1649     struct thread *td)
1650 {
1651         struct shmfd *shmfd;
1652         int error;
1653
1654         error = 0;
1655         shmfd = fp->f_data;
1656         mtx_lock(&shm_timestamp_lock);
1657 #ifdef MAC
1658         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1659         if (error != 0)
1660                 goto out;
1661 #endif
1662         if (uid == (uid_t)-1)
1663                 uid = shmfd->shm_uid;
1664         if (gid == (gid_t)-1)
1665                 gid = shmfd->shm_gid;
1666         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1667             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1668             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1669                 goto out;
1670         shmfd->shm_uid = uid;
1671         shmfd->shm_gid = gid;
1672 out:
1673         mtx_unlock(&shm_timestamp_lock);
1674         return (error);
1675 }
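
/*
 * Both handlers back the generic fchmod(2)/fchown(2) paths, so ordinary
 * descriptor management applies to shm objects; gid below is an
 * illustrative group the caller belongs to:
 *
 *	int fd = shm_open("/obj", O_RDWR | O_CREAT, 0600);
 *	fchmod(fd, 0640);		// mode is masked with ACCESSPERMS
 *	fchown(fd, (uid_t)-1, gid);	// needs ownership or PRIV_VFS_CHOWN
 */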
1676
1677 /*
1678  * Helper routines to allow the backing object of a shared memory file
1679  * descriptor to be mapped in the kernel.
1680  */
1681 int
1682 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1683 {
1684         struct shmfd *shmfd;
1685         vm_offset_t kva, ofs;
1686         vm_object_t obj;
1687         int rv;
1688
1689         if (fp->f_type != DTYPE_SHM)
1690                 return (EINVAL);
1691         shmfd = fp->f_data;
1692         obj = shmfd->shm_object;
1693         VM_OBJECT_WLOCK(obj);
1694         /*
1695          * XXXRW: This validation is probably insufficient, and subject to
1696          * sign errors.  It should be fixed.
1697          */
1698         if (offset >= shmfd->shm_size ||
1699             offset + size > round_page(shmfd->shm_size)) {
1700                 VM_OBJECT_WUNLOCK(obj);
1701                 return (EINVAL);
1702         }
1703
1704         shmfd->shm_kmappings++;
1705         vm_object_reference_locked(obj);
1706         VM_OBJECT_WUNLOCK(obj);
1707
1708         /* Map the object into the kernel_map and wire it. */
1709         kva = vm_map_min(kernel_map);
1710         ofs = offset & PAGE_MASK;
1711         offset = trunc_page(offset);
1712         size = round_page(size + ofs);
1713         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1714             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1715             VM_PROT_READ | VM_PROT_WRITE, 0);
1716         if (rv == KERN_SUCCESS) {
1717                 rv = vm_map_wire(kernel_map, kva, kva + size,
1718                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1719                 if (rv == KERN_SUCCESS) {
1720                         *memp = (void *)(kva + ofs);
1721                         return (0);
1722                 }
1723                 vm_map_remove(kernel_map, kva, kva + size);
1724         } else
1725                 vm_object_deallocate(obj);
1726
1727         /* On failure, drop our mapping reference. */
1728         VM_OBJECT_WLOCK(obj);
1729         shmfd->shm_kmappings--;
1730         VM_OBJECT_WUNLOCK(obj);
1731
1732         return (vm_mmap_to_errno(rv));
1733 }
1734
1735 /*
1736  * We require the caller to unmap the entire entry.  This allows us to
1737  * safely decrement shm_kmappings when a mapping is removed.
1738  */
1739 int
1740 shm_unmap(struct file *fp, void *mem, size_t size)
1741 {
1742         struct shmfd *shmfd;
1743         vm_map_entry_t entry;
1744         vm_offset_t kva, ofs;
1745         vm_object_t obj;
1746         vm_pindex_t pindex;
1747         vm_prot_t prot;
1748         boolean_t wired;
1749         vm_map_t map;
1750         int rv;
1751
1752         if (fp->f_type != DTYPE_SHM)
1753                 return (EINVAL);
1754         shmfd = fp->f_data;
1755         kva = (vm_offset_t)mem;
1756         ofs = kva & PAGE_MASK;
1757         kva = trunc_page(kva);
1758         size = round_page(size + ofs);
1759         map = kernel_map;
1760         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1761             &obj, &pindex, &prot, &wired);
1762         if (rv != KERN_SUCCESS)
1763                 return (EINVAL);
1764         if (entry->start != kva || entry->end != kva + size) {
1765                 vm_map_lookup_done(map, entry);
1766                 return (EINVAL);
1767         }
1768         vm_map_lookup_done(map, entry);
1769         if (obj != shmfd->shm_object)
1770                 return (EINVAL);
1771         vm_map_remove(map, kva, kva + size);
1772         VM_OBJECT_WLOCK(obj);
1773         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1774         shmfd->shm_kmappings--;
1775         VM_OBJECT_WUNLOCK(obj);
1776         return (0);
1777 }
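
/*
 * A sketch of the intended pairing of the two helpers above, given a
 * DTYPE_SHM struct file *fp and a byte range (off, len): the same
 * address and size must be handed back so shm_kmappings stays balanced:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, off, &mem);
 *	if (error == 0) {
 *		// ... access len bytes at mem ...
 *		error = shm_unmap(fp, mem, len);
 *	}
 */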
1778
1779 static int
1780 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1781 {
1782         const char *path, *pr_path;
1783         size_t pr_pathlen;
1784         bool visible;
1785
1786         sx_assert(&shm_dict_lock, SA_LOCKED);
1787         kif->kf_type = KF_TYPE_SHM;
1788         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1789         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1790         if (shmfd->shm_path != NULL) {
1791                 path = shmfd->shm_path;
1792                 pr_path = curthread->td_ucred->cr_prison->pr_path;
1793                 if (strcmp(pr_path, "/") != 0) {
1794                         /* Return the jail-rooted pathname. */
1795                         pr_pathlen = strlen(pr_path);
1796                         visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1797                             path[pr_pathlen] == '/';
1798                         if (list && !visible)
1799                                 return (EPERM);
1800                         if (visible)
1801                                 path += pr_pathlen;
1802                 }
1803                 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1804         }
1807         return (0);
1808 }
1809
1810 static int
1811 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1812     struct filedesc *fdp __unused)
1813 {
1814         int res;
1815
1816         sx_slock(&shm_dict_lock);
1817         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1818         sx_sunlock(&shm_dict_lock);
1819         return (res);
1820 }
1821
1822 static int
1823 shm_add_seals(struct file *fp, int seals)
1824 {
1825         struct shmfd *shmfd;
1826         void *rl_cookie;
1827         vm_ooffset_t writemappings;
1828         int error, nseals;
1829
1830         error = 0;
1831         shmfd = fp->f_data;
1832         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1833             &shmfd->shm_mtx);
1834
1835         /* Even already-set seals should result in EPERM. */
1836         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1837                 error = EPERM;
1838                 goto out;
1839         }
1840         nseals = seals & ~shmfd->shm_seals;
1841         if ((nseals & F_SEAL_WRITE) != 0) {
1842                 if (shm_largepage(shmfd)) {
1843                         error = ENOTSUP;
1844                         goto out;
1845                 }
1846
1847                 /*
1848                  * The rangelock above prevents writable mappings from being
1849                  * added after we've started applying seals.  The RLOCK here
1850                  * is to avoid torn reads on ILP32 arches as unmapping/reducing
1851                  * writemappings will be done without a rangelock.
1852                  */
1853                 VM_OBJECT_RLOCK(shmfd->shm_object);
1854                 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1855                 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1856                 /* kmappings are also writable */
1857                 if (writemappings > 0) {
1858                         error = EBUSY;
1859                         goto out;
1860                 }
1861         }
1862         shmfd->shm_seals |= nseals;
1863 out:
1864         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1865         return (error);
1866 }
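
/*
 * A minimal userspace sketch of the EBUSY path above: F_SEAL_WRITE is
 * refused while any writable shared mapping exists, and applies once the
 * mapping is gone:
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	// fails with EBUSY
 *	munmap(p, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	// succeeds
 */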
1867
1868 static int
1869 shm_get_seals(struct file *fp, int *seals)
1870 {
1871         struct shmfd *shmfd;
1872
1873         shmfd = fp->f_data;
1874         *seals = shmfd->shm_seals;
1875         return (0);
1876 }
1877
1878 static int
1879 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
1880 {
1881         void *rl_cookie;
1882         struct shmfd *shmfd;
1883         size_t size;
1884         int error;
1885
1886         /* This assumes that the caller already checked for overflow. */
1887         error = 0;
1888         shmfd = fp->f_data;
1889         size = offset + len;
1890
1891         /*
1892          * Just grab the rangelock for the range that we may be attempting to
1893          * grow, rather than blocking read/write for regions we won't be
1894          * touching while this (potential) resize is in progress.  Other
1895          * attempts to resize the shmfd will have to take a write lock from 0 to
1896          * OFF_MAX, so this being potentially beyond the current usable range of
1897          * the shmfd is not necessarily a concern.  If other mechanisms are
1898          * added to grow a shmfd, this may need to be re-evaluated.
1899          */
1900         rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
1901             &shmfd->shm_mtx);
1902         if (size > shmfd->shm_size)
1903                 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
1904         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1905         /* Translate to posix_fallocate(2) return value as needed. */
1906         if (error == ENOMEM)
1907                 error = ENOSPC;
1908         return (error);
1909 }
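
/*
 * This handler backs posix_fallocate(2) on shm descriptors; in a
 * userspace sketch, the ENOMEM to ENOSPC translation above surfaces as:
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	int error = posix_fallocate(fd, 0, 1024 * 1024);
 *	// error == ENOSPC if the 1 MB swap reservation cannot be made
 */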
1910
1911 static int
1912 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
1913 {
1914         struct shm_mapping *shmm;
1915         struct sbuf sb;
1916         struct kinfo_file kif;
1917         u_long i;
1918         ssize_t curlen;
1919         int error, error2;
1920
1921         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
1922         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
1923         curlen = 0;
1924         error = 0;
1925         sx_slock(&shm_dict_lock);
1926         for (i = 0; i < shm_hash + 1; i++) {
1927                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
1928                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
1929                             &kif, true);
1930                         if (error == EPERM) {
1931                                 error = 0;
1932                                 continue;
1933                         }
1934                         if (error != 0)
1935                                 break;
1936                         pack_kinfo(&kif);
1937                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
1938                             0 : ENOMEM;
1939                         if (error != 0)
1940                                 break;
1941                         curlen += kif.kf_structsize;
1942                 }
1943         }
1944         sx_sunlock(&shm_dict_lock);
1945         error2 = sbuf_finish(&sb);
1946         sbuf_delete(&sb);
1947         return (error != 0 ? error : error2);
1948 }
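
/*
 * The handler above emits one packed kinfo_file record per named object
 * and, via CTLFLAG_PRISON below, is reachable from jails, which only see
 * objects under their own root.  posixshmcontrol(1) consumes it, e.g.:
 *
 *	$ posixshmcontrol ls
 */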
1949
1950 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
1951     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
1952     NULL, 0, sysctl_posix_shm_list, "",
1953     "POSIX SHM list");
1954
1955 int
1956 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
1957     struct filecaps *caps)
1958 {
1959
1960         return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
1961 }
1962
1963 /*
1964  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
1965  * caller, and libc will enforce it for the traditional shm_open() call.  This
1966  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
1967  * interface also includes a 'name' argument that is currently unused, but could
1968  * potentially be exported later via some interface for debugging purposes.
1969  * From the kernel's perspective, it is optional.  Individual consumers like
1970  * memfd_create() may require it in order to be compatible with other systems
1971  * implementing the same function.
1972  */
1973 int
1974 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
1975 {
1976
1977         return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
1978             uap->shmflags, NULL, uap->name));
1979 }
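
/*
 * A sketch of a memfd_create()-style use of this entry point, assuming
 * the libc wrapper requests an anonymous object with grow-on-write and
 * optional sealing (the exact flag plumbing lives in libc):
 *
 *	shm_open2(SHM_ANON, O_RDWR | O_CLOEXEC, 0,
 *	    SHM_GROW_ON_WRITE | SHM_ALLOW_SEALING, "memfd:demo");
 */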