1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5  * Copyright 2020 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by BAE Systems, the University of
9  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11  * Computing (TC) research program.
12  *
13  * Portions of this software were developed by Konstantin Belousov
14  * under sponsorship from the FreeBSD Foundation.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37
38 /*
39  * Support for shared swap-backed anonymous memory objects via
40  * shm_open(2), shm_rename(2), and shm_unlink(2).
41  * While most of the implementation is here, vm_mmap.c contains the
42  * changes to the mapping logic.
43  *
44  * posixshmcontrol(1) allows users to inspect the state of the memory
45  * objects.  The per-uid swap resource limit controls the total amount
46  * of memory that a user can consume for anonymous objects, including
47  * shared ones.
48  */
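
/*
 * For reference, a minimal userspace usage sketch of the interfaces
 * implemented below (illustrative only, not part of this file; error
 * handling omitted):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	p[0] = 1;	// store is visible through the shared object
 *	munmap(p, 4096);
 *	close(fd);
 */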
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52
53 #include "opt_capsicum.h"
54 #include "opt_ktrace.h"
55
56 #include <sys/param.h>
57 #include <sys/capsicum.h>
58 #include <sys/conf.h>
59 #include <sys/fcntl.h>
60 #include <sys/file.h>
61 #include <sys/filedesc.h>
62 #include <sys/filio.h>
63 #include <sys/fnv_hash.h>
64 #include <sys/kernel.h>
65 #include <sys/limits.h>
66 #include <sys/uio.h>
67 #include <sys/signal.h>
68 #include <sys/jail.h>
69 #include <sys/ktrace.h>
70 #include <sys/lock.h>
71 #include <sys/malloc.h>
72 #include <sys/mman.h>
73 #include <sys/mutex.h>
74 #include <sys/priv.h>
75 #include <sys/proc.h>
76 #include <sys/refcount.h>
77 #include <sys/resourcevar.h>
78 #include <sys/rwlock.h>
79 #include <sys/sbuf.h>
80 #include <sys/stat.h>
81 #include <sys/syscallsubr.h>
82 #include <sys/sysctl.h>
83 #include <sys/sysproto.h>
84 #include <sys/systm.h>
85 #include <sys/sx.h>
86 #include <sys/time.h>
87 #include <sys/vmmeter.h>
88 #include <sys/vnode.h>
89 #include <sys/unistd.h>
90 #include <sys/user.h>
91
92 #include <security/audit/audit.h>
93 #include <security/mac/mac_framework.h>
94
95 #include <vm/vm.h>
96 #include <vm/vm_param.h>
97 #include <vm/pmap.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vm_map.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_pageout.h>
104 #include <vm/vm_pager.h>
105 #include <vm/swap_pager.h>
106
107 struct shm_mapping {
108         char            *sm_path;
109         Fnv32_t         sm_fnv;
110         struct shmfd    *sm_shmfd;
111         LIST_ENTRY(shm_mapping) sm_link;
112 };
113
114 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
115 static LIST_HEAD(, shm_mapping) *shm_dictionary;
116 static struct sx shm_dict_lock;
117 static struct mtx shm_timestamp_lock;
118 static u_long shm_hash;
119 static struct unrhdr64 shm_ino_unr;
120 static dev_t shm_dev_ino;
121
122 #define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])
123
124 static void     shm_init(void *arg);
125 static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
126 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
127 static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
128 static int      shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
129     void *rl_cookie);
130 static int      shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
131     void *rl_cookie);
132 static int      shm_copyin_path(struct thread *td, const char *userpath_in,
133     char **path_out);
134 static int      shm_deallocate(struct shmfd *shmfd, off_t *offset,
135     off_t *length, int flags);
136
137 static fo_rdwr_t        shm_read;
138 static fo_rdwr_t        shm_write;
139 static fo_truncate_t    shm_truncate;
140 static fo_ioctl_t       shm_ioctl;
141 static fo_stat_t        shm_stat;
142 static fo_close_t       shm_close;
143 static fo_chmod_t       shm_chmod;
144 static fo_chown_t       shm_chown;
145 static fo_seek_t        shm_seek;
146 static fo_fill_kinfo_t  shm_fill_kinfo;
147 static fo_mmap_t        shm_mmap;
148 static fo_get_seals_t   shm_get_seals;
149 static fo_add_seals_t   shm_add_seals;
150 static fo_fallocate_t   shm_fallocate;
151 static fo_fspacectl_t   shm_fspacectl;
152
153 /* File descriptor operations. */
154 struct fileops shm_ops = {
155         .fo_read = shm_read,
156         .fo_write = shm_write,
157         .fo_truncate = shm_truncate,
158         .fo_ioctl = shm_ioctl,
159         .fo_poll = invfo_poll,
160         .fo_kqfilter = invfo_kqfilter,
161         .fo_stat = shm_stat,
162         .fo_close = shm_close,
163         .fo_chmod = shm_chmod,
164         .fo_chown = shm_chown,
165         .fo_sendfile = vn_sendfile,
166         .fo_seek = shm_seek,
167         .fo_fill_kinfo = shm_fill_kinfo,
168         .fo_mmap = shm_mmap,
169         .fo_get_seals = shm_get_seals,
170         .fo_add_seals = shm_add_seals,
171         .fo_fallocate = shm_fallocate,
172         .fo_fspacectl = shm_fspacectl,
173         .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
174 };
175
176 FEATURE(posix_shm, "POSIX shared memory");
177
178 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
179     "");
180
181 static int largepage_reclaim_tries = 1;
182 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
183     CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
184     "Number of contig reclaims before giving up for default alloc policy");
185
186 static int
187 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
188 {
189         vm_page_t m;
190         vm_pindex_t idx;
191         size_t tlen;
192         int error, offset, rv;
193
194         idx = OFF_TO_IDX(uio->uio_offset);
195         offset = uio->uio_offset & PAGE_MASK;
196         tlen = MIN(PAGE_SIZE - offset, len);
197
198         rv = vm_page_grab_valid_unlocked(&m, obj, idx,
199             VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
200         if (rv == VM_PAGER_OK)
201                 goto found;
202
203         /*
204          * Read I/O without either a corresponding resident page or swap
205          * page: use zero_region.  This is intended to avoid instantiating
206          * pages on read from a sparse region.
207          */
208         VM_OBJECT_WLOCK(obj);
209         m = vm_page_lookup(obj, idx);
210         if (uio->uio_rw == UIO_READ && m == NULL &&
211             !vm_pager_has_page(obj, idx, NULL, NULL)) {
212                 VM_OBJECT_WUNLOCK(obj);
213                 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
214         }
215
216         /*
217          * Although the tmpfs vnode lock is held here, it is
218          * nonetheless safe to sleep waiting for a free page.  The
219          * pageout daemon does not need to acquire the tmpfs vnode
220          * lock to page out tobj's pages because tobj is a OBJT_SWAP
221          * type object.
222          */
223         rv = vm_page_grab_valid(&m, obj, idx,
224             VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
225         if (rv != VM_PAGER_OK) {
226                 VM_OBJECT_WUNLOCK(obj);
227                 printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
228                     obj, idx, rv);
229                 return (EIO);
230         }
231         VM_OBJECT_WUNLOCK(obj);
232
233 found:
234         error = uiomove_fromphys(&m, offset, tlen, uio);
235         if (uio->uio_rw == UIO_WRITE && error == 0)
236                 vm_page_set_dirty(m);
237         vm_page_activate(m);
238         vm_page_sunbusy(m);
239
240         return (error);
241 }
242
243 int
244 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
245 {
246         ssize_t resid;
247         size_t len;
248         int error;
249
250         error = 0;
251         while ((resid = uio->uio_resid) > 0) {
252                 if (obj_size <= uio->uio_offset)
253                         break;
254                 len = MIN(obj_size - uio->uio_offset, resid);
255                 if (len == 0)
256                         break;
257                 error = uiomove_object_page(obj, len, uio);
258                 if (error != 0 || resid == uio->uio_resid)
259                         break;
260         }
261         return (error);
262 }
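
/*
 * Note on the sparse-read path above: reading a hole goes through
 * zero_region and does not allocate pages.  An illustrative userspace
 * sketch (error handling omitted):
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	char buf[4096];
 *	ftruncate(fd, 1024 * 1024);
 *	pread(fd, buf, sizeof(buf), 0);	// returns zeros; the backing
 *					// object stays unpopulated
 */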
263
264 static u_long count_largepages[MAXPAGESIZES];
265
266 static int
267 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
268     int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
269 {
270         vm_page_t m __diagused;
271         int psind;
272
273         psind = object->un_pager.phys.data_val;
274         if (psind == 0 || pidx >= object->size)
275                 return (VM_PAGER_FAIL);
276         *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
277
278         /*
279          * We only busy the first page in the superpage run.  It is
280  * useless to busy the whole run since we only remove full
281  * superpages, and it takes too long to busy e.g. 512 * 512 ==
282  * 262144 pages constituting a 1G amd64 superpage.
283          */
284         m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
285         MPASS(m != NULL);
286
287         *last = *first + atop(pagesizes[psind]) - 1;
288         return (VM_PAGER_OK);
289 }
290
291 static boolean_t
292 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
293     int *before, int *after)
294 {
295         int psind;
296
297         psind = object->un_pager.phys.data_val;
298         if (psind == 0 || pindex >= object->size)
299                 return (FALSE);
300         if (before != NULL) {
301                 *before = pindex - rounddown2(pindex, pagesizes[psind] /
302                     PAGE_SIZE);
303         }
304         if (after != NULL) {
305                 *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
306                     pindex;
307         }
308         return (TRUE);
309 }
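
/*
 * Worked example for the arithmetic above, assuming 2M superpages
 * (512 base pages each): for pindex 600, *before = 600 -
 * rounddown2(600, 512) = 88 and *after = roundup2(600, 512) - 600 =
 * 1024 - 600 = 424, i.e. the run containing pindex spans indices
 * 512..1023.
 */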
310
311 static void
312 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
313     vm_ooffset_t foff, struct ucred *cred)
314 {
315 }
316
317 static void
318 shm_largepage_phys_dtor(vm_object_t object)
319 {
320         int psind;
321
322         psind = object->un_pager.phys.data_val;
323         if (psind != 0) {
324                 atomic_subtract_long(&count_largepages[psind],
325                     object->size / (pagesizes[psind] / PAGE_SIZE));
326                 vm_wire_sub(object->size);
327         } else {
328                 KASSERT(object->size == 0,
329                     ("largepage phys obj %p not initialized but size %#jx > 0",
330                     object, (uintmax_t)object->size));
331         }
332 }
333
334 static const struct phys_pager_ops shm_largepage_phys_ops = {
335         .phys_pg_populate =     shm_largepage_phys_populate,
336         .phys_pg_haspage =      shm_largepage_phys_haspage,
337         .phys_pg_ctor =         shm_largepage_phys_ctor,
338         .phys_pg_dtor =         shm_largepage_phys_dtor,
339 };
340
341 bool
342 shm_largepage(struct shmfd *shmfd)
343 {
344         return (shmfd->shm_object->type == OBJT_PHYS);
345 }
346
347 static int
348 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
349 {
350         struct shmfd *shmfd;
351         off_t foffset;
352         int error;
353
354         shmfd = fp->f_data;
355         foffset = foffset_lock(fp, 0);
356         error = 0;
357         switch (whence) {
358         case L_INCR:
359                 if (foffset < 0 ||
360                     (offset > 0 && foffset > OFF_MAX - offset)) {
361                         error = EOVERFLOW;
362                         break;
363                 }
364                 offset += foffset;
365                 break;
366         case L_XTND:
367                 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
368                         error = EOVERFLOW;
369                         break;
370                 }
371                 offset += shmfd->shm_size;
372                 break;
373         case L_SET:
374                 break;
375         default:
376                 error = EINVAL;
377         }
378         if (error == 0) {
379                 if (offset < 0 || offset > shmfd->shm_size)
380                         error = EINVAL;
381                 else
382                         td->td_uretoff.tdu_off = offset;
383         }
384         foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
385         return (error);
386 }
387
388 static int
389 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
390     int flags, struct thread *td)
391 {
392         struct shmfd *shmfd;
393         void *rl_cookie;
394         int error;
395
396         shmfd = fp->f_data;
397 #ifdef MAC
398         error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
399         if (error)
400                 return (error);
401 #endif
402         foffset_lock_uio(fp, uio, flags);
403         rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
404             uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
405         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
406         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
407         foffset_unlock_uio(fp, uio, flags);
408         return (error);
409 }
410
411 static int
412 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
413     int flags, struct thread *td)
414 {
415         struct shmfd *shmfd;
416         void *rl_cookie;
417         int error;
418         off_t size;
419
420         shmfd = fp->f_data;
421 #ifdef MAC
422         error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
423         if (error)
424                 return (error);
425 #endif
426         if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
427                 return (EINVAL);
428         foffset_lock_uio(fp, uio, flags);
429         if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
430                 /*
431                  * Overflow is only an error if we're supposed to expand on
432                  * write.  Otherwise, we'll just truncate the write to the
433                  * size of the file, which can only grow up to OFF_MAX.
434                  */
435                 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
436                         foffset_unlock_uio(fp, uio, flags);
437                         return (EFBIG);
438                 }
439
440                 size = shmfd->shm_size;
441         } else {
442                 size = uio->uio_offset + uio->uio_resid;
443         }
444         if ((flags & FOF_OFFSET) == 0) {
445                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
446                     &shmfd->shm_mtx);
447         } else {
448                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
449                     size, &shmfd->shm_mtx);
450         }
451         if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
452                 error = EPERM;
453         } else {
454                 error = 0;
455                 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
456                     size > shmfd->shm_size) {
457                         error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
458                 }
459                 if (error == 0)
460                         error = uiomove_object(shmfd->shm_object,
461                             shmfd->shm_size, uio);
462         }
463         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
464         foffset_unlock_uio(fp, uio, flags);
465         return (error);
466 }
467
468 static int
469 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
470     struct thread *td)
471 {
472         struct shmfd *shmfd;
473 #ifdef MAC
474         int error;
475 #endif
476
477         shmfd = fp->f_data;
478 #ifdef MAC
479         error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
480         if (error)
481                 return (error);
482 #endif
483         return (shm_dotruncate(shmfd, length));
484 }
485
486 int
487 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
488     struct thread *td)
489 {
490         struct shmfd *shmfd;
491         struct shm_largepage_conf *conf;
492         void *rl_cookie;
493
494         shmfd = fp->f_data;
495         switch (com) {
496         case FIONBIO:
497         case FIOASYNC:
498                 /*
499                  * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
500                  * just like it would on an unlinked regular file
501                  */
502                 return (0);
503         case FIOSSHMLPGCNF:
504                 if (!shm_largepage(shmfd))
505                         return (ENOTTY);
506                 conf = data;
507                 if (shmfd->shm_lp_psind != 0 &&
508                     conf->psind != shmfd->shm_lp_psind)
509                         return (EINVAL);
510                 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
511                     pagesizes[conf->psind] == 0)
512                         return (EINVAL);
513                 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
514                     conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
515                     conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
516                         return (EINVAL);
517
518                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
519                     &shmfd->shm_mtx);
520                 shmfd->shm_lp_psind = conf->psind;
521                 shmfd->shm_lp_alloc_policy = conf->alloc_policy;
522                 shmfd->shm_object->un_pager.phys.data_val = conf->psind;
523                 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
524                 return (0);
525         case FIOGSHMLPGCNF:
526                 if (!shm_largepage(shmfd))
527                         return (ENOTTY);
528                 conf = data;
529                 rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
530                     &shmfd->shm_mtx);
531                 conf->psind = shmfd->shm_lp_psind;
532                 conf->alloc_policy = shmfd->shm_lp_alloc_policy;
533                 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
534                 return (0);
535         default:
536                 return (ENOTTY);
537         }
538 }
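
/*
 * Illustrative userspace use of the largepage ioctls above (a sketch;
 * assumes fd was created with SHM_LARGEPAGE, error handling omitted):
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,	// first non-base page size, e.g. 2M on amd64
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);
 *	// ftruncate(2) lengths must now be multiples of pagesizes[1].
 */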
539
540 static int
541 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
542 {
543         struct shmfd *shmfd;
544 #ifdef MAC
545         int error;
546 #endif
547
548         shmfd = fp->f_data;
549
550 #ifdef MAC
551         error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
552         if (error)
553                 return (error);
554 #endif
555
556         /*
557          * Attempt to return sane-ish values for fstat() on a memory file
558          * descriptor.
559          */
560         bzero(sb, sizeof(*sb));
561         sb->st_blksize = PAGE_SIZE;
562         sb->st_size = shmfd->shm_size;
563         sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
564         mtx_lock(&shm_timestamp_lock);
565         sb->st_atim = shmfd->shm_atime;
566         sb->st_ctim = shmfd->shm_ctime;
567         sb->st_mtim = shmfd->shm_mtime;
568         sb->st_birthtim = shmfd->shm_birthtime;
569         sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
570         sb->st_uid = shmfd->shm_uid;
571         sb->st_gid = shmfd->shm_gid;
572         mtx_unlock(&shm_timestamp_lock);
573         sb->st_dev = shm_dev_ino;
574         sb->st_ino = shmfd->shm_ino;
575         sb->st_nlink = shmfd->shm_object->ref_count;
576         sb->st_blocks = shmfd->shm_object->size /
577             (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
578
579         return (0);
580 }
581
582 static int
583 shm_close(struct file *fp, struct thread *td)
584 {
585         struct shmfd *shmfd;
586
587         shmfd = fp->f_data;
588         fp->f_data = NULL;
589         shm_drop(shmfd);
590
591         return (0);
592 }
593
594 static int
595 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) {
596         int error;
597         char *path;
598         const char *pr_path;
599         size_t pr_pathlen;
600
601         path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
602         pr_path = td->td_ucred->cr_prison->pr_path;
603
604         /* Construct a full pathname for jailed callers. */
605         pr_pathlen = strcmp(pr_path, "/") ==
606             0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
607         error = copyinstr(userpath_in, path + pr_pathlen,
608             MAXPATHLEN - pr_pathlen, NULL);
609         if (error != 0)
610                 goto out;
611
612 #ifdef KTRACE
613         if (KTRPOINT(curthread, KTR_NAMEI))
614                 ktrnamei(path);
615 #endif
616
617         /* Require paths to start with a '/' character. */
618         if (path[pr_pathlen] != '/') {
619                 error = EINVAL;
620                 goto out;
621         }
622
623         *path_out = path;
624
625 out:
626         if (error != 0)
627                 free(path, M_SHMFD);
628
629         return (error);
630 }
631
632 static int
633 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
634     int end)
635 {
636         vm_page_t m;
637         int rv;
638
639         VM_OBJECT_ASSERT_WLOCKED(object);
640         KASSERT(base >= 0, ("%s: base %d", __func__, base));
641         KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
642             end));
643
644 retry:
645         m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
646         if (m != NULL) {
647                 MPASS(vm_page_all_valid(m));
648         } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
649                 m = vm_page_alloc(object, idx,
650                     VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
651                 if (m == NULL)
652                         goto retry;
653                 vm_object_pip_add(object, 1);
654                 VM_OBJECT_WUNLOCK(object);
655                 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
656                 VM_OBJECT_WLOCK(object);
657                 vm_object_pip_wakeup(object);
658                 if (rv == VM_PAGER_OK) {
659                         /*
660                          * Since the page was not resident, and therefore not
661                          * recently accessed, immediately enqueue it for
662                          * asynchronous laundering.  The current operation is
663                          * not regarded as an access.
664                          */
665                         vm_page_launder(m);
666                 } else {
667                         vm_page_free(m);
668                         VM_OBJECT_WUNLOCK(object);
669                         return (EIO);
670                 }
671         }
672         if (m != NULL) {
673                 pmap_zero_page_area(m, base, end - base);
674                 KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
675                     __func__, m));
676                 vm_page_set_dirty(m);
677                 vm_page_xunbusy(m);
678         }
679
680         return (0);
681 }
682
683 static int
684 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
685 {
686         vm_object_t object;
687         vm_pindex_t nobjsize;
688         vm_ooffset_t delta;
689         int base, error;
690
691         KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
692         object = shmfd->shm_object;
693         VM_OBJECT_ASSERT_WLOCKED(object);
694         rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
695         if (length == shmfd->shm_size)
696                 return (0);
697         nobjsize = OFF_TO_IDX(length + PAGE_MASK);
698
699         /* Are we shrinking?  If so, trim the end. */
700         if (length < shmfd->shm_size) {
701                 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
702                         return (EPERM);
703
704                 /*
705                  * Disallow any requests to shrink the size if this
706                  * object is mapped into the kernel.
707                  */
708                 if (shmfd->shm_kmappings > 0)
709                         return (EBUSY);
710
711                 /*
712                  * Zero the truncated part of the last page.
713                  */
714                 base = length & PAGE_MASK;
715                 if (base != 0) {
716                         error = shm_partial_page_invalidate(object,
717                             OFF_TO_IDX(length), base, PAGE_SIZE);
718                         if (error)
719                                 return (error);
720                 }
721                 delta = IDX_TO_OFF(object->size - nobjsize);
722
723                 if (nobjsize < object->size)
724                         vm_object_page_remove(object, nobjsize, object->size,
725                             0);
726
727                 /* Free the swap accounted for shm */
728                 swap_release_by_cred(delta, object->cred);
729                 object->charge -= delta;
730         } else {
731                 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
732                         return (EPERM);
733
734                 /* Try to reserve additional swap space. */
735                 delta = IDX_TO_OFF(nobjsize - object->size);
736                 if (!swap_reserve_by_cred(delta, object->cred))
737                         return (ENOMEM);
738                 object->charge += delta;
739         }
740         shmfd->shm_size = length;
741         mtx_lock(&shm_timestamp_lock);
742         vfs_timestamp(&shmfd->shm_ctime);
743         shmfd->shm_mtime = shmfd->shm_ctime;
744         mtx_unlock(&shm_timestamp_lock);
745         object->size = nobjsize;
746         return (0);
747 }
748
749 static int
750 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
751 {
752         vm_object_t object;
753         vm_page_t m;
754         vm_pindex_t newobjsz;
755         vm_pindex_t oldobjsz __unused;
756         int aflags, error, i, psind, try;
757
758         KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
759         object = shmfd->shm_object;
760         VM_OBJECT_ASSERT_WLOCKED(object);
761         rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
762
763         oldobjsz = object->size;
764         newobjsz = OFF_TO_IDX(length);
765         if (length == shmfd->shm_size)
766                 return (0);
767         psind = shmfd->shm_lp_psind;
768         if (psind == 0 && length != 0)
769                 return (EINVAL);
770         if ((length & (pagesizes[psind] - 1)) != 0)
771                 return (EINVAL);
772
773         if (length < shmfd->shm_size) {
774                 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
775                         return (EPERM);
776                 if (shmfd->shm_kmappings > 0)
777                         return (EBUSY);
778                 return (ENOTSUP);       /* Pages are unmanaged. */
779 #if 0
780                 vm_object_page_remove(object, newobjsz, oldobjsz, 0);
781                 object->size = newobjsz;
782                 shmfd->shm_size = length;
783                 return (0);
784 #endif
785         }
786
787         if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
788                 return (EPERM);
789
790         aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
791         if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
792                 aflags |= VM_ALLOC_WAITFAIL;
793         try = 0;
794
795         /*
796          * Extend shmfd and object, keeping all already fully
797          * allocated large pages intact even on error, because the dropped
798          * object lock might have allowed them to be mapped.
799          */
800         while (object->size < newobjsz) {
801                 m = vm_page_alloc_contig(object, object->size, aflags,
802                     pagesizes[psind] / PAGE_SIZE, 0, ~0,
803                     pagesizes[psind], 0,
804                     VM_MEMATTR_DEFAULT);
805                 if (m == NULL) {
806                         VM_OBJECT_WUNLOCK(object);
807                         if (shmfd->shm_lp_alloc_policy ==
808                             SHM_LARGEPAGE_ALLOC_NOWAIT ||
809                             (shmfd->shm_lp_alloc_policy ==
810                             SHM_LARGEPAGE_ALLOC_DEFAULT &&
811                             try >= largepage_reclaim_tries)) {
812                                 VM_OBJECT_WLOCK(object);
813                                 return (ENOMEM);
814                         }
815                         error = vm_page_reclaim_contig(aflags,
816                             pagesizes[psind] / PAGE_SIZE, 0, ~0,
817                             pagesizes[psind], 0) ? 0 :
818                             vm_wait_intr(object);
819                         if (error != 0) {
820                                 VM_OBJECT_WLOCK(object);
821                                 return (error);
822                         }
823                         try++;
824                         VM_OBJECT_WLOCK(object);
825                         continue;
826                 }
827                 try = 0;
828                 for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
829                         if ((m[i].flags & PG_ZERO) == 0)
830                                 pmap_zero_page(&m[i]);
831                         vm_page_valid(&m[i]);
832                         vm_page_xunbusy(&m[i]);
833                 }
834                 object->size += OFF_TO_IDX(pagesizes[psind]);
835                 shmfd->shm_size += pagesizes[psind];
836                 atomic_add_long(&count_largepages[psind], 1);
837                 vm_wire_add(atop(pagesizes[psind]));
838         }
839         return (0);
840 }
841
842 static int
843 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
844 {
845         int error;
846
847         VM_OBJECT_WLOCK(shmfd->shm_object);
848         error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
849             length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
850             rl_cookie);
851         VM_OBJECT_WUNLOCK(shmfd->shm_object);
852         return (error);
853 }
854
855 int
856 shm_dotruncate(struct shmfd *shmfd, off_t length)
857 {
858         void *rl_cookie;
859         int error;
860
861         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
862             &shmfd->shm_mtx);
863         error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
864         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
865         return (error);
866 }
867
868 /*
869  * shmfd object management including creation and reference counting
870  * routines.
871  */
872 struct shmfd *
873 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
874 {
875         struct shmfd *shmfd;
876
877         shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
878         shmfd->shm_size = 0;
879         shmfd->shm_uid = ucred->cr_uid;
880         shmfd->shm_gid = ucred->cr_gid;
881         shmfd->shm_mode = mode;
882         if (largepage) {
883                 shmfd->shm_object = phys_pager_allocate(NULL,
884                     &shm_largepage_phys_ops, NULL, shmfd->shm_size,
885                     VM_PROT_DEFAULT, 0, ucred);
886                 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
887         } else {
888                 shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
889                     shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
890         }
891         KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
892         vfs_timestamp(&shmfd->shm_birthtime);
893         shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
894             shmfd->shm_birthtime;
895         shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
896         refcount_init(&shmfd->shm_refs, 1);
897         mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
898         rangelock_init(&shmfd->shm_rl);
899 #ifdef MAC
900         mac_posixshm_init(shmfd);
901         mac_posixshm_create(ucred, shmfd);
902 #endif
903
904         return (shmfd);
905 }
906
907 struct shmfd *
908 shm_hold(struct shmfd *shmfd)
909 {
910
911         refcount_acquire(&shmfd->shm_refs);
912         return (shmfd);
913 }
914
915 void
916 shm_drop(struct shmfd *shmfd)
917 {
918
919         if (refcount_release(&shmfd->shm_refs)) {
920 #ifdef MAC
921                 mac_posixshm_destroy(shmfd);
922 #endif
923                 rangelock_destroy(&shmfd->shm_rl);
924                 mtx_destroy(&shmfd->shm_mtx);
925                 vm_object_deallocate(shmfd->shm_object);
926                 free(shmfd, M_SHMFD);
927         }
928 }
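
/*
 * The expected reference-counting pattern (a sketch): each shm_hold()
 * must be balanced by a shm_drop(); the final drop destroys the locks
 * and releases the backing VM object.
 *
 *	struct shmfd *sfd;
 *
 *	sfd = shm_alloc(cred, 0600, false);	// shm_refs == 1
 *	(void)shm_hold(sfd);			// shm_refs == 2
 *	...use sfd...
 *	shm_drop(sfd);				// shm_refs == 1
 *	shm_drop(sfd);				// freed
 */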
929
930 /*
931  * Determine if the credentials have sufficient permissions for a
932  * specified combination of FREAD and FWRITE.
933  */
934 int
935 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
936 {
937         accmode_t accmode;
938         int error;
939
940         accmode = 0;
941         if (flags & FREAD)
942                 accmode |= VREAD;
943         if (flags & FWRITE)
944                 accmode |= VWRITE;
945         mtx_lock(&shm_timestamp_lock);
946         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
947             accmode, ucred);
948         mtx_unlock(&shm_timestamp_lock);
949         return (error);
950 }
951
952 static void
953 shm_init(void *arg)
954 {
955         char name[32];
956         int i;
957
958         mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
959         sx_init(&shm_dict_lock, "shm dictionary");
960         shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
961         new_unrhdr64(&shm_ino_unr, 1);
962         shm_dev_ino = devfs_alloc_cdp_inode();
963         KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
964
965         for (i = 1; i < MAXPAGESIZES; i++) {
966                 if (pagesizes[i] == 0)
967                         break;
968 #define M       (1024 * 1024)
969 #define G       (1024 * M)
970                 if (pagesizes[i] >= G)
971                         snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
972                 else if (pagesizes[i] >= M)
973                         snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
974                 else
975                         snprintf(name, sizeof(name), "%lu", pagesizes[i]);
976 #undef G
977 #undef M
978                 SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
979                     OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
980                     "number of non-transient largepages allocated");
981         }
982 }
983 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
984
985 /*
986  * Dictionary management.  We maintain an in-kernel dictionary to map
987  * paths to shmfd objects.  We use the FNV hash on the path to store
988  * the mappings in a hash table.
989  */
990 static struct shmfd *
991 shm_lookup(char *path, Fnv32_t fnv)
992 {
993         struct shm_mapping *map;
994
995         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
996                 if (map->sm_fnv != fnv)
997                         continue;
998                 if (strcmp(map->sm_path, path) == 0)
999                         return (map->sm_shmfd);
1000         }
1001
1002         return (NULL);
1003 }
1004
1005 static void
1006 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1007 {
1008         struct shm_mapping *map;
1009
1010         map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1011         map->sm_path = path;
1012         map->sm_fnv = fnv;
1013         map->sm_shmfd = shm_hold(shmfd);
1014         shmfd->shm_path = path;
1015         LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1016 }
1017
1018 static int
1019 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1020 {
1021         struct shm_mapping *map;
1022         int error;
1023
1024         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1025                 if (map->sm_fnv != fnv)
1026                         continue;
1027                 if (strcmp(map->sm_path, path) == 0) {
1028 #ifdef MAC
1029                         error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1030                         if (error)
1031                                 return (error);
1032 #endif
1033                         error = shm_access(map->sm_shmfd, ucred,
1034                             FREAD | FWRITE);
1035                         if (error)
1036                                 return (error);
1037                         map->sm_shmfd->shm_path = NULL;
1038                         LIST_REMOVE(map, sm_link);
1039                         shm_drop(map->sm_shmfd);
1040                         free(map->sm_path, M_SHMFD);
1041                         free(map, M_SHMFD);
1042                         return (0);
1043                 }
1044         }
1045
1046         return (ENOENT);
1047 }
1048
1049 int
1050 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1051     int shmflags, struct filecaps *fcaps, const char *name __unused)
1052 {
1053         struct pwddesc *pdp;
1054         struct shmfd *shmfd;
1055         struct file *fp;
1056         char *path;
1057         void *rl_cookie;
1058         Fnv32_t fnv;
1059         mode_t cmode;
1060         int error, fd, initial_seals;
1061         bool largepage;
1062
1063         if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1064             SHM_LARGEPAGE)) != 0)
1065                 return (EINVAL);
1066
1067         initial_seals = F_SEAL_SEAL;
1068         if ((shmflags & SHM_ALLOW_SEALING) != 0)
1069                 initial_seals &= ~F_SEAL_SEAL;
1070
1071 #ifdef CAPABILITY_MODE
1072         /*
1073          * shm_open(2) is only allowed for anonymous objects.
1074          */
1075         if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
1076                 return (ECAPMODE);
1077 #endif
1078
1079         AUDIT_ARG_FFLAGS(flags);
1080         AUDIT_ARG_MODE(mode);
1081
1082         if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1083                 return (EINVAL);
1084
1085         if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1086                 return (EINVAL);
1087
1088         largepage = (shmflags & SHM_LARGEPAGE) != 0;
1089         if (largepage && !PMAP_HAS_LARGEPAGES)
1090                 return (ENOTTY);
1091
1092         /*
1093          * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1094          * If the decision is made later to allow additional seals, care must be
1095          * taken below to ensure that the seals are properly set if the shmfd
1096          * already existed -- this currently assumes that only F_SEAL_SEAL can
1097          * be set and doesn't take further precautions to ensure the validity of
1098          * the seals being added with respect to current mappings.
1099          */
1100         if ((initial_seals & ~F_SEAL_SEAL) != 0)
1101                 return (EINVAL);
1102
1103         pdp = td->td_proc->p_pd;
1104         cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1105
1106         /*
1107          * A shm created by shm_open(2) should always have O_CLOEXEC set, as
1108          * mandated by POSIX.  We allow it to be unset here so that an in-kernel
1109          * interface may be written as a thin layer around shm, optionally not
1110          * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
1111          * in sys_shm_open() to keep this implementation compliant.
1112          */
1113         error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1114         if (error)
1115                 return (error);
1116
1117         /* A SHM_ANON path pointer creates an anonymous object. */
1118         if (userpath == SHM_ANON) {
1119                 /* A read-only anonymous object is pointless. */
1120                 if ((flags & O_ACCMODE) == O_RDONLY) {
1121                         fdclose(td, fp, fd);
1122                         fdrop(fp, td);
1123                         return (EINVAL);
1124                 }
1125                 shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1126                 shmfd->shm_seals = initial_seals;
1127                 shmfd->shm_flags = shmflags;
1128         } else {
1129                 error = shm_copyin_path(td, userpath, &path);
1130                 if (error != 0) {
1131                         fdclose(td, fp, fd);
1132                         fdrop(fp, td);
1133                         return (error);
1134                 }
1135
1136                 AUDIT_ARG_UPATH1_CANON(path);
1137                 fnv = fnv_32_str(path, FNV1_32_INIT);
1138                 sx_xlock(&shm_dict_lock);
1139                 shmfd = shm_lookup(path, fnv);
1140                 if (shmfd == NULL) {
1141                         /* Object does not yet exist, create it if requested. */
1142                         if (flags & O_CREAT) {
1143 #ifdef MAC
1144                                 error = mac_posixshm_check_create(td->td_ucred,
1145                                     path);
1146                                 if (error == 0) {
1147 #endif
1148                                         shmfd = shm_alloc(td->td_ucred, cmode,
1149                                             largepage);
1150                                         shmfd->shm_seals = initial_seals;
1151                                         shmfd->shm_flags = shmflags;
1152                                         shm_insert(path, fnv, shmfd);
1153 #ifdef MAC
1154                                 }
1155 #endif
1156                         } else {
1157                                 free(path, M_SHMFD);
1158                                 error = ENOENT;
1159                         }
1160                 } else {
1161                         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1162                             &shmfd->shm_mtx);
1163
1164                         /*
1165                          * kern_shm_open() likely shouldn't ever error out on
1166                          * trying to set a seal that already exists, unlike
1167                          * F_ADD_SEALS.  This would break terribly as
1168                          * shm_open(2) actually sets F_SEAL_SEAL to maintain
1169                          * historical behavior where the underlying file could
1170                          * not be sealed.
1171                          */
1172                         initial_seals &= ~shmfd->shm_seals;
1173
1174                         /*
1175                          * Object already exists, obtain a new
1176                          * reference if requested and permitted.
1177                          */
1178                         free(path, M_SHMFD);
1179
1180                         /*
1181                          * initial_seals can't set additional seals if
1182                          * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is set,
1183                          * then we've already removed that one from
1184                          * initial_seals.  This is currently redundant as we
1185                          * only allow setting F_SEAL_SEAL at creation time, but
1186                          * it's cheap to check and decreases the effort required
1187                          * to allow additional seals.
1188                          */
1189                         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1190                             initial_seals != 0)
1191                                 error = EPERM;
1192                         else if ((flags & (O_CREAT | O_EXCL)) ==
1193                             (O_CREAT | O_EXCL))
1194                                 error = EEXIST;
1195                         else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1196                                 error = EINVAL;
1197                         else {
1198 #ifdef MAC
1199                                 error = mac_posixshm_check_open(td->td_ucred,
1200                                     shmfd, FFLAGS(flags & O_ACCMODE));
1201                                 if (error == 0)
1202 #endif
1203                                 error = shm_access(shmfd, td->td_ucred,
1204                                     FFLAGS(flags & O_ACCMODE));
1205                         }
1206
1207                         /*
1208                          * Truncate the file back to zero length if
1209                          * O_TRUNC was specified and the object was
1210                          * opened with read/write.
1211                          */
1212                         if (error == 0 &&
1213                             (flags & (O_ACCMODE | O_TRUNC)) ==
1214                             (O_RDWR | O_TRUNC)) {
1215                                 VM_OBJECT_WLOCK(shmfd->shm_object);
1216 #ifdef MAC
1217                                 error = mac_posixshm_check_truncate(
1218                                         td->td_ucred, fp->f_cred, shmfd);
1219                                 if (error == 0)
1220 #endif
1221                                         error = shm_dotruncate_locked(shmfd, 0,
1222                                             rl_cookie);
1223                                 VM_OBJECT_WUNLOCK(shmfd->shm_object);
1224                         }
1225                         if (error == 0) {
1226                                 /*
1227                                  * Currently we only allow F_SEAL_SEAL to be
1228                                  * set initially.  As noted above, this would
1229                                  * need to be reworked should that change.
1230                                  */
1231                                 shmfd->shm_seals |= initial_seals;
1232                                 shm_hold(shmfd);
1233                         }
1234                         rangelock_unlock(&shmfd->shm_rl, rl_cookie,
1235                             &shmfd->shm_mtx);
1236                 }
1237                 sx_xunlock(&shm_dict_lock);
1238
1239                 if (error) {
1240                         fdclose(td, fp, fd);
1241                         fdrop(fp, td);
1242                         return (error);
1243                 }
1244         }
1245
1246         finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1247
1248         td->td_retval[0] = fd;
1249         fdrop(fp, td);
1250
1251         return (0);
1252 }
1253
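/*
 * Sealing behavior described above, as seen from userspace (an
 * illustrative sketch; it assumes the shm_open2() libc wrapper, which
 * reaches this handler):
 *
 *	int fd = shm_open2(SHM_ANON, O_RDWR, 0600, SHM_ALLOW_SEALING,
 *	    NULL);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	// ftruncate(fd, 8192) now fails with EPERM.
 */
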
1254 /* System calls. */
1255 #ifdef COMPAT_FREEBSD12
1256 int
1257 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1258 {
1259
1260         return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1261             uap->mode, NULL));
1262 }
1263 #endif
1264
1265 int
1266 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1267 {
1268         char *path;
1269         Fnv32_t fnv;
1270         int error;
1271
1272         error = shm_copyin_path(td, uap->path, &path);
1273         if (error != 0)
1274                 return (error);
1275
1276         AUDIT_ARG_UPATH1_CANON(path);
1277         fnv = fnv_32_str(path, FNV1_32_INIT);
1278         sx_xlock(&shm_dict_lock);
1279         error = shm_remove(path, fnv, td->td_ucred);
1280         sx_xunlock(&shm_dict_lock);
1281         free(path, M_SHMFD);
1282
1283         return (error);
1284 }
1285
1286 int
1287 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1288 {
1289         char *path_from = NULL, *path_to = NULL;
1290         Fnv32_t fnv_from, fnv_to;
1291         struct shmfd *fd_from;
1292         struct shmfd *fd_to;
1293         int error;
1294         int flags;
1295
1296         flags = uap->flags;
1297         AUDIT_ARG_FFLAGS(flags);
1298
1299         /*
1300          * Make sure the user passed only valid flags.
1301          * If you add a new flag, please add a new term here.
1302          */
1303         if ((flags & ~(
1304             SHM_RENAME_NOREPLACE |
1305             SHM_RENAME_EXCHANGE
1306             )) != 0) {
1307                 error = EINVAL;
1308                 goto out;
1309         }
1310
1311         /*
1312          * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1313          * force the user to choose one or the other.
1314          */
1315         if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1316             (flags & SHM_RENAME_EXCHANGE) != 0) {
1317                 error = EINVAL;
1318                 goto out;
1319         }
1320
1321         /* Renaming to or from anonymous makes no sense */
1322         if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1323                 error = EINVAL;
1324                 goto out;
1325         }
1326
1327         error = shm_copyin_path(td, uap->path_from, &path_from);
1328         if (error != 0)
1329                 goto out;
1330
1331         error = shm_copyin_path(td, uap->path_to, &path_to);
1332         if (error != 0)
1333                 goto out;
1334
1335         AUDIT_ARG_UPATH1_CANON(path_from);
1336         AUDIT_ARG_UPATH2_CANON(path_to);
1337
1338         /* Rename with from/to equal is a no-op */
1339         if (strcmp(path_from, path_to) == 0)
1340                 goto out;
1341
1342         fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1343         fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1344
1345         sx_xlock(&shm_dict_lock);
1346
1347         fd_from = shm_lookup(path_from, fnv_from);
1348         if (fd_from == NULL) {
1349                 error = ENOENT;
1350                 goto out_locked;
1351         }
1352
1353         fd_to = shm_lookup(path_to, fnv_to);
1354         if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1355                 error = EEXIST;
1356                 goto out_locked;
1357         }
1358
1359         /*
1360          * Hold a reference unconditionally to prevent shm_remove from
1361          * invalidating the 'from' shm's state.
1362          */
1363         shm_hold(fd_from);
1364         error = shm_remove(path_from, fnv_from, td->td_ucred);
1365
1366         /*
1367          * An ENOENT here means one of our assumptions failed (e.g. the
1368          * lock did not protect us).
1369          */
1370         KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1371             path_from));
1372         if (error != 0) {
1373                 shm_drop(fd_from);
1374                 goto out_locked;
1375         }
1376
1377         /*
1378          * If we are exchanging, we need to ensure the shm_remove below
1379          * doesn't invalidate the dest shm's state.
1380          */
1381         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1382                 shm_hold(fd_to);
1383
1384         /*
1385          * NOTE: if path_to is not already in the hash, c'est la vie;
1386          * it simply means we have nothing already at path_to to unlink.
1387          * That is the ENOENT case.
1388          *
1389          * If we somehow don't have access to unlink the shm at path_to,
1390          * but did for the shm at path_from, then relink the shm to
1391          * path_from and abort with EACCES.
1392          *
1393          * All other errors: that is weird; let's relink and abort the
1394          * operation.
1395          */
1396         error = shm_remove(path_to, fnv_to, td->td_ucred);
1397         if (error != 0 && error != ENOENT) {
1398                 shm_insert(path_from, fnv_from, fd_from);
1399                 shm_drop(fd_from);
1400                 /* Don't free path_from now, since the hash references it */
1401                 path_from = NULL;
1402                 goto out_locked;
1403         }
1404
1405         error = 0;
1406
1407         shm_insert(path_to, fnv_to, fd_from);
1408
1409         /* Don't free path_to now, since the hash references it */
1410         path_to = NULL;
1411
1412         /* We kept a ref when we removed, and incremented again in insert */
1413         shm_drop(fd_from);
1414         KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1415             fd_from->shm_refs));
1416
1417         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1418                 shm_insert(path_from, fnv_from, fd_to);
1419                 path_from = NULL;
1420                 shm_drop(fd_to);
1421                 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1422                     fd_to->shm_refs));
1423         }
1424
1425 out_locked:
1426         sx_xunlock(&shm_dict_lock);
1427
1428 out:
1429         free(path_from, M_SHMFD);
1430         free(path_to, M_SHMFD);
1431         return (error);
1432 }
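
/*
 * Illustrative use of the flags handled above (a userspace sketch):
 *
 *	shm_rename("/old", "/new", 0);		// replaces "/new" if present
 *	shm_rename("/old", "/new", SHM_RENAME_NOREPLACE);
 *						// EEXIST if "/new" exists
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);
 *						// atomically swaps the two
 */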
1433
1434 static int
1435 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1436     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1437     vm_ooffset_t foff, struct thread *td)
1438 {
1439         struct vmspace *vms;
1440         vm_map_entry_t next_entry, prev_entry;
1441         vm_offset_t align, mask, maxaddr;
1442         int docow, error, rv, try;
1443         bool curmap;
1444
1445         if (shmfd->shm_lp_psind == 0)
1446                 return (EINVAL);
1447
1448         /* MAP_PRIVATE is disabled */
1449         if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1450             MAP_NOCORE |
1451 #ifdef MAP_32BIT
1452             MAP_32BIT |
1453 #endif
1454             MAP_ALIGNMENT_MASK)) != 0)
1455                 return (EINVAL);
1456
1457         vms = td->td_proc->p_vmspace;
1458         curmap = map == &vms->vm_map;
1459         if (curmap) {
1460                 error = kern_mmap_racct_check(td, map, size);
1461                 if (error != 0)
1462                         return (error);
1463         }
1464
1465         docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1466         docow |= MAP_INHERIT_SHARE;
1467         if ((flags & MAP_NOCORE) != 0)
1468                 docow |= MAP_DISABLE_COREDUMP;
1469
1470         mask = pagesizes[shmfd->shm_lp_psind] - 1;
1471         if ((foff & mask) != 0)
1472                 return (EINVAL);
1473         maxaddr = vm_map_max(map);
1474 #ifdef MAP_32BIT
1475         if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1476                 maxaddr = MAP_32BIT_MAX_ADDR;
1477 #endif
1478         if (size == 0 || (size & mask) != 0 ||
1479             (*addr != 0 && ((*addr & mask) != 0 ||
1480             *addr + size < *addr || *addr + size > maxaddr)))
1481                 return (EINVAL);
1482
1483         align = flags & MAP_ALIGNMENT_MASK;
1484         if (align == 0) {
1485                 align = pagesizes[shmfd->shm_lp_psind];
1486         } else if (align == MAP_ALIGNED_SUPER) {
1487                 if (shmfd->shm_lp_psind != 1)
1488                         return (EINVAL);
1489                 align = pagesizes[1];
1490         } else {
1491                 align >>= MAP_ALIGNMENT_SHIFT;
1492                 align = 1ULL << align;
1493                 /* Also handles overflow. */
1494                 if (align < pagesizes[shmfd->shm_lp_psind])
1495                         return (EINVAL);
1496         }
1497
1498         vm_map_lock(map);
1499         if ((flags & MAP_FIXED) == 0) {
1500                 try = 1;
1501                 if (curmap && (*addr == 0 ||
1502                     (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1503                     *addr < round_page((vm_offset_t)vms->vm_daddr +
1504                     lim_max(td, RLIMIT_DATA))))) {
1505                         *addr = roundup2((vm_offset_t)vms->vm_daddr +
1506                             lim_max(td, RLIMIT_DATA),
1507                             pagesizes[shmfd->shm_lp_psind]);
1508                 }
1509 again:
1510                 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1511                 if (rv != KERN_SUCCESS) {
1512                         if (try == 1) {
1513                                 try = 2;
1514                                 *addr = vm_map_min(map);
1515                                 if ((*addr & mask) != 0)
1516                                         *addr = (*addr + mask) & ~mask;
1517                                 goto again;
1518                         }
1519                         goto fail1;
1520                 }
1521         } else if ((flags & MAP_EXCL) == 0) {
1522                 rv = vm_map_delete(map, *addr, *addr + size);
1523                 if (rv != KERN_SUCCESS)
1524                         goto fail1;
1525         } else {
1526                 error = ENOSPC;
1527                 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1528                         goto fail;
1529                 next_entry = vm_map_entry_succ(prev_entry);
1530                 if (next_entry->start < *addr + size)
1531                         goto fail;
1532         }
1533
1534         rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1535             prot, max_prot, docow);
1536 fail1:
1537         error = vm_mmap_to_errno(rv);
1538 fail:
1539         vm_map_unlock(map);
1540         return (error);
1541 }
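
/*
 * Worked example of the alignment decoding above (illustrative): a caller
 * requesting a 2MB-aligned mapping passes MAP_ALIGNED(21) in flags, so
 * align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT = 21 and
 * 1ULL << 21 = 0x200000.  Any request smaller than
 * pagesizes[shmfd->shm_lp_psind] is rejected.
 */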
1542
1543 static int
1544 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1545     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1546     vm_ooffset_t foff, struct thread *td)
1547 {
1548         struct shmfd *shmfd;
1549         vm_prot_t maxprot;
1550         int error;
1551         bool writecnt;
1552         void *rl_cookie;
1553
1554         shmfd = fp->f_data;
1555         maxprot = VM_PROT_NONE;
1556
1557         rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1558             &shmfd->shm_mtx);
1559         /* FREAD should always be set. */
1560         if ((fp->f_flag & FREAD) != 0)
1561                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1562
1563         /*
1564          * If FWRITE is set, we can allow VM_PROT_WRITE unless it is a
1565          * shared mapping with a write seal applied.  Private mappings
1566          * are always writable.
1567          */
1568         if ((flags & MAP_SHARED) == 0) {
1569                 cap_maxprot |= VM_PROT_WRITE;
1570                 maxprot |= VM_PROT_WRITE;
1571                 writecnt = false;
1572         } else {
1573                 if ((fp->f_flag & FWRITE) != 0 &&
1574                     (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1575                         maxprot |= VM_PROT_WRITE;
1576
1577                 /*
1578                  * Any mappings from a writable descriptor may be upgraded to
1579                  * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1580                  * applied between the open and subsequent mmap(2).  We want to
1581                  * reject application of a write seal as long as any such
1582                  * mapping exists so that the seal cannot be trivially bypassed.
1583                  */
1584                 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1585                 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1586                         error = EACCES;
1587                         goto out;
1588                 }
1589         }
1590         maxprot &= cap_maxprot;
1591
1592         /* See comment in vn_mmap(). */
1593         if (
1594 #ifdef _LP64
1595             objsize > OFF_MAX ||
1596 #endif
1597             foff > OFF_MAX - objsize) {
1598                 error = EINVAL;
1599                 goto out;
1600         }
1601
1602 #ifdef MAC
1603         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1604         if (error != 0)
1605                 goto out;
1606 #endif
1607
1608         mtx_lock(&shm_timestamp_lock);
1609         vfs_timestamp(&shmfd->shm_atime);
1610         mtx_unlock(&shm_timestamp_lock);
1611         vm_object_reference(shmfd->shm_object);
1612
1613         if (shm_largepage(shmfd)) {
1614                 writecnt = false;
1615                 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1616                     maxprot, flags, foff, td);
1617         } else {
1618                 if (writecnt) {
1619                         vm_pager_update_writecount(shmfd->shm_object, 0,
1620                             objsize);
1621                 }
1622                 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1623                     shmfd->shm_object, foff, writecnt, td);
1624         }
1625         if (error != 0) {
1626                 if (writecnt)
1627                         vm_pager_release_writecount(shmfd->shm_object, 0,
1628                             objsize);
1629                 vm_object_deallocate(shmfd->shm_object);
1630         }
1631 out:
1632         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1633         return (error);
1634 }
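
/*
 * The seal handling above has the following userspace-visible effect
 * (illustrative sketch; assumes fd was opened O_RDWR and then sealed
 * with F_SEAL_WRITE):
 *
 *	mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);		(succeeds)
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);					(EACCES)
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE, fd, 0);				(succeeds)
 */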
1635
1636 static int
1637 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1638     struct thread *td)
1639 {
1640         struct shmfd *shmfd;
1641         int error;
1642
1643         error = 0;
1644         shmfd = fp->f_data;
1645         mtx_lock(&shm_timestamp_lock);
1646         /*
1647          * SUSv4 says that x bits of permission need not be affected.
1648          * Be consistent with our shm_open there.
1649          */
1650 #ifdef MAC
1651         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1652         if (error != 0)
1653                 goto out;
1654 #endif
1655         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1656             VADMIN, active_cred);
1657         if (error != 0)
1658                 goto out;
1659         shmfd->shm_mode = mode & ACCESSPERMS;
1660 out:
1661         mtx_unlock(&shm_timestamp_lock);
1662         return (error);
1663 }
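
/*
 * fchmod(2) on a shm descriptor lands here; per the note above, only the
 * access bits are kept (illustrative):
 *
 *	fchmod(fd, 04700);	stored as 0700: the setuid bit is discarded
 *				by the mode & ACCESSPERMS masking
 */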
1664
1665 static int
1666 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1667     struct thread *td)
1668 {
1669         struct shmfd *shmfd;
1670         int error;
1671
1672         error = 0;
1673         shmfd = fp->f_data;
1674         mtx_lock(&shm_timestamp_lock);
1675 #ifdef MAC
1676         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1677         if (error != 0)
1678                 goto out;
1679 #endif
1680         if (uid == (uid_t)-1)
1681                 uid = shmfd->shm_uid;
1682         if (gid == (gid_t)-1)
1683                 gid = shmfd->shm_gid;
1684         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1685             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1686             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1687                 goto out;
1688         shmfd->shm_uid = uid;
1689         shmfd->shm_gid = gid;
1690 out:
1691         mtx_unlock(&shm_timestamp_lock);
1692         return (error);
1693 }
1694
1695 /*
1696  * Helper routines to allow the backing object of a shared memory file
1697  * descriptor to be mapped in the kernel.
1698  */
1699 int
1700 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1701 {
1702         struct shmfd *shmfd;
1703         vm_offset_t kva, ofs;
1704         vm_object_t obj;
1705         int rv;
1706
1707         if (fp->f_type != DTYPE_SHM)
1708                 return (EINVAL);
1709         shmfd = fp->f_data;
1710         obj = shmfd->shm_object;
1711         VM_OBJECT_WLOCK(obj);
1712         /*
1713          * XXXRW: This validation is probably insufficient, and subject to
1714          * sign errors.  It should be fixed.
1715          */
1716         if (offset >= shmfd->shm_size ||
1717             offset + size > round_page(shmfd->shm_size)) {
1718                 VM_OBJECT_WUNLOCK(obj);
1719                 return (EINVAL);
1720         }
1721
1722         shmfd->shm_kmappings++;
1723         vm_object_reference_locked(obj);
1724         VM_OBJECT_WUNLOCK(obj);
1725
1726         /* Map the object into the kernel_map and wire it. */
1727         kva = vm_map_min(kernel_map);
1728         ofs = offset & PAGE_MASK;
1729         offset = trunc_page(offset);
1730         size = round_page(size + ofs);
1731         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1732             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1733             VM_PROT_READ | VM_PROT_WRITE, 0);
1734         if (rv == KERN_SUCCESS) {
1735                 rv = vm_map_wire(kernel_map, kva, kva + size,
1736                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1737                 if (rv == KERN_SUCCESS) {
1738                         *memp = (void *)(kva + ofs);
1739                         return (0);
1740                 }
1741                 vm_map_remove(kernel_map, kva, kva + size);
1742         } else
1743                 vm_object_deallocate(obj);
1744
1745         /* On failure, drop our mapping reference. */
1746         VM_OBJECT_WLOCK(obj);
1747         shmfd->shm_kmappings--;
1748         VM_OBJECT_WUNLOCK(obj);
1749
1750         return (vm_mmap_to_errno(rv));
1751 }
1752
1753 /*
1754  * We require the caller to unmap the entire entry.  This allows us to
1755  * safely decrement shm_kmappings when a mapping is removed.
1756  */
1757 int
1758 shm_unmap(struct file *fp, void *mem, size_t size)
1759 {
1760         struct shmfd *shmfd;
1761         vm_map_entry_t entry;
1762         vm_offset_t kva, ofs;
1763         vm_object_t obj;
1764         vm_pindex_t pindex;
1765         vm_prot_t prot;
1766         boolean_t wired;
1767         vm_map_t map;
1768         int rv;
1769
1770         if (fp->f_type != DTYPE_SHM)
1771                 return (EINVAL);
1772         shmfd = fp->f_data;
1773         kva = (vm_offset_t)mem;
1774         ofs = kva & PAGE_MASK;
1775         kva = trunc_page(kva);
1776         size = round_page(size + ofs);
1777         map = kernel_map;
1778         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1779             &obj, &pindex, &prot, &wired);
1780         if (rv != KERN_SUCCESS)
1781                 return (EINVAL);
1782         if (entry->start != kva || entry->end != kva + size) {
1783                 vm_map_lookup_done(map, entry);
1784                 return (EINVAL);
1785         }
1786         vm_map_lookup_done(map, entry);
1787         if (obj != shmfd->shm_object)
1788                 return (EINVAL);
1789         vm_map_remove(map, kva, kva + size);
1790         VM_OBJECT_WLOCK(obj);
1791         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1792         shmfd->shm_kmappings--;
1793         VM_OBJECT_WUNLOCK(obj);
1794         return (0);
1795 }
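
/*
 * Minimal in-kernel usage sketch for the two helpers above (illustrative;
 * assumes 'fp' is a held DTYPE_SHM file and the object is at least one
 * page long):
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 *
 * The pages backing 'mem' stay wired until shm_unmap() is called with
 * exactly the address and size that shm_map() returned.
 */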
1796
1797 static int
1798 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1799 {
1800         const char *path, *pr_path;
1801         size_t pr_pathlen;
1802         bool visible;
1803
1804         sx_assert(&shm_dict_lock, SA_LOCKED);
1805         kif->kf_type = KF_TYPE_SHM;
1806         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1807         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1808         if (shmfd->shm_path != NULL) {
1809                 path = shmfd->shm_path;
1810                 pr_path = curthread->td_ucred->cr_prison->pr_path;
1811                 if (strcmp(pr_path, "/") != 0) {
1812                         /* Return the jail-rooted pathname. */
1813                         pr_pathlen = strlen(pr_path);
1814                         visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1815                             path[pr_pathlen] == '/';
1816                         if (list && !visible)
1817                                 return (EPERM);
1818                         if (visible)
1819                                 path += pr_pathlen;
1820                 }
1821                 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1822         }
1825         return (0);
1826 }
1827
1828 static int
1829 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1830     struct filedesc *fdp __unused)
1831 {
1832         int res;
1833
1834         sx_slock(&shm_dict_lock);
1835         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1836         sx_sunlock(&shm_dict_lock);
1837         return (res);
1838 }
1839
1840 static int
1841 shm_add_seals(struct file *fp, int seals)
1842 {
1843         struct shmfd *shmfd;
1844         void *rl_cookie;
1845         vm_ooffset_t writemappings;
1846         int error, nseals;
1847
1848         error = 0;
1849         shmfd = fp->f_data;
1850         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1851             &shmfd->shm_mtx);
1852
1853         /* Even already-set seals should result in EPERM. */
1854         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1855                 error = EPERM;
1856                 goto out;
1857         }
1858         nseals = seals & ~shmfd->shm_seals;
1859         if ((nseals & F_SEAL_WRITE) != 0) {
1860                 if (shm_largepage(shmfd)) {
1861                         error = ENOTSUP;
1862                         goto out;
1863                 }
1864
1865                 /*
1866                  * The rangelock above prevents writable mappings from being
1867                  * added after we've started applying seals.  The RLOCK here
1868                  * is to avoid torn reads on ILP32 arches as unmapping/reducing
1869                  * writemappings will be done without a rangelock.
1870                  */
1871                 VM_OBJECT_RLOCK(shmfd->shm_object);
1872                 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1873                 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1874                 /* kmappings are also writable */
1875                 if (writemappings > 0) {
1876                         error = EBUSY;
1877                         goto out;
1878                 }
1879         }
1880         shmfd->shm_seals |= nseals;
1881 out:
1882         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1883         return (error);
1884 }
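
/*
 * Userspace-visible sealing rules implemented above, sketched with
 * fcntl(2) (illustrative only):
 *
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	EBUSY while any writable
 *						shared mapping exists
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL);	locks the seal set
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW);	EPERM once F_SEAL_SEAL is set
 */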
1885
1886 static int
1887 shm_get_seals(struct file *fp, int *seals)
1888 {
1889         struct shmfd *shmfd;
1890
1891         shmfd = fp->f_data;
1892         *seals = shmfd->shm_seals;
1893         return (0);
1894 }
1895
1896 static int
1897 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1898 {
1899         vm_object_t object;
1900         vm_pindex_t pistart, pi, piend;
1901         vm_ooffset_t off, len;
1902         int startofs, endofs, end;
1903         int error;
1904
1905         off = *offset;
1906         len = *length;
1907         KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
1908         if (off + len > shmfd->shm_size)
1909                 len = shmfd->shm_size - off;
1910         object = shmfd->shm_object;
1911         startofs = off & PAGE_MASK;
1912         endofs = (off + len) & PAGE_MASK;
1913         pistart = OFF_TO_IDX(off);
1914         piend = OFF_TO_IDX(off + len);
1915         pi = OFF_TO_IDX(off + PAGE_MASK);
1916         error = 0;
1917
1918         /* Handle the case when offset is on or beyond shm size. */
1919         if ((off_t)len <= 0) {
1920                 *length = 0;
1921                 return (0);
1922         }
1923
1924         VM_OBJECT_WLOCK(object);
1925
1926         if (startofs != 0) {
1927                 end = pistart != piend ? PAGE_SIZE : endofs;
1928                 error = shm_partial_page_invalidate(object, pistart, startofs,
1929                     end);
1930                 if (error)
1931                         goto out;
1932                 off += end - startofs;
1933                 len -= end - startofs;
1934         }
1935
1936         if (pi < piend) {
1937                 vm_object_page_remove(object, pi, piend, 0);
1938                 off += IDX_TO_OFF(piend - pi);
1939                 len -= IDX_TO_OFF(piend - pi);
1940         }
1941
1942         if (endofs != 0 && pistart != piend) {
1943                 error = shm_partial_page_invalidate(object, piend, 0, endofs);
1944                 if (error)
1945                         goto out;
1946                 off += endofs;
1947                 len -= endofs;
1948         }
1949
1950 out:
1951         VM_OBJECT_WUNLOCK(object);
1952         *offset = off;
1953         *length = len;
1954         return (error);
1955 }
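
/*
 * Worked example for the splitting above (illustrative, 4K pages): for
 * off = 0x1800 and len = 0x3000, startofs = endofs = 0x800, pistart = 1,
 * pi = 2, and piend = 4.  The tail of page 1 ([0x1800, 0x2000)) and the
 * head of page 4 ([0x4000, 0x4800)) are zeroed with
 * shm_partial_page_invalidate(), while pages [2, 4) are removed whole.
 */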
1956
1957 static int
1958 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
1959     struct ucred *active_cred, struct thread *td)
1960 {
1961         void *rl_cookie;
1962         struct shmfd *shmfd;
1963         off_t off, len;
1964         int error;
1965
1966         /* This assumes that the caller already checked for overflow. */
1967         error = EINVAL;
1968         shmfd = fp->f_data;
1969         off = *offset;
1970         len = *length;
1971
1972         if (cmd != SPACECTL_DEALLOC || off < 0 || len <= 0 ||
1973             len > OFF_MAX - off || flags != 0)
1974                 return (EINVAL);
1975
1976         rl_cookie = rangelock_wlock(&shmfd->shm_rl, off, off + len,
1977             &shmfd->shm_mtx);
1978         switch (cmd) {
1979         case SPACECTL_DEALLOC:
1980                 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
1981                         error = EPERM;
1982                         break;
1983                 }
1984                 error = shm_deallocate(shmfd, &off, &len, flags);
1985                 *offset = off;
1986                 *length = len;
1987                 break;
1988         default:
1989                 __assert_unreachable();
1990         }
1991         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1992         return (error);
1993 }
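
/*
 * This is reached from userspace through fspacectl(2); a minimal sketch
 * (illustrative; 'off' and 'len' are the caller's range, error handling
 * elided):
 *
 *	struct spacectl_range rqsr = { .r_offset = off, .r_len = len };
 *
 *	fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, &rqsr);
 *
 * On return the range describes whatever remains to be deallocated,
 * which is zero-length on full success.
 */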
1994
1996 static int
1997 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
1998 {
1999         void *rl_cookie;
2000         struct shmfd *shmfd;
2001         size_t size;
2002         int error;
2003
2004         /* This assumes that the caller already checked for overflow. */
2005         error = 0;
2006         shmfd = fp->f_data;
2007         size = offset + len;
2008
2009         /*
2010          * Just grab the rangelock for the range that we may be attempting to
2011          * grow, rather than blocking read/write for regions we won't be
2012          * touching while this (potential) resize is in progress.  Other
2013          * attempts to resize the shmfd will have to take a write lock from 0 to
2014          * OFF_MAX, so this being potentially beyond the current usable range of
2015          * the shmfd is not necessarily a concern.  If other mechanisms are
2016          * added to grow a shmfd, this may need to be re-evaluated.
2017          */
2018         rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
2019             &shmfd->shm_mtx);
2020         if (size > shmfd->shm_size)
2021                 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2022         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2023         /* Translate to posix_fallocate(2) return value as needed. */
2024         if (error == ENOMEM)
2025                 error = ENOSPC;
2026         return (error);
2027 }
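
/*
 * The typical route into the function above is posix_fallocate(2), which
 * returns an error number rather than setting errno.  Illustrative
 * sketch:
 *
 *	int error, fd;
 *
 *	fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	error = posix_fallocate(fd, 0, 2 * 1024 * 1024);
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");
 */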
2028
2029 static int
2030 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2031 {
2032         struct shm_mapping *shmm;
2033         struct sbuf sb;
2034         struct kinfo_file kif;
2035         u_long i;
2036         int error, error2;
2037
2038         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2039         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2040         error = 0;
2041         sx_slock(&shm_dict_lock);
2042         for (i = 0; i < shm_hash + 1; i++) {
2043                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2044                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2045                             &kif, true);
2046                         if (error == EPERM) {
2047                                 error = 0;
2048                                 continue;
2049                         }
2050                         if (error != 0)
2051                                 break;
2052                         pack_kinfo(&kif);
2053                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2054                             0 : ENOMEM;
2055                         if (error != 0)
2056                                 break;
2057                 }
2058         }
2059         sx_sunlock(&shm_dict_lock);
2060         error2 = sbuf_finish(&sb);
2061         sbuf_delete(&sb);
2062         return (error != 0 ? error : error2);
2063 }
2064
2065 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2066     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2067     NULL, 0, sysctl_posix_shm_list, "",
2068     "POSIX SHM list");
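
/*
 * Consumers such as posixshmcontrol(1) read this node as an opaque
 * stream of packed struct kinfo_file records; an illustrative sketch:
 *
 *	size_t len = 0;
 *	void *buf;
 *
 *	sysctlbyname("kern.ipc.posix_shm_list", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("kern.ipc.posix_shm_list", buf, &len, NULL, 0);
 *
 * A robust consumer retries with a larger buffer if the second call
 * fails, since the list can grow between the two calls.
 */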
2069
2070 int
2071 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2072     struct filecaps *caps)
2073 {
2074
2075         return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2076 }
2077
2078 /*
2079  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2080  * caller, and libc will enforce it for the traditional shm_open() call.  This
2081  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
2082  * interface also includes a 'name' argument that is currently unused, but could
2083  * potentially be exported later via some interface for debugging purposes.
2084  * From the kernel's perspective, it is optional.  Individual consumers like
2085  * memfd_create() may require it in order to be compatible with other systems
2086  * implementing the same function.
2087  */
2088 int
2089 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2090 {
2091
2092         return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2093             uap->shmflags, NULL, uap->name));
2094 }
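
/*
 * An illustrative sketch of how a libc-level consumer might use this
 * entry point; memfd_create(3), for instance, is expected to pass an
 * anonymous object, its own CLOEXEC policy, and a debugging name
 * ('shmflags' and 'name' below are placeholders for the caller's
 * choices):
 *
 *	fd = shm_open2(SHM_ANON, O_RDWR | O_CLOEXEC, 0, shmflags, name);
 */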