/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */
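
/*
 * Typical userland usage (a minimal sketch; the name and size are
 * illustrative only):
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	...
 *	shm_unlink("/myshm");
 */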

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
        char            *sm_path;
        Fnv32_t         sm_fnv;
        struct shmfd    *sm_shmfd;
        LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr *shm_ino_unr;
static dev_t shm_dev_ino;

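/*
 * Hash a path's FNV-1 value into one of the shm_dictionary buckets;
 * shm_hash is the bucket-count-minus-one mask set by hashinit().
 */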
#define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])

static int      shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
static void     shm_init(void *arg);
static void     shm_drop(struct shmfd *shmfd);
static struct shmfd *shm_hold(struct shmfd *shmfd);
static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int      shm_dotruncate(struct shmfd *shmfd, off_t length);

static fo_rdwr_t        shm_read;
static fo_rdwr_t        shm_write;
static fo_truncate_t    shm_truncate;
static fo_stat_t        shm_stat;
static fo_close_t       shm_close;
static fo_chmod_t       shm_chmod;
static fo_chown_t       shm_chown;
static fo_seek_t        shm_seek;
static fo_fill_kinfo_t  shm_fill_kinfo;

/* File descriptor operations. */
static struct fileops shm_ops = {
        .fo_read = shm_read,
        .fo_write = shm_write,
        .fo_truncate = shm_truncate,
        .fo_ioctl = invfo_ioctl,
        .fo_poll = invfo_poll,
        .fo_kqfilter = invfo_kqfilter,
        .fo_stat = shm_stat,
        .fo_close = shm_close,
        .fo_chmod = shm_chmod,
        .fo_chown = shm_chown,
        .fo_sendfile = vn_sendfile,
        .fo_seek = shm_seek,
        .fo_fill_kinfo = shm_fill_kinfo,
        .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

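/*
 * Transfer at most one page of data between the VM object backing the
 * shm and the uio, starting at the uio's current offset.
 */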
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
        vm_page_t m;
        vm_pindex_t idx;
        size_t tlen;
        int error, offset, rv;

        idx = OFF_TO_IDX(uio->uio_offset);
        offset = uio->uio_offset & PAGE_MASK;
        tlen = MIN(PAGE_SIZE - offset, len);

        VM_OBJECT_WLOCK(obj);

        /*
         * Read I/O without either a corresponding resident page or swap
         * page: use zero_region.  This is intended to avoid instantiating
         * pages on read from a sparse region.
         */
        if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
            !vm_pager_has_page(obj, idx, NULL, NULL)) {
                VM_OBJECT_WUNLOCK(obj);
                return (uiomove(__DECONST(void *, zero_region), tlen, uio));
        }

        /*
         * Parallel reads of the page content from disk are prevented
         * by exclusive busy.
         *
         * Although the tmpfs vnode lock may be held here (this routine
         * also serves tmpfs via uiomove_object()), it is nonetheless
         * safe to sleep waiting for a free page.  The pageout daemon
         * does not need to acquire that lock to page out obj's pages
         * because obj is an OBJT_SWAP type object.
         */
        m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
        if (m->valid != VM_PAGE_BITS_ALL) {
                if (vm_pager_has_page(obj, idx, NULL, NULL)) {
                        rv = vm_pager_get_pages(obj, &m, 1, 0);
                        m = vm_page_lookup(obj, idx);
                        if (m == NULL) {
                                printf(
                    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
                                    obj, idx, rv);
                                VM_OBJECT_WUNLOCK(obj);
                                return (EIO);
                        }
                        if (rv != VM_PAGER_OK) {
                                printf(
            "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
                                    obj, idx, m->valid, rv);
                                vm_page_lock(m);
                                vm_page_free(m);
                                vm_page_unlock(m);
                                VM_OBJECT_WUNLOCK(obj);
                                return (EIO);
                        }
                } else
                        vm_page_zero_invalid(m, TRUE);
        }
        vm_page_xunbusy(m);
        vm_page_lock(m);
        vm_page_hold(m);
        if (m->queue == PQ_NONE) {
                vm_page_deactivate(m);
        } else {
                /* Requeue to maintain LRU ordering. */
                vm_page_requeue(m);
        }
        vm_page_unlock(m);
        VM_OBJECT_WUNLOCK(obj);
        error = uiomove_fromphys(&m, offset, tlen, uio);
        if (uio->uio_rw == UIO_WRITE && error == 0) {
                VM_OBJECT_WLOCK(obj);
                vm_page_dirty(m);
                vm_pager_page_unswapped(m);
                VM_OBJECT_WUNLOCK(obj);
        }
        vm_page_lock(m);
        vm_page_unhold(m);
        vm_page_unlock(m);

        return (error);
}

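/*
 * Copy data between a VM object and a uio, one page at a time, clamping
 * the transfer at obj_size and stopping once no forward progress is made.
 */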
int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
        ssize_t resid;
        size_t len;
        int error;

        error = 0;
        while ((resid = uio->uio_resid) > 0) {
                if (obj_size <= uio->uio_offset)
                        break;
                len = MIN(obj_size - uio->uio_offset, resid);
                if (len == 0)
                        break;
                error = uiomove_object_page(obj, len, uio);
                if (error != 0 || resid == uio->uio_resid)
                        break;
        }
        return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
        struct shmfd *shmfd;
        off_t foffset;
        int error;

        shmfd = fp->f_data;
        foffset = foffset_lock(fp, 0);
        error = 0;
        switch (whence) {
        case L_INCR:
                if (foffset < 0 ||
                    (offset > 0 && foffset > OFF_MAX - offset)) {
                        error = EOVERFLOW;
                        break;
                }
                offset += foffset;
                break;
        case L_XTND:
                if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
                        error = EOVERFLOW;
                        break;
                }
                offset += shmfd->shm_size;
                break;
        case L_SET:
                break;
        default:
                error = EINVAL;
        }
        if (error == 0) {
                if (offset < 0 || offset > shmfd->shm_size)
                        error = EINVAL;
                else
                        td->td_uretoff.tdu_off = offset;
        }
        foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
        return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        /*
         * Perform the MAC check before any locks are taken so that an
         * error return cannot leak the range lock or the file offset
         * lock; this mirrors the ordering in shm_write().
         */
        error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
            uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct shmfd *shmfd;
        void *rl_cookie;
        int error;

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        foffset_lock_uio(fp, uio, flags);
        if ((flags & FOF_OFFSET) == 0) {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
                    &shmfd->shm_mtx);
        } else {
                rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
                    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
        }

        error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
        rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
        foffset_unlock_uio(fp, uio, flags);
        return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;
#ifdef MAC
        error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif
        return (shm_dotruncate(shmfd, length));
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
#ifdef MAC
        int error;
#endif

        shmfd = fp->f_data;

#ifdef MAC
        error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
        if (error)
                return (error);
#endif

        /*
         * Attempt to return sanish values for fstat() on a memory file
         * descriptor.
         */
        bzero(sb, sizeof(*sb));
        sb->st_blksize = PAGE_SIZE;
        sb->st_size = shmfd->shm_size;
        sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
        mtx_lock(&shm_timestamp_lock);
        sb->st_atim = shmfd->shm_atime;
        sb->st_ctim = shmfd->shm_ctime;
        sb->st_mtim = shmfd->shm_mtime;
        sb->st_birthtim = shmfd->shm_birthtime;
        sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
        sb->st_uid = shmfd->shm_uid;
        sb->st_gid = shmfd->shm_gid;
        mtx_unlock(&shm_timestamp_lock);
        sb->st_dev = shm_dev_ino;
        sb->st_ino = shmfd->shm_ino;

        return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
        struct shmfd *shmfd;

        shmfd = fp->f_data;
        fp->f_data = NULL;
        shm_drop(shmfd);

        return (0);
}

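/*
 * Resize the backing VM object to match the requested length, adjusting
 * the swap reservation charged to the object's credential.  When
 * shrinking, the tail of the last remaining partial page is zeroed so
 * that a later grow exposes zeroes rather than stale data.
 */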
static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
        vm_object_t object;
        vm_page_t m, ma[1];
        vm_pindex_t idx, nobjsize;
        vm_ooffset_t delta;
        int base, rv;

        object = shmfd->shm_object;
        VM_OBJECT_WLOCK(object);
        if (length == shmfd->shm_size) {
                VM_OBJECT_WUNLOCK(object);
                return (0);
        }
        nobjsize = OFF_TO_IDX(length + PAGE_MASK);

        /* Are we shrinking?  If so, trim the end. */
        if (length < shmfd->shm_size) {
                /*
                 * Disallow any requests to shrink the size if this
                 * object is mapped into the kernel.
                 */
                if (shmfd->shm_kmappings > 0) {
                        VM_OBJECT_WUNLOCK(object);
                        return (EBUSY);
                }

                /*
                 * Zero the truncated part of the last page.
                 */
                base = length & PAGE_MASK;
                if (base != 0) {
                        idx = OFF_TO_IDX(length);
retry:
                        m = vm_page_lookup(object, idx);
                        if (m != NULL) {
                                if (vm_page_sleep_if_busy(m, "shmtrc"))
                                        goto retry;
                        } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
                                m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
                                if (m == NULL) {
                                        VM_OBJECT_WUNLOCK(object);
                                        VM_WAIT;
                                        VM_OBJECT_WLOCK(object);
                                        goto retry;
                                } else if (m->valid != VM_PAGE_BITS_ALL) {
                                        ma[0] = m;
                                        rv = vm_pager_get_pages(object, ma, 1,
                                            0);
                                        m = vm_page_lookup(object, idx);
                                } else
                                        /* A cached page was reactivated. */
                                        rv = VM_PAGER_OK;
                                vm_page_lock(m);
                                if (rv == VM_PAGER_OK) {
                                        vm_page_deactivate(m);
                                        vm_page_unlock(m);
                                        vm_page_xunbusy(m);
                                } else {
                                        vm_page_free(m);
                                        vm_page_unlock(m);
                                        VM_OBJECT_WUNLOCK(object);
                                        return (EIO);
                                }
                        }
                        if (m != NULL) {
                                pmap_zero_page_area(m, base, PAGE_SIZE - base);
                                KASSERT(m->valid == VM_PAGE_BITS_ALL,
                                    ("shm_dotruncate: page %p is invalid", m));
                                vm_page_dirty(m);
                                vm_pager_page_unswapped(m);
                        }
                }
                delta = ptoa(object->size - nobjsize);

                /* Toss in memory pages. */
                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);

                /* Toss pages from swap. */
                if (object->type == OBJT_SWAP)
                        swap_pager_freespace(object, nobjsize, delta);

                /* Free the swap accounted for shm */
                swap_release_by_cred(delta, object->cred);
                object->charge -= delta;
        } else {
                /* Attempt to reserve the swap */
                delta = ptoa(nobjsize - object->size);
                if (!swap_reserve_by_cred(delta, object->cred)) {
                        VM_OBJECT_WUNLOCK(object);
                        return (ENOMEM);
                }
                object->charge += delta;
        }
        shmfd->shm_size = length;
        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_ctime);
        shmfd->shm_mtime = shmfd->shm_ctime;
        mtx_unlock(&shm_timestamp_lock);
        object->size = nobjsize;
        VM_OBJECT_WUNLOCK(object);
        return (0);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
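/*
 * Note: the backing object is created unnamed as OBJT_DEFAULT; the VM
 * system converts it to an OBJT_SWAP object when its pages are first
 * paged out, which is why shm_dotruncate() checks the object type
 * before freeing swap space.
 */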
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
        struct shmfd *shmfd;
        int ino;

        shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
        shmfd->shm_size = 0;
        shmfd->shm_uid = ucred->cr_uid;
        shmfd->shm_gid = ucred->cr_gid;
        shmfd->shm_mode = mode;
        shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
            shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
        KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
        shmfd->shm_object->pg_color = 0;
        VM_OBJECT_WLOCK(shmfd->shm_object);
        vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
        VM_OBJECT_WUNLOCK(shmfd->shm_object);
        vfs_timestamp(&shmfd->shm_birthtime);
        shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
            shmfd->shm_birthtime;
        ino = alloc_unr(shm_ino_unr);
        if (ino == -1)
                shmfd->shm_ino = 0;
        else
                shmfd->shm_ino = ino;
        refcount_init(&shmfd->shm_refs, 1);
        mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
        rangelock_init(&shmfd->shm_rl);
#ifdef MAC
        mac_posixshm_init(shmfd);
        mac_posixshm_create(ucred, shmfd);
#endif

        return (shmfd);
}

static struct shmfd *
shm_hold(struct shmfd *shmfd)
{

        refcount_acquire(&shmfd->shm_refs);
        return (shmfd);
}

static void
shm_drop(struct shmfd *shmfd)
{

        if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
                mac_posixshm_destroy(shmfd);
#endif
                rangelock_destroy(&shmfd->shm_rl);
                mtx_destroy(&shmfd->shm_mtx);
                vm_object_deallocate(shmfd->shm_object);
                if (shmfd->shm_ino != 0)
                        free_unr(shm_ino_unr, shmfd->shm_ino);
                free(shmfd, M_SHMFD);
        }
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
static int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
        accmode_t accmode;
        int error;

        accmode = 0;
        if (flags & FREAD)
                accmode |= VREAD;
        if (flags & FWRITE)
                accmode |= VWRITE;
        mtx_lock(&shm_timestamp_lock);
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
            accmode, ucred, NULL);
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

        mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
        sx_init(&shm_dict_lock, "shm dictionary");
        shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
        shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
        KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
        shm_dev_ino = devfs_alloc_cdp_inode();
        KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
        struct shm_mapping *map;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0)
                        return (map->sm_shmfd);
        }

        return (NULL);
}

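/*
 * Insert a mapping from path to shmfd into the dictionary.  The
 * dictionary takes ownership of the malloc'd path string, which is not
 * freed until shm_remove().
 */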
static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
        struct shm_mapping *map;

        map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
        map->sm_path = path;
        map->sm_fnv = fnv;
        map->sm_shmfd = shm_hold(shmfd);
        shmfd->shm_path = path;
        LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
        struct shm_mapping *map;
        int error;

        LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
                if (map->sm_fnv != fnv)
                        continue;
                if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
                        error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
                        if (error)
                                return (error);
#endif
                        error = shm_access(map->sm_shmfd, ucred,
                            FREAD | FWRITE);
                        if (error)
                                return (error);
                        map->sm_shmfd->shm_path = NULL;
                        LIST_REMOVE(map, sm_link);
                        shm_drop(map->sm_shmfd);
                        free(map->sm_path, M_SHMFD);
                        free(map, M_SHMFD);
                        return (0);
                }
        }

        return (ENOENT);
}

/* System calls. */
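/*
 * shm_open(2): open or, with O_CREAT, create a named POSIX shared
 * memory object, or an anonymous one if path is SHM_ANON.  Only
 * O_RDONLY/O_RDWR, O_CREAT, O_EXCL, O_TRUNC and O_CLOEXEC are
 * accepted; the returned descriptor is always close-on-exec.
 */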
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{
        struct filedesc *fdp;
        struct shmfd *shmfd;
        struct file *fp;
        char *path;
        Fnv32_t fnv;
        mode_t cmode;
        int fd, error;

#ifdef CAPABILITY_MODE
        /*
         * shm_open(2) is only allowed for anonymous objects.
         */
        if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
                return (ECAPMODE);
#endif

        if ((uap->flags & O_ACCMODE) != O_RDONLY &&
            (uap->flags & O_ACCMODE) != O_RDWR)
                return (EINVAL);

        if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC |
            O_CLOEXEC)) != 0)
                return (EINVAL);

        fdp = td->td_proc->p_fd;
        cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;

        error = falloc(td, &fp, &fd, O_CLOEXEC);
        if (error)
                return (error);

        /* A SHM_ANON path pointer creates an anonymous object. */
        if (uap->path == SHM_ANON) {
                /* A read-only anonymous object is pointless. */
                if ((uap->flags & O_ACCMODE) == O_RDONLY) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (EINVAL);
                }
                shmfd = shm_alloc(td->td_ucred, cmode);
        } else {
                path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
                error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
#ifdef KTRACE
                if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
                        ktrnamei(path);
#endif
                /* Require paths to start with a '/' character. */
                if (error == 0 && path[0] != '/')
                        error = EINVAL;
                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        free(path, M_SHMFD);
                        return (error);
                }

                fnv = fnv_32_str(path, FNV1_32_INIT);
                sx_xlock(&shm_dict_lock);
                shmfd = shm_lookup(path, fnv);
                if (shmfd == NULL) {
                        /* Object does not yet exist, create it if requested. */
                        if (uap->flags & O_CREAT) {
#ifdef MAC
                                error = mac_posixshm_check_create(td->td_ucred,
                                    path);
                                if (error == 0) {
#endif
                                        shmfd = shm_alloc(td->td_ucred, cmode);
                                        shm_insert(path, fnv, shmfd);
#ifdef MAC
                                }
#endif
                        } else {
                                free(path, M_SHMFD);
                                error = ENOENT;
                        }
                } else {
                        /*
                         * Object already exists, obtain a new
                         * reference if requested and permitted.
                         */
                        free(path, M_SHMFD);
                        if ((uap->flags & (O_CREAT | O_EXCL)) ==
                            (O_CREAT | O_EXCL))
                                error = EEXIST;
                        else {
#ifdef MAC
                                error = mac_posixshm_check_open(td->td_ucred,
                                    shmfd, FFLAGS(uap->flags & O_ACCMODE));
                                if (error == 0)
#endif
                                error = shm_access(shmfd, td->td_ucred,
                                    FFLAGS(uap->flags & O_ACCMODE));
                        }

                        /*
                         * Truncate the file back to zero length if
                         * O_TRUNC was specified and the object was
                         * opened with read/write.
                         */
                        if (error == 0 &&
                            (uap->flags & (O_ACCMODE | O_TRUNC)) ==
                            (O_RDWR | O_TRUNC)) {
#ifdef MAC
                                error = mac_posixshm_check_truncate(
                                        td->td_ucred, fp->f_cred, shmfd);
                                if (error == 0)
#endif
                                        shm_dotruncate(shmfd, 0);
                        }
                        if (error == 0)
                                shm_hold(shmfd);
                }
                sx_xunlock(&shm_dict_lock);

                if (error) {
                        fdclose(td, fp, fd);
                        fdrop(fp, td);
                        return (error);
                }
        }

        finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

        td->td_retval[0] = fd;
        fdrop(fp, td);

        return (0);
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
        char *path;
        Fnv32_t fnv;
        int error;

        path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
        if (error) {
                free(path, M_TEMP);
                return (error);
        }
#ifdef KTRACE
        if (KTRPOINT(curthread, KTR_NAMEI))
                ktrnamei(path);
#endif
        fnv = fnv_32_str(path, FNV1_32_INIT);
        sx_xlock(&shm_dict_lock);
        error = shm_remove(path, fnv, td->td_ucred);
        sx_xunlock(&shm_dict_lock);
        free(path, M_TEMP);

        return (error);
}

/*
 * mmap() helper to validate mmap() requests against shm object state
 * and give mmap() the vm_object to use for the mapping.
 */
int
shm_mmap(struct shmfd *shmfd, vm_size_t objsize, vm_ooffset_t foff,
    vm_object_t *obj)
{

        /*
         * XXXRW: This validation is probably insufficient, and subject to
         * sign errors.  It should be fixed.
         */
        if (foff >= shmfd->shm_size ||
            foff + objsize > round_page(shmfd->shm_size))
                return (EINVAL);

        mtx_lock(&shm_timestamp_lock);
        vfs_timestamp(&shmfd->shm_atime);
        mtx_unlock(&shm_timestamp_lock);
        vm_object_reference(shmfd->shm_object);
        *obj = shmfd->shm_object;
        return (0);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        int error;

        error = 0;
        shmfd = fp->f_data;
        mtx_lock(&shm_timestamp_lock);
        /*
         * SUSv4 says that x bits of permission need not be affected.
         * Be consistent with our shm_open there.
         */
#ifdef MAC
        error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
        if (error != 0)
                goto out;
#endif
        error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
            shmfd->shm_gid, VADMIN, active_cred, NULL);
        if (error != 0)
                goto out;
        shmfd->shm_mode = mode & ACCESSPERMS;
out:
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
        struct shmfd *shmfd;
        int error;

        error = 0;
        shmfd = fp->f_data;
        mtx_lock(&shm_timestamp_lock);
#ifdef MAC
        error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
        if (error != 0)
                goto out;
#endif
        if (uid == (uid_t)-1)
                uid = shmfd->shm_uid;
        if (gid == (gid_t)-1)
                gid = shmfd->shm_gid;
        if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
            (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
            (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
                goto out;
        shmfd->shm_uid = uid;
        shmfd->shm_gid = gid;
out:
        mtx_unlock(&shm_timestamp_lock);
        return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
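/*
 * Example in-kernel usage (a minimal sketch; error handling and the
 * mapped size are illustrative only):
 *
 *	void *mem;
 *
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		... access PAGE_SIZE bytes at mem ...
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */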
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
        struct shmfd *shmfd;
        vm_offset_t kva, ofs;
        vm_object_t obj;
        int rv;

        if (fp->f_type != DTYPE_SHM)
                return (EINVAL);
        shmfd = fp->f_data;
        obj = shmfd->shm_object;
        VM_OBJECT_WLOCK(obj);
        /*
         * XXXRW: This validation is probably insufficient, and subject to
         * sign errors.  It should be fixed.
         */
        if (offset >= shmfd->shm_size ||
            offset + size > round_page(shmfd->shm_size)) {
                VM_OBJECT_WUNLOCK(obj);
                return (EINVAL);
        }

        shmfd->shm_kmappings++;
        vm_object_reference_locked(obj);
        VM_OBJECT_WUNLOCK(obj);

        /* Map the object into the kernel_map and wire it. */
        kva = vm_map_min(kernel_map);
        ofs = offset & PAGE_MASK;
        offset = trunc_page(offset);
        size = round_page(size + ofs);
        rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
            VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_READ | VM_PROT_WRITE, 0);
        if (rv == KERN_SUCCESS) {
                rv = vm_map_wire(kernel_map, kva, kva + size,
                    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
                if (rv == KERN_SUCCESS) {
                        *memp = (void *)(kva + ofs);
                        return (0);
                }
                vm_map_remove(kernel_map, kva, kva + size);
        } else
                vm_object_deallocate(obj);

        /* On failure, drop our mapping reference. */
        VM_OBJECT_WLOCK(obj);
        shmfd->shm_kmappings--;
        VM_OBJECT_WUNLOCK(obj);

        return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
        struct shmfd *shmfd;
        vm_map_entry_t entry;
        vm_offset_t kva, ofs;
        vm_object_t obj;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        vm_map_t map;
        int rv;

        if (fp->f_type != DTYPE_SHM)
                return (EINVAL);
        shmfd = fp->f_data;
        kva = (vm_offset_t)mem;
        ofs = kva & PAGE_MASK;
        kva = trunc_page(kva);
        size = round_page(size + ofs);
        map = kernel_map;
        rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
            &obj, &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (EINVAL);
        if (entry->start != kva || entry->end != kva + size) {
                vm_map_lookup_done(map, entry);
                return (EINVAL);
        }
        vm_map_lookup_done(map, entry);
        if (obj != shmfd->shm_object)
                return (EINVAL);
        vm_map_remove(map, kva, kva + size);
        VM_OBJECT_WLOCK(obj);
        KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
        shmfd->shm_kmappings--;
        VM_OBJECT_WUNLOCK(obj);
        return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
        struct shmfd *shmfd;

        kif->kf_type = KF_TYPE_SHM;
        shmfd = fp->f_data;

        mtx_lock(&shm_timestamp_lock);
        kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;    /* XXX */
        mtx_unlock(&shm_timestamp_lock);
        kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
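        /*
         * shm_path is only cleared while shm_dict_lock is held
         * exclusively (see shm_remove()), so recheck it under the lock;
         * the unlocked test just avoids taking the lock for anonymous
         * objects, which never have a path.
         */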
        if (shmfd->shm_path != NULL) {
                sx_slock(&shm_dict_lock);
                if (shmfd->shm_path != NULL)
                        strlcpy(kif->kf_path, shmfd->shm_path,
                            sizeof(kif->kf_path));
                sx_sunlock(&shm_dict_lock);
        }
        return (0);
}