]> CyberLeo.Net >> Repos - FreeBSD/stable/9.git/blob - sys/kern/uipc_shm.c
MFC r229821
[FreeBSD/stable/9.git] / sys / kern / uipc_shm.c
1 /*-
2  * Copyright (c) 2006, 2011 Robert N. M. Watson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /*
28  * Support for shared swap-backed anonymous memory objects via
29  * shm_open(2) and shm_unlink(2).  While most of the implementation is
30  * here, vm_mmap.c contains mapping logic changes.
31  *
32  * TODO:
33  *
34  * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
35  *     and ipcrm(1) be expanded or should new tools to manage both POSIX
36  *     kernel semaphores and POSIX shared memory be written?
37  *
38  * (2) Add support for this file type to fstat(1).
39  *
40  * (3) Resource limits?  Does this need its own resource limits or are the
41  *     existing limits in mmap(2) sufficient?
42  */
43
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include "opt_capsicum.h"
48
49 #include <sys/param.h>
50 #include <sys/capability.h>
51 #include <sys/fcntl.h>
52 #include <sys/file.h>
53 #include <sys/filedesc.h>
54 #include <sys/fnv_hash.h>
55 #include <sys/kernel.h>
56 #include <sys/lock.h>
57 #include <sys/malloc.h>
58 #include <sys/mman.h>
59 #include <sys/mutex.h>
60 #include <sys/priv.h>
61 #include <sys/proc.h>
62 #include <sys/refcount.h>
63 #include <sys/resourcevar.h>
64 #include <sys/stat.h>
65 #include <sys/sysctl.h>
66 #include <sys/sysproto.h>
67 #include <sys/systm.h>
68 #include <sys/sx.h>
69 #include <sys/time.h>
70 #include <sys/vnode.h>
71
72 #include <security/mac/mac_framework.h>
73
74 #include <vm/vm.h>
75 #include <vm/vm_param.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_pageout.h>
81 #include <vm/vm_pager.h>
82 #include <vm/swap_pager.h>
83
84 struct shm_mapping {
85         char            *sm_path;
86         Fnv32_t         sm_fnv;
87         struct shmfd    *sm_shmfd;
88         LIST_ENTRY(shm_mapping) sm_link;
89 };
90
91 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
92 static LIST_HEAD(, shm_mapping) *shm_dictionary;
93 static struct sx shm_dict_lock;
94 static struct mtx shm_timestamp_lock;
95 static u_long shm_hash;
96
97 #define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])
98
99 static int      shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
100 static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
101 static void     shm_dict_init(void *arg);
102 static void     shm_drop(struct shmfd *shmfd);
103 static struct shmfd *shm_hold(struct shmfd *shmfd);
104 static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
105 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
106 static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
107 static int      shm_dotruncate(struct shmfd *shmfd, off_t length);
108
109 static fo_rdwr_t        shm_read;
110 static fo_rdwr_t        shm_write;
111 static fo_truncate_t    shm_truncate;
112 static fo_ioctl_t       shm_ioctl;
113 static fo_poll_t        shm_poll;
114 static fo_kqfilter_t    shm_kqfilter;
115 static fo_stat_t        shm_stat;
116 static fo_close_t       shm_close;
117 static fo_chmod_t       shm_chmod;
118 static fo_chown_t       shm_chown;
119
120 /* File descriptor operations. */
121 static struct fileops shm_ops = {
122         .fo_read = shm_read,
123         .fo_write = shm_write,
124         .fo_truncate = shm_truncate,
125         .fo_ioctl = shm_ioctl,
126         .fo_poll = shm_poll,
127         .fo_kqfilter = shm_kqfilter,
128         .fo_stat = shm_stat,
129         .fo_close = shm_close,
130         .fo_chmod = shm_chmod,
131         .fo_chown = shm_chown,
132         .fo_flags = DFLAG_PASSABLE
133 };
134
135 FEATURE(posix_shm, "POSIX shared memory");
136
137 static int
138 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
139     int flags, struct thread *td)
140 {
141
142         return (EOPNOTSUPP);
143 }
144
145 static int
146 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
147     int flags, struct thread *td)
148 {
149
150         return (EOPNOTSUPP);
151 }
152
153 static int
154 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
155     struct thread *td)
156 {
157         struct shmfd *shmfd;
158 #ifdef MAC
159         int error;
160 #endif
161
162         shmfd = fp->f_data;
163 #ifdef MAC
164         error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
165         if (error)
166                 return (error);
167 #endif
168         return (shm_dotruncate(shmfd, length));
169 }
170
171 static int
172 shm_ioctl(struct file *fp, u_long com, void *data,
173     struct ucred *active_cred, struct thread *td)
174 {
175
176         return (EOPNOTSUPP);
177 }
178
179 static int
180 shm_poll(struct file *fp, int events, struct ucred *active_cred,
181     struct thread *td)
182 {
183
184         return (EOPNOTSUPP);
185 }
186
187 static int
188 shm_kqfilter(struct file *fp, struct knote *kn)
189 {
190
191         return (EOPNOTSUPP);
192 }
193
194 static int
195 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
196     struct thread *td)
197 {
198         struct shmfd *shmfd;
199 #ifdef MAC
200         int error;
201 #endif
202
203         shmfd = fp->f_data;
204
205 #ifdef MAC
206         error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
207         if (error)
208                 return (error);
209 #endif
210         
211         /*
212          * Attempt to return sanish values for fstat() on a memory file
213          * descriptor.
214          */
215         bzero(sb, sizeof(*sb));
216         sb->st_blksize = PAGE_SIZE;
217         sb->st_size = shmfd->shm_size;
218         sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
219         mtx_lock(&shm_timestamp_lock);
220         sb->st_atim = shmfd->shm_atime;
221         sb->st_ctim = shmfd->shm_ctime;
222         sb->st_mtim = shmfd->shm_mtime;
223         sb->st_birthtim = shmfd->shm_birthtime;
224         sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
225         sb->st_uid = shmfd->shm_uid;
226         sb->st_gid = shmfd->shm_gid;
227         mtx_unlock(&shm_timestamp_lock);
228
229         return (0);
230 }
231
232 static int
233 shm_close(struct file *fp, struct thread *td)
234 {
235         struct shmfd *shmfd;
236
237         shmfd = fp->f_data;
238         fp->f_data = NULL;
239         shm_drop(shmfd);
240
241         return (0);
242 }
243
244 static int
245 shm_dotruncate(struct shmfd *shmfd, off_t length)
246 {
247         vm_object_t object;
248         vm_page_t m, ma[1];
249         vm_pindex_t idx, nobjsize;
250         vm_ooffset_t delta;
251         int base, rv;
252
253         object = shmfd->shm_object;
254         VM_OBJECT_LOCK(object);
255         if (length == shmfd->shm_size) {
256                 VM_OBJECT_UNLOCK(object);
257                 return (0);
258         }
259         nobjsize = OFF_TO_IDX(length + PAGE_MASK);
260
261         /* Are we shrinking?  If so, trim the end. */
262         if (length < shmfd->shm_size) {
263
264                 /*
265                  * Zero the truncated part of the last page.
266                  */
267                 base = length & PAGE_MASK;
268                 if (base != 0) {
269                         idx = OFF_TO_IDX(length);
270 retry:
271                         m = vm_page_lookup(object, idx);
272                         if (m != NULL) {
273                                 if ((m->oflags & VPO_BUSY) != 0 ||
274                                     m->busy != 0) {
275                                         vm_page_sleep(m, "shmtrc");
276                                         goto retry;
277                                 }
278                         } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
279                                 m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
280                                 if (m == NULL) {
281                                         VM_OBJECT_UNLOCK(object);
282                                         VM_WAIT;
283                                         VM_OBJECT_LOCK(object);
284                                         goto retry;
285                                 } else if (m->valid != VM_PAGE_BITS_ALL) {
286                                         ma[0] = m;
287                                         rv = vm_pager_get_pages(object, ma, 1,
288                                             0);
289                                         m = vm_page_lookup(object, idx);
290                                 } else
291                                         /* A cached page was reactivated. */
292                                         rv = VM_PAGER_OK;
293                                 vm_page_lock(m);
294                                 if (rv == VM_PAGER_OK) {
295                                         vm_page_deactivate(m);
296                                         vm_page_unlock(m);
297                                         vm_page_wakeup(m);
298                                 } else {
299                                         vm_page_free(m);
300                                         vm_page_unlock(m);
301                                         VM_OBJECT_UNLOCK(object);
302                                         return (EIO);
303                                 }
304                         }
305                         if (m != NULL) {
306                                 pmap_zero_page_area(m, base, PAGE_SIZE - base);
307                                 KASSERT(m->valid == VM_PAGE_BITS_ALL,
308                                     ("shm_dotruncate: page %p is invalid", m));
309                                 vm_page_dirty(m);
310                                 vm_pager_page_unswapped(m);
311                         }
312                 }
313                 delta = ptoa(object->size - nobjsize);
314
315                 /* Toss in memory pages. */
316                 if (nobjsize < object->size)
317                         vm_object_page_remove(object, nobjsize, object->size,
318                             0);
319
320                 /* Toss pages from swap. */
321                 if (object->type == OBJT_SWAP)
322                         swap_pager_freespace(object, nobjsize, delta);
323
324                 /* Free the swap accounted for shm */
325                 swap_release_by_cred(delta, object->cred);
326                 object->charge -= delta;
327         } else {
328                 /* Attempt to reserve the swap */
329                 delta = ptoa(nobjsize - object->size);
330                 if (!swap_reserve_by_cred(delta, object->cred)) {
331                         VM_OBJECT_UNLOCK(object);
332                         return (ENOMEM);
333                 }
334                 object->charge += delta;
335         }
336         shmfd->shm_size = length;
337         mtx_lock(&shm_timestamp_lock);
338         vfs_timestamp(&shmfd->shm_ctime);
339         shmfd->shm_mtime = shmfd->shm_ctime;
340         mtx_unlock(&shm_timestamp_lock);
341         object->size = nobjsize;
342         VM_OBJECT_UNLOCK(object);
343         return (0);
344 }
345
346 /*
347  * shmfd object management including creation and reference counting
348  * routines.
349  */
350 static struct shmfd *
351 shm_alloc(struct ucred *ucred, mode_t mode)
352 {
353         struct shmfd *shmfd;
354
355         shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
356         shmfd->shm_size = 0;
357         shmfd->shm_uid = ucred->cr_uid;
358         shmfd->shm_gid = ucred->cr_gid;
359         shmfd->shm_mode = mode;
360         shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
361             shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
362         KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
363         VM_OBJECT_LOCK(shmfd->shm_object);
364         vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
365         vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
366         VM_OBJECT_UNLOCK(shmfd->shm_object);
367         vfs_timestamp(&shmfd->shm_birthtime);
368         shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
369             shmfd->shm_birthtime;
370         refcount_init(&shmfd->shm_refs, 1);
371 #ifdef MAC
372         mac_posixshm_init(shmfd);
373         mac_posixshm_create(ucred, shmfd);
374 #endif
375
376         return (shmfd);
377 }
378
379 static struct shmfd *
380 shm_hold(struct shmfd *shmfd)
381 {
382
383         refcount_acquire(&shmfd->shm_refs);
384         return (shmfd);
385 }
386
387 static void
388 shm_drop(struct shmfd *shmfd)
389 {
390
391         if (refcount_release(&shmfd->shm_refs)) {
392 #ifdef MAC
393                 mac_posixshm_destroy(shmfd);
394 #endif
395                 vm_object_deallocate(shmfd->shm_object);
396                 free(shmfd, M_SHMFD);
397         }
398 }
399
400 /*
401  * Determine if the credentials have sufficient permissions for a
402  * specified combination of FREAD and FWRITE.
403  */
404 static int
405 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
406 {
407         accmode_t accmode;
408         int error;
409
410         accmode = 0;
411         if (flags & FREAD)
412                 accmode |= VREAD;
413         if (flags & FWRITE)
414                 accmode |= VWRITE;
415         mtx_lock(&shm_timestamp_lock);
416         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
417             accmode, ucred, NULL);
418         mtx_unlock(&shm_timestamp_lock);
419         return (error);
420 }
421
422 /*
423  * Dictionary management.  We maintain an in-kernel dictionary to map
424  * paths to shmfd objects.  We use the FNV hash on the path to store
425  * the mappings in a hash table.
426  */
427 static void
428 shm_dict_init(void *arg)
429 {
430
431         mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
432         sx_init(&shm_dict_lock, "shm dictionary");
433         shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
434 }
435 SYSINIT(shm_dict_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_dict_init, NULL);
436
437 static struct shmfd *
438 shm_lookup(char *path, Fnv32_t fnv)
439 {
440         struct shm_mapping *map;
441
442         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
443                 if (map->sm_fnv != fnv)
444                         continue;
445                 if (strcmp(map->sm_path, path) == 0)
446                         return (map->sm_shmfd);
447         }
448
449         return (NULL);
450 }
451
452 static void
453 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
454 {
455         struct shm_mapping *map;
456
457         map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
458         map->sm_path = path;
459         map->sm_fnv = fnv;
460         map->sm_shmfd = shm_hold(shmfd);
461         LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
462 }
463
464 static int
465 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
466 {
467         struct shm_mapping *map;
468         int error;
469
470         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
471                 if (map->sm_fnv != fnv)
472                         continue;
473                 if (strcmp(map->sm_path, path) == 0) {
474 #ifdef MAC
475                         error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
476                         if (error)
477                                 return (error);
478 #endif
479                         error = shm_access(map->sm_shmfd, ucred,
480                             FREAD | FWRITE);
481                         if (error)
482                                 return (error);
483                         LIST_REMOVE(map, sm_link);
484                         shm_drop(map->sm_shmfd);
485                         free(map->sm_path, M_SHMFD);
486                         free(map, M_SHMFD);
487                         return (0);
488                 }
489         }
490
491         return (ENOENT);
492 }
493
494 /* System calls. */
495 int
496 sys_shm_open(struct thread *td, struct shm_open_args *uap)
497 {
498         struct filedesc *fdp;
499         struct shmfd *shmfd;
500         struct file *fp;
501         char *path;
502         Fnv32_t fnv;
503         mode_t cmode;
504         int fd, error;
505
506 #ifdef CAPABILITY_MODE
507         /*
508          * shm_open(2) is only allowed for anonymous objects.
509          */
510         if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
511                 return (ECAPMODE);
512 #endif
513
514         if ((uap->flags & O_ACCMODE) != O_RDONLY &&
515             (uap->flags & O_ACCMODE) != O_RDWR)
516                 return (EINVAL);
517
518         if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC)) != 0)
519                 return (EINVAL);
520
521         fdp = td->td_proc->p_fd;
522         cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;
523
524         error = falloc(td, &fp, &fd, 0);
525         if (error)
526                 return (error);
527
528         /* A SHM_ANON path pointer creates an anonymous object. */
529         if (uap->path == SHM_ANON) {
530                 /* A read-only anonymous object is pointless. */
531                 if ((uap->flags & O_ACCMODE) == O_RDONLY) {
532                         fdclose(fdp, fp, fd, td);
533                         fdrop(fp, td);
534                         return (EINVAL);
535                 }
536                 shmfd = shm_alloc(td->td_ucred, cmode);
537         } else {
538                 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
539                 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
540
541                 /* Require paths to start with a '/' character. */
542                 if (error == 0 && path[0] != '/')
543                         error = EINVAL;
544                 if (error) {
545                         fdclose(fdp, fp, fd, td);
546                         fdrop(fp, td);
547                         free(path, M_SHMFD);
548                         return (error);
549                 }
550
551                 fnv = fnv_32_str(path, FNV1_32_INIT);
552                 sx_xlock(&shm_dict_lock);
553                 shmfd = shm_lookup(path, fnv);
554                 if (shmfd == NULL) {
555                         /* Object does not yet exist, create it if requested. */
556                         if (uap->flags & O_CREAT) {
557 #ifdef MAC
558                                 error = mac_posixshm_check_create(td->td_ucred,
559                                     path);
560                                 if (error == 0) {
561 #endif
562                                         shmfd = shm_alloc(td->td_ucred, cmode);
563                                         shm_insert(path, fnv, shmfd);
564 #ifdef MAC
565                                 }
566 #endif
567                         } else {
568                                 free(path, M_SHMFD);
569                                 error = ENOENT;
570                         }
571                 } else {
572                         /*
573                          * Object already exists, obtain a new
574                          * reference if requested and permitted.
575                          */
576                         free(path, M_SHMFD);
577                         if ((uap->flags & (O_CREAT | O_EXCL)) ==
578                             (O_CREAT | O_EXCL))
579                                 error = EEXIST;
580                         else {
581 #ifdef MAC
582                                 error = mac_posixshm_check_open(td->td_ucred,
583                                     shmfd, FFLAGS(uap->flags & O_ACCMODE));
584                                 if (error == 0)
585 #endif
586                                 error = shm_access(shmfd, td->td_ucred,
587                                     FFLAGS(uap->flags & O_ACCMODE));
588                         }
589
590                         /*
591                          * Truncate the file back to zero length if
592                          * O_TRUNC was specified and the object was
593                          * opened with read/write.
594                          */
595                         if (error == 0 &&
596                             (uap->flags & (O_ACCMODE | O_TRUNC)) ==
597                             (O_RDWR | O_TRUNC)) {
598 #ifdef MAC
599                                 error = mac_posixshm_check_truncate(
600                                         td->td_ucred, fp->f_cred, shmfd);
601                                 if (error == 0)
602 #endif
603                                         shm_dotruncate(shmfd, 0);
604                         }
605                         if (error == 0)
606                                 shm_hold(shmfd);
607                 }
608                 sx_xunlock(&shm_dict_lock);
609
610                 if (error) {
611                         fdclose(fdp, fp, fd, td);
612                         fdrop(fp, td);
613                         return (error);
614                 }
615         }
616
617         finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
618
619         FILEDESC_XLOCK(fdp);
620         if (fdp->fd_ofiles[fd] == fp)
621                 fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
622         FILEDESC_XUNLOCK(fdp);
623         td->td_retval[0] = fd;
624         fdrop(fp, td);
625
626         return (0);
627 }
628
629 int
630 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
631 {
632         char *path;
633         Fnv32_t fnv;
634         int error;
635
636         path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
637         error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
638         if (error) {
639                 free(path, M_TEMP);
640                 return (error);
641         }
642
643         fnv = fnv_32_str(path, FNV1_32_INIT);
644         sx_xlock(&shm_dict_lock);
645         error = shm_remove(path, fnv, td->td_ucred);
646         sx_xunlock(&shm_dict_lock);
647         free(path, M_TEMP);
648
649         return (error);
650 }
651
652 /*
653  * mmap() helper to validate mmap() requests against shm object state
654  * and give mmap() the vm_object to use for the mapping.
655  */
656 int
657 shm_mmap(struct shmfd *shmfd, vm_size_t objsize, vm_ooffset_t foff,
658     vm_object_t *obj)
659 {
660
661         /*
662          * XXXRW: This validation is probably insufficient, and subject to
663          * sign errors.  It should be fixed.
664          */
665         if (foff >= shmfd->shm_size ||
666             foff + objsize > round_page(shmfd->shm_size))
667                 return (EINVAL);
668
669         mtx_lock(&shm_timestamp_lock);
670         vfs_timestamp(&shmfd->shm_atime);
671         mtx_unlock(&shm_timestamp_lock);
672         vm_object_reference(shmfd->shm_object);
673         *obj = shmfd->shm_object;
674         return (0);
675 }
676
677 static int
678 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
679     struct thread *td)
680 {
681         struct shmfd *shmfd;
682         int error;
683
684         error = 0;
685         shmfd = fp->f_data;
686         mtx_lock(&shm_timestamp_lock);
687         /*
688          * SUSv4 says that x bits of permission need not be affected.
689          * Be consistent with our shm_open there.
690          */
691 #ifdef MAC
692         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
693         if (error != 0)
694                 goto out;
695 #endif
696         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
697             shmfd->shm_gid, VADMIN, active_cred, NULL);
698         if (error != 0)
699                 goto out;
700         shmfd->shm_mode = mode & ACCESSPERMS;
701 out:
702         mtx_unlock(&shm_timestamp_lock);
703         return (error);
704 }
705
706 static int
707 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
708     struct thread *td)
709 {
710         struct shmfd *shmfd;
711         int error;
712
713         error = 0;
714         shmfd = fp->f_data;
715         mtx_lock(&shm_timestamp_lock);
716 #ifdef MAC
717         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
718         if (error != 0)
719                 goto out;
720 #endif
721         if (uid == (uid_t)-1)
722                 uid = shmfd->shm_uid;
723         if (gid == (gid_t)-1)
724                  gid = shmfd->shm_gid;
725         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
726             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
727             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
728                 goto out;
729         shmfd->shm_uid = uid;
730         shmfd->shm_gid = gid;
731 out:
732         mtx_unlock(&shm_timestamp_lock);
733         return (error);
734 }