2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * POSIX message queue implementation.
31 * 1) A mqueue filesystem can be mounted, each message queue appears
32 * in mounted directory, user can change queue's permission and
33 * ownership, or remove a queue. Manually creating a file in the
34 * directory causes a message queue to be created in the kernel with
35 * default message queue attributes applied and same name used, this
36 * method is not advocated since mq_open syscall allows user to specify
37 * different attributes. Also the file system can be mounted multiple
38 * times at different mount points but shows same contents.
40 * 2) Standard POSIX message queue API. The syscalls do not use vfs layer,
41 * but directly operate on internal data structure, this allows user to
42 * use the IPC facility without having to mount mqueue file system.
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
48 #include "opt_capsicum.h"
49 #include "opt_compat.h"
51 #include <sys/param.h>
52 #include <sys/kernel.h>
53 #include <sys/systm.h>
54 #include <sys/limits.h>
55 #include <sys/malloc.h>
57 #include <sys/capsicum.h>
58 #include <sys/dirent.h>
59 #include <sys/event.h>
60 #include <sys/eventhandler.h>
61 #include <sys/fcntl.h>
63 #include <sys/filedesc.h>
66 #include <sys/module.h>
67 #include <sys/mount.h>
68 #include <sys/mqueue.h>
69 #include <sys/mutex.h>
70 #include <sys/namei.h>
71 #include <sys/posix4.h>
75 #include <sys/queue.h>
76 #include <sys/sysproto.h>
78 #include <sys/syscall.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysent.h>
82 #include <sys/sysctl.h>
83 #include <sys/taskqueue.h>
84 #include <sys/unistd.h>
85 #include <sys/vnode.h>
86 #include <machine/atomic.h>
88 FEATURE(p1003_1b_mqueue, "POSIX P1003.1B message queues support");
91 * Limits and constants
93 #define MQFS_NAMELEN NAME_MAX
94 #define MQFS_DELEN (8 + MQFS_NAMELEN)
110 * mqfs_info: describes a mqfs instance
114 struct mqfs_node *mi_root;
115 struct unrhdr *mi_unrhdr;
119 LIST_ENTRY(mqfs_vdata) mv_link;
120 struct mqfs_node *mv_node;
121 struct vnode *mv_vnode;
126 * mqfs_node: describes a node (file or directory) within a mqfs
129 char mn_name[MQFS_NAMELEN+1];
130 struct mqfs_info *mn_info;
131 struct mqfs_node *mn_parent;
132 LIST_HEAD(,mqfs_node) mn_children;
133 LIST_ENTRY(mqfs_node) mn_sibling;
134 LIST_HEAD(,mqfs_vdata) mn_vnodes;
135 const void *mn_pr_root;
141 struct timespec mn_birth;
142 struct timespec mn_ctime;
143 struct timespec mn_atime;
144 struct timespec mn_mtime;
150 #define VTON(vp) (((struct mqfs_vdata *)((vp)->v_data))->mv_node)
151 #define VTOMQ(vp) ((struct mqueue *)(VTON(vp)->mn_data))
152 #define VFSTOMQFS(m) ((struct mqfs_info *)((m)->mnt_data))
153 #define FPTOMQ(fp) ((struct mqueue *)(((struct mqfs_node *) \
154 (fp)->f_data)->mn_data))
158 const void *mo_pr_root;
161 TAILQ_HEAD(msgq, mqueue_msg);
165 struct mqueue_notifier {
166 LIST_ENTRY(mqueue_notifier) nt_link;
167 struct sigevent nt_sigev;
169 struct proc *nt_proc;
182 struct selinfo mq_rsel;
183 struct selinfo mq_wsel;
184 struct mqueue_notifier *mq_notifier;
191 TAILQ_ENTRY(mqueue_msg) msg_link;
192 unsigned int msg_prio;
193 unsigned int msg_size;
194 /* following real data... */
197 static SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0,
198 "POSIX real time message queue");
200 static int default_maxmsg = 10;
201 static int default_msgsize = 1024;
203 static int maxmsg = 100;
204 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
205 &maxmsg, 0, "Default maximum messages in queue");
206 static int maxmsgsize = 16384;
207 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
208 &maxmsgsize, 0, "Default maximum message size");
209 static int maxmq = 100;
210 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
211 &maxmq, 0, "maximum message queues");
212 static int curmq = 0;
213 SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
214 &curmq, 0, "current message queue number");
215 static int unloadable = 0;
216 static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");
218 static eventhandler_tag exit_tag;
220 /* Only one instance per-system */
221 static struct mqfs_info mqfs_data;
222 static uma_zone_t mqnode_zone;
223 static uma_zone_t mqueue_zone;
224 static uma_zone_t mvdata_zone;
225 static uma_zone_t mqnoti_zone;
226 static struct vop_vector mqfs_vnodeops;
227 static struct fileops mqueueops;
228 static unsigned mqfs_osd_jail_slot;
231 * Directory structure construction and manipulation
234 static struct mqfs_node *mqfs_create_dir(struct mqfs_node *parent,
235 const char *name, int namelen, struct ucred *cred, int mode);
236 static struct mqfs_node *mqfs_create_link(struct mqfs_node *parent,
237 const char *name, int namelen, struct ucred *cred, int mode);
240 static struct mqfs_node *mqfs_create_file(struct mqfs_node *parent,
241 const char *name, int namelen, struct ucred *cred, int mode);
242 static int mqfs_destroy(struct mqfs_node *mn);
243 static void mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
244 static void mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
245 static int mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn);
246 static int mqfs_prison_create(void *obj, void *data);
247 static void mqfs_prison_destructor(void *data);
248 static void mqfs_prison_remove_task(void *context, int pending);
251 * Message queue construction and manipulation
253 static struct mqueue *mqueue_alloc(const struct mq_attr *attr);
254 static void mqueue_free(struct mqueue *mq);
255 static int mqueue_send(struct mqueue *mq, const char *msg_ptr,
256 size_t msg_len, unsigned msg_prio, int waitok,
257 const struct timespec *abs_timeout);
258 static int mqueue_receive(struct mqueue *mq, char *msg_ptr,
259 size_t msg_len, unsigned *msg_prio, int waitok,
260 const struct timespec *abs_timeout);
261 static int _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
263 static int _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
265 static void mqueue_send_notification(struct mqueue *mq);
266 static void mqueue_fdclose(struct thread *td, int fd, struct file *fp);
267 static void mq_proc_exit(void *arg, struct proc *p);
272 static void filt_mqdetach(struct knote *kn);
273 static int filt_mqread(struct knote *kn, long hint);
274 static int filt_mqwrite(struct knote *kn, long hint);
276 struct filterops mq_rfiltops = {
278 .f_detach = filt_mqdetach,
279 .f_event = filt_mqread,
281 struct filterops mq_wfiltops = {
283 .f_detach = filt_mqdetach,
284 .f_event = filt_mqwrite,
288 * Initialize fileno bitmap
291 mqfs_fileno_init(struct mqfs_info *mi)
295 up = new_unrhdr(1, INT_MAX, NULL);
300 * Tear down fileno bitmap
303 mqfs_fileno_uninit(struct mqfs_info *mi)
308 mi->mi_unrhdr = NULL;
313 * Allocate a file number
316 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn)
318 /* make sure our parent has a file number */
319 if (mn->mn_parent && !mn->mn_parent->mn_fileno)
320 mqfs_fileno_alloc(mi, mn->mn_parent);
322 switch (mn->mn_type) {
326 case mqfstype_symlink:
327 mn->mn_fileno = alloc_unr(mi->mi_unrhdr);
330 KASSERT(mn->mn_parent != NULL,
331 ("mqfstype_this node has no parent"));
332 mn->mn_fileno = mn->mn_parent->mn_fileno;
334 case mqfstype_parent:
335 KASSERT(mn->mn_parent != NULL,
336 ("mqfstype_parent node has no parent"));
337 if (mn->mn_parent == mi->mi_root) {
338 mn->mn_fileno = mn->mn_parent->mn_fileno;
341 KASSERT(mn->mn_parent->mn_parent != NULL,
342 ("mqfstype_parent node has no grandparent"));
343 mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno;
347 ("mqfs_fileno_alloc() called for unknown type node: %d",
354 * Release a file number
357 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn)
359 switch (mn->mn_type) {
363 case mqfstype_symlink:
364 free_unr(mi->mi_unrhdr, mn->mn_fileno);
367 case mqfstype_parent:
368 /* ignore these, as they don't "own" their file number */
372 ("mqfs_fileno_free() called for unknown type node: %d",
378 static __inline struct mqfs_node *
381 return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO);
385 mqnode_free(struct mqfs_node *node)
387 uma_zfree(mqnode_zone, node);
391 mqnode_addref(struct mqfs_node *node)
393 atomic_fetchadd_int(&node->mn_refcount, 1);
397 mqnode_release(struct mqfs_node *node)
399 struct mqfs_info *mqfs;
402 mqfs = node->mn_info;
403 old = atomic_fetchadd_int(&node->mn_refcount, -1);
404 if (node->mn_type == mqfstype_dir ||
405 node->mn_type == mqfstype_root)
406 exp = 3; /* include . and .. */
410 int locked = sx_xlocked(&mqfs->mi_lock);
412 sx_xlock(&mqfs->mi_lock);
415 sx_xunlock(&mqfs->mi_lock);
420 * Add a node to a directory
423 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node)
425 KASSERT(parent != NULL, ("%s(): parent is NULL", __func__));
426 KASSERT(parent->mn_info != NULL,
427 ("%s(): parent has no mn_info", __func__));
428 KASSERT(parent->mn_type == mqfstype_dir ||
429 parent->mn_type == mqfstype_root,
430 ("%s(): parent is not a directory", __func__));
432 node->mn_info = parent->mn_info;
433 node->mn_parent = parent;
434 LIST_INIT(&node->mn_children);
435 LIST_INIT(&node->mn_vnodes);
436 LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling);
437 mqnode_addref(parent);
441 static struct mqfs_node *
442 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode,
445 struct mqfs_node *node;
447 node = mqnode_alloc();
448 strncpy(node->mn_name, name, namelen);
449 node->mn_pr_root = cred->cr_prison->pr_root;
450 node->mn_type = nodetype;
451 node->mn_refcount = 1;
452 vfs_timestamp(&node->mn_birth);
453 node->mn_ctime = node->mn_atime = node->mn_mtime
455 node->mn_uid = cred->cr_uid;
456 node->mn_gid = cred->cr_gid;
457 node->mn_mode = mode;
464 static struct mqfs_node *
465 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen,
466 struct ucred *cred, int mode)
468 struct mqfs_node *node;
470 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file);
471 if (mqfs_add_node(parent, node) != 0) {
479 * Add . and .. to a directory
482 mqfs_fixup_dir(struct mqfs_node *parent)
484 struct mqfs_node *dir;
486 dir = mqnode_alloc();
487 dir->mn_name[0] = '.';
488 dir->mn_type = mqfstype_this;
489 dir->mn_refcount = 1;
490 if (mqfs_add_node(parent, dir) != 0) {
495 dir = mqnode_alloc();
496 dir->mn_name[0] = dir->mn_name[1] = '.';
497 dir->mn_type = mqfstype_parent;
498 dir->mn_refcount = 1;
500 if (mqfs_add_node(parent, dir) != 0) {
513 static struct mqfs_node *
514 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen,
515 struct ucred *cred, int mode)
517 struct mqfs_node *node;
519 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir);
520 if (mqfs_add_node(parent, node) != 0) {
525 if (mqfs_fixup_dir(node) != 0) {
535 static struct mqfs_node *
536 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen,
537 struct ucred *cred, int mode)
539 struct mqfs_node *node;
541 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink);
542 if (mqfs_add_node(parent, node) != 0) {
552 * Destroy a node or a tree of nodes
555 mqfs_destroy(struct mqfs_node *node)
557 struct mqfs_node *parent;
559 KASSERT(node != NULL,
560 ("%s(): node is NULL", __func__));
561 KASSERT(node->mn_info != NULL,
562 ("%s(): node has no mn_info", __func__));
564 /* destroy children */
565 if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root)
566 while (! LIST_EMPTY(&node->mn_children))
567 mqfs_destroy(LIST_FIRST(&node->mn_children));
569 /* unlink from parent */
570 if ((parent = node->mn_parent) != NULL) {
571 KASSERT(parent->mn_info == node->mn_info,
572 ("%s(): parent has different mn_info", __func__));
573 LIST_REMOVE(node, mn_sibling);
576 if (node->mn_fileno != 0)
577 mqfs_fileno_free(node->mn_info, node);
578 if (node->mn_data != NULL)
579 mqueue_free(node->mn_data);
585 * Mount a mqfs instance
588 mqfs_mount(struct mount *mp)
592 if (mp->mnt_flag & MNT_UPDATE)
595 mp->mnt_data = &mqfs_data;
597 mp->mnt_flag |= MNT_LOCAL;
602 vfs_mountedfrom(mp, "mqueue");
603 sbp->f_bsize = PAGE_SIZE;
604 sbp->f_iosize = PAGE_SIZE;
614 * Unmount a mqfs instance
617 mqfs_unmount(struct mount *mp, int mntflags)
621 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0,
627 * Return a root vnode
630 mqfs_root(struct mount *mp, int flags, struct vnode **vpp)
632 struct mqfs_info *mqfs;
635 mqfs = VFSTOMQFS(mp);
636 ret = mqfs_allocv(mp, vpp, mqfs->mi_root);
641 * Return filesystem stats
644 mqfs_statfs(struct mount *mp, struct statfs *sbp)
646 /* XXX update statistics */
651 * Initialize a mqfs instance
654 mqfs_init(struct vfsconf *vfc)
656 struct mqfs_node *root;
657 struct mqfs_info *mi;
659 osd_method_t methods[PR_MAXMETHOD] = {
660 [PR_METHOD_CREATE] = mqfs_prison_create,
663 mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node),
664 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
665 mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue),
666 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
667 mvdata_zone = uma_zcreate("mvdata",
668 sizeof(struct mqfs_vdata), NULL, NULL, NULL,
669 NULL, UMA_ALIGN_PTR, 0);
670 mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier),
671 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
673 sx_init(&mi->mi_lock, "mqfs lock");
674 /* set up the root diretory */
675 root = mqfs_create_node("/", 1, curthread->td_ucred, 01777,
678 LIST_INIT(&root->mn_children);
679 LIST_INIT(&root->mn_vnodes);
681 mqfs_fileno_init(mi);
682 mqfs_fileno_alloc(mi, root);
683 mqfs_fixup_dir(root);
684 exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL,
685 EVENTHANDLER_PRI_ANY);
686 mq_fdclose = mqueue_fdclose;
687 p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING);
689 /* Note current jails. */
690 mqfs_osd_jail_slot = osd_jail_register(mqfs_prison_destructor, methods);
691 sx_slock(&allprison_lock);
692 TAILQ_FOREACH(pr, &allprison, pr_list)
693 (void)mqfs_prison_create(pr, NULL);
694 sx_sunlock(&allprison_lock);
699 * Destroy a mqfs instance
702 mqfs_uninit(struct vfsconf *vfc)
705 struct mqfs_info *mi;
709 slot = mqfs_osd_jail_slot;
710 mqfs_osd_jail_slot = 0;
711 osd_jail_deregister(slot);
712 EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
714 mqfs_destroy(mi->mi_root);
716 mqfs_fileno_uninit(mi);
717 sx_destroy(&mi->mi_lock);
718 uma_zdestroy(mqnode_zone);
719 uma_zdestroy(mqueue_zone);
720 uma_zdestroy(mvdata_zone);
721 uma_zdestroy(mqnoti_zone);
729 do_recycle(void *context, int pending __unused)
731 struct vnode *vp = (struct vnode *)context;
741 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
743 struct mqfs_vdata *vd;
744 struct mqfs_info *mqfs;
745 struct vnode *newvpp;
750 sx_xlock(&mqfs->mi_lock);
751 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
752 if (vd->mv_vnode->v_mount == mp) {
761 sx_xunlock(&mqfs->mi_lock);
762 error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
766 sx_xunlock(&mqfs->mi_lock);
768 error = getnewvnode("mqueue", mp, &mqfs_vnodeops, &newvpp);
771 vn_lock(newvpp, LK_EXCLUSIVE | LK_RETRY);
772 error = insmntque(newvpp, mp);
776 sx_xlock(&mqfs->mi_lock);
778 * Check if it has already been allocated
779 * while we were blocked.
781 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
782 if (vd->mv_vnode->v_mount == mp) {
784 sx_xunlock(&mqfs->mi_lock);
794 vd = uma_zalloc(mvdata_zone, M_WAITOK);
798 TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp);
799 LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link);
801 switch (pn->mn_type) {
803 (*vpp)->v_vflag = VV_ROOT;
807 case mqfstype_parent:
808 (*vpp)->v_type = VDIR;
811 (*vpp)->v_type = VREG;
813 case mqfstype_symlink:
814 (*vpp)->v_type = VLNK;
817 KASSERT(0, ("mqfs_allocf called for null node\n"));
819 panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type);
821 sx_xunlock(&mqfs->mi_lock);
826 * Search a directory entry
828 static struct mqfs_node *
829 mqfs_search(struct mqfs_node *pd, const char *name, int len, struct ucred *cred)
831 struct mqfs_node *pn;
834 sx_assert(&pd->mn_info->mi_lock, SX_LOCKED);
835 pr_root = cred->cr_prison->pr_root;
836 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
837 /* Only match names within the same prison root directory */
838 if ((pn->mn_pr_root == NULL || pn->mn_pr_root == pr_root) &&
839 strncmp(pn->mn_name, name, len) == 0 &&
840 pn->mn_name[len] == '\0')
847 * Look up a file or directory.
850 mqfs_lookupx(struct vop_cachedlookup_args *ap)
852 struct componentname *cnp;
853 struct vnode *dvp, **vpp;
854 struct mqfs_node *pd;
855 struct mqfs_node *pn;
856 struct mqfs_info *mqfs;
857 int nameiop, flags, error, namelen;
864 pname = cnp->cn_nameptr;
865 namelen = cnp->cn_namelen;
867 flags = cnp->cn_flags;
868 nameiop = cnp->cn_nameiop;
874 if (dvp->v_type != VDIR)
877 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread);
881 /* shortcut: check if the name is too long */
882 if (cnp->cn_namelen >= MQFS_NAMELEN)
886 if (namelen == 1 && pname[0] == '.') {
887 if ((flags & ISLASTCN) && nameiop != LOOKUP)
896 if (cnp->cn_flags & ISDOTDOT) {
897 if (dvp->v_vflag & VV_ROOT)
899 if ((flags & ISLASTCN) && nameiop != LOOKUP)
902 KASSERT(pd->mn_parent, ("non-root directory has no parent"));
904 error = mqfs_allocv(dvp->v_mount, vpp, pn);
905 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
910 sx_xlock(&mqfs->mi_lock);
911 pn = mqfs_search(pd, pname, namelen, cnp->cn_cred);
914 sx_xunlock(&mqfs->mi_lock);
919 if (nameiop == DELETE && (flags & ISLASTCN)) {
920 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
934 error = mqfs_allocv(dvp->v_mount, vpp, pn);
936 if (error == 0 && cnp->cn_flags & MAKEENTRY)
937 cache_enter(dvp, *vpp, cnp);
943 /* will create a new entry in the directory ? */
944 if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT)
945 && (flags & ISLASTCN)) {
946 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
949 cnp->cn_flags |= SAVENAME;
950 return (EJUSTRETURN);
956 struct vop_lookup_args {
957 struct vop_generic_args a_gen;
959 struct vnode **a_vpp;
960 struct componentname *a_cnp;
965 * vnode lookup operation
968 mqfs_lookup(struct vop_cachedlookup_args *ap)
972 rc = mqfs_lookupx(ap);
977 struct vop_create_args {
979 struct vnode **a_vpp;
980 struct componentname *a_cnp;
986 * vnode creation operation
989 mqfs_create(struct vop_create_args *ap)
991 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
992 struct componentname *cnp = ap->a_cnp;
993 struct mqfs_node *pd;
994 struct mqfs_node *pn;
998 pd = VTON(ap->a_dvp);
999 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
1001 mq = mqueue_alloc(NULL);
1004 sx_xlock(&mqfs->mi_lock);
1005 if ((cnp->cn_flags & HASBUF) == 0)
1006 panic("%s: no name", __func__);
1007 pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen,
1008 cnp->cn_cred, ap->a_vap->va_mode);
1010 sx_xunlock(&mqfs->mi_lock);
1014 sx_xunlock(&mqfs->mi_lock);
1015 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1031 int do_unlink(struct mqfs_node *pn, struct ucred *ucred)
1033 struct mqfs_node *parent;
1034 struct mqfs_vdata *vd;
1037 sx_assert(&pn->mn_info->mi_lock, SX_LOCKED);
1039 if (ucred->cr_uid != pn->mn_uid &&
1040 (error = priv_check_cred(ucred, PRIV_MQ_ADMIN, 0)) != 0)
1042 else if (!pn->mn_deleted) {
1043 parent = pn->mn_parent;
1044 pn->mn_parent = NULL;
1046 LIST_REMOVE(pn, mn_sibling);
1047 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
1048 cache_purge(vd->mv_vnode);
1049 vhold(vd->mv_vnode);
1050 taskqueue_enqueue(taskqueue_thread, &vd->mv_task);
1053 mqnode_release(parent);
1060 struct vop_remove_args {
1061 struct vnode *a_dvp;
1063 struct componentname *a_cnp;
1068 * vnode removal operation
1071 mqfs_remove(struct vop_remove_args *ap)
1073 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1074 struct mqfs_node *pn;
1077 if (ap->a_vp->v_type == VDIR)
1079 pn = VTON(ap->a_vp);
1080 sx_xlock(&mqfs->mi_lock);
1081 error = do_unlink(pn, ap->a_cnp->cn_cred);
1082 sx_xunlock(&mqfs->mi_lock);
1087 struct vop_inactive_args {
1089 struct thread *a_td;
1094 mqfs_inactive(struct vop_inactive_args *ap)
1096 struct mqfs_node *pn = VTON(ap->a_vp);
1104 struct vop_reclaim_args {
1105 struct vop_generic_args a_gen;
1107 struct thread *a_td;
1112 mqfs_reclaim(struct vop_reclaim_args *ap)
1114 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount);
1115 struct vnode *vp = ap->a_vp;
1116 struct mqfs_node *pn;
1117 struct mqfs_vdata *vd;
1121 sx_xlock(&mqfs->mi_lock);
1123 LIST_REMOVE(vd, mv_link);
1124 uma_zfree(mvdata_zone, vd);
1126 sx_xunlock(&mqfs->mi_lock);
1131 struct vop_open_args {
1132 struct vop_generic_args a_gen;
1135 struct ucred *a_cred;
1136 struct thread *a_td;
1142 mqfs_open(struct vop_open_args *ap)
1148 struct vop_close_args {
1149 struct vop_generic_args a_gen;
1152 struct ucred *a_cred;
1153 struct thread *a_td;
1158 mqfs_close(struct vop_close_args *ap)
1164 struct vop_access_args {
1165 struct vop_generic_args a_gen;
1167 accmode_t a_accmode;
1168 struct ucred *a_cred;
1169 struct thread *a_td;
1174 * Verify permissions
1177 mqfs_access(struct vop_access_args *ap)
1179 struct vnode *vp = ap->a_vp;
1183 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
1186 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid,
1187 vattr.va_gid, ap->a_accmode, ap->a_cred, NULL);
1192 struct vop_getattr_args {
1193 struct vop_generic_args a_gen;
1195 struct vattr *a_vap;
1196 struct ucred *a_cred;
1201 * Get file attributes
1204 mqfs_getattr(struct vop_getattr_args *ap)
1206 struct vnode *vp = ap->a_vp;
1207 struct mqfs_node *pn = VTON(vp);
1208 struct vattr *vap = ap->a_vap;
1211 vap->va_type = vp->v_type;
1212 vap->va_mode = pn->mn_mode;
1214 vap->va_uid = pn->mn_uid;
1215 vap->va_gid = pn->mn_gid;
1216 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
1217 vap->va_fileid = pn->mn_fileno;
1219 vap->va_blocksize = PAGE_SIZE;
1220 vap->va_bytes = vap->va_size = 0;
1221 vap->va_atime = pn->mn_atime;
1222 vap->va_mtime = pn->mn_mtime;
1223 vap->va_ctime = pn->mn_ctime;
1224 vap->va_birthtime = pn->mn_birth;
1227 vap->va_rdev = NODEV;
1229 vap->va_filerev = 0;
1234 struct vop_setattr_args {
1235 struct vop_generic_args a_gen;
1237 struct vattr *a_vap;
1238 struct ucred *a_cred;
1245 mqfs_setattr(struct vop_setattr_args *ap)
1247 struct mqfs_node *pn;
1258 if ((vap->va_type != VNON) ||
1259 (vap->va_nlink != VNOVAL) ||
1260 (vap->va_fsid != VNOVAL) ||
1261 (vap->va_fileid != VNOVAL) ||
1262 (vap->va_blocksize != VNOVAL) ||
1263 (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1264 (vap->va_rdev != VNOVAL) ||
1265 ((int)vap->va_bytes != VNOVAL) ||
1266 (vap->va_gen != VNOVAL)) {
1273 if (vap->va_uid == (uid_t)VNOVAL)
1277 if (vap->va_gid == (gid_t)VNOVAL)
1282 if (uid != pn->mn_uid || gid != pn->mn_gid) {
1284 * To modify the ownership of a file, must possess VADMIN
1287 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)))
1291 * XXXRW: Why is there a privilege check here: shouldn't the
1292 * check in VOP_ACCESS() be enough? Also, are the group bits
1293 * below definitely right?
1295 if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid ||
1296 (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) &&
1297 (error = priv_check(td, PRIV_MQ_ADMIN)) != 0)
1304 if (vap->va_mode != (mode_t)VNOVAL) {
1305 if ((ap->a_cred->cr_uid != pn->mn_uid) &&
1306 (error = priv_check(td, PRIV_MQ_ADMIN)))
1308 pn->mn_mode = vap->va_mode;
1312 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1313 /* See the comment in ufs_vnops::ufs_setattr(). */
1314 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
1315 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1316 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
1318 if (vap->va_atime.tv_sec != VNOVAL) {
1319 pn->mn_atime = vap->va_atime;
1321 if (vap->va_mtime.tv_sec != VNOVAL) {
1322 pn->mn_mtime = vap->va_mtime;
1327 vfs_timestamp(&pn->mn_ctime);
1333 struct vop_read_args {
1334 struct vop_generic_args a_gen;
1338 struct ucred *a_cred;
1346 mqfs_read(struct vop_read_args *ap)
1349 struct vnode *vp = ap->a_vp;
1350 struct uio *uio = ap->a_uio;
1351 struct mqfs_node *pn;
1355 if (vp->v_type != VREG)
1360 snprintf(buf, sizeof(buf),
1361 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n",
1366 buf[sizeof(buf)-1] = '\0';
1368 error = uiomove_frombuf(buf, len, uio);
1373 struct vop_readdir_args {
1374 struct vop_generic_args a_gen;
1377 struct ucred *a_cred;
1385 * Return directory entries.
1388 mqfs_readdir(struct vop_readdir_args *ap)
1391 struct mqfs_info *mi;
1392 struct mqfs_node *pd;
1393 struct mqfs_node *pn;
1394 struct dirent entry;
1396 const void *pr_root;
1397 int *tmp_ncookies = NULL;
1402 mi = VFSTOMQFS(vp->v_mount);
1406 if (vp->v_type != VDIR)
1409 if (uio->uio_offset < 0)
1412 if (ap->a_ncookies != NULL) {
1413 tmp_ncookies = ap->a_ncookies;
1414 *ap->a_ncookies = 0;
1415 ap->a_ncookies = NULL;
1421 pr_root = ap->a_cred->cr_prison->pr_root;
1422 sx_xlock(&mi->mi_lock);
1424 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
1425 entry.d_reclen = sizeof(entry);
1428 * Only show names within the same prison root directory
1429 * (or not associated with a prison, e.g. "." and "..").
1431 if (pn->mn_pr_root != NULL && pn->mn_pr_root != pr_root)
1434 mqfs_fileno_alloc(mi, pn);
1435 entry.d_fileno = pn->mn_fileno;
1436 for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i)
1437 entry.d_name[i] = pn->mn_name[i];
1438 entry.d_name[i] = 0;
1440 switch (pn->mn_type) {
1444 case mqfstype_parent:
1445 entry.d_type = DT_DIR;
1448 entry.d_type = DT_REG;
1450 case mqfstype_symlink:
1451 entry.d_type = DT_LNK;
1454 panic("%s has unexpected node type: %d", pn->mn_name,
1457 if (entry.d_reclen > uio->uio_resid)
1459 if (offset >= uio->uio_offset) {
1460 error = vfs_read_dirent(ap, &entry, offset);
1464 offset += entry.d_reclen;
1466 sx_xunlock(&mi->mi_lock);
1468 uio->uio_offset = offset;
1470 if (tmp_ncookies != NULL)
1471 ap->a_ncookies = tmp_ncookies;
1479 struct vop_mkdir_args {
1480 struct vnode *a_dvp;
1481 struct vnode **a_vpp
1482 struct componentname *a_cnp
1483 struct vattr *a_vap;
1488 * Create a directory.
1491 mqfs_mkdir(struct vop_mkdir_args *ap)
1493 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1494 struct componentname *cnp = ap->a_cnp;
1495 struct mqfs_node *pd = VTON(ap->a_dvp);
1496 struct mqfs_node *pn;
1499 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
1501 sx_xlock(&mqfs->mi_lock);
1502 if ((cnp->cn_flags & HASBUF) == 0)
1503 panic("%s: no name", __func__);
1504 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen,
1505 ap->a_vap->cn_cred, ap->a_vap->va_mode);
1508 sx_xunlock(&mqfs->mi_lock);
1512 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1519 struct vop_rmdir_args {
1520 struct vnode *a_dvp;
1522 struct componentname *a_cnp;
1527 * Remove a directory.
1530 mqfs_rmdir(struct vop_rmdir_args *ap)
1532 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1533 struct mqfs_node *pn = VTON(ap->a_vp);
1534 struct mqfs_node *pt;
1536 if (pn->mn_type != mqfstype_dir)
1539 sx_xlock(&mqfs->mi_lock);
1540 if (pn->mn_deleted) {
1541 sx_xunlock(&mqfs->mi_lock);
1545 pt = LIST_FIRST(&pn->mn_children);
1546 pt = LIST_NEXT(pt, mn_sibling);
1547 pt = LIST_NEXT(pt, mn_sibling);
1549 sx_xunlock(&mqfs->mi_lock);
1553 pn->mn_parent = NULL;
1555 LIST_REMOVE(pn, mn_sibling);
1558 sx_xunlock(&mqfs->mi_lock);
1559 cache_purge(ap->a_vp);
1567 * Set a destructor task with the prison's root
1570 mqfs_prison_create(void *obj, void *data __unused)
1572 struct prison *pr = obj;
1573 struct mqfs_osd *mo;
1576 if (pr->pr_root == pr->pr_parent->pr_root)
1579 mo = malloc(sizeof(struct mqfs_osd), M_PRISON, M_WAITOK);
1580 rsv = osd_reserve(mqfs_osd_jail_slot);
1581 TASK_INIT(&mo->mo_task, 0, mqfs_prison_remove_task, mo);
1582 mtx_lock(&pr->pr_mtx);
1583 mo->mo_pr_root = pr->pr_root;
1584 (void)osd_jail_set_reserved(pr, mqfs_osd_jail_slot, rsv, mo);
1585 mtx_unlock(&pr->pr_mtx);
1590 * Queue the task for after jail/OSD locks are released
1593 mqfs_prison_destructor(void *data)
1595 struct mqfs_osd *mo = data;
1597 if (mqfs_osd_jail_slot != 0)
1598 taskqueue_enqueue(taskqueue_thread, &mo->mo_task);
1604 * See if this prison root is obsolete, and clean up associated queues if it is
1607 mqfs_prison_remove_task(void *context, int pending)
1609 struct mqfs_osd *mo = context;
1610 struct mqfs_node *pn, *tpn;
1611 const struct prison *pr;
1612 const void *pr_root;
1615 pr_root = mo->mo_pr_root;
1617 sx_slock(&allprison_lock);
1618 TAILQ_FOREACH(pr, &allprison, pr_list) {
1619 if (pr->pr_root == pr_root)
1622 sx_sunlock(&allprison_lock);
1625 * No jails are rooted in this directory anymore,
1626 * so no queues should be either.
1628 sx_xlock(&mqfs_data.mi_lock);
1629 LIST_FOREACH_SAFE(pn, &mqfs_data.mi_root->mn_children,
1631 if (pn->mn_pr_root == pr_root)
1632 (void)do_unlink(pn, curthread->td_ucred);
1634 sx_xunlock(&mqfs_data.mi_lock);
1641 * Allocate a message queue
1643 static struct mqueue *
1644 mqueue_alloc(const struct mq_attr *attr)
1650 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO);
1651 TAILQ_INIT(&mq->mq_msgq);
1653 mq->mq_maxmsg = attr->mq_maxmsg;
1654 mq->mq_msgsize = attr->mq_msgsize;
1656 mq->mq_maxmsg = default_maxmsg;
1657 mq->mq_msgsize = default_msgsize;
1659 mtx_init(&mq->mq_mutex, "mqueue lock", NULL, MTX_DEF);
1660 knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex);
1661 knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex);
1662 atomic_add_int(&curmq, 1);
1667 * Destroy a message queue
1670 mqueue_free(struct mqueue *mq)
1672 struct mqueue_msg *msg;
1674 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) {
1675 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link);
1676 free(msg, M_MQUEUEDATA);
1679 mtx_destroy(&mq->mq_mutex);
1680 seldrain(&mq->mq_rsel);
1681 seldrain(&mq->mq_wsel);
1682 knlist_destroy(&mq->mq_rsel.si_note);
1683 knlist_destroy(&mq->mq_wsel.si_note);
1684 uma_zfree(mqueue_zone, mq);
1685 atomic_add_int(&curmq, -1);
1689 * Load a message from user space
1691 static struct mqueue_msg *
1692 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio)
1694 struct mqueue_msg *msg;
1698 len = sizeof(struct mqueue_msg) + msg_size;
1699 msg = malloc(len, M_MQUEUEDATA, M_WAITOK);
1700 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg),
1703 free(msg, M_MQUEUEDATA);
1706 msg->msg_size = msg_size;
1707 msg->msg_prio = msg_prio;
1713 * Save a message to user space
1716 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio)
1720 error = copyout(((char *)msg) + sizeof(*msg), msg_ptr,
1722 if (error == 0 && msg_prio != NULL)
1723 error = copyout(&msg->msg_prio, msg_prio, sizeof(int));
1728 * Free a message's memory
1730 static __inline void
1731 mqueue_freemsg(struct mqueue_msg *msg)
1733 free(msg, M_MQUEUEDATA);
1737 * Send a message. If waitok is false, the thread will not be
1738 * blocked if the queue is full; otherwise the absolute
1739 * time will be checked.
/*
 * Front end for sending one message.  Validates priority and length,
 * copies the message in, then dispatches to _mqueue_send():
 *   - !waitok (O_NONBLOCK): single try with timo = -1;
 *   - NULL abs_timeout: block forever (timo = 0);
 *   - otherwise: try once, then loop converting the remaining absolute
 *     time to a tick count until sent, timed out, or interrupted.
 * On any failure the copied-in message is freed before returning.
 */
1742 mqueue_send(struct mqueue *mq, const char *msg_ptr,
1743 size_t msg_len, unsigned msg_prio, int waitok,
1744 const struct timespec *abs_timeout)
1746 struct mqueue_msg *msg;
1747 struct timespec ts, ts2;
1751 if (msg_prio >= MQ_PRIO_MAX)
1753 if (msg_len > mq->mq_msgsize)
1755 msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio);
1759 /* O_NONBLOCK case */
1761 error = _mqueue_send(mq, msg, -1);
1767 /* we allow a null timeout (wait forever) */
1768 if (abs_timeout == NULL) {
1769 error = _mqueue_send(mq, msg, 0);
1775 /* send it before checking time */
1776 error = _mqueue_send(mq, msg, -1);
1780 if (error != EAGAIN)
/* POSIX: an invalid tv_nsec in the timeout is an error (EINVAL). */
1783 if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
/* ts2 = abs_timeout - now; two-argument timespecsub (pre-r320902 API). */
1790 timespecsub(&ts2, &ts);
1791 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1795 TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1796 error = _mqueue_send(mq, msg, tvtohz(&tv));
1797 if (error != ETIMEDOUT)
1803 mqueue_freemsg(msg);
1808 * Common routine to send a message
/*
 * Common send path, performs the queue insertion under mq_mutex.
 * timo semantics (msleep convention): < 0 means do not sleep (EAGAIN
 * when full), 0 means sleep forever, > 0 is a tick count.  Messages
 * are kept ordered by descending priority; equal priorities keep FIFO
 * order (insert after existing entries of the same priority).  After a
 * successful insert the routine wakes one receiver, or fires the
 * mq_notify notification when there is no receiver, and posts
 * select/kqueue read events.
 */
1811 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
1813 struct mqueue_msg *msg2;
1816 mtx_lock(&mq->mq_mutex);
1817 while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) {
1819 mtx_unlock(&mq->mq_mutex);
1823 error = msleep(&mq->mq_senders, &mq->mq_mutex,
1824 PCATCH, "mqsend", timo);
/* Normalize a wakeup race into a retry rather than returning EAGAIN. */
1826 if (error == EAGAIN)
1829 if (mq->mq_curmsgs >= mq->mq_maxmsg) {
1830 mtx_unlock(&mq->mq_mutex);
1834 if (TAILQ_EMPTY(&mq->mq_msgq)) {
1835 TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
/* Common case: lowest-or-equal priority goes straight to the tail. */
1837 if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
1838 TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
1840 TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
1841 if (msg2->msg_prio < msg->msg_prio)
1844 TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
1848 mq->mq_totalbytes += msg->msg_size;
1849 if (mq->mq_receivers)
1850 wakeup_one(&mq->mq_receivers);
1851 else if (mq->mq_notifier != NULL)
1852 mqueue_send_notification(mq);
1853 if (mq->mq_flags & MQ_RSEL) {
1854 mq->mq_flags &= ~MQ_RSEL;
1855 selwakeup(&mq->mq_rsel);
1857 KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
1858 mtx_unlock(&mq->mq_mutex);
1863 * Send a realtime signal to the process which registered itself
1864 * successfully by mq_notify.
/*
 * Deliver the mq_notify() notification for this queue.  Must be called
 * with mq_mutex held.  For SIGEV_SIGNAL/SIGEV_THREAD_ID the registered
 * ksiginfo is (re)sent to the target thread unless it is already
 * queued; SIGEV_NONE delivers nothing.  In all cases the registration
 * is consumed (mq_notifier reset to NULL), matching POSIX one-shot
 * notification semantics.
 */
1867 mqueue_send_notification(struct mqueue *mq)
1869 struct mqueue_notifier *nt;
1874 mtx_assert(&mq->mq_mutex, MA_OWNED);
1875 nt = mq->mq_notifier;
1876 if (nt->nt_sigev.sigev_notify != SIGEV_NONE) {
1878 error = sigev_findtd(p, &nt->nt_sigev, &td);
1880 mq->mq_notifier = NULL;
/* Do not re-queue a siginfo that is still pending delivery. */
1883 if (!KSI_ONQ(&nt->nt_ksi)) {
1884 ksiginfo_set_sigev(&nt->nt_ksi, &nt->nt_sigev);
1885 tdsendsignal(p, td, nt->nt_ksi.ksi_signo, &nt->nt_ksi);
1889 mq->mq_notifier = NULL;
1893 * Get a message. If waitok is false, the thread will not be
1894 * blocked if there is no data in the queue; otherwise the absolute
1895 * time will be checked.
/*
 * Front end for receiving one message; mirrors mqueue_send().  Per
 * POSIX, the caller's buffer must be at least mq_msgsize bytes.  On
 * success the payload (and optionally priority) is copied out and the
 * message size is returned via td_retval[0]; the kernel copy of the
 * message is always freed.
 */
1898 mqueue_receive(struct mqueue *mq, char *msg_ptr,
1899 size_t msg_len, unsigned *msg_prio, int waitok,
1900 const struct timespec *abs_timeout)
1902 struct mqueue_msg *msg;
1903 struct timespec ts, ts2;
1907 if (msg_len < mq->mq_msgsize)
1910 /* O_NONBLOCK case */
1912 error = _mqueue_recv(mq, &msg, -1);
1918 /* we allow a null timeout (wait forever). */
1919 if (abs_timeout == NULL) {
1920 error = _mqueue_recv(mq, &msg, 0);
1926 /* try to get a message before checking time */
1927 error = _mqueue_recv(mq, &msg, -1);
1931 if (error != EAGAIN)
1934 if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
/* ts2 = abs_timeout - now; two-argument timespecsub (pre-r320902 API). */
1942 timespecsub(&ts2, &ts);
1943 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1947 TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1948 error = _mqueue_recv(mq, &msg, tvtohz(&tv));
1951 if (error != ETIMEDOUT)
1956 error = mqueue_savemsg(msg, msg_ptr, msg_prio);
1958 curthread->td_retval[0] = msg->msg_size;
1959 curthread->td_retval[1] = 0;
1961 mqueue_freemsg(msg);
1966 * Common routine to receive a message
/*
 * Common receive path: dequeue the highest-priority message under
 * mq_mutex.  timo: < 0 do not sleep (EAGAIN when empty), 0 sleep
 * forever, > 0 tick count.  After removing a message one blocked
 * sender is woken and select/kqueue write events are posted.  If a
 * notifier is registered, messages remain and no receiver is waiting,
 * the notification is (re)fired so it is not lost.
 */
1969 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
1973 mtx_lock(&mq->mq_mutex);
/* Head of mq_msgq is the highest-priority message (kept sorted on send). */
1974 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) {
1976 mtx_unlock(&mq->mq_mutex);
1980 error = msleep(&mq->mq_receivers, &mq->mq_mutex,
1981 PCATCH, "mqrecv", timo);
/* Normalize a wakeup race into a retry rather than returning EAGAIN. */
1983 if (error == EAGAIN)
1988 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link);
1990 mq->mq_totalbytes -= (*msg)->msg_size;
1992 wakeup_one(&mq->mq_senders);
1993 if (mq->mq_flags & MQ_WSEL) {
1994 mq->mq_flags &= ~MQ_WSEL;
1995 selwakeup(&mq->mq_wsel);
1997 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0);
1999 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 &&
2000 !TAILQ_EMPTY(&mq->mq_msgq)) {
2001 mqueue_send_notification(mq);
2003 mtx_unlock(&mq->mq_mutex);
/* Allocate a zeroed notifier record; may sleep (M_WAITOK). */
2007 static __inline struct mqueue_notifier *
2008 notifier_alloc(void)
2010 return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO));
/* Return a notifier record to its zone. */
2013 static __inline void
2014 notifier_free(struct mqueue_notifier *p)
2016 uma_zfree(mqnoti_zone, p);
/*
 * Find the notifier a process registered for descriptor "fd", keyed by
 * the descriptor number stored in the ksiginfo (ksi_mqd).
 */
2019 static struct mqueue_notifier *
2020 notifier_search(struct proc *p, int fd)
2022 struct mqueue_notifier *nt;
2024 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) {
2025 if (nt->nt_ksi.ksi_mqd == fd)
/* Link a notifier onto the owning process's per-process list. */
2031 static __inline void
2032 notifier_insert(struct proc *p, struct mqueue_notifier *nt)
2034 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link);
/* Unlink a notifier from the process list (frees elsewhere). */
2037 static __inline void
2038 notifier_delete(struct proc *p, struct mqueue_notifier *nt)
2040 LIST_REMOVE(nt, nt_link);
/*
 * Remove any notification that process "p" registered on descriptor
 * "fd": detach it from the queue, pull back a pending siginfo from the
 * signal queue and unlink it from the process.  Requires mq_mutex.
 */
2045 notifier_remove(struct proc *p, struct mqueue *mq, int fd)
2047 struct mqueue_notifier *nt;
2049 mtx_assert(&mq->mq_mutex, MA_OWNED);
2051 nt = notifier_search(p, fd);
2053 if (mq->mq_notifier == nt)
2054 mq->mq_notifier = NULL;
/* Withdraw a not-yet-delivered signal so it cannot fire after removal. */
2055 sigqueue_take(&nt->nt_ksi);
2056 notifier_delete(p, nt);
/*
 * Implementation of mq_open(): validate the supplied attributes and
 * path ("/name", no further slashes), allocate a file descriptor, then
 * under the mqfs lock either look up an existing queue node or create
 * a new one (O_CREAT).  Honors O_EXCL, performs vaccess() permission
 * checks against the node for pre-existing queues, and on success
 * installs the node into the new descriptor via finit().
 */
2062 kern_kmq_open(struct thread *td, const char *upath, int flags, mode_t mode,
2063 const struct mq_attr *attr)
2065 char path[MQFS_NAMELEN + 1];
2066 struct mqfs_node *pn;
2067 struct filedesc *fdp;
2070 int fd, error, len, cmode;
2072 fdp = td->td_proc->p_fd;
/* Creation mode: apply the umask, drop the sticky bit. */
2073 cmode = (((mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT);
2075 if ((flags & O_CREAT) != 0 && attr != NULL) {
2076 if (attr->mq_maxmsg <= 0 || attr->mq_maxmsg > maxmsg)
2078 if (attr->mq_msgsize <= 0 || attr->mq_msgsize > maxmsgsize)
2082 error = copyinstr(upath, path, MQFS_NAMELEN + 1, NULL);
2087 * The first character of name must be a slash (/) character
2088 * and the remaining characters of name cannot include any slash
2092 if (len < 2 || path[0] != '/' || strchr(path + 1, '/') != NULL)
2095 error = falloc(td, &fp, &fd, O_CLOEXEC);
2099 sx_xlock(&mqfs_data.mi_lock);
2100 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1, td->td_ucred);
2102 if (!(flags & O_CREAT)) {
2105 mq = mqueue_alloc(attr);
2109 pn = mqfs_create_file(mqfs_data.mi_root,
2110 path + 1, len - 1, td->td_ucred,
/* Existing queue + O_CREAT|O_EXCL must fail. */
2123 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) {
2126 accmode_t accmode = 0;
2132 error = vaccess(VREG, pn->mn_mode, pn->mn_uid,
2133 pn->mn_gid, accmode, td->td_ucred, NULL);
2138 sx_xunlock(&mqfs_data.mi_lock);
/* Error path: release the not-yet-published descriptor. */
2139 fdclose(fdp, fp, fd, td);
2145 sx_xunlock(&mqfs_data.mi_lock);
2147 finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn,
2150 td->td_retval[0] = fd;
2156 * Syscall to open a message queue.
/*
 * kmq_open(2) entry point: reject O_ACCMODE/O_EXEC combinations, copy
 * in the optional attributes and hand off to kern_kmq_open().
 */
2159 sys_kmq_open(struct thread *td, struct kmq_open_args *uap)
2161 struct mq_attr attr;
2164 if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
2166 flags = FFLAGS(uap->flags);
2167 if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
2168 error = copyin(uap->attr, &attr, sizeof(attr));
2172 return (kern_kmq_open(td, uap->path, flags, uap->mode,
2173 uap->attr != NULL ? &attr : NULL));
2177 * Syscall to unlink a message queue.
/*
 * kmq_unlink(2): validate the "/name" path, look the node up under the
 * mqfs lock and remove it via do_unlink().
 */
2180 sys_kmq_unlink(struct thread *td, struct kmq_unlink_args *uap)
2182 char path[MQFS_NAMELEN+1];
2183 struct mqfs_node *pn;
2186 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
/* Same path rules as mq_open: leading slash, no embedded slashes. */
2191 if (len < 2 || path[0] != '/' || strchr(path + 1, '/') != NULL)
2194 sx_xlock(&mqfs_data.mi_lock);
2195 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1, td->td_ucred);
2197 error = do_unlink(pn, td->td_ucred);
2200 sx_xunlock(&mqfs_data.mi_lock);
2204 typedef int (*_fgetf)(struct thread *, int, cap_rights_t *, struct file **);
2207 * Get message queue by giving file slot
/*
 * Resolve a descriptor to its mqueue objects using the supplied fget
 * variant "func" (fget/fget_read/fget_write) and capability rights.
 * Rejects descriptors that are not message queues by comparing f_ops.
 * Returns the held file in *fpp and optionally the node/queue.
 */
2210 _getmq(struct thread *td, int fd, cap_rights_t *rightsp, _fgetf func,
2211 struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq)
2213 struct mqfs_node *pn;
2216 error = func(td, fd, rightsp, fpp);
2219 if (&mqueueops != (*fpp)->f_ops) {
2223 pn = (*fpp)->f_data;
/* _getmq wrapper requiring CAP_EVENT (generic access, e.g. notify). */
2232 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn,
2233 struct mqueue **pmq)
2235 cap_rights_t rights;
2237 return _getmq(td, fd, cap_rights_init(&rights, CAP_EVENT), fget,
/* _getmq wrapper requiring CAP_READ and an FREAD-opened descriptor. */
2242 getmq_read(struct thread *td, int fd, struct file **fpp,
2243 struct mqfs_node **ppn, struct mqueue **pmq)
2245 cap_rights_t rights;
2247 return _getmq(td, fd, cap_rights_init(&rights, CAP_READ), fget_read,
/* _getmq wrapper requiring CAP_WRITE and an FWRITE-opened descriptor. */
2252 getmq_write(struct thread *td, int fd, struct file **fpp,
2253 struct mqfs_node **ppn, struct mqueue **pmq)
2255 cap_rights_t rights;
2257 return _getmq(td, fd, cap_rights_init(&rights, CAP_WRITE), fget_write,
/*
 * Implementation of mq_setattr()/mq_getattr().  Only O_NONBLOCK may be
 * changed; it is folded into the file's f_flag with a lock-free CAS
 * loop.  When "oattr" is non-NULL the previous attributes (limits,
 * current message count, and the pre-change O_NONBLOCK state) are
 * reported back.
 */
2262 kern_kmq_setattr(struct thread *td, int mqd, const struct mq_attr *attr,
2263 struct mq_attr *oattr)
2270 if (attr != NULL && (attr->mq_flags & ~O_NONBLOCK) != 0)
2272 error = getmq(td, mqd, &fp, NULL, &mq);
2275 oattr->mq_maxmsg = mq->mq_maxmsg;
2276 oattr->mq_msgsize = mq->mq_msgsize;
2277 oattr->mq_curmsgs = mq->mq_curmsgs;
/* CAS retry loop: update only the O_NONBLOCK bit of f_flag atomically. */
2280 oflag = flag = fp->f_flag;
2281 flag &= ~O_NONBLOCK;
2282 flag |= (attr->mq_flags & O_NONBLOCK);
2283 } while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0);
2286 oattr->mq_flags = (O_NONBLOCK & oflag);
/*
 * kmq_setattr(2): copy in the optional new attributes, call
 * kern_kmq_setattr(), and copy the old attributes back out when
 * requested.
 */
2292 sys_kmq_setattr(struct thread *td, struct kmq_setattr_args *uap)
2294 struct mq_attr attr, oattr;
2297 if (uap->attr != NULL) {
2298 error = copyin(uap->attr, &attr, sizeof(attr));
2302 error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
2306 if (uap->oattr != NULL)
2307 error = copyout(&oattr, uap->oattr, sizeof(oattr));
/*
 * kmq_timedreceive(2): resolve the descriptor (CAP_READ), copy in the
 * optional absolute timeout, and receive via mqueue_receive().
 * Blocking behavior follows the descriptor's O_NONBLOCK flag.
 */
2312 sys_kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap)
2316 struct timespec *abs_timeout, ets;
2320 error = getmq_read(td, uap->mqd, &fp, NULL, &mq);
2323 if (uap->abs_timeout != NULL) {
2324 error = copyin(uap->abs_timeout, &ets, sizeof(ets));
2330 waitok = !(fp->f_flag & O_NONBLOCK);
2331 error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len,
2332 uap->msg_prio, waitok, abs_timeout);
/*
 * kmq_timedsend(2): resolve the descriptor (CAP_WRITE), copy in the
 * optional absolute timeout, and send via mqueue_send().  Blocking
 * behavior follows the descriptor's O_NONBLOCK flag.
 */
2338 sys_kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap)
2342 struct timespec *abs_timeout, ets;
2345 error = getmq_write(td, uap->mqd, &fp, NULL, &mq);
2348 if (uap->abs_timeout != NULL) {
2349 error = copyin(uap->abs_timeout, &ets, sizeof(ets));
2355 waitok = !(fp->f_flag & O_NONBLOCK);
2356 error = mqueue_send(mq, uap->msg_ptr, uap->msg_len,
2357 uap->msg_prio, waitok, abs_timeout);
/*
 * Implementation of mq_notify().  With a non-NULL sigev, register a
 * notification (SIGEV_SIGNAL / SIGEV_THREAD_ID / SIGEV_NONE only);
 * with NULL, remove the caller's existing registration.  The fget_locked
 * + cap_check dance re-validates that "mqd" still names the same file
 * under the filedesc lock, closing the race with a concurrent close().
 * A pre-allocated notifier ("newnt") is used because notifier_alloc()
 * may sleep and therefore cannot run under mq_mutex -- on that path the
 * mutex is dropped, the allocation made, and the lookup retried.
 * NOTE(review): several intervening lines (retry jump, error labels)
 * are missing from this excerpt.
 */
2363 kern_kmq_notify(struct thread *td, int mqd, struct sigevent *sigev)
2366 cap_rights_t rights;
2368 struct filedesc *fdp;
2371 struct file *fp, *fp2;
2372 struct mqueue_notifier *nt, *newnt = NULL;
2375 if (sigev != NULL) {
2376 if (sigev->sigev_notify != SIGEV_SIGNAL &&
2377 sigev->sigev_notify != SIGEV_THREAD_ID &&
2378 sigev->sigev_notify != SIGEV_NONE)
2380 if ((sigev->sigev_notify == SIGEV_SIGNAL ||
2381 sigev->sigev_notify == SIGEV_THREAD_ID) &&
2382 !_SIG_VALID(sigev->sigev_signo))
2386 fdp = td->td_proc->p_fd;
2387 error = getmq(td, mqd, &fp, NULL, &mq);
2391 FILEDESC_SLOCK(fdp);
/* Re-check the descriptor still refers to the same file object. */
2392 fp2 = fget_locked(fdp, mqd);
2394 FILEDESC_SUNLOCK(fdp);
2399 error = cap_check(cap_rights(fdp, mqd),
2400 cap_rights_init(&rights, CAP_EVENT));
2402 FILEDESC_SUNLOCK(fdp);
2407 FILEDESC_SUNLOCK(fdp);
2411 mtx_lock(&mq->mq_mutex);
2412 FILEDESC_SUNLOCK(fdp);
2413 if (sigev != NULL) {
/* Only one process may be registered on a queue at a time (EBUSY). */
2414 if (mq->mq_notifier != NULL) {
2418 nt = notifier_search(p, mqd);
2420 if (newnt == NULL) {
/* Cannot sleep under mq_mutex: drop it, allocate, and retry. */
2422 mtx_unlock(&mq->mq_mutex);
2423 newnt = notifier_alloc();
2429 sigqueue_take(&nt->nt_ksi);
2430 if (newnt != NULL) {
2431 notifier_free(newnt);
2437 ksiginfo_init(&nt->nt_ksi);
2438 nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT;
2439 nt->nt_ksi.ksi_code = SI_MESGQ;
2441 nt->nt_ksi.ksi_mqd = mqd;
2442 notifier_insert(p, nt);
2444 nt->nt_sigev = *sigev;
2445 mq->mq_notifier = nt;
2448 * if there is no receivers and message queue
2449 * is not empty, we should send notification
2450 * as soon as possible.
2452 if (mq->mq_receivers == 0 &&
2453 !TAILQ_EMPTY(&mq->mq_msgq))
2454 mqueue_send_notification(mq);
2457 notifier_remove(p, mq, mqd);
2459 mtx_unlock(&mq->mq_mutex);
2464 notifier_free(newnt);
/*
 * kmq_notify(2): copy in the optional sigevent (NULL means unregister)
 * and hand off to kern_kmq_notify().
 */
2469 sys_kmq_notify(struct thread *td, struct kmq_notify_args *uap)
2471 struct sigevent ev, *evp;
2474 if (uap->sigev == NULL) {
2477 error = copyin(uap->sigev, &ev, sizeof(ev));
2482 return (kern_kmq_notify(td, uap->mqd, evp));
/*
 * Per-descriptor close hook: when a message-queue descriptor is closed,
 * drop any notification the owning process registered on it and wake
 * select/poll waiters from the same process so they do not sleep on a
 * dead descriptor.  Called with the filedesc lock held.
 */
2486 mqueue_fdclose(struct thread *td, int fd, struct file *fp)
2488 struct filedesc *fdp;
2491 fdp = td->td_proc->p_fd;
2492 FILEDESC_LOCK_ASSERT(fdp);
2494 if (fp->f_ops == &mqueueops) {
2496 mtx_lock(&mq->mq_mutex);
2497 notifier_remove(td->td_proc, mq, fd);
2499 /* have to wakeup thread in same process */
2500 if (mq->mq_flags & MQ_RSEL) {
2501 mq->mq_flags &= ~MQ_RSEL;
2502 selwakeup(&mq->mq_rsel);
2504 if (mq->mq_flags & MQ_WSEL) {
2505 mq->mq_flags &= ~MQ_WSEL;
2506 selwakeup(&mq->mq_wsel);
2508 mtx_unlock(&mq->mq_mutex);
/*
 * Process-exit eventhandler: walk the exiting process's descriptor
 * table and tear down every message-queue notification it still holds.
 * Asserts the per-process notifier list is empty afterwards.
 */
2513 mq_proc_exit(void *arg __unused, struct proc *p)
2515 struct filedesc *fdp;
2521 FILEDESC_SLOCK(fdp);
2522 for (i = 0; i < fdp->fd_nfiles; ++i) {
2523 fp = fget_locked(fdp, i);
2524 if (fp != NULL && fp->f_ops == &mqueueops) {
2526 mtx_lock(&mq->mq_mutex);
2527 notifier_remove(p, FPTOMQ(fp), i);
2528 mtx_unlock(&mq->mq_mutex);
2531 FILEDESC_SUNLOCK(fdp);
2532 KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"));
/* fo_read is not supported on message queues; use mq_receive(). */
2536 mqf_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
2537 int flags, struct thread *td)
2539 return (EOPNOTSUPP);
/* fo_write is not supported on message queues; use mq_send(). */
2543 mqf_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
2544 int flags, struct thread *td)
2546 return (EOPNOTSUPP);
/*
 * fo_truncate handler.  NOTE(review): body missing from this excerpt;
 * presumably returns an error like the other unsupported ops -- confirm.
 */
2550 mqf_truncate(struct file *fp, off_t length, struct ucred *active_cred,
/*
 * fo_ioctl handler.  NOTE(review): body missing from this excerpt;
 * presumably returns an error like the other unsupported ops -- confirm.
 */
2558 mqf_ioctl(struct file *fp, u_long cmd, void *data,
2559 struct ucred *active_cred, struct thread *td)
/*
 * fo_poll handler: a queue is readable when it holds messages and
 * writable while below its message limit.  When an event is not ready
 * the thread is recorded via selrecord() and the matching MQ_RSEL /
 * MQ_WSEL flag set so senders/receivers know to call selwakeup().
 */
2565 mqf_poll(struct file *fp, int events, struct ucred *active_cred,
2568 struct mqueue *mq = FPTOMQ(fp);
2571 mtx_lock(&mq->mq_mutex);
2572 if (events & (POLLIN | POLLRDNORM)) {
2573 if (mq->mq_curmsgs) {
2574 revents |= events & (POLLIN | POLLRDNORM);
2576 mq->mq_flags |= MQ_RSEL;
2577 selrecord(td, &mq->mq_rsel);
2580 if (events & POLLOUT) {
2581 if (mq->mq_curmsgs < mq->mq_maxmsg)
2584 mq->mq_flags |= MQ_WSEL;
2585 selrecord(td, &mq->mq_wsel);
2588 mtx_unlock(&mq->mq_mutex);
/*
 * fo_close handler: neuter the file's ops, then drop the node
 * reference under the mqfs lock.
 */
2593 mqf_close(struct file *fp, struct thread *td)
2595 struct mqfs_node *pn;
/* Prevent further fileops dispatch on this file. */
2597 fp->f_ops = &badfileops;
2600 sx_xlock(&mqfs_data.mi_lock);
2602 sx_xunlock(&mqfs_data.mi_lock);
/*
 * fo_stat handler: synthesize a stat buffer from the mqfs node's
 * timestamps, ownership and mode.  Queues present themselves as FIFOs
 * (S_IFIFO).
 */
2607 mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
2610 struct mqfs_node *pn = fp->f_data;
2612 bzero(st, sizeof *st);
2613 sx_xlock(&mqfs_data.mi_lock);
2614 st->st_atim = pn->mn_atime;
2615 st->st_mtim = pn->mn_mtime;
2616 st->st_ctim = pn->mn_ctime;
2617 st->st_birthtim = pn->mn_birth;
2618 st->st_uid = pn->mn_uid;
2619 st->st_gid = pn->mn_gid;
2620 st->st_mode = S_IFIFO | pn->mn_mode;
2621 sx_xunlock(&mqfs_data.mi_lock);
/*
 * fo_chmod handler: VADMIN access check against the node's current
 * ownership, then store the new mode (ACCESSPERMS bits only).
 */
2626 mqf_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
2629 struct mqfs_node *pn;
2634 sx_xlock(&mqfs_data.mi_lock);
2635 error = vaccess(VREG, pn->mn_mode, pn->mn_uid, pn->mn_gid, VADMIN,
2639 pn->mn_mode = mode & ACCESSPERMS;
2641 sx_xunlock(&mqfs_data.mi_lock);
/*
 * fo_chown handler: -1 for uid/gid means "keep current value".
 * Changing ownership to anything other than the caller's own uid, or
 * to a group the caller is not a member of, requires PRIV_VFS_CHOWN.
 */
2646 mqf_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
2649 struct mqfs_node *pn;
2654 sx_xlock(&mqfs_data.mi_lock);
2655 if (uid == (uid_t)-1)
2657 if (gid == (gid_t)-1)
2659 if (((uid != pn->mn_uid && uid != active_cred->cr_uid) ||
2660 (gid != pn->mn_gid && !groupmember(gid, active_cred))) &&
2661 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
2666 sx_xunlock(&mqfs_data.mi_lock);
/*
 * fo_kqfilter handler: attach EVFILT_READ knotes to the read-side
 * knlist and EVFILT_WRITE knotes to the write-side knlist; other
 * filters are rejected (error path not visible in this excerpt).
 */
2671 mqf_kqfilter(struct file *fp, struct knote *kn)
2673 struct mqueue *mq = FPTOMQ(fp);
2676 if (kn->kn_filter == EVFILT_READ) {
2677 kn->kn_fop = &mq_rfiltops;
2678 knlist_add(&mq->mq_rsel.si_note, kn, 0);
2679 } else if (kn->kn_filter == EVFILT_WRITE) {
2680 kn->kn_fop = &mq_wfiltops;
2681 knlist_add(&mq->mq_wsel.si_note, kn, 0);
/*
 * Knote detach: remove the knote from whichever knlist it was attached
 * to in mqf_kqfilter(); any other filter type indicates corruption and
 * panics.
 */
2688 filt_mqdetach(struct knote *kn)
2690 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2692 if (kn->kn_filter == EVFILT_READ)
2693 knlist_remove(&mq->mq_rsel.si_note, kn, 0);
2694 else if (kn->kn_filter == EVFILT_WRITE)
2695 knlist_remove(&mq->mq_wsel.si_note, kn, 0);
2697 panic("filt_mqdetach");
/* EVFILT_READ event test: ready when the queue holds any message.
 * Runs with mq_mutex held (knlist uses the queue mutex). */
2701 filt_mqread(struct knote *kn, long hint)
2703 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2705 mtx_assert(&mq->mq_mutex, MA_OWNED);
2706 return (mq->mq_curmsgs != 0);
/* EVFILT_WRITE event test: ready while the queue is below its limit.
 * Runs with mq_mutex held (knlist uses the queue mutex). */
2710 filt_mqwrite(struct knote *kn, long hint)
2712 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2714 mtx_assert(&mq->mq_mutex, MA_OWNED);
2715 return (mq->mq_curmsgs < mq->mq_maxmsg);
/*
 * File operations vector for DTYPE_MQUEUE descriptors.  read/write/
 * truncate/ioctl are stubs; poll, kqueue, stat, chmod, chown and close
 * carry the real behavior.
 */
2718 static struct fileops mqueueops = {
2719 .fo_read = mqf_read,
2720 .fo_write = mqf_write,
2721 .fo_truncate = mqf_truncate,
2722 .fo_ioctl = mqf_ioctl,
2723 .fo_poll = mqf_poll,
2724 .fo_kqfilter = mqf_kqfilter,
2725 .fo_stat = mqf_stat,
2726 .fo_chmod = mqf_chmod,
2727 .fo_chown = mqf_chown,
2728 .fo_close = mqf_close,
2729 .fo_sendfile = invfo_sendfile,
/*
 * Vnode operations for the mqueuefs filesystem view of the queues.
 * Directory mutation beyond create/remove (mkdir, rmdir, write) is
 * rejected with VOP_EOPNOTSUPP.
 */
2732 static struct vop_vector mqfs_vnodeops = {
2733 .vop_default = &default_vnodeops,
2734 .vop_access = mqfs_access,
2735 .vop_cachedlookup = mqfs_lookup,
2736 .vop_lookup = vfs_cache_lookup,
2737 .vop_reclaim = mqfs_reclaim,
2738 .vop_create = mqfs_create,
2739 .vop_remove = mqfs_remove,
2740 .vop_inactive = mqfs_inactive,
2741 .vop_open = mqfs_open,
2742 .vop_close = mqfs_close,
2743 .vop_getattr = mqfs_getattr,
2744 .vop_setattr = mqfs_setattr,
2745 .vop_read = mqfs_read,
2746 .vop_write = VOP_EOPNOTSUPP,
2747 .vop_readdir = mqfs_readdir,
2748 .vop_mkdir = VOP_EOPNOTSUPP,
2749 .vop_rmdir = VOP_EOPNOTSUPP
/* VFS operations for mounting/unmounting the mqueuefs filesystem. */
2752 static struct vfsops mqfs_vfsops = {
2753 .vfs_init = mqfs_init,
2754 .vfs_uninit = mqfs_uninit,
2755 .vfs_mount = mqfs_mount,
2756 .vfs_unmount = mqfs_unmount,
2757 .vfs_root = mqfs_root,
2758 .vfs_statfs = mqfs_statfs,
/* Filesystem registration record; VFCF_SYNTHETIC marks it as virtual. */
2761 static struct vfsconf mqueuefs_vfsconf = {
2762 .vfc_version = VFS_VERSION,
2763 .vfc_name = "mqueuefs",
2764 .vfc_vfsops = &mqfs_vfsops,
2766 .vfc_flags = VFCF_SYNTHETIC
/* Native kmq_* syscall table registered at module load. */
2769 static struct syscall_helper_data mq_syscalls[] = {
2770 SYSCALL_INIT_HELPER(kmq_open),
2771 SYSCALL_INIT_HELPER(kmq_setattr),
2772 SYSCALL_INIT_HELPER(kmq_timedsend),
2773 SYSCALL_INIT_HELPER(kmq_timedreceive),
2774 SYSCALL_INIT_HELPER(kmq_notify),
2775 SYSCALL_INIT_HELPER(kmq_unlink),
2779 #ifdef COMPAT_FREEBSD32
2780 #include <compat/freebsd32/freebsd32.h>
2781 #include <compat/freebsd32/freebsd32_proto.h>
2782 #include <compat/freebsd32/freebsd32_signal.h>
2783 #include <compat/freebsd32/freebsd32_syscall.h>
2784 #include <compat/freebsd32/freebsd32_util.h>
/* Widen a 32-bit compat mq_attr into the native structure. */
2787 mq_attr_from32(const struct mq_attr32 *from, struct mq_attr *to)
2790 to->mq_flags = from->mq_flags;
2791 to->mq_maxmsg = from->mq_maxmsg;
2792 to->mq_msgsize = from->mq_msgsize;
2793 to->mq_curmsgs = from->mq_curmsgs;
/* Narrow a native mq_attr into the 32-bit compat structure. */
2797 mq_attr_to32(const struct mq_attr *from, struct mq_attr32 *to)
2800 to->mq_flags = from->mq_flags;
2801 to->mq_maxmsg = from->mq_maxmsg;
2802 to->mq_msgsize = from->mq_msgsize;
2803 to->mq_curmsgs = from->mq_curmsgs;
/*
 * 32-bit compat kmq_open: same flag validation as the native syscall,
 * but copies in a struct mq_attr32 and converts before dispatch.
 */
2807 freebsd32_kmq_open(struct thread *td, struct freebsd32_kmq_open_args *uap)
2809 struct mq_attr attr;
2810 struct mq_attr32 attr32;
2813 if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
2815 flags = FFLAGS(uap->flags);
2816 if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
2817 error = copyin(uap->attr, &attr32, sizeof(attr32));
2820 mq_attr_from32(&attr32, &attr);
2822 return (kern_kmq_open(td, uap->path, flags, uap->mode,
2823 uap->attr != NULL ? &attr : NULL));
/*
 * 32-bit compat kmq_setattr: converts the attribute structures in both
 * directions around kern_kmq_setattr().
 */
2827 freebsd32_kmq_setattr(struct thread *td, struct freebsd32_kmq_setattr_args *uap)
2829 struct mq_attr attr, oattr;
2830 struct mq_attr32 attr32, oattr32;
2833 if (uap->attr != NULL) {
2834 error = copyin(uap->attr, &attr32, sizeof(attr32));
2837 mq_attr_from32(&attr32, &attr);
2839 error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
2843 if (uap->oattr != NULL) {
2844 mq_attr_to32(&oattr, &oattr32);
2845 error = copyout(&oattr32, uap->oattr, sizeof(oattr32));
/*
 * 32-bit compat kmq_timedsend: copies in a timespec32 and widens the
 * fields (CP macro) before calling the common mqueue_send() path.
 */
2851 freebsd32_kmq_timedsend(struct thread *td,
2852 struct freebsd32_kmq_timedsend_args *uap)
2856 struct timespec32 ets32;
2857 struct timespec *abs_timeout, ets;
2861 error = getmq_write(td, uap->mqd, &fp, NULL, &mq);
2864 if (uap->abs_timeout != NULL) {
2865 error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
2868 CP(ets32, ets, tv_sec);
2869 CP(ets32, ets, tv_nsec);
2873 waitok = !(fp->f_flag & O_NONBLOCK);
2874 error = mqueue_send(mq, uap->msg_ptr, uap->msg_len,
2875 uap->msg_prio, waitok, abs_timeout);
/*
 * 32-bit compat kmq_timedreceive: copies in a timespec32 and widens
 * the fields (CP macro) before calling the common mqueue_receive() path.
 */
2881 freebsd32_kmq_timedreceive(struct thread *td,
2882 struct freebsd32_kmq_timedreceive_args *uap)
2886 struct timespec32 ets32;
2887 struct timespec *abs_timeout, ets;
2890 error = getmq_read(td, uap->mqd, &fp, NULL, &mq);
2893 if (uap->abs_timeout != NULL) {
2894 error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
2897 CP(ets32, ets, tv_sec);
2898 CP(ets32, ets, tv_nsec);
2902 waitok = !(fp->f_flag & O_NONBLOCK);
2903 error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len,
2904 uap->msg_prio, waitok, abs_timeout);
/*
 * 32-bit compat kmq_notify: copies in a sigevent32 and converts it to
 * the native sigevent before calling kern_kmq_notify().  NULL means
 * unregister, as in the native syscall.
 */
2910 freebsd32_kmq_notify(struct thread *td, struct freebsd32_kmq_notify_args *uap)
2912 struct sigevent ev, *evp;
2913 struct sigevent32 ev32;
2916 if (uap->sigev == NULL) {
2919 error = copyin(uap->sigev, &ev32, sizeof(ev32));
2922 error = convert_sigevent32(&ev32, &ev);
2927 return (kern_kmq_notify(td, uap->mqd, evp));
/*
 * 32-bit compat syscall table; kmq_unlink needs no conversion and
 * reuses the native handler via the _COMPAT helper.
 */
2930 static struct syscall_helper_data mq32_syscalls[] = {
2931 SYSCALL32_INIT_HELPER(freebsd32_kmq_open),
2932 SYSCALL32_INIT_HELPER(freebsd32_kmq_setattr),
2933 SYSCALL32_INIT_HELPER(freebsd32_kmq_timedsend),
2934 SYSCALL32_INIT_HELPER(freebsd32_kmq_timedreceive),
2935 SYSCALL32_INIT_HELPER(freebsd32_kmq_notify),
2936 SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink),
/*
 * NOTE(review): fragments of the module init/uninit routines -- their
 * signatures are missing from this excerpt.  Init registers the native
 * (and, under COMPAT_FREEBSD32, the 32-bit) syscall tables; uninit
 * unregisters them in reverse order.
 */
2946 error = syscall_helper_register(mq_syscalls);
2949 #ifdef COMPAT_FREEBSD32
2950 error = syscall32_helper_register(mq32_syscalls);
2961 #ifdef COMPAT_FREEBSD32
2962 syscall32_helper_unregister(mq32_syscalls);
2964 syscall_helper_unregister(mq_syscalls);
/*
 * Module event handler: delegates to vfs_modevent() for the filesystem
 * registration.  NOTE(review): the remainder of the body is missing
 * from this excerpt.
 */
2969 mq_modload(struct module *module, int cmd, void *arg)
2973 error = vfs_modevent(module, cmd, arg);
/*
 * Kernel module glue: the moduledata ties mq_modload to the "mqueuefs"
 * module, declared at SI_SUB_VFS so the VFS layer exists first.
 * NOTE(review): the moduledata initializer fields are missing from
 * this excerpt.
 */
2992 static moduledata_t mqueuefs_mod = {
2997 DECLARE_MODULE(mqueuefs, mqueuefs_mod, SI_SUB_VFS, SI_ORDER_MIDDLE);
2998 MODULE_VERSION(mqueuefs, 1);