2 * Copyright (c) 2006 Robert N. M. Watson
5 * This software was developed by Robert Watson for the TrustedBSD Project.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/param.h>
32 #include <sys/condvar.h>
34 #include <sys/eventhandler.h>
35 #include <sys/filio.h>
36 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
42 #include <sys/queue.h>
43 #include <sys/selinfo.h>
44 #include <sys/sigio.h>
45 #include <sys/signal.h>
46 #include <sys/signalvar.h>
47 #include <sys/systm.h>
50 #include <security/audit/audit.h>
51 #include <security/audit/audit_ioctl.h>
52 #include <security/audit/audit_private.h>
55 * Implementation of a clonable special device providing a live stream of BSM
56 * audit data. This is a "tee" of the data going to the file. It provides
57 * unreliable but timely access to audit events. Consumers of this interface
58 * should be very careful to avoid introducing event cycles. Consumers may
59 * express interest via a set of preselection ioctls.
/* Malloc types for pipes, queued entries/buffers, and preselection state. */
static MALLOC_DEFINE(M_AUDIT_PIPE, "audit_pipe", "Audit pipes");
static MALLOC_DEFINE(M_AUDIT_PIPE_ENTRY, "audit_pipeent",
    "Audit pipe entries and buffers");
static MALLOC_DEFINE(M_AUDIT_PIPE_PRESELECT, "audit_pipe_presel",
    "Audit pipe preselection structure");

/*
 * Audit pipe buffer parameters: default and bounds for the per-pipe record
 * queue length (adjustable at runtime via the AUDITPIPE_SET_QLIMIT ioctl).
 */
#define AUDIT_PIPE_QLIMIT_DEFAULT (128)
#define AUDIT_PIPE_QLIMIT_MIN (0)
#define AUDIT_PIPE_QLIMIT_MAX (1024)
/*
 * Description of an entry in an audit_pipe: one queued copy of a BSM audit
 * record.  NOTE(review): the record pointer/length members are not visible
 * in this excerpt; only the queue linkage is shown.
 */
struct audit_pipe_entry {
	TAILQ_ENTRY(audit_pipe_entry) ape_queue;
/*
 * Audit pipes allow processes to express "interest" in the set of records
 * that are delivered via the pipe.  They do this in a similar manner to the
 * mechanism for audit trail configuration, by expressing two global masks,
 * and optionally expressing per-auid masks.  The following data structure is
 * the per-auid mask description.  The global state is stored in the audit
 * pipe data structure.
 *
 * We may want to consider a more space/time-efficient data structure once
 * usage patterns for per-auid specifications are clear.
 */
struct audit_pipe_preselect {
	TAILQ_ENTRY(audit_pipe_preselect) app_list;
/*
 * Description of an individual audit_pipe.  Consists largely of a bounded
 * record queue plus notification, statistics, and preselection state.
 */
#define AUDIT_PIPE_ASYNC 0x00000001	/* SIGIO delivery armed (FIOASYNC). */
#define AUDIT_PIPE_NBIO 0x00000002	/* Non-blocking reads (FIONBIO). */
	int ap_open; /* Device open? */
	struct selinfo ap_selinfo;	/* select/poll and kqueue state. */
	struct sigio *ap_sigio;		/* Async I/O ownership. */

	/* Per-pipe statistics. */
	u_int64_t ap_inserts; /* Records added. */
	u_int64_t ap_reads; /* Records read. */
	u_int64_t ap_drops; /* Records dropped. */
	u_int64_t ap_truncates; /* Records too long. */

	/*
	 * Fields relating to pipe interest: global masks for unmatched
	 * processes (attributable, non-attributable), and a list of specific
	 * interest specifications by auid.
	 */
	int ap_preselect_mode;
	au_mask_t ap_preselect_flags;
	au_mask_t ap_preselect_naflags;
	TAILQ_HEAD(, audit_pipe_preselect) ap_preselect_list;

	/*
	 * Current pending record list.
	 */
	TAILQ_HEAD(, audit_pipe_entry) ap_queue;

	/* Linkage on the global audit_pipe_list. */
	TAILQ_ENTRY(audit_pipe) ap_list;
/*
 * Global list of audit pipes, mutex to protect it and the pipes.  Finer
 * grained locking may be desirable at some point.
 */
static TAILQ_HEAD(, audit_pipe) audit_pipe_list;
static struct mtx audit_pipe_mtx;

/*
 * This CV is used to wakeup on an audit record write.  Eventually, it might
 * be per-pipe to avoid unnecessary wakeups when several pipes with different
 * preselection masks are present.
 */
static struct cv audit_pipe_cv;

/*
 * Cloning related variables and constants: device base name, the dev_clone
 * eventhandler registration, and the clone unit bookkeeping.
 */
#define AUDIT_PIPE_NAME "auditpipe"
static eventhandler_tag audit_pipe_eh_tag;
static struct clonedevs *audit_pipe_clones;
/*
 * Special device methods and definition.
 */
static d_open_t audit_pipe_open;
static d_close_t audit_pipe_close;
static d_read_t audit_pipe_read;
static d_ioctl_t audit_pipe_ioctl;
static d_poll_t audit_pipe_poll;
static d_kqfilter_t audit_pipe_kqfilter;

/* Character device switch; D_NEEDGIANT: entry points still run under Giant. */
static struct cdevsw audit_pipe_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_PSEUDO | D_NEEDGIANT,
	.d_open = audit_pipe_open,
	.d_close = audit_pipe_close,
	.d_read = audit_pipe_read,
	.d_ioctl = audit_pipe_ioctl,
	.d_poll = audit_pipe_poll,
	.d_kqfilter = audit_pipe_kqfilter,
	.d_name = AUDIT_PIPE_NAME,

static int audit_pipe_kqread(struct knote *note, long hint);
static void audit_pipe_kqdetach(struct knote *note);

/* kqueue EVFILT_READ filter operations for audit pipes. */
static struct filterops audit_pipe_read_filterops = {
	.f_detach = audit_pipe_kqdetach,
	.f_event = audit_pipe_kqread,
/*
 * Some global statistics on audit pipes.
 */
static int audit_pipe_count; /* Current number of pipes. */
static u_int64_t audit_pipe_ever; /* Pipes ever allocated. */
static u_int64_t audit_pipe_records; /* Records seen. */
static u_int64_t audit_pipe_drops; /* Global record drop count. */
/*
 * Free an audit pipe entry: release the copied BSM record buffer, then the
 * entry structure itself (both use the M_AUDIT_PIPE_ENTRY malloc type).
 */
audit_pipe_entry_free(struct audit_pipe_entry *ape)
	free(ape->ape_record, M_AUDIT_PIPE_ENTRY);
	free(ape, M_AUDIT_PIPE_ENTRY);
/*
 * Find an audit pipe preselection specification for an auid, if any.
 *
 * Caller must hold audit_pipe_mtx (asserted below).  NOTE(review): the
 * return statements are not visible in this excerpt; presumably the
 * matching entry is returned, or NULL when none exists -- confirm against
 * the full source.
 */
static struct audit_pipe_preselect *
audit_pipe_preselect_find(struct audit_pipe *ap, au_id_t auid)
	struct audit_pipe_preselect *app;

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	/* Linear scan of the per-pipe auid list. */
	TAILQ_FOREACH(app, &ap->ap_preselect_list, app_list) {
		if (app->app_auid == auid)
/*
 * Query the per-pipe mask for a specific auid, copying it out via maskp.
 *
 * NOTE(review): the maskp parameter declaration and the not-found error
 * path are not visible in this excerpt.
 */
audit_pipe_preselect_get(struct audit_pipe *ap, au_id_t auid,
	struct audit_pipe_preselect *app;

	mtx_lock(&audit_pipe_mtx);
	app = audit_pipe_preselect_find(ap, auid);
	/* Copy the stored mask out while the mutex is held. */
	*maskp = app->app_mask;
	mtx_unlock(&audit_pipe_mtx);
/*
 * Set the per-pipe mask for a specific auid.  Add a new entry if needed;
 * otherwise, update the current entry.
 */
audit_pipe_preselect_set(struct audit_pipe *ap, au_id_t auid, au_mask_t mask)
	struct audit_pipe_preselect *app, *app_new;

	/*
	 * Pessimistically assume that the auid doesn't already have a mask
	 * set, and allocate.  We will free it if it is unneeded.  Allocating
	 * before taking the mutex lets M_WAITOK sleep safely.
	 */
	app_new = malloc(sizeof(*app_new), M_AUDIT_PIPE_PRESELECT, M_WAITOK);
	mtx_lock(&audit_pipe_mtx);
	app = audit_pipe_preselect_find(ap, auid);
	/* Insert path: take ownership of the preallocated entry. */
	app->app_auid = auid;
	TAILQ_INSERT_TAIL(&ap->ap_preselect_list, app, app_list);
	app->app_mask = mask;
	mtx_unlock(&audit_pipe_mtx);
	/* Preallocation unneeded (existing entry was updated): free it. */
	free(app_new, M_AUDIT_PIPE_PRESELECT);
/*
 * Delete a per-auid mask on an audit pipe.  NOTE(review): the error return
 * for a missing entry is not visible in this excerpt.
 */
audit_pipe_preselect_delete(struct audit_pipe *ap, au_id_t auid)
	struct audit_pipe_preselect *app;

	mtx_lock(&audit_pipe_mtx);
	app = audit_pipe_preselect_find(ap, auid);
	TAILQ_REMOVE(&ap->ap_preselect_list, app, app_list);
	mtx_unlock(&audit_pipe_mtx);
	/* Entry already unlinked; free it outside the mutex. */
	free(app, M_AUDIT_PIPE_PRESELECT);
/*
 * Delete all per-auid masks on an audit pipe; caller must hold
 * audit_pipe_mtx (asserted below).
 */
audit_pipe_preselect_flush_locked(struct audit_pipe *ap)
	struct audit_pipe_preselect *app;

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	/* Unlink and free entries one at a time until the list is empty. */
	while ((app = TAILQ_FIRST(&ap->ap_preselect_list)) != NULL) {
		TAILQ_REMOVE(&ap->ap_preselect_list, app, app_list);
		free(app, M_AUDIT_PIPE_PRESELECT);
/*
 * Locked wrapper around audit_pipe_preselect_flush_locked(): acquires
 * audit_pipe_mtx for the duration of the flush.
 */
audit_pipe_preselect_flush(struct audit_pipe *ap)

	mtx_lock(&audit_pipe_mtx);
	audit_pipe_preselect_flush_locked(ap);
	mtx_unlock(&audit_pipe_mtx);
/*
 * Determine whether a specific audit pipe matches a record with these
 * properties.  Algorithm is as follows:
 *
 * - If the pipe is configured to track the default trail configuration, then
 *   use the results of global preselection matching.
 * - If not, search for a specifically configured auid entry matching the
 *   event.  If an entry is found, use that.
 * - Otherwise, use the default flags or naflags configured for the pipe.
 *
 * Caller must hold audit_pipe_mtx (asserted below).
 */
audit_pipe_preselect_check(struct audit_pipe *ap, au_id_t auid,
    au_event_t event, au_class_t class, int sorf, int trail_preselect)
	struct audit_pipe_preselect *app;

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	switch (ap->ap_preselect_mode) {
	case AUDITPIPE_PRESELECT_MODE_TRAIL:
		/* Defer entirely to the global trail's preselection result. */
		return (trail_preselect);
	case AUDITPIPE_PRESELECT_MODE_LOCAL:
		app = audit_pipe_preselect_find(ap, auid);
		/* No per-auid entry: fall back to the pipe's global masks. */
		if (auid == AU_DEFAUDITID)
			return (au_preselect(event, class,
			    &ap->ap_preselect_naflags, sorf));
		return (au_preselect(event, class,
		    &ap->ap_preselect_flags, sorf));
		/* Per-auid entry found: use its specific mask. */
		return (au_preselect(event, class, &app->app_mask,
	/* Unknown mode indicates corrupted pipe state; fail loudly. */
	panic("audit_pipe_preselect_check: mode %d",
	    ap->ap_preselect_mode);
/*
 * Determine whether there exists a pipe interested in a record with specific
 * properties; stops scanning on the first matching pipe.
 */
audit_pipe_preselect(au_id_t auid, au_event_t event, au_class_t class,
    int sorf, int trail_preselect)
	struct audit_pipe *ap;

	mtx_lock(&audit_pipe_mtx);
	TAILQ_FOREACH(ap, &audit_pipe_list, ap_list) {
		if (audit_pipe_preselect_check(ap, auid, event, class, sorf,
			/* Match found: release the mutex before returning. */
			mtx_unlock(&audit_pipe_mtx);
	mtx_unlock(&audit_pipe_mtx);
/*
 * Append individual record to a queue -- allocate queue-local buffer, and
 * add to the queue.  We try to drop from the head of the queue so that more
 * recent events take precedence over older ones, but if allocation fails we
 * do drop the new event.  Caller must hold audit_pipe_mtx (asserted below).
 */
audit_pipe_append(struct audit_pipe *ap, void *record, u_int record_len)
	struct audit_pipe_entry *ape, *ape_remove;

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	/* M_NOWAIT: cannot sleep with the pipe mutex held. */
	ape = malloc(sizeof(*ape), M_AUDIT_PIPE_ENTRY, M_NOWAIT | M_ZERO);
	ape->ape_record = malloc(record_len, M_AUDIT_PIPE_ENTRY, M_NOWAIT);
	if (ape->ape_record == NULL) {
		free(ape, M_AUDIT_PIPE_ENTRY);
	/* Copy the record into the queue-local buffer. */
	bcopy(record, ape->ape_record, record_len);
	ape->ape_record_len = record_len;
	/* At the queue limit: drop the oldest record to make room. */
	if (ap->ap_qlen >= ap->ap_qlimit) {
		ape_remove = TAILQ_FIRST(&ap->ap_queue);
		TAILQ_REMOVE(&ap->ap_queue, ape_remove, ape_queue);
		audit_pipe_entry_free(ape_remove);
	TAILQ_INSERT_TAIL(&ap->ap_queue, ape, ape_queue);
	/* Wake select/poll, kqueue, and (if FIOASYNC armed) SIGIO consumers. */
	selwakeuppri(&ap->ap_selinfo, PSOCK);
	KNOTE_LOCKED(&ap->ap_selinfo.si_note, 0);
	if (ap->ap_flags & AUDIT_PIPE_ASYNC)
		pgsigio(&ap->ap_sigio, SIGIO, 0);
/*
 * audit_pipe_submit(): audit_worker submits audit records via this
 * interface, which arranges for them to be delivered to pipe queues.
 */
audit_pipe_submit(au_id_t auid, au_event_t event, au_class_t class, int sorf,
    int trail_select, void *record, u_int record_len)
	struct audit_pipe *ap;

	/*
	 * Lockless read to avoid mutex overhead if pipes are not in use.
	 * NOTE(review): this unlocked TAILQ_FIRST() is a deliberately racy
	 * fast path; a record may be missed if a pipe is being opened
	 * concurrently, which the pipe's unreliable-delivery contract
	 * tolerates.
	 */
	if (TAILQ_FIRST(&audit_pipe_list) == NULL)
	mtx_lock(&audit_pipe_mtx);
	TAILQ_FOREACH(ap, &audit_pipe_list, ap_list) {
		if (audit_pipe_preselect_check(ap, auid, event, class, sorf,
			audit_pipe_append(ap, record, record_len);
	audit_pipe_records++;
	mtx_unlock(&audit_pipe_mtx);
	/* Wake any readers sleeping in audit_pipe_read(). */
	cv_broadcastpri(&audit_pipe_cv, PSOCK);
/*
 * audit_pipe_submit_user(): the same as audit_pipe_submit(), except that
 * since we don't currently have selection information available, it is
 * delivered to the pipe unconditionally.
 *
 * XXXRW: This is a bug.  The BSM check routine for submitting a user record
 * should parse that information and return it.
 */
audit_pipe_submit_user(void *record, u_int record_len)
	struct audit_pipe *ap;

	/*
	 * Lockless read to avoid mutex overhead if pipes are not in use.
	 * Same deliberately racy fast path as audit_pipe_submit().
	 */
	if (TAILQ_FIRST(&audit_pipe_list) == NULL)
	mtx_lock(&audit_pipe_mtx);
	/* No preselection data: append to every open pipe. */
	TAILQ_FOREACH(ap, &audit_pipe_list, ap_list)
		audit_pipe_append(ap, record, record_len);
	audit_pipe_records++;
	mtx_unlock(&audit_pipe_mtx);
	/* Wake any readers sleeping in audit_pipe_read(). */
	cv_broadcastpri(&audit_pipe_cv, PSOCK);
/*
 * Pop the next record off of an audit pipe; returns NULL when the queue is
 * empty.  Caller must hold audit_pipe_mtx (asserted below).
 */
static struct audit_pipe_entry *
audit_pipe_pop(struct audit_pipe *ap)
	struct audit_pipe_entry *ape;

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	ape = TAILQ_FIRST(&ap->ap_queue);
	/* The length counter must agree with list emptiness. */
	KASSERT((ape == NULL && ap->ap_qlen == 0) ||
	    (ape != NULL && ap->ap_qlen != 0), ("audit_pipe_pop: qlen"));
	TAILQ_REMOVE(&ap->ap_queue, ape, ape_queue);
/*
 * Allocate a new audit pipe.  Connects the pipe, on success, to the global
 * list and updates statistics.  Caller must hold audit_pipe_mtx, hence the
 * M_NOWAIT allocation.
 */
static struct audit_pipe *
audit_pipe_alloc(void)
	struct audit_pipe *ap;

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	ap = malloc(sizeof(*ap), M_AUDIT_PIPE, M_NOWAIT | M_ZERO);
	ap->ap_qlimit = AUDIT_PIPE_QLIMIT_DEFAULT;
	TAILQ_INIT(&ap->ap_queue);
	/* The kqueue knote list shares the global pipe mutex for locking. */
	knlist_init(&ap->ap_selinfo.si_note, &audit_pipe_mtx, NULL, NULL,
	/*
	 * Default flags, naflags, and auid-specific preselection settings to
	 * 0.  Initialize the mode to the global trail so that if praudit(1)
	 * is run on /dev/auditpipe, it sees events associated with the
	 * default trail.  Pipe-aware application can clear the flag, set
	 * custom masks, and flush the pipe as needed.
	 */
	bzero(&ap->ap_preselect_flags, sizeof(ap->ap_preselect_flags));
	bzero(&ap->ap_preselect_naflags, sizeof(ap->ap_preselect_naflags));
	TAILQ_INIT(&ap->ap_preselect_list);
	ap->ap_preselect_mode = AUDITPIPE_PRESELECT_MODE_TRAIL;
	/*
	 * Add to global list and update global statistics.
	 */
	TAILQ_INSERT_HEAD(&audit_pipe_list, ap, ap_list);
/*
 * Flush all records currently present in an audit pipe; assume mutex is held.
 */
audit_pipe_flush(struct audit_pipe *ap)
	struct audit_pipe_entry *ape;

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	while ((ape = TAILQ_FIRST(&ap->ap_queue)) != NULL) {
		TAILQ_REMOVE(&ap->ap_queue, ape, ape_queue);
		audit_pipe_entry_free(ape);
	/* Queue must now be empty and the length counter back in sync. */
	KASSERT(ap->ap_qlen == 0, ("audit_pipe_free: ap_qlen"));
/*
 * Free an audit pipe; this means freeing all preselection state and all
 * records in the pipe.  Assumes mutex is held to prevent any new records
 * from being inserted during the free, and that the audit pipe is still on
 * the global list.
 */
audit_pipe_free(struct audit_pipe *ap)

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	audit_pipe_preselect_flush_locked(ap);
	audit_pipe_flush(ap);
	/* Tear down kqueue state before unlinking and freeing the pipe. */
	knlist_destroy(&ap->ap_selinfo.si_note);
	TAILQ_REMOVE(&audit_pipe_list, ap, ap_list);
	free(ap, M_AUDIT_PIPE);
/*
 * Audit pipe clone routine -- provide specific requested audit pipe, or a
 * fresh one if a specific one is not requested.
 */
audit_pipe_clone(void *arg, struct ucred *cred, char *name, int namelen,
	/* Bare "auditpipe" picks the next free unit; "auditpipeN" picks N. */
	if (strcmp(name, AUDIT_PIPE_NAME) == 0)
	else if (dev_stdclone(name, NULL, AUDIT_PIPE_NAME, &u) != 1)
	i = clone_create(&audit_pipe_clones, &audit_pipe_cdevsw, &u, dev, 0);
	/* Mode 0600, root/wheel: file permissions gate audit review access. */
	*dev = make_dev(&audit_pipe_cdevsw, unit2minor(u), UID_ROOT,
	    GID_WHEEL, 0600, "%s%d", AUDIT_PIPE_NAME, u);
	(*dev)->si_flags |= SI_CHEAPCLONE;
/*
 * Audit pipe open method.  Explicit privilege check isn't used as this
 * allows file permissions on the special device to be used to grant audit
 * review access.  Those file permissions should be managed carefully.
 */
audit_pipe_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
	struct audit_pipe *ap;

	mtx_lock(&audit_pipe_mtx);
	ap = audit_pipe_alloc();
	mtx_unlock(&audit_pipe_mtx);
	/*
	 * NOTE(review): the branch structure between these unlocks is not
	 * visible in this excerpt; the KASSERT guards the pre-existing-pipe
	 * path.
	 */
	KASSERT(ap->ap_open, ("audit_pipe_open: ap && !ap_open"));
	mtx_unlock(&audit_pipe_mtx);
	mtx_unlock(&audit_pipe_mtx);
	/* Default SIGIO/async ownership to the opening process. */
	fsetown(td->td_proc->p_pid, &ap->ap_sigio);
/*
 * Close audit pipe, tear down all records, etc.
 */
audit_pipe_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
	struct audit_pipe *ap;

	KASSERT(ap != NULL, ("audit_pipe_close: ap == NULL"));
	KASSERT(ap->ap_open, ("audit_pipe_close: !ap_open"));
	/* Drop async I/O ownership before tearing down the pipe. */
	funsetown(&ap->ap_sigio);
	mtx_lock(&audit_pipe_mtx);
	mtx_unlock(&audit_pipe_mtx);
679 * Audit pipe ioctl() routine. Handle file descriptor and audit pipe layer
682 * Would be desirable to support filtering, although perhaps something simple
683 * like an event mask, as opposed to something complicated like BPF.
686 audit_pipe_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
689 struct auditpipe_ioctl_preselect *aip;
690 struct audit_pipe *ap;
696 KASSERT(ap != NULL, ("audit_pipe_ioctl: ap == NULL"));
699 * Audit pipe ioctls: first come standard device node ioctls, then
700 * manipulation of pipe settings, and finally, statistics query
705 mtx_lock(&audit_pipe_mtx);
707 ap->ap_flags |= AUDIT_PIPE_NBIO;
709 ap->ap_flags &= ~AUDIT_PIPE_NBIO;
710 mtx_unlock(&audit_pipe_mtx);
715 mtx_lock(&audit_pipe_mtx);
716 if (TAILQ_FIRST(&ap->ap_queue) != NULL)
718 TAILQ_FIRST(&ap->ap_queue)->ape_record_len;
721 mtx_unlock(&audit_pipe_mtx);
726 mtx_lock(&audit_pipe_mtx);
728 ap->ap_flags |= AUDIT_PIPE_ASYNC;
730 ap->ap_flags &= ~AUDIT_PIPE_ASYNC;
731 mtx_unlock(&audit_pipe_mtx);
736 error = fsetown(*(int *)data, &ap->ap_sigio);
740 *(int *)data = fgetown(&ap->ap_sigio);
744 case AUDITPIPE_GET_QLEN:
745 *(u_int *)data = ap->ap_qlen;
749 case AUDITPIPE_GET_QLIMIT:
750 *(u_int *)data = ap->ap_qlimit;
754 case AUDITPIPE_SET_QLIMIT:
755 /* Lockless integer write. */
756 if (*(u_int *)data >= AUDIT_PIPE_QLIMIT_MIN ||
757 *(u_int *)data <= AUDIT_PIPE_QLIMIT_MAX) {
758 ap->ap_qlimit = *(u_int *)data;
764 case AUDITPIPE_GET_QLIMIT_MIN:
765 *(u_int *)data = AUDIT_PIPE_QLIMIT_MIN;
769 case AUDITPIPE_GET_QLIMIT_MAX:
770 *(u_int *)data = AUDIT_PIPE_QLIMIT_MAX;
774 case AUDITPIPE_GET_PRESELECT_FLAGS:
775 mtx_lock(&audit_pipe_mtx);
776 maskp = (au_mask_t *)data;
777 *maskp = ap->ap_preselect_flags;
778 mtx_unlock(&audit_pipe_mtx);
782 case AUDITPIPE_SET_PRESELECT_FLAGS:
783 mtx_lock(&audit_pipe_mtx);
784 maskp = (au_mask_t *)data;
785 ap->ap_preselect_flags = *maskp;
786 mtx_unlock(&audit_pipe_mtx);
790 case AUDITPIPE_GET_PRESELECT_NAFLAGS:
791 mtx_lock(&audit_pipe_mtx);
792 maskp = (au_mask_t *)data;
793 *maskp = ap->ap_preselect_naflags;
794 mtx_unlock(&audit_pipe_mtx);
798 case AUDITPIPE_SET_PRESELECT_NAFLAGS:
799 mtx_lock(&audit_pipe_mtx);
800 maskp = (au_mask_t *)data;
801 ap->ap_preselect_naflags = *maskp;
802 mtx_unlock(&audit_pipe_mtx);
806 case AUDITPIPE_GET_PRESELECT_AUID:
807 aip = (struct auditpipe_ioctl_preselect *)data;
808 error = audit_pipe_preselect_get(ap, aip->aip_auid,
812 case AUDITPIPE_SET_PRESELECT_AUID:
813 aip = (struct auditpipe_ioctl_preselect *)data;
814 audit_pipe_preselect_set(ap, aip->aip_auid, aip->aip_mask);
818 case AUDITPIPE_DELETE_PRESELECT_AUID:
819 auid = *(au_id_t *)data;
820 error = audit_pipe_preselect_delete(ap, auid);
823 case AUDITPIPE_FLUSH_PRESELECT_AUID:
824 audit_pipe_preselect_flush(ap);
828 case AUDITPIPE_GET_PRESELECT_MODE:
829 mtx_lock(&audit_pipe_mtx);
830 *(int *)data = ap->ap_preselect_mode;
831 mtx_unlock(&audit_pipe_mtx);
835 case AUDITPIPE_SET_PRESELECT_MODE:
838 case AUDITPIPE_PRESELECT_MODE_TRAIL:
839 case AUDITPIPE_PRESELECT_MODE_LOCAL:
840 mtx_lock(&audit_pipe_mtx);
841 ap->ap_preselect_mode = mode;
842 mtx_unlock(&audit_pipe_mtx);
851 case AUDITPIPE_FLUSH:
852 mtx_lock(&audit_pipe_mtx);
853 audit_pipe_flush(ap);
854 mtx_unlock(&audit_pipe_mtx);
858 case AUDITPIPE_GET_MAXAUDITDATA:
859 *(u_int *)data = MAXAUDITDATA;
863 case AUDITPIPE_GET_INSERTS:
864 *(u_int *)data = ap->ap_inserts;
868 case AUDITPIPE_GET_READS:
869 *(u_int *)data = ap->ap_reads;
873 case AUDITPIPE_GET_DROPS:
874 *(u_int *)data = ap->ap_drops;
878 case AUDITPIPE_GET_TRUNCATES:
879 *(u_int *)data = ap->ap_truncates;
/*
 * Audit pipe read.  Pull one record off the queue and copy to user space.
 * On error, the record is dropped.
 *
 * Providing more sophisticated behavior, such as partial reads, is tricky
 * due to the potential for parallel I/O.  If partial read support is
 * required, it will require a per-pipe "current record being read" along
 * with an offset into that record which has already been read.  Threads
 * performing partial reads will need to allocate per-thread copies of the
 * data so that if another thread completes the read of the record, it can be
 * freed without adding reference count logic.  If this is added, a flag to
 * indicate that only atomic record reads are desired would be useful, as if
 * different threads are all waiting for records on the pipe, they will want
 * independent record reads, which is currently the behavior.
 */
audit_pipe_read(struct cdev *dev, struct uio *uio, int flag)
	struct audit_pipe_entry *ape;
	struct audit_pipe *ap;

	KASSERT(ap != NULL, ("audit_pipe_read: ap == NULL"));
	mtx_lock(&audit_pipe_mtx);
	/*
	 * Wait for a record that fits into the read buffer, dropping
	 * records that would be truncated if actually passed to the
	 * process.  This helps maintain the discrete record read
	 * interface.
	 */
	while ((ape = audit_pipe_pop(ap)) == NULL) {
		/* Non-blocking mode: report no data rather than sleeping. */
		if (ap->ap_flags & AUDIT_PIPE_NBIO) {
			mtx_unlock(&audit_pipe_mtx);
		/* Interruptible sleep until a writer broadcasts the CV. */
		error = cv_wait_sig(&audit_pipe_cv, &audit_pipe_mtx);
		mtx_unlock(&audit_pipe_mtx);
	/* Record fits in the caller's buffer: proceed with the copy-out. */
	if (ape->ape_record_len <= uio->uio_resid)
	audit_pipe_entry_free(ape);
	mtx_unlock(&audit_pipe_mtx);
	/*
	 * Now read record to user space memory.  Even if the read is short,
	 * we abandon the remainder of the record, supporting only discrete
	 * record reads.
	 */
	error = uiomove(ape->ape_record, ape->ape_record_len, uio);
	audit_pipe_entry_free(ape);
/*
 * Audit pipe poll method: report readability when at least one record is
 * queued; otherwise register with selrecord() for later notification.
 */
audit_pipe_poll(struct cdev *dev, int events, struct thread *td)
	struct audit_pipe *ap;

	KASSERT(ap != NULL, ("audit_pipe_poll: ap == NULL"));
	if (events & (POLLIN | POLLRDNORM)) {
		mtx_lock(&audit_pipe_mtx);
		if (TAILQ_FIRST(&ap->ap_queue) != NULL)
			revents |= events & (POLLIN | POLLRDNORM);
		selrecord(td, &ap->ap_selinfo);
		mtx_unlock(&audit_pipe_mtx);
/*
 * Audit pipe kqfilter: attach a knote to the pipe's knote list.  Only
 * EVFILT_READ is supported.
 */
audit_pipe_kqfilter(struct cdev *dev, struct knote *kn)
	struct audit_pipe *ap;

	KASSERT(ap != NULL, ("audit_pipe_kqfilter: ap == NULL"));
	if (kn->kn_filter != EVFILT_READ)
	kn->kn_fop = &audit_pipe_read_filterops;
	/* knlist_add with islocked=1: we already hold the knlist's mutex. */
	mtx_lock(&audit_pipe_mtx);
	knlist_add(&ap->ap_selinfo.si_note, kn, 1);
	mtx_unlock(&audit_pipe_mtx);
/*
 * Return true if there are records available for reading on the pipe,
 * setting kn_data to the length of the first queued record.  Runs with
 * audit_pipe_mtx held (asserted below) since the knlist uses that mutex.
 */
audit_pipe_kqread(struct knote *kn, long hint)
	struct audit_pipe_entry *ape;
	struct audit_pipe *ap;

	mtx_assert(&audit_pipe_mtx, MA_OWNED);
	ap = (struct audit_pipe *)kn->kn_hook;
	KASSERT(ap != NULL, ("audit_pipe_kqread: ap == NULL"));
	if (ap->ap_qlen != 0) {
		ape = TAILQ_FIRST(&ap->ap_queue);
		KASSERT(ape != NULL, ("audit_pipe_kqread: ape == NULL"));
		/* Report the size of the next record to the consumer. */
		kn->kn_data = ape->ape_record_len;
/*
 * Detach kqueue state from audit pipe.
 */
audit_pipe_kqdetach(struct knote *kn)
	struct audit_pipe *ap;

	ap = (struct audit_pipe *)kn->kn_hook;
	KASSERT(ap != NULL, ("audit_pipe_kqdetach: ap == NULL"));
	/* knlist_remove with islocked=1: we hold the knlist's mutex. */
	mtx_lock(&audit_pipe_mtx);
	knlist_remove(&ap->ap_selinfo.si_note, kn, 1);
	mtx_unlock(&audit_pipe_mtx);
/*
 * Initialize the audit pipe system: global pipe list, mutex, condition
 * variable, and the dev_clone handler for /dev/auditpipe.  Invoked at boot
 * via the SYSINIT below (SI_SUB_DRIVERS).
 */
audit_pipe_init(void *unused)

	TAILQ_INIT(&audit_pipe_list);
	mtx_init(&audit_pipe_mtx, "audit_pipe_mtx", NULL, MTX_DEF);
	cv_init(&audit_pipe_cv, "audit_pipe_cv");
	clone_setup(&audit_pipe_clones);
	audit_pipe_eh_tag = EVENTHANDLER_REGISTER(dev_clone,
	    audit_pipe_clone, 0, 1000);
	/* Without the clone handler the device can never be created. */
	if (audit_pipe_eh_tag == NULL)
		panic("audit_pipe_init: EVENTHANDLER_REGISTER");

SYSINIT(audit_pipe_init, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, audit_pipe_init,