2 * Copyright (c) 1999-2008 Apple Inc.
3 * Copyright (c) 2006-2008 Robert N. M. Watson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/condvar.h>
38 #include <sys/filedesc.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/kthread.h>
43 #include <sys/malloc.h>
44 #include <sys/mount.h>
45 #include <sys/namei.h>
47 #include <sys/queue.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/protosw.h>
51 #include <sys/domain.h>
53 #include <sys/sysproto.h>
54 #include <sys/sysent.h>
55 #include <sys/systm.h>
56 #include <sys/ucred.h>
59 #include <sys/unistd.h>
60 #include <sys/vnode.h>
62 #include <bsm/audit.h>
63 #include <bsm/audit_internal.h>
64 #include <bsm/audit_kevents.h>
66 #include <netinet/in.h>
67 #include <netinet/in_pcb.h>
69 #include <security/audit/audit.h>
70 #include <security/audit/audit_private.h>
/*
 * Global state for the audit worker subsystem: the worker process handle,
 * the active trail credential/vnode pair, the running trail size, the
 * rotation-pending flag, and the sx(9) lock that serializes access to all
 * of them.
 */
75 * Worker thread that will schedule disk I/O, etc.
77 static struct proc *audit_thread;
80 * audit_cred and audit_vp are the stored credential and vnode to use for
81 * active audit trail. They are protected by the audit worker lock, which
82 * will be held across all I/O and all rotation to prevent them from being
83 * replaced (rotated) while in use. The audit_file_rotate_wait flag is set
84 * when the kernel has delivered a trigger to auditd to rotate the trail, and
85 * is cleared when the next rotation takes place. It is also protected by
86 * the audit worker lock.
88 static int audit_file_rotate_wait;
89 static struct ucred *audit_cred;
90 static struct vnode *audit_vp;
91 static off_t audit_size;
92 static struct sx audit_worker_lock;
/*
 * Convenience wrappers around the worker sx lock.
 * NOTE(review): the continuation lines of the LOCK_INIT and LOCK_ASSERT
 * macros (the sx_init name argument and the sx_assert flags) are not
 * visible in this chunk of the file — confirm against the full source.
 */
94 #define AUDIT_WORKER_LOCK_INIT() sx_init(&audit_worker_lock, \
96 #define AUDIT_WORKER_LOCK_ASSERT() sx_assert(&audit_worker_lock, \
98 #define AUDIT_WORKER_LOCK() sx_xlock(&audit_worker_lock)
99 #define AUDIT_WORKER_UNLOCK() sx_xunlock(&audit_worker_lock)
102 * Write an audit record to a file, performed as the last stage after both
103 * preselection and BSM conversion. Both space management and write failures
104 * are handled in this function.
106 * No attempt is made to deal with possible failure to deliver a trigger to
107 * the audit daemon, since the message is asynchronous anyway.
/*
 * Called with the audit worker lock held (asserted below) so that the
 * trail vnode/cred cannot be rotated out from under the write.
 * NOTE(review): several original lines are missing from this chunk —
 * the function's return type, the trailing parameter(s) of the signature
 * (the record length `len` used by vn_rdwr below), the declarations of
 * `temp` and `cur_fail`, and a number of closing braces and error-check
 * lines. Commentary below describes only what is visible.
 */
110 audit_record_write(struct vnode *vp, struct ucred *cred, void *data,
113 static struct timeval last_lowspace_trigger;
114 static struct timeval last_fail;
115 static int cur_lowspace_trigger;
116 struct statfs *mnt_stat;
117 int error, vfslocked;
121 AUDIT_WORKER_LOCK_ASSERT();
/* Statfs buffer lives in the mount; take Giant if the fs needs it. */
126 mnt_stat = &vp->v_mount->mnt_stat;
127 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
130 * First, gather statistics on the audit log file and file system so
131 * that we know how we're doing on space. Consider failure of these
132 * operations to indicate a future inability to write to the file.
134 error = VFS_STATFS(vp->v_mount, mnt_stat);
139 * We handle four different space-related limits:
141 * - A fixed (hard) limit on the minimum free blocks we require on
142 * the file system, and results in record loss, a trigger, and
143 * possible fail stop due to violating invariants.
145 * - An administrative (soft) limit, which when fallen below, results
146 * in the kernel notifying the audit daemon of low space.
148 * - An audit trail size limit, which when gone above, results in the
149 * kernel notifying the audit daemon that rotation is desired.
151 * - The total depth of the kernel audit record exceeding free space,
152 * which can lead to possible fail stop (with drain), in order to
153 * prevent violating invariants. Failure here doesn't halt
154 * immediately, but prevents new records from being generated.
156 * Possibly, the last of these should be handled differently, always
157 * allowing a full queue to be lost, rather than trying to prevent
160 * First, handle the hard limit, which generates a trigger and may
161 * fail stop. This is handled in the same manner as ENOSPC from
162 * VOP_WRITE, and results in record loss.
164 if (mnt_stat->f_bfree < AUDIT_HARD_LIMIT_FREE_BLOCKS) {
/* NOTE(review): the hard-limit body (presumably a jump to the
 * ENOSPC handling at the bottom) is missing from this chunk. */
170 * Second, handle falling below the soft limit, if defined; we send
171 * the daemon a trigger and continue processing the record. Triggers
172 * are limited to 1/sec.
174 if (audit_qctrl.aq_minfree != 0) {
/* aq_minfree is a percentage; convert to a block count. */
175 temp = mnt_stat->f_blocks / (100 / audit_qctrl.aq_minfree);
176 if (mnt_stat->f_bfree < temp) {
177 if (ppsratecheck(&last_lowspace_trigger,
178 &cur_lowspace_trigger, 1)) {
179 (void)audit_send_trigger(
180 AUDIT_TRIGGER_LOW_SPACE);
181 printf("Warning: disk space low (< %d%% free) "
182 "on audit log file-system\n",
183 audit_qctrl.aq_minfree);
189 * If the current file is getting full, generate a rotation trigger
190 * to the daemon. This is only approximate, which is fine as more
191 * records may be generated before the daemon rotates the file.
193 if (audit_fstat.af_filesz != 0 &&
194 audit_size >= audit_fstat.af_filesz * (audit_file_rotate_wait + 1)) {
195 AUDIT_WORKER_LOCK_ASSERT();
/* Scale the next trigger threshold so we don't re-trigger
 * every write while waiting for auditd to rotate. */
197 audit_file_rotate_wait++;
198 (void)audit_send_trigger(AUDIT_TRIGGER_ROTATE_KERNEL);
202 * If the estimated amount of audit data in the audit event queue
203 * (plus records allocated but not yet queued) has reached the amount
204 * of free space on the disk, then we need to go into an audit fail
205 * stop state, in which we do not permit the allocation/committing of
206 * any new audit records. We continue to process records but don't
207 * allow any activities that might generate new records. In the
208 * future, we might want to detect when space is available again and
209 * allow operation to continue, but this behavior is sufficient to
210 * meet fail stop requirements in CAPP.
212 if (audit_fail_stop) {
/* Worst-case queue footprint in fs blocks vs. free blocks. */
213 if ((unsigned long)((audit_q_len + audit_pre_q_len + 1) *
214 MAX_AUDIT_RECORD_SIZE) / mnt_stat->f_bsize >=
215 (unsigned long)(mnt_stat->f_bfree)) {
216 if (ppsratecheck(&last_fail, &cur_fail, 1))
217 printf("audit_record_write: free space "
218 "below size of audit queue, failing "
220 audit_in_failure = 1;
221 } else if (audit_in_failure) {
223 * Note: if we want to handle recovery, this is the
224 * spot to do it: unset audit_in_failure, and issue a
/* Append the record to the trail; len comes from the (not
 * visible here) length parameter of this function. */
230 error = vn_rdwr(UIO_WRITE, vp, data, len, (off_t)0, UIO_SYSSPACE,
231 IO_APPEND|IO_UNIT, cred, NULL, NULL, curthread);
236 AUDIT_WORKER_LOCK_ASSERT();
/* NOTE(review): the successful-write path (error check and
 * audit_size += len accounting) is missing from this chunk. */
240 * Catch completion of a queue drain here; if we're draining and the
241 * queue is now empty, fail stop. That audit_fail_stop is implicitly
242 * true, since audit_in_failure can only be set if audit_fail_stop is
245 * Note: if we handle recovery from audit_in_failure, then we need to
246 * make panic here conditional.
248 if (audit_in_failure) {
249 if (audit_q_len == 0 && audit_pre_q_len == 0) {
/* Flush what we have to disk before halting. */
250 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
251 (void)VOP_FSYNC(vp, MNT_WAIT, curthread);
253 panic("Audit store overflow; record queue drained.");
257 VFS_UNLOCK_GIANT(vfslocked);
262 * ENOSPC is considered a special case with respect to failures, as
263 * this can reflect either our preemptive detection of insufficient
264 * space, or ENOSPC returned by the vnode write call.
266 if (audit_fail_stop) {
267 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
268 (void)VOP_FSYNC(vp, MNT_WAIT, curthread);
270 panic("Audit log space exhausted and fail-stop set.")
272 (void)audit_send_trigger(AUDIT_TRIGGER_NO_SPACE);
278 * We have failed to write to the file, so the current record is
279 * lost, which may require an immediate system halt.
281 if (audit_panic_on_write_fail) {
282 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
283 (void)VOP_FSYNC(vp, MNT_WAIT, curthread);
285 panic("audit_worker: write error %d\n", error);
286 } else if (ppsratecheck(&last_fail, &cur_fail, 1))
287 printf("audit_worker: write error %d\n", error);
288 VFS_UNLOCK_GIANT(vfslocked);
292 * Given a kernel audit record, process as required. Kernel audit records
293 * are converted to one, or possibly two, BSM records, depending on whether
294 * there is a user audit record present also. Kernel records need be
295 * converted to BSM before they can be written out. Both types will be
296 * written to disk, and audit pipes.
/*
 * NOTE(review): lines are missing from this chunk — the function's return
 * type, declarations of auid/event/class/sorf/error/locked, the lock
 * acquisition under the first if, several closing braces, and the
 * early-exit/cleanup path after the pipe submission. Comments below
 * describe only what is visible.
 */
299 audit_worker_process_record(struct kaudit_record *ar)
301 struct au_record *bsm;
309 * We hold the audit worker lock over both writes, if there are two,
310 * so that the two records won't be split across a rotation and end
311 * up in two different trail files.
/* Take the lock only if something will actually hit the trail. */
313 if (((ar->k_ar_commit & AR_COMMIT_USER) &&
314 (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL)) ||
315 (ar->k_ar_commit & AR_PRESELECT_TRAIL)) {
322 * First, handle the user record, if any: commit to the system trail
323 * and audit pipes as selected.
325 if ((ar->k_ar_commit & AR_COMMIT_USER) &&
326 (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL)) {
327 AUDIT_WORKER_LOCK_ASSERT();
328 audit_record_write(audit_vp, audit_cred, ar->k_udata,
332 if ((ar->k_ar_commit & AR_COMMIT_USER) &&
333 (ar->k_ar_commit & AR_PRESELECT_USER_PIPE))
334 audit_pipe_submit_user(ar->k_udata, ar->k_ulen);
/* Nothing kernel-side selected for trail or pipe: done early. */
336 if (!(ar->k_ar_commit & AR_COMMIT_KERNEL) ||
337 ((ar->k_ar_commit & AR_PRESELECT_PIPE) == 0 &&
338 (ar->k_ar_commit & AR_PRESELECT_TRAIL) == 0))
/* Gather preselection inputs for the pipe submission below. */
341 auid = ar->k_ar.ar_subj_auid;
342 event = ar->k_ar.ar_event;
343 class = au_event_class(event);
344 if (ar->k_ar.ar_errno == 0)
345 sorf = AU_PRS_SUCCESS;
347 sorf = AU_PRS_FAILURE;
/* Convert the kernel record to BSM wire format. */
349 error = kaudit_to_bsm(ar, &bsm);
355 printf("audit_worker_process_record: BSM_FAILURE\n");
/* Any other conversion result is a programming error. */
362 panic("kaudit_to_bsm returned %d", error);
365 if (ar->k_ar_commit & AR_PRESELECT_TRAIL) {
366 AUDIT_WORKER_LOCK_ASSERT();
367 audit_record_write(audit_vp, audit_cred, bsm->data, bsm->len);
370 if (ar->k_ar_commit & AR_PRESELECT_PIPE)
371 audit_pipe_submit(auid, event, class, sorf,
372 ar->k_ar_commit & AR_PRESELECT_TRAIL, bsm->data,
378 AUDIT_WORKER_UNLOCK();
382 * The audit_worker thread is responsible for watching the event queue,
383 * dequeueing records, converting them to BSM format, and committing them to
384 * disk. In order to minimize lock thrashing, records are dequeued in sets
385 * to a thread-local work queue.
387 * Note: this means that the effect bound on the size of the pending record
388 * queue is 2x the length of the global queue.
/*
 * Kernel-thread entry point (never returns).
 * NOTE(review): lines are missing from this chunk — the return type,
 * the outer `for (;;)` (or equivalent) loop header implied by the
 * re-lock of audit_mtx at the bottom, the `lowater_signal` flag logic,
 * the audit_free() of processed records, and closing braces.
 */
391 audit_worker(void *arg)
393 struct kaudit_queue ar_worklist;
394 struct kaudit_record *ar;
397 TAILQ_INIT(&ar_worklist);
398 mtx_lock(&audit_mtx);
400 mtx_assert(&audit_mtx, MA_OWNED);
/* Sleep until the commit path queues at least one record. */
405 while (TAILQ_EMPTY(&audit_q))
406 cv_wait(&audit_worker_cv, &audit_mtx);
409 * If there are records in the global audit record queue,
410 * transfer them to a thread-local queue and process them
411 * one by one. If we cross the low watermark threshold,
412 * signal any waiting processes that they may wake up and
413 * continue generating records.
416 while ((ar = TAILQ_FIRST(&audit_q))) {
417 TAILQ_REMOVE(&audit_q, ar, k_q);
/* Crossing the low watermark: remember to wake producers. */
419 if (audit_q_len == audit_qctrl.aq_lowater)
421 TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q);
424 cv_broadcast(&audit_watermark_cv);
/* Drop the mutex while doing disk I/O on the local list. */
426 mtx_unlock(&audit_mtx);
427 while ((ar = TAILQ_FIRST(&ar_worklist))) {
428 TAILQ_REMOVE(&ar_worklist, ar, k_q);
429 audit_worker_process_record(ar);
432 mtx_lock(&audit_mtx);
437 * audit_rotate_vnode() is called by a user or kernel thread to configure or
438 * de-configure auditing on a vnode. The arguments are the replacement
439 * credential (referenced) and vnode (referenced and opened) to substitute
440 * for the current credential and vnode, if any. If either is set to NULL,
441 * both should be NULL, and this is used to indicate that audit is being
442 * disabled. Any previous cred/vnode will be closed and freed. We re-enable
443 * generating rotation requests to auditd.
/*
 * NOTE(review): lines are missing from this chunk — the return type, the
 * declarations of vattr/vfslocked, the VOP_GETATTR error handling and
 * VOP_UNLOCK, the AUDIT_WORKER_LOCK() call, and the assignments of
 * audit_cred/audit_vp to the new pair. Comments describe visible code only.
 */
446 audit_rotate_vnode(struct ucred *cred, struct vnode *vp)
448 struct ucred *old_audit_cred;
449 struct vnode *old_audit_vp;
/* Either both arguments are set (enable/rotate) or both NULL (disable). */
453 KASSERT((cred != NULL && vp != NULL) || (cred == NULL && vp == NULL),
454 ("audit_rotate_vnode: cred %p vp %p", cred, vp));
/* Learn the new trail file's current size so rotation math is right. */
457 vn_lock(vp, LK_SHARED | LK_RETRY);
458 if (VOP_GETATTR(vp, &vattr, cred) != 0)
466 * Rotate the vnode/cred, and clear the rotate flag so that we will
467 * send a rotate trigger if the new file fills.
470 old_audit_cred = audit_cred;
471 old_audit_vp = audit_vp;
474 audit_size = vattr.va_size;
475 audit_file_rotate_wait = 0;
476 audit_enabled = (audit_vp != NULL);
477 AUDIT_WORKER_UNLOCK();
480 * If there was an old vnode/credential, close and free.
482 if (old_audit_vp != NULL) {
483 vfslocked = VFS_LOCK_GIANT(old_audit_vp->v_mount);
484 vn_close(old_audit_vp, AUDIT_CLOSE_FLAGS, old_audit_cred,
486 VFS_UNLOCK_GIANT(vfslocked);
487 crfree(old_audit_cred);
/*
 * One-time initialization: set up the worker lock and spawn the
 * audit_worker kernel process. Panics on spawn failure since auditing
 * cannot function without the worker.
 * NOTE(review): the return type, `int error;` declaration, the `if (error)`
 * check, and the trailing kproc_create arguments are missing from this
 * chunk of the file.
 */
492 audit_worker_init(void)
496 AUDIT_WORKER_LOCK_INIT();
497 error = kproc_create(audit_worker, NULL, &audit_thread, RFHIGHPID,
500 panic("audit_worker_init: kproc_create returned %d", error);