2 * Copyright (c) 2004 Apple Inc.
3 * Copyright (c) 2005 Robert N. M. Watson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
30 * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#15 $
33 #include <sys/types.h>
35 #include <config/config.h>
36 #ifdef HAVE_FULL_QUEUE_H
37 #include <sys/queue.h>
38 #else /* !HAVE_FULL_QUEUE_H */
39 #include <compat/queue.h>
40 #endif /* !HAVE_FULL_QUEUE_H */
42 #include <bsm/libbsm.h>
44 #ifdef HAVE_PTHREAD_MUTEX_LOCK
51 #ifdef HAVE_PTHREAD_MUTEX_LOCK
/* Serializes all access to ev_cache and firsttime (see XXXRW note below). */
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

/* Non-zero until the event cache has been populated for the first time. */
static int firsttime = 1;
57 * XXX ev_cache, once created, sticks around until the calling program exits.
58 * This may or may not be a problem as far as absolute memory usage goes, but
59 * at least there don't appear to be any leaks in using the cache.
61 * XXXRW: Note that despite (mutex), load_event_table() could race with
62 * other consumers of the getauevents() API.
64 struct audit_event_map {
65 char ev_name[AU_EVENT_NAME_MAX];
66 char ev_desc[AU_EVENT_DESC_MAX];
67 struct au_event_ent ev;
68 LIST_ENTRY(audit_event_map) ev_list;
70 static LIST_HEAD(, audit_event_map) ev_cache;
72 static struct audit_event_map *
73 audit_event_map_alloc(void)
75 struct audit_event_map *aemp;
77 aemp = malloc(sizeof(*aemp));
80 bzero(aemp, sizeof(*aemp));
81 aemp->ev.ae_name = aemp->ev_name;
82 aemp->ev.ae_desc = aemp->ev_desc;
/*
 * Release a cache element.  The embedded au_event_ent points into the
 * element itself, so a single free() releases everything; free(NULL) is a
 * no-op, so no guard is needed.
 */
static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}
94 * When reading into the cache fails, we need to flush the entire cache to
95 * prevent it from containing some but not all records.
100 struct audit_event_map *aemp;
102 /* XXX: Would assert 'mutex'. */
104 while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
105 LIST_REMOVE(aemp, ev_list);
106 audit_event_map_free(aemp);
111 load_event_table(void)
113 struct audit_event_map *aemp;
114 struct au_event_ent *ep;
117 * XXX: Would assert 'mutex'.
118 * Loading of the cache happens only once; dont check if cache is
121 LIST_INIT(&ev_cache);
122 setauevent(); /* Rewind to beginning of entries. */
124 aemp = audit_event_map_alloc();
129 ep = getauevent_r(&aemp->ev);
131 LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
133 audit_event_map_free(aemp);
134 } while (ep != NULL);
139 * Read the event with the matching event number from the cache.
141 static struct au_event_ent *
142 read_from_cache(au_event_t event)
144 struct audit_event_map *elem;
146 /* XXX: Would assert 'mutex'. */
148 LIST_FOREACH(elem, &ev_cache, ev_list) {
149 if (elem->ev.ae_number == event)
157 * Check if the audit event is preselected against the preselection mask.
160 au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
162 struct au_event_ent *ev;
163 au_class_t effmask = 0;
169 #ifdef HAVE_PTHREAD_MUTEX_LOCK
170 pthread_mutex_lock(&mutex);
174 if ( -1 == load_event_table()) {
175 #ifdef HAVE_PTHREAD_MUTEX_LOCK
176 pthread_mutex_unlock(&mutex);
184 if (load_event_table() == -1) {
185 #ifdef HAVE_PTHREAD_MUTEX_LOCK
186 pthread_mutex_unlock(&mutex);
190 ev = read_from_cache(event);
192 case AU_PRS_USECACHE:
193 ev = read_from_cache(event);
199 #ifdef HAVE_PTHREAD_MUTEX_LOCK
200 pthread_mutex_unlock(&mutex);
204 if (sorf & AU_PRS_SUCCESS)
205 effmask |= (mask_p->am_success & ev->ae_class);
206 if (sorf & AU_PRS_FAILURE)
207 effmask |= (mask_p->am_failure & ev->ae_class);
208 #ifdef HAVE_PTHREAD_MUTEX_LOCK
209 pthread_mutex_unlock(&mutex);