2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/param.h>
29 #include <sys/event.h>
30 #include <sys/socket.h>
47 #include "agents/passwd.h"
48 #include "agents/group.h"
49 #include "agents/services.h"
57 #include "singletons.h"
60 #define CONFIG_PATH "/etc/nscd.conf"
62 #define DEFAULT_CONFIG_PATH "nscd.conf"
64 #define MAX_SOCKET_IO_SIZE 4096
/*
 * Per-thread argument bundle handed to processing_thread(); main() allocates
 * one per worker thread and fills in the shared singletons.
 */
66 struct processing_thread_args {
/* NOTE(review): the `cache the_cache;` member and the closing `};` appear to
 * be elided in this extraction — processing_thread() reads args->the_cache. */
68 struct configuration *the_configuration;
69 struct runtime_env *the_runtime_env;
72 static void accept_connection(struct kevent *, struct runtime_env *,
73 struct configuration *);
74 static void destroy_cache_(cache);
75 static void destroy_runtime_env(struct runtime_env *);
76 static cache init_cache_(struct configuration *);
77 static struct runtime_env *init_runtime_env(struct configuration *);
78 static void processing_loop(cache, struct runtime_env *,
79 struct configuration *);
80 static void process_socket_event(struct kevent *, struct runtime_env *,
81 struct configuration *);
82 static void process_timer_event(struct kevent *, struct runtime_env *,
83 struct configuration *);
84 static void *processing_thread(void *);
85 static void usage(void) __dead2;
87 void get_time_func(struct timeval *);
93 "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
98 init_cache_(struct configuration *config)
100 struct cache_params params;
103 struct configuration_entry *config_entry;
106 TRACE_IN(init_cache_);
108 memset(¶ms, 0, sizeof(struct cache_params));
109 params.get_time_func = get_time_func;
110 retval = init_cache(¶ms);
112 size = configuration_get_entries_size(config);
113 for (i = 0; i < size; ++i) {
114 config_entry = configuration_get_entry(config, i);
116 * We should register common entries now - multipart entries
117 * would be registered automatically during the queries.
119 register_cache_entry(retval, (struct cache_entry_params *)
120 &config_entry->positive_cache_params);
121 config_entry->positive_cache_entry = find_cache_entry(retval,
122 config_entry->positive_cache_params.cep.entry_name);
123 assert(config_entry->positive_cache_entry !=
124 INVALID_CACHE_ENTRY);
126 register_cache_entry(retval, (struct cache_entry_params *)
127 &config_entry->negative_cache_params);
128 config_entry->negative_cache_entry = find_cache_entry(retval,
129 config_entry->negative_cache_params.cep.entry_name);
130 assert(config_entry->negative_cache_entry !=
131 INVALID_CACHE_ENTRY);
134 LOG_MSG_2("cache", "cache was successfully initialized");
135 TRACE_OUT(init_cache_);
140 destroy_cache_(cache the_cache)
142 TRACE_IN(destroy_cache_);
143 destroy_cache(the_cache);
144 TRACE_OUT(destroy_cache_);
148  * Socket and kqueues are prepared here. We have one global queue for both
149  * socket and timers events.
151 static struct runtime_env *
152 init_runtime_env(struct configuration *config)
/* NOTE(review): the opening brace and the declaration of serv_addr_len (and
 * an int return-path variable or two) are elided in this extraction. */
155 struct sockaddr_un serv_addr;
157 struct kevent eventlist;
158 struct timespec timeout;
160 struct runtime_env *retval;
162 TRACE_IN(init_runtime_env);
163 retval = calloc(1, sizeof(*retval));
/* Allocation failure is treated as fatal via assert, not an error return. */
164 assert(retval != NULL);
/* Local (UNIX-domain) stream socket for client queries. */
166 retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);
/* Optionally remove a stale socket file left by a previous instance. */
168 if (config->force_unlink == 1)
169 unlink(config->socket_path)
171 memset(&serv_addr, 0, sizeof(struct sockaddr_un));
172 serv_addr.sun_family = PF_LOCAL;
173 strlcpy(serv_addr.sun_path, config->socket_path,
174 sizeof(serv_addr.sun_path));
175 serv_addr_len = sizeof(serv_addr.sun_family) +
176 strlen(serv_addr.sun_path) + 1;
178 if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
179 serv_addr_len) == -1) {
180 close(retval->sockfd);
/* NOTE(review): free(retval) and the error return for this path appear to
 * be elided here — confirm against the full source. */
183 LOG_ERR_2("runtime environment", "can't bind socket to path: "
184 "%s", config->socket_path);
185 TRACE_OUT(init_runtime_env);
188 LOG_MSG_2("runtime environment", "using socket %s",
189 config->socket_path);
192 * Here we're marking socket as non-blocking and setting its backlog
193 * to the maximum value
195 chmod(config->socket_path, config->socket_mode);
/* backlog -1 requests the system maximum on FreeBSD. */
196 listen(retval->sockfd, -1);
197 fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);
/* One global kqueue serves both socket readiness and per-query timers. */
199 retval->queue = kqueue();
200 assert(retval->queue != -1);
/* Register the listening socket; EV_ONESHOT means the event is re-armed
 * explicitly by processing_loop() after each accept burst. */
202 EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
204 memset(&timeout, 0, sizeof(struct timespec));
205 kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);
207 LOG_MSG_2("runtime environment", "successfully initialized");
208 TRACE_OUT(init_runtime_env);
213 destroy_runtime_env(struct runtime_env *env)
215 TRACE_IN(destroy_runtime_env);
219 TRACE_OUT(destroy_runtime_env);
/*
 * Accepts one pending client connection on the listening socket, verifies
 * the peer's credentials, creates its query_state and arms two one-shot
 * kevents for it: an EVFILT_TIMER lifetime timer and an EVFILT_READ watermark
 * event for the first request header.
 */
223 accept_connection(struct kevent *event_data, struct runtime_env *env,
224 struct configuration *config)
226 struct kevent eventlist[2];
227 struct timespec timeout;
228 struct query_state *qstate;
/* NOTE(review): declarations of fd, res, euid, egid are elided here. */
236 TRACE_IN(accept_connection);
237 fd = accept(event_data->ident, NULL, NULL);
239 LOG_ERR_2("accept_connection", "error %d during accept()",
241 TRACE_OUT(accept_connection);
/* Peer credentials gate per-user cache access (getpeereid on a local socket). */
245 if (getpeereid(fd, &euid, &egid) != 0) {
246 LOG_ERR_2("accept_connection", "error %d during getpeereid()",
248 TRACE_OUT(accept_connection);
252 qstate = init_query_state(fd, sizeof(int), euid, egid);
253 if (qstate == NULL) {
254 LOG_ERR_2("accept_connection", "can't init query_state");
255 TRACE_OUT(accept_connection);
259 memset(&timeout, 0, sizeof(struct timespec));
/* Lifetime timer in milliseconds; fires process_timer_event() on expiry. */
260 EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
261 0, qstate->timeout.tv_sec * 1000, qstate);
/* NOTE_LOWAT delays the read event until a full header is available. */
262 EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
263 NOTE_LOWAT, qstate->kevent_watermark, qstate);
264 res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
266 LOG_ERR_2("accept_connection", "kevent error");
268 TRACE_OUT(accept_connection);
/*
 * Handles a read/write readiness event on a client socket: cancels the
 * pending lifetime timer, performs (possibly chunked) socket I/O, runs the
 * query_state's process_func state machine, and re-arms the timer and the
 * next I/O event.  Destroys the query_state on EOF, error, or completion.
 */
272 process_socket_event(struct kevent *event_data, struct runtime_env *env,
273 struct configuration *config)
275 struct kevent eventlist[2];
276 struct timeval query_timeout;
277 struct timespec kevent_timeout;
/* NOTE(review): declarations of nevents, eof_res, io_res, res are elided. */
281 struct query_state *qstate;
283 TRACE_IN(process_socket_event);
/* EV_EOF means the client hung up; forces teardown at the end. */
284 eof_res = event_data->flags & EV_EOF ? 1 : 0;
287 memset(&kevent_timeout, 0, sizeof(struct timespec));
/* Cancel the lifetime timer while we service this I/O event. */
288 EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
290 nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
292 if (errno == ENOENT) {
293 /* the timer is already handling this event */
294 TRACE_OUT(process_socket_event);
297 /* some other error happened */
298 LOG_ERR_2("process_socket_event", "kevent error, errno"
300 TRACE_OUT(process_socket_event);
304 qstate = (struct query_state *)event_data->udata;
307 * If the buffer that is to be sent/received is too large,
308 * we send it implicitly, by using query_io_buffer_read and
309 * query_io_buffer_write functions in the query_state. These functions
310 * use the temporary buffer, which is later sent/received in parts.
311 * The code below implements buffer splitting/merging for send/receive
312 * operations. It also does the actual socket IO operations.
/* Only act when the kernel reports at least the watermark's worth of data. */
314 if (((qstate->use_alternate_io == 0) &&
315 (qstate->kevent_watermark <= (size_t)event_data->data)) ||
316 ((qstate->use_alternate_io != 0) &&
317 (qstate->io_buffer_watermark <= (size_t)event_data->data))) {
/* Alternate-I/O mode: move the next chunk in or out of io_buffer. */
318 if (qstate->use_alternate_io != 0) {
319 switch (qstate->io_buffer_filter) {
321 io_res = query_socket_read(qstate,
323 qstate->io_buffer_watermark);
/* A short/failed read aborts the query. */
325 qstate->use_alternate_io = 0;
326 qstate->process_func = NULL;
328 qstate->io_buffer_p += io_res;
329 if (qstate->io_buffer_p ==
331 qstate->io_buffer_size) {
332 qstate->io_buffer_p =
334 qstate->use_alternate_io = 0;
/* Normal mode: run the state machine until it asks for more I/O. */
343 if (qstate->use_alternate_io == 0) {
345 res = qstate->process_func(qstate);
346 } while ((qstate->kevent_watermark == 0) &&
347 (qstate->process_func != NULL) &&
351 qstate->process_func = NULL;
/* Flush pending chunked writes in alternate-I/O write mode. */
354 if ((qstate->use_alternate_io != 0) &&
355 (qstate->io_buffer_filter == EVFILT_WRITE)) {
356 io_res = query_socket_write(qstate, qstate->io_buffer_p,
357 qstate->io_buffer_watermark);
359 qstate->use_alternate_io = 0;
360 qstate->process_func = NULL;
362 qstate->io_buffer_p += io_res;
365 /* assuming that socket was closed */
366 qstate->process_func = NULL;
367 qstate->use_alternate_io = 0;
/* Done, hung up, or errored: tear the query down. */
370 if (((qstate->process_func == NULL) &&
371 (qstate->use_alternate_io == 0)) ||
372 (eof_res != 0) || (res != 0)) {
373 destroy_query_state(qstate);
374 close(event_data->ident);
375 TRACE_OUT(process_socket_event);
379 /* updating the query_state lifetime variable */
380 get_time_func(&query_timeout);
381 query_timeout.tv_usec = 0;
382 query_timeout.tv_sec -= qstate->creation_time.tv_sec;
383 if (query_timeout.tv_sec > qstate->timeout.tv_sec)
384 query_timeout.tv_sec = 0;
386 query_timeout.tv_sec = qstate->timeout.tv_sec -
387 query_timeout.tv_sec;
/* Whole io_buffer consumed: leave alternate-I/O mode. */
389 if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
390 qstate->io_buffer + qstate->io_buffer_size))
391 qstate->use_alternate_io = 0;
393 if (qstate->use_alternate_io == 0) {
395 * If we must send/receive the large block of data,
396 * we should prepare the query_state's io_XXX fields.
397 * We should also substitute its write_func and read_func
398 * with the query_io_buffer_write and query_io_buffer_read,
399 * which will allow us to implicitly send/receive this large
400 * buffer later (in the subsequent calls to the
401 * process_socket_event).
403 if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
406 * XXX: Uncommenting this code makes nscd(8) fail for
407 * entries larger than a few kB, causing few second
408 * worth of delay for each call to retrieve them.
410 if (qstate->io_buffer != NULL)
411 free(qstate->io_buffer);
413 qstate->io_buffer = calloc(1,
414 qstate->kevent_watermark);
415 assert(qstate->io_buffer != NULL);
417 qstate->io_buffer_p = qstate->io_buffer;
418 qstate->io_buffer_size = qstate->kevent_watermark;
419 qstate->io_buffer_filter = qstate->kevent_filter;
421 qstate->write_func = query_io_buffer_write;
422 qstate->read_func = query_io_buffer_read;
424 if (qstate->kevent_filter == EVFILT_READ)
425 qstate->use_alternate_io = 1;
428 qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
429 EV_SET(&eventlist[1], event_data->ident,
430 qstate->kevent_filter, EV_ADD | EV_ONESHOT,
431 NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
/* Small transfer: wait for the exact watermark in one event. */
433 EV_SET(&eventlist[1], event_data->ident,
434 qstate->kevent_filter, EV_ADD | EV_ONESHOT,
435 NOTE_LOWAT, qstate->kevent_watermark, qstate);
/* Alternate-I/O continuation: next chunk is the remainder or one
 * MAX_SOCKET_IO_SIZE page, whichever is smaller. */
438 if (qstate->io_buffer + qstate->io_buffer_size -
439 qstate->io_buffer_p <
440 MAX_SOCKET_IO_SIZE) {
441 qstate->io_buffer_watermark = qstate->io_buffer +
442 qstate->io_buffer_size - qstate->io_buffer_p;
443 EV_SET(&eventlist[1], event_data->ident,
444 qstate->io_buffer_filter,
445 EV_ADD | EV_ONESHOT, NOTE_LOWAT,
446 qstate->io_buffer_watermark,
449 qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
450 EV_SET(&eventlist[1], event_data->ident,
451 qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
452 NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
/* Re-arm the lifetime timer with the remaining budget (milliseconds). */
455 EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
456 EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
457 kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);
459 TRACE_OUT(process_socket_event);
463 * This routine is called if timer event has been signaled in the kqueue. It
464 * just closes the socket and destroys the query_state.
467 process_timer_event(struct kevent *event_data, struct runtime_env *env,
468 struct configuration *config)
470 struct query_state *qstate;
472 TRACE_IN(process_timer_event);
473 qstate = (struct query_state *)event_data->udata;
474 destroy_query_state(qstate);
475 close(event_data->ident);
476 TRACE_OUT(process_timer_event);
480 * Processing loop is the basic processing routine, that forms a body of each
/* NOTE(review): the rest of this comment ("...of each processing thread")
 * and the `static void` line are elided in this extraction. */
484 processing_loop(cache the_cache, struct runtime_env *env,
485 struct configuration *config)
487 struct timespec timeout;
488 const int eventlist_size = 1;
489 struct kevent eventlist[eventlist_size];
/* NOTE(review): declarations of i and nevents are elided here. */
492 TRACE_MSG("=> processing_loop");
493 memset(&timeout, 0, sizeof(struct timespec));
494 memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);
/* Block indefinitely (NULL timeout) until one event is delivered. */
497 nevents = kevent(env->queue, NULL, 0, eventlist,
498 eventlist_size, NULL);
500 * we can only receive 1 event on success
503 struct kevent *event_data;
504 event_data = &eventlist[0];
/* Listening-socket event: drain the whole accept backlog reported in
 * event_data->data, then re-arm the one-shot listen event. */
506 if ((int)event_data->ident == env->sockfd) {
507 for (i = 0; i < event_data->data; ++i)
508 accept_connection(event_data, env, config);
510 EV_SET(eventlist, s_runtime_env->sockfd,
511 EVFILT_READ, EV_ADD | EV_ONESHOT,
514 sizeof(struct timespec));
515 kevent(s_runtime_env->queue, eventlist,
516 1, NULL, 0, &timeout);
/* Client event: dispatch on filter type (READ/WRITE vs TIMER). */
519 switch (event_data->filter) {
522 process_socket_event(event_data,
526 process_timer_event(event_data,
534 /* this branch shouldn't be currently executed */
538 TRACE_MSG("<= processing_loop");
542 * Wrapper above the processing loop function. It sets the thread signal mask
543 * to avoid SIGPIPE signals (which can happen if the client works incorrectly).
546 processing_thread(void *data)
548 struct processing_thread_args *args;
551 TRACE_MSG("=> processing_thread");
552 args = (struct processing_thread_args *)data;
555 sigaddset(&new, SIGPIPE);
556 if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
557 LOG_ERR_1("processing thread",
558 "thread can't block the SIGPIPE signal");
560 processing_loop(args->the_cache, args->the_runtime_env,
561 args->the_configuration);
563 TRACE_MSG("<= processing_thread");
/*
 * Cache time source: returns the current monotonic time with the
 * microsecond field zeroed.  CLOCK_MONOTONIC is used (rather than wall
 * clock) so cached-entry lifetimes are immune to clock steps.
 *
 * time - out parameter; tv_sec receives the monotonic seconds, tv_usec is
 *        always set to 0 (the cache only uses second resolution).
 */
void
get_time_func(struct timeval *time)
{
	struct timespec res;

	memset(&res, 0, sizeof(struct timespec));
	clock_gettime(CLOCK_MONOTONIC, &res);

	time->tv_sec = res.tv_sec;
	time->tv_usec = 0;
}
580 * The idea of _nss_cache_cycle_prevention_function is that nsdispatch
581 * will search for this symbol in the executable. This symbol is the
582 * attribute of the caching daemon. So, if it exists, nsdispatch won't try
583 * to connect to the caching daemon and will just ignore the 'cache'
584 * source in the nsswitch.conf. This method helps to avoid cycles and
585 * organize self-performing requests.
587 * (not actually a function; it used to be, but it doesn't make any
588 * difference, as long as it has external linkage)
590 void *_nss_cache_cycle_prevention_function;
/*
 * Entry point.  Two mutually exclusive modes:
 *  - interactive mode (-i/-I): connect to a running daemon and ask it to
 *    transform (clear) user or global cache entries, then exit;
 *  - daemon mode (default): parse the configuration, initialize agents,
 *    cache and runtime environment, optionally daemonize, and run the
 *    processing loop in one or more threads.
 */
593 main(int argc, char *argv[])
595 struct processing_thread_args *thread_args;
/* NOTE(review): declarations of threads, pid, res, i, error_line are elided
 * throughout this declaration block. */
598 struct pidfh *pidfile;
601 char const *config_file;
602 char const *error_str;
606 int trace_mode_enabled;
607 int force_single_threaded;
608 int do_not_daemonize;
609 int clear_user_cache_entries, clear_all_cache_entries;
610 char *user_config_entry_name, *global_config_entry_name;
612 int daemon_mode, interactive_mode;
615 /* by default all debug messages are omitted */
618 /* parsing command line arguments */
619 trace_mode_enabled = 0;
620 force_single_threaded = 0;
621 do_not_daemonize = 0;
622 clear_user_cache_entries = 0;
623 clear_all_cache_entries = 0;
625 user_config_entry_name = NULL;
626 global_config_entry_name = NULL;
/* -n: no daemonize, -s: single thread, -t: trace, -d: (debug),
 * -i NAME: clear user cache entry, -I NAME: clear global cache entry;
 * "all" as NAME leaves the entry-name pointer NULL (meaning every entry). */
627 while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
630 do_not_daemonize = 1;
633 force_single_threaded = 1;
636 trace_mode_enabled = 1;
639 clear_user_cache_entries = 1;
641 if (strcmp(optarg, "all") != 0)
642 user_config_entry_name = strdup(optarg);
645 clear_all_cache_entries = 1;
647 if (strcmp(optarg, "all") != 0)
648 global_config_entry_name =
/* Daemon-mode and interactive-mode flags must not be mixed. */
661 daemon_mode = do_not_daemonize | force_single_threaded |
663 interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
666 if ((daemon_mode != 0) && (interactive_mode != 0)) {
667 LOG_ERR_1("main", "daemon mode and interactive_mode arguments "
668 "can't be used together");
/* Interactive mode: find the running daemon via its pidfile and send it
 * cache-transformation requests over the control socket. */
672 if (interactive_mode != 0) {
673 FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
676 struct nscd_connection_params connection_params;
677 nscd_connection connection;
682 errx(EXIT_FAILURE, "There is no daemon running.");
684 memset(pidbuf, 0, sizeof(pidbuf));
685 fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
688 if (ferror(pidfin) != 0)
689 errx(EXIT_FAILURE, "Can't read from pidfile.");
691 if (sscanf(pidbuf, "%d", &pid) != 1)
692 errx(EXIT_FAILURE, "Invalid pidfile.");
693 LOG_MSG_1("main", "daemon PID is %d", pid);
696 memset(&connection_params, 0,
697 sizeof(struct nscd_connection_params));
698 connection_params.socket_path = DEFAULT_SOCKET_PATH;
699 connection = open_nscd_connection__(&connection_params);
700 if (connection == INVALID_NSCD_CONNECTION)
701 errx(EXIT_FAILURE, "Can't connect to the daemon.");
703 if (clear_user_cache_entries != 0) {
704 result = nscd_transform__(connection,
705 user_config_entry_name, TT_USER);
708 "user cache transformation failed");
711 "user cache_transformation "
/* Global transformation is restricted to root. */
715 if (clear_all_cache_entries != 0) {
717 errx(EXIT_FAILURE, "Only root can initiate "
718 "global cache transformation.");
720 result = nscd_transform__(connection,
721 global_config_entry_name, TT_ALL);
724 "global cache transformation "
728 "global cache transformation "
732 close_nscd_connection__(connection);
/* free(NULL) is a no-op, so unset names are safe here. */
734 free(user_config_entry_name);
735 free(global_config_entry_name);
736 return (EXIT_SUCCESS);
/* Daemon mode from here on: claim the pidfile (also detects a live peer). */
739 pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
740 if (pidfile == NULL) {
742 errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
744 warn("Cannot open or create pidfile");
747 if (trace_mode_enabled == 1)
750 /* blocking the main thread from receiving SIGPIPE signal */
751 sigblock(sigmask(SIGPIPE));
754 if (do_not_daemonize == 0) {
/* Keep stderr open when tracing so TRACE_* output stays visible. */
755 res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
757 LOG_ERR_1("main", "can't daemonize myself: %s",
759 pidfile_remove(pidfile);
762 LOG_MSG_1("main", "successfully daemonized");
765 pidfile_write(pidfile);
/* Register one plain and one multipart agent per supported nsswitch map. */
767 s_agent_table = init_agent_table();
768 register_agent(s_agent_table, init_passwd_agent());
769 register_agent(s_agent_table, init_passwd_mp_agent());
770 register_agent(s_agent_table, init_group_agent());
771 register_agent(s_agent_table, init_group_mp_agent());
772 register_agent(s_agent_table, init_services_agent());
773 register_agent(s_agent_table, init_services_mp_agent());
774 LOG_MSG_1("main", "request agents registered successfully");
777 * Hosts agent can't work properly until we have access to the
778 * appropriate dtab structures, which are used in nsdispatch
781 register_agent(s_agent_table, init_hosts_agent());
784 /* configuration initialization */
785 s_configuration = init_configuration();
786 fill_configuration_defaults(s_configuration);
/* Try the system config first, then fall back to the local default path. */
790 config_file = CONFIG_PATH;
792 res = parse_config_file(s_configuration, config_file, &error_str,
794 if ((res != 0) && (error_str == NULL)) {
795 config_file = DEFAULT_CONFIG_PATH;
796 res = parse_config_file(s_configuration, config_file,
797 &error_str, &error_line);
801 if (error_str != NULL) {
802 LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n",
803 config_file, error_line, error_str);
805 LOG_ERR_1("main", "no configuration file found "
806 "- was looking for %s and %s",
807 CONFIG_PATH, DEFAULT_CONFIG_PATH);
809 destroy_configuration(s_configuration);
813 if (force_single_threaded == 1)
814 s_configuration->threads_num = 1;
816 /* cache initialization */
817 s_cache = init_cache_(s_configuration);
818 if (s_cache == NULL) {
819 LOG_ERR_1("main", "can't initialize the cache");
820 destroy_configuration(s_configuration);
824 /* runtime environment initialization */
825 s_runtime_env = init_runtime_env(s_configuration);
826 if (s_runtime_env == NULL) {
827 LOG_ERR_1("main", "can't initialize the runtime environment");
828 destroy_configuration(s_configuration);
829 destroy_cache_(s_cache);
/* Multi-threaded: spawn threads_num workers, each running processing_loop
 * via processing_thread with its own heap-allocated argument bundle. */
833 if (s_configuration->threads_num > 1) {
834 threads = calloc(s_configuration->threads_num,
836 for (i = 0; i < s_configuration->threads_num; ++i) {
837 thread_args = malloc(
838 sizeof(*thread_args));
/* NOTE(review): malloc/calloc results are used unchecked here in the
 * visible extraction. */
839 thread_args->the_cache = s_cache;
840 thread_args->the_runtime_env = s_runtime_env;
841 thread_args->the_configuration = s_configuration;
843 LOG_MSG_1("main", "thread #%d was successfully created",
845 pthread_create(&threads[i], NULL, processing_thread,
851 for (i = 0; i < s_configuration->threads_num; ++i)
852 pthread_join(threads[i], NULL);
854 LOG_MSG_1("main", "working in single-threaded mode");
855 processing_loop(s_cache, s_runtime_env, s_configuration);
859 /* runtime environment destruction */
860 destroy_runtime_env(s_runtime_env);
862 /* cache destruction */
863 destroy_cache_(s_cache);
865 /* configuration destruction */
866 destroy_configuration(s_configuration);
868 /* agents table destruction */
869 destroy_agent_table(s_agent_table);
871 pidfile_remove(pidfile);
872 return (EXIT_SUCCESS);