/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/un.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "agents/passwd.h"
#include "agents/group.h"
#include "agents/services.h"
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "nscdcli.h"
#include "parser.h"
#include "query.h"
#include "singletons.h"

#ifndef CONFIG_PATH
#define CONFIG_PATH "/etc/nscd.conf"
#endif
#define DEFAULT_CONFIG_PATH     "nscd.conf"

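/*
 * Threshold above which socket I/O for a single request or response is
 * split into smaller chunks (see process_socket_event()).
 */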
#define MAX_SOCKET_IO_SIZE      4096

struct processing_thread_args {
        cache   the_cache;
        struct configuration    *the_configuration;
        struct runtime_env              *the_runtime_env;
};

static void accept_connection(struct kevent *, struct runtime_env *,
        struct configuration *);
static void destroy_cache_(cache);
static void destroy_runtime_env(struct runtime_env *);
static cache init_cache_(struct configuration *);
static struct runtime_env *init_runtime_env(struct configuration *);
static void processing_loop(cache, struct runtime_env *,
        struct configuration *);
static void process_socket_event(struct kevent *, struct runtime_env *,
        struct configuration *);
static void process_timer_event(struct kevent *, struct runtime_env *,
        struct configuration *);
static void *processing_thread(void *);
static void usage(void);

void get_time_func(struct timeval *);

static void
usage(void)
{
        fprintf(stderr,
            "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
        exit(1);
}

static cache
init_cache_(struct configuration *config)
{
        struct cache_params params;
        cache retval;

        struct configuration_entry *config_entry;
        size_t  size, i;
        int res;

        TRACE_IN(init_cache_);

        memset(&params, 0, sizeof(struct cache_params));
        params.get_time_func = get_time_func;
        retval = init_cache(&params);

        size = configuration_get_entries_size(config);
        for (i = 0; i < size; ++i) {
                config_entry = configuration_get_entry(config, i);
                /*
                 * We should register common entries now - multipart entries
                 * would be registered automatically during the queries.
                 */
                res = register_cache_entry(retval, (struct cache_entry_params *)
                        &config_entry->positive_cache_params);
                config_entry->positive_cache_entry = find_cache_entry(retval,
                        config_entry->positive_cache_params.cep.entry_name);
                assert(config_entry->positive_cache_entry !=
                        INVALID_CACHE_ENTRY);

                res = register_cache_entry(retval, (struct cache_entry_params *)
                        &config_entry->negative_cache_params);
                config_entry->negative_cache_entry = find_cache_entry(retval,
                        config_entry->negative_cache_params.cep.entry_name);
                assert(config_entry->negative_cache_entry !=
                        INVALID_CACHE_ENTRY);
        }

        LOG_MSG_2("cache", "cache was successfully initialized");
        TRACE_OUT(init_cache_);
        return (retval);
}

static void
destroy_cache_(cache the_cache)
{
        TRACE_IN(destroy_cache_);
        destroy_cache(the_cache);
        TRACE_OUT(destroy_cache_);
}

/*
 * The socket and the kqueue are prepared here. We have one global queue
 * for both socket and timer events.
 */
static struct runtime_env *
init_runtime_env(struct configuration *config)
{
        int serv_addr_len;
        struct sockaddr_un serv_addr;

        struct kevent eventlist;
        struct timespec timeout;

        struct runtime_env *retval;

        TRACE_IN(init_runtime_env);
        retval = calloc(1, sizeof(*retval));
        assert(retval != NULL);

        retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);

        if (config->force_unlink == 1)
                unlink(config->socket_path);

        memset(&serv_addr, 0, sizeof(struct sockaddr_un));
        serv_addr.sun_family = PF_LOCAL;
        strlcpy(serv_addr.sun_path, config->socket_path,
                sizeof(serv_addr.sun_path));
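        /*
         * The address length passed to bind() covers the sun_family field
         * plus the NUL-terminated socket path.
         */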
        serv_addr_len = sizeof(serv_addr.sun_family) +
                strlen(serv_addr.sun_path) + 1;

        if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
                serv_addr_len) == -1) {
                close(retval->sockfd);
                free(retval);

                LOG_ERR_2("runtime environment", "can't bind socket to path: "
                        "%s", config->socket_path);
                TRACE_OUT(init_runtime_env);
                return (NULL);
        }
        LOG_MSG_2("runtime environment", "using socket %s",
                config->socket_path);

        /*
         * Here we mark the socket as non-blocking and set its backlog to
         * the maximum value.
         */
        chmod(config->socket_path, config->socket_mode);
        listen(retval->sockfd, -1);
        fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);

        retval->queue = kqueue();
        assert(retval->queue != -1);

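        /*
         * Register a one-shot read filter for the listening socket;
         * processing_loop() re-adds it after each batch of accepted
         * connections.
         */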
        EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
                0, 0, 0);
        memset(&timeout, 0, sizeof(struct timespec));
        kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);

        LOG_MSG_2("runtime environment", "successfully initialized");
        TRACE_OUT(init_runtime_env);
        return (retval);
}

static void
destroy_runtime_env(struct runtime_env *env)
{
        TRACE_IN(destroy_runtime_env);
        close(env->queue);
        close(env->sockfd);
        free(env);
        TRACE_OUT(destroy_runtime_env);
}

static void
accept_connection(struct kevent *event_data, struct runtime_env *env,
        struct configuration *config)
{
        struct kevent   eventlist[2];
        struct timespec timeout;
        struct query_state      *qstate;

        int     fd;
        int     res;

        uid_t   euid;
        gid_t   egid;

        TRACE_IN(accept_connection);
        fd = accept(event_data->ident, NULL, NULL);
        if (fd == -1) {
                LOG_ERR_2("accept_connection", "error %d during accept()",
                    errno);
                TRACE_OUT(accept_connection);
                return;
        }

        if (getpeereid(fd, &euid, &egid) != 0) {
                LOG_ERR_2("accept_connection", "error %d during getpeereid()",
                        errno);
                /* don't leak the accepted descriptor on failure */
                close(fd);
                TRACE_OUT(accept_connection);
                return;
        }

        qstate = init_query_state(fd, sizeof(int), euid, egid);
        if (qstate == NULL) {
                LOG_ERR_2("accept_connection", "can't init query_state");
                /* don't leak the accepted descriptor on failure */
                close(fd);
                TRACE_OUT(accept_connection);
                return;
        }

        memset(&timeout, 0, sizeof(struct timespec));
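        /*
         * Register two one-shot events for the new connection: a timer that
         * enforces the query timeout, and a read filter that fires once at
         * least kevent_watermark bytes are available.
         */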
        EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
                0, qstate->timeout.tv_sec * 1000, qstate);
        EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
                NOTE_LOWAT, qstate->kevent_watermark, qstate);
        res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
        if (res < 0)
                LOG_ERR_2("accept_connection", "kevent error");

        TRACE_OUT(accept_connection);
}

static void
process_socket_event(struct kevent *event_data, struct runtime_env *env,
        struct configuration *config)
{
        struct kevent   eventlist[2];
        struct timeval  query_timeout;
        struct timespec kevent_timeout;
        int     nevents;
        int     eof_res, res;
        ssize_t io_res;
        struct query_state *qstate;

        TRACE_IN(process_socket_event);
        eof_res = event_data->flags & EV_EOF ? 1 : 0;
        res = 0;

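        /*
         * Delete the pending query-timeout timer for this descriptor before
         * doing any I/O; if the timer has already fired, the timer handler
         * owns the query_state and we must not touch it here.
         */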
        memset(&kevent_timeout, 0, sizeof(struct timespec));
        EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
                0, 0, NULL);
        nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
        if (nevents == -1) {
                if (errno == ENOENT) {
                        /* the timer is already handling this event */
                        TRACE_OUT(process_socket_event);
                        return;
                } else {
                        /* some other error happened */
                        LOG_ERR_2("process_socket_event", "kevent error, errno"
                                " is %d", errno);
                        TRACE_OUT(process_socket_event);
                        return;
                }
        }
        qstate = (struct query_state *)event_data->udata;

        /*
         * If the buffer to be sent or received is too large, we transfer it
         * implicitly, using the query_io_buffer_read and query_io_buffer_write
         * functions in the query_state. These functions use a temporary
         * buffer, which is later sent or received in parts. The code below
         * implements buffer splitting/merging for send/receive operations.
         * It also does the actual socket IO operations.
         */
        if (((qstate->use_alternate_io == 0) &&
                (qstate->kevent_watermark <= (size_t)event_data->data)) ||
                ((qstate->use_alternate_io != 0) &&
                (qstate->io_buffer_watermark <= (size_t)event_data->data))) {
                if (qstate->use_alternate_io != 0) {
                        switch (qstate->io_buffer_filter) {
                        case EVFILT_READ:
                                io_res = query_socket_read(qstate,
                                        qstate->io_buffer_p,
                                        qstate->io_buffer_watermark);
                                if (io_res < 0) {
                                        qstate->use_alternate_io = 0;
                                        qstate->process_func = NULL;
                                } else {
                                        qstate->io_buffer_p += io_res;
                                        if (qstate->io_buffer_p ==
                                                qstate->io_buffer +
                                                qstate->io_buffer_size) {
                                                qstate->io_buffer_p =
                                                    qstate->io_buffer;
                                                qstate->use_alternate_io = 0;
                                        }
                                }
                        break;
                        default:
                        break;
                        }
                }

                if (qstate->use_alternate_io == 0) {
                        do {
                                res = qstate->process_func(qstate);
                        } while ((qstate->kevent_watermark == 0) &&
                                        (qstate->process_func != NULL) &&
                                        (res == 0));

                        if (res != 0)
                                qstate->process_func = NULL;
                }

                if ((qstate->use_alternate_io != 0) &&
                        (qstate->io_buffer_filter == EVFILT_WRITE)) {
                        io_res = query_socket_write(qstate, qstate->io_buffer_p,
                                qstate->io_buffer_watermark);
                        if (io_res < 0) {
                                qstate->use_alternate_io = 0;
                                qstate->process_func = NULL;
                        } else
                                qstate->io_buffer_p += io_res;
                }
        } else {
                /* assuming that socket was closed */
                qstate->process_func = NULL;
                qstate->use_alternate_io = 0;
        }

        if (((qstate->process_func == NULL) &&
                (qstate->use_alternate_io == 0)) ||
                (eof_res != 0) || (res != 0)) {
                destroy_query_state(qstate);
                close(event_data->ident);
                TRACE_OUT(process_socket_event);
                return;
        }

        /*
         * Re-arm the query timeout: compute how much of the query_state's
         * lifetime remains.
         */
        get_time_func(&query_timeout);
        query_timeout.tv_usec = 0;
        query_timeout.tv_sec -= qstate->creation_time.tv_sec;
        if (query_timeout.tv_sec > qstate->timeout.tv_sec)
                query_timeout.tv_sec = 0;
        else
                query_timeout.tv_sec = qstate->timeout.tv_sec -
                        query_timeout.tv_sec;

        if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
                qstate->io_buffer + qstate->io_buffer_size))
                qstate->use_alternate_io = 0;

        if (qstate->use_alternate_io == 0) {
                /*
                 * If we must send or receive a large block of data, we
                 * prepare the query_state's io_XXX fields and substitute its
                 * write_func and read_func with query_io_buffer_write and
                 * query_io_buffer_read, which let us transfer the large
                 * buffer implicitly over subsequent calls to
                 * process_socket_event().
                 */
                if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
#if 0
                        /*
                         * XXX: Uncommenting this code makes nscd(8) fail for
                         *      entries larger than a few kB, causing a few
                         *      seconds' worth of delay for each call to
                         *      retrieve them.
                         */
                        if (qstate->io_buffer != NULL)
                                free(qstate->io_buffer);

                        qstate->io_buffer = calloc(1,
                                qstate->kevent_watermark);
                        assert(qstate->io_buffer != NULL);

                        qstate->io_buffer_p = qstate->io_buffer;
                        qstate->io_buffer_size = qstate->kevent_watermark;
                        qstate->io_buffer_filter = qstate->kevent_filter;

                        qstate->write_func = query_io_buffer_write;
                        qstate->read_func = query_io_buffer_read;

                        if (qstate->kevent_filter == EVFILT_READ)
                                qstate->use_alternate_io = 1;
#endif

                        qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
                        EV_SET(&eventlist[1], event_data->ident,
                                qstate->kevent_filter, EV_ADD | EV_ONESHOT,
                                NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
                } else {
                        EV_SET(&eventlist[1], event_data->ident,
                                qstate->kevent_filter, EV_ADD | EV_ONESHOT,
                                NOTE_LOWAT, qstate->kevent_watermark, qstate);
                }
        } else {
                if (qstate->io_buffer + qstate->io_buffer_size -
                        qstate->io_buffer_p <
                        MAX_SOCKET_IO_SIZE) {
                        qstate->io_buffer_watermark = qstate->io_buffer +
                                qstate->io_buffer_size - qstate->io_buffer_p;
                        EV_SET(&eventlist[1], event_data->ident,
                                qstate->io_buffer_filter,
                                EV_ADD | EV_ONESHOT, NOTE_LOWAT,
                                qstate->io_buffer_watermark,
                                qstate);
                } else {
                        qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
                        EV_SET(&eventlist[1], event_data->ident,
                                qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
                                NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
                }
        }
        EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
                EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
        kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);

        TRACE_OUT(process_socket_event);
}

/*
 * This routine is called when a timer event has been signaled in the kqueue.
 * It just closes the socket and destroys the query_state.
 */
static void
process_timer_event(struct kevent *event_data, struct runtime_env *env,
        struct configuration *config)
{
        struct query_state      *qstate;

        TRACE_IN(process_timer_event);
        qstate = (struct query_state *)event_data->udata;
        destroy_query_state(qstate);
        close(event_data->ident);
        TRACE_OUT(process_timer_event);
}

/*
 * The processing loop is the basic processing routine that forms the body
 * of each processing thread.
 */
static void
processing_loop(cache the_cache, struct runtime_env *env,
        struct configuration *config)
{
        struct timespec timeout;
        const int eventlist_size = 1;
        struct kevent eventlist[eventlist_size];
        int nevents, i;

        TRACE_MSG("=> processing_loop");
        memset(&timeout, 0, sizeof(struct timespec));
        memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);

        for (;;) {
                nevents = kevent(env->queue, NULL, 0, eventlist,
                        eventlist_size, NULL);
                /*
                 * we can only receive 1 event on success
                 */
                if (nevents == 1) {
                        struct kevent *event_data;
                        event_data = &eventlist[0];

                        if ((int)event_data->ident == env->sockfd) {
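                                /*
                                 * For the listening socket, event_data->data
                                 * holds the number of pending connections:
                                 * accept them all, then re-arm the one-shot
                                 * read filter.
                                 */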
                                for (i = 0; i < event_data->data; ++i)
                                    accept_connection(event_data, env, config);

                                EV_SET(eventlist, s_runtime_env->sockfd,
                                    EVFILT_READ, EV_ADD | EV_ONESHOT,
                                    0, 0, 0);
                                memset(&timeout, 0,
                                    sizeof(struct timespec));
                                kevent(s_runtime_env->queue, eventlist,
                                    1, NULL, 0, &timeout);

                        } else {
                                switch (event_data->filter) {
                                case EVFILT_READ:
                                case EVFILT_WRITE:
                                        process_socket_event(event_data,
                                                env, config);
                                        break;
                                case EVFILT_TIMER:
                                        process_timer_event(event_data,
                                                env, config);
                                        break;
                                default:
                                        break;
                                }
                        }
                } else {
                        /* this branch shouldn't be currently executed */
                }
        }

        TRACE_MSG("<= processing_loop");
}

/*
 * Wrapper around the processing loop function. It sets the thread signal mask
 * to avoid SIGPIPE signals (which can happen if the client works incorrectly).
 */
static void *
processing_thread(void *data)
{
        struct processing_thread_args   *args;
        sigset_t new;

        TRACE_MSG("=> processing_thread");
        args = (struct processing_thread_args *)data;

        sigemptyset(&new);
        sigaddset(&new, SIGPIPE);
        if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
                LOG_ERR_1("processing thread",
                        "thread can't block the SIGPIPE signal");

        processing_loop(args->the_cache, args->the_runtime_env,
                args->the_configuration);
        free(args);
        TRACE_MSG("<= processing_thread");

        return (NULL);
}

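/*
 * Timekeeping callback used by the cache: returns a monotonic timestamp
 * with one-second resolution (tv_usec is always zero), so cached entries
 * age independently of wall-clock adjustments.
 */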
void
get_time_func(struct timeval *time)
{
        struct timespec res;
        memset(&res, 0, sizeof(struct timespec));
        clock_gettime(CLOCK_MONOTONIC, &res);

        time->tv_sec = res.tv_sec;
        time->tv_usec = 0;
}

/*
 * The idea of _nss_cache_cycle_prevention_function is that nsdispatch
 * will search for this symbol in the executable. This symbol is the
 * attribute of the caching daemon. So, if it exists, nsdispatch won't try
 * to connect to the caching daemon and will just ignore the 'cache'
 * source in nsswitch.conf. This helps to avoid cycles when the daemon
 * performs nsswitch requests of its own.
 *
 * (not actually a function; it used to be, but it doesn't make any
 * difference, as long as it has external linkage)
 */
void *_nss_cache_cycle_prevention_function;
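
/*
 * For illustration only (an assumption about how the lookup might be done,
 * not code from this file): a dispatcher could detect the symbol above at
 * run time with something like
 *
 *      if (dlsym(RTLD_DEFAULT, "_nss_cache_cycle_prevention_function") != NULL)
 *              skip the "cache" source;
 *
 * The actual check lives in libc's nsdispatch(3) implementation.
 */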

int
main(int argc, char *argv[])
{
        struct processing_thread_args *thread_args;
        pthread_t *threads;

        struct pidfh *pidfile;
        pid_t pid;

        char const *config_file;
        char const *error_str;
        int error_line;
        int i, res;

        int trace_mode_enabled;
        int force_single_threaded;
        int do_not_daemonize;
        int clear_user_cache_entries, clear_all_cache_entries;
        char *user_config_entry_name, *global_config_entry_name;
        int show_statistics;
        int daemon_mode, interactive_mode;


        /* by default all debug messages are omitted */
        TRACE_OFF();

        /* parsing command line arguments */
        trace_mode_enabled = 0;
        force_single_threaded = 0;
        do_not_daemonize = 0;
        clear_user_cache_entries = 0;
        clear_all_cache_entries = 0;
        show_statistics = 0;
        user_config_entry_name = NULL;
        global_config_entry_name = NULL;
        while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
                switch (res) {
                case 'n':
                        do_not_daemonize = 1;
                        break;
                case 's':
                        force_single_threaded = 1;
                        break;
                case 't':
                        trace_mode_enabled = 1;
                        break;
                case 'i':
                        clear_user_cache_entries = 1;
                        if (optarg != NULL)
                                if (strcmp(optarg, "all") != 0)
                                        user_config_entry_name = strdup(optarg);
                        break;
                case 'I':
                        clear_all_cache_entries = 1;
                        if (optarg != NULL)
                                if (strcmp(optarg, "all") != 0)
                                        global_config_entry_name =
                                                strdup(optarg);
                        break;
                case 'd':
                        show_statistics = 1;
                        break;
                case '?':
                default:
                        usage();
                        /* NOT REACHED */
                }
        }

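        /*
         * OR the flags together to decide which mode we are in: daemon-mode
         * options and interactive (administrative) options are mutually
         * exclusive.
         */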
        daemon_mode = do_not_daemonize | force_single_threaded |
                trace_mode_enabled;
        interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
                show_statistics;

        if ((daemon_mode != 0) && (interactive_mode != 0)) {
                LOG_ERR_1("main", "daemon mode and interactive mode arguments "
                        "can't be used together");
                usage();
        }

        if (interactive_mode != 0) {
                FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
                char pidbuf[256];

                struct nscd_connection_params connection_params;
                nscd_connection connection;

                int result;

                if (pidfin == NULL)
                        errx(EXIT_FAILURE, "There is no daemon running.");

                memset(pidbuf, 0, sizeof(pidbuf));
                fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
                if (ferror(pidfin) != 0)
                        errx(EXIT_FAILURE, "Can't read from pidfile.");
                fclose(pidfin);

                if (sscanf(pidbuf, "%d", &pid) != 1)
                        errx(EXIT_FAILURE, "Invalid pidfile.");
                LOG_MSG_1("main", "daemon PID is %d", pid);


                memset(&connection_params, 0,
                        sizeof(struct nscd_connection_params));
                connection_params.socket_path = DEFAULT_SOCKET_PATH;
                connection = open_nscd_connection__(&connection_params);
                if (connection == INVALID_NSCD_CONNECTION)
                        errx(EXIT_FAILURE, "Can't connect to the daemon.");

                if (clear_user_cache_entries != 0) {
                        result = nscd_transform__(connection,
                                user_config_entry_name, TT_USER);
                        if (result != 0)
                                LOG_MSG_1("main",
                                        "user cache transformation failed");
                        else
                                LOG_MSG_1("main",
                                        "user cache transformation "
                                        "succeeded");
                }

                if (clear_all_cache_entries != 0) {
                        if (geteuid() != 0)
                                errx(EXIT_FAILURE, "Only root can initiate "
                                        "global cache transformation.");

                        result = nscd_transform__(connection,
                                global_config_entry_name, TT_ALL);
                        if (result != 0)
                                LOG_MSG_1("main",
                                        "global cache transformation "
                                        "failed");
                        else
                                LOG_MSG_1("main",
                                        "global cache transformation "
                                        "succeeded");
                }

                close_nscd_connection__(connection);

                free(user_config_entry_name);
                free(global_config_entry_name);
                return (EXIT_SUCCESS);
        }

        pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
        if (pidfile == NULL) {
                if (errno == EEXIST)
                        errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
                                pid);
                warn("Cannot open or create pidfile");
        }

        if (trace_mode_enabled == 1)
                TRACE_ON();

        /* blocking the main thread from receiving SIGPIPE signal */
        sigblock(sigmask(SIGPIPE));

        /* daemonization */
        if (do_not_daemonize == 0) {
                res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
                if (res != 0) {
                        LOG_ERR_1("main", "can't daemonize myself: %s",
                                strerror(errno));
                        pidfile_remove(pidfile);
                        goto fin;
                } else
                        LOG_MSG_1("main", "successfully daemonized");
        }

        pidfile_write(pidfile);

        s_agent_table = init_agent_table();
        register_agent(s_agent_table, init_passwd_agent());
        register_agent(s_agent_table, init_passwd_mp_agent());
        register_agent(s_agent_table, init_group_agent());
        register_agent(s_agent_table, init_group_mp_agent());
        register_agent(s_agent_table, init_services_agent());
        register_agent(s_agent_table, init_services_mp_agent());
        LOG_MSG_1("main", "request agents registered successfully");

        /*
         * Hosts agent can't work properly until we have access to the
         * appropriate dtab structures, which are used in nsdispatch
         * calls
         *
         register_agent(s_agent_table, init_hosts_agent());
        */

        /* configuration initialization */
        s_configuration = init_configuration();
        fill_configuration_defaults(s_configuration);

        error_str = NULL;
        error_line = 0;
        config_file = CONFIG_PATH;

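        /*
         * Try the system-wide configuration file first; if it cannot be
         * opened, fall back to DEFAULT_CONFIG_PATH in the current directory.
         */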
        res = parse_config_file(s_configuration, config_file, &error_str,
                &error_line);
        if ((res != 0) && (error_str == NULL)) {
                config_file = DEFAULT_CONFIG_PATH;
                res = parse_config_file(s_configuration, config_file,
                        &error_str, &error_line);
        }

        if (res != 0) {
                if (error_str != NULL) {
                        LOG_ERR_1("main",
                                "error in configuration file(%s, %d): %s\n",
                                config_file, error_line, error_str);
                } else {
                        LOG_ERR_1("main", "no configuration file found "
                                "- was looking for %s and %s",
                                CONFIG_PATH, DEFAULT_CONFIG_PATH);
                }
                destroy_configuration(s_configuration);
                return (-1);
        }

        if (force_single_threaded == 1)
                s_configuration->threads_num = 1;

        /* cache initialization */
        s_cache = init_cache_(s_configuration);
        if (s_cache == NULL) {
                LOG_ERR_1("main", "can't initialize the cache");
                destroy_configuration(s_configuration);
                return (-1);
        }

        /* runtime environment initialization */
        s_runtime_env = init_runtime_env(s_configuration);
        if (s_runtime_env == NULL) {
                LOG_ERR_1("main", "can't initialize the runtime environment");
                destroy_configuration(s_configuration);
                destroy_cache_(s_cache);
                return (-1);
        }

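        /*
         * With more than one thread configured, spawn worker threads that
         * each run processing_loop() on the shared kqueue and wait for them;
         * otherwise run the loop directly in the main thread.
         */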
        if (s_configuration->threads_num > 1) {
                threads = calloc(s_configuration->threads_num,
                        sizeof(*threads));
                for (i = 0; i < s_configuration->threads_num; ++i) {
                        thread_args = malloc(
                                sizeof(*thread_args));
                        thread_args->the_cache = s_cache;
                        thread_args->the_runtime_env = s_runtime_env;
                        thread_args->the_configuration = s_configuration;

                        LOG_MSG_1("main", "thread #%d was successfully created",
                                i);
                        pthread_create(&threads[i], NULL, processing_thread,
                                thread_args);

                        thread_args = NULL;
                }

                for (i = 0; i < s_configuration->threads_num; ++i)
                        pthread_join(threads[i], NULL);
        } else {
                LOG_MSG_1("main", "working in single-threaded mode");
                processing_loop(s_cache, s_runtime_env, s_configuration);
        }

fin:
        /* runtime environment destruction */
        destroy_runtime_env(s_runtime_env);

        /* cache destruction */
        destroy_cache_(s_cache);

        /* configuration destruction */
        destroy_configuration(s_configuration);

        /* agents table destruction */
        destroy_agent_table(s_agent_table);

        pidfile_remove(pidfile);
        return (EXIT_SUCCESS);
}