/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/param.h>
#include <sys/un.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "agents/passwd.h"
#include "agents/group.h"
#include "agents/services.h"
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "nscdcli.h"
#include "parser.h"
#include "query.h"
#include "singletons.h"

#ifndef CONFIG_PATH
#define CONFIG_PATH "/etc/nscd.conf"
#endif
#define DEFAULT_CONFIG_PATH     "nscd.conf"

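/*
 * The maximum amount of data transferred in one socket read/write.  Requests
 * and responses larger than this are passed through the temporary I/O buffer
 * in chunks of this size (see process_socket_event()).
 */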
#define MAX_SOCKET_IO_SIZE      4096

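/*
 * Arguments passed to each processing thread.  All threads share the same
 * cache, configuration and runtime environment.
 */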
struct processing_thread_args {
        cache   the_cache;
        struct configuration    *the_configuration;
        struct runtime_env              *the_runtime_env;
};

static void accept_connection(struct kevent *, struct runtime_env *,
        struct configuration *);
static void destroy_cache_(cache);
static void destroy_runtime_env(struct runtime_env *);
static cache init_cache_(struct configuration *);
static struct runtime_env *init_runtime_env(struct configuration *);
static void print_version_info(void);
static void processing_loop(cache, struct runtime_env *,
        struct configuration *);
static void process_socket_event(struct kevent *, struct runtime_env *,
        struct configuration *);
static void process_timer_event(struct kevent *, struct runtime_env *,
        struct configuration *);
static void *processing_thread(void *);
static void usage(void);

void get_time_func(struct timeval *);

static void
print_version_info(void)
{
        TRACE_IN(print_version_info);
        printf("nscd v0.2 (20 Oct 2005)\nwas developed during SoC 2005\n");
        TRACE_OUT(print_version_info);
}

static void
usage(void)
{
        fprintf(stderr,
            "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
        exit(1);
}

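/*
 * Create the cache and register a positive and a negative cache entry for
 * every configuration entry.
 */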
static cache
init_cache_(struct configuration *config)
{
        struct cache_params params;
        cache retval;

        struct configuration_entry *config_entry;
        size_t  size, i;
        int res;

        TRACE_IN(init_cache_);

        memset(&params, 0, sizeof(struct cache_params));
        params.get_time_func = get_time_func;
        retval = init_cache(&params);

        size = configuration_get_entries_size(config);
        for (i = 0; i < size; ++i) {
                config_entry = configuration_get_entry(config, i);
                /*
                 * We should register common entries now - multipart entries
                 * would be registered automatically during the queries.
                 */
                res = register_cache_entry(retval, (struct cache_entry_params *)
                        &config_entry->positive_cache_params);
                config_entry->positive_cache_entry = find_cache_entry(retval,
                        config_entry->positive_cache_params.entry_name);
                assert(config_entry->positive_cache_entry !=
                        INVALID_CACHE_ENTRY);

                res = register_cache_entry(retval, (struct cache_entry_params *)
                        &config_entry->negative_cache_params);
                config_entry->negative_cache_entry = find_cache_entry(retval,
                        config_entry->negative_cache_params.entry_name);
                assert(config_entry->negative_cache_entry !=
                        INVALID_CACHE_ENTRY);
        }

        LOG_MSG_2("cache", "cache was successfully initialized");
        TRACE_OUT(init_cache_);
        return (retval);
}

static void
destroy_cache_(cache the_cache)
{
        TRACE_IN(destroy_cache_);
        destroy_cache(the_cache);
        TRACE_OUT(destroy_cache_);
}

/*
 * The socket and the kqueue are prepared here.  We have one global queue for
 * both socket and timer events.
 */
static struct runtime_env *
init_runtime_env(struct configuration *config)
{
        int serv_addr_len;
        struct sockaddr_un serv_addr;

        struct kevent eventlist;
        struct timespec timeout;

        struct runtime_env *retval;

        TRACE_IN(init_runtime_env);
        retval = (struct runtime_env *)malloc(sizeof(struct runtime_env));
        assert(retval != NULL);
        memset(retval, 0, sizeof(struct runtime_env));

        retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);

        if (config->force_unlink == 1)
                unlink(config->socket_path);

        memset(&serv_addr, 0, sizeof(struct sockaddr_un));
        serv_addr.sun_family = PF_LOCAL;
        strncpy(serv_addr.sun_path, config->socket_path,
                sizeof(serv_addr.sun_path));
        serv_addr_len = sizeof(serv_addr.sun_family) +
                strlen(serv_addr.sun_path) + 1;

        if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
                serv_addr_len) == -1) {
                close(retval->sockfd);
                free(retval);

                LOG_ERR_2("runtime environment", "can't bind socket to path: "
                        "%s", config->socket_path);
                TRACE_OUT(init_runtime_env);
                return (NULL);
        }
        LOG_MSG_2("runtime environment", "using socket %s",
                config->socket_path);

        /*
         * Mark the socket as non-blocking and set its backlog to the
         * maximum value.
         */
        chmod(config->socket_path, config->socket_mode);
        listen(retval->sockfd, -1);
        fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);

        retval->queue = kqueue();
        assert(retval->queue != -1);

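        /*
         * Watch the listening socket for incoming connections.  The event is
         * one-shot, so processing_loop() re-arms it after it has accepted
         * the pending connections.
         */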
        EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
                0, 0, 0);
        memset(&timeout, 0, sizeof(struct timespec));
        kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);

        LOG_MSG_2("runtime environment", "successfully initialized");
        TRACE_OUT(init_runtime_env);
        return (retval);
}

static void
destroy_runtime_env(struct runtime_env *env)
{
        TRACE_IN(destroy_runtime_env);
        close(env->queue);
        close(env->sockfd);
        free(env);
        TRACE_OUT(destroy_runtime_env);
}

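/*
 * Accept a pending connection on the listening socket, create a query_state
 * for it and register one-shot read and timer events for the new descriptor
 * in the kqueue.
 */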
static void
accept_connection(struct kevent *event_data, struct runtime_env *env,
        struct configuration *config)
{
        struct kevent   eventlist[2];
        struct timespec timeout;
        struct query_state      *qstate;

        int     fd;
        int     res;

        uid_t   euid;
        gid_t   egid;

        TRACE_IN(accept_connection);
        fd = accept(event_data->ident, NULL, NULL);
        if (fd == -1) {
                LOG_ERR_2("accept_connection", "error %d during accept()",
                    errno);
                TRACE_OUT(accept_connection);
                return;
        }

        if (getpeereid(fd, &euid, &egid) != 0) {
                LOG_ERR_2("accept_connection", "error %d during getpeereid()",
                        errno);
                close(fd);
                TRACE_OUT(accept_connection);
                return;
        }

        qstate = init_query_state(fd, sizeof(int), euid, egid);
        if (qstate == NULL) {
                LOG_ERR_2("accept_connection", "can't init query_state");
                TRACE_OUT(accept_connection);
                return;
        }

        memset(&timeout, 0, sizeof(struct timespec));
        EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
                0, qstate->timeout.tv_sec * 1000, qstate);
        EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
                NOTE_LOWAT, qstate->kevent_watermark, qstate);
        res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
        if (res < 0)
                LOG_ERR_2("accept_connection", "kevent error");

        TRACE_OUT(accept_connection);
}

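/*
 * Handle a read/write event on a client socket: perform the pending I/O
 * (either directly or through the temporary I/O buffer), advance the query
 * processing function and re-register the next one-shot read/write and timer
 * events.  The query_state is destroyed when processing is finished or an
 * error occurs.
 */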
static void
process_socket_event(struct kevent *event_data, struct runtime_env *env,
        struct configuration *config)
{
        struct kevent   eventlist[2];
        struct timeval  query_timeout;
        struct timespec kevent_timeout;
        int     nevents;
        int     eof_res, res;
        ssize_t io_res;
        struct query_state *qstate;

        TRACE_IN(process_socket_event);
        eof_res = event_data->flags & EV_EOF ? 1 : 0;
        res = 0;

        memset(&kevent_timeout, 0, sizeof(struct timespec));
        EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
                0, 0, NULL);
        nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
        if (nevents == -1) {
                if (errno == ENOENT) {
                        /* the timer is already handling this event */
                        TRACE_OUT(process_socket_event);
                        return;
                } else {
                        /* some other error happened */
                        LOG_ERR_2("process_socket_event", "kevent error, errno"
                                " is %d", errno);
                        TRACE_OUT(process_socket_event);
                        return;
                }
        }
        qstate = (struct query_state *)event_data->udata;

        /*
         * If the buffer that is to be sent/received is too large, we transfer
         * it implicitly, by using the query_io_buffer_read and
         * query_io_buffer_write functions of the query_state. These functions
         * use the temporary buffer, which is later sent/received in parts.
         * The code below implements buffer splitting/merging for send/receive
         * operations. It also does the actual socket I/O operations.
         */
        if (((qstate->use_alternate_io == 0) &&
                (qstate->kevent_watermark <= event_data->data)) ||
                ((qstate->use_alternate_io != 0) &&
                (qstate->io_buffer_watermark <= event_data->data))) {
                if (qstate->use_alternate_io != 0) {
                        switch (qstate->io_buffer_filter) {
                        case EVFILT_READ:
                                io_res = query_socket_read(qstate,
                                        qstate->io_buffer_p,
                                        qstate->io_buffer_watermark);
                                if (io_res < 0) {
                                        qstate->use_alternate_io = 0;
                                        qstate->process_func = NULL;
                                } else {
                                        qstate->io_buffer_p += io_res;
                                        if (qstate->io_buffer_p ==
                                                qstate->io_buffer +
                                                qstate->io_buffer_size) {
                                                qstate->io_buffer_p =
                                                    qstate->io_buffer;
                                                qstate->use_alternate_io = 0;
                                        }
                                }
                        break;
                        default:
                        break;
                        }
                }

                if (qstate->use_alternate_io == 0) {
                        do {
                                res = qstate->process_func(qstate);
                        } while ((qstate->kevent_watermark == 0) &&
                                        (qstate->process_func != NULL) &&
                                        (res == 0));

                        if (res != 0)
                                qstate->process_func = NULL;
                }

                if ((qstate->use_alternate_io != 0) &&
                        (qstate->io_buffer_filter == EVFILT_WRITE)) {
                        io_res = query_socket_write(qstate, qstate->io_buffer_p,
                                qstate->io_buffer_watermark);
                        if (io_res < 0) {
                                qstate->use_alternate_io = 0;
                                qstate->process_func = NULL;
                        } else
                                qstate->io_buffer_p += io_res;
                }
        } else {
                /* assuming that socket was closed */
                qstate->process_func = NULL;
                qstate->use_alternate_io = 0;
        }

        if (((qstate->process_func == NULL) &&
                (qstate->use_alternate_io == 0)) ||
                (eof_res != 0) || (res != 0)) {
                destroy_query_state(qstate);
                close(event_data->ident);
                TRACE_OUT(process_socket_event);
                return;
        }

        /* updating the query_state lifetime variable */
        get_time_func(&query_timeout);
        query_timeout.tv_usec = 0;
        query_timeout.tv_sec -= qstate->creation_time.tv_sec;
        if (query_timeout.tv_sec > qstate->timeout.tv_sec)
                query_timeout.tv_sec = 0;
        else
                query_timeout.tv_sec = qstate->timeout.tv_sec -
                        query_timeout.tv_sec;

        if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
                qstate->io_buffer + qstate->io_buffer_size))
                qstate->use_alternate_io = 0;

        if (qstate->use_alternate_io == 0) {
                /*
                 * If we must send/receive a large block of data, we should
                 * prepare the query_state's io_XXX fields.  We should also
                 * substitute its write_func and read_func with
                 * query_io_buffer_write and query_io_buffer_read, which will
                 * allow us to implicitly send/receive this large buffer later
                 * (in the subsequent calls to process_socket_event).
                 */
                if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
                        if (qstate->io_buffer != NULL)
                                free(qstate->io_buffer);

                        qstate->io_buffer = (char *)malloc(
                                qstate->kevent_watermark);
                        assert(qstate->io_buffer != NULL);
                        memset(qstate->io_buffer, 0, qstate->kevent_watermark);

                        qstate->io_buffer_p = qstate->io_buffer;
                        qstate->io_buffer_size = qstate->kevent_watermark;
                        qstate->io_buffer_filter = qstate->kevent_filter;

                        qstate->write_func = query_io_buffer_write;
                        qstate->read_func = query_io_buffer_read;

                        if (qstate->kevent_filter == EVFILT_READ)
                                qstate->use_alternate_io = 1;

                        qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
                        EV_SET(&eventlist[1], event_data->ident,
                                qstate->kevent_filter, EV_ADD | EV_ONESHOT,
                                NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
                } else {
                        EV_SET(&eventlist[1], event_data->ident,
                                qstate->kevent_filter, EV_ADD | EV_ONESHOT,
                                NOTE_LOWAT, qstate->kevent_watermark, qstate);
                }
        } else {
                if (qstate->io_buffer + qstate->io_buffer_size -
                        qstate->io_buffer_p <
                        MAX_SOCKET_IO_SIZE) {
                        qstate->io_buffer_watermark = qstate->io_buffer +
                                qstate->io_buffer_size - qstate->io_buffer_p;
                        EV_SET(&eventlist[1], event_data->ident,
                                qstate->io_buffer_filter,
                                EV_ADD | EV_ONESHOT, NOTE_LOWAT,
                                qstate->io_buffer_watermark,
                                qstate);
                } else {
                        qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
                        EV_SET(&eventlist[1], event_data->ident,
                                qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
                                NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
                }
        }
        EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
                EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
        kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);

        TRACE_OUT(process_socket_event);
}

/*
 * This routine is called when a timer event has been signaled in the kqueue.
 * It just closes the socket and destroys the query_state.
 */
static void
process_timer_event(struct kevent *event_data, struct runtime_env *env,
        struct configuration *config)
{
        struct query_state      *qstate;

        TRACE_IN(process_timer_event);
        qstate = (struct query_state *)event_data->udata;
        destroy_query_state(qstate);
        close(event_data->ident);
        TRACE_OUT(process_timer_event);
}

/*
 * The processing loop is the basic processing routine that forms the body of
 * each processing thread.
 */
static void
processing_loop(cache the_cache, struct runtime_env *env,
        struct configuration *config)
{
        struct timespec timeout;
        const int eventlist_size = 1;
        struct kevent eventlist[eventlist_size];
        int nevents, i;

        TRACE_MSG("=> processing_loop");
        memset(&timeout, 0, sizeof(struct timespec));
        memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);

        for (;;) {
                nevents = kevent(env->queue, NULL, 0, eventlist,
                        eventlist_size, NULL);
                /*
                 * we can only receive 1 event on success
                 */
                if (nevents == 1) {
                        struct kevent *event_data;
                        event_data = &eventlist[0];

                        if (event_data->ident == env->sockfd) {
                                for (i = 0; i < event_data->data; ++i)
                                    accept_connection(event_data, env, config);

                                EV_SET(eventlist, s_runtime_env->sockfd,
                                    EVFILT_READ, EV_ADD | EV_ONESHOT,
                                    0, 0, 0);
                                memset(&timeout, 0,
                                    sizeof(struct timespec));
                                kevent(s_runtime_env->queue, eventlist,
                                    1, NULL, 0, &timeout);

                        } else {
                                switch (event_data->filter) {
                                case EVFILT_READ:
                                case EVFILT_WRITE:
                                        process_socket_event(event_data,
                                                env, config);
                                        break;
                                case EVFILT_TIMER:
                                        process_timer_event(event_data,
                                                env, config);
                                        break;
                                default:
                                        break;
                                }
                        }
                } else {
                        /* this branch shouldn't be currently executed */
                }
        }

        TRACE_MSG("<= processing_loop");
}

/*
 * Wrapper around the processing loop function. It sets the thread signal mask
 * to avoid SIGPIPE signals (which can happen if the client works incorrectly).
 */
static void *
processing_thread(void *data)
{
        struct processing_thread_args   *args;
        sigset_t new;

        TRACE_MSG("=> processing_thread");
        args = (struct processing_thread_args *)data;

        sigemptyset(&new);
        sigaddset(&new, SIGPIPE);
        if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
                LOG_ERR_1("processing thread",
                        "thread can't block the SIGPIPE signal");

        processing_loop(args->the_cache, args->the_runtime_env,
                args->the_configuration);
        free(args);
        TRACE_MSG("<= processing_thread");

        return (NULL);
}

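/*
 * Monotonic time source used for cache entry and query lifetimes.  Only
 * one-second resolution is needed, so tv_usec is always set to zero.
 */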
void
get_time_func(struct timeval *time)
{
        struct timespec res;
        memset(&res, 0, sizeof(struct timespec));
        clock_gettime(CLOCK_MONOTONIC, &res);

        time->tv_sec = res.tv_sec;
        time->tv_usec = 0;
}

/*
 * The idea of _nss_cache_cycle_prevention_function is that nsdispatch will
 * search for this symbol in the executable; its presence marks the process
 * as the caching daemon itself. So, if the symbol exists, nsdispatch won't
 * try to connect to the caching daemon and will just ignore the 'cache'
 * source in nsswitch.conf. This prevents the daemon's own lookups from being
 * routed back to itself.
 */
void
_nss_cache_cycle_prevention_function(void)
{
}

int
main(int argc, char *argv[])
{
        struct processing_thread_args *thread_args;
        pthread_t *threads;

        struct pidfh *pidfile;
        pid_t pid;

        char const *config_file;
        char const *error_str;
        int error_line;
        int i, res;

        int trace_mode_enabled;
        int force_single_threaded;
        int do_not_daemonize;
        int clear_user_cache_entries, clear_all_cache_entries;
        char *user_config_entry_name, *global_config_entry_name;
        int show_statistics;
        int daemon_mode, interactive_mode;


        /* by default all debug messages are omitted */
        TRACE_OFF();

        /* startup output */
        print_version_info();

        /* parsing command line arguments */
        trace_mode_enabled = 0;
        force_single_threaded = 0;
        do_not_daemonize = 0;
        clear_user_cache_entries = 0;
        clear_all_cache_entries = 0;
        show_statistics = 0;
        user_config_entry_name = NULL;
        global_config_entry_name = NULL;
        while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
                switch (res) {
                case 'n':
                        do_not_daemonize = 1;
                        break;
                case 's':
                        force_single_threaded = 1;
                        break;
                case 't':
                        trace_mode_enabled = 1;
                        break;
                case 'i':
                        clear_user_cache_entries = 1;
                        if (optarg != NULL)
                                if (strcmp(optarg, "all") != 0)
                                        user_config_entry_name = strdup(optarg);
                        break;
                case 'I':
                        clear_all_cache_entries = 1;
                        if (optarg != NULL)
                                if (strcmp(optarg, "all") != 0)
                                        global_config_entry_name =
                                                strdup(optarg);
                        break;
                case 'd':
                        show_statistics = 1;
                        break;
                case '?':
                default:
                        usage();
                        /* NOT REACHED */
                }
        }

        daemon_mode = do_not_daemonize | force_single_threaded |
                trace_mode_enabled;
        interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
                show_statistics;

        if ((daemon_mode != 0) && (interactive_mode != 0)) {
                LOG_ERR_1("main", "daemon mode and interactive_mode arguments "
                        "can't be used together");
                usage();
        }

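        /*
         * Interactive mode: read the running daemon's PID from the pidfile,
         * connect to its socket and ask it to perform the requested cache
         * transformations, then exit.
         */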
        if (interactive_mode != 0) {
                FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
                char pidbuf[256];

                struct nscd_connection_params connection_params;
                nscd_connection connection;

                int result;

                if (pidfin == NULL)
                        errx(EXIT_FAILURE, "There is no daemon running.");

                memset(pidbuf, 0, sizeof(pidbuf));
                fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
                if (ferror(pidfin) != 0)
                        errx(EXIT_FAILURE, "Can't read from pidfile.");
                fclose(pidfin);

                if (sscanf(pidbuf, "%d", &pid) != 1)
                        errx(EXIT_FAILURE, "Invalid pidfile.");
                LOG_MSG_1("main", "daemon PID is %d", pid);


                memset(&connection_params, 0,
                        sizeof(struct nscd_connection_params));
                connection_params.socket_path = DEFAULT_SOCKET_PATH;
                connection = open_nscd_connection__(&connection_params);
                if (connection == INVALID_NSCD_CONNECTION)
                        errx(EXIT_FAILURE, "Can't connect to the daemon.");

                if (clear_user_cache_entries != 0) {
                        result = nscd_transform__(connection,
                                user_config_entry_name, TT_USER);
                        if (result != 0)
                                LOG_MSG_1("main",
                                        "user cache transformation failed");
                        else
                                LOG_MSG_1("main",
                                        "user cache transformation "
                                        "succeeded");
                }

                if (clear_all_cache_entries != 0) {
                        if (geteuid() != 0)
                                errx(EXIT_FAILURE, "Only root can initiate "
                                        "global cache transformation.");

                        result = nscd_transform__(connection,
                                global_config_entry_name, TT_ALL);
                        if (result != 0)
                                LOG_MSG_1("main",
                                        "global cache transformation "
                                        "failed");
                        else
                                LOG_MSG_1("main",
                                        "global cache transformation "
                                        "succeeded");
                }

                close_nscd_connection__(connection);

                free(user_config_entry_name);
                free(global_config_entry_name);
                return (EXIT_SUCCESS);
        }

        pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
        if (pidfile == NULL) {
                if (errno == EEXIST)
                        errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
                                pid);
                warn("Cannot open or create pidfile");
        }

        if (trace_mode_enabled == 1)
                TRACE_ON();

        /* blocking the main thread from receiving SIGPIPE signal */
        sigblock(sigmask(SIGPIPE));

        /* daemonization */
        if (do_not_daemonize == 0) {
                res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
                if (res != 0) {
                        LOG_ERR_1("main", "can't daemonize myself: %s",
                                strerror(errno));
                        pidfile_remove(pidfile);
                        goto fin;
                } else
                        LOG_MSG_1("main", "successfully daemonized");
        }

        pidfile_write(pidfile);

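        /*
         * Register the built-in lookup agents (passwd, group and services),
         * in both their regular and multipart variants.
         */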
        s_agent_table = init_agent_table();
        register_agent(s_agent_table, init_passwd_agent());
        register_agent(s_agent_table, init_passwd_mp_agent());
        register_agent(s_agent_table, init_group_agent());
        register_agent(s_agent_table, init_group_mp_agent());
        register_agent(s_agent_table, init_services_agent());
        register_agent(s_agent_table, init_services_mp_agent());
        LOG_MSG_1("main", "request agents registered successfully");

        /*
         * Hosts agent can't work properly until we have access to the
         * appropriate dtab structures, which are used in nsdispatch
         * calls
         *
         register_agent(s_agent_table, init_hosts_agent());
        */

        /* configuration initialization */
        s_configuration = init_configuration();
        fill_configuration_defaults(s_configuration);

        error_str = NULL;
        error_line = 0;
        config_file = CONFIG_PATH;

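        /*
         * Parse the system-wide configuration file first; if it can't be
         * read at all (no parse error was reported), fall back to
         * DEFAULT_CONFIG_PATH in the current directory.
         */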
        res = parse_config_file(s_configuration, config_file, &error_str,
                &error_line);
        if ((res != 0) && (error_str == NULL)) {
                config_file = DEFAULT_CONFIG_PATH;
                res = parse_config_file(s_configuration, config_file,
                        &error_str, &error_line);
        }

        if (res != 0) {
                if (error_str != NULL) {
                        LOG_ERR_1("main",
                            "error in configuration file(%s, %d): %s\n",
                            config_file, error_line, error_str);
                } else {
                        LOG_ERR_1("main", "no configuration file found "
                            "- was looking for %s and %s",
                            CONFIG_PATH, DEFAULT_CONFIG_PATH);
                }
                destroy_configuration(s_configuration);
                return (-1);
        }

        if (force_single_threaded == 1)
                s_configuration->threads_num = 1;

        /* cache initialization */
        s_cache = init_cache_(s_configuration);
        if (s_cache == NULL) {
                LOG_ERR_1("main", "can't initialize the cache");
                destroy_configuration(s_configuration);
                return (-1);
        }

        /* runtime environment initialization */
        s_runtime_env = init_runtime_env(s_configuration);
        if (s_runtime_env == NULL) {
                LOG_ERR_1("main", "can't initialize the runtime environment");
                destroy_configuration(s_configuration);
                destroy_cache_(s_cache);
                return (-1);
        }

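        /*
         * In multi-threaded mode every worker thread runs processing_loop()
         * on the shared kqueue and the main thread just waits for them to
         * finish; otherwise the main thread runs the loop itself.
         */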
        if (s_configuration->threads_num > 1) {
                threads = (pthread_t *)malloc(sizeof(pthread_t) *
                        s_configuration->threads_num);
                memset(threads, 0, sizeof(pthread_t) *
                        s_configuration->threads_num);
                for (i = 0; i < s_configuration->threads_num; ++i) {
                        thread_args = (struct processing_thread_args *)malloc(
                                sizeof(struct processing_thread_args));
                        thread_args->the_cache = s_cache;
                        thread_args->the_runtime_env = s_runtime_env;
                        thread_args->the_configuration = s_configuration;

                        LOG_MSG_1("main", "thread #%d was successfully created",
                                i);
                        pthread_create(&threads[i], NULL, processing_thread,
                                thread_args);

                        thread_args = NULL;
                }

                for (i = 0; i < s_configuration->threads_num; ++i)
                        pthread_join(threads[i], NULL);
        } else {
                LOG_MSG_1("main", "working in single-threaded mode");
                processing_loop(s_cache, s_runtime_env, s_configuration);
        }

fin:
        /* runtime environment destruction */
        destroy_runtime_env(s_runtime_env);

        /* cache destruction */
        destroy_cache_(s_cache);

        /* configuration destruction */
        destroy_configuration(s_configuration);

        /* agents table destruction */
        destroy_agent_table(s_agent_table);

        pidfile_remove(pidfile);
        return (EXIT_SUCCESS);
}