2 * work_fork.c - fork implementation for blocking worker child.
5 #include "ntp_workimpl.h"
14 #include "ntp_stdlib.h"
15 #include "ntp_malloc.h"
16 #include "ntp_syslog.h"
19 #include "ntp_assert.h"
20 #include "ntp_unixtime.h"
21 #include "ntp_worker.h"
23 /* === variables === */
25 addremove_io_fd_func addremove_io_fd;
26 static volatile int worker_sighup_received;
30 /* === function prototypes === */
31 static void fork_blocking_child(blocking_child *);
32 static RETSIGTYPE worker_sighup(int);
33 static void send_worker_home_atexit(void);
34 static void cleanup_after_child(blocking_child *);
36 /* === I/O helpers === */
37 /* Since we have signals enabled, there's a good chance that blocking IO
38 * via pipe suffers from EINTR -- and this goes for both directions.
39 * The next two wrappers will loop until either all the data is written
40 * or read, plus handling the EOF condition on read. They may return
41 * zero if no data was transferred at all, and effectively every return
42 * value that differs from the given transfer length signifies an error
61 } else if (r == 0 || errno != EINTR) {
65 return (size_t)(b - (char *)vb);
84 } else if (errno != EINTR) {
88 return (size_t)(b - (const char *)vb);
92 #if defined(HAVE_DROPROOT)
93 extern int set_user_group_ids(void);
96 /* === functions === */
100 * On some systems _exit() is preferred to exit() for forked children.
101 * For example, http://netbsd.gw.com/cgi-bin/man-cgi?fork++NetBSD-5.0
102 * recommends _exit() to avoid double-flushing C runtime stream buffers
103 * and also to avoid calling the parent's atexit() routines in the
104 * child. On those systems WORKER_CHILD_EXIT is _exit. Since _exit
105 * bypasses CRT cleanup, fflush() files we know might have output
113 if (syslog_file != NULL)
117 WORKER_CHILD_EXIT (exitcode); /* space before ( required */
127 worker_sighup_received = 1;
139 sleep_remain = (u_int)seconds;
141 if (!worker_sighup_received)
142 sleep_remain = sleep(sleep_remain);
143 if (worker_sighup_received) {
144 TRACE(1, ("worker SIGHUP with %us left to sleep",
146 worker_sighup_received = 0;
149 } while (sleep_remain);
156 interrupt_worker_sleep(void)
162 for (idx = 0; idx < blocking_children_alloc; idx++) {
163 c = blocking_children[idx];
165 if (NULL == c || c->reusable == TRUE)
168 rc = kill(c->pid, SIGHUP);
171 "Unable to signal HUP to wake child pid %d: %m",
178 * harvest_child_status() runs in the parent.
180 * Note the error handling -- this is an interaction with SIGCHLD.
181 * SIG_IGN on SIGCHLD on some OSes means do not wait but reap
182 * automatically. Since we're not really interested in the result code,
183 * we simply ignore the error.
186 harvest_child_status(
191 /* Wait on the child so it can finish terminating */
192 if (waitpid(c->pid, NULL, 0) == c->pid)
193 TRACE(4, ("harvested child %d\n", c->pid));
194 else if (errno != ECHILD)
195 msyslog(LOG_ERR, "error waiting on child %d: %m", c->pid);
201 * req_child_exit() runs in the parent.
208 if (-1 != c->req_write_pipe) {
209 close(c->req_write_pipe);
210 c->req_write_pipe = -1;
213 /* Closing the pipe forces the child to exit */
214 harvest_child_status(c);
220 * cleanup_after_child() runs in parent.
227 harvest_child_status(c);
228 if (-1 != c->resp_read_pipe) {
229 (*addremove_io_fd)(c->resp_read_pipe, c->ispipe, TRUE);
230 close(c->resp_read_pipe);
231 c->resp_read_pipe = -1;
233 c->resp_read_ctx = NULL;
234 DEBUG_INSIST(-1 == c->req_read_pipe);
235 DEBUG_INSIST(-1 == c->resp_write_pipe);
241 send_worker_home_atexit(void)
249 for (idx = 0; idx < blocking_children_alloc; idx++) {
250 c = blocking_children[idx];
259 send_blocking_req_internal(
261 blocking_pipe_header * hdr,
268 DEBUG_REQUIRE(hdr != NULL);
269 DEBUG_REQUIRE(data != NULL);
270 DEBUG_REQUIRE(BLOCKING_REQ_MAGIC == hdr->magic_sig);
272 if (-1 == c->req_write_pipe) {
273 fork_blocking_child(c);
274 DEBUG_INSIST(-1 != c->req_write_pipe);
277 octets = sizeof(*hdr);
278 rc = netwrite(c->req_write_pipe, hdr, octets);
281 octets = hdr->octets - sizeof(*hdr);
282 rc = netwrite(c->req_write_pipe, data, octets);
288 "send_blocking_req_internal: short write (%zu of %zu), %m",
291 /* Fatal error. Clean up the child process. */
293 exit(1); /* otherwise would be return -1 */
297 blocking_pipe_header *
298 receive_blocking_req_internal(
302 blocking_pipe_header hdr;
303 blocking_pipe_header * req;
307 DEBUG_REQUIRE(-1 != c->req_read_pipe);
310 rc = netread(c->req_read_pipe, &hdr, sizeof(hdr));
313 TRACE(4, ("parent closed request pipe, child %d terminating\n",
315 } else if (rc != sizeof(hdr)) {
317 "receive_blocking_req_internal: short header read (%zu of %zu), %m",
320 INSIST(sizeof(hdr) < hdr.octets && hdr.octets < 4 * 1024);
321 req = emalloc(hdr.octets);
322 memcpy(req, &hdr, sizeof(*req));
323 octets = hdr.octets - sizeof(hdr);
324 rc = netread(c->req_read_pipe, (char *)(req + 1),
329 "receive_blocking_req_internal: short read (%zu of %zu), %m",
331 else if (BLOCKING_REQ_MAGIC != req->magic_sig)
333 "receive_blocking_req_internal: packet header mismatch (0x%x)",
347 send_blocking_resp_internal(
349 blocking_pipe_header * resp
355 DEBUG_REQUIRE(-1 != c->resp_write_pipe);
357 octets = resp->octets;
358 rc = netwrite(c->resp_write_pipe, resp, octets);
364 TRACE(1, ("send_blocking_resp_internal: short write (%zu of %zu), %m\n",
370 blocking_pipe_header *
371 receive_blocking_resp_internal(
375 blocking_pipe_header hdr;
376 blocking_pipe_header * resp;
380 DEBUG_REQUIRE(c->resp_read_pipe != -1);
383 rc = netread(c->resp_read_pipe, &hdr, sizeof(hdr));
386 /* this is the normal child exited indication */
387 } else if (rc != sizeof(hdr)) {
388 TRACE(1, ("receive_blocking_resp_internal: short header read (%zu of %zu), %m\n",
390 } else if (BLOCKING_RESP_MAGIC != hdr.magic_sig) {
391 TRACE(1, ("receive_blocking_resp_internal: header mismatch (0x%x)\n",
394 INSIST(sizeof(hdr) < hdr.octets &&
395 hdr.octets < 16 * 1024);
396 resp = emalloc(hdr.octets);
397 memcpy(resp, &hdr, sizeof(*resp));
398 octets = hdr.octets - sizeof(hdr);
399 rc = netread(c->resp_read_pipe, (char *)(resp + 1),
403 TRACE(1, ("receive_blocking_resp_internal: short read (%zu of %zu), %m\n",
409 cleanup_after_child(c);
418 #if defined(HAVE_DROPROOT) && defined(WORK_FORK)
420 fork_deferred_worker(void)
425 REQUIRE(droproot && root_dropped);
427 for (idx = 0; idx < blocking_children_alloc; idx++) {
428 c = blocking_children[idx];
431 if (-1 != c->req_write_pipe && 0 == c->pid)
432 fork_blocking_child(c);
443 static int atexit_installed;
444 static int blocking_pipes[4] = { -1, -1, -1, -1 };
454 * parent and child communicate via a pair of pipes.
456 * 0 child read request
457 * 1 parent write request
458 * 2 parent read response
459 * 3 child write response
461 if (-1 == c->req_write_pipe) {
462 rc = pipe_socketpair(&blocking_pipes[0], &was_pipe);
466 rc = pipe_socketpair(&blocking_pipes[2], &is_pipe);
469 close(blocking_pipes[0]);
470 close(blocking_pipes[1]);
472 INSIST(was_pipe == is_pipe);
477 msyslog(LOG_ERR, "unable to create worker pipes: %m");
482 * Move the descriptors the parent will keep open out of the
483 * low descriptors preferred by C runtime buffered FILE *.
485 c->req_write_pipe = move_fd(blocking_pipes[1]);
486 c->resp_read_pipe = move_fd(blocking_pipes[2]);
488 * wake any worker child on orderly shutdown of the
489 * daemon so that it can notice the broken pipes and
492 if (!atexit_installed) {
493 atexit(&send_worker_home_atexit);
494 atexit_installed = TRUE;
498 #if defined(HAVE_DROPROOT) && !defined(NEED_EARLY_FORK)
499 /* defer the fork until after root is dropped */
500 if (droproot && !root_dropped)
503 if (syslog_file != NULL)
508 /* [BUG 3050] setting SIGCHLD to SIG_IGN likely causes unwanted
509 * or undefined effects. We don't do it and leave SIGCHLD alone.
511 /* signal_no_reset(SIGCHLD, SIG_IGN); */
514 if (-1 == childpid) {
515 msyslog(LOG_ERR, "unable to fork worker: %m");
520 /* this is the parent */
521 TRACE(1, ("forked worker child (pid %d)\n", childpid));
525 /* close the child's pipe descriptors. */
526 close(blocking_pipes[0]);
527 close(blocking_pipes[3]);
529 memset(blocking_pipes, -1, sizeof(blocking_pipes));
531 /* wire into I/O loop */
532 (*addremove_io_fd)(c->resp_read_pipe, is_pipe, FALSE);
534 return; /* parent returns */
538 * The parent gets the child pid as the return value of fork().
539 * The child must work for it.
542 worker_process = TRUE;
545 * Change the process name of the child to avoid confusion
546 * about ntpd running twice.
548 if (saved_argc != 0) {
552 for (argcc = 0; argcc < saved_argc; argcc++) {
553 int l = strlen(saved_argv[argcc]);
555 memset(saved_argv[argcc], 0, l);
557 strlcpy(saved_argv[0], "ntpd: asynchronous dns resolver", argvlen);
561 * In the child, close all files except stdin, stdout, stderr,
562 * and the two child ends of the pipes.
564 DEBUG_INSIST(-1 == c->req_read_pipe);
565 DEBUG_INSIST(-1 == c->resp_write_pipe);
566 c->req_read_pipe = blocking_pipes[0];
567 c->resp_write_pipe = blocking_pipes[3];
571 if (syslog_file != NULL) {
576 keep_fd = max(c->req_read_pipe, c->resp_write_pipe);
577 for (fd = 3; fd < keep_fd; fd++)
578 if (fd != c->req_read_pipe &&
579 fd != c->resp_write_pipe)
581 close_all_beyond(keep_fd);
583 * We get signals from refclock serial I/O on NetBSD in the
584 * worker if we do not reset SIGIO's handler to the default.
585 * It is not conditionalized for NetBSD alone because on
586 * systems where it is not needed, it is harmless, and that
587 * allows us to handle unknown others with NetBSD behavior.
590 #if defined(USE_SIGIO)
591 signal_no_reset(SIGIO, SIG_DFL);
592 #elif defined(USE_SIGPOLL)
593 signal_no_reset(SIGPOLL, SIG_DFL);
595 signal_no_reset(SIGHUP, worker_sighup);
596 init_logging("ntp_intres", 0, FALSE);
600 (void) set_user_group_ids();
604 * And now back to the portable code
606 exit_worker(blocking_child_common(c));
610 void worker_global_lock(int inOrOut)
615 #else /* !WORK_FORK follows */
616 char work_fork_nonempty_compilation_unit;