2 * work_fork.c - fork implementation for blocking worker child.
5 #include "ntp_workimpl.h"
14 #include "ntp_stdlib.h"
15 #include "ntp_malloc.h"
16 #include "ntp_syslog.h"
19 #include "ntp_assert.h"
20 #include "ntp_unixtime.h"
21 #include "ntp_worker.h"
23 /* === variables === */
/* Hook installed by the I/O layer.  The parent calls it to register the
 * worker's response-pipe descriptor with the main event loop when a child
 * is forked, and to unregister it again in cleanup_after_child(). */
25 addremove_io_fd_func addremove_io_fd;
/* Set to 1 from the SIGHUP handler (worker_sighup) to interrupt a sleeping
 * worker child; polled and cleared in the worker sleep loop.  volatile
 * because it is written from signal-handler context. */
26 static volatile int worker_sighup_received;
28 /* === function prototypes === */
29 static void fork_blocking_child(blocking_child *); /* fork one worker, wire up pipes */
30 static RETSIGTYPE worker_sighup(int); /* SIGHUP handler: flags worker_sighup_received */
31 static void send_worker_home_atexit(void); /* atexit hook: close request pipes so children exit */
32 static void cleanup_after_child(blocking_child *); /* parent-side teardown after a child exits */
34 /* === functions === */
38 * On some systems _exit() is preferred to exit() for forked children.
39 * For example, http://netbsd.gw.com/cgi-bin/man-cgi?fork++NetBSD-5.0
40 * recommends _exit() to avoid double-flushing C runtime stream buffers
41 * and also to avoid calling the parent's atexit() routines in the
42 * child. On those systems WORKER_CHILD_EXIT is _exit. Since _exit
43 * bypasses CRT cleanup, fflush() files we know might have output
51 if (syslog_file != NULL)
55 WORKER_CHILD_EXIT (exitcode); /* space before ( required */
65 worker_sighup_received = 1;
77 sleep_remain = (u_int)seconds;
79 if (!worker_sighup_received)
80 sleep_remain = sleep(sleep_remain);
81 if (worker_sighup_received) {
82 TRACE(1, ("worker SIGHUP with %us left to sleep",
84 worker_sighup_received = 0;
87 } while (sleep_remain);
94 interrupt_worker_sleep(void)
100 for (idx = 0; idx < blocking_children_alloc; idx++) {
101 c = blocking_children[idx];
103 if (NULL == c || c->reusable == TRUE)
106 rc = kill(c->pid, SIGHUP);
109 "Unable to signal HUP to wake child pid %d: %m",
116 * harvest_child_status() runs in the parent.
118 * Note the error handling -- this is an interaction with SIGCHLD.
119 * SIG_IGN on SIGCHLD on some OSes means do not wait but reap
120 * automatically. Since we're not really interested in the result code,
121 * we simply ignore the error.
124 harvest_child_status(
129 /* Wait on the child so it can finish terminating */
130 if (waitpid(c->pid, NULL, 0) == c->pid)
131 TRACE(4, ("harvested child %d\n", c->pid));
132 else if (errno != ECHILD)
133 msyslog(LOG_ERR, "error waiting on child %d: %m", c->pid);
139 * req_child_exit() runs in the parent.
146 if (-1 != c->req_write_pipe) {
147 close(c->req_write_pipe);
148 c->req_write_pipe = -1;
151 /* Closing the pipe forces the child to exit */
152 harvest_child_status(c);
158 * cleanup_after_child() runs in parent.
165 harvest_child_status(c);
166 if (-1 != c->resp_read_pipe) {
167 (*addremove_io_fd)(c->resp_read_pipe, c->ispipe, TRUE);
168 close(c->resp_read_pipe);
169 c->resp_read_pipe = -1;
171 c->resp_read_ctx = NULL;
172 DEBUG_INSIST(-1 == c->req_read_pipe);
173 DEBUG_INSIST(-1 == c->resp_write_pipe);
179 send_worker_home_atexit(void)
187 for (idx = 0; idx < blocking_children_alloc; idx++) {
188 c = blocking_children[idx];
197 send_blocking_req_internal(
199 blocking_pipe_header * hdr,
206 DEBUG_REQUIRE(hdr != NULL);
207 DEBUG_REQUIRE(data != NULL);
208 DEBUG_REQUIRE(BLOCKING_REQ_MAGIC == hdr->magic_sig);
210 if (-1 == c->req_write_pipe) {
211 fork_blocking_child(c);
212 DEBUG_INSIST(-1 != c->req_write_pipe);
215 octets = sizeof(*hdr);
216 rc = write(c->req_write_pipe, hdr, octets);
219 octets = hdr->octets - sizeof(*hdr);
220 rc = write(c->req_write_pipe, data, octets);
228 "send_blocking_req_internal: pipe write: %m");
231 "send_blocking_req_internal: short write %d of %d",
234 /* Fatal error. Clean up the child process. */
236 exit(1); /* otherwise would be return -1 */
240 blocking_pipe_header *
241 receive_blocking_req_internal(
245 blocking_pipe_header hdr;
246 blocking_pipe_header * req;
250 DEBUG_REQUIRE(-1 != c->req_read_pipe);
255 rc = read(c->req_read_pipe, &hdr, sizeof(hdr));
256 } while (rc < 0 && EINTR == errno);
260 "receive_blocking_req_internal: pipe read %m");
261 } else if (0 == rc) {
262 TRACE(4, ("parent closed request pipe, child %d terminating\n",
264 } else if (rc != sizeof(hdr)) {
266 "receive_blocking_req_internal: short header read %d of %lu",
267 rc, (u_long)sizeof(hdr));
269 INSIST(sizeof(hdr) < hdr.octets && hdr.octets < 4 * 1024);
270 req = emalloc(hdr.octets);
271 memcpy(req, &hdr, sizeof(*req));
272 octets = hdr.octets - sizeof(hdr);
273 rc = read(c->req_read_pipe, (char *)req + sizeof(*req),
278 "receive_blocking_req_internal: pipe data read %m");
279 else if (rc != octets)
281 "receive_blocking_req_internal: short read %d of %ld",
283 else if (BLOCKING_REQ_MAGIC != req->magic_sig)
285 "receive_blocking_req_internal: packet header mismatch (0x%x)",
299 send_blocking_resp_internal(
301 blocking_pipe_header * resp
307 DEBUG_REQUIRE(-1 != c->resp_write_pipe);
309 octets = resp->octets;
310 rc = write(c->resp_write_pipe, resp, octets);
317 TRACE(1, ("send_blocking_resp_internal: pipe write %m\n"));
319 TRACE(1, ("send_blocking_resp_internal: short write %d of %ld\n",
326 blocking_pipe_header *
327 receive_blocking_resp_internal(
331 blocking_pipe_header hdr;
332 blocking_pipe_header * resp;
336 DEBUG_REQUIRE(c->resp_read_pipe != -1);
339 rc = read(c->resp_read_pipe, &hdr, sizeof(hdr));
342 TRACE(1, ("receive_blocking_resp_internal: pipe read %m\n"));
343 } else if (0 == rc) {
344 /* this is the normal child exited indication */
345 } else if (rc != sizeof(hdr)) {
346 TRACE(1, ("receive_blocking_resp_internal: short header read %d of %lu\n",
347 rc, (u_long)sizeof(hdr)));
348 } else if (BLOCKING_RESP_MAGIC != hdr.magic_sig) {
349 TRACE(1, ("receive_blocking_resp_internal: header mismatch (0x%x)\n",
352 INSIST(sizeof(hdr) < hdr.octets &&
353 hdr.octets < 16 * 1024);
354 resp = emalloc(hdr.octets);
355 memcpy(resp, &hdr, sizeof(*resp));
356 octets = hdr.octets - sizeof(hdr);
357 rc = read(c->resp_read_pipe,
358 (char *)resp + sizeof(*resp),
362 TRACE(1, ("receive_blocking_resp_internal: pipe data read %m\n"));
363 else if (rc < octets)
364 TRACE(1, ("receive_blocking_resp_internal: short read %d of %ld\n",
370 cleanup_after_child(c);
379 #if defined(HAVE_DROPROOT) && defined(WORK_FORK)
381 fork_deferred_worker(void)
386 REQUIRE(droproot && root_dropped);
388 for (idx = 0; idx < blocking_children_alloc; idx++) {
389 c = blocking_children[idx];
392 if (-1 != c->req_write_pipe && 0 == c->pid)
393 fork_blocking_child(c);
404 static int atexit_installed;
405 static int blocking_pipes[4] = { -1, -1, -1, -1 };
415 * parent and child communicate via a pair of pipes.
417 * 0 child read request
418 * 1 parent write request
419 * 2 parent read response
420 * 3 child write response
422 if (-1 == c->req_write_pipe) {
423 rc = pipe_socketpair(&blocking_pipes[0], &was_pipe);
427 rc = pipe_socketpair(&blocking_pipes[2], &is_pipe);
430 close(blocking_pipes[0]);
431 close(blocking_pipes[1]);
433 INSIST(was_pipe == is_pipe);
438 msyslog(LOG_ERR, "unable to create worker pipes: %m");
443 * Move the descriptors the parent will keep open out of the
444 * low descriptors preferred by C runtime buffered FILE *.
446 c->req_write_pipe = move_fd(blocking_pipes[1]);
447 c->resp_read_pipe = move_fd(blocking_pipes[2]);
449 * wake any worker child on orderly shutdown of the
450 * daemon so that it can notice the broken pipes and
453 if (!atexit_installed) {
454 atexit(&send_worker_home_atexit);
455 atexit_installed = TRUE;
459 #if defined(HAVE_DROPROOT) && !defined(NEED_EARLY_FORK)
460 /* defer the fork until after root is dropped */
461 if (droproot && !root_dropped)
464 if (syslog_file != NULL)
469 /* [BUG 3050] setting SIGCHLD to SIG_IGN likely causes unwanted
470 * or undefined effects. We don't do it and leave SIGCHLD alone.
472 /* signal_no_reset(SIGCHLD, SIG_IGN); */
475 if (-1 == childpid) {
476 msyslog(LOG_ERR, "unable to fork worker: %m");
481 /* this is the parent */
482 TRACE(1, ("forked worker child (pid %d)\n", childpid));
486 /* close the child's pipe descriptors. */
487 close(blocking_pipes[0]);
488 close(blocking_pipes[3]);
490 memset(blocking_pipes, -1, sizeof(blocking_pipes));
492 /* wire into I/O loop */
493 (*addremove_io_fd)(c->resp_read_pipe, is_pipe, FALSE);
495 return; /* parent returns */
499 * The parent gets the child pid as the return value of fork().
500 * The child must work for it.
503 worker_process = TRUE;
506 * In the child, close all files except stdin, stdout, stderr,
507 * and the two child ends of the pipes.
509 DEBUG_INSIST(-1 == c->req_read_pipe);
510 DEBUG_INSIST(-1 == c->resp_write_pipe);
511 c->req_read_pipe = blocking_pipes[0];
512 c->resp_write_pipe = blocking_pipes[3];
516 if (syslog_file != NULL) {
521 keep_fd = max(c->req_read_pipe, c->resp_write_pipe);
522 for (fd = 3; fd < keep_fd; fd++)
523 if (fd != c->req_read_pipe &&
524 fd != c->resp_write_pipe)
526 close_all_beyond(keep_fd);
528 * We get signals from refclock serial I/O on NetBSD in the
529 * worker if we do not reset SIGIO's handler to the default.
530 * It is not conditionalized for NetBSD alone because on
531 * systems where it is not needed, it is harmless, and that
532 * allows us to handle unknown others with NetBSD behavior.
535 #if defined(USE_SIGIO)
536 signal_no_reset(SIGIO, SIG_DFL);
537 #elif defined(USE_SIGPOLL)
538 signal_no_reset(SIGPOLL, SIG_DFL);
540 signal_no_reset(SIGHUP, worker_sighup);
541 init_logging("ntp_intres", 0, FALSE);
545 * And now back to the portable code
547 exit_worker(blocking_child_common(c));
551 void worker_global_lock(int inOrOut)
556 #else /* !WORK_FORK follows */
557 char work_fork_nonempty_compilation_unit;