5 #include "ntp_workimpl.h"
14 #include "ntp_stdlib.h"
15 #include "ntp_malloc.h"
16 #include "ntp_syslog.h"
19 #include "ntp_assert.h"
20 #include "ntp_unixtime.h"
21 #include "intreswork.h"
/*
 * Module-wide state for managing blocking (worker) children.
 */
24 #define CHILD_MAX_IDLE (3 * 60) /* seconds, idle worker limit */
/* dynamically-grown table of pointers to blocking_child records */
26 blocking_child ** blocking_children;
/* number of slots currently allocated in blocking_children[] */
27 size_t blocking_children_alloc;
/* nonzero -> use a fresh worker per request instead of one shared child */
28 int worker_per_query; /* boolean */
/* count of requests sent to the resolver child but not yet answered */
29 int intres_req_pending;
32 #ifndef HAVE_IO_COMPLETION_PORT
36  * Provides an AF_UNIX socketpair on systems which have them, otherwise
37  * pair of unidirectional pipes.
/*
 * NOTE(review): fragment — the enclosing function's signature and the
 * pipe() fallback branch are elided from this excerpt.
 */
49 #ifdef HAVE_SOCKETPAIR
50 rc = socketpair(AF_UNIX, SOCK_STREAM, 0, &fds[0]);
/* hand the resulting descriptor pair back to the caller */
65 caller_fds[0] = fds[0];
66 caller_fds[1] = fds[1];
/* called_pipe presumably records whether the pipe() fallback ran
 * (so the caller knows the fds are unidirectional) — confirm against
 * the elided branch above. */
68 *is_pipe = called_pipe;
77  * Close all file descriptors except the given keep_fd.
/* close every descriptor numerically below keep_fd... */
86 for (fd = 0; fd < keep_fd; fd++)
/* ...then delegate closing everything above keep_fd */
89 close_all_beyond(keep_fd);
96  * Close all file descriptors after the given keep_fd, which is the
97  * highest fd to keep open.
104 # ifdef HAVE_CLOSEFROM
/* BSD/Solaris/Linux: closefrom() closes all fds >= its argument in one call */
105 closefrom(keep_fd + 1);
106 # elif defined(F_CLOSEM)
108  * From 'Writing Reliable AIX Daemons,' SG24-4946-00,
109  * by Eric Agar (saves us from doing 32767 system
/* AIX: one fcntl(F_CLOSEM) closes every fd from keep_fd + 1 upward */
112 if (fcntl(keep_fd + 1, F_CLOSEM, 0) == -1)
113 msyslog(LOG_ERR, "F_CLOSEM(%d): %m", keep_fd + 1);
114 # else /* !HAVE_CLOSEFROM && !F_CLOSEM follows */
/* portable fallback: close each descriptor up to the table size
 * (loop body elided from this excerpt) */
118 max_fd = GETDTABLESIZE();
119 for (fd = keep_fd + 1; fd < max_fd; fd++)
121 # endif /* !HAVE_CLOSEFROM && !F_CLOSEM */
123 #endif /* HAVE_IO_COMPLETION_PORT */
/*
 * available_blocking_child_slot() — return the index of a usable slot
 * in blocking_children[]: an empty slot, an idle (reusable) child, or
 * a newly allocated slot after growing the table.
 * NOTE(review): fragment — the return type line and parts of the loop
 * body are elided from this excerpt.
 */
127 available_blocking_child_slot(void)
129 const size_t each = sizeof(blocking_children[0]);
/* first pass: look for an empty slot or an idle child to reuse */
136 for (slot = 0; slot < blocking_children_alloc; slot++) {
137 if (NULL == blocking_children[slot])
139 if (blocking_children[slot]->reusable) {
/* claim the idle child so it isn't handed out twice */
140 blocking_children[slot]->reusable = FALSE;
/* nothing free: grow the table by four entries, zero-filling the
 * new tail so the slots read as empty */
145 prev_alloc = blocking_children_alloc;
146 prev_octets = prev_alloc * each;
147 new_alloc = blocking_children_alloc + 4;
148 octets = new_alloc * each;
149 blocking_children = erealloc_zero(blocking_children, octets,
151 blocking_children_alloc = new_alloc;
/* first new slot is at the old allocation count */
153 /* assume we'll never have enough workers to overflow u_int */
154 return (u_int)prev_alloc;
/*
 * queue_blocking_request() — marshal a request header and dispatch the
 * request to a blocking worker child, allocating/creating the child's
 * bookkeeping record first if needed.
 * NOTE(review): fragment — parts of the parameter list and body are
 * elided from this excerpt.
 */
159 queue_blocking_request(
160 blocking_work_req rtype,
163 blocking_work_callback done_func,
/* remembers which slot the single shared resolver child occupies
 * across calls; UINT_MAX means none yet */
167 static u_int intres_slot = UINT_MAX;
170 blocking_pipe_header req_hdr;
/* build the fixed header that precedes the request payload */
172 req_hdr.octets = sizeof(req_hdr) + reqsize;
173 req_hdr.magic_sig = BLOCKING_REQ_MAGIC;
174 req_hdr.rtype = rtype;
175 req_hdr.done_func = done_func;
176 req_hdr.context = context;
/* pick a child slot: a fresh one per query, or (re)establish the
 * shared resolver child when it is missing or has gone idle */
178 child_slot = UINT_MAX;
179 if (worker_per_query || UINT_MAX == intres_slot ||
180 blocking_children[intres_slot]->reusable)
181 child_slot = available_blocking_child_slot();
182 if (!worker_per_query) {
183 if (UINT_MAX == intres_slot)
184 intres_slot = child_slot;
186 child_slot = intres_slot;
/* first pending request: cancel any scheduled idle-child teardown */
187 if (0 == intres_req_pending)
188 intres_timeout_req(0);
190 intres_req_pending++;
191 INSIST(UINT_MAX != child_slot);
192 c = blocking_children[child_slot];
/* lazily create the child record, with all pipe fds marked invalid */
194 c = emalloc_zero(sizeof(*c));
196 c->req_read_pipe = -1;
197 c->req_write_pipe = -1;
200 c->resp_read_pipe = -1;
201 c->resp_write_pipe = -1;
203 blocking_children[child_slot] = c;
/* tell the child which slot it occupies, then send header + payload */
205 req_hdr.child_idx = child_slot;
207 return send_blocking_req_internal(c, &req_hdr, req);
/*
 * queue_blocking_response() — called in the worker child: fill in a
 * response header that mirrors the originating request's routing
 * fields (rtype, context, done_func), then send it back to the parent.
 * NOTE(review): fragment — parts of the parameter list and body are
 * elided from this excerpt.
 */
211 int queue_blocking_response(
213 blocking_pipe_header * resp,
215 const blocking_pipe_header * req
/* total size of header + payload being returned */
218 resp->octets = respsize;
219 resp->magic_sig = BLOCKING_RESP_MAGIC;
/* copy the request's routing info so the parent can invoke the
 * right completion callback with the right context */
220 resp->rtype = req->rtype;
221 resp->context = req->context;
222 resp->done_func = req->done_func;
224 return send_blocking_resp_internal(c, resp);
/*
 * process_blocking_resp() — parent side: drain every queued response
 * from a worker child and invoke each response's completion callback.
 * NOTE(review): fragment — the signature's return type and parts of
 * the loop are elided from this excerpt.
 */
229 process_blocking_resp(
233 blocking_pipe_header * resp;
237 * On Windows send_blocking_resp_internal() may signal the
238 * blocking_response_ready event multiple times while we're
239 * processing a response, so always consume all available
240 * responses before returning to test the event again.
245 resp = receive_blocking_resp_internal(c);
247 DEBUG_REQUIRE(BLOCKING_RESP_MAGIC ==
/* payload immediately follows the fixed header */
249 data = (char *)resp + sizeof(*resp);
250 intres_req_pending--;
/* hand the payload (total octets minus header) to the callback
 * recorded when the request was queued */
251 (*resp->done_func)(resp->rtype, resp->context,
252 resp->octets - sizeof(*resp),
257 } while (NULL != resp);
/* last answer for the shared resolver child: start its idle timer;
 * per-query workers are presumably torn down instead (branch body
 * elided from this excerpt) */
259 if (!worker_per_query && 0 == intres_req_pending)
260 intres_timeout_req(CHILD_MAX_IDLE);
261 else if (worker_per_query)
267  * blocking_child_common runs as a forked child or a thread
270 blocking_child_common(
275 blocking_pipe_header *req;
/* worker main loop: block for the next request from the parent */
279 req = receive_blocking_req_internal(c);
285 DEBUG_REQUIRE(BLOCKING_REQ_MAGIC == req->magic_sig);
/* dispatch on request type; the handlers' nonzero-return handling
 * is elided from this excerpt */
287 switch (req->rtype) {
288 case BLOCKING_GETADDRINFO:
289 if (blocking_getaddrinfo(c, req))
293 case BLOCKING_GETNAMEINFO:
294 if (blocking_getnameinfo(c, req))
/* unknown request type: log and carry on */
299 msyslog(LOG_ERR, "unknown req %d to blocking worker", req->rtype);
311  * worker_idle_timer_fired()
313  * The parent starts this timer when the last pending response has been
314  * received from the child, making it idle, and clears the timer when a
315  * request is dispatched to the child. Once the timer expires, the
316  * child is sent packing.
318  * This is called when worker_idle_timer is nonzero and less than or
319  * equal to current_time.
322 worker_idle_timer_fired(void)
/* should only fire when no responses are outstanding */
327 DEBUG_REQUIRE(0 == intres_req_pending);
/* cancel the timer, then retire idle children; the per-child
 * teardown inside the loop is elided from this excerpt */
329 intres_timeout_req(0);
330 for (idx = 0; idx < blocking_children_alloc; idx++) {
331 c = blocking_children[idx];
339 #else /* !WORKER follows */
/* ISO C forbids an empty translation unit; this dummy definition
 * keeps the file non-empty when WORKER support is compiled out */
340 int ntp_worker_nonempty_compilation_unit;