7 #include "ntp_assert.h"
8 #include "ntp_syslog.h"
9 #include "ntp_stdlib.h"
10 #include "ntp_lists.h"
/*
 * Compile-time sanity checks on the receive-buffer tuning constants:
 * both RECV_INC and RECV_BATCH must be powers of two (the allocation
 * code below rounds sizes up with "& (RECV_INC - 1)" masks, which only
 * works for powers of two), and a batch must cover at least one
 * increment.
 * NOTE(review): the matching #endif lines are elided in this excerpt.
 */
14 #if (RECV_INC & (RECV_INC-1))
15 # error RECV_INC not a power of 2!
17 #if (RECV_BATCH & (RECV_BATCH - 1))
18 #error RECV_BATCH not a power of 2!
20 #if (RECV_BATCH < RECV_INC)
21 #error RECV_BATCH must be >= RECV_INC!
/*
 * Module-private bookkeeping for the receive-buffer pools.  Counters
 * are "volatile" — presumably because they are touched from signal/IO
 * context as well as the main thread (TODO confirm against callers).
 */
27 static u_long volatile full_recvbufs; /* recvbufs on full_recv_fifo */
28 static u_long volatile free_recvbufs; /* recvbufs on free_recv_list */
29 static u_long volatile total_recvbufs; /* total recvbufs currently in use */
30 static u_long volatile lowater_adds; /* number of times we have added memory */
31 static u_long volatile buffer_shortfall;/* number of missed free receive buffers
32 between replenishments */
33 static u_long limit_recvbufs; /* maximum total of receive buffers */
34 static u_long emerg_recvbufs; /* emergency/urgent buffers to keep */
/* FIFO of buffers holding received packets awaiting processing. */
36 static DECL_FIFO_ANCHOR(recvbuf_t) full_recv_fifo;
/* Singly-linked list of idle buffers available for reuse. */
37 static recvbuf_t * free_recv_list;
39 #if defined(SYS_WINNT)
42 * For Windows we need to set up a lock to manipulate the
43 * recv buffers to prevent corruption. We keep it lock for as
44 * short a time as possible
/* Separate locks for the full FIFO (R) and the free list (F) so the
 * two queues can be manipulated independently. */
46 static CRITICAL_SECTION RecvLock;
47 static CRITICAL_SECTION FreeLock;
48 # define LOCK_R() EnterCriticalSection(&RecvLock)
49 # define UNLOCK_R() LeaveCriticalSection(&RecvLock)
50 # define LOCK_F() EnterCriticalSection(&FreeLock)
51 # define UNLOCK_F() LeaveCriticalSection(&FreeLock)
/* Non-Windows builds: locking is a no-op.
 * NOTE(review): the intervening #else line is elided in this excerpt. */
53 # define LOCK_R() do {} while (FALSE)
54 # define UNLOCK_R() do {} while (FALSE)
55 # define LOCK_F() do {} while (FALSE)
56 # define UNLOCK_F() do {} while (FALSE)
/* Forward declaration: teardown hook registered via atexit() below. */
60 static void uninit_recvbuff(void);
/*
 * total_recvbuffs - accessor returning the total number of receive
 * buffers currently allocated (free + full + in use).
 * NOTE(review): return-type line and braces are elided in this excerpt.
 */
77 total_recvbuffs (void)
79 return total_recvbufs;
/*
 * lowater_additions - accessor, presumably returns lowater_adds (the
 * count of low-water replenishments); body elided in this excerpt.
 */
83 lowater_additions(void)
/*
 * initialise_buffer - reset a recvbuf_t before handing it out for
 * reuse; body elided in this excerpt (presumably zeroes the buffer).
 */
89 initialise_buffer(recvbuf_t *buff)
/*
 * create_buffers (name elided in this excerpt) - grow the free pool by
 * at least 'nbufs' buffers plus any recorded shortfall, in multiples
 * of 'chunk', never exceeding limit_recvbufs in total.
 */
/* Production builds allocate in chunks of RECV_INC buffers... */
99 static const u_int chunk = RECV_INC;
101 /* Allocate each buffer individually so they can be free()d
102 * during ntpd shutdown on DEBUG builds to keep them out of heap
105 static const u_int chunk = 1;
108 register recvbuf_t *bufp;
/* Already at the configured ceiling: nothing to do. */
112 if (limit_recvbufs <= total_recvbufs)
/* Ask for the requested count plus buffers we failed to supply
 * since the last replenishment; the shortfall is now being repaid. */
115 abuf = nbufs + buffer_shortfall;
116 buffer_shortfall = 0;
/* abuf < nbufs means the unsigned addition wrapped around. */
118 if (abuf < nbufs || abuf > RECV_BATCH)
119 abuf = RECV_BATCH; /* clamp on overflow */
/* (~abuf + 1) is -abuf in two's complement; masking by the
 * power-of-two minus one rounds abuf up to a multiple of RECV_INC. */
121 abuf += (~abuf + 1) & (RECV_INC - 1); /* round up */
/* Never allocate past the configured ceiling. */
123 if (abuf > (limit_recvbufs - total_recvbufs))
124 abuf = limit_recvbufs - total_recvbufs;
125 abuf += (~abuf + 1) & (chunk - 1); /* round up */
/* calloc zero-fills, so the new buffers start initialised. */
128 bufp = calloc(chunk, sizeof(*bufp));
/* Allocation failed: lower the ceiling so we stop retrying.
 * NOTE(review): the failure-check line is elided in this excerpt. */
130 limit_recvbufs = total_recvbufs;
/* Thread each buffer of the chunk onto the free list. */
133 for (i = chunk; i; --i,++bufp) {
134 LINK_SLIST(free_recv_list, bufp, link);
136 free_recvbufs += chunk;
137 total_recvbufs += chunk;
/*
 * init_recvbuff - one-time module initialisation: zero the counters,
 * set the pool limits, pre-allocate 'nbufs' buffers, set up the
 * Windows locks, and register the atexit() teardown hook.
 */
144 init_recvbuff(int nbufs)
148 * Init buffer free list and stat counters
150 free_recvbufs = total_recvbufs = 0;
151 full_recvbufs = lowater_adds = 0;
/* Pool ceiling and the reserve kept for urgent (clock) requests. */
153 limit_recvbufs = RECV_TOOMANY;
154 emerg_recvbufs = RECV_CLOCK;
156 create_buffers(nbufs);
158 # if defined(SYS_WINNT)
159 InitializeCriticalSection(&RecvLock);
160 InitializeCriticalSection(&FreeLock);
/* Ensure buffers are released at shutdown (see uninit_recvbuff). */
164 atexit(&uninit_recvbuff);
/*
 * uninit_recvbuff - atexit() hook: drain both queues (presumably
 * freeing each unlinked buffer; the free() calls are elided in this
 * excerpt) and tear down the Windows critical sections.
 */
171 uninit_recvbuff(void)
173 recvbuf_t *rbunlinked;
/* Drain the full FIFO until empty. */
176 UNLINK_FIFO(rbunlinked, full_recv_fifo, link);
177 if (rbunlinked == NULL)
/* Then drain the free list until empty. */
183 UNLINK_HEAD_SLIST(rbunlinked, free_recv_list, link);
184 if (rbunlinked == NULL)
188 # if defined(SYS_WINNT)
189 DeleteCriticalSection(&FreeLock);
190 DeleteCriticalSection(&RecvLock);
197 * freerecvbuf - make a single recvbuf available for reuse
200 freerecvbuf(recvbuf_t *rb)
/* A buffer being freed should have had a use count of exactly 1;
 * anything else indicates a double-free or an unaccounted reference. */
203 if (--rb->used != 0) {
204 msyslog(LOG_ERR, "******** freerecvbuff non-zero usage: %d *******", rb->used);
/* Return the buffer to the free list (under LOCK_F in the full
 * source; the lock lines are elided in this excerpt). */
208 LINK_SLIST(free_recv_list, rb, link);
/*
 * add_full_recv_buffer - queue a buffer holding a received packet onto
 * the full FIFO for later processing.  NULL input is logged and
 * (presumably) rejected; the early-return line is elided here.
 */
216 add_full_recv_buffer(recvbuf_t *rb)
219 msyslog(LOG_ERR, "add_full_recv_buffer received NULL buffer");
223 LINK_FIFO(full_recv_fifo, rb, link);
/*
 * get_free_recv_buffer - pop a buffer from the free list, or return
 * NULL when the pool is exhausted.  Non-urgent callers may not dip
 * into the emerg_recvbufs reserve; urgent (clock) callers may.
 */
230 get_free_recv_buffer(
234 recvbuf_t *buffer = NULL;
/* Only take a buffer if doing so leaves the required reserve. */
237 if (free_recvbufs > (urgent ? emerg_recvbufs : 0)) {
238 UNLINK_HEAD_SLIST(buffer, free_recv_list, link);
/* Reset the buffer before handing it out; the miss path
 * (buffer == NULL, shortfall accounting) is elided in this excerpt. */
241 if (buffer != NULL) {
244 initialise_buffer(buffer);
255 #ifdef HAVE_IO_COMPLETION_PORT
/*
 * get_free_recv_buffer_alloc - Windows IOCP variant: replenish the
 * pool first if we are at/below the emergency reserve or owe a
 * shortfall, then take a buffer via get_free_recv_buffer().
 */
257 get_free_recv_buffer_alloc(
262 if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
263 create_buffers(RECV_INC);
265 return get_free_recv_buffer(urgent);
/*
 * get_full_recv_buffer - dequeue the next received packet buffer from
 * the full FIFO, replenishing the free pool first so that lengthy
 * packet processing cannot starve the receive path.
 */
271 get_full_recv_buffer(void)
276 * make sure there are free buffers when we wander off to do
277 * lengthy packet processing with any buffer we grab from the
280 * fixes malloc() interrupted by SIGIO risk (Bug 889)
283 if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
284 create_buffers(RECV_INC);
288 * try to grab a full buffer
291 UNLINK_FIFO(rbuf, full_recv_fifo, link);
/* Decrement the full count only if one was actually queued;
 * the decrement statement itself is elided in this excerpt. */
292 if (rbuf != NULL && full_recvbufs)
301 * purge_recv_buffers_for_fd() - purges any previously-received input
302 * from a given file descriptor.
305 purge_recv_buffers_for_fd(
311 recvbuf_t *punlinked;
312 recvbuf_t *freelist = NULL;
314 /* We want to hold only one lock at a time. So we do a scan on
315 * the full buffer queue, collecting items as we go, and when
316 * done we spool the collected items to 'freerecvbuf()'.
/* Walk the full FIFO looking for buffers belonging to 'fd'. */
320 for (rbufp = HEAD_FIFO(full_recv_fifo);
325 # ifdef HAVE_IO_COMPLETION_PORT
/* On Windows IOCP, a NULL dstadr marks fd-associated buffers. */
326 if (rbufp->dstadr == NULL && rbufp->fd == fd)
/* Unlink the matching buffer from the middle of the FIFO and
 * verify we removed the node we intended to. */
331 UNLINK_MID_FIFO(punlinked, full_recv_fifo,
332 rbufp, link, recvbuf_t);
333 INSIST(punlinked == rbufp);
/* Collect it on the private free list for later release. */
336 rbufp->link = freelist;
/* Second phase: release the collected buffers outside the FIFO
 * lock, one at a time through freerecvbuf(). */
344 next = freelist->link;
345 freerecvbuf(freelist);
352 * Checks to see if there are buffers to process
/* Returns ISC_TRUE when the full FIFO is non-empty (the return lines
 * are elided in this excerpt).  Lock-free peek at the FIFO head. */
354 isc_boolean_t has_full_recv_buffer(void)
356 if (HEAD_FIFO(full_recv_fifo) != NULL)
363 #ifdef NTP_DEBUG_LISTS_H
/*
 * check_gen_fifo_consistency - debug-build invariant check for a
 * generic FIFO: head and tail pointers must be both NULL (empty) or
 * both non-NULL, and the recorded tail must match the tail found by
 * walking the list from the head.
 */
365 check_gen_fifo_consistency(void *fifo)
/* Head and tail are either both set or both clear. */
372 REQUIRE((NULL == pf->phead && NULL == pf->pptail) ||
373 (NULL != pf->phead && NULL != pf->pptail));
/* Walk the chain, remembering the address of the last link field. */
376 for (pthis = pf->phead;
379 if (NULL != pthis->link)
380 pptail = &pthis->link;
/* The walked tail must agree with the anchor's recorded tail. */
382 REQUIRE(NULL == pf->pptail || pptail == pf->pptail);
384 #endif /* NTP_DEBUG_LISTS_H */