1 /*
2  * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 #include "event2/event-config.h"
29 #include "evconfig-private.h"
30
31 #ifdef _WIN32
32 #include <winsock2.h>
33 #include <windows.h>
34 #include <io.h>
35 #endif
36
37 #ifdef EVENT__HAVE_VASPRINTF
38 /* If we have vasprintf, we need to define _GNU_SOURCE before we include
39  * stdio.h.  This comes from evconfig-private.h.
40  */
41 #endif
42
43 #include <sys/types.h>
44
45 #ifdef EVENT__HAVE_SYS_TIME_H
46 #include <sys/time.h>
47 #endif
48
49 #ifdef EVENT__HAVE_SYS_SOCKET_H
50 #include <sys/socket.h>
51 #endif
52
53 #ifdef EVENT__HAVE_SYS_UIO_H
54 #include <sys/uio.h>
55 #endif
56
57 #ifdef EVENT__HAVE_SYS_IOCTL_H
58 #include <sys/ioctl.h>
59 #endif
60
61 #ifdef EVENT__HAVE_SYS_MMAN_H
62 #include <sys/mman.h>
63 #endif
64
65 #ifdef EVENT__HAVE_SYS_SENDFILE_H
66 #include <sys/sendfile.h>
67 #endif
68 #ifdef EVENT__HAVE_SYS_STAT_H
69 #include <sys/stat.h>
70 #endif
71
72
73 #include <errno.h>
74 #include <stdio.h>
75 #include <stdlib.h>
76 #include <string.h>
77 #ifdef EVENT__HAVE_STDARG_H
78 #include <stdarg.h>
79 #endif
80 #ifdef EVENT__HAVE_UNISTD_H
81 #include <unistd.h>
82 #endif
83 #include <limits.h>
84
85 #include "event2/event.h"
86 #include "event2/buffer.h"
87 #include "event2/buffer_compat.h"
88 #include "event2/bufferevent.h"
89 #include "event2/bufferevent_compat.h"
90 #include "event2/bufferevent_struct.h"
91 #include "event2/thread.h"
92 #include "log-internal.h"
93 #include "mm-internal.h"
94 #include "util-internal.h"
95 #include "evthread-internal.h"
96 #include "evbuffer-internal.h"
97 #include "bufferevent-internal.h"
98
99 /* some systems do not have MAP_FAILED */
100 #ifndef MAP_FAILED
101 #define MAP_FAILED      ((void *)-1)
102 #endif
103
104 /* send file support */
105 #if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
106 #define USE_SENDFILE            1
107 #define SENDFILE_IS_LINUX       1
108 #elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
109 #define USE_SENDFILE            1
110 #define SENDFILE_IS_FREEBSD     1
111 #elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
112 #define USE_SENDFILE            1
113 #define SENDFILE_IS_MACOSX      1
114 #elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
115 #define USE_SENDFILE            1
116 #define SENDFILE_IS_SOLARIS     1
117 #endif
118
119 /* Mask of user-selectable callback flags. */
120 #define EVBUFFER_CB_USER_FLAGS      0xffff
121 /* Mask of all internal-use-only flags. */
122 #define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000
123
124 /* Flag set if the callback is using the cb_obsolete function pointer  */
125 #define EVBUFFER_CB_OBSOLETE           0x00040000
126
127 /* evbuffer_chain support */
128 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
129 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
130             0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
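/*
 * Within a chain, the payload occupies buffer[misalign .. misalign+off).
 * CHAIN_SPACE_PTR() is the first free byte after that payload, and
 * CHAIN_SPACE_LEN() is the number of free bytes that follow it (always 0
 * for immutable chains, whose storage we do not own and must not write to).
 */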
131
132 #define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
133 #define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
134
135 /* evbuffer_ptr support */
136 #define PTR_NOT_FOUND(ptr) do {                 \
137         (ptr)->pos = -1;                                        \
138         (ptr)->internal_.chain = NULL;          \
139         (ptr)->internal_.pos_in_chain = 0;      \
140 } while (0)
141
142 static void evbuffer_chain_align(struct evbuffer_chain *chain);
143 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
144     size_t datalen);
145 static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
146 static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
147     const struct evbuffer_ptr *pos, const char *mem, size_t len);
148 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
149     size_t datlen);
150 static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
151     size_t howfar);
152 static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
153 static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);
154
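/*
 * Allocate a new chain.  The struct evbuffer_chain header and its payload
 * are carved from a single malloc'd block: the header occupies the first
 * EVBUFFER_CHAIN_SIZE bytes and chain->buffer initially points just past
 * it.  Requests smaller than EVBUFFER_CHAIN_MAX/2 are rounded up by
 * doubling MIN_BUFFER_SIZE, so a series of small additions does not cost
 * one allocation each.
 */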
155 static struct evbuffer_chain *
156 evbuffer_chain_new(size_t size)
157 {
158         struct evbuffer_chain *chain;
159         size_t to_alloc;
160
161         if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
162                 return (NULL);
163
164         size += EVBUFFER_CHAIN_SIZE;
165
166                 /* round small requests up by doubling MIN_BUFFER_SIZE until the buffer fits */
167         if (size < EVBUFFER_CHAIN_MAX / 2) {
168                 to_alloc = MIN_BUFFER_SIZE;
169                 while (to_alloc < size) {
170                         to_alloc <<= 1;
171                 }
172         } else {
173                 to_alloc = size;
174         }
175
176         /* we get everything in one chunk */
177         if ((chain = mm_malloc(to_alloc)) == NULL)
178                 return (NULL);
179
180         memset(chain, 0, EVBUFFER_CHAIN_SIZE);
181
182         chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
183
184         /* this way chain->buffer can later be pointed at a different
185          * address, which is required for mmap for example.
186          */
187         chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain);
188
189         chain->refcnt = 1;
190
191         return (chain);
192 }
193
194 static inline void
195 evbuffer_chain_free(struct evbuffer_chain *chain)
196 {
197         EVUTIL_ASSERT(chain->refcnt > 0);
198         if (--chain->refcnt > 0) {
199                 /* chain is still referenced by other chains */
200                 return;
201         }
202
203         if (CHAIN_PINNED(chain)) {
204                 /* will get freed once no longer dangling */
205                 chain->refcnt++;
206                 chain->flags |= EVBUFFER_DANGLING;
207                 return;
208         }
209
210         /* safe to release chain, it's either a referencing
211          * chain or all references to it have been freed */
212         if (chain->flags & EVBUFFER_REFERENCE) {
213                 struct evbuffer_chain_reference *info =
214                     EVBUFFER_CHAIN_EXTRA(
215                             struct evbuffer_chain_reference,
216                             chain);
217                 if (info->cleanupfn)
218                         (*info->cleanupfn)(chain->buffer,
219                             chain->buffer_len,
220                             info->extra);
221         }
222         if (chain->flags & EVBUFFER_FILESEGMENT) {
223                 struct evbuffer_chain_file_segment *info =
224                     EVBUFFER_CHAIN_EXTRA(
225                             struct evbuffer_chain_file_segment,
226                             chain);
227                 if (info->segment) {
228 #ifdef _WIN32
229                         if (info->segment->is_mapping)
230                                 UnmapViewOfFile(chain->buffer);
231 #endif
232                         evbuffer_file_segment_free(info->segment);
233                 }
234         }
235         if (chain->flags & EVBUFFER_MULTICAST) {
236                 struct evbuffer_multicast_parent *info =
237                     EVBUFFER_CHAIN_EXTRA(
238                             struct evbuffer_multicast_parent,
239                             chain);
240                 /* referencing chain is being freed, decrease
241                  * refcounts of source chain and associated
242                  * evbuffer (which get freed once both reach
243                  * zero) */
244                 EVUTIL_ASSERT(info->source != NULL);
245                 EVUTIL_ASSERT(info->parent != NULL);
246                 EVBUFFER_LOCK(info->source);
247                 evbuffer_chain_free(info->parent);
248                 evbuffer_decref_and_unlock_(info->source);
249         }
250
251         mm_free(chain);
252 }
253
254 static void
255 evbuffer_free_all_chains(struct evbuffer_chain *chain)
256 {
257         struct evbuffer_chain *next;
258         for (; chain; chain = next) {
259                 next = chain->next;
260                 evbuffer_chain_free(chain);
261         }
262 }
263
264 #ifndef NDEBUG
265 static int
266 evbuffer_chains_all_empty(struct evbuffer_chain *chain)
267 {
268         for (; chain; chain = chain->next) {
269                 if (chain->off)
270                         return 0;
271         }
272         return 1;
273 }
274 #else
275 /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
276 "unused variable" warnings. */
277 static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
278         return 1;
279 }
280 #endif
281
282 /* Free all trailing chains in 'buf' that are empty and not pinned, prior
283  * to replacing them with a new chain.  Return a pointer to the place
284  * where the new chain will go.
285  *
286  * Internal; requires lock.  The caller must fix up buf->last and buf->first
287  * as needed; they might have been freed.
288  */
289 static struct evbuffer_chain **
290 evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
291 {
292         struct evbuffer_chain **ch = buf->last_with_datap;
293         /* Find the first victim chain.  It might be *last_with_datap */
294         while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
295                 ch = &(*ch)->next;
296         if (*ch) {
297                 EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
298                 evbuffer_free_all_chains(*ch);
299                 *ch = NULL;
300         }
301         return ch;
302 }
303
304 /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
305  * chains as necessary.  Requires lock.  Does not schedule callbacks.
306  */
307 static void
308 evbuffer_chain_insert(struct evbuffer *buf,
309     struct evbuffer_chain *chain)
310 {
311         ASSERT_EVBUFFER_LOCKED(buf);
312         if (*buf->last_with_datap == NULL) {
313                 /* There are no chains with data on the buffer at all. */
314                 EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
315                 EVUTIL_ASSERT(buf->first == NULL);
316                 buf->first = buf->last = chain;
317         } else {
318                 struct evbuffer_chain **chp;
319                 chp = evbuffer_free_trailing_empty_chains(buf);
320                 *chp = chain;
321                 if (chain->off)
322                         buf->last_with_datap = chp;
323                 buf->last = chain;
324         }
325         buf->total_len += chain->off;
326 }
327
328 static inline struct evbuffer_chain *
329 evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
330 {
331         struct evbuffer_chain *chain;
332         if ((chain = evbuffer_chain_new(datlen)) == NULL)
333                 return NULL;
334         evbuffer_chain_insert(buf, chain);
335         return chain;
336 }
337
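/*
 * Pinned chains must not be freed or have their memory moved while a pin
 * flag is set, typically because some pending operation (such as an
 * overlapped read or write) still refers to the chain's memory.  Freeing
 * a pinned chain only marks it EVBUFFER_DANGLING; its memory is released
 * when the last pin is cleared in evbuffer_chain_unpin_().
 */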
338 void
339 evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
340 {
341         EVUTIL_ASSERT((chain->flags & flag) == 0);
342         chain->flags |= flag;
343 }
344
345 void
346 evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
347 {
348         EVUTIL_ASSERT((chain->flags & flag) != 0);
349         chain->flags &= ~flag;
350         if (chain->flags & EVBUFFER_DANGLING)
351                 evbuffer_chain_free(chain);
352 }
353
354 static inline void
355 evbuffer_chain_incref(struct evbuffer_chain *chain)
356 {
357         ++chain->refcnt;
358 }
359
360 struct evbuffer *
361 evbuffer_new(void)
362 {
363         struct evbuffer *buffer;
364
365         buffer = mm_calloc(1, sizeof(struct evbuffer));
366         if (buffer == NULL)
367                 return (NULL);
368
369         LIST_INIT(&buffer->callbacks);
370         buffer->refcnt = 1;
371         buffer->last_with_datap = &buffer->first;
372
373         return (buffer);
374 }
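/*
 * Illustrative use of the public evbuffer API defined in this file
 * (a minimal sketch, not part of the original source):
 *
 *	struct evbuffer *tmp = evbuffer_new();
 *	char out[6];
 *	evbuffer_add(tmp, "hello", 5);
 *	if (evbuffer_remove(tmp, out, 5) == 5)
 *		out[5] = '\0';
 *	evbuffer_free(tmp);
 */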
375
376 int
377 evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
378 {
379         EVBUFFER_LOCK(buf);
380         buf->flags |= (ev_uint32_t)flags;
381         EVBUFFER_UNLOCK(buf);
382         return 0;
383 }
384
385 int
386 evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
387 {
388         EVBUFFER_LOCK(buf);
389         buf->flags &= ~(ev_uint32_t)flags;
390         EVBUFFER_UNLOCK(buf);
391         return 0;
392 }
393
394 void
395 evbuffer_incref_(struct evbuffer *buf)
396 {
397         EVBUFFER_LOCK(buf);
398         ++buf->refcnt;
399         EVBUFFER_UNLOCK(buf);
400 }
401
402 void
403 evbuffer_incref_and_lock_(struct evbuffer *buf)
404 {
405         EVBUFFER_LOCK(buf);
406         ++buf->refcnt;
407 }
408
409 int
410 evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
411 {
412         EVBUFFER_LOCK(buffer);
413         buffer->cb_queue = base;
414         buffer->deferred_cbs = 1;
415         event_deferred_cb_init_(&buffer->deferred,
416             event_base_get_npriorities(base) / 2,
417             evbuffer_deferred_callback, buffer);
418         EVBUFFER_UNLOCK(buffer);
419         return 0;
420 }
421
422 int
423 evbuffer_enable_locking(struct evbuffer *buf, void *lock)
424 {
425 #ifdef EVENT__DISABLE_THREAD_SUPPORT
426         return -1;
427 #else
428         if (buf->lock)
429                 return -1;
430
431         if (!lock) {
432                 EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
433                 if (!lock)
434                         return -1;
435                 buf->lock = lock;
436                 buf->own_lock = 1;
437         } else {
438                 buf->lock = lock;
439                 buf->own_lock = 0;
440         }
441
442         return 0;
443 #endif
444 }
445
446 void
447 evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
448 {
449         EVBUFFER_LOCK(buf);
450         buf->parent = bev;
451         EVBUFFER_UNLOCK(buf);
452 }
453
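/* Run the callbacks registered on 'buffer'.  'running_deferred' is nonzero
 * when we are invoked from the deferred-callback queue; in that case only
 * the enabled callbacks without EVBUFFER_CB_NODEFER fire, since NODEFER
 * callbacks already ran at the time of the change.  When deferral is
 * enabled but we are called directly, only the enabled NODEFER callbacks
 * run now and the n_add/n_del counters are preserved for the deferred
 * pass.  Otherwise every enabled callback runs.  Requires lock. */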
454 static void
455 evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
456 {
457         struct evbuffer_cb_entry *cbent, *next;
458         struct evbuffer_cb_info info;
459         size_t new_size;
460         ev_uint32_t mask, masked_val;
461         int clear = 1;
462
463         if (running_deferred) {
464                 mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
465                 masked_val = EVBUFFER_CB_ENABLED;
466         } else if (buffer->deferred_cbs) {
467                 mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
468                 masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
469                 /* Don't zero-out n_add/n_del, since the deferred callbacks
470                    will want to see them. */
471                 clear = 0;
472         } else {
473                 mask = EVBUFFER_CB_ENABLED;
474                 masked_val = EVBUFFER_CB_ENABLED;
475         }
476
477         ASSERT_EVBUFFER_LOCKED(buffer);
478
479         if (LIST_EMPTY(&buffer->callbacks)) {
480                 buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
481                 return;
482         }
483         if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
484                 return;
485
486         new_size = buffer->total_len;
487         info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
488         info.n_added = buffer->n_add_for_cb;
489         info.n_deleted = buffer->n_del_for_cb;
490         if (clear) {
491                 buffer->n_add_for_cb = 0;
492                 buffer->n_del_for_cb = 0;
493         }
494         for (cbent = LIST_FIRST(&buffer->callbacks);
495              cbent != LIST_END(&buffer->callbacks);
496              cbent = next) {
497                 /* Get the 'next' pointer now in case this callback decides
498                  * to remove itself or something. */
499                 next = LIST_NEXT(cbent, next);
500
501                 if ((cbent->flags & mask) != masked_val)
502                         continue;
503
504                 if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
505                         cbent->cb.cb_obsolete(buffer,
506                             info.orig_size, new_size, cbent->cbarg);
507                 else
508                         cbent->cb.cb_func(buffer, &info, cbent->cbarg);
509         }
510 }
511
512 void
513 evbuffer_invoke_callbacks_(struct evbuffer *buffer)
514 {
515         if (LIST_EMPTY(&buffer->callbacks)) {
516                 buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
517                 return;
518         }
519
520         if (buffer->deferred_cbs) {
521                 if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
522                         evbuffer_incref_and_lock_(buffer);
523                         if (buffer->parent)
524                                 bufferevent_incref_(buffer->parent);
525                 }
526                 EVBUFFER_UNLOCK(buffer);
527         }
528
529         evbuffer_run_callbacks(buffer, 0);
530 }
531
532 static void
533 evbuffer_deferred_callback(struct event_callback *cb, void *arg)
534 {
535         struct bufferevent *parent = NULL;
536         struct evbuffer *buffer = arg;
537
538         /* XXXX It would be better to run these callbacks without holding the
539          * lock */
540         EVBUFFER_LOCK(buffer);
541         parent = buffer->parent;
542         evbuffer_run_callbacks(buffer, 1);
543         evbuffer_decref_and_unlock_(buffer);
544         if (parent)
545                 bufferevent_decref_(parent);
546 }
547
548 static void
549 evbuffer_remove_all_callbacks(struct evbuffer *buffer)
550 {
551         struct evbuffer_cb_entry *cbent;
552
553         while ((cbent = LIST_FIRST(&buffer->callbacks))) {
554                 LIST_REMOVE(cbent, next);
555                 mm_free(cbent);
556         }
557 }
558
559 void
560 evbuffer_decref_and_unlock_(struct evbuffer *buffer)
561 {
562         struct evbuffer_chain *chain, *next;
563         ASSERT_EVBUFFER_LOCKED(buffer);
564
565         EVUTIL_ASSERT(buffer->refcnt > 0);
566
567         if (--buffer->refcnt > 0) {
568                 EVBUFFER_UNLOCK(buffer);
569                 return;
570         }
571
572         for (chain = buffer->first; chain != NULL; chain = next) {
573                 next = chain->next;
574                 evbuffer_chain_free(chain);
575         }
576         evbuffer_remove_all_callbacks(buffer);
577         if (buffer->deferred_cbs)
578                 event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);
579
580         EVBUFFER_UNLOCK(buffer);
581         if (buffer->own_lock)
582                 EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
583         mm_free(buffer);
584 }
585
586 void
587 evbuffer_free(struct evbuffer *buffer)
588 {
589         EVBUFFER_LOCK(buffer);
590         evbuffer_decref_and_unlock_(buffer);
591 }
592
593 void
594 evbuffer_lock(struct evbuffer *buf)
595 {
596         EVBUFFER_LOCK(buf);
597 }
598
599 void
600 evbuffer_unlock(struct evbuffer *buf)
601 {
602         EVBUFFER_UNLOCK(buf);
603 }
604
605 size_t
606 evbuffer_get_length(const struct evbuffer *buffer)
607 {
608         size_t result;
609
610         EVBUFFER_LOCK(buffer);
611
612         result = (buffer->total_len);
613
614         EVBUFFER_UNLOCK(buffer);
615
616         return result;
617 }
618
619 size_t
620 evbuffer_get_contiguous_space(const struct evbuffer *buf)
621 {
622         struct evbuffer_chain *chain;
623         size_t result;
624
625         EVBUFFER_LOCK(buf);
626         chain = buf->first;
627         result = (chain != NULL ? chain->off : 0);
628         EVBUFFER_UNLOCK(buf);
629
630         return result;
631 }
632
633 size_t
634 evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) {
635         int n;
636         size_t res;
637         size_t to_alloc;
638
639         EVBUFFER_LOCK(buf);
640
641         res = to_alloc = 0;
642
643         for (n = 0; n < n_vec; n++) {
644                 to_alloc += vec[n].iov_len;
645         }
646
647         if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
648                 goto done;
649         }
650
651         for (n = 0; n < n_vec; n++) {
652                 /* XXX each 'add' call here does a bunch of setup that's
653                  * obviated by evbuffer_expand_fast_, and some cleanup that we
654                  * would like to do only once.  Instead we should just extract
655                  * the part of the code that's needed. */
656
657                 if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
658                         goto done;
659                 }
660
661                 res += vec[n].iov_len;
662         }
663
664 done:
665         EVBUFFER_UNLOCK(buf);
666         return res;
667 }
668
669 int
670 evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
671     struct evbuffer_iovec *vec, int n_vecs)
672 {
673         struct evbuffer_chain *chain, **chainp;
674         int n = -1;
675
676         EVBUFFER_LOCK(buf);
677         if (buf->freeze_end)
678                 goto done;
679         if (n_vecs < 1)
680                 goto done;
681         if (n_vecs == 1) {
682                 if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
683                         goto done;
684
685                 vec[0].iov_base = (void *)CHAIN_SPACE_PTR(chain);
686                 vec[0].iov_len = (size_t)CHAIN_SPACE_LEN(chain);
687                 EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
688                 n = 1;
689         } else {
690                 if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
691                         goto done;
692                 n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
693                                 &chainp, 0);
694         }
695
696 done:
697         EVBUFFER_UNLOCK(buf);
698         return n;
699
700 }
701
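/* buf->last_with_datap always points either at buf->first or at the
 * 'next' field of some chain in the list, and *buf->last_with_datap is
 * meant to be the last chain that actually holds data.  This helper walks
 * the pointer forward to restore that property after data has been written
 * into chains beyond it, returning the number of chains stepped over. */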
702 static int
703 advance_last_with_data(struct evbuffer *buf)
704 {
705         int n = 0;
706         ASSERT_EVBUFFER_LOCKED(buf);
707
708         if (!*buf->last_with_datap)
709                 return 0;
710
711         while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
712                 buf->last_with_datap = &(*buf->last_with_datap)->next;
713                 ++n;
714         }
715         return n;
716 }
717
718 int
719 evbuffer_commit_space(struct evbuffer *buf,
720     struct evbuffer_iovec *vec, int n_vecs)
721 {
722         struct evbuffer_chain *chain, **firstchainp, **chainp;
723         int result = -1;
724         size_t added = 0;
725         int i;
726
727         EVBUFFER_LOCK(buf);
728
729         if (buf->freeze_end)
730                 goto done;
731         if (n_vecs == 0) {
732                 result = 0;
733                 goto done;
734         } else if (n_vecs == 1 &&
735             (buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) {
736                 /* The user only got or used one chain; it might not
737                  * be the first one with space in it. */
738                 if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
739                         goto done;
740                 buf->last->off += vec[0].iov_len;
741                 added = vec[0].iov_len;
742                 if (added)
743                         advance_last_with_data(buf);
744                 goto okay;
745         }
746
747         /* Advance 'firstchain' to the first chain with space in it. */
748         firstchainp = buf->last_with_datap;
749         if (!*firstchainp)
750                 goto done;
751         if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
752                 firstchainp = &(*firstchainp)->next;
753         }
754
755         chain = *firstchainp;
756         /* pass 1: make sure that the pointers and lengths of vecs[] are in
757          * bounds before we try to commit anything. */
758         for (i=0; i<n_vecs; ++i) {
759                 if (!chain)
760                         goto done;
761                 if (vec[i].iov_base != (void *)CHAIN_SPACE_PTR(chain) ||
762                     (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
763                         goto done;
764                 chain = chain->next;
765         }
766         /* pass 2: actually adjust all the chains. */
767         chainp = firstchainp;
768         for (i=0; i<n_vecs; ++i) {
769                 (*chainp)->off += vec[i].iov_len;
770                 added += vec[i].iov_len;
771                 if (vec[i].iov_len) {
772                         buf->last_with_datap = chainp;
773                 }
774                 chainp = &(*chainp)->next;
775         }
776
777 okay:
778         buf->total_len += added;
779         buf->n_add_for_cb += added;
780         result = 0;
781         evbuffer_invoke_callbacks_(buf);
782
783 done:
784         EVBUFFER_UNLOCK(buf);
785         return result;
786 }
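/*
 * The reserve/commit pair above lets a caller write directly into the
 * buffer's own memory.  Illustrative sketch, not part of the original
 * source; produce_data() is a hypothetical producer that returns the
 * number of bytes it wrote:
 *
 *	struct evbuffer_iovec v[2];
 *	int i, n = evbuffer_reserve_space(buf, 4096, v, 2);
 *	for (i = 0; i < n; ++i)
 *		v[i].iov_len = produce_data(v[i].iov_base, v[i].iov_len);
 *	if (n > 0)
 *		evbuffer_commit_space(buf, v, n);
 *
 * Each iov_len must be reduced to the number of bytes actually written
 * before committing, and the extents must be committed in the order in
 * which they were handed out.
 */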
787
788 static inline int
789 HAS_PINNED_R(struct evbuffer *buf)
790 {
791         return (buf->last && CHAIN_PINNED_R(buf->last));
792 }
793
794 static inline void
795 ZERO_CHAIN(struct evbuffer *dst)
796 {
797         ASSERT_EVBUFFER_LOCKED(dst);
798         dst->first = NULL;
799         dst->last = NULL;
800         dst->last_with_datap = &(dst)->first;
801         dst->total_len = 0;
802 }
803
804 /* Prepares the contents of src to be moved to another buffer by removing
805  * read-pinned chains. The first pinned chain is saved in first, and the
806  * last in last. If src has no read-pinned chains, first and last are set
807  * to NULL. */
808 static int
809 PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
810                 struct evbuffer_chain **last)
811 {
812         struct evbuffer_chain *chain, **pinned;
813
814         ASSERT_EVBUFFER_LOCKED(src);
815
816         if (!HAS_PINNED_R(src)) {
817                 *first = *last = NULL;
818                 return 0;
819         }
820
821         pinned = src->last_with_datap;
822         if (!CHAIN_PINNED_R(*pinned))
823                 pinned = &(*pinned)->next;
824         EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
825         chain = *first = *pinned;
826         *last = src->last;
827
828         /* If there's data in the first pinned chain, we need to allocate
829          * a new chain and copy the data over. */
830         if (chain->off) {
831                 struct evbuffer_chain *tmp;
832
833                 EVUTIL_ASSERT(pinned == src->last_with_datap);
834                 tmp = evbuffer_chain_new(chain->off);
835                 if (!tmp)
836                         return -1;
837                 memcpy(tmp->buffer, chain->buffer + chain->misalign,
838                         chain->off);
839                 tmp->off = chain->off;
840                 *src->last_with_datap = tmp;
841                 src->last = tmp;
842                 chain->misalign += chain->off;
843                 chain->off = 0;
844         } else {
845                 src->last = *src->last_with_datap;
846                 *pinned = NULL;
847         }
848
849         return 0;
850 }
851
852 static inline void
853 RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
854                 struct evbuffer_chain *last)
855 {
856         ASSERT_EVBUFFER_LOCKED(src);
857
858         if (!pinned) {
859                 ZERO_CHAIN(src);
860                 return;
861         }
862
863         src->first = pinned;
864         src->last = last;
865         src->last_with_datap = &src->first;
866         src->total_len = 0;
867 }
868
869 static inline void
870 COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
871 {
872         ASSERT_EVBUFFER_LOCKED(dst);
873         ASSERT_EVBUFFER_LOCKED(src);
874         dst->first = src->first;
875         if (src->last_with_datap == &src->first)
876                 dst->last_with_datap = &dst->first;
877         else
878                 dst->last_with_datap = src->last_with_datap;
879         dst->last = src->last;
880         dst->total_len = src->total_len;
881 }
882
883 static void
884 APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
885 {
886         struct evbuffer_chain **chp;
887
888         ASSERT_EVBUFFER_LOCKED(dst);
889         ASSERT_EVBUFFER_LOCKED(src);
890
891         chp = evbuffer_free_trailing_empty_chains(dst);
892         *chp = src->first;
893
894         if (src->last_with_datap == &src->first)
895                 dst->last_with_datap = chp;
896         else
897                 dst->last_with_datap = src->last_with_datap;
898         dst->last = src->last;
899         dst->total_len += src->total_len;
900 }
901
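/* Append the chains of 'src' to 'dst' without copying the payload: for
 * every chain in src that holds data we create a small "multicast" chain
 * in dst that borrows src's memory, takes a reference on both the source
 * chain and the source evbuffer, and marks the source chain immutable so
 * that neither side can modify the shared bytes.  Used by
 * evbuffer_add_buffer_reference(). */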
902 static inline void
903 APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
904 {
905         struct evbuffer_chain *tmp;
906         struct evbuffer_chain *chain = src->first;
907         struct evbuffer_multicast_parent *extra;
908
909         ASSERT_EVBUFFER_LOCKED(dst);
910         ASSERT_EVBUFFER_LOCKED(src);
911
912         for (; chain; chain = chain->next) {
913                 if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
914                         /* skip empty or dangling chains */
915                         continue;
916                 }
917
918                 tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
919                 if (!tmp) {
920                         event_warn("%s: out of memory", __func__);
921                         return;
922                 }
923                 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
924                 /* reference evbuffer containing source chain so it
925                  * doesn't get released while the chain is still
926                  * being referenced */
927                 evbuffer_incref_(src);
928                 extra->source = src;
929                 /* reference source chain which now becomes immutable */
930                 evbuffer_chain_incref(chain);
931                 extra->parent = chain;
932                 chain->flags |= EVBUFFER_IMMUTABLE;
933                 tmp->buffer_len = chain->buffer_len;
934                 tmp->misalign = chain->misalign;
935                 tmp->off = chain->off;
936                 tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
937                 tmp->buffer = chain->buffer;
938                 evbuffer_chain_insert(dst, tmp);
939         }
940 }
941
942 static void
943 PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
944 {
945         ASSERT_EVBUFFER_LOCKED(dst);
946         ASSERT_EVBUFFER_LOCKED(src);
947         src->last->next = dst->first;
948         dst->first = src->first;
949         dst->total_len += src->total_len;
950         if (*dst->last_with_datap == NULL) {
951                 if (src->last_with_datap == &(src)->first)
952                         dst->last_with_datap = &dst->first;
953                 else
954                         dst->last_with_datap = src->last_with_datap;
955         } else if (dst->last_with_datap == &dst->first) {
956                 dst->last_with_datap = &src->last->next;
957         }
958 }
959
960 int
961 evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
962 {
963         struct evbuffer_chain *pinned, *last;
964         size_t in_total_len, out_total_len;
965         int result = 0;
966
967         EVBUFFER_LOCK2(inbuf, outbuf);
968         in_total_len = inbuf->total_len;
969         out_total_len = outbuf->total_len;
970
971         if (in_total_len == 0 || outbuf == inbuf)
972                 goto done;
973
974         if (outbuf->freeze_end || inbuf->freeze_start) {
975                 result = -1;
976                 goto done;
977         }
978
979         if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
980                 result = -1;
981                 goto done;
982         }
983
984         if (out_total_len == 0) {
985                 /* There might be an empty chain at the start of outbuf; free
986                  * it. */
987                 evbuffer_free_all_chains(outbuf->first);
988                 COPY_CHAIN(outbuf, inbuf);
989         } else {
990                 APPEND_CHAIN(outbuf, inbuf);
991         }
992
993         RESTORE_PINNED(inbuf, pinned, last);
994
995         inbuf->n_del_for_cb += in_total_len;
996         outbuf->n_add_for_cb += in_total_len;
997
998         evbuffer_invoke_callbacks_(inbuf);
999         evbuffer_invoke_callbacks_(outbuf);
1000
1001 done:
1002         EVBUFFER_UNLOCK2(inbuf, outbuf);
1003         return result;
1004 }
1005
1006 int
1007 evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
1008 {
1009         size_t in_total_len, out_total_len;
1010         struct evbuffer_chain *chain;
1011         int result = 0;
1012
1013         EVBUFFER_LOCK2(inbuf, outbuf);
1014         in_total_len = inbuf->total_len;
1015         out_total_len = outbuf->total_len;
1016         chain = inbuf->first;
1017
1018         if (in_total_len == 0)
1019                 goto done;
1020
1021         if (outbuf->freeze_end || outbuf == inbuf) {
1022                 result = -1;
1023                 goto done;
1024         }
1025
1026         for (; chain; chain = chain->next) {
1027                 if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
1028                         /* chain type cannot be referenced */
1029                         result = -1;
1030                         goto done;
1031                 }
1032         }
1033
1034         if (out_total_len == 0) {
1035                 /* There might be an empty chain at the start of outbuf; free
1036                  * it. */
1037                 evbuffer_free_all_chains(outbuf->first);
1038         }
1039         APPEND_CHAIN_MULTICAST(outbuf, inbuf);
1040
1041         outbuf->n_add_for_cb += in_total_len;
1042         evbuffer_invoke_callbacks_(outbuf);
1043
1044 done:
1045         EVBUFFER_UNLOCK2(inbuf, outbuf);
1046         return result;
1047 }
1048
1049 int
1050 evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
1051 {
1052         struct evbuffer_chain *pinned, *last;
1053         size_t in_total_len, out_total_len;
1054         int result = 0;
1055
1056         EVBUFFER_LOCK2(inbuf, outbuf);
1057
1058         in_total_len = inbuf->total_len;
1059         out_total_len = outbuf->total_len;
1060
1061         if (!in_total_len || inbuf == outbuf)
1062                 goto done;
1063
1064         if (outbuf->freeze_start || inbuf->freeze_start) {
1065                 result = -1;
1066                 goto done;
1067         }
1068
1069         if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
1070                 result = -1;
1071                 goto done;
1072         }
1073
1074         if (out_total_len == 0) {
1075                 /* There might be an empty chain at the start of outbuf; free
1076                  * it. */
1077                 evbuffer_free_all_chains(outbuf->first);
1078                 COPY_CHAIN(outbuf, inbuf);
1079         } else {
1080                 PREPEND_CHAIN(outbuf, inbuf);
1081         }
1082
1083         RESTORE_PINNED(inbuf, pinned, last);
1084
1085         inbuf->n_del_for_cb += in_total_len;
1086         outbuf->n_add_for_cb += in_total_len;
1087
1088         evbuffer_invoke_callbacks_(inbuf);
1089         evbuffer_invoke_callbacks_(outbuf);
1090 done:
1091         EVBUFFER_UNLOCK2(inbuf, outbuf);
1092         return result;
1093 }
1094
1095 int
1096 evbuffer_drain(struct evbuffer *buf, size_t len)
1097 {
1098         struct evbuffer_chain *chain, *next;
1099         size_t remaining, old_len;
1100         int result = 0;
1101
1102         EVBUFFER_LOCK(buf);
1103         old_len = buf->total_len;
1104
1105         if (old_len == 0)
1106                 goto done;
1107
1108         if (buf->freeze_start) {
1109                 result = -1;
1110                 goto done;
1111         }
1112
1113         if (len >= old_len && !HAS_PINNED_R(buf)) {
1114                 len = old_len;
1115                 for (chain = buf->first; chain != NULL; chain = next) {
1116                         next = chain->next;
1117                         evbuffer_chain_free(chain);
1118                 }
1119
1120                 ZERO_CHAIN(buf);
1121         } else {
1122                 if (len >= old_len)
1123                         len = old_len;
1124
1125                 buf->total_len -= len;
1126                 remaining = len;
1127                 for (chain = buf->first;
1128                      remaining >= chain->off;
1129                      chain = next) {
1130                         next = chain->next;
1131                         remaining -= chain->off;
1132
1133                         if (chain == *buf->last_with_datap) {
1134                                 buf->last_with_datap = &buf->first;
1135                         }
1136                         if (&chain->next == buf->last_with_datap)
1137                                 buf->last_with_datap = &buf->first;
1138
1139                         if (CHAIN_PINNED_R(chain)) {
1140                                 EVUTIL_ASSERT(remaining == 0);
1141                                 chain->misalign += chain->off;
1142                                 chain->off = 0;
1143                                 break;
1144                         } else
1145                                 evbuffer_chain_free(chain);
1146                 }
1147
1148                 buf->first = chain;
1149                 EVUTIL_ASSERT(chain && remaining <= chain->off);
1150                 chain->misalign += remaining;
1151                 chain->off -= remaining;
1152         }
1153
1154         buf->n_del_for_cb += len;
1155         /* Tell someone about changes in this buffer */
1156         evbuffer_invoke_callbacks_(buf);
1157
1158 done:
1159         EVBUFFER_UNLOCK(buf);
1160         return result;
1161 }
1162
1163 /* Reads data from an event buffer and drains the bytes read */
1164 int
1165 evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
1166 {
1167         ev_ssize_t n;
1168         EVBUFFER_LOCK(buf);
1169         n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
1170         if (n > 0) {
1171                 if (evbuffer_drain(buf, n)<0)
1172                         n = -1;
1173         }
1174         EVBUFFER_UNLOCK(buf);
1175         return (int)n;
1176 }
1177
1178 ev_ssize_t
1179 evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
1180 {
1181         return evbuffer_copyout_from(buf, NULL, data_out, datlen);
1182 }
1183
1184 ev_ssize_t
1185 evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
1186     void *data_out, size_t datlen)
1187 {
1188         /*XXX fails badly on sendfile case. */
1189         struct evbuffer_chain *chain;
1190         char *data = data_out;
1191         size_t nread;
1192         ev_ssize_t result = 0;
1193         size_t pos_in_chain;
1194
1195         EVBUFFER_LOCK(buf);
1196
1197         if (pos) {
1198                 if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) {
1199                         result = -1;
1200                         goto done;
1201                 }
1202                 chain = pos->internal_.chain;
1203                 pos_in_chain = pos->internal_.pos_in_chain;
1204                 if (datlen + pos->pos > buf->total_len)
1205                         datlen = buf->total_len - pos->pos;
1206         } else {
1207                 chain = buf->first;
1208                 pos_in_chain = 0;
1209                 if (datlen > buf->total_len)
1210                         datlen = buf->total_len;
1211         }
1212
1213
1214         if (datlen == 0)
1215                 goto done;
1216
1217         if (buf->freeze_start) {
1218                 result = -1;
1219                 goto done;
1220         }
1221
1222         nread = datlen;
1223
1224         while (datlen && datlen >= chain->off - pos_in_chain) {
1225                 size_t copylen = chain->off - pos_in_chain;
1226                 memcpy(data,
1227                     chain->buffer + chain->misalign + pos_in_chain,
1228                     copylen);
1229                 data += copylen;
1230                 datlen -= copylen;
1231
1232                 chain = chain->next;
1233                 pos_in_chain = 0;
1234                 EVUTIL_ASSERT(chain || datlen==0);
1235         }
1236
1237         if (datlen) {
1238                 EVUTIL_ASSERT(chain);
1239                 EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off);
1240
1241                 memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
1242                     datlen);
1243         }
1244
1245         result = nread;
1246 done:
1247         EVBUFFER_UNLOCK(buf);
1248         return result;
1249 }
1250
1251 /* Reads data from the src buffer into the dst buffer, avoiding memcpy
1252  * where possible. */
1253 /*  XXXX should return ev_ssize_t */
1254 int
1255 evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
1256     size_t datlen)
1257 {
1258         /*XXX We should have an option to force this to be zero-copy.*/
1259
1260         /*XXX can fail badly on sendfile case. */
1261         struct evbuffer_chain *chain, *previous;
1262         size_t nread = 0;
1263         int result;
1264
1265         EVBUFFER_LOCK2(src, dst);
1266
1267         chain = previous = src->first;
1268
1269         if (datlen == 0 || dst == src) {
1270                 result = 0;
1271                 goto done;
1272         }
1273
1274         if (dst->freeze_end || src->freeze_start) {
1275                 result = -1;
1276                 goto done;
1277         }
1278
1279         /* short-cut if we are asked for at least as much data as is buffered */
1280         if (datlen >= src->total_len) {
1281                 datlen = src->total_len;
1282                 evbuffer_add_buffer(dst, src);
1283                 result = (int)datlen; /*XXXX should return ev_ssize_t*/
1284                 goto done;
1285         }
1286
1287         /* remove whole chains where possible */
1288         while (chain->off <= datlen) {
1289                 /* We can't remove the last chain with data from src unless we
1290                  * remove all chains, in which case we would have taken the
1291                  * branch above */
1292                 EVUTIL_ASSERT(chain != *src->last_with_datap);
1293                 nread += chain->off;
1294                 datlen -= chain->off;
1295                 previous = chain;
1296                 if (src->last_with_datap == &chain->next)
1297                         src->last_with_datap = &src->first;
1298                 chain = chain->next;
1299         }
1300
1301         if (nread) {
1302                 /* we can remove the chain */
1303                 struct evbuffer_chain **chp;
1304                 chp = evbuffer_free_trailing_empty_chains(dst);
1305
1306                 if (dst->first == NULL) {
1307                         dst->first = src->first;
1308                 } else {
1309                         *chp = src->first;
1310                 }
1311                 dst->last = previous;
1312                 previous->next = NULL;
1313                 src->first = chain;
1314                 advance_last_with_data(dst);
1315
1316                 dst->total_len += nread;
1317                 dst->n_add_for_cb += nread;
1318         }
1319
1320         /* we know that there is more data in the src buffer than
1321          * we want to read, so we manually drain the chain */
1322         evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
1323         chain->misalign += datlen;
1324         chain->off -= datlen;
1325         nread += datlen;
1326
1327         /* You might think we would want to increment dst->n_add_for_cb
1328          * here too.  But evbuffer_add above already took care of that.
1329          */
1330         src->total_len -= nread;
1331         src->n_del_for_cb += nread;
1332
1333         if (nread) {
1334                 evbuffer_invoke_callbacks_(dst);
1335                 evbuffer_invoke_callbacks_(src);
1336         }
1337         result = (int)nread;/*XXXX should change return type */
1338
1339 done:
1340         EVBUFFER_UNLOCK2(src, dst);
1341         return result;
1342 }
1343
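/* Make the first 'size' bytes of 'buf' contiguous and return a pointer to
 * them; a negative size linearizes the whole buffer.  Returns NULL when
 * that is not possible (for example, 'size' is zero, larger than the
 * buffer, or blocked by pinned chains).  Pulling up may copy and merge
 * chains, so it is linear in the amount of data requested; callers that
 * only need to inspect data are usually better served by
 * evbuffer_copyout(). */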
1344 unsigned char *
1345 evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
1346 {
1347         struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
1348         unsigned char *buffer, *result = NULL;
1349         ev_ssize_t remaining;
1350         int removed_last_with_data = 0;
1351         int removed_last_with_datap = 0;
1352
1353         EVBUFFER_LOCK(buf);
1354
1355         chain = buf->first;
1356
1357         if (size < 0)
1358                 size = buf->total_len;
1359         /* if size > buf->total_len, we cannot guarantee to the user that she
1360          * is going to have a long enough buffer afterwards; so we return
1361          * NULL */
1362         if (size == 0 || (size_t)size > buf->total_len)
1363                 goto done;
1364
1365         /* No need to pull up anything; the first size bytes are
1366          * already here. */
1367         if (chain->off >= (size_t)size) {
1368                 result = chain->buffer + chain->misalign;
1369                 goto done;
1370         }
1371
1372         /* Make sure that none of the chains we need to copy from is pinned. */
1373         remaining = size - chain->off;
1374         EVUTIL_ASSERT(remaining >= 0);
1375         for (tmp=chain->next; tmp; tmp=tmp->next) {
1376                 if (CHAIN_PINNED(tmp))
1377                         goto done;
1378                 if (tmp->off >= (size_t)remaining)
1379                         break;
1380                 remaining -= tmp->off;
1381         }
1382
1383         if (CHAIN_PINNED(chain)) {
1384                 size_t old_off = chain->off;
1385                 if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
1386                         /* not enough room at end of chunk. */
1387                         goto done;
1388                 }
1389                 buffer = CHAIN_SPACE_PTR(chain);
1390                 tmp = chain;
1391                 tmp->off = size;
1392                 size -= old_off;
1393                 chain = chain->next;
1394         } else if (chain->buffer_len - chain->misalign >= (size_t)size) {
1395                 /* already have enough space in the first chain */
1396                 size_t old_off = chain->off;
1397                 buffer = chain->buffer + chain->misalign + chain->off;
1398                 tmp = chain;
1399                 tmp->off = size;
1400                 size -= old_off;
1401                 chain = chain->next;
1402         } else {
1403                 if ((tmp = evbuffer_chain_new(size)) == NULL) {
1404                         event_warn("%s: out of memory", __func__);
1405                         goto done;
1406                 }
1407                 buffer = tmp->buffer;
1408                 tmp->off = size;
1409                 buf->first = tmp;
1410         }
1411
1412         /* TODO(niels): deal with buffers that point to NULL like sendfile */
1413
1414         /* Copy and free every chunk that will be entirely pulled into tmp */
1415         last_with_data = *buf->last_with_datap;
1416         for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
1417                 next = chain->next;
1418
1419                 memcpy(buffer, chain->buffer + chain->misalign, chain->off);
1420                 size -= chain->off;
1421                 buffer += chain->off;
1422                 if (chain == last_with_data)
1423                         removed_last_with_data = 1;
1424                 if (&chain->next == buf->last_with_datap)
1425                         removed_last_with_datap = 1;
1426
1427                 evbuffer_chain_free(chain);
1428         }
1429
1430         if (chain != NULL) {
1431                 memcpy(buffer, chain->buffer + chain->misalign, size);
1432                 chain->misalign += size;
1433                 chain->off -= size;
1434         } else {
1435                 buf->last = tmp;
1436         }
1437
1438         tmp->next = chain;
1439
1440         if (removed_last_with_data) {
1441                 buf->last_with_datap = &buf->first;
1442         } else if (removed_last_with_datap) {
1443                 if (buf->first->next && buf->first->next->off)
1444                         buf->last_with_datap = &buf->first->next;
1445                 else
1446                         buf->last_with_datap = &buf->first;
1447         }
1448
1449         result = (tmp->buffer + tmp->misalign);
1450
1451 done:
1452         EVBUFFER_UNLOCK(buf);
1453         return result;
1454 }
1455
1456 /*
1457  * Reads a line terminated by '\r\n', '\n\r', '\r', or '\n'.
1458  * The returned buffer needs to be freed by the caller.
1459  */
1460 char *
1461 evbuffer_readline(struct evbuffer *buffer)
1462 {
1463         return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
1464 }
1465
1466 static inline ev_ssize_t
1467 evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
1468 {
1469         struct evbuffer_chain *chain = it->internal_.chain;
1470         size_t i = it->internal_.pos_in_chain;
1471         while (chain != NULL) {
1472                 char *buffer = (char *)chain->buffer + chain->misalign;
1473                 char *cp = memchr(buffer+i, chr, chain->off-i);
1474                 if (cp) {
1475                         it->internal_.chain = chain;
1476                         it->internal_.pos_in_chain = cp - buffer;
1477                         it->pos += (cp - buffer - i);
1478                         return it->pos;
1479                 }
1480                 it->pos += chain->off - i;
1481                 i = 0;
1482                 chain = chain->next;
1483         }
1484
1485         return (-1);
1486 }
1487
1488 static inline char *
1489 find_eol_char(char *s, size_t len)
1490 {
1491 #define CHUNK_SZ 128
1492         /* Lots of benchmarking found this approach to be faster in practice
1493          * than doing two memchrs over the whole buffer, doing a memchr on each
1494          * char of the buffer, or trying to emulate memchr by hand. */
1495         char *s_end, *cr, *lf;
1496         s_end = s+len;
1497         while (s < s_end) {
1498                 size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
1499                 cr = memchr(s, '\r', chunk);
1500                 lf = memchr(s, '\n', chunk);
1501                 if (cr) {
1502                         if (lf && lf < cr)
1503                                 return lf;
1504                         return cr;
1505                 } else if (lf) {
1506                         return lf;
1507                 }
1508                 s += CHUNK_SZ;
1509         }
1510
1511         return NULL;
1512 #undef CHUNK_SZ
1513 }
1514
1515 static ev_ssize_t
1516 evbuffer_find_eol_char(struct evbuffer_ptr *it)
1517 {
1518         struct evbuffer_chain *chain = it->internal_.chain;
1519         size_t i = it->internal_.pos_in_chain;
1520         while (chain != NULL) {
1521                 char *buffer = (char *)chain->buffer + chain->misalign;
1522                 char *cp = find_eol_char(buffer+i, chain->off-i);
1523                 if (cp) {
1524                         it->internal_.chain = chain;
1525                         it->internal_.pos_in_chain = cp - buffer;
1526                         it->pos += (cp - buffer) - i;
1527                         return it->pos;
1528                 }
1529                 it->pos += chain->off - i;
1530                 i = 0;
1531                 chain = chain->next;
1532         }
1533
1534         return (-1);
1535 }
1536
1537 static inline int
1538 evbuffer_strspn(
1539         struct evbuffer_ptr *ptr, const char *chrset)
1540 {
1541         int count = 0;
1542         struct evbuffer_chain *chain = ptr->internal_.chain;
1543         size_t i = ptr->internal_.pos_in_chain;
1544
1545         if (!chain)
1546                 return 0;
1547
1548         while (1) {
1549                 char *buffer = (char *)chain->buffer + chain->misalign;
1550                 for (; i < chain->off; ++i) {
1551                         const char *p = chrset;
1552                         while (*p) {
1553                                 if (buffer[i] == *p++)
1554                                         goto next;
1555                         }
1556                         ptr->internal_.chain = chain;
1557                         ptr->internal_.pos_in_chain = i;
1558                         ptr->pos += count;
1559                         return count;
1560                 next:
1561                         ++count;
1562                 }
1563                 i = 0;
1564
1565                 if (! chain->next) {
1566                         ptr->internal_.chain = chain;
1567                         ptr->internal_.pos_in_chain = i;
1568                         ptr->pos += count;
1569                         return count;
1570                 }
1571
1572                 chain = chain->next;
1573         }
1574 }
1575
1576
1577 static inline int
1578 evbuffer_getchr(struct evbuffer_ptr *it)
1579 {
1580         struct evbuffer_chain *chain = it->internal_.chain;
1581         size_t off = it->internal_.pos_in_chain;
1582
1583         if (chain == NULL)
1584                 return -1;
1585
1586         return (unsigned char)chain->buffer[chain->misalign + off];
1587 }
1588
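/* Find the next end-of-line in 'buffer' starting at 'start' (or at the
 * beginning when 'start' is NULL).  The eol_style selects what counts as
 * a terminator: EVBUFFER_EOL_ANY matches any run of CR and LF characters,
 * EVBUFFER_EOL_CRLF an LF optionally preceded by a CR,
 * EVBUFFER_EOL_CRLF_STRICT the exact two-byte CRLF sequence,
 * EVBUFFER_EOL_LF a bare LF, and EVBUFFER_EOL_NUL a NUL byte.  If
 * eol_len_out is not NULL it is set to the number of terminator bytes to
 * drain. */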
1589 struct evbuffer_ptr
1590 evbuffer_search_eol(struct evbuffer *buffer,
1591     struct evbuffer_ptr *start, size_t *eol_len_out,
1592     enum evbuffer_eol_style eol_style)
1593 {
1594         struct evbuffer_ptr it, it2;
1595         size_t extra_drain = 0;
1596         int ok = 0;
1597
1598         /* Avoid locking in trivial edge cases */
1599         if (start && start->internal_.chain == NULL) {
1600                 PTR_NOT_FOUND(&it);
1601                 if (eol_len_out)
1602                         *eol_len_out = extra_drain;
1603                 return it;
1604         }
1605
1606         EVBUFFER_LOCK(buffer);
1607
1608         if (start) {
1609                 memcpy(&it, start, sizeof(it));
1610         } else {
1611                 it.pos = 0;
1612                 it.internal_.chain = buffer->first;
1613                 it.internal_.pos_in_chain = 0;
1614         }
1615
1616         /* the eol_style determines our first stop character and how many
1617          * characters we are going to drain afterwards. */
1618         switch (eol_style) {
1619         case EVBUFFER_EOL_ANY:
1620                 if (evbuffer_find_eol_char(&it) < 0)
1621                         goto done;
1622                 memcpy(&it2, &it, sizeof(it));
1623                 extra_drain = evbuffer_strspn(&it2, "\r\n");
1624                 break;
1625         case EVBUFFER_EOL_CRLF_STRICT: {
1626                 it = evbuffer_search(buffer, "\r\n", 2, &it);
1627                 if (it.pos < 0)
1628                         goto done;
1629                 extra_drain = 2;
1630                 break;
1631         }
1632         case EVBUFFER_EOL_CRLF: {
1633                 ev_ssize_t start_pos = it.pos;
1634                 /* Look for a LF ... */
1635                 if (evbuffer_strchr(&it, '\n') < 0)
1636                         goto done;
1637                 extra_drain = 1;
1638                 /* ... optionally preceded by a CR. */
1639                 if (it.pos == start_pos)
1640                         break; /* If the first character is \n, don't back up */
1641                 /* This potentially does an extra linear walk over the first
1642                  * few chains.  Probably, that's not too expensive unless you
1643                  * have a really pathological setup. */
1644                 memcpy(&it2, &it, sizeof(it));
1645                 if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
1646                         break;
1647                 if (evbuffer_getchr(&it2) == '\r') {
1648                         memcpy(&it, &it2, sizeof(it));
1649                         extra_drain = 2;
1650                 }
1651                 break;
1652         }
1653         case EVBUFFER_EOL_LF:
1654                 if (evbuffer_strchr(&it, '\n') < 0)
1655                         goto done;
1656                 extra_drain = 1;
1657                 break;
1658         case EVBUFFER_EOL_NUL:
1659                 if (evbuffer_strchr(&it, '\0') < 0)
1660                         goto done;
1661                 extra_drain = 1;
1662                 break;
1663         default:
1664                 goto done;
1665         }
1666
1667         ok = 1;
1668 done:
1669         EVBUFFER_UNLOCK(buffer);
1670
1671         if (!ok)
1672                 PTR_NOT_FOUND(&it);
1673         if (eol_len_out)
1674                 *eol_len_out = extra_drain;
1675
1676         return it;
1677 }
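
/*
 * Illustrative usage sketch (added for exposition; not part of libevent).
 * It shows how a caller might use evbuffer_search_eol() to check whether a
 * complete CRLF-terminated line is already buffered before committing to a
 * read.  The buffer name `input` is hypothetical.  Compiled out.
 */
#if 0
static int
example_has_complete_line(struct evbuffer *input, size_t *line_len_out)
{
        size_t eol_len = 0;
        struct evbuffer_ptr eol =
            evbuffer_search_eol(input, NULL, &eol_len, EVBUFFER_EOL_CRLF);
        if (eol.pos < 0)
                return 0;       /* no EOL yet; wait for more data */
        if (line_len_out)
                *line_len_out = (size_t)eol.pos;        /* bytes before the EOL */
        return 1;
}
#endif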
1678
1679 char *
1680 evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
1681                 enum evbuffer_eol_style eol_style)
1682 {
1683         struct evbuffer_ptr it;
1684         char *line;
1685         size_t n_to_copy=0, extra_drain=0;
1686         char *result = NULL;
1687
1688         EVBUFFER_LOCK(buffer);
1689
1690         if (buffer->freeze_start) {
1691                 goto done;
1692         }
1693
1694         it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
1695         if (it.pos < 0)
1696                 goto done;
1697         n_to_copy = it.pos;
1698
1699         if ((line = mm_malloc(n_to_copy+1)) == NULL) {
1700                 event_warn("%s: out of memory", __func__);
1701                 goto done;
1702         }
1703
1704         evbuffer_remove(buffer, line, n_to_copy);
1705         line[n_to_copy] = '\0';
1706
1707         evbuffer_drain(buffer, extra_drain);
1708         result = line;
1709 done:
1710         EVBUFFER_UNLOCK(buffer);
1711
1712         if (n_read_out)
1713                 *n_read_out = result ? n_to_copy : 0;
1714
1715         return result;
1716 }
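
/*
 * Illustrative usage sketch (not part of libevent): drain a buffer one line
 * at a time with evbuffer_readln().  The line is allocated with mm_malloc()
 * above, so mm_free() releases it here; external callers using the default
 * allocator would call free().  The callback name `handle_line` is
 * hypothetical.  Compiled out.
 */
#if 0
static void
example_drain_lines(struct evbuffer *input,
    void (*handle_line)(const char *, size_t))
{
        char *line;
        size_t len;
        while ((line = evbuffer_readln(input, &len, EVBUFFER_EOL_CRLF)) != NULL) {
                handle_line(line, len);
                mm_free(line);
        }
}
#endif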
1717
1718 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
1719
1720 /* Adds data to an event buffer */
1721
1722 int
1723 evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
1724 {
1725         struct evbuffer_chain *chain, *tmp;
1726         const unsigned char *data = data_in;
1727         size_t remain, to_alloc;
1728         int result = -1;
1729
1730         EVBUFFER_LOCK(buf);
1731
1732         if (buf->freeze_end) {
1733                 goto done;
1734         }
1735         /* Prevent buf->total_len overflow */
1736         if (datlen > EV_SIZE_MAX - buf->total_len) {
1737                 goto done;
1738         }
1739
1740         if (*buf->last_with_datap == NULL) {
1741                 chain = buf->last;
1742         } else {
1743                 chain = *buf->last_with_datap;
1744         }
1745
1746         /* If there are no chains allocated for this buffer, allocate one
1747          * big enough to hold all the data. */
1748         if (chain == NULL) {
1749                 chain = evbuffer_chain_new(datlen);
1750                 if (!chain)
1751                         goto done;
1752                 evbuffer_chain_insert(buf, chain);
1753         }
1754
1755         if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
1756                 /* Always true for mutable buffers */
1757                 EVUTIL_ASSERT(chain->misalign >= 0 &&
1758                     (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
1759                 remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
1760                 if (remain >= datlen) {
1761                         /* there's enough space to hold all the data in the
1762                          * current last chain */
1763                         memcpy(chain->buffer + chain->misalign + chain->off,
1764                             data, datlen);
1765                         chain->off += datlen;
1766                         buf->total_len += datlen;
1767                         buf->n_add_for_cb += datlen;
1768                         goto out;
1769                 } else if (!CHAIN_PINNED(chain) &&
1770                     evbuffer_chain_should_realign(chain, datlen)) {
1771                         /* we can fit the data into the misalignment */
1772                         evbuffer_chain_align(chain);
1773
1774                         memcpy(chain->buffer + chain->off, data, datlen);
1775                         chain->off += datlen;
1776                         buf->total_len += datlen;
1777                         buf->n_add_for_cb += datlen;
1778                         goto out;
1779                 }
1780         } else {
1781                 /* we cannot write any data to the last chain */
1782                 remain = 0;
1783         }
1784
1785         /* we need to add another chain */
1786         to_alloc = chain->buffer_len;
1787         if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
1788                 to_alloc <<= 1;
1789         if (datlen > to_alloc)
1790                 to_alloc = datlen;
1791         tmp = evbuffer_chain_new(to_alloc);
1792         if (tmp == NULL)
1793                 goto done;
1794
1795         if (remain) {
1796                 memcpy(chain->buffer + chain->misalign + chain->off,
1797                     data, remain);
1798                 chain->off += remain;
1799                 buf->total_len += remain;
1800                 buf->n_add_for_cb += remain;
1801         }
1802
1803         data += remain;
1804         datlen -= remain;
1805
1806         memcpy(tmp->buffer, data, datlen);
1807         tmp->off = datlen;
1808         evbuffer_chain_insert(buf, tmp);
1809         buf->n_add_for_cb += datlen;
1810
1811 out:
1812         evbuffer_invoke_callbacks_(buf);
1813         result = 0;
1814 done:
1815         EVBUFFER_UNLOCK(buf);
1816         return result;
1817 }
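
/*
 * Illustrative usage sketch (not part of libevent): build a small message
 * by appending a header and a body with evbuffer_add().  The function and
 * parameter names are hypothetical.  Compiled out.
 */
#if 0
static int
example_build_message(struct evbuffer *out, const char *header,
    const void *body, size_t body_len)
{
        /* evbuffer_add() copies the data, so the caller keeps ownership. */
        if (evbuffer_add(out, header, strlen(header)) < 0)
                return -1;
        return evbuffer_add(out, body, body_len);
}
#endif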
1818
1819 int
1820 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
1821 {
1822         struct evbuffer_chain *chain, *tmp;
1823         int result = -1;
1824
1825         EVBUFFER_LOCK(buf);
1826
1827         if (buf->freeze_start) {
1828                 goto done;
1829         }
1830         if (datlen > EV_SIZE_MAX - buf->total_len) {
1831                 goto done;
1832         }
1833
1834         chain = buf->first;
1835
1836         if (chain == NULL) {
1837                 chain = evbuffer_chain_new(datlen);
1838                 if (!chain)
1839                         goto done;
1840                 evbuffer_chain_insert(buf, chain);
1841         }
1842
1843         /* we cannot touch immutable buffers */
1844         if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
1845                 /* Always true for mutable buffers */
1846                 EVUTIL_ASSERT(chain->misalign >= 0 &&
1847                     (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
1848
1849                 /* If this chain is empty, we can treat it as
1850                  * 'empty at the beginning' rather than 'empty at the end' */
1851                 if (chain->off == 0)
1852                         chain->misalign = chain->buffer_len;
1853
1854                 if ((size_t)chain->misalign >= datlen) {
1855                         /* we have enough space to fit everything */
1856                         memcpy(chain->buffer + chain->misalign - datlen,
1857                             data, datlen);
1858                         chain->off += datlen;
1859                         chain->misalign -= datlen;
1860                         buf->total_len += datlen;
1861                         buf->n_add_for_cb += datlen;
1862                         goto out;
1863                 } else if (chain->misalign) {
1864                         /* we can only fit some of the data. */
1865                         memcpy(chain->buffer,
1866                             (char*)data + datlen - chain->misalign,
1867                             (size_t)chain->misalign);
1868                         chain->off += (size_t)chain->misalign;
1869                         buf->total_len += (size_t)chain->misalign;
1870                         buf->n_add_for_cb += (size_t)chain->misalign;
1871                         datlen -= (size_t)chain->misalign;
1872                         chain->misalign = 0;
1873                 }
1874         }
1875
1876         /* we need to add another chain */
1877         if ((tmp = evbuffer_chain_new(datlen)) == NULL)
1878                 goto done;
1879         buf->first = tmp;
1880         if (buf->last_with_datap == &buf->first)
1881                 buf->last_with_datap = &tmp->next;
1882
1883         tmp->next = chain;
1884
1885         tmp->off = datlen;
1886         EVUTIL_ASSERT(datlen <= tmp->buffer_len);
1887         tmp->misalign = tmp->buffer_len - datlen;
1888
1889         memcpy(tmp->buffer + tmp->misalign, data, datlen);
1890         buf->total_len += datlen;
1891         buf->n_add_for_cb += datlen;
1892
1893 out:
1894         evbuffer_invoke_callbacks_(buf);
1895         result = 0;
1896 done:
1897         EVBUFFER_UNLOCK(buf);
1898         return result;
1899 }
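
/*
 * Illustrative usage sketch (not part of libevent): prepend a 4-byte,
 * big-endian length prefix in front of an already-built payload.  Whether
 * the prefix lands in the first chain's misalign area or in a fresh chain
 * is handled by evbuffer_prepend() itself.  Compiled out.
 */
#if 0
static int
example_prepend_length(struct evbuffer *out)
{
        unsigned char hdr[4];
        size_t len = evbuffer_get_length(out);
        hdr[0] = (unsigned char)((len >> 24) & 0xff);
        hdr[1] = (unsigned char)((len >> 16) & 0xff);
        hdr[2] = (unsigned char)((len >> 8) & 0xff);
        hdr[3] = (unsigned char)(len & 0xff);
        return evbuffer_prepend(out, hdr, sizeof(hdr));
}
#endif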
1900
1901 /** Helper: realigns the memory in chain->buffer so that misalign is 0. */
1902 static void
1903 evbuffer_chain_align(struct evbuffer_chain *chain)
1904 {
1905         EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
1906         EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
1907         memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
1908         chain->misalign = 0;
1909 }
1910
1911 #define MAX_TO_COPY_IN_EXPAND 4096
1912 #define MAX_TO_REALIGN_IN_EXPAND 2048
1913
1914 /** Helper: return true iff we should realign chain to fit datalen bytes of
1915     data in it. */
1916 static int
1917 evbuffer_chain_should_realign(struct evbuffer_chain *chain,
1918     size_t datlen)
1919 {
1920         return chain->buffer_len - chain->off >= datlen &&
1921             (chain->off < chain->buffer_len / 2) &&
1922             (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
1923 }
1924
1925 /* Expands the available space in the event buffer to at least datlen, all in
1926  * a single chunk.  Return that chunk. */
1927 static struct evbuffer_chain *
1928 evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
1929 {
1930         struct evbuffer_chain *chain, **chainp;
1931         struct evbuffer_chain *result = NULL;
1932         ASSERT_EVBUFFER_LOCKED(buf);
1933
1934         chainp = buf->last_with_datap;
1935
1936         /* XXX If *chainp is no longer writeable, but has enough space in its
1937          * misalign, this might be a bad idea: we could still use *chainp, not
1938          * (*chainp)->next. */
1939         if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
1940                 chainp = &(*chainp)->next;
1941
1942         /* 'chain' now points to the first chain with writable space (if any)
1943          * We will either use it, realign it, replace it, or resize it. */
1944         chain = *chainp;
1945
1946         if (chain == NULL ||
1947             (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
1948                 /* We can't use the last_with_data chain at all.  Just add a
1949                  * new one that's big enough. */
1950                 goto insert_new;
1951         }
1952
1953         /* If we can fit all the data, then we don't have to do anything */
1954         if (CHAIN_SPACE_LEN(chain) >= datlen) {
1955                 result = chain;
1956                 goto ok;
1957         }
1958
1959         /* If the chain is completely empty, just replace it by adding a new
1960          * empty chain. */
1961         if (chain->off == 0) {
1962                 goto insert_new;
1963         }
1964
1965         /* If the misalignment plus the remaining space fulfills our data
1966          * needs, we could just force an alignment to happen.  Afterwards, we
1967          * have enough space.  But only do this if we're saving a lot of space
1968          * and not moving too much data.  Otherwise the space savings are
1969          * probably offset by the time lost in copying.
1970          */
1971         if (evbuffer_chain_should_realign(chain, datlen)) {
1972                 evbuffer_chain_align(chain);
1973                 result = chain;
1974                 goto ok;
1975         }
1976
1977         /* At this point, we can either resize the last chunk with space in
1978          * it, use the next chunk after it, or add a new one.  If we add a
1979          * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
1980          * last chunk.  If we resize, we have to copy chain->off bytes.
1981          */
1982
1983         /* Would expanding this chunk be affordable and worthwhile? */
1984         if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
1985             chain->off > MAX_TO_COPY_IN_EXPAND ||
1986                 datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) {
1987                 /* It's not worth resizing this chain. Can the next one be
1988                  * used? */
1989                 if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
1990                         /* Yes, we can just use the next chain (which should
1991                          * be empty. */
1992                          * be empty). */
1993                         goto ok;
1994                 } else {
1995                         /* No; append a new chain (which will free all
1996                          * terminal empty chains.) */
1997                         goto insert_new;
1998                 }
1999         } else {
2000                 /* Okay, we're going to try to resize this chain: Not doing so
2001                  * would waste at least 1/8 of its current allocation, and we
2002                  * can do so without having to copy more than
2003                  * MAX_TO_COPY_IN_EXPAND bytes. */
2004                 /* figure out how much space we need */
2005                 size_t length = chain->off + datlen;
2006                 struct evbuffer_chain *tmp = evbuffer_chain_new(length);
2007                 if (tmp == NULL)
2008                         goto err;
2009
2010                 /* copy the data over that we had so far */
2011                 tmp->off = chain->off;
2012                 memcpy(tmp->buffer, chain->buffer + chain->misalign,
2013                     chain->off);
2014                 /* fix up the list */
2015                 EVUTIL_ASSERT(*chainp == chain);
2016                 result = *chainp = tmp;
2017
2018                 if (buf->last == chain)
2019                         buf->last = tmp;
2020
2021                 tmp->next = chain->next;
2022                 evbuffer_chain_free(chain);
2023                 goto ok;
2024         }
2025
2026 insert_new:
2027         result = evbuffer_chain_insert_new(buf, datlen);
2028         if (!result)
2029                 goto err;
2030 ok:
2031         EVUTIL_ASSERT(result);
2032         EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
2033 err:
2034         return result;
2035 }
2036
2037 /* Make sure that datlen bytes are available for writing in the last n
2038  * chains.  Never copies or moves data. */
2039 int
2040 evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
2041 {
2042         struct evbuffer_chain *chain = buf->last, *tmp, *next;
2043         size_t avail;
2044         int used;
2045
2046         ASSERT_EVBUFFER_LOCKED(buf);
2047         EVUTIL_ASSERT(n >= 2);
2048
2049         if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
2050                 /* There is no last chunk, or we can't touch the last chunk.
2051                  * Just add a new chunk. */
2052                 chain = evbuffer_chain_new(datlen);
2053                 if (chain == NULL)
2054                         return (-1);
2055
2056                 evbuffer_chain_insert(buf, chain);
2057                 return (0);
2058         }
2059
2060         used = 0; /* number of chains we're using space in. */
2061         avail = 0; /* how much space they have. */
2062         /* How many bytes can we stick at the end of buffer as it is?  Iterate
2063          * over the chains at the end of the buffer, trying to see how much
2064          * space we have in the first n. */
2065         for (chain = *buf->last_with_datap; chain; chain = chain->next) {
2066                 if (chain->off) {
2067                         size_t space = (size_t) CHAIN_SPACE_LEN(chain);
2068                         EVUTIL_ASSERT(chain == *buf->last_with_datap);
2069                         if (space) {
2070                                 avail += space;
2071                                 ++used;
2072                         }
2073                 } else {
2074                         /* No data in chain; realign it. */
2075                         chain->misalign = 0;
2076                         avail += chain->buffer_len;
2077                         ++used;
2078                 }
2079                 if (avail >= datlen) {
2080                         /* There is already enough space.  Just return */
2081                         return (0);
2082                 }
2083                 if (used == n)
2084                         break;
2085         }
2086
2087         /* There wasn't enough space in the first n chains with space in
2088          * them. Either add a new chain with enough space, or replace all
2089          * empty chains with one that has enough space, depending on n. */
2090         if (used < n) {
2091                 /* The loop ran off the end of the chains before it hit n
2092                  * chains; we can add another. */
2093                 EVUTIL_ASSERT(chain == NULL);
2094
2095                 tmp = evbuffer_chain_new(datlen - avail);
2096                 if (tmp == NULL)
2097                         return (-1);
2098
2099                 buf->last->next = tmp;
2100                 buf->last = tmp;
2101                 /* (we would only set last_with_data if we added the first
2102                  * chain. But if the buffer had no chains, we would have
2103                  * just allocated a new chain earlier) */
2104                 return (0);
2105         } else {
2106                 /* Nuke _all_ the empty chains. */
2107                 int rmv_all = 0; /* True iff we removed last_with_data. */
2108                 chain = *buf->last_with_datap;
2109                 if (!chain->off) {
2110                         EVUTIL_ASSERT(chain == buf->first);
2111                         rmv_all = 1;
2112                         avail = 0;
2113                 } else {
2114                         /* can't overflow, since only mutable chains have
2115                          * huge misaligns. */
2116                         avail = (size_t) CHAIN_SPACE_LEN(chain);
2117                         chain = chain->next;
2118                 }
2119
2120
2121                 for (; chain; chain = next) {
2122                         next = chain->next;
2123                         EVUTIL_ASSERT(chain->off == 0);
2124                         evbuffer_chain_free(chain);
2125                 }
2126                 EVUTIL_ASSERT(datlen >= avail);
2127                 tmp = evbuffer_chain_new(datlen - avail);
2128                 if (tmp == NULL) {
2129                         if (rmv_all) {
2130                                 ZERO_CHAIN(buf);
2131                         } else {
2132                                 buf->last = *buf->last_with_datap;
2133                                 (*buf->last_with_datap)->next = NULL;
2134                         }
2135                         return (-1);
2136                 }
2137
2138                 if (rmv_all) {
2139                         buf->first = buf->last = tmp;
2140                         buf->last_with_datap = &buf->first;
2141                 } else {
2142                         (*buf->last_with_datap)->next = tmp;
2143                         buf->last = tmp;
2144                 }
2145                 return (0);
2146         }
2147 }
2148
2149 int
2150 evbuffer_expand(struct evbuffer *buf, size_t datlen)
2151 {
2152         struct evbuffer_chain *chain;
2153
2154         EVBUFFER_LOCK(buf);
2155         chain = evbuffer_expand_singlechain(buf, datlen);
2156         EVBUFFER_UNLOCK(buf);
2157         return chain ? 0 : -1;
2158 }
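
/*
 * Illustrative usage sketch (not part of libevent): reserve space up front
 * with evbuffer_expand() so a burst of small evbuffer_add() calls can fill
 * one chain instead of growing the buffer repeatedly.  The 4096-byte hint
 * and the function name are assumptions.  Compiled out.
 */
#if 0
static int
example_bulk_append(struct evbuffer *out, const char **items, int n_items)
{
        int i;
        if (evbuffer_expand(out, 4096) < 0)     /* a hint, not a hard limit */
                return -1;
        for (i = 0; i < n_items; ++i) {
                if (evbuffer_add(out, items[i], strlen(items[i])) < 0)
                        return -1;
        }
        return 0;
}
#endif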
2159
2160 /*
2161  * Reads data from a file descriptor into a buffer.
2162  */
2163
2164 #if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
2165 #define USE_IOVEC_IMPL
2166 #endif
2167
2168 #ifdef USE_IOVEC_IMPL
2169
2170 #ifdef EVENT__HAVE_SYS_UIO_H
2171 /* number of iovec we use for writev, fragmentation is going to determine
2172  * how much we end up writing */
2173
2174 #define DEFAULT_WRITE_IOVEC 128
2175
2176 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
2177 #define NUM_WRITE_IOVEC UIO_MAXIOV
2178 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
2179 #define NUM_WRITE_IOVEC IOV_MAX
2180 #else
2181 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
2182 #endif
2183
2184 #define IOV_TYPE struct iovec
2185 #define IOV_PTR_FIELD iov_base
2186 #define IOV_LEN_FIELD iov_len
2187 #define IOV_LEN_TYPE size_t
2188 #else
2189 #define NUM_WRITE_IOVEC 16
2190 #define IOV_TYPE WSABUF
2191 #define IOV_PTR_FIELD buf
2192 #define IOV_LEN_FIELD len
2193 #define IOV_LEN_TYPE unsigned long
2194 #endif
2195 #endif
2196 #define NUM_READ_IOVEC 4
2197
2198 #define EVBUFFER_MAX_READ       4096
2199
2200 /** Helper function to figure out which space to use for reading data into
2201     an evbuffer.  Internal use only.
2202
2203     @param buf The buffer to read into
2204     @param howmuch How much we want to read.
2205     @param vecs An array of two or more iovecs or WSABUFs.
2206     @param n_vecs_avail The length of vecs
2207     @param chainp A pointer to a variable to hold the first chain we're
2208       reading into.
2209     @param exact Boolean: if true, we do not provide more than 'howmuch'
2210       space in the vectors, even if more space is available.
2211     @return The number of buffers we're using.
2212  */
2213 int
2214 evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
2215     struct evbuffer_iovec *vecs, int n_vecs_avail,
2216     struct evbuffer_chain ***chainp, int exact)
2217 {
2218         struct evbuffer_chain *chain;
2219         struct evbuffer_chain **firstchainp;
2220         size_t so_far;
2221         int i;
2222         ASSERT_EVBUFFER_LOCKED(buf);
2223
2224         if (howmuch < 0)
2225                 return -1;
2226
2227         so_far = 0;
2228         /* Let firstchain be the first chain with any space on it */
2229         firstchainp = buf->last_with_datap;
2230         if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
2231                 firstchainp = &(*firstchainp)->next;
2232         }
2233
2234         chain = *firstchainp;
2235         for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
2236                 size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
2237                 if (avail > (howmuch - so_far) && exact)
2238                         avail = howmuch - so_far;
2239                 vecs[i].iov_base = (void *)CHAIN_SPACE_PTR(chain);
2240                 vecs[i].iov_len = avail;
2241                 so_far += avail;
2242                 chain = chain->next;
2243         }
2244
2245         *chainp = firstchainp;
2246         return i;
2247 }
2248
2249 static int
2250 get_n_bytes_readable_on_socket(evutil_socket_t fd)
2251 {
2252 #if defined(FIONREAD) && defined(_WIN32)
2253         unsigned long lng = EVBUFFER_MAX_READ;
2254         if (ioctlsocket(fd, FIONREAD, &lng) < 0)
2255                 return -1;
2256         /* Can overflow, but mostly harmlessly. XXXX */
2257         return (int)lng;
2258 #elif defined(FIONREAD)
2259         int n = EVBUFFER_MAX_READ;
2260         if (ioctl(fd, FIONREAD, &n) < 0)
2261                 return -1;
2262         return n;
2263 #else
2264         return EVBUFFER_MAX_READ;
2265 #endif
2266 }
2267
2268 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
2269  * as howmuch? */
2270 int
2271 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
2272 {
2273         struct evbuffer_chain **chainp;
2274         int n;
2275         int result;
2276
2277 #ifdef USE_IOVEC_IMPL
2278         int nvecs, i, remaining;
2279 #else
2280         struct evbuffer_chain *chain;
2281         unsigned char *p;
2282 #endif
2283
2284         EVBUFFER_LOCK(buf);
2285
2286         if (buf->freeze_end) {
2287                 result = -1;
2288                 goto done;
2289         }
2290
2291         n = get_n_bytes_readable_on_socket(fd);
2292         if (n <= 0 || n > EVBUFFER_MAX_READ)
2293                 n = EVBUFFER_MAX_READ;
2294         if (howmuch < 0 || howmuch > n)
2295                 howmuch = n;
2296
2297 #ifdef USE_IOVEC_IMPL
2298         /* Since we can use iovecs, we're willing to use the last
2299          * NUM_READ_IOVEC chains. */
2300         if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
2301                 result = -1;
2302                 goto done;
2303         } else {
2304                 IOV_TYPE vecs[NUM_READ_IOVEC];
2305 #ifdef EVBUFFER_IOVEC_IS_NATIVE_
2306                 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
2307                     NUM_READ_IOVEC, &chainp, 1);
2308 #else
2309                 /* We aren't using the native struct iovec.  Therefore,
2310                    we are on win32. */
2311                 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
2312                 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
2313                     &chainp, 1);
2314
2315                 for (i=0; i < nvecs; ++i)
2316                         WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
2317 #endif
2318
2319 #ifdef _WIN32
2320                 {
2321                         DWORD bytesRead;
2322                         DWORD flags=0;
2323                         if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
2324                                 /* The read failed. It might be a close,
2325                                  * or it might be an error. */
2326                                 if (WSAGetLastError() == WSAECONNABORTED)
2327                                         n = 0;
2328                                 else
2329                                         n = -1;
2330                         } else
2331                                 n = bytesRead;
2332                 }
2333 #else
2334                 n = readv(fd, vecs, nvecs);
2335 #endif
2336         }
2337
2338 #else /*!USE_IOVEC_IMPL*/
2339         /* If we don't have FIONREAD, we might waste some space here */
2340         /* XXX we _will_ waste some space here if there is any space left
2341          * over on buf->last. */
2342         if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
2343                 result = -1;
2344                 goto done;
2345         }
2346
2347         /* We can append new data at this point */
2348         p = chain->buffer + chain->misalign + chain->off;
2349
2350 #ifndef _WIN32
2351         n = read(fd, p, howmuch);
2352 #else
2353         n = recv(fd, p, howmuch, 0);
2354 #endif
2355 #endif /* USE_IOVEC_IMPL */
2356
2357         if (n == -1) {
2358                 result = -1;
2359                 goto done;
2360         }
2361         if (n == 0) {
2362                 result = 0;
2363                 goto done;
2364         }
2365
2366 #ifdef USE_IOVEC_IMPL
2367         remaining = n;
2368         for (i=0; i < nvecs; ++i) {
2369                 /* can't overflow, since only mutable chains have
2370                  * huge misaligns. */
2371                 size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);
2372                 /* XXXX This is a kludge that can waste space in perverse
2373                  * situations. */
2374                 if (space > EVBUFFER_CHAIN_MAX)
2375                         space = EVBUFFER_CHAIN_MAX;
2376                 if ((ev_ssize_t)space < remaining) {
2377                         (*chainp)->off += space;
2378                         remaining -= (int)space;
2379                 } else {
2380                         (*chainp)->off += remaining;
2381                         buf->last_with_datap = chainp;
2382                         break;
2383                 }
2384                 chainp = &(*chainp)->next;
2385         }
2386 #else
2387         chain->off += n;
2388         advance_last_with_data(buf);
2389 #endif
2390         buf->total_len += n;
2391         buf->n_add_for_cb += n;
2392
2393         /* Tell someone about changes in this buffer */
2394         evbuffer_invoke_callbacks_(buf);
2395         result = n;
2396 done:
2397         EVBUFFER_UNLOCK(buf);
2398         return result;
2399 }
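
/*
 * Illustrative usage sketch (not part of libevent): a read-event callback
 * that pulls whatever the socket has ready into an evbuffer with
 * evbuffer_read().  The `input` buffer and the event wiring are assumed to
 * be set up elsewhere by the caller.  Compiled out.
 */
#if 0
static void
example_read_cb(evutil_socket_t fd, short events, void *arg)
{
        struct evbuffer *input = arg;
        /* -1 lets evbuffer_read() pick a size (capped at EVBUFFER_MAX_READ). */
        int n = evbuffer_read(input, fd, -1);
        (void)events;
        if (n <= 0) {
                /* 0: peer closed; -1: error (see EVUTIL_SOCKET_ERROR()). */
                return;
        }
        /* n bytes are now appended at the end of `input`. */
}
#endif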
2400
2401 #ifdef USE_IOVEC_IMPL
2402 static inline int
2403 evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
2404     ev_ssize_t howmuch)
2405 {
2406         IOV_TYPE iov[NUM_WRITE_IOVEC];
2407         struct evbuffer_chain *chain = buffer->first;
2408         int n, i = 0;
2409
2410         if (howmuch < 0)
2411                 return -1;
2412
2413         ASSERT_EVBUFFER_LOCKED(buffer);
2414         /* XXX make this top out at some maximal data length?  if the
2415          * buffer has (say) 1MB in it, split over 128 chains, there's
2416          * no way it all gets written in one go. */
2417         while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
2418 #ifdef USE_SENDFILE
2419                 /* we cannot write the file info via writev */
2420                 if (chain->flags & EVBUFFER_SENDFILE)
2421                         break;
2422 #endif
2423                 iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
2424                 if ((size_t)howmuch >= chain->off) {
2425                         /* XXX could be problematic when windows supports mmap */
2426                         iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
2427                         howmuch -= chain->off;
2428                 } else {
2429                         /* XXX could be problematic when windows supports mmap */
2430                         iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
2431                         break;
2432                 }
2433                 chain = chain->next;
2434         }
2435         if (! i)
2436                 return 0;
2437
2438 #ifdef _WIN32
2439         {
2440                 DWORD bytesSent;
2441                 if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
2442                         n = -1;
2443                 else
2444                         n = bytesSent;
2445         }
2446 #else
2447         n = writev(fd, iov, i);
2448 #endif
2449         return (n);
2450 }
2451 #endif
2452
2453 #ifdef USE_SENDFILE
2454 static inline int
2455 evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
2456     ev_ssize_t howmuch)
2457 {
2458         struct evbuffer_chain *chain = buffer->first;
2459         struct evbuffer_chain_file_segment *info =
2460             EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
2461                 chain);
2462         const int source_fd = info->segment->fd;
2463 #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
2464         int res;
2465         ev_off_t len = chain->off;
2466 #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
2467         ev_ssize_t res;
2468         ev_off_t offset = chain->misalign;
2469 #endif
2470
2471         ASSERT_EVBUFFER_LOCKED(buffer);
2472
2473 #if defined(SENDFILE_IS_MACOSX)
2474         res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
2475         if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
2476                 return (-1);
2477
2478         return (len);
2479 #elif defined(SENDFILE_IS_FREEBSD)
2480         res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
2481         if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
2482                 return (-1);
2483
2484         return (len);
2485 #elif defined(SENDFILE_IS_LINUX)
2486         /* TODO(niels): implement splice */
2487         res = sendfile(dest_fd, source_fd, &offset, chain->off);
2488         if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
2489                 /* if this is EAGAIN or EINTR return 0; otherwise, -1 */
2490                 return (0);
2491         }
2492         return (res);
2493 #elif defined(SENDFILE_IS_SOLARIS)
2494         {
2495                 const off_t offset_orig = offset;
2496                 res = sendfile(dest_fd, source_fd, &offset, chain->off);
2497                 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
2498                         if (offset - offset_orig)
2499                                 return offset - offset_orig;
2500                         /* if this is EAGAIN or EINTR and no bytes were
2501                          * written, return 0 */
2502                         return (0);
2503                 }
2504                 return (res);
2505         }
2506 #endif
2507 }
2508 #endif
2509
2510 int
2511 evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
2512     ev_ssize_t howmuch)
2513 {
2514         int n = -1;
2515
2516         EVBUFFER_LOCK(buffer);
2517
2518         if (buffer->freeze_start) {
2519                 goto done;
2520         }
2521
2522         if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
2523                 howmuch = buffer->total_len;
2524
2525         if (howmuch > 0) {
2526 #ifdef USE_SENDFILE
2527                 struct evbuffer_chain *chain = buffer->first;
2528                 if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
2529                         n = evbuffer_write_sendfile(buffer, fd, howmuch);
2530                 else {
2531 #endif
2532 #ifdef USE_IOVEC_IMPL
2533                 n = evbuffer_write_iovec(buffer, fd, howmuch);
2534 #elif defined(_WIN32)
2535                 /* XXX(nickm) Don't disable this code until we know if
2536                  * the WSARecv code above works. */
2537                 void *p = evbuffer_pullup(buffer, howmuch);
2538                 EVUTIL_ASSERT(p || !howmuch);
2539                 n = send(fd, p, howmuch, 0);
2540 #else
2541                 void *p = evbuffer_pullup(buffer, howmuch);
2542                 EVUTIL_ASSERT(p || !howmuch);
2543                 n = write(fd, p, howmuch);
2544 #endif
2545 #ifdef USE_SENDFILE
2546                 }
2547 #endif
2548         }
2549
2550         if (n > 0)
2551                 evbuffer_drain(buffer, n);
2552
2553 done:
2554         EVBUFFER_UNLOCK(buffer);
2555         return (n);
2556 }
2557
2558 int
2559 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
2560 {
2561         return evbuffer_write_atmost(buffer, fd, -1);
2562 }
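
/*
 * Illustrative usage sketch (not part of libevent): flush as much of an
 * output buffer as the socket will take, using evbuffer_write_atmost() to
 * cap a single syscall.  Written bytes are drained automatically; whatever
 * remains must be flushed later.  The 16384-byte cap is an assumption.
 * Compiled out.
 */
#if 0
static int
example_flush_some(struct evbuffer *output, evutil_socket_t fd)
{
        int n = evbuffer_write_atmost(output, fd, 16384);
        if (n < 0)
                return -1;      /* would-block vs. hard error: check errno/WSAGetLastError */
        return (int)evbuffer_get_length(output);        /* bytes still pending */
}
#endif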
2563
2564 unsigned char *
2565 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
2566 {
2567         unsigned char *search;
2568         struct evbuffer_ptr ptr;
2569
2570         EVBUFFER_LOCK(buffer);
2571
2572         ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
2573         if (ptr.pos < 0) {
2574                 search = NULL;
2575         } else {
2576                 search = evbuffer_pullup(buffer, ptr.pos + len);
2577                 if (search)
2578                         search += ptr.pos;
2579         }
2580         EVBUFFER_UNLOCK(buffer);
2581         return search;
2582 }
2583
2584 /* Subtract <b>howfar</b> from the position of <b>pos</b> within
2585  * <b>buf</b>. Returns 0 on success, -1 on failure.
2586  *
2587  * This isn't exposed yet, because of potential inefficiency issues.
2588  * Maybe it should be. */
2589 static int
2590 evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
2591     size_t howfar)
2592 {
2593         if (pos->pos < 0)
2594                 return -1;
2595         if (howfar > (size_t)pos->pos)
2596                 return -1;
2597         if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
2598                 pos->internal_.pos_in_chain -= howfar;
2599                 pos->pos -= howfar;
2600                 return 0;
2601         } else {
2602                 const size_t newpos = pos->pos - howfar;
2603                 /* Here's the inefficient part: it walks over the
2604                  * chains until we hit newpos. */
2605                 return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
2606         }
2607 }
2608
2609 int
2610 evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
2611     size_t position, enum evbuffer_ptr_how how)
2612 {
2613         size_t left = position;
2614         struct evbuffer_chain *chain = NULL;
2615         int result = 0;
2616
2617         EVBUFFER_LOCK(buf);
2618
2619         switch (how) {
2620         case EVBUFFER_PTR_SET:
2621                 chain = buf->first;
2622                 pos->pos = position;
2623                 position = 0;
2624                 break;
2625         case EVBUFFER_PTR_ADD:
2626                 /* this avoids iterating over all previous chains if
2627                    we just want to advance the position */
2628                 if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
2629                         EVBUFFER_UNLOCK(buf);
2630                         return -1;
2631                 }
2632                 chain = pos->internal_.chain;
2633                 pos->pos += position;
2634                 position = pos->internal_.pos_in_chain;
2635                 break;
2636         }
2637
2638         EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
2639         while (chain && position + left >= chain->off) {
2640                 left -= chain->off - position;
2641                 chain = chain->next;
2642                 position = 0;
2643         }
2644         if (chain) {
2645                 pos->internal_.chain = chain;
2646                 pos->internal_.pos_in_chain = position + left;
2647         } else if (left == 0) {
2648                 /* The first byte in the (nonexistent) chain after the last chain */
2649                 pos->internal_.chain = NULL;
2650                 pos->internal_.pos_in_chain = 0;
2651         } else {
2652                 PTR_NOT_FOUND(pos);
2653                 result = -1;
2654         }
2655
2656         EVBUFFER_UNLOCK(buf);
2657
2658         return result;
2659 }
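
/*
 * Illustrative usage sketch (not part of libevent): position an
 * evbuffer_ptr at an absolute offset and then advance it, mirroring the
 * EVBUFFER_PTR_SET / EVBUFFER_PTR_ADD cases above.  Compiled out.
 */
#if 0
static int
example_seek_and_skip(struct evbuffer *buf, size_t offset, size_t skip,
    struct evbuffer_ptr *out)
{
        if (evbuffer_ptr_set(buf, out, offset, EVBUFFER_PTR_SET) < 0)
                return -1;      /* offset is past the end of the buffer */
        /* EVBUFFER_PTR_ADD is relative to the pointer's current position. */
        return evbuffer_ptr_set(buf, out, skip, EVBUFFER_PTR_ADD);
}
#endif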
2660
2661 /**
2662    Compare the bytes in buf at position pos to the len bytes in mem.  Return
2663    less than 0, 0, or greater than 0, as memcmp would.
2664  */
2665 static int
2666 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
2667     const char *mem, size_t len)
2668 {
2669         struct evbuffer_chain *chain;
2670         size_t position;
2671         int r;
2672
2673         ASSERT_EVBUFFER_LOCKED(buf);
2674
2675         if (pos->pos < 0 ||
2676             EV_SIZE_MAX - len < (size_t)pos->pos ||
2677             pos->pos + len > buf->total_len)
2678                 return -1;
2679
2680         chain = pos->internal_.chain;
2681         position = pos->internal_.pos_in_chain;
2682         while (len && chain) {
2683                 size_t n_comparable;
2684                 if (len + position > chain->off)
2685                         n_comparable = chain->off - position;
2686                 else
2687                         n_comparable = len;
2688                 r = memcmp(chain->buffer + chain->misalign + position, mem,
2689                     n_comparable);
2690                 if (r)
2691                         return r;
2692                 mem += n_comparable;
2693                 len -= n_comparable;
2694                 position = 0;
2695                 chain = chain->next;
2696         }
2697
2698         return 0;
2699 }
2700
2701 struct evbuffer_ptr
2702 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
2703 {
2704         return evbuffer_search_range(buffer, what, len, start, NULL);
2705 }
2706
2707 struct evbuffer_ptr
2708 evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
2709 {
2710         struct evbuffer_ptr pos;
2711         struct evbuffer_chain *chain, *last_chain = NULL;
2712         const unsigned char *p;
2713         char first;
2714
2715         EVBUFFER_LOCK(buffer);
2716
2717         if (start) {
2718                 memcpy(&pos, start, sizeof(pos));
2719                 chain = pos.internal_.chain;
2720         } else {
2721                 pos.pos = 0;
2722                 chain = pos.internal_.chain = buffer->first;
2723                 pos.internal_.pos_in_chain = 0;
2724         }
2725
2726         if (end)
2727                 last_chain = end->internal_.chain;
2728
2729         if (!len || len > EV_SSIZE_MAX)
2730                 goto done;
2731
2732         first = what[0];
2733
2734         while (chain) {
2735                 const unsigned char *start_at =
2736                     chain->buffer + chain->misalign +
2737                     pos.internal_.pos_in_chain;
2738                 p = memchr(start_at, first,
2739                     chain->off - pos.internal_.pos_in_chain);
2740                 if (p) {
2741                         pos.pos += p - start_at;
2742                         pos.internal_.pos_in_chain += p - start_at;
2743                         if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
2744                                 if (end && pos.pos + (ev_ssize_t)len > end->pos)
2745                                         goto not_found;
2746                                 else
2747                                         goto done;
2748                         }
2749                         ++pos.pos;
2750                         ++pos.internal_.pos_in_chain;
2751                         if (pos.internal_.pos_in_chain == chain->off) {
2752                                 chain = pos.internal_.chain = chain->next;
2753                                 pos.internal_.pos_in_chain = 0;
2754                         }
2755                 } else {
2756                         if (chain == last_chain)
2757                                 goto not_found;
2758                         pos.pos += chain->off - pos.internal_.pos_in_chain;
2759                         chain = pos.internal_.chain = chain->next;
2760                         pos.internal_.pos_in_chain = 0;
2761                 }
2762         }
2763
2764 not_found:
2765         PTR_NOT_FOUND(&pos);
2766 done:
2767         EVBUFFER_UNLOCK(buffer);
2768         return pos;
2769 }
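
/*
 * Illustrative usage sketch (not part of libevent): look for a delimiter,
 * but only within the first `limit` bytes, by bounding the search with an
 * end pointer for evbuffer_search_range().  Names are hypothetical.
 * Compiled out.
 */
#if 0
static ev_ssize_t
example_find_delim(struct evbuffer *buf, const char *delim, size_t delim_len,
    size_t limit)
{
        struct evbuffer_ptr end, hit;
        const struct evbuffer_ptr *endp = NULL;
        if (evbuffer_ptr_set(buf, &end, limit, EVBUFFER_PTR_SET) == 0)
                endp = &end;    /* otherwise the buffer is shorter than limit */
        hit = evbuffer_search_range(buf, delim, delim_len, NULL, endp);
        return hit.pos;         /* offset of the match, or -1 if none */
}
#endif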
2770
2771 int
2772 evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
2773     struct evbuffer_ptr *start_at,
2774     struct evbuffer_iovec *vec, int n_vec)
2775 {
2776         struct evbuffer_chain *chain;
2777         int idx = 0;
2778         ev_ssize_t len_so_far = 0;
2779
2780         /* Avoid locking in trivial edge cases */
2781         if (start_at && start_at->internal_.chain == NULL)
2782                 return 0;
2783
2784         EVBUFFER_LOCK(buffer);
2785
2786         if (start_at) {
2787                 chain = start_at->internal_.chain;
2788                 len_so_far = chain->off
2789                     - start_at->internal_.pos_in_chain;
2790                 idx = 1;
2791                 if (n_vec > 0) {
2792                         vec[0].iov_base = (void *)(chain->buffer + chain->misalign
2793                             + start_at->internal_.pos_in_chain);
2794                         vec[0].iov_len = len_so_far;
2795                 }
2796                 chain = chain->next;
2797         } else {
2798                 chain = buffer->first;
2799         }
2800
2801         if (n_vec == 0 && len < 0) {
2802                 /* If no vectors are provided and they asked for "everything",
2803                  * pretend they asked for the actual available amount. */
2804                 len = buffer->total_len;
2805                 if (start_at) {
2806                         len -= start_at->pos;
2807                 }
2808         }
2809
2810         while (chain) {
2811                 if (len >= 0 && len_so_far >= len)
2812                         break;
2813                 if (idx<n_vec) {
2814                         vec[idx].iov_base = (void *)(chain->buffer + chain->misalign);
2815                         vec[idx].iov_len = chain->off;
2816                 } else if (len<0) {
2817                         break;
2818                 }
2819                 ++idx;
2820                 len_so_far += chain->off;
2821                 chain = chain->next;
2822         }
2823
2824         EVBUFFER_UNLOCK(buffer);
2825
2826         return idx;
2827 }
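
/*
 * Illustrative usage sketch (not part of libevent): inspect the first
 * `want` bytes of a buffer without copying or draining them.  A first
 * evbuffer_peek() call with no vectors reports how many iovecs are needed;
 * a second call fills them in.  Assumes the buffer is not modified between
 * the two calls.  Compiled out.
 */
#if 0
static int
example_peek(struct evbuffer *buf, ev_ssize_t want)
{
        int i, n_filled, n = evbuffer_peek(buf, want, NULL, NULL, 0);
        struct evbuffer_iovec *v;
        if (n <= 0)
                return n;
        v = mm_calloc(n, sizeof(struct evbuffer_iovec));
        if (!v)
                return -1;
        n_filled = evbuffer_peek(buf, want, NULL, v, n);
        for (i = 0; i < n_filled && i < n; ++i) {
                /* v[i].iov_base / v[i].iov_len point into the buffer's
                 * chains; the memory stays owned by the evbuffer. */
        }
        mm_free(v);
        return n_filled;
}
#endif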
2828
2829
2830 int
2831 evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
2832 {
2833         char *buffer;
2834         size_t space;
2835         int sz, result = -1;
2836         va_list aq;
2837         struct evbuffer_chain *chain;
2838
2839
2840         EVBUFFER_LOCK(buf);
2841
2842         if (buf->freeze_end) {
2843                 goto done;
2844         }
2845
2846         /* make sure that at least some space is available */
2847         if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
2848                 goto done;
2849
2850         for (;;) {
2851 #if 0
2852                 size_t used = chain->misalign + chain->off;
2853                 buffer = (char *)chain->buffer + chain->misalign + chain->off;
2854                 EVUTIL_ASSERT(chain->buffer_len >= used);
2855                 space = chain->buffer_len - used;
2856 #endif
2857                 buffer = (char*) CHAIN_SPACE_PTR(chain);
2858                 space = (size_t) CHAIN_SPACE_LEN(chain);
2859
2860 #ifndef va_copy
2861 #define va_copy(dst, src)       memcpy(&(dst), &(src), sizeof(va_list))
2862 #endif
2863                 va_copy(aq, ap);
2864
2865                 sz = evutil_vsnprintf(buffer, space, fmt, aq);
2866
2867                 va_end(aq);
2868
2869                 if (sz < 0)
2870                         goto done;
2871                 if (INT_MAX >= EVBUFFER_CHAIN_MAX &&
2872                     (size_t)sz >= EVBUFFER_CHAIN_MAX)
2873                         goto done;
2874                 if ((size_t)sz < space) {
2875                         chain->off += sz;
2876                         buf->total_len += sz;
2877                         buf->n_add_for_cb += sz;
2878
2879                         advance_last_with_data(buf);
2880                         evbuffer_invoke_callbacks_(buf);
2881                         result = sz;
2882                         goto done;
2883                 }
2884                 if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
2885                         goto done;
2886         }
2887         /* NOTREACHED */
2888
2889 done:
2890         EVBUFFER_UNLOCK(buf);
2891         return result;
2892 }
2893
2894 int
2895 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
2896 {
2897         int res = -1;
2898         va_list ap;
2899
2900         va_start(ap, fmt);
2901         res = evbuffer_add_vprintf(buf, fmt, ap);
2902         va_end(ap);
2903
2904         return (res);
2905 }
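
/*
 * Illustrative usage sketch (not part of libevent): format a simple status
 * line directly into an output buffer.  evbuffer_add_printf() returns the
 * number of bytes appended, or -1 on error.  Compiled out.
 */
#if 0
static int
example_add_status_line(struct evbuffer *out, int code, const char *reason)
{
        return evbuffer_add_printf(out, "HTTP/1.1 %d %s\r\n", code, reason);
}
#endif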
2906
2907 int
2908 evbuffer_add_reference(struct evbuffer *outbuf,
2909     const void *data, size_t datlen,
2910     evbuffer_ref_cleanup_cb cleanupfn, void *extra)
2911 {
2912         struct evbuffer_chain *chain;
2913         struct evbuffer_chain_reference *info;
2914         int result = -1;
2915
2916         chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
2917         if (!chain)
2918                 return (-1);
2919         chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
2920         chain->buffer = (unsigned char *)data;
2921         chain->buffer_len = datlen;
2922         chain->off = datlen;
2923
2924         info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
2925         info->cleanupfn = cleanupfn;
2926         info->extra = extra;
2927
2928         EVBUFFER_LOCK(outbuf);
2929         if (outbuf->freeze_end) {
2930                 /* don't call chain_free; we do not want to actually invoke
2931                  * the cleanup function */
2932                 mm_free(chain);
2933                 goto done;
2934         }
2935         evbuffer_chain_insert(outbuf, chain);
2936         outbuf->n_add_for_cb += datlen;
2937
2938         evbuffer_invoke_callbacks_(outbuf);
2939
2940         result = 0;
2941 done:
2942         EVBUFFER_UNLOCK(outbuf);
2943
2944         return result;
2945 }
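
/*
 * Illustrative usage sketch (not part of libevent): hand a caller-owned,
 * heap-allocated block to an evbuffer by reference instead of copying it,
 * and release it once the buffer no longer needs it.  The example assumes
 * the block was obtained with malloc().  Compiled out.
 */
#if 0
static void
example_ref_cleanup(const void *data, size_t datalen, void *extra)
{
        (void)datalen;
        (void)extra;
        free((void *)data);     /* the block was malloc'd by the caller */
}

static int
example_add_by_reference(struct evbuffer *out, void *block, size_t len)
{
        /* `block` must stay valid until example_ref_cleanup() runs. */
        return evbuffer_add_reference(out, block, len, example_ref_cleanup, NULL);
}
#endif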
2946
2947 /* TODO(niels): we may want to automagically convert to mmap, in
2948  * case evbuffer_remove() or evbuffer_pullup() are being used.
2949  */
2950 struct evbuffer_file_segment *
2951 evbuffer_file_segment_new(
2952         int fd, ev_off_t offset, ev_off_t length, unsigned flags)
2953 {
2954         struct evbuffer_file_segment *seg =
2955             mm_calloc(sizeof(struct evbuffer_file_segment), 1);
2956         if (!seg)
2957                 return NULL;
2958         seg->refcnt = 1;
2959         seg->fd = fd;
2960         seg->flags = flags;
2961         seg->file_offset = offset;
2962         seg->cleanup_cb = NULL;
2963         seg->cleanup_cb_arg = NULL;
2964 #ifdef _WIN32
2965 #ifndef lseek
2966 #define lseek _lseeki64
2967 #endif
2968 #ifndef fstat
2969 #define fstat _fstat
2970 #endif
2971 #ifndef stat
2972 #define stat _stat
2973 #endif
2974 #endif
2975         if (length == -1) {
2976                 struct stat st;
2977                 if (fstat(fd, &st) < 0)
2978                         goto err;
2979                 length = st.st_size;
2980         }
2981         seg->length = length;
2982
2983         if (offset < 0 || length < 0 ||
2984             ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) ||
2985             (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length))
2986                 goto err;
2987
2988 #if defined(USE_SENDFILE)
2989         if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) {
2990                 seg->can_sendfile = 1;
2991                 goto done;
2992         }
2993 #endif
2994
2995         if (evbuffer_file_segment_materialize(seg)<0)
2996                 goto err;
2997
2998 #if defined(USE_SENDFILE)
2999 done:
3000 #endif
3001         if (!(flags & EVBUF_FS_DISABLE_LOCKING)) {
3002                 EVTHREAD_ALLOC_LOCK(seg->lock, 0);
3003         }
3004         return seg;
3005 err:
3006         mm_free(seg);
3007         return NULL;
3008 }
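
/*
 * Illustrative usage sketch (not part of libevent): queue a whole file for
 * output through a file segment.  The segment is reference-counted, so the
 * caller's reference can be dropped right after it has been added to the
 * buffer.  Compiled out.
 */
#if 0
static int
example_add_whole_file(struct evbuffer *out, int fd)
{
        struct evbuffer_file_segment *seg =
            evbuffer_file_segment_new(fd, 0, -1, EVBUF_FS_CLOSE_ON_FREE);
        int r;
        if (!seg)
                return -1;
        r = evbuffer_add_file_segment(out, seg, 0, -1);
        evbuffer_file_segment_free(seg);        /* drops our reference only */
        return r;
}
#endif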
3009
3010 #ifdef EVENT__HAVE_MMAP
3011 static long
3012 get_page_size(void)
3013 {
3014 #ifdef SC_PAGE_SIZE
3015         return sysconf(SC_PAGE_SIZE);
3016 #elif defined(_SC_PAGE_SIZE)
3017         return sysconf(_SC_PAGE_SIZE);
3018 #else
3019         return 1;
3020 #endif
3021 }
3022 #endif
3023
3024 /* DOCDOC */
3025 /* Requires lock */
3026 static int
3027 evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg)
3028 {
3029         const unsigned flags = seg->flags;
3030         const int fd = seg->fd;
3031         const ev_off_t length = seg->length;
3032         const ev_off_t offset = seg->file_offset;
3033
3034         if (seg->contents)
3035                 return 0; /* already materialized */
3036
3037 #if defined(EVENT__HAVE_MMAP)
3038         if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
3039                 off_t offset_rounded = 0, offset_leftover = 0;
3040                 void *mapped;
3041                 if (offset) {
3042                         /* mmap implementations don't generally like us
3043                          * to have an offset that isn't a round multiple of the page size. */
3044                         long page_size = get_page_size();
3045                         if (page_size == -1)
3046                                 goto err;
3047                         offset_leftover = offset % page_size;
3048                         offset_rounded = offset - offset_leftover;
3049                 }
3050                 mapped = mmap(NULL, length + offset_leftover,
3051                     PROT_READ,
3052 #ifdef MAP_NOCACHE
3053                     MAP_NOCACHE | /* ??? */
3054 #endif
3055 #ifdef MAP_FILE
3056                     MAP_FILE |
3057 #endif
3058                     MAP_PRIVATE,
3059                     fd, offset_rounded);
3060                 if (mapped == MAP_FAILED) {
3061                         event_warn("%s: mmap(%d, %d, %zu) failed",
3062                             __func__, fd, 0, (size_t)(offset + length));
3063                 } else {
3064                         seg->mapping = mapped;
3065                         seg->contents = (char*)mapped+offset_leftover;
3066                         seg->mmap_offset = 0;
3067                         seg->is_mapping = 1;
3068                         goto done;
3069                 }
3070         }
3071 #endif
3072 #ifdef _WIN32
3073         if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
3074                 intptr_t h = _get_osfhandle(fd);
3075                 HANDLE m;
3076                 ev_uint64_t total_size = length+offset;
3077                 if ((HANDLE)h == INVALID_HANDLE_VALUE)
3078                         goto err;
3079                 m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,
3080                     (total_size >> 32), total_size & 0xfffffffful,
3081                     NULL);
3082                 if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */
3083                         seg->mapping_handle = m;
3084                         seg->mmap_offset = offset;
3085                         seg->is_mapping = 1;
3086                         goto done;
3087                 }
3088         }
3089 #endif
3090         {
3091                 ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos;
3092                 ev_off_t read_so_far = 0;
3093                 char *mem;
3094                 int e;
3095                 ev_ssize_t n = 0;
3096                 if (!(mem = mm_malloc(length)))
3097                         goto err;
3098                 if (start_pos < 0) {
3099                         mm_free(mem);
3100                         goto err;
3101                 }
3102                 if (lseek(fd, offset, SEEK_SET) < 0) {
3103                         mm_free(mem);
3104                         goto err;
3105                 }
3106                 while (read_so_far < length) {
3107                         n = read(fd, mem+read_so_far, length-read_so_far);
3108                         if (n <= 0)
3109                                 break;
3110                         read_so_far += n;
3111                 }
3112
3113                 e = errno;
3114                 pos = lseek(fd, start_pos, SEEK_SET);
3115                 if (n < 0 || (n == 0 && length > read_so_far)) {
3116                         mm_free(mem);
3117                         errno = e;
3118                         goto err;
3119                 } else if (pos < 0) {
3120                         mm_free(mem);
3121                         goto err;
3122                 }
3123
3124                 seg->contents = mem;
3125         }
3126
3127 done:
3128         return 0;
3129 err:
3130         return -1;
3131 }
3132
3133 void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,
3134         evbuffer_file_segment_cleanup_cb cb, void* arg)
3135 {
3136         EVUTIL_ASSERT(seg->refcnt > 0);
3137         seg->cleanup_cb = cb;
3138         seg->cleanup_cb_arg = arg;
3139 }
3140
3141 void
3142 evbuffer_file_segment_free(struct evbuffer_file_segment *seg)
3143 {
3144         int refcnt;
3145         EVLOCK_LOCK(seg->lock, 0);
3146         refcnt = --seg->refcnt;
3147         EVLOCK_UNLOCK(seg->lock, 0);
3148         if (refcnt > 0)
3149                 return;
3150         EVUTIL_ASSERT(refcnt == 0);
3151
3152         if (seg->is_mapping) {
3153 #ifdef _WIN32
3154                 CloseHandle(seg->mapping_handle);
3155 #elif defined (EVENT__HAVE_MMAP)
3156                 off_t offset_leftover;
3157                 offset_leftover = seg->file_offset % get_page_size();
3158                 if (munmap(seg->mapping, seg->length + offset_leftover) == -1)
3159                         event_warn("%s: munmap failed", __func__);
3160 #endif
3161         } else if (seg->contents) {
3162                 mm_free(seg->contents);
3163         }
3164
3165         if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) {
3166                 close(seg->fd);
3167         }
3168
3169         if (seg->cleanup_cb) {
3170                 (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg,
3171                     seg->flags, seg->cleanup_cb_arg);
3172                 seg->cleanup_cb = NULL;
3173                 seg->cleanup_cb_arg = NULL;
3174         }
3175
3176         EVTHREAD_FREE_LOCK(seg->lock, 0);
3177         mm_free(seg);
3178 }
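/*
 * Note on evbuffer_file_segment_free(): the call itself only drops the
 * reference count under the segment lock.  The actual teardown (unmapping
 * or freeing the contents, closing the fd when EVBUF_FS_CLOSE_ON_FREE is
 * set, running the cleanup callback, freeing the lock and the struct)
 * happens only once the count reaches zero.  Each chain created by
 * evbuffer_add_file_segment() holds its own reference, so callers may
 * free their handle as soon as they are done adding the segment.
 */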
3179
3180 int
3181 evbuffer_add_file_segment(struct evbuffer *buf,
3182     struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length)
3183 {
3184         struct evbuffer_chain *chain;
3185         struct evbuffer_chain_file_segment *extra;
3186         int can_use_sendfile = 0;
3187
3188         EVBUFFER_LOCK(buf);
3189         EVLOCK_LOCK(seg->lock, 0);
3190         if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) {
3191                 can_use_sendfile = 1;
3192         } else {
3193                 if (!seg->contents) {
3194                         if (evbuffer_file_segment_materialize(seg)<0) {
3195                                 EVLOCK_UNLOCK(seg->lock, 0);
3196                                 EVBUFFER_UNLOCK(buf);
3197                                 return -1;
3198                         }
3199                 }
3200         }
3201         ++seg->refcnt;
3202         EVLOCK_UNLOCK(seg->lock, 0);
3203
3204         if (buf->freeze_end)
3205                 goto err;
3206
3207         if (length < 0) {
3208                 if (offset > seg->length)
3209                         goto err;
3210                 length = seg->length - offset;
3211         }
3212
3213         /* Can we actually add this? */
3214         if (offset+length > seg->length)
3215                 goto err;
3216
3217         chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment));
3218         if (!chain)
3219                 goto err;
3220         extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain);
3221
3222         chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT;
3223         if (can_use_sendfile && seg->can_sendfile) {
3224                 chain->flags |= EVBUFFER_SENDFILE;
3225                 chain->misalign = seg->file_offset + offset;
3226                 chain->off = length;
3227                 chain->buffer_len = chain->misalign + length;
3228         } else if (seg->is_mapping) {
3229 #ifdef _WIN32
3230                 ev_uint64_t total_offset = seg->mmap_offset+offset;
3231                 ev_uint64_t offset_rounded=0, offset_remaining=0;
3232                 LPVOID data;
3233                 if (total_offset) {
3234                         SYSTEM_INFO si;
3235                         memset(&si, 0, sizeof(si)); /* cargo cult */
3236                         GetSystemInfo(&si);
3237                         offset_remaining = total_offset % si.dwAllocationGranularity;
3238                         offset_rounded = total_offset - offset_remaining;
3239                 }
3240                 data = MapViewOfFile(
3241                         seg->mapping_handle,
3242                         FILE_MAP_READ,
3243                         offset_rounded >> 32,
3244                         offset_rounded & 0xfffffffful,
3245                         length + offset_remaining);
3246                 if (data == NULL) {
3247                         mm_free(chain);
3248                         goto err;
3249                 }
3250                 chain->buffer = (unsigned char*) data;
3251                 chain->buffer_len = length+offset_remaining;
3252                 chain->misalign = offset_remaining;
3253                 chain->off = length;
3254 #else
3255                 chain->buffer = (unsigned char*)(seg->contents + offset);
3256                 chain->buffer_len = length;
3257                 chain->off = length;
3258 #endif
3259         } else {
3260                 chain->buffer = (unsigned char*)(seg->contents + offset);
3261                 chain->buffer_len = length;
3262                 chain->off = length;
3263         }
3264
3265         extra->segment = seg;
3266         buf->n_add_for_cb += length;
3267         evbuffer_chain_insert(buf, chain);
3268
3269         evbuffer_invoke_callbacks_(buf);
3270
3271         EVBUFFER_UNLOCK(buf);
3272
3273         return 0;
3274 err:
3275         EVBUFFER_UNLOCK(buf);
3276         evbuffer_file_segment_free(seg); /* Lowers the refcount */
3277         return -1;
3278 }
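/*
 * Illustrative sketch of sharing one segment between buffers; "out1",
 * "out2" and the 4096-byte split are hypothetical:
 *
 *     struct evbuffer_file_segment *seg =
 *         evbuffer_file_segment_new(fd, 0, -1, 0);
 *     if (!seg)
 *             return -1;
 *     // First 4096 bytes into one buffer, the remainder into another.
 *     if (evbuffer_add_file_segment(out1, seg, 0, 4096) < 0 ||
 *         evbuffer_add_file_segment(out2, seg, 4096, -1) < 0)
 *             return -1;
 *     // Drop our own reference; the buffers keep theirs.
 *     evbuffer_file_segment_free(seg);
 */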
3279
3280 int
3281 evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length)
3282 {
3283         struct evbuffer_file_segment *seg;
3284         unsigned flags = EVBUF_FS_CLOSE_ON_FREE;
3285         int r;
3286
3287         seg = evbuffer_file_segment_new(fd, offset, length, flags);
3288         if (!seg)
3289                 return -1;
3290         r = evbuffer_add_file_segment(buf, seg, 0, length);
3291         if (r == 0)
3292                 evbuffer_file_segment_free(seg);
3293         return r;
3294 }
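/*
 * Minimal usage sketch for evbuffer_add_file(); the path is hypothetical
 * and error handling is abbreviated.  On success the fd belongs to the
 * evbuffer (the segment is created with EVBUF_FS_CLOSE_ON_FREE) and is
 * closed once the data is no longer needed, so the caller must not close
 * or reuse it afterwards.
 *
 *     int fd = open("/var/tmp/reply.bin", O_RDONLY);   // needs <fcntl.h>
 *     if (fd >= 0 && evbuffer_add_file(output, fd, 0, -1) == 0) {
 *             // fd is now owned by "output" and closed when drained.
 *     }
 */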
3295
3296 void
3297 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
3298 {
3299         EVBUFFER_LOCK(buffer);
3300
3301         if (!LIST_EMPTY(&buffer->callbacks))
3302                 evbuffer_remove_all_callbacks(buffer);
3303
3304         if (cb) {
3305                 struct evbuffer_cb_entry *ent =
3306                     evbuffer_add_cb(buffer, NULL, cbarg);
3307                 ent->cb.cb_obsolete = cb;
3308                 ent->flags |= EVBUFFER_CB_OBSOLETE;
3309         }
3310         EVBUFFER_UNLOCK(buffer);
3311 }
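/*
 * Note on evbuffer_setcb(): this is the obsolete single-callback
 * interface from event2/buffer_compat.h.  It removes any existing
 * callbacks and installs one entry flagged EVBUFFER_CB_OBSOLETE, so the
 * old-style evbuffer_cb signature is used when it fires.  New code
 * should prefer evbuffer_add_cb() below.  Note also that the entry
 * returned by evbuffer_add_cb() is not checked for NULL here, so an
 * allocation failure would be dereferenced.
 */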
3312
3313 struct evbuffer_cb_entry *
3314 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
3315 {
3316         struct evbuffer_cb_entry *e;
3317         if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
3318                 return NULL;
3319         EVBUFFER_LOCK(buffer);
3320         e->cb.cb_func = cb;
3321         e->cbarg = cbarg;
3322         e->flags = EVBUFFER_CB_ENABLED;
3323         LIST_INSERT_HEAD(&buffer->callbacks, e, next);
3324         EVBUFFER_UNLOCK(buffer);
3325         return e;
3326 }
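/*
 * Illustrative sketch of the current callback interface; count_cb and
 * the byte counter are hypothetical:
 *
 *     static void
 *     count_cb(struct evbuffer *buf, const struct evbuffer_cb_info *info,
 *         void *arg)
 *     {
 *             size_t *total = arg;
 *             *total += info->n_added;   // bytes added since the last callback
 *     }
 *
 *     size_t total = 0;
 *     struct evbuffer_cb_entry *ent = evbuffer_add_cb(buf, count_cb, &total);
 *     ...
 *     evbuffer_remove_cb(buf, count_cb, &total);
 *     // or: evbuffer_remove_cb_entry(buf, ent);
 */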
3327
3328 int
3329 evbuffer_remove_cb_entry(struct evbuffer *buffer,
3330                          struct evbuffer_cb_entry *ent)
3331 {
3332         EVBUFFER_LOCK(buffer);
3333         LIST_REMOVE(ent, next);
3334         EVBUFFER_UNLOCK(buffer);
3335         mm_free(ent);
3336         return 0;
3337 }
3338
3339 int
3340 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
3341 {
3342         struct evbuffer_cb_entry *cbent;
3343         int result = -1;
3344         EVBUFFER_LOCK(buffer);
3345         LIST_FOREACH(cbent, &buffer->callbacks, next) {
3346                 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
3347                         result = evbuffer_remove_cb_entry(buffer, cbent);
3348                         goto done;
3349                 }
3350         }
3351 done:
3352         EVBUFFER_UNLOCK(buffer);
3353         return result;
3354 }
3355
3356 int
3357 evbuffer_cb_set_flags(struct evbuffer *buffer,
3358                       struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3359 {
3360         /* the user isn't allowed to mess with these. */
3361         flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3362         EVBUFFER_LOCK(buffer);
3363         cb->flags |= flags;
3364         EVBUFFER_UNLOCK(buffer);
3365         return 0;
3366 }
3367
3368 int
3369 evbuffer_cb_clear_flags(struct evbuffer *buffer,
3370                       struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3371 {
3372         /* the user isn't allowed to mess with these. */
3373         flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3374         EVBUFFER_LOCK(buffer);
3375         cb->flags &= ~flags;
3376         EVBUFFER_UNLOCK(buffer);
3377         return 0;
3378 }
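/*
 * EVBUFFER_CB_ENABLED is the public flag these two helpers are normally
 * used with; a disabled entry stays registered but is skipped when the
 * buffer's callbacks run.  Illustrative sketch, assuming "ent" came from
 * evbuffer_add_cb():
 *
 *     evbuffer_cb_clear_flags(buf, ent, EVBUFFER_CB_ENABLED);  // pause
 *     ... bulk-modify the buffer without callback overhead ...
 *     evbuffer_cb_set_flags(buf, ent, EVBUFFER_CB_ENABLED);    // resume
 */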
3379
3380 int
3381 evbuffer_freeze(struct evbuffer *buffer, int start)
3382 {
3383         EVBUFFER_LOCK(buffer);
3384         if (start)
3385                 buffer->freeze_start = 1;
3386         else
3387                 buffer->freeze_end = 1;
3388         EVBUFFER_UNLOCK(buffer);
3389         return 0;
3390 }
3391
3392 int
3393 evbuffer_unfreeze(struct evbuffer *buffer, int start)
3394 {
3395         EVBUFFER_LOCK(buffer);
3396         if (start)
3397                 buffer->freeze_start = 0;
3398         else
3399                 buffer->freeze_end = 0;
3400         EVBUFFER_UNLOCK(buffer);
3401         return 0;
3402 }
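/*
 * Note on evbuffer_freeze()/evbuffer_unfreeze(): freezing the front
 * (start != 0) makes operations that drain or prepend data fail;
 * freezing the end makes operations that append data fail, until the
 * corresponding unfreeze.  Illustrative sketch with a hypothetical
 * "out" buffer and body_len:
 *
 *     evbuffer_freeze(out, 1);     // nothing may drain the front yet
 *     evbuffer_add_printf(out, "Content-Length: %zu\r\n\r\n", body_len);
 *     evbuffer_unfreeze(out, 1);
 */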
3403
3404 #if 0
3405 void
3406 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
3407 {
3408         if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
3409                 cb->size_before_suspend = evbuffer_get_length(buffer);
3410                 cb->flags |= EVBUFFER_CB_SUSPENDED;
3411         }
3412 }
3413
3414 void
3415 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
3416 {
3417         if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
3418                 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
3419                 size_t sz = cb->size_before_suspend;
3420                 cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
3421                                EVBUFFER_CB_CALL_ON_UNSUSPEND);
3422                 cb->size_before_suspend = 0;
3423                 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
3424                         cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
3425                 }
3426         }
3427 }
3428 #endif
3429
3430 int
3431 evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs,
3432     int max_cbs)
3433 {
3434         int r = 0;
3435         EVBUFFER_LOCK(buffer);
3436         if (buffer->deferred_cbs) {
3437                 if (max_cbs < 1) {
3438                         r = -1;
3439                         goto done;
3440                 }
3441                 cbs[0] = &buffer->deferred;
3442                 r = 1;
3443         }
3444 done:
3445         EVBUFFER_UNLOCK(buffer);
3446         return r;
3447 }
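/*
 * Note on evbuffer_get_callbacks_(): the trailing underscore marks this
 * as an internal helper.  When deferred callbacks are enabled it stores
 * the buffer's deferred event_callback in cbs[0] and returns 1; it
 * returns 0 when callbacks are not deferred, and -1 if max_cbs is less
 * than 1.
 */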