/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "apr.h"
#include "apr_lib.h"
#include "apr_strings.h"
#include "apr_pools.h"
#include "apr_tables.h"
#include "apr_buckets.h"
#include "apr_errno.h"
#define APR_WANT_MEMFUNC
#define APR_WANT_STRFUNC
#include "apr_want.h"

#if APR_HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif
32 static apr_status_t brigade_cleanup(void *data)
34 return apr_brigade_cleanup(data);
37 APU_DECLARE(apr_status_t) apr_brigade_cleanup(void *data)
39 apr_bucket_brigade *b = data;
42 while (!APR_BRIGADE_EMPTY(b)) {
43 e = APR_BRIGADE_FIRST(b);
46 /* We don't need to free(bb) because it's allocated from a pool. */
50 APU_DECLARE(apr_status_t) apr_brigade_destroy(apr_bucket_brigade *b)
52 apr_pool_cleanup_kill(b->p, b, brigade_cleanup);
53 return apr_brigade_cleanup(b);
56 APU_DECLARE(apr_bucket_brigade *) apr_brigade_create(apr_pool_t *p,
57 apr_bucket_alloc_t *list)
59 apr_bucket_brigade *b;
61 b = apr_palloc(p, sizeof(*b));
63 b->bucket_alloc = list;
65 APR_RING_INIT(&b->list, apr_bucket, link);
67 apr_pool_cleanup_register(b->p, b, brigade_cleanup, apr_pool_cleanup_null);
71 APU_DECLARE(apr_bucket_brigade *) apr_brigade_split_ex(apr_bucket_brigade *b,
73 apr_bucket_brigade *a)
78 a = apr_brigade_create(b->p, b->bucket_alloc);
80 else if (!APR_BRIGADE_EMPTY(a)) {
81 apr_brigade_cleanup(a);
83 /* Return an empty brigade if there is nothing left in
84 * the first brigade to split off
86 if (e != APR_BRIGADE_SENTINEL(b)) {
87 f = APR_RING_LAST(&b->list);
88 APR_RING_UNSPLICE(e, f, link);
89 APR_RING_SPLICE_HEAD(&a->list, e, f, apr_bucket, link);
92 APR_BRIGADE_CHECK_CONSISTENCY(a);
93 APR_BRIGADE_CHECK_CONSISTENCY(b);
98 APU_DECLARE(apr_bucket_brigade *) apr_brigade_split(apr_bucket_brigade *b,
101 return apr_brigade_split_ex(b, e, NULL);
104 APU_DECLARE(apr_status_t) apr_brigade_partition(apr_bucket_brigade *b,
106 apr_bucket **after_point)
111 apr_uint64_t point64;
115 /* this could cause weird (not necessarily SEGV) things to happen */
119 *after_point = APR_BRIGADE_FIRST(b);
124 * Try to reduce the following casting mess: We know that point will be
125 * larger equal 0 now and forever and thus that point (apr_off_t) and
126 * apr_size_t will fit into apr_uint64_t in any case.
128 point64 = (apr_uint64_t)point;
130 APR_BRIGADE_CHECK_CONSISTENCY(b);
132 for (e = APR_BRIGADE_FIRST(b);
133 e != APR_BRIGADE_SENTINEL(b);
134 e = APR_BUCKET_NEXT(e))
136 /* For an unknown length bucket, while 'point64' is beyond the possible
137 * size contained in apr_size_t, read and continue...
139 if ((e->length == (apr_size_t)(-1))
140 && (point64 > (apr_uint64_t)APR_SIZE_MAX)) {
141 /* point64 is too far out to simply split this bucket,
142 * we must fix this bucket's size and keep going... */
143 rv = apr_bucket_read(e, &s, &len, APR_BLOCK_READ);
144 if (rv != APR_SUCCESS) {
149 else if ((point64 < (apr_uint64_t)e->length)
150 || (e->length == (apr_size_t)(-1))) {
151 /* We already consumed buckets where point64 is beyond
152 * our interest ( point64 > APR_SIZE_MAX ), above.
153 * Here point falls between 0 and APR_SIZE_MAX
154 * and is within this bucket, or this bucket's len
155 * is undefined, so now we are ready to split it.
156 * First try to split the bucket natively... */
157 if ((rv = apr_bucket_split(e, (apr_size_t)point64))
159 *after_point = APR_BUCKET_NEXT(e);
163 /* if the bucket cannot be split, we must read from it,
164 * changing its type to one that can be split */
165 rv = apr_bucket_read(e, &s, &len, APR_BLOCK_READ);
166 if (rv != APR_SUCCESS) {
171 /* this assumes that len == e->length, which is okay because e
172 * might have been morphed by the apr_bucket_read() above, but
173 * if it was, the length would have been adjusted appropriately */
174 if (point64 < (apr_uint64_t)e->length) {
175 rv = apr_bucket_split(e, (apr_size_t)point64);
176 *after_point = APR_BUCKET_NEXT(e);
180 if (point64 == (apr_uint64_t)e->length) {
181 *after_point = APR_BUCKET_NEXT(e);
184 point64 -= (apr_uint64_t)e->length;
186 *after_point = APR_BRIGADE_SENTINEL(b);
187 return APR_INCOMPLETE;
190 APU_DECLARE(apr_status_t) apr_brigade_length(apr_bucket_brigade *bb,
191 int read_all, apr_off_t *length)
195 apr_status_t status = APR_SUCCESS;
197 for (bkt = APR_BRIGADE_FIRST(bb);
198 bkt != APR_BRIGADE_SENTINEL(bb);
199 bkt = APR_BUCKET_NEXT(bkt))
201 if (bkt->length == (apr_size_t)(-1)) {
210 if ((status = apr_bucket_read(bkt, &ignore, &len,
211 APR_BLOCK_READ)) != APR_SUCCESS) {
216 total += bkt->length;
223 APU_DECLARE(apr_status_t) apr_brigade_flatten(apr_bucket_brigade *bb,
224 char *c, apr_size_t *len)
226 apr_size_t actual = 0;
229 for (b = APR_BRIGADE_FIRST(bb);
230 b != APR_BRIGADE_SENTINEL(bb);
231 b = APR_BUCKET_NEXT(b))
237 status = apr_bucket_read(b, &str, &str_len, APR_BLOCK_READ);
238 if (status != APR_SUCCESS) {
242 /* If we would overflow. */
243 if (str_len + actual > *len) {
244 str_len = *len - actual;
247 /* XXX: It appears that overflow of the final bucket
248 * is DISCARDED without any warning to the caller.
250 * No, we only copy the data up to their requested size. -- jre
252 memcpy(c, str, str_len);
257 /* This could probably be actual == *len, but be safe from stray
259 if (actual >= *len) {
268 APU_DECLARE(apr_status_t) apr_brigade_pflatten(apr_bucket_brigade *bb,
277 apr_brigade_length(bb, 1, &actual);
279 /* XXX: This is dangerous beyond belief. At least in the
280 * apr_brigade_flatten case, the user explicitly stated their
281 * buffer length - so we don't up and palloc 4GB for a single
282 * file bucket. This API must grow a useful max boundry,
283 * either compiled-in or preset via the *len value.
285 * Shouldn't both fn's grow an additional return value for
286 * the case that the brigade couldn't be flattened into the
287 * provided or allocated buffer (such as APR_EMOREDATA?)
288 * Not a failure, simply an advisory result.
290 total = (apr_size_t)actual;
292 *c = apr_palloc(pool, total);
294 rv = apr_brigade_flatten(bb, *c, &total);
296 if (rv != APR_SUCCESS) {
304 APU_DECLARE(apr_status_t) apr_brigade_split_line(apr_bucket_brigade *bbOut,
305 apr_bucket_brigade *bbIn,
306 apr_read_type_e block,
309 apr_off_t readbytes = 0;
311 while (!APR_BRIGADE_EMPTY(bbIn)) {
318 e = APR_BRIGADE_FIRST(bbIn);
319 rv = apr_bucket_read(e, &str, &len, block);
321 if (rv != APR_SUCCESS) {
325 pos = memchr(str, APR_ASCII_LF, len);
326 /* We found a match. */
328 apr_bucket_split(e, pos - str + 1);
329 APR_BUCKET_REMOVE(e);
330 APR_BRIGADE_INSERT_TAIL(bbOut, e);
333 APR_BUCKET_REMOVE(e);
334 if (APR_BUCKET_IS_METADATA(e) || len > APR_BUCKET_BUFF_SIZE/4) {
335 APR_BRIGADE_INSERT_TAIL(bbOut, e);
339 rv = apr_brigade_write(bbOut, NULL, NULL, str, len);
340 if (rv != APR_SUCCESS) {
344 apr_bucket_destroy(e);
347 /* We didn't find an APR_ASCII_LF within the maximum line length. */
348 if (readbytes >= maxbytes) {
357 APU_DECLARE(apr_status_t) apr_brigade_to_iovec(apr_bucket_brigade *b,
358 struct iovec *vec, int *nvec)
364 const char *iov_base;
369 for (e = APR_BRIGADE_FIRST(b);
370 e != APR_BRIGADE_SENTINEL(b);
371 e = APR_BUCKET_NEXT(e))
376 rv = apr_bucket_read(e, &iov_base, &iov_len, APR_NONBLOCK_READ);
377 if (rv != APR_SUCCESS)
379 /* Set indirectly since types differ: */
380 vec->iov_len = iov_len;
381 vec->iov_base = (void *)iov_base;
385 *nvec = (int)(vec - orig);
389 APU_DECLARE(apr_status_t) apr_brigade_vputstrs(apr_bucket_brigade *b,
390 apr_brigade_flush flush,
395 struct iovec vec[MAX_VECS];
399 char *str = va_arg(va, char *);
405 vec[i].iov_base = str;
406 vec[i].iov_len = strlen(str);
410 rv = apr_brigade_writev(b, flush, ctx, vec, i);
411 if (rv != APR_SUCCESS)
417 return apr_brigade_writev(b, flush, ctx, vec, i);
422 APU_DECLARE(apr_status_t) apr_brigade_putc(apr_bucket_brigade *b,
423 apr_brigade_flush flush, void *ctx,
426 return apr_brigade_write(b, flush, ctx, &c, 1);
429 APU_DECLARE(apr_status_t) apr_brigade_write(apr_bucket_brigade *b,
430 apr_brigade_flush flush,
432 const char *str, apr_size_t nbyte)
434 apr_bucket *e = APR_BRIGADE_LAST(b);
435 apr_size_t remaining = APR_BUCKET_BUFF_SIZE;
439 * If the last bucket is a heap bucket and its buffer is not shared with
440 * another bucket, we may write into that bucket.
442 if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e)
443 && ((apr_bucket_heap *)(e->data))->refcount.refcount == 1) {
444 apr_bucket_heap *h = e->data;
446 /* HEAP bucket start offsets are always in-memory, safe to cast */
447 remaining = h->alloc_len - (e->length + (apr_size_t)e->start);
448 buf = h->base + e->start + e->length;
451 if (nbyte > remaining) {
452 /* either a buffer bucket exists but is full,
453 * or no buffer bucket exists and the data is too big
454 * to buffer. In either case, we should flush. */
456 e = apr_bucket_transient_create(str, nbyte, b->bucket_alloc);
457 APR_BRIGADE_INSERT_TAIL(b, e);
458 return flush(b, ctx);
461 e = apr_bucket_heap_create(str, nbyte, NULL, b->bucket_alloc);
462 APR_BRIGADE_INSERT_TAIL(b, e);
467 /* we don't have a buffer, but the data is small enough
468 * that we don't mind making a new buffer */
469 buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc);
470 e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
471 apr_bucket_free, b->bucket_alloc);
472 APR_BRIGADE_INSERT_TAIL(b, e);
473 e->length = 0; /* We are writing into the brigade, and
474 * allocating more memory than we need. This
475 * ensures that the bucket thinks it is empty just
476 * after we create it. We'll fix the length
477 * once we put data in it below.
481 /* there is a sufficiently big buffer bucket available now */
482 memcpy(buf, str, nbyte);
488 APU_DECLARE(apr_status_t) apr_brigade_writev(apr_bucket_brigade *b,
489 apr_brigade_flush flush,
491 const struct iovec *vec,
495 apr_size_t total_len;
499 /* Compute the total length of the data to be written.
502 for (i = 0; i < nvec; i++) {
503 total_len += vec[i].iov_len;
506 /* If the data to be written is very large, try to convert
507 * the iovec to transient buckets rather than copying.
509 if (total_len > APR_BUCKET_BUFF_SIZE) {
511 for (i = 0; i < nvec; i++) {
512 e = apr_bucket_transient_create(vec[i].iov_base,
515 APR_BRIGADE_INSERT_TAIL(b, e);
517 return flush(b, ctx);
520 for (i = 0; i < nvec; i++) {
521 e = apr_bucket_heap_create((const char *) vec[i].iov_base,
522 vec[i].iov_len, NULL,
524 APR_BRIGADE_INSERT_TAIL(b, e);
532 /* If there is a heap bucket at the end of the brigade
533 * already, and its refcount is 1, copy into the existing bucket.
535 e = APR_BRIGADE_LAST(b);
536 if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e)
537 && ((apr_bucket_heap *)(e->data))->refcount.refcount == 1) {
538 apr_bucket_heap *h = e->data;
539 apr_size_t remaining = h->alloc_len -
540 (e->length + (apr_size_t)e->start);
541 buf = h->base + e->start + e->length;
543 if (remaining >= total_len) {
544 /* Simple case: all the data will fit in the
545 * existing heap bucket
547 for (; i < nvec; i++) {
548 apr_size_t len = vec[i].iov_len;
549 memcpy(buf, (const void *) vec[i].iov_base, len);
552 e->length += total_len;
556 /* More complicated case: not all of the data
557 * will fit in the existing heap bucket. The
558 * total data size is <= APR_BUCKET_BUFF_SIZE,
559 * so we'll need only one additional bucket.
561 const char *start_buf = buf;
562 for (; i < nvec; i++) {
563 apr_size_t len = vec[i].iov_len;
564 if (len > remaining) {
567 memcpy(buf, (const void *) vec[i].iov_base, len);
571 e->length += (buf - start_buf);
572 total_len -= (buf - start_buf);
575 apr_status_t rv = flush(b, ctx);
576 if (rv != APR_SUCCESS) {
581 /* Now fall through into the case below to
582 * allocate another heap bucket and copy the
583 * rest of the array. (Note that i is not
584 * reset to zero here; it holds the index
585 * of the first vector element to be
586 * written to the new bucket.)
591 /* Allocate a new heap bucket, and copy the data into it.
592 * The checks above ensure that the amount of data to be
593 * written here is no larger than APR_BUCKET_BUFF_SIZE.
595 buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc);
596 e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
597 apr_bucket_free, b->bucket_alloc);
598 for (; i < nvec; i++) {
599 apr_size_t len = vec[i].iov_len;
600 memcpy(buf, (const void *) vec[i].iov_base, len);
603 e->length = total_len;
604 APR_BRIGADE_INSERT_TAIL(b, e);
609 APU_DECLARE(apr_status_t) apr_brigade_puts(apr_bucket_brigade *bb,
610 apr_brigade_flush flush, void *ctx,
613 return apr_brigade_write(bb, flush, ctx, str, strlen(str));
616 APU_DECLARE_NONSTD(apr_status_t) apr_brigade_putstrs(apr_bucket_brigade *b,
617 apr_brigade_flush flush,
624 rv = apr_brigade_vputstrs(b, flush, ctx, va);
629 APU_DECLARE_NONSTD(apr_status_t) apr_brigade_printf(apr_bucket_brigade *b,
630 apr_brigade_flush flush,
632 const char *fmt, ...)
638 rv = apr_brigade_vprintf(b, flush, ctx, fmt, ap);
643 struct brigade_vprintf_data_t {
644 apr_vformatter_buff_t vbuff;
646 apr_bucket_brigade *b; /* associated brigade */
647 apr_brigade_flush *flusher; /* flushing function */
650 char *cbuff; /* buffer to flush from */
653 static apr_status_t brigade_flush(apr_vformatter_buff_t *buff)
655 /* callback function passed to ap_vformatter to be
656 * called when vformatter needs to buff and
657 * buff.curpos > buff.endpos
660 /* "downcast," have really passed a brigade_vprintf_data_t* */
661 struct brigade_vprintf_data_t *vd = (struct brigade_vprintf_data_t*)buff;
662 apr_status_t res = APR_SUCCESS;
664 res = apr_brigade_write(vd->b, *vd->flusher, vd->ctx, vd->cbuff,
665 APR_BUCKET_BUFF_SIZE);
667 if(res != APR_SUCCESS) {
671 vd->vbuff.curpos = vd->cbuff;
672 vd->vbuff.endpos = vd->cbuff + APR_BUCKET_BUFF_SIZE;
677 APU_DECLARE(apr_status_t) apr_brigade_vprintf(apr_bucket_brigade *b,
678 apr_brigade_flush flush,
680 const char *fmt, va_list va)
682 /* the cast, in order of appearance */
683 struct brigade_vprintf_data_t vd;
684 char buf[APR_BUCKET_BUFF_SIZE];
687 vd.vbuff.curpos = buf;
688 vd.vbuff.endpos = buf + APR_BUCKET_BUFF_SIZE;
694 written = apr_vformatter(brigade_flush, &vd.vbuff, fmt, va);
700 /* write out what remains in the buffer */
701 return apr_brigade_write(b, flush, ctx, buf, vd.vbuff.curpos - buf);
704 /* A "safe" maximum bucket size, 1Gb */
705 #define MAX_BUCKET_SIZE (0x40000000)
707 APU_DECLARE(apr_bucket *) apr_brigade_insert_file(apr_bucket_brigade *bb,
715 if (sizeof(apr_off_t) == sizeof(apr_size_t) || length < MAX_BUCKET_SIZE) {
716 e = apr_bucket_file_create(f, start, (apr_size_t)length, p,
720 /* Several buckets are needed. */
721 e = apr_bucket_file_create(f, start, MAX_BUCKET_SIZE, p,
724 while (length > MAX_BUCKET_SIZE) {
726 apr_bucket_copy(e, &ce);
727 APR_BRIGADE_INSERT_TAIL(bb, ce);
728 e->start += MAX_BUCKET_SIZE;
729 length -= MAX_BUCKET_SIZE;
731 e->length = (apr_size_t)length; /* Resize just the last bucket */
734 APR_BRIGADE_INSERT_TAIL(bb, e);