// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <ext/atomicity.h>

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  using std::size_t;
  using std::ptrdiff_t;

  typedef void (*__destroy_handler)(void*);

  /// @brief  Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 32768 with this allocator.
    typedef unsigned short int _Binmap_type;
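
    // Illustrative note (not part of the original sources): the binmap is
    // a lookup table, filled in during initialization, that maps a request
    // size in bytes to the index of the smallest bin able to hold it.  A
    // sketch of the mapping, assuming the default _S_min_bin == 8 and
    // _S_max_bytes == 128 (so bins of 8, 16, 32, 64 and 128 bytes):
    //
    //   _M_binmap[1]   == 0   // served from the 8-byte bin
    //   _M_binmap[20]  == 2   // rounded up to the 32-byte bin
    //   _M_binmap[128] == 4   // served from the 128-byte bin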

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator.  A raw new/delete
      // call will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t _M_min_bin;

      // In order to avoid fragmentation and minimize the number of
      // new() calls we always request new memory using this
      // value.  Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t _M_chunk_size;

      // The maximum number of supported threads.  For
      // single-threaded operation, use one.  Maximum values will
      // vary depending on details of the underlying system.  (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports a
      // considerably larger value.)
      size_t _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist.  If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t _M_freelist_headroom;
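
      // Worked example (illustrative, not from the original sources):
      // with the default _M_freelist_headroom of 10, a thread that
      // currently has 100 blocks of a given size in use may keep roughly
      // 10 spare blocks of that size on its local freelist; deallocations
      // beyond that surplus are returned to the global pool (thread id 0)
      // instead.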

      // Set to true, forces all allocations to use new().
      bool _M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
	    size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
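
    // Illustrative example (not part of the original sources): a custom
    // _Tune can be built and handed to an __mt_alloc instance through
    // _M_set_options().  As documented below, the options only take
    // effect if they are set before the pool has been initialized, i.e.
    // before the first allocation.  The values here are arbitrary.
    //
    //   typedef __gnu_cxx::__pool_base::_Tune tune_type;
    //   tune_type __t(16, 5120, 32, 5120 * 64, 4096, 10, false);
    //   __gnu_cxx::__mt_alloc<char> __a;
    //   __a._M_set_options(__t);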

    struct _Block_address
    {
      void*           _M_initial;
      _Block_address* _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
	_M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }
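
    // Illustrative note (not from the original sources): with the default
    // _M_max_bytes of 128, a request for, say, 200 bytes fails this
    // threshold check and is passed straight to ::operator new, while a
    // 100-byte request is rounded up and served from the 128-byte bin.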

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(NULL), _M_init(false) { }

  private:
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune _M_options;

    _Binmap_type* _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization.  After
    // initialization is complete, this variable is set to true.
    bool _M_init;
  };

  /**
   * @brief  Data describing the underlying memory pool, parameterized on
   * thread support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record* _M_next;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block.
	_Block_record** _M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address* _M_address;
      };

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size.  Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* _M_bin;

      // Actual value calculated in _M_initialize().
      size_t _M_bin_size;

      void
      _M_initialize();
    };

  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads.  Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids.  When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key.  When initializing the
      // __gthread_key we specify a destructor.  When this destructor
      // is called (i.e. the thread dies), we return the thread id to
      // the front of this list.
      struct _Thread_record
      {
	// Points to next free thread id record.  NULL if last record in list.
	_Thread_record* _M_next;

	// Thread id ranging from 1 to _S_max_threads.
	size_t _M_id;
      };

      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record* _M_next;

	// The thread id of the thread which has requested this block.
	size_t _M_thread_id;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block for each
	// thread id.  Memory to this "array" is allocated in
	// _S_initialize() for _S_max_threads + global pool 0.
	_Block_record** _M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address* _M_address;

	// An "array" of counters used to keep track of the number of
	// blocks that are on the freelist/used for each thread id.
	// - Note that the second part of the allocated _M_used "array"
	//   actually hosts (atomic) counters of reclaimed blocks: in
	//   _M_reserve_block and in _M_reclaim_block those numbers are
	//   subtracted from the first ones to obtain the actual size
	//   of the "working set" of the given thread.
	// - Memory to these "arrays" is allocated in _S_initialize()
	//   for _S_max_threads + global pool 0.
	size_t* _M_free;
	size_t* _M_used;

	// Each bin has its own mutex which is used to ensure data
	// integrity while changing "ownership" on a block.  The mutex
	// is initialized in _S_initialize().
	__gthread_mutex_t* _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block_record,
			 size_t __thread_id)
      {
	if (__gthread_active_p())
	  {
	    __block_record->_M_thread_id = __thread_id;
	    --__bin._M_free[__thread_id];
	    ++__bin._M_used[__thread_id];
	  }
      }

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_destroy_thread_key(void*);

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
	_M_thread_freelist(NULL)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size.  Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* _M_bin;

      // Actual value calculated in _M_initialize().
      size_t _M_bin_size;

      _Thread_record* _M_thread_freelist;
      void* _M_thread_freelist_initial;

      void
      _M_initialize();
    };

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread> pool_type;

      static pool_type&
      _S_get_pool()
      {
	static pool_type _S_pool;
	return _S_pool;
      }
    };

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization.  May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread support.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// @brief  Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
	struct _M_rebind
	{ typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
    };

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp value_type;
      typedef _PoolTp<_Thread> pool_type;

      static pool_type&
      _S_get_pool()
      {
	// Sane defaults for the _PoolTp.
	typedef typename pool_type::_Block_record _Block_record;
	const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
				   ? __alignof__(_Tp) : sizeof(_Block_record));

	typedef typename __pool_base::_Tune _Tune;
	static _Tune _S_tune(__a, sizeof(_Tp) * 64,
			     sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
			     sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
			     _Tune::_S_max_threads,
			     _Tune::_S_freelist_headroom,
			     std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
	static pool_type _S_pool(_S_tune);
	return _S_pool;
      }
    };
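
  // Worked example (illustrative, not from the original sources): for a
  // hypothetical _Tp with sizeof(_Tp) == 16 and alignment 8 on a 64-bit
  // target, sizeof(_Block_record) is 8, so __a == 8 and the per-type
  // defaults above become max_bytes = 16 * 64 = 1024, min_bin =
  // max(2 * 16, 8) = 32, and chunk_size = 16 * (4096 - 4 * 8) = 65024
  // bytes per chunk requested from the system.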

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization.  May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread support.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// @brief  Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
	struct _M_rebind
	{ typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
    };
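
  // Illustrative example (not part of the original sources): the policy
  // template parameter selects how pools are shared.  The default
  // __common_pool_policy shares one pool across all types, while
  // __per_type_pool_policy gives each value type its own pool with the
  // per-type tuned defaults shown above, e.g.
  //
  //   typedef __gnu_cxx::__per_type_pool_policy<int, __gnu_cxx::__pool, true>
  //     policy_type;
  //   __gnu_cxx::__mt_alloc<int, policy_type> __per_type_alloc;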

  /// @brief  Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t             size_type;
      typedef ptrdiff_t          difference_type;
      typedef _Tp*               pointer;
      typedef const _Tp*         const_pointer;
      typedef _Tp&               reference;
      typedef const _Tp&         const_reference;
      typedef _Tp                value_type;

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one.  Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the "global" list).
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp,
	   typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t             size_type;
      typedef ptrdiff_t          difference_type;
      typedef _Tp*               pointer;
      typedef const _Tp*         const_pointer;
      typedef _Tp&               reference;
      typedef const _Tp&         const_reference;
      typedef _Tp                value_type;
      typedef _Poolp             __policy_type;
      typedef typename _Poolp::pool_type __pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
	struct rebind
	{
	  typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
	  typedef __mt_alloc<_Tp1, pol_type> other;
	};

      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
	__mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) throw() { }

      ~__mt_alloc() throw() { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
	// Return a copy, not a reference, for external consumption.
	return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };

  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__builtin_expect(__n > this->max_size(), false))
	std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
	{
	  void* __ret = ::operator new(__bytes);
	  return static_cast<_Tp*>(__ret);
	}

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
	{
	  // Already reserved.
	  typedef typename __pool_type::_Block_record _Block_record;
	  _Block_record* __block_record = __bin._M_first[__thread_id];
	  __bin._M_first[__thread_id] = __block_record->_M_next;

	  __pool._M_adjust_freelist(__bin, __block_record, __thread_id);
	  __c = reinterpret_cast<char*>(__block_record) + __pool._M_get_align();
	}
      else
	{
	  // Null, reserve.
	  __c = __pool._M_reserve_block(__bytes, __thread_id);
	}
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
	{
	  // Requests larger than _M_max_bytes are handled by
	  // operator new/delete directly.
	  __pool_type& __pool = __policy_type::_S_get_pool();
	  const size_t __bytes = __n * sizeof(_Tp);
	  if (__pool._M_check_threshold(__bytes))
	    ::operator delete(__p);
	  else
	    __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
	}
    }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }

#undef __thread_default
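
  // A minimal usage sketch (illustrative, not part of the original
  // sources): __mt_alloc is a drop-in replacement for std::allocator,
  // so it can be plugged into any standard container.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   int main()
  //   {
  //     std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //     for (int __i = 0; __i < 1000; ++__i)
  //       __v.push_back(__i);
  //   }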

_GLIBCXX_END_NAMESPACE

#endif