Commit 2832d07b by Benjamin Kosnik

pool_allocator.h: Qualify __throw_bad_alloc.


2004-06-18  Benjamin Kosnik  <bkoz@redhat.com>

	* include/ext/pool_allocator.h: Qualify __throw_bad_alloc.
	(__pool_base): Remove unused template parameter.  Add
	protected. Move lock data into __pool_base::_Lock. Remove static
	on member functions.
	(__pool_base::_M_get_free_list): New.
	(__pool_alloc): Move _S_force_new here.
	* src/allocator.cc: Move out of line __pool_base definitions here.
	* config/linker-map.gnu: Export bits from __pool_base.

From-SVN: r83355
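
In outline, the change collapses the thread-parameterized base into a single class and moves its definitions out of the header. A simplified sketch of the two shapes (members elided, not the full declarations):

    // Before: a class template, so every member is duplicated per
    // instantiation and must be defined in the header.
    template<bool __threads>
      struct __pool_base
      {
        static void* _S_refill(size_t __n);   // static, header-defined
        // ...
      };

    // After: one non-template base with protected, non-static members,
    // defined once in src/allocator.cc and exported from libstdc++.so.
    class __pool_base
    {
    protected:
      void* _M_refill(size_t __n);            // out of line, versioned export
      // ...
    };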
--- a/config/linker-map.gnu
+++ b/config/linker-map.gnu
@@ -255,6 +255,13 @@ GLIBCXX_3.4.1 {
 } GLIBCXX_3.4;
 
+GLIBCXX_3.4.2 {
+
+    _ZN9__gnu_cxx11__pool_base5_Lock7_S_lockE;
+    _ZN9__gnu_cxx11__pool_base9_M_refillEj;
+    _ZN9__gnu_cxx11__pool_base16_M_get_free_listEj;
+
+} GLIBCXX_3.4.1;
 
 # Symbols in the support library (libsupc++) have their own tag.
 CXXABI_1.3 {
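
For reference, the three new exports demangle (for example with c++filt) as follows; the trailing j encodes unsigned int, which is what size_t mangles to on the 32-bit targets this map was written against:

    _ZN9__gnu_cxx11__pool_base5_Lock7_S_lockE       # __gnu_cxx::__pool_base::_Lock::_S_lock
    _ZN9__gnu_cxx11__pool_base9_M_refillEj          # __gnu_cxx::__pool_base::_M_refill(unsigned int)
    _ZN9__gnu_cxx11__pool_base16_M_get_free_listEj  # __gnu_cxx::__pool_base::_M_get_free_list(unsigned int)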
--- a/include/ext/pool_allocator.h
+++ b/include/ext/pool_allocator.h
@@ -55,8 +55,6 @@
 namespace __gnu_cxx
 {
-  using std::__throw_bad_alloc;
-
   /**
    * @if maint
    * Uses various allocators to fulfill underlying requests (and makes as
@@ -71,71 +69,64 @@ namespace __gnu_cxx
    * information that we can return the object to the proper free list
    * without permanently losing part of the object.
    *
-   * The template parameter specifies whether more than one thread may use
-   * this allocator.  It is safe to allocate an object from one instance
-   * of the allocator and deallocate it with another one.  This effectively
-   * transfers its ownership to the second one.  This may have undesirable
-   * effects on reference locality.
-   *
    * @endif
    * (See @link Allocators allocators info @endlink for more.)
    */
-  template<bool __threads>
-    struct __pool_base
-    {
+  class __pool_base
+  {
+  protected:
     enum { _S_align = 8 };
     enum { _S_max_bytes = 128 };
-    enum { _S_freelists = _S_max_bytes / _S_align };
+    enum { _S_free_list_size = _S_max_bytes / _S_align };
+
+    // It would be nice to use _STL_auto_lock here.  But we need a
+    // test whether threads are in use.
+    struct _Lock
+    {
+      static _STL_mutex_lock _S_lock;
+      _Lock() { _S_lock._M_acquire_lock(); }
+      ~_Lock() { _S_lock._M_release_lock(); }
+    };
 
     union _Obj
     {
       union _Obj* _M_free_list_link;
       char        _M_client_data[1];    // The client sees this.
     };
 
-    static _Obj* volatile _S_free_list[_S_freelists];
+    static _Obj* volatile _S_free_list[_S_free_list_size];
 
     // Chunk allocation state.
     static char*   _S_start_free;
     static char*   _S_end_free;
     static size_t  _S_heap_size;
-    static _STL_mutex_lock _S_lock;
-    static _Atomic_word _S_force_new;
 
-    static size_t
-    _S_round_up(size_t __bytes)
+    size_t
+    _M_round_up(size_t __bytes)
     { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
 
-    static size_t
-    _S_freelist_index(size_t __bytes)
-    { return ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1); }
+    _Obj* volatile*
+    _M_get_free_list(size_t __bytes);
 
     // Returns an object of size __n, and optionally adds to size __n
     // free list.
-    static void*
-    _S_refill(size_t __n);
+    void*
+    _M_refill(size_t __n);
 
     // Allocates a chunk for nobjs of size size.  nobjs may be reduced
    // if it is inconvenient to allocate the requested number.
-    static char*
-    _S_chunk_alloc(size_t __n, int& __nobjs);
+    char*
+    _M_allocate_chunk(size_t __n, int& __nobjs);
 
-    // It would be nice to use _STL_auto_lock here.  But we need a
-    // test whether threads are in use.
-    struct _Lock
-    {
-      _Lock() { if (__threads) _S_lock._M_acquire_lock(); }
-      ~_Lock() { if (__threads) _S_lock._M_release_lock(); }
-    } __attribute__ ((__unused__));
-    friend struct _Lock;
-    };
+  };
 
-  typedef __pool_base<true> __pool_alloc_base;
-
   template<typename _Tp>
-    class __pool_alloc : private __pool_alloc_base
+    class __pool_alloc : private __pool_base
     {
+    private:
+      static _Atomic_word _S_force_new;
+
     public:
       typedef size_t     size_type;
       typedef ptrdiff_t  difference_type;
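
The size-class arithmetic behind _M_round_up and the new _M_get_free_list: with _S_align == 8 and _S_max_bytes == 128, a request is rounded up to a multiple of 8 and mapped onto one of 16 free lists. A standalone sketch of the same math (illustrative names, not the library's):

    #include <cstddef>
    #include <cstdio>

    // Same constants as __pool_base.
    enum { align = 8, max_bytes = 128, free_list_size = max_bytes / align };

    // Round bytes up to the next multiple of 8 (mirrors _M_round_up).
    static std::size_t
    round_up(std::size_t bytes)
    { return (bytes + align - 1) & ~std::size_t(align - 1); }

    // Map a request onto a free-list slot (the index _M_get_free_list
    // computes): sizes 1-8 -> slot 0, 9-16 -> slot 1, ..., 121-128 -> 15.
    static std::size_t
    free_list_index(std::size_t bytes)
    { return (bytes + align - 1) / align - 1; }

    int main()
    {
      std::printf("%u %u\n", unsigned(round_up(1)), unsigned(free_list_index(1)));     // 8 0
      std::printf("%u %u\n", unsigned(round_up(13)), unsigned(free_list_index(13)));   // 16 1
      std::printf("%u %u\n", unsigned(round_up(128)), unsigned(free_list_index(128))); // 128 15
    }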
@@ -194,116 +185,9 @@ namespace __gnu_cxx
     operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
     { return false; }
 
-  // Allocate memory in large chunks in order to avoid fragmenting the
-  // heap too much.  Assume that __n is properly aligned.  We hold
-  // the allocation lock.
+  template<typename _Tp>
+    _Atomic_word
+    __pool_alloc<_Tp>::_S_force_new;
-  template<bool __threads>
-    char*
-    __pool_base<__threads>::_S_chunk_alloc(size_t __n, int& __nobjs)
-    {
-      char* __result;
-      size_t __total_bytes = __n * __nobjs;
-      size_t __bytes_left = _S_end_free - _S_start_free;
-
-      if (__bytes_left >= __total_bytes)
-        {
-          __result = _S_start_free;
-          _S_start_free += __total_bytes;
-          return __result;
-        }
-      else if (__bytes_left >= __n)
-        {
-          __nobjs = (int)(__bytes_left / __n);
-          __total_bytes = __n * __nobjs;
-          __result = _S_start_free;
-          _S_start_free += __total_bytes;
-          return __result;
-        }
-      else
-        {
-          size_t __bytes_to_get = (2 * __total_bytes
-                                   + _S_round_up(_S_heap_size >> 4));
-          // Try to make use of the left-over piece.
-          if (__bytes_left > 0)
-            {
-              _Obj* volatile* __free_list = (_S_free_list
-                                             + _S_freelist_index(__bytes_left));
-              ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
-              *__free_list = (_Obj*)(void*)_S_start_free;
-            }
-
-          _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
-          if (_S_start_free == 0)
-            {
-              size_t __i;
-              _Obj* volatile* __free_list;
-              _Obj* __p;
-
-              // Try to make do with what we have.  That can't hurt.  We
-              // do not try smaller requests, since that tends to result
-              // in disaster on multi-process machines.
-              __i = __n;
-              for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
-                {
-                  __free_list = _S_free_list + _S_freelist_index(__i);
-                  __p = *__free_list;
-                  if (__p != 0)
-                    {
-                      *__free_list = __p -> _M_free_list_link;
-                      _S_start_free = (char*)__p;
-                      _S_end_free = _S_start_free + __i;
-                      return _S_chunk_alloc(__n, __nobjs);
-                      // Any leftover piece will eventually make it to the
-                      // right free list.
-                    }
-                }
-              _S_end_free = 0;        // In case of exception.
-              _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
-              // This should either throw an exception or remedy the situation.
-              // Thus we assume it succeeded.
-            }
-          _S_heap_size += __bytes_to_get;
-          _S_end_free = _S_start_free + __bytes_to_get;
-          return _S_chunk_alloc(__n, __nobjs);
-        }
-    }
-
-  // Returns an object of size __n, and optionally adds to "size
-  // __n"'s free list.  We assume that __n is properly aligned.  We
-  // hold the allocation lock.
-  template<bool __threads>
-    void*
-    __pool_base<__threads>::_S_refill(size_t __n)
-    {
-      int __nobjs = 20;
-      char* __chunk = _S_chunk_alloc(__n, __nobjs);
-      _Obj* volatile* __free_list;
-      _Obj* __result;
-      _Obj* __current_obj;
-      _Obj* __next_obj;
-      int __i;
-
-      if (1 == __nobjs)
-        return __chunk;
-      __free_list = _S_free_list + _S_freelist_index(__n);
-
-      // Build free list in chunk.
-      __result = (_Obj*)(void*)__chunk;
-      *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
-      for (__i = 1; ; __i++)
-        {
-          __current_obj = __next_obj;
-          __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
-          if (__nobjs - 1 == __i)
-            {
-              __current_obj -> _M_free_list_link = 0;
-              break;
-            }
-          else
-            __current_obj -> _M_free_list_link = __next_obj;
-        }
-      return __result;
-    }
 
   template<typename _Tp>
     _Tp*
@@ -314,7 +198,6 @@ namespace __gnu_cxx
     {
       if (__n <= max_size())
         {
-          const size_t __bytes = __n * sizeof(_Tp);
           // If there is a race through here, assume answer from getenv
           // will resolve in same direction.  Inspired by techniques
           // to efficiently support threading found in basic_string.h.
@@ -325,31 +208,32 @@ namespace __gnu_cxx
               else
                 __atomic_add(&_S_force_new, -1);
             }
 
-          if ((__bytes > (size_t) _S_max_bytes) || (_S_force_new > 0))
+          const size_t __bytes = __n * sizeof(_Tp);
+          if (__bytes > size_t(_S_max_bytes) || _S_force_new == 1)
             __ret = static_cast<_Tp*>(::operator new(__bytes));
           else
             {
-              _Obj* volatile* __free_list = (_S_free_list
-                                             + _S_freelist_index(__bytes));
+              _Obj* volatile* __free_list = _M_get_free_list(__bytes);
+
               // Acquire the lock here with a constructor call.  This
               // ensures that it is released in exit or during stack
               // unwinding.
               _Lock __lock_instance;
               _Obj* __restrict__ __result = *__free_list;
               if (__builtin_expect(__result == 0, 0))
-                __ret = static_cast<_Tp*>(_S_refill(_S_round_up(__bytes)));
+                __ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
               else
                 {
                   *__free_list = __result->_M_free_list_link;
                   __ret = reinterpret_cast<_Tp*>(__result);
                 }
               if (__builtin_expect(__ret == 0, 0))
-                __throw_bad_alloc();
+                std::__throw_bad_alloc();
             }
         }
       else
-        __throw_bad_alloc();
+        std::__throw_bad_alloc();
     }
   return __ret;
 }
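
The elided lines just above this hunk initialise _S_force_new from the GLIBCXX_FORCE_NEW environment variable. The tri-state pattern, sketched here under the assumption that the full check matches GCC's released sources (a plain int stands in for _Atomic_word, and plain assignment for __atomic_add):

    #include <cstdlib>

    // 0 = not yet checked, 1 = always use operator new, -1 = use the pool.
    static int force_new;

    static bool use_operator_new()
    {
      if (force_new == 0)
        {
          // If two threads race through here, both read the same environment
          // and push the flag in the same direction, so the result is stable.
          if (std::getenv("GLIBCXX_FORCE_NEW"))
            force_new = 1;    // library: __atomic_add(&_S_force_new, 1)
          else
            force_new = -1;   // library: __atomic_add(&_S_force_new, -1)
        }
      return force_new == 1;
    }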
@@ -361,44 +245,22 @@ namespace __gnu_cxx
       if (__n)
         {
           const size_t __bytes = __n * sizeof(_Tp);
-          if ((__bytes > (size_t) _S_max_bytes) || (_S_force_new > 0))
+          if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new == 1)
             ::operator delete(__p);
           else
             {
-              _Obj* volatile* __free_list = (_S_free_list
-                                             + _S_freelist_index(__bytes));
-              _Obj* __q = (_Obj*)__p;
+              _Obj* volatile* __free_list = _M_get_free_list(__bytes);
+              _Obj* __q = reinterpret_cast<_Obj*>(__p);
 
               // Acquire the lock here with a constructor call.  This
               // ensures that it is released in exit or during stack
               // unwinding.
               _Lock __lock_instance;
-              __q -> _M_free_list_link = *__free_list;
+              __q ->_M_free_list_link = *__free_list;
               *__free_list = __q;
             }
         }
     }
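
Both allocate and deallocate guard the shared free lists with _Lock __lock_instance;, exactly the idiom the comments describe: the constructor acquires the mutex and the destructor releases it, so the lock is dropped on every exit path, including stack unwinding after an exception. A minimal sketch with illustrative names (not the library's):

    // Stands in for _STL_mutex_lock; bodies elided to keep the sketch short.
    struct mutex
    {
      void acquire() { /* take the platform lock */ }
      void release() { /* drop the platform lock */ }
    };

    // Stands in for __pool_base::_Lock.
    struct scoped_lock
    {
      static mutex s_lock;                  // one lock for all free lists
      scoped_lock()  { s_lock.acquire(); }  // taken on construction...
      ~scoped_lock() { s_lock.release(); }  // ...released on any exit path
    };
    mutex scoped_lock::s_lock;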
 
-  template<bool __threads>
-    typename __pool_base<__threads>::_Obj* volatile
-    __pool_base<__threads>::_S_free_list[_S_freelists];
-
-  template<bool __threads>
-    char* __pool_base<__threads>::_S_start_free = 0;
-
-  template<bool __threads>
-    char* __pool_base<__threads>::_S_end_free = 0;
-
-  template<bool __threads>
-    size_t __pool_base<__threads>::_S_heap_size = 0;
-
-  template<bool __threads>
-    _STL_mutex_lock
-    __pool_base<__threads>::_S_lock __STL_MUTEX_INITIALIZER;
-
-  template<bool __threads>
-    _Atomic_word
-    __pool_base<__threads>::_S_force_new = 0;
-
 } // namespace __gnu_cxx
 
 #endif
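
Since __pool_alloc keeps the standard allocator interface, nothing changes for users; a container simply names it as its allocator type. For example:

    #include <vector>
    #include <ext/pool_allocator.h>

    int main()
    {
      // Small blocks come from the pooled free lists; anything over 128
      // bytes (or any size when GLIBCXX_FORCE_NEW is set) falls through
      // to plain operator new / operator delete.
      std::vector<int, __gnu_cxx::__pool_alloc<int> > v;
      v.push_back(42);
    }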
--- a/src/allocator.cc
+++ b/src/allocator.cc
@@ -36,16 +36,138 @@
 #include <ext/mt_allocator.h>
 #include <ext/pool_allocator.h>
 
-// Explicitly instantiate the static data members of the underlying
-// allocator.
 namespace __gnu_cxx
 {
+  // Instantiations for __mt_alloc.
   template class __mt_alloc<char>;
   template class __mt_alloc<wchar_t>;
 
-  // Static members of __pool_alloc.
+  // Definitions and instantiations for __pool_alloc and base class.
+  __pool_base::_Obj* volatile*
+  __pool_base::_M_get_free_list(size_t __bytes)
+  {
+    size_t __i = ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1);
+    return _S_free_list + __i;
+  }
+
+  // Allocate memory in large chunks in order to avoid fragmenting the
+  // heap too much.  Assume that __n is properly aligned.  We hold the
+  // allocation lock.
+  char*
+  __pool_base::_M_allocate_chunk(size_t __n, int& __nobjs)
+  {
+    char* __result;
+    size_t __total_bytes = __n * __nobjs;
+    size_t __bytes_left = _S_end_free - _S_start_free;
+
+    if (__bytes_left >= __total_bytes)
+      {
+        __result = _S_start_free;
+        _S_start_free += __total_bytes;
+        return __result;
+      }
+    else if (__bytes_left >= __n)
+      {
+        __nobjs = (int)(__bytes_left / __n);
+        __total_bytes = __n * __nobjs;
+        __result = _S_start_free;
+        _S_start_free += __total_bytes;
+        return __result;
+      }
+    else
+      {
+        // Try to make use of the left-over piece.
+        if (__bytes_left > 0)
+          {
+            _Obj* volatile* __free_list = _M_get_free_list(__bytes_left);
+            ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
+            *__free_list = (_Obj*)(void*)_S_start_free;
+          }
+
+        size_t __bytes_to_get = (2 * __total_bytes
+                                 + _M_round_up(_S_heap_size >> 4));
+        _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
+        if (_S_start_free == 0)
+          {
+            size_t __i;
+            _Obj* volatile* __free_list;
+            _Obj* __p;
+
+            // Try to make do with what we have.  That can't hurt.  We
+            // do not try smaller requests, since that tends to result
+            // in disaster on multi-process machines.
+            __i = __n;
+            for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
+              {
+                __free_list = _M_get_free_list(__i);
+                __p = *__free_list;
+                if (__p != 0)
+                  {
+                    *__free_list = __p -> _M_free_list_link;
+                    _S_start_free = (char*)__p;
+                    _S_end_free = _S_start_free + __i;
+                    return _M_allocate_chunk(__n, __nobjs);
+                    // Any leftover piece will eventually make it to the
+                    // right free list.
+                  }
+              }
+            _S_end_free = 0;        // In case of exception.
+            _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
+            // This should either throw an exception or remedy the situation.
+            // Thus we assume it succeeded.
+          }
+        _S_heap_size += __bytes_to_get;
+        _S_end_free = _S_start_free + __bytes_to_get;
+        return _M_allocate_chunk(__n, __nobjs);
+      }
+  }
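
The growth arithmetic above (2 * __total_bytes + _M_round_up(_S_heap_size >> 4)) doubles the current request and adds a sixteenth of everything already taken from the heap, rounded up. A worked example, assuming a fresh pool:

    // First refill for 16-byte objects (__n = 16, __nobjs = 20, heap empty):
    //   __total_bytes  = 16 * 20                    = 320
    //   __bytes_to_get = 2 * 320 + round_up(0 >> 4) = 640
    // The current refill consumes 320 bytes; the other 320 remain between
    // _S_start_free and _S_end_free, so the next refill of this size is
    // served from the leftover without calling operator new at all.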
+
+  // Returns an object of size __n, and optionally adds to "size
+  // __n"'s free list.  We assume that __n is properly aligned.  We
+  // hold the allocation lock.
+  void*
+  __pool_base::_M_refill(size_t __n)
+  {
+    int __nobjs = 20;
+    char* __chunk = _M_allocate_chunk(__n, __nobjs);
+    _Obj* volatile* __free_list;
+    _Obj* __result;
+    _Obj* __current_obj;
+    _Obj* __next_obj;
+    int __i;
+
+    if (1 == __nobjs)
+      return __chunk;
+    __free_list = _M_get_free_list(__n);
+
+    // Build free list in chunk.
+    __result = (_Obj*)(void*)__chunk;
+    *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
+    for (__i = 1; ; __i++)
+      {
+        __current_obj = __next_obj;
+        __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
+        if (__nobjs - 1 == __i)
+          {
+            __current_obj -> _M_free_list_link = 0;
+            break;
+          }
+        else
+          __current_obj -> _M_free_list_link = __next_obj;
+      }
+    return __result;
+  }
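
_M_refill hands the first object of the chunk to the caller and threads the rest onto the free list. For __n == 16 with the full 20-object chunk:

    // chunk layout:  |obj0|obj1|obj2| ... |obj19|   (20 slots of 16 bytes)
    // returned:       obj0, straight to the caller
    // free list:      *__free_list = obj1 -> obj2 -> ... -> obj19 -> 0
    // Each _M_free_list_link points 16 bytes ahead; obj19's link is
    // zeroed to terminate the chain.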
+
+  __pool_base::_Obj* volatile __pool_base::_S_free_list[_S_free_list_size];
+  char* __pool_base::_S_start_free = 0;
+  char* __pool_base::_S_end_free = 0;
+  size_t __pool_base::_S_heap_size = 0;
+  _STL_mutex_lock __pool_base::_Lock::_S_lock __STL_MUTEX_INITIALIZER;
+
   template class __pool_alloc<char>;
   template class __pool_alloc<wchar_t>;
-  template class __pool_base<true>;
 } // namespace __gnu_cxx