Commit ce7df2fd by Benjamin Kosnik

14176.cc: New.


2004-03-12  Benjamin Kosnik  <bkoz@redhat.com>

	* testsuite/20_util/allocator/14176.cc: New.
	* include/ext/mt_allocator.h: Formatting fixes.

From-SVN: r79407
parent 4319ac92
2004-03-12  Benjamin Kosnik  <bkoz@redhat.com>

	* testsuite/20_util/allocator/14176.cc: New.
	* include/ext/mt_allocator.h: Formatting fixes.

2004-03-11  Dhruv Matani  <dhruvbird@HotPOP.com>

	* include/Makefile.am (ext_headers): Add
@@ -5,7 +10,8 @@
	* include/Makefile.in: Regenerate.
	* docs/html/ext/ballocator_doc.txt: New file.
	* include/ext/bitmap_allocator.h: New file.
	* testsuite/performance/20_util/allocator/list_sort_search.cc: Add
	test.
	* testsuite/performance/20_util/allocator/map_mt_find.cc: Likewise.
	* testsuite/performance/20_util/allocator/producer_consumer.cc: Add
	test for the bitmap_allocator<>.
...
@@ -216,10 +216,10 @@ namespace __gnu_cxx
      static void
      _S_destroy_thread_key(void* freelist_pos);
#endif

      static size_t
      _S_get_thread_id();

      struct block_record
      {
@@ -293,18 +293,14 @@
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const size_t __thread_id = _S_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      const bin_record& __bin = _S_bin[__which];
      block_record* block = NULL;
      if (__bin.first[__thread_id] == NULL)
        {
          // Are we using threads?
          // - Yes, check if there are free blocks on the global
@@ -319,124 +315,115 @@ namespace __gnu_cxx
#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              const size_t bin_size = (1 << __which) + sizeof(block_record);
              size_t block_count = _S_options._M_chunk_size / bin_size;

              __gthread_mutex_lock(__bin.mutex);
              if (__bin.first[0] == NULL)
                {
                  // No need to hold the lock when we are adding a
                  // whole chunk to our own list.
                  __gthread_mutex_unlock(__bin.mutex);

                  void* v = ::operator new(_S_options._M_chunk_size);
                  __bin.first[__thread_id] = static_cast<block_record*>(v);
                  __bin.free[__thread_id] = block_count;
                  block_count--;
                  block = __bin.first[__thread_id];

                  while (block_count > 0)
                    {
                      char* c = reinterpret_cast<char*>(block) + bin_size;
                      block->next = reinterpret_cast<block_record*>(c);
                      block->thread_id = __thread_id;
                      block = block->next;
                      block_count--;
                    }

                  block->next = NULL;
                  block->thread_id = __thread_id;
                }
              else
                {
                  size_t global_count = 0;
                  block_record* tmp;
                  while (__bin.first[0] != NULL && global_count < block_count)
                    {
                      tmp = __bin.first[0]->next;
                      block = __bin.first[0];

                      if (__bin.first[__thread_id] == NULL)
                        {
                          __bin.first[__thread_id] = block;
                          block->next = NULL;
                        }
                      else
                        {
                          block->next = __bin.first[__thread_id];
                          __bin.first[__thread_id] = block;
                        }

                      block->thread_id = __thread_id;
                      __bin.free[__thread_id]++;

                      __bin.first[0] = tmp;
                      global_count++;
                    }
                  __gthread_mutex_unlock(__bin.mutex);
                }

              // Return the first newly added block in our list and
              // update the counters
              block = __bin.first[__thread_id];
              __bin.first[__thread_id] = __bin.first[__thread_id]->next;
              __bin.free[__thread_id]--;
              __bin.used[__thread_id]++;
            }
          else
#endif
            {
              void* __v = ::operator new(_S_options._M_chunk_size);
              __bin.first[0] = static_cast<block_record*>(__v);

              const size_t bin_size = (1 << __which) + sizeof(block_record);
              size_t block_count = _S_options._M_chunk_size / bin_size;

              block_count--;
              block = __bin.first[0];

              while (block_count > 0)
                {
                  char* __c = reinterpret_cast<char*>(block) + bin_size;
                  block->next = reinterpret_cast<block_record*>(__c);
                  block = block->next;
                  block_count--;
                }
              block->next = NULL;

              // Remove from list.
              block = __bin.first[0];
              __bin.first[0] = __bin.first[0]->next;
            }
        }
      else
        {
          // "Default" operation - we have blocks on our own freelist
          // grab the first record and update the counters.
          block = __bin.first[__thread_id];
          __bin.first[__thread_id] = __bin.first[__thread_id]->next;

#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __bin.free[__thread_id]--;
              __bin.used[__thread_id]++;
            }
#endif
        }

      char* __c = reinterpret_cast<char*>(block) + sizeof(block_record);
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
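When a thread's freelist for a bin is empty, allocate() above carves a whole ::operator new chunk into equal blocks, each prefixed by a block_record header, and links them into a singly linked list. The following standalone sketch reproduces that carving loop outside the library; block_record is reduced to the two fields the code above actually uses, and chunk_size is assumed to be at least one bin_size.

#include <cstddef>
#include <new>

// Reduced block header: an intrusive freelist link plus the owning
// thread id, mirroring the fields used by allocate()/deallocate().
struct block_record
{
  block_record* next;
  std::size_t   thread_id;
};

// Carve one raw chunk into bin_size-byte blocks and chain them, the
// way allocate() fills an empty bin.  Assumes chunk_size >= bin_size.
block_record*
carve_chunk(std::size_t chunk_size, std::size_t which, std::size_t thread_id)
{
  const std::size_t bin_size = (std::size_t(1) << which) + sizeof(block_record);
  std::size_t block_count = chunk_size / bin_size;

  block_record* first = static_cast<block_record*>(::operator new(chunk_size));
  block_record* block = first;
  while (--block_count > 0)
    {
      char* c = reinterpret_cast<char*>(block) + bin_size;
      block->next = reinterpret_cast<block_record*>(c);
      block->thread_id = thread_id;
      block = block->next;
    }
  block->next = NULL;
  block->thread_id = thread_id;
  return first;
}

// The pointer handed back to the user skips the header, exactly as in
// the final two lines of allocate().
void*
user_pointer(block_record* block)
{ return reinterpret_cast<char*>(block) + sizeof(block_record); }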
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
@@ -444,83 +431,79 @@
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          ::operator delete(__p);
          return;
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const size_t thread_id = _S_get_thread_id();
      const bin_record& __bin = _S_bin[__which];

      char* __c = reinterpret_cast<char*>(__p) - sizeof(block_record);
      block_record* block = reinterpret_cast<block_record*>(__c);

#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          // Calculate the number of records to remove from our freelist.
          int remove = __bin.free[thread_id] -
            (__bin.used[thread_id] / _S_options._M_freelist_headroom);

          // The calculation above will almost always tell us to
          // remove one or two records at a time, but this creates too
          // much contention when locking and therefore we wait until
          // the number of records is "high enough".
          int __cond1 = static_cast<int>(100 * (_S_bin_size - __which));
          int __cond2 = static_cast<int>(__bin.free[thread_id]
                                         / _S_options._M_freelist_headroom);
          if (remove > __cond1 && remove > __cond2)
            {
              __gthread_mutex_lock(__bin.mutex);
              block_record* tmp;
              while (remove > 0)
                {
                  tmp = __bin.first[thread_id]->next;
                  if (__bin.first[0] == NULL)
                    {
                      __bin.first[0] = __bin.first[thread_id];
                      __bin.first[0]->next = NULL;
                    }
                  else
                    {
                      __bin.first[thread_id]->next = __bin.first[0];
                      __bin.first[0] = __bin.first[thread_id];
                    }
                  __bin.first[thread_id] = tmp;
                  __bin.free[thread_id]--;
                  remove--;
                }
              __gthread_mutex_unlock(__bin.mutex);
            }

          // Return this block to our list and update counters and
          // owner id as needed.
          if (__bin.first[thread_id] == NULL)
            {
              __bin.first[thread_id] = block;
              block->next = NULL;
            }
          else
            {
              block->next = __bin.first[thread_id];
              __bin.first[thread_id] = block;
            }

          __bin.free[thread_id]++;

          if (thread_id == block->thread_id)
            __bin.used[thread_id]--;
          else
            {
              __bin.used[block->thread_id]--;
              block->thread_id = thread_id;
            }
        }
@@ -528,15 +511,15 @@
      else
#endif
        {
          // Single threaded application - return to global pool.
          if (__bin.first[0] == NULL)
            {
              __bin.first[0] = block;
              block->next = NULL;
            }
          else
            {
              block->next = __bin.first[0];
              __bin.first[0] = block;
            }
        }
    }
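The freelist-headroom test in deallocate() is easier to follow with concrete numbers. The figures below are made up for illustration (the real values come from _S_options); the point is that a thread only takes the global-pool lock once its surplus of free blocks clears both thresholds.

#include <cstdio>

int main()
{
  const int freelist_headroom = 10;  // stand-in for _M_freelist_headroom
  const int bin_size_count = 8;      // stand-in for _S_bin_size
  const int which = 3;               // bin of the block being freed

  int free_blocks = 2000;            // __bin.free[thread_id]
  int used_blocks = 1000;            // __bin.used[thread_id]

  // Surplus beyond the headroom worth keeping locally.
  int remove = free_blocks - used_blocks / freelist_headroom;  // 1900

  // Lock the global pool only when the surplus is large enough:
  // here 1900 > 500 and 1900 > 200, so the blocks are returned.
  int cond1 = 100 * (bin_size_count - which);                  // 500
  int cond2 = free_blocks / freelist_headroom;                 // 200

  if (remove > cond1 && remove > cond2)
    std::printf("return %d blocks to the global pool\n", remove);
  else
    std::printf("keep the surplus on the local freelist\n");
  return 0;
}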
@@ -551,52 +534,54 @@
      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically-initialized to one.
      size_t __bin_size = 1;
      while (_S_options._M_max_bytes > __bin_size)
        {
          __bin_size = __bin_size << 1;
          _S_bin_size++;
        }

      // Setup the bin map for quick lookup of the relevant bin.
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(binmap_type);
      _S_binmap = static_cast<binmap_type*>(::operator new(__j));

      binmap_type* __bp = _S_binmap;
      binmap_type __bin_max = 1;
      binmap_type __bint = 0;
      for (binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; __ct++)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              __bint++;
            }
          *__bp++ = __bint;
        }

      // If __gthread_active_p(), create and initialize the list of
      // free thread ids. Single-threaded applications use thread id 0
      // directly and have no need for this.
      void* __v;
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          const size_t __k = sizeof(thread_record) * _S_options._M_max_threads;
          __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<thread_record*>(__v);

          // NOTE! The first assignable thread id is 1, since the
          // global pool uses id 0.
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; __i++)
            {
              thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr.next = &_S_thread_freelist_first[__i];
              __tr.id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1].next = NULL;
          _S_thread_freelist_first[__i - 1].id = __i;

          // Make sure this is initialized.
@@ -610,50 +595,54 @@
#endif

      // Initialize _S_bin and its members.
      __v = ::operator new(sizeof(bin_record) * _S_bin_size);
      _S_bin = static_cast<bin_record*>(__v);

      // Maximum number of threads.
      size_t __max_threads = 1;
#ifdef __GTHREADS
      if (__gthread_active_p())
        __max_threads = _S_options._M_max_threads + 1;
#endif

      for (size_t __n = 0; __n < _S_bin_size; __n++)
        {
          bin_record& __bin = _S_bin[__n];
          __v = ::operator new(sizeof(block_record*) * __max_threads);
          __bin.first = static_cast<block_record**>(__v);

#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin.free = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin.used = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin.mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin.mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin.mutex); }
#endif
            }
#endif

          for (size_t __threadn = 0; __threadn < __max_threads; __threadn++)
            {
              __bin.first[__threadn] = NULL;
#ifdef __GTHREADS
              if (__gthread_active_p())
                {
                  __bin.free[__threadn] = 0;
                  __bin.used[__threadn] = 0;
                }
#endif
            }
        }
@@ -661,51 +650,53 @@
      _S_init = true;
    }
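The binmap built in _S_initialize() trades (_M_max_bytes + 1) small table entries for an O(1) size-to-bin lookup on every allocate() and deallocate(). This self-contained sketch runs the same loop with plain names, using std::vector instead of the raw ::operator new buffer, and checks a few lookups.

#include <cassert>
#include <cstddef>
#include <vector>

// Entry ct holds the index of the smallest power-of-2 bin that can
// hold ct bytes; max_bytes stands in for _S_options._M_max_bytes.
std::vector<unsigned short>
make_binmap(std::size_t max_bytes)
{
  std::vector<unsigned short> binmap(max_bytes + 1);
  std::size_t bin_max = 1;
  unsigned short bin = 0;
  for (std::size_t ct = 0; ct <= max_bytes; ct++)
    {
      if (ct > bin_max)
        {
          bin_max <<= 1;
          bin++;
        }
      binmap[ct] = bin;
    }
  return binmap;
}

int main()
{
  std::vector<unsigned short> binmap = make_binmap(128);
  assert(binmap[1] == 0);    // 1 byte fits the 1-byte bin
  assert(binmap[3] == 2);    // 3 bytes round up to the 4-byte bin
  assert(binmap[4] == 2);    // 4 bytes fit the 4-byte bin exactly
  assert(binmap[100] == 7);  // 100 bytes round up to the 128-byte bin
  return 0;
}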
  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it's active, check the thread
      // key value and return its id; if the key is not yet set, take
      // the first record from _S_thread_freelist, set the key, and
      // return the new id.
      if (__gthread_active_p())
        {
          thread_record* __freelist_pos =
            static_cast<thread_record*>(__gthread_getspecific(_S_thread_key));
          if (__freelist_pos == NULL)
            {
              // Since _S_options._M_max_threads must be larger than
              // the theoretical max number of threads of the OS the
              // list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key,
                                    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to front of thread_freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      thread_record* __tr = static_cast<thread_record*>(__freelist_pos);
      __tr->next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif
  template<typename _Tp>
...
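_S_get_thread_id() and _S_destroy_thread_key() above give every thread a small, stable index into the per-bin first/free/used arrays and recycle that index when the thread exits. The sketch below is a rough pthreads analogue of the same pattern, not the library code: it uses plain POSIX calls instead of the gthr wrappers, and the fixed table size and explicit init() entry point are assumptions of the sketch.

#include <pthread.h>
#include <cstddef>

struct thread_record
{
  thread_record* next;
  std::size_t    id;
};

// Id 0 is reserved for the global pool, so ids 1..63 are assignable.
static thread_record   records[64];
static thread_record*  freelist_first;
static pthread_mutex_t freelist_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t   thread_key;

// Key destructor: the exiting thread's record goes back on the freelist.
static void
destroy_thread_key(void* pos)
{
  pthread_mutex_lock(&freelist_mutex);
  static_cast<thread_record*>(pos)->next = freelist_first;
  freelist_first = static_cast<thread_record*>(pos);
  pthread_mutex_unlock(&freelist_mutex);
}

// Must run once before the first get_thread_id() call.
static void
init()
{
  for (std::size_t i = 1; i < 63; i++)
    {
      records[i].id = i;
      records[i].next = &records[i + 1];
    }
  records[63].id = 63;
  records[63].next = NULL;
  freelist_first = &records[1];
  pthread_key_create(&thread_key, destroy_thread_key);
}

static std::size_t
get_thread_id()
{
  void* pos = pthread_getspecific(thread_key);
  if (pos == NULL)
    {
      // First call from this thread: claim the next free record.
      pthread_mutex_lock(&freelist_mutex);
      pos = freelist_first;
      freelist_first = freelist_first->next;
      pthread_mutex_unlock(&freelist_mutex);
      pthread_setspecific(thread_key, pos);
    }
  return static_cast<thread_record*>(pos)->id;
}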
// Copyright (C) 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// 20.4.1.1 allocator members

#include <memory>
#include <testsuite_hooks.h>

// libstdc++/14176
void test02()
{
  unsigned int len = 0;
  std::allocator<int> a;
  int* p = a.allocate(len);
  a.deallocate(p, len);
}

#if !__GXX_WEAK__ && _MT_ALLOCATOR_H
// Explicitly instantiate for systems with no COMDAT or weak support.
template class __gnu_cxx::__mt_alloc<int>;
#endif

int main()
{
  test02();
  return 0;
}
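For context, the allocator touched by this commit is also usable directly as a container allocator, which is how the performance tests listed in the 2004-03-11 entry exercise it. A minimal usage sketch, assuming a GCC build where <ext/mt_allocator.h> is installed:

#include <ext/mt_allocator.h>
#include <list>

int main()
{
  // Every node of this list comes from __mt_alloc's per-thread bins.
  std::list<int, __gnu_cxx::__mt_alloc<int> > l;
  for (int i = 0; i < 1000; ++i)
    l.push_back(i);
  return 0;
}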