Commit b0c849fa by François Dumont, committed by Jonathan Wakely

Simplify node ownership in _Hashtable members

Introduce an RAII type to manage nodes in unordered containers while
they are being inserted. If the caller always owns a node until it is
inserted, then the insertion functions don't need to deallocate on
failure. This allows a FIXME in the node re-insertion API to be removed.
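
The idea is roughly the following (a minimal sketch only, not the
_Scoped_node type this patch adds to hashtable.h; the names node_guard
and destroy_node are hypothetical):

    // The guard owns a freshly built node until the caller explicitly
    // releases it after a successful insertion, so the insertion path
    // itself never needs to clean up on failure.
    #include <utility>

    template<typename Node, typename Alloc>
    struct node_guard
    {
      Alloc* alloc;  // allocator-like object able to destroy the node
      Node*  node;   // owned node, or nullptr once released

      node_guard(Alloc* a, Node* n) : alloc(a), node(n) { }
      node_guard(const node_guard&) = delete;
      node_guard& operator=(const node_guard&) = delete;

      // Destroy the node on scope exit if ownership was never transferred.
      ~node_guard() { if (node) alloc->destroy_node(node); }

      // Relinquish ownership, e.g. once the node is linked into a bucket.
      Node* release() { return std::exchange(node, nullptr); }
    };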

Also change extract(const key_type&) so that it no longer calls
extract(const_iterator).  This avoids walking the bucket's nodes a second
time to find the node before the one being extracted.
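
To illustrate why (hypothetical names, not the libstdc++ code): unlinking
from a singly linked bucket needs a pointer to the predecessor, so
extract(const key_type&) can search for the node *before* the match
directly, instead of calling find() and then walking the bucket a second
time as extract(const_iterator) would.

    struct node { node* next; int key; };

    // Return the node before the first node with the given key, or nullptr.
    node* find_before(node* before_begin, int key)
    {
      for (node* p = before_begin; p->next != nullptr; p = p->next)
        if (p->next->key == key)
          return p;
      return nullptr;
    }

    // Unlink and return the node that follows prev (the matching node).
    node* unlink_after(node* prev)
    {
      node* n = prev->next;
      prev->next = n->next;
      n->next = nullptr;
      return n;
    }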

2019-06-17  François Dumont  <fdumont@gcc.gnu.org>
	    Jonathan Wakely  <jwakely@redhat.com>

	* include/bits/hashtable.h (struct _Hashtable::_Scoped_node): New type.
	(_Hashtable::_M_insert_unique_node): Add key_type parameter. Don't
	deallocate node if insertion fails.
	(_Hashtable::_M_insert_multi_node): Likewise.
	(_Hashtable::_M_reinsert_node): Pass additional key argument.
	(_Hashtable::_M_reinsert_node_multi): Likewise. Remove FIXME.
	(_Hashtable::_M_extract_node(size_t, __node_base*)): New function.
	(_Hashtable::extract(const_iterator)): Use _M_extract_node.
	(_Hashtable::extract(const _Key&)): Likewise.
	(_Hashtable::_M_merge_unique): Pass additional key argument.
	(_Hashtable::_M_emplace<Args>(true_type, Args&&...)): Likewise. Use
	_Scoped_node.
	(_Hashtable::_M_insert): Likewise.
	* include/bits/hashtable_policy.h (_Map_base::operator[]): Likewise.
	(_Hashtable_alloc): Add comments to functions with misleading names.

Co-Authored-By: Jonathan Wakely <jwakely@redhat.com>

From-SVN: r272381
diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -256,6 +256,30 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       using __reuse_or_alloc_node_gen_t =
 	__detail::_ReuseOrAllocNode<__node_alloc_type>;

+      // Simple RAII type for managing a node containing an element
+      struct _Scoped_node
+      {
+	// Take ownership of a node with a constructed element.
+	_Scoped_node(__node_type* __n, __hashtable_alloc* __h)
+	: _M_h(__h), _M_node(__n) { }
+
+	// Allocate a node and construct an element within it.
+	template<typename... _Args>
+	  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
+	  : _M_h(__h),
+	    _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
+	  { }
+
+	// Destroy element and deallocate node.
+	~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
+
+	_Scoped_node(const _Scoped_node&) = delete;
+	_Scoped_node& operator=(const _Scoped_node&) = delete;
+
+	__hashtable_alloc* _M_h;
+	__node_type* _M_node;
+      };
+
       // Metaprogramming for picking apart hash caching.
       template<typename _Cond>
 	using __if_hash_cached = __or_<__not_<__hash_cached>, _Cond>;
@@ -669,17 +693,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __node_base*
       _M_get_previous_node(size_type __bkt, __node_base* __n);

-      // Insert node with hash code __code, in bucket bkt if no rehash (assumes
-      // no element with its key already present). Take ownership of the node,
-      // deallocate it on exception.
+      // Insert node __n with key __k and hash code __code, in bucket __bkt
+      // if no rehash (assumes no element with same key already present).
+      // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
-      _M_insert_unique_node(size_type __bkt, __hash_code __code,
-			    __node_type* __n, size_type __n_elt = 1);
+      _M_insert_unique_node(const key_type& __k, size_type __bkt,
+			    __hash_code __code, __node_type* __n,
+			    size_type __n_elt = 1);

-      // Insert node with hash code __code. Take ownership of the node,
-      // deallocate it on exception.
+      // Insert node __n with key __k and hash code __code.
+      // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
-      _M_insert_multi_node(__node_type* __hint,
+      _M_insert_multi_node(__node_type* __hint, const key_type& __k,
 			   __hash_code __code, __node_type* __n);

       template<typename... _Args>
@@ -806,7 +831,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  else
 	    {
 	      __ret.position
-		= _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
+		= _M_insert_unique_node(__k, __bkt, __code, __nh._M_ptr);
 	      __nh._M_ptr = nullptr;
 	      __ret.inserted = true;
 	    }
@@ -818,33 +843,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       iterator
       _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
       {
-	iterator __ret;
 	if (__nh.empty())
-	  __ret = end();
-	else
-	  {
-	    __glibcxx_assert(get_allocator() == __nh.get_allocator());
+	  return end();

-	    auto __code = this->_M_hash_code(__nh._M_key());
-	    auto __node = std::exchange(__nh._M_ptr, nullptr);
-	    // FIXME: this deallocates the node on exception.
-	    __ret = _M_insert_multi_node(__hint._M_cur, __code, __node);
-	  }
+	__glibcxx_assert(get_allocator() == __nh.get_allocator());
+
+	const key_type& __k = __nh._M_key();
+	auto __code = this->_M_hash_code(__k);
+	auto __ret
+	  = _M_insert_multi_node(__hint._M_cur, __k, __code, __nh._M_ptr);
+	__nh._M_ptr = nullptr;
 	return __ret;
       }

-      /// Extract a node.
+    private:
       node_type
-      extract(const_iterator __pos)
+      _M_extract_node(size_t __bkt, __node_base* __prev_n)
       {
-	__node_type* __n = __pos._M_cur;
-	size_t __bkt = _M_bucket_index(__n);
-
-	// Look for previous node to unlink it from the erased one, this
-	// is why we need buckets to contain the before begin to make
-	// this search fast.
-	__node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+	__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
 	if (__prev_n == _M_buckets[__bkt])
 	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
 	     __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
@@ -861,14 +877,25 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	return { __n, this->_M_node_allocator() };
       }

+    public:
+      // Extract a node.
+      node_type
+      extract(const_iterator __pos)
+      {
+	size_t __bkt = _M_bucket_index(__pos._M_cur);
+	return _M_extract_node(__bkt,
+			       _M_get_previous_node(__bkt, __pos._M_cur));
+      }
+
       /// Extract a node.
       node_type
       extract(const _Key& __k)
       {
 	node_type __nh;
-	auto __pos = find(__k);
-	if (__pos != end())
-	  __nh = extract(const_iterator(__pos));
+	__hash_code __code = this->_M_hash_code(__k);
+	std::size_t __bkt = _M_bucket_index(__k, __code);
+	if (__node_base* __prev_node = _M_find_before_node(__bkt, __k, __code))
+	  __nh = _M_extract_node(__bkt, __prev_node);
 	return __nh;
       }
@@ -885,13 +912,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
 	  {
 	    auto __pos = __i++;
-	    const key_type& __k = this->_M_extract()(__pos._M_cur->_M_v());
+	    const key_type& __k = this->_M_extract()(*__pos);
 	    __hash_code __code = this->_M_hash_code(__k);
 	    size_type __bkt = _M_bucket_index(__k, __code);
 	    if (_M_find_node(__bkt, __k, __code) == nullptr)
 	      {
 		auto __nh = __src.extract(__pos);
-		_M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
+		_M_insert_unique_node(__k, __bkt, __code, __nh._M_ptr,
+				      __n_elt);
 		__nh._M_ptr = nullptr;
 		__n_elt = 1;
 	      }
@@ -1634,31 +1662,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> pair<iterator, bool>
     {
       // First build the node to get access to the hash code
-      __node_type* __node
-	= this->_M_allocate_node(std::forward<_Args>(__args)...);
-      const key_type& __k = this->_M_extract()(__node->_M_v());
-      __hash_code __code;
-      __try
-	{
-	  __code = this->_M_hash_code(__k);
-	}
-      __catch(...)
-	{
-	  this->_M_deallocate_node(__node);
-	  __throw_exception_again;
-	}
+      _Scoped_node __node { this, std::forward<_Args>(__args)... };
+      const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
+      __hash_code __code = this->_M_hash_code(__k);

       size_type __bkt = _M_bucket_index(__k, __code);
       if (__node_type* __p = _M_find_node(__bkt, __k, __code))
-	{
-	  // There is already an equivalent node, no insertion
-	  this->_M_deallocate_node(__node);
-	  return std::make_pair(iterator(__p), false);
-	}
+	// There is already an equivalent node, no insertion
+	return std::make_pair(iterator(__p), false);

       // Insert the node
-      return std::make_pair(_M_insert_unique_node(__bkt, __code, __node),
-			    true);
+      auto __pos = _M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+      __node._M_node = nullptr;
+      return { __pos, true };
     }

   template<typename _Key, typename _Value,
@@ -1673,21 +1688,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> iterator
     {
       // First build the node to get its hash code.
-      __node_type* __node =
-	this->_M_allocate_node(std::forward<_Args>(__args)...);
-      __hash_code __code;
-      __try
-	{
-	  __code = this->_M_hash_code(this->_M_extract()(__node->_M_v()));
-	}
-      __catch(...)
-	{
-	  this->_M_deallocate_node(__node);
-	  __throw_exception_again;
-	}
+      _Scoped_node __node { this, std::forward<_Args>(__args)... };
+      const key_type& __k = this->_M_extract()(__node._M_node->_M_v());

-      return _M_insert_multi_node(__hint._M_cur, __code, __node);
+      __hash_code __code = this->_M_hash_code(__k);
+      auto __pos
+	= _M_insert_multi_node(__hint._M_cur, __k, __code, __node._M_node);
+      __node._M_node = nullptr;
+      return __pos;
     }

   template<typename _Key, typename _Value,
@@ -1697,8 +1705,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_insert_unique_node(size_type __bkt, __hash_code __code,
-			  __node_type* __node, size_type __n_elt)
+    _M_insert_unique_node(const key_type& __k, size_type __bkt,
+			  __hash_code __code, __node_type* __node,
+			  size_type __n_elt)
     -> iterator
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
@@ -1706,13 +1715,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
 					  __n_elt);

-      __try
-	{
       if (__do_rehash.first)
 	{
 	  _M_rehash(__do_rehash.second, __saved_state);
-	  __bkt
-	    = _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
+	  __bkt = _M_bucket_index(__k, __code);
 	}

       this->_M_store_code(__node, __code);
@@ -1722,15 +1728,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       ++_M_element_count;
       return iterator(__node);
     }
-      __catch(...)
-	{
-	  this->_M_deallocate_node(__node);
-	  __throw_exception_again;
-	}
-    }

-  // Insert node, in bucket bkt if no rehash (assumes no element with its key
-  // already present). Take ownership of the node, deallocate it on exception.
   template<typename _Key, typename _Value,
 	   typename _Alloc, typename _ExtractKey, typename _Equal,
 	   typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
@@ -1738,21 +1736,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_insert_multi_node(__node_type* __hint, __hash_code __code,
-			 __node_type* __node)
+    _M_insert_multi_node(__node_type* __hint, const key_type& __k,
+			 __hash_code __code, __node_type* __node)
     -> iterator
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
       std::pair<bool, std::size_t> __do_rehash
 	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);

-      __try
-	{
       if (__do_rehash.first)
 	_M_rehash(__do_rehash.second, __saved_state);

       this->_M_store_code(__node, __code);
-      const key_type& __k = this->_M_extract()(__node->_M_v());
       size_type __bkt = _M_bucket_index(__k, __code);

       // Find the node before an equivalent one or use hint if it exists and
@@ -1779,20 +1774,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    }
 	}
       else
-	// The inserted node has no equivalent in the
-	// hashtable. We must insert the new node at the
-	// beginning of the bucket to preserve equivalent
-	// elements' relative positions.
+	// The inserted node has no equivalent in the hashtable. We must
+	// insert the new node at the beginning of the bucket to preserve
+	// equivalent elements' relative positions.
 	_M_insert_bucket_begin(__bkt, __node);

       ++_M_element_count;
       return iterator(__node);
     }
-      __catch(...)
-	{
-	  this->_M_deallocate_node(__node);
-	  __throw_exception_again;
-	}
-    }

   // Insert v if no element with its key is already present.
   template<typename _Key, typename _Value,
@@ -1811,12 +1799,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __hash_code __code = this->_M_hash_code(__k);
       size_type __bkt = _M_bucket_index(__k, __code);

-      __node_type* __n = _M_find_node(__bkt, __k, __code);
-      if (__n)
-	return std::make_pair(iterator(__n), false);
+      if (__node_type* __node = _M_find_node(__bkt, __k, __code))
+	return { iterator(__node), false };

-      __n = __node_gen(std::forward<_Arg>(__v));
-      return { _M_insert_unique_node(__bkt, __code, __n, __n_elt), true };
+      _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
+      auto __pos
+	= _M_insert_unique_node(__k, __bkt, __code, __node._M_node, __n_elt);
+      __node._M_node = nullptr;
+      return { __pos, true };
     }

   // Insert v unconditionally.
@@ -1837,9 +1827,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __hash_code __code = this->_M_hash_code(this->_M_extract()(__v));

       // Second allocate new node so that we don't rehash if it throws.
-      __node_type* __node = __node_gen(std::forward<_Arg>(__v));
-
-      return _M_insert_multi_node(__hint._M_cur, __code, __node);
+      _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
+      const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
+      auto __pos
+	= _M_insert_multi_node(__hint._M_cur, __k, __code, __node._M_node);
+      __node._M_node = nullptr;
+      return __pos;
     }

   template<typename _Key, typename _Value,
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -706,17 +706,19 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
+      if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+	return __node->_M_v().second;

-      if (!__p)
-	{
-	  __p = __h->_M_allocate_node(std::piecewise_construct,
-				      std::tuple<const key_type&>(__k),
-				      std::tuple<>());
-	  return __h->_M_insert_unique_node(__bkt, __code, __p)->second;
-	}
-
-      return __p->_M_v().second;
+      typename __hashtable::_Scoped_node __node {
+	__h,
+	std::piecewise_construct,
+	std::tuple<const key_type&>(__k),
+	std::tuple<>()
+      };
+      auto __pos
+	= __h->_M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+      __node._M_node = nullptr;
+      return __pos->second;
     }

   template<typename _Key, typename _Pair, typename _Alloc, typename _Equal,
@@ -731,17 +733,19 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
+      if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+	return __node->_M_v().second;

-      if (!__p)
-	{
-	  __p = __h->_M_allocate_node(std::piecewise_construct,
-				      std::forward_as_tuple(std::move(__k)),
-				      std::tuple<>());
-	  return __h->_M_insert_unique_node(__bkt, __code, __p)->second;
-	}
-
-      return __p->_M_v().second;
+      typename __hashtable::_Scoped_node __node {
+	__h,
+	std::piecewise_construct,
+	std::forward_as_tuple(std::move(__k)),
+	std::tuple<>()
+      };
+      auto __pos
+	= __h->_M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+      __node._M_node = nullptr;
+      return __pos->second;
     }

   template<typename _Key, typename _Pair, typename _Alloc, typename _Equal,
@@ -1972,8 +1976,8 @@ namespace __detail
     }

   /**
-   * This type deals with all allocation and keeps an allocator instance through
-   * inheritance to benefit from EBO when possible.
+   * This type deals with all allocation and keeps an allocator instance
+   * through inheritance to benefit from EBO when possible.
    */
   template<typename _NodeAlloc>
     struct _Hashtable_alloc : private _Hashtable_ebo_helper<0, _NodeAlloc>
@@ -2012,17 +2016,21 @@ namespace __detail
       _M_node_allocator() const
       { return __ebo_node_alloc::_M_cget(); }

+      // Allocate a node and construct an element within it.
       template<typename... _Args>
 	__node_type*
 	_M_allocate_node(_Args&&... __args);

+      // Destroy the element within a node and deallocate the node.
       void
       _M_deallocate_node(__node_type* __n);

+      // Deallocate a node.
       void
       _M_deallocate_node_ptr(__node_type* __n);

-      // Deallocate the linked list of nodes pointed to by __n
+      // Deallocate the linked list of nodes pointed to by __n.
+      // The elements within the nodes are destroyed.
       void
       _M_deallocate_nodes(__node_type* __n);