Commit 2e362c74 by Benjamin Kosnik, committed by Benjamin Kosnik

atomicity.h: Move to...

2006-09-13  Benjamin Kosnik  <bkoz@redhat.com>

	* include/bits/atomicity.h: Move to...
	* include/ext/atomicity.h: ...here.	
	* include/bits/concurrence.h: Move to...
	* include/ext/concurrence.h: ...here.
	* include/Makefile.am (ext_headers): Additions.
	(bits_headers): Subtractions.
	* include/Makefile.in: Regenerate.

	* include/ext/bitmap_allocator.h (_Mutex, __threads_enabled,
	_Lock, _Auto_Lock): Subsume into...
	* include/bits/concurrence.h (__mutex): ...this. Error check
	locking and unlocking.	
	(lock): Uglify to...
	(__scoped_lock): Use __mutex. 
	(__glibcxx_mutex_define_initialized): Remove.
	(__glibcxx_mutex_type): Remove.
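
	A minimal sketch of the resulting call-site pattern (illustrative
	names, not from this commit): the macro layer gives way to an
	object with RAII locking.

	  __gnu_cxx::__mutex cache_mutex;

	  void
	  refresh_cache()
	  {
	    // Locks in the constructor, unlocks in the destructor, so the
	    // mutex is released on every exit path, including exceptions.
	    __gnu_cxx::__scoped_lock sentry(cache_mutex);
	    // ...critical section...
	  }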

	* include/tr1/boost_shared_ptr.h: Formatting tweaks, adjustments.
	(_Lock_policy): Move from here...
	* include/ext/concurrence.h: ... to here.
	(__shared_ptr_default_lock_mode): To __default_lock_policy.
	(_S_lockfree): To _S_atomic.
	Document.

	* libsupc++/guard.cc (static_mutex): Subsume into and fixup for...
	* include/ext/concurrence.h (__recursive_mutex): ...this. Error
	check locking and unlocking.
	* libsupc++/eh_alloc.cc: Use __scoped_lock.

	* config/os/aix/atomicity.h: Fixups for include paths, mutex to
	__scoped_lock change, removal of locking defines.
	* config/os/irix/atomicity.h: Same.
	* config/cpu/cris/atomicity.h: Same.
	* config/cpu/m68k/atomicity.h: Same.
	* config/cpu/hppa/atomicity.h: Same.
	* config/cpu/mips/atomicity.h: Same.
	* config/cpu/sparc/atomicity.h: Same.
	* config/cpu/i386/atomicity.h: Same.
	* config/cpu/i486/atomicity.h: Same.
	* config/cpu/sh/atomicity.h: Same.
	* config/cpu/generic/atomicity_mutex/atomicity.h: Same.
	* include/ext/pool_allocator.h: Same.
	* include/ext/bitmap_allocator.h: Same.
	* include/ext/rc_string_base.h: Same.
	* include/ext/mt_allocator.h: Same.
	* include/bits/locale_classes.h: Same.
	* include/bits/basic_string.h: Same.
	* include/bits/ios_base.h: Same.
	* include/tr1/memory: Same.
	* src/pool_allocator.cc: Same.
	* src/mt_allocator.cc: Same.
	* src/locale_init.cc: Same.
	* src/ios.cc: Same.
	* src/locale.cc: Same.
	* src/bitmap_allocator.cc: Same.
	* src/ios_init.cc: Same.
	* src/debug.cc: Same.

From-SVN: r116942
......@@ -27,7 +27,7 @@
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -28,12 +28,12 @@
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/atomicity.h>
#include <bits/concurrence.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
namespace
{
__glibcxx_mutex_define_initialized(atomic_mutex);
__gnu_cxx::__mutex atomic_mutex;
} // anonymous namespace
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......@@ -42,11 +42,10 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
__attribute__ ((__unused__))
__exchange_and_add(volatile _Atomic_word* __mem, int __val)
{
__glibcxx_mutex_lock(atomic_mutex);
__gnu_cxx::__scoped_lock sentry(atomic_mutex);
_Atomic_word __result;
__result = *__mem;
*__mem += __val;
__glibcxx_mutex_unlock(atomic_mutex);
return __result;
}
......
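The companion __atomic_add fallback in the same file follows the same
pattern; a hedged reconstruction, assuming the file-local atomic_mutex
defined above:

  void
  __attribute__ ((__unused__))
  __atomic_add(volatile _Atomic_word* __mem, int __val)
  {
    // The scoped lock replaces the paired lock/unlock macros.
    __gnu_cxx::__scoped_lock sentry(atomic_mutex);
    *__mem += __val;
  }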
......@@ -28,7 +28,7 @@
// the GNU General Public License.
#include <bits/c++config.h>
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -27,7 +27,7 @@
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -27,7 +27,7 @@
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -27,7 +27,7 @@
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -28,7 +28,7 @@
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -74,12 +74,12 @@ __atomic_add (volatile _Atomic_word* __mem, int __val)
/* This is generic/atomicity.h */
#include <bits/atomicity.h>
#include <bits/concurrence.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
namespace
{
__glibcxx_mutex_define_initialized(atomic_mutex);
__gnu_cxx::__mutex atomic_mutex;
} // anonymous namespace
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......@@ -88,11 +88,10 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
__attribute__ ((__unused__))
__exchange_and_add(volatile _Atomic_word* __mem, int __val)
{
__glibcxx_mutex_lock(atomic_mutex);
__gnu_cxx::__scoped_lock sentry(atomic_mutex);
_Atomic_word __result;
__result = *__mem;
*__mem += __val;
__glibcxx_mutex_unlock(atomic_mutex);
return __result;
}
......
......@@ -28,7 +28,7 @@
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -27,7 +27,7 @@
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/atomicity.h>
#include <ext/atomicity.h>
/* We cannot use the cpu/powerpc/bits/atomicity.h inline assembly
definitions for these operations since they depend on operations
......@@ -48,15 +48,11 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_Atomic_word
__attribute__ ((__unused__))
__exchange_and_add (volatile _Atomic_word* __mem, int __val)
{
return ::fetch_and_add (const_cast<atomic_p>(__mem), __val);
}
{ return ::fetch_and_add(const_cast<atomic_p>(__mem), __val); }
void
__attribute__ ((__unused__))
__atomic_add (volatile _Atomic_word* __mem, int __val)
{
(void) ::fetch_and_add (const_cast<atomic_p>(__mem), __val);
}
{ (void) ::fetch_and_add(const_cast<atomic_p>(__mem), __val); }
_GLIBCXX_END_NAMESPACE
......@@ -28,7 +28,7 @@
// the GNU General Public License.
#include <mutex.h>
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -93,7 +93,6 @@ bits_srcdir = ${glibcxx_srcdir}/include/bits
bits_builddir = ./bits
bits_headers = \
${bits_srcdir}/allocator.h \
${bits_srcdir}/atomicity.h \
${bits_srcdir}/basic_ios.h \
${bits_srcdir}/basic_ios.tcc \
${bits_srcdir}/basic_string.h \
......@@ -102,7 +101,6 @@ bits_headers = \
${bits_srcdir}/char_traits.h \
${bits_srcdir}/codecvt.h \
${bits_srcdir}/concept_check.h \
${bits_srcdir}/concurrence.h \
${bits_srcdir}/cpp_type_traits.h \
${bits_srcdir}/deque.tcc \
${bits_srcdir}/fstream.tcc \
......@@ -529,9 +527,11 @@ ext_srcdir = ${glibcxx_srcdir}/include/ext
ext_builddir = ./ext
ext_headers = \
${ext_srcdir}/algorithm \
${ext_srcdir}/atomicity.h \
${ext_srcdir}/array_allocator.h \
${ext_srcdir}/bitmap_allocator.h \
${ext_srcdir}/codecvt_specializations.h \
${ext_srcdir}/concurrence.h \
${ext_srcdir}/debug_allocator.h \
${ext_srcdir}/stdio_filebuf.h \
${ext_srcdir}/stdio_sync_filebuf.h \
......
......@@ -318,7 +318,6 @@ bits_srcdir = ${glibcxx_srcdir}/include/bits
bits_builddir = ./bits
bits_headers = \
${bits_srcdir}/allocator.h \
${bits_srcdir}/atomicity.h \
${bits_srcdir}/basic_ios.h \
${bits_srcdir}/basic_ios.tcc \
${bits_srcdir}/basic_string.h \
......@@ -327,7 +326,6 @@ bits_headers = \
${bits_srcdir}/char_traits.h \
${bits_srcdir}/codecvt.h \
${bits_srcdir}/concept_check.h \
${bits_srcdir}/concurrence.h \
${bits_srcdir}/cpp_type_traits.h \
${bits_srcdir}/deque.tcc \
${bits_srcdir}/fstream.tcc \
......@@ -750,9 +748,11 @@ ext_srcdir = ${glibcxx_srcdir}/include/ext
ext_builddir = ./ext
ext_headers = \
${ext_srcdir}/algorithm \
${ext_srcdir}/atomicity.h \
${ext_srcdir}/array_allocator.h \
${ext_srcdir}/bitmap_allocator.h \
${ext_srcdir}/codecvt_specializations.h \
${ext_srcdir}/concurrence.h \
${ext_srcdir}/debug_allocator.h \
${ext_srcdir}/stdio_filebuf.h \
${ext_srcdir}/stdio_sync_filebuf.h \
......
......@@ -42,7 +42,7 @@
#pragma GCC system_header
#include <bits/atomicity.h>
#include <ext/atomicity.h>
#include <debug/debug.h>
_GLIBCXX_BEGIN_NAMESPACE(std)
......
// Support for concurrent programming -*- C++ -*-
// Copyright (C) 2003, 2004, 2005
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
/** @file concurrence.h
* This is an internal header file, included by other library headers.
* You should not attempt to use it directly.
*/
#ifndef _CONCURRENCE_H
#define _CONCURRENCE_H 1
// GCC's thread abstraction layer
#include "bits/gthr.h"
#if __GTHREADS
# ifdef __GTHREAD_MUTEX_INIT
# define __glibcxx_mutex_type __gthread_mutex_t
# define __glibcxx_mutex_define_initialized(NAME) \
__gthread_mutex_t NAME = __GTHREAD_MUTEX_INIT
# define __glibcxx_mutex_lock(NAME) \
__gthread_mutex_lock(&NAME)
# else
// Implies __GTHREAD_MUTEX_INIT_FUNCTION
struct __glibcxx_mutex : public __gthread_mutex_t
{
__glibcxx_mutex() { __GTHREAD_MUTEX_INIT_FUNCTION(this); }
};
# define __glibcxx_mutex_type __glibcxx_mutex
# define __glibcxx_mutex_define_initialized(NAME) \
__glibcxx_mutex NAME
# define __glibcxx_mutex_lock(NAME) \
__gthread_mutex_lock(&NAME)
# endif
# define __glibcxx_mutex_unlock(NAME) __gthread_mutex_unlock(&NAME)
#else
# define __glibcxx_mutex_type __gthread_mutex_t
# define __glibcxx_mutex_define_initialized(NAME) __gthread_mutex_t NAME
# define __glibcxx_mutex_lock(NAME)
# define __glibcxx_mutex_unlock(NAME)
#endif
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
typedef __glibcxx_mutex_type mutex_type;
/// @brief Scoped lock idiom.
// Acquire the mutex here with a constructor call, then release with
// the destructor call in accordance with RAII style.
class lock
{
// Externally defined and initialized.
mutex_type& device;
public:
explicit lock(mutex_type& name) : device(name)
{ __glibcxx_mutex_lock(device); }
~lock() throw()
{ __glibcxx_mutex_unlock(device); }
private:
lock(const lock&);
lock& operator=(const lock&);
};
_GLIBCXX_END_NAMESPACE
#endif
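For contrast with its replacement later in this commit, a minimal sketch
of how this old interface was used (illustrative names):

  __glibcxx_mutex_define_initialized(registry_mutex);

  void
  register_entry()
  {
    // Acquires in the constructor, releases in the destructor, but the
    // underlying __gthread_mutex_* calls are not error checked.
    __gnu_cxx::lock sentry(registry_mutex);
    // ...critical section...
  }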
......@@ -42,7 +42,7 @@
#pragma GCC system_header
#include <bits/atomicity.h>
#include <ext/atomicity.h>
#include <bits/localefwd.h>
#include <bits/locale_classes.h>
......
......@@ -45,8 +45,7 @@
#include <bits/localefwd.h>
#include <cstring> // For strcmp.
#include <string>
#include <bits/atomicity.h>
#include <bits/gthr.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(std)
......
// Low-level functions for atomic operations -*- C++ -*-
// Support for atomic operations -*- C++ -*-
// Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
//
......@@ -41,6 +41,10 @@
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
// Functions for portable atomic access.
// To abstract locking primitives across all thread policies, use:
// __exchange_and_add_dispatch
// __atomic_add_dispatch
#ifdef _GLIBCXX_ATOMIC_BUILTINS
static inline _Atomic_word
__exchange_and_add(volatile _Atomic_word* __mem, int __val)
......@@ -52,11 +56,11 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
#else
_Atomic_word
__attribute__ ((__unused__))
__exchange_and_add(volatile _Atomic_word* __mem, int __val);
__exchange_and_add(volatile _Atomic_word*, int);
void
__attribute__ ((__unused__))
__atomic_add(volatile _Atomic_word* __mem, int __val);
__atomic_add(volatile _Atomic_word*, int);
#endif
static inline _Atomic_word
......
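The _dispatch entry points named above choose between the atomic and
single-threaded paths at run time. A hedged usage sketch, assuming the
dispatch wrapper mirrors __exchange_and_add's signature (refcount and
release() are illustrative):

  _Atomic_word refcount = 1;

  void
  release()
  {
    // Returns the previous value; dispose on the 1 -> 0 transition.
    if (__gnu_cxx::__exchange_and_add_dispatch(&refcount, -1) == 1)
      {
        // ...last owner: free the resource...
      }
  }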
......@@ -34,25 +34,13 @@
#ifndef _BITMAP_ALLOCATOR_H
#define _BITMAP_ALLOCATOR_H 1
// For std::size_t, and ptrdiff_t.
#include <cstddef>
// For __throw_bad_alloc().
#include <bits/functexcept.h>
// For std::pair.
#include <utility>
// For greater_equal, and less_equal.
#include <functional>
// For operator new.
#include <new>
// For __gthread_mutex_t, __gthread_mutex_lock and __gthread_mutex_unlock.
#include <bits/gthr.h>
#include <cstddef> // For std::size_t, and ptrdiff_t.
#include <bits/functexcept.h> // For __throw_bad_alloc().
#include <utility> // For std::pair.
#include <functional> // For greater_equal, and less_equal.
#include <new> // For operator new.
#include <debug/debug.h> // _GLIBCXX_DEBUG_ASSERT
#include <ext/concurrence.h>
/** @brief The constant in the expression below is the alignment
......@@ -65,144 +53,6 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
using std::size_t;
using std::ptrdiff_t;
#if defined __GTHREADS
namespace
{
/** @brief If true, then the application being compiled will be
* using threads, so use mutexes as a synchronization primitive,
* else do not use any synchronization primitives.
*/
bool const __threads_enabled = __gthread_active_p();
} // anonymous namespace
#endif
#if defined __GTHREADS
/** @class _Mutex bitmap_allocator.h bitmap_allocator.h
*
* @brief _Mutex is an OO-Wrapper for __gthread_mutex_t.
*
* It does not allow you to copy or assign an already initialized
* mutex. This is used merely as a convenience for the locking
* classes.
*/
class _Mutex
{
__gthread_mutex_t _M_mut;
// Prevent Copying and assignment.
_Mutex(_Mutex const&);
_Mutex& operator=(_Mutex const&);
public:
_Mutex()
{
if (__threads_enabled)
{
#if !defined __GTHREAD_MUTEX_INIT
__GTHREAD_MUTEX_INIT_FUNCTION(&_M_mut);
#else
__gthread_mutex_t __mtemp = __GTHREAD_MUTEX_INIT;
_M_mut = __mtemp;
#endif
}
}
~_Mutex()
{
// Gthreads does not define a Mutex Destruction Function.
}
__gthread_mutex_t*
_M_get() { return &_M_mut; }
};
/** @class _Lock bitmap_allocator.h bitmap_allocator.h
*
* @brief _Lock is a simple manual locking class which allows you to
* manually lock and unlock a mutex associated with the lock.
*
* There is no automatic locking or unlocking happening without the
* programmer's explicit instructions. This class unlocks the mutex
* ONLY if it has been locked. However, this check does not
* apply for locking, and wayward use may cause deadlocks.
*/
class _Lock
{
_Mutex* _M_pmt;
bool _M_locked;
// Prevent Copying and assignment.
_Lock(_Lock const&);
_Lock& operator=(_Lock const&);
public:
_Lock(_Mutex* __mptr)
: _M_pmt(__mptr), _M_locked(false)
{ }
void
_M_lock()
{
if (__threads_enabled)
{
_M_locked = true;
__gthread_mutex_lock(_M_pmt->_M_get());
}
}
void
_M_unlock()
{
if (__threads_enabled)
{
if (__builtin_expect(_M_locked, true))
{
__gthread_mutex_unlock(_M_pmt->_M_get());
_M_locked = false;
}
}
}
~_Lock() { }
};
/** @class _Auto_Lock bitmap_allocator.h bitmap_allocator.h
*
* @brief _Auto_Lock locks the associated mutex on construction, and
* unlocks on destruction.
*
* There are no checks performed, and this class follows the RAII
* principle.
*/
class _Auto_Lock
{
_Mutex* _M_pmt;
// Prevent Copying and assignment.
_Auto_Lock(_Auto_Lock const&);
_Auto_Lock& operator=(_Auto_Lock const&);
void
_M_lock()
{
if (__threads_enabled)
__gthread_mutex_lock(_M_pmt->_M_get());
}
void
_M_unlock()
{
if (__threads_enabled)
__gthread_mutex_unlock(_M_pmt->_M_get());
}
public:
_Auto_Lock(_Mutex* __mptr) : _M_pmt(__mptr)
{ this->_M_lock(); }
~_Auto_Lock() { this->_M_unlock(); }
};
#endif
namespace balloc
{
/** @class __mini_vector bitmap_allocator.h bitmap_allocator.h
......@@ -699,9 +549,10 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
*/
class free_list
{
typedef size_t* value_type;
typedef balloc::__mini_vector<value_type> vector_type;
typedef vector_type::iterator iterator;
typedef size_t* value_type;
typedef balloc::__mini_vector<value_type> vector_type;
typedef vector_type::iterator iterator;
typedef __mutex mutex_type;
struct _LT_pointer_compare
{
......@@ -712,11 +563,11 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
};
#if defined __GTHREADS
_Mutex*
mutex_type&
_M_get_mutex()
{
static _Mutex _S_mutex;
return &_S_mutex;
static mutex_type _S_mutex;
return _S_mutex;
}
#endif
......@@ -807,7 +658,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_M_insert(size_t* __addr) throw()
{
#if defined __GTHREADS
_Auto_Lock __bfl_lock(_M_get_mutex());
__gnu_cxx::__scoped_lock __bfl_lock(_M_get_mutex());
#endif
// Call _M_validate to decide what should be done with
// this particular free list.
......@@ -859,13 +710,15 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
class bitmap_allocator : private free_list
{
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef _Tp* pointer;
typedef const _Tp* const_pointer;
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef _Tp* pointer;
typedef const _Tp* const_pointer;
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
typedef free_list::mutex_type mutex_type;
template<typename _Tp1>
struct rebind
{
......@@ -971,7 +824,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_Bitmap_counter<_Alloc_block*> _S_last_request;
static typename _BPVector::size_type _S_last_dealloc_index;
#if defined __GTHREADS
static _Mutex _S_mut;
static mutex_type _S_mut;
#endif
public:
......@@ -993,7 +846,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_M_allocate_single_object() throw(std::bad_alloc)
{
#if defined __GTHREADS
_Auto_Lock __bit_lock(&_S_mut);
__gnu_cxx::__scoped_lock __bit_lock(_S_mut);
#endif
// The algorithm is something like this: The last_request
......@@ -1091,7 +944,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_M_deallocate_single_object(pointer __p) throw()
{
#if defined __GTHREADS
_Auto_Lock __bit_lock(&_S_mut);
__gnu_cxx::__scoped_lock __bit_lock(_S_mut);
#endif
_Alloc_block* __real_p = reinterpret_cast<_Alloc_block*>(__p);
......@@ -1105,8 +958,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
if (__gnu_cxx::balloc::_Inclusive_between<_Alloc_block*>
(__real_p)
(_S_mem_blocks[_S_last_dealloc_index]))
(__real_p) (_S_mem_blocks[_S_last_dealloc_index]))
{
_GLIBCXX_DEBUG_ASSERT(_S_last_dealloc_index <= _S_mem_blocks.size() - 1);
......@@ -1116,8 +968,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
}
else
{
_Iterator _iter =
__gnu_cxx::balloc::
_Iterator _iter = __gnu_cxx::balloc::
__find_if(_S_mem_blocks.begin(),
_S_mem_blocks.end(),
__gnu_cxx::balloc::
......@@ -1276,7 +1127,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
#if defined __GTHREADS
template<typename _Tp>
__gnu_cxx::_Mutex
typename bitmap_allocator<_Tp>::mutex_type
bitmap_allocator<_Tp>::_S_mut;
#endif
......@@ -1284,4 +1135,3 @@ _GLIBCXX_END_NAMESPACE
#endif
// LocalWords: namespace GTHREADS bool const gthread endif Mutex mutex
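The mechanical call-site change in this file, restated: _Auto_Lock took a
_Mutex* and left the discipline to the caller, while the replacement takes
a mutex reference and is pure RAII.

  #if defined __GTHREADS
    // Before: _Auto_Lock __bit_lock(&_S_mut);    // pointer, manual class
    __gnu_cxx::__scoped_lock __bit_lock(_S_mut);  // reference, RAII
  #endif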
// Support for concurrent programming -*- C++ -*-
// Copyright (C) 2003, 2004, 2005, 2006
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
/** @file concurrence.h
* This is an internal header file, included by other library headers.
* You should not attempt to use it directly.
*/
#ifndef _CONCURRENCE_H
#define _CONCURRENCE_H 1
#include <bits/gthr.h>
#include <bits/functexcept.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
// Available locking policies:
// _S_single single-threaded code that doesn't need to be locked.
// _S_mutex multi-threaded code that requires additional support
// from gthr.h or abstraction layers in concurrence.h.
// _S_atomic multi-threaded code using atomic operations.
enum _Lock_policy { _S_single, _S_mutex, _S_atomic };
// Compile-time constant that indicates the preferred locking policy in
// the current configuration.
static const _Lock_policy __default_lock_policy =
#ifdef __GTHREADS
// NB: This macro doesn't actually exist yet in the compiler, but is
// set somewhat haphazardly at configure time.
#ifdef _GLIBCXX_ATOMIC_BUILTINS
_S_atomic;
#else
_S_mutex;
#endif
#else
_S_single;
#endif
class __mutex
{
private:
__gthread_mutex_t _M_mutex;
__mutex(const __mutex&);
__mutex& operator=(const __mutex&);
public:
__mutex()
{
#if __GTHREADS
if (__gthread_active_p())
{
#if defined __GTHREAD_MUTEX_INIT
__gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
_M_mutex = __tmp;
#else
__GTHREAD_MUTEX_INIT_FUNCTION(_M_mutex);
#endif
}
#endif
}
void lock()
{
#if __GTHREADS
if (__gthread_active_p())
{
if (__gthread_mutex_lock(&_M_mutex) != 0)
std::__throw_runtime_error("__mutex::lock");
}
#endif
}
void unlock()
{
#if __GTHREADS
if (__gthread_active_p())
{
if (__gthread_mutex_unlock(&_M_mutex) != 0)
std::__throw_runtime_error("__mutex::unlock");
}
#endif
}
};
class __recursive_mutex
{
private:
__gthread_recursive_mutex_t _M_mutex;
__recursive_mutex(const __recursive_mutex&);
__recursive_mutex& operator=(const __recursive_mutex&);
public:
__recursive_mutex()
{
#if __GTHREADS
if (__gthread_active_p())
{
#if defined __GTHREAD_RECURSIVE_MUTEX_INIT
__gthread_recursive_mutex_t __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
_M_mutex = __tmp;
#else
__GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(_M_mutex);
#endif
}
#endif
}
void lock()
{
#if __GTHREADS
if (__gthread_active_p())
{
if (__gthread_recursive_mutex_lock(&_M_mutex) != 0)
std::__throw_runtime_error("__recursive_mutex::lock");
}
#endif
}
void unlock()
{
#if __GTHREADS
if (__gthread_active_p())
{
if (__gthread_recursive_mutex_unlock(&_M_mutex) != 0)
std::__throw_runtime_error("__recursive_mutex::unlock");
}
#endif
}
};
/// @brief Scoped lock idiom.
// Acquire the mutex here with a constructor call, then release with
// the destructor call in accordance with RAII style.
class __scoped_lock
{
public:
typedef __mutex mutex_type;
private:
mutex_type& _M_device;
__scoped_lock(const __scoped_lock&);
__scoped_lock& operator=(const __scoped_lock&);
public:
explicit __scoped_lock(mutex_type& __name) : _M_device(__name)
{ _M_device.lock(); }
~__scoped_lock() throw()
{ _M_device.unlock(); }
};
_GLIBCXX_END_NAMESPACE
#endif
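A hedged usage sketch of the interface above (illustrative names): lock()
and unlock() report failures through __throw_runtime_error, __scoped_lock
ties the pair to a scope, and __recursive_mutex tolerates re-acquisition
by the owning thread.

  __gnu_cxx::__mutex table_mutex;
  __gnu_cxx::__recursive_mutex init_mutex;

  void
  update_table()
  {
    __gnu_cxx::__scoped_lock sentry(table_mutex);
    // ...critical section; released in ~__scoped_lock even on throw...
  }

  void
  init_step()
  {
    init_mutex.lock();   // May be re-acquired by the same thread.
    // ...possibly re-entrant initialization, then:
    init_mutex.unlock();
  }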
......@@ -37,8 +37,7 @@
#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
......@@ -52,8 +52,8 @@
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <bits/atomicity.h>
#include <bits/concurrence.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......@@ -106,7 +106,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_Obj* volatile*
_M_get_free_list(size_t __bytes);
mutex_type&
__mutex&
_M_get_mutex();
// Returns an object of size __n, and optionally adds to size __n
......@@ -218,7 +218,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
{
_Obj* volatile* __free_list = _M_get_free_list(__bytes);
lock sentry(_M_get_mutex());
__scoped_lock sentry(_M_get_mutex());
_Obj* __restrict__ __result = *__free_list;
if (__builtin_expect(__result == 0, 0))
__ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
......@@ -248,7 +248,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_Obj* volatile* __free_list = _M_get_free_list(__bytes);
_Obj* __q = reinterpret_cast<_Obj*>(__p);
lock sentry(_M_get_mutex());
__scoped_lock sentry(_M_get_mutex());
__q ->_M_free_list_link = *__free_list;
*__free_list = __q;
}
......
......@@ -36,7 +36,7 @@
#ifndef _RC_STRING_BASE_H
#define _RC_STRING_BASE_H 1
#include <bits/atomicity.h>
#include <ext/atomicity.h>
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......
// <tr1/boost_shared_ptr.h> -*- C++ -*-
// Copyright (C) 2005 Free Software Foundation, Inc.
// Copyright (C) 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
......@@ -54,40 +54,18 @@
#ifndef _BOOST_SHARED_PTR_H
#define _BOOST_SHARED_PTR_H 1
// namespace std::tr1
namespace std
{
_GLIBCXX_BEGIN_NAMESPACE(tr1)
// TODO This should go into a separate header really.
enum _Lock_policy { _S_lockfree, _S_mutex, _S_single };
static const _Lock_policy __shared_ptr_default_lock_mode =
#ifdef __GTHREADS
// NOTE: This doesn't actually exist yet in the compiler.
#ifdef _GLIBCXX_ATOMIC_BUILTINS
_S_lockfree;
#else
_S_mutex;
#endif
#else
_S_single;
#endif
// END TODO
class bad_weak_ptr : public std::exception
{
public:
virtual char const*
what() const throw()
{ return "tr1::bad_weak_ptr"; }
what() const throw() { return "tr1::bad_weak_ptr"; }
};
// Helper for exception objects in <tr1/memory>
// TODO this should be defined in a different file.
// Substitute for bad_weak_ptr object in the case of -fno-exceptions.
inline void
__throw_bad_weak_ptr()
{
......@@ -98,6 +76,11 @@ __throw_bad_weak_ptr()
#endif
}
using __gnu_cxx::_Lock_policy;
using __gnu_cxx::__default_lock_policy;
using __gnu_cxx::_S_single;
using __gnu_cxx::_S_mutex;
using __gnu_cxx::_S_atomic;
template<typename _Tp>
struct _Sp_deleter
......@@ -111,64 +94,41 @@ template<typename _Tp>
};
// Empty helper class except when the template argument is _S_mutex.
template <_Lock_policy __l>
template<_Lock_policy _Lp>
class _Mutex_base
{
};
{ };
template <>
class _Mutex_base<_S_mutex>
{
public:
__gnu_cxx::mutex_type _M_mutex;
_Mutex_base()
{
// For the case of __GTHREAD_MUTEX_INIT we haven't initialised
// the mutex yet, so do it now.
#if defined(__GTHREADS) && defined(__GTHREAD_MUTEX_INIT)
__gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
_M_mutex = __tmp;
#endif
}
};
template<>
class _Mutex_base<_S_mutex> : public __gnu_cxx::__mutex
{ };
template <_Lock_policy __l = __shared_ptr_default_lock_mode>
class _Sp_counted_base
: public _Mutex_base<__l>
template<_Lock_policy _Lp = __default_lock_policy>
class _Sp_counted_base : public _Mutex_base<_Lp>
{
public:
_Sp_counted_base()
: _M_use_count(1), _M_weak_count(1)
{
}
public:
_Sp_counted_base() : _M_use_count(1), _M_weak_count(1) { }
virtual
~_Sp_counted_base() // nothrow
~_Sp_counted_base() // nothrow
{ }
// dispose() is called when _M_use_count drops to zero, to release
// the resources managed by *this.
// Called when _M_use_count drops to zero, to release the resources
// managed by *this.
virtual void
dispose() = 0; // nothrow
// destroy() is called when _M_weak_count drops to zero.
// Called when _M_weak_count drops to zero.
virtual void
destroy() // nothrow
{
delete this;
}
{ delete this; }
virtual void*
get_deleter(const std::type_info&) = 0;
void
add_ref_copy()
{
__gnu_cxx::__atomic_add(&_M_use_count, 1);
}
{ __gnu_cxx::__atomic_add(&_M_use_count, 1); }
void
add_ref_lock();
......@@ -190,9 +150,7 @@ template <_Lock_policy __l = __shared_ptr_default_lock_mode>
void
weak_add_ref() // nothrow
{
__gnu_cxx::__atomic_add(&_M_weak_count, 1);
}
{ __gnu_cxx::__atomic_add(&_M_weak_count, 1); }
void
weak_release() // nothrow
......@@ -209,12 +167,9 @@ template <_Lock_policy __l = __shared_ptr_default_lock_mode>
long
use_count() const // nothrow
{
return _M_use_count; // XXX is this MT safe?
}
private:
{ return _M_use_count; } // XXX is this MT safe?
private:
_Sp_counted_base(_Sp_counted_base const&);
_Sp_counted_base& operator=(_Sp_counted_base const&);
......@@ -223,8 +178,7 @@ template <_Lock_policy __l = __shared_ptr_default_lock_mode>
};
template<>
inline
void
inline void
_Sp_counted_base<_S_single>::add_ref_lock()
{
if (__gnu_cxx::__exchange_and_add(&_M_use_count, 1) == 0)
......@@ -236,11 +190,10 @@ template<>
#ifdef __GTHREADS
template<>
inline
void
inline void
_Sp_counted_base<_S_mutex>::add_ref_lock()
{
__gnu_cxx::lock lock(_M_mutex);
__gnu_cxx::__scoped_lock sentry(*this);
if (__gnu_cxx::__exchange_and_add(&_M_use_count, 1) == 0)
{
_M_use_count = 0;
......@@ -250,9 +203,8 @@ template<>
#endif
template<>
inline
void
_Sp_counted_base<_S_lockfree>::add_ref_lock()
inline void
_Sp_counted_base<_S_atomic>::add_ref_lock()
{
// Perform lock-free add-if-not-zero operation.
_Atomic_word __count;
......@@ -260,40 +212,32 @@ template<>
{
__count = _M_use_count;
if (__count == 0)
{
__throw_bad_weak_ptr();
}
/* Replace the current counter value with the old value + 1, as long
* as it's not changed meanwhile. */
__throw_bad_weak_ptr();
// Replace the current counter value with the old value + 1, as
// long as it's not changed meanwhile.
}
while (!__sync_bool_compare_and_swap(&_M_use_count, __count, __count + 1));
}
template<typename _Ptr, typename _Deleter, _Lock_policy __l>
class _Sp_counted_base_impl
: public _Sp_counted_base<__l>
template<typename _Ptr, typename _Deleter, _Lock_policy _Lp>
class _Sp_counted_base_impl : public _Sp_counted_base<_Lp>
{
public:
/**
* @brief
* @pre d(p) must not throw.
*/
_Sp_counted_base_impl(_Ptr __p, _Deleter __d)
: _M_ptr(__p), _M_del(__d)
{ }
: _M_ptr(__p), _M_del(__d) { }
virtual void
dispose() // nothrow
{
_M_del(_M_ptr);
}
{ _M_del(_M_ptr); }
virtual void*
get_deleter(const std::type_info& __ti)
{
return __ti == typeid(_Deleter) ? &_M_del : 0;
}
{ return __ti == typeid(_Deleter) ? &_M_del : 0; }
private:
_Sp_counted_base_impl(const _Sp_counted_base_impl&);
......@@ -303,49 +247,44 @@ template<typename _Ptr, typename _Deleter, _Lock_policy __l>
_Deleter _M_del; // copy constructor must not throw
};
template<_Lock_policy __l = __shared_ptr_default_lock_mode>
template<_Lock_policy _Lp = __default_lock_policy>
class weak_count;
template<_Lock_policy __l = __shared_ptr_default_lock_mode>
template<_Lock_policy _Lp = __default_lock_policy>
class shared_count
{
private:
private:
_Sp_counted_base<_Lp>* _M_pi;
_Sp_counted_base<__l>* _M_pi;
friend class weak_count<_Lp>;
friend class weak_count<__l>;
public:
shared_count()
: _M_pi(0) // nothrow
public:
shared_count() : _M_pi(0) // nothrow
{ }
template<typename _Ptr, typename _Deleter>
shared_count(_Ptr __p, _Deleter __d)
: _M_pi(0)
shared_count(_Ptr __p, _Deleter __d) : _M_pi(0)
{
try
{
_M_pi = new _Sp_counted_base_impl<_Ptr, _Deleter, __l>(__p, __d);
_M_pi = new _Sp_counted_base_impl<_Ptr, _Deleter, _Lp>(__p, __d);
}
catch(...)
{
__d(__p); // delete __p
__d(__p); // Call _Deleter on __p.
__throw_exception_again;
}
}
// auto_ptr<_Tp> is special cased to provide the strong guarantee
// Special case for auto_ptr<_Tp> to provide the strong guarantee.
template<typename _Tp>
explicit shared_count(std::auto_ptr<_Tp>& __r)
: _M_pi(new _Sp_counted_base_impl<_Tp*,
_Sp_deleter<_Tp>, __l >(__r.get(), _Sp_deleter<_Tp>()))
_Sp_deleter<_Tp>, _Lp >(__r.get(), _Sp_deleter<_Tp>()))
{ __r.release(); }
// throws bad_weak_ptr when __r.use_count() == 0
explicit shared_count(const weak_count<__l>& __r);
// Throw bad_weak_ptr when __r.use_count() == 0.
explicit shared_count(const weak_count<_Lp>& __r);
~shared_count() // nothrow
{
......@@ -363,13 +302,12 @@ template<_Lock_policy __l = __shared_ptr_default_lock_mode>
shared_count&
operator=(const shared_count& __r) // nothrow
{
_Sp_counted_base<__l>* __tmp = __r._M_pi;
if(__tmp != _M_pi)
_Sp_counted_base<_Lp>* __tmp = __r._M_pi;
if (__tmp != _M_pi)
{
if(__tmp != 0)
if (__tmp != 0)
__tmp->add_ref_copy();
if(_M_pi != 0)
if (_M_pi != 0)
_M_pi->release();
_M_pi = __tmp;
}
......@@ -378,7 +316,7 @@ template<_Lock_policy __l = __shared_ptr_default_lock_mode>
void swap(shared_count& __r) // nothrow
{
_Sp_counted_base<__l>* __tmp = __r._M_pi;
_Sp_counted_base<_Lp>* __tmp = __r._M_pi;
__r._M_pi = _M_pi;
_M_pi = __tmp;
}
......@@ -397,36 +335,34 @@ template<_Lock_policy __l = __shared_ptr_default_lock_mode>
friend inline bool
operator<(const shared_count& __a, const shared_count& __b)
{ return std::less<_Sp_counted_base<__l>*>()(__a._M_pi, __b._M_pi); }
{ return std::less<_Sp_counted_base<_Lp>*>()(__a._M_pi, __b._M_pi); }
void*
get_deleter(const std::type_info& __ti) const
{ return _M_pi ? _M_pi->get_deleter(__ti) : 0; }
};
template<_Lock_policy __l>
template<_Lock_policy _Lp>
class weak_count
{
private:
_Sp_counted_base<__l>* _M_pi;
friend class shared_count<__l>;
private:
_Sp_counted_base<_Lp>* _M_pi;
public:
friend class shared_count<_Lp>;
public:
weak_count()
: _M_pi(0) // nothrow
{ }
weak_count(const shared_count<__l>& __r)
weak_count(const shared_count<_Lp>& __r)
: _M_pi(__r._M_pi) // nothrow
{
if (_M_pi != 0)
_M_pi->weak_add_ref();
}
weak_count(const weak_count<__l>& __r)
weak_count(const weak_count<_Lp>& __r)
: _M_pi(__r._M_pi) // nothrow
{
if (_M_pi != 0)
......@@ -439,36 +375,34 @@ template<_Lock_policy __l>
_M_pi->weak_release();
}
weak_count<__l>&
operator=(const shared_count<__l>& __r) // nothrow
weak_count<_Lp>&
operator=(const shared_count<_Lp>& __r) // nothrow
{
_Sp_counted_base<__l>* __tmp = __r._M_pi;
_Sp_counted_base<_Lp>* __tmp = __r._M_pi;
if (__tmp != 0)
__tmp->weak_add_ref();
if (_M_pi != 0)
_M_pi->weak_release();
_M_pi = __tmp;
_M_pi = __tmp;
return *this;
}
weak_count<__l>&
operator=(const weak_count<__l>& __r) // nothrow
weak_count<_Lp>&
operator=(const weak_count<_Lp>& __r) // nothrow
{
_Sp_counted_base<__l> * __tmp = __r._M_pi;
_Sp_counted_base<_Lp> * __tmp = __r._M_pi;
if (__tmp != 0)
__tmp->weak_add_ref();
if (_M_pi != 0)
_M_pi->weak_release();
_M_pi = __tmp;
return *this;
}
void
swap(weak_count<__l>& __r) // nothrow
swap(weak_count<_Lp>& __r) // nothrow
{
_Sp_counted_base<__l> * __tmp = __r._M_pi;
_Sp_counted_base<_Lp> * __tmp = __r._M_pi;
__r._M_pi = _M_pi;
_M_pi = __tmp;
}
......@@ -478,17 +412,17 @@ template<_Lock_policy __l>
{ return _M_pi != 0 ? _M_pi->use_count() : 0; }
friend inline bool
operator==(const weak_count<__l>& __a, const weak_count<__l>& __b)
operator==(const weak_count<_Lp>& __a, const weak_count<_Lp>& __b)
{ return __a._M_pi == __b._M_pi; }
friend inline bool
operator<(const weak_count<__l>& __a, const weak_count<__l>& __b)
{ return std::less<_Sp_counted_base<__l>*>()(__a._M_pi, __b._M_pi); }
operator<(const weak_count<_Lp>& __a, const weak_count<_Lp>& __b)
{ return std::less<_Sp_counted_base<_Lp>*>()(__a._M_pi, __b._M_pi); }
};
template<_Lock_policy __l>
template<_Lock_policy _Lp>
inline
shared_count<__l>::shared_count(const weak_count<__l>& __r)
shared_count<_Lp>::shared_count(const weak_count<_Lp>& __r)
: _M_pi(__r._M_pi)
{
if (_M_pi != 0)
......@@ -498,20 +432,20 @@ template<_Lock_policy __l>
}
// fwd decls
template<typename _Tp, _Lock_policy __l = __shared_ptr_default_lock_mode>
// Forward decls.
template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
class __shared_ptr;
template<typename _Tp, _Lock_policy __l = __shared_ptr_default_lock_mode>
template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
class __weak_ptr;
template<typename _Tp, _Lock_policy __l>
template<typename _Tp, _Lock_policy _Lp>
class __enable_shared_from_this;
struct __static_cast_tag {};
struct __const_cast_tag {};
struct __dynamic_cast_tag {};
struct __polymorphic_cast_tag {};
struct __static_cast_tag { };
struct __const_cast_tag { };
struct __dynamic_cast_tag { };
struct __polymorphic_cast_tag { };
template<class _Tp>
struct shared_ptr_traits
......@@ -534,24 +468,25 @@ template<>
{ typedef void reference; };
// enable_shared_from_this support
// Support for enable_shared_from_this.
// friend of __enable_shared_from_this
template<_Lock_policy __l, typename _Tp1, typename _Tp2>
// Friend of __enable_shared_from_this.
template<_Lock_policy _Lp, typename _Tp1, typename _Tp2>
void
__enable_shared_from_this_helper(const shared_count<__l>& __pn,
const __enable_shared_from_this<_Tp1, __l>* __pe,
const _Tp2* __px );
__enable_shared_from_this_helper(const shared_count<_Lp>&,
const __enable_shared_from_this<_Tp1, _Lp>*,
const _Tp2*);
template<_Lock_policy __l>
template<_Lock_policy _Lp>
inline void
__enable_shared_from_this_helper(const shared_count<__l>&, ...)
__enable_shared_from_this_helper(const shared_count<_Lp>&, ...)
{ }
// get_deleter must be declared before friend declaration by shared_ptr.
template<typename _Del, typename _Tp, _Lock_policy __l>
_Del* get_deleter(const __shared_ptr<_Tp, __l>&);
// Function get_deleter must be declared before friend declaration by
// shared_ptr.
template<typename _Del, typename _Tp, _Lock_policy _Lp>
_Del* get_deleter(const __shared_ptr<_Tp, _Lp>&);
/**
* @class shared_ptr <tr1/memory>
......@@ -560,13 +495,12 @@ template<typename _Del, typename _Tp, _Lock_policy __l>
* The object pointed to is deleted when the last shared_ptr pointing to it
* is destroyed or reset.
*/
template<typename _Tp, _Lock_policy __l>
template<typename _Tp, _Lock_policy _Lp>
class __shared_ptr
{
typedef typename shared_ptr_traits<_Tp>::reference _Reference;
public:
typedef _Tp element_type;
/** @brief Construct an empty %__shared_ptr.
......@@ -587,7 +521,6 @@ template<typename _Tp, _Lock_policy __l>
{
__glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
// __glibcxx_function_requires(_CompleteConcept<_Tp1*>)
__enable_shared_from_this_helper( _M_refcount, __p, __p );
}
......@@ -609,7 +542,6 @@ template<typename _Tp, _Lock_policy __l>
{
__glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
// TODO requires D is CopyConstructible and d(p) well-formed
__enable_shared_from_this_helper( _M_refcount, __p, __p );
}
......@@ -622,11 +554,9 @@ template<typename _Tp, _Lock_policy __l>
* @throw std::bad_alloc, in which case
*/
template<typename _Tp1>
__shared_ptr(const __shared_ptr<_Tp1, __l>& __r)
__shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r)
: _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws
{
__glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
}
{ __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) }
/** @brief Constructs a %__shared_ptr that shares ownership with @a r
* and stores a copy of the pointer stored in @a r.
......@@ -636,12 +566,12 @@ template<typename _Tp, _Lock_policy __l>
* in which case the constructor has no effect.
*/
template<typename _Tp1>
explicit __shared_ptr(const __weak_ptr<_Tp1, __l>& __r)
explicit __shared_ptr(const __weak_ptr<_Tp1, _Lp>& __r)
: _M_refcount(__r._M_refcount) // may throw
{
__glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
// it is now safe to copy r__._M_ptr, as _M_refcount(__r._M_refcount)
// did not throw
// It is now safe to copy __r._M_ptr, as _M_refcount(__r._M_refcount)
// did not throw.
_M_ptr = __r._M_ptr;
}
......@@ -655,35 +585,34 @@ template<typename _Tp, _Lock_policy __l>
// TODO requires r.release() convertible to _Tp*, Tp1 is complete,
// delete r.release() well-formed
_Tp1 * __tmp = __r.get();
_M_refcount = shared_count<__l>(__r);
_M_refcount = shared_count<_Lp>(__r);
__enable_shared_from_this_helper( _M_refcount, __tmp, __tmp );
}
template<typename _Tp1>
__shared_ptr(const __shared_ptr<_Tp1, __l>& __r, __static_cast_tag)
__shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, __static_cast_tag)
: _M_ptr(static_cast<element_type*>(__r._M_ptr)),
_M_refcount(__r._M_refcount)
{ }
template<typename _Tp1>
__shared_ptr(const __shared_ptr<_Tp1, __l>& __r, __const_cast_tag)
__shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, __const_cast_tag)
: _M_ptr(const_cast<element_type*>(__r._M_ptr)),
_M_refcount(__r._M_refcount)
{ }
template<typename _Tp1>
__shared_ptr(const __shared_ptr<_Tp1, __l>& __r, __dynamic_cast_tag)
__shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r, __dynamic_cast_tag)
: _M_ptr(dynamic_cast<element_type*>(__r._M_ptr)),
_M_refcount(__r._M_refcount)
{
if (_M_ptr == 0) // need to allocate new counter -- the cast failed
_M_refcount = shared_count<__l>();
_M_refcount = shared_count<_Lp>();
}
template<typename _Tp1>
__shared_ptr&
operator=(const __shared_ptr<_Tp1, __l>& __r) // never throws
operator=(const __shared_ptr<_Tp1, _Lp>& __r) // never throws
{
_M_ptr = __r._M_ptr;
_M_refcount = __r._M_refcount; // shared_count::op= doesn't throw
......@@ -704,10 +633,10 @@ template<typename _Tp, _Lock_policy __l>
template<typename _Tp1>
void
reset(_Tp1* __p) // _Tp1 must be complete
reset(_Tp1* __p) // _Tp1 must be complete.
{
_GLIBCXX_DEBUG_ASSERT(__p == 0 || __p != _M_ptr); // catch self-reset
// errors
// Catch self-reset errors.
_GLIBCXX_DEBUG_ASSERT(__p == 0 || __p != _M_ptr);
__shared_ptr(__p).swap(*this);
}
......@@ -716,7 +645,7 @@ template<typename _Tp, _Lock_policy __l>
reset(_Tp1 * __p, _Deleter __d)
{ __shared_ptr(__p, __d).swap(*this); }
// error to instantiate if _Tp is [cv-qual] void
// Error to instantiate if _Tp is [cv-qual] void.
_Reference
operator*() const // never throws
{
......@@ -735,7 +664,7 @@ template<typename _Tp, _Lock_policy __l>
get() const // never throws
{ return _M_ptr; }
// implicit conversion to "bool"
// Implicit conversion to "bool"
private:
typedef _Tp* __shared_ptr::*__unspecified_bool_type;
......@@ -752,7 +681,7 @@ template<typename _Tp, _Lock_policy __l>
{ return _M_refcount.use_count(); }
void
swap(__shared_ptr<_Tp, __l>& __other) // never throws
swap(__shared_ptr<_Tp, _Lp>& __other) // never throws
{
std::swap(_M_ptr, __other._M_ptr);
_M_refcount.swap(__other._M_refcount);
......@@ -763,41 +692,41 @@ template<typename _Tp, _Lock_policy __l>
_M_get_deleter(const std::type_info& __ti) const
{ return _M_refcount.get_deleter(__ti); }
template<typename _Tp1, _Lock_policy __l1>
template<typename _Tp1, _Lock_policy _Lp1>
bool
_M_less(const __shared_ptr<_Tp1, __l1>& __rhs) const
_M_less(const __shared_ptr<_Tp1, _Lp1>& __rhs) const
{ return _M_refcount < __rhs._M_refcount; }
template<typename _Tp1, _Lock_policy __l1> friend class __shared_ptr;
template<typename _Tp1, _Lock_policy __l1> friend class __weak_ptr;
template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
template<typename _Del, typename _Tp1, _Lock_policy __l1>
friend _Del* get_deleter(const __shared_ptr<_Tp1, __l1>&);
template<typename _Del, typename _Tp1, _Lock_policy _Lp1>
friend _Del* get_deleter(const __shared_ptr<_Tp1, _Lp1>&);
// friends injected into enclosing namespace and found by ADL:
// Friends injected into enclosing namespace and found by ADL:
template<typename _Tp1>
friend inline bool
operator==(const __shared_ptr& __a, const __shared_ptr<_Tp1, __l>& __b)
operator==(const __shared_ptr& __a, const __shared_ptr<_Tp1, _Lp>& __b)
{ return __a.get() == __b.get(); }
template<typename _Tp1>
friend inline bool
operator!=(const __shared_ptr& __a, const __shared_ptr<_Tp1, __l>& __b)
operator!=(const __shared_ptr& __a, const __shared_ptr<_Tp1, _Lp>& __b)
{ return __a.get() != __b.get(); }
template<typename _Tp1>
friend inline bool
operator<(const __shared_ptr& __a, const __shared_ptr<_Tp1, __l>& __b)
operator<(const __shared_ptr& __a, const __shared_ptr<_Tp1, _Lp>& __b)
{ return __a._M_less(__b); }
_Tp* _M_ptr; // contained pointer
shared_count<__l> _M_refcount; // reference counter
}; // __shared_ptr
_Tp* _M_ptr; // Contained pointer.
shared_count<_Lp> _M_refcount; // Reference counter.
};
// 2.2.3.8 shared_ptr specialized algorithms.
template<typename _Tp, _Lock_policy __l>
template<typename _Tp, _Lock_policy _Lp>
inline void
swap(__shared_ptr<_Tp, __l>& __a, __shared_ptr<_Tp, __l>& __b)
swap(__shared_ptr<_Tp, _Lp>& __a, __shared_ptr<_Tp, _Lp>& __b)
{ __a.swap(__b); }
// 2.2.3.9 shared_ptr casts
......@@ -806,85 +735,75 @@ template<typename _Tp, _Lock_policy __l>
* will eventually result in undefined behaviour,
* attempting to delete the same object twice.
*/
template<typename _Tp, typename _Tp1, _Lock_policy __l>
__shared_ptr<_Tp, __l>
static_pointer_cast(const __shared_ptr<_Tp1, __l>& __r)
{
return __shared_ptr<_Tp, __l>(__r, __static_cast_tag());
}
template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
__shared_ptr<_Tp, _Lp>
static_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r)
{ return __shared_ptr<_Tp, _Lp>(__r, __static_cast_tag()); }
/** @warning The seemingly equivalent
* <code>shared_ptr<T>(const_cast<T*>(r.get()))</code>
* will eventually result in undefined behaviour,
* attempting to delete the same object twice.
*/
template<typename _Tp, typename _Tp1, _Lock_policy __l>
__shared_ptr<_Tp, __l>
const_pointer_cast(const __shared_ptr<_Tp1, __l>& __r)
{
return __shared_ptr<_Tp, __l>(__r, __const_cast_tag());
}
template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
__shared_ptr<_Tp, _Lp>
const_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r)
{ return __shared_ptr<_Tp, _Lp>(__r, __const_cast_tag()); }
/** @warning The seemingly equivalent
* <code>shared_ptr<T>(dynamic_cast<T*>(r.get()))</code>
* will eventually result in undefined behaviour,
* attempting to delete the same object twice.
*/
template<typename _Tp, typename _Tp1, _Lock_policy __l>
__shared_ptr<_Tp, __l>
dynamic_pointer_cast(const __shared_ptr<_Tp1, __l>& __r)
{
return __shared_ptr<_Tp, __l>(__r, __dynamic_cast_tag());
}
template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
__shared_ptr<_Tp, _Lp>
dynamic_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r)
{ return __shared_ptr<_Tp, _Lp>(__r, __dynamic_cast_tag()); }
// 2.2.3.7 shared_ptr I/O
template<typename _Ch, typename _Tr, typename _Tp, _Lock_policy __l>
template<typename _Ch, typename _Tr, typename _Tp, _Lock_policy _Lp>
std::basic_ostream<_Ch, _Tr>&
operator<<(std::basic_ostream<_Ch, _Tr>& __os, const __shared_ptr<_Tp, __l>& __p)
operator<<(std::basic_ostream<_Ch, _Tr>& __os,
const __shared_ptr<_Tp, _Lp>& __p)
{
__os << __p.get();
return __os;
}
// 2.2.3.10 shared_ptr get_deleter (experimental)
template<typename _Del, typename _Tp, _Lock_policy __l>
template<typename _Del, typename _Tp, _Lock_policy _Lp>
inline _Del*
get_deleter(const __shared_ptr<_Tp, __l>& __p)
get_deleter(const __shared_ptr<_Tp, _Lp>& __p)
{ return static_cast<_Del*>(__p._M_get_deleter(typeid(_Del))); }
template<typename _Tp, _Lock_policy __l>
template<typename _Tp, _Lock_policy _Lp>
class __weak_ptr
{
public:
typedef _Tp element_type;
__weak_ptr()
: _M_ptr(0), _M_refcount() // never throws
__weak_ptr() : _M_ptr(0), _M_refcount() // never throws
{ }
// generated copy constructor, assignment, destructor are fine
// Generated copy constructor, assignment, destructor are fine.
//
// The "obvious" converting constructor implementation:
// The "obvious" converting constructor implementation:
//
// template<class Y>
// __weak_ptr(__weak_ptr<Y> const & r)
// : _M_ptr(r._M_ptr), _M_refcount(r._M_refcount) // never throws
// { }
//
// has a serious problem.
// has a serious problem.
//
// r._M_ptr may already have been invalidated. The _M_ptr(r._M_ptr)
// conversion may require access to *r._M_ptr (virtual inheritance).
//
// It is not possible to avoid spurious access violations since
// in multithreaded programs r._M_ptr may be invalidated at any point.
//
// It is not possible to avoid spurious access violations since
// in multithreaded programs r._M_ptr may be invalidated at any point.
template<typename _Tp1>
__weak_ptr(const __weak_ptr<_Tp1, __l>& r)
__weak_ptr(const __weak_ptr<_Tp1, _Lp>& r)
: _M_refcount(r._M_refcount) // never throws
{
__glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
......@@ -892,15 +811,13 @@ template<typename _Tp, _Lock_policy __l>
}
template<typename _Tp1>
__weak_ptr(const __shared_ptr<_Tp1, __l>& r)
__weak_ptr(const __shared_ptr<_Tp1, _Lp>& r)
: _M_ptr(r._M_ptr), _M_refcount(r._M_refcount) // never throws
{
__glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>)
}
{ __glibcxx_function_requires(_ConvertibleConcept<_Tp1*, _Tp*>) }
template<typename _Tp1>
__weak_ptr&
operator=(const __weak_ptr<_Tp1, __l>& r) // never throws
operator=(const __weak_ptr<_Tp1, _Lp>& r) // never throws
{
_M_ptr = r.lock().get();
_M_refcount = r._M_refcount;
......@@ -909,39 +826,37 @@ template<typename _Tp, _Lock_policy __l>
template<typename _Tp1>
__weak_ptr&
operator=(const __shared_ptr<_Tp1, __l>& r) // never throws
operator=(const __shared_ptr<_Tp1, _Lp>& r) // never throws
{
_M_ptr = r._M_ptr;
_M_refcount = r._M_refcount;
return *this;
}
__shared_ptr<_Tp, __l>
__shared_ptr<_Tp, _Lp>
lock() const // never throws
{
#ifdef __GTHREADS
// optimization: avoid throw overhead
// Optimization: avoid throw overhead.
if (expired())
return __shared_ptr<element_type, __l>();
return __shared_ptr<element_type, _Lp>();
try
{
return __shared_ptr<element_type, __l>(*this);
return __shared_ptr<element_type, _Lp>(*this);
}
catch (const bad_weak_ptr&)
{
// Q: how can we get here?
// A: another thread may have invalidated r after the
// Q: How can we get here?
// A: Another thread may have invalidated r after the
// use_count test above.
return __shared_ptr<element_type>();
}
#else
// optimization: avoid try/catch overhead when single threaded
return expired() ? __shared_ptr<element_type, __l>()
: __shared_ptr<element_type, __l>(*this);
// Optimization: avoid try/catch overhead when single threaded.
return expired() ? __shared_ptr<element_type, _Lp>()
: __shared_ptr<element_type, _Lp>(*this);
#endif
} // XXX MT
......@@ -966,118 +881,105 @@ template<typename _Tp, _Lock_policy __l>
}
private:
template<typename _Tp1>
bool
_M_less(const __weak_ptr<_Tp1, __l>& __rhs) const
_M_less(const __weak_ptr<_Tp1, _Lp>& __rhs) const
{ return _M_refcount < __rhs._M_refcount; }
// used by __enable_shared_from_this
// Used by __enable_shared_from_this.
void
_M_assign(_Tp* __ptr, const shared_count<__l>& __refcount)
_M_assign(_Tp* __ptr, const shared_count<_Lp>& __refcount)
{
_M_ptr = __ptr;
_M_refcount = __refcount;
}
// friend injected into namespace and found by ADL
// Friend injected into namespace and found by ADL.
template<typename _Tp1>
friend inline bool
operator<(const __weak_ptr& __lhs, const __weak_ptr<_Tp1, __l>& __rhs)
operator<(const __weak_ptr& __lhs, const __weak_ptr<_Tp1, _Lp>& __rhs)
{ return __lhs._M_less(__rhs); }
template<typename _Tp1, _Lock_policy __l1> friend class __weak_ptr;
template<typename _Tp1, _Lock_policy __l1> friend class __shared_ptr;
friend class __enable_shared_from_this<_Tp, __l>;
template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
friend class __enable_shared_from_this<_Tp, _Lp>;
_Tp* _M_ptr; // contained pointer
weak_count<__l> _M_refcount; // reference counter
}; // __weak_ptr
_Tp* _M_ptr; // Contained pointer.
weak_count<_Lp> _M_refcount; // Reference counter.
};
// 2.2.4.7 weak_ptr specialized algorithms.
template<typename _Tp, _Lock_policy __l>
template<typename _Tp, _Lock_policy _Lp>
void
swap(__weak_ptr<_Tp, __l>& __a, __weak_ptr<_Tp, __l>& __b)
swap(__weak_ptr<_Tp, _Lp>& __a, __weak_ptr<_Tp, _Lp>& __b)
{ __a.swap(__b); }
template<typename _Tp, _Lock_policy __l = __shared_ptr_default_lock_mode>
template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
class __enable_shared_from_this
{
protected:
__enable_shared_from_this() { }
__enable_shared_from_this()
{ }
__enable_shared_from_this(const __enable_shared_from_this&)
{ }
__enable_shared_from_this(const __enable_shared_from_this&) { }
__enable_shared_from_this&
operator=(const __enable_shared_from_this&)
{ return *this; }
~__enable_shared_from_this()
{ }
~__enable_shared_from_this() { }
public:
__shared_ptr<_Tp, __l>
__shared_ptr<_Tp, _Lp>
shared_from_this()
{
__shared_ptr<_Tp, __l> __p(this->_M_weak_this);
__shared_ptr<_Tp, _Lp> __p(this->_M_weak_this);
return __p;
}
__shared_ptr<const _Tp, __l>
__shared_ptr<const _Tp, _Lp>
shared_from_this() const
{
__shared_ptr<const _Tp, __l> __p(this->_M_weak_this);
__shared_ptr<const _Tp, _Lp> __p(this->_M_weak_this);
return __p;
}
private:
template<typename _Tp1>
void
_M_weak_assign(_Tp1* __p, const shared_count<__l>& __n) const
_M_weak_assign(_Tp1* __p, const shared_count<_Lp>& __n) const
{ _M_weak_this._M_assign(__p, __n); }
template<typename _Tp1>
friend void
__enable_shared_from_this_helper(const shared_count<__l>& __pn,
__enable_shared_from_this_helper(const shared_count<_Lp>& __pn,
const __enable_shared_from_this* __pe,
const _Tp1* __px)
{
if(__pe != 0)
if (__pe != 0)
__pe->_M_weak_assign(const_cast<_Tp1*>(__px), __pn);
}
mutable __weak_ptr<_Tp, __l> _M_weak_this;
mutable __weak_ptr<_Tp, _Lp> _M_weak_this;
};
template<typename _Tp>
class shared_ptr;
// The actual TR1 weak_ptr, with forwarding constructors and assignment operators.
// The actual TR1 weak_ptr, with forwarding constructors and
// assignment operators.
template<typename _Tp>
class weak_ptr
: public __weak_ptr<_Tp>
class weak_ptr : public __weak_ptr<_Tp>
{
public:
weak_ptr()
: __weak_ptr<_Tp>()
{ }
weak_ptr() : __weak_ptr<_Tp>() { }
template<typename _Tp1>
weak_ptr(const __weak_ptr<_Tp1>& r)
: __weak_ptr<_Tp>(r)
{ }
weak_ptr(const __weak_ptr<_Tp1>& r) : __weak_ptr<_Tp>(r) { }
template<typename _Tp1>
weak_ptr(const __shared_ptr<_Tp1>& r)
: __weak_ptr<_Tp>(r)
{ }
weak_ptr(const __shared_ptr<_Tp1>& r) : __weak_ptr<_Tp>(r) { }
template<typename _Tp1>
weak_ptr&
operator=(const weak_ptr<_Tp1>& r) // never throws
......@@ -1095,53 +997,46 @@ template<typename _Tp>
}
};
// The actual TR1 shared_ptr, with forwarding constructors and assignment operators.
// The actual TR1 shared_ptr, with forwarding constructors and
// assignment operators.
template<typename _Tp>
class shared_ptr : public __shared_ptr<_Tp>
{
public:
shared_ptr() : __shared_ptr<_Tp>() { }
template<typename _Tp1>
explicit shared_ptr(_Tp1* __p)
: __shared_ptr<_Tp>(__p) { }
template<typename _Tp1, typename _Deleter>
shared_ptr(_Tp1* __p, _Deleter __d)
: __shared_ptr<_Tp>(__p, __d) { }
template<typename _Tp1>
shared_ptr(const __shared_ptr<_Tp1>& __r)
: __shared_ptr<_Tp>(__r) { }
template<typename _Tp1>
explicit shared_ptr(const __weak_ptr<_Tp1>& __r)
: __shared_ptr<_Tp>(__r) { }
template<typename _Tp1>
explicit shared_ptr(std::auto_ptr<_Tp1>& __r)
: __shared_ptr<_Tp>(__r) { }
template<typename _Tp1>
shared_ptr(const __shared_ptr<_Tp1>& __r, __static_cast_tag)
: __shared_ptr<_Tp>(__r, __static_cast_tag()) { }
template<typename _Tp1>
shared_ptr(const __shared_ptr<_Tp1>& __r, __const_cast_tag)
: __shared_ptr<_Tp>(__r, __const_cast_tag()) { }
template<typename _Tp1>
shared_ptr(const __shared_ptr<_Tp1>& __r, __dynamic_cast_tag)
: __shared_ptr<_Tp>(__r, __dynamic_cast_tag()) { }
// Additional non-base assignment operators to avoid excessive errors.
template<typename _Tp1>
shared_ptr&
......@@ -1150,6 +1045,7 @@ template<typename _Tp>
this->__shared_ptr<_Tp>::operator=(__r);
return *this;
}
template<typename _Tp1>
shared_ptr&
operator=(const shared_ptr<_Tp1>& __r) // never throws
......@@ -1160,17 +1056,14 @@ template<typename _Tp>
};
template<typename _Tp>
class enable_shared_from_this : public __enable_shared_from_this<_Tp>
{
protected:
enable_shared_from_this()
: __enable_shared_from_this<_Tp>() { }
enable_shared_from_this(const enable_shared_from_this&)
: __enable_shared_from_this<_Tp>(enable_shared_from_this<_Tp>()) { }
};
_GLIBCXX_END_NAMESPACE
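The three cast-tag constructors forwarded above are what the TR1 free functions static_pointer_cast, const_pointer_cast and dynamic_pointer_cast ultimately call. A small sketch of them in use (illustrative only):

#include <tr1/memory>

struct base { virtual ~base() { } };
struct derived : base { };

int main()
{
  std::tr1::shared_ptr<base> b(new derived);

  // Forwards to shared_ptr(__r, __dynamic_cast_tag()).
  std::tr1::shared_ptr<derived> d =
    std::tr1::dynamic_pointer_cast<derived>(b);

  return (d && b.use_count() == 2) ? 0 : 1;
}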
......
// <tr1/memory> -*- C++ -*-
// Copyright (C) 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
......@@ -40,10 +40,9 @@
#include <iosfwd> // std::basic_ostream
#include <cstdlib> // std::abort
#include <bits/gthr.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/functexcept.h>
#include <debug/debug.h>
#include <tr1/boost_shared_ptr.h>
......
......@@ -39,7 +39,7 @@
#include <climits>
#include <exception>
#include "unwind-cxx.h"
#include "bits/gthr.h"
#include <ext/concurrence.h>
#if _GLIBCXX_HOSTED
using std::free;
......@@ -89,23 +89,11 @@ typedef char one_buffer[EMERGENCY_OBJ_SIZE] __attribute__((aligned));
static one_buffer emergency_buffer[EMERGENCY_OBJ_COUNT];
static bitmask_type emergency_used;
#ifdef __GTHREADS
namespace
{
// A single mutex controlling emergency allocations.
__gnu_cxx::__mutex emergency_mutex;
}
#endif
extern "C" void *
__cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) throw()
......@@ -117,13 +105,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) throw()
if (! ret)
{
#ifdef __GTHREADS
__gnu_cxx::__scoped_lock sentry(emergency_mutex);
#endif
bitmask_type used = emergency_used;
unsigned int which = 0;
......@@ -141,9 +123,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) throw()
ret = &emergency_buffer[which][0];
failed:;
if (!ret)
std::terminate ();
}
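The rewrite above replaces paired __gthread_mutex_lock/__gthread_mutex_unlock calls with a single __scoped_lock object whose destructor releases the mutex on every exit path, including exceptional ones. The idiom, reduced to a stand-alone sketch with hypothetical names (pthreads used only for illustration):

#include <pthread.h>

class scoped_lock
{
  pthread_mutex_t& _M_mutex;

  scoped_lock(const scoped_lock&);             // non-copyable
  scoped_lock& operator=(const scoped_lock&);

public:
  explicit scoped_lock(pthread_mutex_t& __m) : _M_mutex(__m)
  { pthread_mutex_lock(&_M_mutex); }

  ~scoped_lock()                               // runs on return or throw
  { pthread_mutex_unlock(&_M_mutex); }
};

pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
int counter;

int bump()
{
  scoped_lock sentry(mtx);   // unlocked automatically at '}'
  return ++counter;
}

int main() { return bump() == 1 ? 0 : 1; }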
......@@ -167,16 +147,11 @@ __cxxabiv1::__cxa_free_exception(void *vptr) throw()
if (ptr >= &emergency_buffer[0][0]
&& ptr < &emergency_buffer[0][0] + sizeof (emergency_buffer))
{
const unsigned int which
= (unsigned)(ptr - &emergency_buffer[0][0]) / EMERGENCY_OBJ_SIZE;
#ifdef __GTHREADS
__gnu_cxx::__scoped_lock sentry(emergency_mutex);
#endif
emergency_used &= ~((bitmask_type)1 << which);
}
else
free (ptr - sizeof (__cxa_exception));
......
// Copyright (C) 2002, 2004, 2006 Free Software Foundation, Inc.
//
// This file is part of GCC.
//
......@@ -32,8 +32,8 @@
#include <bits/c++config.h>
#include <cxxabi.h>
#include <exception>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
// The IA64/generic ABI uses the first byte of the guard variable.
// The ARM EABI uses the least significant bit.
......@@ -42,49 +42,8 @@
#ifdef __GTHREADS
namespace
{
// A single mutex controlling all static initializations.
__gnu_cxx::__recursive_mutex static_mutex;
}
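The mutex above must be recursive because running one static initializer can quite legally reach __cxa_guard_acquire again for a different local static on the same thread. An illustrative sketch of such nesting (not from the patch):

int make_inner()
{
  static int inner = 42;            // second guard, same thread
  return inner;
}

int make_outer()
{
  static int outer = make_inner();  // first guard held while this runs
  return outer;
}

int main() { return make_outer() == 42 ? 0 : 1; }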
#ifndef _GLIBCXX_GUARD_TEST_AND_ACQUIRE
......@@ -125,14 +84,14 @@ namespace __gnu_cxx
// as well check for this situation and throw an exception.
// We use the second byte of the guard variable to remember that we're
// in the middle of an initialization.
class recursive_init_error: public std::exception
{
public:
recursive_init_error() throw() { }
virtual ~recursive_init_error() throw ();
};
recursive_init_error::~recursive_init_error() throw() { }
}
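By contrast, re-entering the initialization of the same static is an error, which the second byte of the guard variable detects; after the rename above, user code built with exceptions sees recursive_init_error. A sketch that would trigger it (illustrative only):

#include <exception>

int f(bool __first);

int g() { return f(false); }

int f(bool __first)
{
  // Re-enters f's own guard while the initializer is still running:
  // __cxa_guard_acquire throws __gnu_cxx::recursive_init_error.
  static int x = __first ? g() : 0;
  return x;
}

int main()
{
  try { f(true); }
  catch (std::exception&) { return 0; }
  return 1;
}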
namespace __cxxabiv1
......@@ -158,7 +117,7 @@ namespace __cxxabiv1
if (recursion_push (g))
{
#ifdef __EXCEPTIONS
throw __gnu_cxx::recursive_init_error();
#else
// Use __builtin_trap so we don't require abort().
__builtin_trap ();
......@@ -185,12 +144,12 @@ namespace __cxxabiv1
bool unlock;
mutex_wrapper (): unlock(true)
{
static_mutex.lock();
}
~mutex_wrapper ()
{
if (unlock)
static_mutex.unlock();
}
} mw;
......@@ -213,7 +172,7 @@ namespace __cxxabiv1
recursion_pop (g);
#ifdef __GTHREADS
if (__gthread_active_p ())
static_mutex.unlock();
#endif
}
......@@ -224,7 +183,7 @@ namespace __cxxabiv1
_GLIBCXX_GUARD_SET_AND_RELEASE (g);
#ifdef __GTHREADS
if (__gthread_active_p ())
static_mutex.unlock();
#endif
}
}
......@@ -33,19 +33,12 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
namespace balloc
{
template class __mini_vector<std::pair<bitmap_allocator<char>::_Alloc_block*, bitmap_allocator<char>::_Alloc_block*> >;
template class __mini_vector<std::pair<bitmap_allocator<wchar_t>::_Alloc_block*, bitmap_allocator<wchar_t>::_Alloc_block*> >;
template class __mini_vector<size_t*>;
template size_t** __lower_bound(size_t**, size_t**, size_t const&,
free_list::_LT_pointer_compare);
}
size_t*
......@@ -53,25 +46,24 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_M_get(size_t __sz) throw(std::bad_alloc)
{
#if defined __GTHREADS
mutex_type& __bfl_mutex = _M_get_mutex();
__bfl_mutex.lock();
#endif
const vector_type& __free_list = _M_get_free_list();
using __gnu_cxx::balloc::__lower_bound;
iterator __tmp = __lower_bound(__free_list.begin(), __free_list.end(),
__sz, _LT_pointer_compare());
if (__tmp == __free_list.end() || !_M_should_i_give(**__tmp, __sz))
{
// We release the lock here, because operator new is
// guaranteed to be thread-safe by the underlying
// implementation.
#if defined __GTHREADS
__bfl_mutex.unlock();
#endif
// Try twice to get the memory: once directly, and the 2nd
// time after clearing the free list. If both fail, then throw
// std::bad_alloc().
int __ctr = 2;
while (__ctr)
{
......@@ -79,8 +71,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
--__ctr;
try
{
__ret = reinterpret_cast<size_t*>(::operator new(__sz + sizeof(size_t)));
}
catch(...)
{
......@@ -95,10 +86,10 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
}
else
{
size_t* __ret = *__tmp;
_M_get_free_list().erase(__tmp);
#if defined __GTHREADS
__bfl_mutex.unlock();
#endif
return __ret + 1;
}
......@@ -109,7 +100,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
_M_clear()
{
#if defined __GTHREADS
__gnu_cxx::__scoped_lock __bfl_lock(_M_get_mutex());
#endif
vector_type& __free_list = _M_get_free_list();
iterator __iter = __free_list.begin();
......
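The comment in _M_get above gives the reason for the early unlock: ::operator new is already thread-safe and may block or throw, so the free-list mutex should not be held across it. The pattern in isolation (hypothetical names, pthreads for illustration):

#include <new>
#include <cstddef>
#include <pthread.h>

pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
void* cached_block;             // a one-element stand-in for the free list

void*
get_block(std::size_t __n)
{
  pthread_mutex_lock(&cache_mutex);
  if (cached_block)
    {
      void* __p = cached_block;
      cached_block = 0;
      pthread_mutex_unlock(&cache_mutex);
      return __p;
    }
  // Drop the lock before the potentially slow or throwing allocation.
  pthread_mutex_unlock(&cache_mutex);
  return ::operator new(__n);
}

int main() { ::operator delete(get_block(32)); return 0; }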
......@@ -35,14 +35,14 @@
#include <cassert>
#include <cstring>
#include <cctype>
#include <ext/concurrence.h>
using namespace std;
namespace
{
__gnu_cxx::__mutex iterator_base_mutex;
} // anonymous namespace
namespace __gnu_debug
{
......@@ -192,7 +192,7 @@ namespace __gnu_debug
// Attach to the new sequence (if there is one)
if (__seq)
{
__gnu_cxx::__scoped_lock sentry(iterator_base_mutex);
_M_sequence = __seq;
_M_version = _M_sequence->_M_version;
_M_prior = 0;
......@@ -217,7 +217,7 @@ namespace __gnu_debug
_Safe_iterator_base::
_M_detach()
{
__gnu_cxx::__scoped_lock sentry(iterator_base_mutex);
if (_M_sequence)
{
// Remove us from this sequence's list
......
......@@ -34,7 +34,6 @@
#include <ios>
#include <limits>
_GLIBCXX_BEGIN_NAMESPACE(std)
......
......@@ -36,7 +36,6 @@
#include <ostream>
#include <istream>
#include <fstream>
#include <ext/stdio_filebuf.h>
#include <ext/stdio_sync_filebuf.h>
......
......@@ -32,14 +32,12 @@
#include <cctype>
#include <cwctype> // For towupper, etc.
#include <locale>
#include <ext/concurrence.h>
namespace
{
// Mutex object for cache access.
__gnu_cxx::__mutex locale_cache_mutex;
} // anonymous namespace
// XXX GLIBCXX_ABI Deprecated
#ifdef _GLIBCXX_LONG_DOUBLE_COMPAT
......@@ -391,7 +389,7 @@ _GLIBCXX_BEGIN_NAMESPACE(std)
locale::_Impl::
_M_install_cache(const facet* __cache, size_t __index)
{
__gnu_cxx::__scoped_lock sentry(locale_cache_mutex);
if (_M_caches[__index] != 0)
{
// Some other thread got in first.
......
......@@ -32,8 +32,7 @@
#include <cctype>
#include <cwctype> // For towupper, etc.
#include <locale>
#include <ext/concurrence.h>
namespace
{
......@@ -200,8 +199,7 @@ namespace
fake_time_cache_w timepunct_cache_w;
#endif
// Mutex object for locale initialization.
__gnu_cxx::__mutex locale_mutex;
} // anonymous namespace
_GLIBCXX_BEGIN_NAMESPACE(std)
......@@ -209,7 +207,7 @@ _GLIBCXX_BEGIN_NAMESPACE(std)
locale::locale() throw() : _M_impl(0)
{
_S_initialize();
__gnu_cxx::__scoped_lock sentry(locale_mutex);
_S_global->_M_add_reference();
_M_impl = _S_global;
}
......@@ -220,7 +218,7 @@ _GLIBCXX_BEGIN_NAMESPACE(std)
_S_initialize();
_Impl* __old;
{
__gnu_cxx::__scoped_lock sentry(locale_mutex);
__old = _S_global;
__other._M_impl->_M_add_reference();
_S_global = __other._M_impl;
......
......@@ -32,7 +32,7 @@
//
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>
......@@ -59,13 +59,13 @@ namespace
// Ensure freelist is constructed first.
static __freelist freelist;
__gnu_cxx::__mutex freelist_mutex;
static void
_M_destroy_thread_key(void* __id)
{
// Return this thread id record to the front of thread_freelist.
__gnu_cxx::__scoped_lock sentry(freelist_mutex);
size_t _M_id = reinterpret_cast<size_t>(__id);
typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
......@@ -497,11 +497,10 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
if (__gthread_active_p())
{
{
__gnu_cxx::__scoped_lock sentry(freelist_mutex);
if (!freelist._M_thread_freelist_array
|| freelist._M_max_threads < _M_options._M_max_threads)
{
const size_t __k = sizeof(_Thread_record)
* _M_options._M_max_threads;
......@@ -622,7 +621,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
if (_M_id == 0)
{
{
__gnu_cxx::__scoped_lock sentry(freelist_mutex);
if (freelist._M_thread_freelist)
{
_M_id = freelist._M_thread_freelist->_M_id;
......@@ -695,7 +694,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
if (__gthread_active_p())
{
{
__gnu_cxx::__scoped_lock sentry(freelist_mutex);
if (!freelist._M_thread_freelist_array
|| freelist._M_max_threads
......
......@@ -37,8 +37,8 @@
namespace
{
__gnu_cxx::__mutex palloc_init_mutex;
} // anonymous namespace
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
......@@ -50,7 +50,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
return _S_free_list + __i;
}
__mutex&
__pool_alloc_base::_M_get_mutex()
{ return palloc_init_mutex; }
......