Commit 036e0d4f by Benjamin Kosnik

atomic: Remove atomic_address, uplift to N3225.

2011-02-16  Benjamin Kosnik  <bkoz@redhat.com>

	* include/std/atomic: Remove atomic_address, uplift to N3225.
	* include/bits/atomic_0.h: Same.
	* include/bits/atomic_2.h: Same.
	* include/bits/atomic_base.h: Same.
	* testsuite/29_atomics/atomic_address/*: Delete.

From-SVN: r170217
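For migration context: N3225 drops the freestanding atomic_address type, and pointer users move to the atomic<_Tp*> partial specialization. A minimal sketch of affected user code, assuming a conforming <atomic> (the commented-out line shows the old-style usage being removed):

#include <atomic>

int main()
{
  int __x = 0, __y = 0;
  // Before this change: std::atomic_address __a(&__x);
  // After, the pointer specialization covers the same use:
  std::atomic<int*> __a(&__x);
  __a.store(&__y);
  int* __p = __a.load();              // __p == &__y
  return __p == &__y ? 0 : 1;
}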
@@ -134,300 +134,6 @@ namespace __atomic0
};
/// atomic_address
struct atomic_address
{
private:
void* _M_i;
public:
atomic_address() = default;
~atomic_address() = default;
atomic_address(const atomic_address&) = delete;
atomic_address& operator=(const atomic_address&) = delete;
atomic_address& operator=(const atomic_address&) volatile = delete;
constexpr atomic_address(void* __v): _M_i (__v) { }
bool
is_lock_free() const { return false; }
bool
is_lock_free() const volatile { return false; }
void
store(void* __v, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __v, __m);
}
void
store(void* __v, memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __v, __m);
}
void*
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
return _ATOMIC_LOAD_(this, __m);
}
void*
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
return _ATOMIC_LOAD_(this, __m);
}
void*
exchange(void* __v, memory_order __m = memory_order_seq_cst)
{ return _ATOMIC_MODIFY_(this, =, __v, __m); }
void*
exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
{ return _ATOMIC_MODIFY_(this, =, __v, __m); }
bool
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_weak(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_strong(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
void*
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{
void** __p = &(_M_i);
__atomic_flag_base* __g = __atomic_flag_for_address(__p);
__atomic_flag_wait_explicit(__g, __m);
void* __r = *__p;
*__p = (void*)((char*)(*__p) + __d);
atomic_flag_clear_explicit(__g, __m);
return __r;
}
void*
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
{
void* volatile* __p = &(_M_i);
__atomic_flag_base* __g = __atomic_flag_for_address(__p);
__atomic_flag_wait_explicit(__g, __m);
void* __r = *__p;
*__p = (void*)((char*)(*__p) + __d);
atomic_flag_clear_explicit(__g, __m);
return __r;
}
void*
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{
void** __p = &(_M_i);
__atomic_flag_base* __g = __atomic_flag_for_address(__p);
__atomic_flag_wait_explicit(__g, __m);
void* __r = *__p;
*__p = (void*)((char*)(*__p) - __d);
atomic_flag_clear_explicit(__g, __m);
return __r;
}
void*
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
{
void* volatile* __p = &(_M_i);
__atomic_flag_base* __g = __atomic_flag_for_address(__p);
__atomic_flag_wait_explicit(__g, __m);
void* __r = *__p;
*__p = (void*)((char*)(*__p) - __d);
atomic_flag_clear_explicit(__g, __m);
return __r;
}
operator void*() const
{ return load(); }
operator void*() const volatile
{ return load(); }
// XXX
void*
operator=(void* __v)
{
store(__v);
return __v;
}
void*
operator=(void* __v) volatile
{
store(__v);
return __v;
}
void*
operator+=(ptrdiff_t __d)
{ return fetch_add(__d) + __d; }
void*
operator+=(ptrdiff_t __d) volatile
{ return fetch_add(__d) + __d; }
void*
operator-=(ptrdiff_t __d)
{ return fetch_sub(__d) - __d; }
void*
operator-=(ptrdiff_t __d) volatile
{ return fetch_sub(__d) - __d; }
};
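The fetch_add/fetch_sub bodies above show the locked-fallback pattern used throughout __atomic0: spin on a flag fetched via __atomic_flag_for_address, do the plain update, then release the flag. A self-contained sketch of that pattern against the standard atomic_flag API (a single global lock here is a simplification; the library hashes the object's address into a table of flags):

#include <atomic>
#include <cstddef>

static std::atomic_flag __guard = ATOMIC_FLAG_INIT;

// Locked fetch_add over a plain void*, mirroring the member above.
void*
__locked_fetch_add(void** __p, std::ptrdiff_t __d)
{
  while (__guard.test_and_set(std::memory_order_acquire))
    ;                                  // spin until the lock is released
  void* __r = *__p;                    // remember the old value
  *__p = static_cast<char*>(*__p) + __d;
  __guard.clear(std::memory_order_release);
  return __r;                          // fetch_add returns the old value
}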
/// Base class for atomic integrals.
//
// For each of the integral types, define atomic_[integral type] struct
@@ -728,6 +434,220 @@ namespace __atomic0
{ return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
};
/// Partial specialization for pointer types.
template<typename _PTp>
struct __atomic_base<_PTp*>
{
private:
typedef _PTp* __return_pointer_type;
typedef void* __pointer_type;
__pointer_type _M_i;
public:
__atomic_base() = default;
~__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) volatile = delete;
// Requires __pointer_type convertible to _M_i.
constexpr __atomic_base(__return_pointer_type __p): _M_i (__p) { }
operator __return_pointer_type() const
{ return reinterpret_cast<__return_pointer_type>(load()); }
operator __return_pointer_type() const volatile
{ return reinterpret_cast<__return_pointer_type>(load()); }
__return_pointer_type
operator=(__pointer_type __p)
{
store(__p);
return reinterpret_cast<__return_pointer_type>(__p);
}
__return_pointer_type
operator=(__pointer_type __p) volatile
{
store(__p);
return reinterpret_cast<__return_pointer_type>(__p);
}
__return_pointer_type
operator++(int)
{ return reinterpret_cast<__return_pointer_type>(fetch_add(1)); }
__return_pointer_type
operator++(int) volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_add(1)); }
__return_pointer_type
operator--(int)
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(1)); }
__return_pointer_type
operator--(int) volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(1)); }
__return_pointer_type
operator++()
{ return reinterpret_cast<__return_pointer_type>(fetch_add(1) + 1); }
__return_pointer_type
operator++() volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_add(1) + 1); }
__return_pointer_type
operator--()
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(1) - 1); }
__return_pointer_type
operator--() volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(1) - 1); }
__return_pointer_type
operator+=(ptrdiff_t __d)
{ return reinterpret_cast<__return_pointer_type>(fetch_add(__d) + __d); }
__return_pointer_type
operator+=(ptrdiff_t __d) volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_add(__d) + __d); }
__return_pointer_type
operator-=(ptrdiff_t __d)
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(__d) - __d); }
__return_pointer_type
operator-=(ptrdiff_t __d) volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(__d) - __d); }
bool
is_lock_free() const
{ return true; }
bool
is_lock_free() const volatile
{ return true; }
void
store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __p, __m);
}
void
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
volatile __pointer_type* __p2 = &_M_i;
__typeof__(__p) __w = (__p);
__atomic_flag_base* __g = __atomic_flag_for_address(__p2);
__atomic_flag_wait_explicit(__g, __m);
*__p2 = reinterpret_cast<__pointer_type>(__w);
atomic_flag_clear_explicit(__g, __m);
__w;
}
__return_pointer_type
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
void* __v = _ATOMIC_LOAD_(this, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
void* __v = _ATOMIC_LOAD_(this, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
void* __v = _ATOMIC_MODIFY_(this, =, __p, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
exchange(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
volatile __pointer_type* __p2 = &_M_i;
__typeof__(__p) __w = (__p);
__atomic_flag_base* __g = __atomic_flag_for_address(__p2);
__atomic_flag_wait_explicit(__g, __m);
__pointer_type __r = *__p2;
*__p2 = __w;
atomic_flag_clear_explicit(__g, __m);
__r;
return reinterpret_cast<__return_pointer_type>(_M_i);
}
bool
compare_exchange_strong(__return_pointer_type& __rp1, __pointer_type __p2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type& __p1 = reinterpret_cast<void*&>(__rp1);
return _ATOMIC_CMPEXCHNG_(this, &__p1, __p2, __m1);
}
bool
compare_exchange_strong(__return_pointer_type& __rp1, __pointer_type __p2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type& __p1 = reinterpret_cast<void*&>(__rp1);
return _ATOMIC_CMPEXCHNG_(this, &__p1, __p2, __m1);
}
__return_pointer_type
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{
void* __v = _ATOMIC_MODIFY_(this, +=, __d, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
fetch_add(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{
void* __v = _ATOMIC_MODIFY_(this, +=, __d, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{
void* __v = _ATOMIC_MODIFY_(this, -=, __d, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
fetch_sub(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{
void* __v = _ATOMIC_MODIFY_(this, -=, __d, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
};
#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
@@ -735,6 +655,6 @@ namespace __atomic0
} // namespace __atomic0
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
} // namespace std
#endif
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
@@ -23,7 +23,7 @@
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_2.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
@@ -101,317 +101,6 @@ namespace __atomic2
};
/// atomic_address
struct atomic_address
{
private:
void* _M_i;
public:
atomic_address() = default;
~atomic_address() = default;
atomic_address(const atomic_address&) = delete;
atomic_address& operator=(const atomic_address&) = delete;
atomic_address& operator=(const atomic_address&) volatile = delete;
constexpr atomic_address(void* __v): _M_i (__v) { }
bool
is_lock_free() const { return true; }
bool
is_lock_free() const volatile { return true; }
void
store(void* __v, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __v;
else
{
// write_mem_barrier();
_M_i = __v;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void
store(void* __v, memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __v;
else
{
// write_mem_barrier();
_M_i = __v;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void*
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
void* __ret = _M_i;
__sync_synchronize();
return __ret;
}
void*
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
void* __ret = _M_i;
__sync_synchronize();
return __ret;
}
void*
exchange(void* __v, memory_order __m = memory_order_seq_cst)
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_i, __v);
}
void*
exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_i, __v);
}
bool
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2)
{ return compare_exchange_strong(__v1, __v2, __m1, __m2); }
bool
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{ return compare_exchange_strong(__v1, __v2, __m1, __m2); }
bool
compare_exchange_weak(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2)
{ return compare_exchange_strong(__v1, __v2, __m1, __m2); }
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2) volatile
{ return compare_exchange_strong(__v1, __v2, __m1, __m2); }
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
void* __v1o = __v1;
void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);
// Assume extra stores (of same value) allowed in true case.
__v1 = __v1n;
return __v1o == __v1n;
}
bool
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
void* __v1o = __v1;
void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);
// Assume extra stores (of same value) allowed in true case.
__v1 = __v1n;
return __v1o == __v1n;
}
bool
compare_exchange_strong(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
const void* __v1o = __v1;
const void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);
// Assume extra stores (of same value) allowed in true case.
__v1 = __v1n;
return __v1o == __v1n;
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
const void* __v1o = __v1;
const void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);
// Assume extra stores (of same value) allowed in true case.
__v1 = __v1n;
return __v1o == __v1n;
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
void*
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_add(&_M_i, __d); }
void*
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_add(&_M_i, __d); }
void*
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_sub(&_M_i, __d); }
void*
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_sub(&_M_i, __d); }
operator void*() const
{ return load(); }
operator void*() const volatile
{ return load(); }
void*
#if 0
// XXX as specified but won't compile as store takes void*,
// invalid conversion from const void* to void*
// CD1 had this signature
operator=(const void* __v)
#else
operator=(void* __v)
#endif
{
store(__v);
return __v;
}
void*
#if 0
// XXX as specified but won't compile as store takes void*,
// invalid conversion from const void* to void*
// CD1 had this signature, but store and this could both be const void*?
operator=(const void* __v) volatile
#else
operator=(void* __v) volatile
#endif
{
store(__v);
return __v;
}
void*
operator+=(ptrdiff_t __d)
{ return __sync_add_and_fetch(&_M_i, __d); }
void*
operator+=(ptrdiff_t __d) volatile
{ return __sync_add_and_fetch(&_M_i, __d); }
void*
operator-=(ptrdiff_t __d)
{ return __sync_sub_and_fetch(&_M_i, __d); }
void*
operator-=(ptrdiff_t __d) volatile
{ return __sync_sub_and_fetch(&_M_i, __d); }
};
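A standalone sketch of the strong compare-exchange semantics implemented above, built on the same GCC __sync_val_compare_and_swap builtin (the function name __cas_strong is illustrative, not part of the library):

#include <cstdio>

bool
__cas_strong(void** __addr, void*& __expected, void* __desired)
{
  void* __old = __expected;
  void* __seen = __sync_val_compare_and_swap(__addr, __old, __desired);
  __expected = __seen;      // on failure, report the value actually seen
  return __seen == __old;   // success iff the stored value matched
}

int main()
{
  int __x, __y;
  void* __p = &__x;
  void* __e = &__x;
  std::printf("%d\n", __cas_strong(&__p, __e, &__y)); // 1: swapped in &__y
  std::printf("%d\n", __cas_strong(&__p, __e, &__x)); // 0: __e now &__y
  return 0;
}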
/// Base class for atomic integrals.
//
// For each of the integral types, define atomic_[integral type] struct
@@ -747,9 +436,234 @@ namespace __atomic2
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_xor(&_M_i, __i); }
};
/// Partial specialization for pointer types.
template<typename _PTp>
struct __atomic_base<_PTp*>
{
private:
typedef _PTp* __pointer_type;
__pointer_type _M_p;
public:
__atomic_base() = default;
~__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) volatile = delete;
// Requires __pointer_type convertible to _M_p.
constexpr __atomic_base(__pointer_type __p): _M_p (__p) { }
operator __pointer_type() const
{ return load(); }
operator __pointer_type() const volatile
{ return load(); }
__pointer_type
operator=(__pointer_type __p)
{
store(__p);
return __p;
}
__pointer_type
operator=(__pointer_type __p) volatile
{
store(__p);
return __p;
}
__pointer_type
operator++(int)
{ return fetch_add(1); }
__pointer_type
operator++(int) volatile
{ return fetch_add(1); }
__pointer_type
operator--(int)
{ return fetch_sub(1); }
__pointer_type
operator--(int) volatile
{ return fetch_sub(1); }
__pointer_type
operator++()
{ return fetch_add(1) + 1; }
__pointer_type
operator++() volatile
{ return fetch_add(1) + 1; }
__pointer_type
operator--()
{ return fetch_sub(1) - 1; }
__pointer_type
operator--() volatile
{ return fetch_sub(1) - 1; }
__pointer_type
operator+=(ptrdiff_t __d)
{ return fetch_add(__d) + __d; }
__pointer_type
operator+=(ptrdiff_t __d) volatile
{ return fetch_add(__d) + __d; }
__pointer_type
operator-=(ptrdiff_t __d)
{ return fetch_sub(__d) - __d; }
__pointer_type
operator-=(ptrdiff_t __d) volatile
{ return fetch_sub(__d) - __d; }
bool
is_lock_free() const
{ return true; }
bool
is_lock_free() const volatile
{ return true; }
void
store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_p = __p;
else
{
// write_mem_barrier();
_M_p = __p;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_p = __p;
else
{
// write_mem_barrier();
_M_p = __p;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
__pointer_type
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__pointer_type __ret = _M_p;
__sync_synchronize();
return __ret;
}
__pointer_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__pointer_type __ret = _M_p;
__sync_synchronize();
return __ret;
}
__pointer_type
exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_p, __p);
}
__pointer_type
exchange(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_p, __p);
}
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type __p1o = __p1;
__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
// Assume extra stores (of same value) allowed in true case.
__p1 = __p1n;
return __p1o == __p1n;
}
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type __p1o = __p1;
__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
// Assume extra stores (of same value) allowed in true case.
__p1 = __p1n;
return __p1o == __p1n;
}
__pointer_type
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_add(&_M_p, __d); }
__pointer_type
fetch_add(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_add(&_M_p, __d); }
__pointer_type
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_sub(&_M_p, __d); }
__pointer_type
fetch_sub(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_sub(&_M_p, __d); }
};
} // namespace __atomic2
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
} // namespace std
#endif
@@ -22,7 +22,7 @@
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_base.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
@@ -68,6 +68,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __mo2;
}
void
atomic_thread_fence(memory_order);
void
atomic_signal_fence(memory_order);
/// kill_dependency
template<typename _Tp>
inline _Tp
@@ -78,7 +84,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
/**
* @brief Base type for atomic_flag.
*
* Base type is POD with data, allowing atomic_flag to derive from
* it and meet the standard layout type requirement. In addition to
@@ -114,27 +120,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace __atomic0
{
struct atomic_flag;
struct atomic_address;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic2
{
struct atomic_flag;
struct atomic_address;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic1
{
using __atomic2::atomic_flag;
using __atomic0::atomic_address;
using __atomic0::__atomic_base;
}
/// Lock-free Property
#if defined(_GLIBCXX_ATOMIC_BUILTINS_1) && defined(_GLIBCXX_ATOMIC_BUILTINS_2) \
@@ -157,7 +160,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#define ATOMIC_INT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LLONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_ADDRESS_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
inline namespace _GLIBCXX_ATOMIC_NAMESPACE { }
@@ -166,28 +168,28 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef __atomic_base<char> atomic_char;
/// atomic_schar
typedef __atomic_base<signed char> atomic_schar;
/// atomic_uchar
typedef __atomic_base<unsigned char> atomic_uchar;
/// atomic_short
typedef __atomic_base<short> atomic_short;
/// atomic_ushort
typedef __atomic_base<unsigned short> atomic_ushort;
/// atomic_int
typedef __atomic_base<int> atomic_int;
/// atomic_uint
typedef __atomic_base<unsigned int> atomic_uint;
/// atomic_long
typedef __atomic_base<long> atomic_long;
/// atomic_ulong
typedef __atomic_base<unsigned long> atomic_ulong;
/// atomic_llong
typedef __atomic_base<long long> atomic_llong;
@@ -212,50 +214,50 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef __atomic_base<int_least8_t> atomic_int_least8_t;
/// atomic_uint_least8_t
typedef __atomic_base<uint_least8_t> atomic_uint_least8_t;
/// atomic_int_least16_t
typedef __atomic_base<int_least16_t> atomic_int_least16_t;
/// atomic_uint_least16_t
typedef __atomic_base<uint_least16_t> atomic_uint_least16_t;
/// atomic_int_least32_t
typedef __atomic_base<int_least32_t> atomic_int_least32_t;
/// atomic_uint_least32_t
typedef __atomic_base<uint_least32_t> atomic_uint_least32_t;
/// atomic_int_least64_t
typedef __atomic_base<int_least64_t> atomic_int_least64_t;
/// atomic_uint_least64_t
typedef __atomic_base<uint_least64_t> atomic_uint_least64_t;
/// atomic_int_fast8_t
typedef __atomic_base<int_fast8_t> atomic_int_fast8_t;
/// atomic_uint_fast8_t
typedef __atomic_base<uint_fast8_t> atomic_uint_fast8_t;
/// atomic_int_fast16_t
typedef __atomic_base<int_fast16_t> atomic_int_fast16_t;
/// atomic_uint_fast16_t
typedef __atomic_base<uint_fast16_t> atomic_uint_fast16_t;
/// atomic_int_fast32_t
typedef __atomic_base<int_fast32_t> atomic_int_fast32_t;
/// atomic_uint_fast32_t
typedef __atomic_base<uint_fast32_t> atomic_uint_fast32_t;
/// atomic_int_fast64_t
typedef __atomic_base<int_fast64_t> atomic_int_fast64_t;
/// atomic_uint_fast64_t
typedef __atomic_base<uint_fast64_t> atomic_uint_fast64_t;
/// atomic_intptr_t
@@ -265,7 +267,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef __atomic_base<uintptr_t> atomic_uintptr_t;
/// atomic_size_t
typedef __atomic_base<size_t> atomic_size_t;
/// atomic_intmax_t
typedef __atomic_base<intmax_t> atomic_intmax_t;
@@ -277,16 +279,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef __atomic_base<ptrdiff_t> atomic_ptrdiff_t;
struct atomic_bool;
#define ATOMIC_VAR_INIT(_VI) { _VI }
template<typename _Tp>
struct atomic;
template<typename _Tp>
struct atomic<_Tp*>;
// @} group atomics
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
} // namespace std
#endif
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@@ -230,132 +230,188 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
/// Partial specialization for pointer types.
template<typename _Tp>
struct atomic<_Tp*> : atomic_address
struct atomic<_Tp*>
{
typedef _Tp* __pointer_type;
typedef __atomic_base<_Tp*> __base_type;
__base_type _M_b;
atomic() = default;
~atomic() = default;
atomic(const atomic&) = delete;
atomic& operator=(const atomic&) = delete;
atomic& operator=(const atomic&) volatile = delete;
constexpr atomic(_Tp* __v) : atomic_address(__v) { }
constexpr atomic(__pointer_type __p) : _M_b(__p) { }
void
store(_Tp*, memory_order = memory_order_seq_cst);
operator __pointer_type() const
{ return __pointer_type(_M_b); }
void
store(_Tp*, memory_order = memory_order_seq_cst) volatile;
operator __pointer_type() const volatile
{ return __pointer_type(_M_b); }
_Tp*
load(memory_order = memory_order_seq_cst) const;
__pointer_type
operator=(__pointer_type __p)
{ return _M_b.operator=(__p); }
_Tp*
load(memory_order = memory_order_seq_cst) const volatile;
__pointer_type
operator=(__pointer_type __p) volatile
{ return _M_b.operator=(__p); }
_Tp*
exchange(_Tp*, memory_order = memory_order_seq_cst);
__pointer_type
operator++(int)
{ return _M_b++; }
_Tp*
exchange(_Tp*, memory_order = memory_order_seq_cst) volatile;
__pointer_type
operator++(int) volatile
{ return _M_b++; }
bool
compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order);
__pointer_type
operator--(int)
{ return _M_b--; }
bool
compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order) volatile;
__pointer_type
operator--(int) volatile
{ return _M_b--; }
bool
compare_exchange_weak(_Tp*&, _Tp*, memory_order = memory_order_seq_cst);
__pointer_type
operator++()
{ return ++_M_b; }
bool
compare_exchange_weak(_Tp*&, _Tp*,
memory_order = memory_order_seq_cst) volatile;
__pointer_type
operator++() volatile
{ return ++_M_b; }
bool
compare_exchange_strong(_Tp*&, _Tp*, memory_order, memory_order);
__pointer_type
operator--()
{ return --_M_b; }
bool
compare_exchange_strong(_Tp*&, _Tp*, memory_order, memory_order) volatile;
__pointer_type
operator--() volatile
{ return --_M_b; }
__pointer_type
operator+=(ptrdiff_t __d)
{ return _M_b.operator+=(__d); }
__pointer_type
operator+=(ptrdiff_t __d) volatile
{ return _M_b.operator+=(__d); }
__pointer_type
operator-=(ptrdiff_t __d)
{ return _M_b.operator-=(__d); }
__pointer_type
operator-=(ptrdiff_t __d) volatile
{ return _M_b.operator-=(__d); }
bool
compare_exchange_strong(_Tp*&, _Tp*, memory_order = memory_order_seq_cst);
is_lock_free() const
{ return _M_b.is_lock_free(); }
bool
compare_exchange_strong(_Tp*&, _Tp*,
memory_order = memory_order_seq_cst) volatile;
is_lock_free() const volatile
{ return _M_b.is_lock_free(); }
void
store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{ return _M_b.store(__p, __m); }
void
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{ return _M_b.store(__p, __m); }
_Tp*
fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst);
__pointer_type
load(memory_order __m = memory_order_seq_cst) const
{ return _M_b.load(__m); }
_Tp*
fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst) volatile;
__pointer_type
load(memory_order __m = memory_order_seq_cst) const volatile
{ return _M_b.load(__m); }
_Tp*
fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst);
__pointer_type
exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{ return _M_b.exchange(__p, __m); }
_Tp*
fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst) volatile;
__pointer_type
exchange(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{ return _M_b.exchange(__p, __m); }
operator _Tp*() const
{ return load(); }
bool
compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2)
{ return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
operator _Tp*() const volatile
{ return load(); }
bool
compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2) volatile
{ return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
_Tp*
operator=(_Tp* __v)
bool
compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
memory_order __m = memory_order_seq_cst)
{
store(__v);
return __v;
return compare_exchange_weak(__p1, __p2, __m,
__calculate_memory_order(__m));
}
_Tp*
operator=(_Tp* __v) volatile
bool
compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
memory_order __m = memory_order_seq_cst) volatile
{
store(__v);
return __v;
return compare_exchange_weak(__p1, __p2, __m,
__calculate_memory_order(__m));
}
_Tp*
operator++(int) { return fetch_add(1); }
_Tp*
operator++(int) volatile { return fetch_add(1); }
_Tp*
operator--(int) { return fetch_sub(1); }
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2)
{ return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
_Tp*
operator--(int) volatile { return fetch_sub(1); }
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2) volatile
{ return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
_Tp*
operator++() { return fetch_add(1) + 1; }
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m = memory_order_seq_cst)
{
return _M_b.compare_exchange_strong(__p1, __p2, __m,
__calculate_memory_order(__m));
}
_Tp*
operator++() volatile { return fetch_add(1) + 1; }
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m = memory_order_seq_cst) volatile
{
return _M_b.compare_exchange_strong(__p1, __p2, __m,
__calculate_memory_order(__m));
}
_Tp*
operator--() { return fetch_sub(1) - 1; }
__pointer_type
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return _M_b.fetch_add(__d, __m); }
_Tp*
operator--() volatile { return fetch_sub(1) - 1; }
__pointer_type
fetch_add(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return _M_b.fetch_add(__d, __m); }
_Tp*
operator+=(ptrdiff_t __d)
{ return fetch_add(__d) + __d; }
__pointer_type
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return _M_b.fetch_sub(__d, __m); }
_Tp*
operator+=(ptrdiff_t __d) volatile
{ return fetch_add(__d) + __d; }
_Tp*
operator-=(ptrdiff_t __d)
{ return fetch_sub(__d) - __d; }
_Tp*
operator-=(ptrdiff_t __d) volatile
{ return fetch_sub(__d) - __d; }
__pointer_type
fetch_sub(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return _M_b.fetch_sub(__d, __m); }
};
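The rewritten specialization is a thin wrapper: every member forwards to the __atomic_base<_Tp*> member _M_b. A short usage sketch against the standard interface (note that, per the standard, arithmetic on atomic<_Tp*> is in units of _Tp, not bytes):

#include <atomic>
#include <cassert>

int main()
{
  int __arr[4] = { 0, 1, 2, 3 };
  std::atomic<int*> __p(__arr);
  int* __old = __p.fetch_add(2);       // returns the previous pointer
  assert(__old == __arr);
  assert(__p.load() == __arr + 2);     // advanced by two ints, not two bytes
  int* __expected = __arr + 2;
  assert(__p.compare_exchange_strong(__expected, __arr));
  assert(__p.load() == __arr);
  return 0;
}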
/// Explicit specialization for bool.
template<>
struct atomic<bool> : public atomic_bool
@@ -642,143 +698,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
};
template<typename _Tp>
_Tp*
atomic<_Tp*>::load(memory_order __m) const
{ return static_cast<_Tp*>(atomic_address::load(__m)); }
template<typename _Tp>
_Tp*
atomic<_Tp*>::load(memory_order __m) const volatile
{ return static_cast<_Tp*>(atomic_address::load(__m)); }
template<typename _Tp>
_Tp*
atomic<_Tp*>::exchange(_Tp* __v, memory_order __m)
{ return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); }
template<typename _Tp>
_Tp*
atomic<_Tp*>::exchange(_Tp* __v, memory_order __m) volatile
{ return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); }
template<typename _Tp>
bool
atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m1,
memory_order __m2)
{
void** __vr = reinterpret_cast<void**>(&__r);
void* __vv = static_cast<void*>(__v);
return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2);
}
template<typename _Tp>
bool
atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m1,
memory_order __m2) volatile
{
void** __vr = reinterpret_cast<void**>(&__r);
void* __vv = static_cast<void*>(__v);
return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2);
}
template<typename _Tp>
bool
atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m)
{
return compare_exchange_weak(__r, __v, __m,
__calculate_memory_order(__m));
}
template<typename _Tp>
bool
atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v,
memory_order __m) volatile
{
return compare_exchange_weak(__r, __v, __m,
__calculate_memory_order(__m));
}
template<typename _Tp>
bool
atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
memory_order __m1,
memory_order __m2)
{
void** __vr = reinterpret_cast<void**>(&__r);
void* __vv = static_cast<void*>(__v);
return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2);
}
template<typename _Tp>
bool
atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
memory_order __m1,
memory_order __m2) volatile
{
void** __vr = reinterpret_cast<void**>(&__r);
void* __vv = static_cast<void*>(__v);
return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2);
}
template<typename _Tp>
bool
atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
memory_order __m)
{
return compare_exchange_strong(__r, __v, __m,
__calculate_memory_order(__m));
}
template<typename _Tp>
bool
atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
memory_order __m) volatile
{
return compare_exchange_strong(__r, __v, __m,
__calculate_memory_order(__m));
}
template<typename _Tp>
_Tp*
atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m)
{
void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m);
return static_cast<_Tp*>(__p);
}
template<typename _Tp>
_Tp*
atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m) volatile
{
void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m);
return static_cast<_Tp*>(__p);
}
template<typename _Tp>
_Tp*
atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m)
{
void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m);
return static_cast<_Tp*>(__p);
}
template<typename _Tp>
_Tp*
atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m) volatile
{
void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m);
return static_cast<_Tp*>(__p);
}
// Function definitions, atomic_flag operations.
inline bool
atomic_flag_test_and_set_explicit(atomic_flag* __a, memory_order __m)
{ return __a->test_and_set(__m); }
inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag* __a,
memory_order __m)
{ return __a->test_and_set(__m); }
@@ -805,355 +731,78 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
inline void
atomic_flag_clear(volatile atomic_flag* __a)
{ atomic_flag_clear_explicit(__a, memory_order_seq_cst); }
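These free functions give atomic_flag a C-style interface; the classic application is a spinlock. A brief sketch using only the functions defined above:

#include <atomic>

static std::atomic_flag __lock = ATOMIC_FLAG_INIT;

// Run __fn with the lock held; __fn is any plain function.
void
__with_lock(void (*__fn)())
{
  // Acquire: spin until test_and_set observes the flag clear.
  while (std::atomic_flag_test_and_set_explicit(&__lock,
                                                std::memory_order_acquire))
    ;
  __fn();
  // Release: clear the flag so another thread may enter.
  std::atomic_flag_clear_explicit(&__lock, std::memory_order_release);
}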
// Function definitions, atomic_address operations.
inline bool
atomic_is_lock_free(const atomic_address* __a)
{ return __a->is_lock_free(); }
inline bool
atomic_is_lock_free(const volatile atomic_address* __a)
{ return __a->is_lock_free(); }
inline void
atomic_init(atomic_address* __a, void* __v);
inline void
atomic_init(volatile atomic_address* __a, void* __v);
inline void
atomic_store_explicit(atomic_address* __a, void* __v, memory_order __m)
{ __a->store(__v, __m); }
inline void
atomic_store_explicit(volatile atomic_address* __a, void* __v,
memory_order __m)
{ __a->store(__v, __m); }
inline void
atomic_store(atomic_address* __a, void* __v)
{ __a->store(__v); }
inline void
atomic_store(volatile atomic_address* __a, void* __v)
{ __a->store(__v); }
inline void*
atomic_load_explicit(const atomic_address* __a, memory_order __m)
{ return __a->load(__m); }
inline void*
atomic_load_explicit(const volatile atomic_address* __a, memory_order __m)
{ return __a->load(__m); }
inline void*
atomic_load(const atomic_address* __a)
{ return __a->load(); }
inline void*
atomic_load(const volatile atomic_address* __a)
{ return __a->load(); }
inline void*
atomic_exchange_explicit(atomic_address* __a, void* __v, memory_order __m)
{ return __a->exchange(__v, __m); }
inline void*
atomic_exchange_explicit(volatile atomic_address* __a, void* __v,
memory_order __m)
{ return __a->exchange(__v, __m); }
inline void*
atomic_exchange(atomic_address* __a, void* __v)
{ return __a->exchange(__v); }
inline void*
atomic_exchange(volatile atomic_address* __a, void* __v)
{ return __a->exchange(__v); }
inline bool
atomic_compare_exchange_weak_explicit(atomic_address* __a,
void** __v1, void* __v2,
memory_order __m1, memory_order __m2)
{ return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); }
inline bool
atomic_compare_exchange_weak_explicit(volatile atomic_address* __a,
void** __v1, void* __v2,
memory_order __m1, memory_order __m2)
{ return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); }
inline bool
atomic_compare_exchange_weak(atomic_address* __a, void** __v1, void* __v2)
{
return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst,
memory_order_seq_cst);
}
inline bool
atomic_compare_exchange_weak(volatile atomic_address* __a, void** __v1,
void* __v2)
{
return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst,
memory_order_seq_cst);
}
inline bool
atomic_compare_exchange_strong_explicit(atomic_address* __a,
void** __v1, void* __v2,
memory_order __m1, memory_order __m2)
{ return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); }
inline bool
atomic_compare_exchange_strong_explicit(volatile atomic_address* __a,
void** __v1, void* __v2,
memory_order __m1, memory_order __m2)
{ return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); }
inline bool
atomic_compare_exchange_strong(atomic_address* __a, void** __v1, void* __v2)
{
return __a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst,
memory_order_seq_cst);
}
inline bool
atomic_compare_exchange_strong(volatile atomic_address* __a,
void** __v1, void* __v2)
{
return __a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst,
memory_order_seq_cst);
}
inline void*
atomic_fetch_add_explicit(atomic_address* __a, ptrdiff_t __d,
memory_order __m)
{ return __a->fetch_add(__d, __m); }
inline void*
atomic_fetch_add_explicit(volatile atomic_address* __a, ptrdiff_t __d,
memory_order __m)
{ return __a->fetch_add(__d, __m); }
inline void*
atomic_fetch_add(atomic_address* __a, ptrdiff_t __d)
{ return __a->fetch_add(__d); }
inline void*
atomic_fetch_add(volatile atomic_address* __a, ptrdiff_t __d)
{ return __a->fetch_add(__d); }
inline void*
atomic_fetch_sub_explicit(atomic_address* __a, ptrdiff_t __d,
memory_order __m)
{ return __a->fetch_sub(__d, __m); }
inline void*
atomic_fetch_sub_explicit(volatile atomic_address* __a, ptrdiff_t __d,
memory_order __m)
{ return __a->fetch_sub(__d, __m); }
inline void*
atomic_fetch_sub(atomic_address* __a, ptrdiff_t __d)
{ return __a->fetch_sub(__d); }
inline void*
atomic_fetch_sub(volatile atomic_address* __a, ptrdiff_t __d)
{ return __a->fetch_sub(__d); }
// Function definitions, atomic_bool operations.
inline bool
atomic_is_lock_free(const atomic_bool* __a)
{ return __a->is_lock_free(); }
inline bool
atomic_is_lock_free(const volatile atomic_bool* __a)
{ return __a->is_lock_free(); }
inline void
atomic_init(atomic_bool* __a, bool __b);
inline void
atomic_init(volatile atomic_bool* __a, bool __b);
inline void
atomic_store_explicit(atomic_bool* __a, bool __i, memory_order __m)
{ __a->store(__i, __m); }
inline void
atomic_store_explicit(volatile atomic_bool* __a, bool __i, memory_order __m)
{ __a->store(__i, __m); }
inline void
atomic_store(atomic_bool* __a, bool __i)
{ __a->store(__i); }
inline void
atomic_store(volatile atomic_bool* __a, bool __i)
{ __a->store(__i); }
inline bool
atomic_load_explicit(const atomic_bool* __a, memory_order __m)
{ return __a->load(__m); }
inline bool
atomic_load_explicit(const volatile atomic_bool* __a, memory_order __m)
{ return __a->load(__m); }
inline bool
atomic_load(const atomic_bool* __a)
{ return __a->load(); }
inline bool
atomic_load(const volatile atomic_bool* __a)
{ return __a->load(); }
inline bool
atomic_exchange_explicit(atomic_bool* __a, bool __i, memory_order __m)
{ return __a->exchange(__i, __m); }
inline bool
atomic_exchange_explicit(volatile atomic_bool* __a, bool __i,
memory_order __m)
{ return __a->exchange(__i, __m); }
inline bool
atomic_exchange(atomic_bool* __a, bool __i)
{ return __a->exchange(__i); }
inline bool
atomic_exchange(volatile atomic_bool* __a, bool __i)
{ return __a->exchange(__i); }
inline bool
atomic_compare_exchange_weak_explicit(atomic_bool* __a, bool* __i1,
bool __i2, memory_order __m1,
memory_order __m2)
{ return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
inline bool
atomic_compare_exchange_weak_explicit(volatile atomic_bool* __a, bool* __i1,
bool __i2, memory_order __m1,
memory_order __m2)
{ return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
inline bool
atomic_compare_exchange_weak(atomic_bool* __a, bool* __i1, bool __i2)
{
return __a->compare_exchange_weak(*__i1, __i2, memory_order_seq_cst,
memory_order_seq_cst);
}
inline bool
atomic_compare_exchange_weak(volatile atomic_bool* __a, bool* __i1, bool __i2)
{
return __a->compare_exchange_weak(*__i1, __i2, memory_order_seq_cst,
memory_order_seq_cst);
}
inline bool
atomic_compare_exchange_strong_explicit(atomic_bool* __a,
bool* __i1, bool __i2,
memory_order __m1, memory_order __m2)
{ return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
inline bool
atomic_compare_exchange_strong_explicit(volatile atomic_bool* __a,
bool* __i1, bool __i2,
memory_order __m1, memory_order __m2)
{ return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
inline bool
atomic_compare_exchange_strong(atomic_bool* __a, bool* __i1, bool __i2)
{
return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst,
memory_order_seq_cst);
}
inline bool
atomic_compare_exchange_strong(volatile atomic_bool* __a,
bool* __i1, bool __i2)
{
return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst,
memory_order_seq_cst);
}
// Function templates for atomic_integral operations, using
// __atomic_base. Template argument should be constrained to
// integral types as specified in the standard.
// Function templates generally applicable to atomic types.
template<typename _ITp>
inline bool
atomic_is_lock_free(const __atomic_base<_ITp>* __a)
atomic_is_lock_free(const atomic<_ITp>* __a)
{ return __a->is_lock_free(); }
template<typename _ITp>
inline bool
atomic_is_lock_free(const volatile __atomic_base<_ITp>* __a)
atomic_is_lock_free(const volatile atomic<_ITp>* __a)
{ return __a->is_lock_free(); }
template<typename _ITp>
inline void
atomic_init(__atomic_base<_ITp>* __a, _ITp __i);
inline void
atomic_init(atomic<_ITp>* __a, _ITp __i);
template<typename _ITp>
inline void
atomic_init(volatile __atomic_base<_ITp>* __a, _ITp __i);
inline void
atomic_init(volatile atomic<_ITp>* __a, _ITp __i);
template<typename _ITp>
inline void
atomic_store_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m)
atomic_store_explicit(atomic<_ITp>* __a, _ITp __i, memory_order __m)
{ __a->store(__i, __m); }
template<typename _ITp>
inline void
atomic_store_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i,
memory_order __m)
{ __a->store(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_load_explicit(const __atomic_base<_ITp>* __a, memory_order __m)
atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m)
{ return __a->load(__m); }
template<typename _ITp>
inline _ITp
atomic_load_explicit(const volatile __atomic_base<_ITp>* __a,
atomic_load_explicit(const volatile atomic<_ITp>* __a,
memory_order __m)
{ return __a->load(__m); }
template<typename _ITp>
inline _ITp
atomic_exchange_explicit(__atomic_base<_ITp>* __a, _ITp __i,
atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->exchange(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_exchange_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->exchange(__i, __m); }
template<typename _ITp>
inline bool
atomic_compare_exchange_weak_explicit(__atomic_base<_ITp>* __a,
atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a,
_ITp* __i1, _ITp __i2,
memory_order __m1, memory_order __m2)
{ return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
template<typename _ITp>
inline bool
atomic_compare_exchange_weak_explicit(volatile __atomic_base<_ITp>* __a,
atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a,
_ITp* __i1, _ITp __i2,
memory_order __m1, memory_order __m2)
{ return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
template<typename _ITp>
inline bool
atomic_compare_exchange_strong_explicit(__atomic_base<_ITp>* __a,
atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a,
_ITp* __i1, _ITp __i2,
memory_order __m1,
memory_order __m2)
@@ -1161,105 +810,46 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _ITp>
inline bool
atomic_compare_exchange_strong_explicit(volatile __atomic_base<_ITp>* __a,
atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a,
_ITp* __i1, _ITp __i2,
memory_order __m1,
memory_order __m2)
{ return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
template<typename _ITp>
inline _ITp
atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_add(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_add(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_sub(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_sub(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_and(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_and(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_or(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_or(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_xor(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_xor(__i, __m); }
template<typename _ITp>
inline void
atomic_store(__atomic_base<_ITp>* __a, _ITp __i)
atomic_store(atomic<_ITp>* __a, _ITp __i)
{ atomic_store_explicit(__a, __i, memory_order_seq_cst); }
template<typename _ITp>
inline void
atomic_store(volatile __atomic_base<_ITp>* __a, _ITp __i)
atomic_store(volatile atomic<_ITp>* __a, _ITp __i)
{ atomic_store_explicit(__a, __i, memory_order_seq_cst); }
template<typename _ITp>
inline _ITp
atomic_load(const __atomic_base<_ITp>* __a)
atomic_load(const atomic<_ITp>* __a)
{ return atomic_load_explicit(__a, memory_order_seq_cst); }
template<typename _ITp>
inline _ITp
atomic_load(const volatile __atomic_base<_ITp>* __a)
atomic_load(const volatile atomic<_ITp>* __a)
{ return atomic_load_explicit(__a, memory_order_seq_cst); }
template<typename _ITp>
inline _ITp
-atomic_exchange(__atomic_base<_ITp>* __a, _ITp __i)
+atomic_exchange(atomic<_ITp>* __a, _ITp __i)
{ return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }
template<typename _ITp>
inline _ITp
-atomic_exchange(volatile __atomic_base<_ITp>* __a, _ITp __i)
+atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i)
{ return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }
template<typename _ITp>
inline bool
-atomic_compare_exchange_weak(__atomic_base<_ITp>* __a,
+atomic_compare_exchange_weak(atomic<_ITp>* __a,
_ITp* __i1, _ITp __i2)
{
return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
@@ -1269,7 +859,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _ITp>
inline bool
-atomic_compare_exchange_weak(volatile __atomic_base<_ITp>* __a,
+atomic_compare_exchange_weak(volatile atomic<_ITp>* __a,
_ITp* __i1, _ITp __i2)
{
return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
@@ -1279,7 +869,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _ITp>
inline bool
-atomic_compare_exchange_strong(__atomic_base<_ITp>* __a,
+atomic_compare_exchange_strong(atomic<_ITp>* __a,
_ITp* __i1, _ITp __i2)
{
return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
@@ -1289,7 +879,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _ITp>
inline bool
-atomic_compare_exchange_strong(volatile __atomic_base<_ITp>* __a,
+atomic_compare_exchange_strong(volatile atomic<_ITp>* __a,
_ITp* __i1, _ITp __i2)
{
return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
@@ -1297,6 +887,70 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
memory_order_seq_cst);
}
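// Illustrative usage sketch (assumed caller code, not from this patch):
// the nonmember functions above simply forward to the member functions,
// supplying memory_order_seq_cst where no order is given.
#include <atomic>
#include <cassert>

int main()
{
  std::atomic<int> a(0);
  std::atomic_store(&a, 1);                       // same as a.store(1)
  int expected = 1;
  // The strong form fails only if the stored value really differs.
  bool ok = std::atomic_compare_exchange_strong(&a, &expected, 2);
  assert(ok && std::atomic_load(&a) == 2);
  // The weak form may fail spuriously, so it is used in a retry loop;
  // on failure, expected is refreshed with the current value.
  expected = 2;
  while (!std::atomic_compare_exchange_weak(&a, &expected, 3))
    { }
  assert(std::atomic_exchange(&a, 0) == 3);
  return 0;
}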
// Function templates for atomic_integral operations only, using
// __atomic_base.  Template argument should be constrained to
// integral types as specified in the standard, excluding address
// types.
template<typename _ITp>
inline _ITp
atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_add(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_add(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_sub(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_sub(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_and(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_and(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_or(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_or(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_xor(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
memory_order __m)
{ return __a->fetch_xor(__i, __m); }
template<typename _ITp>
inline _ITp
atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i)
@@ -1347,6 +1001,51 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i)
{ return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
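// Illustrative usage sketch (assumed caller code, not from this patch):
// the _explicit forms above take a memory_order argument, while the
// plain forms forward with memory_order_seq_cst.
#include <atomic>
#include <cassert>

int main()
{
  std::atomic<unsigned> flags(0x3);
  std::atomic_fetch_or_explicit(&flags, 0x4u, std::memory_order_relaxed);
  std::atomic_fetch_and_explicit(&flags, 0x6u, std::memory_order_relaxed);
  assert(flags.load() == 0x6u);                       // (0x3 | 0x4) & 0x6
  unsigned old = std::atomic_fetch_xor(&flags, 0x6u); // seq_cst default
  assert(old == 0x6u && flags.load() == 0u);
  return 0;
}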
// Partial specializations for pointers.
template<typename _ITp>
inline _ITp*
atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
memory_order __m)
{ return __a->fetch_add(__d, __m); }
template<typename _ITp>
inline _ITp*
atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d,
memory_order __m)
{ return __a->fetch_add(__d, __m); }
template<typename _ITp>
inline _ITp*
atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d)
{ return __a->fetch_add(__d); }
template<typename _ITp>
inline _ITp*
atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d)
{ return __a->fetch_add(__d); }
template<typename _ITp>
inline _ITp*
atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a,
ptrdiff_t __d, memory_order __m)
{ return __a->fetch_sub(__d, __m); }
template<typename _ITp>
inline _ITp*
atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
memory_order __m)
{ return __a->fetch_sub(__d, __m); }
template<typename _ITp>
inline _ITp*
atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d)
{ return __a->fetch_sub(__d); }
template<typename _ITp>
inline _ITp*
atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d)
{ return __a->fetch_sub(__d); }
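// Illustrative usage sketch (assumed caller code, not from this patch):
// for atomic<T*> the displacement is a ptrdiff_t counted in elements,
// so the result matches ordinary pointer arithmetic on T*.
#include <atomic>
#include <cassert>
#include <cstddef>

int main()
{
  int buf[4] = { 0, 1, 2, 3 };
  std::atomic<int*> p(buf);
  int* old = std::atomic_fetch_add(&p, static_cast<std::ptrdiff_t>(2));
  assert(old == buf && p.load() == buf + 2);       // advanced two ints
  std::atomic_fetch_sub_explicit(&p, static_cast<std::ptrdiff_t>(1),
                                 std::memory_order_relaxed);
  assert(p.load() == buf + 1);
  return 0;
}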
// @} group atomics
_GLIBCXX_END_NAMESPACE_VERSION
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
#include <cstddef>
int main()
{
std::atomic_address a __attribute__((unused)) = { { NULL } };
return 0;
}
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
void test01()
{
// Assign.
typedef std::atomic_address test_type;
test_type t1;
test_type t2;
t1 = t2; // { dg-error "deleted" }
}
// { dg-prune-output "include" }
// { dg-do compile }
// { dg-options "-std=gnu++0x" }
// Copyright (C) 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
#include <testsuite_common_types.h>
int main()
{
__gnu_test::constexpr_single_value_constructible test;
test.operator()<std::atomic_address, void*>();
return 0;
}
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
void test01()
{
// Copy.
typedef std::atomic_address test_type;
test_type t1;
test_type t2(t1); // { dg-error "deleted" }
}
// { dg-prune-output "include" }
// { dg-options "-std=gnu++0x" }
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
int main()
{
// Default constructor.
std::atomic_address a;
return 0;
}
// { dg-options "-std=gnu++0x" }
// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
int main()
{
// Single value constructor.
void* v = 0;
std::atomic_address a(v);
return 0;
}
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
#include <testsuite_common_types.h>
void test01()
{
__gnu_test::standard_layout test;
test.operator()<std::atomic_address>();
}
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
#include <testsuite_common_types.h>
void test01()
{
__gnu_test::has_trivial_cons_dtor test;
test.operator()<std::atomic_address>();
}
@@ -94,14 +94,6 @@ namespace gnu
# endif
#endif
-#ifndef ATOMIC_ADDRESS_LOCK_FREE
-# error "ATOMIC_ADDRESS_LOCK_FREE must be a macro"
-# if ATOMIC_ADDRESS_LOCK_FREE != 0 \
-&& ATOMIC_ADDRESS_LOCK_FREE != 1 && ATOMIC_ADDRESS_LOCK_FREE != 2
-# error "ATOMIC_ADDRESS_LOCK_FREE must be 0, 1, or 2"
-# endif
-#endif
#ifndef ATOMIC_FLAG_INIT
#error "ATOMIC_FLAG_INIT_must_be_a_macro"
#endif
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
-// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@@ -72,6 +72,4 @@ void test01()
using std::atomic_ptrdiff_t;
using std::atomic_intmax_t;
using std::atomic_uintmax_t;
-using std::atomic_address;
}