Commit 27f41cfe, authored and committed by Jonathan Wakely

re PR libstdc++/40297 ([C++0x] debug mode vs atomics)

2009-06-24  Jonathan Wakely  <jwakely.gcc@gmail.com>

	PR libstdc++/40297
	* include/bits/atomic_0.h: Reverse debug assertions.
	* include/bits/atomic_2.h: Likewise.

From-SVN: r148893
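Background: these debug-mode assertions were written with == where != was intended, so every *valid* memory order aborted the program while the orders the standard actually forbids passed silently. The diffs below simply flip each comparison. A minimal sketch of the failure mode, not part of the commit, assuming a GCC 4.4-era C++0x toolchain where <cstdatomic> is the pre-C++11 spelling of <atomic> and _GLIBCXX_DEBUG turns on __glibcxx_assert checks:

    // Sketch only: header name and debug macro reflect GCC circa 2009.
    #define _GLIBCXX_DEBUG
    #include <cstdatomic>

    int main()
    {
      std::atomic<int> i(0);
      // A seq_cst store (the default order) is perfectly valid, but the
      // old assertion
      //   __glibcxx_assert(__m == memory_order_acquire);
      // fired for every order except acquire, so this aborted at run time.
      i.store(1, std::memory_order_seq_cst);
      return i.load(std::memory_order_relaxed);  // likewise asserted before the fix
    }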
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,9 @@
+2009-06-24  Jonathan Wakely  <jwakely.gcc@gmail.com>
+
+	PR libstdc++/40297
+	* include/bits/atomic_0.h: Reverse debug assertions.
+	* include/bits/atomic_2.h: Likewise.
+
 2009-06-23  DJ Delorie  <dj@redhat.com>
 
 	Add MeP port.
...@@ -119,17 +119,17 @@ namespace __atomic0 ...@@ -119,17 +119,17 @@ namespace __atomic0
void void
store(void* __v, memory_order __m = memory_order_seq_cst) volatile store(void* __v, memory_order __m = memory_order_seq_cst) volatile
{ {
__glibcxx_assert(__m == memory_order_acquire); __glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m == memory_order_acq_rel); __glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m == memory_order_consume); __glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __v, __m); _ATOMIC_STORE_(this, __v, __m);
} }
void* void*
load(memory_order __m = memory_order_seq_cst) const volatile load(memory_order __m = memory_order_seq_cst) const volatile
{ {
__glibcxx_assert(__m == memory_order_release); __glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m == memory_order_acq_rel); __glibcxx_assert(__m != memory_order_acq_rel);
return _ATOMIC_LOAD_(this, __m); return _ATOMIC_LOAD_(this, __m);
} }
...@@ -141,8 +141,8 @@ namespace __atomic0 ...@@ -141,8 +141,8 @@ namespace __atomic0
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1, compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile memory_order __m2) volatile
{ {
__glibcxx_assert(__m2 == memory_order_release); __glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 == memory_order_acq_rel); __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1); __glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
} }
...@@ -159,8 +159,8 @@ namespace __atomic0 ...@@ -159,8 +159,8 @@ namespace __atomic0
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1, compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile memory_order __m2) volatile
{ {
__glibcxx_assert(__m2 == memory_order_release); __glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 == memory_order_acq_rel); __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1); __glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
} }
...@@ -310,17 +310,17 @@ namespace __atomic0 ...@@ -310,17 +310,17 @@ namespace __atomic0
store(__integral_type __i, store(__integral_type __i,
memory_order __m = memory_order_seq_cst) volatile memory_order __m = memory_order_seq_cst) volatile
{ {
__glibcxx_assert(__m == memory_order_acquire); __glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m == memory_order_acq_rel); __glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m == memory_order_consume); __glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __i, __m); _ATOMIC_STORE_(this, __i, __m);
} }
__integral_type __integral_type
load(memory_order __m = memory_order_seq_cst) const volatile load(memory_order __m = memory_order_seq_cst) const volatile
{ {
__glibcxx_assert(__m == memory_order_release); __glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m == memory_order_acq_rel); __glibcxx_assert(__m != memory_order_acq_rel);
return _ATOMIC_LOAD_(this, __m); return _ATOMIC_LOAD_(this, __m);
} }
...@@ -333,8 +333,8 @@ namespace __atomic0 ...@@ -333,8 +333,8 @@ namespace __atomic0
compare_exchange_weak(__integral_type& __i1, __integral_type __i2, compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
memory_order __m1, memory_order __m2) volatile memory_order __m1, memory_order __m2) volatile
{ {
__glibcxx_assert(__m2 == memory_order_release); __glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 == memory_order_acq_rel); __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1); __glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1); return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
} }
...@@ -351,8 +351,8 @@ namespace __atomic0 ...@@ -351,8 +351,8 @@ namespace __atomic0
compare_exchange_strong(__integral_type& __i1, __integral_type __i2, compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
memory_order __m1, memory_order __m2) volatile memory_order __m1, memory_order __m2) volatile
{ {
__glibcxx_assert(__m2 == memory_order_release); __glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 == memory_order_acq_rel); __glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1); __glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1); return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
} }
......
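For reference, the corrected assertions encode the C++0x rules on which memory_order values each operation may use: a store must not carry acquire semantics, a load must not carry release semantics, and a compare-exchange failure order is a load order that may not be stronger than the success order. The following standalone restatement of those checks is a sketch, not libstdc++ code; the helper names check_store, check_load, and check_compare_exchange are illustrative only:

    #include <cassert>

    // Declaration order matches the C++0x enumeration; that shared order is
    // what makes the "failure not stronger than success" test a simple <=,
    // exactly as in the __glibcxx_assert(__m2 <= __m1) lines above.
    enum memory_order { memory_order_relaxed, memory_order_consume,
                        memory_order_acquire, memory_order_release,
                        memory_order_acq_rel, memory_order_seq_cst };

    // A store must not carry acquire semantics.
    void check_store(memory_order m)
    {
      assert(m != memory_order_consume);
      assert(m != memory_order_acquire);
      assert(m != memory_order_acq_rel);
    }

    // A load must not carry release semantics.
    void check_load(memory_order m)
    {
      assert(m != memory_order_release);
      assert(m != memory_order_acq_rel);
    }

    // The failure order of a compare-exchange is a load order and may not
    // be stronger than the success order.
    void check_compare_exchange(memory_order success, memory_order failure)
    {
      check_load(failure);
      assert(failure <= success);
    }

    int main()
    {
      check_store(memory_order_release);                          // fine
      check_load(memory_order_acquire);                           // fine
      check_compare_exchange(memory_order_acq_rel,
                             memory_order_acquire);               // fine
      // check_store(memory_order_acquire);                       // would abort
      return 0;
    }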
--- a/libstdc++-v3/include/bits/atomic_2.h
+++ b/libstdc++-v3/include/bits/atomic_2.h
@@ -65,6 +65,10 @@ namespace __atomic2
       void
       clear(memory_order __m = memory_order_seq_cst) volatile
       {
+	__glibcxx_assert(__m != memory_order_consume);
+	__glibcxx_assert(__m != memory_order_acquire);
+	__glibcxx_assert(__m != memory_order_acq_rel);
+
 	__sync_lock_release(&_M_i);
 	if (__m != memory_order_acquire && __m != memory_order_acq_rel)
 	  __sync_synchronize();
@@ -93,9 +97,9 @@ namespace __atomic2
       void
       store(void* __v, memory_order __m = memory_order_seq_cst) volatile
       {
-	__glibcxx_assert(__m == memory_order_acquire);
-	__glibcxx_assert(__m == memory_order_acq_rel);
-	__glibcxx_assert(__m == memory_order_consume);
+	__glibcxx_assert(__m != memory_order_acquire);
+	__glibcxx_assert(__m != memory_order_acq_rel);
+	__glibcxx_assert(__m != memory_order_consume);
 	if (__m == memory_order_relaxed)
 	  _M_i = __v;
@@ -111,8 +115,8 @@ namespace __atomic2
       void*
       load(memory_order __m = memory_order_seq_cst) const volatile
       {
-	__glibcxx_assert(__m == memory_order_release);
-	__glibcxx_assert(__m == memory_order_acq_rel);
+	__glibcxx_assert(__m != memory_order_release);
+	__glibcxx_assert(__m != memory_order_acq_rel);
 	__sync_synchronize();
 	void* __ret = _M_i;
@@ -144,8 +148,8 @@ namespace __atomic2
       compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
			      memory_order __m2) volatile
       {
-	__glibcxx_assert(__m2 == memory_order_release);
-	__glibcxx_assert(__m2 == memory_order_acq_rel);
+	__glibcxx_assert(__m2 != memory_order_release);
+	__glibcxx_assert(__m2 != memory_order_acq_rel);
 	__glibcxx_assert(__m2 <= __m1);
 	void* __v1o = __v1;
@@ -284,9 +288,9 @@ namespace __atomic2
       store(__integral_type __i,
	    memory_order __m = memory_order_seq_cst) volatile
       {
-	__glibcxx_assert(__m == memory_order_acquire);
-	__glibcxx_assert(__m == memory_order_acq_rel);
-	__glibcxx_assert(__m == memory_order_consume);
+	__glibcxx_assert(__m != memory_order_acquire);
+	__glibcxx_assert(__m != memory_order_acq_rel);
+	__glibcxx_assert(__m != memory_order_consume);
 	if (__m == memory_order_relaxed)
 	  _M_i = __i;
@@ -302,8 +306,8 @@ namespace __atomic2
       __integral_type
       load(memory_order __m = memory_order_seq_cst) const volatile
       {
-	__glibcxx_assert(__m == memory_order_release);
-	__glibcxx_assert(__m == memory_order_acq_rel);
+	__glibcxx_assert(__m != memory_order_release);
+	__glibcxx_assert(__m != memory_order_acq_rel);
 	__sync_synchronize();
 	__integral_type __ret = _M_i;
@@ -336,8 +340,8 @@ namespace __atomic2
       compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
			      memory_order __m1, memory_order __m2) volatile
       {
-	__glibcxx_assert(__m2 == memory_order_release);
-	__glibcxx_assert(__m2 == memory_order_acq_rel);
+	__glibcxx_assert(__m2 != memory_order_release);
+	__glibcxx_assert(__m2 != memory_order_acq_rel);
 	__glibcxx_assert(__m2 <= __m1);
 	__integral_type __i1o = __i1;
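Note that the first atomic_2.h hunk adds assertions to atomic_flag::clear() rather than reversing existing ones: clear() is a store-like operation, so the acquire-flavoured orders are rejected there too. A usage sketch under the same GCC 4.4-era assumptions (ATOMIC_FLAG_INIT follows the C++0x draft spelling):

    #define _GLIBCXX_DEBUG
    #include <cstdatomic>

    std::atomic_flag flag = ATOMIC_FLAG_INIT;

    void spin_lock()
    { // acquire semantics are fine on the read-modify-write side
      while (flag.test_and_set(std::memory_order_acquire)) { }
    }

    void spin_unlock()
    { // clear() is a store: relaxed, release and seq_cst are allowed;
      // consume, acquire and acq_rel now trip __glibcxx_assert in debug mode
      flag.clear(std::memory_order_release);
    }

    int main()
    {
      spin_lock();
      spin_unlock();
      return 0;
    }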