Commit ef6c2c53, authored Mar 07, 2012 by Walter Lee, committed by Walter Lee on Mar 07, 2012
Rename some internal atomic macros to have a less generic prefix.
From-SVN: r185074
Parent: ca538e97
Showing 3 changed files with 85 additions and 71 deletions:
  libgcc/ChangeLog                +12   -0
  libgcc/config/tilepro/atomic.c   +6  -12
  libgcc/config/tilepro/atomic.h  +67  -59
libgcc/ChangeLog
2012-03-07  Walter Lee  <walt@tilera.com>

	* config/tilepro/atomic.c: Rename "atomic_" prefix to
	"arch_atomic_".
	(atomic_xor): Rename and move definition to
	config/tilepro/atomic.h.
	(atomic_nand): Ditto.
	* config/tilepro/atomic.h: Rename "atomic_" prefix to
	"arch_atomic_".
	(arch_atomic_xor): Move from config/tilepro/atomic.c.
	(arch_atomic_nand): Ditto.

2012-03-07  Georg-Johann Lay  <avr@gjlay.de>

	PR target/52507
	...
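The rename is mechanical: every internal helper spelled atomic_* or __atomic_* in these two files gains an arch_ prefix, presumably to keep the private helpers visually distinct from the __atomic_fetch_*_<size> entry points that atomic.c itself emits. Below is a minimal, stand-alone sketch of that wrapper pattern, not the libgcc sources: the demo_* names are invented, and a GCC __sync builtin stands in for the TILEPro helper.

/* Hypothetical sketch of the wrapper pattern the atomic.c hunks touch.
   The emitted entry point keeps a generic name; the helper it calls
   now carries the arch_ prefix.  Not the libgcc code.  */
#include <stdio.h>

/* Stand-in for the renamed TILEPro helper (real code uses cmpexch4 or
   an swint1 syscall); here a portable __sync builtin is used.  */
#define arch_atomic_add(mem, value) __sync_fetch_and_add((mem), (value))

static void pre_atomic_barrier(int model)  { (void)model; __sync_synchronize(); }
static void post_atomic_barrier(int model) { (void)model; __sync_synchronize(); }

/* Same shape as __atomic_fetch_and_do(type, size, opname) in atomic.c.  */
#define fetch_and_do(type, size, opname)                        \
  type demo_fetch_##opname##_##size(type *p, type i, int model) \
  {                                                             \
    pre_atomic_barrier(model);                                  \
    type rv = arch_atomic_##opname(p, i);                       \
    post_atomic_barrier(model);                                 \
    return rv;                                                  \
  }

fetch_and_do(int, 4, add)        /* defines demo_fetch_add_4 */

int main(void)
{
  int x = 40;
  int old = demo_fetch_add_4(&x, 2, 0);
  printf("old=%d new=%d\n", old, x);   /* old=40 new=42 */
  return 0;
}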
libgcc/config/tilepro/atomic.c
...
@@ -63,18 +63,12 @@ post_atomic_barrier (int model)
 #define __unused __attribute__((unused))
-/* Provide additional methods not implemented by atomic.h. */
-#define atomic_xor(mem, mask) \
-  __atomic_update_cmpxchg(mem, mask, __old ^ __value)
-#define atomic_nand(mem, mask) \
-  __atomic_update_cmpxchg(mem, mask, ~(__old & __value))
 #define __atomic_fetch_and_do(type, size, opname) \
 type \
 __atomic_fetch_##opname##_##size(type* p, type i, int model) \
 { \
   pre_atomic_barrier(model); \
-  type rv = atomic_##opname(p, i); \
+  type rv = arch_atomic_##opname(p, i); \
   post_atomic_barrier(model); \
   return rv; \
 }
...
@@ -96,7 +90,7 @@ type \
 __atomic_##opname##_fetch_##size(type* p, type i, int model) \
 { \
   pre_atomic_barrier(model); \
-  type rv = atomic_##opname(p, i) op i; \
+  type rv = arch_atomic_##opname(p, i) op i; \
   post_atomic_barrier(model); \
   return rv; \
 }
...
@@ -120,7 +114,7 @@ __atomic_compare_exchange_##size(volatile type* ptr, type* oldvalp, \
 { \
   type oldval = *oldvalp; \
   pre_atomic_barrier(models); \
-  type retval = atomic_val_compare_and_exchange(ptr, oldval, newval); \
+  type retval = arch_atomic_val_compare_and_exchange(ptr, oldval, newval); \
   post_atomic_barrier(models); \
   bool success = (retval == oldval); \
   *oldvalp = retval; \
...
@@ -131,7 +125,7 @@ type \
 __atomic_exchange_##size(volatile type* ptr, type val, int model) \
 { \
   pre_atomic_barrier(model); \
-  type retval = atomic_exchange(ptr, val); \
+  type retval = arch_atomic_exchange(ptr, val); \
   post_atomic_barrier(model); \
   return retval; \
 }
...
@@ -159,7 +153,7 @@ __atomic_compare_exchange_##size(volatile type* ptr, type* guess, \
   type oldval = (oldword >> shift) & valmask; \
   if (__builtin_expect((oldval == *guess), 1)) { \
     unsigned int word = (oldword & bgmask) | ((val & valmask) << shift); \
-    oldword = atomic_val_compare_and_exchange(p, oldword, word); \
+    oldword = arch_atomic_val_compare_and_exchange(p, oldword, word); \
     oldval = (oldword >> shift) & valmask; \
   } \
   post_atomic_barrier(models); \
...
@@ -187,7 +181,7 @@ proto \
     oldval = (oldword >> shift) & valmask; \
     val = expr; \
     unsigned int word = (oldword & bgmask) | ((val & valmask) << shift); \
-    xword = atomic_val_compare_and_exchange(p, oldword, word); \
+    xword = arch_atomic_val_compare_and_exchange(p, oldword, word); \
   } while (__builtin_expect(xword != oldword, 0)); \
   bottom \
 }
...
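The last two hunks above keep the existing sub-word idiom: operate on the aligned 32-bit word that contains the 1- or 2-byte target, splice the new bytes in under a mask, and retry the full-word compare-and-exchange until no other core has raced the update. A rough, self-contained sketch of that loop follows; it assumes a little-endian layout, the demo_* names are invented, and __sync_val_compare_and_swap stands in for arch_atomic_val_compare_and_exchange. It is not the libgcc code.

#include <stdint.h>
#include <stdio.h>

static uint8_t demo_fetch_add_1(volatile uint8_t *p, uint8_t i)
{
  /* Aligned 32-bit word containing *p, and the target byte's bit offset.  */
  volatile uint32_t *word_p = (volatile uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
  unsigned shift = 8u * (unsigned)((uintptr_t)p & 3);
  uint32_t mask = (uint32_t)0xff << shift;

  uint32_t oldword = *word_p, xword;
  uint8_t oldval;
  do
    {
      oldval = (uint8_t)((oldword >> shift) & 0xffu);
      uint32_t newword =
        (oldword & ~mask) | (((uint32_t)(uint8_t)(oldval + i) << shift) & mask);
      xword = oldword;
      /* Full-word CAS; if another core changed the word, adopt its value and retry.  */
      oldword = __sync_val_compare_and_swap(word_p, oldword, newword);
    }
  while (oldword != xword);
  return oldval;
}

int main(void)
{
  _Alignas(4) uint8_t bytes[4] = { 1, 2, 3, 4 };
  uint8_t old = demo_fetch_add_1(&bytes[2], 5);
  printf("old=%u new=%u\n", old, bytes[2]);   /* old=3 new=8 */
  return 0;
}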
libgcc/config/tilepro/atomic.h
...
@@ -104,8 +104,8 @@
 /* 32-bit integer compare-and-exchange.  */
 static __inline __attribute__ ((always_inline))
-int atomic_val_compare_and_exchange_4 (volatile int *mem,
-                                       int oldval, int newval)
+int arch_atomic_val_compare_and_exchange_4 (volatile int *mem,
+                                            int oldval, int newval)
 {
 #ifdef __tilegx__
   __insn_mtspr (SPR_CMPEXCH_VALUE, oldval);
...
@@ -123,9 +123,9 @@ static __inline __attribute__ ((always_inline))
 /* 64-bit integer compare-and-exchange.  */
 static __inline __attribute__ ((always_inline))
-int64_t atomic_val_compare_and_exchange_8 (volatile int64_t * mem,
-                                           int64_t oldval,
-                                           int64_t newval)
+int64_t arch_atomic_val_compare_and_exchange_8 (volatile int64_t * mem,
+                                                int64_t oldval,
+                                                int64_t newval)
 {
 #ifdef __tilegx__
   __insn_mtspr (SPR_CMPEXCH_VALUE, oldval);
...
@@ -146,41 +146,41 @@ static __inline __attribute__ ((always_inline))
 /* This non-existent symbol is called for sizes other than "4" and "8",
    indicating a bug in the caller. */
-extern int __atomic_error_bad_argument_size (void)
+extern int __arch_atomic_error_bad_argument_size (void)
   __attribute__ ((warning ("sizeof atomic argument not 4 or 8")));
-#define atomic_val_compare_and_exchange(mem, o, n) \
+#define arch_atomic_val_compare_and_exchange(mem, o, n) \
   ({ \
     (__typeof(*(mem)))(__typeof(*(mem)-*(mem))) \
     ((sizeof(*(mem)) == 8) ? \
-     atomic_val_compare_and_exchange_8( \
+     arch_atomic_val_compare_and_exchange_8( \
       (volatile int64_t*)(mem), (__typeof((o)-(o)))(o), \
       (__typeof((n)-(n)))(n)) : \
     (sizeof(*(mem)) == 4) ? \
-     atomic_val_compare_and_exchange_4( \
+     arch_atomic_val_compare_and_exchange_4( \
      (volatile int*)(mem), (__typeof((o)-(o)))(o), \
      (__typeof((n)-(n)))(n)) : \
-     __atomic_error_bad_argument_size()); \
+     __arch_atomic_error_bad_argument_size()); \
   })
-#define atomic_bool_compare_and_exchange(mem, o, n) \
+#define arch_atomic_bool_compare_and_exchange(mem, o, n) \
   ({ \
     __typeof(o) __o = (o); \
     __builtin_expect( \
-      __o == atomic_val_compare_and_exchange((mem), __o, (n)), 1); \
+      __o == arch_atomic_val_compare_and_exchange((mem), __o, (n)), 1); \
   })
 /* Loop with compare_and_exchange until we guess the correct value.
    Normally "expr" will be an expression using __old and __value. */
-#define __atomic_update_cmpxchg(mem, value, expr) \
+#define __arch_atomic_update_cmpxchg(mem, value, expr) \
   ({ \
     __typeof(value) __value = (value); \
     __typeof(*(mem)) *__mem = (mem), __old = *__mem, __guess; \
     do { \
       __guess = __old; \
-      __old = atomic_val_compare_and_exchange(__mem, __old, (expr)); \
+      __old = arch_atomic_val_compare_and_exchange(__mem, __old, (expr)); \
    } while (__builtin_expect(__old != __guess, 0)); \
     __old; \
   })
...
@@ -189,14 +189,14 @@ extern int __atomic_error_bad_argument_size (void)
 /* Generic atomic op with 8- or 4-byte variant.
    The _mask, _addend, and _expr arguments are ignored on tilegx. */
-#define __atomic_update(mem, value, op, _mask, _addend, _expr) \
+#define __arch_atomic_update(mem, value, op, _mask, _addend, _expr) \
   ({ \
     ((__typeof(*(mem))) \
      ((sizeof(*(mem)) == 8) ? (__typeof(*(mem)-*(mem)))__insn_##op( \
       (void *)(mem), (int64_t)(__typeof((value)-(value)))(value)) : \
      (sizeof(*(mem)) == 4) ? (int)__insn_##op##4( \
       (void *)(mem), (int32_t)(__typeof((value)-(value)))(value)) : \
-     __atomic_error_bad_argument_size())); \
+     __arch_atomic_error_bad_argument_size())); \
   })
 #else
...
@@ -211,7 +211,7 @@ extern int __atomic_error_bad_argument_size (void)
    Only 32-bit support is provided. */
 static __inline __attribute__ ((always_inline))
 int
-__atomic_update_4 (volatile int *mem, int mask, int addend)
+__arch_atomic_update_4 (volatile int *mem, int mask, int addend)
 {
   int result;
   __asm__ __volatile__ ("swint1" : "=R00" (result),
...
@@ -224,48 +224,55 @@ static __inline __attribute__ ((always_inline))
 /* Generic atomic op with 8- or 4-byte variant.
    The _op argument is ignored on tilepro. */
-#define __atomic_update(mem, value, _op, mask, addend, expr) \
+#define __arch_atomic_update(mem, value, _op, mask, addend, expr) \
   ({ \
     (__typeof(*(mem)))(__typeof(*(mem)-*(mem))) \
     ((sizeof(*(mem)) == 8) ? \
-     __atomic_update_cmpxchg((mem), (value), (expr)) : \
+     __arch_atomic_update_cmpxchg((mem), (value), (expr)) : \
     (sizeof(*(mem)) == 4) ? \
-     __atomic_update_4((volatile int*)(mem), (__typeof((mask)-(mask)))(mask), \
-                       (__typeof((addend)-(addend)))(addend)) : \
-     __atomic_error_bad_argument_size()); \
+     __arch_atomic_update_4((volatile int*)(mem), \
+                            (__typeof((mask)-(mask)))(mask), \
+                            (__typeof((addend)-(addend)))(addend)) : \
+     __arch_atomic_error_bad_argument_size()); \
   })
 #endif /* __tilegx__ */
-#define atomic_exchange(mem, newvalue) \
-  __atomic_update(mem, newvalue, exch, 0, newvalue, __value)
+#define arch_atomic_exchange(mem, newvalue) \
+  __arch_atomic_update(mem, newvalue, exch, 0, newvalue, __value)
-#define atomic_add(mem, value) \
-  __atomic_update(mem, value, fetchadd, -1, value, __old + __value)
+#define arch_atomic_add(mem, value) \
+  __arch_atomic_update(mem, value, fetchadd, -1, value, __old + __value)
-#define atomic_sub(mem, value) atomic_add((mem), -(value))
+#define arch_atomic_sub(mem, value) arch_atomic_add((mem), -(value))
-#define atomic_increment(mem) atomic_add((mem), 1)
+#define arch_atomic_increment(mem) arch_atomic_add((mem), 1)
-#define atomic_decrement(mem) atomic_add((mem), -1)
+#define arch_atomic_decrement(mem) arch_atomic_add((mem), -1)
-#define atomic_and(mem, mask) \
-  __atomic_update(mem, mask, fetchand, mask, 0, __old & __value)
+#define arch_atomic_and(mem, mask) \
+  __arch_atomic_update(mem, mask, fetchand, mask, 0, __old & __value)
-#define atomic_or(mem, mask) \
-  __atomic_update(mem, mask, fetchor, ~mask, mask, __old | __value)
+#define arch_atomic_or(mem, mask) \
+  __arch_atomic_update(mem, mask, fetchor, ~mask, mask, __old | __value)
-#define atomic_bit_set(mem, bit) \
+#define arch_atomic_xor(mem, mask) \
+  __arch_atomic_update_cmpxchg(mem, mask, __old ^ __value)
+#define arch_atomic_nand(mem, mask) \
+  __arch_atomic_update_cmpxchg(mem, mask, ~(__old & __value))
+#define arch_atomic_bit_set(mem, bit) \
   ({ \
     __typeof(*(mem)) __mask = (__typeof(*(mem)))1 << (bit); \
-    __mask & atomic_or((mem), __mask); \
+    __mask & arch_atomic_or((mem), __mask); \
   })
-#define atomic_bit_clear(mem, bit) \
+#define arch_atomic_bit_clear(mem, bit) \
  ({ \
    __typeof(*(mem)) __mask = (__typeof(*(mem)))1 << (bit); \
-   __mask & atomic_and((mem), ~__mask); \
+   __mask & arch_atomic_and((mem), ~__mask); \
  })
 #ifdef __tilegx__
...
@@ -275,9 +282,9 @@ static __inline __attribute__ ((always_inline))
    This accessor is provided for compatibility with TILEPro, which
    required an explicit atomic operation for stores that needed
    to be atomic with respect to other atomic methods in this header. */
-#define atomic_write(mem, value) ((void) (*(mem) = (value)))
+#define arch_atomic_write(mem, value) ((void) (*(mem) = (value)))
 #else
-#define atomic_write(mem, value) \
+#define arch_atomic_write(mem, value) \
   do { \
     __typeof(mem) __aw_mem = (mem); \
     __typeof(value) __aw_val = (value); \
...
@@ -285,26 +292,26 @@ static __inline __attribute__ ((always_inline))
     __aw_intval = (__typeof((value) - (value)))__aw_val; \
     switch (sizeof(*__aw_mem)) { \
     case 8: \
-      __atomic_update_cmpxchg(__aw_mem, __aw_val, __value); \
+      __arch_atomic_update_cmpxchg(__aw_mem, __aw_val, __value); \
       break; \
     case 4: \
-      __atomic_update_4((int *)__aw_mem, 0, __aw_intval); \
+      __arch_atomic_update_4((int *)__aw_mem, 0, __aw_intval); \
       break; \
     case 2: \
       __aw_off = 8 * ((long)__aw_mem & 0x2); \
       __aw_mask = 0xffffU << __aw_off; \
       __aw_mem32 = (unsigned int *)((long)__aw_mem & ~0x2); \
       __aw_val32 = (__aw_intval << __aw_off) & __aw_mask; \
-      __atomic_update_cmpxchg(__aw_mem32, __aw_val32, \
-                              (__old & ~__aw_mask) | __value); \
+      __arch_atomic_update_cmpxchg(__aw_mem32, __aw_val32, \
+                                   (__old & ~__aw_mask) | __value); \
       break; \
     case 1: \
       __aw_off = 8 * ((long)__aw_mem & 0x3); \
       __aw_mask = 0xffU << __aw_off; \
       __aw_mem32 = (unsigned int *)((long)__aw_mem & ~0x3); \
       __aw_val32 = (__aw_intval << __aw_off) & __aw_mask; \
-      __atomic_update_cmpxchg(__aw_mem32, __aw_val32, \
-                              (__old & ~__aw_mask) | __value); \
+      __arch_atomic_update_cmpxchg(__aw_mem32, __aw_val32, \
+                                   (__old & ~__aw_mask) | __value); \
       break; \
     } \
   } while (0)
...
@@ -315,15 +322,15 @@ static __inline __attribute__ ((always_inline))
    This macro prevents loads or stores from being moved by the compiler
    across the macro. Any loaded value that was loaded before this
    macro must then be reloaded by the compiler. */
-#define atomic_compiler_barrier() __asm__ __volatile__("" ::: "memory")
+#define arch_atomic_compiler_barrier() __asm__ __volatile__("" ::: "memory")
 /* Full memory barrier.
-   This macro has the semantics of atomic_compiler_barrer(), but also
+   This macro has the semantics of arch_atomic_compiler_barrer(), but also
    ensures that previous stores are visible to other cores, and that
    all previous loaded values have been placed into their target
    register on this core. */
-#define atomic_full_barrier() __insn_mf()
+#define arch_atomic_full_barrier() __insn_mf()
 /* Read memory barrier.
...
@@ -335,9 +342,9 @@ static __inline __attribute__ ((always_inline))
    On current TILE chips a read barrier is implemented as a full barrier,
    but this may not be true in later versions of the architecture.
-   See also atomic_acquire_barrier() for the appropriate idiom to use
+   See also arch_atomic_acquire_barrier() for the appropriate idiom to use
    to ensure no reads are lifted above an atomic lock instruction. */
-#define atomic_read_barrier() atomic_full_barrier()
+#define arch_atomic_read_barrier() arch_atomic_full_barrier()
 /* Write memory barrier.
...
@@ -349,9 +356,9 @@ static __inline __attribute__ ((always_inline))
    On current TILE chips a write barrier is implemented as a full barrier,
    but this may not be true in later versions of the architecture.
-   See also atomic_release_barrier() for the appropriate idiom to use
+   See also arch_atomic_release_barrier() for the appropriate idiom to use
    to ensure all writes are complete prior to an atomic unlock instruction. */
-#define atomic_write_barrier() atomic_full_barrier()
+#define arch_atomic_write_barrier() arch_atomic_full_barrier()
 /* Lock acquisition barrier.
...
@@ -367,10 +374,10 @@ static __inline __attribute__ ((always_inline))
    This should be done after the atomic operation that actually
    acquires the lock, and in conjunction with a "control dependency"
    that checks the atomic operation result to see if the lock was
-   in fact acquired. See the atomic_read_barrier() macro
+   in fact acquired. See the arch_atomic_read_barrier() macro
    for a heavier-weight barrier to use in certain unusual constructs,
-   or atomic_acquire_barrier_value() if no control dependency exists. */
+   or arch_atomic_acquire_barrier_value() if no control dependency exists. */
-#define atomic_acquire_barrier() atomic_compiler_barrier()
+#define arch_atomic_acquire_barrier() arch_atomic_compiler_barrier()
 /* Lock release barrier.
...
@@ -383,7 +390,7 @@ static __inline __attribute__ ((always_inline))
    for locking, that is, when leaving a critical section. This should
    be done before the operation (such as a store of zero) that
    actually releases the lock. */
-#define atomic_release_barrier() atomic_write_barrier()
+#define arch_atomic_release_barrier() arch_atomic_write_barrier()
 /* Barrier until the read of a particular value is complete.
...
@@ -400,7 +407,7 @@ static __inline __attribute__ ((always_inline))
    atomic instruction, even if the value itself is not checked. This
    guarantees that if the atomic instruction succeeded in taking the lock,
    the lock was held before any reads in the critical section issued. */
-#define atomic_acquire_barrier_value(val) \
+#define arch_atomic_acquire_barrier_value(val) \
   __asm__ __volatile__("move %0, %0" :: "r"(val))
 /* Access the given variable in memory exactly once.
...
@@ -421,8 +428,9 @@ static __inline __attribute__ ((always_inline))
    Note that multiple uses of this macro are guaranteed to be ordered,
    i.e. the compiler will not reorder stores or loads that are wrapped
-   in atomic_access_once(). */
+   in arch_atomic_access_once(). */
-#define atomic_access_once(x) (*(volatile __typeof(x) *)&(x))
+#define arch_atomic_access_once(x) (*(volatile __typeof(x) *)&(x))
 #endif /* !_ATOMIC_H_ */
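atomic.h now also hosts arch_atomic_xor and arch_atomic_nand, both expressed through __arch_atomic_update_cmpxchg, the "guess, compare-and-exchange, retry" statement expression shown in the @@ -146 hunk above. A hedged sketch of that idiom with portable stand-ins follows: the demo_* names are invented and __sync_val_compare_and_swap replaces arch_atomic_val_compare_and_exchange; this is not the shipped header.

#include <stdio.h>

/* Retry loop: __old and __value are the names the real header exposes
   to the expr argument; the result is the value seen just before the
   successful update.  */
#define demo_update_cmpxchg(mem, value, expr)                      \
  ({                                                               \
    __typeof(value) __value = (value);                             \
    __typeof(*(mem)) *__mem = (mem), __old = *__mem, __guess;      \
    do {                                                           \
      __guess = __old;                                             \
      __old = __sync_val_compare_and_swap(__mem, __old, (expr));   \
    } while (__old != __guess);                                    \
    __old;                                                         \
  })

/* Same shape as the two macros this commit moves into atomic.h.  */
#define demo_atomic_xor(mem, mask)  demo_update_cmpxchg(mem, mask, __old ^ __value)
#define demo_atomic_nand(mem, mask) demo_update_cmpxchg(mem, mask, ~(__old & __value))

int main(void)
{
  int x = 0xf0;
  int before = demo_atomic_xor(&x, 0xff);
  printf("before=0x%x after=0x%x\n", before, x);  /* before=0xf0 after=0xf */
  return 0;
}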