Commit e67e39c2 authored by H.J. Lu, committed by Dodji Seketeli

Revert accidental svn commit r174473

From-SVN: r174480
parent cea8c6de
2011-05-31  H.J. Lu  <hongjiu.lu@intel.com>

	PR libgcj/49193
	* configure.host (sysdeps_dir): Set to i386 for x86_64.
	* sysdep/i386/locks.h (compare_and_swap): Call
	__sync_bool_compare_and_swap.
	(release_set): Call write_barrier ().
	* sysdep/x86-64/locks.h: Removed.

2011-04-24  Gerald Pfeifer  <gerald@pfeifer.com>

	* README: Refer to our generic bug reporting page.
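The PR libgcj/49193 change referenced above swaps the hand-written cmpxchg asm for GCC's __sync_bool_compare_and_swap builtin (available since GCC 4.1 and documented as a full memory barrier, so it also covers the acquire semantics the old asm provided). A minimal standalone sketch of the builtin's behavior; the file name and variables are illustrative, not from the tree:

/* cas_demo.c - illustrative only; build with: gcc cas_demo.c */
#include <stdio.h>
#include <stddef.h>

int
main (void)
{
  size_t word = 0;

  /* Atomically replace word with 1 if it still holds 0; returns
     nonzero on success, like compare_and_swap in locks.h.  */
  if (__sync_bool_compare_and_swap (&word, (size_t) 0, (size_t) 1))
    printf ("first swap succeeded, word = %zu\n", word);

  /* A second attempt with the now-stale expected value 0 fails.  */
  if (!__sync_bool_compare_and_swap (&word, (size_t) 0, (size_t) 2))
    printf ("second swap failed, word = %zu\n", word);

  return 0;
}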
libjava/configure.host
@@ -132,7 +132,7 @@ case "${host}" in
 	slow_pthread_self=yes
 	;;
   x86_64-*)
-	sysdeps_dir=x86-64
+	sysdeps_dir=i386
 	# For 64-bit we always use SSE registers for arithmetic,
 	# which doesn't have the extra precision problems of the fpu.
 	# But be careful about 32-bit multilibs.
@@ -279,7 +279,7 @@ EOF
 	slow_pthread_self=
 	;;
   i[34567]86-*-solaris2.1[0-9]* )
-	sysdeps_dir=x86-64
+	sysdeps_dir=i386
 	DIVIDESPEC=-f%{m32:no-}%{!m32:%{!m64:no-}}%{m64:}use-divide-subroutine
 	;;
   mips-sgi-irix6* )
libjava/sysdep/i386/locks.h
 /* locks.h - Thread synchronization primitives. X86/x86-64 implementation.
-   Copyright (C) 2002 Free Software Foundation
+   Copyright (C) 2002, 2011 Free Software Foundation
 
    This file is part of libgcj.
@@ -23,19 +23,25 @@ compare_and_swap(volatile obj_addr_t *addr,
 		 obj_addr_t old,
 		 obj_addr_t new_val)
 {
-  char result;
-#ifdef __x86_64__
-  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
-		       : "=m"(*(addr)), "=q"(result)
-		       : "r" (new_val), "a"(old), "m"(*addr)
-		       : "memory");
-#else
-  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
-		       : "=m"(*addr), "=q"(result)
-		       : "r" (new_val), "a"(old), "m"(*addr)
-		       : "memory");
-#endif
-  return (bool) result;
+  return __sync_bool_compare_and_swap (addr, old, new_val);
 }
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+// On X86/x86-64, the hardware ensures that reads are properly ordered.
+inline static void
+read_barrier()
+{
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+  /* x86-64/X86 does not reorder writes.  We just need to ensure that
+     gcc also doesn't.  */
+  __asm__ __volatile__(" " : : : "memory");
+}
 
 // Set *addr to new_val with release semantics, i.e. making sure
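Aside: the near-empty asm with a "memory" clobber in write_barrier emits no instruction at all; it only stops GCC from moving or caching memory accesses across it, which is all x86's strongly ordered stores need. A minimal sketch of the idea; compiler_barrier, publish, payload, and published are illustrative names, not libgcj code:

/* barrier_demo.c - illustrative only */
static int payload;
static volatile int published;

static inline void
compiler_barrier (void)
{
  /* No instruction is emitted; the "memory" clobber tells GCC that
     memory may change here, so it cannot sink the payload store
     below this point or hoist later accesses above it.  */
  __asm__ __volatile__("" : : : "memory");
}

void
publish (int v)
{
  payload = v;          /* data store */
  compiler_barrier ();  /* keep the data store ahead of the flag */
  published = 1;        /* flag store; x86 preserves store order */
}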
@@ -46,7 +52,7 @@ compare_and_swap(volatile obj_addr_t *addr,
 inline static void
 release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
 {
-  __asm__ __volatile__(" " : : : "memory");
+  write_barrier ();
   *(addr) = new_val;
 }
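To see why release_set issues write_barrier before the plain store, consider a producer/consumer pair built on the primitives above; the names here are hypothetical, not from libgcj:

/* Assumes the locks.h definitions above are in scope.  */
static int message;
static volatile obj_addr_t ready;

void
producer (void)
{
  message = 42;
  release_set (&ready, 1);   /* barrier, then flag store */
}

int
consumer (void)
{
  while (ready == 0)
    ;                 /* spin until the flag is published */
  read_barrier ();    /* a no-op on x86; loads are already ordered */
  return message;     /* sees 42, never a stale value */
}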
@@ -60,22 +66,4 @@ compare_and_swap_release(volatile obj_addr_t *addr,
 {
   return compare_and_swap(addr, old, new_val);
 }
-
-// Ensure that subsequent instructions do not execute on stale
-// data that was loaded from memory before the barrier.
-// On X86/x86-64, the hardware ensures that reads are properly ordered.
-inline static void
-read_barrier()
-{
-}
-
-// Ensure that prior stores to memory are completed with respect to other
-// processors.
-inline static void
-write_barrier()
-{
-  /* x86-64/X86 does not reorder writes.  We just need to ensure that
-     gcc also doesn't.  */
-  __asm__ __volatile__(" " : : : "memory");
-}
 #endif
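Taken together, these primitives suffice for a simple spinlock of the kind a runtime builds on top of them; the sketch below is illustrative and is not libgcj's actual locking code:

/* Assumes the locks.h definitions above are in scope.  */
static volatile obj_addr_t lock_word;   /* 0 = free, 1 = held */

static void
spin_lock (void)
{
  /* compare_and_swap has acquire semantics, so the critical section
     cannot be reordered above a successful swap.  */
  while (!compare_and_swap (&lock_word, 0, 1))
    ;   /* busy-wait; a real lock would back off or yield */
}

static void
spin_unlock (void)
{
  /* release_set orders the critical section's stores before the
     store that frees the lock.  */
  release_set (&lock_word, 0);
}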
libjava/sysdep/x86-64/locks.h (removed)
/* locks.h - Thread synchronization primitives. X86/x86-64 implementation.

   Copyright (C) 2002 Free Software Foundation

   Contributed by Bo Thorsen <bo@suse.de>.

   This file is part of libgcj.

   This software is copyrighted work licensed under the terms of the
   Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
   details.  */

#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__

typedef size_t obj_addr_t;	/* Integer type big enough for object */
				/* address.                           */

// Atomically replace *addr by new_val if it was initially equal to old.
// Return true if the comparison succeeded.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
		 obj_addr_t old,
		 obj_addr_t new_val)
{
  char result;
#ifdef __x86_64__
  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
		       : "=m"(*(addr)), "=q"(result)
		       : "r" (new_val), "a"(old), "m"(*addr)
		       : "memory");
#else
  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
		       : "=m"(*addr), "=q"(result)
		       : "r" (new_val), "a"(old), "m"(*addr)
		       : "memory");
#endif
  return (bool) result;
}

// Set *addr to new_val with release semantics, i.e. making sure
// that prior loads and stores complete before this
// assignment.
// On X86/x86-64, the hardware shouldn't reorder reads and writes,
// so we just have to convince gcc not to do it either.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__(" " : : : "memory");
  *(addr) = new_val;
}

// Compare_and_swap with release semantics instead of acquire semantics.
// On many architectures, the operation makes both guarantees, so the
// implementation can be the same.
inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
			 obj_addr_t old,
			 obj_addr_t new_val)
{
  return compare_and_swap(addr, old, new_val);
}

// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// On X86/x86-64, the hardware ensures that reads are properly ordered.
inline static void
read_barrier()
{
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  /* x86-64/X86 does not reorder writes.  We just need to ensure that
     gcc also doesn't.  */
  __asm__ __volatile__(" " : : : "memory");
}

#endif
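Since the substance of the change is that the builtin and the hand-written cmpxchg behave identically, one quick sanity check is to run both against the same values. This harness is illustrative; cas_asm and the file name are assumptions, not names from the tree:

/* cas_check.c - illustrative only; build with: gcc -O2 cas_check.c */
#include <assert.h>
#include <stddef.h>

typedef size_t obj_addr_t;

static int
cas_asm (volatile obj_addr_t *addr, obj_addr_t old, obj_addr_t new_val)
{
  char result;
#ifdef __x86_64__
  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
		       : "=m"(*addr), "=q"(result)
		       : "r" (new_val), "a"(old), "m"(*addr)
		       : "memory");
#else
  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
		       : "=m"(*addr), "=q"(result)
		       : "r" (new_val), "a"(old), "m"(*addr)
		       : "memory");
#endif
  return result;
}

int
main (void)
{
  volatile obj_addr_t a = 5, b = 5;

  /* Both succeed from the same starting state...  */
  assert (cas_asm (&a, 5, 9));
  assert (__sync_bool_compare_and_swap (&b, (obj_addr_t) 5, (obj_addr_t) 9));
  assert (a == 9 && b == 9);

  /* ...and both fail once the expected value is stale.  */
  assert (!cas_asm (&a, 5, 7));
  assert (!__sync_bool_compare_and_swap (&b, (obj_addr_t) 5, (obj_addr_t) 7));
  return 0;
}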