lvzhengyang / riscv-gcc-1 / Commits

Commit e67e39c2
Authored May 31, 2011 by H.J. Lu; committed by Dodji Seketeli, May 31, 2011

    Revert accidental svn commit r174473

    From-SVN: r174480

Parent: cea8c6de

Showing 4 changed files with 34 additions and 118 deletions (+34 -118):

    libjava/ChangeLog              +11   -0
    libjava/configure.host          +2   -2
    libjava/sysdep/i386/locks.h    +21  -33
    libjava/sysdep/x86-64/locks.h   +0  -83
libjava/ChangeLog
View file @ e67e39c2

2011-05-31  H.J. Lu  <hongjiu.lu@intel.com>

	PR libgcj/49193
	* configure.host (sysdeps_dir): Set to i386 for x86_64.
	* sysdep/i386/locks.h (compare_and_swap): Call
	__sync_bool_compare_and_swap.
	(release_set): Call write_barrier ().
	* sysdep/x86-64/locks.h: Removed.

2011-04-24  Gerald Pfeifer  <gerald@pfeifer.com>

	* README: Refer to our generic bug reporting page.
...
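The core of the change is replacing hand-rolled cmpxchg assembly with GCC's atomic builtin. As a rough illustration (not part of the commit; the variable names are mine), __sync_bool_compare_and_swap(addr, old, new_val) atomically stores new_val into *addr only if *addr still equals old, returns true on success, and acts as a full memory barrier:

// Minimal sketch, assuming GCC or Clang on any target with a native CAS.
#include <cstddef>
#include <cstdio>

typedef size_t obj_addr_t;

int main()
{
  volatile obj_addr_t word = 1;

  // Succeeds: *addr == old, so new_val is stored and true is returned.
  bool ok = __sync_bool_compare_and_swap(&word, (obj_addr_t)1, (obj_addr_t)2);

  // Fails: *addr is now 2, not 1, so nothing is stored.
  bool stale = __sync_bool_compare_and_swap(&word, (obj_addr_t)1, (obj_addr_t)3);

  std::printf("ok=%d stale=%d word=%zu\n", ok, stale, (size_t)word);  // ok=1 stale=0 word=2
  return 0;
}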
libjava/configure.host
View file @ e67e39c2

...
@@ -132,7 +132,7 @@ case "${host}" in
 	slow_pthread_self=yes
 	;;
   x86_64-*)
-	sysdeps_dir=x86-64
+	sysdeps_dir=i386
 	# For 64-bit we always use SSE registers for arithmetic,
 	# which doesn't have the extra precision problems of the fpu.
 	# But be careful about 32-bit multilibs.
...
@@ -279,7 +279,7 @@ EOF
 	slow_pthread_self=
 	;;
   i[34567]86-*-solaris2.1[0-9]* )
-	sysdeps_dir=x86-64
+	sysdeps_dir=i386
 	DIVIDESPEC=-f%{m32:no-}%{!m32:%{!m64:no-}}%{m64:}use-divide-subroutine
 	;;
   mips-sgi-irix6* )
...
libjava/sysdep/i386/locks.h
View file @ e67e39c2

 /* locks.h - Thread synchronization primitives. X86/x86-64 implementation.
-   Copyright (C) 2002 Free Software Foundation
+   Copyright (C) 2002, 2011 Free Software Foundation
 
    This file is part of libgcj.
...
@@ -23,19 +23,25 @@ compare_and_swap(volatile obj_addr_t *addr,
 		 obj_addr_t old,
 		 obj_addr_t new_val)
 {
-  char result;
-#ifdef __x86_64__
-  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
-		       : "=m"(*(addr)), "=q"(result)
-		       : "r" (new_val), "a"(old), "m"(*addr)
-		       : "memory");
-#else
-  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
-		       : "=m"(*addr), "=q"(result)
-		       : "r" (new_val), "a"(old), "m"(*addr)
-		       : "memory");
-#endif
-  return (bool) result;
+  return __sync_bool_compare_and_swap (addr, old, new_val);
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+// On X86/x86-64, the hardware ensures that reads are properly ordered.
+inline static void
+read_barrier()
+{
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+  /* x86-64/X86 does not reorder writes.  We just need to ensure that
+     gcc also doesn't.  */
+  __asm__ __volatile__(" " : : : "memory");
 }
 
 // Set *addr to new_val with release semantics, i.e. making sure
...
@@ -46,7 +52,7 @@ compare_and_swap(volatile obj_addr_t *addr,
 inline static void
 release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
 {
-  __asm__ __volatile__(" " : : : "memory");
+  write_barrier ();
   *(addr) = new_val;
 }
...
@@ -60,22 +66,4 @@ compare_and_swap_release(volatile obj_addr_t *addr,
 {
   return compare_and_swap(addr, old, new_val);
 }
-
-// Ensure that subsequent instructions do not execute on stale
-// data that was loaded from memory before the barrier.
-// On X86/x86-64, the hardware ensures that reads are properly ordered.
-inline static void
-read_barrier()
-{
-}
-
-// Ensure that prior stores to memory are completed with respect to other
-// processors.
-inline static void
-write_barrier()
-{
-  /* x86-64/X86 does not reorder writes.  We just need to ensure that
-     gcc also doesn't.  */
-  __asm__ __volatile__(" " : : : "memory");
-}
 
 #endif
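The patch also moves read_barrier/write_barrier above release_set so that release_set can call write_barrier: on x86's strongly ordered (TSO) memory model stores are never reordered with earlier stores, so the "barrier" only needs to stop the compiler, and the release store itself can be a plain assignment. A small self-contained sketch of how these primitives compose (my illustration, not commit code; spin_lock/spin_unlock are hypothetical names):

// Illustrative spinlock built on the patched primitives.
// Assumption: GCC on x86/x86-64, single translation unit.
#include <cstddef>

typedef size_t obj_addr_t;

inline static bool
compare_and_swap(volatile obj_addr_t *addr, obj_addr_t old, obj_addr_t new_val)
{
  // Full barrier; acquire semantics are included.
  return __sync_bool_compare_and_swap(addr, old, new_val);
}

inline static void
write_barrier()
{
  // x86 never reorders store/store; only the compiler must be restrained.
  __asm__ __volatile__(" " : : : "memory");
}

inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  write_barrier();            // earlier stores become visible before the flag store
  *addr = new_val;
}

static volatile obj_addr_t lock_word = 0;

void spin_lock()
{
  while (!compare_and_swap(&lock_word, 0, 1))
    ;                         // spin until the 0 -> 1 transition succeeds
}

void spin_unlock()
{
  release_set(&lock_word, 0); // publish critical-section writes, then release
}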
libjava/sysdep/x86-64/locks.h
deleted 100644 → 0
View file @ cea8c6de

/* locks.h - Thread synchronization primitives. X86/x86-64 implementation.

   Copyright (C) 2002  Free Software Foundation

   Contributed by Bo Thorsen <bo@suse.de>.

   This file is part of libgcj.

This software is copyrighted work licensed under the terms of the
Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
details.  */

#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__

typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
				/* address.				*/

// Atomically replace *addr by new_val if it was initially equal to old.
// Return true if the comparison succeeded.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
		 obj_addr_t old,
		 obj_addr_t new_val)
{
  char result;
#ifdef __x86_64__
  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
		       : "=m"(*(addr)), "=q"(result)
		       : "r" (new_val), "a"(old), "m"(*addr)
		       : "memory");
#else
  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
		       : "=m"(*addr), "=q"(result)
		       : "r" (new_val), "a"(old), "m"(*addr)
		       : "memory");
#endif
  return (bool) result;
}

// Set *addr to new_val with release semantics, i.e. making sure
// that prior loads and stores complete before this
// assignment.
// On X86/x86-64, the hardware shouldn't reorder reads and writes,
// so we just have to convince gcc not to do it either.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__(" " : : : "memory");
  *(addr) = new_val;
}

// Compare_and_swap with release semantics instead of acquire semantics.
// On many architectures, the operation makes both guarantees, so the
// implementation can be the same.
inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
			 obj_addr_t old,
			 obj_addr_t new_val)
{
  return compare_and_swap(addr, old, new_val);
}

// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// On X86/x86-64, the hardware ensures that reads are properly ordered.
inline static void
read_barrier()
{
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  /* x86-64/X86 does not reorder writes.  We just need to ensure that
     gcc also doesn't.  */
  __asm__ __volatile__(" " : : : "memory");
}

#endif
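The deleted header implemented CAS directly with the x86 `lock cmpxchg` instruction: the expected value is pinned to the accumulator register via the "a" constraint, cmpxchg compares it against *addr and conditionally stores the new value, and setz copies the resulting zero flag into a byte that becomes the return value. A standalone sketch of the same idiom (my reconstruction for illustration; assumes GCC-style inline asm on x86-64, and cas64 is a hypothetical name):

// Demo of the lock; cmpxchgq / setz idiom from the deleted header.
// Assumption: x86-64 target, GCC or Clang.
#include <cstddef>
#include <cstdio>

static bool cas64(volatile size_t *addr, size_t expected, size_t desired)
{
  char result;
  // cmpxchgq compares %rax (the "a" input, holding `expected`) with *addr;
  // if they match it stores `desired` and sets ZF, which setz records.
  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
                       : "=m"(*addr), "=q"(result)
                       : "r"(desired), "a"(expected), "m"(*addr)
                       : "memory");
  return (bool) result;
}

int main()
{
  volatile size_t word = 7;
  bool first  = cas64(&word, 7, 8);   // matches: stores 8, returns true
  bool second = cas64(&word, 7, 9);   // stale expectation: leaves 8, false
  std::printf("first=%d second=%d word=%zu\n", first, second, (size_t)word);
  return 0;
}

The 2011 change drops this file entirely because __sync_bool_compare_and_swap generates the equivalent cmpxchgl/cmpxchgq for both 32- and 64-bit targets, so one i386 header can serve x86_64 as well.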