Commit 4ba5ca46
Authored Nov 27, 2012 by Kostya Serebryany; committed by Kostya Serebryany, Nov 27, 2012

[libsanitizer] merge from upstream r168699

From-SVN: r193849
Parent: 169d8507

Showing 26 changed files with 182 additions and 76 deletions (+182 -76)
libsanitizer/ChangeLog                                    +4   -0
libsanitizer/MERGE                                        +1   -1
libsanitizer/asan/asan_allocator.cc                       +10  -8
libsanitizer/asan/asan_linux.cc                           +3   -2
libsanitizer/asan/asan_mac.cc                             +5   -5
libsanitizer/asan/asan_malloc_mac.cc                      +1   -1
libsanitizer/asan/asan_mapping.h                          +6   -1
libsanitizer/asan/asan_rtl.cc                             +7   -5
libsanitizer/asan/asan_stats.cc                           +1   -1
libsanitizer/asan/asan_thread.cc                          +5   -4
libsanitizer/sanitizer_common/sanitizer_allocator.cc      +1   -1
libsanitizer/sanitizer_common/sanitizer_allocator64.h     +13  -10
libsanitizer/sanitizer_common/sanitizer_common.cc         +11  -3
libsanitizer/sanitizer_common/sanitizer_common.h          +4   -13
libsanitizer/sanitizer_common/sanitizer_posix.cc          +12  -4
libsanitizer/sanitizer_common/sanitizer_stacktrace.cc     +1   -1
libsanitizer/sanitizer_common/sanitizer_win.cc            +8   -0
libsanitizer/tsan/tsan_interceptors.cc                    +3   -3
libsanitizer/tsan/tsan_interface.h                        +3   -0
libsanitizer/tsan/tsan_interface_atomic.cc                +0   -0
libsanitizer/tsan/tsan_interface_atomic.h                 +65  -12
libsanitizer/tsan/tsan_interface_inl.h                    +8   -0
libsanitizer/tsan/tsan_platform.h                         +1   -1
libsanitizer/tsan/tsan_rtl.h                              +1   -0
libsanitizer/tsan/tsan_stat.cc                            +6   -0
libsanitizer/tsan/tsan_stat.h                             +2   -0
libsanitizer/ChangeLog

+2012-11-27  Kostya Serebryany  <kcc@google.com>
+
+	* All files: Merge from upstream r168699.
+
 2012-11-24  Kostya Serebryany  <kcc@google.com>
 	    Jack Howarth  <howarth@bromo.med.uc.edu>
 ...
libsanitizer/MERGE

-168514
+168699
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
libsanitizer/asan/asan_allocator.cc

@@ -130,7 +130,7 @@ static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
 }
 
 static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
-  CHECK(IsAligned(size, kPageSize));
+  CHECK(IsAligned(size, GetPageSizeCached()));
   u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
   PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
   if (flags()->debug) {
@@ -532,12 +532,13 @@ class MallocInfo {
     uptr mmap_size = Max(size, kMinMmapSize);
     uptr n_chunks = mmap_size / size;
     CHECK(n_chunks * size == mmap_size);
-    if (size < kPageSize) {
+    uptr PageSize = GetPageSizeCached();
+    if (size < PageSize) {
       // Size is small, just poison the last chunk.
       n_chunks--;
     } else {
       // Size is large, allocate an extra page at right and poison it.
-      mmap_size += kPageSize;
+      mmap_size += PageSize;
     }
     CHECK(n_chunks > 0);
     u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
@@ -811,18 +812,19 @@ void *asan_realloc(void *p, uptr size, StackTrace *stack) {
 }
 
 void *asan_valloc(uptr size, StackTrace *stack) {
-  void *ptr = (void*)Allocate(kPageSize, size, stack);
+  void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack);
   __asan_malloc_hook(ptr, size);
   return ptr;
 }
 
 void *asan_pvalloc(uptr size, StackTrace *stack) {
-  size = RoundUpTo(size, kPageSize);
+  uptr PageSize = GetPageSizeCached();
+  size = RoundUpTo(size, PageSize);
   if (size == 0) {
     // pvalloc(0) should allocate one page.
-    size = kPageSize;
+    size = PageSize;
   }
-  void *ptr = (void*)Allocate(kPageSize, size, stack);
+  void *ptr = (void*)Allocate(PageSize, size, stack);
   __asan_malloc_hook(ptr, size);
   return ptr;
 }
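For reference, valloc returns page-aligned memory and pvalloc additionally rounds the request itself up to a whole number of pages. Note the order in the hunk: RoundUpTo first, then the size == 0 check, because rounding 0 up leaves it 0. A minimal sketch of the sizing rule (the helper name is illustrative, not from the patch):

// Hypothetical helper mirroring the pvalloc sizing logic above.
// Assumes page is a power of two, e.g. 4096.
unsigned long pvalloc_size(unsigned long n, unsigned long page) {
  n = (n + page - 1) & ~(page - 1);  // RoundUpTo(n, PageSize)
  if (n == 0)
    n = page;                        // pvalloc(0) should allocate one page
  return n;                          // 0 -> 4096, 1 -> 4096, 4097 -> 8192
}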
@@ -941,7 +943,7 @@ uptr FakeStack::ClassMmapSize(uptr size_class) {
 }
 
 void FakeStack::AllocateOneSizeClass(uptr size_class) {
-  CHECK(ClassMmapSize(size_class) >= kPageSize);
+  CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
   uptr new_mem = (uptr)MmapOrDie(ClassMmapSize(size_class), __FUNCTION__);
   // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
libsanitizer/asan/asan_linux.cc

@@ -174,9 +174,10 @@ void ClearShadowMemoryForContext(void *context) {
   uptr sp = (uptr)ucp->uc_stack.ss_sp;
   uptr size = ucp->uc_stack.ss_size;
   // Align to page size.
-  uptr bottom = sp & ~(kPageSize - 1);
+  uptr PageSize = GetPageSizeCached();
+  uptr bottom = sp & ~(PageSize - 1);
   size += sp - bottom;
-  size = RoundUpTo(size, kPageSize);
+  size = RoundUpTo(size, PageSize);
   PoisonShadow(bottom, size, 0);
 }
 #else
libsanitizer/asan/asan_mac.cc

@@ -182,11 +182,11 @@ void ClearShadowMemoryForContext(void *context) {
 static void *island_allocator_pos = 0;
 
 #if SANITIZER_WORDSIZE == 32
-# define kIslandEnd (0xffdf0000 - kPageSize)
-# define kIslandBeg (kIslandEnd - 256 * kPageSize)
+# define kIslandEnd (0xffdf0000 - GetPageSizeCached())
+# define kIslandBeg (kIslandEnd - 256 * GetPageSizeCached())
 #else
-# define kIslandEnd (0x7fffffdf0000 - kPageSize)
-# define kIslandBeg (kIslandEnd - 256 * kPageSize)
+# define kIslandEnd (0x7fffffdf0000 - GetPageSizeCached())
+# define kIslandBeg (kIslandEnd - 256 * GetPageSizeCached())
 #endif
 
 extern "C"

@@ -210,7 +210,7 @@ mach_error_t __interception_allocate_island(void **ptr,
     internal_memset(island_allocator_pos, 0xCC, kIslandEnd - kIslandBeg);
   };
   *ptr = island_allocator_pos;
-  island_allocator_pos = (char*)island_allocator_pos + kPageSize;
+  island_allocator_pos = (char*)island_allocator_pos + GetPageSizeCached();
   if (flags()->verbosity) {
     Report("Branch island allocated at %p\n", *ptr);
   }
libsanitizer/asan/asan_malloc_mac.cc

@@ -163,7 +163,7 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
     return malloc_zone_valloc(system_malloc_zone, size);
   }
   GET_STACK_TRACE_HERE_FOR_MALLOC;
-  return asan_memalign(kPageSize, size, &stack);
+  return asan_memalign(GetPageSizeCached(), size, &stack);
 }
 
 #define GET_ZONE_FOR_PTR(ptr) \
libsanitizer/asan/asan_mapping.h

@@ -66,7 +66,12 @@ extern __attribute__((visibility("default"))) uptr __asan_mapping_offset;
 #define kHighShadowBeg  MEM_TO_SHADOW(kHighMemBeg)
 #define kHighShadowEnd  MEM_TO_SHADOW(kHighMemEnd)
-#define kShadowGapBeg   (kLowShadowEnd ? kLowShadowEnd + 1 : 16 * kPageSize)
+// With the zero shadow base we can not actually map pages starting from 0.
+// This constant is somewhat arbitrary.
+#define kZeroBaseShadowStart (1 << 18)
+
+#define kShadowGapBeg   (kLowShadowEnd ? kLowShadowEnd + 1 \
+                                       : kZeroBaseShadowStart)
 #define kShadowGapEnd   (kHighShadowBeg - 1)
 
 #define kGlobalAndStackRedzone \
libsanitizer/asan/asan_rtl.cc

@@ -163,8 +163,8 @@ void ShowStatsAndAbort() {
 // ---------------------- mmap -------------------- {{{1
 // Reserve memory range [beg, end].
 static void ReserveShadowMemoryRange(uptr beg, uptr end) {
-  CHECK((beg % kPageSize) == 0);
-  CHECK(((end + 1) % kPageSize) == 0);
+  CHECK((beg % GetPageSizeCached()) == 0);
+  CHECK(((end + 1) % GetPageSizeCached()) == 0);
   uptr size = end - beg + 1;
   void *res = MmapFixedNoReserve(beg, size);
   if (res != (void*)beg) {

@@ -269,8 +269,9 @@ void NOINLINE __asan_handle_no_return() {
   int local_stack;
   AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
   CHECK(curr_thread);
+  uptr PageSize = GetPageSizeCached();
   uptr top = curr_thread->stack_top();
-  uptr bottom = ((uptr)&local_stack - kPageSize) & ~(kPageSize - 1);
+  uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
   PoisonShadow(bottom, top - bottom, 0);
 }

@@ -347,12 +348,13 @@ void __asan_init() {
 }
 
   uptr shadow_start = kLowShadowBeg;
-  if (kLowShadowBeg > 0) shadow_start -= kMmapGranularity;
+  if (kLowShadowBeg > 0) shadow_start -= GetMmapGranularity();
   uptr shadow_end = kHighShadowEnd;
   if (MemoryRangeIsAvailable(shadow_start, shadow_end)) {
     if (kLowShadowBeg != kLowShadowEnd) {
       // mmap the low shadow plus at least one page.
-      ReserveShadowMemoryRange(kLowShadowBeg - kMmapGranularity, kLowShadowEnd);
+      ReserveShadowMemoryRange(kLowShadowBeg - GetMmapGranularity(),
+                               kLowShadowEnd);
     }
     // mmap the high shadow.
     ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
libsanitizer/asan/asan_stats.cc

@@ -41,7 +41,7 @@ void AsanStats::Print() {
   Printf("Stats: %zuM really freed by %zu calls\n",
          really_freed >> 20, real_frees);
   Printf("Stats: %zuM (%zu full pages) mmaped in %zu calls\n",
-         mmaped >> 20, mmaped / kPageSize, mmaps);
+         mmaped >> 20, mmaped / GetPageSizeCached(), mmaps);
   PrintMallocStatsArray("  mmaps   by size class: ", mmaped_by_size);
   PrintMallocStatsArray("  mallocs by size class: ", malloced_by_size);
libsanitizer/asan/asan_thread.cc

@@ -26,15 +26,16 @@ AsanThread::AsanThread(LinkerInitialized x)
 AsanThread *AsanThread::Create(u32 parent_tid, thread_callback_t start_routine,
                                void *arg, StackTrace *stack) {
-  uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+  uptr PageSize = GetPageSizeCached();
+  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
   AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
   thread->start_routine_ = start_routine;
   thread->arg_ = arg;
-  const uptr kSummaryAllocSize = kPageSize;
+  const uptr kSummaryAllocSize = PageSize;
   CHECK_LE(sizeof(AsanThreadSummary), kSummaryAllocSize);
   AsanThreadSummary *summary =
-      (AsanThreadSummary*)MmapOrDie(kPageSize, "AsanThreadSummary");
+      (AsanThreadSummary*)MmapOrDie(PageSize, "AsanThreadSummary");
   summary->Init(parent_tid, stack);
   summary->set_thread(thread);
   thread->set_summary(summary);

@@ -64,7 +65,7 @@ void AsanThread::Destroy() {
   // and we don't want it to have any poisoned stack.
   ClearShadowForThreadStack();
   fake_stack().Cleanup();
-  uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
   UnmapOrDie(this, size);
 }
libsanitizer/sanitizer_common/sanitizer_allocator.cc

@@ -61,7 +61,7 @@ void *LowLevelAllocator::Allocate(uptr size) {
   // Align allocation size.
   size = RoundUpTo(size, 8);
   if (allocated_end_ - allocated_current_ < (sptr)size) {
-    uptr size_to_allocate = Max(size, kPageSize);
+    uptr size_to_allocate = Max(size, GetPageSizeCached());
     allocated_current_ = (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
     allocated_end_ = allocated_current_ + size_to_allocate;
libsanitizer/sanitizer_common/sanitizer_allocator64.h

@@ -215,7 +215,6 @@ class SizeClassAllocator64 {
 }
 
   static uptr AllocBeg()  { return kSpaceBeg; }
-  static uptr AllocEnd()  { return kSpaceBeg + kSpaceSize + AdditionalSize(); }
   static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
 
   static const uptr kNumClasses = 256;  // Power of two <= 256

@@ -241,7 +240,7 @@ class SizeClassAllocator64 {
   static uptr AdditionalSize() {
     uptr res = sizeof(RegionInfo) * kNumClasses;
-    CHECK_EQ(res % kPageSize, 0);
+    CHECK_EQ(res % GetPageSizeCached(), 0);
     return res;
   }

@@ -364,17 +363,18 @@ class LargeMmapAllocator {
  public:
   void Init() {
     internal_memset(this, 0, sizeof(*this));
+    page_size_ = GetPageSizeCached();
   }
   void *Allocate(uptr size, uptr alignment) {
     CHECK(IsPowerOfTwo(alignment));
     uptr map_size = RoundUpMapSize(size);
-    if (alignment > kPageSize)
+    if (alignment > page_size_)
       map_size += alignment;
     if (map_size < size) return 0;  // Overflow.
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDie(map_size, "LargeMmapAllocator"));
     uptr map_end = map_beg + map_size;
-    uptr res = map_beg + kPageSize;
+    uptr res = map_beg + page_size_;
     if (res & (alignment - 1))  // Align.
       res += alignment - (res & (alignment - 1));
     CHECK_EQ(0, res & (alignment - 1));

@@ -421,7 +421,7 @@ class LargeMmapAllocator {
   bool PointerIsMine(void *p) {
     // Fast check.
-    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
+    if ((reinterpret_cast<uptr>(p) & (page_size_ - 1))) return false;
     SpinMutexLock l(&mutex_);
     for (Header *l = list_; l; l = l->next) {
       if (GetUser(l) == p) return true;

@@ -430,10 +430,10 @@ class LargeMmapAllocator {
   }
   uptr GetActuallyAllocatedSize(void *p) {
-    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
+    return RoundUpMapSize(GetHeader(p)->size) - page_size_;
   }
-  // At least kPageSize/2 metadata bytes is available.
+  // At least page_size_/2 metadata bytes is available.
   void *GetMetaData(void *p) {
     return GetHeader(p) + 1;
   }

@@ -457,17 +457,20 @@ class LargeMmapAllocator {
     Header *prev;
   };
 
-  Header *GetHeader(uptr p) { return reinterpret_cast<Header*>(p - kPageSize); }
+  Header *GetHeader(uptr p) {
+    return reinterpret_cast<Header*>(p - page_size_);
+  }
   Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
 
   void *GetUser(Header *h) {
-    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
+    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
   }
 
   uptr RoundUpMapSize(uptr size) {
-    return RoundUpTo(size, kPageSize) + kPageSize;
+    return RoundUpTo(size, page_size_) + page_size_;
   }
 
+  uptr page_size_;
   Header *list_;
   SpinMutex mutex_;
 };
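The GetHeader/GetUser/RoundUpMapSize trio above encodes LargeMmapAllocator's layout: every mapping begins with one page holding the Header, and the user block starts page_size_ bytes into the mapping. A standalone sketch of that arithmetic (helper names are illustrative; the Header fields are reduced to what the hunk shows):

typedef unsigned long uptr;

struct Header {
  uptr size;
  Header *next;
  Header *prev;
};

// One extra page in front of every allocation holds the header, so the
// user pointer is page-aligned and sits exactly one page past the header.
Header *HeaderFromUser(uptr user, uptr page_size) {
  return reinterpret_cast<Header*>(user - page_size);
}

uptr UserFromHeader(Header *h, uptr page_size) {
  return reinterpret_cast<uptr>(h) + page_size;
}

// Map size = user size rounded up to whole pages, plus the header page.
// This is why PointerIsMine can first reject any non-page-aligned pointer.
uptr MapSize(uptr size, uptr page_size) {
  return ((size + page_size - 1) & ~(page_size - 1)) + page_size;
}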
libsanitizer/sanitizer_common/sanitizer_common.cc

@@ -14,6 +14,13 @@
 namespace __sanitizer {
 
+uptr GetPageSizeCached() {
+  static uptr PageSize;
+  if (!PageSize)
+    PageSize = GetPageSize();
+  return PageSize;
+}
+
 // By default, dump to stderr. If report_fd is kInvalidFd, try to obtain file
 // descriptor by opening file in report_path.
 static fd_t report_fd = kStderrFd;
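The lazy-caching idiom above is what lets every former kPageSize site call GetPageSizeCached() without paying for a system call each time. A standalone sketch of the same pattern, outside the library (the benign-race remark is an observation, not from the patch):

// Illustration of the GetPageSizeCached idiom, assuming only <unistd.h>.
// If two threads race on first use, both compute and store the same value,
// so the duplicate write is harmless in practice.
#include <unistd.h>

typedef unsigned long uptr;

static uptr PageSizeOnce;

uptr MyGetPageSizeCached() {
  if (!PageSizeOnce)
    PageSizeOnce = (uptr)sysconf(_SC_PAGESIZE);
  return PageSizeOnce;
}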
@@ -75,7 +82,8 @@ void RawWrite(const char *buffer) {
 uptr ReadFileToBuffer(const char *file_name, char **buff,
                       uptr *buff_size, uptr max_len) {
-  const uptr kMinFileLen = kPageSize;
+  uptr PageSize = GetPageSizeCached();
+  uptr kMinFileLen = PageSize;
   uptr read_len = 0;
   *buff = 0;
   *buff_size = 0;

@@ -89,8 +97,8 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
     // Read up to one page at a time.
     read_len = 0;
     bool reached_eof = false;
-    while (read_len + kPageSize <= size) {
-      uptr just_read = internal_read(fd, *buff + read_len, kPageSize);
+    while (read_len + PageSize <= size) {
+      uptr just_read = internal_read(fd, *buff + read_len, PageSize);
       if (just_read == 0) {
         reached_eof = true;
         break;
libsanitizer/sanitizer_common/sanitizer_common.h

@@ -21,25 +21,16 @@ namespace __sanitizer {
 // Constants.
 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
 const uptr kWordSizeInBits = 8 * kWordSize;
 #if defined(__powerpc__) || defined(__powerpc64__)
-// Current PPC64 kernels use 64K pages sizes, but they can be
-// configured with 4K or even other sizes.
-// We may want to use getpagesize() or sysconf(_SC_PAGESIZE) here rather than
-// hardcoding the values, but today these values need to be compile-time
-// constants.
-const uptr kPageSize = 1UL << 16;
 const uptr kCacheLineSize = 128;
-const uptr kMmapGranularity = kPageSize;
-#elif !defined(_WIN32)
-const uptr kPageSize = 1UL << 12;
-const uptr kCacheLineSize = 64;
-const uptr kMmapGranularity = kPageSize;
 #else
-const uptr kPageSize = 1UL << 12;
 const uptr kCacheLineSize = 64;
-const uptr kMmapGranularity = 1UL << 16;
 #endif
+
+uptr GetPageSize();
+uptr GetPageSizeCached();
+uptr GetMmapGranularity();
 // Threads
 int GetPid();
 uptr GetTid();
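With the compile-time kPageSize gone (it could not be right for PPC64, where the kernel page size is configurable), all alignment arithmetic in the patch switches to a runtime value. The two recurring formulas, shown as hypothetical standalone helpers (RoundUpTo follows the usual power-of-two formula; the library's exact definition may differ):

#include <assert.h>

typedef unsigned long uptr;

// Round size up to a multiple of boundary (a power of two), as in
// RoundUpTo(size, GetPageSizeCached()) throughout this commit.
uptr RoundUpTo(uptr size, uptr boundary) {
  assert((boundary & (boundary - 1)) == 0);  // power of two
  return (size + boundary - 1) & ~(boundary - 1);
}

// Align an address down to a boundary, as in sp & ~(PageSize - 1) in
// asan_linux.cc and MmapFixedNoReserve.
uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}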
libsanitizer/sanitizer_common/sanitizer_posix.cc

@@ -30,6 +30,13 @@
 namespace __sanitizer {
 
 // ------------- sanitizer_common.h
+uptr GetPageSize() {
+  return sysconf(_SC_PAGESIZE);
+}
+
+uptr GetMmapGranularity() {
+  return GetPageSize();
+}
+
 int GetPid() {
   return getpid();

@@ -40,7 +47,7 @@ uptr GetThreadSelf() {
 }
 
 void *MmapOrDie(uptr size, const char *mem_type) {
-  size = RoundUpTo(size, kPageSize);
+  size = RoundUpTo(size, GetPageSizeCached());
   void *res = internal_mmap(0, size,
                             PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANON, -1, 0);

@@ -72,8 +79,9 @@ void UnmapOrDie(void *addr, uptr size) {
 }
 
 void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
-  void *p = internal_mmap((void*)(fixed_addr & ~(kPageSize - 1)),
-                          RoundUpTo(size, kPageSize),
+  uptr PageSize = GetPageSizeCached();
+  void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+                          RoundUpTo(size, PageSize),
                           PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
                           -1, 0);

@@ -96,7 +104,7 @@ void *MapFileToMemory(const char *file_name, uptr *buff_size) {
   uptr fsize = internal_filesize(fd);
   CHECK_NE(fsize, (uptr)-1);
   CHECK_GT(fsize, 0);
-  *buff_size = RoundUpTo(fsize, kPageSize);
+  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
   void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
   return (map == MAP_FAILED) ? 0 : map;
 }
libsanitizer/sanitizer_common/sanitizer_stacktrace.cc

@@ -63,7 +63,7 @@ void StackTrace::PrintStack(const uptr *addr, uptr size,
                             bool symbolize, const char *strip_file_prefix,
                             SymbolizeCallback symbolize_callback) {
   MemoryMappingLayout proc_maps;
-  InternalScopedBuffer<char> buff(kPageSize * 2);
+  InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
   InternalScopedBuffer<AddressInfo> addr_frames(64);
   uptr frame_num = 0;
   for (uptr i = 0; i < size && addr[i]; i++) {
libsanitizer/sanitizer_common/sanitizer_win.cc

@@ -21,6 +21,14 @@
 namespace __sanitizer {
 
 // --------------------- sanitizer_common.h
+uptr GetPageSize() {
+  return 1U << 14;  // FIXME: is this configurable?
+}
+
+uptr GetMmapGranularity() {
+  return 1U << 16;  // FIXME: is this configurable?
+}
+
 bool FileExists(const char *filename) {
   UNIMPLEMENTED();
 }
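The FIXMEs suggest these Windows values should be queried rather than hardcoded. A sketch of what such a query could look like via GetSystemInfo (an assumption about the eventual fix, not part of the patch):

#include <windows.h>

unsigned long QueryPageSize() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;               // commonly 0x1000
}

unsigned long QueryMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;  // commonly 0x10000 (64K)
}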
libsanitizer/tsan/tsan_interceptors.cc

@@ -564,13 +564,13 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
   SCOPED_TSAN_INTERCEPTOR(valloc, sz);
-  return user_alloc(thr, pc, sz, kPageSize);
+  return user_alloc(thr, pc, sz, GetPageSizeCached());
 }
 
 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
   SCOPED_TSAN_INTERCEPTOR(pvalloc, sz);
-  sz = RoundUp(sz, kPageSize);
-  return user_alloc(thr, pc, sz, kPageSize);
+  sz = RoundUp(sz, GetPageSizeCached());
+  return user_alloc(thr, pc, sz, GetPageSizeCached());
 }
 
 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
libsanitizer/tsan/tsan_interface.h

@@ -42,6 +42,9 @@ void __tsan_vptr_update(void **vptr_p, void *new_val);
 void __tsan_func_entry(void *call_pc);
 void __tsan_func_exit();
 
+void __tsan_read_range(void *addr, unsigned long size);  // NOLINT
+void __tsan_write_range(void *addr, unsigned long size);  // NOLINT
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
libsanitizer/tsan/tsan_interface_atomic.cc

(This diff is collapsed in the original view and is not reproduced here.)
libsanitizer/tsan/tsan_interface_atomic.h

@@ -20,6 +20,15 @@ typedef short __tsan_atomic16;  // NOLINT
 typedef int   __tsan_atomic32;
 typedef long  __tsan_atomic64;  // NOLINT
 
+#if defined(__SIZEOF_INT128__) \
+    || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char     __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
+
 // Part of ABI, do not change.
 // http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
 typedef enum {
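A hypothetical caller showing how the __TSAN_HAS_INT128 feature macro introduced above is meant to gate uses of the new 128-bit entry points (the function name is illustrative):

#if __TSAN_HAS_INT128
/* Only reference the 128-bit entry points when the build has __int128. */
__tsan_atomic128 load128_acquire(const volatile __tsan_atomic128 *p) {
  return __tsan_atomic128_load(p, __tsan_memory_order_acquire);
}
#endif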
@@ -39,6 +48,8 @@ __tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
                                      __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
                                      __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+                                       __tsan_memory_order mo);
 
 void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
                           __tsan_memory_order mo);

@@ -48,6 +59,8 @@ void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
                            __tsan_memory_order mo);
 void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
                            __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+                            __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
                                        __tsan_atomic8 v, __tsan_memory_order mo);

@@ -57,6 +70,8 @@ __tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
                                          __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
                                          __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+                                           __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
                                         __tsan_atomic8 v, __tsan_memory_order mo);

@@ -66,6 +81,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
                                           __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
                                           __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+                                            __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
                                         __tsan_atomic8 v, __tsan_memory_order mo);

@@ -75,6 +92,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
                                           __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
                                           __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+                                            __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
                                         __tsan_atomic8 v, __tsan_memory_order mo);

@@ -84,6 +103,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
                                           __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
                                           __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+                                            __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
                                        __tsan_atomic8 v, __tsan_memory_order mo);

@@ -93,6 +114,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
                                          __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
                                          __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+                                           __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
                                         __tsan_atomic8 v, __tsan_memory_order mo);
@@ -102,37 +125,67 @@ __tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
                                           __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
                                           __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+                                            __tsan_atomic128 v, __tsan_memory_order mo);
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+                                         __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+                                           __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+                                           __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+                                           __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+                                             __tsan_atomic128 v, __tsan_memory_order mo);
 
 int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 __tsan_atomic8 __tsan_atomic8_compare_exchange_val(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 c, __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 __tsan_atomic16 __tsan_atomic16_compare_exchange_val(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 c, __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 __tsan_atomic32 __tsan_atomic32_compare_exchange_val(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 c, __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 __tsan_atomic64 __tsan_atomic64_compare_exchange_val(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 c, __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 void __tsan_atomic_thread_fence(__tsan_memory_order mo);
 void __tsan_atomic_signal_fence(__tsan_memory_order mo);
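The extra fail_mo parameter mirrors the C11 compare-exchange API, where the memory order on the failure path (which only performs a load) may be weaker than the order on success. A hypothetical caller:

int cas_acquire(volatile __tsan_atomic32 *a, __tsan_atomic32 *expected,
                __tsan_atomic32 desired) {
  /* acquire on success; the failing load only needs relaxed ordering */
  return __tsan_atomic32_compare_exchange_strong(
      a, expected, desired,
      __tsan_memory_order_acquire, __tsan_memory_order_relaxed);
}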
libsanitizer/tsan/tsan_interface_inl.h

@@ -61,3 +61,11 @@ void __tsan_func_entry(void *pc) {
 void __tsan_func_exit() {
   FuncExit(cur_thread());
 }
+
+void __tsan_read_range(void *addr, uptr size) {
+  MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+  MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
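An illustration (not part of the patch) of how the new range entry points can wrap a bulk operation, so tsan records one access per buffer instead of one per byte:

void traced_copy(void *dst, const void *src, unsigned long n) {
  __tsan_read_range((void *)src, n);  /* one range access for the source */
  __tsan_write_range(dst, n);         /* one range access for the target */
  __builtin_memcpy(dst, src, n);
}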
libsanitizer/tsan/tsan_platform.h

@@ -50,7 +50,7 @@ static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
 static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
 static const uptr kLinuxShadowEnd =
-  MemToShadow(kLinuxAppMemEnd) | (kPageSize - 1);
+  MemToShadow(kLinuxAppMemEnd) | 0xff;
 
 static inline bool IsAppMem(uptr mem) {
   return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
libsanitizer/tsan/tsan_rtl.h

@@ -521,6 +521,7 @@ void AfterSleep(ThreadState *thr, uptr pc);
 #define HACKY_CALL(f) \
   __asm__ __volatile__("sub $1024, %%rsp;" \
                        "/*.cfi_adjust_cfa_offset 1024;*/" \
+                       ".hidden " #f "_thunk;" \
                        "call " #f "_thunk;" \
                        "add $1024, %%rsp;" \
                        "/*.cfi_adjust_cfa_offset -1024;*/" \
libsanitizer/tsan/tsan_stat.cc

@@ -75,6 +75,11 @@ void StatOutput(u64 *stat) {
   name[StatAtomicStore]     = " store ";
   name[StatAtomicExchange]  = " exchange ";
   name[StatAtomicFetchAdd]  = " fetch_add ";
+  name[StatAtomicFetchSub]  = " fetch_sub ";
   name[StatAtomicFetchAnd]  = " fetch_and ";
   name[StatAtomicFetchOr]   = " fetch_or ";
   name[StatAtomicFetchXor]  = " fetch_xor ";
+  name[StatAtomicFetchNand] = " fetch_nand ";
+  name[StatAtomicCAS]       = " compare_exchange ";
+  name[StatAtomicFence]     = " fence ";
+  name[StatAtomicRelaxed]   = " Including relaxed ";

@@ -87,6 +92,7 @@ void StatOutput(u64 *stat) {
   name[StatAtomic2]         = " size 2 ";
   name[StatAtomic4]         = " size 4 ";
   name[StatAtomic8]         = " size 8 ";
+  name[StatAtomic16]        = " size 16 ";
 
   name[StatInterceptor]     = "Interceptors ";
   name[StatInt_longjmp]     = " longjmp ";
libsanitizer/tsan/tsan_stat.h

@@ -75,6 +75,7 @@ enum StatType {
   StatAtomicFetchAnd,
   StatAtomicFetchOr,
   StatAtomicFetchXor,
+  StatAtomicFetchNand,
   StatAtomicCAS,
   StatAtomicFence,
   StatAtomicRelaxed,

@@ -87,6 +88,7 @@ enum StatType {
   StatAtomic2,
   StatAtomic4,
   StatAtomic8,
+  StatAtomic16,
 
   // Interceptors.
   StatInterceptor,