Commit a0408454 in riscv-gcc-1, authored Dec 05, 2012 by Kostya Serebryany, committed by Kostya Serebryany, Dec 05, 2012
[libsanitizer] merge from upstream r169371
From-SVN: r194221
Parent: cc4d934f
Showing 44 changed files with 795 additions and 282 deletions.
libsanitizer/ChangeLog                                      +4   -0
libsanitizer/MERGE                                          +1   -1
libsanitizer/asan/asan_interceptors.cc                      +2   -0
libsanitizer/asan/asan_internal.h                           +1   -0
libsanitizer/asan/asan_linux.cc                             +3   -1
libsanitizer/asan/asan_poisoning.cc                         +37  -0
libsanitizer/asan/asan_report.cc                            +3   -0
libsanitizer/asan/asan_rtl.cc                               +2   -0
libsanitizer/asan/dynamic/asan_interceptors_dynamic.cc      +0   -2
libsanitizer/include/sanitizer/asan_interface.h             +9   -0
libsanitizer/sanitizer_common/sanitizer_allocator.cc        +0   -7
libsanitizer/sanitizer_common/sanitizer_allocator.h         +40  -39
libsanitizer/sanitizer_common/sanitizer_common.h            +0   -3
libsanitizer/sanitizer_common/sanitizer_linux.cc            +48  -8
libsanitizer/sanitizer_common/sanitizer_mac.cc              +9   -0
libsanitizer/sanitizer_common/sanitizer_posix.cc            +4   -1
libsanitizer/sanitizer_common/sanitizer_procmaps.h          +19  -3
libsanitizer/sanitizer_common/sanitizer_stacktrace.cc       +2   -0
libsanitizer/sanitizer_common/sanitizer_symbolizer.cc       +15  -0
libsanitizer/sanitizer_common/sanitizer_symbolizer.h        +1   -0
libsanitizer/tsan/tsan_defs.h                               +11  -1
libsanitizer/tsan/tsan_flags.cc                             +12  -2
libsanitizer/tsan/tsan_flags.h                              +13  -2
libsanitizer/tsan/tsan_interceptors.cc                      +82  -2
libsanitizer/tsan/tsan_interceptors.h                       +0   -52
libsanitizer/tsan/tsan_interface.h                          +26  -21
libsanitizer/tsan/tsan_interface_ann.cc                     +60  -40
libsanitizer/tsan/tsan_interface_ann.h                      +4   -2
libsanitizer/tsan/tsan_interface_atomic.cc                  +92  -27
libsanitizer/tsan/tsan_interface_atomic.h                   +0   -0
libsanitizer/tsan/tsan_platform.h                           +57  -1
libsanitizer/tsan/tsan_platform_linux.cc                    +41  -9
libsanitizer/tsan/tsan_report.cc                            +9   -3
libsanitizer/tsan/tsan_report.h                             +2   -0
libsanitizer/tsan/tsan_rtl.cc                               +29  -7
libsanitizer/tsan/tsan_rtl.h                                +58  -15
libsanitizer/tsan/tsan_rtl_mutex.cc                         +6   -6
libsanitizer/tsan/tsan_rtl_report.cc                        +48  -10
libsanitizer/tsan/tsan_rtl_thread.cc                        +32  -8
libsanitizer/tsan/tsan_stat.cc                              +1   -0
libsanitizer/tsan/tsan_stat.h                               +1   -0
libsanitizer/tsan/tsan_suppressions.cc                      +1   -1
libsanitizer/tsan/tsan_symbolize.cc                         +7   -1
libsanitizer/tsan/tsan_trace.h                              +3   -7
libsanitizer/ChangeLog

+2012-12-05  Kostya Serebryany  <kcc@google.com>
+
+	* All files: Merge from upstream r169371.
+
 2012-12-04  Kostya Serebryany  <kcc@google.com>
 	    Jack Howarth  <howarth@bromo.med.uc.edu>
...

libsanitizer/MERGE

-168699
+169371

 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.

libsanitizer/asan/asan_interceptors.cc

...
@@ -177,6 +177,8 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
 #if ASAN_INTERCEPT___CXA_THROW
 INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
+  Printf("__asan's __cxa_throw %p; REAL(__cxa_throw) %p PLAIN %p\n",
+         __interceptor___cxa_throw, REAL(__cxa_throw), __cxa_throw);
   CHECK(REAL(__cxa_throw));
   __asan_handle_no_return();
   REAL(__cxa_throw)(a, b, c);
...

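The interceptor forwards to the real __cxa_throw only after calling __asan_handle_no_return(), because unwinding skips frame epilogues and would otherwise leave poisoned redzones behind on the reused stack. Code that performs its own non-local jumps can call the same public hook; the sketch below is purely illustrative (it only does anything when built with -fsanitize=address, and ASan in fact already intercepts longjmp itself, as the siglongjmp hunk above shows):

    #include <csetjmp>
    #include <cstdio>

    extern "C" void __asan_handle_no_return();  // exported by the ASan runtime

    static std::jmp_buf g_env;

    static void deep_function() {
      char locals[256] = {0};
      (void)locals;
      // We are about to jump over this frame without running its epilogue,
      // so ask ASan to unpoison whatever the skipped frames still cover.
      __asan_handle_no_return();
      std::longjmp(g_env, 1);
    }

    int main() {
      if (setjmp(g_env) == 0)
        deep_function();
      std::printf("returned via longjmp\n");
      return 0;
    }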
libsanitizer/asan/asan_internal.h

...
@@ -159,6 +159,7 @@ const int kAsanStackPartialRedzoneMagic = 0xf4;
 const int kAsanStackAfterReturnMagic = 0xf5;
 const int kAsanInitializationOrderMagic = 0xf6;
 const int kAsanUserPoisonedMemoryMagic = 0xf7;
+const int kAsanStackUseAfterScopeMagic = 0xf8;
 const int kAsanGlobalRedzoneMagic = 0xf9;
 const int kAsanInternalHeapMagic = 0xfe;
...

libsanitizer/asan/asan_linux.cc

...
@@ -156,7 +156,9 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
   stack->trace[0] = pc;
   if ((max_s) > 1) {
     stack->max_size = max_s;
-#if defined(__arm__) || defined(__powerpc__) || defined(__powerpc64__)
+#if defined(__arm__) || \
+    defined(__powerpc__) || defined(__powerpc64__) || \
+    defined(__sparc__)
     _Unwind_Backtrace(Unwind_Trace, stack);
     // Pop off the two ASAN functions from the backtrace.
     stack->PopStackFrames(2);
...

libsanitizer/asan/asan_poisoning.cc

...
@@ -149,3 +149,40 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
 bool __asan_address_is_poisoned(void const volatile *addr) {
   return __asan::AddressIsPoisoned((uptr)addr);
 }
+
+// This is a simplified version of __asan_(un)poison_memory_region, which
+// assumes that left border of region to be poisoned is properly aligned.
+static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
+  if (size == 0) return;
+  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
+  PoisonShadow(addr, aligned_size,
+               do_poison ? kAsanStackUseAfterScopeMagic : 0);
+  if (size == aligned_size)
+    return;
+  s8 end_offset = (s8)(size - aligned_size);
+  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
+  s8 end_value = *shadow_end;
+  if (do_poison) {
+    // If possible, mark all the bytes mapping to last shadow byte as
+    // unaddressable.
+    if (end_value > 0 && end_value <= end_offset)
+      *shadow_end = kAsanStackUseAfterScopeMagic;
+  } else {
+    // If necessary, mark few first bytes mapping to last shadow byte
+    // as addressable
+    if (end_value != 0)
+      *shadow_end = Max(end_value, end_offset);
+  }
+}
+
+void __asan_poison_stack_memory(uptr addr, uptr size) {
+  if (flags()->verbosity > 0)
+    Report("poisoning: %p %zx\n", (void*)addr, size);
+  PoisonAlignedStackMemory(addr, size, true);
+}
+
+void __asan_unpoison_stack_memory(uptr addr, uptr size) {
+  if (flags()->verbosity > 0)
+    Report("unpoisoning: %p %zx\n", (void*)addr, size);
+  PoisonAlignedStackMemory(addr, size, false);
+}

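A quick worked example of the partial-granule handling above, assuming the default SHADOW_GRANULARITY of 8: for a 37-byte region only the first 32 bytes cover whole shadow granules, and the trailing 5 bytes share one shadow byte with whatever follows, which is why the code compares the existing shadow value against end_offset instead of blindly overwriting it. The recomputation below is a standalone illustration, not runtime code:

    #include <cstdio>

    int main() {
      const unsigned long kShadowGranularity = 8;  // assumed default ASan granule
      const unsigned long size = 37;
      unsigned long aligned_size = size & ~(kShadowGranularity - 1);  // 32
      long end_offset = (long)(size - aligned_size);                  // 5
      std::printf("aligned_size=%lu end_offset=%ld\n", aligned_size, end_offset);
      return 0;
    }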
libsanitizer/asan/asan_report.cc

...
@@ -455,6 +455,9 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
     case kAsanUserPoisonedMemoryMagic:
       bug_descr = "use-after-poison";
       break;
+    case kAsanStackUseAfterScopeMagic:
+      bug_descr = "stack-use-after-scope";
+      break;
     case kAsanGlobalRedzoneMagic:
       bug_descr = "global-buffer-overflow";
       break;
...

libsanitizer/asan/asan_rtl.cc

...
@@ -246,6 +246,8 @@ static NOINLINE void force_interface_symbols() {
     case 34: __asan_malloc_hook(0, 0); break;
     case 35: __asan_free_hook(0); break;
     case 36: __asan_symbolize(0, 0, 0); break;
+    case 37: __asan_poison_stack_memory(0, 0); break;
+    case 38: __asan_unpoison_stack_memory(0, 0); break;
   }
 }
...

libsanitizer/asan/dynamic/asan_interceptors_dynamic.cc

 //===-- asan_interceptors_dynamic.cc --------------------------------------===//
 //
-// The LLVM Compiler Infrastructure
-//
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
...

libsanitizer/include/sanitizer/asan_interface.h

...
@@ -64,6 +64,15 @@ extern "C" {
   void __asan_stack_free(uptr ptr, uptr size, uptr real_stack)
       SANITIZER_INTERFACE_ATTRIBUTE;

+  // These two functions are used by instrumented code in the
+  // use-after-scope mode. They mark memory for local variables as
+  // unaddressable when they leave scope and addressable before the
+  // function exits.
+  void __asan_poison_stack_memory(uptr addr, uptr size)
+      SANITIZER_INTERFACE_ATTRIBUTE;
+  void __asan_unpoison_stack_memory(uptr addr, uptr size)
+      SANITIZER_INTERFACE_ATTRIBUTE;
+
   // Marks memory region [addr, addr+size) as unaddressable.
   // This memory must be previously allocated by the user program. Accessing
   // addresses in this region from instrumented code is forbidden until
...

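These declarations are the contract for the use-after-scope mode: the compiler is expected to poison a local's stack slot when the variable goes out of scope and to unpoison it before the frame is released. A hand-written illustration of that protocol (prototypes copied from the header above; uptr is spelled as unsigned long, which assumes the usual LP64 target, and the program must be built with -fsanitize=address so the symbols resolve):

    #include <cstdio>

    extern "C" {
    void __asan_poison_stack_memory(unsigned long addr, unsigned long size);
    void __asan_unpoison_stack_memory(unsigned long addr, unsigned long size);
    bool __asan_address_is_poisoned(void const volatile *addr);
    }

    int main() {
      char slot[32];  // ASan's frame layout keeps this granule-aligned
      // What the compiler would emit when `slot` leaves its scope:
      __asan_poison_stack_memory((unsigned long)slot, sizeof(slot));
      std::printf("poisoned? %d\n", (int)__asan_address_is_poisoned(slot));
      // ...and before the frame is reused, so later frames start clean:
      __asan_unpoison_stack_memory((unsigned long)slot, sizeof(slot));
      std::printf("poisoned? %d\n", (int)__asan_address_is_poisoned(slot));
      return 0;
    }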
libsanitizer/sanitizer_common/sanitizer_allocator.cc

...
@@ -47,13 +47,6 @@ void InternalFree(void *addr) {
   LIBC_FREE(addr);
 }

-void *InternalAllocBlock(void *p) {
-  CHECK_NE(p, (void*)0);
-  u64 *pp = (u64*)((uptr)p & ~0x7);
-  for (; pp[0] != kBlockMagic; pp--) {}
-  return pp + 1;
-}
-
 // LowLevelAllocator
 static LowLevelAllocateCallback low_level_alloc_callback;
...

libsanitizer/sanitizer_common/sanitizer_allocator64.h → libsanitizer/sanitizer_common/sanitizer_allocator.h

-//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
+//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
-// Specialized allocator which works only in 64-bit address space.
-// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
-// The main feature of this allocator is that the header is located far away
-// from the user memory region, so that the tool does not use extra shadow
-// for the header.
 //
-// Status: not yet ready.
+// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
+//
 //===----------------------------------------------------------------------===//
 #ifndef SANITIZER_ALLOCATOR_H
 #define SANITIZER_ALLOCATOR_H

 #include "sanitizer_internal_defs.h"
-
-#if SANITIZER_WORDSIZE != 64
-# error "sanitizer_allocator64.h can only be used on 64-bit platforms"
-#endif
-
 #include "sanitizer_common.h"
 #include "sanitizer_libc.h"
 #include "sanitizer_list.h"
...
@@ -28,7 +21,10 @@
 namespace __sanitizer {

 // Maps size class id to size and back.
-class DefaultSizeClassMap {
+template <uptr l0, uptr l1, uptr l2, uptr l3, uptr l4, uptr l5,
+          uptr s0, uptr s1, uptr s2, uptr s3, uptr s4,
+          uptr c0, uptr c1, uptr c2, uptr c3, uptr c4>
+class SplineSizeClassMap {
  private:
   // Here we use a spline composed of 5 polynomials of oder 1.
   // The first size class is l0, then the classes go with step s0
...
@@ -36,38 +32,20 @@ class DefaultSizeClassMap {
   // Steps should be powers of two for cheap division.
   // The size of the last size class should be a power of two.
   // There should be at most 256 size classes.
-  static const uptr l0 = 1 << 4;
-  static const uptr l1 = 1 << 9;
-  static const uptr l2 = 1 << 12;
-  static const uptr l3 = 1 << 15;
-  static const uptr l4 = 1 << 18;
-  static const uptr l5 = 1 << 21;
-
-  static const uptr s0 = 1 << 4;
-  static const uptr s1 = 1 << 6;
-  static const uptr s2 = 1 << 9;
-  static const uptr s3 = 1 << 12;
-  static const uptr s4 = 1 << 15;
-
   static const uptr u0 = 0  + (l1 - l0) / s0;
   static const uptr u1 = u0 + (l2 - l1) / s1;
   static const uptr u2 = u1 + (l3 - l2) / s2;
   static const uptr u3 = u2 + (l4 - l3) / s3;
   static const uptr u4 = u3 + (l5 - l4) / s4;

-  // Max cached in local cache blocks.
-  static const uptr c0 = 256;
-  static const uptr c1 = 64;
-  static const uptr c2 = 16;
-  static const uptr c3 = 4;
-  static const uptr c4 = 1;
-
  public:
-  // The number of size classes should be a power of two for fast division.
   static const uptr kNumClasses = u4 + 1;
   static const uptr kMaxSize = l5;
   static const uptr kMinSize = l0;

   COMPILER_CHECK(kNumClasses <= 256);
-  COMPILER_CHECK((kNumClasses & (kNumClasses - 1)) == 0);
   COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

   static uptr Size(uptr class_id) {
...
@@ -97,13 +75,30 @@ class DefaultSizeClassMap {
   }
 };

+class DefaultSizeClassMap: public SplineSizeClassMap<
+  /* l: */1 << 4, 1 << 9, 1 << 12, 1 << 15, 1 << 18, 1 << 21,
+  /* s: */1 << 4, 1 << 6, 1 << 9,  1 << 12, 1 << 15,
+  /* c: */256,    64,     16,      4,       1> {
+ private:
+  COMPILER_CHECK(kNumClasses == 256);
+};
+
+class CompactSizeClassMap: public SplineSizeClassMap<
+  /* l: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12, 1 << 15,
+  /* s: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12,
+  /* c: */256,    64,     16,     4,      1> {
+ private:
+  COMPILER_CHECK(kNumClasses <= 32);
+};
+
 struct AllocatorListNode {
   AllocatorListNode *next;
 };

 typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;

+// SizeClassAllocator64 -- allocator for 64-bit address space.
+//
 // Space: a portion of address space of kSpaceSize bytes starting at
 // a fixed address (kSpaceBeg). Both constants are powers of two and
 // kSpaceBeg is kSpaceSize-aligned.
...
@@ -217,14 +212,15 @@ class SizeClassAllocator64 {
   static uptr AllocBeg()  { return kSpaceBeg; }
   static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

-  static const uptr kNumClasses = 256;  // Power of two <= 256
   typedef SizeClassMap SizeClassMapT;
+  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 256

  private:
-  static const uptr kRegionSize = kSpaceSize / kNumClasses;
   COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
   COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
+  static const uptr kRegionSize = kSpaceSize / kNumClasses;
   // kRegionSize must be >= 2^32.
-  COMPILER_CHECK((kRegionSize >> 32) > 0);
+  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
   // Populate the free list with at most this number of bytes at once
   // or with one element if its size is greater.
   static const uptr kPopulateSize = 1 << 18;
...
@@ -239,8 +235,9 @@ class SizeClassAllocator64 {
   COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

   static uptr AdditionalSize() {
-    uptr res = sizeof(RegionInfo) * kNumClasses;
-    CHECK_EQ(res % GetPageSizeCached(), 0);
+    uptr PageSize = GetPageSizeCached();
+    uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
+    CHECK_EQ(res % PageSize, 0);
     return res;
   }
...
@@ -305,8 +302,10 @@ class SizeClassAllocator64 {
 // Objects of this type should be used as local caches for SizeClassAllocator64.
 // Since the typical use of this class is to have one object per thread in TLS,
 // is has to be POD.
-template<const uptr kNumClasses, class SizeClassAllocator>
+template<class SizeClassAllocator>
 struct SizeClassAllocatorLocalCache {
+  typedef SizeClassAllocator Allocator;
+  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
   // Don't need to call Init if the object is a global (i.e. zero-initialized).
   void Init() {
     internal_memset(this, 0, sizeof(*this));
...
@@ -458,11 +457,13 @@ class LargeMmapAllocator {
   };

   Header *GetHeader(uptr p) {
+    CHECK_EQ(p % page_size_, 0);
     return reinterpret_cast<Header*>(p - page_size_);
   }
   Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }

   void *GetUser(Header *h) {
+    CHECK_EQ((uptr)h % page_size_, 0);
     return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
   }
...

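The COMPILER_CHECK(kNumClasses == 256) added to DefaultSizeClassMap follows directly from the spline parameters it passes to SplineSizeClassMap: each of the five segments contributes (l_next - l) / s classes. A standalone recomputation of that arithmetic, using the same constants as the DefaultSizeClassMap instantiation above (outside the allocator itself):

    #include <cstdio>

    int main() {
      const unsigned long l0 = 1UL << 4,  l1 = 1UL << 9,  l2 = 1UL << 12,
                          l3 = 1UL << 15, l4 = 1UL << 18, l5 = 1UL << 21;
      const unsigned long s0 = 1UL << 4,  s1 = 1UL << 6,  s2 = 1UL << 9,
                          s3 = 1UL << 12, s4 = 1UL << 15;
      const unsigned long u0 = 0  + (l1 - l0) / s0;   // 31
      const unsigned long u1 = u0 + (l2 - l1) / s1;   // 87
      const unsigned long u2 = u1 + (l3 - l2) / s2;   // 143
      const unsigned long u3 = u2 + (l4 - l3) / s3;   // 199
      const unsigned long u4 = u3 + (l5 - l4) / s4;   // 255
      std::printf("kNumClasses = %lu, kMaxSize = %lu\n", u4 + 1, l5);  // 256, 2 MB
      return 0;
    }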
libsanitizer/sanitizer_common/sanitizer_common.h

...
@@ -49,9 +49,6 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
 // Internal allocator
 void *InternalAlloc(uptr size);
 void InternalFree(void *p);
-// Given the pointer p into a valid allocated block,
-// returns a pointer to the beginning of the block.
-void *InternalAllocBlock(void *p);

 // InternalScopedBuffer can be used instead of large stack arrays to
 // keep frame size low.
...

libsanitizer/sanitizer_common/sanitizer_linux.cc

...
@@ -14,6 +14,7 @@
 #include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
 #include "sanitizer_placement_new.h"
 #include "sanitizer_procmaps.h"
...
@@ -215,21 +216,60 @@ void ReExec() {
 }

 // ----------------- sanitizer_procmaps.h
+// Linker initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_;  // Linker initialized.
+
 MemoryMappingLayout::MemoryMappingLayout() {
-  proc_self_maps_buff_len_ =
-      ReadFileToBuffer("/proc/self/maps", &proc_self_maps_buff_,
-                       &proc_self_maps_buff_mmaped_size_, 1 << 26);
-  CHECK_GT(proc_self_maps_buff_len_, 0);
-  // internal_write(2, proc_self_maps_buff_, proc_self_maps_buff_len_);
+  proc_self_maps_.len =
+      ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
+                       &proc_self_maps_.mmaped_size, 1 << 26);
+  if (proc_self_maps_.mmaped_size == 0) {
+    LoadFromCache();
+    CHECK_GT(proc_self_maps_.len, 0);
+  }
+  // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
   Reset();
+  // FIXME: in the future we may want to cache the mappings on demand only.
+  CacheMemoryMappings();
 }

 MemoryMappingLayout::~MemoryMappingLayout() {
-  UnmapOrDie(proc_self_maps_buff_, proc_self_maps_buff_mmaped_size_);
+  // Only unmap the buffer if it is different from the cached one. Otherwise
+  // it will be unmapped when the cache is refreshed.
+  if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+    UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+  }
 }

 void MemoryMappingLayout::Reset() {
-  current_ = proc_self_maps_buff_;
+  current_ = proc_self_maps_.data;
 }
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+  SpinMutexLock l(&cache_lock_);
+  // Don't invalidate the cache if the mappings are unavailable.
+  ProcSelfMapsBuff old_proc_self_maps;
+  old_proc_self_maps = cached_proc_self_maps_;
+  cached_proc_self_maps_.len =
+      ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
+                       &cached_proc_self_maps_.mmaped_size, 1 << 26);
+  if (cached_proc_self_maps_.mmaped_size == 0) {
+    cached_proc_self_maps_ = old_proc_self_maps;
+  } else {
+    if (old_proc_self_maps.mmaped_size) {
+      UnmapOrDie(old_proc_self_maps.data,
+                 old_proc_self_maps.mmaped_size);
+    }
+  }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+  SpinMutexLock l(&cache_lock_);
+  if (cached_proc_self_maps_.data) {
+    proc_self_maps_ = cached_proc_self_maps_;
+  }
+}

 // Parse a hex value in str and update str.
...
@@ -263,7 +303,7 @@ static bool IsDecimal(char c) {
 bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
                                char filename[], uptr filename_size) {
-  char *last = proc_self_maps_buff_ + proc_self_maps_buff_len_;
+  char *last = proc_self_maps_.data + proc_self_maps_.len;
   if (current_ >= last) return false;
   uptr dummy;
   if (!start) start = &dummy;
...

libsanitizer/sanitizer_common/sanitizer_mac.cc

...
@@ -160,6 +160,15 @@ void MemoryMappingLayout::Reset() {
   current_filetype_ = 0;
 }

+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+  // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+  // No-op on Mac for now.
+}
+
 // Next and NextSegmentLoad were inspired by base/sysinfo.cc in
 // Google Perftools, http://code.google.com/p/google-perftools.
...

libsanitizer/sanitizer_common/sanitizer_posix.cc

...
@@ -167,7 +167,10 @@ void SetStackSizeLimitInBytes(uptr limit) {
   struct rlimit rlim;
   rlim.rlim_cur = limit;
   rlim.rlim_max = limit;
-  CHECK_EQ(0, setrlimit(RLIMIT_STACK, &rlim));
+  if (setrlimit(RLIMIT_STACK, &rlim)) {
+    Report("setrlimit() failed %d\n", errno);
+    Die();
+  }
   CHECK(!StackSizeIsUnlimited());
 }
...

libsanitizer/sanitizer_common/sanitizer_procmaps.h

...
@@ -13,6 +13,7 @@
 #define SANITIZER_PROCMAPS_H

 #include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"

 namespace __sanitizer {
...
@@ -27,6 +28,14 @@ class MemoryMappingLayout {
 };

 #else  // _WIN32
+#if defined(__linux__)
+struct ProcSelfMapsBuff {
+  char *data;
+  uptr mmaped_size;
+  uptr len;
+};
+#endif  // defined(__linux__)
+
 class MemoryMappingLayout {
  public:
   MemoryMappingLayout();
...
@@ -37,9 +46,14 @@ class MemoryMappingLayout {
   // address 'addr'. Returns true on success.
   bool GetObjectNameAndOffset(uptr addr, uptr *offset,
                               char filename[], uptr filename_size);
+  // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
+  // to obtain the memory mappings. It should fall back to pre-cached data
+  // instead of aborting.
+  static void CacheMemoryMappings();
   ~MemoryMappingLayout();

  private:
+  void LoadFromCache();
   // Default implementation of GetObjectNameAndOffset.
   // Quite slow, because it iterates through the whole process map for each
   // lookup.
...
@@ -71,10 +85,12 @@ class MemoryMappingLayout {
   }

 # if defined __linux__
-  char *proc_self_maps_buff_;
-  uptr proc_self_maps_buff_mmaped_size_;
-  uptr proc_self_maps_buff_len_;
+  ProcSelfMapsBuff proc_self_maps_;
   char *current_;
+  // Static mappings cache.
+  static ProcSelfMapsBuff cached_proc_self_maps_;
+  static StaticSpinMutex cache_lock_;  // protects cached_proc_self_maps_.
 # elif defined __APPLE__
   template<u32 kLCSegment, typename SegmentCommand>
   bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
...

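The intent of the new cache, per the comment above, is that the mappings are snapshotted while /proc/self/maps is still readable and later lookups fall back to that snapshot if a fresh read fails (for example under a seccomp sandbox). A plain POSIX sketch of the same pattern, independent of MemoryMappingLayout (the function names here are illustrative, not part of the sanitizer API):

    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>
    #include <string>

    static std::string g_cached_maps;  // filled early, reused on failure

    static std::string ReadSelfMaps() {
      std::string data;
      int fd = open("/proc/self/maps", O_RDONLY);
      if (fd < 0) return data;
      char buf[4096];
      ssize_t n;
      while ((n = read(fd, buf, sizeof(buf))) > 0) data.append(buf, n);
      close(fd);
      return data;
    }

    void CacheMemoryMappings() {            // call before the sandbox engages
      std::string fresh = ReadSelfMaps();
      if (!fresh.empty()) g_cached_maps.swap(fresh);  // keep old copy on failure
    }

    std::string CurrentMappings() {         // fall back to the cache if needed
      std::string fresh = ReadSelfMaps();
      return fresh.empty() ? g_cached_maps : fresh;
    }

    int main() {
      CacheMemoryMappings();
      std::printf("%zu bytes of mappings cached\n", g_cached_maps.size());
      return 0;
    }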
libsanitizer/sanitizer_common/sanitizer_stacktrace.cc

...
@@ -34,6 +34,8 @@ static uptr patch_pc(uptr pc) {
 #if defined(__powerpc__) || defined(__powerpc64__)
   // PCs are always 4 byte aligned.
   return pc - 4;
+#elif defined(__sparc__)
+  return pc - 8;
 #else
   return pc - 1;
 #endif
...

libsanitizer/sanitizer_common/sanitizer_symbolizer.cc

...
@@ -254,6 +254,17 @@ class Symbolizer {
     // Otherwise, the data was filled by external symbolizer.
     return actual_frames;
   }

+  bool SymbolizeData(uptr addr, AddressInfo *frame) {
+    LoadedModule *module = FindModuleForAddress(addr);
+    if (module == 0)
+      return false;
+    const char *module_name = module->full_name();
+    uptr module_offset = addr - module->base_address();
+    frame->FillAddressAndModuleInfo(addr, module_name, module_offset);
+    return true;
+  }
+
   bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
     int input_fd, output_fd;
     if (!StartSymbolizerSubprocess(path_to_symbolizer, &input_fd, &output_fd))
...
@@ -305,6 +316,10 @@ uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
   return symbolizer.SymbolizeCode(address, frames, max_frames);
 }

+bool SymbolizeData(uptr address, AddressInfo *frame) {
+  return symbolizer.SymbolizeData(address, frame);
+}
+
 bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
   return symbolizer.InitializeExternalSymbolizer(path_to_symbolizer);
 }
...

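SymbolizeData() resolves a data address (for instance the global involved in a global-buffer-overflow report) to the module it lives in plus an offset. A rough standalone analogue of that lookup using dladdr(), which is not what the sanitizer does internally but shows the same address-to-module mapping (link with -ldl on older glibc; dladdr may not resolve addresses in a non-PIE main executable):

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE  // for dladdr()
    #endif
    #include <dlfcn.h>
    #include <cstdio>

    static int g_some_global = 42;

    int main() {
      Dl_info info;
      if (dladdr((void *)&g_some_global, &info) && info.dli_fname) {
        unsigned long offset =
            (unsigned long)((char *)&g_some_global - (char *)info.dli_fbase);
        std::printf("%p = %s + 0x%lx\n",
                    (void *)&g_some_global, info.dli_fname, offset);
      }
      return 0;
    }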
libsanitizer/sanitizer_common/sanitizer_symbolizer.h

...
@@ -56,6 +56,7 @@ struct AddressInfo {
 // of descriptions actually filled.
 // This function should NOT be called from two threads simultaneously.
 uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
+bool SymbolizeData(uptr address, AddressInfo *frame);

 // Starts external symbolizer program in a subprocess. Sanitizer communicates
 // with external symbolizer via pipes.
...

libsanitizer/tsan/tsan_defs.h

...
@@ -23,8 +23,12 @@
 namespace __tsan {

 #ifdef TSAN_GO
+const bool kGoMode = true;
+const bool kCppMode = false;
 const char *const kTsanOptionsEnv = "GORACE";
 #else
+const bool kGoMode = false;
+const bool kCppMode = true;
 const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
 #endif
...
@@ -122,11 +126,17 @@ T max(T a, T b) {
 }

 template<typename T>
-T RoundUp(T p, int align) {
+T RoundUp(T p, u64 align) {
   DCHECK_EQ(align & (align - 1), 0);
   return (T)(((u64)p + align - 1) & ~(align - 1));
 }

+template<typename T>
+T RoundDown(T p, u64 align) {
+  DCHECK_EQ(align & (align - 1), 0);
+  return (T)((u64)p & ~(align - 1));
+}
+
 struct MD5Hash {
   u64 hash[2];
   bool operator==(const MD5Hash &other) const;
...

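RoundDown complements the existing RoundUp and, like it, only works for power-of-two alignments, since both just mask the low bits. A short sanity check of the bit trick with the same definitions (standalone; u64 spelled as unsigned long long):

    #include <cstdio>

    template <typename T> T RoundUp(T p, unsigned long long align) {
      return (T)(((unsigned long long)p + align - 1) & ~(align - 1));
    }
    template <typename T> T RoundDown(T p, unsigned long long align) {
      return (T)((unsigned long long)p & ~(align - 1));
    }

    int main() {
      std::printf("RoundUp(0x1234, 0x1000)   = 0x%llx\n",
                  RoundUp(0x1234ULL, 0x1000));    // 0x2000
      std::printf("RoundDown(0x1234, 0x1000) = 0x%llx\n",
                  RoundDown(0x1234ULL, 0x1000));  // 0x1000
      return 0;
    }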
libsanitizer/tsan/tsan_flags.cc

...
@@ -38,6 +38,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->enable_annotations = true;
   f->suppress_equal_stacks = true;
   f->suppress_equal_addresses = true;
+  f->suppress_java = false;
   f->report_bugs = true;
   f->report_thread_leaks = true;
   f->report_destroy_locked = true;
...
@@ -46,7 +47,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->strip_path_prefix = "";
   f->suppressions = "";
   f->exitcode = 66;
-  f->log_fileno = kStderrFd;
+  f->log_path = "stderr";
   f->atexit_sleep_ms = 1000;
   f->verbosity = 0;
   f->profile_memory = "";
...
@@ -54,6 +55,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->stop_on_start = false;
   f->running_on_valgrind = false;
   f->external_symbolizer_path = "";
+  f->history_size = kGoMode ? 1 : 2;  // There are a lot of goroutines in Go.

   // Let a frontend override.
   OverrideFlags(f);
...
@@ -62,6 +64,7 @@ void InitializeFlags(Flags *f, const char *env) {
   ParseFlag(env, &f->enable_annotations, "enable_annotations");
   ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
   ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
+  ParseFlag(env, &f->suppress_java, "suppress_java");
   ParseFlag(env, &f->report_bugs, "report_bugs");
   ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
   ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
...
@@ -70,19 +73,26 @@ void InitializeFlags(Flags *f, const char *env) {
   ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix");
   ParseFlag(env, &f->suppressions, "suppressions");
   ParseFlag(env, &f->exitcode, "exitcode");
-  ParseFlag(env, &f->log_fileno, "log_fileno");
+  ParseFlag(env, &f->log_path, "log_path");
   ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
   ParseFlag(env, &f->verbosity, "verbosity");
   ParseFlag(env, &f->profile_memory, "profile_memory");
   ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
   ParseFlag(env, &f->stop_on_start, "stop_on_start");
   ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path");
+  ParseFlag(env, &f->history_size, "history_size");

   if (!f->report_bugs) {
     f->report_thread_leaks = false;
     f->report_destroy_locked = false;
     f->report_signal_unsafe = false;
   }
+
+  if (f->history_size < 0 || f->history_size > 7) {
+    Printf("ThreadSanitizer: incorrect value for history_size"
+           " (must be [0..7])\n");
+    Die();
+  }
 }

 }  // namespace __tsan

libsanitizer/tsan/tsan_flags.h

...
@@ -29,6 +29,9 @@ struct Flags {
   // Supress a race report if we've already output another race report
   // on the same address.
   bool suppress_equal_addresses;
+  // Suppress weird race reports that can be seen if JVM is embed
+  // into the process.
+  bool suppress_java;
   // Turns off bug reporting entirely (useful for benchmarking).
   bool report_bugs;
   // Report thread leaks at exit?
...
@@ -47,8 +50,10 @@ struct Flags {
   const char *suppressions;
   // Override exit status if something was reported.
   int exitcode;
-  // Log fileno (1 - stdout, 2 - stderr).
-  int log_fileno;
+  // Write logs to "log_path.pid".
+  // The special values are "stdout" and "stderr".
+  // The default is "stderr".
+  const char *log_path;
   // Sleep in main thread before exiting for that many ms
   // (useful to catch "at exit" races).
   int atexit_sleep_ms;
...
@@ -64,6 +69,12 @@ struct Flags {
   bool running_on_valgrind;
   // Path to external symbolizer.
   const char *external_symbolizer_path;
+  // Per-thread history size, controls how many previous memory accesses
+  // are remembered per thread.  Possible values are [0..7].
+  // history_size=0 amounts to 32K memory accesses.  Each next value doubles
+  // the amount of memory accesses, up to history_size=7 that amounts to
+  // 4M memory accesses.  The default value is 2 (128K memory accesses).
+  int history_size;
 };

 Flags *flags();
...

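Both new flags are read from the TSAN_OPTIONS environment variable (GORACE in Go mode), so something like TSAN_OPTIONS="history_size=5 log_path=tsan_log suppress_java=1" would exercise all of them; that option string is only an illustration of the standard flag syntax. The documented scaling of history_size is a straight doubling from 32K accesses, which a few lines of arithmetic confirm:

    #include <cstdio>

    int main() {
      // history_size=0 keeps 32K accesses per thread; each step doubles it.
      for (int history_size = 0; history_size <= 7; history_size++) {
        unsigned long accesses = 32UL * 1024UL << history_size;
        std::printf("history_size=%d -> %luK accesses per thread\n",
                    history_size, accesses / 1024);  // default 2 -> 128K
      }
      return 0;
    }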
libsanitizer/tsan/tsan_interceptors.cc

...
@@ -13,7 +13,7 @@
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_interceptors.h"
+#include "interception/interception.h"
 #include "tsan_interface.h"
 #include "tsan_platform.h"
 #include "tsan_rtl.h"
...
@@ -135,6 +135,15 @@ static SignalContext *SigCtx(ThreadState *thr) {
 static unsigned g_thread_finalize_key;

+class ScopedInterceptor {
+ public:
+  ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
+  ~ScopedInterceptor();
+ private:
+  ThreadState *const thr_;
+  const int in_rtl_;
+};
+
 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                      uptr pc)
     : thr_(thr)
...
@@ -158,6 +167,30 @@ ScopedInterceptor::~ScopedInterceptor() {
   CHECK_EQ(in_rtl_, thr_->in_rtl);
 }

+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+    ThreadState *thr = cur_thread(); \
+    StatInc(thr, StatInterceptor); \
+    StatInc(thr, StatInt_##func); \
+    const uptr caller_pc = GET_CALLER_PC(); \
+    ScopedInterceptor si(thr, #func, caller_pc); \
+    /* Subtract one from pc as we need current instruction address */ \
+    const uptr pc = __sanitizer::StackTrace::GetCurrentPc() - 1; \
+    (void)pc; \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+    SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+    if (REAL(func) == 0) { \
+      Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+      Die(); \
+    } \
+    if (thr->in_rtl > 1) \
+      return REAL(func)(__VA_ARGS__); \
+/**/
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+
 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))

 struct BlockingCall {
...
@@ -259,7 +292,6 @@ static void finalize(void *arg) {
 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
   SCOPED_TSAN_INTERCEPTOR(atexit, f);
   return atexit_ctx->atexit(thr, pc, f);
-  return 0;
 }

 TSAN_INTERCEPTOR(void, longjmp, void *env, int val) {
...
@@ -308,6 +340,11 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) {
   return p;
 }

+TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
+  SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
+  return user_alloc(thr, pc, sz, align);
+}
+
 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
   void *p = 0;
   {
...
@@ -1347,6 +1384,35 @@ TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
   return REAL(gettimeofday)(tv, tz);
 }

+// Linux kernel has a bug that leads to kernel deadlock if a process
+// maps TBs of memory and then calls mlock().
+static void MlockIsUnsupported() {
+  static atomic_uint8_t printed;
+  if (atomic_exchange(&printed, 1, memory_order_relaxed))
+    return;
+  Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n");
+}
+
+TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+  MlockIsUnsupported();
+  return 0;
+}
+
+TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+  MlockIsUnsupported();
+  return 0;
+}
+
+TSAN_INTERCEPTOR(int, mlockall, int flags) {
+  MlockIsUnsupported();
+  return 0;
+}
+
+TSAN_INTERCEPTOR(int, munlockall, void) {
+  MlockIsUnsupported();
+  return 0;
+}
+
 namespace __tsan {

 void ProcessPendingSignals(ThreadState *thr) {
...
@@ -1396,6 +1462,11 @@ void ProcessPendingSignals(ThreadState *thr) {
   thr->in_signal_handler = false;
 }

+static void unreachable() {
+  Printf("FATAL: ThreadSanitizer: unreachable called\n");
+  Die();
+}
+
 void InitializeInterceptors() {
   CHECK_GT(cur_thread()->in_rtl, 0);
...
@@ -1408,6 +1479,7 @@ void InitializeInterceptors() {
   TSAN_INTERCEPT(siglongjmp);

   TSAN_INTERCEPT(malloc);
+  TSAN_INTERCEPT(__libc_memalign);
   TSAN_INTERCEPT(calloc);
   TSAN_INTERCEPT(realloc);
   TSAN_INTERCEPT(free);
...
@@ -1524,6 +1596,14 @@ void InitializeInterceptors() {
   TSAN_INTERCEPT(nanosleep);
   TSAN_INTERCEPT(gettimeofday);

+  TSAN_INTERCEPT(mlock);
+  TSAN_INTERCEPT(munlock);
+  TSAN_INTERCEPT(mlockall);
+  TSAN_INTERCEPT(munlockall);
+
+  // Need to setup it, because interceptors check that the function is resolved.
+  // But atexit is emitted directly into the module, so can't be resolved.
+  REAL(atexit) = (int(*)(void(*)()))unreachable;
   atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
       AtExitContext();
...

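The macros inlined above build on the interception layer's INTERCEPTOR/REAL machinery: the interceptor body runs first and then forwards to the original libc definition. Outside the sanitizer tree the same effect is usually obtained with plain symbol interposition; the sketch below mimics the mlock interceptor with dlsym(RTLD_NEXT, ...) and illustrates the mechanism only, not TSan's actual implementation (build it as an LD_PRELOAD shared object, e.g. g++ -shared -fPIC -o preload.so preload.cc -ldl):

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE  // for RTLD_NEXT
    #endif
    #include <dlfcn.h>
    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    // Interpose mlock(): log the call, then forward to the next definition,
    // which plays the role of REAL(mlock) in the interceptor macros.
    extern "C" int mlock(const void *addr, size_t len) {
      typedef int (*mlock_fn)(const void *, size_t);
      static mlock_fn real_mlock =
          reinterpret_cast<mlock_fn>(dlsym(RTLD_NEXT, "mlock"));
      std::fprintf(stderr, "intercepted mlock(%p, %zu)\n", addr, len);
      return real_mlock ? real_mlock(addr, len) : 0;
    }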
libsanitizer/tsan/tsan_interceptors.h (deleted, 100644 → 0)

-//===-- tsan_interceptors.h -------------------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#ifndef TSAN_INTERCEPTORS_H
-#define TSAN_INTERCEPTORS_H
-
-#include "interception/interception.h"
-#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_rtl.h"
-
-namespace __tsan {
-
-class ScopedInterceptor {
- public:
-  ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
-  ~ScopedInterceptor();
- private:
-  ThreadState *const thr_;
-  const int in_rtl_;
-};
-
-#define SCOPED_INTERCEPTOR_RAW(func, ...) \
-    ThreadState *thr = cur_thread(); \
-    StatInc(thr, StatInterceptor); \
-    StatInc(thr, StatInt_##func); \
-    const uptr caller_pc = GET_CALLER_PC(); \
-    ScopedInterceptor si(thr, #func, caller_pc); \
-    /* Subtract one from pc as we need current instruction address */ \
-    const uptr pc = __sanitizer::StackTrace::GetCurrentPc() - 1; \
-    (void)pc; \
-/**/
-
-#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
-    SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
-    if (thr->in_rtl > 1) \
-      return REAL(func)(__VA_ARGS__); \
-/**/
-
-#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
-#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
-
-}  // namespace __tsan
-
-#endif  // TSAN_INTERCEPTORS_H

libsanitizer/tsan/tsan_interface.h

...
@@ -14,6 +14,8 @@
 #ifndef TSAN_INTERFACE_H
 #define TSAN_INTERFACE_H

+#include <sanitizer/common_interface_defs.h>
+
 // This header should NOT include any other headers.
 // All functions in this header are extern "C" and start with __tsan_.
...
@@ -23,27 +25,30 @@ extern "C" {
 // This function should be called at the very beginning of the process,
 // before any instrumented code is executed and before any call to malloc.
-void __tsan_init();
+void __tsan_init() SANITIZER_INTERFACE_ATTRIBUTE;

-void __tsan_read1(void *addr);
-void __tsan_read2(void *addr);
-void __tsan_read4(void *addr);
-void __tsan_read8(void *addr);
-void __tsan_read16(void *addr);
+void __tsan_read1(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;

-void __tsan_write1(void *addr);
-void __tsan_write2(void *addr);
-void __tsan_write4(void *addr);
-void __tsan_write8(void *addr);
-void __tsan_write16(void *addr);
+void __tsan_write1(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;

-void __tsan_vptr_update(void **vptr_p, void *new_val);
+void __tsan_vptr_update(void **vptr_p, void *new_val)
+    SANITIZER_INTERFACE_ATTRIBUTE;

-void __tsan_func_entry(void *call_pc);
-void __tsan_func_exit();
+void __tsan_func_entry(void *call_pc) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_func_exit() SANITIZER_INTERFACE_ATTRIBUTE;

-void __tsan_read_range(void *addr, unsigned long size);  // NOLINT
-void __tsan_write_range(void *addr, unsigned long size);  // NOLINT
+void __tsan_read_range(void *addr, unsigned long size)  // NOLINT
+    SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write_range(void *addr, unsigned long size)  // NOLINT
+    SANITIZER_INTERFACE_ATTRIBUTE;

 #ifdef __cplusplus
 }  // extern "C"
...

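SANITIZER_INTERFACE_ATTRIBUTE marks these declarations as part of the runtime's exported surface; on ELF targets it normally expands to an explicit default-visibility attribute so the entry points survive a -fvisibility=hidden build (that expansion lives in sanitizer_internal_defs.h and is stated here from memory, not from this diff). A minimal demonstration of the underlying attribute:

    // Build as: g++ -shared -fPIC -fvisibility=hidden -o libdemo.so demo.cc
    // Only the annotated function remains visible to the dynamic linker.
    #define DEMO_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))

    extern "C" {
    void DEMO_INTERFACE_ATTRIBUTE demo_exported_entry_point() {}
    void demo_internal_helper() {}  // hidden under -fvisibility=hidden
    }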
libsanitizer/tsan/tsan_interface_ann.cc

...
@@ -9,6 +9,7 @@
 //
 //===----------------------------------------------------------------------===//
 #include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "tsan_interface_ann.h"
 #include "tsan_mutex.h"
...
@@ -157,48 +158,50 @@ bool IsExpectedReport(uptr addr, uptr size) {
 using namespace __tsan;  // NOLINT

 extern "C" {
-void AnnotateHappensBefore(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
   SCOPED_ANNOTATION(AnnotateHappensBefore);
   Release(cur_thread(), CALLERPC, addr);
 }

-void AnnotateHappensAfter(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
   SCOPED_ANNOTATION(AnnotateHappensAfter);
   Acquire(cur_thread(), CALLERPC, addr);
 }

-void AnnotateCondVarSignal(char *f, int l, uptr cv) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
   SCOPED_ANNOTATION(AnnotateCondVarSignal);
 }

-void AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
   SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
 }

-void AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
   SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
 }

-void AnnotateCondVarWait(char *f, int l, uptr cv, uptr lock) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
+                                             uptr lock) {
   SCOPED_ANNOTATION(AnnotateCondVarWait);
 }

-void AnnotateRWLockCreate(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
   SCOPED_ANNOTATION(AnnotateRWLockCreate);
   MutexCreate(thr, pc, m, true, true, false);
 }

-void AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
   SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
   MutexCreate(thr, pc, m, true, true, true);
 }

-void AnnotateRWLockDestroy(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
   SCOPED_ANNOTATION(AnnotateRWLockDestroy);
   MutexDestroy(thr, pc, m);
 }

-void AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
+                                                uptr is_w) {
   SCOPED_ANNOTATION(AnnotateRWLockAcquired);
   if (is_w)
     MutexLock(thr, pc, m);
...
@@ -206,7 +209,8 @@ void AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) {
     MutexReadLock(thr, pc, m);
 }

-void AnnotateRWLockReleased(char *f, int l, uptr m, uptr is_w) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
+                                                uptr is_w) {
   SCOPED_ANNOTATION(AnnotateRWLockReleased);
   if (is_w)
     MutexUnlock(thr, pc, m);
...
@@ -214,19 +218,20 @@ void AnnotateRWLockReleased(char *f, int l, uptr m, uptr is_w) {
     MutexReadUnlock(thr, pc, m);
 }

-void AnnotateTraceMemory(char *f, int l, uptr mem) {
+void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
   SCOPED_ANNOTATION(AnnotateTraceMemory);
 }

-void AnnotateFlushState(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateFlushState);
 }

-void AnnotateNewMemory(char *f, int l, uptr mem, uptr size) {
+void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
+                                           uptr size) {
   SCOPED_ANNOTATION(AnnotateNewMemory);
 }

-void AnnotateNoOp(char *f, int l, uptr mem) {
+void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
   SCOPED_ANNOTATION(AnnotateNoOp);
 }
...
@@ -238,7 +243,7 @@ static void ReportMissedExpectedRace(ExpectRace *race) {
   Printf("==================\n");
 }

-void AnnotateFlushExpectedRaces(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
   Lock lock(&dyn_ann_ctx->mtx);
   while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
...
@@ -253,32 +258,39 @@ void AnnotateFlushExpectedRaces(char *f, int l) {
   }
 }

-void AnnotateEnableRaceDetection(char *f, int l, int enable) {
+void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(char *f, int l,
+                                                     int enable) {
   SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
   // FIXME: Reconsider this functionality later. It may be irrelevant.
 }

-void AnnotateMutexIsUsedAsCondVar(char *f, int l, uptr mu) {
+void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(char *f, int l,
+                                                      uptr mu) {
   SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
 }

-void AnnotatePCQGet(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQGet(char *f, int l, uptr pcq) {
   SCOPED_ANNOTATION(AnnotatePCQGet);
 }

-void AnnotatePCQPut(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQPut(char *f, int l, uptr pcq) {
   SCOPED_ANNOTATION(AnnotatePCQPut);
 }

-void AnnotatePCQDestroy(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(char *f, int l, uptr pcq) {
   SCOPED_ANNOTATION(AnnotatePCQDestroy);
 }

-void AnnotatePCQCreate(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQCreate(char *f, int l, uptr pcq) {
   SCOPED_ANNOTATION(AnnotatePCQCreate);
 }

-void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateExpectRace(char *f, int l,
+                                            uptr mem, char *desc) {
   SCOPED_ANNOTATION(AnnotateExpectRace);
   Lock lock(&dyn_ann_ctx->mtx);
   AddExpectRace(&dyn_ann_ctx->expect,
...
@@ -286,7 +298,8 @@ void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) {
   DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
 }

 static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
   Lock lock(&dyn_ann_ctx->mtx);
   AddExpectRace(&dyn_ann_ctx->benign,
                 f, l, mem, size, desc);
...
@@ -294,69 +307,76 @@ static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
 }

 // FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
-void AnnotateBenignRaceSized(char *f, int l, uptr mem, uptr size, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(char *f, int l, uptr mem,
+                                                 uptr size, char *desc) {
   SCOPED_ANNOTATION(AnnotateBenignRaceSized);
   BenignRaceImpl(f, l, mem, size, desc);
 }

-void AnnotateBenignRace(char *f, int l, uptr mem, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateBenignRace(char *f, int l, uptr mem,
,
char
*
desc
)
{
SCOPED_ANNOTATION
(
AnnotateBenignRace
);
SCOPED_ANNOTATION
(
AnnotateBenignRace
);
BenignRaceImpl
(
f
,
l
,
mem
,
1
,
desc
);
BenignRaceImpl
(
f
,
l
,
mem
,
1
,
desc
);
}
}
void
AnnotateIgnoreReadsBegin
(
char
*
f
,
int
l
)
{
void
INTERFACE_ATTRIBUTE
AnnotateIgnoreReadsBegin
(
char
*
f
,
int
l
)
{
SCOPED_ANNOTATION
(
AnnotateIgnoreReadsBegin
);
SCOPED_ANNOTATION
(
AnnotateIgnoreReadsBegin
);
IgnoreCtl
(
cur_thread
(),
false
,
true
);
IgnoreCtl
(
cur_thread
(),
false
,
true
);
}
}
void
AnnotateIgnoreReadsEnd
(
char
*
f
,
int
l
)
{
void
INTERFACE_ATTRIBUTE
AnnotateIgnoreReadsEnd
(
char
*
f
,
int
l
)
{
SCOPED_ANNOTATION
(
AnnotateIgnoreReadsEnd
);
SCOPED_ANNOTATION
(
AnnotateIgnoreReadsEnd
);
IgnoreCtl
(
cur_thread
(),
false
,
false
);
IgnoreCtl
(
cur_thread
(),
false
,
false
);
}
}
void
AnnotateIgnoreWritesBegin
(
char
*
f
,
int
l
)
{
void
INTERFACE_ATTRIBUTE
AnnotateIgnoreWritesBegin
(
char
*
f
,
int
l
)
{
SCOPED_ANNOTATION
(
AnnotateIgnoreWritesBegin
);
SCOPED_ANNOTATION
(
AnnotateIgnoreWritesBegin
);
IgnoreCtl
(
cur_thread
(),
true
,
true
);
IgnoreCtl
(
cur_thread
(),
true
,
true
);
}
}
void
AnnotateIgnoreWritesEnd
(
char
*
f
,
int
l
)
{
void
INTERFACE_ATTRIBUTE
AnnotateIgnoreWritesEnd
(
char
*
f
,
int
l
)
{
SCOPED_ANNOTATION
(
AnnotateIgnoreWritesEnd
);
SCOPED_ANNOTATION
(
AnnotateIgnoreWritesEnd
);
IgnoreCtl
(
cur_thread
()
,
true
,
false
);
IgnoreCtl
(
thr
,
true
,
false
);
}
}
void
AnnotatePublishMemoryRange
(
char
*
f
,
int
l
,
uptr
addr
,
uptr
size
)
{
void
INTERFACE_ATTRIBUTE
AnnotatePublishMemoryRange
(
char
*
f
,
int
l
,
uptr
addr
,
uptr
size
)
{
SCOPED_ANNOTATION
(
AnnotatePublishMemoryRange
);
SCOPED_ANNOTATION
(
AnnotatePublishMemoryRange
);
}
}
void
AnnotateUnpublishMemoryRange
(
char
*
f
,
int
l
,
uptr
addr
,
uptr
size
)
{
void
INTERFACE_ATTRIBUTE
AnnotateUnpublishMemoryRange
(
char
*
f
,
int
l
,
uptr
addr
,
uptr
size
)
{
SCOPED_ANNOTATION
(
AnnotateUnpublishMemoryRange
);
SCOPED_ANNOTATION
(
AnnotateUnpublishMemoryRange
);
}
}
void
AnnotateThreadName
(
char
*
f
,
int
l
,
char
*
name
)
{
void
INTERFACE_ATTRIBUTE
AnnotateThreadName
(
char
*
f
,
int
l
,
char
*
name
)
{
SCOPED_ANNOTATION
(
AnnotateThreadName
);
SCOPED_ANNOTATION
(
AnnotateThreadName
);
ThreadSetName
(
thr
,
name
);
}
}
void
WTFAnnotateHappensBefore
(
char
*
f
,
int
l
,
uptr
addr
)
{
void
INTERFACE_ATTRIBUTE
WTFAnnotateHappensBefore
(
char
*
f
,
int
l
,
uptr
addr
)
{
SCOPED_ANNOTATION
(
AnnotateHappensBefore
);
SCOPED_ANNOTATION
(
AnnotateHappensBefore
);
}
}
void
WTFAnnotateHappensAfter
(
char
*
f
,
int
l
,
uptr
addr
)
{
void
INTERFACE_ATTRIBUTE
WTFAnnotateHappensAfter
(
char
*
f
,
int
l
,
uptr
addr
)
{
SCOPED_ANNOTATION
(
AnnotateHappensAfter
);
SCOPED_ANNOTATION
(
AnnotateHappensAfter
);
}
}
void
WTFAnnotateBenignRaceSized
(
char
*
f
,
int
l
,
uptr
mem
,
uptr
sz
,
char
*
desc
)
{
void
INTERFACE_ATTRIBUTE
WTFAnnotateBenignRaceSized
(
char
*
f
,
int
l
,
uptr
mem
,
uptr
sz
,
char
*
desc
)
{
SCOPED_ANNOTATION
(
AnnotateBenignRaceSized
);
SCOPED_ANNOTATION
(
AnnotateBenignRaceSized
);
}
}
int
RunningOnValgrind
()
{
int
INTERFACE_ATTRIBUTE
RunningOnValgrind
()
{
return
flags
()
->
running_on_valgrind
;
return
flags
()
->
running_on_valgrind
;
}
}
double
__attribute__
((
weak
))
ValgrindSlowdown
(
void
)
{
double
__attribute__
((
weak
))
INTERFACE_ATTRIBUTE
ValgrindSlowdown
(
void
)
{
return
10.0
;
return
10.0
;
}
}
const
char
*
ThreadSanitizerQuery
(
const
char
*
query
)
{
const
char
INTERFACE_ATTRIBUTE
*
ThreadSanitizerQuery
(
const
char
*
query
)
{
if
(
internal_strcmp
(
query
,
"pure_happens_before"
)
==
0
)
if
(
internal_strcmp
(
query
,
"pure_happens_before"
)
==
0
)
return
"1"
;
return
"1"
;
else
else
...
...
libsanitizer/tsan/tsan_interface_ann.h
View file @ a0408454
...
@@ -12,6 +12,8 @@
 #ifndef TSAN_INTERFACE_ANN_H
 #define TSAN_INTERFACE_ANN_H
+#include <sanitizer/common_interface_defs.h>
 // This header should NOT include any other headers.
 // All functions in this header are extern "C" and start with __tsan_.
...
@@ -19,8 +21,8 @@
 extern "C" {
 #endif
-void __tsan_acquire(void *addr);
+void __tsan_acquire(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
-void __tsan_release(void *addr);
+void __tsan_release(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
 #ifdef __cplusplus
 }  // extern "C"
...
libsanitizer/tsan/tsan_interface_atomic.cc
View file @ a0408454
...
@@ -112,34 +112,101 @@ static morder ConvertOrder(morder mo) {
   return mo;
 }
-template<typename T> T func_xchg(T v, T op) {
-  return op;
+template<typename T> T func_xchg(volatile T *v, T op) {
+  return __sync_lock_test_and_set(v, op);
 }
-template<typename T> T func_add(T v, T op) {
-  return v + op;
+template<typename T> T func_add(volatile T *v, T op) {
+  return __sync_fetch_and_add(v, op);
 }
-template<typename T> T func_sub(T v, T op) {
-  return v - op;
+template<typename T> T func_sub(volatile T *v, T op) {
+  return __sync_fetch_and_sub(v, op);
 }
-template<typename T> T func_and(T v, T op) {
-  return v & op;
+template<typename T> T func_and(volatile T *v, T op) {
+  return __sync_fetch_and_and(v, op);
 }
-template<typename T> T func_or(T v, T op) {
-  return v | op;
+template<typename T> T func_or(volatile T *v, T op) {
+  return __sync_fetch_and_or(v, op);
 }
-template<typename T> T func_xor(T v, T op) {
-  return v ^ op;
+template<typename T> T func_xor(volatile T *v, T op) {
+  return __sync_fetch_and_xor(v, op);
 }
-template<typename T> T func_nand(T v, T op) {
-  return ~v & op;
+template<typename T> T func_nand(volatile T *v, T op) {
+  // clang does not support __sync_fetch_and_nand.
+  T cmp = *v;
+  for (;;) {
+    T newv = ~(cmp & op);
+    T cur = __sync_val_compare_and_swap(v, cmp, newv);
+    if (cmp == cur)
+      return cmp;
+    cmp = cur;
+  }
 }
+template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+  return __sync_val_compare_and_swap(v, cmp, xch);
+}
+
+// clang does not support 128-bit atomic ops.
+// Atomic ops are executed under tsan internal mutex,
+// here we assume that the atomic variables are not accessed
+// from non-instrumented code.
+#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+a128 func_xchg(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = op;
+  return cmp;
+}
+a128 func_add(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp + op;
+  return cmp;
+}
+a128 func_sub(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp - op;
+  return cmp;
+}
+a128 func_and(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp & op;
+  return cmp;
+}
+a128 func_or(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp | op;
+  return cmp;
+}
+a128 func_xor(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = cmp ^ op;
+  return cmp;
+}
+a128 func_nand(volatile a128 *v, a128 op) {
+  a128 cmp = *v;
+  *v = ~(cmp & op);
+  return cmp;
+}
+a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+  a128 cur = *v;
+  if (cur == cmp)
+    *v = xch;
+  return cur;
+}
+#endif
+
 #define SCOPED_ATOMIC(func, ...) \
     mo = ConvertOrder(mo); \
     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
...
@@ -164,6 +231,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   thr->clock.acquire(&s->clock);
   T v = *a;
   s->mtx.ReadUnlock();
+  __sync_synchronize();
   return v;
 }
...
@@ -179,6 +247,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     *a = v;
     return;
   }
+  __sync_synchronize();
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.ReleaseStore(&s->clock);
...
@@ -186,7 +255,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
   s->mtx.Unlock();
 }
-template<typename T, T (*F)(T v, T op)>
+template<typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
...
@@ -196,10 +265,9 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
     thr->clock.release(&s->clock);
   else if (IsAcquireOrder(mo))
     thr->clock.acquire(&s->clock);
-  T c = *a;
-  *a = F(c, v);
+  v = F(a, v);
   s->mtx.Unlock();
-  return c;
+  return v;
 }
 template<typename T>
...
@@ -256,16 +324,13 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
     thr->clock.release(&s->clock);
   else if (IsAcquireOrder(mo))
     thr->clock.acquire(&s->clock);
-  T cur = *a;
-  bool res = false;
-  if (cur == *c) {
-    *a = v;
-    res = true;
-  } else {
-    *c = cur;
-  }
+  T cc = *c;
+  T pr = func_cas(a, cc, v);
   s->mtx.Unlock();
-  return res;
+  if (pr == cc)
+    return true;
+  *c = pr;
+  return false;
 }
 template<typename T>
...
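The func_nand change above works around the missing __sync_fetch_and_nand builtin by retrying a compare-and-swap until it succeeds. As a minimal standalone sketch of the same retry idiom (not the tsan code itself; the fetch_and_nand name and the int type are illustrative assumptions):

#include <cstdio>

// Emulate fetch-and-nand with a CAS retry loop, mirroring func_nand above.
static int fetch_and_nand(volatile int *v, int op) {
  int cmp = *v;                       // current guess of the old value
  for (;;) {
    int newv = ~(cmp & op);           // value we want to install
    int cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cur == cmp)
      return cmp;                     // CAS landed: return the previous value
    cmp = cur;                        // lost a race: retry with the fresh value
  }
}

int main() {
  volatile int x = 0xF0;
  int old = fetch_and_nand(&x, 0x3C);
  std::printf("old=%#x new=%#x\n", old, (int)x);  // new == ~(0xF0 & 0x3C)
  return 0;
}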
libsanitizer/tsan/tsan_interface_atomic.h
View file @ a0408454
This diff is collapsed.
libsanitizer/tsan/tsan_platform.h
View file @ a0408454
...
@@ -10,10 +10,53 @@
 // Platform-specific code.
 //===----------------------------------------------------------------------===//
+/*
+C++ linux memory layout:
+0000 0000 0000 - 03c0 0000 0000: protected
+03c0 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 6000 0000 0000: protected
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
+
+C++ COMPAT linux memory layout:
+0000 0000 0000 - 0400 0000 0000: protected
+0400 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 2900 0000 0000: protected
+2900 0000 0000 - 2c00 0000 0000: modules
+2c00 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7f00 0000 0000: -
+7f00 0000 0000 - 7fff ffff ffff: main thread stack
+
+Go linux and darwin memory layout:
+0000 0000 0000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00f8 0000 0000 - 0118 0000 0000: heap
+0118 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1460 0000 0000: shadow
+1460 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7fff ffff ffff: -
+
+Go windows memory layout:
+0000 0000 0000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00f8 0000 0000 - 0118 0000 0000: heap
+0118 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0560 0000 0000: shadow
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07ff ffff ffff: -
+*/
 #ifndef TSAN_PLATFORM_H
 #define TSAN_PLATFORM_H
-#include "tsan_rtl.h"
+#include "tsan_defs.h"
+#include "tsan_trace.h"
 #if defined(__LP64__) || defined(_WIN64)
 namespace __tsan {
...
@@ -39,6 +82,13 @@ static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
 static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+#if defined(_WIN32)
+const uptr kTraceMemBegin = 0x056000000000ULL;
+#else
+const uptr kTraceMemBegin = 0x600000000000ULL;
+#endif
+const uptr kTraceMemSize = 0x020000000000ULL;
+
 // This has to be a macro to allow constant initialization of constants below.
 #ifndef TSAN_GO
 #define MemToShadow(addr) \
...
@@ -85,6 +135,12 @@ void FlushShadowMemory();
 const char *InitializePlatform();
 void FinalizePlatform();
+void MapThreadTrace(uptr addr, uptr size);
+uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) {
+  uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event);
+  DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+  return p;
+}
 void internal_start_thread(void(*func)(void*), void *arg);
...
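GetThreadTrace above places each thread's event trace at a fixed offset inside the [kTraceMemBegin, kTraceMemBegin + kTraceMemSize) window described in the layout comment. A small standalone sketch of that placement arithmetic; the kTraceSize value and the Event stand-in are assumptions for illustration only (the real definitions live in tsan_trace.h):

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint64_t uptr;
typedef uint64_t Event;                         // stand-in: one event is a 64-bit word

static const uptr kTraceMemBegin = 0x600000000000ULL;
static const uptr kTraceMemSize  = 0x020000000000ULL;
static const uptr kTraceSize     = 128 * 1024;  // assumed events per thread

// Same shape as GetThreadTrace(): base + tid * per-thread trace bytes.
static uptr ThreadTraceAddr(int tid) {
  uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event);
  assert(p < kTraceMemBegin + kTraceMemSize);   // must stay inside the traces window
  return p;
}

int main() {
  std::printf("tid 0 -> %#llx\n", (unsigned long long)ThreadTraceAddr(0));
  std::printf("tid 5 -> %#llx\n", (unsigned long long)ThreadTraceAddr(5));
  return 0;
}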
libsanitizer/tsan/tsan_platform_linux.cc
View file @ a0408454
...
@@ -101,7 +101,7 @@ void InitializeShadowMemory() {
   const uptr kClosedLowBeg  = 0x200000;
   const uptr kClosedLowEnd  = kLinuxShadowBeg - 1;
   const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
-  const uptr kClosedMidEnd = kLinuxAppMemBeg - 1;
+  const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
   ProtectRange(kClosedLowBeg, kClosedLowEnd);
   ProtectRange(kClosedMidBeg, kClosedMidEnd);
   DPrintf("kClosedLow   %zx-%zx (%zuGB)\n",
...
@@ -118,6 +118,16 @@ void InitializeShadowMemory() {
 }
 #endif
+void MapThreadTrace(uptr addr, uptr size) {
+  DPrintf("Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+  CHECK_GE(addr, kTraceMemBegin);
+  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
+    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
+    Die();
+  }
+}
+
 static uptr g_data_start;
 static uptr g_data_end;
...
@@ -190,28 +200,50 @@ static int InitTlsSize() {
 }
 #endif  // #ifndef TSAN_GO
+static rlim_t getlim(int res) {
+  rlimit rlim;
+  CHECK_EQ(0, getrlimit(res, &rlim));
+  return rlim.rlim_cur;
+}
+
+static void setlim(int res, rlim_t lim) {
+  // The following magic is to prevent clang from replacing it with memset.
+  volatile rlimit rlim;
+  rlim.rlim_cur = lim;
+  rlim.rlim_max = lim;
+  setrlimit(res, (rlimit*)&rlim);
+}
+
 const char *InitializePlatform() {
   void *p = 0;
   if (sizeof(p) == 8) {
     // Disable core dumps, dumping of 16TB usually takes a bit long.
-    // The following magic is to prevent clang from replacing it with memset.
-    volatile rlimit lim;
-    lim.rlim_cur = 0;
-    lim.rlim_max = 0;
-    setrlimit(RLIMIT_CORE, (rlimit*)&lim);
+    setlim(RLIMIT_CORE, 0);
   }
+  bool reexec = false;
   // TSan doesn't play well with unlimited stack size (as stack
   // overlaps with shadow memory). If we detect unlimited stack size,
   // we re-exec the program with limited stack size as a best effort.
-  if (StackSizeIsUnlimited()) {
+  if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
     const uptr kMaxStackSize = 32 * 1024 * 1024;  // 32 Mb
     Report("WARNING: Program is run with unlimited stack size, which "
            "wouldn't work with ThreadSanitizer.\n");
     Report("Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize);
     SetStackSizeLimitInBytes(kMaxStackSize);
-    ReExec();
+    reexec = true;
   }
+
+  if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+    Report("WARNING: Program is run with limited virtual address space, which "
+           "wouldn't work with ThreadSanitizer.\n");
+    Report("Re-execing with unlimited virtual address space.\n");
+    setlim(RLIMIT_AS, -1);
+    reexec = true;
+  }
+
+  if (reexec)
+    ReExec();
+
 #ifndef TSAN_GO
   CheckPIE();
   g_tls_size = (uptr)InitTlsSize();
...
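The getlim/setlim helpers above are thin wrappers over getrlimit/setrlimit, and the re-exec decision just compares the current soft limits against the "unlimited" sentinel. A small sketch of the same pattern on its own (error handling reduced to asserts; this is not the tsan code itself):

#include <cassert>
#include <cstdio>
#include <sys/resource.h>

static rlim_t get_soft_limit(int res) {
  rlimit rl;
  assert(getrlimit(res, &rl) == 0);
  return rl.rlim_cur;
}

static void set_limit(int res, rlim_t lim) {
  rlimit rl;
  rl.rlim_cur = lim;
  rl.rlim_max = lim;
  setrlimit(res, &rl);  // lowering limits needs no privileges; result ignored here
}

int main() {
  // On Linux, an unlimited stack shows up as RLIM_INFINITY, i.e. (rlim_t)-1.
  if (get_soft_limit(RLIMIT_STACK) == (rlim_t)-1)
    std::printf("stack size is unlimited; a tool like tsan would cap it and re-exec\n");
  set_limit(RLIMIT_CORE, 0);  // e.g. disable core dumps
  return 0;
}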
libsanitizer/tsan/tsan_report.cc
View file @ a0408454
...
@@ -24,6 +24,7 @@ ReportDesc::ReportDesc()
 }
 ReportDesc::~ReportDesc() {
+  // FIXME(dvyukov): it must be leaking a lot of memory.
 }
 #ifndef TSAN_GO
...
@@ -78,8 +79,9 @@ static void PrintMop(const ReportMop *mop, bool first) {
 static void PrintLocation(const ReportLocation *loc) {
   if (loc->type == ReportLocationGlobal) {
-    Printf("  Location is global '%s' of size %zu at %zx %s:%d\n",
-           loc->name, loc->size, loc->addr, loc->file, loc->line);
+    Printf("  Location is global '%s' of size %zu at %zx %s:%d (%s+%p)\n\n",
+           loc->name, loc->size, loc->addr, loc->file, loc->line,
+           loc->module, loc->offset);
   } else if (loc->type == ReportLocationHeap) {
     Printf("  Location is heap block of size %zu at %p allocated",
            loc->size, loc->addr);
...
@@ -89,7 +91,7 @@ static void PrintLocation(const ReportLocation *loc) {
     Printf(" by thread %d:\n", loc->tid);
     PrintStack(loc->stack);
   } else if (loc->type == ReportLocationStack) {
-    Printf("  Location is stack of thread %d:\n", loc->tid);
+    Printf("  Location is stack of thread %d:\n\n", loc->tid);
   }
 }
...
@@ -149,6 +151,10 @@ void PrintReport(const ReportDesc *rep) {
 #else
 void PrintStack(const ReportStack *ent) {
+  if (ent == 0) {
+    Printf("  [failed to restore the stack]\n\n");
+    return;
+  }
   for (int i = 0; ent; ent = ent->next, i++) {
     Printf("  %s()\n      %s:%d +0x%zx\n",
            ent->func, ent->file, ent->line, (void*)ent->offset);
...
libsanitizer/tsan/tsan_report.h
View file @ a0408454
...
@@ -56,6 +56,8 @@ struct ReportLocation {
   ReportLocationType type;
   uptr addr;
   uptr size;
+  char *module;
+  uptr offset;
   int tid;
   char *name;
   char *file;
...
libsanitizer/tsan/tsan_rtl.cc
View file @ a0408454
...
@@ -82,7 +82,8 @@ ThreadContext::ThreadContext(int tid)
   , epoch0()
   , epoch1()
   , dead_info()
-  , dead_next() {
+  , dead_next()
+  , name() {
 }
 static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
...
@@ -189,7 +190,12 @@ void Initialize(ThreadState *thr) {
   ctx->dead_list_tail = 0;
   InitializeFlags(&ctx->flags, env);
   // Setup correct file descriptor for error reports.
-  __sanitizer_set_report_fd(flags()->log_fileno);
+  if (internal_strcmp(flags()->log_path, "stdout") == 0)
+    __sanitizer_set_report_fd(kStdoutFd);
+  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
+    __sanitizer_set_report_fd(kStderrFd);
+  else
+    __sanitizer_set_report_path(flags()->log_path);
   InitializeSuppressions();
 #ifndef TSAN_GO
   // Initialize external symbolizer before internal threads are started.
...
@@ -279,13 +285,27 @@ void TraceSwitch(ThreadState *thr) {
   thr->nomalloc++;
   ScopedInRtl in_rtl;
   Lock l(&thr->trace.mtx);
-  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
+  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
   TraceHeader *hdr = &thr->trace.headers[trace];
   hdr->epoch0 = thr->fast_state.epoch();
   hdr->stack0.ObtainCurrent(thr, 0);
   thr->nomalloc--;
 }
+uptr TraceTopPC(ThreadState *thr) {
+  Event *events = (Event*)GetThreadTrace(thr->tid);
+  uptr pc = events[thr->fast_state.GetTracePos()];
+  return pc;
+}
+
+uptr TraceSize() {
+  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
+}
+
+uptr TraceParts() {
+  return TraceSize() / kTracePartSize;
+}
+
 #ifndef TSAN_GO
 extern "C" void __tsan_trace_switch() {
   TraceSwitch(cur_thread());
...
@@ -342,7 +362,7 @@ static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
 }
 static inline bool HappensBefore(Shadow old, ThreadState *thr) {
-  return thr->clock.get(old.tid()) >= old.epoch();
+  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
 }
 ALWAYS_INLINE
...
@@ -451,7 +471,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
   // We must not store to the trace if we do not store to the shadow.
   // That is, this call must be moved somewhere below.
-  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+  TraceAddEvent(thr, fast_state, EventTypeMop, pc);
   MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
                    shadow_mem, cur);
...
@@ -502,6 +522,7 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
 void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
   MemoryAccessRange(thr, pc, addr, size, true);
   Shadow s(thr->fast_state);
+  s.ClearIgnoreBit();
   s.MarkAsFreed();
   s.SetWrite(true);
   s.SetAddr0AndSizeLog(0, 3);
...
@@ -510,6 +531,7 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
 void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
   Shadow s(thr->fast_state);
+  s.ClearIgnoreBit();
   s.SetWrite(true);
   s.SetAddr0AndSizeLog(0, 3);
   MemoryRangeSet(thr, pc, addr, size, s.raw());
...
@@ -521,7 +543,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
   StatInc(thr, StatFuncEnter);
   DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);
+  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
   // Shadow stack maintenance can be replaced with
   // stack unwinding during trace switch (which presumably must be faster).
...
@@ -551,7 +573,7 @@ void FuncExit(ThreadState *thr) {
   StatInc(thr, StatFuncExit);
   DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);
+  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
   DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
 #ifndef TSAN_GO
...
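TraceSize() above makes the per-thread trace length a function of the history_size flag: each increment doubles the trace, and history_size 0 gives a two-part trace. A worked sketch of that arithmetic with an assumed kTracePartSizeBits value (13 here, purely for illustration):

#include <cstdint>
#include <cstdio>

typedef uint64_t uptr;

static const uptr kTracePartSizeBits = 13;                  // assumed for illustration
static const uptr kTracePartSize = 1ull << kTracePartSizeBits;

// Mirrors the shape of TraceSize()/TraceParts(): history_size selects 2..256 parts.
static uptr TraceSizeFor(int history_size) {
  return (uptr)(1ull << (kTracePartSizeBits + history_size + 1));
}

int main() {
  for (int hs = 0; hs <= 7; hs++) {
    uptr size = TraceSizeFor(hs);
    std::printf("history_size=%d -> %llu events (%llu parts)\n", hs,
                (unsigned long long)size,
                (unsigned long long)(size / kTracePartSize));
  }
  return 0;
}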
libsanitizer/tsan/tsan_rtl.h
View file @ a0408454
...
@@ -25,7 +25,7 @@
 #define TSAN_RTL_H
 #include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_allocator64.h"
+#include "sanitizer_common/sanitizer_allocator.h"
 #include "tsan_clock.h"
 #include "tsan_defs.h"
 #include "tsan_flags.h"
...
@@ -33,6 +33,11 @@
 #include "tsan_trace.h"
 #include "tsan_vector.h"
 #include "tsan_report.h"
+#include "tsan_platform.h"
+
+#if SANITIZER_WORDSIZE != 64
+# error "ThreadSanitizer is supported only on 64-bit platforms"
+#endif
 namespace __tsan {
...
@@ -55,8 +60,7 @@ const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.
 typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
     DefaultSizeClassMap> PrimaryAllocator;
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
-    PrimaryAllocator> AllocatorCache;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 typedef LargeMmapAllocator SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
     SecondaryAllocator> Allocator;
...
@@ -67,18 +71,19 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
                      u64 v1, u64 v2);
 // FastState (from most significant bit):
-//   unused          : 1
+//   ignore          : 1
 //   tid             : kTidBits
 //   epoch           : kClkBits
 //   unused          : -
-//   ignore_bit      : 1
+//   history_size    : 3
 class FastState {
  public:
   FastState(u64 tid, u64 epoch) {
     x_ = tid << kTidShift;
     x_ |= epoch << kClkShift;
-    DCHECK(tid == this->tid());
-    DCHECK(epoch == this->epoch());
+    DCHECK_EQ(tid, this->tid());
+    DCHECK_EQ(epoch, this->epoch());
+    DCHECK_EQ(GetIgnoreBit(), false);
   }
   explicit FastState(u64 x)
...
@@ -90,6 +95,11 @@ class FastState {
   }
   u64 tid() const {
+    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
+    return res;
+  }
+
+  u64 TidWithIgnore() const {
     u64 res = x_ >> kTidShift;
     return res;
   }
...
@@ -108,13 +118,34 @@ class FastState {
   void SetIgnoreBit() { x_ |= kIgnoreBit; }
   void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
-  bool GetIgnoreBit() const { return x_ & kIgnoreBit; }
+  bool GetIgnoreBit() const { return (s64)x_ < 0; }
+
+  void SetHistorySize(int hs) {
+    CHECK_GE(hs, 0);
+    CHECK_LE(hs, 7);
+    x_ = (x_ & ~7) | hs;
+  }
+
+  int GetHistorySize() const {
+    return (int)(x_ & 7);
+  }
+
+  void ClearHistorySize() {
+    x_ &= ~7;
+  }
+
+  u64 GetTracePos() const {
+    const int hs = GetHistorySize();
+    // When hs == 0, the trace consists of 2 parts.
+    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+    return epoch() & mask;
+  }
+
  private:
   friend class Shadow;
   static const int kTidShift = 64 - kTidBits - 1;
   static const int kClkShift = kTidShift - kClkBits;
-  static const u64 kIgnoreBit = 1ull;
+  static const u64 kIgnoreBit = 1ull << 63;
   static const u64 kFreedBit = 1ull << 63;
   u64 x_;
 };
...
@@ -128,9 +159,14 @@ class FastState {
 //   addr0           : 3
 class Shadow : public FastState {
  public:
   explicit Shadow(u64 x) : FastState(x) { }
-  explicit Shadow(const FastState &s) : FastState(s.x_) { }
+  explicit Shadow(const FastState &s) : FastState(s.x_) {
+    ClearHistorySize();
+  }
   void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
     DCHECK_EQ(x_ & 31, 0);
...
@@ -152,7 +188,7 @@ class Shadow : public FastState {
   static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
     u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
-    DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
+    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
     return shifted_xor == 0;
   }
...
@@ -335,6 +371,7 @@ struct ThreadContext {
   StackTrace creation_stack;
   ThreadDeadInfo *dead_info;
   ThreadContext *dead_next;  // In dead thread list.
+  char *name;  // As annotated by user.
   explicit ThreadContext(int tid);
 };
...
@@ -491,6 +528,7 @@ int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
 void ThreadJoin(ThreadState *thr, uptr pc, int tid);
 void ThreadDetach(ThreadState *thr, uptr pc, int tid);
 void ThreadFinalize(ThreadState *thr);
+void ThreadSetName(ThreadState *thr, const char *name);
 int ThreadCount(ThreadState *thr);
 void ProcessPendingSignals(ThreadState *thr);
...
@@ -531,19 +569,24 @@ void AfterSleep(ThreadState *thr, uptr pc);
 #endif
 void TraceSwitch(ThreadState *thr);
+uptr TraceTopPC(ThreadState *thr);
+uptr TraceSize();
+uptr TraceParts();
 extern "C" void __tsan_trace_switch();
-void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
-                                        EventType typ, uptr addr) {
+void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
+                                        EventType typ, uptr addr) {
   StatInc(thr, StatEvents);
-  if (UNLIKELY((epoch % kTracePartSize) == 0)) {
+  u64 pos = fs.GetTracePos();
+  if (UNLIKELY((pos % kTracePartSize) == 0)) {
 #ifndef TSAN_GO
     HACKY_CALL(__tsan_trace_switch);
 #else
     TraceSwitch(thr);
 #endif
   }
-  Event *evp = &thr->trace.events[epoch % kTraceSize];
+  Event *trace = (Event*)GetThreadTrace(fs.tid());
+  Event *evp = &trace[pos];
   Event ev = (u64)addr | ((u64)typ << 61);
   *evp = ev;
 }
...
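The new FastState layout above keeps the ignore bit in the top bit, the tid and epoch below it, and a 3-bit history_size in the low bits; GetTracePos() then masks the epoch down to a position inside the per-thread trace. A compact sketch of that packing with assumed field widths (kTidBits = 13 and kClkBits = 42 are illustrative assumptions, not the tsan constants):

#include <cstdint>
#include <cstdio>

typedef uint64_t u64;

static const int kTidBits = 13;                 // assumed width
static const int kClkBits = 42;                 // assumed width
static const int kTidShift = 64 - kTidBits - 1; // top bit is the ignore bit
static const int kClkShift = kTidShift - kClkBits;
static const u64 kIgnoreBit = 1ull << 63;
static const u64 kTracePartSizeBits = 13;       // assumed, matches the earlier sketch

struct FastStateSketch {
  u64 x;
  FastStateSketch(u64 tid, u64 epoch) : x((tid << kTidShift) | (epoch << kClkShift)) {}
  u64  tid()   const { return (x & ~kIgnoreBit) >> kTidShift; }
  u64  epoch() const { return (x >> kClkShift) & ((1ull << kClkBits) - 1); }
  void SetHistorySize(int hs) { x = (x & ~7ull) | (u64)hs; }
  int  GetHistorySize() const { return (int)(x & 7); }
  // Trace position: epoch modulo the trace length selected by history_size.
  u64  GetTracePos() const {
    const u64 mask = (1ull << (kTracePartSizeBits + GetHistorySize() + 1)) - 1;
    return epoch() & mask;
  }
};

int main() {
  FastStateSketch s(/*tid=*/7, /*epoch=*/100000);
  s.SetHistorySize(2);
  std::printf("tid=%llu epoch=%llu pos=%llu\n",
              (unsigned long long)s.tid(), (unsigned long long)s.epoch(),
              (unsigned long long)s.GetTracePos());
  return 0;
}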
libsanitizer/tsan/tsan_rtl_mutex.cc
View file @ a0408454
...
@@ -73,7 +73,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeLock, addr);
+  TraceAddEvent(thr, thr->fast_state, EventTypeLock, addr);
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
   if (s->owner_tid == SyncVar::kInvalidTid) {
     CHECK_EQ(s->recursion, 0);
...
@@ -105,7 +105,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
+  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
   if (s->recursion == 0) {
     if (!s->is_broken) {
...
@@ -142,7 +142,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRLock, addr);
+  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, addr);
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
   if (s->owner_tid != SyncVar::kInvalidTid) {
     Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
...
@@ -162,7 +162,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
+  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
   SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
   if (s->owner_tid != SyncVar::kInvalidTid) {
     Printf("ThreadSanitizer WARNING: read unlock of a write "
...
@@ -186,7 +186,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
     // Seems to be read unlock.
     StatInc(thr, StatMutexReadUnlock);
     thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
+    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
     thr->clock.set(thr->tid, thr->fast_state.epoch());
     thr->fast_synch_epoch = thr->fast_state.epoch();
     thr->clock.release(&s->read_clock);
...
@@ -203,7 +203,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
     // First, it's a bug to increment the epoch w/o writing to the trace.
     // Then, the acquire/release logic can be factored out as well.
     thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
+    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
     thr->clock.set(thr->tid, thr->fast_state.epoch());
     thr->fast_synch_epoch = thr->fast_state.epoch();
     thr->clock.ReleaseStore(&s->clock);
...
libsanitizer/tsan/tsan_rtl_report.cc
View file @ a0408454
...
@@ -123,8 +123,7 @@ ScopedReport::ScopedReport(ReportType typ) {
 ScopedReport::~ScopedReport() {
   ctx_->report_mtx.Unlock();
-  rep_->~ReportDesc();
-  internal_free(rep_);
+  DestroyAndFree(rep_);
 }
 void ScopedReport::AddStack(const StackTrace *stack) {
...
@@ -156,6 +155,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
   rt->id = tctx->tid;
   rt->pid = tctx->os_id;
   rt->running = (tctx->status == ThreadStatusRunning);
+  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
   rt->stack = SymbolizeStack(tctx->creation_stack);
 }
...
@@ -218,9 +218,11 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
   loc->type = ReportLocationGlobal;
   loc->addr = addr;
   loc->size = size;
+  loc->module = symb->module ? internal_strdup(symb->module) : 0;
+  loc->offset = symb->offset;
   loc->tid = 0;
-  loc->name = symb->func;
-  loc->file = symb->file;
+  loc->name = symb->func ? internal_strdup(symb->func) : 0;
+  loc->file = symb->file ? internal_strdup(symb->file) : 0;
   loc->line = symb->line;
   loc->stack = 0;
   internal_free(symb);
...
@@ -261,12 +263,12 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
     return;
   }
   Lock l(&trace->mtx);
-  const int partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts;
+  const int partidx = (epoch / kTracePartSize) % TraceParts();
   TraceHeader* hdr = &trace->headers[partidx];
   if (epoch < hdr->epoch0)
     return;
-  const u64 eend = epoch % kTraceSize;
-  const u64 ebegin = eend / kTracePartSize * kTracePartSize;
+  const u64 eend = epoch % TraceSize();
+  const u64 ebegin = RoundDown(eend, kTracePartSize);
   DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
           tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
   InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
...
@@ -275,8 +277,9 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
     DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
   }
   uptr pos = hdr->stack0.Size();
+  Event *events = (Event*)GetThreadTrace(tid);
   for (uptr i = ebegin; i <= eend; i++) {
-    Event ev = trace->events[i];
+    Event ev = events[i];
     EventType typ = (EventType)(ev >> 61);
     uptr pc = (uptr)(ev & 0xffffffffffffull);
     DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
...
@@ -382,6 +385,39 @@ bool IsFiredSuppression(Context *ctx,
   return false;
 }
+// On programs that use Java we see weird reports like:
+// WARNING: ThreadSanitizer: data race (pid=22512)
+//   Read of size 8 at 0x7d2b00084318 by thread 100:
+//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
+//     #1 <null> <null>:0 (0x7f7ad9b40193)
+//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
+//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
+//     #1 <null> <null>:0 (0x7f7ad9b42707)
+static bool IsJavaNonsense(const ReportDesc *rep) {
+  for (uptr i = 0; i < rep->mops.Size(); i++) {
+    ReportMop *mop = rep->mops[i];
+    ReportStack *frame = mop->stack;
+    if (frame != 0 && frame->func != 0
+        && (internal_strcmp(frame->func, "memset") == 0
+         || internal_strcmp(frame->func, "memcpy") == 0
+         || internal_strcmp(frame->func, "strcmp") == 0
+         || internal_strcmp(frame->func, "strncpy") == 0
+         || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) {
+      frame = frame->next;
+      if (frame == 0
+          || (frame->func == 0 && frame->file == 0 && frame->line == 0
+           && frame->module == 0)) {
+        if (frame) {
+          FiredSuppression supp = {rep->typ, frame->pc};
+          CTX()->fired_suppressions.PushBack(supp);
+        }
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 void ReportRace(ThreadState *thr) {
   if (!flags()->report_bugs)
     return;
...
@@ -414,8 +450,7 @@ void ReportRace(ThreadState *thr) {
   ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
   const uptr kMop = 2;
   StackTrace traces[kMop];
-  const uptr toppc = thr->trace.events[thr->fast_state.epoch() % kTraceSize]
-      & ((1ull << 61) - 1);
+  const uptr toppc = TraceTopPC(thr);
   traces[0].ObtainCurrent(thr, toppc);
   if (IsFiredSuppression(ctx, rep, traces[0]))
     return;
...
@@ -430,6 +465,9 @@ void ReportRace(ThreadState *thr) {
     rep.AddMemoryAccess(addr, s, &traces[i]);
   }
+  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
+    return;
+
   for (uptr i = 0; i < kMop; i++) {
     FastState s(thr->racy_state[i]);
     ThreadContext *tctx = ctx->threads[s.tid()];
...
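RestoreStack above now locates the trace part and the event range for a given epoch with TraceSize()/TraceParts() instead of the old compile-time constants. A small worked example of those index computations, using the same assumed constants as the earlier sketches (illustrative only):

#include <cstdint>
#include <cstdio>

typedef uint64_t u64;

static const u64 kTracePartSize = 1ull << 13;          // assumed part length (events)
static const u64 kTraceSizeIll  = kTracePartSize * 8;  // assume history selects 8 parts

static u64 RoundDown(u64 x, u64 align) { return x & ~(align - 1); }

int main() {
  u64 epoch   = 70000;                                  // arbitrary example epoch
  u64 parts   = kTraceSizeIll / kTracePartSize;
  u64 partidx = (epoch / kTracePartSize) % parts;       // which trace part to consult
  u64 eend    = epoch % kTraceSizeIll;                  // event index of the access
  u64 ebegin  = RoundDown(eend, kTracePartSize);        // first event of that part
  std::printf("partidx=%llu ebegin=%llu eend=%llu\n",
              (unsigned long long)partidx, (unsigned long long)ebegin,
              (unsigned long long)eend);
  return 0;
}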
libsanitizer/tsan/tsan_rtl_thread.cc
View file @
a0408454
...
@@ -96,6 +96,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
...
@@ -96,6 +96,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
ThreadContext
*
tctx
=
0
;
ThreadContext
*
tctx
=
0
;
if
(
ctx
->
dead_list_size
>
kThreadQuarantineSize
if
(
ctx
->
dead_list_size
>
kThreadQuarantineSize
||
ctx
->
thread_seq
>=
kMaxTid
)
{
||
ctx
->
thread_seq
>=
kMaxTid
)
{
// Reusing old thread descriptor and tid.
if
(
ctx
->
dead_list_size
==
0
)
{
if
(
ctx
->
dead_list_size
==
0
)
{
Printf
(
"ThreadSanitizer: %d thread limit exceeded. Dying.
\n
"
,
Printf
(
"ThreadSanitizer: %d thread limit exceeded. Dying.
\n
"
,
kMaxTid
);
kMaxTid
);
...
@@ -115,12 +116,18 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
...
@@ -115,12 +116,18 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
tctx
->
sync
.
Reset
();
tctx
->
sync
.
Reset
();
tid
=
tctx
->
tid
;
tid
=
tctx
->
tid
;
DestroyAndFree
(
tctx
->
dead_info
);
DestroyAndFree
(
tctx
->
dead_info
);
if
(
tctx
->
name
)
{
internal_free
(
tctx
->
name
);
tctx
->
name
=
0
;
}
}
else
{
}
else
{
// Allocating new thread descriptor and tid.
StatInc
(
thr
,
StatThreadMaxTid
);
StatInc
(
thr
,
StatThreadMaxTid
);
tid
=
ctx
->
thread_seq
++
;
tid
=
ctx
->
thread_seq
++
;
void
*
mem
=
internal_alloc
(
MBlockThreadContex
,
sizeof
(
ThreadContext
));
void
*
mem
=
internal_alloc
(
MBlockThreadContex
,
sizeof
(
ThreadContext
));
tctx
=
new
(
mem
)
ThreadContext
(
tid
);
tctx
=
new
(
mem
)
ThreadContext
(
tid
);
ctx
->
threads
[
tid
]
=
tctx
;
ctx
->
threads
[
tid
]
=
tctx
;
MapThreadTrace
(
GetThreadTrace
(
tid
),
TraceSize
()
*
sizeof
(
Event
));
}
}
CHECK_NE
(
tctx
,
0
);
CHECK_NE
(
tctx
,
0
);
CHECK_GE
(
tid
,
0
);
CHECK_GE
(
tid
,
0
);
...
@@ -141,12 +148,11 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
...
@@ -141,12 +148,11 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
if
(
tid
)
{
if
(
tid
)
{
thr
->
fast_state
.
IncrementEpoch
();
thr
->
fast_state
.
IncrementEpoch
();
// Can't increment epoch w/o writing to the trace as well.
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent
(
thr
,
thr
->
fast_state
.
epoch
()
,
EventTypeMop
,
0
);
TraceAddEvent
(
thr
,
thr
->
fast_state
,
EventTypeMop
,
0
);
thr
->
clock
.
set
(
thr
->
tid
,
thr
->
fast_state
.
epoch
());
thr
->
clock
.
set
(
thr
->
tid
,
thr
->
fast_state
.
epoch
());
thr
->
fast_synch_epoch
=
thr
->
fast_state
.
epoch
();
thr
->
fast_synch_epoch
=
thr
->
fast_state
.
epoch
();
thr
->
clock
.
release
(
&
tctx
->
sync
);
thr
->
clock
.
release
(
&
tctx
->
sync
);
StatInc
(
thr
,
StatSyncRelease
);
StatInc
(
thr
,
StatSyncRelease
);
tctx
->
creation_stack
.
ObtainCurrent
(
thr
,
pc
);
tctx
->
creation_stack
.
ObtainCurrent
(
thr
,
pc
);
}
}
return
tid
;
return
tid
;
...
@@ -185,7 +191,9 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
...
@@ -185,7 +191,9 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
CHECK_EQ
(
tctx
->
status
,
ThreadStatusCreated
);
CHECK_EQ
(
tctx
->
status
,
ThreadStatusCreated
);
tctx
->
status
=
ThreadStatusRunning
;
tctx
->
status
=
ThreadStatusRunning
;
tctx
->
os_id
=
os_id
;
tctx
->
os_id
=
os_id
;
tctx
->
epoch0
=
tctx
->
epoch1
+
1
;
// RoundUp so that one trace part does not contain events
// from different threads.
tctx
->
epoch0
=
RoundUp
(
tctx
->
epoch1
+
1
,
kTracePartSize
);
tctx
->
epoch1
=
(
u64
)
-
1
;
tctx
->
epoch1
=
(
u64
)
-
1
;
new
(
thr
)
ThreadState
(
CTX
(),
tid
,
tctx
->
unique_id
,
new
(
thr
)
ThreadState
(
CTX
(),
tid
,
tctx
->
unique_id
,
tctx
->
epoch0
,
stk_addr
,
stk_size
,
tctx
->
epoch0
,
stk_addr
,
stk_size
,
...
@@ -202,6 +210,9 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
...
@@ -202,6 +210,9 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
thr
->
fast_synch_epoch
=
tctx
->
epoch0
;
thr
->
fast_synch_epoch
=
tctx
->
epoch0
;
thr
->
clock
.
set
(
tid
,
tctx
->
epoch0
);
thr
->
clock
.
set
(
tid
,
tctx
->
epoch0
);
thr
->
clock
.
acquire
(
&
tctx
->
sync
);
thr
->
clock
.
acquire
(
&
tctx
->
sync
);
thr
->
fast_state
.
SetHistorySize
(
flags
()
->
history_size
);
const
uptr
trace
=
(
tctx
->
epoch0
/
kTracePartSize
)
%
TraceParts
();
thr
->
trace
.
headers
[
trace
].
epoch0
=
tctx
->
epoch0
;
StatInc
(
thr
,
StatSyncAcquire
);
StatInc
(
thr
,
StatSyncAcquire
);
DPrintf
(
"#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
DPrintf
(
"#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx
\n
"
,
"tls_addr=%zx tls_size=%zx
\n
"
,
...
@@ -236,7 +247,7 @@ void ThreadFinish(ThreadState *thr) {
...
@@ -236,7 +247,7 @@ void ThreadFinish(ThreadState *thr) {
}
else
{
}
else
{
thr
->
fast_state
.
IncrementEpoch
();
thr
->
fast_state
.
IncrementEpoch
();
// Can't increment epoch w/o writing to the trace as well.
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent
(
thr
,
thr
->
fast_state
.
epoch
()
,
EventTypeMop
,
0
);
TraceAddEvent
(
thr
,
thr
->
fast_state
,
EventTypeMop
,
0
);
thr
->
clock
.
set
(
thr
->
tid
,
thr
->
fast_state
.
epoch
());
thr
->
clock
.
set
(
thr
->
tid
,
thr
->
fast_state
.
epoch
());
thr
->
fast_synch_epoch
=
thr
->
fast_state
.
epoch
();
thr
->
fast_synch_epoch
=
thr
->
fast_state
.
epoch
();
thr
->
clock
.
release
(
&
tctx
->
sync
);
thr
->
clock
.
release
(
&
tctx
->
sync
);
...
@@ -247,9 +258,8 @@ void ThreadFinish(ThreadState *thr) {
  // Save from info about the thread.
  tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
      ThreadDeadInfo();
- internal_memcpy(&tctx->dead_info->trace.events[0],
-     &thr->trace.events[0], sizeof(thr->trace.events));
- for (int i = 0; i < kTraceParts; i++) {
+ for (uptr i = 0; i < TraceParts(); i++) {
+   tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
    tctx->dead_info->trace.headers[i].stack0.CopyFrom(
        thr->trace.headers[i].stack0);
  }
...
@@ -318,6 +328,20 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  }
}

+void ThreadSetName(ThreadState *thr, const char *name) {
+  Context *ctx = CTX();
+  Lock l(&ctx->thread_mtx);
+  ThreadContext *tctx = ctx->threads[thr->tid];
+  CHECK_NE(tctx, 0);
+  CHECK_EQ(tctx->status, ThreadStatusRunning);
+  if (tctx->name) {
+    internal_free(tctx->name);
+    tctx->name = 0;
+  }
+  if (name)
+    tctx->name = internal_strdup(name);
+}
+
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write) {
  if (size == 0)
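ThreadSetName stores a private copy of the caller-supplied name in the thread's context (releasing any earlier copy), which is what lets later reports refer to threads by name. A hedged sketch of a caller, assuming it is reached from an annotation-style entry point; the real entry point lives in tsan_interface_ann.cc and is not shown in this hunk:

// Illustrative caller only -- the function name and signature here are
// assumptions, not taken from this patch.
void ExampleAnnotateThreadName(const char *name) {
  ThreadState *thr = cur_thread();  // runtime state of the calling thread
  // ThreadSetName copies the string with internal_strdup, so the caller's
  // buffer does not need to outlive this call.
  ThreadSetName(thr, name);
}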
...
@@ -356,7 +380,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
  bool unaligned = (addr % kShadowCell) != 0;
...
libsanitizer/tsan/tsan_stat.cc
View file @
a0408454
...
@@ -98,6 +98,7 @@ void StatOutput(u64 *stat) {
  name[StatInt_longjmp]          = " longjmp ";
  name[StatInt_siglongjmp]       = " siglongjmp ";
  name[StatInt_malloc]           = " malloc ";
+ name[StatInt___libc_memalign]  = " __libc_memalign ";
  name[StatInt_calloc]           = " calloc ";
  name[StatInt_realloc]          = " realloc ";
  name[StatInt_free]             = " free ";
...
libsanitizer/tsan/tsan_stat.h
View file @
a0408454
...
@@ -95,6 +95,7 @@ enum StatType {
  StatInt_longjmp,
  StatInt_siglongjmp,
  StatInt_malloc,
+ StatInt___libc_memalign,
  StatInt_calloc,
  StatInt_realloc,
  StatInt_free,
...
libsanitizer/tsan/tsan_suppressions.cc
View file @
a0408454
...
@@ -25,7 +25,7 @@ static char *ReadFile(const char *filename) {
  if (filename == 0 || filename[0] == 0)
    return 0;
  InternalScopedBuffer<char> tmp(4*1024);
- if (filename[0] == '/')
+ if (filename[0] == '/' || GetPwd() == 0)
    internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
  else
    internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);
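The added GetPwd() == 0 test makes a relative suppressions path be used verbatim when the current working directory cannot be determined, rather than formatting "%s/%s" with a null pointer. A standalone sketch of the resulting selection logic (plain snprintf stands in for internal_snprintf):

#include <cstdio>
#include <cstddef>

// Illustrative only: mirrors the path handling in ReadFile() above.
static void BuildSuppressionPath(char *out, std::size_t n,
                                 const char *filename, const char *pwd) {
  if (filename[0] == '/' || pwd == 0)
    std::snprintf(out, n, "%s", filename);          // absolute path, or no cwd
  else
    std::snprintf(out, n, "%s/%s", pwd, filename);  // resolve relative to cwd
}

int main() {
  char buf[256];
  BuildSuppressionPath(buf, sizeof(buf), "supp.txt", 0);            // "supp.txt"
  BuildSuppressionPath(buf, sizeof(buf), "supp.txt", "/tmp");       // "/tmp/supp.txt"
  BuildSuppressionPath(buf, sizeof(buf), "/etc/supp.txt", "/tmp");  // absolute wins
  return 0;
}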
...
libsanitizer/tsan/tsan_symbolize.cc
View file @
a0408454
...
@@ -50,7 +50,7 @@ static ReportStack *NewReportStackEntry(const AddressInfo &info) {
}

ReportStack *SymbolizeCode(uptr addr) {
- if (0 != internal_strcmp(flags()->external_symbolizer_path, "")) {
+ if (flags()->external_symbolizer_path[0]) {
    static const uptr kMaxAddrFrames = 16;
    InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
    for (uptr i = 0; i < kMaxAddrFrames; i++)
...
@@ -77,6 +77,12 @@ ReportStack *SymbolizeCode(uptr addr) {
}

ReportStack *SymbolizeData(uptr addr) {
+ if (flags()->external_symbolizer_path[0]) {
+   AddressInfo frame;
+   if (!__sanitizer::SymbolizeData(addr, &frame))
+     return 0;
+   return NewReportStackEntry(frame);
+ }
  return SymbolizeDataAddr2Line(addr);
}
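SymbolizeData now takes the same route as SymbolizeCode: with a non-empty external_symbolizer_path flag, data addresses go through the shared __sanitizer::SymbolizeData interface, and addr2line remains only a fallback. The emptiness test in SymbolizeCode was also simplified from an internal_strcmp against "" to checking the first character; a standalone sketch confirming the two tests agree for any NUL-terminated string (the sample paths are arbitrary):

#include <cassert>
#include <cstring>

// Illustrative only: the old and new "is the flag non-empty?" checks.
static bool OldCheck(const char *s) { return 0 != std::strcmp(s, ""); }
static bool NewCheck(const char *s) { return s[0] != 0; }

int main() {
  const char *samples[] = {"", "/usr/bin/llvm-symbolizer", "x"};
  for (const char *s : samples)
    assert(OldCheck(s) == NewCheck(s));
  return 0;
}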
...
libsanitizer/tsan/tsan_trace.h
View file @
a0408454
...
@@ -17,12 +17,9 @@
namespace __tsan {

-const int kTracePartSizeBits = 14;
-const int kTracePartSize = 1 << kTracePartSizeBits;
-const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;
+#ifndef TSAN_HISTORY_SIZE // in kibitraces
+#define TSAN_HISTORY_SIZE 128
+#endif
+const int kTracePartSize = 16 * 1024;
+const int kTraceParts = TSAN_HISTORY_SIZE * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;

// Must fit into 3 bits.
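With TSAN_HISTORY_SIZE expressed in kibitraces (default 128) and kTracePartSize fixed at 16*1024 events, the derived constants work out to 8 trace parts of 16K events each, i.e. 128K events of per-thread history; the Trace struct also stops embedding the events array itself (see the struct Trace hunk below). A small standalone check of that arithmetic:

#include <cassert>

int main() {
  // Defaults from this hunk: TSAN_HISTORY_SIZE = 128 (kibitraces).
  const int kHistorySizeKib = 128;
  const int kTracePartSize  = 16 * 1024;
  const int kTraceParts     = kHistorySizeKib * 1024 / kTracePartSize;
  const int kTraceSize      = kTracePartSize * kTraceParts;
  assert(kTraceParts == 8);          // 128 * 1024 / 16384
  assert(kTraceSize == 128 * 1024);  // 131072 events of history per thread
  return 0;
}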
...
@@ -59,7 +56,6 @@ struct TraceHeader {
};

struct Trace {
- Event events[kTraceSize];
  TraceHeader headers[kTraceParts];
  Mutex mtx;
...