Commit e9772e16, authored and committed by Kostya Serebryany

libsanitizer merge from upstream r171973

From-SVN: r195083
parent e1f674e4
2013-01-10 Kostya Serebryany <kcc@google.com>
* g++.dg/asan/asan_test.cc: Sync from upstream.
2013-01-10  Jakub Jelinek  <jakub@redhat.com>
	PR tree-optimization/55921
......
2013-01-10 Kostya Serebryany <kcc@google.com>
* All source files: Merge from upstream r171973.
* sanitizer_common/Makefile.am: Added new files.
* asan/Makefile.am: Likewise.
* tsan/Makefile.am: Likewise.
* sanitizer_common/Makefile.in: Regenerated.
* asan/Makefile.in: Likewise.
* tsan/Makefile.in: Likewise.
2013-01-07  H.J. Lu  <hongjiu.lu@intel.com>
	* asan/Makefile.am (libasan_la_LIBADD): Replace
......
-169392
+171973
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
@@ -15,6 +15,7 @@ toolexeclib_LTLIBRARIES = libasan.la
 asan_files = \
 	asan_allocator.cc \
+	asan_allocator2.cc \
 	asan_interceptors.cc \
 	asan_mac.cc \
 	asan_malloc_mac.cc \
@@ -23,6 +24,7 @@ asan_files = \
 	asan_rtl.cc \
 	asan_stats.cc \
 	asan_thread_registry.cc \
+	asan_fake_stack.cc \
 	asan_globals.cc \
 	asan_linux.cc \
 	asan_malloc_linux.cc \
......
@@ -84,19 +84,20 @@ am__DEPENDENCIES_1 =
 @USING_MAC_INTERPOSE_FALSE@	$(am__DEPENDENCIES_1)
 @USING_MAC_INTERPOSE_TRUE@libasan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
 @USING_MAC_INTERPOSE_TRUE@	$(am__DEPENDENCIES_1)
-am__libasan_la_SOURCES_DIST = asan_allocator.cc asan_interceptors.cc \
-	asan_mac.cc asan_malloc_mac.cc asan_new_delete.cc \
-	asan_posix.cc asan_rtl.cc asan_stats.cc \
-	asan_thread_registry.cc asan_globals.cc asan_linux.cc \
-	asan_malloc_linux.cc asan_malloc_win.cc asan_poisoning.cc \
-	asan_report.cc asan_stack.cc asan_thread.cc asan_win.cc \
-	dynamic/asan_interceptors_dynamic.cc
+am__libasan_la_SOURCES_DIST = asan_allocator.cc asan_allocator2.cc \
+	asan_interceptors.cc asan_mac.cc asan_malloc_mac.cc \
+	asan_new_delete.cc asan_posix.cc asan_rtl.cc asan_stats.cc \
+	asan_thread_registry.cc asan_fake_stack.cc asan_globals.cc \
+	asan_linux.cc asan_malloc_linux.cc asan_malloc_win.cc \
+	asan_poisoning.cc asan_report.cc asan_stack.cc asan_thread.cc \
+	asan_win.cc dynamic/asan_interceptors_dynamic.cc
-am__objects_1 = asan_allocator.lo asan_interceptors.lo asan_mac.lo \
-	asan_malloc_mac.lo asan_new_delete.lo asan_posix.lo \
-	asan_rtl.lo asan_stats.lo asan_thread_registry.lo \
-	asan_globals.lo asan_linux.lo asan_malloc_linux.lo \
-	asan_malloc_win.lo asan_poisoning.lo asan_report.lo \
-	asan_stack.lo asan_thread.lo asan_win.lo
+am__objects_1 = asan_allocator.lo asan_allocator2.lo \
+	asan_interceptors.lo asan_mac.lo asan_malloc_mac.lo \
+	asan_new_delete.lo asan_posix.lo asan_rtl.lo asan_stats.lo \
+	asan_thread_registry.lo asan_fake_stack.lo asan_globals.lo \
+	asan_linux.lo asan_malloc_linux.lo asan_malloc_win.lo \
+	asan_poisoning.lo asan_report.lo asan_stack.lo asan_thread.lo \
+	asan_win.lo
 @USING_MAC_INTERPOSE_TRUE@am__objects_2 = \
 @USING_MAC_INTERPOSE_TRUE@	asan_interceptors_dynamic.lo
 am_libasan_la_OBJECTS = $(am__objects_1) $(am__objects_2)
@@ -269,6 +270,7 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
 toolexeclib_LTLIBRARIES = libasan.la
 asan_files = \
 	asan_allocator.cc \
+	asan_allocator2.cc \
 	asan_interceptors.cc \
 	asan_mac.cc \
 	asan_malloc_mac.cc \
@@ -277,6 +279,7 @@ asan_files = \
 	asan_rtl.cc \
 	asan_stats.cc \
 	asan_thread_registry.cc \
+	asan_fake_stack.cc \
 	asan_globals.cc \
 	asan_linux.cc \
 	asan_malloc_linux.cc \
@@ -409,6 +412,8 @@ distclean-compile:
 	-rm -f *.tab.c
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors_dynamic.Plo@am__quote@
......
@@ -15,9 +15,22 @@
 #include "asan_internal.h"
 #include "asan_interceptors.h"
+#include "sanitizer_common/sanitizer_list.h"
+
+// We are in the process of transitioning from the old allocator (version 1)
+// to a new one (version 2). The change is quite intrusive so both allocators
+// will co-exist in the source base for a while. The actual allocator is chosen
+// at build time by redefining this macro.
+#define ASAN_ALLOCATOR_VERSION 1
 
 namespace __asan {
 
+enum AllocType {
+  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
+  FROM_NEW = 2,     // Memory block came from operator new.
+  FROM_NEW_BR = 3   // Memory block came from operator new [ ]
+};
+
 static const uptr kNumberOfSizeClasses = 255;
 struct AsanChunk;
@@ -32,16 +45,40 @@ class AsanChunkView {
   uptr FreeTid();
   void GetAllocStack(StackTrace *stack);
   void GetFreeStack(StackTrace *stack);
-  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset);
-  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset);
-  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset);
+  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
+    if (addr >= Beg() && (addr + access_size) <= End()) {
+      *offset = addr - Beg();
+      return true;
+    }
+    return false;
+  }
+  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
+    (void)access_size;
+    if (addr < Beg()) {
+      *offset = Beg() - addr;
+      return true;
+    }
+    return false;
+  }
+  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
+    if (addr + access_size >= End()) {
+      if (addr <= End())
+        *offset = 0;
+      else
+        *offset = addr - End();
+      return true;
+    }
+    return false;
+  }
  private:
   AsanChunk *const chunk_;
 };
 
 AsanChunkView FindHeapChunkByAddress(uptr address);
 
-class AsanChunkFifoList {
+// List of AsanChunks with total size.
+class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
  public:
   explicit AsanChunkFifoList(LinkerInitialized) { }
   AsanChunkFifoList() { clear(); }
@@ -50,12 +87,10 @@ class AsanChunkFifoList {
   AsanChunk *Pop();
   uptr size() { return size_; }
   void clear() {
-    first_ = last_ = 0;
+    IntrusiveList<AsanChunk>::clear();
     size_ = 0;
   }
  private:
-  AsanChunk *first_;
-  AsanChunk *last_;
   uptr size_;
 };
@@ -68,7 +103,11 @@ struct AsanThreadLocalMallocStorage {
   }
   AsanChunkFifoList quarantine_;
+#if ASAN_ALLOCATOR_VERSION == 1
   AsanChunk *free_lists_[kNumberOfSizeClasses];
+#else
+  uptr allocator2_cache[1024];  // Opaque.
+#endif
   void CommitBack();
 };
@@ -156,8 +195,9 @@ class FakeStack {
   FakeFrameLifo call_stack_;
 };
 
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack);
-void asan_free(void *ptr, StackTrace *stack);
+void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+                    AllocType alloc_type);
+void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
 void *asan_malloc(uptr size, StackTrace *stack);
 void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
@@ -173,5 +213,52 @@ uptr asan_mz_size(const void *ptr);
 void asan_mz_force_lock();
 void asan_mz_force_unlock();
+void PrintInternalAllocatorStats();
+
+// Log2 and RoundUpToPowerOfTwo should be inlined for performance.
+#if defined(_WIN32) && !defined(__clang__)
+extern "C" {
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
+#if defined(_WIN64)
+unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
+unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
+#endif
+}
+#endif
+
+static inline uptr Log2(uptr x) {
+  CHECK(IsPowerOfTwo(x));
+#if !defined(_WIN32) || defined(__clang__)
+  return __builtin_ctzl(x);
+#elif defined(_WIN64)
+  unsigned long ret;  // NOLINT
+  _BitScanForward64(&ret, x);
+  return ret;
+#else
+  unsigned long ret;  // NOLINT
+  _BitScanForward(&ret, x);
+  return ret;
+#endif
+}
+
+static inline uptr RoundUpToPowerOfTwo(uptr size) {
+  CHECK(size);
+  if (IsPowerOfTwo(size)) return size;
+  unsigned long up;  // NOLINT
+#if !defined(_WIN32) || defined(__clang__)
+  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
+#elif defined(_WIN64)
+  _BitScanReverse64(&up, size);
+#else
+  _BitScanReverse(&up, size);
+#endif
+  CHECK(size < (1ULL << (up + 1)));
+  CHECK(size > (1ULL << up));
+  return 1UL << (up + 1);
+}
+
 }  // namespace __asan
 #endif  // ASAN_ALLOCATOR_H
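
The Log2 and RoundUpToPowerOfTwo helpers added above are ordinary power-of-two bit tricks. A minimal standalone sketch of the same logic (uptr spelled uintptr_t, GCC/Clang builtins only; the values in main are illustrative):

#include <cassert>
#include <cstdint>

// Same idea as the header above: count trailing zeros of a power of two
// to get its exponent.
static inline uintptr_t Log2(uintptr_t x) {
  return __builtin_ctzl(x);
}

static inline uintptr_t RoundUpToPowerOfTwo(uintptr_t size) {
  if ((size & (size - 1)) == 0) return size;  // already a power of two
  // Index of the highest set bit, then go one power above it.
  uintptr_t up = sizeof(uintptr_t) * 8 - 1 - __builtin_clzl(size);
  return 1UL << (up + 1);
}

int main() {
  assert(Log2(32) == 5);
  assert(RoundUpToPowerOfTwo(17) == 32);  // 17 rounds up to 2^5
  assert(RoundUpToPowerOfTwo(64) == 64);  // powers of two are unchanged
  return 0;
}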
//===-- asan_fake_stack.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
namespace __asan {
FakeStack::FakeStack() {
CHECK(REAL(memset) != 0);
REAL(memset)(this, 0, sizeof(*this));
}
bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
uptr mem = allocated_size_classes_[size_class];
uptr size = ClassMmapSize(size_class);
bool res = mem && addr >= mem && addr < mem + size;
return res;
}
uptr FakeStack::AddrIsInFakeStack(uptr addr) {
for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
}
return 0;
}
// We may want to compute this during compilation.
inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
uptr log = Log2(rounded_size);
CHECK(alloc_size <= (1UL << log));
if (!(alloc_size > (1UL << (log-1)))) {
Printf("alloc_size %zu log %zu\n", alloc_size, log);
}
CHECK(alloc_size > (1UL << (log-1)));
uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
CHECK(res < kNumberOfSizeClasses);
CHECK(ClassSize(res) >= rounded_size);
return res;
}
void FakeFrameFifo::FifoPush(FakeFrame *node) {
CHECK(node);
node->next = 0;
if (first_ == 0 && last_ == 0) {
first_ = last_ = node;
} else {
CHECK(first_);
CHECK(last_);
last_->next = node;
last_ = node;
}
}
FakeFrame *FakeFrameFifo::FifoPop() {
CHECK(first_ && last_ && "Exhausted fake stack");
FakeFrame *res = 0;
if (first_ == last_) {
res = first_;
first_ = last_ = 0;
} else {
res = first_;
first_ = first_->next;
}
return res;
}
void FakeStack::Init(uptr stack_size) {
stack_size_ = stack_size;
alive_ = true;
}
void FakeStack::Cleanup() {
alive_ = false;
for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
uptr mem = allocated_size_classes_[i];
if (mem) {
PoisonShadow(mem, ClassMmapSize(i), 0);
allocated_size_classes_[i] = 0;
UnmapOrDie((void*)mem, ClassMmapSize(i));
}
}
}
uptr FakeStack::ClassMmapSize(uptr size_class) {
return RoundUpToPowerOfTwo(stack_size_);
}
void FakeStack::AllocateOneSizeClass(uptr size_class) {
CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
uptr new_mem = (uptr)MmapOrDie(
ClassMmapSize(size_class), __FUNCTION__);
// Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
// asanThreadRegistry().GetCurrent()->tid(),
// size_class, new_mem, new_mem + ClassMmapSize(size_class),
// ClassMmapSize(size_class));
uptr i;
for (i = 0; i < ClassMmapSize(size_class);
i += ClassSize(size_class)) {
size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
}
CHECK(i == ClassMmapSize(size_class));
allocated_size_classes_[size_class] = new_mem;
}
uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
if (!alive_) return real_stack;
CHECK(size <= kMaxStackMallocSize && size > 1);
uptr size_class = ComputeSizeClass(size);
if (!allocated_size_classes_[size_class]) {
AllocateOneSizeClass(size_class);
}
FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
CHECK(fake_frame);
fake_frame->size_minus_one = size - 1;
fake_frame->real_stack = real_stack;
while (FakeFrame *top = call_stack_.top()) {
if (top->real_stack > real_stack) break;
call_stack_.LifoPop();
DeallocateFrame(top);
}
call_stack_.LifoPush(fake_frame);
uptr ptr = (uptr)fake_frame;
PoisonShadow(ptr, size, 0);
return ptr;
}
void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
CHECK(alive_);
uptr size = fake_frame->size_minus_one + 1;
uptr size_class = ComputeSizeClass(size);
CHECK(allocated_size_classes_[size_class]);
uptr ptr = (uptr)fake_frame;
CHECK(AddrIsInSizeClass(ptr, size_class));
CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
size_classes_[size_class].FifoPush(fake_frame);
}
void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
CHECK(fake_frame->descr != 0);
CHECK(fake_frame->size_minus_one == size - 1);
PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
uptr __asan_stack_malloc(uptr size, uptr real_stack) {
if (!flags()->use_fake_stack) return real_stack;
AsanThread *t = asanThreadRegistry().GetCurrent();
if (!t) {
// TSD is gone, use the real stack.
return real_stack;
}
uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
// Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
return ptr;
}
void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
if (!flags()->use_fake_stack) return;
if (ptr != real_stack) {
FakeStack::OnFree(ptr, size, real_stack);
}
}
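
The fake stack above exists to catch use-after-return: __asan_stack_malloc hands out a frame from an mmapped size class instead of the real stack, and __asan_stack_free poisons it, so a later access through an escaped frame address trips in the shadow. A minimal reproducer of the bug class (assumes an ASan-instrumented build with use_fake_stack on; in a plain build this is merely undefined behavior):

#include <cstdio>

// Classic use-after-return: buf lives in f's frame and its address escapes.
static char *f() {
  static char *escaped;
  char buf[16];
  buf[0] = 'x';
  escaped = buf;  // launder through a static so the compiler can't elide it
  return escaped;
}

int main() {
  char *p = f();
  printf("%c\n", *p);  // reads a retired fake-stack frame
  return 0;
}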
@@ -43,7 +43,7 @@ struct Flags {
   int report_globals;
   // If set, attempts to catch initialization order issues.
   bool check_initialization_order;
-  // Max number of stack frames kept for each allocation.
+  // Max number of stack frames kept for each allocation/deallocation.
   int malloc_context_size;
   // If set, uses custom wrappers and replacements for libc string functions
   // to find more errors.
@@ -93,6 +93,17 @@ struct Flags {
   bool print_full_thread_history;
   // ASan will write logs to "log_path.pid" instead of stderr.
   const char *log_path;
+  // Use fast (frame-pointer-based) unwinder on fatal errors (if available).
+  bool fast_unwind_on_fatal;
+  // Use fast (frame-pointer-based) unwinder on malloc/free (if available).
+  bool fast_unwind_on_malloc;
+  // Poison (or not) the heap memory on [de]allocation. Zero value is useful
+  // for benchmarking the allocator or instrumentator.
+  bool poison_heap;
+  // Report errors on malloc/delete, new/free, new/delete[], etc.
+  bool alloc_dealloc_mismatch;
+  // Use stack depot instead of storing stacks in the redzones.
+  bool use_stack_depot;
 };
 Flags *flags();
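
These new flags are parsed by ParseFlagsFromString in the asan_rtl.cc hunk at the end of this diff, which also consults the weak __asan_default_options hook, so defaults can be baked into a binary in addition to the ASAN_OPTIONS environment variable. A minimal sketch (the flag values chosen here are just an example):

// Weak hook consulted at startup; see MaybeCallAsanDefaultOptions in the
// asan_rtl.cc hunk below. Same colon-separated syntax as ASAN_OPTIONS.
extern "C" const char *__asan_default_options() {
  return "poison_heap=1:use_stack_depot=1:alloc_dealloc_mismatch=1";
}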
......
@@ -14,6 +14,7 @@
 #include "asan_internal.h"
 #include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
 
 using __sanitizer::uptr;
@@ -39,8 +40,10 @@ using __sanitizer::uptr;
 #if defined(__linux__)
 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
+# define ASAN_INTERCEPT_PRCTL 1
 #else
 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
+# define ASAN_INTERCEPT_PRCTL 0
 #endif
 
 #if !defined(__APPLE__)
@@ -149,10 +152,23 @@ DECLARE_FUNCTION_AND_WRAPPER(long long, atoll, const char *nptr);  // NOLINT
 DECLARE_FUNCTION_AND_WRAPPER(long long, strtoll, const char *nptr, char **endptr, int base);  // NOLINT
 # endif
 
+// unistd.h
+# if SANITIZER_INTERCEPT_READ
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, read, int fd, void *buf, SIZE_T count);
+# endif
+# if SANITIZER_INTERCEPT_PREAD
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread, int fd, void *buf,
+                             SIZE_T count, OFF_T offset);
+# endif
+# if SANITIZER_INTERCEPT_PREAD64
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread64, int fd, void *buf,
+                             SIZE_T count, OFF64_T offset);
+# endif
+
 # if ASAN_INTERCEPT_MLOCKX
 // mlock/munlock
-DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, size_t len);
-DECLARE_FUNCTION_AND_WRAPPER(int, munlock, const void *addr, size_t len);
+DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, SIZE_T len);
+DECLARE_FUNCTION_AND_WRAPPER(int, munlock, const void *addr, SIZE_T len);
 DECLARE_FUNCTION_AND_WRAPPER(int, mlockall, int flags);
 DECLARE_FUNCTION_AND_WRAPPER(int, munlockall, void);
 # endif
......
@@ -25,38 +25,20 @@
 namespace __asan {
 
-// Instruments read/write access to a single byte in memory.
-// On error calls __asan_report_error, which aborts the program.
-#define ACCESS_ADDRESS(address, isWrite) do { \
-  if (!AddrIsInMem(address) || AddressIsPoisoned(address)) { \
-    GET_CURRENT_PC_BP_SP; \
-    __asan_report_error(pc, bp, sp, address, isWrite, /* access_size */ 1); \
-  } \
-} while (0)
-
 // We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
 // and ASAN_WRITE_RANGE as macro instead of function so
 // that no extra frames are created, and stack trace contains
 // relevant information only.
-
-// Instruments read/write access to a memory range.
-// More complex implementation is possible, for now just
-// checking the first and the last byte of a range.
-#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
-  if (size > 0) { \
-    uptr ptr = (uptr)(offset); \
-    ACCESS_ADDRESS(ptr, isWrite); \
-    ACCESS_ADDRESS(ptr + (size) - 1, isWrite); \
-  } \
-} while (0)
-
-#define ASAN_READ_RANGE(offset, size) do { \
-  ACCESS_MEMORY_RANGE(offset, size, false); \
-} while (0)
-
-#define ASAN_WRITE_RANGE(offset, size) do { \
-  ACCESS_MEMORY_RANGE(offset, size, true); \
-} while (0)
+// We check all shadow bytes.
+#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
+  if (uptr __ptr = __asan_region_is_poisoned((uptr)(offset), size)) { \
+    GET_CURRENT_PC_BP_SP; \
+    __asan_report_error(pc, bp, sp, __ptr, isWrite, /* access_size */1); \
+  } \
+} while (0)
+
+#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false)
+#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true);
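
The rewritten ACCESS_MEMORY_RANGE checks every shadow byte of the range via __asan_region_is_poisoned (defined in the asan_poisoning.cc hunk below), where the old version only probed the first and last byte. A sketch of a bug only the whole-range check catches (assumes an ASan build; __asan_poison_memory_region is the public manual-poisoning interface from sanitizer/asan_interface.h):

#include <cstring>
#include <sanitizer/asan_interface.h>

int main() {
  char src[32], dst[32];
  memset(src, 0, sizeof(src));
  // Poison 8 bytes in the middle: the first and last bytes stay valid,
  // so the old first/last-byte probe would have missed this access.
  __asan_poison_memory_region(src + 8, 8);
  memcpy(dst, src, sizeof(src));  // the new whole-range check reports this
  return 0;
}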
 
 // Behavior of functions like "memcpy" or "strcpy" is undefined
 // if memory intervals overlap. We report error in this case.
@@ -69,7 +51,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
   const char *offset1 = (const char*)_offset1; \
   const char *offset2 = (const char*)_offset2; \
   if (RangesOverlap(offset1, length1, offset2, length2)) { \
-    GET_STACK_TRACE_HERE(kStackTraceMax); \
+    GET_STACK_TRACE_FATAL_HERE; \
     ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
                                             offset2, length2, &stack); \
   } \
@@ -96,6 +78,11 @@ static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
 // ---------------------- Wrappers ---------------- {{{1
 using namespace __asan;  // NOLINT
 
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) ASAN_WRITE_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) ASAN_READ_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(func, ...) ENSURE_ASAN_INITED()
+#include "sanitizer_common/sanitizer_common_interceptors.h"
+
 static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
   AsanThread *t = (AsanThread*)arg;
   asanThreadRegistry().SetCurrent(t);
@@ -105,7 +92,7 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
 #if ASAN_INTERCEPT_PTHREAD_CREATE
 INTERCEPTOR(int, pthread_create, void *thread,
             void *attr, void *(*start_routine)(void*), void *arg) {
-  GET_STACK_TRACE_HERE(kStackTraceMax);
+  GET_STACK_TRACE_THREAD;
   u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
   AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
   asanThreadRegistry().RegisterThread(t);
@@ -175,6 +162,25 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
 }
 #endif
 
+#if ASAN_INTERCEPT_PRCTL
+#define PR_SET_NAME 15
+INTERCEPTOR(int, prctl, int option,
+            unsigned long arg2, unsigned long arg3,   // NOLINT
+            unsigned long arg4, unsigned long arg5) { // NOLINT
+  int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
+  if (option == PR_SET_NAME) {
+    AsanThread *t = asanThreadRegistry().GetCurrent();
+    if (t) {
+      char buff[17];
+      internal_strncpy(buff, (char*)arg2, 16);
+      buff[16] = 0;
+      t->summary()->set_name(buff);
+    }
+  }
+  return res;
+}
+#endif
+
 #if ASAN_INTERCEPT___CXA_THROW
 INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
   CHECK(REAL(__cxa_throw));
@@ -256,8 +262,8 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
     // See http://llvm.org/bugs/show_bug.cgi?id=11763.
     CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
   }
-  ASAN_WRITE_RANGE(from, size);
-  ASAN_READ_RANGE(to, size);
+  ASAN_READ_RANGE(from, size);
+  ASAN_WRITE_RANGE(to, size);
 }
 #if MAC_INTERPOSE_FUNCTIONS
 // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8.
@@ -275,8 +281,8 @@ INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
   }
   ENSURE_ASAN_INITED();
   if (flags()->replace_intrin) {
-    ASAN_WRITE_RANGE(from, size);
-    ASAN_READ_RANGE(to, size);
+    ASAN_READ_RANGE(from, size);
+    ASAN_WRITE_RANGE(to, size);
   }
 #if MAC_INTERPOSE_FUNCTIONS
 // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8.
@@ -621,7 +627,7 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
                    void* security, uptr stack_size,
                    DWORD (__stdcall *start_routine)(void*), void* arg,
                    DWORD flags, void* tid) {
-  GET_STACK_TRACE_HERE(kStackTraceMax);
+  GET_STACK_TRACE_THREAD;
   u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
   AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
   asanThreadRegistry().RegisterThread(t);
@@ -646,6 +652,9 @@ void InitializeAsanInterceptors() {
 #if MAC_INTERPOSE_FUNCTIONS
   return;
 #endif
+
+  SANITIZER_COMMON_INTERCEPTORS_INIT;
+
   // Intercept mem* functions.
   ASAN_INTERCEPT_FUNC(memcmp);
   ASAN_INTERCEPT_FUNC(memmove);
@@ -718,6 +727,9 @@ void InitializeAsanInterceptors() {
 #if ASAN_INTERCEPT_SIGLONGJMP
   ASAN_INTERCEPT_FUNC(siglongjmp);
 #endif
+#if ASAN_INTERCEPT_PRCTL
+  ASAN_INTERCEPT_FUNC(prctl);
+#endif
 
   // Intercept exception handling functions.
 #if ASAN_INTERCEPT___CXA_THROW
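
The new prctl interceptor records PR_SET_NAME so that ASan thread reports carry the name a thread gave itself (at most 16 bytes, per the interceptor above). A minimal sketch of the call it intercepts (Linux, compile with -pthread):

#include <pthread.h>
#include <sys/prctl.h>

static void *worker(void *) {
  // Under ASan, the interceptor copies this name into the thread summary,
  // so it appears in error reports that mention this thread.
  prctl(PR_SET_NAME, "my-worker");
  return nullptr;
}

int main() {
  pthread_t t;
  pthread_create(&t, nullptr, worker, nullptr);
  pthread_join(t, nullptr);
  return 0;
}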
......
@@ -81,9 +81,9 @@
 // If set, values like allocator chunk size, as well as defaults for some flags
 // will be changed towards less memory overhead.
 #ifndef ASAN_LOW_MEMORY
-# ifdef ASAN_ANDROID
+#if SANITIZER_WORDSIZE == 32
 #  define ASAN_LOW_MEMORY 1
-# else
+#else
 #  define ASAN_LOW_MEMORY 0
 # endif
 #endif
@@ -143,6 +143,15 @@ bool PlatformHasDifferentMemcpyAndMemmove();
 # define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
 #endif  // __APPLE__
 
+// Add convenient macro for interface functions that may be represented as
+// weak hooks.
+#define ASAN_MALLOC_HOOK(ptr, size) \
+  if (&__asan_malloc_hook) __asan_malloc_hook(ptr, size)
+#define ASAN_FREE_HOOK(ptr) \
+  if (&__asan_free_hook) __asan_free_hook(ptr)
+#define ASAN_ON_ERROR() \
+  if (&__asan_on_error) __asan_on_error()
+
 extern int asan_inited;
 // Used to avoid infinite recursion in __asan_init().
 extern bool asan_init_is_running;
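
The hook macros above fire only if the corresponding weak interface symbol is actually defined, so user code can observe every allocation and deallocation by providing the hooks. A sketch (assumes an ASan build; signatures follow sanitizer/asan_interface.h, with uptr written as size_t):

#include <cstddef>
#include <cstdio>

// Weak interface symbols tested by ASAN_MALLOC_HOOK / ASAN_FREE_HOOK above.
extern "C" void __asan_malloc_hook(void *ptr, size_t size) {
  fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
}
extern "C" void __asan_free_hook(void *ptr) {
  fprintf(stderr, "free  %p\n", ptr);
}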
......
@@ -120,53 +120,21 @@ void AsanLock::Unlock() {
   pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
 }
 
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_NO_REASON
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#ifdef __arm__
-  uptr val;
-  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
-      15 /* r15 = PC */, _UVRSD_UINT32, &val);
-  CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
-  // Clear the Thumb bit.
-  return val & ~(uptr)1;
-#else
-  return _Unwind_GetIP(ctx);
-#endif
-}
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx,
-    void *param) {
-  StackTrace *b = (StackTrace*)param;
-  CHECK(b->size < b->max_size);
-  uptr pc = Unwind_GetIP(ctx);
-  b->trace[b->size++] = pc;
-  if (b->size == b->max_size) return UNWIND_STOP;
-  return UNWIND_CONTINUE;
-}
-
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
+#if defined(__arm__) || \
+    defined(__powerpc__) || defined(__powerpc64__) || \
+    defined(__sparc__)
+  fast = false;
+#endif
+  if (!fast)
+    return stack->SlowUnwindStack(pc, max_s);
   stack->size = 0;
   stack->trace[0] = pc;
-  if ((max_s) > 1) {
+  if (max_s > 1) {
     stack->max_size = max_s;
-#if defined(__arm__) || \
-    defined(__powerpc__) || defined(__powerpc64__) || \
-    defined(__sparc__)
-    _Unwind_Backtrace(Unwind_Trace, stack);
-    // Pop off the two ASAN functions from the backtrace.
-    stack->PopStackFrames(2);
-#else
     if (!asan_inited) return;
     if (AsanThread *t = asanThreadRegistry().GetCurrent())
       stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom());
-#endif
   }
 }
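
GetStackTrace now takes a fast flag (wired to the new fast_unwind_on_fatal and fast_unwind_on_malloc flags) and falls back to SlowUnwindStack where frame pointers can't be trusted. A conceptual sketch of what frame-pointer unwinding amounts to; this is not the runtime's exact code, and it assumes the usual x86-style layout where the saved frame pointer sits at *bp and the return address just above it:

#include <cstdint>

// Each frame record links to the caller's record and holds the return address.
struct FrameRecord {
  FrameRecord *prev;  // saved frame pointer of the caller
  void *ret;          // return address into the caller
};

static int FastUnwind(uintptr_t bp, uintptr_t stack_top, uintptr_t stack_bottom,
                      void **trace, int max_frames) {
  int n = 0;
  FrameRecord *frame = (FrameRecord *)bp;
  // Stay inside the thread's stack bounds, as FastUnwindStack does.
  while (n < max_frames &&
         (uintptr_t)frame > stack_bottom && (uintptr_t)frame < stack_top) {
    trace[n++] = frame->ret;
    if (frame->prev <= frame) break;  // frames must move toward stack_top
    frame = frame->prev;
  }
  return n;
}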
......
@@ -158,7 +158,8 @@ void AsanLock::Unlock() {
   OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
 }
 
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
+  (void)fast;
   stack->size = 0;
   stack->trace[0] = pc;
   if ((max_s) > 1) {
@@ -306,7 +307,7 @@ void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
 // alloc_asan_context().
 extern "C"
 void asan_dispatch_call_block_and_release(void *block) {
-  GET_STACK_TRACE_HERE(kStackTraceMax);
+  GET_STACK_TRACE_THREAD;
   asan_block_context_t *context = (asan_block_context_t*)block;
   if (flags()->verbosity >= 2) {
     Report("asan_dispatch_call_block_and_release(): "
@@ -316,7 +317,7 @@ void asan_dispatch_call_block_and_release(void *block) {
   asan_register_worker_thread(context->parent_tid, &stack);
   // Call the original dispatcher for the block.
   context->func(context->block);
-  asan_free(context, &stack);
+  asan_free(context, &stack, FROM_MALLOC);
 }
 
 }  // namespace __asan
@@ -341,7 +342,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
 #define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
 INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
                                 dispatch_function_t func) { \
-  GET_STACK_TRACE_HERE(kStackTraceMax); \
+  GET_STACK_TRACE_THREAD; \
   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
   if (flags()->verbosity >= 2) { \
     Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
@@ -359,7 +360,7 @@ INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
 INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
             dispatch_queue_t dq, void *ctxt,
             dispatch_function_t func) {
-  GET_STACK_TRACE_HERE(kStackTraceMax);
+  GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
   if (flags()->verbosity >= 2) {
     Report("dispatch_after_f: %p\n", asan_ctxt);
@@ -372,7 +373,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
 INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
             dispatch_queue_t dq, void *ctxt,
             dispatch_function_t func) {
-  GET_STACK_TRACE_HERE(kStackTraceMax);
+  GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
   if (flags()->verbosity >= 2) {
     Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
@@ -407,7 +408,7 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
   void (^asan_block)(void); \
   int parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); \
   asan_block = ^(void) { \
-    GET_STACK_TRACE_HERE(kStackTraceMax); \
+    GET_STACK_TRACE_THREAD; \
     asan_register_worker_thread(parent_tid, &stack); \
     work(); \
   }
@@ -457,15 +458,15 @@ void *wrap_workitem_func(void *arg) {
   asan_block_context_t *ctxt = (asan_block_context_t*)arg;
   worker_t fn = (worker_t)(ctxt->func);
   void *result = fn(ctxt->block);
-  GET_STACK_TRACE_HERE(kStackTraceMax);
-  asan_free(arg, &stack);
+  GET_STACK_TRACE_THREAD;
+  asan_free(arg, &stack, FROM_MALLOC);
   return result;
 }
 
 INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
             void *(*workitem_func)(void *), void * workitem_arg,
             pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
-  GET_STACK_TRACE_HERE(kStackTraceMax);
+  GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt =
       (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
   asan_ctxt->block = workitem_arg;
......
@@ -17,6 +17,8 @@
 #include "asan_interceptors.h"
 #include "asan_internal.h"
 #include "asan_stack.h"
+#include "asan_thread_registry.h"
+#include "sanitizer/asan_interface.h"
 
 #if ASAN_ANDROID
 DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size)
@@ -57,17 +59,17 @@ void ReplaceSystemMalloc() {
 using namespace __asan;  // NOLINT
 
 INTERCEPTOR(void, free, void *ptr) {
-  GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-  asan_free(ptr, &stack);
+  GET_STACK_TRACE_FREE;
+  asan_free(ptr, &stack, FROM_MALLOC);
 }
 
 INTERCEPTOR(void, cfree, void *ptr) {
-  GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-  asan_free(ptr, &stack);
+  GET_STACK_TRACE_FREE;
+  asan_free(ptr, &stack, FROM_MALLOC);
 }
 
 INTERCEPTOR(void*, malloc, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc(size, &stack);
 }
@@ -83,25 +85,25 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
     CHECK(allocated < kCallocPoolSize);
     return mem;
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_calloc(nmemb, size, &stack);
 }
 
 INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_realloc(ptr, size, &stack);
 }
 
 INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
-  return asan_memalign(boundary, size, &stack);
+  GET_STACK_TRACE_MALLOC;
+  return asan_memalign(boundary, size, &stack, FROM_MALLOC);
 }
 
 INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
   ALIAS("memalign");
 
 INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc_usable_size(ptr, &stack);
 }
@@ -124,19 +126,23 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
 }
 
 INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   // Printf("posix_memalign: %zx %zu\n", alignment, size);
   return asan_posix_memalign(memptr, alignment, size, &stack);
 }
 
 INTERCEPTOR(void*, valloc, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_valloc(size, &stack);
 }
 
 INTERCEPTOR(void*, pvalloc, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_pvalloc(size, &stack);
 }
 
+INTERCEPTOR(void, malloc_stats, void) {
+  __asan_print_accumulated_stats();
+}
+
 #endif  // __linux__
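
The new malloc_stats interceptor routes glibc's malloc_stats() to __asan_print_accumulated_stats, so an ordinary libc call becomes a cheap way to dump the runtime's allocation statistics. A sketch (Linux-only, assumes an ASan build):

#include <malloc.h>
#include <cstdlib>

int main() {
  void *p = malloc(128);
  free(p);
  // Under ASan this hits the interceptor above and prints accumulated
  // allocator statistics instead of glibc's own report.
  malloc_stats();
  return 0;
}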
@@ -90,8 +90,8 @@ INTERCEPTOR(void, free, void *ptr) {
 #endif
   } else {
     if (!asan_mz_size(ptr)) ptr = get_saved_cfallocator_ref(ptr);
-    GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-    asan_free(ptr, &stack);
+    GET_STACK_TRACE_FREE;
+    asan_free(ptr, &stack, FROM_MALLOC);
   }
 }
@@ -128,7 +128,7 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
     CHECK(system_malloc_zone);
     return malloc_zone_malloc(system_malloc_zone, size);
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc(size, &stack);
 }
@@ -137,7 +137,7 @@ void *cf_malloc(CFIndex size, CFOptionFlags hint, void *info) {
     CHECK(system_malloc_zone);
     return malloc_zone_malloc(system_malloc_zone, size);
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc(size, &stack);
 }
@@ -153,7 +153,7 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
     CHECK(allocated < kCallocPoolSize);
     return mem;
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_calloc(nmemb, size, &stack);
 }
@@ -162,8 +162,8 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
     CHECK(system_malloc_zone);
     return malloc_zone_valloc(system_malloc_zone, size);
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
-  return asan_memalign(GetPageSizeCached(), size, &stack);
+  GET_STACK_TRACE_MALLOC;
+  return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
 }
 
 #define GET_ZONE_FOR_PTR(ptr) \
@@ -173,8 +173,8 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
 void ALWAYS_INLINE free_common(void *context, void *ptr) {
   if (!ptr) return;
   if (asan_mz_size(ptr)) {
-    GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-    asan_free(ptr, &stack);
+    GET_STACK_TRACE_FREE;
+    asan_free(ptr, &stack, FROM_MALLOC);
   } else {
     // If the pointer does not belong to any of the zones, use one of the
     // fallback methods to free memory.
@@ -188,9 +188,9 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) {
       // If the memory chunk pointer was moved to store additional
       // CFAllocatorRef, fix it back.
       ptr = get_saved_cfallocator_ref(ptr);
-      GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+      GET_STACK_TRACE_FREE;
       if (!flags()->mac_ignore_invalid_free) {
-        asan_free(ptr, &stack);
+        asan_free(ptr, &stack, FROM_MALLOC);
       } else {
         GET_ZONE_FOR_PTR(ptr);
         WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
@@ -211,17 +211,17 @@ void cf_free(void *ptr, void *info) {
 void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
   if (!ptr) {
-    GET_STACK_TRACE_HERE_FOR_MALLOC;
+    GET_STACK_TRACE_MALLOC;
     return asan_malloc(size, &stack);
   } else {
     if (asan_mz_size(ptr)) {
-      GET_STACK_TRACE_HERE_FOR_MALLOC;
+      GET_STACK_TRACE_MALLOC;
       return asan_realloc(ptr, size, &stack);
     } else {
       // We can't recover from reallocating an unknown address, because
       // this would require reading at most |size| bytes from
       // potentially unaccessible memory.
-      GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+      GET_STACK_TRACE_FREE;
       GET_ZONE_FOR_PTR(ptr);
       ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
     }
@@ -230,17 +230,17 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
 void *cf_realloc(void *ptr, CFIndex size, CFOptionFlags hint, void *info) {
   if (!ptr) {
-    GET_STACK_TRACE_HERE_FOR_MALLOC;
+    GET_STACK_TRACE_MALLOC;
     return asan_malloc(size, &stack);
   } else {
     if (asan_mz_size(ptr)) {
-      GET_STACK_TRACE_HERE_FOR_MALLOC;
+      GET_STACK_TRACE_MALLOC;
       return asan_realloc(ptr, size, &stack);
     } else {
       // We can't recover from reallocating an unknown address, because
       // this would require reading at most |size| bytes from
      // potentially unaccessible memory.
-      GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+      GET_STACK_TRACE_FREE;
       GET_ZONE_FOR_PTR(ptr);
       ReportMacCfReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
     }
@@ -259,8 +259,8 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
     CHECK(system_malloc_zone);
     return malloc_zone_memalign(system_malloc_zone, align, size);
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
-  return asan_memalign(align, size, &stack);
+  GET_STACK_TRACE_MALLOC;
+  return asan_memalign(align, size, &stack, FROM_MALLOC);
 }
 
 // This function is currently unused, and we build with -Werror.
......
@@ -29,8 +29,8 @@ using namespace __asan;  // NOLINT
 extern "C" {
 
 void free(void *ptr) {
-  GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-  return asan_free(ptr, &stack);
+  GET_STACK_TRACE_FREE;
+  return asan_free(ptr, &stack, FROM_MALLOC);
 }
 
 void _free_dbg(void* ptr, int) {
@@ -42,7 +42,7 @@ void cfree(void *ptr) {
 }
 
 void *malloc(size_t size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc(size, &stack);
 }
@@ -51,7 +51,7 @@ void* _malloc_dbg(size_t size, int , const char*, int) {
 }
 
 void *calloc(size_t nmemb, size_t size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_calloc(nmemb, size, &stack);
 }
@@ -64,7 +64,7 @@ void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
 }
 
 void *realloc(void *ptr, size_t size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_realloc(ptr, size, &stack);
 }
@@ -83,7 +83,7 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
 }
 
 size_t _msize(void *ptr) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc_usable_size(ptr, &stack);
 }
......
@@ -18,8 +18,8 @@
 // http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
 
 #if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
-extern __attribute__((visibility("default"))) uptr __asan_mapping_scale;
-extern __attribute__((visibility("default"))) uptr __asan_mapping_offset;
+extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
+extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
 # define SHADOW_SCALE (__asan_mapping_scale)
 # define SHADOW_OFFSET (__asan_mapping_offset)
 #else
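
SHADOW_SCALE and SHADOW_OFFSET define ASan's standard shadow mapping, Shadow = (Mem >> scale) + offset, with one shadow byte describing 8 application bytes at the default scale of 3 (this is the algorithm documented at the wiki URL above). A sketch with an illustrative offset; the real values come from asan_mapping.h and are platform-specific:

#include <cstdint>
#include <cstdio>

// Illustrative only: real SHADOW_SCALE/SHADOW_OFFSET differ per platform.
constexpr uintptr_t kShadowScale = 3;
constexpr uintptr_t kShadowOffset = 0x7fff8000;

static uintptr_t MemToShadow(uintptr_t addr) {
  return (addr >> kShadowScale) + kShadowOffset;
}

int main() {
  uintptr_t addr = 0x100000;
  printf("shadow(%#zx) = %#zx\n", (size_t)addr, (size_t)MemToShadow(addr));
  return 0;
}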
......
@@ -33,32 +33,34 @@ namespace std {
 struct nothrow_t {};
 }  // namespace std
 
-#define OPERATOR_NEW_BODY \
-  GET_STACK_TRACE_HERE_FOR_MALLOC;\
-  return asan_memalign(0, size, &stack);
+#define OPERATOR_NEW_BODY(type) \
+  GET_STACK_TRACE_MALLOC;\
+  return asan_memalign(0, size, &stack, type);
 
 INTERCEPTOR_ATTRIBUTE
-void *operator new(size_t size) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
 INTERCEPTOR_ATTRIBUTE
-void *operator new[](size_t size) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); }
 INTERCEPTOR_ATTRIBUTE
-void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FROM_NEW); }
 INTERCEPTOR_ATTRIBUTE
-void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
 
-#define OPERATOR_DELETE_BODY \
-  GET_STACK_TRACE_HERE_FOR_FREE(ptr);\
-  asan_free(ptr, &stack);
+#define OPERATOR_DELETE_BODY(type) \
+  GET_STACK_TRACE_FREE;\
+  asan_free(ptr, &stack, type);
 
 INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
 INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
 INTERCEPTOR_ATTRIBUTE
 void operator delete(void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY; }
+{ OPERATOR_DELETE_BODY(FROM_NEW); }
 INTERCEPTOR_ATTRIBUTE
 void operator delete[](void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY; }
+{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
 
 #endif
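
Threading an AllocType through OPERATOR_NEW_BODY and OPERATOR_DELETE_BODY is what lets the runtime pair each deallocation with its allocator and report mismatches when the new alloc_dealloc_mismatch flag is on. The canonical bug it flags:

int main() {
  int *p = new int[4];  // recorded as FROM_NEW_BR
  delete p;             // scalar delete is FROM_NEW: mismatch reported
  return 0;
}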
@@ -14,10 +14,12 @@
 #include "asan_internal.h"
 #include "asan_mapping.h"
 #include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_libc.h"
 
 namespace __asan {
 
 void PoisonShadow(uptr addr, uptr size, u8 value) {
+  if (!flags()->poison_heap) return;
   CHECK(AddrIsAlignedByGranularity(addr));
   CHECK(AddrIsAlignedByGranularity(addr + size));
   uptr shadow_beg = MemToShadow(addr);
@@ -30,6 +32,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
                                      uptr size,
                                      uptr redzone_size,
                                      u8 value) {
+  if (!flags()->poison_heap) return;
   CHECK(AddrIsAlignedByGranularity(addr));
   u8 *shadow = (u8*)MemToShadow(addr);
   for (uptr i = 0; i < redzone_size;
@@ -150,6 +153,33 @@ bool __asan_address_is_poisoned(void const volatile *addr) {
   return __asan::AddressIsPoisoned((uptr)addr);
 }
 
+uptr __asan_region_is_poisoned(uptr beg, uptr size) {
+  if (!size) return 0;
+  uptr end = beg + size;
+  if (!AddrIsInMem(beg)) return beg;
+  if (!AddrIsInMem(end)) return end;
+  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
+  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
+  uptr shadow_beg = MemToShadow(aligned_b);
+  uptr shadow_end = MemToShadow(aligned_e);
+  // First check the first and the last application bytes,
+  // then check the SHADOW_GRANULARITY-aligned region by calling
+  // mem_is_zero on the corresponding shadow.
+  if (!__asan::AddressIsPoisoned(beg) &&
+      !__asan::AddressIsPoisoned(end - 1) &&
+      (shadow_end <= shadow_beg ||
+       __sanitizer::mem_is_zero((const char *)shadow_beg,
+                                shadow_end - shadow_beg)))
+    return 0;
+  // The fast check failed, so we have a poisoned byte somewhere.
+  // Find it slowly.
+  for (; beg < end; beg++)
+    if (__asan::AddressIsPoisoned(beg))
+      return beg;
+  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
+  return 0;
+}
+
 // This is a simplified version of __asan_(un)poison_memory_region, which
 // assumes that left border of region to be poisoned is properly aligned.
 static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
@@ -166,7 +196,7 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
     // If possible, mark all the bytes mapping to last shadow byte as
     // unaddressable.
     if (end_value > 0 && end_value <= end_offset)
-      *shadow_end = kAsanStackUseAfterScopeMagic;
+      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
   } else {
     // If necessary, mark few first bytes mapping to last shadow byte
     // as addressable
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
// ASan-private header for error reporting functions. // ASan-private header for error reporting functions.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_thread.h" #include "asan_thread.h"
#include "sanitizer/asan_interface.h" #include "sanitizer/asan_interface.h"
...@@ -32,6 +33,9 @@ void DescribeThread(AsanThreadSummary *summary); ...@@ -32,6 +33,9 @@ void DescribeThread(AsanThreadSummary *summary);
void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr); void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
void NORETURN ReportDoubleFree(uptr addr, StackTrace *stack); void NORETURN ReportDoubleFree(uptr addr, StackTrace *stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *stack); void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
AllocType alloc_type,
AllocType dealloc_type);
void NORETURN ReportMallocUsableSizeNotOwned(uptr addr, void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
StackTrace *stack); StackTrace *stack);
void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr, void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,
......
...@@ -52,7 +52,7 @@ static void AsanCheckFailed(const char *file, int line, const char *cond, ...@@ -52,7 +52,7 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
file, line, cond, (uptr)v1, (uptr)v2); file, line, cond, (uptr)v1, (uptr)v2);
// FIXME: check for infinite recursion without a thread-local counter here. // FIXME: check for infinite recursion without a thread-local counter here.
PRINT_CURRENT_STACK(); PRINT_CURRENT_STACK();
ShowStatsAndAbort(); Die();
} }
// -------------------------- Flags ------------------------- {{{1 // -------------------------- Flags ------------------------- {{{1
...@@ -64,6 +64,10 @@ Flags *flags() { ...@@ -64,6 +64,10 @@ Flags *flags() {
return &asan_flags; return &asan_flags;
} }
static const char *MaybeCallAsanDefaultOptions() {
  return (&__asan_default_options) ? __asan_default_options() : "";
}
static void ParseFlagsFromString(Flags *f, const char *str) { static void ParseFlagsFromString(Flags *f, const char *str) {
ParseFlag(str, &f->quarantine_size, "quarantine_size"); ParseFlag(str, &f->quarantine_size, "quarantine_size");
ParseFlag(str, &f->symbolize, "symbolize"); ParseFlag(str, &f->symbolize, "symbolize");
...@@ -98,21 +102,20 @@ static void ParseFlagsFromString(Flags *f, const char *str) { ...@@ -98,21 +102,20 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
ParseFlag(str, &f->allow_reexec, "allow_reexec"); ParseFlag(str, &f->allow_reexec, "allow_reexec");
ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history"); ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
ParseFlag(str, &f->log_path, "log_path"); ParseFlag(str, &f->log_path, "log_path");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
ParseFlag(str, &f->poison_heap, "poison_heap");
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
} }
extern "C" {
SANITIZER_WEAK_ATTRIBUTE
SANITIZER_INTERFACE_ATTRIBUTE
const char* __asan_default_options() { return ""; }
} // extern "C"
void InitializeFlags(Flags *f, const char *env) { void InitializeFlags(Flags *f, const char *env) {
internal_memset(f, 0, sizeof(*f)); internal_memset(f, 0, sizeof(*f));
f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28; f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
f->symbolize = false; f->symbolize = false;
f->verbosity = 0; f->verbosity = 0;
f->redzone = (ASAN_LOW_MEMORY) ? 64 : 128; f->redzone = ASAN_ALLOCATOR_VERSION == 2 ? 16 : (ASAN_LOW_MEMORY) ? 64 : 128;
f->debug = false; f->debug = false;
f->report_globals = 1; f->report_globals = 1;
f->check_initialization_order = true; f->check_initialization_order = true;
...@@ -137,12 +140,17 @@ void InitializeFlags(Flags *f, const char *env) { ...@@ -137,12 +140,17 @@ void InitializeFlags(Flags *f, const char *env) {
f->allow_reexec = true; f->allow_reexec = true;
f->print_full_thread_history = true; f->print_full_thread_history = true;
f->log_path = 0; f->log_path = 0;
f->fast_unwind_on_fatal = true;
f->fast_unwind_on_malloc = true;
f->poison_heap = true;
f->alloc_dealloc_mismatch = true;
f->use_stack_depot = true; // Only affects allocator2.
// Override from user-specified string. // Override from user-specified string.
ParseFlagsFromString(f, __asan_default_options()); ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
if (flags()->verbosity) { if (flags()->verbosity) {
Report("Using the defaults from __asan_default_options: %s\n", Report("Using the defaults from __asan_default_options: %s\n",
__asan_default_options()); MaybeCallAsanDefaultOptions());
} }
// Override from command line. // Override from command line.
...@@ -239,15 +247,12 @@ static NOINLINE void force_interface_symbols() { ...@@ -239,15 +247,12 @@ static NOINLINE void force_interface_symbols() {
case 27: __asan_set_error_exit_code(0); break; case 27: __asan_set_error_exit_code(0); break;
case 28: __asan_stack_free(0, 0, 0); break; case 28: __asan_stack_free(0, 0, 0); break;
case 29: __asan_stack_malloc(0, 0); break; case 29: __asan_stack_malloc(0, 0); break;
case 30: __asan_on_error(); break; case 30: __asan_before_dynamic_init(0, 0); break;
case 31: __asan_default_options(); break; case 31: __asan_after_dynamic_init(); break;
case 32: __asan_before_dynamic_init(0, 0); break; case 32: __asan_poison_stack_memory(0, 0); break;
case 33: __asan_after_dynamic_init(); break; case 33: __asan_unpoison_stack_memory(0, 0); break;
case 34: __asan_malloc_hook(0, 0); break; case 34: __asan_region_is_poisoned(0, 0); break;
case 35: __asan_free_hook(0); break; case 35: __asan_describe_address(0); break;
case 36: __asan_symbolize(0, 0, 0); break;
case 37: __asan_poison_stack_memory(0, 0); break;
case 38: __asan_unpoison_stack_memory(0, 0); break;
} }
} }
...@@ -261,6 +266,13 @@ static void asan_atexit() { ...@@ -261,6 +266,13 @@ static void asan_atexit() {
// ---------------------- Interface ---------------- {{{1 // ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT using namespace __asan; // NOLINT
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
const char* __asan_default_options() { return ""; }
} // extern "C"
#endif
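The weak hook above exists so that an instrumented program can bake in its own defaults. A hedged user-side sketch, using flag names from ParseFlagsFromString in this same file (the ASAN_OPTIONS environment variable is parsed afterwards and can still override these):

// Sketch: program-supplied default options via the weak hook.
extern "C" const char *__asan_default_options() {
  return "verbosity=1:poison_heap=0:fast_unwind_on_malloc=0";
}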
int NOINLINE __asan_set_error_exit_code(int exit_code) { int NOINLINE __asan_set_error_exit_code(int exit_code) {
int old = flags()->exitcode; int old = flags()->exitcode;
flags()->exitcode = exit_code; flags()->exitcode = exit_code;
......
...@@ -15,9 +15,15 @@ ...@@ -15,9 +15,15 @@
namespace __asan { namespace __asan {
static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
int out_size) {
return (&__asan_symbolize) ? __asan_symbolize(pc, out_buffer, out_size)
: false;
}
void PrintStack(StackTrace *stack) { void PrintStack(StackTrace *stack) {
stack->PrintStack(stack->trace, stack->size, flags()->symbolize, stack->PrintStack(stack->trace, stack->size, flags()->symbolize,
flags()->strip_path_prefix, __asan_symbolize); flags()->strip_path_prefix, MaybeCallAsanSymbolize);
} }
} // namespace __asan } // namespace __asan
...@@ -27,7 +33,7 @@ void PrintStack(StackTrace *stack) { ...@@ -27,7 +33,7 @@ void PrintStack(StackTrace *stack) {
// Provide default implementation of __asan_symbolize that does nothing // Provide default implementation of __asan_symbolize that does nothing
// and may be overridden by the user to supply custom symbolization. // and may be overridden by the user to supply custom symbolization.
// ASan on Windows has its own implementation of this. // ASan on Windows has its own implementation of this.
#ifndef _WIN32 #if !defined(_WIN32) && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) { bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
return false; return false;
......
...@@ -13,10 +13,11 @@ ...@@ -13,10 +13,11 @@
#define ASAN_STACK_H #define ASAN_STACK_H
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#include "asan_flags.h"
namespace __asan { namespace __asan {
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp); void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast);
void PrintStack(StackTrace *stack); void PrintStack(StackTrace *stack);
} // namespace __asan } // namespace __asan
...@@ -25,27 +26,38 @@ void PrintStack(StackTrace *stack); ...@@ -25,27 +26,38 @@ void PrintStack(StackTrace *stack);
// The pc will be in the position 0 of the resulting stack trace. // The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame. // The bp may refer to the current frame or to the caller's frame.
// fast_unwind is currently unused. // fast_unwind is currently unused.
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp) \ #define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \ StackTrace stack; \
GetStackTrace(&stack, max_s, pc, bp) GetStackTrace(&stack, max_s, pc, bp, fast)
// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors // NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally // as early as possible (in functions exposed to the user), as we generally
// don't want stack trace to contain functions from ASan internals. // don't want stack trace to contain functions from ASan internals.
#define GET_STACK_TRACE_HERE(max_size) \ #define GET_STACK_TRACE(max_size, fast) \
GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \ GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)
#define GET_STACK_TRACE_HERE_FOR_MALLOC \ #define GET_STACK_TRACE_FATAL(pc, bp) \
GET_STACK_TRACE_HERE(flags()->malloc_context_size) GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_HERE_FOR_FREE(ptr) \ #define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE_HERE(flags()->malloc_context_size) GET_STACK_TRACE(kStackTraceMax, flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(flags()->malloc_context_size, \
flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
#define PRINT_CURRENT_STACK() \ #define PRINT_CURRENT_STACK() \
{ \ { \
GET_STACK_TRACE_HERE(kStackTraceMax); \ GET_STACK_TRACE(kStackTraceMax, \
flags()->fast_unwind_on_fatal); \
PrintStack(&stack); \ PrintStack(&stack); \
} }
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include "asan_stats.h" #include "asan_stats.h"
#include "asan_thread_registry.h" #include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h" #include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan { namespace __asan {
...@@ -40,8 +41,9 @@ void AsanStats::Print() { ...@@ -40,8 +41,9 @@ void AsanStats::Print() {
Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees); Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
Printf("Stats: %zuM really freed by %zu calls\n", Printf("Stats: %zuM really freed by %zu calls\n",
really_freed>>20, real_frees); really_freed>>20, real_frees);
Printf("Stats: %zuM (%zu full pages) mmaped in %zu calls\n", Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
mmaped>>20, mmaped / GetPageSizeCached(), mmaps); (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
mmaps, munmaps);
PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size); PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size); PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
...@@ -59,6 +61,10 @@ static void PrintAccumulatedStats() { ...@@ -59,6 +61,10 @@ static void PrintAccumulatedStats() {
// Use lock to keep reports from mixing up. // Use lock to keep reports from mixing up.
ScopedLock lock(&print_lock); ScopedLock lock(&print_lock);
stats.Print(); stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats();
Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
PrintInternalAllocatorStats();
} }
} // namespace __asan } // namespace __asan
......
...@@ -35,6 +35,8 @@ struct AsanStats { ...@@ -35,6 +35,8 @@ struct AsanStats {
uptr realloced; uptr realloced;
uptr mmaps; uptr mmaps;
uptr mmaped; uptr mmaped;
uptr munmaps;
uptr munmaped;
uptr mmaped_by_size[kNumberOfSizeClasses]; uptr mmaped_by_size[kNumberOfSizeClasses];
uptr malloced_by_size[kNumberOfSizeClasses]; uptr malloced_by_size[kNumberOfSizeClasses];
uptr freed_by_size[kNumberOfSizeClasses]; uptr freed_by_size[kNumberOfSizeClasses];
......
...@@ -37,6 +37,7 @@ class AsanThreadSummary { ...@@ -37,6 +37,7 @@ class AsanThreadSummary {
internal_memcpy(&stack_, stack, sizeof(*stack)); internal_memcpy(&stack_, stack, sizeof(*stack));
} }
thread_ = 0; thread_ = 0;
name_[0] = 0;
} }
u32 tid() { return tid_; } u32 tid() { return tid_; }
void set_tid(u32 tid) { tid_ = tid; } void set_tid(u32 tid) { tid_ = tid; }
...@@ -47,6 +48,10 @@ class AsanThreadSummary { ...@@ -47,6 +48,10 @@ class AsanThreadSummary {
AsanThread *thread() { return thread_; } AsanThread *thread() { return thread_; }
void set_thread(AsanThread *thread) { thread_ = thread; } void set_thread(AsanThread *thread) { thread_ = thread; }
static void TSDDtor(void *tsd); static void TSDDtor(void *tsd);
void set_name(const char *name) {
internal_strncpy(name_, name, sizeof(name_) - 1);
}
const char *name() { return name_; }
private: private:
u32 tid_; u32 tid_;
...@@ -54,8 +59,12 @@ class AsanThreadSummary { ...@@ -54,8 +59,12 @@ class AsanThreadSummary {
bool announced_; bool announced_;
StackTrace stack_; StackTrace stack_;
AsanThread *thread_; AsanThread *thread_;
char name_[128];
}; };
// AsanThreadSummary objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadSummary) <= 4094);
// AsanThread are stored in TSD and destroyed when the thread dies. // AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread { class AsanThread {
public: public:
......
...@@ -121,13 +121,14 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() { ...@@ -121,13 +121,14 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
uptr AsanThreadRegistry::GetHeapSize() { uptr AsanThreadRegistry::GetHeapSize() {
ScopedLock lock(&mu_); ScopedLock lock(&mu_);
UpdateAccumulatedStatsUnlocked(); UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.mmaped; return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
} }
uptr AsanThreadRegistry::GetFreeBytes() { uptr AsanThreadRegistry::GetFreeBytes() {
ScopedLock lock(&mu_); ScopedLock lock(&mu_);
UpdateAccumulatedStatsUnlocked(); UpdateAccumulatedStatsUnlocked();
uptr total_free = accumulated_stats_.mmaped uptr total_free = accumulated_stats_.mmaped
- accumulated_stats_.munmaped
+ accumulated_stats_.really_freed + accumulated_stats_.really_freed
+ accumulated_stats_.really_freed_redzones; + accumulated_stats_.really_freed_redzones;
uptr total_used = accumulated_stats_.malloced uptr total_used = accumulated_stats_.malloced
......
...@@ -30,7 +30,8 @@ static AsanLock dbghelp_lock(LINKER_INITIALIZED); ...@@ -30,7 +30,8 @@ static AsanLock dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false; static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib") #pragma comment(lib, "dbghelp.lib")
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) { void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
(void)fast;
stack->max_size = max_s; stack->max_size = max_s;
void *tmp[kStackTraceMax]; void *tmp[kStackTraceMax];
......
...@@ -115,6 +115,15 @@ extern "C" { ...@@ -115,6 +115,15 @@ extern "C" {
bool __asan_address_is_poisoned(void const volatile *addr) bool __asan_address_is_poisoned(void const volatile *addr)
SANITIZER_INTERFACE_ATTRIBUTE; SANITIZER_INTERFACE_ATTRIBUTE;
  // If at least one byte in [beg, beg+size) is poisoned, return the address
// of the first such byte. Otherwise return 0.
uptr __asan_region_is_poisoned(uptr beg, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
// Print the description of addr (useful when debugging in gdb).
void __asan_describe_address(uptr addr)
SANITIZER_INTERFACE_ATTRIBUTE;
// This is an internal function that is called to report an error. // This is an internal function that is called to report an error.
// However it is still a part of the interface because users may want to // However it is still a part of the interface because users may want to
// set a breakpoint on this function in a debugger. // set a breakpoint on this function in a debugger.
...@@ -138,7 +147,7 @@ extern "C" { ...@@ -138,7 +147,7 @@ extern "C" {
// User may provide function that would be called right when ASan detects // User may provide function that would be called right when ASan detects
// an error. This can be used to notice cases when ASan detects an error, but // an error. This can be used to notice cases when ASan detects an error, but
// the program crashes before ASan report is printed. // the program crashes before ASan report is printed.
void __asan_on_error() /* OPTIONAL */ void __asan_on_error()
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
// User may provide its own implementation for symbolization function. // User may provide its own implementation for symbolization function.
...@@ -146,7 +155,8 @@ extern "C" { ...@@ -146,7 +155,8 @@ extern "C" {
// "out_buffer". Description should be at most "out_size" bytes long. // "out_buffer". Description should be at most "out_size" bytes long.
// User-specified function should return true if symbolization was // User-specified function should return true if symbolization was
// successful. // successful.
bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) /* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
int out_size)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
// Returns the estimated number of bytes that will be reserved by allocator // Returns the estimated number of bytes that will be reserved by allocator
...@@ -186,20 +196,19 @@ extern "C" { ...@@ -186,20 +196,19 @@ extern "C" {
void __asan_print_accumulated_stats() void __asan_print_accumulated_stats()
SANITIZER_INTERFACE_ATTRIBUTE; SANITIZER_INTERFACE_ATTRIBUTE;
// This function may be overriden by user to provide a string containing // This function may be optionally provided by user and should return
// ASan runtime options. See asan_flags.h for details. // a string containing ASan runtime options. See asan_flags.h for details.
const char* __asan_default_options() /* OPTIONAL */ const char* __asan_default_options()
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
// Malloc hooks that may be overriden by user. // Malloc hooks that may be optionally provided by user.
// __asan_malloc_hook(ptr, size) is called immediately after // __asan_malloc_hook(ptr, size) is called immediately after
// allocation of "size" bytes, which returned "ptr". // allocation of "size" bytes, which returned "ptr".
// __asan_free_hook(ptr) is called immediately before // __asan_free_hook(ptr) is called immediately before
// deallocation of "ptr". // deallocation of "ptr".
// If user doesn't provide implementations of these hooks, they are no-op. /* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size)
void __asan_malloc_hook(void *ptr, uptr size)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_free_hook(void *ptr) /* OPTIONAL */ void __asan_free_hook(void *ptr)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
} // extern "C" } // extern "C"
......
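A user-side sketch of the optional malloc hooks declared above; the counters are illustrative, and unsigned long stands in for uptr (the runtime resolves these weak symbols at run time if the program defines them):

// Sketch: user-defined malloc/free hooks (illustrative counters).
static unsigned long g_mallocs, g_frees;

extern "C" void __asan_malloc_hook(void *ptr, unsigned long size) {
  (void)ptr; (void)size;
  g_mallocs++;  // runs right after the allocator returns ptr
}

extern "C" void __asan_free_hook(void *ptr) {
  (void)ptr;
  g_frees++;    // runs right before ptr is deallocated
}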
...@@ -28,6 +28,12 @@ ...@@ -28,6 +28,12 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak)) # define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif #endif
#ifdef __linux__
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
// __has_feature // __has_feature
#if !defined(__has_feature) #if !defined(__has_feature)
# define __has_feature(x) 0 # define __has_feature(x) 0
...@@ -73,6 +79,12 @@ extern "C" { ...@@ -73,6 +79,12 @@ extern "C" {
// stderr. // stderr.
void __sanitizer_set_report_fd(int fd) void __sanitizer_set_report_fd(int fd)
SANITIZER_INTERFACE_ATTRIBUTE; SANITIZER_INTERFACE_ATTRIBUTE;
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
void __sanitizer_sandbox_on_notify(void *reserved)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
} // extern "C" } // extern "C"
#endif // SANITIZER_COMMON_INTERFACE_DEFS_H #endif // SANITIZER_COMMON_INTERFACE_DEFS_H
...@@ -17,6 +17,15 @@ ...@@ -17,6 +17,15 @@
# error "Interception doesn't work on this operating system." # error "Interception doesn't work on this operating system."
#endif #endif
#include "sanitizer/common_interface_defs.h"
// These typedefs should be used only in the interceptor definitions to replace
// the standard system types (e.g. SSIZE_T instead of ssize_t)
typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T;
typedef __sanitizer::u64 OFF_T;
typedef __sanitizer::u64 OFF64_T;
// How to use this library: // How to use this library:
// 1) Include this header to define your own interceptors // 1) Include this header to define your own interceptors
// (see details below). // (see details below).
......
...@@ -22,6 +22,7 @@ sanitizer_common_files = \ ...@@ -22,6 +22,7 @@ sanitizer_common_files = \
sanitizer_stackdepot.cc \ sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \ sanitizer_stacktrace.cc \
sanitizer_symbolizer.cc \ sanitizer_symbolizer.cc \
sanitizer_symbolizer_itanium.cc \
sanitizer_symbolizer_linux.cc \ sanitizer_symbolizer_linux.cc \
sanitizer_symbolizer_mac.cc \ sanitizer_symbolizer_mac.cc \
sanitizer_symbolizer_win.cc \ sanitizer_symbolizer_win.cc \
......
...@@ -59,9 +59,9 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \ ...@@ -59,9 +59,9 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_flags.lo sanitizer_libc.lo sanitizer_linux.lo \ sanitizer_flags.lo sanitizer_libc.lo sanitizer_linux.lo \
sanitizer_mac.lo sanitizer_posix.lo sanitizer_printf.lo \ sanitizer_mac.lo sanitizer_posix.lo sanitizer_printf.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \ sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
sanitizer_symbolizer.lo sanitizer_symbolizer_linux.lo \ sanitizer_symbolizer.lo sanitizer_symbolizer_itanium.lo \
sanitizer_symbolizer_mac.lo sanitizer_symbolizer_win.lo \ sanitizer_symbolizer_linux.lo sanitizer_symbolizer_mac.lo \
sanitizer_win.lo sanitizer_symbolizer_win.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1) am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS) libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@ DEFAULT_INCLUDES = -I.@am__isrc@
...@@ -236,6 +236,7 @@ sanitizer_common_files = \ ...@@ -236,6 +236,7 @@ sanitizer_common_files = \
sanitizer_stackdepot.cc \ sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \ sanitizer_stacktrace.cc \
sanitizer_symbolizer.cc \ sanitizer_symbolizer.cc \
sanitizer_symbolizer_itanium.cc \
sanitizer_symbolizer_linux.cc \ sanitizer_symbolizer_linux.cc \
sanitizer_symbolizer_mac.cc \ sanitizer_symbolizer_mac.cc \
sanitizer_symbolizer_win.cc \ sanitizer_symbolizer_win.cc \
...@@ -345,6 +346,7 @@ distclean-compile: ...@@ -345,6 +346,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_itanium.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_linux.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
......
...@@ -22,9 +22,31 @@ extern "C" void _mm_pause(); ...@@ -22,9 +22,31 @@ extern "C" void _mm_pause();
extern "C" long _InterlockedExchangeAdd( // NOLINT extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd) #pragma intrinsic(_InterlockedExchangeAdd)
extern "C" void *InterlockedCompareExchangePointer(
#ifdef _WIN64
extern "C" void *_InterlockedCompareExchangePointer(
void *volatile *Destination, void *volatile *Destination,
void *Exchange, void *Comparand); void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
#else
// There's no _InterlockedCompareExchangePointer intrinsic on x86,
// so call _InterlockedCompareExchange instead.
extern "C"
long __cdecl _InterlockedCompareExchange( // NOLINT
long volatile *Destination, // NOLINT
long Exchange, long Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange)
inline static void *_InterlockedCompareExchangePointer(
void *volatile *Destination,
void *Exchange, void *Comparand) {
return reinterpret_cast<void*>(
_InterlockedCompareExchange(
reinterpret_cast<long volatile*>(Destination), // NOLINT
reinterpret_cast<long>(Exchange), // NOLINT
reinterpret_cast<long>(Comparand))); // NOLINT
}
#endif
namespace __sanitizer { namespace __sanitizer {
...@@ -113,7 +135,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, ...@@ -113,7 +135,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr xchg, uptr xchg,
memory_order mo) { memory_order mo) {
uptr cmpv = *cmp; uptr cmpv = *cmp;
uptr prev = (uptr)InterlockedCompareExchangePointer( uptr prev = (uptr)_InterlockedCompareExchangePointer(
(void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv); (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
if (prev == cmpv) if (prev == cmpv)
return true; return true;
......
...@@ -153,6 +153,27 @@ void SortArray(uptr *array, uptr size) { ...@@ -153,6 +153,27 @@ void SortArray(uptr *array, uptr size) {
} }
} }
// We want to map a chunk of address space aligned to 'alignment'.
// We do it by mapping a bit more and then unmapping the redundant pieces.
// We could probably do it with fewer syscalls in some OS-dependent way.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
// uptr PageSize = GetPageSizeCached();
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
uptr map_end = map_res + map_size;
uptr res = map_res;
if (res & (alignment - 1)) // Not aligned.
res = (map_res + alignment) & ~(alignment - 1);
uptr end = res + size;
if (res != map_res)
UnmapOrDie((void*)map_res, res - map_res);
if (end != map_end)
UnmapOrDie((void*)end, map_end - end);
return (void*)res;
}
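The over-map-and-trim arithmetic in MmapAlignedOrDie is easy to exercise in isolation. A standalone sketch using raw Linux mmap/munmap in place of the runtime's MmapOrDie/UnmapOrDie wrappers (illustrative, not from this commit):

// Standalone sketch of the over-map-and-trim scheme (Linux mmap/munmap).
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>

void *mmap_aligned(size_t size, size_t alignment) {
  // Both size and alignment are assumed to be powers of two, as above.
  size_t map_size = size + alignment;            // over-allocate by 'alignment'
  uintptr_t map_res = (uintptr_t)mmap(0, map_size, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(map_res != (uintptr_t)MAP_FAILED);
  uintptr_t map_end = map_res + map_size;
  uintptr_t res = map_res;
  if (res & (alignment - 1))                     // not aligned: round up
    res = (map_res + alignment) & ~(uintptr_t)(alignment - 1);
  uintptr_t end = res + size;
  if (res != map_res)
    munmap((void *)map_res, res - map_res);      // trim unaligned head
  if (end != map_end)
    munmap((void *)end, map_end - end);          // trim leftover tail
  return (void *)res;
}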
} // namespace __sanitizer } // namespace __sanitizer
using namespace __sanitizer; // NOLINT using namespace __sanitizer; // NOLINT
...@@ -178,4 +199,9 @@ void __sanitizer_set_report_fd(int fd) { ...@@ -178,4 +199,9 @@ void __sanitizer_set_report_fd(int fd) {
internal_close(report_fd); internal_close(report_fd);
report_fd = fd; report_fd = fd;
} }
void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
(void)reserved;
PrepareForSandboxing();
}
} // extern "C" } // extern "C"
...@@ -42,9 +42,13 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, ...@@ -42,9 +42,13 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
void *MmapOrDie(uptr size, const char *mem_type); void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size); void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size); void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *Mprotect(uptr fixed_addr, uptr size); void *Mprotect(uptr fixed_addr, uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Used to check if we can map shadow memory to a fixed location. // Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end); bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
// Internal allocator // Internal allocator
void *InternalAlloc(uptr size); void *InternalAlloc(uptr size);
...@@ -119,6 +123,7 @@ const char *GetPwd(); ...@@ -119,6 +123,7 @@ const char *GetPwd();
void ReExec(); void ReExec();
bool StackSizeIsUnlimited(); bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit); void SetStackSizeLimitInBytes(uptr limit);
void PrepareForSandboxing();
// Other // Other
void SleepForSeconds(int seconds); void SleepForSeconds(int seconds);
...@@ -133,6 +138,13 @@ void NORETURN Die(); ...@@ -133,6 +138,13 @@ void NORETURN Die();
void NORETURN SANITIZER_INTERFACE_ATTRIBUTE void NORETURN SANITIZER_INTERFACE_ATTRIBUTE
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
bool SanitizerSetThreadName(const char *name);
// Get the name of the current thread (no more than max_len bytes),
// return true on success. name should have space for at least max_len+1 bytes.
bool SanitizerGetThreadName(char *name, int max_len);
// Specific tools may override behavior of "Die" and "CheckFailed" functions // Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job. // to do tool-specific job.
void SetDieCallback(void (*callback)(void)); void SetDieCallback(void (*callback)(void));
...@@ -148,6 +160,12 @@ INLINE uptr RoundUpTo(uptr size, uptr boundary) { ...@@ -148,6 +160,12 @@ INLINE uptr RoundUpTo(uptr size, uptr boundary) {
CHECK(IsPowerOfTwo(boundary)); CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1); return (size + boundary - 1) & ~(boundary - 1);
} }
INLINE uptr RoundDownTo(uptr x, uptr boundary) {
return x & ~(boundary - 1);
}
INLINE bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
// Don't use std::min, std::max or std::swap, to minimize dependency // Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++. // on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; } template<class T> T Min(T a, T b) { return a < b ? a : b; }
......
//===-- sanitizer_common_interceptors.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// This file should be included into the tool's interceptor file,
// which has to define its own macros:
// COMMON_INTERCEPTOR_ENTER
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_INTERCEPTORS_H
#define SANITIZER_COMMON_INTERCEPTORS_H
#include "interception/interception.h"
#include "sanitizer_platform_interceptors.h"
#if SANITIZER_INTERCEPT_READ
INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
COMMON_INTERCEPTOR_ENTER(read, fd, ptr, count);
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_PREAD
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
COMMON_INTERCEPTOR_ENTER(pread, fd, ptr, count, offset);
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_PREAD64
INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
COMMON_INTERCEPTOR_ENTER(pread64, fd, ptr, count, offset);
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_READ
# define INIT_READ INTERCEPT_FUNCTION(read)
#else
# define INIT_READ
#endif
#if SANITIZER_INTERCEPT_PREAD
# define INIT_PREAD INTERCEPT_FUNCTION(pread)
#else
# define INIT_PREAD
#endif
#if SANITIZER_INTERCEPT_PREAD64
# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
#else
# define INIT_PREAD64
#endif
#define SANITIZER_COMMON_INTERCEPTORS_INIT \
INIT_READ; \
INIT_PREAD; \
  INIT_PREAD64;
#endif // SANITIZER_COMMON_INTERCEPTORS_H
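A hedged sketch of the tool side of this header: a hypothetical tool defines the three macros named in the comment block and then includes the header. The macro bodies and the InitializeCommonInterceptors wrapper are placeholders, not part of this commit:

// Hypothetical tool-side glue (sketch): define the macros, then include.
#define COMMON_INTERCEPTOR_ENTER(func, ...) \
  do { /* e.g. ensure the tool runtime is initialized */ } while (0)
#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) \
  do { /* e.g. check that [ptr, ptr+size) is addressable */ } while (0)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) \
  do { /* e.g. mark [ptr, ptr+size) as written/initialized */ } while (0)
#include "sanitizer_common_interceptors.h"

void InitializeCommonInterceptors() {  // hypothetical init entry point
  SANITIZER_COMMON_INTERCEPTORS_INIT;  // expands to INIT_READ; INIT_PREAD; ...
}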
...@@ -203,4 +203,23 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) { ...@@ -203,4 +203,23 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
} }
} }
bool mem_is_zero(const char *beg, uptr size) {
CHECK_LE(size, 1UL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
const char *end = beg + size;
uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
uptr all = 0;
// Prologue.
for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)
all |= *mem;
// Aligned loop.
for (; aligned_beg < aligned_end; aligned_beg++)
all |= *aligned_beg;
// Epilogue.
if ((char*)aligned_end >= beg)
for (const char *mem = (char*)aligned_end; mem < end; mem++)
all |= *mem;
return all == 0;
}
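The three-phase scan above (byte-wise prologue, word-wide middle, byte-wise epilogue) can be checked standalone. A sketch with uintptr_t standing in for uptr, mirroring the logic including the aligned_end >= beg guard (word loads as in the runtime, strict-aliasing caveats aside):

#include <cassert>
#include <cstddef>
#include <cstdint>

bool mem_is_zero_sketch(const char *beg, size_t size) {
  const char *end = beg + size;
  uintptr_t *aligned_beg = (uintptr_t *)
      (((uintptr_t)beg + sizeof(uintptr_t) - 1) & ~(uintptr_t)(sizeof(uintptr_t) - 1));
  uintptr_t *aligned_end =
      (uintptr_t *)((uintptr_t)end & ~(uintptr_t)(sizeof(uintptr_t) - 1));
  uintptr_t all = 0;
  for (const char *mem = beg; mem < (char *)aligned_beg && mem < end; mem++)
    all |= *mem;                       // byte-wise prologue
  for (; aligned_beg < aligned_end; aligned_beg++)
    all |= *aligned_beg;               // word-wide middle
  if ((char *)aligned_end >= beg)      // guard against tiny unaligned regions
    for (const char *mem = (char *)aligned_end; mem < end; mem++)
      all |= *mem;                     // byte-wise epilogue
  return all == 0;
}

int main() {
  char buf[64] = {};
  assert(mem_is_zero_sketch(buf + 3, 50));   // unaligned beg/end handled
  buf[40] = 1;
  assert(!mem_is_zero_sketch(buf + 3, 50));
  return 0;
}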
} // namespace __sanitizer } // namespace __sanitizer
...@@ -45,6 +45,11 @@ char *internal_strstr(const char *haystack, const char *needle); ...@@ -45,6 +45,11 @@ char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno. // Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, char **endptr, int base); s64 internal_simple_strtoll(const char *nptr, char **endptr, int base);
// Return true if all bytes in [mem, mem+size) are zero.
// Optimized for the case when the result is true.
bool mem_is_zero(const char *mem, uptr size);
// Memory // Memory
void *internal_mmap(void *addr, uptr length, int prot, int flags, void *internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset); int fd, u64 offset);
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include "sanitizer_mutex.h" #include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h" #include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h" #include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include <fcntl.h> #include <fcntl.h>
#include <pthread.h> #include <pthread.h>
...@@ -28,7 +29,9 @@ ...@@ -28,7 +29,9 @@
#include <sys/time.h> #include <sys/time.h>
#include <sys/types.h> #include <sys/types.h>
#include <unistd.h> #include <unistd.h>
#include <unwind.h>
#include <errno.h> #include <errno.h>
#include <sys/prctl.h>
// Are we using 32-bit or 64-bit syscalls? // Are we using 32-bit or 64-bit syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32 // x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
...@@ -215,6 +218,14 @@ void ReExec() { ...@@ -215,6 +218,14 @@ void ReExec() {
execv(argv[0], argv.data()); execv(argv[0], argv.data());
} }
void PrepareForSandboxing() {
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
  // to read the file mappings from /proc/self/maps. Luckily, the process
  // won't be able to load additional libraries either, so it's fine to use the
// cached mappings.
MemoryMappingLayout::CacheMemoryMappings();
}
// ----------------- sanitizer_procmaps.h // ----------------- sanitizer_procmaps.h
// Linker initialized. // Linker initialized.
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_; ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
...@@ -354,6 +365,75 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset, ...@@ -354,6 +365,75 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size); return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
} }
bool SanitizerSetThreadName(const char *name) {
return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
}
bool SanitizerGetThreadName(char *name, int max_len) {
char buff[17];
if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
return false;
internal_strncpy(name, buff, max_len);
name[max_len] = 0;
return true;
}
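A standalone, Linux-only sketch of the same prctl calls; PR_GET_NAME yields at most 16 characters (15 plus the terminating NUL), which is why the 17-byte buffer above suffices:

#include <sys/prctl.h>
#include <cstdio>

int main() {
  // Names longer than 15 characters are truncated by the kernel.
  prctl(PR_SET_NAME, (unsigned long)"asan-worker", 0, 0, 0);
  char buff[17] = {};
  if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0) == 0)
    std::printf("current thread name: %s\n", buff);
  return 0;
}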
#ifndef SANITIZER_GO
//------------------------- SlowUnwindStack -----------------------------------
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#ifdef __arm__
uptr val;
_Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
15 /* r15 = PC */, _UVRSD_UINT32, &val);
CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
// Clear the Thumb bit.
return val & ~(uptr)1;
#else
return _Unwind_GetIP(ctx);
#endif
}
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
StackTrace *b = (StackTrace*)param;
CHECK(b->size < b->max_size);
uptr pc = Unwind_GetIP(ctx);
b->trace[b->size++] = pc;
if (b->size == b->max_size) return UNWIND_STOP;
return UNWIND_CONTINUE;
}
static bool MatchPc(uptr cur_pc, uptr trace_pc) {
return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
this->size = 0;
this->max_size = max_depth;
if (max_depth > 1) {
_Unwind_Backtrace(Unwind_Trace, this);
// We need to pop a few frames so that pc is on top.
// trace[0] belongs to the current function so we always pop it.
int to_pop = 1;
/**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
this->PopStackFrames(to_pop);
}
this->trace[0] = pc;
}
#endif // #ifndef SANITIZER_GO
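A minimal standalone sketch of the _Unwind_Backtrace pattern used by SlowUnwindStack, shown for the non-ARM case (build with unwind tables enabled, e.g. -funwind-tables; the 64-frame cap is illustrative):

#include <unwind.h>
#include <cstdio>

struct Trace { void *pcs[64]; int size; };

static _Unwind_Reason_Code Collect(struct _Unwind_Context *ctx, void *param) {
  Trace *t = static_cast<Trace *>(param);
  if (t->size >= 64)
    return _URC_NORMAL_STOP;           // buffer full, stop unwinding
  t->pcs[t->size++] = reinterpret_cast<void *>(_Unwind_GetIP(ctx));
  return _URC_NO_REASON;               // keep walking caller frames
}

int main() {
  Trace t = {};
  _Unwind_Backtrace(Collect, &t);
  for (int i = 0; i < t.size; i++)
    std::printf("frame %d: %p\n", i, t.pcs[i]);
  return 0;
}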
} // namespace __sanitizer } // namespace __sanitizer
#endif // __linux__ #endif // __linux__
...@@ -124,6 +124,10 @@ void ReExec() { ...@@ -124,6 +124,10 @@ void ReExec() {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
void PrepareForSandboxing() {
// Nothing here for now.
}
// ----------------- sanitizer_procmaps.h // ----------------- sanitizer_procmaps.h
MemoryMappingLayout::MemoryMappingLayout() { MemoryMappingLayout::MemoryMappingLayout() {
......
//===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines macros telling whether sanitizer tools can/should intercept
// given library functions on a given platform.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_internal_defs.h"
#if !defined(_WIN32)
# define SANITIZER_INTERCEPT_READ 1
# define SANITIZER_INTERCEPT_PREAD 1
#else
# define SANITIZER_INTERCEPT_READ 0
# define SANITIZER_INTERCEPT_PREAD 0
#endif
#if defined(__linux__) && !defined(ANDROID)
# define SANITIZER_INTERCEPT_PREAD64 1
#else
# define SANITIZER_INTERCEPT_PREAD64 0
#endif
...@@ -91,6 +91,21 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) { ...@@ -91,6 +91,21 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
return p; return p;
} }
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
uptr PageSize = GetPageSizeCached();
void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
RoundUpTo(size, PageSize),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1, 0);
if (p == (void*)-1) {
Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
size, size, fixed_addr, errno);
CHECK("unable to mmap" && 0);
}
return p;
}
void *Mprotect(uptr fixed_addr, uptr size) { void *Mprotect(uptr fixed_addr, uptr size) {
return internal_mmap((void*)fixed_addr, size, return internal_mmap((void*)fixed_addr, size,
PROT_NONE, PROT_NONE,
...@@ -98,6 +113,10 @@ void *Mprotect(uptr fixed_addr, uptr size) { ...@@ -98,6 +113,10 @@ void *Mprotect(uptr fixed_addr, uptr size) {
-1, 0); -1, 0);
} }
void FlushUnneededShadowMemory(uptr addr, uptr size) {
madvise((void*)addr, size, MADV_DONTNEED);
}
void *MapFileToMemory(const char *file_name, uptr *buff_size) { void *MapFileToMemory(const char *file_name, uptr *buff_size) {
fd_t fd = internal_open(file_name, false); fd_t fd = internal_open(file_name, false);
CHECK_NE(fd, kInvalidFd); CHECK_NE(fd, kInvalidFd);
......
...@@ -92,7 +92,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) { ...@@ -92,7 +92,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int VSNPrintf(char *buff, int buff_length, int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) { const char *format, va_list args) {
static const char *kPrintfFormatsHelp = static const char *kPrintfFormatsHelp =
"Supported Printf formats: %%(0[0-9]*)?(z|ll)?{d,u,x}; %%p; %%s; %%c\n"; "Supported Printf formats: %(0[0-9]*)?(z|ll)?{d,u,x}; %p; %s; %c\n";
RAW_CHECK(format); RAW_CHECK(format);
RAW_CHECK(buff_length > 0); RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1]; const char *buff_end = &buff[buff_length - 1];
......
//===-- sanitizer_report_decorator.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Tags to decorate the sanitizer reports.
// Currently supported tags:
// * None.
// * ANSI color sequences.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_REPORT_DECORATOR_H
#define SANITIZER_REPORT_DECORATOR_H
namespace __sanitizer {
class AnsiColorDecorator {
public:
explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
const char *Black() { return ansi_ ? "\033[1m\033[30m" : ""; }
const char *Red() { return ansi_ ? "\033[1m\033[31m" : ""; }
const char *Green() { return ansi_ ? "\033[1m\033[32m" : ""; }
const char *Yellow() { return ansi_ ? "\033[1m\033[33m" : ""; }
const char *Blue() { return ansi_ ? "\033[1m\033[34m" : ""; }
const char *Magenta() { return ansi_ ? "\033[1m\033[35m" : ""; }
const char *Cyan() { return ansi_ ? "\033[1m\033[36m" : ""; }
const char *White() { return ansi_ ? "\033[1m\033[37m" : ""; }
const char *Default() { return ansi_ ? "\033[1m\033[0m" : ""; }
private:
bool ansi_;
};
} // namespace __sanitizer
#endif // SANITIZER_REPORT_DECORATOR_H
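A brief usage sketch for the decorator (the 'true' argument is illustrative; a tool would typically key it off a flag or isatty(2)):

#include <cstdio>
#include "sanitizer_report_decorator.h"  // the header added above

int main() {
  __sanitizer::AnsiColorDecorator d(/*use_ansi_colors=*/true);
  std::printf("%sERROR%s: report summary goes here\n", d.Red(), d.Default());
  return 0;
}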
...@@ -40,6 +40,12 @@ static struct { ...@@ -40,6 +40,12 @@ static struct {
atomic_uint32_t seq[kPartCount]; // Unique id generators. atomic_uint32_t seq[kPartCount]; // Unique id generators.
} depot; } depot;
static StackDepotStats stats;
StackDepotStats *StackDepotGetStats() {
return &stats;
}
static u32 hash(const uptr *stack, uptr size) { static u32 hash(const uptr *stack, uptr size) {
// murmur2 // murmur2
const u32 m = 0x5bd1e995; const u32 m = 0x5bd1e995;
...@@ -75,7 +81,7 @@ static StackDesc *tryallocDesc(uptr memsz) { ...@@ -75,7 +81,7 @@ static StackDesc *tryallocDesc(uptr memsz) {
} }
static StackDesc *allocDesc(uptr size) { static StackDesc *allocDesc(uptr size) {
// Frist, try to allocate optimisitically. // First, try to allocate optimistically.
uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr); uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
StackDesc *s = tryallocDesc(memsz); StackDesc *s = tryallocDesc(memsz);
if (s) if (s)
...@@ -91,6 +97,7 @@ static StackDesc *allocDesc(uptr size) { ...@@ -91,6 +97,7 @@ static StackDesc *allocDesc(uptr size) {
if (allocsz < memsz) if (allocsz < memsz)
allocsz = memsz; allocsz = memsz;
uptr mem = (uptr)MmapOrDie(allocsz, "stack depot"); uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
stats.mapped += allocsz;
atomic_store(&depot.region_end, mem + allocsz, memory_order_release); atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
atomic_store(&depot.region_pos, mem, memory_order_release); atomic_store(&depot.region_pos, mem, memory_order_release);
} }
...@@ -154,6 +161,7 @@ u32 StackDepotPut(const uptr *stack, uptr size) { ...@@ -154,6 +161,7 @@ u32 StackDepotPut(const uptr *stack, uptr size) {
} }
uptr part = (h % kTabSize) / kPartSize; uptr part = (h % kTabSize) / kPartSize;
id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1; id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
stats.n_uniq_ids++;
CHECK_LT(id, kMaxId); CHECK_LT(id, kMaxId);
id |= part << kPartShift; id |= part << kPartShift;
CHECK_NE(id, 0); CHECK_NE(id, 0);
......
...@@ -22,6 +22,13 @@ u32 StackDepotPut(const uptr *stack, uptr size); ...@@ -22,6 +22,13 @@ u32 StackDepotPut(const uptr *stack, uptr size);
// Retrieves a stored stack trace by the id. // Retrieves a stored stack trace by the id.
const uptr *StackDepotGet(u32 id, uptr *size); const uptr *StackDepotGet(u32 id, uptr *size);
struct StackDepotStats {
uptr n_uniq_ids;
uptr mapped;
};
StackDepotStats *StackDepotGetStats();
} // namespace __sanitizer } // namespace __sanitizer
#endif // SANITIZER_STACKDEPOT_H #endif // SANITIZER_STACKDEPOT_H
...@@ -23,10 +23,7 @@ static const char *StripPathPrefix(const char *filepath, ...@@ -23,10 +23,7 @@ static const char *StripPathPrefix(const char *filepath,
} }
// ----------------------- StackTrace ----------------------------- {{{1 // ----------------------- StackTrace ----------------------------- {{{1
// PCs in stack traces are actually the return addresses, that is, uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
// addresses of the next instructions after the call. That's why we
// decrement them.
static uptr patch_pc(uptr pc) {
#ifdef __arm__ #ifdef __arm__
// Cancel Thumb bit. // Cancel Thumb bit.
pc = pc & (~1); pc = pc & (~1);
...@@ -69,7 +66,9 @@ void StackTrace::PrintStack(const uptr *addr, uptr size, ...@@ -69,7 +66,9 @@ void StackTrace::PrintStack(const uptr *addr, uptr size,
InternalScopedBuffer<AddressInfo> addr_frames(64); InternalScopedBuffer<AddressInfo> addr_frames(64);
uptr frame_num = 0; uptr frame_num = 0;
for (uptr i = 0; i < size && addr[i]; i++) { for (uptr i = 0; i < size && addr[i]; i++) {
uptr pc = patch_pc(addr[i]); // PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(addr[i]);
uptr addr_frames_num = 0; // The number of stack frames for current uptr addr_frames_num = 0; // The number of stack frames for current
// instruction address. // instruction address.
if (symbolize_callback) { if (symbolize_callback) {
......
...@@ -42,10 +42,12 @@ struct StackTrace { ...@@ -42,10 +42,12 @@ struct StackTrace {
} }
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom); void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom);
void SlowUnwindStack(uptr pc, uptr max_depth);
void PopStackFrames(uptr count); void PopStackFrames(uptr count);
static uptr GetCurrentPc(); static uptr GetCurrentPc();
static uptr GetPreviousInstructionPc(uptr pc);
static uptr CompressStack(StackTrace *stack, static uptr CompressStack(StackTrace *stack,
u32 *compressed, uptr size); u32 *compressed, uptr size);
......
...@@ -58,6 +58,9 @@ struct AddressInfo { ...@@ -58,6 +58,9 @@ struct AddressInfo {
uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames); uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
bool SymbolizeData(uptr address, AddressInfo *frame); bool SymbolizeData(uptr address, AddressInfo *frame);
// Attempts to demangle the provided C++ mangled name.
const char *Demangle(const char *Name);
// Starts external symbolizer program in a subprocess. Sanitizer communicates // Starts external symbolizer program in a subprocess. Sanitizer communicates
// with external symbolizer via pipes. // with external symbolizer via pipes.
bool InitializeExternalSymbolizer(const char *path_to_symbolizer); bool InitializeExternalSymbolizer(const char *path_to_symbolizer);
......
//===-- sanitizer_symbolizer_itanium.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between the sanitizer run-time libraries.
// Itanium C++ ABI-specific implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
#if defined(__APPLE__) || defined(__linux__)
#include "sanitizer_symbolizer.h"
#include <stdlib.h>
// C++ demangling function, as required by Itanium C++ ABI. This is weak,
// because we do not require a C++ ABI library to be linked to a program
// using sanitizers; if it's not present, we'll just use the mangled name.
namespace __cxxabiv1 {
extern "C" char *__cxa_demangle(const char *mangled, char *buffer,
size_t *length, int *status)
SANITIZER_WEAK_ATTRIBUTE;
}
const char *__sanitizer::Demangle(const char *MangledName) {
// FIXME: __cxa_demangle aggressively insists on allocating memory.
// There's not much we can do about that, short of providing our
// own demangler (libc++abi's implementation could be adapted so that
// it does not allocate). For now, we just call it anyway, and we leak
// the returned value.
if (__cxxabiv1::__cxa_demangle)
if (const char *Demangled =
__cxxabiv1::__cxa_demangle(MangledName, 0, 0, 0))
return Demangled;
return MangledName;
}
#endif // __APPLE__ || __linux__
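The same demangling call can be made non-weakly through the public cxxabi.h header; a standalone sketch (the mangled name is just an example, and __cxa_demangle malloc()s its result):

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
  int status = 0;
  // "_ZN11__sanitizer8DemangleEPKc" demangles to __sanitizer::Demangle(char const*).
  char *s = abi::__cxa_demangle("_ZN11__sanitizer8DemangleEPKc", 0, 0, &status);
  std::printf("%s\n", (status == 0 && s) ? s : "demangling failed");
  std::free(s);  // caller owns the malloc()ed buffer
  return 0;
}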
...@@ -26,6 +26,10 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules) { ...@@ -26,6 +26,10 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
UNIMPLEMENTED(); UNIMPLEMENTED();
}; };
const char *Demangle(const char *MangledName) {
return MangledName;
}
} // namespace __sanitizer } // namespace __sanitizer
#endif // _WIN32 #endif // _WIN32
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#define WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN
#define NOGDI #define NOGDI
#include <stdlib.h> #include <stdlib.h>
#include <io.h>
#include <windows.h> #include <windows.h>
#include "sanitizer_common.h" #include "sanitizer_common.h"
...@@ -73,6 +74,8 @@ void UnmapOrDie(void *addr, uptr size) { ...@@ -73,6 +74,8 @@ void UnmapOrDie(void *addr, uptr size) {
} }
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) { void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
void *p = VirtualAlloc((LPVOID)fixed_addr, size, void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (p == 0) if (p == 0)
...@@ -81,6 +84,10 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) { ...@@ -81,6 +84,10 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
return p; return p;
} }
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
return MmapFixedNoReserve(fixed_addr, size);
}
void *Mprotect(uptr fixed_addr, uptr size) { void *Mprotect(uptr fixed_addr, uptr size) {
return VirtualAlloc((LPVOID)fixed_addr, size, return VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS); MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
...@@ -127,6 +134,10 @@ void ReExec() { ...@@ -127,6 +134,10 @@ void ReExec() {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
void PrepareForSandboxing() {
// Nothing here for now.
}
bool StackSizeIsUnlimited() { bool StackSizeIsUnlimited() {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
...@@ -173,7 +184,7 @@ int internal_close(fd_t fd) { ...@@ -173,7 +184,7 @@ int internal_close(fd_t fd) {
} }
int internal_isatty(fd_t fd) { int internal_isatty(fd_t fd) {
UNIMPLEMENTED(); return _isatty(fd);
} }
fd_t internal_open(const char *filename, bool write) { fd_t internal_open(const char *filename, bool write) {
......
...@@ -31,6 +31,9 @@ tsan_files = \ ...@@ -31,6 +31,9 @@ tsan_files = \
tsan_interface_ann.cc \ tsan_interface_ann.cc \
tsan_mman.cc \ tsan_mman.cc \
tsan_rtl_report.cc \ tsan_rtl_report.cc \
tsan_fd.cc \
tsan_interface_java.cc \
tsan_mutexset.cc \
tsan_symbolize_addr2line_linux.cc tsan_symbolize_addr2line_linux.cc
libtsan_la_SOURCES = $(tsan_files) libtsan_la_SOURCES = $(tsan_files)
......
...@@ -87,7 +87,8 @@ am__objects_1 = tsan_clock.lo tsan_interface_atomic.lo tsan_mutex.lo \ ...@@ -87,7 +87,8 @@ am__objects_1 = tsan_clock.lo tsan_interface_atomic.lo tsan_mutex.lo \
tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_interceptors.lo \ tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_interceptors.lo \
tsan_md5.lo tsan_platform_mac.lo tsan_rtl_mutex.lo \ tsan_md5.lo tsan_platform_mac.lo tsan_rtl_mutex.lo \
tsan_suppressions.lo tsan_interface_ann.lo tsan_mman.lo \ tsan_suppressions.lo tsan_interface_ann.lo tsan_mman.lo \
tsan_rtl_report.lo tsan_symbolize_addr2line_linux.lo tsan_rtl_report.lo tsan_fd.lo tsan_interface_java.lo \
tsan_mutexset.lo tsan_symbolize_addr2line_linux.lo
am_libtsan_la_OBJECTS = $(am__objects_1) am_libtsan_la_OBJECTS = $(am__objects_1)
libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS) libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
...@@ -273,6 +274,9 @@ tsan_files = \ ...@@ -273,6 +274,9 @@ tsan_files = \
tsan_interface_ann.cc \ tsan_interface_ann.cc \
tsan_mman.cc \ tsan_mman.cc \
tsan_rtl_report.cc \ tsan_rtl_report.cc \
tsan_fd.cc \
tsan_interface_java.cc \
tsan_mutexset.cc \
tsan_symbolize_addr2line_linux.cc tsan_symbolize_addr2line_linux.cc
libtsan_la_SOURCES = $(tsan_files) libtsan_la_SOURCES = $(tsan_files)
@@ -393,14 +397,17 @@ distclean-compile:
 	-rm -f *.tab.c
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_clock.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_fd.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_flags.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interceptors.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_ann.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_atomic.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_java.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_md5.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mman.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutex.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutexset.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_linux.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_mac.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_report.Plo@am__quote@
......
@@ -137,6 +137,12 @@ T RoundDown(T p, u64 align) {
   return (T)((u64)p & ~(align - 1));
 }
+// Zeroizes high part, returns 'bits' lsb bits.
+template<typename T>
+T GetLsb(T v, int bits) {
+  return (T)((u64)v & ((1ull << bits) - 1));
+}
 struct MD5Hash {
   u64 hash[2];
   bool operator==(const MD5Hash &other) const;
......
//===-- tsan_fd.cc --------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_fd.h"
#include "tsan_rtl.h"
#include <sanitizer_common/sanitizer_atomic.h>
namespace __tsan {
const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;
struct FdSync {
atomic_uint64_t rc;
};
struct FdDesc {
FdSync *sync;
int creation_tid;
u32 creation_stack;
};
struct FdContext {
atomic_uintptr_t tab[kTableSizeL1];
// Addresses used for synchronization.
FdSync globsync;
FdSync filesync;
FdSync socksync;
u64 connectsync;
};
static FdContext fdctx;
static FdSync *allocsync() {
FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
static FdSync *ref(FdSync *s) {
if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
return s;
}
static void unref(ThreadState *thr, uptr pc, FdSync *s) {
if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync);
SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
if (v)
DestroyAndFree(v);
internal_free(s);
}
}
}
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
CHECK_LT(fd, kTableSize);
atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
uptr l1 = atomic_load(pl1, memory_order_consume);
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
void *p = internal_alloc(MBlockFD, size);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
l1 = (uptr)p;
else
internal_free(p);
}
return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
}
// pd must be already ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
FdDesc *d = fddesc(thr, pc, fd);
// As a matter of fact, we don't intercept all close calls.
// See e.g. libc __res_iclose().
if (d->sync) {
unref(thr, pc, d->sync);
d->sync = 0;
}
if (flags()->io_sync == 0) {
unref(thr, pc, s);
} else if (flags()->io_sync == 1) {
d->sync = s;
} else if (flags()->io_sync == 2) {
unref(thr, pc, s);
d->sync = &fdctx.globsync;
}
d->creation_tid = thr->tid;
d->creation_stack = CurrentStackId(thr, pc);
// To catch races between fd usage and open.
MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
}
void FdInit() {
atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}
void FdOnFork(ThreadState *thr, uptr pc) {
// On fork() we need to reset all fd's, because the child is going
// to close all of them, and that will cause races between previous read/write
// and the close.
for (int l1 = 0; l1 < kTableSizeL1; l1++) {
FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
if (tab == 0)
break;
for (int l2 = 0; l2 < kTableSizeL2; l2++) {
FdDesc *d = &tab[l2];
MemoryResetRange(thr, pc, (uptr)d, 8);
}
}
}
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
for (int l1 = 0; l1 < kTableSizeL1; l1++) {
FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
if (tab == 0)
break;
if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
FdDesc *d = &tab[l2];
*fd = l1 * kTableSizeL1 + l2;
*tid = d->creation_tid;
*stack = d->creation_stack;
return true;
}
}
return false;
}
void FdAcquire(ThreadState *thr, uptr pc, int fd) {
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
MemoryRead8Byte(thr, pc, (uptr)d);
if (s)
Acquire(thr, pc, (uptr)s);
}
void FdRelease(ThreadState *thr, uptr pc, int fd) {
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
if (s)
Release(thr, pc, (uptr)s);
MemoryRead8Byte(thr, pc, (uptr)d);
}
void FdClose(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
// To catch races between fd usage and close.
MemoryWrite8Byte(thr, pc, (uptr)d);
// We need to clear it, because if there is a call that creates an fd
// which we do not intercept, we will hit false positives.
MemoryResetRange(thr, pc, (uptr)d, 8);
unref(thr, pc, d->sync);
d->sync = 0;
d->creation_tid = 0;
d->creation_stack = 0;
}
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
init(thr, pc, fd, &fdctx.filesync);
}
void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
// Ignore the case when user dups not yet connected socket.
FdDesc *od = fddesc(thr, pc, oldfd);
MemoryRead8Byte(thr, pc, (uptr)od);
FdClose(thr, pc, newfd);
init(thr, pc, newfd, ref(od->sync));
}
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
FdSync *s = allocsync();
init(thr, pc, rfd, ref(s));
init(thr, pc, wfd, ref(s));
unref(thr, pc, s);
}
void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
init(thr, pc, fd, allocsync());
}
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
init(thr, pc, fd, 0);
}
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
init(thr, pc, fd, 0);
}
void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
init(thr, pc, fd, allocsync());
}
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
// It can be a UDP socket.
init(thr, pc, fd, &fdctx.socksync);
}
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
// Synchronize connect->accept.
Acquire(thr, pc, (uptr)&fdctx.connectsync);
init(thr, pc, newfd, &fdctx.socksync);
}
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
// Synchronize connect->accept.
Release(thr, pc, (uptr)&fdctx.connectsync);
}
void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
init(thr, pc, fd, &fdctx.socksync);
}
uptr File2addr(char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
}
uptr Dir2addr(char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
}
} // namespace __tsan
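A worked example of the two-level descriptor lookup in fddesc() above (the
values follow directly from the constants at the top of this file):

// fd = 1500:
//   level-1 slot = fd / kTableSizeL2 = 1500 / 1024 = 1
//   level-2 slot = fd % kTableSizeL2 = 1500 % 1024 = 476
// The first access to a level-1 slot allocates a zeroed 1024-entry FdDesc
// array and publishes it with atomic_compare_exchange_strong, so two threads
// racing on the first access can neither leak nor double-use the table.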
//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// This file handles synchronization via IO.
// People use IO for synchronization along the lines of:
//
// int X;
// int client_socket; // initialized elsewhere
// int server_socket; // initialized elsewhere
//
// Thread 1:
// X = 42;
// send(client_socket, ...);
//
// Thread 2:
// if (recv(server_socket, ...) > 0)
// assert(X == 42);
//
// This file determines the scope of the file descriptor (pipe, socket,
// all local files, etc) and executes acquire and release operations on
// the scope as necessary. Some scopes are very fine grained (e.g. pipe
// operations synchronize only with operations on the same pipe), while
// others are coarse-grained (e.g. all operations on local files synchronize
// with each other).
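//
// For illustration only (a hedged sketch, not part of the upstream header):
// this is roughly how read/write interceptors could drive the hooks declared
// below so that the send/recv example above synchronizes. REAL() and the
// function names are simplified assumptions about the interceptor plumbing.
//
//   ssize_t write_hook(ThreadState *thr, uptr pc, int fd,
//                      const void *buf, size_t sz) {
//     FdRelease(thr, pc, fd);            // release edge on the fd's scope
//     return REAL(write)(fd, buf, sz);
//   }
//   ssize_t read_hook(ThreadState *thr, uptr pc, int fd,
//                     void *buf, size_t sz) {
//     ssize_t res = REAL(read)(fd, buf, sz);
//     if (res >= 0)
//       FdAcquire(thr, pc, fd);          // pairs with the writer's release
//     return res;
//   }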
//===----------------------------------------------------------------------===//
#ifndef TSAN_FD_H
#define TSAN_FD_H
#include "tsan_rtl.h"
namespace __tsan {
void FdInit();
void FdAcquire(ThreadState *thr, uptr pc, int fd);
void FdRelease(ThreadState *thr, uptr pc, int fd);
void FdClose(ThreadState *thr, uptr pc, int fd);
void FdFileCreate(ThreadState *thr, uptr pc, int fd);
void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd);
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
void FdEventCreate(ThreadState *thr, uptr pc, int fd);
void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
void FdPollCreate(ThreadState *thr, uptr pc, int fd);
void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
void FdOnFork(ThreadState *thr, uptr pc);
uptr File2addr(char *path);
uptr Dir2addr(char *path);
} // namespace __tsan
#endif  // TSAN_FD_H
@@ -56,6 +56,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->running_on_valgrind = false;
   f->external_symbolizer_path = "";
   f->history_size = kGoMode ? 1 : 2;  // There are a lot of goroutines in Go.
+  f->io_sync = 1;
   // Let a frontend override.
   OverrideFlags(f);
@@ -81,6 +82,7 @@ void InitializeFlags(Flags *f, const char *env) {
   ParseFlag(env, &f->stop_on_start, "stop_on_start");
   ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path");
   ParseFlag(env, &f->history_size, "history_size");
+  ParseFlag(env, &f->io_sync, "io_sync");
   if (!f->report_bugs) {
     f->report_thread_leaks = false;
@@ -93,6 +95,12 @@ void InitializeFlags(Flags *f, const char *env) {
            " (must be [0..7])\n");
     Die();
   }
+  if (f->io_sync < 0 || f->io_sync > 2) {
+    Printf("ThreadSanitizer: incorrect value for io_sync"
+           " (must be [0..2])\n");
+    Die();
+  }
 }
 }  // namespace __tsan
@@ -75,6 +75,11 @@ struct Flags {
   // the amount of memory accesses, up to history_size=7 that amounts to
   // 4M memory accesses. The default value is 2 (128K memory accesses).
   int history_size;
+  // Controls level of synchronization implied by IO operations.
+  // 0 - no synchronization
+  // 1 - reasonable level of synchronization (write->read)
+  // 2 - global synchronization of all IO operations
+  int io_sync;
 };
 Flags *flags();
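Like the other flags parsed above, io_sync is read from the TSAN_OPTIONS
environment variable at startup. A typical invocation forcing global IO
synchronization (the program name here is hypothetical):

TSAN_OPTIONS="io_sync=2" ./app   # 0: none, 1: write->read per fd (default), 2: global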
......
@@ -229,7 +229,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
     return *a;
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.acquire(&s->clock);
   T v = *a;
@@ -251,7 +251,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     return;
   }
   __sync_synchronize();
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.ReleaseStore(&s->clock);
   *a = v;
@@ -263,7 +263,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
 template<typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
     thr->clock.acq_rel(&s->clock);
@@ -322,7 +322,7 @@ template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
                       volatile T *a, T *c, T v, morder mo, morder fmo) {
   (void)fmo;  // Unused because llvm does not pass it yet.
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
     thr->clock.acq_rel(&s->clock);
......
@@ -26,7 +26,7 @@ typedef long __tsan_atomic64;  // NOLINT
 #if defined(__SIZEOF_INT128__) \
     || (__clang_major__ * 100 + __clang_minor__ >= 302)
-typedef __int128 __tsan_atomic128;
+__extension__ typedef __int128 __tsan_atomic128;
 #define __TSAN_HAS_INT128 1
 #else
 typedef char __tsan_atomic128;
......
//===-- tsan_interface_java.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_interface_java.h"
#include "tsan_rtl.h"
#include "tsan_mutex.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
using namespace __tsan; // NOLINT
namespace __tsan {
const uptr kHeapShadow = 0x300000000000ull;
const uptr kHeapAlignment = 8;
struct BlockDesc {
bool begin;
Mutex mtx;
SyncVar *head;
BlockDesc()
: mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
, head() {
CHECK_EQ(begin, false);
begin = true;
}
~BlockDesc() {
CHECK_EQ(begin, true);
begin = false;
ThreadState *thr = cur_thread();
SyncVar *s = head;
while (s) {
SyncVar *s1 = s->next;
StatInc(thr, StatSyncDestroyed);
s->mtx.Lock();
s->mtx.Unlock();
thr->mset.Remove(s->GetId());
DestroyAndFree(s);
s = s1;
}
}
};
struct JavaContext {
const uptr heap_begin;
const uptr heap_size;
BlockDesc *heap_shadow;
JavaContext(jptr heap_begin, jptr heap_size)
: heap_begin(heap_begin)
, heap_size(heap_size) {
uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
if ((uptr)heap_shadow != kHeapShadow) {
Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
Die();
}
}
};
class ScopedJavaFunc {
public:
ScopedJavaFunc(ThreadState *thr, uptr pc)
: thr_(thr) {
Initialize(thr_);
FuncEntry(thr, pc);
CHECK_EQ(thr_->in_rtl, 0);
thr_->in_rtl++;
}
~ScopedJavaFunc() {
thr_->in_rtl--;
CHECK_EQ(thr_->in_rtl, 0);
FuncExit(thr_);
// FIXME(dvyukov): process pending signals.
}
private:
ThreadState *thr_;
};
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;
static BlockDesc *getblock(uptr addr) {
uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
return &jctx->heap_shadow[i];
}
static uptr USED getmem(BlockDesc *b) {
uptr i = b - jctx->heap_shadow;
uptr p = jctx->heap_begin + i * kHeapAlignment;
CHECK_GE(p, jctx->heap_begin);
CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
return p;
}
static BlockDesc *getblockbegin(uptr addr) {
for (BlockDesc *b = getblock(addr);; b--) {
CHECK_GE(b, jctx->heap_shadow);
if (b->begin)
return b;
}
return 0;
}
SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
bool write_lock, bool create) {
if (jctx == 0 || addr < jctx->heap_begin
|| addr >= jctx->heap_begin + jctx->heap_size)
return 0;
BlockDesc *b = getblockbegin(addr);
DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
Lock l(&b->mtx);
SyncVar *s = b->head;
for (; s; s = s->next) {
if (s->addr == addr) {
DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
break;
}
}
if (s == 0 && create) {
DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
s = CTX()->synctab.Create(thr, pc, addr);
s->next = b->head;
b->head = s;
}
if (s) {
if (write_lock)
s->mtx.Lock();
else
s->mtx.ReadLock();
}
return s;
}
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
// We do not destroy Java mutexes other than in __tsan_java_free().
return 0;
}
}  // namespace __tsan
#define SCOPED_JAVA_FUNC(func) \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = GET_CALLER_PC(); \
const uptr pc = (uptr)&func; \
(void)pc; \
ScopedJavaFunc scoped(thr, caller_pc); \
/**/
void __tsan_java_init(jptr heap_begin, jptr heap_size) {
SCOPED_JAVA_FUNC(__tsan_java_init);
DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
CHECK_EQ(jctx, 0);
CHECK_GT(heap_begin, 0);
CHECK_GT(heap_size, 0);
CHECK_EQ(heap_begin % kHeapAlignment, 0);
CHECK_EQ(heap_size % kHeapAlignment, 0);
CHECK_LT(heap_begin, heap_begin + heap_size);
jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
}
int __tsan_java_fini() {
SCOPED_JAVA_FUNC(__tsan_java_fini);
DPrintf("#%d: java_fini()\n", thr->tid);
CHECK_NE(jctx, 0);
// FIXME(dvyukov): this does not call atexit() callbacks.
int status = Finalize(thr);
DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
return status;
}
void __tsan_java_alloc(jptr ptr, jptr size) {
SCOPED_JAVA_FUNC(__tsan_java_alloc);
DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
CHECK_NE(jctx, 0);
CHECK_NE(size, 0);
CHECK_EQ(ptr % kHeapAlignment, 0);
CHECK_EQ(size % kHeapAlignment, 0);
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
BlockDesc *b = getblock(ptr);
new(b) BlockDesc();
}
void __tsan_java_free(jptr ptr, jptr size) {
SCOPED_JAVA_FUNC(__tsan_java_free);
DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
CHECK_NE(jctx, 0);
CHECK_NE(size, 0);
CHECK_EQ(ptr % kHeapAlignment, 0);
CHECK_EQ(size % kHeapAlignment, 0);
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
BlockDesc *beg = getblock(ptr);
BlockDesc *end = getblock(ptr + size);
for (BlockDesc *b = beg; b != end; b++) {
if (b->begin)
b->~BlockDesc();
}
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
SCOPED_JAVA_FUNC(__tsan_java_move);
DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
CHECK_NE(jctx, 0);
CHECK_NE(size, 0);
CHECK_EQ(src % kHeapAlignment, 0);
CHECK_EQ(dst % kHeapAlignment, 0);
CHECK_EQ(size % kHeapAlignment, 0);
CHECK_GE(src, jctx->heap_begin);
CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
CHECK_GE(dst, jctx->heap_begin);
CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
CHECK(dst >= src + size || src >= dst + size);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
{ // NOLINT
BlockDesc *s = getblock(src);
BlockDesc *d = getblock(dst);
BlockDesc *send = getblock(src + size);
for (; s != send; s++, d++) {
CHECK_EQ(d->begin, false);
if (s->begin) {
DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
new(d) BlockDesc;
d->head = s->head;
for (SyncVar *sync = d->head; sync; sync = sync->next) {
uptr newaddr = sync->addr - src + dst;
DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
sync->addr = newaddr;
}
s->head = 0;
s->~BlockDesc();
}
}
}
{ // NOLINT
u64 *s = (u64*)MemToShadow(src);
u64 *d = (u64*)MemToShadow(dst);
u64 *send = (u64*)MemToShadow(src + size);
for (; s != send; s++, d++) {
*d = *s;
*s = 0;
}
}
}
void __tsan_java_mutex_lock(jptr addr) {
SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
CHECK_NE(jctx, 0);
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
MutexLock(thr, pc, addr);
}
void __tsan_java_mutex_unlock(jptr addr) {
SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
CHECK_NE(jctx, 0);
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
MutexUnlock(thr, pc, addr);
}
void __tsan_java_mutex_read_lock(jptr addr) {
SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
CHECK_NE(jctx, 0);
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
MutexReadLock(thr, pc, addr);
}
void __tsan_java_mutex_read_unlock(jptr addr) {
SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
CHECK_NE(jctx, 0);
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
MutexReadUnlock(thr, pc, addr);
}
//===-- tsan_interface_java.h -----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Interface for verification of Java or mixed Java/C++ programs.
// The interface is intended to be used from within a JVM to notify TSan
// about events such as Java locks and GC memory compaction.
//
// For plain memory accesses and function entry/exit a JVM is intended to use
// C++ interfaces: __tsan_readN/writeN and __tsan_func_enter/exit.
//
// For volatile memory accesses and atomic operations the JVM is intended to use
// standard atomics API: __tsan_atomicN_load/store/etc.
//
// For usage examples see lit_tests/java_*.cc
//===----------------------------------------------------------------------===//
#ifndef TSAN_INTERFACE_JAVA_H
#define TSAN_INTERFACE_JAVA_H
#ifndef INTERFACE_ATTRIBUTE
# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef unsigned long jptr; // NOLINT
// Must be called before any other callback from Java.
void __tsan_java_init(jptr heap_begin, jptr heap_size) INTERFACE_ATTRIBUTE;
// Must be called when the application exits.
// Not necessarily the last callback (concurrently running threads are OK).
// Returns exit status or 0 if tsan does not want to override it.
int __tsan_java_fini() INTERFACE_ATTRIBUTE;
// Callback for memory allocations.
// May be omitted for allocations that are not subject to data races
// nor contain synchronization objects (e.g. String).
void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
// Callback for memory free.
// Can be aggregated for several objects (preferably).
void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
// Callback for memory move by GC.
// Can be aggregated for several objects (preferably).
// The ranges must not overlap.
void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
// Mutex lock.
// Addr is any unique address associated with the mutex.
// Must not be called on recursive reentry.
// Object.wait() is handled as a pair of unlock/lock.
void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE;
// Mutex unlock.
void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
// Mutex read lock.
void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE;
// Mutex read unlock.
void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE;
#ifdef __cplusplus
} // extern "C"
#endif
#undef INTERFACE_ATTRIBUTE
#endif // #ifndef TSAN_INTERFACE_JAVA_H
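As a usage illustration (hypothetical JVM-side glue, not part of this commit;
the lit_tests/java_*.cc tests mentioned above exercise the same calls):

// Hypothetical sketch of a JVM driving the interface above.
void jvm_startup(jptr heap, jptr heap_size) {
  __tsan_java_init(heap, heap_size);      // heap must be 8-byte aligned
}
void jvm_new_object(jptr addr, jptr size) {
  __tsan_java_alloc(addr, size);          // addr/size 8-byte aligned
}
void jvm_monitor_enter(jptr obj) {
  __tsan_java_mutex_lock(obj);            // object address serves as mutex id
}
void jvm_monitor_exit(jptr obj) {
  __tsan_java_mutex_unlock(obj);
}
void jvm_gc_compact(jptr from, jptr to, jptr size) {
  __tsan_java_move(from, to, size);       // during stop-the-world compaction
}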
@@ -58,8 +58,9 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
   void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
   if (p == 0)
     return 0;
-  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
+  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
   b->size = sz;
+  b->head = 0;
   b->alloc_tid = thr->unique_id;
   b->alloc_stack_id = CurrentStackId(thr, pc);
   if (CTX() && CTX()->initialized) {
@@ -90,6 +91,7 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
   if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
     MemoryRangeFreed(thr, pc, (uptr)p, b->size);
   }
+  b->~MBlock();
   allocator()->Deallocate(&thr->alloc_cache, p);
   SignalUnsafeCall(thr, pc);
 }
@@ -115,9 +117,11 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
 }
 MBlock *user_mblock(ThreadState *thr, void *p) {
-  // CHECK_GT(thr->in_rtl, 0);
   CHECK_NE(p, (void*)0);
-  return (MBlock*)allocator()->GetMetaData(p);
+  Allocator *a = allocator();
+  void *b = a->GetBlockBegin(p);
+  CHECK_NE(b, 0);
+  return (MBlock*)a->GetMetaData(b);
 }
 void invoke_malloc_hook(void *ptr, uptr size) {
......
@@ -57,6 +57,7 @@ enum MBlockType {
   MBlockSuppression,
   MBlockExpectRace,
   MBlockSignal,
+  MBlockFD,
   // This must be the last.
   MBlockTypeCount
......
@@ -23,22 +23,28 @@ namespace __tsan {
 // then Report mutex can be locked while under Threads mutex.
 // The leaf mutexes can be locked under any other mutexes.
 // Recursive locking is not supported.
+#if TSAN_DEBUG && !TSAN_GO
 const MutexType MutexTypeLeaf = (MutexType)-1;
 static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
   /*0 MutexTypeInvalid*/     {},
   /*1 MutexTypeTrace*/       {MutexTypeLeaf},
   /*2 MutexTypeThreads*/     {MutexTypeReport},
-  /*3 MutexTypeReport*/      {},
+  /*3 MutexTypeReport*/      {MutexTypeSyncTab, MutexTypeMBlock,
+                              MutexTypeJavaMBlock},
   /*4 MutexTypeSyncVar*/     {},
   /*5 MutexTypeSyncTab*/     {MutexTypeSyncVar},
   /*6 MutexTypeSlab*/        {MutexTypeLeaf},
   /*7 MutexTypeAnnotations*/ {},
   /*8 MutexTypeAtExit*/      {MutexTypeSyncTab},
+  /*9 MutexTypeMBlock*/      {MutexTypeSyncVar},
+  /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
 };
 static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
+#endif
 void InitializeMutex() {
+#if TSAN_DEBUG && !TSAN_GO
   // Build the "can lock" adjacency matrix.
   // If [i][j]==true, then one can lock mutex j while under mutex i.
   const int N = MutexTypeCount;
@@ -112,14 +118,18 @@ void InitializeMutex() {
     Die();
   }
 }
+#endif
 }
 DeadlockDetector::DeadlockDetector() {
   // Rely on zero initialization because some mutexes can be locked before ctor.
 }
+#if TSAN_DEBUG && !TSAN_GO
 void DeadlockDetector::Lock(MutexType t) {
   // Printf("LOCK %d @%zu\n", t, seq_ + 1);
+  CHECK_GT(t, MutexTypeInvalid);
+  CHECK_LT(t, MutexTypeCount);
   u64 max_seq = 0;
   u64 max_idx = MutexTypeInvalid;
   for (int i = 0; i != MutexTypeCount; i++) {
@@ -148,6 +158,7 @@ void DeadlockDetector::Unlock(MutexType t) {
   CHECK(locked_[t]);
   locked_[t] = 0;
 }
+#endif
 const uptr kUnlocked = 0;
 const uptr kWriteLock = 1;
......
@@ -27,6 +27,8 @@ enum MutexType {
   MutexTypeSlab,
   MutexTypeAnnotations,
   MutexTypeAtExit,
+  MutexTypeMBlock,
+  MutexTypeJavaMBlock,
   // This must be the last.
   MutexTypeCount
......
//===-- tsan_mutexset.cc --------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_mutexset.h"
#include "tsan_rtl.h"
namespace __tsan {
const uptr MutexSet::kMaxSize;
MutexSet::MutexSet() {
size_ = 0;
internal_memset(&descs_, 0, sizeof(descs_));
}
void MutexSet::Add(u64 id, bool write, u64 epoch) {
// Look up existing mutex with the same id.
for (uptr i = 0; i < size_; i++) {
if (descs_[i].id == id) {
descs_[i].count++;
descs_[i].epoch = epoch;
return;
}
}
// On overflow, find the oldest mutex and drop it.
if (size_ == kMaxSize) {
u64 minepoch = (u64)-1;
u64 mini = (u64)-1;
for (uptr i = 0; i < size_; i++) {
if (descs_[i].epoch < minepoch) {
minepoch = descs_[i].epoch;
mini = i;
}
}
RemovePos(mini);
CHECK_EQ(size_, kMaxSize - 1);
}
// Add new mutex descriptor.
descs_[size_].id = id;
descs_[size_].write = write;
descs_[size_].epoch = epoch;
descs_[size_].count = 1;
size_++;
}
void MutexSet::Del(u64 id, bool write) {
for (uptr i = 0; i < size_; i++) {
if (descs_[i].id == id) {
if (--descs_[i].count == 0)
RemovePos(i);
return;
}
}
}
void MutexSet::Remove(u64 id) {
for (uptr i = 0; i < size_; i++) {
if (descs_[i].id == id) {
RemovePos(i);
return;
}
}
}
void MutexSet::RemovePos(uptr i) {
CHECK_LT(i, size_);
descs_[i] = descs_[size_ - 1];
size_--;
}
uptr MutexSet::Size() const {
return size_;
}
MutexSet::Desc MutexSet::Get(uptr i) const {
CHECK_LT(i, size_);
return descs_[i];
}
} // namespace __tsan
//===-- tsan_mutexset.h -----------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// MutexSet holds the set of mutexes currently held by a thread.
//===----------------------------------------------------------------------===//
#ifndef TSAN_MUTEXSET_H
#define TSAN_MUTEXSET_H
#include "tsan_defs.h"
namespace __tsan {
class MutexSet {
public:
// Holds limited number of mutexes.
// The oldest mutexes are discarded on overflow.
static const uptr kMaxSize = 64;
struct Desc {
u64 id;
u64 epoch;
int count;
bool write;
};
MutexSet();
// The 'id' is obtained from SyncVar::GetId().
void Add(u64 id, bool write, u64 epoch);
void Del(u64 id, bool write);
void Remove(u64 id); // Removes the mutex completely (if it's destroyed).
uptr Size() const;
Desc Get(uptr i) const;
private:
#ifndef TSAN_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
void RemovePos(uptr i);
};
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in different goroutine).
#ifdef TSAN_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
void MutexSet::Remove(u64 id) {}
void MutexSet::RemovePos(uptr i) {}
uptr MutexSet::Size() const { return 0; }
MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
#endif
} // namespace __tsan
#endif  // TSAN_MUTEXSET_H
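A minimal sketch of how the runtime uses MutexSet (condensed from the
tsan_rtl_mutex.cc changes later in this commit):

// On lock: remember that the current thread now holds this mutex.
//   thr->mset.Add(s->GetId(), /*write=*/true, thr->fast_state.epoch());
// On unlock: drop one recursion level, forgetting the mutex at zero.
//   thr->mset.Del(s->GetId(), /*write=*/true);
// On mutex destruction: remove it entirely.
//   thr->mset.Remove(s->GetId());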
@@ -135,7 +135,6 @@ void FlushShadowMemory();
 const char *InitializePlatform();
 void FinalizePlatform();
-void MapThreadTrace(uptr addr, uptr size);
 uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) {
   uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event);
   DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
......
@@ -69,9 +69,7 @@ uptr GetShadowMemoryConsumption() {
 }
 void FlushShadowMemory() {
-  madvise((void*)kLinuxShadowBeg,
-          kLinuxShadowEnd - kLinuxShadowBeg,
-          MADV_DONTNEED);
+  FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
 }
 #ifndef TSAN_GO
@@ -118,16 +116,6 @@ void InitializeShadowMemory() {
 }
 #endif
-void MapThreadTrace(uptr addr, uptr size) {
-  DPrintf("Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
-  CHECK_GE(addr, kTraceMemBegin);
-  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
-  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
-    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
-    Die();
-  }
-}
 static uptr g_data_start;
 static uptr g_data_end;
@@ -180,18 +168,14 @@ static uptr g_tls_size;
 #else
 # define INTERNAL_FUNCTION
 #endif
-extern "C" void _dl_get_tls_static_info(size_t*, size_t*)
-    __attribute__((weak)) INTERNAL_FUNCTION;
 static int InitTlsSize() {
   typedef void (*get_tls_func)(size_t*, size_t*) INTERNAL_FUNCTION;
-  get_tls_func get_tls = &_dl_get_tls_static_info;
-  if (get_tls == 0) {
-    void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
-    CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
-    internal_memcpy(&get_tls, &get_tls_static_info_ptr,
-                    sizeof(get_tls_static_info_ptr));
-  }
+  get_tls_func get_tls;
+  void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
+  CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
+  internal_memcpy(&get_tls, &get_tls_static_info_ptr,
+                  sizeof(get_tls_static_info_ptr));
   CHECK_NE(get_tls, 0);
   size_t tls_size = 0;
   size_t tls_align = 0;
@@ -220,29 +204,35 @@ const char *InitializePlatform() {
     // Disable core dumps, dumping of 16TB usually takes a bit long.
     setlim(RLIMIT_CORE, 0);
   }
-  bool reexec = false;
-  // TSan doesn't play well with unlimited stack size (as stack
-  // overlaps with shadow memory). If we detect unlimited stack size,
-  // we re-exec the program with limited stack size as a best effort.
-  if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
-    const uptr kMaxStackSize = 32 * 1024 * 1024;
-    Report("WARNING: Program is run with unlimited stack size, which "
-           "wouldn't work with ThreadSanitizer.\n");
-    Report("Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize);
-    SetStackSizeLimitInBytes(kMaxStackSize);
-    reexec = true;
-  }
-  if (getlim(RLIMIT_AS) != (rlim_t)-1) {
-    Report("WARNING: Program is run with limited virtual address space, which "
-           "wouldn't work with ThreadSanitizer.\n");
-    Report("Re-execing with unlimited virtual address space.\n");
-    setlim(RLIMIT_AS, -1);
-    reexec = true;
-  }
-  if (reexec)
-    ReExec();
+  // Go maps shadow memory lazily and works fine with limited address space.
+  // Unlimited stack is not a problem as well, because the executable
+  // is not compiled with -pie.
+  if (kCppMode) {
+    bool reexec = false;
+    // TSan doesn't play well with unlimited stack size (as stack
+    // overlaps with shadow memory). If we detect unlimited stack size,
+    // we re-exec the program with limited stack size as a best effort.
+    if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+      const uptr kMaxStackSize = 32 * 1024 * 1024;
+      Report("WARNING: Program is run with unlimited stack size, which "
+             "wouldn't work with ThreadSanitizer.\n");
+      Report("Re-execing with stack size limited to %zd bytes.\n",
+             kMaxStackSize);
+      SetStackSizeLimitInBytes(kMaxStackSize);
+      reexec = true;
+    }
+    if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+      Report("WARNING: Program is run with limited virtual address space,"
+             " which wouldn't work with ThreadSanitizer.\n");
+      Report("Re-execing with unlimited virtual address space.\n");
+      setlim(RLIMIT_AS, -1);
+      reexec = true;
+    }
+    if (reexec)
+      ReExec();
+  }
 #ifndef TSAN_GO
   CheckPIE();
......
@@ -23,12 +23,24 @@ ReportDesc::ReportDesc()
     , sleep() {
 }
+ReportMop::ReportMop()
+    : mset(MBlockReportMutex) {
+}
 ReportDesc::~ReportDesc() {
   // FIXME(dvyukov): it must be leaking a lot of memory.
 }
 #ifndef TSAN_GO
+const int kThreadBufSize = 32;
+const char *thread_name(char *buf, int tid) {
+  if (tid == 0)
+    return "main thread";
+  internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
+  return buf;
+}
 static void PrintHeader(ReportType typ) {
   Printf("WARNING: ThreadSanitizer: ");
@@ -65,52 +77,69 @@ void PrintStack(const ReportStack *ent) {
   Printf("\n");
 }
+static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
+  for (uptr i = 0; i < mset.Size(); i++) {
+    if (i == 0)
+      Printf(" (mutexes:");
+    const ReportMopMutex m = mset[i];
+    Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+    Printf(i == mset.Size() - 1 ? ")" : ",");
+  }
+}
 static void PrintMop(const ReportMop *mop, bool first) {
-  Printf("  %s of size %d at %p",
+  char thrbuf[kThreadBufSize];
+  Printf("  %s of size %d at %p by %s",
          (first ? (mop->write ? "Write" : "Read")
                 : (mop->write ? "Previous write" : "Previous read")),
-         mop->size, (void*)mop->addr);
-  if (mop->tid == 0)
-    Printf(" by main thread:\n");
-  else
-    Printf(" by thread %d:\n", mop->tid);
+         mop->size, (void*)mop->addr,
+         thread_name(thrbuf, mop->tid));
+  PrintMutexSet(mop->mset);
+  Printf(":\n");
   PrintStack(mop->stack);
 }
 static void PrintLocation(const ReportLocation *loc) {
+  char thrbuf[kThreadBufSize];
   if (loc->type == ReportLocationGlobal) {
     Printf("  Location is global '%s' of size %zu at %zx %s:%d (%s+%p)\n\n",
            loc->name, loc->size, loc->addr, loc->file, loc->line,
           loc->module, loc->offset);
   } else if (loc->type == ReportLocationHeap) {
-    Printf("  Location is heap block of size %zu at %p allocated",
-           loc->size, loc->addr);
-    if (loc->tid == 0)
-      Printf(" by main thread:\n");
-    else
-      Printf(" by thread %d:\n", loc->tid);
+    char thrbuf[kThreadBufSize];
+    Printf("  Location is heap block of size %zu at %p allocated by %s:\n",
+           loc->size, loc->addr, thread_name(thrbuf, loc->tid));
     PrintStack(loc->stack);
   } else if (loc->type == ReportLocationStack) {
-    Printf("  Location is stack of thread %d:\n\n", loc->tid);
+    Printf("  Location is stack of %s\n\n", thread_name(thrbuf, loc->tid));
+  } else if (loc->type == ReportLocationFD) {
+    Printf("  Location is file descriptor %d created by %s at:\n",
+           loc->fd, thread_name(thrbuf, loc->tid));
+    PrintStack(loc->stack);
   }
 }
 static void PrintMutex(const ReportMutex *rm) {
-  if (rm->stack == 0)
-    return;
-  Printf("  Mutex %d created at:\n", rm->id);
-  PrintStack(rm->stack);
+  if (rm->destroyed) {
+    Printf("  Mutex M%llu is already destroyed.\n\n", rm->id);
+  } else {
+    Printf("  Mutex M%llu created at:\n", rm->id);
+    PrintStack(rm->stack);
+  }
 }
 static void PrintThread(const ReportThread *rt) {
   if (rt->id == 0)  // Little sense in describing the main thread.
     return;
-  Printf("  Thread %d", rt->id);
+  Printf("  Thread T%d", rt->id);
   if (rt->name)
     Printf(" '%s'", rt->name);
-  Printf(" (tid=%zu, %s)", rt->pid, rt->running ? "running" : "finished");
+  char thrbuf[kThreadBufSize];
+  Printf(" (tid=%zu, %s) created by %s",
         rt->pid, rt->running ? "running" : "finished",
+        thread_name(thrbuf, rt->parent_tid));
   if (rt->stack)
-    Printf(" created at:");
+    Printf(" at:");
   Printf("\n");
   PrintStack(rt->stack);
 }
......
@@ -36,20 +36,27 @@ struct ReportStack {
   int col;
 };
+struct ReportMopMutex {
+  u64 id;
+  bool write;
+};
 struct ReportMop {
   int tid;
   uptr addr;
   int size;
   bool write;
-  int nmutex;
-  int *mutex;
+  Vector<ReportMopMutex> mset;
   ReportStack *stack;
+  ReportMop();
 };
 enum ReportLocationType {
   ReportLocationGlobal,
   ReportLocationHeap,
-  ReportLocationStack
+  ReportLocationStack,
+  ReportLocationFD
 };
 struct ReportLocation {
@@ -59,6 +66,7 @@ struct ReportLocation {
   char *module;
   uptr offset;
   int tid;
+  int fd;
   char *name;
   char *file;
   int line;
@@ -70,11 +78,13 @@ struct ReportThread {
   uptr pid;
   bool running;
   char *name;
+  int parent_tid;
   ReportStack *stack;
 };
 struct ReportMutex {
-  int id;
+  u64 id;
+  bool destroyed;
   ReportStack *stack;
 };
......
@@ -164,6 +164,16 @@ void MapShadow(uptr addr, uptr size) {
   MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
 }
+void MapThreadTrace(uptr addr, uptr size) {
+  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+  CHECK_GE(addr, kTraceMemBegin);
+  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
+    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
+    Die();
+  }
+}
 void Initialize(ThreadState *thr) {
   // Thread safe because done before all threads exist.
   static bool is_initialized = false;
@@ -289,6 +299,7 @@ void TraceSwitch(ThreadState *thr) {
   TraceHeader *hdr = &thr->trace.headers[trace];
   hdr->epoch0 = thr->fast_state.epoch();
   hdr->stack0.ObtainCurrent(thr, 0);
+  hdr->mset0 = thr->mset;
   thr->nomalloc--;
 }
@@ -443,7 +454,7 @@ ALWAYS_INLINE
 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                   int kAccessSizeLog, bool kAccessIsWrite) {
   u64 *shadow_mem = (u64*)MemToShadow(addr);
-  DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
+  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
           " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
           (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
           (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
......
@@ -34,6 +34,7 @@
 #include "tsan_vector.h"
 #include "tsan_report.h"
 #include "tsan_platform.h"
+#include "tsan_mutexset.h"
 #if SANITIZER_WORDSIZE != 64
 # error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -48,6 +49,10 @@ struct MBlock {
   u32 alloc_tid;
   u32 alloc_stack_id;
   SyncVar *head;
+  MBlock()
+    : mtx(MutexTypeMBlock, StatMtxMBlock) {
+  }
 };
 #ifndef TSAN_GO
@@ -58,10 +63,22 @@ const uptr kAllocatorSpace = 0x7d0000000000ULL;
 #endif
 const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.
+struct TsanMapUnmapCallback {
+  void OnMap(uptr p, uptr size) const { }
+  void OnUnmap(uptr p, uptr size) const {
+    // We are about to unmap a chunk of user memory.
+    // Mark the corresponding shadow memory as not needed.
+    uptr shadow_beg = MemToShadow(p);
+    uptr shadow_end = MemToShadow(p + size);
+    CHECK(IsAligned(shadow_end|shadow_beg, GetPageSizeCached()));
+    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+  }
+};
 typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
     DefaultSizeClassMap> PrimaryAllocator;
 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator SecondaryAllocator;
+typedef LargeMmapAllocator<TsanMapUnmapCallback> SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
     SecondaryAllocator> Allocator;
 Allocator *allocator();
@@ -298,6 +315,7 @@ struct ThreadState {
   uptr *shadow_stack;
   uptr *shadow_stack_end;
 #endif
+  MutexSet mset;
   ThreadClock clock;
 #ifndef TSAN_GO
   AllocatorCache alloc_cache;
@@ -369,6 +387,7 @@ struct ThreadContext {
   u64 epoch0;
   u64 epoch1;
   StackTrace creation_stack;
+  int creation_tid;
   ThreadDeadInfo *dead_info;
   ThreadContext *dead_next;  // In dead thread list.
   char *name;  // As annotated by user.
@@ -445,7 +464,8 @@ class ScopedReport {
   ~ScopedReport();
   void AddStack(const StackTrace *stack);
-  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack);
+  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+                       const MutexSet *mset);
   void AddThread(const ThreadContext *tctx);
   void AddMutex(const SyncVar *s);
   void AddLocation(uptr addr, uptr size);
@@ -457,11 +477,13 @@ class ScopedReport {
   Context *ctx_;
   ReportDesc *rep_;
+  void AddMutex(u64 id);
   ScopedReport(const ScopedReport&);
   void operator = (const ScopedReport&);
 };
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk);
+void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
 void StatAggregate(u64 *dst, u64 *src);
 void StatOutput(u64 *stat);
@@ -471,6 +493,7 @@ void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
 }
 void MapShadow(uptr addr, uptr size);
+void MapThreadTrace(uptr addr, uptr size);
 void InitializeShadowMemory();
 void InitializeInterceptors();
 void InitializeDynamicAnnotations();
@@ -502,6 +525,10 @@ void PrintCurrentStack(ThreadState *thr, uptr pc);
 void Initialize(ThreadState *thr);
 int Finalize(ThreadState *thr);
+SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
+                     bool write_lock, bool create);
+SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                   int kAccessSizeLog, bool kAccessIsWrite);
 void MemoryAccessImpl(ThreadState *thr, uptr addr,
@@ -575,7 +602,10 @@ uptr TraceParts();
 extern "C" void __tsan_trace_switch();
 void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
-                                        EventType typ, uptr addr) {
+                                        EventType typ, u64 addr) {
+  DCHECK_GE((int)typ, 0);
+  DCHECK_LE((int)typ, 7);
+  DCHECK_EQ(GetLsb(addr, 61), addr);
   StatInc(thr, StatEvents);
   u64 pos = fs.GetTracePos();
   if (UNLIKELY((pos % kTracePartSize) == 0)) {
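The DCHECKs added to TraceAddEvent pin down the trace event layout: a 3-bit
event type and a 61-bit payload (an address or, after this commit, a SyncVar
id) share one 64-bit slot. A sketch of the packing implied by those checks
(illustration only; the actual encoding lives in the trace code):

u64 PackEvent(EventType typ, u64 addr) {
  return ((u64)typ << 61) | GetLsb(addr, 61);  // typ: 3 msb, payload: 61 lsb
}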
......
@@ -26,7 +26,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
   StatInc(thr, StatMutexCreate);
   if (!linker_init && IsAppMem(addr))
     MemoryWrite1Byte(thr, pc, addr);
-  SyncVar *s = ctx->synctab.GetAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   s->is_rw = rw;
   s->is_recursive = recursive;
   s->is_linker_init = linker_init;
@@ -59,11 +59,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
     trace.ObtainCurrent(thr, pc);
     rep.AddStack(&trace);
     FastState last(s->last_lock);
-    RestoreStack(last.tid(), last.epoch(), &trace);
+    RestoreStack(last.tid(), last.epoch(), &trace, 0);
     rep.AddStack(&trace);
     rep.AddLocation(s->addr, 1);
     OutputReport(ctx, rep);
   }
+  thr->mset.Remove(s->GetId());
   DestroyAndFree(s);
 }
@@ -72,9 +73,9 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeLock, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
   if (s->owner_tid == SyncVar::kInvalidTid) {
     CHECK_EQ(s->recursion, 0);
     s->owner_tid = thr->tid;
@@ -96,6 +97,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
     StatInc(thr, StatMutexRecLock);
   }
   s->recursion++;
+  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
   s->mtx.Unlock();
 }
@@ -104,9 +106,9 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
   if (s->recursion == 0) {
     if (!s->is_broken) {
       s->is_broken = true;
@@ -132,6 +134,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
       StatInc(thr, StatMutexRecUnlock);
     }
   }
+  thr->mset.Del(s->GetId(), true);
   s->mtx.Unlock();
 }
@@ -141,9 +144,9 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
   StatInc(thr, StatMutexReadLock);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
+  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
   if (s->owner_tid != SyncVar::kInvalidTid) {
     Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
     PrintCurrentStack(thr, pc);
@@ -152,6 +155,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
   thr->clock.acquire(&s->clock);
   s->last_lock = thr->fast_state.raw();
   StatInc(thr, StatSyncAcquire);
+  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
   s->mtx.ReadUnlock();
 }
@@ -161,9 +165,9 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   StatInc(thr, StatMutexReadUnlock);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
   if (s->owner_tid != SyncVar::kInvalidTid) {
     Printf("ThreadSanitizer WARNING: read unlock of a write "
"locked mutex\n"); "locked mutex\n");
...@@ -174,6 +178,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { ...@@ -174,6 +178,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
thr->clock.release(&s->read_clock); thr->clock.release(&s->read_clock);
StatInc(thr, StatSyncRelease); StatInc(thr, StatSyncRelease);
s->mtx.Unlock(); s->mtx.Unlock();
thr->mset.Del(s->GetId(), false);
} }
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
...@@ -181,18 +186,22 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { ...@@ -181,18 +186,22 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr); DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr)) if (IsAppMem(addr))
MemoryRead1Byte(thr, pc, addr); MemoryRead1Byte(thr, pc, addr);
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
if (s->owner_tid == SyncVar::kInvalidTid) { if (s->owner_tid == SyncVar::kInvalidTid) {
// Seems to be read unlock. // Seems to be read unlock.
write = false;
StatInc(thr, StatMutexReadUnlock); StatInc(thr, StatMutexReadUnlock);
thr->fast_state.IncrementEpoch(); thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr); TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch(); thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&s->read_clock); thr->clock.release(&s->read_clock);
StatInc(thr, StatSyncRelease); StatInc(thr, StatSyncRelease);
} else if (s->owner_tid == thr->tid) { } else if (s->owner_tid == thr->tid) {
// Seems to be write unlock. // Seems to be write unlock.
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
CHECK_GT(s->recursion, 0); CHECK_GT(s->recursion, 0);
s->recursion--; s->recursion--;
if (s->recursion == 0) { if (s->recursion == 0) {
...@@ -202,8 +211,6 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { ...@@ -202,8 +211,6 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
// The sequence of events is quite tricky and doubled in several places. // The sequence of events is quite tricky and doubled in several places.
// First, it's a bug to increment the epoch w/o writing to the trace. // First, it's a bug to increment the epoch w/o writing to the trace.
// Then, the acquire/release logic can be factored out as well. // Then, the acquire/release logic can be factored out as well.
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch(); thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.ReleaseStore(&s->clock); thr->clock.ReleaseStore(&s->clock);
...@@ -216,13 +223,14 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { ...@@ -216,13 +223,14 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n"); Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
PrintCurrentStack(thr, pc); PrintCurrentStack(thr, pc);
} }
thr->mset.Del(s->GetId(), write);
s->mtx.Unlock(); s->mtx.Unlock();
} }
void Acquire(ThreadState *thr, uptr pc, uptr addr) { void Acquire(ThreadState *thr, uptr pc, uptr addr) {
CHECK_GT(thr->in_rtl, 0); CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Acquire %zx\n", thr->tid, addr); DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->clock.acquire(&s->clock); thr->clock.acquire(&s->clock);
StatInc(thr, StatSyncAcquire); StatInc(thr, StatSyncAcquire);
...@@ -246,7 +254,7 @@ void AcquireGlobal(ThreadState *thr, uptr pc) { ...@@ -246,7 +254,7 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
void Release(ThreadState *thr, uptr pc, uptr addr) { void Release(ThreadState *thr, uptr pc, uptr addr) {
CHECK_GT(thr->in_rtl, 0); CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Release %zx\n", thr->tid, addr); DPrintf("#%d: Release %zx\n", thr->tid, addr);
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->clock.release(&s->clock); thr->clock.release(&s->clock);
StatInc(thr, StatSyncRelease); StatInc(thr, StatSyncRelease);
...@@ -256,7 +264,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr) { ...@@ -256,7 +264,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
CHECK_GT(thr->in_rtl, 0); CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr); DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->clock.ReleaseStore(&s->clock); thr->clock.ReleaseStore(&s->clock);
StatInc(thr, StatSyncRelease); StatInc(thr, StatSyncRelease);
......
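The thr->mset.Add/Del calls threaded through the mutex hunks above maintain the per-thread set of currently held mutexes, keyed by SyncVar::GetId() plus a write flag and the acquisition epoch. The MutexSet itself lives in tsan_mutexset.h, which this diff does not show; the class below is a hypothetical stand-in (std::vector for brevity, where the real structure is fixed-size) meant only to illustrate the contract the call sites assume:

#include <cstddef>
#include <cstdint>
#include <vector>

typedef uint64_t u64;

class MutexSetSketch {  // hypothetical stand-in for __tsan::MutexSet
 public:
  struct Desc { u64 id; bool write; u64 epoch; };
  // Called on every lock/read-lock with the sync id and current epoch.
  void Add(u64 id, bool write, u64 epoch) {
    Desc d = {id, write, epoch};
    descs_.push_back(d);
  }
  // Called on every unlock/read-unlock; removes one matching entry.
  void Del(u64 id, bool write) {
    for (size_t i = 0; i < descs_.size(); i++) {
      if (descs_[i].id == id && descs_[i].write == write) {
        descs_.erase(descs_.begin() + i);
        return;
      }
    }
  }
  size_t Size() const { return descs_.size(); }
  Desc Get(size_t i) const { return descs_[i]; }

 private:
  std::vector<Desc> descs_;
};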
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "tsan_platform.h" #include "tsan_platform.h"
#include "tsan_rtl.h" #include "tsan_rtl.h"
#include "tsan_suppressions.h" #include "tsan_suppressions.h"
...@@ -20,9 +21,12 @@ ...@@ -20,9 +21,12 @@
#include "tsan_sync.h" #include "tsan_sync.h"
#include "tsan_mman.h" #include "tsan_mman.h"
#include "tsan_flags.h" #include "tsan_flags.h"
#include "tsan_fd.h"
namespace __tsan { namespace __tsan {
using namespace __sanitizer; // NOLINT
void TsanCheckFailed(const char *file, int line, const char *cond, void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) { u64 v1, u64 v2) {
ScopedInRtl in_rtl; ScopedInRtl in_rtl;
...@@ -132,7 +136,7 @@ void ScopedReport::AddStack(const StackTrace *stack) { ...@@ -132,7 +136,7 @@ void ScopedReport::AddStack(const StackTrace *stack) {
} }
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
const StackTrace *stack) { const StackTrace *stack, const MutexSet *mset) {
void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop)); void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
ReportMop *mop = new(mem) ReportMop; ReportMop *mop = new(mem) ReportMop;
rep_->mops.PushBack(mop); rep_->mops.PushBack(mop);
...@@ -140,8 +144,27 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, ...@@ -140,8 +144,27 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->addr = addr + s.addr0(); mop->addr = addr + s.addr0();
mop->size = s.size(); mop->size = s.size();
mop->write = s.is_write(); mop->write = s.is_write();
mop->nmutex = 0;
mop->stack = SymbolizeStack(*stack); mop->stack = SymbolizeStack(*stack);
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
u64 uid = 0;
uptr addr = SyncVar::SplitId(d.id, &uid);
SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
// Check that the mutex is still alive.
// Another mutex can be created at the same address,
// so check uid as well.
if (s && s->CheckId(uid)) {
ReportMopMutex mtx = {s->uid, d.write};
mop->mset.PushBack(mtx);
AddMutex(s);
} else {
ReportMopMutex mtx = {d.id, d.write};
mop->mset.PushBack(mtx);
AddMutex(d.id);
}
if (s)
s->mtx.ReadUnlock();
}
} }
void ScopedReport::AddThread(const ThreadContext *tctx) { void ScopedReport::AddThread(const ThreadContext *tctx) {
...@@ -156,6 +179,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx) { ...@@ -156,6 +179,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
rt->pid = tctx->os_id; rt->pid = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning); rt->running = (tctx->status == ThreadStatusRunning);
rt->name = tctx->name ? internal_strdup(tctx->name) : 0; rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
rt->parent_tid = tctx->creation_tid;
rt->stack = SymbolizeStack(tctx->creation_stack); rt->stack = SymbolizeStack(tctx->creation_stack);
} }
...@@ -173,17 +197,58 @@ static ThreadContext *FindThread(int unique_id) { ...@@ -173,17 +197,58 @@ static ThreadContext *FindThread(int unique_id) {
#endif #endif
void ScopedReport::AddMutex(const SyncVar *s) { void ScopedReport::AddMutex(const SyncVar *s) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == s->uid)
return;
}
void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
ReportMutex *rm = new(mem) ReportMutex(); ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm); rep_->mutexes.PushBack(rm);
rm->id = 42; rm->id = s->uid;
rm->destroyed = false;
rm->stack = SymbolizeStack(s->creation_stack); rm->stack = SymbolizeStack(s->creation_stack);
} }
void ScopedReport::AddMutex(u64 id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == id)
return;
}
void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = id;
rm->destroyed = true;
rm->stack = 0;
}
void ScopedReport::AddLocation(uptr addr, uptr size) { void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0) if (addr == 0)
return; return;
#ifndef TSAN_GO #ifndef TSAN_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
|| FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
loc->type = ReportLocationFD;
loc->fd = fd;
loc->tid = creat_tid;
uptr ssz = 0;
const uptr *stack = StackDepotGet(creat_stack, &ssz);
if (stack) {
StackTrace trace;
trace.Init(stack, ssz);
loc->stack = SymbolizeStack(trace);
}
ThreadContext *tctx = FindThread(creat_tid);
if (tctx)
AddThread(tctx);
return;
}
if (allocator()->PointerIsMine((void*)addr)) { if (allocator()->PointerIsMine((void*)addr)) {
MBlock *b = user_mblock(0, (void*)addr); MBlock *b = user_mblock(0, (void*)addr);
ThreadContext *tctx = FindThread(b->alloc_tid); ThreadContext *tctx = FindThread(b->alloc_tid);
...@@ -246,7 +311,10 @@ const ReportDesc *ScopedReport::GetReport() const { ...@@ -246,7 +311,10 @@ const ReportDesc *ScopedReport::GetReport() const {
return rep_; return rep_;
} }
void RestoreStack(int tid, const u64 epoch, StackTrace *stk) { void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
ThreadContext *tctx = CTX()->threads[tid]; ThreadContext *tctx = CTX()->threads[tid];
if (tctx == 0) if (tctx == 0)
return; return;
...@@ -267,6 +335,7 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) { ...@@ -267,6 +335,7 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
TraceHeader* hdr = &trace->headers[partidx]; TraceHeader* hdr = &trace->headers[partidx];
if (epoch < hdr->epoch0) if (epoch < hdr->epoch0)
return; return;
const u64 epoch0 = RoundDown(epoch, TraceSize());
const u64 eend = epoch % TraceSize(); const u64 eend = epoch % TraceSize();
const u64 ebegin = RoundDown(eend, kTracePartSize); const u64 ebegin = RoundDown(eend, kTracePartSize);
DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
...@@ -276,12 +345,14 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) { ...@@ -276,12 +345,14 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
stack[i] = hdr->stack0.Get(i); stack[i] = hdr->stack0.Get(i);
DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]); DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
} }
if (mset)
*mset = hdr->mset0;
uptr pos = hdr->stack0.Size(); uptr pos = hdr->stack0.Size();
Event *events = (Event*)GetThreadTrace(tid); Event *events = (Event*)GetThreadTrace(tid);
for (uptr i = ebegin; i <= eend; i++) { for (uptr i = ebegin; i <= eend; i++) {
Event ev = events[i]; Event ev = events[i];
EventType typ = (EventType)(ev >> 61); EventType typ = (EventType)(ev >> 61);
uptr pc = (uptr)(ev & 0xffffffffffffull); uptr pc = (uptr)(ev & ((1ull << 61) - 1));
DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc); DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
if (typ == EventTypeMop) { if (typ == EventTypeMop) {
stack[pos] = pc; stack[pos] = pc;
...@@ -291,6 +362,17 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) { ...@@ -291,6 +362,17 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
if (pos > 0) if (pos > 0)
pos--; pos--;
} }
if (mset) {
if (typ == EventTypeLock) {
mset->Add(pc, true, epoch0 + i);
} else if (typ == EventTypeUnlock) {
mset->Del(pc, true);
} else if (typ == EventTypeRLock) {
mset->Add(pc, false, epoch0 + i);
} else if (typ == EventTypeRUnlock) {
mset->Del(pc, false);
}
}
for (uptr j = 0; j <= pos; j++) for (uptr j = 0; j <= pos; j++)
DPrintf2(" #%zu: %zx\n", j, stack[j]); DPrintf2(" #%zu: %zx\n", j, stack[j]);
} }
...@@ -400,8 +482,11 @@ static bool IsJavaNonsense(const ReportDesc *rep) { ...@@ -400,8 +482,11 @@ static bool IsJavaNonsense(const ReportDesc *rep) {
if (frame != 0 && frame->func != 0 if (frame != 0 && frame->func != 0
&& (internal_strcmp(frame->func, "memset") == 0 && (internal_strcmp(frame->func, "memset") == 0
|| internal_strcmp(frame->func, "memcpy") == 0 || internal_strcmp(frame->func, "memcpy") == 0
|| internal_strcmp(frame->func, "memmove") == 0
|| internal_strcmp(frame->func, "strcmp") == 0 || internal_strcmp(frame->func, "strcmp") == 0
|| internal_strcmp(frame->func, "strncpy") == 0 || internal_strcmp(frame->func, "strncpy") == 0
|| internal_strcmp(frame->func, "strlen") == 0
|| internal_strcmp(frame->func, "free") == 0
|| internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) { || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) {
frame = frame->next; frame = frame->next;
if (frame == 0 if (frame == 0
...@@ -423,6 +508,10 @@ void ReportRace(ThreadState *thr) { ...@@ -423,6 +508,10 @@ void ReportRace(ThreadState *thr) {
return; return;
ScopedInRtl in_rtl; ScopedInRtl in_rtl;
if (thr->in_signal_handler)
Printf("ThreadSanitizer: printing report from signal handler."
" Can crash or hang.\n");
bool freed = false; bool freed = false;
{ {
Shadow s(thr->racy_state[1]); Shadow s(thr->racy_state[1]);
...@@ -454,15 +543,18 @@ void ReportRace(ThreadState *thr) { ...@@ -454,15 +543,18 @@ void ReportRace(ThreadState *thr) {
traces[0].ObtainCurrent(thr, toppc); traces[0].ObtainCurrent(thr, toppc);
if (IsFiredSuppression(ctx, rep, traces[0])) if (IsFiredSuppression(ctx, rep, traces[0]))
return; return;
InternalScopedBuffer<MutexSet> mset2(1);
new(mset2.data()) MutexSet();
Shadow s2(thr->racy_state[1]); Shadow s2(thr->racy_state[1]);
RestoreStack(s2.tid(), s2.epoch(), &traces[1]); RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
if (HandleRacyStacks(thr, traces, addr_min, addr_max)) if (HandleRacyStacks(thr, traces, addr_min, addr_max))
return; return;
for (uptr i = 0; i < kMop; i++) { for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]); Shadow s(thr->racy_state[i]);
rep.AddMemoryAccess(addr, s, &traces[i]); rep.AddMemoryAccess(addr, s, &traces[i],
i == 0 ? &thr->mset : mset2.data());
} }
if (flags()->suppress_java && IsJavaNonsense(rep.GetReport())) if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
......
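The replay logic added to RestoreStack is the consumer of all this bookkeeping: it seeds the mutex set from the snapshot stored in the trace-part header (mset0) and applies lock/unlock events in order up to the target epoch, so a race report can list the mutexes held at the racy access. A self-contained condensation of that loop follows; the EventType ordering here is an assumption for illustration, the real enum lives in tsan_trace.h:

#include <cstddef>
#include <cstdint>
#include <vector>

typedef uint64_t u64;

// Assumed ordering, for illustration only.
enum EventType { EventTypeMop, EventTypeFuncEnter, EventTypeFuncExit,
                 EventTypeLock, EventTypeUnlock, EventTypeRLock,
                 EventTypeRUnlock };

struct HeldMutex { u64 id; bool write; u64 epoch; };

// events[i] corresponds to epoch epoch0 + i; mset starts as a copy of the
// header snapshot and ends as the set held at the last replayed event.
static void ReplayMutexSet(const std::vector<u64> &events, u64 epoch0,
                           std::vector<HeldMutex> *mset) {
  for (size_t i = 0; i < events.size(); i++) {
    u64 ev = events[i];
    EventType typ = (EventType)(ev >> 61);   // 3-bit event type
    u64 id = ev & ((1ull << 61) - 1);        // 61-bit sync id payload
    if (typ == EventTypeLock || typ == EventTypeRLock) {
      HeldMutex m = {id, typ == EventTypeLock, epoch0 + i};
      mset->push_back(m);
    } else if (typ == EventTypeUnlock || typ == EventTypeRUnlock) {
      bool write = (typ == EventTypeUnlock);
      for (size_t j = 0; j < mset->size(); j++) {
        if ((*mset)[j].id == id && (*mset)[j].write == write) {
          mset->erase(mset->begin() + j);
          break;
        }
      }
    }
  }
}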
...@@ -154,6 +154,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { ...@@ -154,6 +154,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
thr->clock.release(&tctx->sync); thr->clock.release(&tctx->sync);
StatInc(thr, StatSyncRelease); StatInc(thr, StatSyncRelease);
tctx->creation_stack.ObtainCurrent(thr, pc); tctx->creation_stack.ObtainCurrent(thr, pc);
tctx->creation_tid = thr->tid;
} }
return tid; return tid;
} }
...@@ -303,6 +304,7 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) { ...@@ -303,6 +304,7 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
Printf("ThreadSanitizer: join of non-existent thread\n"); Printf("ThreadSanitizer: join of non-existent thread\n");
return; return;
} }
// FIXME(dvyukov): print message and continue (it's user error).
CHECK_EQ(tctx->detached, false); CHECK_EQ(tctx->detached, false);
CHECK_EQ(tctx->status, ThreadStatusFinished); CHECK_EQ(tctx->status, ThreadStatusFinished);
thr->clock.acquire(&tctx->sync); thr->clock.acquire(&tctx->sync);
......
...@@ -179,6 +179,28 @@ void StatOutput(u64 *stat) { ...@@ -179,6 +179,28 @@ void StatOutput(u64 *stat) {
name[StatInt_sem_timedwait] = " sem_timedwait "; name[StatInt_sem_timedwait] = " sem_timedwait ";
name[StatInt_sem_post] = " sem_post "; name[StatInt_sem_post] = " sem_post ";
name[StatInt_sem_getvalue] = " sem_getvalue "; name[StatInt_sem_getvalue] = " sem_getvalue ";
name[StatInt_open] = " open ";
name[StatInt_open64] = " open64 ";
name[StatInt_creat] = " creat ";
name[StatInt_creat64] = " creat64 ";
name[StatInt_dup] = " dup ";
name[StatInt_dup2] = " dup2 ";
name[StatInt_dup3] = " dup3 ";
name[StatInt_eventfd] = " eventfd ";
name[StatInt_signalfd] = " signalfd ";
name[StatInt_inotify_init] = " inotify_init ";
name[StatInt_inotify_init1] = " inotify_init1 ";
name[StatInt_socket] = " socket ";
name[StatInt_socketpair] = " socketpair ";
name[StatInt_connect] = " connect ";
name[StatInt_accept] = " accept ";
name[StatInt_accept4] = " accept4 ";
name[StatInt_epoll_create] = " epoll_create ";
name[StatInt_epoll_create1] = " epoll_create1 ";
name[StatInt_close] = " close ";
name[StatInt___close] = " __close ";
name[StatInt_pipe] = " pipe ";
name[StatInt_pipe2] = " pipe2 ";
name[StatInt_read] = " read "; name[StatInt_read] = " read ";
name[StatInt_pread] = " pread "; name[StatInt_pread] = " pread ";
name[StatInt_pread64] = " pread64 "; name[StatInt_pread64] = " pread64 ";
...@@ -195,6 +217,8 @@ void StatOutput(u64 *stat) { ...@@ -195,6 +217,8 @@ void StatOutput(u64 *stat) {
name[StatInt_recvmsg] = " recvmsg "; name[StatInt_recvmsg] = " recvmsg ";
name[StatInt_unlink] = " unlink "; name[StatInt_unlink] = " unlink ";
name[StatInt_fopen] = " fopen "; name[StatInt_fopen] = " fopen ";
name[StatInt_freopen] = " freopen ";
name[StatInt_fclose] = " fclose ";
name[StatInt_fread] = " fread "; name[StatInt_fread] = " fread ";
name[StatInt_fwrite] = " fwrite "; name[StatInt_fwrite] = " fwrite ";
name[StatInt_puts] = " puts "; name[StatInt_puts] = " puts ";
...@@ -208,6 +232,7 @@ void StatOutput(u64 *stat) { ...@@ -208,6 +232,7 @@ void StatOutput(u64 *stat) {
name[StatInt_usleep] = " usleep "; name[StatInt_usleep] = " usleep ";
name[StatInt_nanosleep] = " nanosleep "; name[StatInt_nanosleep] = " nanosleep ";
name[StatInt_gettimeofday] = " gettimeofday "; name[StatInt_gettimeofday] = " gettimeofday ";
name[StatInt_fork] = " fork ";
name[StatAnnotation] = "Dynamic annotations "; name[StatAnnotation] = "Dynamic annotations ";
name[StatAnnotateHappensBefore] = " HappensBefore "; name[StatAnnotateHappensBefore] = " HappensBefore ";
...@@ -251,6 +276,8 @@ void StatOutput(u64 *stat) { ...@@ -251,6 +276,8 @@ void StatOutput(u64 *stat) {
name[StatMtxSlab] = " Slab "; name[StatMtxSlab] = " Slab ";
name[StatMtxAtExit] = " Atexit "; name[StatMtxAtExit] = " Atexit ";
name[StatMtxAnnotations] = " Annotations "; name[StatMtxAnnotations] = " Annotations ";
name[StatMtxMBlock] = " MBlock ";
name[StatMtxJavaMBlock] = " JavaMBlock ";
Printf("Statistics:\n"); Printf("Statistics:\n");
for (int i = 0; i < StatCnt; i++) for (int i = 0; i < StatCnt; i++)
......
...@@ -174,6 +174,28 @@ enum StatType { ...@@ -174,6 +174,28 @@ enum StatType {
StatInt_sem_timedwait, StatInt_sem_timedwait,
StatInt_sem_post, StatInt_sem_post,
StatInt_sem_getvalue, StatInt_sem_getvalue,
StatInt_open,
StatInt_open64,
StatInt_creat,
StatInt_creat64,
StatInt_dup,
StatInt_dup2,
StatInt_dup3,
StatInt_eventfd,
StatInt_signalfd,
StatInt_inotify_init,
StatInt_inotify_init1,
StatInt_socket,
StatInt_socketpair,
StatInt_connect,
StatInt_accept,
StatInt_accept4,
StatInt_epoll_create,
StatInt_epoll_create1,
StatInt_close,
StatInt___close,
StatInt_pipe,
StatInt_pipe2,
StatInt_read, StatInt_read,
StatInt_pread, StatInt_pread,
StatInt_pread64, StatInt_pread64,
...@@ -190,6 +212,8 @@ enum StatType { ...@@ -190,6 +212,8 @@ enum StatType {
StatInt_recvmsg, StatInt_recvmsg,
StatInt_unlink, StatInt_unlink,
StatInt_fopen, StatInt_fopen,
StatInt_freopen,
StatInt_fclose,
StatInt_fread, StatInt_fread,
StatInt_fwrite, StatInt_fwrite,
StatInt_puts, StatInt_puts,
...@@ -207,6 +231,7 @@ enum StatType { ...@@ -207,6 +231,7 @@ enum StatType {
StatInt_usleep, StatInt_usleep,
StatInt_nanosleep, StatInt_nanosleep,
StatInt_gettimeofday, StatInt_gettimeofday,
StatInt_fork,
// Dynamic annotations. // Dynamic annotations.
StatAnnotation, StatAnnotation,
...@@ -253,6 +278,8 @@ enum StatType { ...@@ -253,6 +278,8 @@ enum StatType {
StatMtxSlab, StatMtxSlab,
StatMtxAnnotations, StatMtxAnnotations,
StatMtxAtExit, StatMtxAtExit,
StatMtxMBlock,
StatMtxJavaMBlock,
// This must be the last. // This must be the last.
StatCnt StatCnt
......
...@@ -102,11 +102,11 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { ...@@ -102,11 +102,11 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
m->base = (uptr)info->dlpi_addr; m->base = (uptr)info->dlpi_addr;
m->inp_fd = -1; m->inp_fd = -1;
m->out_fd = -1; m->out_fd = -1;
DPrintf("Module %s %zx\n", m->name, m->base); DPrintf2("Module %s %zx\n", m->name, m->base);
for (int i = 0; i < info->dlpi_phnum; i++) { for (int i = 0; i < info->dlpi_phnum; i++) {
const Elf64_Phdr *s = &info->dlpi_phdr[i]; const Elf64_Phdr *s = &info->dlpi_phdr[i];
DPrintf(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx" DPrintf2(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
" p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n", " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n",
(uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr, (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr,
(uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz, (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz,
(uptr)s->p_flags, (uptr)s->p_align); (uptr)s->p_flags, (uptr)s->p_align);
...@@ -119,7 +119,7 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { ...@@ -119,7 +119,7 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
sec->end = sec->base + s->p_memsz; sec->end = sec->base + s->p_memsz;
sec->next = ctx->sections; sec->next = ctx->sections;
ctx->sections = sec; ctx->sections = sec;
DPrintf(" Section %zx-%zx\n", sec->base, sec->end); DPrintf2(" Section %zx-%zx\n", sec->base, sec->end);
} }
return 0; return 0;
} }
......
...@@ -15,9 +15,10 @@ ...@@ -15,9 +15,10 @@
namespace __tsan { namespace __tsan {
SyncVar::SyncVar(uptr addr) SyncVar::SyncVar(uptr addr, u64 uid)
: mtx(MutexTypeSyncVar, StatMtxSyncVar) : mtx(MutexTypeSyncVar, StatMtxSyncVar)
, addr(addr) , addr(addr)
, uid(uid)
, owner_tid(kInvalidTid) , owner_tid(kInvalidTid)
, last_lock() , last_lock()
, recursion() , recursion()
...@@ -45,9 +46,38 @@ SyncTab::~SyncTab() { ...@@ -45,9 +46,38 @@ SyncTab::~SyncTab() {
} }
} }
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock) {
return GetAndLock(thr, pc, addr, write_lock, true);
}
SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
return GetAndLock(0, 0, addr, write_lock, false);
}
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
StatInc(thr, StatSyncCreated);
void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
res->creation_stack.ObtainCurrent(thr, pc);
#endif
return res;
}
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock) { uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO #ifndef TSAN_GO
{ // NOLINT
SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
if (res)
return res;
}
// Here we ask only PrimaryAllocator, because
// SecondaryAllocator::PointerIsMine() is slow and we have fallback on
// the hashmap anyway.
if (PrimaryAllocator::PointerIsMine((void*)addr)) { if (PrimaryAllocator::PointerIsMine((void*)addr)) {
MBlock *b = user_mblock(thr, (void*)addr); MBlock *b = user_mblock(thr, (void*)addr);
Lock l(&b->mtx); Lock l(&b->mtx);
...@@ -57,10 +87,9 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, ...@@ -57,10 +87,9 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
break; break;
} }
if (res == 0) { if (res == 0) {
StatInc(thr, StatSyncCreated); if (!create)
void *mem = internal_alloc(MBlockSync, sizeof(SyncVar)); return 0;
res = new(mem) SyncVar(addr); res = Create(thr, pc, addr);
res->creation_stack.ObtainCurrent(thr, pc);
res->next = b->head; res->next = b->head;
b->head = res; b->head = res;
} }
...@@ -85,6 +114,8 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, ...@@ -85,6 +114,8 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
} }
} }
} }
if (!create)
return 0;
{ {
Lock l(&p->mtx); Lock l(&p->mtx);
SyncVar *res = p->val; SyncVar *res = p->val;
...@@ -93,12 +124,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, ...@@ -93,12 +124,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
break; break;
} }
if (res == 0) { if (res == 0) {
StatInc(thr, StatSyncCreated); res = Create(thr, pc, addr);
void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
res = new(mem) SyncVar(addr);
#ifndef TSAN_GO
res->creation_stack.ObtainCurrent(thr, pc);
#endif
res->next = p->val; res->next = p->val;
p->val = res; p->val = res;
} }
...@@ -112,6 +138,11 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, ...@@ -112,6 +138,11 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) { SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO #ifndef TSAN_GO
{ // NOLINT
SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
if (res)
return res;
}
if (PrimaryAllocator::PointerIsMine((void*)addr)) { if (PrimaryAllocator::PointerIsMine((void*)addr)) {
MBlock *b = user_mblock(thr, (void*)addr); MBlock *b = user_mblock(thr, (void*)addr);
SyncVar *res = 0; SyncVar *res = 0;
......
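The new SyncTab::Create centralizes SyncVar construction and draws a globally unique uid from an atomic counter (uid_gen_), which is what lets a later report distinguish a live mutex from a destroyed one whose address was reused. A standalone sketch of that scheme, with std::atomic standing in for the sanitizer's atomic_uint64_t:

#include <atomic>
#include <cstdint>

static std::atomic<uint64_t> uid_gen(0);

struct SyncVarUidSketch {
  uint64_t uid;
  SyncVarUidSketch() : uid(uid_gen.fetch_add(1, std::memory_order_relaxed)) {}
  // Trace events carry only the low 14 bits of the uid (see SyncVar::GetId
  // in the tsan_sync.h hunk below), so liveness checks compare those bits.
  bool CheckId(uint64_t low14) const {
    return (uid & ((1ull << 14) - 1)) == low14;
  }
};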
...@@ -48,12 +48,13 @@ class StackTrace { ...@@ -48,12 +48,13 @@ class StackTrace {
}; };
struct SyncVar { struct SyncVar {
explicit SyncVar(uptr addr); explicit SyncVar(uptr addr, u64 uid);
static const int kInvalidTid = -1; static const int kInvalidTid = -1;
Mutex mtx; Mutex mtx;
const uptr addr; uptr addr;
const u64 uid; // Globally unique id.
SyncClock clock; SyncClock clock;
SyncClock read_clock; // Used for rw mutexes only. SyncClock read_clock; // Used for rw mutexes only.
StackTrace creation_stack; StackTrace creation_stack;
...@@ -67,6 +68,18 @@ struct SyncVar { ...@@ -67,6 +68,18 @@ struct SyncVar {
SyncVar *next; // In SyncTab hashtable. SyncVar *next; // In SyncTab hashtable.
uptr GetMemoryConsumption(); uptr GetMemoryConsumption();
u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
return GetLsb((u64)addr | (uid << 47), 61);
}
bool CheckId(u64 uid) const {
CHECK_EQ(uid, GetLsb(uid, 14));
return GetLsb(this->uid, 14) == uid;
}
static uptr SplitId(u64 id, u64 *uid) {
*uid = id >> 47;
return (uptr)GetLsb(id, 47);
}
}; };
class SyncTab { class SyncTab {
...@@ -74,13 +87,15 @@ class SyncTab { ...@@ -74,13 +87,15 @@ class SyncTab {
SyncTab(); SyncTab();
~SyncTab(); ~SyncTab();
// If the SyncVar does not exist yet, it is created. SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock);
uptr addr, bool write_lock); SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
// If the SyncVar does not exist, returns 0. // If the SyncVar does not exist, returns 0.
SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr); SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
uptr GetMemoryConsumption(uptr *nsync); uptr GetMemoryConsumption(uptr *nsync);
private: private:
...@@ -94,9 +109,13 @@ class SyncTab { ...@@ -94,9 +109,13 @@ class SyncTab {
// FIXME: Implement something more sane. // FIXME: Implement something more sane.
static const int kPartCount = 1009; static const int kPartCount = 1009;
Part tab_[kPartCount]; Part tab_[kPartCount];
atomic_uint64_t uid_gen_;
int PartIdx(uptr addr); int PartIdx(uptr addr);
SyncVar* GetAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock, bool create);
SyncTab(const SyncTab&); // Not implemented. SyncTab(const SyncTab&); // Not implemented.
void operator = (const SyncTab&); // Not implemented. void operator = (const SyncTab&); // Not implemented.
}; };
......
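The GetId/SplitId pair above defines how a SyncVar travels through the fixed-width trace: the low 47 bits carry the address, the next 14 bits carry the low part of the uid, and the top 3 bits stay zero so the id fits in an event's 61-bit payload. A self-contained restatement, with plain stdint types standing in for the sanitizer's own:

#include <cstdint>

typedef uint64_t u64;
typedef uintptr_t uptr;

static u64 GetLsb(u64 v, int bits) { return v & ((1ull << bits) - 1); }

// Pack: addr in bits 46..0, low 14 uid bits in bits 60..47, bits 63..61 zero.
static u64 PackSyncId(uptr addr, u64 uid) {
  return GetLsb((u64)addr | (uid << 47), 61);
}

// Unpack: mirrors SyncVar::SplitId.
static uptr UnpackSyncId(u64 id, u64 *uid) {
  *uid = id >> 47;
  return (uptr)GetLsb(id, 47);
}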
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include "tsan_defs.h" #include "tsan_defs.h"
#include "tsan_mutex.h" #include "tsan_mutex.h"
#include "tsan_sync.h" #include "tsan_sync.h"
#include "tsan_mutexset.h"
namespace __tsan { namespace __tsan {
...@@ -41,6 +42,7 @@ typedef u64 Event; ...@@ -41,6 +42,7 @@ typedef u64 Event;
struct TraceHeader { struct TraceHeader {
StackTrace stack0; // Start stack for the trace. StackTrace stack0; // Start stack for the trace.
u64 epoch0; // Start epoch for the trace. u64 epoch0; // Start epoch for the trace.
MutexSet mset0;
#ifndef TSAN_GO #ifndef TSAN_GO
uptr stack0buf[kTraceStackSize]; uptr stack0buf[kTraceStackSize];
#endif #endif
......