Commit 2660d12d, authored and committed by Kostya Serebryany

libsanitizer merge from upstream r173241

From-SVN: r195404
parent b3996898
2013-01-23 Kostya Serebryany <kcc@google.com>
* config/darwin.h: remove dependency on CoreFoundation (asan on Mac OS).
gcc/ChangeLog
+2013-01-23  Kostya Serebryany  <kcc@google.com>
+
+	* config/darwin.h: remove dependency on CoreFoundation (asan on Mac OS).
+
 2013-01-23  Jakub Jelinek  <jakub@redhat.com>

 	PR target/49069
gcc/config/darwin.h
@@ -178,7 +178,7 @@ extern GTY(()) int darwin_ms_struct;
    %{L*} %(link_libgcc) %o %{fprofile-arcs|fprofile-generate*|coverage:-lgcov} \
    %{fopenmp|ftree-parallelize-loops=*: \
      %{static|static-libgcc|static-libstdc++|static-libgfortran: libgomp.a%s; : -lgomp } } \
-   %{fsanitize=address: -framework CoreFoundation -lasan } \
+   %{fsanitize=address: -lasan } \
    %{fgnu-tm: \
      %{static|static-libgcc|static-libstdc++|static-libgfortran: libitm.a%s; : -litm } } \
    %{!nostdlib:%{!nodefaultlibs:\
libsanitizer/ChangeLog
+2013-01-23  Kostya Serebryany  <kcc@google.com>
+
+	PR sanitizer/55989
+	* All source files: Merge from upstream r173241.
+	* merge.sh: Support merging .inc files.
+
 2013-01-16  Jakub Jelinek  <jakub@redhat.com>

 	* sanitizer_common/Makefile.am (AM_CXXFLAGS): Remove
libsanitizer/MERGE
-171973
+173241
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
libsanitizer/asan/asan_allocator.cc
@@ -27,7 +27,6 @@
 #if ASAN_ALLOCATOR_VERSION == 1
 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_mapping.h"
 #include "asan_stats.h"
 #include "asan_report.h"
@@ -35,6 +34,7 @@
 #include "asan_thread_registry.h"
 #include "sanitizer/asan_interface.h"
 #include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"

 namespace __asan {
@@ -227,7 +227,7 @@ class MallocInfo {
     AsanChunk *m = 0;
     AsanChunk **fl = &free_lists_[size_class];
     {
-      ScopedLock lock(&mu_);
+      BlockingMutexLock lock(&mu_);
       for (uptr i = 0; i < n_chunks; i++) {
         if (!(*fl)) {
           *fl = GetNewChunks(size_class);
@@ -245,7 +245,7 @@ class MallocInfo {
   void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                        bool eat_free_lists) {
     CHECK(flags()->quarantine_size > 0);
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     AsanChunkFifoList *q = &x->quarantine_;
     if (q->size() > 0) {
       quarantine_.PushList(q);
@@ -269,18 +269,18 @@ class MallocInfo {
   }

   void BypassThreadLocalQuarantine(AsanChunk *chunk) {
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     quarantine_.Push(chunk);
   }

   AsanChunk *FindChunkByAddr(uptr addr) {
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     return FindChunkByAddrUnlocked(addr);
   }

   uptr AllocationSize(uptr ptr) {
     if (!ptr) return 0;
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     // Make sure this is our chunk and |ptr| actually points to the beginning
     // of the allocated memory.
@@ -303,7 +303,7 @@ class MallocInfo {
   }

   void PrintStatus() {
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     uptr malloced = 0;

     Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
@@ -321,7 +321,7 @@ class MallocInfo {
   }

   PageGroup *FindPageGroup(uptr addr) {
-    ScopedLock lock(&mu_);
+    BlockingMutexLock lock(&mu_);
     return FindPageGroupUnlocked(addr);
   }
@@ -479,7 +479,7 @@ class MallocInfo {
   AsanChunk *free_lists_[kNumberOfSizeClasses];
   AsanChunkFifoList quarantine_;
-  AsanLock mu_;
+  BlockingMutex mu_;

   PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
   atomic_uint32_t n_page_groups_;
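This file-wide substitution retires ASan's hand-rolled AsanLock (pthread_mutex_t on Linux, OSSpinLock on Mac, hidden behind opaque storage) in favor of sanitizer_common's BlockingMutex with the BlockingMutexLock RAII guard. The discipline is the classic scoped-lock pattern; a minimal standalone sketch using standard C++ in place of the sanitizer types:

    #include <mutex>

    // Sketch of the RAII locking pattern the diff converges on: a mutex that
    // lives for the whole process (never destroyed, to avoid at-exit races)
    // and a scoped guard that releases it on every path out of the block.
    static std::mutex g_mu;  // stands in for BlockingMutex mu_
    static int g_balance;

    void Deposit(int amount) {
      std::lock_guard<std::mutex> lock(g_mu);  // stands in for BlockingMutexLock
      g_balance += amount;
    }  // unlocked here, even if the body returns early or throws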
libsanitizer/asan/asan_allocator.h
@@ -20,8 +20,14 @@
 // We are in the process of transitioning from the old allocator (version 1)
 // to a new one (version 2). The change is quite intrusive so both allocators
 // will co-exist in the source base for a while. The actual allocator is chosen
-// at build time by redefining this macrozz.
-#define ASAN_ALLOCATOR_VERSION 1
+// at build time by redefining this macro.
+#ifndef ASAN_ALLOCATOR_VERSION
+# if ASAN_LINUX && !ASAN_ANDROID
+# define ASAN_ALLOCATOR_VERSION 2
+# else
+# define ASAN_ALLOCATOR_VERSION 1
+# endif
+#endif  // ASAN_ALLOCATOR_VERSION

 namespace __asan {
@@ -96,17 +102,21 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 struct AsanThreadLocalMallocStorage {
   explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
-      : quarantine_(x) { }
+#if ASAN_ALLOCATOR_VERSION == 1
+      : quarantine_(x)
+#endif
+      { }
   AsanThreadLocalMallocStorage() {
     CHECK(REAL(memset));
     REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
   }

-  AsanChunkFifoList quarantine_;
 #if ASAN_ALLOCATOR_VERSION == 1
+  AsanChunkFifoList quarantine_;
   AsanChunk *free_lists_[kNumberOfSizeClasses];
 #else
-  uptr allocator2_cache[1024];  // Opaque.
+  uptr quarantine_cache[16];
+  uptr allocator2_cache[96 * (512 * 8 + 16)];  // Opaque.
 #endif
   void CommitBack();
 };
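The new #ifndef block makes ASAN_ALLOCATOR_VERSION an overridable default rather than a hard-coded constant: Linux (non-Android) builds now default to the new allocator, everything else stays on version 1, and either can still be forced with -DASAN_ALLOCATOR_VERSION=... at build time. A self-contained illustration of the same defaulted-macro pattern (the macro and program here are illustrative, not from the tree):

    // default_version.cc -- compile with: c++ default_version.cc
    //                   or: c++ -DVERSION=2 default_version.cc to override
    #include <cstdio>

    #ifndef VERSION        // only define if the build didn't pass -DVERSION=...
    # if defined(__linux__) && !defined(__ANDROID__)
    #  define VERSION 2    // platform default
    # else
    #  define VERSION 1    // conservative fallback
    # endif
    #endif

    int main() {
      std::printf("built with VERSION=%d\n", VERSION);
    }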
libsanitizer/asan/asan_allocator2.cc
@@ -25,6 +25,7 @@
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_list.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_quarantine.h"

 namespace __asan {
@@ -90,15 +91,6 @@ static const uptr kMaxThreadLocalQuarantine =
 static const uptr kReturnOnZeroMalloc = 2048;  // Zero page is protected.

-static int inited = 0;
-static void Init() {
-  if (inited) return;
-  __asan_init();
-  inited = true;  // this must happen before any threads are created.
-  allocator.Init();
-}
-
 // Every chunk of memory allocated by this allocator can be in one of 3 states:
 // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
 // CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
@@ -244,31 +236,26 @@ void AsanChunkView::GetFreeStack(StackTrace *stack) {
                 chunk_->FreeStackSize());
 }

-class Quarantine: public AsanChunkFifoList {
- public:
-  void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
-    AsanChunkFifoList *q = &ms->quarantine_;
-    if (!q->size()) return;
-    SpinMutexLock l(&mutex_);
-    PushList(q);
-    PopAndDeallocateLoop(ms);
-  }
-  void BypassThreadLocalQuarantine(AsanChunk *m) {
-    SpinMutexLock l(&mutex_);
-    Push(m);
-  }
-
- private:
-  void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
-    while (size() > (uptr)flags()->quarantine_size) {
-      PopAndDeallocate(ms);
-    }
-  }
-  void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
-    CHECK_GT(size(), 0);
-    AsanChunk *m = Pop();
-    CHECK(m);
+struct QuarantineCallback;
+typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
+typedef AsanQuarantine::Cache QuarantineCache;
+static AsanQuarantine quarantine(LINKER_INITIALIZED);
+static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
+static AllocatorCache fallback_allocator_cache;
+static SpinMutex fallback_mutex;
+
+QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
+  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
+}
+
+struct QuarantineCallback {
+  explicit QuarantineCallback(AllocatorCache *cache)
+      : cache_(cache) {
+  }
+
+  void Recycle(AsanChunk *m) {
     CHECK(m->chunk_state == CHUNK_QUARANTINE);
     m->chunk_state = CHUNK_AVAILABLE;
     CHECK_NE(m->alloc_tid, kInvalidTid);
@@ -288,34 +275,27 @@ class Quarantine: public AsanChunkFifoList {
     thread_stats.real_frees++;
     thread_stats.really_freed += m->UsedSize();

-    allocator.Deallocate(GetAllocatorCache(ms), p);
-  }
-  SpinMutex mutex_;
-};
-
-static Quarantine quarantine;
-
-void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
-  CHECK(q->size() > 0);
-  size_ += q->size();
-  append_back(q);
-  q->clear();
-}
-
-void AsanChunkFifoList::Push(AsanChunk *n) {
-  push_back(n);
-  size_ += n->UsedSize();
-}
-
-// Interesting performance observation: this function takes up to 15% of overal
-// allocator time. That's because *first_ has been evicted from cache long time
-// ago. Not sure if we can or want to do anything with this.
-AsanChunk *AsanChunkFifoList::Pop() {
-  CHECK(first_);
-  AsanChunk *res = front();
-  size_ -= res->UsedSize();
-  pop_front();
-  return res;
+    allocator.Deallocate(cache_, p);
+  }
+
+  void *Allocate(uptr size) {
+    return allocator.Allocate(cache_, size, 1, false);
+  }
+
+  void Deallocate(void *p) {
+    allocator.Deallocate(cache_, p);
+  }
+
+  AllocatorCache *cache_;
+};
+
+static void Init() {
+  static int inited = 0;
+  if (inited) return;
+  __asan_init();
+  inited = true;  // this must happen before any threads are created.
+  allocator.Init();
+  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
 }
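The bespoke Quarantine class above is replaced by the generic Quarantine<Callback, Node> template from the newly included sanitizer_common/sanitizer_quarantine.h: the template owns FIFO ordering and the byte budget, while a caller-supplied callback performs the actual recycling. A minimal sketch of that division of labor under simplified assumptions (no per-thread caches, std::deque instead of an intrusive list):

    #include <cstddef>
    #include <deque>

    // Sketch of a callback-driven quarantine: freed blocks are held until a
    // byte budget is exceeded, then the oldest ones are handed to a
    // user-supplied callback for real recycling.
    template <typename Callback, typename Node>
    class Quarantine {
     public:
      explicit Quarantine(size_t max_bytes) : max_bytes_(max_bytes) {}

      void Put(Callback cb, Node *n, size_t bytes) {
        fifo_.push_back({n, bytes});
        bytes_ += bytes;
        while (bytes_ > max_bytes_) {  // evict oldest entries past the budget
          Entry e = fifo_.front();
          fifo_.pop_front();
          bytes_ -= e.bytes;
          cb.Recycle(e.node);  // callback does the actual deallocation
        }
      }

     private:
      struct Entry { Node *node; size_t bytes; };
      std::deque<Entry> fifo_;
      size_t bytes_ = 0, max_bytes_;
    };

In the real code the callback is QuarantineCallback, whose Recycle() flips the chunk back to CHUNK_AVAILABLE and returns the memory through the allocator cache it was constructed with.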
 static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
@@ -355,9 +335,18 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
   }

   AsanThread *t = asanThreadRegistry().GetCurrent();
-  AllocatorCache *cache = t ? GetAllocatorCache(&t->malloc_storage()) : 0;
-  void *allocated = allocator.Allocate(cache, needed_size, 8, false);
+  void *allocated;
+  if (t) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocated = allocator.Allocate(cache, needed_size, 8, false);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocated = allocator.Allocate(cache, needed_size, 8, false);
+  }
   uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+  // Clear the first allocated word (an old kMemalignMagic may still be there).
+  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
   uptr alloc_end = alloc_beg + needed_size;
   uptr beg_plus_redzone = alloc_beg + rz_size;
   uptr user_beg = beg_plus_redzone;
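Allocation now mirrors deallocation: a thread registered with asanThreadRegistry() uses its lock-free per-thread AllocatorCache, while early-startup or foreign threads share fallback_allocator_cache under fallback_mutex. A condensed sketch of the fast-path/slow-path split, with stand-in types (Cache and AllocateFrom are illustrative, not the real allocator API):

    #include <cstddef>
    #include <mutex>
    #include <new>

    struct Cache { /* per-thread free lists, used without locking */ };

    thread_local Cache *tls_cache = nullptr;  // set once a thread registers
    static Cache fallback_cache;              // shared by unregistered threads
    static std::mutex fallback_mutex;         // stands in for SpinMutex

    // Stand-in for the underlying allocator primitive.
    void *AllocateFrom(Cache *, size_t size) { return ::operator new(size); }

    void *Alloc(size_t size) {
      if (tls_cache)                    // fast path: no lock needed
        return AllocateFrom(tls_cache, size);
      std::lock_guard<std::mutex> l(fallback_mutex);  // slow path
      return AllocateFrom(&fallback_cache, size);
    }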
@@ -432,7 +421,7 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
   // Flip the chunk_state atomically to avoid race on double-free.
   u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
-                                       memory_order_acq_rel);
+                                       memory_order_relaxed);
   if (old_chunk_state == CHUNK_QUARANTINE)
     ReportDoubleFree((uptr)ptr, stack);
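Relaxing the exchange from memory_order_acq_rel to memory_order_relaxed is safe for the double-free check because only the atomicity of the swap matters here: of two racing frees, exactly one can observe the old allocated state; the loser reads back the quarantined state and is reported. The same trick in isolation:

    #include <atomic>
    #include <cstdio>

    enum State : unsigned char { ALLOCATED = 1, QUARANTINED = 2 };

    struct Chunk { std::atomic<unsigned char> state{ALLOCATED}; };

    void Free(Chunk *c) {
      // Exactly one caller wins the exchange; any second free is detected.
      unsigned char old = c->state.exchange(QUARANTINED,
                                            std::memory_order_relaxed);
      if (old == QUARANTINED) {
        std::puts("double-free detected");
        return;
      }
      // ... push chunk into quarantine ...
    }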
@@ -466,13 +455,15 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
   // Push into quarantine.
   if (t) {
-    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
-    q.Push(m);
-
-    if (q.size() > kMaxThreadLocalQuarantine)
-      quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
+    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+    AllocatorCache *ac = GetAllocatorCache(ms);
+    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
+                   m, m->UsedSize());
   } else {
-    quarantine.BypassThreadLocalQuarantine(m);
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *ac = &fallback_allocator_cache;
+    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
+                   m, m->UsedSize());
   }

   ASAN_FREE_HOOK(ptr);
@@ -584,7 +575,8 @@ AsanChunkView FindHeapChunkByAddress(uptr addr) {
 }

 void AsanThreadLocalMallocStorage::CommitBack() {
-  quarantine.SwallowThreadLocalQuarantine(this);
+  AllocatorCache *ac = GetAllocatorCache(this);
+  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
   allocator.SwallowCache(GetAllocatorCache(this));
 }
@@ -681,16 +673,18 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
 }

 bool __asan_get_ownership(const void *p) {
-  return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
+  uptr ptr = reinterpret_cast<uptr>(p);
+  return (ptr == kReturnOnZeroMalloc) || (AllocationSize(ptr) > 0);
 }

 uptr __asan_get_allocated_size(const void *p) {
   if (p == 0) return 0;
-  uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
+  uptr ptr = reinterpret_cast<uptr>(p);
+  uptr allocated_size = AllocationSize(ptr);
   // Die if p is not malloced or if it is already freed.
-  if (allocated_size == 0) {
+  if (allocated_size == 0 && ptr != kReturnOnZeroMalloc) {
     GET_STACK_TRACE_FATAL_HERE;
-    ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
+    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
   }
   return allocated_size;
 }
libsanitizer/asan/asan_globals.cc
@@ -11,13 +11,13 @@
 //===----------------------------------------------------------------------===//
 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_mapping.h"
 #include "asan_report.h"
 #include "asan_stack.h"
 #include "asan_stats.h"
 #include "asan_thread.h"
 #include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_mutex.h"

 namespace __asan {
@@ -28,7 +28,7 @@ struct ListOfGlobals {
   ListOfGlobals *next;
 };

-static AsanLock mu_for_globals(LINKER_INITIALIZED);
+static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
 static LowLevelAllocator allocator_for_globals;
 static ListOfGlobals *list_of_all_globals;
 static ListOfGlobals *list_of_dynamic_init_globals;
@@ -53,14 +53,9 @@ void PoisonRedZones(const Global &g) {
   }
 }

-static uptr GetAlignedSize(uptr size) {
-  return ((size + kGlobalAndStackRedzone - 1) / kGlobalAndStackRedzone)
-      * kGlobalAndStackRedzone;
-}
-
 bool DescribeAddressIfGlobal(uptr addr) {
   if (!flags()->report_globals) return false;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   bool res = false;
   for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
     const Global &g = *l->g;
@@ -140,23 +135,10 @@ static void UnpoisonGlobal(const Global *g) {
 // ---------------------- Interface ---------------- {{{1
 using namespace __asan;  // NOLINT

-// Register one global with a default redzone.
-void __asan_register_global(uptr addr, uptr size,
-                            const char *name) {
-  if (!flags()->report_globals) return;
-  ScopedLock lock(&mu_for_globals);
-  Global *g = (Global *)allocator_for_globals.Allocate(sizeof(Global));
-  g->beg = addr;
-  g->size = size;
-  g->size_with_redzone = GetAlignedSize(size) + kGlobalAndStackRedzone;
-  g->name = name;
-  RegisterGlobal(g);
-}
-
 // Register an array of globals.
 void __asan_register_globals(__asan_global *globals, uptr n) {
   if (!flags()->report_globals) return;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   for (uptr i = 0; i < n; i++) {
     RegisterGlobal(&globals[i]);
   }
@@ -166,7 +148,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
 // We must do this when a shared objects gets dlclosed.
 void __asan_unregister_globals(__asan_global *globals, uptr n) {
   if (!flags()->report_globals) return;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   for (uptr i = 0; i < n; i++) {
     UnregisterGlobal(&globals[i]);
   }
@@ -179,7 +161,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
 void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
   if (!flags()->check_initialization_order) return;
   CHECK(list_of_dynamic_init_globals);
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   bool from_current_tu = false;
   // The list looks like:
   // a => ... => b => last_addr => ... => first_addr => c => ...
@@ -200,7 +182,7 @@ void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
 // TU are poisoned. It simply unpoisons all dynamically initialized globals.
 void __asan_after_dynamic_init() {
   if (!flags()->check_initialization_order) return;
-  ScopedLock lock(&mu_for_globals);
+  BlockingMutexLock lock(&mu_for_globals);
   for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next)
     UnpoisonGlobal(l->g);
 }
libsanitizer/asan/asan_intercepted_functions.h
@@ -16,6 +16,8 @@
 #include "interception/interception.h"
 #include "sanitizer_common/sanitizer_platform_interceptors.h"

+#include <stdarg.h>
+
 using __sanitizer::uptr;

 // Use macro to describe if specific function should be
@@ -40,10 +42,8 @@ using __sanitizer::uptr;
 #if defined(__linux__)
 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
-# define ASAN_INTERCEPT_PRCTL 1
 #else
 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
-# define ASAN_INTERCEPT_PRCTL 0
 #endif

 #if !defined(__APPLE__)
@@ -105,7 +105,7 @@ DECLARE_FUNCTION_AND_WRAPPER(void, siglongjmp, void *env, int value);
 # endif
 # if ASAN_INTERCEPT___CXA_THROW
 DECLARE_FUNCTION_AND_WRAPPER(void, __cxa_throw, void *a, void *b, void *c);
-#endif
+# endif

 // string.h / strings.h
 DECLARE_FUNCTION_AND_WRAPPER(int, memcmp,
@@ -139,9 +139,9 @@ DECLARE_FUNCTION_AND_WRAPPER(char*, strdup, const char *s);
 # if ASAN_INTERCEPT_STRNLEN
 DECLARE_FUNCTION_AND_WRAPPER(uptr, strnlen, const char *s, uptr maxlen);
 # endif
-#if ASAN_INTERCEPT_INDEX
+# if ASAN_INTERCEPT_INDEX
 DECLARE_FUNCTION_AND_WRAPPER(char*, index, const char *string, int c);
-#endif
+# endif

 // stdlib.h
 DECLARE_FUNCTION_AND_WRAPPER(int, atoi, const char *nptr);
@@ -165,6 +165,13 @@ DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread64, int fd, void *buf,
                              SIZE_T count, OFF64_T offset);
 # endif

+# if SANITIZER_INTERCEPT_WRITE
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, write, int fd, void *ptr, SIZE_T count);
+# endif
+# if SANITIZER_INTERCEPT_PWRITE
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count);
+# endif
+
 # if ASAN_INTERCEPT_MLOCKX
 // mlock/munlock
 DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, SIZE_T len);
@@ -186,7 +193,18 @@ DECLARE_FUNCTION_AND_WRAPPER(int, pthread_create,
                              void *(*start_routine)(void*), void *arg);
 # endif

-#if defined(__APPLE__)
+DECLARE_FUNCTION_AND_WRAPPER(int, vscanf, const char *format, va_list ap);
+DECLARE_FUNCTION_AND_WRAPPER(int, vsscanf, const char *str, const char *format,
+                             va_list ap);
+DECLARE_FUNCTION_AND_WRAPPER(int, vfscanf, void *stream, const char *format,
+                             va_list ap);
+DECLARE_FUNCTION_AND_WRAPPER(int, scanf, const char *format, ...);
+DECLARE_FUNCTION_AND_WRAPPER(int, fscanf,
+                             void* stream, const char *format, ...);
+DECLARE_FUNCTION_AND_WRAPPER(int, sscanf,  // NOLINT
+                             const char *str, const char *format, ...);
+
+# if defined(__APPLE__)
 typedef void* pthread_workqueue_t;
 typedef void* pthread_workitem_handle_t;
@@ -196,8 +214,6 @@ typedef void* dispatch_source_t;
 typedef u64 dispatch_time_t;
 typedef void (*dispatch_function_t)(void *block);
 typedef void* (*worker_t)(void *block);
-typedef void* CFStringRef;
-typedef void* CFAllocatorRef;

 DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_async_f,
                              dispatch_queue_t dq,
@@ -215,11 +231,7 @@ DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async_f,
                              dispatch_group_t group, dispatch_queue_t dq,
                              void *ctxt, dispatch_function_t func);

-DECLARE_FUNCTION_AND_WRAPPER(void, __CFInitialize, void);
-DECLARE_FUNCTION_AND_WRAPPER(CFStringRef, CFStringCreateCopy,
-                             CFAllocatorRef alloc, CFStringRef str);
-DECLARE_FUNCTION_AND_WRAPPER(void, free, void* ptr);
-#if MAC_INTERPOSE_FUNCTIONS && !defined(MISSING_BLOCKS_SUPPORT)
+# if MAC_INTERPOSE_FUNCTIONS && !defined(MISSING_BLOCKS_SUPPORT)
 DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async,
                              dispatch_group_t dg,
                              dispatch_queue_t dq, void (^work)(void));
@@ -231,9 +243,35 @@ DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_event_handler,
                              dispatch_source_t ds, void (^work)(void));
 DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_cancel_handler,
                              dispatch_source_t ds, void (^work)(void));
-#endif  // MAC_INTERPOSE_FUNCTIONS
-#endif  // __APPLE__
+# endif  // MAC_INTERPOSE_FUNCTIONS
+
+typedef void malloc_zone_t;
+typedef size_t vm_size_t;
+DECLARE_FUNCTION_AND_WRAPPER(malloc_zone_t *, malloc_create_zone,
+                             vm_size_t start_size, unsigned flags);
+DECLARE_FUNCTION_AND_WRAPPER(malloc_zone_t *, malloc_default_zone, void);
+DECLARE_FUNCTION_AND_WRAPPER(
+    malloc_zone_t *, malloc_default_purgeable_zone, void);
+DECLARE_FUNCTION_AND_WRAPPER(void, malloc_make_purgeable, void *ptr);
+DECLARE_FUNCTION_AND_WRAPPER(int, malloc_make_nonpurgeable, void *ptr);
+DECLARE_FUNCTION_AND_WRAPPER(void, malloc_set_zone_name,
+                             malloc_zone_t *zone, const char *name);
+DECLARE_FUNCTION_AND_WRAPPER(void *, malloc, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(void, free, void *ptr);
+DECLARE_FUNCTION_AND_WRAPPER(void *, realloc, void *ptr, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(void *, calloc, size_t nmemb, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(void *, valloc, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(size_t, malloc_good_size, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(int, posix_memalign,
+                             void **memptr, size_t alignment, size_t size);
+DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_prepare, void);
+DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_parent, void);
+DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_child, void);
+# endif  // __APPLE__
 }  // extern "C"
-#endif
+#endif  // defined(__APPLE__) || (defined(_WIN32) && !defined(_DLL))

 #endif  // ASAN_INTERCEPTED_FUNCTIONS_H
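DECLARE_FUNCTION_AND_WRAPPER comes from the interception machinery: for each entry point it declares both the original function and the ASan wrapper that will be interposed over it. A rough illustration of what one such declaration amounts to (hypothetical, simplified expansion; the real macro in interception.h differs per platform):

    #include <cstddef>

    // Hypothetical view of
    //   DECLARE_FUNCTION_AND_WRAPPER(int, memcmp, const void *, const void *, SIZE_T)
    // on Mac-style interposition: both symbols get C linkage, and the build
    // arranges for calls to memcmp to reach the wrapper instead.
    extern "C" int memcmp(const void *a, const void *b, size_t n);
    extern "C" int wrap_memcmp(const void *a, const void *b, size_t n);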
libsanitizer/asan/asan_interceptors.cc
@@ -73,15 +73,30 @@ static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
   return internal_strnlen(s, maxlen);
 }

+void SetThreadName(const char *name) {
+  AsanThread *t = asanThreadRegistry().GetCurrent();
+  if (t)
+    t->summary()->set_name(name);
+}
+
 }  // namespace __asan

 // ---------------------- Wrappers ---------------- {{{1
 using namespace __asan;  // NOLINT

-#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) ASAN_WRITE_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) ASAN_READ_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(func, ...) ENSURE_ASAN_INITED()
-#include "sanitizer_common/sanitizer_common_interceptors.h"
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+  ASAN_WRITE_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+  do {                                           \
+    ctx = 0;                                     \
+    (void)ctx;                                   \
+    ENSURE_ASAN_INITED();                        \
+  } while (false)
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) do { } while (false)
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) do { } while (false)
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
+#include "sanitizer_common/sanitizer_common_interceptors.inc"

 static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
   AsanThread *t = (AsanThread*)arg;
@@ -122,6 +137,18 @@ DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
 #endif  // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION

 #if ASAN_INTERCEPT_SWAPCONTEXT
+static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
+  // Align to page size.
+  uptr PageSize = GetPageSizeCached();
+  uptr bottom = stack & ~(PageSize - 1);
+  ssize += stack - bottom;
+  ssize = RoundUpTo(ssize, PageSize);
+  static const uptr kMaxSaneContextStackSize = 1 << 22;  // 4 Mb
+  if (ssize && ssize <= kMaxSaneContextStackSize) {
+    PoisonShadow(bottom, ssize, 0);
+  }
+}
+
 INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
             struct ucontext_t *ucp) {
   static bool reported_warning = false;
@@ -132,16 +159,18 @@ INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
   }
   // Clear shadow memory for new context (it may share stack
   // with current context).
-  ClearShadowMemoryForContext(ucp);
+  uptr stack, ssize;
+  ReadContextStack(ucp, &stack, &ssize);
+  ClearShadowMemoryForContextStack(stack, ssize);
   int res = REAL(swapcontext)(oucp, ucp);
   // swapcontext technically does not return, but program may swap context to
   // "oucp" later, that would look as if swapcontext() returned 0.
   // We need to clear shadow for ucp once again, as it may be in arbitrary
   // state.
-  ClearShadowMemoryForContext(ucp);
+  ClearShadowMemoryForContextStack(stack, ssize);
   return res;
 }
-#endif
+#endif  // ASAN_INTERCEPT_SWAPCONTEXT
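The refactor splits the old ClearShadowMemoryForContext() into a platform query (ReadContextStack) and the platform-independent shadow clearing above, which first widens [stack, stack+ssize) to whole pages and then refuses obviously bogus sizes. For a 4096-byte page, a stack pointer of 0x7f0012345678 rounds down to 0x7f0012345000, the 0x678 bytes of slack are added to ssize, and the total is rounded up to a page multiple. The same arithmetic as a tiny checked program:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kPageSize = 4096;                 // assumed page size
      uint64_t stack = 0x7f0012345678, ssize = 10000;  // example input range
      uint64_t bottom = stack & ~(kPageSize - 1);      // round start down
      assert(bottom == 0x7f0012345000);
      ssize += stack - bottom;                         // 10000 + 0x678 = 11656
      ssize = (ssize + kPageSize - 1) & ~(kPageSize - 1);  // round up
      assert(ssize == 12288 && ssize % kPageSize == 0);
    }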
 INTERCEPTOR(void, longjmp, void *env, int val) {
   __asan_handle_no_return();
@@ -162,25 +191,6 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
 }
 #endif

-#if ASAN_INTERCEPT_PRCTL
-#define PR_SET_NAME 15
-INTERCEPTOR(int, prctl, int option,
-            unsigned long arg2, unsigned long arg3,   // NOLINT
-            unsigned long arg4, unsigned long arg5) { // NOLINT
-  int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
-  if (option == PR_SET_NAME) {
-    AsanThread *t = asanThreadRegistry().GetCurrent();
-    if (t) {
-      char buff[17];
-      internal_strncpy(buff, (char*)arg2, 16);
-      buff[16] = 0;
-      t->summary()->set_name(buff);
-    }
-  }
-  return res;
-}
-#endif
-
 #if ASAN_INTERCEPT___CXA_THROW
 INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
   CHECK(REAL(__cxa_throw));
@@ -727,9 +737,6 @@ void InitializeAsanInterceptors() {
 #if ASAN_INTERCEPT_SIGLONGJMP
   ASAN_INTERCEPT_FUNC(siglongjmp);
 #endif
-#if ASAN_INTERCEPT_PRCTL
-  ASAN_INTERCEPT_FUNC(prctl);
-#endif

   // Intercept exception handling functions.
 #if ASAN_INTERCEPT___CXA_THROW
libsanitizer/asan/asan_internal.h
@@ -114,7 +114,7 @@ bool AsanInterceptsSignal(int signum);
 void SetAlternateSignalStack();
 void UnsetAlternateSignalStack();
 void InstallSignalHandlers();
-void ClearShadowMemoryForContext(void *context);
+void ReadContextStack(void *context, uptr *stack, uptr *ssize);
 void AsanPlatformThreadInit();

 // Wrapper for TLS/TSD.
libsanitizer/asan/asan_linux.cc
@@ -13,7 +13,6 @@
 #include "asan_interceptors.h"
 #include "asan_internal.h"
-#include "asan_lock.h"
 #include "asan_thread.h"
 #include "asan_thread_registry.h"
 #include "sanitizer_common/sanitizer_libc.h"
@@ -100,26 +99,6 @@ void AsanPlatformThreadInit() {
   // Nothing here for now.
 }

-AsanLock::AsanLock(LinkerInitialized) {
-  // We assume that pthread_mutex_t initialized to all zeroes is a valid
-  // unlocked mutex. We can not use PTHREAD_MUTEX_INITIALIZER as it triggers
-  // a gcc warning:
-  // extended initializer lists only available with -std=c++0x or -std=gnu++0x
-}
-
-void AsanLock::Lock() {
-  CHECK(sizeof(pthread_mutex_t) <= sizeof(opaque_storage_));
-  pthread_mutex_lock((pthread_mutex_t*)&opaque_storage_);
-  CHECK(!owner_);
-  owner_ = (uptr)pthread_self();
-}
-
-void AsanLock::Unlock() {
-  CHECK(owner_ == (uptr)pthread_self());
-  owner_ = 0;
-  pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
-}
-
 void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
 #if defined(__arm__) || \
     defined(__powerpc__) || defined(__powerpc64__) || \
@@ -139,19 +118,13 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
 }

 #if !ASAN_ANDROID
-void ClearShadowMemoryForContext(void *context) {
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
   ucontext_t *ucp = (ucontext_t*)context;
-  uptr sp = (uptr)ucp->uc_stack.ss_sp;
-  uptr size = ucp->uc_stack.ss_size;
-  // Align to page size.
-  uptr PageSize = GetPageSizeCached();
-  uptr bottom = sp & ~(PageSize - 1);
-  size += sp - bottom;
-  size = RoundUpTo(size, PageSize);
-  PoisonShadow(bottom, size, 0);
+  *stack = (uptr)ucp->uc_stack.ss_sp;
+  *ssize = ucp->uc_stack.ss_size;
 }
 #else
-void ClearShadowMemoryForContext(void *context) {
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
   UNIMPLEMENTED();
 }
 #endif
libsanitizer/asan/asan_lock.h (deleted)
-//===-- asan_lock.h ---------------------------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// A wrapper for a simple lock.
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_LOCK_H
-#define ASAN_LOCK_H
-
-#include "sanitizer_common/sanitizer_mutex.h"
-#include "asan_internal.h"
-
-// The locks in ASan are global objects and they are never destroyed to avoid
-// at-exit races (that is, a lock is being used by other threads while the main
-// thread is doing atexit destructors).
-// We define the class using opaque storage to avoid including system headers.
-
-namespace __asan {
-
-class AsanLock {
- public:
-  explicit AsanLock(LinkerInitialized);
-  void Lock();
-  void Unlock();
-  bool IsLocked() { return owner_ != 0; }
- private:
-  uptr opaque_storage_[10];
-  uptr owner_;  // for debugging and for malloc_introspection_t interface
-};
-
-typedef GenericScopedLock<AsanLock> ScopedLock;
-
-}  // namespace __asan
-
-#endif  // ASAN_LOCK_H
libsanitizer/asan/asan_mac.cc
@@ -34,7 +34,6 @@
 #include <stdlib.h>  // for free()
 #include <unistd.h>
 #include <libkern/OSAtomic.h>
-#include <CoreFoundation/CFString.h>

 namespace __asan {
@@ -129,33 +128,6 @@ bool AsanInterceptsSignal(int signum) {
 }

 void AsanPlatformThreadInit() {
-  // For the first program thread, we can't replace the allocator before
-  // __CFInitialize() has been called. If it hasn't, we'll call
-  // MaybeReplaceCFAllocator() later on this thread.
-  // For other threads __CFInitialize() has been called before their creation.
-  // See also asan_malloc_mac.cc.
-  if (((CFRuntimeBase*)kCFAllocatorSystemDefault)->_cfisa) {
-    MaybeReplaceCFAllocator();
-  }
-}
-
-AsanLock::AsanLock(LinkerInitialized) {
-  // We assume that OS_SPINLOCK_INIT is zero
-}
-
-void AsanLock::Lock() {
-  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
-  CHECK(OS_SPINLOCK_INIT == 0);
-  CHECK(owner_ != (uptr)pthread_self());
-  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
-  CHECK(!owner_);
-  owner_ = (uptr)pthread_self();
-}
-
-void AsanLock::Unlock() {
-  CHECK(owner_ == (uptr)pthread_self());
-  owner_ = 0;
-  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
 }

 void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
@@ -170,7 +142,7 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
   }
 }

-void ClearShadowMemoryForContext(void *context) {
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
   UNIMPLEMENTED();
 }

@@ -254,9 +226,6 @@ mach_error_t __interception_deallocate_island(void *ptr) {
 // The implementation details are at
 // http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c

-typedef void* pthread_workqueue_t;
-typedef void* pthread_workitem_handle_t;
-
 typedef void* dispatch_group_t;
 typedef void* dispatch_queue_t;
 typedef void* dispatch_source_t;
@@ -287,9 +256,6 @@ void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
                               dispatch_function_t func);
 void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
                             void *ctxt, dispatch_function_t func);
-int pthread_workqueue_additem_np(pthread_workqueue_t workq,
-    void *(*workitem_func)(void *), void * workitem_arg,
-    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
 }  // extern "C"

 static ALWAYS_INLINE
@@ -444,66 +410,6 @@ INTERCEPTOR(void, dispatch_source_set_event_handler,
 }
 #endif

-// The following stuff has been extremely helpful while looking for the
-// unhandled functions that spawned jobs on Chromium shutdown. If the verbosity
-// level is 2 or greater, we wrap pthread_workqueue_additem_np() in order to
-// find the points of worker thread creation (each of such threads may be used
-// to run several tasks, that's why this is not enough to support the whole
-// libdispatch API.
-extern "C"
-void *wrap_workitem_func(void *arg) {
-  if (flags()->verbosity >= 2) {
-    Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
-  }
-  asan_block_context_t *ctxt = (asan_block_context_t*)arg;
-  worker_t fn = (worker_t)(ctxt->func);
-  void *result = fn(ctxt->block);
-  GET_STACK_TRACE_THREAD;
-  asan_free(arg, &stack, FROM_MALLOC);
-  return result;
-}
-
-INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
-    void *(*workitem_func)(void *), void * workitem_arg,
-    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
-  GET_STACK_TRACE_THREAD;
-  asan_block_context_t *asan_ctxt =
-      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
-  asan_ctxt->block = workitem_arg;
-  asan_ctxt->func = (dispatch_function_t)workitem_func;
-  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
-  if (flags()->verbosity >= 2) {
-    Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
-    PRINT_CURRENT_STACK();
-  }
-  return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
-                                            asan_ctxt, itemhandlep,
-                                            gencountp);
-}
-
-// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
-int __CFStrIsConstant(CFStringRef str) {
-  CFRuntimeBase *base = (CFRuntimeBase*)str;
-#if __LP64__
-  return base->_rc == 0;
-#else
-  return (base->_cfinfo[CF_RC_BITS]) == 0;
-#endif
-}
-
-INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
-            CFStringRef str) {
-  if (__CFStrIsConstant(str)) {
-    return str;
-  } else {
-    return REAL(CFStringCreateCopy)(alloc, str);
-  }
-}
-
-DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
-DECLARE_REAL_AND_INTERCEPTOR(void, __CFInitialize, void)
-
 namespace __asan {

 void InitializeMacInterceptors() {
@@ -512,26 +418,6 @@ void InitializeMacInterceptors() {
   CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
   CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
   CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
-  // We don't need to intercept pthread_workqueue_additem_np() to support the
-  // libdispatch API, but it helps us to debug the unsupported functions. Let's
-  // intercept it only during verbose runs.
-  if (flags()->verbosity >= 2) {
-    CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
-  }
-  // Normally CFStringCreateCopy should not copy constant CF strings.
-  // Replacing the default CFAllocator causes constant strings to be copied
-  // rather than just returned, which leads to bugs in big applications like
-  // Chromium and WebKit, see
-  // http://code.google.com/p/address-sanitizer/issues/detail?id=10
-  // Until this problem is fixed we need to check that the string is
-  // non-constant before calling CFStringCreateCopy.
-  CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
-  // Some of the library functions call free() directly, so we have to
-  // intercept it.
-  CHECK(INTERCEPT_FUNCTION(free));
-  if (flags()->replace_cfallocator) {
-    CHECK(INTERCEPT_FUNCTION(__CFInitialize));
-  }
 }

 }  // namespace __asan
libsanitizer/asan/asan_malloc_mac.cc
@@ -34,85 +34,108 @@ using namespace __asan;  // NOLINT
 // TODO(glider): do we need both zones?
 static malloc_zone_t *system_malloc_zone = 0;
-static malloc_zone_t *system_purgeable_zone = 0;
 static malloc_zone_t asan_zone;
-CFAllocatorRef cf_asan = 0;
-
-// _CFRuntimeCreateInstance() checks whether the supplied allocator is
-// kCFAllocatorSystemDefault and, if it is not, stores the allocator reference
-// at the beginning of the allocated memory and returns the pointer to the
-// allocated memory plus sizeof(CFAllocatorRef). See
-// http://www.opensource.apple.com/source/CF/CF-635.21/CFRuntime.c
-// Pointers returned by _CFRuntimeCreateInstance() can then be passed directly
-// to free() or CFAllocatorDeallocate(), which leads to false invalid free
-// reports.
-// The corresponding rdar bug is http://openradar.appspot.com/radar?id=1796404.
-void* ALWAYS_INLINE get_saved_cfallocator_ref(void *ptr) {
-  if (flags()->replace_cfallocator) {
-    // Make sure we're not hitting the previous page. This may be incorrect
-    // if ASan's malloc returns an address ending with 0xFF8, which will be
-    // then padded to a page boundary with a CFAllocatorRef.
-    uptr arith_ptr = (uptr)ptr;
-    if ((arith_ptr & 0xFFF) > sizeof(CFAllocatorRef)) {
-      CFAllocatorRef *saved =
-          (CFAllocatorRef*)(arith_ptr - sizeof(CFAllocatorRef));
-      if ((*saved == cf_asan) && asan_mz_size(saved)) ptr = (void*)saved;
-    }
-  }
-  return ptr;
-}
-
-// The free() implementation provided by OS X calls malloc_zone_from_ptr()
-// to find the owner of |ptr|. If the result is 0, an invalid free() is
-// reported. Our implementation falls back to asan_free() in this case
-// in order to print an ASan-style report.
-//
-// For the objects created by _CFRuntimeCreateInstance a CFAllocatorRef is
-// placed at the beginning of the allocated chunk and the pointer returned by
-// our allocator is off by sizeof(CFAllocatorRef). This pointer can be then
-// passed directly to free(), which will lead to errors.
-// To overcome this we're checking whether |ptr-sizeof(CFAllocatorRef)|
-// contains a pointer to our CFAllocator (assuming no other allocator is used).
-// See http://code.google.com/p/address-sanitizer/issues/detail?id=70 for more
-// info.
+
+INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
+            vm_size_t start_size, unsigned zone_flags) {
+  if (!asan_inited) __asan_init();
+  GET_STACK_TRACE_MALLOC;
+  malloc_zone_t *new_zone =
+      (malloc_zone_t*)asan_malloc(sizeof(asan_zone), &stack);
+  internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone));
+  new_zone->zone_name = NULL;  // The name will be changed anyway.
+  return new_zone;
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
+  if (!asan_inited) __asan_init();
+  return &asan_zone;
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
+  // FIXME: ASan should support purgeable allocations.
+  // https://code.google.com/p/address-sanitizer/issues/detail?id=139
+  if (!asan_inited) __asan_init();
+  return &asan_zone;
+}
+
+INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
+  // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+  // for now.
+  if (!asan_inited) __asan_init();
+}
+
+INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
+  // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+  // for now.
+  if (!asan_inited) __asan_init();
+  // Must return 0 if the contents were not purged since the last call to
+  // malloc_make_purgeable().
+  return 0;
+}
+
+INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
+  if (!asan_inited) __asan_init();
+  // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
+  size_t buflen = 6 + (name ? internal_strlen(name) : 0);
+  InternalScopedBuffer<char> new_name(buflen);
+  if (name && zone->introspect == asan_zone.introspect) {
+    internal_snprintf(new_name.data(), buflen, "asan-%s", name);
+    name = new_name.data();
+  }
+
+  // Call the system malloc's implementation for both external and our zones,
+  // since that appropriately changes VM region protections on the zone.
+  REAL(malloc_set_zone_name)(zone, name);
+}
+
+INTERCEPTOR(void *, malloc, size_t size) {
+  if (!asan_inited) __asan_init();
+  GET_STACK_TRACE_MALLOC;
+  void *res = asan_malloc(size, &stack);
+  return res;
+}
+
 INTERCEPTOR(void, free, void *ptr) {
-  malloc_zone_t *zone = malloc_zone_from_ptr(ptr);
-  if (zone) {
-#if defined(MAC_OS_X_VERSION_10_6) && \
-    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
-    if ((zone->version >= 6) && (zone->free_definite_size)) {
-      zone->free_definite_size(zone, ptr, malloc_size(ptr));
-    } else {
-      malloc_zone_free(zone, ptr);
-    }
-#else
-    malloc_zone_free(zone, ptr);
-#endif
-  } else {
-    if (!asan_mz_size(ptr)) ptr = get_saved_cfallocator_ref(ptr);
-    GET_STACK_TRACE_FREE;
-    asan_free(ptr, &stack, FROM_MALLOC);
-  }
-}
-
-// We can't always replace the default CFAllocator with cf_asan right in
-// ReplaceSystemMalloc(), because it is sometimes called before
-// __CFInitialize(), when the default allocator is invalid and replacing it may
-// crash the program. Instead we wait for the allocator to initialize and jump
-// in just after __CFInitialize(). Nobody is going to allocate memory using
-// CFAllocators before that, so we won't miss anything.
-//
-// See http://code.google.com/p/address-sanitizer/issues/detail?id=87
-// and http://opensource.apple.com/source/CF/CF-550.43/CFRuntime.c
-INTERCEPTOR(void, __CFInitialize, void) {
-  // If the runtime is built as dynamic library, __CFInitialize wrapper may be
-  // called before __asan_init.
-#if !MAC_INTERPOSE_FUNCTIONS
-  CHECK(flags()->replace_cfallocator);
-  CHECK(asan_inited);
-#endif
-  REAL(__CFInitialize)();
-  if (!cf_asan && asan_inited) MaybeReplaceCFAllocator();
+  if (!asan_inited) __asan_init();
+  if (!ptr) return;
+  GET_STACK_TRACE_FREE;
+  asan_free(ptr, &stack, FROM_MALLOC);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
+  if (!asan_inited) __asan_init();
+  GET_STACK_TRACE_MALLOC;
+  return asan_realloc(ptr, size, &stack);
+}
+
+INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
+  if (!asan_inited) __asan_init();
+  GET_STACK_TRACE_MALLOC;
+  return asan_calloc(nmemb, size, &stack);
+}
+
+INTERCEPTOR(void *, valloc, size_t size) {
+  if (!asan_inited) __asan_init();
+  GET_STACK_TRACE_MALLOC;
+  return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
+}
+
+INTERCEPTOR(size_t, malloc_good_size, size_t size) {
+  if (!asan_inited) __asan_init();
+  return asan_zone.introspect->good_size(&asan_zone, size);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
+  if (!asan_inited) __asan_init();
+  CHECK(memptr);
+  GET_STACK_TRACE_MALLOC;
+  void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
+  if (result) {
+    *memptr = result;
+    return 0;
+  }
+  return -1;
 }
@@ -132,15 +155,6 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
   return asan_malloc(size, &stack);
 }

-void *cf_malloc(CFIndex size, CFOptionFlags hint, void *info) {
-  if (!asan_inited) {
-    CHECK(system_malloc_zone);
-    return malloc_zone_malloc(system_malloc_zone, size);
-  }
-  GET_STACK_TRACE_MALLOC;
-  return asan_malloc(size, &stack);
-}
-
 void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
   if (!asan_inited) {
     // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
@@ -172,31 +186,14 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {

 void ALWAYS_INLINE free_common(void *context, void *ptr) {
   if (!ptr) return;
-  if (asan_mz_size(ptr)) {
-    GET_STACK_TRACE_FREE;
+  GET_STACK_TRACE_FREE;
+  // FIXME: need to retire this flag.
+  if (!flags()->mac_ignore_invalid_free) {
     asan_free(ptr, &stack, FROM_MALLOC);
   } else {
-    // If the pointer does not belong to any of the zones, use one of the
-    // fallback methods to free memory.
-    malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr);
-    if (zone_ptr == system_purgeable_zone) {
-      // allocations from malloc_default_purgeable_zone() done before
-      // __asan_init() may be occasionally freed via free_common().
-      // see http://code.google.com/p/address-sanitizer/issues/detail?id=99.
-      malloc_zone_free(zone_ptr, ptr);
-    } else {
-      // If the memory chunk pointer was moved to store additional
-      // CFAllocatorRef, fix it back.
-      ptr = get_saved_cfallocator_ref(ptr);
-      GET_STACK_TRACE_FREE;
-      if (!flags()->mac_ignore_invalid_free) {
-        asan_free(ptr, &stack, FROM_MALLOC);
-      } else {
-        GET_ZONE_FOR_PTR(ptr);
-        WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
-        return;
-      }
-    }
+    GET_ZONE_FOR_PTR(ptr);
+    WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
+    return;
   }
 }
@@ -205,10 +202,6 @@ void mz_free(malloc_zone_t *zone, void *ptr) {
   free_common(zone, ptr);
 }

-void cf_free(void *ptr, void *info) {
-  free_common(info, ptr);
-}
-
 void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
   if (!ptr) {
     GET_STACK_TRACE_MALLOC;
@@ -228,29 +221,11 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
   }
 }

-void *cf_realloc(void *ptr, CFIndex size, CFOptionFlags hint, void *info) {
-  if (!ptr) {
-    GET_STACK_TRACE_MALLOC;
-    return asan_malloc(size, &stack);
-  } else {
-    if (asan_mz_size(ptr)) {
-      GET_STACK_TRACE_MALLOC;
-      return asan_realloc(ptr, size, &stack);
-    } else {
-      // We can't recover from reallocating an unknown address, because
-      // this would require reading at most |size| bytes from
-      // potentially unaccessible memory.
-      GET_STACK_TRACE_FREE;
-      GET_ZONE_FOR_PTR(ptr);
-      ReportMacCfReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
-    }
-  }
-}
-
 void mz_destroy(malloc_zone_t* zone) {
   // A no-op -- we will not be destroyed!
-  Printf("mz_destroy() called -- ignoring\n");
+  Report("mz_destroy() called -- ignoring\n");
 }

 // from AvailabilityMacros.h
 #if defined(MAC_OS_X_VERSION_10_6) && \
     MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
@@ -322,23 +297,7 @@ boolean_t mi_zone_locked(malloc_zone_t *zone) {
 }  // unnamed namespace

-extern int __CFRuntimeClassTableSize;
-
 namespace __asan {
-void MaybeReplaceCFAllocator() {
-  static CFAllocatorContext asan_context = {
-        /*version*/ 0, /*info*/ &asan_zone,
-        /*retain*/ 0, /*release*/ 0,
-        /*copyDescription*/0,
-        /*allocate*/ &cf_malloc,
-        /*reallocate*/ &cf_realloc,
-        /*deallocate*/ &cf_free,
-        /*preferredSize*/ 0 };
-  if (!cf_asan)
-    cf_asan = CFAllocatorCreate(kCFAllocatorUseContext, &asan_context);
-  if (flags()->replace_cfallocator && CFAllocatorGetDefault() != cf_asan)
-    CFAllocatorSetDefault(cf_asan);
-}
-
 void ReplaceSystemMalloc() {
   static malloc_introspection_t asan_introspection;
@@ -378,41 +337,10 @@ void ReplaceSystemMalloc() {
   asan_zone.free_definite_size = 0;
   asan_zone.memalign = &mz_memalign;
   asan_introspection.zone_locked = &mi_zone_locked;
-
-  // Request the default purgable zone to force its creation. The
-  // current default zone is registered with the purgable zone for
-  // doing tiny and small allocs. Sadly, it assumes that the default
-  // zone is the szone implementation from OS X and will crash if it
-  // isn't. By creating the zone now, this will be true and changing
-  // the default zone won't cause a problem. (OS X 10.6 and higher.)
-  system_purgeable_zone = malloc_default_purgeable_zone();
 #endif

-  // Register the ASan zone. At this point, it will not be the
-  // default zone.
+  // Register the ASan zone.
   malloc_zone_register(&asan_zone);
-
-  // Unregister and reregister the default zone. Unregistering swaps
-  // the specified zone with the last one registered which for the
-  // default zone makes the more recently registered zone the default
-  // zone. The default zone is then re-registered to ensure that
-  // allocations made from it earlier will be handled correctly.
-  // Things are not guaranteed to work that way, but it's how they work now.
-  system_malloc_zone = malloc_default_zone();
-  malloc_zone_unregister(system_malloc_zone);
-  malloc_zone_register(system_malloc_zone);
-
-  // Make sure the default allocator was replaced.
-  CHECK(malloc_default_zone() == &asan_zone);
-
-  // If __CFInitialize() hasn't been called yet, cf_asan will be created and
-  // installed as the default allocator after __CFInitialize() finishes (see
-  // the interceptor for __CFInitialize() above). Otherwise install cf_asan
-  // right now. On both Snow Leopard and Lion __CFInitialize() calls
-  // __CFAllocatorInitialize(), which initializes the _base._cfisa field of
-  // the default allocators we check here.
-  if (((CFRuntimeBase*)kCFAllocatorSystemDefault)->_cfisa) {
-    MaybeReplaceCFAllocator();
-  }
 }
 }  // namespace __asan
...@@ -109,6 +109,10 @@ static inline bool AddrIsInShadow(uptr a) { ...@@ -109,6 +109,10 @@ static inline bool AddrIsInShadow(uptr a) {
} }
static inline bool AddrIsInShadowGap(uptr a) { static inline bool AddrIsInShadowGap(uptr a) {
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
if (SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd; return a >= kShadowGapBeg && a <= kShadowGapEnd;
} }
......
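For context on the zero-offset special case above: ASan maps an application address to its shadow as shadow = (addr >> SHADOW_SCALE) + SHADOW_OFFSET, so with a zero offset the shadow of low memory itself lands near address zero and the low range must be reserved as gap. A minimal sketch with invented constants, not the real platform layout:

#include <cstdint>

static const uint64_t kShadowOffset = 0;               // zero-based shadow mode
static const uint64_t kShadowGapBeg = 0x00007fff8000;  // made up for illustration
static const uint64_t kShadowGapEnd = 0x00008fff6fff;  // made up for illustration

static uint64_t MemToShadow(uint64_t a) { return (a >> 3) + kShadowOffset; }

static bool AddrIsInShadowGapSketch(uint64_t a) {
  if (kShadowOffset == 0)
    return a <= kShadowGapEnd;  // addresses near zero count as gap too
  return a >= kShadowGapBeg && a <= kShadowGapEnd;
}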
...@@ -25,8 +25,9 @@ void ReplaceOperatorsNewAndDelete() { } ...@@ -25,8 +25,9 @@ void ReplaceOperatorsNewAndDelete() { }
using namespace __asan; // NOLINT using namespace __asan; // NOLINT
// On Android new() goes through malloc interceptors. // On Mac and Android new() goes through malloc interceptors.
#if !ASAN_ANDROID // See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
#if !ASAN_ANDROID && !ASAN_MAC
// Fake std::nothrow_t to avoid including <new>. // Fake std::nothrow_t to avoid including <new>.
namespace std { namespace std {
......
...@@ -23,7 +23,7 @@ void PoisonShadow(uptr addr, uptr size, u8 value) { ...@@ -23,7 +23,7 @@ void PoisonShadow(uptr addr, uptr size, u8 value) {
CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsAlignedByGranularity(addr + size)); CHECK(AddrIsAlignedByGranularity(addr + size));
uptr shadow_beg = MemToShadow(addr); uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size); uptr shadow_end = MemToShadow(addr + size - SHADOW_GRANULARITY) + 1;
CHECK(REAL(memset) != 0); CHECK(REAL(memset) != 0);
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg); REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} }
......
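The new shadow_end above is equivalent to the old one for granularity-aligned ranges, but it is computed from the last byte inside the range, so it never forms the shadow address of the byte one past the region. A quick check under the assumptions SHADOW_GRANULARITY == 8 and a hypothetical mem-to-shadow of a >> 3:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kGranularity = 8;                        // assumed
  auto mem_to_shadow = [](uint64_t a) { return a >> 3; }; // hypothetical
  for (uint64_t addr = 0; addr < (1 << 12); addr += kGranularity)
    for (uint64_t size = kGranularity; size < (1 << 10); size += kGranularity)
      assert(mem_to_shadow(addr + size) ==
             mem_to_shadow(addr + size - kGranularity) + 1);
  return 0;
}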
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
#include "asan_allocator.h" #include "asan_allocator.h"
#include "asan_interceptors.h" #include "asan_interceptors.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h" #include "asan_mapping.h"
#include "asan_report.h" #include "asan_report.h"
#include "asan_stack.h" #include "asan_stack.h"
...@@ -140,10 +139,12 @@ void InitializeFlags(Flags *f, const char *env) { ...@@ -140,10 +139,12 @@ void InitializeFlags(Flags *f, const char *env) {
f->allow_reexec = true; f->allow_reexec = true;
f->print_full_thread_history = true; f->print_full_thread_history = true;
f->log_path = 0; f->log_path = 0;
f->fast_unwind_on_fatal = true; f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true; f->fast_unwind_on_malloc = true;
f->poison_heap = true; f->poison_heap = true;
f->alloc_dealloc_mismatch = true; // Turn off alloc/dealloc mismatch checker on Mac for now.
// TODO(glider): Fix known issues and enable this back.
f->alloc_dealloc_mismatch = (ASAN_MAC == 0);
f->use_stack_depot = true; // Only affects allocator2. f->use_stack_depot = true; // Only affects allocator2.
// Override from user-specified string. // Override from user-specified string.
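As a reminder of what alloc_dealloc_mismatch catches where it stays enabled: allocation and deallocation performed by mismatched function families. A minimal, intentionally buggy reproducer:

#include <cstdlib>

int main() {
  int *p = new int[4];
  std::free(p);  // reported: new[] memory must be released with delete[]
  return 0;
}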
...@@ -228,7 +229,6 @@ static NOINLINE void force_interface_symbols() { ...@@ -228,7 +229,6 @@ static NOINLINE void force_interface_symbols() {
case 8: __asan_report_store4(0); break; case 8: __asan_report_store4(0); break;
case 9: __asan_report_store8(0); break; case 9: __asan_report_store8(0); break;
case 10: __asan_report_store16(0); break; case 10: __asan_report_store16(0); break;
case 11: __asan_register_global(0, 0, 0); break;
case 12: __asan_register_globals(0, 0); break; case 12: __asan_register_globals(0, 0); break;
case 13: __asan_unregister_globals(0, 0); break; case 13: __asan_unregister_globals(0, 0); break;
case 14: __asan_set_death_callback(0); break; case 14: __asan_set_death_callback(0); break;
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_interceptors.h" #include "asan_interceptors.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_lock.h"
#include "asan_stats.h" #include "asan_stats.h"
#include "asan_thread_registry.h" #include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h" #include "sanitizer/asan_interface.h"
...@@ -53,13 +52,13 @@ void AsanStats::Print() { ...@@ -53,13 +52,13 @@ void AsanStats::Print() {
malloc_large, malloc_small_slow); malloc_large, malloc_small_slow);
} }
static AsanLock print_lock(LINKER_INITIALIZED); static BlockingMutex print_lock(LINKER_INITIALIZED);
static void PrintAccumulatedStats() { static void PrintAccumulatedStats() {
AsanStats stats; AsanStats stats;
asanThreadRegistry().GetAccumulatedStats(&stats); asanThreadRegistry().GetAccumulatedStats(&stats);
// Use lock to keep reports from mixing up. // Use lock to keep reports from mixing up.
ScopedLock lock(&print_lock); BlockingMutexLock lock(&print_lock);
stats.Print(); stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats(); StackDepotStats *stack_depot_stats = StackDepotGetStats();
Printf("Stats: StackDepot: %zd ids; %zdM mapped\n", Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
......
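Throughout this merge the tool-local AsanLock/ScopedLock pair is replaced by sanitizer_common's BlockingMutex/BlockingMutexLock; both follow the same scoped-locking pattern. A generic sketch of that pattern using the standard library rather than the sanitizer types:

#include <mutex>

static std::mutex print_mu;

static void PrintUnderLock() {
  std::lock_guard<std::mutex> lock(print_mu);  // released on scope exit
  // ... emit output that must not interleave with other threads ...
}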
...@@ -72,7 +72,7 @@ void AsanThread::Destroy() { ...@@ -72,7 +72,7 @@ void AsanThread::Destroy() {
void AsanThread::Init() { void AsanThread::Init() {
SetThreadStackTopAndBottom(); SetThreadStackTopAndBottom();
CHECK(AddrIsInMem(stack_bottom_)); CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_)); CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStack(); ClearShadowForThreadStack();
if (flags()->verbosity >= 1) { if (flags()->verbosity >= 1) {
int local = 0; int local = 0;
......
...@@ -42,7 +42,7 @@ void AsanThreadRegistry::Init() { ...@@ -42,7 +42,7 @@ void AsanThreadRegistry::Init() {
} }
void AsanThreadRegistry::RegisterThread(AsanThread *thread) { void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
ScopedLock lock(&mu_); BlockingMutexLock lock(&mu_);
u32 tid = n_threads_; u32 tid = n_threads_;
n_threads_++; n_threads_++;
CHECK(n_threads_ < kMaxNumberOfThreads); CHECK(n_threads_ < kMaxNumberOfThreads);
...@@ -54,7 +54,7 @@ void AsanThreadRegistry::RegisterThread(AsanThread *thread) { ...@@ -54,7 +54,7 @@ void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
} }
void AsanThreadRegistry::UnregisterThread(AsanThread *thread) { void AsanThreadRegistry::UnregisterThread(AsanThread *thread) {
ScopedLock lock(&mu_); BlockingMutexLock lock(&mu_);
FlushToAccumulatedStatsUnlocked(&thread->stats()); FlushToAccumulatedStatsUnlocked(&thread->stats());
AsanThreadSummary *summary = thread->summary(); AsanThreadSummary *summary = thread->summary();
CHECK(summary); CHECK(summary);
...@@ -103,13 +103,13 @@ AsanStats &AsanThreadRegistry::GetCurrentThreadStats() { ...@@ -103,13 +103,13 @@ AsanStats &AsanThreadRegistry::GetCurrentThreadStats() {
} }
void AsanThreadRegistry::GetAccumulatedStats(AsanStats *stats) { void AsanThreadRegistry::GetAccumulatedStats(AsanStats *stats) {
ScopedLock lock(&mu_); BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked(); UpdateAccumulatedStatsUnlocked();
internal_memcpy(stats, &accumulated_stats_, sizeof(accumulated_stats_)); internal_memcpy(stats, &accumulated_stats_, sizeof(accumulated_stats_));
} }
uptr AsanThreadRegistry::GetCurrentAllocatedBytes() { uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
ScopedLock lock(&mu_); BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked(); UpdateAccumulatedStatsUnlocked();
uptr malloced = accumulated_stats_.malloced; uptr malloced = accumulated_stats_.malloced;
uptr freed = accumulated_stats_.freed; uptr freed = accumulated_stats_.freed;
...@@ -119,13 +119,13 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() { ...@@ -119,13 +119,13 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
} }
uptr AsanThreadRegistry::GetHeapSize() { uptr AsanThreadRegistry::GetHeapSize() {
ScopedLock lock(&mu_); BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked(); UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.mmaped - accumulated_stats_.munmaped; return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
} }
uptr AsanThreadRegistry::GetFreeBytes() { uptr AsanThreadRegistry::GetFreeBytes() {
ScopedLock lock(&mu_); BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked(); UpdateAccumulatedStatsUnlocked();
uptr total_free = accumulated_stats_.mmaped uptr total_free = accumulated_stats_.mmaped
- accumulated_stats_.munmaped - accumulated_stats_.munmaped
...@@ -141,7 +141,7 @@ uptr AsanThreadRegistry::GetFreeBytes() { ...@@ -141,7 +141,7 @@ uptr AsanThreadRegistry::GetFreeBytes() {
// Return several stats counters with a single call to // Return several stats counters with a single call to
// UpdateAccumulatedStatsUnlocked(). // UpdateAccumulatedStatsUnlocked().
void AsanThreadRegistry::FillMallocStatistics(AsanMallocStats *malloc_stats) { void AsanThreadRegistry::FillMallocStatistics(AsanMallocStats *malloc_stats) {
ScopedLock lock(&mu_); BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked(); UpdateAccumulatedStatsUnlocked();
malloc_stats->blocks_in_use = accumulated_stats_.mallocs; malloc_stats->blocks_in_use = accumulated_stats_.mallocs;
malloc_stats->size_in_use = accumulated_stats_.malloced; malloc_stats->size_in_use = accumulated_stats_.malloced;
...@@ -156,7 +156,7 @@ AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) { ...@@ -156,7 +156,7 @@ AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) {
} }
AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) { AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) {
ScopedLock lock(&mu_); BlockingMutexLock lock(&mu_);
for (u32 tid = 0; tid < n_threads_; tid++) { for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread(); AsanThread *t = thread_summaries_[tid]->thread();
if (!t || !(t->fake_stack().StackSize())) continue; if (!t || !(t->fake_stack().StackSize())) continue;
......
...@@ -13,10 +13,10 @@ ...@@ -13,10 +13,10 @@
#ifndef ASAN_THREAD_REGISTRY_H #ifndef ASAN_THREAD_REGISTRY_H
#define ASAN_THREAD_REGISTRY_H #define ASAN_THREAD_REGISTRY_H
#include "asan_lock.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "asan_stats.h" #include "asan_stats.h"
#include "asan_thread.h" #include "asan_thread.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan { namespace __asan {
...@@ -71,7 +71,7 @@ class AsanThreadRegistry { ...@@ -71,7 +71,7 @@ class AsanThreadRegistry {
// per-thread AsanStats. // per-thread AsanStats.
uptr max_malloced_memory_; uptr max_malloced_memory_;
u32 n_threads_; u32 n_threads_;
AsanLock mu_; BlockingMutex mu_;
bool inited_; bool inited_;
}; };
......
...@@ -15,18 +15,16 @@ ...@@ -15,18 +15,16 @@
#include <dbghelp.h> #include <dbghelp.h>
#include <stdlib.h> #include <stdlib.h>
#include <new> // FIXME: temporarily needed for placement new in AsanLock.
#include "asan_interceptors.h" #include "asan_interceptors.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_lock.h"
#include "asan_thread.h" #include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan { namespace __asan {
// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1 // ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
static AsanLock dbghelp_lock(LINKER_INITIALIZED); static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false; static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib") #pragma comment(lib, "dbghelp.lib")
...@@ -54,42 +52,6 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) { ...@@ -54,42 +52,6 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
stack->trace[i] = (uptr)tmp[i + offset]; stack->trace[i] = (uptr)tmp[i + offset];
} }
// ---------------------- AsanLock ---------------- {{{1
enum LockState {
LOCK_UNINITIALIZED = 0,
LOCK_READY = -1,
};
AsanLock::AsanLock(LinkerInitialized li) {
// FIXME: see comments in AsanLock::Lock() for the details.
CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
owner_ = LOCK_READY;
}
void AsanLock::Lock() {
if (owner_ == LOCK_UNINITIALIZED) {
// FIXME: hm, global AsanLock objects are not initialized?!?
// This might be a side effect of the clang+cl+link Frankenbuild...
new(this) AsanLock((LinkerInitialized)(LINKER_INITIALIZED + 1));
// FIXME: If it turns out the linker doesn't invoke our
// constructors, we should probably manually Lock/Unlock all the global
// locks while we're starting in one thread to avoid double-init races.
}
EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
CHECK(owner_ == LOCK_READY);
owner_ = GetThreadSelf();
}
void AsanLock::Unlock() {
CHECK(owner_ == GetThreadSelf());
owner_ = LOCK_READY;
LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}
// ---------------------- TSD ---------------- {{{1 // ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false; static bool tsd_key_inited = false;
...@@ -138,7 +100,7 @@ void AsanPlatformThreadInit() { ...@@ -138,7 +100,7 @@ void AsanPlatformThreadInit() {
// Nothing here for now. // Nothing here for now.
} }
void ClearShadowMemoryForContext(void *context) { void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
...@@ -150,7 +112,7 @@ using namespace __asan; // NOLINT ...@@ -150,7 +112,7 @@ using namespace __asan; // NOLINT
extern "C" { extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE NOINLINE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) { bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
ScopedLock lock(&dbghelp_lock); BlockingMutexLock lock(&dbghelp_lock);
if (!dbghelp_initialized) { if (!dbghelp_initialized) {
SymSetOptions(SYMOPT_DEFERRED_LOADS | SymSetOptions(SYMOPT_DEFERRED_LOADS |
SYMOPT_UNDNAME | SYMOPT_UNDNAME |
......
...@@ -99,9 +99,19 @@ const interpose_substitution substitutions[] ...@@ -99,9 +99,19 @@ const interpose_substitution substitutions[]
INTERPOSE_FUNCTION(signal), INTERPOSE_FUNCTION(signal),
INTERPOSE_FUNCTION(sigaction), INTERPOSE_FUNCTION(sigaction),
INTERPOSE_FUNCTION(__CFInitialize), INTERPOSE_FUNCTION(malloc_create_zone),
INTERPOSE_FUNCTION(CFStringCreateCopy), INTERPOSE_FUNCTION(malloc_default_zone),
INTERPOSE_FUNCTION(malloc_default_purgeable_zone),
INTERPOSE_FUNCTION(malloc_make_purgeable),
INTERPOSE_FUNCTION(malloc_make_nonpurgeable),
INTERPOSE_FUNCTION(malloc_set_zone_name),
INTERPOSE_FUNCTION(malloc),
INTERPOSE_FUNCTION(free), INTERPOSE_FUNCTION(free),
INTERPOSE_FUNCTION(realloc),
INTERPOSE_FUNCTION(calloc),
INTERPOSE_FUNCTION(valloc),
INTERPOSE_FUNCTION(malloc_good_size),
INTERPOSE_FUNCTION(posix_memalign),
}; };
} // namespace __asan } // namespace __asan
......
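A hedged sketch of the Mach-O mechanism these INTERPOSE_FUNCTION entries rely on (field names assumed; the real macro lives in interception/interception.h): dyld reads {replacement, original} pairs from the __DATA,__interpose section and rebinds calls in the other loaded images.

#include <cstdio>
#include <cstdlib>

static void wrap_free(void *p) {
  std::fprintf(stderr, "free(%p)\n", p);
  free(p);  // calls inside the interposing image itself are not rebound
}

struct interpose_substitution {
  const void *replacement;
  const void *original;
};

__attribute__((used, section("__DATA,__interpose")))
static const interpose_substitution interpose_free = {
    (const void *)&wrap_free, (const void *)&free};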
...@@ -26,11 +26,6 @@ extern "C" { ...@@ -26,11 +26,6 @@ extern "C" {
// before any instrumented code is executed and before any call to malloc. // before any instrumented code is executed and before any call to malloc.
void __asan_init() SANITIZER_INTERFACE_ATTRIBUTE; void __asan_init() SANITIZER_INTERFACE_ATTRIBUTE;
// This function should be called by the instrumented code.
// 'addr' is the address of a global variable called 'name' of 'size' bytes.
void __asan_register_global(uptr addr, uptr size, const char *name)
SANITIZER_INTERFACE_ATTRIBUTE;
// This structure describes an instrumented global variable. // This structure describes an instrumented global variable.
struct __asan_global { struct __asan_global {
uptr beg; // The address of the global. uptr beg; // The address of the global.
......
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
// the standard system types (e.g. SSIZE_T instead of ssize_t) // the standard system types (e.g. SSIZE_T instead of ssize_t)
typedef __sanitizer::uptr SIZE_T; typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T; typedef __sanitizer::sptr SSIZE_T;
typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T;
typedef __sanitizer::u64 OFF_T; typedef __sanitizer::u64 OFF_T;
typedef __sanitizer::u64 OFF64_T; typedef __sanitizer::u64 OFF64_T;
......
...@@ -16,7 +16,7 @@ get_current_rev() { ...@@ -16,7 +16,7 @@ get_current_rev() {
} }
list_files() { list_files() {
(cd $1; ls *.{cc,h} 2> /dev/null) (cd $1; ls *.{cc,h,inc} 2> /dev/null)
} }
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include "sanitizer_libc.h" #include "sanitizer_libc.h"
#include "sanitizer_list.h" #include "sanitizer_list.h"
#include "sanitizer_mutex.h" #include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"
namespace __sanitizer { namespace __sanitizer {
...@@ -62,7 +63,8 @@ namespace __sanitizer { ...@@ -62,7 +63,8 @@ namespace __sanitizer {
// c32 => s: 512 diff: +32 06% l 9 cached: 64 32768; id 32 // c32 => s: 512 diff: +32 06% l 9 cached: 64 32768; id 32
template <uptr kMaxSizeLog, uptr kMaxNumCached, uptr kMaxBytesCachedLog> template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog,
uptr kMinBatchClassT>
class SizeClassMap { class SizeClassMap {
static const uptr kMinSizeLog = 3; static const uptr kMinSizeLog = 3;
static const uptr kMidSizeLog = kMinSizeLog + 4; static const uptr kMidSizeLog = kMinSizeLog + 4;
...@@ -73,6 +75,14 @@ class SizeClassMap { ...@@ -73,6 +75,14 @@ class SizeClassMap {
static const uptr M = (1 << S) - 1; static const uptr M = (1 << S) - 1;
public: public:
static const uptr kMaxNumCached = kMaxNumCachedT;
struct TransferBatch {
TransferBatch *next;
uptr count;
void *batch[kMaxNumCached];
};
static const uptr kMinBatchClass = kMinBatchClassT;
static const uptr kMaxSize = 1 << kMaxSizeLog; static const uptr kMaxSize = 1 << kMaxSizeLog;
static const uptr kNumClasses = static const uptr kNumClasses =
kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1; kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
...@@ -148,44 +158,25 @@ class SizeClassMap { ...@@ -148,44 +158,25 @@ class SizeClassMap {
if (c > 0) if (c > 0)
CHECK_LT(Size(c-1), s); CHECK_LT(Size(c-1), s);
} }
}
};
typedef SizeClassMap<15, 256, 16> DefaultSizeClassMap;
typedef SizeClassMap<15, 64, 14> CompactSizeClassMap;
// TransferBatch for kMinBatchClass must fit into the block itself.
struct AllocatorListNode { const uptr batch_size = sizeof(TransferBatch)
AllocatorListNode *next; - sizeof(void*) // NOLINT
* (kMaxNumCached - MaxCached(kMinBatchClass));
CHECK_LE(batch_size, Size(kMinBatchClass));
// TransferBatch for kMinBatchClass-1 must not fit into the block itself.
const uptr batch_size1 = sizeof(TransferBatch)
- sizeof(void*) // NOLINT
* (kMaxNumCached - MaxCached(kMinBatchClass - 1));
CHECK_GT(batch_size1, Size(kMinBatchClass - 1));
}
}; };
typedef IntrusiveList<AllocatorListNode> AllocatorFreeList; typedef SizeClassMap<17, 256, 16, FIRST_32_SECOND_64(33, 36)>
DefaultSizeClassMap;
// Move at most max_count chunks from allocate_from to allocate_to. typedef SizeClassMap<17, 64, 14, FIRST_32_SECOND_64(25, 28)>
// This function had better be a method of AllocatorFreeList, but we can't CompactSizeClassMap;
// inherit it from IntrusiveList as the ancient gcc complains about non-PODness. template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
static inline uptr BulkMove(uptr max_count,
AllocatorFreeList *allocate_from,
AllocatorFreeList *allocate_to) {
CHECK(!allocate_from->empty());
CHECK(allocate_to->empty());
uptr res = 0;
if (allocate_from->size() <= max_count) {
res = allocate_from->size();
allocate_to->append_front(allocate_from);
CHECK(allocate_from->empty());
} else {
for (uptr i = 0; i < max_count; i++) {
AllocatorListNode *node = allocate_from->front();
allocate_from->pop_front();
allocate_to->push_front(node);
}
res = max_count;
CHECK(!allocate_from->empty());
}
CHECK(!allocate_to->empty());
return res;
}
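The two CHECKs above pin down kMinBatchClass: a TransferBatch trimmed to MaxCached(c) pointers costs two header words plus one word per cached chunk, and that footprint must fit in a block of class kMinBatchClass but not in one a class smaller. A back-of-the-envelope sketch for a 64-bit target with kMaxNumCached = 256; the MaxCached values iterated over are illustrative:

#include <cstdio>
#include <initializer_list>

struct TransferBatch {
  TransferBatch *next;
  unsigned long count;
  void *batch[256];  // kMaxNumCached in DefaultSizeClassMap
};

int main() {
  for (unsigned long n : {1ul, 16ul, 256ul})  // hypothetical MaxCached values
    std::printf("MaxCached=%lu -> %lu bytes\n", n,
                sizeof(TransferBatch) - sizeof(void *) * (256 - n));
  return 0;
}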
// Allocators call these callbacks on mmap/munmap. // Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback { struct NoOpMapUnmapCallback {
...@@ -214,6 +205,11 @@ template <const uptr kSpaceBeg, const uptr kSpaceSize, ...@@ -214,6 +205,11 @@ template <const uptr kSpaceBeg, const uptr kSpaceSize,
class MapUnmapCallback = NoOpMapUnmapCallback> class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 { class SizeClassAllocator64 {
public: public:
typedef typename SizeClassMap::TransferBatch Batch;
typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
SizeClassMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
void Init() { void Init() {
CHECK_EQ(kSpaceBeg, CHECK_EQ(kSpaceBeg,
reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize))); reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
...@@ -235,36 +231,20 @@ class SizeClassAllocator64 { ...@@ -235,36 +231,20 @@ class SizeClassAllocator64 {
alignment <= SizeClassMap::kMaxSize; alignment <= SizeClassMap::kMaxSize;
} }
void *Allocate(uptr size, uptr alignment) { Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
if (size < alignment) size = alignment;
CHECK(CanAllocate(size, alignment));
return AllocateBySizeClass(ClassID(size));
}
void Deallocate(void *p) {
CHECK(PointerIsMine(p));
DeallocateBySizeClass(p, GetSizeClass(p));
}
// Allocate several chunks of the given class_id.
void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
CHECK_LT(class_id, kNumClasses); CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id); RegionInfo *region = GetRegionInfo(class_id);
SpinMutexLock l(&region->mutex); Batch *b = region->free_list.Pop();
if (region->free_list.empty()) { if (b == 0)
PopulateFreeList(class_id, region); b = PopulateFreeList(c, class_id, region);
} region->n_allocated += b->count;
region->n_allocated += BulkMove(SizeClassMap::MaxCached(class_id), return b;
&region->free_list, free_list);
} }
// Swallow the entire free_list for the given class_id. void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id); RegionInfo *region = GetRegionInfo(class_id);
SpinMutexLock l(&region->mutex); region->free_list.Push(b);
region->n_freed += free_list->size(); region->n_freed += b->count;
region->free_list.append_front(free_list);
} }
static bool PointerIsMine(void *p) { static bool PointerIsMine(void *p) {
...@@ -352,15 +332,15 @@ class SizeClassAllocator64 { ...@@ -352,15 +332,15 @@ class SizeClassAllocator64 {
COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2))); COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
// Populate the free list with at most this number of bytes at once // Populate the free list with at most this number of bytes at once
// or with one element if its size is greater. // or with one element if its size is greater.
static const uptr kPopulateSize = 1 << 15; static const uptr kPopulateSize = 1 << 14;
// Call mmap for user memory with at least this size. // Call mmap for user memory with at least this size.
static const uptr kUserMapSize = 1 << 15; static const uptr kUserMapSize = 1 << 15;
// Call mmap for metadata memory with at least this size. // Call mmap for metadata memory with at least this size.
static const uptr kMetaMapSize = 1 << 16; static const uptr kMetaMapSize = 1 << 16;
struct RegionInfo { struct RegionInfo {
SpinMutex mutex; BlockingMutex mutex;
AllocatorFreeList free_list; LFStack<Batch> free_list;
uptr allocated_user; // Bytes allocated for user memory. uptr allocated_user; // Bytes allocated for user memory.
uptr allocated_meta; // Bytes allocated for metadata. uptr allocated_meta; // Bytes allocated for metadata.
uptr mapped_user; // Bytes mapped for user memory. uptr mapped_user; // Bytes mapped for user memory.
...@@ -388,11 +368,16 @@ class SizeClassAllocator64 { ...@@ -388,11 +368,16 @@ class SizeClassAllocator64 {
return offset / (u32)size; return offset / (u32)size;
} }
void PopulateFreeList(uptr class_id, RegionInfo *region) { Batch *NOINLINE PopulateFreeList(AllocatorCache *c, uptr class_id,
CHECK(region->free_list.empty()); RegionInfo *region) {
BlockingMutexLock l(&region->mutex);
Batch *b = region->free_list.Pop();
if (b)
return b;
uptr size = SizeClassMap::Size(class_id); uptr size = SizeClassMap::Size(class_id);
uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
uptr beg_idx = region->allocated_user; uptr beg_idx = region->allocated_user;
uptr end_idx = beg_idx + kPopulateSize; uptr end_idx = beg_idx + count * size;
uptr region_beg = kSpaceBeg + kRegionSize * class_id; uptr region_beg = kSpaceBeg + kRegionSize * class_id;
if (end_idx + size > region->mapped_user) { if (end_idx + size > region->mapped_user) {
// Do the mmap for the user memory. // Do the mmap for the user memory.
...@@ -403,17 +388,9 @@ class SizeClassAllocator64 { ...@@ -403,17 +388,9 @@ class SizeClassAllocator64 {
MapWithCallback(region_beg + region->mapped_user, map_size); MapWithCallback(region_beg + region->mapped_user, map_size);
region->mapped_user += map_size; region->mapped_user += map_size;
} }
uptr idx = beg_idx; uptr total_count = (region->mapped_user - beg_idx - size)
uptr i = 0; / size / count * count;
do { // do-while loop because we need to put at least one item. region->allocated_meta += total_count * kMetadataSize;
uptr p = region_beg + idx;
region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
idx += size;
i++;
} while (idx < end_idx);
region->allocated_user += idx - beg_idx;
CHECK_LE(region->allocated_user, region->mapped_user);
region->allocated_meta += i * kMetadataSize;
if (region->allocated_meta > region->mapped_meta) { if (region->allocated_meta > region->mapped_meta) {
uptr map_size = kMetaMapSize; uptr map_size = kMetaMapSize;
while (region->allocated_meta > region->mapped_meta + map_size) while (region->allocated_meta > region->mapped_meta + map_size)
...@@ -431,27 +408,22 @@ class SizeClassAllocator64 { ...@@ -431,27 +408,22 @@ class SizeClassAllocator64 {
kRegionSize / 1024 / 1024, size); kRegionSize / 1024 / 1024, size);
Die(); Die();
} }
} for (;;) {
if (class_id < SizeClassMap::kMinBatchClass)
void *AllocateBySizeClass(uptr class_id) { b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
CHECK_LT(class_id, kNumClasses); else
RegionInfo *region = GetRegionInfo(class_id); b = (Batch*)(region_beg + beg_idx);
SpinMutexLock l(&region->mutex); b->count = count;
if (region->free_list.empty()) { for (uptr i = 0; i < count; i++)
PopulateFreeList(class_id, region); b->batch[i] = (void*)(region_beg + beg_idx + i * size);
region->allocated_user += count * size;
CHECK_LE(region->allocated_user, region->mapped_user);
beg_idx += count * size;
if (beg_idx + count * size + size > region->mapped_user)
break;
region->free_list.Push(b);
} }
CHECK(!region->free_list.empty()); return b;
AllocatorListNode *node = region->free_list.front();
region->free_list.pop_front();
region->n_allocated++;
return reinterpret_cast<void*>(node);
}
void DeallocateBySizeClass(void *p, uptr class_id) {
RegionInfo *region = GetRegionInfo(class_id);
SpinMutexLock l(&region->mutex);
region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
region->n_freed++;
} }
}; };
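The total_count arithmetic in PopulateFreeList above rounds the number of chunks that fit into the mapped tail down to a whole number of batches. A worked example with made-up numbers:

#include <cstdio>

int main() {
  unsigned long mapped_user = 1 << 15;  // bytes mapped for this region
  unsigned long beg_idx = 0;            // first byte not yet handed out
  unsigned long size = 96;              // chunk size of this class
  unsigned long count = 32;             // chunks per TransferBatch
  unsigned long total_count =
      (mapped_user - beg_idx - size) / size / count * count;
  std::printf("total_count = %lu\n", total_count);  // 320 = 10 full batches
  return 0;
}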
...@@ -480,6 +452,11 @@ template <const uptr kSpaceBeg, const u64 kSpaceSize, ...@@ -480,6 +452,11 @@ template <const uptr kSpaceBeg, const u64 kSpaceSize,
class MapUnmapCallback = NoOpMapUnmapCallback> class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 { class SizeClassAllocator32 {
public: public:
typedef typename SizeClassMap::TransferBatch Batch;
typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
SizeClassMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
void Init() { void Init() {
state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State))); state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
} }
...@@ -500,17 +477,6 @@ class SizeClassAllocator32 { ...@@ -500,17 +477,6 @@ class SizeClassAllocator32 {
alignment <= SizeClassMap::kMaxSize; alignment <= SizeClassMap::kMaxSize;
} }
void *Allocate(uptr size, uptr alignment) {
if (size < alignment) size = alignment;
CHECK(CanAllocate(size, alignment));
return AllocateBySizeClass(ClassID(size));
}
void Deallocate(void *p) {
CHECK(PointerIsMine(p));
DeallocateBySizeClass(p, GetSizeClass(p));
}
void *GetMetaData(void *p) { void *GetMetaData(void *p) {
CHECK(PointerIsMine(p)); CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p); uptr mem = reinterpret_cast<uptr>(p);
...@@ -522,20 +488,23 @@ class SizeClassAllocator32 { ...@@ -522,20 +488,23 @@ class SizeClassAllocator32 {
return reinterpret_cast<void*>(meta); return reinterpret_cast<void*>(meta);
} }
// Allocate several chunks of the given class_id. Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) { CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id); SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex); SpinMutexLock l(&sci->mutex);
EnsureSizeClassHasAvailableChunks(sci, class_id); if (sci->free_list.empty())
PopulateFreeList(c, sci, class_id);
CHECK(!sci->free_list.empty()); CHECK(!sci->free_list.empty());
BulkMove(SizeClassMap::MaxCached(class_id), &sci->free_list, free_list); Batch *b = sci->free_list.front();
sci->free_list.pop_front();
return b;
} }
// Swallow the entire free_list for the given class_id. void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) { CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id); SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex); SpinMutexLock l(&sci->mutex);
sci->free_list.append_front(free_list); sci->free_list.push_front(b);
} }
bool PointerIsMine(void *p) { bool PointerIsMine(void *p) {
...@@ -593,8 +562,8 @@ class SizeClassAllocator32 { ...@@ -593,8 +562,8 @@ class SizeClassAllocator32 {
struct SizeClassInfo { struct SizeClassInfo {
SpinMutex mutex; SpinMutex mutex;
AllocatorFreeList free_list; IntrusiveList<Batch> free_list;
char padding[kCacheLineSize - sizeof(uptr) - sizeof(AllocatorFreeList)]; char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
}; };
COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize); COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
...@@ -624,31 +593,28 @@ class SizeClassAllocator32 { ...@@ -624,31 +593,28 @@ class SizeClassAllocator32 {
return &state_->size_class_info_array[class_id]; return &state_->size_class_info_array[class_id];
} }
void EnsureSizeClassHasAvailableChunks(SizeClassInfo *sci, uptr class_id) { void PopulateFreeList(AllocatorCache *c, SizeClassInfo *sci, uptr class_id) {
if (!sci->free_list.empty()) return;
uptr size = SizeClassMap::Size(class_id); uptr size = SizeClassMap::Size(class_id);
uptr reg = AllocateRegion(class_id); uptr reg = AllocateRegion(class_id);
uptr n_chunks = kRegionSize / (size + kMetadataSize); uptr n_chunks = kRegionSize / (size + kMetadataSize);
for (uptr i = reg; i < reg + n_chunks * size; i += size) uptr max_count = SizeClassMap::MaxCached(class_id);
sci->free_list.push_back(reinterpret_cast<AllocatorListNode*>(i)); Batch *b = 0;
} for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (b == 0) {
void *AllocateBySizeClass(uptr class_id) { if (class_id < SizeClassMap::kMinBatchClass)
CHECK_LT(class_id, kNumClasses); b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
SizeClassInfo *sci = GetSizeClassInfo(class_id); else
SpinMutexLock l(&sci->mutex); b = (Batch*)i;
EnsureSizeClassHasAvailableChunks(sci, class_id); b->count = 0;
CHECK(!sci->free_list.empty()); }
AllocatorListNode *node = sci->free_list.front(); b->batch[b->count++] = (void*)i;
sci->free_list.pop_front(); if (b->count == max_count) {
return reinterpret_cast<void*>(node); sci->free_list.push_back(b);
} b = 0;
}
void DeallocateBySizeClass(void *p, uptr class_id) { }
CHECK_LT(class_id, kNumClasses); if (b)
SizeClassInfo *sci = GetSizeClassInfo(class_id); sci->free_list.push_back(b);
SpinMutexLock l(&sci->mutex);
sci->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
} }
struct State { struct State {
...@@ -658,13 +624,14 @@ class SizeClassAllocator32 { ...@@ -658,13 +624,14 @@ class SizeClassAllocator32 {
State *state_; State *state_;
}; };
// Objects of this type should be used as local caches for SizeClassAllocator64. // Objects of this type should be used as local caches for SizeClassAllocator64
// Since the typical use of this class is to have one object per thread in TLS, // or SizeClassAllocator32. Since the typical use of this class is to have one
// it has to be POD. // object per thread in TLS, it has to be POD.
template<class SizeClassAllocator> template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache { struct SizeClassAllocatorLocalCache {
typedef SizeClassAllocator Allocator; typedef SizeClassAllocator Allocator;
static const uptr kNumClasses = SizeClassAllocator::kNumClasses; static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
// Don't need to call Init if the object is a global (i.e. zero-initialized). // Don't need to call Init if the object is a global (i.e. zero-initialized).
void Init() { void Init() {
internal_memset(this, 0, sizeof(*this)); internal_memset(this, 0, sizeof(*this));
...@@ -673,46 +640,77 @@ struct SizeClassAllocatorLocalCache { ...@@ -673,46 +640,77 @@ struct SizeClassAllocatorLocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) { void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL); CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses); CHECK_LT(class_id, kNumClasses);
AllocatorFreeList *free_list = &free_lists_[class_id]; PerClass *c = &per_class_[class_id];
if (free_list->empty()) if (UNLIKELY(c->count == 0))
allocator->BulkAllocate(class_id, free_list); Refill(allocator, class_id);
CHECK(!free_list->empty()); void *res = c->batch[--c->count];
void *res = free_list->front(); PREFETCH(c->batch[c->count - 1]);
free_list->pop_front();
return res; return res;
} }
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) { void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
CHECK_NE(class_id, 0UL); CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses); CHECK_LT(class_id, kNumClasses);
AllocatorFreeList *free_list = &free_lists_[class_id]; PerClass *c = &per_class_[class_id];
free_list->push_front(reinterpret_cast<AllocatorListNode*>(p)); if (UNLIKELY(c->count == c->max_count))
if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id)) Drain(allocator, class_id);
DrainHalf(allocator, class_id); c->batch[c->count++] = p;
} }
void Drain(SizeClassAllocator *allocator) { void Drain(SizeClassAllocator *allocator) {
for (uptr i = 0; i < kNumClasses; i++) { for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
allocator->BulkDeallocate(i, &free_lists_[i]); PerClass *c = &per_class_[class_id];
CHECK(free_lists_[i].empty()); while (c->count > 0)
Drain(allocator, class_id);
} }
} }
// private: // private:
typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap; typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
AllocatorFreeList free_lists_[kNumClasses]; typedef typename SizeClassMap::TransferBatch Batch;
struct PerClass {
void DrainHalf(SizeClassAllocator *allocator, uptr class_id) { uptr count;
AllocatorFreeList *free_list = &free_lists_[class_id]; uptr max_count;
AllocatorFreeList half; void *batch[2 * SizeClassMap::kMaxNumCached];
half.clear(); };
const uptr count = free_list->size() / 2; PerClass per_class_[kNumClasses];
for (uptr i = 0; i < count; i++) {
AllocatorListNode *node = free_list->front(); void InitCache() {
free_list->pop_front(); if (per_class_[0].max_count)
half.push_front(node); return;
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
c->max_count = 2 * SizeClassMap::MaxCached(i);
}
}
void NOINLINE Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
Batch *b = allocator->AllocateBatch(this, class_id);
for (uptr i = 0; i < b->count; i++)
c->batch[i] = b->batch[i];
c->count = b->count;
if (class_id < SizeClassMap::kMinBatchClass)
Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
}
void NOINLINE Drain(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
Batch *b;
if (class_id < SizeClassMap::kMinBatchClass)
b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
else
b = (Batch*)c->batch[0];
uptr cnt = Min(c->max_count / 2, c->count);
for (uptr i = 0; i < cnt; i++) {
b->batch[i] = c->batch[i];
c->batch[i] = c->batch[i + c->max_count / 2];
} }
allocator->BulkDeallocate(class_id, &half); b->count = cnt;
c->count -= cnt;
allocator->DeallocateBatch(class_id, b);
} }
}; };
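The cache protocol above, reduced to a toy to show its shape: allocate from a per-class array, refill a whole batch when it runs dry, and hand half back when it overflows. Capacities and the backing allocation are stand-ins, not the sanitizer types:

#include <cstddef>
#include <vector>

struct ToyCache {
  std::vector<void *> batch;  // stands in for PerClass::batch
  size_t max_count = 8;       // 2 * MaxCached(class_id) in the real code

  void *Allocate() {
    if (batch.empty()) Refill();
    void *res = batch.back();
    batch.pop_back();
    return res;
  }
  void Deallocate(void *p) {
    if (batch.size() == max_count) Drain();
    batch.push_back(p);
  }
  void Refill() {  // stands in for allocator->AllocateBatch(...)
    while (batch.size() < max_count / 2) batch.push_back(::operator new(16));
  }
  void Drain() {   // return half of the cached chunks, as Drain() above does
    for (size_t i = 0; i < max_count / 2; i++) {
      ::operator delete(batch.back());
      batch.pop_back();
    }
  }
};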
...@@ -726,6 +724,7 @@ class LargeMmapAllocator { ...@@ -726,6 +724,7 @@ class LargeMmapAllocator {
internal_memset(this, 0, sizeof(*this)); internal_memset(this, 0, sizeof(*this));
page_size_ = GetPageSizeCached(); page_size_ = GetPageSizeCached();
} }
void *Allocate(uptr size, uptr alignment) { void *Allocate(uptr size, uptr alignment) {
CHECK(IsPowerOfTwo(alignment)); CHECK(IsPowerOfTwo(alignment));
uptr map_size = RoundUpMapSize(size); uptr map_size = RoundUpMapSize(size);
...@@ -745,6 +744,8 @@ class LargeMmapAllocator { ...@@ -745,6 +744,8 @@ class LargeMmapAllocator {
h->size = size; h->size = size;
h->map_beg = map_beg; h->map_beg = map_beg;
h->map_size = map_size; h->map_size = map_size;
uptr size_log = SANITIZER_WORDSIZE - __builtin_clzl(map_size) - 1;
CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
{ {
SpinMutexLock l(&mutex_); SpinMutexLock l(&mutex_);
uptr idx = n_chunks_++; uptr idx = n_chunks_++;
...@@ -754,6 +755,7 @@ class LargeMmapAllocator { ...@@ -754,6 +755,7 @@ class LargeMmapAllocator {
stats.n_allocs++; stats.n_allocs++;
stats.currently_allocated += map_size; stats.currently_allocated += map_size;
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated); stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
stats.by_size_log[size_log]++;
} }
return reinterpret_cast<void*>(res); return reinterpret_cast<void*>(res);
} }
...@@ -825,9 +827,15 @@ class LargeMmapAllocator { ...@@ -825,9 +827,15 @@ class LargeMmapAllocator {
void PrintStats() { void PrintStats() {
Printf("Stats: LargeMmapAllocator: allocated %zd times, " Printf("Stats: LargeMmapAllocator: allocated %zd times, "
"remains %zd (%zd K) max %zd M\n", "remains %zd (%zd K) max %zd M; by size logs: ",
stats.n_allocs, stats.n_allocs - stats.n_frees, stats.n_allocs, stats.n_allocs - stats.n_frees,
stats.currently_allocated >> 10, stats.max_allocated >> 20); stats.currently_allocated >> 10, stats.max_allocated >> 20);
for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
uptr c = stats.by_size_log[i];
if (!c) continue;
Printf("%zd:%zd; ", i, c);
}
Printf("\n");
} }
private: private:
...@@ -858,7 +866,7 @@ class LargeMmapAllocator { ...@@ -858,7 +866,7 @@ class LargeMmapAllocator {
Header *chunks_[kMaxNumChunks]; Header *chunks_[kMaxNumChunks];
uptr n_chunks_; uptr n_chunks_;
struct Stats { struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated; uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats; } stats;
SpinMutex mutex_; SpinMutex mutex_;
}; };
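The by_size_log bucket added above is just floor(log2(map_size)), derived from the leading-zero count. For example, on an LP64 target:

#include <cstdio>

int main() {
  unsigned long map_size = 1ul << 20;  // a 1 MiB mapping
  unsigned long size_log =
      sizeof(unsigned long) * 8 - __builtin_clzl(map_size) - 1;
  std::printf("map_size=%lu -> bucket %lu\n", map_size, size_log);  // 20
  return 0;
}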
...@@ -888,14 +896,10 @@ class CombinedAllocator { ...@@ -888,14 +896,10 @@ class CombinedAllocator {
if (alignment > 8) if (alignment > 8)
size = RoundUpTo(size, alignment); size = RoundUpTo(size, alignment);
void *res; void *res;
if (primary_.CanAllocate(size, alignment)) { if (primary_.CanAllocate(size, alignment))
if (cache) // Allocate from cache. res = cache->Allocate(&primary_, primary_.ClassID(size));
res = cache->Allocate(&primary_, primary_.ClassID(size)); else
else // No thread-local cache, allocate directly from primary allocator.
res = primary_.Allocate(size, alignment);
} else { // Secondary allocator does not use cache.
res = secondary_.Allocate(size, alignment); res = secondary_.Allocate(size, alignment);
}
if (alignment > 8) if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0); CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
if (cleared && res) if (cleared && res)
......
...@@ -39,6 +39,7 @@ INLINE typename T::Type atomic_load( ...@@ -39,6 +39,7 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst)); | memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a))); DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v; typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
if (mo == memory_order_relaxed) { if (mo == memory_order_relaxed) {
v = a->val_dont_use; v = a->val_dont_use;
} else { } else {
...@@ -54,6 +55,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { ...@@ -54,6 +55,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst)); | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a))); DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) { if (mo == memory_order_relaxed) {
a->val_dont_use = v; a->val_dont_use = v;
} else { } else {
......
...@@ -70,6 +70,7 @@ INLINE typename T::Type atomic_load( ...@@ -70,6 +70,7 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst)); | memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a))); DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v; typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
if (mo == memory_order_relaxed) { if (mo == memory_order_relaxed) {
v = a->val_dont_use; v = a->val_dont_use;
} else { } else {
...@@ -85,6 +86,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { ...@@ -85,6 +86,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst)); | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a))); DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) { if (mo == memory_order_relaxed) {
a->val_dont_use = v; a->val_dont_use = v;
} else { } else {
......
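Why the FIXMEs added above matter: on a 32-bit target a plain 8-byte access compiles to two machine-word accesses, so a concurrent writer can be observed half-updated. A sketch of the tearing (not sanitizer code):

#include <cstdint>

struct U64Halves { volatile uint32_t lo, hi; };

uint64_t TornLoad(const U64Halves *a) {
  uint32_t lo = a->lo;  // another thread may store to *a
  uint32_t hi = a->hi;  // ...between these two loads
  return ((uint64_t)hi << 32) | lo;
}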
...@@ -21,10 +21,16 @@ uptr GetPageSizeCached() { ...@@ -21,10 +21,16 @@ uptr GetPageSizeCached() {
return PageSize; return PageSize;
} }
// By default, dump to stderr. If report_fd is kInvalidFd, try to obtain file static bool log_to_file = false; // Set to true by __sanitizer_set_report_path
// descriptor by opening file in report_path.
// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
// isn't equal to the current PID, try to obtain file descriptor by opening
// file "report_path_prefix.<PID>".
static fd_t report_fd = kStderrFd; static fd_t report_fd = kStderrFd;
static char report_path[4096]; // Set via __sanitizer_set_report_path. static char report_path_prefix[4096]; // Set via __sanitizer_set_report_path.
// PID of process that opened |report_fd|. If a fork() occurs, the PID of the
// child thread will be different from |report_fd_pid|.
static int report_fd_pid = 0;
static void (*DieCallback)(void); static void (*DieCallback)(void);
void SetDieCallback(void (*callback)(void)) { void SetDieCallback(void (*callback)(void)) {
...@@ -48,21 +54,29 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond, ...@@ -48,21 +54,29 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
if (CheckFailedCallback) { if (CheckFailedCallback) {
CheckFailedCallback(file, line, cond, v1, v2); CheckFailedCallback(file, line, cond, v1, v2);
} }
Report("Sanitizer CHECK failed: %s:%d %s (%zd, %zd)\n", file, line, cond, Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
v1, v2); v1, v2);
Die(); Die();
} }
static void MaybeOpenReportFile() { static void MaybeOpenReportFile() {
if (report_fd != kInvalidFd) if (!log_to_file || (report_fd_pid == GetPid())) return;
return; InternalScopedBuffer<char> report_path_full(4096);
fd_t fd = internal_open(report_path, true); internal_snprintf(report_path_full.data(), report_path_full.size(),
"%s.%d", report_path_prefix, GetPid());
fd_t fd = internal_open(report_path_full.data(), true);
if (fd == kInvalidFd) { if (fd == kInvalidFd) {
report_fd = kStderrFd; report_fd = kStderrFd;
Report("ERROR: Can't open file: %s\n", report_path); log_to_file = false;
Report("ERROR: Can't open file: %s\n", report_path_full.data());
Die(); Die();
} }
if (report_fd != kInvalidFd) {
// We're in the child. Close the parent's log.
internal_close(report_fd);
}
report_fd = fd; report_fd = fd;
report_fd_pid = GetPid();
} }
bool PrintsToTty() { bool PrintsToTty() {
...@@ -182,14 +196,16 @@ extern "C" { ...@@ -182,14 +196,16 @@ extern "C" {
void __sanitizer_set_report_path(const char *path) { void __sanitizer_set_report_path(const char *path) {
if (!path) return; if (!path) return;
uptr len = internal_strlen(path); uptr len = internal_strlen(path);
if (len > sizeof(report_path) - 100) { if (len > sizeof(report_path_prefix) - 100) {
Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n", Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
path[0], path[1], path[2], path[3], path[0], path[1], path[2], path[3],
path[4], path[5], path[6], path[7]); path[4], path[5], path[6], path[7]);
Die(); Die();
} }
internal_snprintf(report_path, sizeof(report_path), "%s.%d", path, GetPid()); internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
report_path_prefix[len] = '\0';
report_fd = kInvalidFd; report_fd = kInvalidFd;
log_to_file = true;
} }
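Usage sketch for the interface above: after __sanitizer_set_report_path, each process, fork() children included, lazily opens its own "<prefix>.<pid>" file on the first report.

extern "C" void __sanitizer_set_report_path(const char *path);

int main() {
  __sanitizer_set_report_path("/tmp/asan_log");  // hypothetical prefix:
  // reports from this process go to /tmp/asan_log.<pid>
  return 0;
}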
void __sanitizer_set_report_fd(int fd) { void __sanitizer_set_report_fd(int fd) {
......
//===-- sanitizer_common_interceptors.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// This file should be included into the tool's interceptor file,
// which has to define its own macros:
// COMMON_INTERCEPTOR_ENTER
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_INTERCEPTORS_H
#define SANITIZER_COMMON_INTERCEPTORS_H
#include "interception/interception.h"
#include "sanitizer_platform_interceptors.h"
#if SANITIZER_INTERCEPT_READ
INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
COMMON_INTERCEPTOR_ENTER(read, fd, ptr, count);
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_PREAD
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
COMMON_INTERCEPTOR_ENTER(pread, fd, ptr, count, offset);
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_PREAD64
INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
COMMON_INTERCEPTOR_ENTER(pread64, fd, ptr, count, offset);
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_READ
# define INIT_READ INTERCEPT_FUNCTION(read)
#else
# define INIT_READ
#endif
#if SANITIZER_INTERCEPT_PREAD
# define INIT_PREAD INTERCEPT_FUNCTION(pread)
#else
# define INIT_PREAD
#endif
#if SANITIZER_INTERCEPT_PREAD64
# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
#else
# define INIT_PREAD64
#endif
#define SANITIZER_COMMON_INTERCEPTORS_INIT \
INIT_READ; \
INIT_PREAD; \
INIT_PREAD64;
#endif // SANITIZER_COMMON_INTERCEPTORS_H
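A hypothetical instantiation of the header above, showing the macros a tool was expected to provide before this merge (bodies invented for illustration; the real ones live in each tool's interceptor file):

void CheckRange(const void *p, unsigned long size, bool is_write);

#define COMMON_INTERCEPTOR_ENTER(func, ...) (void)0
#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) \
  CheckRange(ptr, size, /*is_write=*/false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) \
  CheckRange(ptr, size, /*is_write=*/true)

#include "sanitizer_common_interceptors.h"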
//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// This file should be included into the tool's interceptor file,
// which has to define its own macros:
// COMMON_INTERCEPTOR_ENTER
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
// COMMON_INTERCEPTOR_FD_ACQUIRE
// COMMON_INTERCEPTOR_FD_RELEASE
// COMMON_INTERCEPTOR_SET_THREAD_NAME
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_platform_interceptors.h"
#include <stdarg.h>
#if SANITIZER_INTERCEPT_READ
INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
# define INIT_READ INTERCEPT_FUNCTION(read)
#else
# define INIT_READ
#endif
#if SANITIZER_INTERCEPT_PREAD
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
# define INIT_PREAD INTERCEPT_FUNCTION(pread)
#else
# define INIT_PREAD
#endif
#if SANITIZER_INTERCEPT_PREAD64
INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
#else
# define INIT_PREAD64
#endif
#if SANITIZER_INTERCEPT_WRITE
INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
if (fd >= 0)
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(write)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
# define INIT_WRITE INTERCEPT_FUNCTION(write)
#else
# define INIT_WRITE
#endif
#if SANITIZER_INTERCEPT_PWRITE
INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
  void* ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset);
  if (fd >= 0)
    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
  SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset);
  if (res > 0)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
  return res;
}
# define INIT_PWRITE INTERCEPT_FUNCTION(pwrite)
#else
# define INIT_PWRITE
#endif
#if SANITIZER_INTERCEPT_PWRITE64
INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, SIZE_T count,
            OFF64_T offset) {
  void* ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset);
  if (fd >= 0)
    COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
  SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset);
  if (res > 0)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
  return res;
}
# define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64)
#else
# define INIT_PWRITE64
#endif
#if SANITIZER_INTERCEPT_PRCTL
INTERCEPTOR(int, prctl, int option,
unsigned long arg2, unsigned long arg3, // NOLINT
unsigned long arg4, unsigned long arg5) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
static const int PR_SET_NAME = 15;
int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
if (option == PR_SET_NAME) {
char buff[16];
internal_strncpy(buff, (char*)arg2, 15);
buff[15] = 0;
COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
}
return res;
}
# define INIT_PRCTL INTERCEPT_FUNCTION(prctl)
#else
# define INIT_PRCTL
#endif // SANITIZER_INTERCEPT_PRCTL
#if SANITIZER_INTERCEPT_SCANF
#include "sanitizer_common_interceptors_scanf.inc"
INTERCEPTOR(int, vscanf, const char *format, va_list ap) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, vscanf, format, ap);
scanf_common(ctx, format, ap);
int res = REAL(vscanf)(format, ap); // NOLINT
return res;
}
INTERCEPTOR(int, vsscanf, const char *str, const char *format, // NOLINT
va_list ap) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, vsscanf, str, format, ap);
scanf_common(ctx, format, ap);
int res = REAL(vsscanf)(str, format, ap); // NOLINT
// FIXME: read of str
return res;
}
INTERCEPTOR(int, vfscanf, void *stream, const char *format, // NOLINT
va_list ap) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, vfscanf, stream, format, ap);
scanf_common(ctx, format, ap);
int res = REAL(vfscanf)(stream, format, ap); // NOLINT
return res;
}
INTERCEPTOR(int, scanf, const char *format, ...) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scanf, format);
va_list ap;
va_start(ap, format);
int res = vscanf(format, ap); // NOLINT
va_end(ap);
return res;
}
INTERCEPTOR(int, fscanf, void* stream, const char *format, ...) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fscanf, stream, format);
va_list ap;
va_start(ap, format);
int res = vfscanf(stream, format, ap); // NOLINT
va_end(ap);
return res;
}
INTERCEPTOR(int, sscanf, const char *str, const char *format, ...) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sscanf, str, format); // NOLINT
va_list ap;
va_start(ap, format);
int res = vsscanf(str, format, ap); // NOLINT
va_end(ap);
return res;
}
#define INIT_SCANF \
INTERCEPT_FUNCTION(scanf); \
INTERCEPT_FUNCTION(sscanf); /* NOLINT */ \
INTERCEPT_FUNCTION(fscanf); \
INTERCEPT_FUNCTION(vscanf); \
INTERCEPT_FUNCTION(vsscanf); \
INTERCEPT_FUNCTION(vfscanf)
#else
#define INIT_SCANF
#endif
#define SANITIZER_COMMON_INTERCEPTORS_INIT \
INIT_READ; \
INIT_PREAD; \
INIT_PREAD64; \
INIT_PRCTL; \
INIT_WRITE; \
INIT_SCANF;
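How a tool consumes these macros: define the COMMON_INTERCEPTOR_* hooks, include this .inc file, and run SANITIZER_COMMON_INTERCEPTORS_INIT during interceptor setup (the tsan change later in this commit follows exactly this pattern). A minimal hypothetical adapter, with made-up MyCheckRead/MyCheckWrite hooks, might look like:
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
  int _ctx_cookie = 0; ctx = (void*)&_ctx_cookie; (void)ctx;
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) MyCheckRead(ptr, size)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) MyCheckWrite(ptr, size)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) ((void)(fd))
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) ((void)(fd))
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) ((void)(name))
#include "sanitizer_common/sanitizer_common_interceptors.inc"
void InitializeMyInterceptors() {
  SANITIZER_COMMON_INTERCEPTORS_INIT;  // expands to INIT_READ; INIT_PREAD; ...
}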
//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Scanf implementation for use in *Sanitizer interceptors.
//
//===----------------------------------------------------------------------===//
#include <stdarg.h>
#ifdef _WIN32
#define va_copy(dst, src) ((dst) = (src))
#endif // _WIN32
struct ScanfSpec {
char c;
unsigned size;
};
// One-letter specs.
static const ScanfSpec scanf_specs[] = {
{'p', sizeof(void *)},
{'e', sizeof(float)},
{'E', sizeof(float)},
{'a', sizeof(float)},
{'f', sizeof(float)},
{'g', sizeof(float)},
{'d', sizeof(int)},
{'i', sizeof(int)},
{'o', sizeof(int)},
{'u', sizeof(int)},
{'x', sizeof(int)},
{'X', sizeof(int)},
{'n', sizeof(int)},
{'t', sizeof(PTRDIFF_T)},
{'z', sizeof(SIZE_T)},
{'j', sizeof(INTMAX_T)},
{'h', sizeof(short)}
};
static const unsigned scanf_specs_cnt =
sizeof(scanf_specs) / sizeof(scanf_specs[0]);
// %ll?, %L?, %q? specs
static const ScanfSpec scanf_llspecs[] = {
{'e', sizeof(long double)},
{'f', sizeof(long double)},
{'g', sizeof(long double)},
{'d', sizeof(long long)},
{'i', sizeof(long long)},
{'o', sizeof(long long)},
{'u', sizeof(long long)},
{'x', sizeof(long long)}
};
static const unsigned scanf_llspecs_cnt =
sizeof(scanf_llspecs) / sizeof(scanf_llspecs[0]);
// %l? specs
static const ScanfSpec scanf_lspecs[] = {
{'e', sizeof(double)},
{'f', sizeof(double)},
{'g', sizeof(double)},
{'d', sizeof(long)},
{'i', sizeof(long)},
{'o', sizeof(long)},
{'u', sizeof(long)},
{'x', sizeof(long)},
{'X', sizeof(long)},
};
static const unsigned scanf_lspecs_cnt =
sizeof(scanf_lspecs) / sizeof(scanf_lspecs[0]);
static unsigned match_spec(const struct ScanfSpec *spec, unsigned n, char c) {
for (unsigned i = 0; i < n; ++i)
if (spec[i].c == c)
return spec[i].size;
return 0;
}
static void scanf_common(void *ctx, const char *format, va_list ap_const) {
va_list aq;
va_copy(aq, ap_const);
const char *p = format;
unsigned size;
while (*p) {
if (*p != '%') {
++p;
continue;
}
++p;
if (*p == 0)
  break;  // don't walk past the terminating NUL
if (*p == '*' || *p == '%') {
++p;
continue;
}
if (*p == '0' || (*p >= '1' && *p <= '9')) {
size = internal_atoll(p);
// +1 for the \0 at the end
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size + 1);
++p;
continue;
}
if (*p == 'L' || *p == 'q') {
++p;
size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
continue;
}
if (*p == 'l') {
++p;
if (*p == 'l') {
++p;
size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
continue;
} else {
size = match_spec(scanf_lspecs, scanf_lspecs_cnt, *p);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
continue;
}
}
if (*p == 'h' && *(p + 1) == 'h') {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), sizeof(char));
p += 2;
continue;
}
size = match_spec(scanf_specs, scanf_specs_cnt, *p);
if (size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
++p;
continue;
}
}
va_end(aq);
}
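To make the parser's effect concrete, here is a small standalone sketch (not part of the merge; the spec table is abbreviated) that walks a format string the same way and prints how many bytes would be passed to COMMON_INTERCEPTOR_WRITE_RANGE for each conversion:
#include <cstdio>
struct Spec { char c; unsigned size; };
static const Spec kSpecs[] = {
    {'d', sizeof(int)}, {'f', sizeof(float)}, {'p', sizeof(void *)}};
static unsigned MatchSpec(char c) {
  for (unsigned i = 0; i < sizeof(kSpecs) / sizeof(kSpecs[0]); ++i)
    if (kSpecs[i].c == c) return kSpecs[i].size;
  return 0;
}
int main() {
  const char *fmt = "%d %f %p";
  for (const char *p = fmt; *p; ++p) {
    if (*p != '%') continue;
    ++p;  // look at the conversion character after '%'
    if (*p == 0) break;
    if (unsigned size = MatchSpec(*p))
      std::printf("%%%c marks %u bytes as written\n", *p, size);
  }
  return 0;
}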
...@@ -36,6 +36,7 @@ using namespace __sanitizer; // NOLINT
# define UNLIKELY(x) (x)
# define UNUSED
# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
# define ALWAYS_INLINE __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
...@@ -49,6 +50,12 @@ using namespace __sanitizer; // NOLINT
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
# define UNUSED __attribute__((unused))
# define USED __attribute__((used))
# if defined(__i386__) || defined(__x86_64__)
// __builtin_prefetch(x) generates prefetcht0 on x86
# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
# else
# define PREFETCH(x) __builtin_prefetch(x)
# endif
#endif // _MSC_VER
#if defined(_WIN32)
......
//===-- sanitizer_lfstack.h -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Lock-free stack.
// Uses 32/17 bits as ABA-counter on 32/64-bit platforms.
// The memory passed to Push() must never be munmap'ed.
// The type T must contain T *next field.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LFSTACK_H
#define SANITIZER_LFSTACK_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_atomic.h"
namespace __sanitizer {
template<typename T>
struct LFStack {
void Clear() {
atomic_store(&head_, 0, memory_order_relaxed);
}
bool Empty() const {
return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;
}
void Push(T *p) {
u64 cmp = atomic_load(&head_, memory_order_relaxed);
for (;;) {
u64 cnt = (cmp & kCounterMask) + kCounterInc;
u64 xch = (u64)(uptr)p | cnt;
p->next = (T*)(uptr)(cmp & kPtrMask);
if (atomic_compare_exchange_weak(&head_, &cmp, xch,
memory_order_release))
break;
}
}
T *Pop() {
u64 cmp = atomic_load(&head_, memory_order_acquire);
for (;;) {
T *cur = (T*)(uptr)(cmp & kPtrMask);
if (cur == 0)
return 0;
T *nxt = cur->next;
u64 cnt = (cmp & kCounterMask);
u64 xch = (u64)(uptr)nxt | cnt;
if (atomic_compare_exchange_weak(&head_, &cmp, xch,
memory_order_acquire))
return cur;
}
}
// private:
static const int kCounterBits = FIRST_32_SECOND_64(32, 17);
static const u64 kPtrMask = ((u64)-1) >> kCounterBits;
static const u64 kCounterMask = ~kPtrMask;
static const u64 kCounterInc = kPtrMask + 1;
atomic_uint64_t head_;
};
}
#endif // #ifndef SANITIZER_LFSTACK_H
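A minimal usage sketch (hypothetical, not part of the merge): the element type must embed a next pointer, and nodes pushed here must stay mapped for the lifetime of the stack, per the header comment above.
#include "sanitizer_lfstack.h"
struct Node {
  Node *next;  // required by LFStack<T>
  int payload;
};
static __sanitizer::LFStack<Node> free_nodes;  // zero-initialized => empty
void PutNode(Node *n) { free_nodes.Push(n); }
Node *GetNode() { return free_nodes.Pop(); }  // returns 0 when empty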
...@@ -32,6 +32,7 @@
#include <unwind.h>
#include <errno.h>
#include <sys/prctl.h>
#include <linux/futex.h>
// Are we using 32-bit or 64-bit syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
...@@ -198,24 +199,31 @@ const char *GetEnv(const char *name) {
return 0; // Not found.
}
void ReExec() {
static const int kMaxArgv = 100;
InternalScopedBuffer<char*> argv(kMaxArgv + 1);
static char *buff;
uptr buff_size = 0;
ReadFileToBuffer("/proc/self/cmdline", &buff, &buff_size, 1024 * 1024);
argv[0] = buff;
int argc, i;
for (argc = 1, i = 1; ; i++) {
if (buff[i] == 0) {
if (buff[i+1] == 0) break;
argv[argc] = &buff[i+1];
CHECK_LE(argc, kMaxArgv); // FIXME: make this more flexible.
argc++;
}
}
argv[argc] = 0;
execv(argv[0], argv.data());
}
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
uptr buff_size = 0;
*arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024);
(*arr)[0] = buff;
int count, i;
for (count = 1, i = 1; ; i++) {
if (buff[i] == 0) {
if (buff[i+1] == 0) break;
(*arr)[count] = &buff[i+1];
CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible.
count++;
}
}
(*arr)[count] = 0;
}
void ReExec() {
static const int kMaxArgv = 100, kMaxEnvp = 1000;
char **argv, **envp;
ReadNullSepFileToArray("/proc/self/cmdline", &argv, kMaxArgv);
ReadNullSepFileToArray("/proc/self/environ", &envp, kMaxEnvp);
execve(argv[0], argv, envp);
}
void PrepareForSandboxing() {
...@@ -366,16 +374,24 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
}
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
#else
return false;
#endif
}
bool SanitizerGetThreadName(char *name, int max_len) {
#ifdef PR_GET_NAME
char buff[17];
if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
return false;
internal_strncpy(name, buff, max_len);
name[max_len] = 0;
return true;
#else
return false;
#endif
}
#ifndef SANITIZER_GO
...@@ -434,6 +450,32 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
#endif // #ifndef SANITIZER_GO
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
MtxSleeping = 2
};
BlockingMutex::BlockingMutex(LinkerInitialized) {
CHECK_EQ(owner_, 0);
}
void BlockingMutex::Lock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
}
void BlockingMutex::Unlock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
CHECK_NE(v, MtxUnlocked);
if (v == MtxSleeping)
syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
}
} // namespace __sanitizer
#endif // __linux__
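The Linux BlockingMutex above is the classic three-state futex lock (unlocked/locked/sleeping): Unlock() issues the FUTEX_WAKE syscall only if some waiter actually went to sleep. A standalone sketch of the same pattern using std::atomic (illustrative only; Linux-specific, error handling omitted):
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>
static std::atomic<unsigned> g_state(0);  // 0 unlocked, 1 locked, 2 sleeping
void LockSketch() {
  if (g_state.exchange(1, std::memory_order_acquire) == 0)
    return;  // fast path: the lock was free
  // Slow path: publish "sleeping" and wait until the holder wakes us.
  while (g_state.exchange(2, std::memory_order_acquire) != 0)
    syscall(SYS_futex, &g_state, FUTEX_WAIT, 2, 0, 0, 0);
}
void UnlockSketch() {
  if (g_state.exchange(0, std::memory_order_release) == 2)
    syscall(SYS_futex, &g_state, FUTEX_WAKE, 1, 0, 0, 0);  // wake one waiter
}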
...@@ -70,6 +70,8 @@ struct IntrusiveList {
void append_front(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
if (l->empty())
return;
if (empty()) {
*this = *l;
} else if (!l->empty()) {
} else {
...@@ -82,6 +84,8 @@ struct IntrusiveList {
void append_back(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
if (l->empty())
return;
if (empty()) {
*this = *l;
} else {
......
...@@ -28,6 +28,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>
namespace __sanitizer {
...@@ -265,6 +266,25 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
BlockingMutex::BlockingMutex(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
void BlockingMutex::Lock() {
CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
CHECK(OS_SPINLOCK_INIT == 0);
CHECK(owner_ != (uptr)pthread_self());
OSSpinLockLock((OSSpinLock*)&opaque_storage_);
CHECK(!owner_);
owner_ = (uptr)pthread_self();
}
void BlockingMutex::Unlock() {
CHECK(owner_ == (uptr)pthread_self());
owner_ = 0;
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
} // namespace __sanitizer
#endif // __APPLE__
...@@ -25,11 +25,15 @@ class StaticSpinMutex {
}
void Lock() {
if (atomic_exchange(&state_, 1, memory_order_acquire) == 0)
if (TryLock())
return;
LockSlow();
}
bool TryLock() {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
void Unlock() {
atomic_store(&state_, 0, memory_order_release);
}
...@@ -61,6 +65,16 @@ class SpinMutex : public StaticSpinMutex {
void operator=(const SpinMutex&);
};
class BlockingMutex {
public:
explicit BlockingMutex(LinkerInitialized);
void Lock();
void Unlock();
private:
uptr opaque_storage_[10];
uptr owner_; // for debugging
};
template<typename MutexType>
class GenericScopedLock {
public:
...@@ -100,6 +114,7 @@ class GenericScopedReadLock {
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
} // namespace __sanitizer
......
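With the BlockingMutexLock typedef in place, callers can guard a critical section with the new mutex via RAII; a small hypothetical example:
#include "sanitizer_mutex.h"
static __sanitizer::BlockingMutex mu(__sanitizer::LINKER_INITIALIZED);
static int protected_counter;
void Increment() {
  __sanitizer::BlockingMutexLock l(&mu);  // Lock() here, Unlock() at scope exit
  protected_counter++;
}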
...@@ -13,15 +13,24 @@
#include "sanitizer_internal_defs.h"
#if !defined(_WIN32)
# define SANITIZER_INTERCEPT_READ 1
# define SI_NOT_WINDOWS 1
# define SANITIZER_INTERCEPT_PREAD 1
#else
# define SANITIZER_INTERCEPT_READ 0
# define SI_NOT_WINDOWS 0
# define SANITIZER_INTERCEPT_PREAD 0
#endif
#if defined(__linux__) && !defined(ANDROID)
# define SANITIZER_INTERCEPT_PREAD64 1
# define SI_LINUX_NOT_ANDROID 1
#else
# define SANITIZER_INTERCEPT_PREAD64 0
# define SI_LINUX_NOT_ANDROID 0
#endif
# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PRCTL SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCANF 0
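The SI_* helpers make platform gating a one-liner per interceptor. A hypothetical future entry would follow the same shape (names below invented for illustration):
# define SANITIZER_INTERCEPT_READV SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID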
//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches the
// specified threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"
namespace __sanitizer {
template<typename Node> class QuarantineCache;
struct QuarantineBatch {
static const uptr kSize = 1024;
QuarantineBatch *next;
uptr size;
uptr count;
void *batch[kSize];
};
// The callback interface is:
//   void cb.Recycle(Node *ptr);
//   void *cb.Allocate(uptr size);
//   void cb.Deallocate(void *ptr);
template<typename Callback, typename Node>
class Quarantine {
public:
typedef QuarantineCache<Callback> Cache;
explicit Quarantine(LinkerInitialized)
: cache_(LINKER_INITIALIZED) {
}
void Init(uptr size, uptr cache_size) {
max_size_ = size;
min_size_ = size / 10 * 9; // 90% of max size.
max_cache_size_ = cache_size;
}
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
c->Enqueue(cb, ptr, size);
if (c->Size() > max_cache_size_)
Drain(c, cb);
}
void NOINLINE Drain(Cache *c, Callback cb) {
{
SpinMutexLock l(&cache_mutex_);
cache_.Transfer(c);
}
if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
Recycle(cb);
}
private:
// Read-only data.
char pad0_[kCacheLineSize];
uptr max_size_;
uptr min_size_;
uptr max_cache_size_;
char pad1_[kCacheLineSize];
SpinMutex cache_mutex_;
SpinMutex recycle_mutex_;
Cache cache_;
char pad2_[kCacheLineSize];
void NOINLINE Recycle(Callback cb) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);
while (cache_.Size() > min_size_) {
QuarantineBatch *b = cache_.DequeueBatch();
tmp.EnqueueBatch(b);
}
}
recycle_mutex_.Unlock();
DoRecycle(&tmp, cb);
}
void NOINLINE DoRecycle(Cache *c, Callback cb) {
while (QuarantineBatch *b = c->DequeueBatch()) {
const uptr kPrefetch = 16;
for (uptr i = 0; i < kPrefetch; i++)
PREFETCH(b->batch[i]);
for (uptr i = 0; i < b->count; i++) {
PREFETCH(b->batch[i + kPrefetch]);
cb.Recycle((Node*)b->batch[i]);
}
cb.Deallocate(b);
}
}
};
// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
public:
explicit QuarantineCache(LinkerInitialized) {
}
QuarantineCache()
: size_() {
list_.clear();
}
uptr Size() const {
return atomic_load(&size_, memory_order_relaxed);
}
void Enqueue(Callback cb, void *ptr, uptr size) {
if (list_.empty() || list_.back()->count == QuarantineBatch::kSize)
AllocBatch(cb);
QuarantineBatch *b = list_.back();
b->batch[b->count++] = ptr;
b->size += size;
SizeAdd(size);
}
void Transfer(QuarantineCache *c) {
list_.append_back(&c->list_);
SizeAdd(c->Size());
atomic_store(&c->size_, 0, memory_order_relaxed);
}
void EnqueueBatch(QuarantineBatch *b) {
list_.push_back(b);
SizeAdd(b->size);
}
QuarantineBatch *DequeueBatch() {
if (list_.empty())
return 0;
QuarantineBatch *b = list_.front();
list_.pop_front();
SizeAdd(-b->size);
return b;
}
private:
IntrusiveList<QuarantineBatch> list_;
atomic_uintptr_t size_;
void SizeAdd(uptr add) {
atomic_store(&size_, Size() + add, memory_order_relaxed);
}
QuarantineBatch *NOINLINE AllocBatch(Callback cb) {
QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
b->count = 0;
b->size = 0;
list_.push_back(b);
return b;
}
};
}
#endif // #ifndef SANITIZER_QUARANTINE_H
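A sketch of how a tool could satisfy the callback contract from the header comment (the names below are invented for illustration; a real allocator would recycle chunks into its own free lists):
#include "sanitizer_common.h"
#include "sanitizer_quarantine.h"
using namespace __sanitizer;
struct Chunk { uptr size; };
struct ChunkCallback {
  void Recycle(Chunk *c) { /* hand the chunk back to the allocator */ }
  void *Allocate(uptr size) { return MmapOrDie(size, "QuarantineBatch"); }
  void Deallocate(void *p) { UnmapOrDie(p, sizeof(QuarantineBatch)); }
};
typedef Quarantine<ChunkCallback, Chunk> ChunkQuarantine;
static ChunkQuarantine quarantine(LINKER_INITIALIZED);
// At startup:    quarantine.Init(max_bytes, per_thread_cache_bytes);
// On deallocate: quarantine.Put(&thread_cache, ChunkCallback(), c, c->size);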
...@@ -66,7 +66,18 @@ static const char *ExtractInt(const char *str, const char *delims,
char *buff;
const char *ret = ExtractToken(str, delims, &buff);
if (buff != 0) {
*result = internal_atoll(buff);
*result = (int)internal_atoll(buff);
}
InternalFree(buff);
return ret;
}
static const char *ExtractUptr(const char *str, const char *delims,
uptr *result) {
char *buff;
const char *ret = ExtractToken(str, delims, &buff);
if (buff != 0) {
*result = (uptr)internal_atoll(buff);
}
InternalFree(buff);
return ret;
...@@ -96,66 +107,15 @@ class ExternalSymbolizer {
CHECK_NE(output_fd_, kInvalidFd);
}
// Returns the number of frames for a given address, or zero if
// symbolization failed.
uptr SymbolizeCode(uptr addr, const char *module_name, uptr module_offset,
AddressInfo *frames, uptr max_frames) {
CHECK(module_name);
// FIXME: Make sure this buffer always has sufficient size to hold
// large debug info.
static const int kMaxBufferSize = 4096;
InternalScopedBuffer<char> buffer(kMaxBufferSize);
char *buffer_data = buffer.data();
internal_snprintf(buffer_data, kMaxBufferSize, "%s 0x%zx\n",
module_name, module_offset);
if (!writeToSymbolizer(buffer_data, internal_strlen(buffer_data)))
return 0;
if (!readFromSymbolizer(buffer_data, kMaxBufferSize))
return 0;
const char *str = buffer_data;
uptr frame_id;
CHECK_GT(max_frames, 0);
for (frame_id = 0; frame_id < max_frames; frame_id++) {
AddressInfo *info = &frames[frame_id];
char *function_name = 0;
str = ExtractToken(str, "\n", &function_name);
CHECK(function_name);
if (function_name[0] == '\0') {
// There are no more frames.
break;
}
info->Clear();
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
info->function = function_name;
// Parse <file>:<line>:<column> buffer.
char *file_line_info = 0;
str = ExtractToken(str, "\n", &file_line_info);
CHECK(file_line_info);
const char *line_info = ExtractToken(file_line_info, ":", &info->file);
line_info = ExtractInt(line_info, ":", &info->line);
line_info = ExtractInt(line_info, "", &info->column);
InternalFree(file_line_info);
// Functions and filenames can be "??", in which case we write 0
// to address info to mark that names are unknown.
if (0 == internal_strcmp(info->function, "??")) {
InternalFree(info->function);
info->function = 0;
}
if (0 == internal_strcmp(info->file, "??")) {
InternalFree(info->file);
info->file = 0;
}
}
if (frame_id == 0) {
// Make sure we return at least one frame.
AddressInfo *info = &frames[0];
info->Clear();
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
frame_id = 1;
}
return frame_id;
}
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
CHECK(module_name);
internal_snprintf(buffer_, kBufferSize, "%s%s 0x%zx\n",
is_data ? "DATA " : "", module_name, module_offset);
if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
return 0;
if (!readFromSymbolizer(buffer_, kBufferSize))
return 0;
return buffer_;
}
bool Restart() {
...@@ -189,6 +149,7 @@ class ExternalSymbolizer {
}
return true;
}
bool writeToSymbolizer(const char *buffer, uptr length) {
if (length == 0)
return true;
...@@ -204,6 +165,9 @@ class ExternalSymbolizer {
int input_fd_;
int output_fd_;
static const uptr kBufferSize = 16 * 1024;
char buffer_[kBufferSize];
static const uptr kMaxTimesRestarted = 5;
uptr times_restarted_;
};
...@@ -220,30 +184,8 @@ class Symbolizer {
return 0;
const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address();
uptr actual_frames = 0;
if (external_symbolizer_ == 0) {
ReportExternalSymbolizerError(
"WARNING: Trying to symbolize code, but external "
"symbolizer is not initialized!\n");
} else {
while (true) {
actual_frames = external_symbolizer_->SymbolizeCode(
addr, module_name, module_offset, frames, max_frames);
if (actual_frames > 0) {
// Symbolization was successful.
break;
}
// Try to restart symbolizer subprocess. If we don't succeed, forget
// about it and don't try to use it later.
if (!external_symbolizer_->Restart()) {
ReportExternalSymbolizerError(
"WARNING: Failed to use and restart external symbolizer!\n");
external_symbolizer_ = 0;
break;
}
}
}
if (external_symbolizer_ == 0) {
const char *str = SendCommand(false, module_name, module_offset);
if (str == 0) {
// External symbolizer was not initialized or failed. Fill only data
// about module name and offset.
AddressInfo *info = &frames[0];
...@@ -251,17 +193,66 @@ class Symbolizer {
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
return 1;
}
// Otherwise, the data was filled by external symbolizer.
return actual_frames;
uptr frame_id = 0;
for (frame_id = 0; frame_id < max_frames; frame_id++) {
AddressInfo *info = &frames[frame_id];
char *function_name = 0;
str = ExtractToken(str, "\n", &function_name);
CHECK(function_name);
if (function_name[0] == '\0') {
// There are no more frames.
break;
}
info->Clear();
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
info->function = function_name;
// Parse <file>:<line>:<column> buffer.
char *file_line_info = 0;
str = ExtractToken(str, "\n", &file_line_info);
CHECK(file_line_info);
const char *line_info = ExtractToken(file_line_info, ":", &info->file);
line_info = ExtractInt(line_info, ":", &info->line);
line_info = ExtractInt(line_info, "", &info->column);
InternalFree(file_line_info);
// Functions and filenames can be "??", in which case we write 0
// to address info to mark that names are unknown.
if (0 == internal_strcmp(info->function, "??")) {
InternalFree(info->function);
info->function = 0;
}
if (0 == internal_strcmp(info->file, "??")) {
InternalFree(info->file);
info->file = 0;
}
}
if (frame_id == 0) {
// Make sure we return at least one frame.
AddressInfo *info = &frames[0];
info->Clear();
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
frame_id = 1;
}
return frame_id;
}
bool SymbolizeData(uptr addr, AddressInfo *frame) {
bool SymbolizeData(uptr addr, DataInfo *info) {
LoadedModule *module = FindModuleForAddress(addr);
if (module == 0)
return false;
const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address();
frame->FillAddressAndModuleInfo(addr, module_name, module_offset);
internal_memset(info, 0, sizeof(*info));
info->address = addr;
info->module = internal_strdup(module_name);
info->module_offset = module_offset;
const char *str = SendCommand(true, module_name, module_offset);
if (str == 0)
return true;
str = ExtractToken(str, "\n", &info->name);
str = ExtractUptr(str, " ", &info->start);
str = ExtractUptr(str, "\n", &info->size);
info->start += module->base_address();
return true;
}
...@@ -276,6 +267,29 @@ class Symbolizer {
}
private:
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
if (external_symbolizer_ == 0) {
ReportExternalSymbolizerError(
"WARNING: Trying to symbolize code, but external "
"symbolizer is not initialized!\n");
return 0;
}
for (;;) {
char *reply = external_symbolizer_->SendCommand(is_data, module_name,
module_offset);
if (reply)
return reply;
// Try to restart symbolizer subprocess. If we don't succeed, forget
// about it and don't try to use it later.
if (!external_symbolizer_->Restart()) {
ReportExternalSymbolizerError(
"WARNING: Failed to use and restart external symbolizer!\n");
external_symbolizer_ = 0;
return 0;
}
}
}
LoadedModule *FindModuleForAddress(uptr address) {
if (modules_ == 0) {
modules_ = (LoadedModule*)(symbolizer_allocator.Allocate(
...@@ -316,8 +330,8 @@ uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
return symbolizer.SymbolizeCode(address, frames, max_frames);
}
bool SymbolizeData(uptr address, AddressInfo *frame) {
bool SymbolizeData(uptr address, DataInfo *info) {
return symbolizer.SymbolizeData(address, frame);
return symbolizer.SymbolizeData(address, info);
}
bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
......
...@@ -51,12 +51,21 @@ struct AddressInfo {
}
};
struct DataInfo {
uptr address;
char *module;
uptr module_offset;
char *name;
uptr start;
uptr size;
};
// Fills at most "max_frames" elements of "frames" with descriptions
// for a given address (in all inlined functions). Returns the number
// of descriptions actually filled.
// This function should NOT be called from two threads simultaneously.
uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
bool SymbolizeData(uptr address, AddressInfo *frame);
bool SymbolizeData(uptr address, DataInfo *info);
// Attempts to demangle the provided C++ mangled name.
const char *Demangle(const char *Name);
......
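The SendCommand/DataInfo changes above pin down the pipe protocol: a code query sends "<module> 0x<offset>", a data query sends "DATA <module> 0x<offset>", and a data reply consists of a symbol-name line followed by "<start> <size>". A standalone sketch of parsing such a reply (illustrative; the real code uses ExtractToken/ExtractUptr):
#include <cstdio>
#include <cstring>
struct DataInfoSketch {
  char name[128];
  unsigned long start, size;
};
static bool ParseDataReply(const char *reply, DataInfoSketch *info) {
  const char *nl = strchr(reply, '\n');
  if (!nl) return false;
  size_t len = nl - reply;
  if (len >= sizeof(info->name)) len = sizeof(info->name) - 1;
  memcpy(info->name, reply, len);
  info->name[len] = 0;
  return sscanf(nl + 1, "%lu %lu", &info->start, &info->size) == 2;
}
int main() {
  DataInfoSketch info;
  if (ParseDataReply("g_counter\n4096 8\n", &info))
    printf("%s: module offset %lu, %lu bytes\n",
           info.name, info.start, info.size);
  return 0;
}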
...@@ -18,6 +18,8 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
...@@ -224,6 +226,42 @@ int internal_sched_yield() {
return 0;
}
// ---------------------- BlockingMutex ---------------- {{{1
enum LockState {
LOCK_UNINITIALIZED = 0,
LOCK_READY = -1,
};
BlockingMutex::BlockingMutex(LinkerInitialized li) {
// FIXME: see comments in BlockingMutex::Lock() for the details.
CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
owner_ = LOCK_READY;
}
void BlockingMutex::Lock() {
if (owner_ == LOCK_UNINITIALIZED) {
// FIXME: hm, global BlockingMutex objects are not initialized?!?
// This might be a side effect of the clang+cl+link Frankenbuild...
new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));
// FIXME: If it turns out the linker doesn't invoke our
// constructors, we should probably manually Lock/Unlock all the global
// locks while we're starting in one thread to avoid double-init races.
}
EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
CHECK(owner_ == LOCK_READY);
owner_ = GetThreadSelf();
}
void BlockingMutex::Unlock() {
CHECK(owner_ == GetThreadSelf());
owner_ = LOCK_READY;
LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}
} // namespace __sanitizer
#endif // _WIN32
...@@ -162,6 +162,12 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
MemoryRead8Byte(thr, pc, (uptr)d);
}
void FdAccess(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
MemoryRead8Byte(thr, pc, (uptr)d);
}
void FdClose(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
......
...@@ -39,6 +39,7 @@ namespace __tsan {
void FdInit();
void FdAcquire(ThreadState *thr, uptr pc, int fd);
void FdRelease(ThreadState *thr, uptr pc, int fd);
void FdAccess(ThreadState *thr, uptr pc, int fd);
void FdClose(ThreadState *thr, uptr pc, int fd);
void FdFileCreate(ThreadState *thr, uptr pc, int fd);
void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd);
......
...@@ -1239,33 +1239,6 @@ TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
return res;
}
TSAN_INTERCEPTOR(long_t, read, int fd, void *buf, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(read, fd, buf, sz);
int res = REAL(read)(fd, buf, sz);
if (res >= 0 && fd >= 0) {
FdAcquire(thr, pc, fd);
}
return res;
}
TSAN_INTERCEPTOR(long_t, pread, int fd, void *buf, long_t sz, unsigned off) {
SCOPED_TSAN_INTERCEPTOR(pread, fd, buf, sz, off);
int res = REAL(pread)(fd, buf, sz, off);
if (res >= 0 && fd >= 0) {
FdAcquire(thr, pc, fd);
}
return res;
}
TSAN_INTERCEPTOR(long_t, pread64, int fd, void *buf, long_t sz, u64 off) {
SCOPED_TSAN_INTERCEPTOR(pread64, fd, buf, sz, off);
int res = REAL(pread64)(fd, buf, sz, off);
if (res >= 0 && fd >= 0) {
FdAcquire(thr, pc, fd);
}
return res;
}
TSAN_INTERCEPTOR(long_t, readv, int fd, void *vec, int cnt) {
SCOPED_TSAN_INTERCEPTOR(readv, fd, vec, cnt);
int res = REAL(readv)(fd, vec, cnt);
...@@ -1284,30 +1257,6 @@ TSAN_INTERCEPTOR(long_t, preadv64, int fd, void *vec, int cnt, u64 off) {
return res;
}
TSAN_INTERCEPTOR(long_t, write, int fd, void *buf, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(write, fd, buf, sz);
if (fd >= 0)
FdRelease(thr, pc, fd);
int res = REAL(write)(fd, buf, sz);
return res;
}
TSAN_INTERCEPTOR(long_t, pwrite, int fd, void *buf, long_t sz, unsigned off) {
SCOPED_TSAN_INTERCEPTOR(pwrite, fd, buf, sz, off);
if (fd >= 0)
FdRelease(thr, pc, fd);
int res = REAL(pwrite)(fd, buf, sz, off);
return res;
}
TSAN_INTERCEPTOR(long_t, pwrite64, int fd, void *buf, long_t sz, u64 off) {
SCOPED_TSAN_INTERCEPTOR(pwrite64, fd, buf, sz, off);
if (fd >= 0)
FdRelease(thr, pc, fd);
int res = REAL(pwrite64)(fd, buf, sz, off);
return res;
}
TSAN_INTERCEPTOR(long_t, writev, int fd, void *vec, int cnt) {
SCOPED_TSAN_INTERCEPTOR(writev, fd, vec, cnt);
if (fd >= 0)
...@@ -1449,6 +1398,8 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
FdRelease(thr, pc, epfd);
}
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
if (fd >= 0)
FdAccess(thr, pc, fd);
return res;
}
...@@ -1641,6 +1592,33 @@ TSAN_INTERCEPTOR(int, fork, int fake) {
return pid;
}
struct TsanInterceptorContext {
ThreadState *thr;
const uptr caller_pc;
const uptr pc;
};
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext*)ctx)->thr, \
((TsanInterceptorContext*)ctx)->pc, \
(uptr)ptr, size, true)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext*)ctx)->thr, \
((TsanInterceptorContext*)ctx)->pc, \
(uptr)ptr, size, false)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__) \
TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
ctx = (void*)&_ctx; \
(void)ctx;
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
FdAcquire(((TsanInterceptorContext*)ctx)->thr, pc, fd)
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
FdRelease(((TsanInterceptorContext*)ctx)->thr, pc, fd)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
ThreadSetName(((TsanInterceptorContext*)ctx)->thr, name)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
namespace __tsan {
void ProcessPendingSignals(ThreadState *thr) {
...@@ -1675,6 +1653,7 @@ void ProcessPendingSignals(ThreadState *thr) {
(uptr)sigactions[sig].sa_sigaction :
(uptr)sigactions[sig].sa_handler;
stack.Init(&pc, 1);
Lock l(&ctx->thread_mtx);
ScopedReport rep(ReportTypeErrnoInSignal);
if (!IsFiredSuppression(ctx, rep, stack)) {
rep.AddStack(&stack);
...@@ -1703,6 +1682,8 @@ void InitializeInterceptors() {
REAL(memcpy) = internal_memcpy;
REAL(memcmp) = internal_memcmp;
SANITIZER_COMMON_INTERCEPTORS_INIT;
TSAN_INTERCEPT(longjmp);
TSAN_INTERCEPT(siglongjmp);
...@@ -1806,14 +1787,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
TSAN_INTERCEPT(read);
TSAN_INTERCEPT(pread);
TSAN_INTERCEPT(pread64);
TSAN_INTERCEPT(readv);
TSAN_INTERCEPT(preadv64);
TSAN_INTERCEPT(write);
TSAN_INTERCEPT(pwrite);
TSAN_INTERCEPT(pwrite64);
TSAN_INTERCEPT(writev);
TSAN_INTERCEPT(pwritev64);
TSAN_INTERCEPT(send);
......
...@@ -46,6 +46,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
Context *ctx = CTX();
StackTrace stack;
stack.ObtainCurrent(thr, pc);
Lock l(&ctx->thread_mtx);
ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) {
rep.AddStack(&stack);
......
...@@ -102,16 +102,17 @@ static void PrintMop(const ReportMop *mop, bool first) {
static void PrintLocation(const ReportLocation *loc) {
char thrbuf[kThreadBufSize];
if (loc->type == ReportLocationGlobal) {
Printf(" Location is global '%s' of size %zu at %zx %s:%d (%s+%p)\n\n",
loc->name, loc->size, loc->addr, loc->file, loc->line,
loc->module, loc->offset);
Printf(" Location is global '%s' of size %zu at %zx (%s+%p)\n\n",
loc->name, loc->size, loc->addr, loc->module, loc->offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
loc->size, loc->addr, thread_name(thrbuf, loc->tid));
PrintStack(loc->stack);
} else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s\n\n", thread_name(thrbuf, loc->tid));
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
} else if (loc->type == ReportLocationTLS) {
Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
} else if (loc->type == ReportLocationFD) {
Printf(" Location is file descriptor %d created by %s at:\n",
loc->fd, thread_name(thrbuf, loc->tid));
......
...@@ -56,6 +56,7 @@ enum ReportLocationType {
ReportLocationGlobal,
ReportLocationHeap,
ReportLocationStack,
ReportLocationTLS,
ReportLocationFD
};
......
...@@ -53,6 +53,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
s->is_broken = true;
Lock l(&ctx->thread_mtx);
ScopedReport rep(ReportTypeMutexDestroyLocked);
rep.AddMutex(s);
StackTrace trace;
......
...@@ -119,6 +119,7 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) {
ScopedReport::ScopedReport(ReportType typ) {
ctx_ = CTX();
ctx_->thread_mtx.CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
...@@ -185,15 +186,37 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
#ifndef TSAN_GO
static ThreadContext *FindThread(int unique_id) {
CTX()->thread_mtx.CheckLocked();
Context *ctx = CTX();
ctx->thread_mtx.CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
ThreadContext *tctx = CTX()->threads[i];
ThreadContext *tctx = ctx->threads[i];
if (tctx && tctx->unique_id == unique_id) {
return tctx;
}
}
return 0;
}
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
Context *ctx = CTX();
ctx->thread_mtx.CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
ThreadContext *tctx = ctx->threads[i];
if (tctx == 0 || tctx->status != ThreadStatusRunning)
continue;
ThreadState *thr = tctx->thr;
CHECK(thr);
if (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) {
*is_stack = true;
return tctx;
}
if (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size) {
*is_stack = false;
return tctx;
}
}
return 0;
}
#endif
void ScopedReport::AddMutex(const SyncVar *s) {
...@@ -274,25 +297,21 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
AddThread(tctx);
return;
}
#endif
ReportStack *symb = SymbolizeData(addr);
if (symb) {
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
loc->type = ReportLocationGlobal;
loc->addr = addr;
loc->size = size;
loc->module = symb->module ? internal_strdup(symb->module) : 0;
loc->offset = symb->offset;
loc->tid = 0;
loc->name = symb->func ? internal_strdup(symb->func) : 0;
loc->file = symb->file ? internal_strdup(symb->file) : 0;
loc->line = symb->line;
loc->stack = 0;
internal_free(symb);
return;
}
bool is_stack = false;
if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
loc->tid = tctx->tid;
AddThread(tctx);
}
ReportLocation *loc = SymbolizeData(addr);
if (loc) {
rep_->locs.PushBack(loc);
return;
}
#endif
}
#ifndef TSAN_GO
...@@ -386,7 +405,7 @@ static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
uptr addr_min, uptr addr_max) {
Context *ctx = CTX();
bool equal_stack = false;
RacyStacks hash = {};
RacyStacks hash;
if (flags()->suppress_equal_stacks) {
hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
......
...@@ -202,6 +202,7 @@ void StatOutput(u64 *stat) {
name[StatInt_pipe] = " pipe ";
name[StatInt_pipe2] = " pipe2 ";
name[StatInt_read] = " read ";
name[StatInt_prctl] = " prctl ";
name[StatInt_pread] = " pread ";
name[StatInt_pread64] = " pread64 ";
name[StatInt_readv] = " readv ";
...@@ -233,6 +234,12 @@ void StatOutput(u64 *stat) {
name[StatInt_nanosleep] = " nanosleep ";
name[StatInt_gettimeofday] = " gettimeofday ";
name[StatInt_fork] = " fork ";
name[StatInt_vscanf] = " vscanf ";
name[StatInt_vsscanf] = " vsscanf ";
name[StatInt_vfscanf] = " vfscanf ";
name[StatInt_scanf] = " scanf ";
name[StatInt_sscanf] = " sscanf ";
name[StatInt_fscanf] = " fscanf ";
name[StatAnnotation] = "Dynamic annotations ";
name[StatAnnotateHappensBefore] = " HappensBefore ";
......
...@@ -197,6 +197,7 @@ enum StatType {
StatInt_pipe,
StatInt_pipe2,
StatInt_read,
StatInt_prctl,
StatInt_pread,
StatInt_pread64,
StatInt_readv,
...@@ -232,6 +233,12 @@ enum StatType {
StatInt_nanosleep,
StatInt_gettimeofday,
StatInt_fork,
StatInt_vscanf,
StatInt_vsscanf,
StatInt_vfscanf,
StatInt_scanf,
StatInt_sscanf,
StatInt_fscanf,
// Dynamic annotations.
StatAnnotation,
......
...@@ -27,21 +27,24 @@ ReportStack *NewReportStackEntry(uptr addr) {
return ent;
}
// Strip module path to make output shorter.
static char *StripModuleName(const char *module) {
if (module == 0)
return 0;
const char *short_module_name = internal_strrchr(module, '/');
if (short_module_name)
short_module_name += 1;
else
short_module_name = module;
return internal_strdup(short_module_name);
}
static ReportStack *NewReportStackEntry(const AddressInfo &info) {
ReportStack *ent = NewReportStackEntry(info.address);
if (info.module) {
// Strip module path to make output shorter.
const char *short_module_name = internal_strrchr(info.module, '/');
if (short_module_name)
short_module_name += 1;
else
short_module_name = info.module;
ent->module = internal_strdup(short_module_name);
}
ent->module = StripModuleName(info.module);
ent->offset = info.module_offset;
if (info.function) {
if (info.function)
ent->func = internal_strdup(info.function);
}
if (info.file)
ent->file = internal_strdup(info.file);
ent->line = info.line;
...@@ -76,14 +79,23 @@ ReportStack *SymbolizeCode(uptr addr) {
return SymbolizeCodeAddr2Line(addr);
}
ReportStack *SymbolizeData(uptr addr) {
if (flags()->external_symbolizer_path[0]) {
AddressInfo frame;
if (!__sanitizer::SymbolizeData(addr, &frame))
return 0;
return NewReportStackEntry(frame);
}
return SymbolizeDataAddr2Line(addr);
}
ReportLocation *SymbolizeData(uptr addr) {
if (flags()->external_symbolizer_path[0] == 0)
return 0;
DataInfo info;
if (!__sanitizer::SymbolizeData(addr, &info))
return 0;
ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack,
sizeof(ReportLocation));
internal_memset(ent, 0, sizeof(*ent));
ent->type = ReportLocationGlobal;
ent->module = StripModuleName(info.module);
ent->offset = info.module_offset;
if (info.name)
ent->name = internal_strdup(info.name);
ent->addr = info.start;
ent->size = info.size;
return ent;
}
} // namespace __tsan
...@@ -17,10 +17,9 @@
namespace __tsan {
ReportStack *SymbolizeCode(uptr addr);
ReportStack *SymbolizeData(uptr addr);
ReportLocation *SymbolizeData(uptr addr);
ReportStack *SymbolizeCodeAddr2Line(uptr addr);
ReportStack *SymbolizeDataAddr2Line(uptr addr);
ReportStack *NewReportStackEntry(uptr addr);
......