Commit df77f0e4 by Kostya Serebryany
Committed by Kostya Serebryany

libsanitizer merge from upstream r196090

From-SVN: r205695
parent 649d196d
2013-12-05 Kostya Serebryany <kcc@google.com>
* c-c++-common/asan/null-deref-1.c: Update the test
to match the fresh asan run-time.
2013-12-05  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/59374
......
@@ -18,6 +18,5 @@ int main()
 /* { dg-output "ERROR: AddressSanitizer:? SEGV on unknown address\[^\n\r]*" } */
 /* { dg-output "0x\[0-9a-f\]+ \[^\n\r]*pc 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*AddressSanitizer can not provide additional info.*(\n|\r\n|\r)" } */
 /* { dg-output " #0 0x\[0-9a-f\]+ (in \[^\n\r]*NullDeref\[^\n\r]* (\[^\n\r]*null-deref-1.c:10|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
 /* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*null-deref-1.c:15|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
2013-12-05 Kostya Serebryany <kcc@google.com>
* All source files: Merge from upstream r196090.
* tsan/Makefile.am (tsan_files): Added new files.
* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Added new files.
* sanitizer_common/Makefile.in: Regenerate.
* lsan/Makefile.am (lsan_files): Added new files.
* lsan/Makefile.in: Regenerate.
2013-11-29  Jakub Jelinek  <jakub@redhat.com>
	    Yury Gribov  <y.gribov@samsung.com>
......
-191666
+196090
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
@@ -33,10 +33,11 @@ void InitializeAllocator();
 class AsanChunkView {
  public:
   explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
-  bool IsValid() { return chunk_ != 0; }
-  uptr Beg();       // first byte of user memory.
-  uptr End();       // last byte of user memory.
-  uptr UsedSize();  // size requested by the user.
+  bool IsValid();   // Checks if AsanChunkView points to a valid allocated
+                    // or quarantined chunk.
+  uptr Beg();       // First byte of user memory.
+  uptr End();       // Last byte of user memory.
+  uptr UsedSize();  // Size requested by the user.
   uptr AllocTid();
   uptr FreeTid();
   void GetAllocStack(StackTrace *stack);
@@ -88,16 +89,12 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 };

 struct AsanThreadLocalMallocStorage {
-  explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
-      { }
-  AsanThreadLocalMallocStorage() {
-    CHECK(REAL(memset));
-    REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
-  }
   uptr quarantine_cache[16];
   uptr allocator2_cache[96 * (512 * 8 + 16)];  // Opaque.
   void CommitBack();
+ private:
+  // These objects are allocated via mmap() and are zero-initialized.
+  AsanThreadLocalMallocStorage() {}
 };

 void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
@@ -112,7 +109,7 @@ void *asan_pvalloc(uptr size, StackTrace *stack);
 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         StackTrace *stack);
-uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);
+uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
 uptr asan_mz_size(const void *ptr);
 void asan_mz_force_lock();
......
@@ -92,7 +92,7 @@ AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
 static Allocator allocator;
 static const uptr kMaxAllowedMallocSize =
-    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);
+    FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
 static const uptr kMaxThreadLocalQuarantine =
     FIRST_32_SECOND_64(1 << 18, 1 << 20);
@@ -184,14 +184,19 @@ COMPILER_CHECK(kChunkHeader2Size <= 16);
 struct AsanChunk: ChunkBase {
   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
-  uptr UsedSize() {
+  uptr UsedSize(bool locked_version = false) {
     if (user_requested_size != SizeClassMap::kMaxSize)
       return user_requested_size;
-    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
+    return *reinterpret_cast<uptr *>(
+        allocator.GetMetaData(AllocBeg(locked_version)));
   }
-  void *AllocBeg() {
-    if (from_memalign)
+  void *AllocBeg(bool locked_version = false) {
+    if (from_memalign) {
+      if (locked_version)
+        return allocator.GetBlockBeginFastLocked(
+            reinterpret_cast<void *>(this));
       return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
+    }
     return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
   }
   // If we don't use stack depot, we store the alloc/free stack traces
@@ -211,11 +216,14 @@ struct AsanChunk: ChunkBase {
     uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
     return (available - kChunkHeader2Size) / sizeof(u32);
   }
-  bool AddrIsInside(uptr addr) {
-    return (addr >= Beg()) && (addr < Beg() + UsedSize());
+  bool AddrIsInside(uptr addr, bool locked_version = false) {
+    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
   }
 };

+bool AsanChunkView::IsValid() {
+  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
+}
 uptr AsanChunkView::Beg() { return chunk_->Beg(); }
 uptr AsanChunkView::End() { return Beg() + UsedSize(); }
 uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
@@ -226,25 +234,16 @@ static void GetStackTraceFromId(u32 id, StackTrace *stack) {
   CHECK(id);
   uptr size = 0;
   const uptr *trace = StackDepotGet(id, &size);
-  CHECK_LT(size, kStackTraceMax);
-  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
-  stack->size = size;
+  CHECK(trace);
+  stack->CopyFrom(trace, size);
 }

 void AsanChunkView::GetAllocStack(StackTrace *stack) {
-  if (flags()->use_stack_depot)
-    GetStackTraceFromId(chunk_->alloc_context_id, stack);
-  else
-    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
-                                chunk_->AllocStackSize());
+  GetStackTraceFromId(chunk_->alloc_context_id, stack);
 }

 void AsanChunkView::GetFreeStack(StackTrace *stack) {
-  if (flags()->use_stack_depot)
-    GetStackTraceFromId(chunk_->free_context_id, stack);
-  else
-    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
-                                chunk_->FreeStackSize());
+  GetStackTraceFromId(chunk_->free_context_id, stack);
 }

 struct QuarantineCallback;
@@ -390,12 +389,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
     meta[1] = chunk_beg;
   }

-  if (fl.use_stack_depot) {
-    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
-  } else {
-    m->alloc_context_id = 0;
-    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
-  }
+  m->alloc_context_id = StackDepotPut(stack->trace, stack->size);

   uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
   // Unpoison the bulk of the memory region.
@@ -404,7 +398,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
   // Deal with the end of the region if size is not aligned to granularity.
   if (size != size_rounded_down_to_granularity && fl.poison_heap) {
     u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
-    *shadow = size & (SHADOW_GRANULARITY - 1);
+    *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
   }

   AsanStats &thread_stats = GetCurrentThreadStats();
@@ -463,12 +457,7 @@ static void QuarantineChunk(AsanChunk *m, void *ptr,
   CHECK_EQ(m->free_tid, kInvalidTid);
   AsanThread *t = GetCurrentThread();
   m->free_tid = t ? t->tid() : 0;
-  if (flags()->use_stack_depot) {
-    m->free_context_id = StackDepotPut(stack->trace, stack->size);
-  } else {
-    m->free_context_id = 0;
-    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
-  }
+  m->free_context_id = StackDepotPut(stack->trace, stack->size);

   // Poison the region.
   PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
@@ -673,12 +662,13 @@ int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
   return 0;
 }

-uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
-  CHECK(stack);
+uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
   if (ptr == 0) return 0;
   uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
-  if (flags()->check_malloc_usable_size && (usable_size == 0))
-    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
+  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
+    GET_STACK_TRACE_FATAL(pc, bp);
+    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
+  }
   return usable_size;
 }
@@ -718,7 +708,8 @@ uptr PointsIntoChunk(void* p) {
   __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
   if (!m) return 0;
   uptr chunk = m->Beg();
-  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
+  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
+      m->AddrIsInside(addr, /*locked_version=*/true))
     return chunk;
   return 0;
 }
@@ -751,7 +742,7 @@ void LsanMetadata::set_tag(ChunkTag value) {
 uptr LsanMetadata::requested_size() const {
   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->UsedSize();
+  return m->UsedSize(/*locked_version=*/true);
 }

 u32 LsanMetadata::stack_trace_id() const {
......
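A side effect of this merge, visible throughout the allocator hunks above, is that the use_stack_depot escape hatch is gone: allocation and free stacks are always interned in the stack depot, and only a 32-bit id is kept per chunk (StackDepotPut at malloc/free time, StackDepotGet at report time). The following self-contained toy is written purely for illustration (it is not taken from the patch and does not use the real depot); it only shows the shape of that intern-once/look-up-later pattern:

    // Toy illustration of the "stack depot" pattern: intern a stack trace
    // once, keep only a 32-bit id per chunk, resolve it again at report time.
    // Simplified stand-in, not the real StackDepotPut/StackDepotGet.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    typedef uintptr_t uptr;
    typedef uint32_t u32;

    static std::vector<std::vector<uptr> > depot;  // id -> trace

    static u32 ToyStackDepotPut(const uptr *trace, uptr size) {
      depot.push_back(std::vector<uptr>(trace, trace + size));
      return static_cast<u32>(depot.size());       // ids start at 1, 0 == "none"
    }

    static const uptr *ToyStackDepotGet(u32 id, uptr *size) {
      if (id == 0 || id > depot.size()) { *size = 0; return 0; }
      *size = depot[id - 1].size();
      return depot[id - 1].data();
    }

    int main() {
      uptr fake_trace[3] = {0x401000, 0x401234, 0x402000};
      u32 alloc_context_id = ToyStackDepotPut(fake_trace, 3);  // stored in chunk

      uptr n = 0;
      const uptr *t = ToyStackDepotGet(alloc_context_id, &n);  // at report time
      for (uptr i = 0; i < n; i++)
        std::printf("  #%zu 0x%zx\n", (size_t)i, (size_t)t[i]);
    }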
@@ -130,6 +130,8 @@ extern "C" {
   }
 }

+WRAP_V_V(__asan_handle_no_return)
+
 WRAP_V_W(__asan_report_store1)
 WRAP_V_W(__asan_report_store2)
 WRAP_V_W(__asan_report_store4)
......
@@ -43,7 +43,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
   FakeStack *res = reinterpret_cast<FakeStack *>(
       MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
   res->stack_size_log_ = stack_size_log;
-  if (flags()->verbosity) {
+  if (common_flags()->verbosity) {
     u8 *p = reinterpret_cast<u8 *>(res);
     Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
            GetCurrentTidOrInvalid(), p,
@@ -132,6 +132,20 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
   needs_gc_ = false;
 }

+void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
+  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
+    u8 *flags = GetFlags(stack_size_log(), class_id);
+    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
+         i++) {
+      if (flags[i] == 0) continue;  // not allocated.
+      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
+          GetFrame(stack_size_log(), class_id, i));
+      uptr begin = reinterpret_cast<uptr>(ff);
+      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
+    }
+  }
+}
+
 #if SANITIZER_LINUX && !SANITIZER_ANDROID
 static THREADLOCAL FakeStack *fake_stack_tls;
......
@@ -146,6 +146,8 @@ class FakeStack {
   void HandleNoReturn();
   void GC(uptr real_stack);

+  void ForEachFakeFrame(RangeIteratorCallback callback, void *arg);
+
  private:
   FakeStack() { }
   static const uptr kFlagsOffset = 4096;  // This is were the flags begin.
......
@@ -30,8 +30,6 @@ struct Flags {
   // Lower value may reduce memory usage but increase the chance of
   // false negatives.
   int quarantine_size;
-  // Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
-  int verbosity;
   // Size (in bytes) of redzones around heap objects.
   // Requirement: redzone >= 32, is a power of two.
   int redzone;
@@ -83,6 +81,9 @@ struct Flags {
   bool print_legend;
   // If set, prints ASan exit stats even after program terminates successfully.
   bool atexit;
+  // If set, coverage information will be dumped at shutdown time if the
+  // appropriate instrumentation was enabled.
+  bool coverage;
   // By default, disable core dumper on 64-bit - it makes little sense
   // to dump 16T+ core.
   bool disable_core;
@@ -96,10 +97,11 @@ struct Flags {
   // Poison (or not) the heap memory on [de]allocation. Zero value is useful
   // for benchmarking the allocator or instrumentator.
   bool poison_heap;
+  // If true, poison partially addressable 8-byte aligned words (default=true).
+  // This flag affects heap and global buffers, but not stack buffers.
+  bool poison_partial;
   // Report errors on malloc/delete, new/free, new/delete[], etc.
   bool alloc_dealloc_mismatch;
-  // Use stack depot instead of storing stacks in the redzones.
-  bool use_stack_depot;
   // If true, assume that memcmp(p1, p2, n) always reads n bytes before
   // comparing p1 and p2.
   bool strict_memcmp;
......
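As background (an assumption about usage, not something added by the patch itself): the members of this struct are filled in from the ASAN_OPTIONS environment variable by ParseFlagsFromString() in asan_rtl.cc further down, so the new knobs would typically be exercised with something like ASAN_OPTIONS=coverage=1:poison_partial=0 ./a.out, while verbosity is now stored in the shared common_flags() rather than in this ASan-specific struct.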
@@ -92,15 +92,13 @@ static void RegisterGlobal(const Global *g) {
   CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
   if (flags()->poison_heap)
     PoisonRedZones(*g);
-  ListOfGlobals *l =
-      (ListOfGlobals*)allocator_for_globals.Allocate(sizeof(ListOfGlobals));
+  ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
   l->g = g;
   l->next = list_of_all_globals;
   list_of_all_globals = l;
   if (g->has_dynamic_init) {
     if (dynamic_init_globals == 0) {
-      void *mem = allocator_for_globals.Allocate(sizeof(VectorOfGlobals));
-      dynamic_init_globals = new(mem)
+      dynamic_init_globals = new(allocator_for_globals)
           VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
     }
     DynInitGlobal dyn_global = { *g, false };
......
@@ -92,6 +92,11 @@ void SetThreadName(const char *name) {
     asanThreadRegistry().SetThreadName(t->tid(), name);
 }

+int OnExit() {
+  // FIXME: ask frontend whether we need to return failure.
+  return 0;
+}
+
 }  // namespace __asan

 // ---------------------- Wrappers ---------------- {{{1
@@ -100,6 +105,19 @@ using namespace __asan;  // NOLINT
 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)

+#if !SANITIZER_MAC
+#define ASAN_INTERCEPT_FUNC(name) \
+  do { \
+    if ((!INTERCEPT_FUNCTION(name) || !REAL(name)) && \
+        common_flags()->verbosity > 0) \
+      Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
+  } while (0)
+#else
+// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
+#define ASAN_INTERCEPT_FUNC(name)
+#endif  // SANITIZER_MAC
+
+#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
 #define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
   do { \
   } while (false)
@@ -124,16 +142,28 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
   do { \
   } while (false)
 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
+// Should be asanThreadRegistry().SetThreadNameByUserId(thread, name)
+// But asan does not remember UserId's for threads (pthread_t);
+// and remembers all ever existed threads, so the linear search by UserId
+// can be slow.
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+  do { \
+  } while (false)
 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
 #include "sanitizer_common/sanitizer_common_interceptors.inc"

 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s)
 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
   do { \
+    (void)(p); \
+    (void)(s); \
   } while (false)
 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
   do { \
+    (void)(p); \
+    (void)(s); \
   } while (false)
 #include "sanitizer_common/sanitizer_common_syscalls.inc"
@@ -144,8 +174,6 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
 }

 #if ASAN_INTERCEPT_PTHREAD_CREATE
-extern "C" int pthread_attr_getdetachstate(void *attr, int *v);
-
 INTERCEPTOR(int, pthread_create, void *thread,
             void *attr, void *(*start_routine)(void*), void *arg) {
   EnsureMainThreadIDIsCorrect();
@@ -155,7 +183,7 @@ INTERCEPTOR(int, pthread_create, void *thread,
   GET_STACK_TRACE_THREAD;
   int detached = 0;
   if (attr != 0)
-    pthread_attr_getdetachstate(attr, &detached);
+    REAL(pthread_attr_getdetachstate)(attr, &detached);
   u32 current_tid = GetCurrentTidOrInvalid();
   AsanThread *t = AsanThread::Create(start_routine, arg);
@@ -256,7 +284,7 @@ static void MlockIsUnsupported() {
   static bool printed = false;
   if (printed) return;
   printed = true;
-  if (flags()->verbosity > 0) {
+  if (common_flags()->verbosity > 0) {
     Printf("INFO: AddressSanitizer ignores "
            "mlock/mlockall/munlock/munlockall\n");
   }
@@ -645,16 +673,6 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
 }
 #endif  // ASAN_INTERCEPT___CXA_ATEXIT

-#if !SANITIZER_MAC
-#define ASAN_INTERCEPT_FUNC(name) do { \
-      if (!INTERCEPT_FUNCTION(name) && flags()->verbosity > 0) \
-        Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
-    } while (0)
-#else
-// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
-#define ASAN_INTERCEPT_FUNC(name)
-#endif  // SANITIZER_MAC
-
 #if SANITIZER_WINDOWS
 INTERCEPTOR_WINAPI(DWORD, CreateThread,
                    void* security, uptr stack_size,
@@ -767,7 +785,7 @@ void InitializeAsanInterceptors() {
   InitializeWindowsInterceptors();
 #endif

-  if (flags()->verbosity > 0) {
+  if (common_flags()->verbosity > 0) {
     Report("AddressSanitizer: libc interceptors initialized\n");
   }
 }
......
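The interceptor hunks above follow sanitizer_common's usual customization scheme: the tool defines a family of COMMON_INTERCEPTOR_*/COMMON_SYSCALL_* hook macros (here gaining COMMON_INTERCEPT_FUNCTION, COMMON_INTERCEPTOR_ON_EXIT and a deliberately empty SET_PTHREAD_NAME) and then textually includes the shared .inc files, which expand into the real interceptor bodies in terms of those hooks. The sketch below is a made-up, self-contained analogue of that "define the hooks, then include the generic body" idea; none of the names are the real ones from sanitizer_common_interceptors.inc:

    // Illustrative only: a "shared" wrapper written purely in terms of hook
    // macros that the "tool" defines beforehand.
    #include <cstdio>
    #include <cstring>

    // --- "tool" side: define the customization points ----------------------
    #define COMMON_INTERCEPTOR_ENTER(func) \
      std::printf("tool: entering %s\n", #func)
    #define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) \
      std::printf("tool: checking read of %zu bytes\n", (size_t)(size))

    // --- "shared .inc" side: generic wrapper using only the hooks ----------
    size_t my_strlen(const char *s) {
      COMMON_INTERCEPTOR_ENTER(strlen);
      size_t n = std::strlen(s);
      COMMON_INTERCEPTOR_READ_RANGE(s, n + 1);
      return n;
    }

    int main() {
      return (int)my_strlen("hello") - 5;  // prints the hook messages, exits 0
    }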
@@ -96,6 +96,7 @@ void StopInitOrderChecking();
 void AsanTSDInit(void (*destructor)(void *tsd));
 void *AsanTSDGet();
 void AsanTSDSet(void *tsd);
+void PlatformTSDDtor(void *tsd);

 void AppendToErrorMessageBuffer(const char *buffer);
@@ -133,6 +134,7 @@ const int kAsanStackPartialRedzoneMagic = 0xf4;
 const int kAsanStackAfterReturnMagic = 0xf5;
 const int kAsanInitializationOrderMagic = 0xf6;
 const int kAsanUserPoisonedMemoryMagic = 0xf7;
+const int kAsanContiguousContainerOOBMagic = 0xfc;
 const int kAsanStackUseAfterScopeMagic = 0xf8;
 const int kAsanGlobalRedzoneMagic = 0xf9;
 const int kAsanInternalHeapMagic = 0xfe;
......
@@ -56,6 +56,12 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
   *pc = ucontext->uc_mcontext.arm_pc;
   *bp = ucontext->uc_mcontext.arm_fp;
   *sp = ucontext->uc_mcontext.arm_sp;
+# elif defined(__hppa__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.sc_iaoq[0];
+  /* GCC uses %r3 whenever a frame pointer is needed. */
+  *bp = ucontext->uc_mcontext.sc_gr[3];
+  *sp = ucontext->uc_mcontext.sc_gr[30];
 # elif defined(__x86_64__)
   ucontext_t *ucontext = (ucontext_t*)context;
   *pc = ucontext->uc_mcontext.gregs[REG_RIP];
......
@@ -172,7 +172,7 @@ void MaybeReexec() {
     // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
     setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
   }
-  if (flags()->verbosity >= 1) {
+  if (common_flags()->verbosity >= 1) {
     Report("exec()-ing the program with\n");
     Report("%s=%s\n", kDyldInsertLibraries, new_env);
     Report("to enable ASan wrappers.\n");
@@ -309,7 +309,7 @@ extern "C"
 void asan_dispatch_call_block_and_release(void *block) {
   GET_STACK_TRACE_THREAD;
   asan_block_context_t *context = (asan_block_context_t*)block;
-  if (flags()->verbosity >= 2) {
+  if (common_flags()->verbosity >= 2) {
     Report("asan_dispatch_call_block_and_release(): "
            "context: %p, pthread_self: %p\n",
            block, pthread_self());
@@ -344,7 +344,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
                        dispatch_function_t func) { \
     GET_STACK_TRACE_THREAD; \
     asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
-    if (flags()->verbosity >= 2) { \
+    if (common_flags()->verbosity >= 2) { \
       Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
              asan_ctxt, pthread_self()); \
       PRINT_CURRENT_STACK(); \
@@ -362,7 +362,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
                  dispatch_function_t func) {
   GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
-  if (flags()->verbosity >= 2) {
+  if (common_flags()->verbosity >= 2) {
     Report("dispatch_after_f: %p\n", asan_ctxt);
     PRINT_CURRENT_STACK();
   }
@@ -375,7 +375,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
                  dispatch_function_t func) {
   GET_STACK_TRACE_THREAD;
   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
-  if (flags()->verbosity >= 2) {
+  if (common_flags()->verbosity >= 2) {
     Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
            asan_ctxt, pthread_self());
     PRINT_CURRENT_STACK();
......
@@ -103,8 +103,9 @@ INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
     ALIAS("memalign");

 INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
-  GET_STACK_TRACE_MALLOC;
-  return asan_malloc_usable_size(ptr, &stack);
+  GET_CURRENT_PC_BP_SP;
+  (void)sp;
+  return asan_malloc_usable_size(ptr, pc, bp);
 }

 // We avoid including malloc.h for portability reasons.
......
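The malloc_usable_size interceptors no longer unwind a full malloc stack on every call; they grab only the current pc/bp (GET_CURRENT_PC_BP_SP) and asan_malloc_usable_size unwinds a fatal stack only if the pointer turns out not to be owned. A hedged, stand-alone sketch of that idea using GCC/Clang builtins (the real macro lives in sanitizer_common and is defined differently):

    // Capture the caller's pc/frame cheaply; do the expensive work (here,
    // printing; in ASan, unwinding a full trace) only on the error path.
    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t uptr;

    __attribute__((noinline))
    static uptr CheckedUsableSize(void *p, uptr pc, uptr bp) {
      if (p == nullptr) {
        // Error path: this is where ASan would unwind from (pc, bp) and report.
        std::printf("not owned; report from pc=%p bp=%p\n", (void *)pc, (void *)bp);
        return 0;
      }
      return 16;  // stand-in for the real AllocationSize() lookup
    }

    __attribute__((noinline))
    static uptr my_malloc_usable_size(void *p) {
      uptr pc = (uptr)__builtin_return_address(0);  // caller's pc
      uptr bp = (uptr)__builtin_frame_address(0);   // current frame
      return CheckedUsableSize(p, pc, bp);
    }

    int main() {
      char buf[16];
      std::printf("%zu\n", (size_t)my_malloc_usable_size(buf));
      my_malloc_usable_size(nullptr);
    }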
@@ -96,8 +96,9 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
 SANITIZER_INTERFACE_ATTRIBUTE
 size_t _msize(void *ptr) {
-  GET_STACK_TRACE_MALLOC;
-  return asan_malloc_usable_size(ptr, &stack);
+  GET_CURRENT_PC_BP_SP;
+  (void)sp;
+  return asan_malloc_usable_size(ptr, pc, bp);
 }

 int _CrtDbgReport(int, const char*, int,
......
@@ -12,6 +12,7 @@
 #include "asan_poisoning.h"
 #include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_flags.h"

 namespace __asan {
@@ -66,7 +67,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
   if (!flags()->allow_user_poisoning || size == 0) return;
   uptr beg_addr = (uptr)addr;
   uptr end_addr = beg_addr + size;
-  if (flags()->verbosity >= 1) {
+  if (common_flags()->verbosity >= 1) {
     Printf("Trying to poison memory region [%p, %p)\n",
            (void*)beg_addr, (void*)end_addr);
   }
@@ -108,7 +109,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
   if (!flags()->allow_user_poisoning || size == 0) return;
   uptr beg_addr = (uptr)addr;
   uptr end_addr = beg_addr + size;
-  if (flags()->verbosity >= 1) {
+  if (common_flags()->verbosity >= 1) {
     Printf("Trying to unpoison memory region [%p, %p)\n",
            (void*)beg_addr, (void*)end_addr);
   }
@@ -242,13 +243,57 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
 }

 void __asan_poison_stack_memory(uptr addr, uptr size) {
-  if (flags()->verbosity > 0)
+  if (common_flags()->verbosity > 0)
     Report("poisoning: %p %zx\n", (void*)addr, size);
   PoisonAlignedStackMemory(addr, size, true);
 }

 void __asan_unpoison_stack_memory(uptr addr, uptr size) {
-  if (flags()->verbosity > 0)
+  if (common_flags()->verbosity > 0)
     Report("unpoisoning: %p %zx\n", (void*)addr, size);
   PoisonAlignedStackMemory(addr, size, false);
 }
+
+void __sanitizer_annotate_contiguous_container(const void *beg_p,
+                                               const void *end_p,
+                                               const void *old_mid_p,
+                                               const void *new_mid_p) {
+  if (common_flags()->verbosity >= 2)
+    Printf("contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
+           new_mid_p);
+  uptr beg = reinterpret_cast<uptr>(beg_p);
+  uptr end = reinterpret_cast<uptr>(end_p);
+  uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
+  uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
+  uptr granularity = SHADOW_GRANULARITY;
+  CHECK(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
+        IsAligned(beg, granularity));
+  CHECK_LE(end - beg,
+           FIRST_32_SECOND_64(1UL << 30, 1UL << 34));  // Sanity check.
+  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
+  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
+  uptr d1 = RoundDownTo(old_mid, granularity);
+  uptr d2 = RoundUpTo(old_mid, granularity);
+  // Currently we should be in this state:
+  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
+  // Make a quick sanity check that we are indeed in this state.
+  if (d1 != d2)
+    CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
+  if (a + granularity <= d1)
+    CHECK_EQ(*(u8*)MemToShadow(a), 0);
+  if (d2 + granularity <= c && c <= end)
+    CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
+             kAsanContiguousContainerOOBMagic);
+  uptr b1 = RoundDownTo(new_mid, granularity);
+  uptr b2 = RoundUpTo(new_mid, granularity);
+  // New state:
+  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
+  PoisonShadow(a, b1 - a, 0);
+  PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
+  if (b1 != b2) {
+    CHECK_EQ(b2 - b1, granularity);
+    *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
+  }
+}
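The new entry point above is the runtime half of container-overflow checking: a container keeps the whole [beg, end) buffer allocated, and each time its logical size changes it tells ASan where the "good" prefix now ends. The following usage sketch is an assumption about how a caller would use it (it is not part of the patch); it relies on the public declaration that ships in <sanitizer/common_interface_defs.h>, and all other names are illustrative:

    // Hedged sketch: a tiny fixed-capacity container that annotates its
    // storage so reads past size() are reported as container-overflow.
    #include <cstddef>
    #include <sanitizer/common_interface_defs.h>

    struct TinyVec {
      // beg must be aligned to the shadow granularity (8 bytes).
      alignas(8) char storage[64];
      size_t size = 0;

      TinyVec() {
        // Whole buffer starts unused: mark [storage, storage+64) as "bad".
        __sanitizer_annotate_contiguous_container(storage, storage + 64,
                                                  storage + 64, storage);
      }
      void push_back(char c) {
        // Grow the "good" prefix by one byte, then write into it.
        __sanitizer_annotate_contiguous_container(storage, storage + 64,
                                                  storage + size,
                                                  storage + size + 1);
        storage[size++] = c;
      }
      void pop_back() {
        --size;
        // Shrink the "good" prefix; the freed tail is poisoned again.
        __sanitizer_annotate_contiguous_container(storage, storage + 64,
                                                  storage + size + 1,
                                                  storage + size);
      }
    };

    TinyVec g_vec;  // e.g. a global instance: with instrumentation enabled,
                    // reading g_vec.storage[g_vec.size] would now be flagged.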
@@ -41,6 +41,7 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
 ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
     uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
   DCHECK(flags()->poison_heap);
+  bool poison_partial = flags()->poison_partial;
   u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
   for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
     if (i + SHADOW_GRANULARITY <= size) {
@@ -49,7 +50,7 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
       *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value;  // unaddressable
     } else {
       // first size-i bytes are addressable
-      *shadow = static_cast<u8>(size - i);
+      *shadow = poison_partial ? static_cast<u8>(size - i) : 0;
     }
   }
 }
......
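Both the allocator hunk earlier and FastPoisonShadowPartialRightRedzone above now honor the new poison_partial flag when writing the shadow byte for a trailing, partially addressable 8-byte granule. A worked example of that arithmetic, under the usual assumptions (SHADOW_GRANULARITY == 8; shadow byte 0 means all 8 bytes addressable, a value k in 1..7 means only the first k bytes are; with poison_partial=0 the partial granule is simply left at 0):

    // Compute the partial-granule shadow byte for a 13-byte user allocation.
    #include <cstdio>

    int main() {
      const unsigned kShadowGranularity = 8;
      unsigned size = 13;                                   // user asked for 13 bytes
      unsigned full_granules = size / kShadowGranularity;   // 1 full granule
      unsigned partial = size & (kShadowGranularity - 1);   // 13 % 8 == 5
      // Shadow for granule 0 is 0 (all 8 bytes OK); shadow for granule 1 is 5
      // (only bytes 8..12 OK) when poison_partial is on, or 0 when it is off.
      std::printf("full granules: %u, partial shadow byte: %u\n",
                  full_granules, partial);
    }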
-//===-- asan_linux.cc -----------------------------------------------------===//
+//===-- asan_posix.cc -----------------------------------------------------===//
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
@@ -42,7 +42,7 @@ static void MaybeInstallSigaction(int signum,
   sigact.sa_flags = SA_SIGINFO;
   if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
   CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
-  if (flags()->verbosity >= 1) {
+  if (common_flags()->verbosity >= 1) {
     Report("Installed the sigaction for signal %d\n", signum);
   }
 }
@@ -69,7 +69,7 @@ void SetAlternateSignalStack() {
   altstack.ss_flags = 0;
   altstack.ss_size = kAltStackSize;
   CHECK_EQ(0, sigaltstack(&altstack, 0));
-  if (flags()->verbosity > 0) {
+  if (common_flags()->verbosity > 0) {
     Report("Alternative stack for T%d set: [%p,%p)\n",
            GetCurrentTidOrInvalid(),
            altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
@@ -114,6 +114,15 @@ void AsanTSDSet(void *tsd) {
   pthread_setspecific(tsd_key, tsd);
 }

+void PlatformTSDDtor(void *tsd) {
+  AsanThreadContext *context = (AsanThreadContext*)tsd;
+  if (context->destructor_iterations > 1) {
+    context->destructor_iterations--;
+    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
+    return;
+  }
+  AsanThread::TSDDtor(tsd);
+}
 }  // namespace __asan

 #endif  // SANITIZER_LINUX || SANITIZER_MAC
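PlatformTSDDtor above leans on a POSIX guarantee: if a thread-specific-data destructor stores a non-null value back into its key, the implementation repeats the destructor pass (up to PTHREAD_DESTRUCTOR_ITERATIONS), which lets ASan postpone tearing down its per-thread context until the final round. A small stand-alone illustration of that mechanism, with made-up names:

    // Sketch of re-arming a pthread TSD destructor to delay final cleanup.
    #include <pthread.h>
    #include <cstdio>

    static pthread_key_t key;

    struct Ctx { int destructor_iterations; };

    static void Dtor(void *p) {
      Ctx *ctx = static_cast<Ctx *>(p);
      if (ctx->destructor_iterations > 1) {
        ctx->destructor_iterations--;
        pthread_setspecific(key, p);  // ask libc for another destructor pass
        return;
      }
      std::printf("final destructor pass, releasing context\n");
      delete ctx;
    }

    static void *Thread(void *) {
      pthread_setspecific(key, new Ctx{3});
      return nullptr;                 // destructor runs 3 times on thread exit
    }

    int main() {
      pthread_key_create(&key, Dtor);
      pthread_t t;
      pthread_create(&t, nullptr, Thread, nullptr);
      pthread_join(t, nullptr);
    }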
@@ -18,6 +18,7 @@
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"

 namespace __asan {
@@ -71,6 +72,7 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
       case kAsanInitializationOrderMagic:
         return Cyan();
       case kAsanUserPoisonedMemoryMagic:
+      case kAsanContiguousContainerOOBMagic:
         return Blue();
       case kAsanStackUseAfterScopeMagic:
         return Magenta();
@@ -117,19 +119,21 @@ static void PrintLegend() {
   for (u8 i = 1; i < SHADOW_GRANULARITY; i++)
     PrintShadowByte("", i, " ");
   Printf("\n");
   PrintShadowByte("  Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
   PrintShadowByte("  Heap right redzone: ", kAsanHeapRightRedzoneMagic);
   PrintShadowByte("  Freed heap region: ", kAsanHeapFreeMagic);
   PrintShadowByte("  Stack left redzone: ", kAsanStackLeftRedzoneMagic);
   PrintShadowByte("  Stack mid redzone: ", kAsanStackMidRedzoneMagic);
   PrintShadowByte("  Stack right redzone: ", kAsanStackRightRedzoneMagic);
   PrintShadowByte("  Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
   PrintShadowByte("  Stack after return: ", kAsanStackAfterReturnMagic);
   PrintShadowByte("  Stack use after scope: ", kAsanStackUseAfterScopeMagic);
   PrintShadowByte("  Global redzone: ", kAsanGlobalRedzoneMagic);
   PrintShadowByte("  Global init order: ", kAsanInitializationOrderMagic);
   PrintShadowByte("  Poisoned by user: ", kAsanUserPoisonedMemoryMagic);
-  PrintShadowByte("  ASan internal: ", kAsanInternalHeapMagic);
+  PrintShadowByte("  Contiguous container OOB:",
+                  kAsanContiguousContainerOOBMagic);
+  PrintShadowByte("  ASan internal: ", kAsanInternalHeapMagic);
 }

 static void PrintShadowMemoryForAddress(uptr addr) {
@@ -178,8 +182,8 @@ static bool IsASCII(unsigned char c) {
 static const char *MaybeDemangleGlobalName(const char *name) {
   // We can spoil names of globals with C linkage, so use an heuristic
   // approach to check if the name should be demangled.
-  return (name[0] == '_' && name[1] == 'Z' && &getSymbolizer)
-             ? getSymbolizer()->Demangle(name)
+  return (name[0] == '_' && name[1] == 'Z')
+             ? Symbolizer::Get()->Demangle(name)
              : name;
 }
@@ -412,7 +416,11 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
 void DescribeHeapAddress(uptr addr, uptr access_size) {
   AsanChunkView chunk = FindHeapChunkByAddress(addr);
-  if (!chunk.IsValid()) return;
+  if (!chunk.IsValid()) {
+    Printf("AddressSanitizer can not describe address in more detail "
+           "(wild memory access suspected).\n");
+    return;
+  }
   DescribeAccessToHeapChunk(chunk, addr, access_size);
   CHECK(chunk.AllocTid() != kInvalidTid);
   asanThreadRegistry().CheckLocked();
@@ -479,7 +487,9 @@ void DescribeThread(AsanThreadContext *context) {
          context->parent_tid,
          ThreadNameWithParenthesis(context->parent_tid,
                                    tname, sizeof(tname)));
-  PrintStack(&context->stack);
+  uptr stack_size;
+  const uptr *stack_trace = StackDepotGet(context->stack_id, &stack_size);
+  PrintStack(stack_trace, stack_size);
   // Recursively described parent thread if needed.
   if (flags()->print_full_thread_history) {
     AsanThreadContext *parent_context =
@@ -540,22 +550,6 @@ class ScopedInErrorReport {
   }
 };

-static void ReportSummary(const char *error_type, StackTrace *stack) {
-  if (!stack->size) return;
-  if (&getSymbolizer && getSymbolizer()->IsAvailable()) {
-    AddressInfo ai;
-    // Currently, we include the first stack frame into the report summary.
-    // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
-    uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
-    getSymbolizer()->SymbolizeCode(pc, &ai, 1);
-    ReportErrorSummary(error_type,
-                       StripPathPrefix(ai.file,
-                                       common_flags()->strip_path_prefix),
-                       ai.line, ai.function);
-  }
-  // FIXME: do we need to print anything at all if there is no symbolizer?
-}
-
 void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
   ScopedInErrorReport in_report;
   Decorator d;
@@ -565,13 +559,13 @@ void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
          (void*)addr, (void*)pc, (void*)sp, (void*)bp,
          GetCurrentTidOrInvalid());
   Printf("%s", d.EndWarning());
-  Printf("AddressSanitizer can not provide additional info.\n");
   GET_STACK_TRACE_FATAL(pc, bp);
   PrintStack(&stack);
-  ReportSummary("SEGV", &stack);
+  Printf("AddressSanitizer can not provide additional info.\n");
+  ReportErrorSummary("SEGV", &stack);
 }

-void ReportDoubleFree(uptr addr, StackTrace *stack) {
+void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
   ScopedInErrorReport in_report;
   Decorator d;
   Printf("%s", d.Warning());
@@ -581,14 +575,15 @@ void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
          "thread T%d%s:\n",
          addr, curr_tid,
          ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
   Printf("%s", d.EndWarning());
-  PrintStack(stack);
+  CHECK_GT(free_stack->size, 0);
+  GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+  PrintStack(&stack);
   DescribeHeapAddress(addr, 1);
-  ReportSummary("double-free", stack);
+  ReportErrorSummary("double-free", &stack);
 }

-void ReportFreeNotMalloced(uptr addr, StackTrace *stack) {
+void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
   ScopedInErrorReport in_report;
   Decorator d;
   Printf("%s", d.Warning());
@@ -598,12 +593,14 @@ void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
          "which was not malloc()-ed: %p in thread T%d%s\n", addr,
          curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
   Printf("%s", d.EndWarning());
-  PrintStack(stack);
+  CHECK_GT(free_stack->size, 0);
+  GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+  PrintStack(&stack);
   DescribeHeapAddress(addr, 1);
-  ReportSummary("bad-free", stack);
+  ReportErrorSummary("bad-free", &stack);
 }

-void ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
+void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
                              AllocType alloc_type,
                              AllocType dealloc_type) {
   static const char *alloc_names[] =
@@ -617,9 +614,11 @@ void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
   Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
          alloc_names[alloc_type], dealloc_names[dealloc_type], addr);
   Printf("%s", d.EndWarning());
-  PrintStack(stack);
+  CHECK_GT(free_stack->size, 0);
+  GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+  PrintStack(&stack);
   DescribeHeapAddress(addr, 1);
-  ReportSummary("alloc-dealloc-mismatch", stack);
+  ReportErrorSummary("alloc-dealloc-mismatch", &stack);
   Report("HINT: if you don't care about these warnings you may set "
          "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
 }
@@ -634,7 +633,7 @@ void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
   Printf("%s", d.EndWarning());
   PrintStack(stack);
   DescribeHeapAddress(addr, 1);
-  ReportSummary("bad-malloc_usable_size", stack);
+  ReportErrorSummary("bad-malloc_usable_size", stack);
 }

 void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
@@ -647,7 +646,7 @@ void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
   Printf("%s", d.EndWarning());
   PrintStack(stack);
   DescribeHeapAddress(addr, 1);
-  ReportSummary("bad-__asan_get_allocated_size", stack);
+  ReportErrorSummary("bad-__asan_get_allocated_size", stack);
 }

 void ReportStringFunctionMemoryRangesOverlap(
@@ -665,7 +664,7 @@ void ReportStringFunctionMemoryRangesOverlap(
   PrintStack(stack);
   DescribeAddress((uptr)offset1, length1);
   DescribeAddress((uptr)offset2, length2);
-  ReportSummary(bug_type, stack);
+  ReportErrorSummary(bug_type, stack);
 }

 // ----------------------- Mac-specific reports ----------------- {{{1
@@ -747,6 +746,9 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
       case kAsanUserPoisonedMemoryMagic:
         bug_descr = "use-after-poison";
         break;
+      case kAsanContiguousContainerOOBMagic:
+        bug_descr = "container-overflow";
+        break;
       case kAsanStackUseAfterScopeMagic:
         bug_descr = "stack-use-after-scope";
         break;
@@ -775,7 +777,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
   PrintStack(&stack);
   DescribeAddress(addr, access_size);
-  ReportSummary(bug_descr, &stack);
+  ReportErrorSummary(bug_descr, &stack);
   PrintShadowMemoryForAddress(addr);
 }
......
@@ -31,9 +31,9 @@ void DescribeThread(AsanThreadContext *context);
 // Different kinds of error reports.
 void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
-void NORETURN ReportDoubleFree(uptr addr, StackTrace *stack);
-void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *stack);
-void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
+void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
+void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
+void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
                                       AllocType alloc_type,
                                       AllocType dealloc_type);
 void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
......
@@ -49,6 +49,8 @@ static void AsanDie() {
       UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
     }
   }
+  if (flags()->coverage)
+    __sanitizer_cov_dump();
   if (death_callback)
     death_callback();
   if (flags()->abort_on_error)
@@ -86,11 +88,11 @@ static const char *MaybeUseAsanDefaultOptionsCompileDefiniton() {
 }

 static void ParseFlagsFromString(Flags *f, const char *str) {
-  ParseCommonFlagsFromString(str);
-  CHECK((uptr)common_flags()->malloc_context_size <= kStackTraceMax);
+  CommonFlags *cf = common_flags();
+  ParseCommonFlagsFromString(cf, str);
+  CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);

   ParseFlag(str, &f->quarantine_size, "quarantine_size");
-  ParseFlag(str, &f->verbosity, "verbosity");
   ParseFlag(str, &f->redzone, "redzone");
   CHECK_GE(f->redzone, 16);
   CHECK(IsPowerOfTwo(f->redzone));
@@ -119,32 +121,25 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
   ParseFlag(str, &f->print_stats, "print_stats");
   ParseFlag(str, &f->print_legend, "print_legend");
   ParseFlag(str, &f->atexit, "atexit");
+  ParseFlag(str, &f->coverage, "coverage");
   ParseFlag(str, &f->disable_core, "disable_core");
   ParseFlag(str, &f->allow_reexec, "allow_reexec");
   ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
   ParseFlag(str, &f->poison_heap, "poison_heap");
+  ParseFlag(str, &f->poison_partial, "poison_partial");
   ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
-  ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
   ParseFlag(str, &f->strict_memcmp, "strict_memcmp");
   ParseFlag(str, &f->strict_init_order, "strict_init_order");
 }

 void InitializeFlags(Flags *f, const char *env) {
   CommonFlags *cf = common_flags();
+  SetCommonFlagsDefaults(cf);
   cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
-  cf->symbolize = true;
   cf->malloc_context_size = kDefaultMallocContextSize;
-  cf->fast_unwind_on_fatal = false;
-  cf->fast_unwind_on_malloc = true;
-  cf->strip_path_prefix = "";
-  cf->handle_ioctl = false;
-  cf->log_path = 0;
-  cf->detect_leaks = false;
-  cf->leak_check_at_exit = true;

   internal_memset(f, 0, sizeof(*f));
   f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
-  f->verbosity = 0;
   f->redzone = 16;
   f->debug = false;
   f->report_globals = 1;
@@ -168,14 +163,15 @@ void InitializeFlags(Flags *f, const char *env) {
   f->print_stats = false;
   f->print_legend = true;
   f->atexit = false;
+  f->coverage = false;
   f->disable_core = (SANITIZER_WORDSIZE == 64);
   f->allow_reexec = true;
   f->print_full_thread_history = true;
   f->poison_heap = true;
+  f->poison_partial = true;
   // Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
   // TODO(glider,timurrrr): Fix known issues and enable this back.
   f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
-  f->use_stack_depot = true;
   f->strict_memcmp = true;
   f->strict_init_order = false;
...@@ -184,7 +180,7 @@ void InitializeFlags(Flags *f, const char *env) { ...@@ -184,7 +180,7 @@ void InitializeFlags(Flags *f, const char *env) {
// Override from user-specified string. // Override from user-specified string.
ParseFlagsFromString(f, MaybeCallAsanDefaultOptions()); ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
if (flags()->verbosity) { if (cf->verbosity) {
Report("Using the defaults from __asan_default_options: %s\n", Report("Using the defaults from __asan_default_options: %s\n",
MaybeCallAsanDefaultOptions()); MaybeCallAsanDefaultOptions());
} }
...@@ -200,10 +196,10 @@ void InitializeFlags(Flags *f, const char *env) { ...@@ -200,10 +196,10 @@ void InitializeFlags(Flags *f, const char *env) {
} }
#endif #endif
if (cf->detect_leaks && !f->use_stack_depot) { // Make "strict_init_order" imply "check_initialization_order".
Report("%s: detect_leaks is ignored (requires use_stack_depot).\n", // TODO(samsonov): Use a single runtime flag for an init-order checker.
SanitizerToolName); if (f->strict_init_order) {
cf->detect_leaks = false; f->check_initialization_order = true;
} }
} }
...@@ -462,7 +458,7 @@ void __asan_init() { ...@@ -462,7 +458,7 @@ void __asan_init() {
__asan_option_detect_stack_use_after_return = __asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return; flags()->detect_stack_use_after_return;
if (flags()->verbosity && options) { if (common_flags()->verbosity && options) {
Report("Parsed ASAN_OPTIONS: %s\n", options); Report("Parsed ASAN_OPTIONS: %s\n", options);
} }
...@@ -472,11 +468,6 @@ void __asan_init() { ...@@ -472,11 +468,6 @@ void __asan_init() {
// Setup internal allocator callback. // Setup internal allocator callback.
SetLowLevelAllocateCallback(OnLowLevelAllocate); SetLowLevelAllocateCallback(OnLowLevelAllocate);
if (flags()->atexit) {
Atexit(asan_atexit);
}
// interceptors
InitializeAsanInterceptors(); InitializeAsanInterceptors();
ReplaceSystemMalloc(); ReplaceSystemMalloc();
...@@ -495,7 +486,7 @@ void __asan_init() { ...@@ -495,7 +486,7 @@ void __asan_init() {
} }
#endif #endif
if (flags()->verbosity) if (common_flags()->verbosity)
PrintAddressSpaceLayout(); PrintAddressSpaceLayout();
if (flags()->disable_core) { if (flags()->disable_core) {
...@@ -531,17 +522,18 @@ void __asan_init() { ...@@ -531,17 +522,18 @@ void __asan_init() {
Die(); Die();
} }
AsanTSDInit(PlatformTSDDtor);
InstallSignalHandlers(); InstallSignalHandlers();
AsanTSDInit(AsanThread::TSDDtor);
// Allocator should be initialized before starting external symbolizer, as // Allocator should be initialized before starting external symbolizer, as
// fork() on Mac locks the allocator. // fork() on Mac locks the allocator.
InitializeAllocator(); InitializeAllocator();
// Start symbolizer process if necessary. // Start symbolizer process if necessary.
if (common_flags()->symbolize && &getSymbolizer) { if (common_flags()->symbolize) {
getSymbolizer() Symbolizer::Init(common_flags()->external_symbolizer_path);
->InitializeExternal(common_flags()->external_symbolizer_path); } else {
Symbolizer::Disable();
} }
// On Linux AsanThread::ThreadStart() calls malloc(), which is why asan_inited // On Linux AsanThread::ThreadStart() calls malloc(), which is why asan_inited
...@@ -549,6 +541,13 @@ void __asan_init() { ...@@ -549,6 +541,13 @@ void __asan_init() {
asan_inited = 1; asan_inited = 1;
asan_init_is_running = false; asan_init_is_running = false;
if (flags()->atexit)
Atexit(asan_atexit);
if (flags()->coverage)
Atexit(__sanitizer_cov_dump);
// interceptors
InitTlsSize(); InitTlsSize();
// Create main thread. // Create main thread.
...@@ -568,7 +567,7 @@ void __asan_init() { ...@@ -568,7 +567,7 @@ void __asan_init() {
} }
#endif // CAN_SANITIZE_LEAKS #endif // CAN_SANITIZE_LEAKS
if (flags()->verbosity) { if (common_flags()->verbosity) {
Report("AddressSanitizer Init done\n"); Report("AddressSanitizer Init done\n");
} }
} }
...@@ -22,9 +22,12 @@ static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer, ...@@ -22,9 +22,12 @@ static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
: false; : false;
} }
void PrintStack(const uptr *trace, uptr size) {
StackTrace::PrintStack(trace, size, MaybeCallAsanSymbolize);
}
void PrintStack(StackTrace *stack) { void PrintStack(StackTrace *stack) {
stack->PrintStack(stack->trace, stack->size, common_flags()->symbolize, PrintStack(stack->trace, stack->size);
common_flags()->strip_path_prefix, MaybeCallAsanSymbolize);
} }
} // namespace __asan } // namespace __asan
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
namespace __asan { namespace __asan {
void PrintStack(StackTrace *stack); void PrintStack(StackTrace *stack);
void PrintStack(const uptr *trace, uptr size);
} // namespace __asan } // namespace __asan
...@@ -29,19 +30,24 @@ void PrintStack(StackTrace *stack); ...@@ -29,19 +30,24 @@ void PrintStack(StackTrace *stack);
#if SANITIZER_WINDOWS #if SANITIZER_WINDOWS
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \ #define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \ StackTrace stack; \
GetStackTrace(&stack, max_s, pc, bp, 0, 0, fast) stack.Unwind(max_s, pc, bp, 0, 0, fast)
#else #else
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \ #define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \ StackTrace stack; \
{ \ { \
AsanThread *t; \ AsanThread *t; \
stack.size = 0; \ stack.size = 0; \
if (asan_inited && (t = GetCurrentThread()) && !t->isUnwinding()) { \ if (asan_inited) { \
uptr stack_top = t->stack_top(); \ if ((t = GetCurrentThread()) && !t->isUnwinding()) { \
uptr stack_bottom = t->stack_bottom(); \ uptr stack_top = t->stack_top(); \
ScopedUnwinding unwind_scope(t); \ uptr stack_bottom = t->stack_bottom(); \
GetStackTrace(&stack, max_s, pc, bp, stack_top, stack_bottom, fast); \ ScopedUnwinding unwind_scope(t); \
} \ stack.Unwind(max_s, pc, bp, stack_top, stack_bottom, fast); \
} else if (t == 0 && !fast) { \
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */ \
stack.Unwind(max_s, pc, bp, 0, 0, false); \
} \
} \
} }
#endif // SANITIZER_WINDOWS #endif // SANITIZER_WINDOWS
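For orientation, a minimal sketch of how the reworked macro is typically consumed (the wrapper function name and the choice of fast-unwind flag are illustrative assumptions, not part of this commit): the macro declares and fills a local StackTrace named `stack`, which is then handed to the PrintStack overloads shown above.

void ReportWithStack(uptr pc, uptr bp) {
  // Declares a local `StackTrace stack` and unwinds into it, using the
  // current thread's stack bounds when a thread object is available.
  GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp,
                                 common_flags()->fast_unwind_on_fatal);
  // Symbolize and print via the asan PrintStack overload declared above.
  PrintStack(&stack);
}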
......
...@@ -45,9 +45,9 @@ struct AsanStats { ...@@ -45,9 +45,9 @@ struct AsanStats {
uptr malloc_large; uptr malloc_large;
uptr malloc_small_slow; uptr malloc_small_slow;
// Ctor for global AsanStats (accumulated stats and main thread stats). // Ctor for global AsanStats (accumulated stats for dead threads).
explicit AsanStats(LinkerInitialized) { } explicit AsanStats(LinkerInitialized) { }
// Default ctor for thread-local stats. // Creates empty stats.
AsanStats(); AsanStats();
void Print(); // Prints formatted stats to stderr. void Print(); // Prints formatted stats to stderr.
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include "asan_mapping.h" #include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "lsan/lsan_common.h" #include "lsan/lsan_common.h"
namespace __asan { namespace __asan {
...@@ -25,9 +26,8 @@ namespace __asan { ...@@ -25,9 +26,8 @@ namespace __asan {
void AsanThreadContext::OnCreated(void *arg) { void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg); CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack) { if (args->stack)
internal_memcpy(&stack, args->stack, sizeof(stack)); stack_id = StackDepotPut(args->stack->trace, args->stack->size);
}
thread = args->thread; thread = args->thread;
thread->set_context(this); thread->set_context(this);
} }
...@@ -41,9 +41,12 @@ void AsanThreadContext::OnFinished() { ...@@ -41,9 +41,12 @@ void AsanThreadContext::OnFinished() {
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)]; static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry; static ThreadRegistry *asan_thread_registry;
static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *GetAsanThreadContext(u32 tid) { static ThreadContextBase *GetAsanThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(AsanThreadContext), "AsanThreadContext"); BlockingMutexLock lock(&mu_for_thread_context);
return new(mem) AsanThreadContext(tid); return new(allocator_for_thread_context) AsanThreadContext(tid);
} }
ThreadRegistry &asanThreadRegistry() { ThreadRegistry &asanThreadRegistry() {
...@@ -76,24 +79,25 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine, ...@@ -76,24 +79,25 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__); AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
thread->start_routine_ = start_routine; thread->start_routine_ = start_routine;
thread->arg_ = arg; thread->arg_ = arg;
thread->context_ = 0;
return thread; return thread;
} }
void AsanThread::TSDDtor(void *tsd) { void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd; AsanThreadContext *context = (AsanThreadContext*)tsd;
if (flags()->verbosity >= 1) if (common_flags()->verbosity >= 1)
Report("T%d TSDDtor\n", context->tid); Report("T%d TSDDtor\n", context->tid);
if (context->thread) if (context->thread)
context->thread->Destroy(); context->thread->Destroy();
} }
void AsanThread::Destroy() { void AsanThread::Destroy() {
if (flags()->verbosity >= 1) { if (common_flags()->verbosity >= 1) {
Report("T%d exited\n", tid()); Report("T%d exited\n", tid());
} }
malloc_storage().CommitBack();
if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
asanThreadRegistry().FinishThread(tid()); asanThreadRegistry().FinishThread(tid());
FlushToDeadThreadStats(&stats_); FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because // We also clear the shadow on thread destruction because
...@@ -136,7 +140,7 @@ void AsanThread::Init() { ...@@ -136,7 +140,7 @@ void AsanThread::Init() {
CHECK(AddrIsInMem(stack_bottom_)); CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1)); CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS(); ClearShadowForThreadStackAndTLS();
if (flags()->verbosity >= 1) { if (common_flags()->verbosity >= 1) {
int local = 0; int local = 0;
Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n", Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
tid(), (void*)stack_bottom_, (void*)stack_top_, tid(), (void*)stack_bottom_, (void*)stack_top_,
...@@ -160,10 +164,14 @@ thread_return_t AsanThread::ThreadStart(uptr os_id) { ...@@ -160,10 +164,14 @@ thread_return_t AsanThread::ThreadStart(uptr os_id) {
} }
thread_return_t res = start_routine_(arg_); thread_return_t res = start_routine_(arg_);
malloc_storage().CommitBack();
if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
this->Destroy(); // On POSIX systems we defer this to the TSD destructor. LSan will consider
// the thread's memory as non-live from the moment we call Destroy(), even
// though that memory might contain pointers to heap objects which will be
// cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
// the TSD destructors have run might cause false positives in LSan.
if (!SANITIZER_POSIX)
this->Destroy();
return res; return res;
} }
...@@ -257,7 +265,7 @@ AsanThread *GetCurrentThread() { ...@@ -257,7 +265,7 @@ AsanThread *GetCurrentThread() {
void SetCurrentThread(AsanThread *t) { void SetCurrentThread(AsanThread *t) {
CHECK(t->context()); CHECK(t->context());
if (flags()->verbosity >= 2) { if (common_flags()->verbosity >= 2) {
Report("SetCurrentThread: %p for thread %p\n", Report("SetCurrentThread: %p for thread %p\n",
t->context(), (void*)GetThreadSelf()); t->context(), (void*)GetThreadSelf());
} }
...@@ -286,6 +294,13 @@ void EnsureMainThreadIDIsCorrect() { ...@@ -286,6 +294,13 @@ void EnsureMainThreadIDIsCorrect() {
if (context && (context->tid == 0)) if (context && (context->tid == 0))
context->os_id = GetTid(); context->os_id = GetTid();
} }
__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return 0;
return context->thread;
}
} // namespace __asan } // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1 // --- Implementation of LSan-specific functions --- {{{1
...@@ -293,10 +308,7 @@ namespace __lsan { ...@@ -293,10 +308,7 @@ namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) { uptr *cache_begin, uptr *cache_end) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
__asan::AsanThread *t = context->thread;
if (!t) return false; if (!t) return false;
*stack_begin = t->stack_bottom(); *stack_begin = t->stack_bottom();
*stack_end = t->stack_top(); *stack_end = t->stack_top();
...@@ -308,6 +320,13 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, ...@@ -308,6 +320,13 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
return true; return true;
} }
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void *arg) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (t && t->has_fake_stack())
t->fake_stack()->ForEachFakeFrame(callback, arg);
}
void LockThreadRegistry() { void LockThreadRegistry() {
__asan::asanThreadRegistry().Lock(); __asan::asanThreadRegistry().Lock();
} }
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include "asan_fake_stack.h" #include "asan_fake_stack.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "asan_stats.h" #include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_thread_registry.h" #include "sanitizer_common/sanitizer_thread_registry.h"
...@@ -34,11 +35,13 @@ class AsanThreadContext : public ThreadContextBase { ...@@ -34,11 +35,13 @@ class AsanThreadContext : public ThreadContextBase {
explicit AsanThreadContext(int tid) explicit AsanThreadContext(int tid)
: ThreadContextBase(tid), : ThreadContextBase(tid),
announced(false), announced(false),
destructor_iterations(kPthreadDestructorIterations),
stack_id(0),
thread(0) { thread(0) {
internal_memset(&stack, 0, sizeof(stack));
} }
bool announced; bool announced;
StackTrace stack; u8 destructor_iterations;
u32 stack_id;
AsanThread *thread; AsanThread *thread;
void OnCreated(void *arg); void OnCreated(void *arg);
...@@ -46,7 +49,7 @@ class AsanThreadContext : public ThreadContextBase { ...@@ -46,7 +49,7 @@ class AsanThreadContext : public ThreadContextBase {
}; };
// AsanThreadContext objects are never freed, so we need many of them. // AsanThreadContext objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadContext) <= 4096); COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
// AsanThread are stored in TSD and destroyed when the thread dies. // AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread { class AsanThread {
...@@ -96,14 +99,15 @@ class AsanThread { ...@@ -96,14 +99,15 @@ class AsanThread {
// True if this thread is currently unwinding stack (i.e. collecting a stack // True if this thread is currently unwinding stack (i.e. collecting a stack
// trace). Used to prevent deadlocks on platforms where libc unwinder calls // trace). Used to prevent deadlocks on platforms where libc unwinder calls
// malloc internally. See PR17116 for more details. // malloc internally. See PR17116 for more details.
bool isUnwinding() const { return unwinding; } bool isUnwinding() const { return unwinding_; }
void setUnwinding(bool b) { unwinding = b; } void setUnwinding(bool b) { unwinding_ = b; }
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; } AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; } AsanStats &stats() { return stats_; }
private: private:
AsanThread() : unwinding(false) {} // NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
void SetThreadStackAndTls(); void SetThreadStackAndTls();
void ClearShadowForThreadStackAndTLS(); void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack(); FakeStack *AsyncSignalSafeLazyInitFakeStack();
...@@ -111,18 +115,18 @@ class AsanThread { ...@@ -111,18 +115,18 @@ class AsanThread {
AsanThreadContext *context_; AsanThreadContext *context_;
thread_callback_t start_routine_; thread_callback_t start_routine_;
void *arg_; void *arg_;
uptr stack_top_; uptr stack_top_;
uptr stack_bottom_; uptr stack_bottom_;
// stack_size_ == stack_top_ - stack_bottom_; // stack_size_ == stack_top_ - stack_bottom_;
// It needs to be set in an async-signal-safe manner. // It needs to be set in an async-signal-safe manner.
uptr stack_size_; uptr stack_size_;
uptr tls_begin_; uptr tls_begin_;
uptr tls_end_; uptr tls_end_;
FakeStack *fake_stack_; FakeStack *fake_stack_;
AsanThreadLocalMallocStorage malloc_storage_; AsanThreadLocalMallocStorage malloc_storage_;
AsanStats stats_; AsanStats stats_;
bool unwinding; bool unwinding_;
}; };
// ScopedUnwinding is a scope guard for the stack-unwinding flag of a thread context // ScopedUnwinding is a scope guard for the stack-unwinding flag of a thread context
......
...@@ -58,6 +58,9 @@ void AsanTSDSet(void *tsd) { ...@@ -58,6 +58,9 @@ void AsanTSDSet(void *tsd) {
fake_tsd = tsd; fake_tsd = tsd;
} }
void PlatformTSDDtor(void *tsd) {
AsanThread::TSDDtor(tsd);
}
// ---------------------- Various stuff ---------------- {{{1 // ---------------------- Various stuff ---------------- {{{1
void MaybeReexec() { void MaybeReexec() {
// No need to re-exec on Windows. // No need to re-exec on Windows.
......
...@@ -25,10 +25,6 @@ extern "C" { ...@@ -25,10 +25,6 @@ extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr. // Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path); void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to given file descriptor instead of
// stderr.
void __sanitizer_set_report_fd(int fd);
// Notify the tools that the sandbox is going to be turned on. The reserved // Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions // parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox. // that the tools may call to bypass the sandbox.
...@@ -49,6 +45,44 @@ extern "C" { ...@@ -49,6 +45,44 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x); void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x); void __sanitizer_unaligned_store64(void *p, uint64_t x);
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Annotate the current state of a contiguous container, such as
// std::vector, std::string or similar.
// A contiguous container is a container that keeps all of its elements
// in a contiguous region of memory. The container owns the region of memory
// [beg, end); the memory [beg, mid) is used to store the current elements
// and the memory [mid, end) is reserved for future elements;
// beg <= mid <= end. For example, in "std::vector<> v"
// beg = &v[0];
// end = beg + v.capacity() * sizeof(v[0]);
// mid = beg + v.size() * sizeof(v[0]);
//
// This annotation tells the Sanitizer tool about the current state of the
// container so that the tool can report errors when memory from [mid, end)
// is accessed. Insert this annotation into methods like push_back/pop_back.
// Supply the old and the new values of mid (old_mid/new_mid).
// In the initial state mid == end, and the same should hold in the final
// state, i.e. when the container is destroyed or when its storage is reallocated.
//
// Use with caution and don't use for anything other than vector-like classes.
//
// For AddressSanitizer, 'beg' should be 8-aligned and 'end' should
// be either 8-aligned or it should point to the end of a separate heap-,
// stack-, or global- allocated buffer. I.e. the following will not work:
// int64_t x[2]; // 16 bytes, 8-aligned.
// char *beg = (char *)&x[0];
// char *end = beg + 12; // Not 8 aligned, not the end of the buffer.
// This however will work fine:
// int32_t x[3]; // 12 bytes, but 8-aligned under AddressSanitizer.
// char *beg = (char*)&x[0];
// char *end = beg + 12; // Not 8-aligned, but is the end of the buffer.
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
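As a minimal sketch, a vector-like class could drive this annotation as below; the TinyVec type and its fields are illustrative only (not part of the sanitizer interface), and it assumes `beg_` meets the alignment requirement described above.

struct TinyVec {
  char *beg_, *mid_, *end_;  // storage start, end of used region, end of capacity

  void push_back(char c) {
    // Grow the used region by one byte (old mid -> new mid), then store into it.
    __sanitizer_annotate_contiguous_container(beg_, end_, mid_, mid_ + 1);
    *mid_++ = c;
  }

  void pop_back() {
    // Shrink the used region; accesses to [new mid, end) are reported again.
    __sanitizer_annotate_contiguous_container(beg_, end_, mid_, mid_ - 1);
    --mid_;
  }
};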
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif #endif
......
...@@ -985,10 +985,11 @@ ...@@ -985,10 +985,11 @@
#else #else
#define __sanitizer_syscall_pre_pread64(fd, buf, count, pos0, pos1) \ #define __sanitizer_syscall_pre_pread64(fd, buf, count, pos0, pos1) \
__sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \ __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \
(long)(pos)) (long)(pos0), (long)(pos1))
#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos0, pos1) \ #define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos0, pos1) \
__sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \ __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \
(long)(count), (long)(pos)) (long)(count), (long)(pos0), \
(long)(pos1))
#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos0, pos1) \ #define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos0, pos1) \
__sanitizer_syscall_pre_impl_pwrite64( \ __sanitizer_syscall_pre_impl_pwrite64( \
(long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1)) (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))
......
...@@ -236,12 +236,18 @@ typedef unsigned long uptr; // NOLINT ...@@ -236,12 +236,18 @@ typedef unsigned long uptr; // NOLINT
#if defined(__linux__) #if defined(__linux__)
# include "interception_linux.h" # include "interception_linux.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func) # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX(func, symver)
#elif defined(__APPLE__) #elif defined(__APPLE__)
# include "interception_mac.h" # include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func) # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_MAC(func, symver)
#else // defined(_WIN32) #else // defined(_WIN32)
# include "interception_win.h" # include "interception_win.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func) # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_WIN(func, symver)
#endif #endif
#undef INCLUDED_FROM_INTERCEPTION_LIB #undef INCLUDED_FROM_INTERCEPTION_LIB
......
...@@ -33,9 +33,12 @@ void *GetFuncAddrVer(const char *func_name, const char *ver); ...@@ -33,9 +33,12 @@ void *GetFuncAddrVer(const char *func_name, const char *ver);
(::__interception::uptr)&WRAP(func)) (::__interception::uptr)&WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym #if !defined(__ANDROID__) // android does not have dlvsym
#define INTERCEPT_FUNCTION_VER(func, symver) \ # define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \ ::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, #symver) ::__interception::GetFuncAddrVer(#func, symver)
#else
# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
INTERCEPT_FUNCTION_LINUX(func)
#endif // !defined(__ANDROID__) #endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H #endif // INTERCEPTION_LINUX_H
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#define INTERCEPTION_MAC_H #define INTERCEPTION_MAC_H
#define INTERCEPT_FUNCTION_MAC(func) #define INTERCEPT_FUNCTION_MAC(func)
#define INTERCEPT_FUNCTION_VER_MAC(func, symver)
#endif // INTERCEPTION_MAC_H #endif // INTERCEPTION_MAC_H
#endif // __APPLE__ #endif // __APPLE__
...@@ -39,5 +39,8 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func); ...@@ -39,5 +39,8 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func);
(::__interception::uptr*)&REAL(func)) (::__interception::uptr*)&REAL(func))
#endif #endif
#define INTERCEPT_FUNCTION_VER_WIN(func, symver) \
INTERCEPT_FUNCTION_WIN(func)
#endif // INTERCEPTION_WIN_H #endif // INTERCEPTION_WIN_H
#endif // _WIN32 #endif // _WIN32
...@@ -22,6 +22,7 @@ lsan_files = \ ...@@ -22,6 +22,7 @@ lsan_files = \
lsan.cc \ lsan.cc \
lsan_allocator.cc \ lsan_allocator.cc \
lsan_interceptors.cc \ lsan_interceptors.cc \
lsan_preinit.cc \
lsan_thread.cc lsan_thread.cc
libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files) libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files)
......
...@@ -83,7 +83,7 @@ liblsan_la_DEPENDENCIES = \ ...@@ -83,7 +83,7 @@ liblsan_la_DEPENDENCIES = \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
am__objects_1 = lsan_common.lo lsan_common_linux.lo am__objects_1 = lsan_common.lo lsan_common_linux.lo
am__objects_2 = $(am__objects_1) lsan.lo lsan_allocator.lo \ am__objects_2 = $(am__objects_1) lsan.lo lsan_allocator.lo \
lsan_interceptors.lo lsan_thread.lo lsan_interceptors.lo lsan_preinit.lo lsan_thread.lo
am_liblsan_la_OBJECTS = $(am__objects_2) am_liblsan_la_OBJECTS = $(am__objects_2)
liblsan_la_OBJECTS = $(am_liblsan_la_OBJECTS) liblsan_la_OBJECTS = $(am_liblsan_la_OBJECTS)
liblsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ liblsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
...@@ -264,6 +264,7 @@ lsan_files = \ ...@@ -264,6 +264,7 @@ lsan_files = \
lsan.cc \ lsan.cc \
lsan_allocator.cc \ lsan_allocator.cc \
lsan_interceptors.cc \ lsan_interceptors.cc \
lsan_preinit.cc \
lsan_thread.cc lsan_thread.cc
libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files) libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files)
...@@ -400,6 +401,7 @@ distclean-compile: ...@@ -400,6 +401,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_linux.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_interceptors.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_interceptors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_preinit.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_thread.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_thread.Plo@am__quote@
.cc.o: .cc.o:
......
...@@ -18,26 +18,30 @@ ...@@ -18,26 +18,30 @@
#include "lsan_common.h" #include "lsan_common.h"
#include "lsan_thread.h" #include "lsan_thread.h"
bool lsan_inited;
bool lsan_init_is_running;
namespace __lsan { namespace __lsan {
static void InitializeCommonFlags() { static void InitializeCommonFlags() {
CommonFlags *cf = common_flags(); CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH"); cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf->symbolize = true;
cf->strip_path_prefix = "";
cf->fast_unwind_on_malloc = true;
cf->malloc_context_size = 30; cf->malloc_context_size = 30;
cf->detect_leaks = true; cf->detect_leaks = true;
cf->leak_check_at_exit = true;
ParseCommonFlagsFromString(GetEnv("LSAN_OPTIONS")); ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
} }
void Init() { } // namespace __lsan
static bool inited;
if (inited) using namespace __lsan; // NOLINT
extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running);
if (lsan_inited)
return; return;
inited = true; lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer"; SanitizerToolName = "LeakSanitizer";
InitializeCommonFlags(); InitializeCommonFlags();
InitializeAllocator(); InitializeAllocator();
...@@ -51,13 +55,14 @@ void Init() { ...@@ -51,13 +55,14 @@ void Init() {
// Start symbolizer process if necessary. // Start symbolizer process if necessary.
if (common_flags()->symbolize) { if (common_flags()->symbolize) {
getSymbolizer() Symbolizer::Init(common_flags()->external_symbolizer_path);
->InitializeExternal(common_flags()->external_symbolizer_path); } else {
Symbolizer::Disable();
} }
InitCommonLsan(); InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck); Atexit(DoLeakCheck);
lsan_inited = true;
lsan_init_is_running = false;
} }
} // namespace __lsan
...@@ -15,7 +15,11 @@ ...@@ -15,7 +15,11 @@
namespace __lsan { namespace __lsan {
void Init();
void InitializeInterceptors(); void InitializeInterceptors();
} // namespace __lsan } // namespace __lsan
extern bool lsan_inited;
extern bool lsan_init_is_running;
extern "C" void __lsan_init();
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h" #include "lsan_common.h"
extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan { namespace __lsan {
static const uptr kMaxAllowedMallocSize = 8UL << 30; static const uptr kMaxAllowedMallocSize = 8UL << 30;
...@@ -32,7 +34,7 @@ struct ChunkMetadata { ...@@ -32,7 +34,7 @@ struct ChunkMetadata {
}; };
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator; sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator; typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
...@@ -78,7 +80,10 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment, ...@@ -78,7 +80,10 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size); Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return 0; return 0;
} }
void *p = allocator.Allocate(&cache, size, alignment, cleared); void *p = allocator.Allocate(&cache, size, alignment, false);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size); RegisterAllocation(stack, p, size);
return p; return p;
} }
......
...@@ -91,8 +91,12 @@ void InitializeSuppressions() { ...@@ -91,8 +91,12 @@ void InitializeSuppressions() {
void InitCommonLsan() { void InitCommonLsan() {
InitializeFlags(); InitializeFlags();
InitializeSuppressions(); if (common_flags()->detect_leaks) {
InitializePlatformSpecificModules(); // Initialization which can fail or print warnings should only be done if
// LSan is actually enabled.
InitializeSuppressions();
InitializePlatformSpecificModules();
}
} }
class Decorator: private __sanitizer::AnsiColorDecorator { class Decorator: private __sanitizer::AnsiColorDecorator {
...@@ -136,6 +140,8 @@ void ScanRangeForPointers(uptr begin, uptr end, ...@@ -136,6 +140,8 @@ void ScanRangeForPointers(uptr begin, uptr end,
if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue; if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
uptr chunk = PointsIntoChunk(p); uptr chunk = PointsIntoChunk(p);
if (!chunk) continue; if (!chunk) continue;
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
if (chunk == begin) continue;
LsanMetadata m(chunk); LsanMetadata m(chunk);
// Reachable beats ignored beats leaked. // Reachable beats ignored beats leaked.
if (m.tag() == kReachable) continue; if (m.tag() == kReachable) continue;
...@@ -149,6 +155,11 @@ void ScanRangeForPointers(uptr begin, uptr end, ...@@ -149,6 +155,11 @@ void ScanRangeForPointers(uptr begin, uptr end,
} }
} }
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}
// Scans thread data (stacks and TLS) for heap pointers. // Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads, static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) { Frontier *frontier) {
...@@ -197,6 +208,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, ...@@ -197,6 +208,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
} }
ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
kReachable); kReachable);
ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
} }
if (flags()->use_tls) { if (flags()->use_tls) {
...@@ -261,6 +273,8 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { ...@@ -261,6 +273,8 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// The check here is relatively expensive, so we do this in a separate flood // The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable // fill. That way we can skip the check for chunks that are reachable
// otherwise. // otherwise.
if (flags()->log_pointers)
Report("Processing platform-specific allocations.\n");
ProcessPlatformSpecificAllocations(&frontier); ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable); FloodFillTag(&frontier, kReachable);
...@@ -281,8 +295,7 @@ static void PrintStackTraceById(u32 stack_trace_id) { ...@@ -281,8 +295,7 @@ static void PrintStackTraceById(u32 stack_trace_id) {
CHECK(stack_trace_id); CHECK(stack_trace_id);
uptr size = 0; uptr size = 0;
const uptr *trace = StackDepotGet(stack_trace_id, &size); const uptr *trace = StackDepotGet(stack_trace_id, &size);
StackTrace::PrintStack(trace, size, common_flags()->symbolize, StackTrace::PrintStack(trace, size);
common_flags()->strip_path_prefix, 0);
} }
// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport. // ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
...@@ -400,8 +413,8 @@ static Suppression *GetSuppressionForAddr(uptr addr) { ...@@ -400,8 +413,8 @@ static Suppression *GetSuppressionForAddr(uptr addr) {
static const uptr kMaxAddrFrames = 16; static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo(); for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
uptr addr_frames_num = uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
getSymbolizer()->SymbolizeCode(addr, addr_frames.data(), kMaxAddrFrames); addr, addr_frames.data(), kMaxAddrFrames);
for (uptr i = 0; i < addr_frames_num; i++) { for (uptr i = 0; i < addr_frames_num; i++) {
Suppression* s; Suppression* s;
if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) || if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
...@@ -479,7 +492,6 @@ void LeakReport::PrintLargest(uptr num_leaks_to_print) { ...@@ -479,7 +492,6 @@ void LeakReport::PrintLargest(uptr num_leaks_to_print) {
leaks_[i].total_size, leaks_[i].hit_count); leaks_[i].total_size, leaks_[i].hit_count);
Printf("%s", d.End()); Printf("%s", d.End());
PrintStackTraceById(leaks_[i].stack_trace_id); PrintStackTraceById(leaks_[i].stack_trace_id);
Printf("\n");
leaks_printed++; leaks_printed++;
if (leaks_printed == num_leaks_to_print) break; if (leaks_printed == num_leaks_to_print) break;
} }
...@@ -497,12 +509,11 @@ void LeakReport::PrintSummary() { ...@@ -497,12 +509,11 @@ void LeakReport::PrintSummary() {
bytes += leaks_[i].total_size; bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count; allocations += leaks_[i].hit_count;
} }
const int kMaxSummaryLength = 128;
InternalScopedBuffer<char> summary(kMaxSummaryLength); InternalScopedBuffer<char> summary(kMaxSummaryLength);
internal_snprintf(summary.data(), kMaxSummaryLength, internal_snprintf(summary.data(), summary.size(),
"LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).", "%zu byte(s) leaked in %zu allocation(s).", bytes,
bytes, allocations); allocations);
__sanitizer_report_error_summary(summary.data()); ReportErrorSummary(summary.data());
} }
uptr LeakReport::ApplySuppressions() { uptr LeakReport::ApplySuppressions() {
...@@ -528,6 +539,8 @@ extern "C" { ...@@ -528,6 +539,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) { void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS #if CAN_SANITIZE_LEAKS
if (!common_flags()->detect_leaks)
return;
// Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
// locked. // locked.
BlockingMutexLock l(&global_mutex); BlockingMutexLock l(&global_mutex);
...@@ -552,7 +565,7 @@ void __lsan_disable() { ...@@ -552,7 +565,7 @@ void __lsan_disable() {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() { void __lsan_enable() {
#if CAN_SANITIZE_LEAKS #if CAN_SANITIZE_LEAKS
if (!__lsan::disable_counter) { if (!__lsan::disable_counter && common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n"); Report("Unmatched call to __lsan_enable().\n");
Die(); Die();
} }
......
...@@ -133,6 +133,8 @@ void UnlockThreadRegistry(); ...@@ -133,6 +133,8 @@ void UnlockThreadRegistry();
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end); uptr *cache_begin, uptr *cache_end);
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void *arg);
// If called from the main thread, updates the main thread's TID in the thread // If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent // registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call // exec(), which invalidates the recorded TID. To update it, we must call
......
...@@ -42,11 +42,17 @@ int pthread_setspecific(unsigned key, const void *v); ...@@ -42,11 +42,17 @@ int pthread_setspecific(unsigned key, const void *v);
stack_top = t->stack_end(); \ stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \ stack_bottom = t->stack_begin(); \
} \ } \
GetStackTrace(&stack, __sanitizer::common_flags()->malloc_context_size, \ stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), \ StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \ GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
} }
#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
__lsan_init(); \
} while (0)
///// Malloc/free interceptors. ///// ///// Malloc/free interceptors. /////
const bool kAlwaysClearMemory = true; const bool kAlwaysClearMemory = true;
...@@ -56,38 +62,49 @@ namespace std { ...@@ -56,38 +62,49 @@ namespace std {
} }
INTERCEPTOR(void*, malloc, uptr size) { INTERCEPTOR(void*, malloc, uptr size) {
Init(); ENSURE_LSAN_INITED;
GET_STACK_TRACE; GET_STACK_TRACE;
return Allocate(stack, size, 1, kAlwaysClearMemory); return Allocate(stack, size, 1, kAlwaysClearMemory);
} }
INTERCEPTOR(void, free, void *p) { INTERCEPTOR(void, free, void *p) {
Init(); ENSURE_LSAN_INITED;
Deallocate(p); Deallocate(p);
} }
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (lsan_init_is_running) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
static uptr allocated;
uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
void *mem = (void*)&calloc_memory_for_dlsym[allocated];
allocated += size_in_words;
CHECK(allocated < kCallocPoolSize);
return mem;
}
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0; if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
Init(); ENSURE_LSAN_INITED;
GET_STACK_TRACE; GET_STACK_TRACE;
size *= nmemb; size *= nmemb;
return Allocate(stack, size, 1, true); return Allocate(stack, size, 1, true);
} }
INTERCEPTOR(void*, realloc, void *q, uptr size) { INTERCEPTOR(void*, realloc, void *q, uptr size) {
Init(); ENSURE_LSAN_INITED;
GET_STACK_TRACE; GET_STACK_TRACE;
return Reallocate(stack, q, size, 1); return Reallocate(stack, q, size, 1);
} }
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) { INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
Init(); ENSURE_LSAN_INITED;
GET_STACK_TRACE; GET_STACK_TRACE;
return Allocate(stack, size, alignment, kAlwaysClearMemory); return Allocate(stack, size, alignment, kAlwaysClearMemory);
} }
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
Init(); ENSURE_LSAN_INITED;
GET_STACK_TRACE; GET_STACK_TRACE;
*memptr = Allocate(stack, size, alignment, kAlwaysClearMemory); *memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
// FIXME: Return ENOMEM if user requested more than max alloc size. // FIXME: Return ENOMEM if user requested more than max alloc size.
...@@ -95,7 +112,7 @@ INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { ...@@ -95,7 +112,7 @@ INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
} }
INTERCEPTOR(void*, valloc, uptr size) { INTERCEPTOR(void*, valloc, uptr size) {
Init(); ENSURE_LSAN_INITED;
GET_STACK_TRACE; GET_STACK_TRACE;
if (size == 0) if (size == 0)
size = GetPageSizeCached(); size = GetPageSizeCached();
...@@ -103,7 +120,7 @@ INTERCEPTOR(void*, valloc, uptr size) { ...@@ -103,7 +120,7 @@ INTERCEPTOR(void*, valloc, uptr size) {
} }
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
Init(); ENSURE_LSAN_INITED;
return GetMallocUsableSize(ptr); return GetMallocUsableSize(ptr);
} }
...@@ -122,7 +139,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) { ...@@ -122,7 +139,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
} }
INTERCEPTOR(void*, pvalloc, uptr size) { INTERCEPTOR(void*, pvalloc, uptr size) {
Init(); ENSURE_LSAN_INITED;
GET_STACK_TRACE; GET_STACK_TRACE;
uptr PageSize = GetPageSizeCached(); uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize); size = RoundUpTo(size, PageSize);
...@@ -136,7 +153,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) { ...@@ -136,7 +153,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
INTERCEPTOR(void, cfree, void *p) ALIAS("free"); INTERCEPTOR(void, cfree, void *p) ALIAS("free");
#define OPERATOR_NEW_BODY \ #define OPERATOR_NEW_BODY \
Init(); \ ENSURE_LSAN_INITED; \
GET_STACK_TRACE; \ GET_STACK_TRACE; \
return Allocate(stack, size, 1, kAlwaysClearMemory); return Allocate(stack, size, 1, kAlwaysClearMemory);
...@@ -150,7 +167,7 @@ INTERCEPTOR_ATTRIBUTE ...@@ -150,7 +167,7 @@ INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; } void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
#define OPERATOR_DELETE_BODY \ #define OPERATOR_DELETE_BODY \
Init(); \ ENSURE_LSAN_INITED; \
Deallocate(ptr); Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
...@@ -190,9 +207,6 @@ struct ThreadParam { ...@@ -190,9 +207,6 @@ struct ThreadParam {
atomic_uintptr_t tid; atomic_uintptr_t tid;
}; };
// PTHREAD_DESTRUCTOR_ITERATIONS from glibc.
const uptr kPthreadDestructorIterations = 4;
extern "C" void *__lsan_thread_start_func(void *arg) { extern "C" void *__lsan_thread_start_func(void *arg) {
ThreadParam *p = (ThreadParam*)arg; ThreadParam *p = (ThreadParam*)arg;
void* (*callback)(void *arg) = p->callback; void* (*callback)(void *arg) = p->callback;
...@@ -215,14 +229,14 @@ extern "C" void *__lsan_thread_start_func(void *arg) { ...@@ -215,14 +229,14 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
INTERCEPTOR(int, pthread_create, void *th, void *attr, INTERCEPTOR(int, pthread_create, void *th, void *attr,
void *(*callback)(void *), void *param) { void *(*callback)(void *), void *param) {
Init(); ENSURE_LSAN_INITED;
EnsureMainThreadIDIsCorrect(); EnsureMainThreadIDIsCorrect();
__sanitizer_pthread_attr_t myattr; __sanitizer_pthread_attr_t myattr;
if (attr == 0) { if (attr == 0) {
pthread_attr_init(&myattr); pthread_attr_init(&myattr);
attr = &myattr; attr = &myattr;
} }
AdjustStackSizeLinux(attr, 0); AdjustStackSizeLinux(attr);
int detached = 0; int detached = 0;
pthread_attr_getdetachstate(attr, &detached); pthread_attr_getdetachstate(attr, &detached);
ThreadParam p; ThreadParam p;
...@@ -243,7 +257,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, ...@@ -243,7 +257,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
} }
INTERCEPTOR(int, pthread_join, void *th, void **ret) { INTERCEPTOR(int, pthread_join, void *th, void **ret) {
Init(); ENSURE_LSAN_INITED;
int tid = ThreadTid((uptr)th); int tid = ThreadTid((uptr)th);
int res = REAL(pthread_join)(th, ret); int res = REAL(pthread_join)(th, ret);
if (res == 0) if (res == 0)
......
//===-- lsan_preinit.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
//
// Call __lsan_init at the very early stage of process startup.
//===----------------------------------------------------------------------===//
#include "lsan.h"
#ifndef LSAN_USE_PREINIT_ARRAY
#define LSAN_USE_PREINIT_ARRAY 1
#endif
#if LSAN_USE_PREINIT_ARRAY && !defined(PIC)
// We force __lsan_init to be called before anyone else by placing it into
// .preinit_array section.
__attribute__((section(".preinit_array"), used))
void (*__local_lsan_preinit)(void) = __lsan_init;
#endif
...@@ -143,6 +143,10 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, ...@@ -143,6 +143,10 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
return true; return true;
} }
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void *arg) {
}
void LockThreadRegistry() { void LockThreadRegistry() {
thread_registry->Lock(); thread_registry->Lock();
} }
......
...@@ -14,22 +14,28 @@ sanitizer_common_files = \ ...@@ -14,22 +14,28 @@ sanitizer_common_files = \
sanitizer_allocator.cc \ sanitizer_allocator.cc \
sanitizer_common.cc \ sanitizer_common.cc \
sanitizer_common_libcdep.cc \ sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
sanitizer_flags.cc \ sanitizer_flags.cc \
sanitizer_libc.cc \ sanitizer_libc.cc \
sanitizer_libignore.cc \
sanitizer_linux.cc \ sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \ sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \ sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \ sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \ sanitizer_platform_limits_posix.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \ sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
sanitizer_printf.cc \ sanitizer_printf.cc \
sanitizer_stackdepot.cc \ sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \ sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \ sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \ sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \ sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \ sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
sanitizer_thread_registry.cc \ sanitizer_thread_registry.cc \
sanitizer_win.cc sanitizer_win.cc
......
...@@ -56,17 +56,19 @@ CONFIG_CLEAN_VPATH_FILES = ...@@ -56,17 +56,19 @@ CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES) LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD = libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_common_libcdep.lo sanitizer_flags.lo \ sanitizer_common_libcdep.lo sanitizer_coverage.lo \
sanitizer_libc.lo sanitizer_linux.lo \ sanitizer_flags.lo sanitizer_libc.lo sanitizer_libignore.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \ sanitizer_linux.lo sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_platform_limits_linux.lo \ sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \ sanitizer_platform_limits_posix.lo sanitizer_posix_libcdep.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \ sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \ sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
sanitizer_stoptheworld_linux_libcdep.lo \ sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_suppressions.lo \ sanitizer_suppressions.lo \
sanitizer_symbolizer_posix_libcdep.lo \ sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \ sanitizer_symbolizer_win.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo sanitizer_thread_registry.lo \
sanitizer_win.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1) am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS) libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
...@@ -235,22 +237,28 @@ sanitizer_common_files = \ ...@@ -235,22 +237,28 @@ sanitizer_common_files = \
sanitizer_allocator.cc \ sanitizer_allocator.cc \
sanitizer_common.cc \ sanitizer_common.cc \
sanitizer_common_libcdep.cc \ sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
sanitizer_flags.cc \ sanitizer_flags.cc \
sanitizer_libc.cc \ sanitizer_libc.cc \
sanitizer_libignore.cc \
sanitizer_linux.cc \ sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \ sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \ sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \ sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \ sanitizer_platform_limits_posix.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \ sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
sanitizer_printf.cc \ sanitizer_printf.cc \
sanitizer_stackdepot.cc \ sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \ sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \ sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \ sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \ sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \ sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
sanitizer_thread_registry.cc \ sanitizer_thread_registry.cc \
sanitizer_win.cc sanitizer_win.cc
...@@ -350,8 +358,10 @@ distclean-compile: ...@@ -350,8 +358,10 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
...@@ -362,8 +372,12 @@ distclean-compile: ...@@ -362,8 +372,12 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
......
...@@ -585,7 +585,69 @@ class FlatByteMap { ...@@ -585,7 +585,69 @@ class FlatByteMap {
u8 map_[kSize]; u8 map_[kSize];
}; };
// FIXME: Also implement TwoLevelByteMap. // TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
public:
void TestOnlyInit() {
internal_memset(map1_, 0, sizeof(map1_));
mu_.Init();
}
void TestOnlyUnmap() {
for (uptr i = 0; i < kSize1; i++) {
u8 *p = Get(i);
if (!p) continue;
MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
UnmapOrDie(p, kSize2);
}
}
uptr size() const { return kSize1 * kSize2; }
uptr size1() const { return kSize1; }
uptr size2() const { return kSize2; }
void set(uptr idx, u8 val) {
CHECK_LT(idx, kSize1 * kSize2);
u8 *map2 = GetOrCreate(idx / kSize2);
CHECK_EQ(0U, map2[idx % kSize2]);
map2[idx % kSize2] = val;
}
u8 operator[] (uptr idx) const {
CHECK_LT(idx, kSize1 * kSize2);
u8 *map2 = Get(idx / kSize2);
if (!map2) return 0;
return map2[idx % kSize2];
}
private:
u8 *Get(uptr idx) const {
CHECK_LT(idx, kSize1);
return reinterpret_cast<u8 *>(
atomic_load(&map1_[idx], memory_order_acquire));
}
u8 *GetOrCreate(uptr idx) {
u8 *res = Get(idx);
if (!res) {
SpinMutexLock l(&mu_);
if (!(res = Get(idx))) {
res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
memory_order_release);
}
}
return res;
}
atomic_uintptr_t map1_[kSize1];
StaticSpinMutex mu_;
};
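
A rough standalone model of the TwoLevelByteMap added above, using std::atomic, std::mutex and new[] in place of the sanitizer's internal atomics, spin mutex and MmapOrDie; names and sizes are illustrative only, not the runtime's:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <mutex>

template <uint64_t kSize1, uint64_t kSize2>
class TwoLevelByteMapModel {
 public:
  TwoLevelByteMapModel() {
    for (auto &p : map1_) p.store(nullptr, std::memory_order_relaxed);
  }
  uint64_t size() const { return kSize1 * kSize2; }
  void set(uint64_t idx, uint8_t val) {
    assert(idx < kSize1 * kSize2);
    uint8_t *map2 = GetOrCreate(idx / kSize2);
    assert(map2[idx % kSize2] == 0);  // Each cell may be set only once.
    map2[idx % kSize2] = val;
  }
  uint8_t operator[](uint64_t idx) const {
    assert(idx < kSize1 * kSize2);
    uint8_t *map2 = map1_[idx / kSize2].load(std::memory_order_acquire);
    return map2 ? map2[idx % kSize2] : 0;  // Missing second level reads as 0.
  }
 private:
  uint8_t *GetOrCreate(uint64_t idx) {
    uint8_t *res = map1_[idx].load(std::memory_order_acquire);
    if (!res) {
      std::lock_guard<std::mutex> lock(mu_);  // Only creation takes the lock.
      res = map1_[idx].load(std::memory_order_relaxed);
      if (!res) {
        res = new uint8_t[kSize2]();  // Zero-filled, like a fresh mmap; never freed in this toy.
        map1_[idx].store(res, std::memory_order_release);
      }
    }
    return res;
  }
  std::atomic<uint8_t *> map1_[kSize1];
  std::mutex mu_;
};

int main() {
  TwoLevelByteMapModel<8, 4096> m;  // 8 lazily-created 4096-byte arrays.
  m.set(5000, 42);                  // Creates second-level array #1 on demand.
  std::printf("m[5000]=%d m[0]=%d\n", m[5000], m[0]);  // Prints 42 and 0.
}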
// SizeClassAllocator32 -- allocator for 32-bit address space. // SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less // This allocator can theoretically be used on 64-bit arch, but there it is less
...@@ -1049,6 +1111,7 @@ class LargeMmapAllocator { ...@@ -1049,6 +1111,7 @@ class LargeMmapAllocator {
// This function does the same as GetBlockBegin, but is much faster. // This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked. // Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *ptr) { void *GetBlockBeginFastLocked(void *ptr) {
mutex_.CheckLocked();
uptr p = reinterpret_cast<uptr>(ptr); uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_; uptr n = n_chunks_;
if (!n) return 0; if (!n) return 0;
...@@ -1181,14 +1244,15 @@ class CombinedAllocator { ...@@ -1181,14 +1244,15 @@ class CombinedAllocator {
if (alignment > 8) if (alignment > 8)
size = RoundUpTo(size, alignment); size = RoundUpTo(size, alignment);
void *res; void *res;
if (primary_.CanAllocate(size, alignment)) bool from_primary = primary_.CanAllocate(size, alignment);
if (from_primary)
res = cache->Allocate(&primary_, primary_.ClassID(size)); res = cache->Allocate(&primary_, primary_.ClassID(size));
else else
res = secondary_.Allocate(&stats_, size, alignment); res = secondary_.Allocate(&stats_, size, alignment);
if (alignment > 8) if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0); CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
if (cleared && res) if (cleared && res && from_primary)
internal_memset(res, 0, size); internal_bzero_aligned16(res, RoundUpTo(size, 16));
return res; return res;
} }
......
...@@ -25,21 +25,25 @@ static const uptr kInternalAllocatorSpace = 0; ...@@ -25,21 +25,25 @@ static const uptr kInternalAllocatorSpace = 0;
#if SANITIZER_WORDSIZE == 32 #if SANITIZER_WORDSIZE == 32
static const u64 kInternalAllocatorSize = (1ULL << 32); static const u64 kInternalAllocatorSize = (1ULL << 32);
static const uptr kInternalAllocatorRegionSizeLog = 20; static const uptr kInternalAllocatorRegionSizeLog = 20;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else #else
static const u64 kInternalAllocatorSize = (1ULL << 47); static const u64 kInternalAllocatorSize = (1ULL << 47);
static const uptr kInternalAllocatorRegionSizeLog = 24; static const uptr kInternalAllocatorRegionSizeLog = 24;
#endif static const uptr kInternalAllocatorNumRegions =
static const uptr kInternalAllocatorFlatByteMapSize =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog; kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kInternalAllocatorNumRegions >> 12), 1 << 12> ByteMap;
#endif
typedef SizeClassAllocator32< typedef SizeClassAllocator32<
kInternalAllocatorSpace, kInternalAllocatorSize, 16, InternalSizeClassMap, kInternalAllocatorSpace, kInternalAllocatorSize, 16, InternalSizeClassMap,
kInternalAllocatorRegionSizeLog, kInternalAllocatorRegionSizeLog, ByteMap> PrimaryInternalAllocator;
FlatByteMap<kInternalAllocatorFlatByteMapSize> > PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator> typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache; InternalAllocatorCache;
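
For reference, the arithmetic behind the new 64-bit ByteMap choice works out as follows; this is a throwaway check using the constants shown above, not part of the runtime:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kSize = 1ULL << 47;           // kInternalAllocatorSize, 64-bit case
  const uint64_t kRegionSizeLog = 24;          // kInternalAllocatorRegionSizeLog
  const uint64_t kNumRegions = kSize >> kRegionSizeLog;   // 2^23 regions
  const uint64_t kLevel1 = kNumRegions >> 12;             // 2^11 first-level slots
  const uint64_t kLevel2 = 1ULL << 12;                    // 2^12 bytes per slot
  std::printf("regions=%llu = %llu x %llu\n",
              (unsigned long long)kNumRegions,
              (unsigned long long)kLevel1, (unsigned long long)kLevel2);
  // The product equals the region count, so TwoLevelByteMap covers every
  // region while only the 2^11 first-level pointers exist up front; a flat
  // map would need all 2^23 bytes statically.
}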
// We don't want our internal allocator to do any map/unmap operations. // We don't want our internal allocator to do any map/unmap operations from
// LargeMmapAllocator.
struct CrashOnMapUnmap { struct CrashOnMapUnmap {
void OnMap(uptr p, uptr size) const { void OnMap(uptr p, uptr size) const {
RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!"); RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!");
......
...@@ -10,12 +10,14 @@ ...@@ -10,12 +10,14 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h" #include "sanitizer_libc.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer { namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool"; const char *SanitizerToolName = "SanitizerTool";
uptr SanitizerVerbosity = 0;
uptr GetPageSizeCached() { uptr GetPageSizeCached() {
static uptr PageSize; static uptr PageSize;
...@@ -134,14 +136,71 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) { ...@@ -134,14 +136,71 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
return (void*)res; return (void*)res;
} }
const char *StripPathPrefix(const char *filepath,
const char *strip_path_prefix) {
if (filepath == 0) return 0;
if (strip_path_prefix == 0) return filepath;
const char *pos = internal_strstr(filepath, strip_path_prefix);
if (pos == 0) return filepath;
pos += internal_strlen(strip_path_prefix);
if (pos[0] == '.' && pos[1] == '/')
pos += 2;
return pos;
}
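
A standalone illustration of the StripPathPrefix behaviour above, using the C library in place of the internal_* string routines (demo only):

#include <cstdio>
#include <cstring>

const char *StripPathPrefixDemo(const char *filepath, const char *prefix) {
  if (!filepath) return nullptr;
  if (!prefix) return filepath;
  const char *pos = std::strstr(filepath, prefix);
  if (!pos) return filepath;                     // Prefix not found: keep as-is.
  pos += std::strlen(prefix);
  if (pos[0] == '.' && pos[1] == '/') pos += 2;  // Also drop a leading "./".
  return pos;
}

int main() {
  // With strip_path_prefix="/build/" an absolute build path becomes relative.
  std::printf("%s\n",
              StripPathPrefixDemo("/build/./src/null-deref-1.c", "/build/"));
  // Prints: src/null-deref-1.c
}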
void PrintSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column) {
CHECK(file);
buffer->append("%s",
StripPathPrefix(file, common_flags()->strip_path_prefix));
if (line > 0) {
buffer->append(":%d", line);
if (column > 0)
buffer->append(":%d", column);
}
}
void PrintModuleAndOffset(InternalScopedString *buffer, const char *module,
uptr offset) {
buffer->append("(%s+0x%zx)",
StripPathPrefix(module, common_flags()->strip_path_prefix),
offset);
}
void ReportErrorSummary(const char *error_message) {
if (!common_flags()->print_summary)
return;
InternalScopedBuffer<char> buff(kMaxSummaryLength);
internal_snprintf(buff.data(), buff.size(),
"SUMMARY: %s: %s", SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data());
}
void ReportErrorSummary(const char *error_type, const char *file, void ReportErrorSummary(const char *error_type, const char *file,
int line, const char *function) { int line, const char *function) {
const int kMaxSize = 1024; // We don't want a summary too long. if (!common_flags()->print_summary)
InternalScopedBuffer<char> buff(kMaxSize); return;
internal_snprintf(buff.data(), kMaxSize, "%s: %s %s:%d %s", InternalScopedBuffer<char> buff(kMaxSummaryLength);
SanitizerToolName, error_type, internal_snprintf(
file ? file : "??", line, function ? function : "??"); buff.data(), buff.size(), "%s %s:%d %s", error_type,
__sanitizer_report_error_summary(buff.data()); file ? StripPathPrefix(file, common_flags()->strip_path_prefix) : "??",
line, function ? function : "??");
ReportErrorSummary(buff.data());
}
void ReportErrorSummary(const char *error_type, StackTrace *stack) {
if (!common_flags()->print_summary)
return;
AddressInfo ai;
#if !SANITIZER_GO
if (stack->size > 0 && Symbolizer::Get()->IsAvailable()) {
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
Symbolizer::Get()->SymbolizeCode(pc, &ai, 1);
}
#endif
ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
} }
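
To see what these overloads end up producing, here is the same formatting done with plain snprintf; the tool name and message below are made up for the demo, and the runtime routes the final string through __sanitizer_report_error_summary instead of printing it directly:

#include <cstdio>

int main() {
  char msg[512], summary[1024];
  // ReportErrorSummary(error_type, file, line, function) builds:
  std::snprintf(msg, sizeof(msg), "%s %s:%d %s",
                "heap-buffer-overflow", "null-deref-1.c", 10, "NullDeref");
  // ...and the single-argument overload prepends the tool name:
  std::snprintf(summary, sizeof(summary), "SUMMARY: %s: %s",
                "AddressSanitizer", msg);
  std::printf("%s\n", summary);
  // SUMMARY: AddressSanitizer: heap-buffer-overflow null-deref-1.c:10 NullDeref
}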
LoadedModule::LoadedModule(const char *module_name, uptr base_address) { LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
...@@ -165,13 +224,25 @@ bool LoadedModule::containsAddress(uptr address) const { ...@@ -165,13 +224,25 @@ bool LoadedModule::containsAddress(uptr address) const {
return false; return false;
} }
char *StripModuleName(const char *module) {
if (module == 0)
return 0;
const char *short_module_name = internal_strrchr(module, '/');
if (short_module_name)
short_module_name += 1;
else
short_module_name = module;
return internal_strdup(short_module_name);
}
} // namespace __sanitizer } // namespace __sanitizer
using namespace __sanitizer; // NOLINT using namespace __sanitizer; // NOLINT
extern "C" { extern "C" {
void __sanitizer_set_report_path(const char *path) { void __sanitizer_set_report_path(const char *path) {
if (!path) return; if (!path)
return;
uptr len = internal_strlen(path); uptr len = internal_strlen(path);
if (len > sizeof(report_path_prefix) - 100) { if (len > sizeof(report_path_prefix) - 100) {
Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n", Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
...@@ -179,18 +250,21 @@ void __sanitizer_set_report_path(const char *path) { ...@@ -179,18 +250,21 @@ void __sanitizer_set_report_path(const char *path) {
path[4], path[5], path[6], path[7]); path[4], path[5], path[6], path[7]);
Die(); Die();
} }
internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
report_path_prefix[len] = '\0';
report_fd = kInvalidFd;
log_to_file = true;
}
void __sanitizer_set_report_fd(int fd) {
if (report_fd != kStdoutFd && if (report_fd != kStdoutFd &&
report_fd != kStderrFd && report_fd != kStderrFd &&
report_fd != kInvalidFd) report_fd != kInvalidFd)
internal_close(report_fd); internal_close(report_fd);
report_fd = fd; report_fd = kInvalidFd;
log_to_file = false;
if (internal_strcmp(path, "stdout") == 0) {
report_fd = kStdoutFd;
} else if (internal_strcmp(path, "stderr") == 0) {
report_fd = kStderrFd;
} else {
internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
report_path_prefix[len] = '\0';
log_to_file = true;
}
} }
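
Usage sketch for the reworked report-path interface, assuming the public header <sanitizer/common_interface_defs.h> is available and the binary is built with a sanitizer enabled (otherwise the symbol is not provided at link time):

#include <sanitizer/common_interface_defs.h>  // declares __sanitizer_set_report_path

int main() {
  // The special values keep reports on a standard stream...
  __sanitizer_set_report_path("stderr");
  // ...anything else is treated as a file prefix: reports go to "asan.log.<pid>".
  __sanitizer_set_report_path("asan.log");
  return 0;
}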
void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) { void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
...@@ -199,6 +273,6 @@ void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) { ...@@ -199,6 +273,6 @@ void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
} }
void __sanitizer_report_error_summary(const char *error_summary) { void __sanitizer_report_error_summary(const char *error_summary) {
Printf("SUMMARY: %s\n", error_summary); Printf("%s\n", error_summary);
} }
} // extern "C" } // extern "C"
...@@ -34,7 +34,6 @@ const uptr kCacheLineSize = 64; ...@@ -34,7 +34,6 @@ const uptr kCacheLineSize = 64;
const uptr kMaxPathLength = 512; const uptr kMaxPathLength = 512;
extern const char *SanitizerToolName; // Can be changed by the tool. extern const char *SanitizerToolName; // Can be changed by the tool.
extern uptr SanitizerVerbosity;
uptr GetPageSize(); uptr GetPageSize();
uptr GetPageSizeCached(); uptr GetPageSizeCached();
...@@ -86,6 +85,23 @@ class InternalScopedBuffer { ...@@ -86,6 +85,23 @@ class InternalScopedBuffer {
void operator=(const InternalScopedBuffer&); void operator=(const InternalScopedBuffer&);
}; };
class InternalScopedString : public InternalScopedBuffer<char> {
public:
explicit InternalScopedString(uptr max_length)
: InternalScopedBuffer<char>(max_length), length_(0) {
(*this)[0] = '\0';
}
uptr length() { return length_; }
void clear() {
(*this)[0] = '\0';
length_ = 0;
}
void append(const char *format, ...);
private:
uptr length_;
};
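
A rough standalone model of how InternalScopedString is meant to be used; std::vector and vsnprintf stand in for the mmapped buffer and the internal printf, and the class below is not the runtime's:

#include <cstdarg>
#include <cstdio>
#include <vector>

class ScopedStringModel {
 public:
  explicit ScopedStringModel(size_t max_length)
      : buf_(max_length, '\0'), length_(0) {}
  size_t length() const { return length_; }
  void clear() { buf_[0] = '\0'; length_ = 0; }
  void append(const char *format, ...) {
    va_list args;
    va_start(args, format);
    int n = std::vsnprintf(&buf_[length_], buf_.size() - length_, format, args);
    va_end(args);
    if (n > 0) length_ += (size_t)n;
    if (length_ > buf_.size() - 1) length_ = buf_.size() - 1;  // Clamp on truncation.
  }
  const char *data() const { return buf_.data(); }
 private:
  std::vector<char> buf_;
  size_t length_;
};

int main() {
  // Mirrors PrintSourceLocation: "file:line:column" appended piecewise.
  ScopedStringModel s(256);
  s.append("%s", "null-deref-1.c");
  s.append(":%d", 10);
  s.append(":%d", 3);
  std::printf("%s (length=%zu)\n", s.data(), s.length());
}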
// Simple low-level (mmap-based) allocator for internal use. Doesn't have // Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be // constructor, so all instances of LowLevelAllocator should be
// linker initialized. // linker initialized.
...@@ -110,6 +126,7 @@ bool PrintsToTtyCached(); ...@@ -110,6 +126,7 @@ bool PrintsToTtyCached();
void Printf(const char *format, ...); void Printf(const char *format, ...);
void Report(const char *format, ...); void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *)); void SetPrintfAndReportCallback(void (*callback)(const char *));
// Can be used to prevent mixing error reports from different sanitizers. // Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex; extern StaticSpinMutex CommonSanitizerReportMutex;
void MaybeOpenReportFile(); void MaybeOpenReportFile();
...@@ -130,6 +147,14 @@ uptr ReadFileToBuffer(const char *file_name, char **buff, ...@@ -130,6 +147,14 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
// in '*buff_size'. // in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size); void *MapFileToMemory(const char *file_name, uptr *buff_size);
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
const char *strip_file_prefix);
void PrintSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column);
void PrintModuleAndOffset(InternalScopedString *buffer,
const char *module, uptr offset);
// OS // OS
void DisableCoreDumper(); void DisableCoreDumper();
void DumpProcessMap(); void DumpProcessMap();
...@@ -153,6 +178,9 @@ void SleepForMillis(int millis); ...@@ -153,6 +178,9 @@ void SleepForMillis(int millis);
u64 NanoTime(); u64 NanoTime();
int Atexit(void (*function)(void)); int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size); void SortArray(uptr *array, uptr size);
// Strip the directories from the module name, return a new string allocated
// with internal_strdup.
char *StripModuleName(const char *module);
// Exit // Exit
void NORETURN Abort(); void NORETURN Abort();
...@@ -176,11 +204,17 @@ typedef void (*CheckFailedCallbackType)(const char *, int, const char *, ...@@ -176,11 +204,17 @@ typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64); u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback); void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Construct a one-line string like // We don't want a summary too long.
// SanitizerToolName: error_type file:line function const int kMaxSummaryLength = 1024;
// and call __sanitizer_report_error_summary on it. // Construct a one-line string:
// SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
// error_type: file:line function
void ReportErrorSummary(const char *error_type, const char *file, void ReportErrorSummary(const char *error_type, const char *file,
int line, const char *function); int line, const char *function);
void ReportErrorSummary(const char *error_type, StackTrace *trace);
// Math // Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__) #if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
...@@ -326,6 +360,8 @@ class InternalMmapVector { ...@@ -326,6 +360,8 @@ class InternalMmapVector {
return capacity_; return capacity_;
} }
void clear() { size_ = 0; }
private: private:
void Resize(uptr new_capacity) { void Resize(uptr new_capacity) {
CHECK_GT(new_capacity, 0); CHECK_GT(new_capacity, 0);
...@@ -431,6 +467,20 @@ typedef bool (*string_predicate_t)(const char *); ...@@ -431,6 +467,20 @@ typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules, uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter); string_predicate_t filter);
#if SANITIZER_POSIX
const uptr kPthreadDestructorIterations = 4;
#else
// Unused on Windows.
const uptr kPthreadDestructorIterations = 0;
#endif
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
} // namespace __sanitizer } // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
__sanitizer::LowLevelAllocator &alloc) {
return alloc.Allocate(size);
}
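
The operator new overload added above enables the "construct in a linker-initialized arena, never free" pattern. A standalone sketch with a toy bump allocator standing in for __sanitizer::LowLevelAllocator:

#include <cstddef>
#include <cstdio>

class BumpAllocator {
 public:
  void *Allocate(std::size_t size) {
    size = (size + 15) & ~std::size_t(15);  // Keep 16-byte alignment.
    void *res = buf_ + used_;
    used_ += size;
    return res;
  }
 private:
  alignas(16) char buf_[1 << 12];
  std::size_t used_ = 0;
};

inline void *operator new(std::size_t size, BumpAllocator &alloc) {
  return alloc.Allocate(size);
}

struct Suppression { int type; const char *templ; };

int main() {
  static BumpAllocator arena;
  // Objects are constructed in place and never individually freed, which is
  // how the sanitizers use LowLevelAllocator for long-lived metadata.
  Suppression *s = new (arena) Suppression{1, "called_from_lib:foo.so"};
  std::printf("%d %s\n", s->type, s->templ);
}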
#endif // SANITIZER_COMMON_H #endif // SANITIZER_COMMON_H
...@@ -86,7 +86,7 @@ static void ioctl_table_fill() { ...@@ -86,7 +86,7 @@ static void ioctl_table_fill() {
_(TIOCSTI, READ, sizeof(char)); _(TIOCSTI, READ, sizeof(char));
_(TIOCSWINSZ, READ, struct_winsize_sz); _(TIOCSWINSZ, READ, struct_winsize_sz);
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_MAC #if (SANITIZER_LINUX && !SANITIZER_ANDROID)
_(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz); _(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);
_(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz); _(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);
#endif #endif
......
...@@ -23,8 +23,16 @@ ...@@ -23,8 +23,16 @@
// COMMON_SYSCALL_POST_WRITE_RANGE // COMMON_SYSCALL_POST_WRITE_RANGE
// Called in posthook for regions that were written to by the kernel // Called in posthook for regions that were written to by the kernel
// and are now initialized. // and are now initialized.
// COMMON_SYSCALL_ACQUIRE(addr)
// Acquire memory visibility from addr.
// COMMON_SYSCALL_RELEASE(addr)
// Release memory visibility to addr.
// COMMON_SYSCALL_FD_CLOSE(fd) // COMMON_SYSCALL_FD_CLOSE(fd)
// Called before closing file descriptor fd. // Called before closing file descriptor fd.
// COMMON_SYSCALL_FD_ACQUIRE(fd)
// Acquire memory visibility from fd.
// COMMON_SYSCALL_FD_RELEASE(fd)
// Release memory visibility to fd.
// COMMON_SYSCALL_PRE_FORK() // COMMON_SYSCALL_PRE_FORK()
// Called before fork syscall. // Called before fork syscall.
// COMMON_SYSCALL_POST_FORK(long res) // COMMON_SYSCALL_POST_FORK(long res)
...@@ -46,20 +54,34 @@ ...@@ -46,20 +54,34 @@
#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s) #define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s) #define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
#ifndef COMMON_SYSCALL_ACQUIRE
# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
#endif
#ifndef COMMON_SYSCALL_RELEASE
# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
#endif
#ifndef COMMON_SYSCALL_FD_CLOSE #ifndef COMMON_SYSCALL_FD_CLOSE
# define COMMON_SYSCALL_FD_CLOSE(fd) # define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
#endif
#ifndef COMMON_SYSCALL_FD_ACQUIRE
# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
#endif
#ifndef COMMON_SYSCALL_FD_RELEASE
# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
#endif #endif
#ifndef COMMON_SYSCALL_PRE_FORK #ifndef COMMON_SYSCALL_PRE_FORK
# define COMMON_SYSCALL_PRE_FORK() # define COMMON_SYSCALL_PRE_FORK() {}
#endif #endif
#ifndef COMMON_SYSCALL_POST_FORK #ifndef COMMON_SYSCALL_POST_FORK
# define COMMON_SYSCALL_POST_FORK(res) # define COMMON_SYSCALL_POST_FORK(res) {}
#endif #endif
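
The #ifndef defaults above let a tool opt into only the hooks it needs before including the .inc file; everything else becomes a no-op that still consumes its arguments. A minimal standalone illustration of that pattern (the hook names are reused, the printf body is made up):

#include <cstdio>

// Hook provided by a (hypothetical) tool, defined before the defaults:
#define COMMON_SYSCALL_FD_CLOSE(fd) std::printf("closing fd %d\n", (int)(fd))

// Defaults, as in sanitizer_common_syscalls.inc:
#ifndef COMMON_SYSCALL_FD_CLOSE
# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
#endif
#ifndef COMMON_SYSCALL_FD_ACQUIRE
# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))  // No-op, but fd is still "used".
#endif

void pre_close(long fd) {
  COMMON_SYSCALL_FD_ACQUIRE(fd);  // Expands to ((void)(fd)): no warning, no code.
  COMMON_SYSCALL_FD_CLOSE(fd);    // Expands to the tool's hook.
}

int main() { pre_close(3); }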
#ifdef SYSCALL_INTERCEPTION
// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such). // FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
extern "C" { extern "C" {
...@@ -1245,11 +1267,17 @@ PRE_SYSCALL(flock)(long fd, long cmd) {} ...@@ -1245,11 +1267,17 @@ PRE_SYSCALL(flock)(long fd, long cmd) {}
POST_SYSCALL(flock)(long res, long fd, long cmd) {} POST_SYSCALL(flock)(long res, long fd, long cmd) {}
PRE_SYSCALL(io_setup)(long nr_reqs, void *ctx) {} PRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {
if (ctx) PRE_WRITE(ctx, sizeof(*ctx));
}
POST_SYSCALL(io_setup)(long res, long nr_reqs, void *ctx) { POST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {
if (res >= 0) { if (res >= 0) {
if (ctx) POST_WRITE(ctx, sizeof(long)); if (ctx) POST_WRITE(ctx, sizeof(*ctx));
// (*ctx) is actually a pointer to a kernel mapped page, and there are
// people out there who are crazy enough to peek into that page's 32-byte
// header.
if (*ctx) POST_WRITE(*ctx, 32);
} }
} }
...@@ -1257,29 +1285,70 @@ PRE_SYSCALL(io_destroy)(long ctx) {} ...@@ -1257,29 +1285,70 @@ PRE_SYSCALL(io_destroy)(long ctx) {}
POST_SYSCALL(io_destroy)(long res, long ctx) {} POST_SYSCALL(io_destroy)(long res, long ctx) {}
PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr, void *events, PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr,
void *timeout) { __sanitizer_io_event *ioevpp, void *timeout) {
if (timeout) PRE_READ(timeout, struct_timespec_sz); if (timeout) PRE_READ(timeout, struct_timespec_sz);
} }
POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr, POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
void *events, void *timeout) { __sanitizer_io_event *ioevpp, void *timeout) {
if (res >= 0) { if (res >= 0) {
if (events) POST_WRITE(events, res * struct_io_event_sz); if (ioevpp) POST_WRITE(ioevpp, res * sizeof(*ioevpp));
if (timeout) POST_WRITE(timeout, struct_timespec_sz); if (timeout) POST_WRITE(timeout, struct_timespec_sz);
} }
for (long i = 0; i < res; i++) {
// We synchronize io_submit -> io_getevents/io_cancel using the
// user-provided data context. Data is not necessarily a pointer, it can be
// an int, 0 or whatever; acquire/release will correctly handle this.
// This scheme can lead to false negatives, e.g. when all operations
// synchronize on 0. But there does not seem to be a better solution
// (except wrapping all operations in own context, which is unreliable).
// We cannot reliably extract fildes in io_getevents.
COMMON_SYSCALL_ACQUIRE((void*)ioevpp[i].data);
}
}
PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
for (long i = 0; i < nr; ++i) {
uptr op = iocbpp[i]->aio_lio_opcode;
void *data = (void*)iocbpp[i]->aio_data;
void *buf = (void*)iocbpp[i]->aio_buf;
uptr len = (uptr)iocbpp[i]->aio_nbytes;
if (op == iocb_cmd_pwrite && buf && len) {
PRE_READ(buf, len);
} else if (op == iocb_cmd_pread && buf && len) {
POST_WRITE(buf, len);
} else if (op == iocb_cmd_pwritev) {
__sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
for (uptr v = 0; v < len; v++)
PRE_READ(iovec[i].iov_base, iovec[i].iov_len);
} else if (op == iocb_cmd_preadv) {
__sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
for (uptr v = 0; v < len; v++)
POST_WRITE(iovec[i].iov_base, iovec[i].iov_len);
}
// See comment in io_getevents.
COMMON_SYSCALL_RELEASE(data);
}
} }
PRE_SYSCALL(io_submit)(long, long arg1, void *arg2) {} POST_SYSCALL(io_submit)(long res, long ctx_id, long nr,
__sanitizer_iocb **iocbpp) {}
POST_SYSCALL(io_submit)(long res, long, long arg1, void *arg2) {}
PRE_SYSCALL(io_cancel)(long ctx_id, void *iocb, void *result) {} PRE_SYSCALL(io_cancel)(long ctx_id, __sanitizer_iocb *iocb,
__sanitizer_io_event *result) {
}
POST_SYSCALL(io_cancel)(long res, long ctx_id, void *iocb, void *result) { POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
if (res >= 0) { __sanitizer_io_event *result) {
if (iocb) POST_WRITE(iocb, struct_iocb_sz); if (res == 0) {
if (result) POST_WRITE(result, struct_io_event_sz); if (result) {
// See comment in io_getevents.
COMMON_SYSCALL_ACQUIRE((void*)result->data);
POST_WRITE(result, sizeof(*result));
}
if (iocb)
POST_WRITE(iocb, sizeof(*iocb));
} }
} }
...@@ -2063,14 +2132,6 @@ POST_SYSCALL(shmdt)(long res, void *shmaddr) { ...@@ -2063,14 +2132,6 @@ POST_SYSCALL(shmdt)(long res, void *shmaddr) {
} }
} }
PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
if (res >= 0) {
if (buf) POST_WRITE(buf, struct_shmid_ds_sz);
}
}
PRE_SYSCALL(ipc)(long call, long first, long second, long third, void *ptr, PRE_SYSCALL(ipc)(long call, long first, long second, long third, void *ptr,
long fifth) {} long fifth) {}
...@@ -2078,6 +2139,14 @@ POST_SYSCALL(ipc)(long res, long call, long first, long second, long third, ...@@ -2078,6 +2139,14 @@ POST_SYSCALL(ipc)(long res, long call, long first, long second, long third,
void *ptr, long fifth) {} void *ptr, long fifth) {}
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID
PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
if (res >= 0) {
if (buf) POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
}
}
PRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) { PRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) {
if (name) if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1); PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
...@@ -2218,9 +2287,49 @@ PRE_SYSCALL(ni_syscall)() {} ...@@ -2218,9 +2287,49 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {} POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {} PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#if defined(__i386) || defined (__x86_64)
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
} else if (request == ptrace_setfpregs) {
PRE_READ((void *)data, struct_user_fpregs_struct_sz);
} else if (request == ptrace_setfpxregs) {
PRE_READ((void *)data, struct_user_fpxregs_struct_sz);
} else if (request == ptrace_setsiginfo) {
PRE_READ((void *)data, siginfo_t_sz);
} else if (request == ptrace_setregset) {
__sanitizer_iovec *iov = (__sanitizer_iovec *)data;
PRE_READ(iov->iov_base, iov->iov_len);
}
}
#endif
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {} POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#if defined(__i386) || defined (__x86_64)
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
// PEEK* requests return resulting values through data pointer.
if (request == ptrace_getregs) {
POST_WRITE((void *)data, struct_user_regs_struct_sz);
} else if (request == ptrace_getfpregs) {
POST_WRITE((void *)data, struct_user_fpregs_struct_sz);
} else if (request == ptrace_getfpxregs) {
POST_WRITE((void *)data, struct_user_fpxregs_struct_sz);
} else if (request == ptrace_getsiginfo) {
POST_WRITE((void *)data, siginfo_t_sz);
} else if (request == ptrace_getregset) {
__sanitizer_iovec *iov = (__sanitizer_iovec *)data;
POST_WRITE(iov->iov_base, iov->iov_len);
} else if (request == ptrace_peekdata || request == ptrace_peektext ||
request == ptrace_peekuser) {
POST_WRITE((void *)data, sizeof(void *));
}
}
#endif
}
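
The PEEK* branch exists because, at the raw syscall level these hooks observe, the peeked word is written through the data argument; only the glibc wrapper turns it into a return value. A minimal Linux-only sketch showing that (error handling omitted):

#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <signal.h>
#include <cstdio>

int main() {
  static long secret = 0x1234abcd;
  pid_t pid = fork();
  if (pid == 0) {                      // Child: let the parent trace us.
    ptrace(PTRACE_TRACEME, 0, nullptr, nullptr);
    raise(SIGSTOP);
    _exit(0);
  }
  waitpid(pid, nullptr, 0);            // Wait for the child to stop.
  long word = 0;
  // Raw syscall: the kernel stores the peeked word at &word; this is the
  // write that the post-hook reports for ptrace_peekdata.
  syscall(SYS_ptrace, PTRACE_PEEKDATA, pid, &secret, &word);
  std::printf("peeked 0x%lx\n", word);
  kill(pid, SIGKILL);
  waitpid(pid, nullptr, 0);
  return 0;
}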
PRE_SYSCALL(add_key)(const void *_type, const void *_description, PRE_SYSCALL(add_key)(const void *_type, const void *_description,
const void *_payload, long plen, long destringid) { const void *_payload, long plen, long destringid) {
...@@ -2648,16 +2757,14 @@ PRE_SYSCALL(syncfs)(long fd) {} ...@@ -2648,16 +2757,14 @@ PRE_SYSCALL(syncfs)(long fd) {}
POST_SYSCALL(syncfs)(long res, long fd) {} POST_SYSCALL(syncfs)(long res, long fd) {}
PRE_SYSCALL(perf_event_open)(void *attr_uptr, long pid, long cpu, long group_fd, PRE_SYSCALL(perf_event_open)(__sanitizer_perf_event_attr *attr_uptr, long pid,
long flags) {} long cpu, long group_fd, long flags) {
if (attr_uptr) PRE_READ(attr_uptr, attr_uptr->size);
POST_SYSCALL(perf_event_open)(long res, void *attr_uptr, long pid, long cpu,
long group_fd, long flags) {
if (res >= 0) {
if (attr_uptr) POST_WRITE(attr_uptr, struct_perf_event_attr_sz);
}
} }
POST_SYSCALL(perf_event_open)(long res, __sanitizer_perf_event_attr *attr_uptr,
long pid, long cpu, long group_fd, long flags) {}
PRE_SYSCALL(mmap_pgoff)(long addr, long len, long prot, long flags, long fd, PRE_SYSCALL(mmap_pgoff)(long addr, long len, long prot, long flags, long fd,
long pgoff) {} long pgoff) {}
...@@ -2724,8 +2831,6 @@ POST_SYSCALL(vfork)(long res) { ...@@ -2724,8 +2831,6 @@ POST_SYSCALL(vfork)(long res) {
} }
} // extern "C" } // extern "C"
#endif
#undef PRE_SYSCALL #undef PRE_SYSCALL
#undef PRE_READ #undef PRE_READ
#undef PRE_WRITE #undef PRE_WRITE
......
//===-- sanitizer_coverage.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage.
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
// For every function F the compiler injects the following code:
// if (*Guard) {
// __sanitizer_cov(&F);
// *Guard = 1;
// }
// It's fine to call __sanitizer_cov more than once for a given function.
//
// Run-time:
// - __sanitizer_cov(pc): record that we've executed a given PC.
// - __sanitizer_cov_dump: dump the coverage data to disk.
// For every module of the current process that has coverage data
// this will create a file module_name.PID.sancov. The file format is simple:
// it's just a sorted sequence of 4-byte offsets in the module.
//
// Eventually, this coverage implementation should be obsoleted by a more
// powerful general-purpose Clang/LLVM coverage instrumentation.
// Consider this implementation a prototype.
//
// FIXME: support (or at least test with) dlclose.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_flags.h"
struct CovData {
BlockingMutex mu;
InternalMmapVector<uptr> v;
};
static uptr cov_data_placeholder[sizeof(CovData) / sizeof(uptr)];
COMPILER_CHECK(sizeof(cov_data_placeholder) >= sizeof(CovData));
static CovData *cov_data = reinterpret_cast<CovData*>(cov_data_placeholder);
namespace __sanitizer {
// Simply add the pc into the vector under lock. If the function is called more
// than once for a given PC it will be inserted multiple times, which is fine.
static void CovAdd(uptr pc) {
BlockingMutexLock lock(&cov_data->mu);
cov_data->v.push_back(pc);
}
static inline bool CompareLess(const uptr &a, const uptr &b) {
return a < b;
}
// Dump the coverage on disk.
void CovDump() {
#if !SANITIZER_WINDOWS
BlockingMutexLock lock(&cov_data->mu);
InternalMmapVector<uptr> &v = cov_data->v;
InternalSort(&v, v.size(), CompareLess);
InternalMmapVector<u32> offsets(v.size());
const uptr *vb = v.data();
const uptr *ve = vb + v.size();
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
uptr mb, me, off, prot;
InternalScopedBuffer<char> module(4096);
InternalScopedBuffer<char> path(4096 * 2);
for (int i = 0;
proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
i++) {
if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
continue;
if (vb >= ve) break;
if (mb <= *vb && *vb < me) {
offsets.clear();
const uptr *old_vb = vb;
CHECK_LE(off, *vb);
for (; vb < ve && *vb < me; vb++) {
uptr diff = *vb - (i ? mb : 0) + off;
CHECK_LE(diff, 0xffffffffU);
offsets.push_back(static_cast<u32>(diff));
}
char *module_name = StripModuleName(module.data());
internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
module_name, internal_getpid());
InternalFree(module_name);
uptr fd = OpenFile(path.data(), true);
internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
internal_close(fd);
if (common_flags()->verbosity)
Report(" CovDump: %s: %zd PCs written\n", path.data(), vb - old_vb);
}
}
#endif // !SANITIZER_WINDOWS
}
} // namespace __sanitizer
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc) {
CovAdd(reinterpret_cast<uptr>(pc));
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
} // extern "C"
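
Per the header comment, a module_name.PID.sancov file is just a sorted sequence of 4-byte offsets into the module, so it can be post-processed with a few lines of code. A small standalone reader (not part of the runtime):

#include <cstdint>
#include <cstdio>
#include <vector>

int main(int argc, char **argv) {
  if (argc != 2) {
    std::fprintf(stderr, "usage: %s file.sancov\n", argv[0]);
    return 1;
  }
  std::FILE *f = std::fopen(argv[1], "rb");
  if (!f) { std::perror("fopen"); return 1; }
  std::vector<uint32_t> offsets;
  uint32_t off;
  while (std::fread(&off, sizeof(off), 1, f) == 1)
    offsets.push_back(off);
  std::fclose(f);
  std::printf("%zu covered PCs\n", offsets.size());
  for (uint32_t o : offsets)
    std::printf("0x%x\n", o);  // Offset of an executed PC within the module.
  return 0;
}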
...@@ -16,20 +16,40 @@ ...@@ -16,20 +16,40 @@
namespace __sanitizer { namespace __sanitizer {
CommonFlags common_flags_dont_use_directly; void SetCommonFlagsDefaults(CommonFlags *f) {
f->symbolize = true;
f->external_symbolizer_path = 0;
f->strip_path_prefix = "";
f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
f->handle_ioctl = false;
f->malloc_context_size = 1;
f->log_path = "stderr";
f->verbosity = 0;
f->detect_leaks = false;
f->leak_check_at_exit = true;
f->allocator_may_return_null = false;
f->print_summary = true;
}
void ParseCommonFlagsFromString(const char *str) { void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
CommonFlags *f = common_flags(); ParseFlag(str, &f->symbolize, "symbolize");
ParseFlag(str, &f->malloc_context_size, "malloc_context_size"); ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix"); ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal"); ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc"); ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
ParseFlag(str, &f->symbolize, "symbolize");
ParseFlag(str, &f->handle_ioctl, "handle_ioctl"); ParseFlag(str, &f->handle_ioctl, "handle_ioctl");
ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
ParseFlag(str, &f->log_path, "log_path"); ParseFlag(str, &f->log_path, "log_path");
ParseFlag(str, &f->verbosity, "verbosity");
ParseFlag(str, &f->detect_leaks, "detect_leaks"); ParseFlag(str, &f->detect_leaks, "detect_leaks");
ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit"); ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit");
ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null"); ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null");
ParseFlag(str, &f->print_summary, "print_summary");
// Do a sanity check for certain flags.
if (f->malloc_context_size < 1)
f->malloc_context_size = 1;
} }
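
The intended flow is: the tool fills in defaults first, then lets an options string override them, then sanity-checks the result. A trimmed-down standalone model of that flow; the simplistic ParseFlag below only understands name=value pairs and is not the runtime's parser:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>

struct CommonFlagsModel { int verbosity; int malloc_context_size; };

static void SetDefaults(CommonFlagsModel *f) {
  f->verbosity = 0;
  f->malloc_context_size = 1;
}

static void ParseFlag(const char *options, int *flag, const char *name) {
  std::string key = std::string(name) + "=";
  const char *p = options ? std::strstr(options, key.c_str()) : nullptr;
  if (p) *flag = std::atoi(p + key.size());  // Unmentioned flags keep their default.
}

int main() {
  // A real tool would pass e.g. getenv("ASAN_OPTIONS") here.
  const char *opts = "verbosity=2:malloc_context_size=0";
  CommonFlagsModel f;
  SetDefaults(&f);
  ParseFlag(opts, &f.verbosity, "verbosity");
  ParseFlag(opts, &f.malloc_context_size, "malloc_context_size");
  if (f.malloc_context_size < 1) f.malloc_context_size = 1;  // Same sanity check as above.
  std::printf("verbosity=%d malloc_context_size=%d\n",
              f.verbosity, f.malloc_context_size);  // Prints 2 and 1.
}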
static bool GetFlagValue(const char *env, const char *name, static bool GetFlagValue(const char *env, const char *name,
......
...@@ -23,7 +23,8 @@ void ParseFlag(const char *env, const char **flag, const char *name); ...@@ -23,7 +23,8 @@ void ParseFlag(const char *env, const char **flag, const char *name);
struct CommonFlags { struct CommonFlags {
// If set, use the online symbolizer from common sanitizer runtime. // If set, use the online symbolizer from common sanitizer runtime.
bool symbolize; bool symbolize;
// Path to external symbolizer. // Path to external symbolizer. If it is NULL, symbolizer will be looked for
// in PATH. If it is empty, external symbolizer will not be started.
const char *external_symbolizer_path; const char *external_symbolizer_path;
// Strips this prefix from file paths in error reports. // Strips this prefix from file paths in error reports.
const char *strip_path_prefix; const char *strip_path_prefix;
...@@ -35,8 +36,12 @@ struct CommonFlags { ...@@ -35,8 +36,12 @@ struct CommonFlags {
bool handle_ioctl; bool handle_ioctl;
// Max number of stack frames kept for each allocation/deallocation. // Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size; int malloc_context_size;
// Write logs to "log_path.pid" instead of stderr. // Write logs to "log_path.pid".
// The special values are "stdout" and "stderr".
// The default is "stderr".
const char *log_path; const char *log_path;
// Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
// Enable memory leak detection. // Enable memory leak detection.
bool detect_leaks; bool detect_leaks;
// Invoke leak checking in an atexit handler. Has no effect if // Invoke leak checking in an atexit handler. Has no effect if
...@@ -45,15 +50,17 @@ struct CommonFlags { ...@@ -45,15 +50,17 @@ struct CommonFlags {
bool leak_check_at_exit; bool leak_check_at_exit;
// If false, the allocator will crash instead of returning 0 on out-of-memory. // If false, the allocator will crash instead of returning 0 on out-of-memory.
bool allocator_may_return_null; bool allocator_may_return_null;
// If false, disable printing error summaries in addition to error reports.
bool print_summary;
}; };
extern CommonFlags common_flags_dont_use_directly;
inline CommonFlags *common_flags() { inline CommonFlags *common_flags() {
return &common_flags_dont_use_directly; static CommonFlags f;
return &f;
} }
void ParseCommonFlagsFromString(const char *str); void SetCommonFlagsDefaults(CommonFlags *f);
void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
} // namespace __sanitizer } // namespace __sanitizer
......
...@@ -32,6 +32,12 @@ ...@@ -32,6 +32,12 @@
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0 # define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif #endif
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
#else
# define SANITIZER_WORDSIZE 32
#endif
// GCC does not understand __has_feature // GCC does not understand __has_feature
#if !defined(__has_feature) #if !defined(__has_feature)
# define __has_feature(x) 0 # define __has_feature(x) 0
...@@ -77,18 +83,20 @@ typedef u64 OFF_T; ...@@ -77,18 +83,20 @@ typedef u64 OFF_T;
typedef uptr OFF_T; typedef uptr OFF_T;
#endif #endif
typedef u64 OFF64_T; typedef u64 OFF64_T;
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
typedef uptr operator_new_size_type;
#else
typedef u32 operator_new_size_type;
#endif
} // namespace __sanitizer } // namespace __sanitizer
extern "C" { extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr. // Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path); void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to given file descriptor instead of
// stderr.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(int fd);
// Notify the tools that the sandbox is going to be turned on. The reserved // Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions // parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox. // that the tools may call to bypass the sandbox.
...@@ -100,6 +108,14 @@ extern "C" { ...@@ -100,6 +108,14 @@ extern "C" {
// the error message. This function can be overridden by the client. // the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary); void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
} // extern "C" } // extern "C"
...@@ -169,12 +185,6 @@ typedef void* thread_return_t; ...@@ -169,12 +185,6 @@ typedef void* thread_return_t;
#endif // _WIN32 #endif // _WIN32
typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg); typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
#else
# define SANITIZER_WORDSIZE 32
#endif
// NOTE: Functions below must be defined in each run-time. // NOTE: Functions below must be defined in each run-time.
namespace __sanitizer { namespace __sanitizer {
void NORETURN Die(); void NORETURN Die();
......
...@@ -14,6 +14,16 @@ ...@@ -14,6 +14,16 @@
namespace __sanitizer { namespace __sanitizer {
// Make the compiler think that something is going on there.
static inline void break_optimization(void *arg) {
#if SANITIZER_WINDOWS
// FIXME: make sure this is actually enough.
__asm;
#else
__asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
s64 internal_atoll(const char *nptr) { s64 internal_atoll(const char *nptr) {
return internal_simple_strtoll(nptr, (char**)0, 10); return internal_simple_strtoll(nptr, (char**)0, 10);
} }
...@@ -60,6 +70,16 @@ void *internal_memmove(void *dest, const void *src, uptr n) { ...@@ -60,6 +70,16 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
return dest; return dest;
} }
// Semi-fast bzero for 16-aligned data. Still far from peak performance.
void internal_bzero_aligned16(void *s, uptr n) {
struct S16 { u64 a, b; } ALIGNED(16);
CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
p->a = p->b = 0;
break_optimization(0); // Make sure this does not become memset.
}
}
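
A standalone model of the contract: both the pointer and the length must be multiples of 16, which callers such as CombinedAllocator guarantee by rounding the request up. Demo only, not the runtime's code:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct alignas(16) S16 { uint64_t a, b; };

void bzero_aligned16(void *s, uintptr_t n) {
  assert(((reinterpret_cast<uintptr_t>(s) | n) & 15) == 0);  // Mirrors the CHECK_EQ.
  S16 *p = reinterpret_cast<S16 *>(s);
  for (S16 *end = p + n / 16; p < end; p++)
    p->a = p->b = 0;
}

int main() {
  alignas(16) unsigned char buf[64];
  for (unsigned char &c : buf) c = 0xab;
  bzero_aligned16(buf, sizeof(buf));
  std::printf("buf[0]=%d buf[63]=%d\n", buf[0], buf[63]);  // Prints 0 0.
}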
void *internal_memset(void* s, int c, uptr n) { void *internal_memset(void* s, int c, uptr n) {
// The next line prevents Clang from making a call to memset() instead of the // The next line prevents Clang from making a call to memset() instead of the
// loop below. // loop below.
......
...@@ -27,6 +27,8 @@ void *internal_memchr(const void *s, int c, uptr n); ...@@ -27,6 +27,8 @@ void *internal_memchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n); int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n); void *internal_memcpy(void *dest, const void *src, uptr n);
void *internal_memmove(void *dest, const void *src, uptr n); void *internal_memmove(void *dest, const void *src, uptr n);
// Set [s, s + n) to 0. Both s and n should be 16-aligned.
void internal_bzero_aligned16(void *s, uptr n);
// Should not be used in performance-critical places. // Should not be used in performance-critical places.
void *internal_memset(void *s, int c, uptr n); void *internal_memset(void *s, int c, uptr n);
char* internal_strchr(const char *s, int c); char* internal_strchr(const char *s, int c);
......
//===-- sanitizer_libignore.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#include "sanitizer_libignore.h"
#include "sanitizer_flags.h"
#include "sanitizer_procmaps.h"
namespace __sanitizer {
LibIgnore::LibIgnore(LinkerInitialized) {
}
void LibIgnore::Init(const SuppressionContext &supp) {
BlockingMutexLock lock(&mutex_);
CHECK_EQ(count_, 0);
const uptr n = supp.SuppressionCount();
for (uptr i = 0; i < n; i++) {
const Suppression *s = supp.SuppressionAt(i);
if (s->type != SuppressionLib)
continue;
if (count_ >= kMaxLibs) {
Report("%s: too many called_from_lib suppressions (max: %d)\n",
SanitizerToolName, kMaxLibs);
Die();
}
Lib *lib = &libs_[count_++];
lib->templ = internal_strdup(s->templ);
lib->name = 0;
lib->loaded = false;
}
}
void LibIgnore::OnLibraryLoaded(const char *name) {
BlockingMutexLock lock(&mutex_);
// Try to match suppressions with symlink target.
InternalScopedBuffer<char> buf(4096);
if (name != 0 && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
buf.data()[0]) {
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
if (!lib->loaded && lib->real_name == 0 &&
TemplateMatch(lib->templ, name))
lib->real_name = internal_strdup(buf.data());
}
}
// Scan suppressions list and find newly loaded and unloaded libraries.
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
InternalScopedBuffer<char> module(4096);
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
bool loaded = false;
proc_maps.Reset();
uptr b, e, off, prot;
while (proc_maps.Next(&b, &e, &off, module.data(), module.size(), &prot)) {
if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
continue;
if (TemplateMatch(lib->templ, module.data()) ||
(lib->real_name != 0 &&
internal_strcmp(lib->real_name, module.data()) == 0)) {
if (loaded) {
Report("%s: called_from_lib suppression '%s' is matched against"
" 2 libraries: '%s' and '%s'\n",
SanitizerToolName, lib->templ, lib->name, module.data());
Die();
}
loaded = true;
if (lib->loaded)
continue;
if (common_flags()->verbosity)
Report("Matched called_from_lib suppression '%s' against library"
" '%s'\n", lib->templ, module.data());
lib->loaded = true;
lib->name = internal_strdup(module.data());
const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
code_ranges_[idx].begin = b;
code_ranges_[idx].end = e;
atomic_store(&loaded_count_, idx + 1, memory_order_release);
}
}
if (lib->loaded && !loaded) {
Report("%s: library '%s' that was matched against called_from_lib"
" suppression '%s' is unloaded\n",
SanitizerToolName, lib->name, lib->templ);
Die();
}
}
}
void LibIgnore::OnLibraryUnloaded() {
OnLibraryLoaded(0);
}
} // namespace __sanitizer
#endif // #if SANITIZER_LINUX
//===-- sanitizer_libignore.h -----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// LibIgnore allows ignoring all interceptors called from a particular set
// of dynamic libraries. LibIgnore remembers all "called_from_lib" suppressions
// from the provided SuppressionContext; finds code ranges for the libraries;
// and checks whether the provided PC value belongs to the code ranges.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LIBIGNORE_H
#define SANITIZER_LIBIGNORE_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_suppressions.h"
#include "sanitizer_atomic.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
class LibIgnore {
public:
explicit LibIgnore(LinkerInitialized);
// Fetches all "called_from_lib" suppressions from the SuppressionContext.
void Init(const SuppressionContext &supp);
// Must be called after a new dynamic library is loaded.
void OnLibraryLoaded(const char *name);
// Must be called after a dynamic library is unloaded.
void OnLibraryUnloaded();
// Checks whether the provided PC belongs to one of the ignored libraries.
bool IsIgnored(uptr pc) const;
private:
struct Lib {
char *templ;
char *name;
char *real_name; // target of symlink
bool loaded;
};
struct LibCodeRange {
uptr begin;
uptr end;
};
static const uptr kMaxLibs = 128;
// Hot part:
atomic_uintptr_t loaded_count_;
LibCodeRange code_ranges_[kMaxLibs];
// Cold part:
BlockingMutex mutex_;
uptr count_;
Lib libs_[kMaxLibs];
// Disallow copying of LibIgnore objects.
LibIgnore(const LibIgnore&); // not implemented
void operator = (const LibIgnore&); // not implemented
};
inline bool LibIgnore::IsIgnored(uptr pc) const {
const uptr n = atomic_load(&loaded_count_, memory_order_acquire);
for (uptr i = 0; i < n; i++) {
if (pc >= code_ranges_[i].begin && pc < code_ranges_[i].end)
return true;
}
return false;
}
} // namespace __sanitizer
#endif // SANITIZER_LIBIGNORE_H
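
The interesting part is the split into a lock-free hot path and a mutex-protected cold path: ranges are appended under the mutex and published with a release store of the count, so IsIgnored only needs an acquire load. A standalone model of just that mechanism, with std primitives in place of the sanitizer's:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <mutex>

struct CodeRange { uintptr_t begin, end; };

class LibIgnoreModel {
 public:
  void AddRange(uintptr_t begin, uintptr_t end) {
    std::lock_guard<std::mutex> lock(mu_);            // Cold path.
    uintptr_t idx = loaded_count_.load(std::memory_order_relaxed);
    ranges_[idx] = {begin, end};
    loaded_count_.store(idx + 1, std::memory_order_release);
  }
  bool IsIgnored(uintptr_t pc) const {                // Hot path, lock-free.
    uintptr_t n = loaded_count_.load(std::memory_order_acquire);
    for (uintptr_t i = 0; i < n; i++)
      if (pc >= ranges_[i].begin && pc < ranges_[i].end) return true;
    return false;
  }
 private:
  static const uintptr_t kMaxLibs = 128;
  std::atomic<uintptr_t> loaded_count_{0};
  CodeRange ranges_[kMaxLibs];
  std::mutex mu_;
};

int main() {
  LibIgnoreModel li;
  li.AddRange(0x400000, 0x500000);
  std::printf("%d %d\n", li.IsIgnored(0x410000), li.IsIgnored(0x600000));  // 1 0
}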
...@@ -77,7 +77,8 @@ namespace __sanitizer { ...@@ -77,7 +77,8 @@ namespace __sanitizer {
uptr internal_mmap(void *addr, uptr length, int prot, int flags, uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) { int fd, u64 offset) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(__NR_mmap, (uptr)addr, length, prot, flags, fd, offset); return internal_syscall(__NR_mmap, (uptr)addr, length, prot, flags, fd,
offset);
#else #else
return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset); return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
#endif #endif
...@@ -216,7 +217,8 @@ uptr GetTid() { ...@@ -216,7 +217,8 @@ uptr GetTid() {
} }
u64 NanoTime() { u64 NanoTime() {
kernel_timeval tv = {}; kernel_timeval tv;
internal_memset(&tv, 0, sizeof(tv));
internal_syscall(__NR_gettimeofday, (uptr)&tv, 0); internal_syscall(__NR_gettimeofday, (uptr)&tv, 0);
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000; return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
} }
...@@ -309,7 +311,8 @@ void PrepareForSandboxing() { ...@@ -309,7 +311,8 @@ void PrepareForSandboxing() {
MemoryMappingLayout::CacheMemoryMappings(); MemoryMappingLayout::CacheMemoryMappings();
// Same for /proc/self/exe in the symbolizer. // Same for /proc/self/exe in the symbolizer.
#if !SANITIZER_GO #if !SANITIZER_GO
getSymbolizer()->PrepareForSandboxing(); if (Symbolizer *sym = Symbolizer::GetOrNull())
sym->PrepareForSandboxing();
#endif #endif
} }
...@@ -572,7 +575,8 @@ uptr internal_ptrace(int request, int pid, void *addr, void *data) { ...@@ -572,7 +575,8 @@ uptr internal_ptrace(int request, int pid, void *addr, void *data) {
} }
uptr internal_waitpid(int pid, int *status, int options) { uptr internal_waitpid(int pid, int *status, int options) {
return internal_syscall(__NR_wait4, pid, (uptr)status, options, 0 /* rusage */); return internal_syscall(__NR_wait4, pid, (uptr)status, options,
0 /* rusage */);
} }
uptr internal_getpid() { uptr internal_getpid() {
...@@ -600,6 +604,31 @@ uptr internal_sigaltstack(const struct sigaltstack *ss, ...@@ -600,6 +604,31 @@ uptr internal_sigaltstack(const struct sigaltstack *ss,
return internal_syscall(__NR_sigaltstack, (uptr)ss, (uptr)oss); return internal_syscall(__NR_sigaltstack, (uptr)ss, (uptr)oss);
} }
uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
__sanitizer_kernel_sigaction_t *oldact) {
return internal_syscall(__NR_rt_sigaction, signum, act, oldact,
sizeof(__sanitizer_kernel_sigset_t));
}
uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
__sanitizer_kernel_sigset_t *oldset) {
return internal_syscall(__NR_rt_sigprocmask, (uptr)how, &set->sig[0],
&oldset->sig[0], sizeof(__sanitizer_kernel_sigset_t));
}
void internal_sigfillset(__sanitizer_kernel_sigset_t *set) {
internal_memset(set, 0xff, sizeof(*set));
}
void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum) {
signum -= 1;
CHECK_GE(signum, 0);
CHECK_LT(signum, sizeof(*set) * 8);
const uptr idx = signum / (sizeof(set->sig[0]) * 8);
const uptr bit = signum % (sizeof(set->sig[0]) * 8);
set->sig[idx] &= ~(1 << bit);
}
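
For a concrete feel of the sigdelset arithmetic above, assuming the kernel sigset is an array of unsigned-long words as on Linux:

#include <cstdio>

int main() {
  const unsigned kWordBits = sizeof(unsigned long) * 8;  // 64 on x86_64, 32 on i386.
  int signum = 33;                                       // e.g. a realtime signal
  int s = signum - 1;                                    // signal numbers are 1-based
  unsigned idx = s / kWordBits;                          // which word of sig[]
  unsigned bit = s % kWordBits;                          // which bit in that word
  std::printf("signal %d -> sig[%u] &= ~(1UL << %u)\n", signum, idx, bit);
}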
// ThreadLister implementation. // ThreadLister implementation.
ThreadLister::ThreadLister(int pid) ThreadLister::ThreadLister(int pid)
: pid_(pid), : pid_(pid),
...@@ -775,8 +804,8 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, ...@@ -775,8 +804,8 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
((unsigned long long *)child_stack)[0] = (uptr)fn; ((unsigned long long *)child_stack)[0] = (uptr)fn;
((unsigned long long *)child_stack)[1] = (uptr)arg; ((unsigned long long *)child_stack)[1] = (uptr)arg;
register void *r8 __asm__ ("r8") = newtls; register void *r8 __asm__("r8") = newtls;
register int *r10 __asm__ ("r10") = child_tidptr; register int *r10 __asm__("r10") = child_tidptr;
__asm__ __volatile__( __asm__ __volatile__(
/* %rax = syscall(%rax = __NR_clone, /* %rax = syscall(%rax = __NR_clone,
* %rdi = flags, * %rdi = flags,
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#if SANITIZER_LINUX #if SANITIZER_LINUX
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_internal_defs.h" #include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
struct link_map; // Opaque type returned by dlopen(). struct link_map; // Opaque type returned by dlopen().
struct sigaltstack; struct sigaltstack;
...@@ -29,6 +30,13 @@ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count); ...@@ -29,6 +30,13 @@ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5); uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
uptr internal_sigaltstack(const struct sigaltstack* ss, uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss); struct sigaltstack* oss);
uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
__sanitizer_kernel_sigaction_t *oldact);
uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
__sanitizer_kernel_sigset_t *oldset);
void internal_sigfillset(__sanitizer_kernel_sigset_t *set);
void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum);
#ifdef __x86_64__ #ifdef __x86_64__
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr); int *parent_tidptr, void *newtls, int *child_tidptr);
...@@ -56,7 +64,7 @@ class ThreadLister { ...@@ -56,7 +64,7 @@ class ThreadLister {
int bytes_read_; int bytes_read_;
}; };
void AdjustStackSizeLinux(void *attr, int verbosity); void AdjustStackSizeLinux(void *attr);
// Exposed for testing. // Exposed for testing.
uptr ThreadDescriptorSize(); uptr ThreadDescriptorSize();
...@@ -74,7 +82,6 @@ void CacheBinaryName(); ...@@ -74,7 +82,6 @@ void CacheBinaryName();
// Call cb for each region mapped by map. // Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)); void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
} // namespace __sanitizer } // namespace __sanitizer
#endif // SANITIZER_LINUX #endif // SANITIZER_LINUX
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#if SANITIZER_LINUX #if SANITIZER_LINUX
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_linux.h" #include "sanitizer_linux.h"
#include "sanitizer_placement_new.h" #include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h" #include "sanitizer_procmaps.h"
...@@ -30,6 +31,12 @@ ...@@ -30,6 +31,12 @@
#include <link.h> #include <link.h>
#endif #endif
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
SANITIZER_WEAK_ATTRIBUTE
int __sanitizer_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
return pthread_attr_getstack((pthread_attr_t*)attr, addr, size);
}
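
This relies on the usual weak-default / strong-override linking trick: the weak definition above is used unless an interceptor provides a strong one. A tiny standalone illustration of the pattern (the symbol name is made up):

#include <cstdio>

extern "C" __attribute__((weak)) int get_answer() {
  return 0;  // Weak fallback, used only when no strong definition exists.
}

// Another object file could provide:
// extern "C" int get_answer() { return 42; }  // Strong definition wins at link time.

int main() {
  std::printf("answer=%d\n", get_answer());  // 0 here; 42 if overridden.
}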
namespace __sanitizer { namespace __sanitizer {
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
...@@ -71,7 +78,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, ...@@ -71,7 +78,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0); CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
uptr stacksize = 0; uptr stacksize = 0;
void *stackaddr = 0; void *stackaddr = 0;
pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize); __sanitizer_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
pthread_attr_destroy(&attr); pthread_attr_destroy(&attr);
CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check. CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
...@@ -137,35 +144,33 @@ uptr Unwind_GetIP(struct _Unwind_Context *ctx) { ...@@ -137,35 +144,33 @@ uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#endif #endif
} }
struct UnwindTraceArg {
StackTrace *stack;
uptr max_depth;
};
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
StackTrace *b = (StackTrace*)param; UnwindTraceArg *arg = (UnwindTraceArg*)param;
CHECK(b->size < b->max_size); CHECK_LT(arg->stack->size, arg->max_depth);
uptr pc = Unwind_GetIP(ctx); uptr pc = Unwind_GetIP(ctx);
b->trace[b->size++] = pc; arg->stack->trace[arg->stack->size++] = pc;
if (b->size == b->max_size) return UNWIND_STOP; if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
return UNWIND_CONTINUE; return UNWIND_CONTINUE;
} }
static bool MatchPc(uptr cur_pc, uptr trace_pc) {
return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
this->size = 0; size = 0;
this->max_size = max_depth; if (max_depth == 0)
if (max_depth > 1) { return;
_Unwind_Backtrace(Unwind_Trace, this); UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
// We need to pop a few frames so that pc is on top. _Unwind_Backtrace(Unwind_Trace, &arg);
// trace[0] belongs to the current function so we always pop it. // We need to pop a few frames so that pc is on top.
int to_pop = 1; uptr to_pop = LocatePcInTrace(pc);
/**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1; // trace[0] belongs to the current function so we always pop it.
else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2; if (to_pop == 0)
else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3; to_pop = 1;
else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4; PopStackFrames(to_pop);
else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5; trace[0] = pc;
this->PopStackFrames(to_pop);
}
this->trace[0] = pc;
} }
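The per-frame MatchPc heuristic is replaced by a call to LocatePcInTrace (defined in the shared stacktrace code), which determines how many unwinder-internal frames to pop so that pc ends up on top. A rough sketch of the idea, hypothetical and only mirroring the 64-byte slack the old MatchPc used, not the exact upstream implementation:
// Find the first unwound frame lying within a small distance of pc; frames
// before it belong to the unwinder itself and can be popped.
// uptr LocatePcInTrace(uptr pc) {            // sketch only
//   const uptr kPcThreshold = 64;            // assumed slack, as in the old MatchPc
//   for (uptr i = 0; i < size; i++) {
//     uptr d = trace[i] > pc ? trace[i] - pc : pc - trace[i];
//     if (d <= kPcThreshold) return i;
//   }
//   return 0;
// }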
#endif // !SANITIZER_GO #endif // !SANITIZER_GO
...@@ -265,11 +270,11 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, ...@@ -265,11 +270,11 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
#endif // SANITIZER_GO #endif // SANITIZER_GO
} }
void AdjustStackSizeLinux(void *attr_, int verbosity) { void AdjustStackSizeLinux(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_; pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0; uptr stackaddr = 0;
size_t stacksize = 0; size_t stacksize = 0;
pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize); __sanitizer_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when // GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not. // stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0); bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
...@@ -277,7 +282,7 @@ void AdjustStackSizeLinux(void *attr_, int verbosity) { ...@@ -277,7 +282,7 @@ void AdjustStackSizeLinux(void *attr_, int verbosity) {
const uptr minstacksize = GetTlsSize() + 128*1024; const uptr minstacksize = GetTlsSize() + 128*1024;
if (stacksize < minstacksize) { if (stacksize < minstacksize) {
if (!stack_set) { if (!stack_set) {
if (verbosity && stacksize != 0) if (common_flags()->verbosity && stacksize != 0)
Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize, Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
minstacksize); minstacksize);
pthread_attr_setstacksize(attr, minstacksize); pthread_attr_setstacksize(attr, minstacksize);
......
...@@ -143,7 +143,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, ...@@ -143,7 +143,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
const char *GetEnv(const char *name) { const char *GetEnv(const char *name) {
char ***env_ptr = _NSGetEnviron(); char ***env_ptr = _NSGetEnviron();
CHECK(env_ptr); if (!env_ptr) {
Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is "
"called after libSystem_initializer().\n");
CHECK(env_ptr);
}
char **environ = *env_ptr; char **environ = *env_ptr;
CHECK(environ); CHECK(environ);
uptr name_len = internal_strlen(name); uptr name_len = internal_strlen(name);
......
...@@ -38,6 +38,10 @@ class StaticSpinMutex { ...@@ -38,6 +38,10 @@ class StaticSpinMutex {
atomic_store(&state_, 0, memory_order_release); atomic_store(&state_, 0, memory_order_release);
} }
void CheckLocked() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
private: private:
atomic_uint8_t state_; atomic_uint8_t state_;
......
...@@ -16,15 +16,7 @@ ...@@ -16,15 +16,7 @@
#include "sanitizer_internal_defs.h" #include "sanitizer_internal_defs.h"
namespace __sanitizer { inline void *operator new(__sanitizer::operator_new_size_type sz, void *p) {
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
typedef uptr operator_new_ptr_type;
#else
typedef u32 operator_new_ptr_type;
#endif
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_ptr_type sz, void *p) {
return p; return p;
} }
......
...@@ -23,8 +23,15 @@ ...@@ -23,8 +23,15 @@
#if defined(__APPLE__) #if defined(__APPLE__)
# define SANITIZER_MAC 1 # define SANITIZER_MAC 1
# include <TargetConditionals.h>
# if TARGET_OS_IPHONE
# define SANITIZER_IOS 1
# else
# define SANITIZER_IOS 0
# endif
#else #else
# define SANITIZER_MAC 0 # define SANITIZER_MAC 0
# define SANITIZER_IOS 0
#endif #endif
#if defined(_WIN32) #if defined(_WIN32)
......
...@@ -39,6 +39,12 @@ ...@@ -39,6 +39,12 @@
# define SI_MAC 0 # define SI_MAC 0
#endif #endif
#if SANITIZER_IOS
# define SI_IOS 1
#else
# define SI_IOS 0
#endif
# define SANITIZER_INTERCEPT_STRCMP 1 # define SANITIZER_INTERCEPT_STRCMP 1
# define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
...@@ -61,6 +67,7 @@ ...@@ -61,6 +67,7 @@
# define SANITIZER_INTERCEPT_PRCTL SI_LINUX # define SANITIZER_INTERCEPT_PRCTL SI_LINUX
# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX # define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
...@@ -111,6 +118,7 @@ ...@@ -111,6 +118,7 @@
# define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID # define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID # define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID # define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
...@@ -124,5 +132,41 @@ ...@@ -124,5 +132,41 @@
# define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID # define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
# define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STATFS64 \
(SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SHMCTL \
(SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
# define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
SI_MAC || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SINCOS SI_LINUX
# define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
# define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
// FIXME: getline seems to be available on OSX 10.7
# define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT__EXIT SI_LINUX
# define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_COND SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H #endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
...@@ -14,34 +14,88 @@ ...@@ -14,34 +14,88 @@
// userspace headers. // userspace headers.
// Most "normal" includes go in sanitizer_platform_limits_posix.cc // Most "normal" includes go in sanitizer_platform_limits_posix.cc
#ifdef SYSCALL_INTERCEPTION
#include "sanitizer_platform.h" #include "sanitizer_platform.h"
#if SANITIZER_LINUX #if SANITIZER_LINUX
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
// For offsetof -> __builtin_offsetof definition.
#include <stddef.h>
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
#include <asm/posix_types.h>
#define ino_t __kernel_ino_t
#define mode_t __kernel_mode_t
#define nlink_t __kernel_nlink_t
#define uid_t __kernel_uid_t
#define gid_t __kernel_gid_t
#define off_t __kernel_off_t
// This header seems to contain the definitions of _kernel_ stat* structs. // This header seems to contain the definitions of _kernel_ stat* structs.
#include <asm/stat.h> #include <asm/stat.h>
#undef ino_t
#undef mode_t
#undef nlink_t
#undef uid_t
#undef gid_t
#undef off_t
#include <linux/aio_abi.h> #include <linux/aio_abi.h>
#if SANITIZER_ANDROID
#include <asm/statfs.h>
#else
#include <sys/statfs.h>
#endif
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID
#include <linux/perf_event.h> #include <linux/perf_event.h>
#endif #endif
namespace __sanitizer { namespace __sanitizer {
unsigned struct___old_kernel_stat_sz = sizeof(struct __old_kernel_stat); unsigned struct_statfs64_sz = sizeof(struct statfs64);
unsigned struct_kernel_stat_sz = sizeof(struct stat); } // namespace __sanitizer
unsigned struct_io_event_sz = sizeof(struct io_event);
unsigned struct_iocb_sz = sizeof(struct iocb);
#if !defined(_LP64) && !defined(__x86_64__) #if !defined(__powerpc64__)
unsigned struct_kernel_stat64_sz = sizeof(struct stat64); COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#else #endif
unsigned struct_kernel_stat64_sz = 0;
COMPILER_CHECK(struct_kernel_stat_sz == sizeof(struct stat));
#if defined(__i386__)
COMPILER_CHECK(struct_kernel_stat64_sz == sizeof(struct stat64));
#endif #endif
CHECK_TYPE_SIZE(io_event);
CHECK_SIZE_AND_OFFSET(io_event, data);
CHECK_SIZE_AND_OFFSET(io_event, obj);
CHECK_SIZE_AND_OFFSET(io_event, res);
CHECK_SIZE_AND_OFFSET(io_event, res2);
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID
unsigned struct_perf_event_attr_sz = sizeof(struct perf_event_attr); COMPILER_CHECK(sizeof(struct __sanitizer_perf_event_attr) <=
sizeof(struct perf_event_attr));
CHECK_SIZE_AND_OFFSET(perf_event_attr, type);
CHECK_SIZE_AND_OFFSET(perf_event_attr, size);
#endif #endif
} // namespace __sanitizer
#endif // SANITIZER_LINUX COMPILER_CHECK(iocb_cmd_pread == IOCB_CMD_PREAD);
COMPILER_CHECK(iocb_cmd_pwrite == IOCB_CMD_PWRITE);
#if !SANITIZER_ANDROID
COMPILER_CHECK(iocb_cmd_preadv == IOCB_CMD_PREADV);
COMPILER_CHECK(iocb_cmd_pwritev == IOCB_CMD_PWRITEV);
#endif #endif
CHECK_TYPE_SIZE(iocb);
CHECK_SIZE_AND_OFFSET(iocb, aio_data);
// Skip aio_key, it's weird.
CHECK_SIZE_AND_OFFSET(iocb, aio_lio_opcode);
CHECK_SIZE_AND_OFFSET(iocb, aio_reqprio);
CHECK_SIZE_AND_OFFSET(iocb, aio_fildes);
CHECK_SIZE_AND_OFFSET(iocb, aio_buf);
CHECK_SIZE_AND_OFFSET(iocb, aio_nbytes);
CHECK_SIZE_AND_OFFSET(iocb, aio_offset);
#endif // SANITIZER_LINUX
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <arpa/inet.h> #include <arpa/inet.h>
#include <dirent.h> #include <dirent.h>
#include <errno.h>
#include <grp.h> #include <grp.h>
#include <limits.h> #include <limits.h>
#include <net/if.h> #include <net/if.h>
...@@ -42,6 +43,8 @@ ...@@ -42,6 +43,8 @@
#include <wchar.h> #include <wchar.h>
#if SANITIZER_LINUX #if SANITIZER_LINUX
#include <mntent.h>
#include <netinet/ether.h>
#include <utime.h> #include <utime.h>
#include <sys/mount.h> #include <sys/mount.h>
#include <sys/ptrace.h> #include <sys/ptrace.h>
...@@ -75,6 +78,7 @@ ...@@ -75,6 +78,7 @@
#include <sys/mtio.h> #include <sys/mtio.h>
#include <sys/kd.h> #include <sys/kd.h>
#include <sys/shm.h> #include <sys/shm.h>
#include <sys/statvfs.h>
#include <sys/timex.h> #include <sys/timex.h>
#include <sys/user.h> #include <sys/user.h>
#include <sys/ustat.h> #include <sys/ustat.h>
...@@ -87,6 +91,8 @@ ...@@ -87,6 +91,8 @@
#include <linux/scc.h> #include <linux/scc.h>
#include <linux/serial.h> #include <linux/serial.h>
#include <sys/msg.h> #include <sys/msg.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID #endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_ANDROID #if SANITIZER_ANDROID
...@@ -100,20 +106,22 @@ ...@@ -100,20 +106,22 @@
#include <link.h> #include <link.h>
#include <sys/vfs.h> #include <sys/vfs.h>
#include <sys/epoll.h> #include <sys/epoll.h>
// #include <asm/stat.h>
#include <linux/capability.h> #include <linux/capability.h>
#endif // SANITIZER_LINUX #endif // SANITIZER_LINUX
#if SANITIZER_MAC #if SANITIZER_MAC
#include <netinet/ip_mroute.h> #include <net/ethernet.h>
#include <sys/filio.h> #include <sys/filio.h>
#include <sys/mount.h>
#include <sys/sockio.h> #include <sys/sockio.h>
#endif #endif
namespace __sanitizer { namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname); unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat); unsigned struct_stat_sz = sizeof(struct stat);
#if !SANITIZER_IOS
unsigned struct_stat64_sz = sizeof(struct stat64); unsigned struct_stat64_sz = sizeof(struct stat64);
#endif // !SANITIZER_IOS
unsigned struct_rusage_sz = sizeof(struct rusage); unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm); unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd); unsigned struct_passwd_sz = sizeof(struct passwd);
...@@ -122,6 +130,7 @@ namespace __sanitizer { ...@@ -122,6 +130,7 @@ namespace __sanitizer {
unsigned struct_sigaction_sz = sizeof(struct sigaction); unsigned struct_sigaction_sz = sizeof(struct sigaction);
unsigned struct_itimerval_sz = sizeof(struct itimerval); unsigned struct_itimerval_sz = sizeof(struct itimerval);
unsigned pthread_t_sz = sizeof(pthread_t); unsigned pthread_t_sz = sizeof(pthread_t);
unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
unsigned pid_t_sz = sizeof(pid_t); unsigned pid_t_sz = sizeof(pid_t);
unsigned timeval_sz = sizeof(timeval); unsigned timeval_sz = sizeof(timeval);
unsigned uid_t_sz = sizeof(uid_t); unsigned uid_t_sz = sizeof(uid_t);
...@@ -131,6 +140,11 @@ namespace __sanitizer { ...@@ -131,6 +140,11 @@ namespace __sanitizer {
unsigned struct_tms_sz = sizeof(struct tms); unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent); unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param); unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
#if SANITIZER_MAC && !SANITIZER_IOS
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // SANITIZER_MAC && !SANITIZER_IOS
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID
unsigned ucontext_t_sz = sizeof(ucontext_t); unsigned ucontext_t_sz = sizeof(ucontext_t);
...@@ -138,7 +152,6 @@ namespace __sanitizer { ...@@ -138,7 +152,6 @@ namespace __sanitizer {
#if SANITIZER_LINUX #if SANITIZER_LINUX
unsigned struct_rlimit_sz = sizeof(struct rlimit); unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_epoll_event_sz = sizeof(struct epoll_event); unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo); unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned struct_timespec_sz = sizeof(struct timespec); unsigned struct_timespec_sz = sizeof(struct timespec);
...@@ -155,11 +168,11 @@ namespace __sanitizer { ...@@ -155,11 +168,11 @@ namespace __sanitizer {
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_rlimit64_sz = sizeof(struct rlimit64); unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
unsigned struct_statfs64_sz = sizeof(struct statfs64);
unsigned struct_timex_sz = sizeof(struct timex); unsigned struct_timex_sz = sizeof(struct timex);
unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds); unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
unsigned struct_shmid_ds_sz = sizeof(struct shmid_ds);
unsigned struct_mq_attr_sz = sizeof(struct mq_attr); unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
unsigned struct_statvfs_sz = sizeof(struct statvfs);
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID #endif // SANITIZER_LINUX && !SANITIZER_ANDROID
uptr sig_ign = (uptr)SIG_IGN; uptr sig_ign = (uptr)SIG_IGN;
...@@ -170,6 +183,16 @@ namespace __sanitizer { ...@@ -170,6 +183,16 @@ namespace __sanitizer {
int e_tabsz = (int)E_TABSZ; int e_tabsz = (int)E_TABSZ;
#endif #endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_shminfo_sz = sizeof(struct shminfo);
unsigned struct_shm_info_sz = sizeof(struct shm_info);
int shmctl_ipc_stat = (int)IPC_STAT;
int shmctl_ipc_info = (int)IPC_INFO;
int shmctl_shm_info = (int)SHM_INFO;
int shmctl_shm_stat = (int)SHM_INFO;
#endif
int af_inet = (int)AF_INET; int af_inet = (int)AF_INET;
int af_inet6 = (int)AF_INET6; int af_inet6 = (int)AF_INET6;
...@@ -197,6 +220,9 @@ namespace __sanitizer { ...@@ -197,6 +220,9 @@ namespace __sanitizer {
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#endif #endif
int ptrace_peektext = PTRACE_PEEKTEXT;
int ptrace_peekdata = PTRACE_PEEKDATA;
int ptrace_peekuser = PTRACE_PEEKUSER;
int ptrace_getregs = PTRACE_GETREGS; int ptrace_getregs = PTRACE_GETREGS;
int ptrace_setregs = PTRACE_SETREGS; int ptrace_setregs = PTRACE_SETREGS;
int ptrace_getfpregs = PTRACE_GETFPREGS; int ptrace_getfpregs = PTRACE_GETFPREGS;
...@@ -295,7 +321,7 @@ namespace __sanitizer { ...@@ -295,7 +321,7 @@ namespace __sanitizer {
unsigned struct_unimapinit_sz = sizeof(struct unimapinit); unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
#endif #endif
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID && !SANITIZER_MAC
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req); unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif #endif
...@@ -346,7 +372,7 @@ namespace __sanitizer { ...@@ -346,7 +372,7 @@ namespace __sanitizer {
unsigned IOCTL_TIOCSPGRP = TIOCSPGRP; unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
unsigned IOCTL_TIOCSTI = TIOCSTI; unsigned IOCTL_TIOCSTI = TIOCSTI;
unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ; unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_MAC #if (SANITIZER_LINUX && !SANITIZER_ANDROID)
unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT; unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT; unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
#endif #endif
...@@ -733,24 +759,9 @@ namespace __sanitizer { ...@@ -733,24 +759,9 @@ namespace __sanitizer {
unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI; unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL; unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL;
#endif #endif
} // namespace __sanitizer
#define CHECK_TYPE_SIZE(TYPE) \ extern const int errno_EOWNERDEAD = EOWNERDEAD;
COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE)) } // namespace __sanitizer
#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \
sizeof(((CLASS *) NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
offsetof(CLASS, MEMBER))
// For sigaction, which is a function and struct at the same time,
// and thus requires explicit "struct" in sizeof() expression.
#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \
sizeof(((struct CLASS *) NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
offsetof(struct CLASS, MEMBER))
COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t)); COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
...@@ -855,7 +866,6 @@ CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags); ...@@ -855,7 +866,6 @@ CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer); CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
#endif #endif
#ifdef SYSCALL_INTERCEPTION
#if SANITIZER_LINUX #if SANITIZER_LINUX
CHECK_TYPE_SIZE(__sysctl_args); CHECK_TYPE_SIZE(__sysctl_args);
CHECK_SIZE_AND_OFFSET(__sysctl_args, name); CHECK_SIZE_AND_OFFSET(__sysctl_args, name);
...@@ -873,7 +883,6 @@ CHECK_TYPE_SIZE(__kernel_off_t); ...@@ -873,7 +883,6 @@ CHECK_TYPE_SIZE(__kernel_off_t);
CHECK_TYPE_SIZE(__kernel_loff_t); CHECK_TYPE_SIZE(__kernel_loff_t);
CHECK_TYPE_SIZE(__kernel_fd_set); CHECK_TYPE_SIZE(__kernel_fd_set);
#endif #endif
#endif
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID
CHECK_TYPE_SIZE(wordexp_t); CHECK_TYPE_SIZE(wordexp_t);
...@@ -882,4 +891,52 @@ CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv); ...@@ -882,4 +891,52 @@ CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs); CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
#endif #endif
CHECK_TYPE_SIZE(tm);
CHECK_SIZE_AND_OFFSET(tm, tm_sec);
CHECK_SIZE_AND_OFFSET(tm, tm_min);
CHECK_SIZE_AND_OFFSET(tm, tm_hour);
CHECK_SIZE_AND_OFFSET(tm, tm_mday);
CHECK_SIZE_AND_OFFSET(tm, tm_mon);
CHECK_SIZE_AND_OFFSET(tm, tm_year);
CHECK_SIZE_AND_OFFSET(tm, tm_wday);
CHECK_SIZE_AND_OFFSET(tm, tm_yday);
CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
CHECK_SIZE_AND_OFFSET(tm, tm_zone);
#if SANITIZER_LINUX
CHECK_TYPE_SIZE(mntent);
CHECK_SIZE_AND_OFFSET(mntent, mnt_fsname);
CHECK_SIZE_AND_OFFSET(mntent, mnt_dir);
CHECK_SIZE_AND_OFFSET(mntent, mnt_type);
CHECK_SIZE_AND_OFFSET(mntent, mnt_opts);
CHECK_SIZE_AND_OFFSET(mntent, mnt_freq);
CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
#endif
CHECK_TYPE_SIZE(ether_addr);
#if SANITIZER_LINUX && !SANITIZER_ANDROID
CHECK_TYPE_SIZE(ipc_perm);
CHECK_SIZE_AND_OFFSET(ipc_perm, __key);
CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
CHECK_TYPE_SIZE(shmid_ds);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
#endif
CHECK_TYPE_SIZE(clock_t);
#endif // SANITIZER_LINUX || SANITIZER_MAC #endif // SANITIZER_LINUX || SANITIZER_MAC
...@@ -13,19 +13,22 @@ ...@@ -13,19 +13,22 @@
#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H #ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
#define SANITIZER_PLATFORM_LIMITS_POSIX_H #define SANITIZER_PLATFORM_LIMITS_POSIX_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h" #include "sanitizer_platform.h"
namespace __sanitizer { namespace __sanitizer {
extern unsigned struct_utsname_sz; extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz; extern unsigned struct_stat_sz;
#if !SANITIZER_IOS
extern unsigned struct_stat64_sz; extern unsigned struct_stat64_sz;
#endif
extern unsigned struct_rusage_sz; extern unsigned struct_rusage_sz;
extern unsigned struct_tm_sz;
extern unsigned struct_passwd_sz; extern unsigned struct_passwd_sz;
extern unsigned struct_group_sz; extern unsigned struct_group_sz;
extern unsigned siginfo_t_sz; extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz; extern unsigned struct_itimerval_sz;
extern unsigned pthread_t_sz; extern unsigned pthread_t_sz;
extern unsigned pthread_cond_t_sz;
extern unsigned pid_t_sz; extern unsigned pid_t_sz;
extern unsigned timeval_sz; extern unsigned timeval_sz;
extern unsigned uid_t_sz; extern unsigned uid_t_sz;
...@@ -35,30 +38,52 @@ namespace __sanitizer { ...@@ -35,30 +38,52 @@ namespace __sanitizer {
extern unsigned struct_itimerspec_sz; extern unsigned struct_itimerspec_sz;
extern unsigned struct_sigevent_sz; extern unsigned struct_sigevent_sz;
extern unsigned struct_sched_param_sz; extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_statfs64_sz;
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID
extern unsigned ucontext_t_sz; extern unsigned ucontext_t_sz;
#endif // !SANITIZER_ANDROID #endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX #if SANITIZER_LINUX
extern unsigned struct___old_kernel_stat_sz;
extern unsigned struct_kernel_stat_sz; #if defined(__x86_64__)
extern unsigned struct_kernel_stat64_sz; const unsigned struct___old_kernel_stat_sz = 32;
extern unsigned struct_io_event_sz; const unsigned struct_kernel_stat_sz = 144;
extern unsigned struct_iocb_sz; const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 96;
#elif defined(__arm__)
const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc__) && !defined(__powerpc64__)
const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 72;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc64__)
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
unsigned size;
// More fields that vary with the kernel version.
};
extern unsigned struct_utimbuf_sz; extern unsigned struct_utimbuf_sz;
extern unsigned struct_new_utsname_sz; extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz; extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz; extern unsigned struct_oldold_utsname_sz;
extern unsigned struct_msqid_ds_sz; extern unsigned struct_msqid_ds_sz;
extern unsigned struct_shmid_ds_sz;
extern unsigned struct_mq_attr_sz; extern unsigned struct_mq_attr_sz;
extern unsigned struct_perf_event_attr_sz;
extern unsigned struct_timex_sz; extern unsigned struct_timex_sz;
extern unsigned struct_ustat_sz; extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit_sz; extern unsigned struct_rlimit_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_epoll_event_sz; extern unsigned struct_epoll_event_sz;
extern unsigned struct_sysinfo_sz; extern unsigned struct_sysinfo_sz;
extern unsigned struct_timespec_sz; extern unsigned struct_timespec_sz;
...@@ -67,6 +92,32 @@ namespace __sanitizer { ...@@ -67,6 +92,32 @@ namespace __sanitizer {
const unsigned old_sigset_t_sz = sizeof(unsigned long); const unsigned old_sigset_t_sz = sizeof(unsigned long);
const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long); const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
struct __sanitizer_iocb {
u64 aio_data;
u32 aio_key_or_aio_reserved1; // Simply crazy.
u32 aio_reserved1_or_aio_key; // Luckily, we don't need these.
u16 aio_lio_opcode;
s16 aio_reqprio;
u32 aio_fildes;
u64 aio_buf;
u64 aio_nbytes;
s64 aio_offset;
u64 aio_reserved2;
u64 aio_reserved3;
};
struct __sanitizer_io_event {
u64 data;
u64 obj;
u64 res;
u64 res2;
};
const unsigned iocb_cmd_pread = 0;
const unsigned iocb_cmd_pwrite = 1;
const unsigned iocb_cmd_preadv = 7;
const unsigned iocb_cmd_pwritev = 8;
struct __sanitizer___sysctl_args { struct __sanitizer___sysctl_args {
int *name; int *name;
int nlen; int nlen;
...@@ -80,8 +131,55 @@ namespace __sanitizer { ...@@ -80,8 +131,55 @@ namespace __sanitizer {
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_rlimit64_sz; extern unsigned struct_rlimit64_sz;
extern unsigned struct_statfs64_sz; extern unsigned struct_statvfs_sz;
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID extern unsigned struct_statvfs64_sz;
struct __sanitizer_ipc_perm {
int __key;
int uid;
int gid;
int cuid;
int cgid;
#ifdef __powerpc64__
unsigned mode;
unsigned __seq;
#else
unsigned short mode;
unsigned short __pad1;
unsigned short __seq;
unsigned short __pad2;
#endif
uptr __unused1;
uptr __unused2;
};
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
#ifndef __powerpc__
uptr shm_segsz;
#endif
uptr shm_atime;
#ifndef _LP64
uptr __unused1;
#endif
uptr shm_dtime;
#ifndef _LP64
uptr __unused2;
#endif
uptr shm_ctime;
#ifndef _LP64
uptr __unused3;
#endif
#ifdef __powerpc__
uptr shm_segsz;
#endif
int shm_cpid;
int shm_lpid;
uptr shm_nattch;
uptr __unused4;
uptr __unused5;
};
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_iovec { struct __sanitizer_iovec {
void *iov_base; void *iov_base;
...@@ -94,6 +192,35 @@ namespace __sanitizer { ...@@ -94,6 +192,35 @@ namespace __sanitizer {
typedef unsigned __sanitizer_pthread_key_t; typedef unsigned __sanitizer_pthread_key_t;
#endif #endif
struct __sanitizer_ether_addr {
u8 octet[6];
};
struct __sanitizer_tm {
int tm_sec;
int tm_min;
int tm_hour;
int tm_mday;
int tm_mon;
int tm_year;
int tm_wday;
int tm_yday;
int tm_isdst;
long int tm_gmtoff;
const char *tm_zone;
};
#if SANITIZER_LINUX
struct __sanitizer_mntent {
char *mnt_fsname;
char *mnt_dir;
char *mnt_type;
char *mnt_opts;
int mnt_freq;
int mnt_passno;
};
#endif
#if SANITIZER_ANDROID || SANITIZER_MAC #if SANITIZER_ANDROID || SANITIZER_MAC
struct __sanitizer_msghdr { struct __sanitizer_msghdr {
void *msg_name; void *msg_name;
...@@ -158,6 +285,8 @@ namespace __sanitizer { ...@@ -158,6 +285,8 @@ namespace __sanitizer {
}; };
#endif #endif
typedef long __sanitizer_clock_t;
#if SANITIZER_LINUX #if SANITIZER_LINUX
#if defined(_LP64) || defined(__x86_64__) #if defined(_LP64) || defined(__x86_64__)
typedef unsigned __sanitizer___kernel_uid_t; typedef unsigned __sanitizer___kernel_uid_t;
...@@ -168,8 +297,15 @@ namespace __sanitizer { ...@@ -168,8 +297,15 @@ namespace __sanitizer {
typedef unsigned short __sanitizer___kernel_gid_t; typedef unsigned short __sanitizer___kernel_gid_t;
typedef long __sanitizer___kernel_off_t; typedef long __sanitizer___kernel_off_t;
#endif #endif
#if defined(__powerpc64__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
#else
typedef unsigned short __sanitizer___kernel_old_uid_t; typedef unsigned short __sanitizer___kernel_old_uid_t;
typedef unsigned short __sanitizer___kernel_old_gid_t; typedef unsigned short __sanitizer___kernel_old_gid_t;
#endif
typedef long long __sanitizer___kernel_loff_t; typedef long long __sanitizer___kernel_loff_t;
typedef struct { typedef struct {
unsigned long fds_bits[1024 / (8 * sizeof(long))]; unsigned long fds_bits[1024 / (8 * sizeof(long))];
...@@ -207,6 +343,20 @@ namespace __sanitizer { ...@@ -207,6 +343,20 @@ namespace __sanitizer {
#endif #endif
}; };
struct __sanitizer_kernel_sigset_t {
u8 sig[8];
};
struct __sanitizer_kernel_sigaction_t {
union {
void (*sigaction)(int signo, void *info, void *ctx);
void (*handler)(int signo);
};
unsigned long sa_flags;
void (*sa_restorer)(void);
__sanitizer_kernel_sigset_t sa_mask;
};
extern uptr sig_ign; extern uptr sig_ign;
extern uptr sig_dfl; extern uptr sig_dfl;
extern uptr sa_siginfo; extern uptr sa_siginfo;
...@@ -297,6 +447,9 @@ namespace __sanitizer { ...@@ -297,6 +447,9 @@ namespace __sanitizer {
extern unsigned struct_user_fpregs_struct_sz; extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz; extern unsigned struct_user_fpxregs_struct_sz;
extern int ptrace_peektext;
extern int ptrace_peekdata;
extern int ptrace_peekuser;
extern int ptrace_getregs; extern int ptrace_getregs;
extern int ptrace_setregs; extern int ptrace_setregs;
extern int ptrace_getfpregs; extern int ptrace_getfpregs;
...@@ -309,6 +462,15 @@ namespace __sanitizer { ...@@ -309,6 +462,15 @@ namespace __sanitizer {
extern int ptrace_setregset; extern int ptrace_setregset;
#endif #endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_shminfo_sz;
extern unsigned struct_shm_info_sz;
extern int shmctl_ipc_stat;
extern int shmctl_ipc_info;
extern int shmctl_shm_info;
extern int shmctl_shm_stat;
#endif
// ioctl arguments // ioctl arguments
struct __sanitizer_ifconf { struct __sanitizer_ifconf {
int ifc_len; int ifc_len;
...@@ -390,7 +552,7 @@ namespace __sanitizer { ...@@ -390,7 +552,7 @@ namespace __sanitizer {
extern unsigned struct_unimapinit_sz; extern unsigned struct_unimapinit_sz;
#endif #endif
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID && !SANITIZER_MAC
extern unsigned struct_sioc_sg_req_sz; extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz; extern unsigned struct_sioc_vif_req_sz;
#endif #endif
...@@ -445,7 +607,7 @@ namespace __sanitizer { ...@@ -445,7 +607,7 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSPGRP; extern unsigned IOCTL_TIOCSPGRP;
extern unsigned IOCTL_TIOCSTI; extern unsigned IOCTL_TIOCSTI;
extern unsigned IOCTL_TIOCSWINSZ; extern unsigned IOCTL_TIOCSWINSZ;
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_MAC #if (SANITIZER_LINUX && !SANITIZER_ANDROID)
extern unsigned IOCTL_SIOCGETSGCNT; extern unsigned IOCTL_SIOCGETSGCNT;
extern unsigned IOCTL_SIOCGETVIFCNT; extern unsigned IOCTL_SIOCGETVIFCNT;
#endif #endif
...@@ -807,6 +969,25 @@ namespace __sanitizer { ...@@ -807,6 +969,25 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSERSETMULTI; extern unsigned IOCTL_TIOCSERSETMULTI;
extern unsigned IOCTL_TIOCSSERIAL; extern unsigned IOCTL_TIOCSSERIAL;
#endif #endif
extern const int errno_EOWNERDEAD;
} // namespace __sanitizer } // namespace __sanitizer
#define CHECK_TYPE_SIZE(TYPE) \
COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \
sizeof(((CLASS *) NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
offsetof(CLASS, MEMBER))
// For sigaction, which is a function and struct at the same time,
// and thus requires explicit "struct" in sizeof() expression.
#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \
sizeof(((struct CLASS *) NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
offsetof(struct CLASS, MEMBER))
#endif #endif
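For context on how the relocated macros are used in the .cc file: CHECK_TYPE_SIZE(tm), for instance, expands to a compile-time comparison of the sanitizer's mirror struct against the libc type, roughly:
// COMPILER_CHECK(sizeof(__sanitizer_tm) == sizeof(tm))
// i.e. the build breaks if __sanitizer_tm (declared in this header) ever
// drifts from the size of the real struct tm.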
...@@ -87,28 +87,6 @@ int internal_isatty(fd_t fd) { ...@@ -87,28 +87,6 @@ int internal_isatty(fd_t fd) {
return isatty(fd); return isatty(fd);
} }
#ifndef SANITIZER_GO
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom, bool fast) {
#if !SANITIZER_CAN_FAST_UNWIND
fast = false;
#endif
#if SANITIZER_MAC
// Always unwind fast on Mac.
(void)fast;
#else
if (!fast)
return stack->SlowUnwindStack(pc, max_s);
#endif // SANITIZER_MAC
stack->size = 0;
stack->trace[0] = pc;
if (max_s > 1) {
stack->max_size = max_s;
stack->FastUnwindStack(pc, bp, stack_top, stack_bottom);
}
}
#endif // SANITIZER_GO
} // namespace __sanitizer } // namespace __sanitizer
#endif #endif
...@@ -193,17 +193,22 @@ void SetPrintfAndReportCallback(void (*callback)(const char *)) { ...@@ -193,17 +193,22 @@ void SetPrintfAndReportCallback(void (*callback)(const char *)) {
PrintfAndReportCallback = callback; PrintfAndReportCallback = callback;
} }
#if SANITIZER_SUPPORTS_WEAK_HOOKS
// Can be overriden in frontend. // Can be overriden in frontend.
#if SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void OnPrint(const char *str) {
(void)str;
}
#elif defined(SANITIZER_GO) && defined(TSAN_EXTERNAL_HOOKS)
void OnPrint(const char *str); void OnPrint(const char *str);
#else
void OnPrint(const char *str) {
(void)str;
}
#endif #endif
static void CallPrintfAndReportCallback(const char *str) { static void CallPrintfAndReportCallback(const char *str) {
#if SANITIZER_SUPPORTS_WEAK_HOOKS OnPrint(str);
if (&OnPrint != NULL)
OnPrint(str);
#endif
if (PrintfAndReportCallback) if (PrintfAndReportCallback)
PrintfAndReportCallback(str); PrintfAndReportCallback(str);
} }
...@@ -287,4 +292,13 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) { ...@@ -287,4 +292,13 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length; return needed_length;
} }
void InternalScopedString::append(const char *format, ...) {
CHECK_LT(length_, size());
va_list args;
va_start(args, format);
VSNPrintf(data() + length_, size() - length_, format, args);
va_end(args);
length_ += internal_strlen(data() + length_);
}
} // namespace __sanitizer } // namespace __sanitizer
...@@ -24,13 +24,15 @@ namespace __sanitizer { ...@@ -24,13 +24,15 @@ namespace __sanitizer {
template<typename Node> class QuarantineCache; template<typename Node> class QuarantineCache;
struct QuarantineBatch { struct QuarantineBatch {
static const uptr kSize = 1024; static const uptr kSize = 1021;
QuarantineBatch *next; QuarantineBatch *next;
uptr size; uptr size;
uptr count; uptr count;
void *batch[kSize]; void *batch[kSize];
}; };
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8Kb.
// The callback interface is: // The callback interface is:
// void Callback::Recycle(Node *ptr); // void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size); // void *cb.Allocate(uptr size);
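The kSize change from 1024 to 1021 is what lets the new COMPILER_CHECK above hold; a quick back-of-the-envelope check, assuming 8-byte pointers and uptr on 64-bit targets:
// sizeof(QuarantineBatch) = sizeof(next) + sizeof(size) + sizeof(count) + kSize * sizeof(void*)
//                         = 8 + 8 + 8 + 1021 * 8 = 8192 bytes = 1 << 13, i.e. exactly the 8 KB bound.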
...@@ -121,8 +123,10 @@ class QuarantineCache { ...@@ -121,8 +123,10 @@ class QuarantineCache {
} }
void Enqueue(Callback cb, void *ptr, uptr size) { void Enqueue(Callback cb, void *ptr, uptr size) {
if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
AllocBatch(cb); AllocBatch(cb);
size += sizeof(QuarantineBatch); // Count the batch in Quarantine size.
}
QuarantineBatch *b = list_.back(); QuarantineBatch *b = list_.back();
b->batch[b->count++] = ptr; b->batch[b->count++] = ptr;
b->size += size; b->size += size;
...@@ -145,9 +149,7 @@ class QuarantineCache { ...@@ -145,9 +149,7 @@ class QuarantineCache {
return 0; return 0;
QuarantineBatch *b = list_.front(); QuarantineBatch *b = list_.front();
list_.pop_front(); list_.pop_front();
// FIXME: should probably add SizeSub method? SizeSub(b->size);
// See https://code.google.com/p/thread-sanitizer/issues/detail?id=20
SizeAdd(0 - b->size);
return b; return b;
} }
...@@ -158,6 +160,9 @@ class QuarantineCache { ...@@ -158,6 +160,9 @@ class QuarantineCache {
void SizeAdd(uptr add) { void SizeAdd(uptr add) {
atomic_store(&size_, Size() + add, memory_order_relaxed); atomic_store(&size_, Size() + add, memory_order_relaxed);
} }
void SizeSub(uptr sub) {
atomic_store(&size_, Size() - sub, memory_order_relaxed);
}
NOINLINE QuarantineBatch* AllocBatch(Callback cb) { NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b)); QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
......
...@@ -21,58 +21,55 @@ static const uptr kStackTraceMax = 256; ...@@ -21,58 +21,55 @@ static const uptr kStackTraceMax = 256;
defined(__powerpc__) || defined(__powerpc64__) || \ defined(__powerpc__) || defined(__powerpc64__) || \
defined(__sparc__) || \ defined(__sparc__) || \
defined(__mips__)) defined(__mips__))
#define SANITIZER_CAN_FAST_UNWIND 0 # define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
#else #else
#define SANITIZER_CAN_FAST_UNWIND 1 # define SANITIZER_CAN_FAST_UNWIND 1
#endif #endif
struct StackTrace { struct StackTrace {
typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer, typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
int out_size); int out_size);
uptr top_frame_bp;
uptr size; uptr size;
uptr max_size;
uptr trace[kStackTraceMax]; uptr trace[kStackTraceMax];
// Prints a symbolized stacktrace, followed by an empty line.
static void PrintStack(const uptr *addr, uptr size, static void PrintStack(const uptr *addr, uptr size,
bool symbolize, const char *strip_file_prefix, SymbolizeCallback symbolize_callback = 0);
SymbolizeCallback symbolize_callback);
void CopyTo(uptr *dst, uptr dst_size) {
for (uptr i = 0; i < size && i < dst_size; i++)
dst[i] = trace[i];
for (uptr i = size; i < dst_size; i++)
dst[i] = 0;
}
void CopyFrom(uptr *src, uptr src_size) { void CopyFrom(const uptr *src, uptr src_size) {
top_frame_bp = 0;
size = src_size; size = src_size;
if (size > kStackTraceMax) size = kStackTraceMax; if (size > kStackTraceMax) size = kStackTraceMax;
for (uptr i = 0; i < size; i++) { for (uptr i = 0; i < size; i++)
trace[i] = src[i]; trace[i] = src[i];
}
} }
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom); static bool WillUseFastUnwind(bool request_fast_unwind) {
void SlowUnwindStack(uptr pc, uptr max_depth); // Check if fast unwind is available. Fast unwind is the only option on Mac.
if (!SANITIZER_CAN_FAST_UNWIND)
return false;
else if (SANITIZER_MAC)
return true;
return request_fast_unwind;
}
void PopStackFrames(uptr count); void Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
static uptr GetCurrentPc(); static uptr GetCurrentPc();
static uptr GetPreviousInstructionPc(uptr pc); static uptr GetPreviousInstructionPc(uptr pc);
SANITIZER_INTERFACE_ATTRIBUTE private:
static uptr CompressStack(StackTrace *stack, void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
u32 *compressed, uptr size); uptr max_depth);
SANITIZER_INTERFACE_ATTRIBUTE void SlowUnwindStack(uptr pc, uptr max_depth);
static void UncompressStack(StackTrace *stack, void PopStackFrames(uptr count);
u32 *compressed, uptr size); uptr LocatePcInTrace(uptr pc);
}; };
const char *StripPathPrefix(const char *filepath,
const char *strip_file_prefix);
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom, bool fast);
} // namespace __sanitizer } // namespace __sanitizer
// Use this macro if you want to print stack trace with the caller // Use this macro if you want to print stack trace with the caller
......
//===-- sanitizer_stacktrace_libcdep.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace.h"
namespace __sanitizer {
void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind) {
if (!WillUseFastUnwind(request_fast_unwind))
SlowUnwindStack(pc, max_depth);
else
FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth);
top_frame_bp = size ? bp : 0;
}
} // namespace __sanitizer
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
namespace __sanitizer { namespace __sanitizer {
static const char *const kTypeStrings[SuppressionTypeCount] = { static const char *const kTypeStrings[SuppressionTypeCount] = {
"none", "race", "mutex", "thread", "signal", "leak" "none", "race", "mutex", "thread", "signal", "leak", "called_from_lib"
}; };
bool TemplateMatch(char *templ, const char *str) { bool TemplateMatch(char *templ, const char *str) {
...@@ -127,10 +127,15 @@ void SuppressionContext::Parse(const char *str) { ...@@ -127,10 +127,15 @@ void SuppressionContext::Parse(const char *str) {
} }
} }
uptr SuppressionContext::SuppressionCount() { uptr SuppressionContext::SuppressionCount() const {
return suppressions_.size(); return suppressions_.size();
} }
const Suppression *SuppressionContext::SuppressionAt(uptr i) const {
CHECK_LT(i, suppressions_.size());
return &suppressions_[i];
}
void SuppressionContext::GetMatched( void SuppressionContext::GetMatched(
InternalMmapVector<Suppression *> *matched) { InternalMmapVector<Suppression *> *matched) {
for (uptr i = 0; i < suppressions_.size(); i++) for (uptr i = 0; i < suppressions_.size(); i++)
......