Commit c5be964a authored and committed by Kostya Serebryany

libsanitizer merge from upstream r221802

From-SVN: r217518
parent 47bf94b7
2014-11-13 Kostya Serebryany <kcc@google.com>
* All source files: Merge from upstream r221802.
* sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
(LibbacktraceSymbolizer::SymbolizeData): Replace 'address'
with 'start' to follow the new interface.
* asan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
* interception/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
* libbacktrace/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
* lsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
* sanitizer_common/Makefile.am (sanitizer_common_files): Added new
files.
(AM_CXXFLAGS): Added -std=c++11.
* tsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
* ubsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
* asan/Makefile.in: Regenerate.
* interception/Makefile.in: Regenerate.
* libbacktrace/Makefile.in: Regenerate.
* lsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.in: Regenerate.
* tsan/Makefile.in: Regenerate.
* ubsan/Makefile.in: Regenerate.
2014-11-11 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
PR target/63610
......
-218156
+221802
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
@@ -9,6 +9,7 @@ DEFS += -DMAC_INTERPOSE_FUNCTIONS -DMISSING_BLOCKS_SUPPORT
 endif
 AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros -fno-ipa-icf
 AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=c++11
 ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
 toolexeclib_LTLIBRARIES = libasan.la
......
@@ -270,7 +270,7 @@ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
 -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
 -fomit-frame-pointer -funwind-tables -fvisibility=hidden \
 -Wno-variadic-macros -fno-ipa-icf \
-$(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+$(LIBSTDCXX_RAW_CXX_CXXFLAGS) -std=c++11
 ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
 toolexeclib_LTLIBRARIES = libasan.la
 nodist_toolexeclib_HEADERS = libasan_preinit.o
......
@@ -43,8 +43,8 @@ class AsanChunkView {
 uptr AllocTid();
 uptr FreeTid();
 bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
-void GetAllocStack(StackTrace *stack);
-void GetFreeStack(StackTrace *stack);
+StackTrace GetAllocStack();
+StackTrace GetFreeStack();
 bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
 if (addr >= Beg() && (addr + access_size) <= End()) {
 *offset = addr - Beg();
@@ -137,20 +137,20 @@ struct AsanThreadLocalMallocStorage {
 AsanThreadLocalMallocStorage() {}
 };
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
 AllocType alloc_type);
-void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
-void asan_sized_free(void *ptr, uptr size, StackTrace *stack,
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
 AllocType alloc_type);
-void *asan_malloc(uptr size, StackTrace *stack);
-void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
-void *asan_realloc(void *p, uptr size, StackTrace *stack);
-void *asan_valloc(uptr size, StackTrace *stack);
-void *asan_pvalloc(uptr size, StackTrace *stack);
+void *asan_malloc(uptr size, BufferedStackTrace *stack);
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
+void *asan_valloc(uptr size, BufferedStackTrace *stack);
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
-StackTrace *stack);
+BufferedStackTrace *stack);
 uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
 uptr asan_mz_size(const void *ptr);
......
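A note on the signature changes running through this header and the rest of the merge: the allocator entry points now take BufferedStackTrace*, which owns an unwinding buffer, while plain StackTrace has become a non-owning pointer-plus-size view. A minimal self-contained sketch of that ownership split (toy types for illustration only, not the real sanitizer classes):

    // Toy illustration: the non-owning view vs. the owning buffer that the
    // new BufferedStackTrace-style parameters imply.
    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t uptr;
    static const uptr kMaxFrames = 256;

    struct TraceView {            // like StackTrace: pointer + size, no storage
      const uptr *trace;
      uptr size;
      TraceView(const uptr *t, uptr s) : trace(t), size(s) {}
    };

    struct OwnedTrace : TraceView {  // like BufferedStackTrace: owns its frames
      uptr trace_buffer[kMaxFrames];
      OwnedTrace() : TraceView(trace_buffer, 0) {}
      void FakeUnwind() {            // stands in for Unwind(pc, bp, ...)
        trace_buffer[0] = 0x401234;
        size = 1;
      }
    };

    static void AllocatorEntryPoint(OwnedTrace *stack) {  // cf. asan_malloc(size, stack)
      stack->FakeUnwind();
    }

    int main() {
      OwnedTrace stack;
      AllocatorEntryPoint(&stack);
      std::printf("captured %zu frame(s)\n", (size_t)stack.size);
      return 0;
    }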
@@ -180,20 +180,19 @@ uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
 uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
 uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-static void GetStackTraceFromId(u32 id, StackTrace *stack) {
+static StackTrace GetStackTraceFromId(u32 id) {
 CHECK(id);
-uptr size = 0;
-const uptr *trace = StackDepotGet(id, &size);
-CHECK(trace);
-stack->CopyFrom(trace, size);
+StackTrace res = StackDepotGet(id);
+CHECK(res.trace);
+return res;
 }
-void AsanChunkView::GetAllocStack(StackTrace *stack) {
-GetStackTraceFromId(chunk_->alloc_context_id, stack);
+StackTrace AsanChunkView::GetAllocStack() {
+return GetStackTraceFromId(chunk_->alloc_context_id);
 }
-void AsanChunkView::GetFreeStack(StackTrace *stack) {
-GetStackTraceFromId(chunk_->free_context_id, stack);
+StackTrace AsanChunkView::GetFreeStack() {
+return GetStackTraceFromId(chunk_->free_context_id);
 }
 struct QuarantineCallback;
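The hunk above shows the other interface change that recurs throughout the merge: StackDepotPut() now takes a whole StackTrace and returns an id, and StackDepotGet() returns a StackTrace by value instead of filling a pointer/size out-parameter pair. A toy sketch of that id-based round-trip (illustrative stand-ins, not the real depot):

    // Put() interns a whole trace and hands back an id; Get() returns a trace
    // by value, mirroring the usage in GetStackTraceFromId() above.
    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <vector>

    typedef uintptr_t uptr;
    typedef uint32_t u32;

    struct Trace { std::vector<uptr> pcs; };

    static std::map<u32, Trace> g_depot;
    static u32 g_next_id = 1;

    static u32 DepotPut(const Trace &t) {  // cf. StackDepotPut(*stack)
      g_depot[g_next_id] = t;
      return g_next_id++;
    }

    static Trace DepotGet(u32 id) {        // cf. StackDepotGet(id)
      return g_depot[id];
    }

    int main() {
      Trace alloc_stack;
      alloc_stack.pcs.push_back(0x400123);
      alloc_stack.pcs.push_back(0x400456);
      u32 id = DepotPut(alloc_stack);      // stored once at allocation time
      Trace again = DepotGet(id);          // re-fetched later for the report
      std::printf("id=%u frames=%zu\n", (unsigned)id, again.pcs.size());
      return 0;
    }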
@@ -261,7 +260,7 @@ void ReInitializeAllocator() {
 quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
 }
-static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
+static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
 AllocType alloc_type, bool can_fill) {
 if (UNLIKELY(!asan_inited))
 AsanInitFromRtl();
@@ -353,7 +352,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
 meta[1] = chunk_beg;
 }
-m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
+m->alloc_context_id = StackDepotPut(*stack);
 uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
 // Unpoison the bulk of the memory region.
@@ -389,15 +388,16 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
 return res;
 }
-static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
+static void ReportInvalidFree(void *ptr, u8 chunk_state,
+BufferedStackTrace *stack) {
 if (chunk_state == CHUNK_QUARANTINE)
 ReportDoubleFree((uptr)ptr, stack);
 else
 ReportFreeNotMalloced((uptr)ptr, stack);
 }
-static void AtomicallySetQuarantineFlag(AsanChunk *m,
-void *ptr, StackTrace *stack) {
+static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
+BufferedStackTrace *stack) {
 u8 old_chunk_state = CHUNK_ALLOCATED;
 // Flip the chunk_state atomically to avoid race on double-free.
 if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
@@ -408,8 +408,8 @@ static void AtomicallySetQuarantineFlag(AsanChunk *m,
 // Expects the chunk to already be marked as quarantined by using
 // AtomicallySetQuarantineFlag.
-static void QuarantineChunk(AsanChunk *m, void *ptr,
-StackTrace *stack, AllocType alloc_type) {
+static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
+AllocType alloc_type) {
 CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
 if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
@@ -421,7 +421,7 @@ static void QuarantineChunk(AsanChunk *m, void *ptr,
 CHECK_EQ(m->free_tid, kInvalidTid);
 AsanThread *t = GetCurrentThread();
 m->free_tid = t ? t->tid() : 0;
-m->free_context_id = StackDepotPut(stack->trace, stack->size);
+m->free_context_id = StackDepotPut(*stack);
 // Poison the region.
 PoisonShadow(m->Beg(),
 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
@@ -445,7 +445,7 @@ static void QuarantineChunk(AsanChunk *m, void *ptr,
 }
 }
-static void Deallocate(void *ptr, uptr delete_size, StackTrace *stack,
+static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
 AllocType alloc_type) {
 uptr p = reinterpret_cast<uptr>(ptr);
 if (p == 0) return;
@@ -462,7 +462,8 @@ static void Deallocate(void *ptr, uptr delete_size, StackTrace *stack,
 QuarantineChunk(m, ptr, stack, alloc_type);
 }
-static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
+static void *Reallocate(void *old_ptr, uptr new_size,
+BufferedStackTrace *stack) {
 CHECK(old_ptr && new_size);
 uptr p = reinterpret_cast<uptr>(old_ptr);
 uptr chunk_beg = p - kChunkHeaderSize;
@@ -575,25 +576,25 @@ void PrintInternalAllocatorStats() {
 allocator.PrintStats();
 }
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
 AllocType alloc_type) {
 return Allocate(size, alignment, stack, alloc_type, true);
 }
-void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
 Deallocate(ptr, 0, stack, alloc_type);
 }
-void asan_sized_free(void *ptr, uptr size, StackTrace *stack,
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
 AllocType alloc_type) {
 Deallocate(ptr, size, stack, alloc_type);
 }
-void *asan_malloc(uptr size, StackTrace *stack) {
+void *asan_malloc(uptr size, BufferedStackTrace *stack) {
 return Allocate(size, 8, stack, FROM_MALLOC, true);
 }
-void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
 if (CallocShouldReturnNullDueToOverflow(size, nmemb))
 return AllocatorReturnNull();
 void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
@@ -604,7 +605,7 @@ void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
 return ptr;
 }
-void *asan_realloc(void *p, uptr size, StackTrace *stack) {
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
 if (p == 0)
 return Allocate(size, 8, stack, FROM_MALLOC, true);
 if (size == 0) {
@@ -614,11 +615,11 @@ void *asan_realloc(void *p, uptr size, StackTrace *stack) {
 return Reallocate(p, size, stack);
 }
-void *asan_valloc(uptr size, StackTrace *stack) {
+void *asan_valloc(uptr size, BufferedStackTrace *stack) {
 return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
 }
-void *asan_pvalloc(uptr size, StackTrace *stack) {
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
 uptr PageSize = GetPageSizeCached();
 size = RoundUpTo(size, PageSize);
 if (size == 0) {
@@ -629,7 +630,7 @@ void *asan_pvalloc(uptr size, StackTrace *stack) {
 }
 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
-StackTrace *stack) {
+BufferedStackTrace *stack) {
 void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
 CHECK(IsAligned((uptr)ptr, alignment));
 *memptr = ptr;
......
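The allocator hunks above leave the existing double-free defence intact: the chunk state is flipped from allocated to quarantined with a single compare-exchange, so of two racing frees only one succeeds and the other can be reported. A small self-contained illustration of that pattern (state values and layout are illustrative, not the real AsanChunk):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    enum ChunkState : uint8_t { CHUNK_ALLOCATED = 2, CHUNK_QUARANTINE = 3 };

    struct Chunk {
      std::atomic<uint8_t> state{CHUNK_ALLOCATED};
    };

    // Only the first of two racing frees wins the exchange; the loser sees the
    // chunk already quarantined and can report a double-free.
    static bool TryQuarantine(Chunk *c) {
      uint8_t expected = CHUNK_ALLOCATED;
      return c->state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                              std::memory_order_acquire);
    }

    int main() {
      Chunk c;
      std::printf("first free:  %d\n", TryQuarantine(&c));   // 1
      std::printf("second free: %d\n", TryQuarantine(&c));   // 0 -> double free
      return 0;
    }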
@@ -15,31 +15,88 @@
 #include "asan_flags.h"
 #include "asan_internal.h"
 #include "asan_mapping.h"
+#include "asan_report.h"
 #include "asan_thread.h"
 namespace __asan {
+void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
+descr->name[0] = 0;
+descr->region_address = 0;
+descr->region_size = 0;
+descr->region_kind = "stack";
+AsanThread::StackFrameAccess access;
+if (!t->GetStackFrameAccessByAddr(addr, &access))
+return;
+InternalMmapVector<StackVarDescr> vars(16);
+if (!ParseFrameDescription(access.frame_descr, &vars)) {
+return;
+}
+for (uptr i = 0; i < vars.size(); i++) {
+if (access.offset <= vars[i].beg + vars[i].size) {
+internal_strncat(descr->name, vars[i].name_pos,
+Min(descr->name_size, vars[i].name_len));
+descr->region_address = addr - (access.offset - vars[i].beg);
+descr->region_size = vars[i].size;
+return;
+}
+}
+}
+void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
+AsanChunkView chunk = FindHeapChunkByAddress(addr);
+descr->name[0] = 0;
+descr->region_address = 0;
+descr->region_size = 0;
+if (!chunk.IsValid()) {
+descr->region_kind = "heap-invalid";
+return;
+}
+descr->region_address = chunk.Beg();
+descr->region_size = chunk.UsedSize();
+descr->region_kind = "heap";
+}
+void AsanLocateAddress(uptr addr, AddressDescription *descr) {
+if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
+return;
+}
+if (GetInfoForAddressIfGlobal(addr, descr)) {
+return;
+}
+asanThreadRegistry().Lock();
+AsanThread *thread = FindThreadByStackAddress(addr);
+asanThreadRegistry().Unlock();
+if (thread) {
+GetInfoForStackVar(addr, descr, thread);
+return;
+}
+GetInfoForHeapAddress(addr, descr);
+}
 uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
 bool alloc_stack) {
 AsanChunkView chunk = FindHeapChunkByAddress(addr);
 if (!chunk.IsValid()) return 0;
-StackTrace stack;
+StackTrace stack(nullptr, 0);
 if (alloc_stack) {
 if (chunk.AllocTid() == kInvalidTid) return 0;
-chunk.GetAllocStack(&stack);
+stack = chunk.GetAllocStack();
 if (thread_id) *thread_id = chunk.AllocTid();
 } else {
 if (chunk.FreeTid() == kInvalidTid) return 0;
-chunk.GetFreeStack(&stack);
+stack = chunk.GetFreeStack();
 if (thread_id) *thread_id = chunk.FreeTid();
 }
 if (trace && size) {
-if (size > kStackTraceMax)
-size = kStackTraceMax;
-if (size > stack.size)
-size = stack.size;
+size = Min(size, Min(stack.size, kStackTraceMax));
 for (uptr i = 0; i < size; i++)
 trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]);
@@ -54,6 +111,16 @@ uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
 using namespace __asan;
 SANITIZER_INTERFACE_ATTRIBUTE
+const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
+uptr *region_address, uptr *region_size) {
+AddressDescription descr = { name, name_size, 0, 0, 0 };
+AsanLocateAddress(addr, &descr);
+if (region_address) *region_address = descr.region_address;
+if (region_size) *region_size = descr.region_size;
+return descr.region_kind;
+}
+SANITIZER_INTERFACE_ATTRIBUTE
 uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
 return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true);
 }
......
@@ -63,6 +63,7 @@ struct Flags {
 int detect_invalid_pointer_pairs;
 bool detect_container_overflow;
 int detect_odr_violation;
+bool dump_instruction_bytes;
 };
 extern Flags asan_flags_dont_use_directly;
......
@@ -69,6 +69,14 @@ ALWAYS_INLINE void PoisonRedZones(const Global &g) {
 }
 }
+const uptr kMinimalDistanceFromAnotherGlobal = 64;
+bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
+if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
+if (addr >= g.beg + g.size_with_redzone) return false;
+return true;
+}
 static void ReportGlobal(const Global &g, const char *prefix) {
 Report("%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
 prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
@@ -80,19 +88,45 @@ static void ReportGlobal(const Global &g, const char *prefix) {
 }
 }
-bool DescribeAddressIfGlobal(uptr addr, uptr size) {
+static bool DescribeOrGetInfoIfGlobal(uptr addr, uptr size, bool print,
+Global *output_global) {
 if (!flags()->report_globals) return false;
 BlockingMutexLock lock(&mu_for_globals);
 bool res = false;
 for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
 const Global &g = *l->g;
-if (flags()->report_globals >= 2)
-ReportGlobal(g, "Search");
-res |= DescribeAddressRelativeToGlobal(addr, size, g);
+if (print) {
+if (flags()->report_globals >= 2)
+ReportGlobal(g, "Search");
+res |= DescribeAddressRelativeToGlobal(addr, size, g);
+} else {
+if (IsAddressNearGlobal(addr, g)) {
+CHECK(output_global);
+*output_global = g;
+return true;
+}
+}
 }
 return res;
 }
+bool DescribeAddressIfGlobal(uptr addr, uptr size) {
+return DescribeOrGetInfoIfGlobal(addr, size, /* print */ true,
+/* output_global */ nullptr);
+}
+bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
+Global g = {};
+if (DescribeOrGetInfoIfGlobal(addr, /* size */ 1, /* print */ false, &g)) {
+internal_strncpy(descr->name, g.name, descr->name_size);
+descr->region_address = g.beg;
+descr->region_size = g.size;
+descr->region_kind = "global";
+return true;
+}
+return false;
+}
 u32 FindRegistrationSite(const Global *g) {
 CHECK(global_registration_site_vector);
 for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
@@ -181,7 +215,7 @@ using namespace __asan; // NOLINT
 void __asan_register_globals(__asan_global *globals, uptr n) {
 if (!flags()->report_globals) return;
 GET_STACK_TRACE_FATAL_HERE;
-u32 stack_id = StackDepotPut(stack.trace, stack.size);
+u32 stack_id = StackDepotPut(stack);
 BlockingMutexLock lock(&mu_for_globals);
 if (!global_registration_site_vector)
 global_registration_site_vector =
......
@@ -89,6 +89,28 @@ extern "C" {
 void __asan_describe_address(uptr addr);
 SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_report_present();
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_report_pc();
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_report_bp();
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_report_sp();
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_report_address();
+SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_get_report_access_type();
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_report_access_size();
+SANITIZER_INTERFACE_ATTRIBUTE
+const char * __asan_get_report_description();
+SANITIZER_INTERFACE_ATTRIBUTE
+const char * __asan_locate_address(uptr addr, char *name, uptr name_size,
+uptr *region_address, uptr *region_size);
+SANITIZER_INTERFACE_ATTRIBUTE
 uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size,
 u32 *thread_id);
@@ -149,6 +171,10 @@ extern "C" {
 void __asan_poison_cxx_array_cookie(uptr p);
 SANITIZER_INTERFACE_ATTRIBUTE
 uptr __asan_load_cxx_array_cookie(uptr *p);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_intra_object_redzone(uptr p, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_unpoison_intra_object_redzone(uptr p, uptr size);
 } // extern "C"
 #endif // ASAN_INTERFACE_INTERNAL_H
@@ -133,6 +133,7 @@ const int kAsanStackUseAfterScopeMagic = 0xf8;
 const int kAsanGlobalRedzoneMagic = 0xf9;
 const int kAsanInternalHeapMagic = 0xfe;
 const int kAsanArrayCookieMagic = 0xac;
+const int kAsanIntraObjectRedzone = 0xbb;
 static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
 static const uptr kRetiredStackFrameMagic = 0x45E0360E;
......
@@ -295,7 +295,7 @@ using namespace __asan; // NOLINT
 // The caller retains control of the allocated context.
 extern "C"
 asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
-StackTrace *stack) {
+BufferedStackTrace *stack) {
 asan_block_context_t *asan_ctxt =
 (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
 asan_ctxt->block = ctxt;
......
@@ -58,11 +58,11 @@
 // || `[0x00000000, 0x1fffffff]` || LowMem ||
 //
 // Default Linux/MIPS mapping:
-// || `[0x2aaa8000, 0xffffffff]` || HighMem ||
-// || `[0x0fffd000, 0x2aaa7fff]` || HighShadow ||
-// || `[0x0bffd000, 0x0fffcfff]` || ShadowGap ||
-// || `[0x0aaa8000, 0x0bffcfff]` || LowShadow ||
-// || `[0x00000000, 0x0aaa7fff]` || LowMem ||
+// || `[0x2aaa0000, 0xffffffff]` || HighMem ||
+// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
+// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
+// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
+// || `[0x00000000, 0x0aa9ffff]` || LowMem ||
 //
 // Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
 // || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
@@ -84,7 +84,8 @@ static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
 static const u64 kDefaultShadowOffset64 = 1ULL << 44;
 static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
 static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
-static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
+static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36;
 static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
@@ -114,6 +115,8 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
 # define SHADOW_OFFSET kFreeBSD_ShadowOffset64
 # elif SANITIZER_MAC
 # define SHADOW_OFFSET kDefaultShadowOffset64
+# elif defined(__mips64)
+# define SHADOW_OFFSET kMIPS64_ShadowOffset64
 # else
 # define SHADOW_OFFSET kDefaultShort64bitShadowOffset
 # endif
......
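The updated MIPS32 table above is consistent with the usual ASan shadow mapping, shadow = (addr >> 3) + SHADOW_OFFSET, using the new kMIPS32_ShadowOffset32 = 0x0aaa0000; the scale of 3 (8-byte shadow granularity) is the ASan default and is assumed here rather than shown in this hunk. A quick check of the table's boundary values:

    // Assumed ASan default: SHADOW_SCALE = 3, i.e. MemToShadow(a) = (a >> 3) + offset.
    #include <cstdint>
    #include <cstdio>

    static uint32_t MemToShadow(uint32_t addr, uint32_t offset) {
      return (addr >> 3) + offset;
    }

    int main() {
      const uint32_t kOffset = 0x0aaa0000;  // new kMIPS32_ShadowOffset32
      // Start of HighMem should land on the start of HighShadow: 0x0fff4000.
      std::printf("0x%08x\n", MemToShadow(0x2aaa0000u, kOffset));
      // End of LowMem should land on the end of LowShadow: 0x0bff3fff.
      std::printf("0x%08x\n", MemToShadow(0x0aa9ffffu, kOffset));
      return 0;
    }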
@@ -59,6 +59,27 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
 FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
 }
+void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
+uptr end = ptr + size;
+if (common_flags()->verbosity) {
+Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
+poison ? "" : "un", ptr, end, size);
+if (common_flags()->verbosity >= 2)
+PRINT_CURRENT_STACK();
+}
+CHECK(size);
+CHECK_LE(size, 4096);
+CHECK(IsAligned(end, SHADOW_GRANULARITY));
+if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
+*(u8 *)MemToShadow(ptr) =
+poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
+ptr |= SHADOW_GRANULARITY - 1;
+ptr++;
+}
+for (; ptr < end; ptr += SHADOW_GRANULARITY)
+*(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
+}
 } // namespace __asan
 // ---------------------- Interface ---------------- {{{1
@@ -250,7 +271,8 @@ uptr __asan_load_cxx_array_cookie(uptr *p) {
 "expect a double-free report\n");
 return 0;
 }
-// FIXME: apparently it can be something else; need to find a reproducer.
+// The cookie may remain unpoisoned if e.g. it comes from a custom
+// operator new defined inside a class.
 return *p;
 }
@@ -372,6 +394,17 @@ int __sanitizer_verify_contiguous_container(const void *beg_p,
 return 0;
 return 1;
 }
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
+AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
+}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
+AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
+}
 // --- Implementation of LSan-specific functions --- {{{1
 namespace __lsan {
 bool WordIsPoisoned(uptr addr) {
......
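The new intra-object redzone entry points above poison pieces of a live object with the dedicated 0xbb shadow value so that overflows between fields of the same object can be caught. A hedged usage sketch of the exported interface (the padded layout and the manual calls are illustrative only; in practice such calls are emitted by instrumentation rather than written by hand):

    // Illustration only: the entry points are exported by the ASan runtime
    // (declarations copied from the hunk above); build with -fsanitize=address.
    #include <cstdint>
    typedef uintptr_t uptr;

    extern "C" void __asan_poison_intra_object_redzone(uptr p, uptr size);
    extern "C" void __asan_unpoison_intra_object_redzone(uptr p, uptr size);

    struct alignas(16) Padded {
      char used[16];
      char padding[16];   // pretend this is compiler-inserted field padding
    };

    int main() {
      Padded obj;
      uptr pad = reinterpret_cast<uptr>(obj.padding);
      // Per the checks above: size != 0, size <= 4096, and (pad + size) must be
      // SHADOW_GRANULARITY-aligned.
      __asan_poison_intra_object_redzone(pad, sizeof(obj.padding));
      // obj.padding[0] = 1;  // would now trip an ASan report on the poisoned shadow
      __asan_unpoison_intra_object_redzone(pad, sizeof(obj.padding));
      return 0;
    }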
@@ -31,6 +31,7 @@
 namespace __asan {
 void AsanOnSIGSEGV(int, void *siginfo, void *context) {
+ScopedDeadlySignal signal_scope(GetCurrentThread());
 uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
 int code = (int)((siginfo_t*)siginfo)->si_code;
 // Write the first message using the bullet-proof write.
@@ -39,12 +40,12 @@ void AsanOnSIGSEGV(int, void *siginfo, void *context) {
 GetPcSpBp(context, &pc, &sp, &bp);
 // Access at a reasonable offset above SP, or slightly below it (to account
-// for x86_64 redzone, ARM push of multiple registers, etc) is probably a
-// stack overflow.
+// for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
+// probably a stack overflow.
 // We also check si_code to filter out SEGV caused by something else other
 // then hitting the guard page or unmapped memory, like, for example,
 // unaligned memory access.
-if (addr + 128 > sp && addr < sp + 0xFFFF &&
+if (addr + 512 > sp && addr < sp + 0xFFFF &&
 (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
 ReportStackOverflow(pc, sp, bp, context, addr);
 else
......
@@ -23,13 +23,24 @@ struct StackVarDescr {
 uptr name_len;
 };
+struct AddressDescription {
+char *name;
+uptr name_size;
+uptr region_address;
+uptr region_size;
+const char *region_kind;
+};
 // The following functions prints address description depending
 // on the memory type (shadow/heap/stack/global).
 void DescribeHeapAddress(uptr addr, uptr access_size);
 bool DescribeAddressIfGlobal(uptr addr, uptr access_size);
 bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
 const __asan_global &g);
-bool DescribeAddressIfShadow(uptr addr);
+bool IsAddressNearGlobal(uptr addr, const __asan_global &g);
+bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
+bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
+bool print = true);
 bool ParseFrameDescription(const char *frame_descr,
 InternalMmapVector<StackVarDescr> *vars);
 bool DescribeAddressIfStack(uptr addr, uptr access_size);
@@ -44,35 +55,41 @@ void NORETURN
 void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
 void *context, uptr addr);
 void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
-StackTrace *free_stack);
-void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
-void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
-void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
+BufferedStackTrace *free_stack);
+void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
+void NORETURN ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
+void NORETURN ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
 AllocType alloc_type,
 AllocType dealloc_type);
-void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
-StackTrace *stack);
-void NORETURN
-ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack);
-void NORETURN ReportStringFunctionMemoryRangesOverlap(
-const char *function, const char *offset1, uptr length1,
-const char *offset2, uptr length2, StackTrace *stack);
-void NORETURN
-ReportStringFunctionSizeOverflow(uptr offset, uptr size, StackTrace *stack);
-void NORETURN
-ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, uptr old_mid,
-uptr new_mid, StackTrace *stack);
+void NORETURN
+ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack);
+void NORETURN
+ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
+BufferedStackTrace *stack);
+void NORETURN
+ReportStringFunctionMemoryRangesOverlap(const char *function,
+const char *offset1, uptr length1,
+const char *offset2, uptr length2,
+BufferedStackTrace *stack);
+void NORETURN ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+BufferedStackTrace *stack);
+void NORETURN
+ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+uptr old_mid, uptr new_mid,
+BufferedStackTrace *stack);
 void NORETURN
 ReportODRViolation(const __asan_global *g1, u32 stack_id1,
 const __asan_global *g2, u32 stack_id2);
 // Mac-specific errors and warnings.
-void WarnMacFreeUnallocated(
-uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
-void NORETURN ReportMacMzReallocUnknown(
-uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
-void NORETURN ReportMacCfReallocUnknown(
-uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
+void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
+BufferedStackTrace *stack);
+void NORETURN ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
+const char *zone_name,
+BufferedStackTrace *stack);
+void NORETURN ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr,
+const char *zone_name,
+BufferedStackTrace *stack);
 } // namespace __asan
@@ -65,7 +65,7 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
 Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
 line, cond, (uptr)v1, (uptr)v2);
 // FIXME: check for infinite recursion without a thread-local counter here.
-PRINT_CURRENT_STACK();
+PRINT_CURRENT_STACK_CHECK();
 Die();
 }
@@ -228,6 +228,9 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
 "If >=2, detect violation of One-Definition-Rule (ODR); "
 "If ==1, detect ODR-violation only if the two variables "
 "have different sizes");
+ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes",
+"If true, dump 16 bytes starting at the instruction that caused SEGV");
 }
 void InitializeFlags(Flags *f, const char *env) {
@@ -281,6 +284,7 @@ void InitializeFlags(Flags *f, const char *env) {
 f->detect_invalid_pointer_pairs = 0;
 f->detect_container_overflow = true;
 f->detect_odr_violation = 2;
+f->dump_instruction_bytes = false;
 // Override from compile definition.
 ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
......
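The new dump_instruction_bytes runtime flag defaults to false. Like any other ASan flag it can be turned on at startup, for example through the standard __asan_default_options() hook (the hook itself is long-standing ASan machinery, not something introduced by this diff); a hedged sketch:

    // Hedged sketch: seed the new flag via the user-overridable hook that the
    // ASan runtime consults at startup (equivalent to ASAN_OPTIONS=dump_instruction_bytes=1).
    extern "C" const char *__asan_default_options() {
      return "dump_instruction_bytes=1";
    }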
@@ -23,8 +23,9 @@ namespace __asan {
 // The pc will be in the position 0 of the resulting stack trace.
 // The bp may refer to the current frame or to the caller's frame.
 ALWAYS_INLINE
-void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
-uptr bp, void *context, bool fast) {
+void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
+uptr pc, uptr bp, void *context,
+bool fast) {
 #if SANITIZER_WINDOWS
 stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
 #else
@@ -32,6 +33,10 @@ void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
 stack->size = 0;
 if (LIKELY(asan_inited)) {
 if ((t = GetCurrentThread()) && !t->isUnwinding()) {
+// On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
+// yields the call stack of the signal's handler and not of the code
+// that raised the signal (as it does on Linux).
+if (SANITIZER_FREEBSD && t->isInDeadlySignal()) fast = true;
 uptr stack_top = t->stack_top();
 uptr stack_bottom = t->stack_bottom();
 ScopedUnwinding unwind_scope(t);
@@ -51,14 +56,14 @@ void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
 // don't want stack trace to contain functions from ASan internals.
 #define GET_STACK_TRACE(max_size, fast) \
-StackTrace stack; \
+BufferedStackTrace stack; \
 if (max_size <= 2) { \
 stack.size = max_size; \
 if (max_size > 0) { \
 stack.top_frame_bp = GET_CURRENT_FRAME(); \
-stack.trace[0] = StackTrace::GetCurrentPc(); \
+stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
 if (max_size > 1) \
-stack.trace[1] = GET_CALLER_PC(); \
+stack.trace_buffer[1] = GET_CALLER_PC(); \
 } \
 } else { \
 GetStackTraceWithPcBpAndContext(&stack, max_size, \
@@ -67,18 +72,21 @@ void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
 }
 #define GET_STACK_TRACE_FATAL(pc, bp) \
-StackTrace stack; \
+BufferedStackTrace stack; \
 GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
 common_flags()->fast_unwind_on_fatal)
 #define GET_STACK_TRACE_SIGNAL(pc, bp, context) \
-StackTrace stack; \
+BufferedStackTrace stack; \
 GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \
 common_flags()->fast_unwind_on_fatal)
 #define GET_STACK_TRACE_FATAL_HERE \
 GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+#define GET_STACK_TRACE_CHECK_HERE \
+GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
 #define GET_STACK_TRACE_THREAD \
 GET_STACK_TRACE(kStackTraceMax, true)
@@ -94,4 +102,10 @@ void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
 stack.Print(); \
 }
+#define PRINT_CURRENT_STACK_CHECK() \
+{ \
+GET_STACK_TRACE_CHECK_HERE; \
+stack.Print(); \
+}
 #endif // ASAN_STACK_H
@@ -28,7 +28,7 @@ namespace __asan {
 void AsanThreadContext::OnCreated(void *arg) {
 CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
 if (args->stack)
-stack_id = StackDepotPut(args->stack->trace, args->stack->size);
+stack_id = StackDepotPut(*args->stack);
 thread = args->thread;
 thread->set_context(this);
 }
@@ -196,17 +196,18 @@ void AsanThread::ClearShadowForThreadStackAndTLS() {
 PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
 }
-const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset,
-uptr *frame_pc) {
+bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
+StackFrameAccess *access) {
 uptr bottom = 0;
 if (AddrIsInStack(addr)) {
 bottom = stack_bottom();
 } else if (has_fake_stack()) {
 bottom = fake_stack()->AddrIsInFakeStack(addr);
 CHECK(bottom);
-*offset = addr - bottom;
-*frame_pc = ((uptr*)bottom)[2];
-return (const char *)((uptr*)bottom)[1];
+access->offset = addr - bottom;
+access->frame_pc = ((uptr*)bottom)[2];
+access->frame_descr = (const char *)((uptr*)bottom)[1];
+return true;
 }
 uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr.
 u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
@@ -223,15 +224,15 @@ const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset,
 }
 if (shadow_ptr < shadow_bottom) {
-*offset = 0;
-return "UNKNOWN";
+return false;
 }
 uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
 CHECK(ptr[0] == kCurrentStackFrameMagic);
-*offset = addr - (uptr)ptr;
-*frame_pc = ptr[2];
-return (const char*)ptr[1];
+access->offset = addr - (uptr)ptr;
+access->frame_pc = ptr[2];
+access->frame_descr = (const char*)ptr[1];
+return true;
 }
 static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
......
@@ -69,7 +69,12 @@ class AsanThread {
 AsanThreadContext *context() { return context_; }
 void set_context(AsanThreadContext *context) { context_ = context; }
-const char *GetFrameNameByAddr(uptr addr, uptr *offset, uptr *frame_pc);
+struct StackFrameAccess {
+uptr offset;
+uptr frame_pc;
+const char *frame_descr;
+};
+bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
 bool AddrIsInStack(uptr addr) {
 return addr >= stack_bottom_ && addr < stack_top_;
@@ -101,6 +106,10 @@ class AsanThread {
 bool isUnwinding() const { return unwinding_; }
 void setUnwinding(bool b) { unwinding_ = b; }
+// True if we are in a deadly signal handler.
+bool isInDeadlySignal() const { return in_deadly_signal_; }
+void setInDeadlySignal(bool b) { in_deadly_signal_ = b; }
 AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
 AsanStats &stats() { return stats_; }
@@ -126,6 +135,7 @@ class AsanThread {
 AsanThreadLocalMallocStorage malloc_storage_;
 AsanStats stats_;
 bool unwinding_;
+bool in_deadly_signal_;
 };
 // ScopedUnwinding is a scope for stacktracing member of a context
@@ -140,6 +150,20 @@ class ScopedUnwinding {
 AsanThread *thread;
 };
+// ScopedDeadlySignal is a scope for handling deadly signals.
+class ScopedDeadlySignal {
+public:
+explicit ScopedDeadlySignal(AsanThread *t) : thread(t) {
+if (thread) thread->setInDeadlySignal(true);
+}
+~ScopedDeadlySignal() {
+if (thread) thread->setInDeadlySignal(false);
+}
+private:
+AsanThread *thread;
+};
 struct CreateThreadContextArgs {
 AsanThread *thread;
 StackTrace *stack;
......
@@ -53,13 +53,39 @@ extern "C" {
 // Otherwise returns 0.
 int __asan_address_is_poisoned(void const volatile *addr);
-// If at least on byte in [beg, beg+size) is poisoned, return the address
+// If at least one byte in [beg, beg+size) is poisoned, return the address
 // of the first such byte. Otherwise return 0.
 void *__asan_region_is_poisoned(void *beg, size_t size);
 // Print the description of addr (useful when debugging in gdb).
 void __asan_describe_address(void *addr);
+// Useful for calling from a debugger to get information about an ASan error.
+// Returns 1 if an error has been (or is being) reported, otherwise returns 0.
+int __asan_report_present();
+// Useful for calling from a debugger to get information about an ASan error.
+// If an error has been (or is being) reported, the following functions return
+// the pc, bp, sp, address, access type (0 = read, 1 = write), access size and
+// bug description (e.g. "heap-use-after-free"). Otherwise they return 0.
+void *__asan_get_report_pc();
+void *__asan_get_report_bp();
+void *__asan_get_report_sp();
+void *__asan_get_report_address();
+int __asan_get_report_access_type();
+size_t __asan_get_report_access_size();
+const char *__asan_get_report_description();
+// Useful for calling from the debugger to get information about a pointer.
+// Returns the category of the given pointer as a constant string.
+// Possible return values are "global", "stack", "stack-fake", "heap",
+// "heap-invalid", "shadow-low", "shadow-gap", "shadow-high", "unknown".
+// If global or stack, tries to also return the variable name, address and
+// size. If heap, tries to return the chunk address and size. 'name' should
+// point to an allocated buffer of size 'name_size'.
+const char *__asan_locate_address(void *addr, char *name, size_t name_size,
+void **region_address, size_t *region_size);
 // Useful for calling from the debugger to get the allocation stack trace
 // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
 // returns the number of stored frames or 0 on error.
......
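The declarations above can be driven from a debugger (e.g. `call __asan_locate_address(...)` in gdb once a report is pending) or directly from test code. A small sketch of a direct call, assuming the program is built with -fsanitize=address and includes the public header shown above:

    #include <sanitizer/asan_interface.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      char *p = static_cast<char *>(std::malloc(32));
      char name[64];
      void *region = nullptr;
      size_t size = 0;
      // Classify the pointer: for this allocation the expected kind is "heap",
      // with the chunk's start address and usable size filled in.
      const char *kind = __asan_locate_address(p, name, sizeof(name),
                                               &region, &size);
      std::printf("%p is %s, region=%p size=%zu\n", (void *)p, kind, region, size);
      std::free(p);
      return 0;
    }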
@@ -103,7 +103,7 @@ extern "C" {
 const void *end,
 const void *old_mid,
 const void *new_mid);
-// Returns true if the contiguous container [beg, end) ir properly poisoned
+// Returns true if the contiguous container [beg, end) is properly poisoned
 // (e.g. with __sanitizer_annotate_contiguous_container), i.e. if
 // - [beg, mid) is addressable,
 // - [mid, end) is unaddressable.
......
@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
 DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
 AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
 AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=c++11
 ACLOCAL_AMFLAGS = -I m4
 noinst_LTLIBRARIES = libinterception.la
......
@@ -225,7 +225,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
 AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
 -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
 -fomit-frame-pointer -funwind-tables -fvisibility=hidden \
--Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+-Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -std=c++11
 ACLOCAL_AMFLAGS = -I m4
 noinst_LTLIBRARIES = libinterception.la
 interception_files = \
......
@@ -180,7 +180,10 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
 }
 static const void **InterestingDLLsAvailable() {
-const char *InterestingDLLs[] = { "kernel32.dll", "msvcr120.dll", NULL };
+const char *InterestingDLLs[] = {"kernel32.dll",
+"msvcr110.dll", // VS2012
+"msvcr120.dll", // VS2013
+NULL};
 static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
 if (!result[0]) {
 for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
......
@@ -40,6 +40,7 @@ C_WARN_FLAGS = $(WARN_FLAGS) -Wstrict-prototypes -Wmissing-prototypes -Wold-styl
 CXX_WARN_FLAGS = $(WARN_FLAGS) -Wno-unused-parameter
 AM_CFLAGS = $(C_WARN_FLAGS)
 AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions
+AM_CXXFLAGS += -std=c++11
 noinst_LTLIBRARIES = libsanitizer_libbacktrace.la
......
@@ -270,7 +270,7 @@ WARN_FLAGS = -W -Wall -Wwrite-strings -Wmissing-format-attribute \
 C_WARN_FLAGS = $(WARN_FLAGS) -Wstrict-prototypes -Wmissing-prototypes -Wold-style-definition
 CXX_WARN_FLAGS = $(WARN_FLAGS) -Wno-unused-parameter
 AM_CFLAGS = $(C_WARN_FLAGS)
-AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions
+AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions -std=c++11
 noinst_LTLIBRARIES = libsanitizer_libbacktrace.la
 libsanitizer_libbacktrace_la_SOURCES = \
 ../../libbacktrace/backtrace.h \
......
@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
 DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
 AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
 AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=c++11
 ACLOCAL_AMFLAGS = -I m4
 noinst_LTLIBRARIES = libsanitizer_lsan.la
......
...@@ -260,7 +260,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) ...@@ -260,7 +260,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
-Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -std=c++11
ACLOCAL_AMFLAGS = -I m4 ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_lsan.la noinst_LTLIBRARIES = libsanitizer_lsan.la
@LSAN_SUPPORTED_TRUE@toolexeclib_LTLIBRARIES = liblsan.la @LSAN_SUPPORTED_TRUE@toolexeclib_LTLIBRARIES = liblsan.la
......
...@@ -13,17 +13,17 @@ ...@@ -13,17 +13,17 @@
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#define GET_STACK_TRACE(max_size, fast) \ #define GET_STACK_TRACE(max_size, fast) \
StackTrace stack; \ BufferedStackTrace stack; \
{ \ { \
uptr stack_top = 0, stack_bottom = 0; \ uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \ ThreadContext *t; \
if (fast && (t = CurrentThreadContext())) { \ if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \ stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \ stack_bottom = t->stack_begin(); \
} \ } \
stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \ stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
/* context */ 0, stack_top, stack_bottom, fast); \ /* context */ 0, stack_top, stack_bottom, fast); \
} }
#define GET_STACK_TRACE_FATAL \ #define GET_STACK_TRACE_FATAL \
......
...@@ -61,7 +61,7 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) { ...@@ -61,7 +61,7 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
ChunkMetadata *m = Metadata(p); ChunkMetadata *m = Metadata(p);
CHECK(m); CHECK(m);
m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked; m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
m->stack_trace_id = StackDepotPut(stack.trace, stack.size); m->stack_trace_id = StackDepotPut(stack);
m->requested_size = size; m->requested_size = size;
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed); atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
} }
......
...@@ -353,9 +353,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { ...@@ -353,9 +353,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
static void PrintStackTraceById(u32 stack_trace_id) { static void PrintStackTraceById(u32 stack_trace_id) {
CHECK(stack_trace_id); CHECK(stack_trace_id);
uptr size = 0; StackDepotGet(stack_trace_id).Print();
const uptr *trace = StackDepotGet(stack_trace_id, &size);
StackTrace::PrintStack(trace, size);
} }
// ForEachChunk callback. Aggregates information about unreachable chunks into // ForEachChunk callback. Aggregates information about unreachable chunks into
...@@ -370,10 +368,9 @@ static void CollectLeaksCb(uptr chunk, void *arg) { ...@@ -370,10 +368,9 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
uptr resolution = flags()->resolution; uptr resolution = flags()->resolution;
u32 stack_trace_id = 0; u32 stack_trace_id = 0;
if (resolution > 0) { if (resolution > 0) {
uptr size = 0; StackTrace stack = StackDepotGet(m.stack_trace_id());
const uptr *trace = StackDepotGet(m.stack_trace_id(), &size); stack.size = Min(stack.size, resolution);
size = Min(size, resolution); stack_trace_id = StackDepotPut(stack);
stack_trace_id = StackDepotPut(trace, size);
} else { } else {
stack_trace_id = m.stack_trace_id(); stack_trace_id = m.stack_trace_id();
} }
...@@ -449,8 +446,11 @@ void DoLeakCheck() { ...@@ -449,8 +446,11 @@ void DoLeakCheck() {
PrintMatchedSuppressions(); PrintMatchedSuppressions();
if (unsuppressed_count > 0) { if (unsuppressed_count > 0) {
param.leak_report.PrintSummary(); param.leak_report.PrintSummary();
if (flags()->exitcode) if (flags()->exitcode) {
if (common_flags()->coverage)
__sanitizer_cov_dump();
internal__exit(flags()->exitcode); internal__exit(flags()->exitcode);
}
} }
} }
...@@ -482,11 +482,10 @@ static Suppression *GetSuppressionForAddr(uptr addr) { ...@@ -482,11 +482,10 @@ static Suppression *GetSuppressionForAddr(uptr addr) {
} }
static Suppression *GetSuppressionForStack(u32 stack_trace_id) { static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
uptr size = 0; StackTrace stack = StackDepotGet(stack_trace_id);
const uptr *trace = StackDepotGet(stack_trace_id, &size); for (uptr i = 0; i < stack.size; i++) {
for (uptr i = 0; i < size; i++) { Suppression *s = GetSuppressionForAddr(
Suppression *s = StackTrace::GetPreviousInstructionPc(stack.trace[i]));
GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
if (s) return s; if (s) return s;
} }
return 0; return 0;
......
...@@ -92,11 +92,10 @@ void ProcessGlobalRegions(Frontier *frontier) { ...@@ -92,11 +92,10 @@ void ProcessGlobalRegions(Frontier *frontier) {
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) { static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
CHECK(stack_id); CHECK(stack_id);
uptr size = 0; StackTrace stack = map->Get(stack_id);
const uptr *trace = map->Get(stack_id, &size);
// The top frame is our malloc/calloc/etc. The next frame is the caller. // The top frame is our malloc/calloc/etc. The next frame is the caller.
if (size >= 2) if (stack.size >= 2)
return trace[1]; return stack.trace[1];
return 0; return 0;
} }
......
...@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) ...@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
AM_CXXFLAGS += -std=c++11
if LIBBACKTRACE_SUPPORTED if LIBBACKTRACE_SUPPORTED
AM_CXXFLAGS += -DSANITIZER_LIBBACKTRACE -DSANITIZER_CP_DEMANGLE \ AM_CXXFLAGS += -DSANITIZER_LIBBACKTRACE -DSANITIZER_CP_DEMANGLE \
-I $(top_srcdir)/../libbacktrace \ -I $(top_srcdir)/../libbacktrace \
...@@ -44,6 +45,7 @@ sanitizer_common_files = \ ...@@ -44,6 +45,7 @@ sanitizer_common_files = \
sanitizer_stackdepot.cc \ sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \ sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \ sanitizer_stacktrace_libcdep.cc \
sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \ sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \ sanitizer_suppressions.cc \
sanitizer_symbolizer.cc \ sanitizer_symbolizer.cc \
......
...@@ -78,6 +78,7 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \ ...@@ -78,6 +78,7 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \ sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \ sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
sanitizer_stacktrace_libcdep.lo \ sanitizer_stacktrace_libcdep.lo \
sanitizer_stacktrace_printer.lo \
sanitizer_stoptheworld_linux_libcdep.lo \ sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_suppressions.lo sanitizer_symbolizer.lo \ sanitizer_suppressions.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_libbacktrace.lo \ sanitizer_symbolizer_libbacktrace.lo \
...@@ -252,7 +253,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) ...@@ -252,7 +253,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
-Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -std=c++11 \
$(am__append_1) $(am__append_1)
ACLOCAL_AMFLAGS = -I m4 ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_common.la noinst_LTLIBRARIES = libsanitizer_common.la
...@@ -283,6 +284,7 @@ sanitizer_common_files = \ ...@@ -283,6 +284,7 @@ sanitizer_common_files = \
sanitizer_stackdepot.cc \ sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \ sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \ sanitizer_stacktrace_libcdep.cc \
sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \ sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \ sanitizer_suppressions.cc \
sanitizer_symbolizer.cc \ sanitizer_symbolizer.cc \
...@@ -414,6 +416,7 @@ distclean-compile: ...@@ -414,6 +416,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_printer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
......
...@@ -459,6 +459,11 @@ class SizeClassAllocator64 { ...@@ -459,6 +459,11 @@ class SizeClassAllocator64 {
} }
} }
static uptr AdditionalSize() {
return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
GetPageSizeCached());
}
typedef SizeClassMap SizeClassMapT; typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses; static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded; static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
...@@ -488,11 +493,6 @@ class SizeClassAllocator64 { ...@@ -488,11 +493,6 @@ class SizeClassAllocator64 {
}; };
COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize); COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
static uptr AdditionalSize() {
return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
GetPageSizeCached());
}
RegionInfo *GetRegionInfo(uptr class_id) { RegionInfo *GetRegionInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses); CHECK_LT(class_id, kNumClasses);
RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize); RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
...@@ -1013,12 +1013,15 @@ class LargeMmapAllocator { ...@@ -1013,12 +1013,15 @@ class LargeMmapAllocator {
if (map_size < size) return AllocatorReturnNull(); // Overflow. if (map_size < size) return AllocatorReturnNull(); // Overflow.
uptr map_beg = reinterpret_cast<uptr>( uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator")); MmapOrDie(map_size, "LargeMmapAllocator"));
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size); MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size; uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_; uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align. if (res & (alignment - 1)) // Align.
res += alignment - (res & (alignment - 1)); res += alignment - (res & (alignment - 1));
CHECK_EQ(0, res & (alignment - 1)); CHECK(IsAligned(res, alignment));
CHECK(IsAligned(res, page_size_));
CHECK_GE(res + size, map_beg);
CHECK_LE(res + size, map_end); CHECK_LE(res + size, map_end);
Header *h = GetHeader(res); Header *h = GetHeader(res);
h->size = size; h->size = size;
......
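The reordered checks in LargeMmapAllocator::Allocate above lean on the usual round-up-to-a-power-of-two trick: after stepping one page past map_beg to leave room for the chunk header, res is bumped to the next multiple of alignment, which also keeps it page-aligned. A small self-contained sketch of just that arithmetic (names and constants below are illustrative, not the library's):

#include <cassert>
#include <cstdint>

// Round addr up to the next multiple of alignment (a power of two), mirroring
// the "res += alignment - (res & (alignment - 1))" step in the allocator.
static uint64_t RoundUpTo(uint64_t addr, uint64_t alignment) {
  if (addr & (alignment - 1))
    addr += alignment - (addr & (alignment - 1));
  return addr;
}

int main() {
  const uint64_t kPageSize = 0x1000;
  uint64_t map_beg = 0x7f0000000000;                       // pretend mmap result (page-aligned)
  uint64_t res = RoundUpTo(map_beg + kPageSize, 1 << 16);  // 64K user alignment
  assert((res & ((1 << 16) - 1)) == 0);  // same invariant as CHECK(IsAligned(res, alignment))
  assert((res & (kPageSize - 1)) == 0);  // and CHECK(IsAligned(res, page_size_))
  return 0;
}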
...@@ -153,23 +153,12 @@ const char *StripPathPrefix(const char *filepath, ...@@ -153,23 +153,12 @@ const char *StripPathPrefix(const char *filepath,
return pos; return pos;
} }
void PrintSourceLocation(InternalScopedString *buffer, const char *file, const char *StripModuleName(const char *module) {
int line, int column) { if (module == 0)
CHECK(file); return 0;
buffer->append("%s", if (const char *slash_pos = internal_strrchr(module, '/'))
StripPathPrefix(file, common_flags()->strip_path_prefix)); return slash_pos + 1;
if (line > 0) { return module;
buffer->append(":%d", line);
if (column > 0)
buffer->append(":%d", column);
}
}
void PrintModuleAndOffset(InternalScopedString *buffer, const char *module,
uptr offset) {
buffer->append("(%s+0x%zx)",
StripPathPrefix(module, common_flags()->strip_path_prefix),
offset);
} }
void ReportErrorSummary(const char *error_message) { void ReportErrorSummary(const char *error_message) {
...@@ -215,17 +204,6 @@ bool LoadedModule::containsAddress(uptr address) const { ...@@ -215,17 +204,6 @@ bool LoadedModule::containsAddress(uptr address) const {
return false; return false;
} }
char *StripModuleName(const char *module) {
if (module == 0)
return 0;
const char *short_module_name = internal_strrchr(module, '/');
if (short_module_name)
short_module_name += 1;
else
short_module_name = module;
return internal_strdup(short_module_name);
}
static atomic_uintptr_t g_total_mmaped; static atomic_uintptr_t g_total_mmaped;
void IncreaseTotalMmap(uptr size) { void IncreaseTotalMmap(uptr size) {
......
...@@ -170,10 +170,8 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size); ...@@ -170,10 +170,8 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Error report formatting. // Error report formatting.
const char *StripPathPrefix(const char *filepath, const char *StripPathPrefix(const char *filepath,
const char *strip_file_prefix); const char *strip_file_prefix);
void PrintSourceLocation(InternalScopedString *buffer, const char *file, // Strip the directories from the module name.
int line, int column); const char *StripModuleName(const char *module);
void PrintModuleAndOffset(InternalScopedString *buffer,
const char *module, uptr offset);
// OS // OS
void DisableCoreDumperIfNecessary(); void DisableCoreDumperIfNecessary();
...@@ -207,9 +205,6 @@ void SleepForMillis(int millis); ...@@ -207,9 +205,6 @@ void SleepForMillis(int millis);
u64 NanoTime(); u64 NanoTime();
int Atexit(void (*function)(void)); int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size); void SortArray(uptr *array, uptr size);
// Strip the directories from the module name, return a new string allocated
// with internal_strdup.
char *StripModuleName(const char *module);
// Exit // Exit
void NORETURN Abort(); void NORETURN Abort();
......
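StripModuleName above also changes ownership: the old version returned a fresh internal_strdup() buffer that callers such as CovDump had to InternalFree, while the new one returns a pointer into the string it was given (hence the removed InternalFree calls in the coverage code). A standalone sketch of the new, allocation-free behaviour, written against the plain C library rather than the runtime's internals:

#include <cstdio>
#include <cstring>

// Same idea as the new StripModuleName: return a pointer into `module`
// just past the last '/', or `module` itself if there is no slash.
static const char *StripModuleName(const char *module) {
  if (module == nullptr) return nullptr;
  if (const char *slash_pos = strrchr(module, '/'))
    return slash_pos + 1;
  return module;
}

int main() {
  // Prints "libfoo.so"; nothing to free, unlike the old strdup-based version.
  printf("%s\n", StripModuleName("/usr/lib/libfoo.so"));
  return 0;
}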
...@@ -39,6 +39,10 @@ ...@@ -39,6 +39,10 @@
#define va_copy(dst, src) ((dst) = (src)) #define va_copy(dst, src) ((dst) = (src))
#endif // _WIN32 #endif // _WIN32
#if SANITIZER_FREEBSD
#define pthread_setname_np pthread_set_name_np
#endif
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE #ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {} #define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
#endif #endif
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include "sanitizer_mutex.h" #include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h" #include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h" #include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h" #include "sanitizer_flags.h"
atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
...@@ -61,6 +62,9 @@ class CoverageData { ...@@ -61,6 +62,9 @@ class CoverageData {
void AfterFork(int child_pid); void AfterFork(int child_pid);
void Extend(uptr npcs); void Extend(uptr npcs);
void Add(uptr pc); void Add(uptr pc);
void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
uptr cache_size);
void DumpCallerCalleePairs();
uptr *data(); uptr *data();
uptr size(); uptr size();
...@@ -83,6 +87,14 @@ class CoverageData { ...@@ -83,6 +87,14 @@ class CoverageData {
uptr pc_array_mapped_size; uptr pc_array_mapped_size;
// Descriptor of the file mapped pc array. // Descriptor of the file mapped pc array.
int pc_fd; int pc_fd;
// Caller-Callee (cc) array, size and current index.
static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
uptr **cc_array;
atomic_uintptr_t cc_array_index;
atomic_uintptr_t cc_array_size;
StaticSpinMutex mu; StaticSpinMutex mu;
void DirectOpen(); void DirectOpen();
...@@ -116,6 +128,11 @@ void CoverageData::Init() { ...@@ -116,6 +128,11 @@ void CoverageData::Init() {
atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed); atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
atomic_store(&pc_array_index, 0, memory_order_relaxed); atomic_store(&pc_array_index, 0, memory_order_relaxed);
} }
cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
atomic_store(&cc_array_index, 0, memory_order_relaxed);
} }
void CoverageData::ReInit() { void CoverageData::ReInit() {
...@@ -184,6 +201,38 @@ void CoverageData::Add(uptr pc) { ...@@ -184,6 +201,38 @@ void CoverageData::Add(uptr pc) {
pc_array[idx] = pc; pc_array[idx] = pc;
} }
// Registers a pair caller=>callee.
// When a given caller is seen for the first time, the callee_cache is added
// to the global array cc_array, callee_cache[0] is set to caller and
// callee_cache[1] is set to cache_size.
// Then we are trying to add callee to callee_cache [2,cache_size) if it is
// not there yet.
// If the cache is full we drop the callee (may want to fix this later).
void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
uptr cache_size) {
if (!cc_array) return;
atomic_uintptr_t *atomic_callee_cache =
reinterpret_cast<atomic_uintptr_t *>(callee_cache);
uptr zero = 0;
if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
memory_order_seq_cst)) {
uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
CHECK_LT(idx * sizeof(uptr),
atomic_load(&cc_array_size, memory_order_acquire));
callee_cache[1] = cache_size;
cc_array[idx] = callee_cache;
}
CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
for (uptr i = 2; i < cache_size; i++) {
uptr was = 0;
if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
memory_order_seq_cst))
return;
if (was == callee) // Already have this callee.
return;
}
}
uptr *CoverageData::data() { uptr *CoverageData::data() {
return pc_array; return pc_array;
} }
...@@ -266,6 +315,45 @@ static int CovOpenFile(bool packed, const char* name) { ...@@ -266,6 +315,45 @@ static int CovOpenFile(bool packed, const char* name) {
return fd; return fd;
} }
// This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() {
uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
if (!max_idx) return;
auto sym = Symbolizer::GetOrInit();
if (!sym)
return;
InternalScopedString out(32 << 20);
uptr total = 0;
for (uptr i = 0; i < max_idx; i++) {
uptr *cc_cache = cc_array[i];
CHECK(cc_cache);
uptr caller = cc_cache[0];
uptr n_callees = cc_cache[1];
const char *caller_module_name = "<unknown>";
uptr caller_module_address = 0;
sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
&caller_module_address);
for (uptr j = 2; j < n_callees; j++) {
uptr callee = cc_cache[j];
if (!callee) break;
total++;
const char *callee_module_name = "<unknown>";
uptr callee_module_address = 0;
sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
&callee_module_address);
out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
caller_module_address, callee_module_name,
callee_module_address);
}
}
int fd = CovOpenFile(false, "caller-callee");
if (fd < 0) return;
internal_write(fd, out.data(), out.length());
internal_close(fd);
VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
}
// Dump the coverage on disk. // Dump the coverage on disk.
static void CovDump() { static void CovDump() {
if (!common_flags()->coverage || common_flags()->coverage_direct) return; if (!common_flags()->coverage || common_flags()->coverage_direct) return;
...@@ -297,7 +385,7 @@ static void CovDump() { ...@@ -297,7 +385,7 @@ static void CovDump() {
CHECK_LE(diff, 0xffffffffU); CHECK_LE(diff, 0xffffffffU);
offsets.push_back(static_cast<u32>(diff)); offsets.push_back(static_cast<u32>(diff));
} }
char *module_name = StripModuleName(module.data()); const char *module_name = StripModuleName(module.data());
if (cov_sandboxed) { if (cov_sandboxed) {
if (cov_fd >= 0) { if (cov_fd >= 0) {
CovWritePacked(internal_getpid(), module_name, offsets.data(), CovWritePacked(internal_getpid(), module_name, offsets.data(),
...@@ -317,11 +405,11 @@ static void CovDump() { ...@@ -317,11 +405,11 @@ static void CovDump() {
vb - old_vb); vb - old_vb);
} }
} }
InternalFree(module_name);
} }
} }
if (cov_fd >= 0) if (cov_fd >= 0)
internal_close(cov_fd); internal_close(cov_fd);
coverage_data.DumpCallerCalleePairs();
#endif // !SANITIZER_WINDOWS #endif // !SANITIZER_WINDOWS
} }
...@@ -357,6 +445,11 @@ extern "C" { ...@@ -357,6 +445,11 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC())); coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
} }
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
callee, callee_cache16, 16);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
coverage_data.Init(); coverage_data.Init();
......
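The comment on CoverageData::IndirCall above describes a small fixed-size cache per indirect call site: slot 0 holds the caller PC, slot 1 the cache size, and slots [2, cache_size) accumulate distinct callee PCs until the cache is full. A simplified single-threaded model of that layout (the real code claims slots with atomic compare-and-swap; everything below is illustrative):

#include <cstdint>
#include <cstdio>

typedef uint64_t uptr;

// Record `callee` in a per-call-site cache laid out as:
//   cache[0] = caller PC, cache[1] = cache size, cache[2..size) = callees.
// Duplicates are ignored; once the cache is full the callee is dropped.
static void IndirCall(uptr caller, uptr callee, uptr cache[], uptr cache_size) {
  if (cache[0] == 0) {   // first indirect call seen from this site
    cache[0] = caller;
    cache[1] = cache_size;
  }
  for (uptr i = 2; i < cache_size; i++) {
    if (cache[i] == 0) { cache[i] = callee; return; }  // free slot
    if (cache[i] == callee) return;                    // already recorded
  }
  // Cache full: drop the callee, as the real implementation does.
}

int main() {
  uptr cache16[16] = {0};
  IndirCall(0x401000, 0x402000, cache16, 16);
  IndirCall(0x401000, 0x403000, cache16, 16);
  IndirCall(0x401000, 0x402000, cache16, 16);  // duplicate, ignored
  printf("callees recorded: 0x%llx 0x%llx\n",
         (unsigned long long)cache16[2], (unsigned long long)cache16[3]);
  return 0;
}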
...@@ -78,7 +78,7 @@ void CovUpdateMapping(uptr caller_pc) { ...@@ -78,7 +78,7 @@ void CovUpdateMapping(uptr caller_pc) {
text.append("%d\n", sizeof(uptr) * 8); text.append("%d\n", sizeof(uptr) * 8);
for (int i = 0; i < n_modules; ++i) { for (int i = 0; i < n_modules; ++i) {
char *module_name = StripModuleName(modules[i].full_name()); const char *module_name = StripModuleName(modules[i].full_name());
for (unsigned j = 0; j < modules[i].n_ranges(); ++j) { for (unsigned j = 0; j < modules[i].n_ranges(); ++j) {
if (modules[i].address_range_executable(j)) { if (modules[i].address_range_executable(j)) {
uptr start = modules[i].address_range_start(j); uptr start = modules[i].address_range_start(j);
...@@ -89,7 +89,6 @@ void CovUpdateMapping(uptr caller_pc) { ...@@ -89,7 +89,6 @@ void CovUpdateMapping(uptr caller_pc) {
cached_mapping.SetModuleRange(start, end); cached_mapping.SetModuleRange(start, end);
} }
} }
InternalFree(module_name);
} }
int err; int err;
......
...@@ -37,6 +37,7 @@ void SetCommonFlagsDefaults(CommonFlags *f) { ...@@ -37,6 +37,7 @@ void SetCommonFlagsDefaults(CommonFlags *f) {
f->external_symbolizer_path = 0; f->external_symbolizer_path = 0;
f->allow_addr2line = false; f->allow_addr2line = false;
f->strip_path_prefix = ""; f->strip_path_prefix = "";
f->fast_unwind_on_check = false;
f->fast_unwind_on_fatal = false; f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true; f->fast_unwind_on_malloc = true;
f->handle_ioctl = false; f->handle_ioctl = false;
...@@ -64,6 +65,8 @@ void SetCommonFlagsDefaults(CommonFlags *f) { ...@@ -64,6 +65,8 @@ void SetCommonFlagsDefaults(CommonFlags *f) {
f->suppressions = ""; f->suppressions = "";
f->print_suppressions = true; f->print_suppressions = true;
f->disable_coredump = (SANITIZER_WORDSIZE == 64); f->disable_coredump = (SANITIZER_WORDSIZE == 64);
f->symbolize_inline_frames = true;
f->stack_trace_format = "DEFAULT";
} }
void ParseCommonFlagsFromString(CommonFlags *f, const char *str) { void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
...@@ -79,6 +82,9 @@ void ParseCommonFlagsFromString(CommonFlags *f, const char *str) { ...@@ -79,6 +82,9 @@ void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
"unavailable."); "unavailable.");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix", ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
"Strips this prefix from file paths in error reports."); "Strips this prefix from file paths in error reports.");
ParseFlag(str, &f->fast_unwind_on_check, "fast_unwind_on_check",
"If available, use the fast frame-pointer-based unwinder on "
"internal CHECK failures.");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal", ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
"If available, use the fast frame-pointer-based unwinder on fatal " "If available, use the fast frame-pointer-based unwinder on fatal "
"errors."); "errors.");
...@@ -152,6 +158,12 @@ void ParseCommonFlagsFromString(CommonFlags *f, const char *str) { ...@@ -152,6 +158,12 @@ void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
"Disable core dumping. By default, disable_core=1 on 64-bit to avoid " "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
"dumping a 16T+ core file. Ignored on OSes that don't dump core by" "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
"default and for sanitizers that don't reserve lots of virtual memory."); "default and for sanitizers that don't reserve lots of virtual memory.");
ParseFlag(str, &f->symbolize_inline_frames, "symbolize_inline_frames",
"Print inlined frames in stacktraces. Defaults to true.");
ParseFlag(str, &f->stack_trace_format, "stack_trace_format",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.");
// Do a sanity check for certain flags. // Do a sanity check for certain flags.
if (f->malloc_context_size < 1) if (f->malloc_context_size < 1)
......
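The three flags added above are ordinary common flags, so they are picked up from the usual colon-separated options string of whichever sanitizer is linked in; for example, a string like "fast_unwind_on_check=1:symbolize_inline_frames=0:stack_trace_format=DEFAULT" (delivered via e.g. ASAN_OPTIONS, or fed to ParseCommonFlagsFromString) turns on the fast unwinder for CHECK failures, suppresses inlined frames, and keeps the built-in frame format. The exact environment variable depends on the sanitizer in use; the flag names and the DEFAULT value come straight from this patch.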
...@@ -30,6 +30,7 @@ struct CommonFlags { ...@@ -30,6 +30,7 @@ struct CommonFlags {
const char *external_symbolizer_path; const char *external_symbolizer_path;
bool allow_addr2line; bool allow_addr2line;
const char *strip_path_prefix; const char *strip_path_prefix;
bool fast_unwind_on_check;
bool fast_unwind_on_fatal; bool fast_unwind_on_fatal;
bool fast_unwind_on_malloc; bool fast_unwind_on_malloc;
bool handle_ioctl; bool handle_ioctl;
...@@ -58,6 +59,8 @@ struct CommonFlags { ...@@ -58,6 +59,8 @@ struct CommonFlags {
const char *suppressions; const char *suppressions;
bool print_suppressions; bool print_suppressions;
bool disable_coredump; bool disable_coredump;
bool symbolize_inline_frames;
const char *stack_trace_format;
}; };
inline CommonFlags *common_flags() { inline CommonFlags *common_flags() {
......
...@@ -158,7 +158,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, ...@@ -158,7 +158,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
// pthread_get_stacksize_np() returns an incorrect stack size for the main // pthread_get_stacksize_np() returns an incorrect stack size for the main
// thread on Mavericks. See // thread on Mavericks. See
// https://code.google.com/p/address-sanitizer/issues/detail?id=261 // https://code.google.com/p/address-sanitizer/issues/detail?id=261
if ((GetMacosVersion() == MACOS_VERSION_MAVERICKS) && at_initialization && if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization &&
stacksize == (1 << 19)) { stacksize == (1 << 19)) {
struct rlimit rl; struct rlimit rl;
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0); CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
...@@ -295,6 +295,7 @@ MacosVersion GetMacosVersionInternal() { ...@@ -295,6 +295,7 @@ MacosVersion GetMacosVersionInternal() {
case '1': return MACOS_VERSION_LION; case '1': return MACOS_VERSION_LION;
case '2': return MACOS_VERSION_MOUNTAIN_LION; case '2': return MACOS_VERSION_MOUNTAIN_LION;
case '3': return MACOS_VERSION_MAVERICKS; case '3': return MACOS_VERSION_MAVERICKS;
case '4': return MACOS_VERSION_YOSEMITE;
default: return MACOS_VERSION_UNKNOWN; default: return MACOS_VERSION_UNKNOWN;
} }
} }
......
...@@ -23,7 +23,8 @@ enum MacosVersion { ...@@ -23,7 +23,8 @@ enum MacosVersion {
MACOS_VERSION_SNOW_LEOPARD, MACOS_VERSION_SNOW_LEOPARD,
MACOS_VERSION_LION, MACOS_VERSION_LION,
MACOS_VERSION_MOUNTAIN_LION, MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS MACOS_VERSION_MAVERICKS,
MACOS_VERSION_YOSEMITE,
}; };
MacosVersion GetMacosVersion(); MacosVersion GetMacosVersion();
......
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
# define SANITIZER_WINDOWS 0 # define SANITIZER_WINDOWS 0
#endif #endif
#if defined(__ANDROID__) || defined(ANDROID) #if defined(__ANDROID__)
# define SANITIZER_ANDROID 1 # define SANITIZER_ANDROID 1
#else #else
# define SANITIZER_ANDROID 0 # define SANITIZER_ANDROID 0
...@@ -79,7 +79,7 @@ ...@@ -79,7 +79,7 @@
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or // For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here. // change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64 #ifndef SANITIZER_CAN_USE_ALLOCATOR64
# if defined(__aarch64__) # if defined(__aarch64__) || defined(__mips64)
# define SANITIZER_CAN_USE_ALLOCATOR64 0 # define SANITIZER_CAN_USE_ALLOCATOR64 0
# else # else
# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) # define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
...@@ -107,4 +107,10 @@ ...@@ -107,4 +107,10 @@
# endif # endif
#endif #endif
#ifdef __mips__
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
#else
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
#endif
#endif // SANITIZER_PLATFORM_H #endif // SANITIZER_PLATFORM_H
...@@ -204,7 +204,8 @@ ...@@ -204,7 +204,8 @@
#define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD #define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD
#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TLS_GET_ADDR SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_TLS_GET_ADDR SI_LINUX_NOT_ANDROID
...@@ -226,7 +227,8 @@ ...@@ -226,7 +227,8 @@
#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE SI_LINUX_NOT_ANDROID || SI_MAC #define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC #define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
......
...@@ -36,7 +36,6 @@ ...@@ -36,7 +36,6 @@
#define uid_t __kernel_uid_t #define uid_t __kernel_uid_t
#define gid_t __kernel_gid_t #define gid_t __kernel_gid_t
#define off_t __kernel_off_t #define off_t __kernel_off_t
#define time_t __kernel_time_t
// This header seems to contain the definitions of _kernel_ stat* structs. // This header seems to contain the definitions of _kernel_ stat* structs.
#include <asm/stat.h> #include <asm/stat.h>
#undef ino_t #undef ino_t
...@@ -61,7 +60,7 @@ namespace __sanitizer { ...@@ -61,7 +60,7 @@ namespace __sanitizer {
} // namespace __sanitizer } // namespace __sanitizer
#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\ #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
&& !defined(__mips__) && !defined(__sparc__) && !defined(__mips__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif #endif
......
...@@ -13,7 +13,19 @@ ...@@ -13,7 +13,19 @@
#include "sanitizer_platform.h" #include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
// Tests in this file assume that off_t-dependent data structures match the
// libc ABI. For example, struct dirent here is what readdir() function (as
// exported from libc) returns, and not the user-facing "dirent", which
// depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
#ifdef _FILE_OFFSET_BITS
#undef _FILE_OFFSET_BITS
#endif
#if SANITIZER_FREEBSD
#define _WANT_RTENTRY
#include <sys/param.h>
#include <sys/socketvar.h>
#endif
#include <arpa/inet.h> #include <arpa/inet.h>
#include <dirent.h> #include <dirent.h>
#include <errno.h> #include <errno.h>
...@@ -551,7 +563,9 @@ namespace __sanitizer { ...@@ -551,7 +563,9 @@ namespace __sanitizer {
unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID; unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID;
unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU; unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU;
unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP; unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP;
unsigned IOCTL_SIOCADDRT = SIOCADDRT;
unsigned IOCTL_SIOCDARP = SIOCDARP; unsigned IOCTL_SIOCDARP = SIOCDARP;
unsigned IOCTL_SIOCDELRT = SIOCDELRT;
unsigned IOCTL_SIOCDRARP = SIOCDRARP; unsigned IOCTL_SIOCDRARP = SIOCDRARP;
unsigned IOCTL_SIOCGARP = SIOCGARP; unsigned IOCTL_SIOCGARP = SIOCGARP;
unsigned IOCTL_SIOCGIFENCAP = SIOCGIFENCAP; unsigned IOCTL_SIOCGIFENCAP = SIOCGIFENCAP;
...@@ -637,8 +651,6 @@ namespace __sanitizer { ...@@ -637,8 +651,6 @@ namespace __sanitizer {
#if SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX || SANITIZER_FREEBSD
unsigned IOCTL_MTIOCGET = MTIOCGET; unsigned IOCTL_MTIOCGET = MTIOCGET;
unsigned IOCTL_MTIOCTOP = MTIOCTOP; unsigned IOCTL_MTIOCTOP = MTIOCTOP;
unsigned IOCTL_SIOCADDRT = SIOCADDRT;
unsigned IOCTL_SIOCDELRT = SIOCDELRT;
unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE; unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS; unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK; unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
......
...@@ -72,14 +72,6 @@ namespace __sanitizer { ...@@ -72,14 +72,6 @@ namespace __sanitizer {
const unsigned struct_kernel_stat_sz = 144; const unsigned struct_kernel_stat_sz = 144;
#endif #endif
const unsigned struct_kernel_stat64_sz = 104; const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__sparc__) && defined(__arch64__)
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 104;
const unsigned struct_kernel_stat64_sz = 144;
#elif defined(__sparc__) && !defined(__arch64__)
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
#endif #endif
struct __sanitizer_perf_event_attr { struct __sanitizer_perf_event_attr {
unsigned type; unsigned type;
...@@ -102,7 +94,7 @@ namespace __sanitizer { ...@@ -102,7 +94,7 @@ namespace __sanitizer {
#if defined(__powerpc64__) #if defined(__powerpc64__)
const unsigned struct___old_kernel_stat_sz = 0; const unsigned struct___old_kernel_stat_sz = 0;
#elif !defined(__sparc__) #else
const unsigned struct___old_kernel_stat_sz = 32; const unsigned struct___old_kernel_stat_sz = 32;
#endif #endif
...@@ -181,18 +173,6 @@ namespace __sanitizer { ...@@ -181,18 +173,6 @@ namespace __sanitizer {
unsigned short __pad1; unsigned short __pad1;
unsigned long __unused1; unsigned long __unused1;
unsigned long __unused2; unsigned long __unused2;
#elif defined(__sparc__)
# if defined(__arch64__)
unsigned mode;
unsigned short __pad1;
# else
unsigned short __pad1;
unsigned short mode;
unsigned short __pad2;
# endif
unsigned short __seq;
unsigned long long __unused1;
unsigned long long __unused2;
#else #else
unsigned short mode; unsigned short mode;
unsigned short __pad1; unsigned short __pad1;
...@@ -210,26 +190,6 @@ namespace __sanitizer { ...@@ -210,26 +190,6 @@ namespace __sanitizer {
struct __sanitizer_shmid_ds { struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm; __sanitizer_ipc_perm shm_perm;
#if defined(__sparc__)
# if !defined(__arch64__)
u32 __pad1;
# endif
long shm_atime;
# if !defined(__arch64__)
u32 __pad2;
# endif
long shm_dtime;
# if !defined(__arch64__)
u32 __pad3;
# endif
long shm_ctime;
uptr shm_segsz;
int shm_cpid;
int shm_lpid;
unsigned long shm_nattch;
unsigned long __glibc_reserved1;
unsigned long __glibc_reserved2;
#else
#ifndef __powerpc__ #ifndef __powerpc__
uptr shm_segsz; uptr shm_segsz;
#elif !defined(__powerpc64__) #elif !defined(__powerpc64__)
...@@ -267,7 +227,6 @@ namespace __sanitizer { ...@@ -267,7 +227,6 @@ namespace __sanitizer {
uptr __unused4; uptr __unused4;
uptr __unused5; uptr __unused5;
#endif #endif
#endif
}; };
#elif SANITIZER_FREEBSD #elif SANITIZER_FREEBSD
struct __sanitizer_ipc_perm { struct __sanitizer_ipc_perm {
...@@ -511,7 +470,7 @@ namespace __sanitizer { ...@@ -511,7 +470,7 @@ namespace __sanitizer {
typedef long __sanitizer___kernel_off_t; typedef long __sanitizer___kernel_off_t;
#endif #endif
#if defined(__powerpc__) || defined(__aarch64__) || defined(__mips__) #if defined(__powerpc__) || defined(__mips__)
typedef unsigned int __sanitizer___kernel_old_uid_t; typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t; typedef unsigned int __sanitizer___kernel_old_gid_t;
#else #else
...@@ -564,13 +523,9 @@ namespace __sanitizer { ...@@ -564,13 +523,9 @@ namespace __sanitizer {
#else #else
__sanitizer_sigset_t sa_mask; __sanitizer_sigset_t sa_mask;
#ifndef __mips__ #ifndef __mips__
#if defined(__sparc__)
unsigned long sa_flags;
#else
int sa_flags; int sa_flags;
#endif #endif
#endif #endif
#endif
#if SANITIZER_LINUX #if SANITIZER_LINUX
void (*sa_restorer)(); void (*sa_restorer)();
#endif #endif
...@@ -790,7 +745,7 @@ struct __sanitizer_obstack { ...@@ -790,7 +745,7 @@ struct __sanitizer_obstack {
#define IOC_NRBITS 8 #define IOC_NRBITS 8
#define IOC_TYPEBITS 8 #define IOC_TYPEBITS 8
#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || defined(__sparc__) #if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
#define IOC_SIZEBITS 13 #define IOC_SIZEBITS 13
#define IOC_DIRBITS 3 #define IOC_DIRBITS 3
#define IOC_NONE 1U #define IOC_NONE 1U
...@@ -820,17 +775,7 @@ struct __sanitizer_obstack { ...@@ -820,17 +775,7 @@ struct __sanitizer_obstack {
#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK) #define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK) #define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK) #define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
#if defined(__sparc__)
// In sparc the 14 bits SIZE field overlaps with the
// least significant bit of DIR, so either IOC_READ or
// IOC_WRITE shall be 1 in order to get a non-zero SIZE.
# define IOC_SIZE(nr) \
((((((nr) >> 29) & 0x7) & (4U|2U)) == 0)? \
0 : (((nr) >> 16) & 0x3fff))
#else
#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK) #define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
#endif
extern unsigned struct_arpreq_sz;
extern unsigned struct_ifreq_sz;
......
...@@ -82,9 +82,12 @@ uptr GetMaxVirtualAddress() { ...@@ -82,9 +82,12 @@ uptr GetMaxVirtualAddress() {
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL. // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
// Note that with 'ulimit -s unlimited' the stack is moved away from the top // Note that with 'ulimit -s unlimited' the stack is moved away from the top
// of the address space, so simply checking the stack address is not enough. // of the address space, so simply checking the stack address is not enough.
return (1ULL << 44) - 1; // 0x00000fffffffffffUL // This should (does) work for both PowerPC64 Endian modes.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
# elif defined(__aarch64__) # elif defined(__aarch64__)
return (1ULL << 39) - 1; return (1ULL << 39) - 1;
# elif defined(__mips64)
return (1ULL << 40) - 1;
# else # else
return (1ULL << 47) - 1; // 0x00007fffffffffffUL; return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# endif # endif
......
...@@ -111,7 +111,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) { ...@@ -111,7 +111,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int result = 0; int result = 0;
result += AppendString(buff, buff_end, -1, "0x"); result += AppendString(buff, buff_end, -1, "0x");
result += AppendUnsigned(buff, buff_end, ptr_value, 16, result += AppendUnsigned(buff, buff_end, ptr_value, 16,
(SANITIZER_WORDSIZE == 64) ? 12 : 8, true); SANITIZER_POINTER_FORMAT_LENGTH, true);
return result; return result;
} }
......
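The new SANITIZER_POINTER_FORMAT_LENGTH used in AppendPointer above appears to be just the number of hex digits needed for the largest user-space address each target can produce, so %p-style output stays fixed-width per platform. For reference (the address-space sizes line up with the GetMaxVirtualAddress values earlier in this patch):

// 47-bit address space (default 64-bit): (1ULL << 47) - 1 == 0x7fffffffffff -> 12 hex digits
// 40-bit address space (mips64):         (1ULL << 40) - 1 == 0xffffffffff   -> 10 hex digits
// 32-bit targets:                        0xffffffff                         ->  8 hex digits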
...@@ -16,31 +16,6 @@ ...@@ -16,31 +16,6 @@
namespace __sanitizer { namespace __sanitizer {
struct StackDepotDesc {
const uptr *stack;
uptr size;
u32 hash() const {
// murmur2
const u32 m = 0x5bd1e995;
const u32 seed = 0x9747b28c;
const u32 r = 24;
u32 h = seed ^ (size * sizeof(uptr));
for (uptr i = 0; i < size; i++) {
u32 k = stack[i];
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
bool is_valid() { return size > 0 && stack; }
};
struct StackDepotNode { struct StackDepotNode {
StackDepotNode *link; StackDepotNode *link;
u32 id; u32 id;
...@@ -56,28 +31,49 @@ struct StackDepotNode { ...@@ -56,28 +31,49 @@ struct StackDepotNode {
static const u32 kUseCountMask = (1 << kUseCountBits) - 1; static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
static const u32 kHashMask = ~kUseCountMask; static const u32 kHashMask = ~kUseCountMask;
typedef StackDepotDesc args_type; typedef StackTrace args_type;
bool eq(u32 hash, const args_type &args) const { bool eq(u32 hash, const args_type &args) const {
u32 hash_bits = u32 hash_bits =
atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask; atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
if ((hash & kHashMask) != hash_bits || args.size != size) return false; if ((hash & kHashMask) != hash_bits || args.size != size) return false;
uptr i = 0; uptr i = 0;
for (; i < size; i++) { for (; i < size; i++) {
if (stack[i] != args.stack[i]) return false; if (stack[i] != args.trace[i]) return false;
} }
return true; return true;
} }
static uptr storage_size(const args_type &args) { static uptr storage_size(const args_type &args) {
return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr); return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
} }
static u32 hash(const args_type &args) {
// murmur2
const u32 m = 0x5bd1e995;
const u32 seed = 0x9747b28c;
const u32 r = 24;
u32 h = seed ^ (args.size * sizeof(uptr));
for (uptr i = 0; i < args.size; i++) {
u32 k = args.trace[i];
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
static bool is_valid(const args_type &args) {
return args.size > 0 && args.trace;
}
void store(const args_type &args, u32 hash) { void store(const args_type &args, u32 hash) {
atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed); atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
size = args.size; size = args.size;
internal_memcpy(stack, args.stack, size * sizeof(uptr)); internal_memcpy(stack, args.trace, size * sizeof(uptr));
} }
args_type load() const { args_type load() const {
args_type ret = {&stack[0], size}; return args_type(&stack[0], size);
return ret;
} }
StackDepotHandle get_handle() { return StackDepotHandle(this); } StackDepotHandle get_handle() { return StackDepotHandle(this); }
...@@ -97,8 +93,6 @@ void StackDepotHandle::inc_use_count_unsafe() { ...@@ -97,8 +93,6 @@ void StackDepotHandle::inc_use_count_unsafe() {
StackDepotNode::kUseCountMask; StackDepotNode::kUseCountMask;
CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount); CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
} }
uptr StackDepotHandle::size() { return node_->size; }
uptr *StackDepotHandle::stack() { return &node_->stack[0]; }
// FIXME(dvyukov): this single reserved bit is used in TSan. // FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog> typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
...@@ -109,21 +103,17 @@ StackDepotStats *StackDepotGetStats() { ...@@ -109,21 +103,17 @@ StackDepotStats *StackDepotGetStats() {
return theDepot.GetStats(); return theDepot.GetStats();
} }
u32 StackDepotPut(const uptr *stack, uptr size) { u32 StackDepotPut(StackTrace stack) {
StackDepotDesc desc = {stack, size}; StackDepotHandle h = theDepot.Put(stack);
StackDepotHandle h = theDepot.Put(desc);
return h.valid() ? h.id() : 0; return h.valid() ? h.id() : 0;
} }
StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) { StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
StackDepotDesc desc = {stack, size}; return theDepot.Put(stack);
return theDepot.Put(desc);
} }
const uptr *StackDepotGet(u32 id, uptr *size) { StackTrace StackDepotGet(u32 id) {
StackDepotDesc desc = theDepot.Get(id); return theDepot.Get(id);
*size = desc.size;
return desc.stack;
} }
void StackDepotLockAll() { void StackDepotLockAll() {
...@@ -154,18 +144,15 @@ StackDepotReverseMap::StackDepotReverseMap() ...@@ -154,18 +144,15 @@ StackDepotReverseMap::StackDepotReverseMap()
InternalSort(&map_, map_.size(), IdDescPair::IdComparator); InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
} }
const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) { StackTrace StackDepotReverseMap::Get(u32 id) {
if (!map_.size()) return 0; if (!map_.size())
return StackTrace();
IdDescPair pair = {id, 0}; IdDescPair pair = {id, 0};
uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair, uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
IdDescPair::IdComparator); IdDescPair::IdComparator);
if (idx > map_.size()) { if (idx > map_.size())
*size = 0; return StackTrace();
return 0; return map_[idx].desc->load();
}
StackDepotNode *desc = map_[idx].desc;
*size = desc->size;
return desc->stack;
} }
} // namespace __sanitizer } // namespace __sanitizer
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_internal_defs.h" #include "sanitizer_internal_defs.h"
#include "sanitizer_stacktrace.h"
namespace __sanitizer { namespace __sanitizer {
...@@ -26,17 +27,15 @@ struct StackDepotHandle { ...@@ -26,17 +27,15 @@ struct StackDepotHandle {
u32 id(); u32 id();
int use_count(); int use_count();
void inc_use_count_unsafe(); void inc_use_count_unsafe();
uptr size();
uptr *stack();
}; };
const int kStackDepotMaxUseCount = 1U << 20; const int kStackDepotMaxUseCount = 1U << 20;
StackDepotStats *StackDepotGetStats(); StackDepotStats *StackDepotGetStats();
u32 StackDepotPut(const uptr *stack, uptr size); u32 StackDepotPut(StackTrace stack);
StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size); StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
// Retrieves a stored stack trace by the id. // Retrieves a stored stack trace by the id.
const uptr *StackDepotGet(u32 id, uptr *size); StackTrace StackDepotGet(u32 id);
void StackDepotLockAll(); void StackDepotLockAll();
void StackDepotUnlockAll(); void StackDepotUnlockAll();
...@@ -48,7 +47,7 @@ void StackDepotUnlockAll(); ...@@ -48,7 +47,7 @@ void StackDepotUnlockAll();
class StackDepotReverseMap { class StackDepotReverseMap {
public: public:
StackDepotReverseMap(); StackDepotReverseMap();
const uptr *Get(u32 id, uptr *size); StackTrace Get(u32 id);
private: private:
struct IdDescPair { struct IdDescPair {
......
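The depot interface above now traffics in StackTrace values rather than raw (pointer, length) pairs, with the hashing and validity checks moved from the old StackDepotDesc into StackDepotNode. At the call sites touched elsewhere in this patch the change looks roughly like this (schematic, not standalone code):

// Old interface (pre-merge):
//   u32 id = StackDepotPut(stack.trace, stack.size);
//   uptr size = 0;
//   const uptr *pcs = StackDepotGet(id, &size);
//
// New interface (this merge):
//   u32 id = StackDepotPut(stack);        // stack converts to a StackTrace view
//   StackTrace st = StackDepotGet(id);    // st.trace / st.size, non-owning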
...@@ -95,8 +95,8 @@ typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type ...@@ -95,8 +95,8 @@ typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args, StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
bool *inserted) { bool *inserted) {
if (inserted) *inserted = false; if (inserted) *inserted = false;
if (!args.is_valid()) return handle_type(); if (!Node::is_valid(args)) return handle_type();
uptr h = args.hash(); uptr h = Node::hash(args);
atomic_uintptr_t *p = &tab[h % kTabSize]; atomic_uintptr_t *p = &tab[h % kTabSize];
uptr v = atomic_load(p, memory_order_consume); uptr v = atomic_load(p, memory_order_consume);
Node *s = (Node *)(v & ~1); Node *s = (Node *)(v & ~1);
......
...@@ -23,7 +23,7 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) { ...@@ -23,7 +23,7 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#if defined(__powerpc__) || defined(__powerpc64__) #if defined(__powerpc__) || defined(__powerpc64__)
// PCs are always 4 byte aligned. // PCs are always 4 byte aligned.
return pc - 4; return pc - 4;
#elif defined(__sparc__) #elif defined(__sparc__) || defined(__mips__)
return pc - 8; return pc - 8;
#else #else
return pc - 1; return pc - 1;
...@@ -34,6 +34,15 @@ uptr StackTrace::GetCurrentPc() { ...@@ -34,6 +34,15 @@ uptr StackTrace::GetCurrentPc() {
return GET_CALLER_PC(); return GET_CALLER_PC();
} }
void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
size = cnt + !!extra_top_pc;
CHECK_LE(size, kStackTraceMax);
internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
if (extra_top_pc)
trace_buffer[cnt] = extra_top_pc;
top_frame_bp = 0;
}
// Check if given pointer points into allocated stack area. // Check if given pointer points into allocated stack area.
static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) { static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr); return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
...@@ -49,32 +58,40 @@ static inline uhwptr *GetCanonicFrame(uptr bp, ...@@ -49,32 +58,40 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0; if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
uhwptr *bp_prev = (uhwptr *)bp; uhwptr *bp_prev = (uhwptr *)bp;
if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev; if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
return bp_prev - 1; // The next frame pointer does not look right. This could be a GCC frame, step
// back by 1 word and try again.
if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))
return bp_prev - 1;
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
// layouts. Assume LLVM.
return bp_prev;
#else #else
return (uhwptr*)bp; return (uhwptr*)bp;
#endif #endif
} }
void StackTrace::FastUnwindStack(uptr pc, uptr bp, void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top,
uptr stack_top, uptr stack_bottom, uptr stack_bottom, uptr max_depth) {
uptr max_depth) {
CHECK_GE(max_depth, 2); CHECK_GE(max_depth, 2);
trace[0] = pc; trace_buffer[0] = pc;
size = 1; size = 1;
if (stack_top < 4096) return; // Sanity check for stack top. if (stack_top < 4096) return; // Sanity check for stack top.
uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom); uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
uhwptr *prev_frame = 0; // Lowest possible address that makes sense as the next frame pointer.
// Goes up as we walk the stack.
uptr bottom = stack_bottom;
// Avoid infinite loop when frame == frame[0] by using frame > prev_frame. // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
while (frame > prev_frame && while (IsValidFrame((uptr)frame, stack_top, bottom) &&
IsValidFrame((uptr)frame, stack_top, stack_bottom) &&
IsAligned((uptr)frame, sizeof(*frame)) && IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) { size < max_depth) {
uhwptr pc1 = frame[1]; uhwptr pc1 = frame[1];
if (pc1 != pc) { if (pc1 != pc) {
trace[size++] = (uptr) pc1; trace_buffer[size++] = (uptr) pc1;
} }
prev_frame = frame; bottom = (uptr)frame;
frame = GetCanonicFrame((uptr)frame[0], stack_top, stack_bottom); frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
} }
} }
...@@ -82,15 +99,15 @@ static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) { ...@@ -82,15 +99,15 @@ static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) {
return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold; return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
} }
void StackTrace::PopStackFrames(uptr count) { void BufferedStackTrace::PopStackFrames(uptr count) {
CHECK_LT(count, size); CHECK_LT(count, size);
size -= count; size -= count;
for (uptr i = 0; i < size; ++i) { for (uptr i = 0; i < size; ++i) {
trace[i] = trace[i + count]; trace_buffer[i] = trace_buffer[i + count];
} }
} }
uptr StackTrace::LocatePcInTrace(uptr pc) { uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
// Use threshold to find PC in stack trace, as PC we want to unwind from may
// slightly differ from return address in the actual unwinded stack trace.
const int kPcThreshold = 288;
......
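FastUnwindStack above is a classic frame-pointer walk: each frame stores the previous frame pointer at offset 0 and the return address at offset 1, the lower bound is raised to the current frame on every step, and GetCanonicFrame now probes one word back to cope with differing GCC/LLVM frame layouts on the affected target. A heavily simplified, self-contained model of the same walk (it omits the pc1 != pc de-duplication and the layout probing; all names are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uintptr_t uptr;

// Minimal frame-pointer walk: frame[0] holds the caller's frame pointer,
// frame[1] the return address; stop at the first implausible pointer.
static size_t FastUnwind(uptr bp, uptr stack_top, uptr stack_bottom,
                         uptr *out, size_t max_depth) {
  size_t n = 0;
  uptr frame = bp;
  while (n < max_depth && frame > stack_bottom &&
         frame < stack_top - 2 * sizeof(uptr) && frame % sizeof(uptr) == 0) {
    const uptr *f = reinterpret_cast<const uptr *>(frame);
    out[n++] = f[1];       // saved return address
    stack_bottom = frame;  // next frame must be strictly higher, like `bottom` above
    frame = f[0];          // saved frame pointer of the caller
  }
  return n;
}

int main() {
  // Fake stack with three chained frames; 0x1111/0x2222/0x3333 stand in for PCs.
  uptr stack[64] = {0};
  uptr base = reinterpret_cast<uptr>(stack);
  stack[2] = reinterpret_cast<uptr>(&stack[10]);  stack[3] = 0x1111;
  stack[10] = reinterpret_cast<uptr>(&stack[22]); stack[11] = 0x2222;
  stack[22] = 0;                                  stack[23] = 0x3333;
  uptr pcs[16];
  size_t n = FastUnwind(reinterpret_cast<uptr>(&stack[2]),
                        base + sizeof(stack), base, pcs, 16);
  assert(n == 3 && pcs[0] == 0x1111 && pcs[1] == 0x2222 && pcs[2] == 0x3333);
  return 0;
}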
...@@ -27,44 +27,49 @@ static const uptr kStackTraceMax = 256; ...@@ -27,44 +27,49 @@ static const uptr kStackTraceMax = 256;
# define SANITIZER_CAN_FAST_UNWIND 1 # define SANITIZER_CAN_FAST_UNWIND 1
#endif #endif
// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://code.google.com/p/address-sanitizer/issues/detail?id=137
#if SANITIZER_MAC
# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
#endif
struct StackTrace { struct StackTrace {
typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer, const uptr *trace;
int out_size);
uptr top_frame_bp;
uptr size; uptr size;
uptr trace[kStackTraceMax];
// Prints a symbolized stacktrace, followed by an empty line. StackTrace() : trace(nullptr), size(0) {}
static void PrintStack(const uptr *addr, uptr size); StackTrace(const uptr *trace, uptr size) : trace(trace), size(size) {}
void Print() const {
PrintStack(trace, size);
}
void CopyFrom(const uptr *src, uptr src_size) { // Prints a symbolized stacktrace, followed by an empty line.
top_frame_bp = 0; void Print() const;
size = src_size;
if (size > kStackTraceMax) size = kStackTraceMax;
for (uptr i = 0; i < size; i++)
trace[i] = src[i];
}
static bool WillUseFastUnwind(bool request_fast_unwind) { static bool WillUseFastUnwind(bool request_fast_unwind) {
// Check if fast unwind is available. Fast unwind is the only option on Mac.
// It is also the only option on FreeBSD as the slow unwinding that
// leverages _Unwind_Backtrace() yields the call stack of the signal's
// handler and not of the code that raised the signal (as it does on Linux).
if (!SANITIZER_CAN_FAST_UNWIND) if (!SANITIZER_CAN_FAST_UNWIND)
return false; return false;
else if (SANITIZER_MAC != 0 || SANITIZER_FREEBSD != 0) else if (!SANITIZER_CAN_SLOW_UNWIND)
return true; return true;
return request_fast_unwind; return request_fast_unwind;
} }
void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
static uptr GetCurrentPc(); static uptr GetCurrentPc();
static uptr GetPreviousInstructionPc(uptr pc); static uptr GetPreviousInstructionPc(uptr pc);
typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
int out_size);
};
// StackTrace that owns the buffer used to store the addresses.
struct BufferedStackTrace : public StackTrace {
uptr trace_buffer[kStackTraceMax];
uptr top_frame_bp; // Optional bp of a top frame.
BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
private: private:
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom, void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
...@@ -74,6 +79,9 @@ struct StackTrace { ...@@ -74,6 +79,9 @@ struct StackTrace {
uptr max_depth); uptr max_depth);
void PopStackFrames(uptr count); void PopStackFrames(uptr count);
uptr LocatePcInTrace(uptr pc); uptr LocatePcInTrace(uptr pc);
BufferedStackTrace(const BufferedStackTrace &);
void operator=(const BufferedStackTrace &);
}; };
} // namespace __sanitizer } // namespace __sanitizer
......
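A hedged usage sketch of the split introduced in this header: BufferedStackTrace owns the storage and performs the unwinding, while StackTrace is a cheap non-owning view over any array of PCs. Every identifier below comes from this patch; pc, bp, stack_top and stack_bottom stand for values the caller already has.
BufferedStackTrace buffered;                     // owns trace_buffer[kStackTraceMax]
buffered.Unwind(kStackTraceMax, pc, bp, /*context=*/nullptr,
                stack_top, stack_bottom, /*request_fast_unwind=*/true);
buffered.Print();                                // inherited from StackTrace

StackTrace view(buffered.trace, buffered.size);  // non-owning view of the same PCs
view.Print();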
...@@ -10,58 +10,40 @@ ...@@ -10,58 +10,40 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h" #include "sanitizer_stacktrace.h"
#include "sanitizer_stacktrace_printer.h"
#include "sanitizer_symbolizer.h" #include "sanitizer_symbolizer.h"
namespace __sanitizer { namespace __sanitizer {
static void PrintStackFramePrefix(InternalScopedString *buffer, uptr frame_num, void StackTrace::Print() const {
uptr pc) { if (trace == nullptr || size == 0) {
buffer->append(" #%zu 0x%zx", frame_num, pc);
}
void StackTrace::PrintStack(const uptr *addr, uptr size) {
if (addr == 0 || size == 0) {
Printf(" <empty stack>\n\n"); Printf(" <empty stack>\n\n");
return; return;
} }
InternalScopedBuffer<char> buff(GetPageSizeCached() * 2); const int kMaxAddrFrames = 64;
InternalScopedBuffer<AddressInfo> addr_frames(64); InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++)
new(&addr_frames[i]) AddressInfo();
InternalScopedString frame_desc(GetPageSizeCached() * 2); InternalScopedString frame_desc(GetPageSizeCached() * 2);
uptr frame_num = 0; uptr frame_num = 0;
for (uptr i = 0; i < size && addr[i]; i++) { for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is, // PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call. // addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(addr[i]); uptr pc = GetPreviousInstructionPc(trace[i]);
uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC( uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
pc, addr_frames.data(), addr_frames.size()); pc, addr_frames.data(), kMaxAddrFrames);
if (addr_frames_num == 0) { if (addr_frames_num == 0) {
frame_desc.clear(); addr_frames[0].address = pc;
PrintStackFramePrefix(&frame_desc, frame_num, pc); addr_frames_num = 1;
frame_desc.append(" (<unknown module>)");
Printf("%s\n", frame_desc.data());
frame_num++;
continue;
} }
for (uptr j = 0; j < addr_frames_num; j++) { for (uptr j = 0; j < addr_frames_num; j++) {
AddressInfo &info = addr_frames[j]; AddressInfo &info = addr_frames[j];
frame_desc.clear(); frame_desc.clear();
PrintStackFramePrefix(&frame_desc, frame_num, pc); RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
if (info.function) { info, common_flags()->strip_path_prefix);
frame_desc.append(" in %s", info.function);
// Print offset in function if we don't know the source file.
if (!info.file && info.function_offset != AddressInfo::kUnknown)
frame_desc.append("+0x%zx", info.function_offset);
}
if (info.file) {
frame_desc.append(" ");
PrintSourceLocation(&frame_desc, info.file, info.line, info.column);
} else if (info.module) {
frame_desc.append(" ");
PrintModuleAndOffset(&frame_desc, info.module, info.module_offset);
}
Printf("%s\n", frame_desc.data()); Printf("%s\n", frame_desc.data());
frame_num++;
info.Clear(); info.Clear();
} }
} }
...@@ -69,9 +51,9 @@ void StackTrace::PrintStack(const uptr *addr, uptr size) { ...@@ -69,9 +51,9 @@ void StackTrace::PrintStack(const uptr *addr, uptr size) {
Printf("\n"); Printf("\n");
} }
void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context, void BufferedStackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context,
uptr stack_top, uptr stack_bottom, uptr stack_top, uptr stack_bottom,
bool request_fast_unwind) { bool request_fast_unwind) {
top_frame_bp = (max_depth > 0) ? bp : 0; top_frame_bp = (max_depth > 0) ? bp : 0;
// Avoid doing any work for small max_depth. // Avoid doing any work for small max_depth.
if (max_depth == 0) { if (max_depth == 0) {
...@@ -80,7 +62,7 @@ void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context, ...@@ -80,7 +62,7 @@ void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context,
} }
if (max_depth == 1) { if (max_depth == 1) {
size = 1; size = 1;
trace[0] = pc; trace_buffer[0] = pc;
return; return;
} }
if (!WillUseFastUnwind(request_fast_unwind)) { if (!WillUseFastUnwind(request_fast_unwind)) {
......
//===-- sanitizer_common.cc -----------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers' run-time libraries.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace_printer.h"
namespace __sanitizer {
static const char *StripFunctionName(const char *function, const char *prefix) {
if (function == 0) return 0;
if (prefix == 0) return function;
uptr prefix_len = internal_strlen(prefix);
if (0 == internal_strncmp(function, prefix, prefix_len))
return function + prefix_len;
return function;
}
static const char kDefaultFormat[] = " #%n %p %F %L";
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
const AddressInfo &info, const char *strip_path_prefix,
const char *strip_func_prefix) {
if (0 == internal_strcmp(format, "DEFAULT"))
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
if (*p != '%') {
buffer->append("%c", *p);
continue;
}
p++;
switch (*p) {
case '%':
buffer->append("%%");
break;
// Frame number and all fields of AddressInfo structure.
case 'n':
buffer->append("%zu", frame_no);
break;
case 'p':
buffer->append("0x%zx", info.address);
break;
case 'm':
buffer->append("%s", StripPathPrefix(info.module, strip_path_prefix));
break;
case 'o':
buffer->append("0x%zx", info.module_offset);
break;
case 'f':
buffer->append("%s", StripFunctionName(info.function, strip_func_prefix));
break;
case 'q':
buffer->append("0x%zx", info.function_offset != AddressInfo::kUnknown
? info.function_offset
: 0x0);
break;
case 's':
buffer->append("%s", StripPathPrefix(info.file, strip_path_prefix));
break;
case 'l':
buffer->append("%d", info.line);
break;
case 'c':
buffer->append("%d", info.column);
break;
// Smarter special cases.
case 'F':
// Function name and offset, if file is unknown.
if (info.function) {
buffer->append("in %s",
StripFunctionName(info.function, strip_func_prefix));
if (!info.file && info.function_offset != AddressInfo::kUnknown)
buffer->append("+0x%zx", info.function_offset);
}
break;
case 'S':
// File/line information.
RenderSourceLocation(buffer, info.file, info.line, info.column,
strip_path_prefix);
break;
case 'L':
// Source location, or module location.
if (info.file) {
RenderSourceLocation(buffer, info.file, info.line, info.column,
strip_path_prefix);
} else if (info.module) {
RenderModuleLocation(buffer, info.module, info.module_offset,
strip_path_prefix);
} else {
buffer->append("(<unknown module>)");
}
break;
case 'M':
// Module basename and offset, or PC.
if (info.module)
buffer->append("(%s+%p)", StripModuleName(info.module),
(void *)info.module_offset);
else
buffer->append("(%p)", (void *)info.address);
break;
default:
Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n",
*p, *p);
Die();
}
}
}
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, const char *strip_path_prefix) {
buffer->append("%s", StripPathPrefix(file, strip_path_prefix));
if (line > 0) {
buffer->append(":%d", line);
if (column > 0)
buffer->append(":%d", column);
}
}
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
uptr offset, const char *strip_path_prefix) {
buffer->append("(%s+0x%zx)", StripPathPrefix(module, strip_path_prefix),
offset);
}
} // namespace __sanitizer
//===-- sanitizer_stacktrace_printer.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers' run-time libraries.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_STACKTRACE_PRINTER_H
#define SANITIZER_STACKTRACE_PRINTER_H
#include "sanitizer_common.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
// Renders the contents of the "info" structure, which describes stack frame
// "frame_no", and appends the result to "buffer". "format" is a string with
// placeholders; it is copied to the output with each placeholder substituted
// by the corresponding field of "info". For example, the format string
// " frame %n: function %F at %S"
// will be turned into
// " frame 10: function foo::bar() at my/file.cc:10"
// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
// source files and modules, and "strip_func_prefix" to strip prefixes of
// function names.
// Here's the full list of available placeholders:
// %% - represents a '%' character;
// %n - frame number (copy of frame_no);
// %p - PC in hex format;
// %m - path to module (binary or shared object);
// %o - offset in the module in hex format;
// %f - function name;
// %q - offset in the function in hex format (*if available*);
// %s - path to source file;
// %l - line in the source file;
// %c - column in the source file;
// %F - if function is known to be <foo>, prints "in <foo>", possibly
// followed by the offset in this function, but only if source file
// is unknown;
// %S - prints file/line/column information;
// %L - prints location information: file/line/column, if it is known, or
// module+offset if it is known, or (<unknown module>) string.
// %M - prints module basename and offset, if it is known, or PC.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
const AddressInfo &info, const char *strip_path_prefix = "",
const char *strip_func_prefix = "");
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, const char *strip_path_prefix);
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
uptr offset, const char *strip_path_prefix);
} // namespace __sanitizer
#endif // SANITIZER_STACKTRACE_PRINTER_H
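A hedged sketch of how a caller might drive RenderFrame with the default format documented above. All identifiers appear elsewhere in this patch; the frame contents are invented for illustration.
InternalScopedString out(GetPageSizeCached() * 2);
AddressInfo info;                                // default-constructed, fields unknown
info.address = 0x4005d4;                         // hypothetical PC
info.function = internal_strdup("foo::bar()");
info.file = internal_strdup("my/file.cc");
info.line = 10;
info.column = 4;
RenderFrame(&out, " #%n %p %F %L", /*frame_no=*/0, info,
            common_flags()->strip_path_prefix);
Printf("%s\n", out.data());  // roughly: " #0 0x4005d4 in foo::bar() my/file.cc:10:4"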
...@@ -59,13 +59,23 @@ struct AddressInfo { ...@@ -59,13 +59,23 @@ struct AddressInfo {
} }
}; };
// For now, DataInfo is used to describe a global variable.
struct DataInfo { struct DataInfo {
uptr address;
char *module; char *module;
uptr module_offset; uptr module_offset;
char *name; char *name;
uptr start; uptr start;
uptr size; uptr size;
DataInfo() {
internal_memset(this, 0, sizeof(DataInfo));
}
void Clear() {
InternalFree(module);
InternalFree(name);
internal_memset(this, 0, sizeof(DataInfo));
}
}; };
class Symbolizer { class Symbolizer {
......
...@@ -166,7 +166,7 @@ uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames, ...@@ -166,7 +166,7 @@ uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
} }
bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) { bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
backtrace_syminfo((backtrace_state *)state_, info->address, backtrace_syminfo((backtrace_state *)state_, info->start,
SymbolizeDataCallback, ErrorCallback, info); SymbolizeDataCallback, ErrorCallback, info);
return true; return true;
} }
......
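A hedged sketch of the reworked data-symbolization interface shown above: DataInfo now zero-initializes itself, callers fill in 'start' (which replaces the old 'address' field), and Clear() releases the strings afterwards. The symbolizer pointer and 'addr' below are hypothetical.
DataInfo info;                      // the new constructor memsets the struct to zero
info.start = addr;                  // 'start' replaces the old 'address' field
if (libbacktrace->SymbolizeData(&info))
  Printf("global '%s' at %p, size %zu\n",
         info.name, (void *)info.start, info.size);
info.Clear();                       // frees module/name and re-zeroes the struct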
...@@ -339,12 +339,19 @@ class LLVMSymbolizerProcess : public SymbolizerProcess { ...@@ -339,12 +339,19 @@ class LLVMSymbolizerProcess : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=x86_64"; const char* const kSymbolizerArch = "--default-arch=x86_64";
#elif defined(__i386__) #elif defined(__i386__)
const char* const kSymbolizerArch = "--default-arch=i386"; const char* const kSymbolizerArch = "--default-arch=i386";
#elif defined(__powerpc64__) #elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
const char* const kSymbolizerArch = "--default-arch=powerpc64"; const char* const kSymbolizerArch = "--default-arch=powerpc64";
#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
const char* const kSymbolizerArch = "--default-arch=powerpc64le";
#else #else
const char* const kSymbolizerArch = "--default-arch=unknown"; const char* const kSymbolizerArch = "--default-arch=unknown";
#endif #endif
execl(path_to_binary, path_to_binary, kSymbolizerArch, (char *)0);
const char *const inline_flag = common_flags()->symbolize_inline_frames
? "--inlining=true"
: "--inlining=false";
execl(path_to_binary, path_to_binary, inline_flag, kSymbolizerArch,
(char *)0);
} }
}; };
...@@ -580,8 +587,7 @@ class POSIXSymbolizer : public Symbolizer { ...@@ -580,8 +587,7 @@ class POSIXSymbolizer : public Symbolizer {
return false; return false;
const char *module_name = module->full_name(); const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address(); uptr module_offset = addr - module->base_address();
internal_memset(info, 0, sizeof(*info)); info->Clear();
info->address = addr;
info->module = internal_strdup(module_name); info->module = internal_strdup(module_name);
info->module_offset = module_offset; info->module_offset = module_offset;
// First, try to use libbacktrace symbolizer (if it's available). // First, try to use libbacktrace symbolizer (if it's available).
......
...@@ -93,7 +93,7 @@ uptr Unwind_GetIP(struct _Unwind_Context *ctx) { ...@@ -93,7 +93,7 @@ uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
} }
struct UnwindTraceArg { struct UnwindTraceArg {
StackTrace *stack; BufferedStackTrace *stack;
uptr max_depth; uptr max_depth;
}; };
...@@ -101,27 +101,27 @@ _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { ...@@ -101,27 +101,27 @@ _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
UnwindTraceArg *arg = (UnwindTraceArg*)param; UnwindTraceArg *arg = (UnwindTraceArg*)param;
CHECK_LT(arg->stack->size, arg->max_depth); CHECK_LT(arg->stack->size, arg->max_depth);
uptr pc = Unwind_GetIP(ctx); uptr pc = Unwind_GetIP(ctx);
arg->stack->trace[arg->stack->size++] = pc; arg->stack->trace_buffer[arg->stack->size++] = pc;
if (arg->stack->size == arg->max_depth) return UNWIND_STOP; if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
return UNWIND_CONTINUE; return UNWIND_CONTINUE;
} }
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
CHECK_GE(max_depth, 2); CHECK_GE(max_depth, 2);
size = 0; size = 0;
UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)}; UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
_Unwind_Backtrace(Unwind_Trace, &arg); _Unwind_Backtrace(Unwind_Trace, &arg);
// We need to pop a few frames so that pc is on top. // We need to pop a few frames so that pc is on top.
uptr to_pop = LocatePcInTrace(pc); uptr to_pop = LocatePcInTrace(pc);
// trace[0] belongs to the current function so we always pop it. // trace_buffer[0] belongs to the current function so we always pop it.
if (to_pop == 0) if (to_pop == 0 && size > 1)
to_pop = 1; to_pop = 1;
PopStackFrames(to_pop); PopStackFrames(to_pop);
trace[0] = pc; trace_buffer[0] = pc;
} }
void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context, void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
uptr max_depth) { uptr max_depth) {
CHECK_GE(max_depth, 2); CHECK_GE(max_depth, 2);
if (!unwind_backtrace_signal_arch) { if (!unwind_backtrace_signal_arch) {
SlowUnwindStack(pc, max_depth); SlowUnwindStack(pc, max_depth);
...@@ -143,7 +143,7 @@ void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context, ...@@ -143,7 +143,7 @@ void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
// +2 compensate for libcorkscrew unwinder returning addresses of call // +2 compensate for libcorkscrew unwinder returning addresses of call
// instructions instead of raw return addresses. // instructions instead of raw return addresses.
for (sptr i = 0; i < res; ++i) for (sptr i = 0; i < res; ++i)
trace[size++] = frames[i].absolute_pc + 2; trace_buffer[size++] = frames[i].absolute_pc + 2;
} }
} // namespace __sanitizer } // namespace __sanitizer
......
...@@ -442,7 +442,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, ...@@ -442,7 +442,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
} }
#if !SANITIZER_GO #if !SANITIZER_GO
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
CHECK_GE(max_depth, 2); CHECK_GE(max_depth, 2);
// FIXME: CaptureStackBackTrace might be too slow for us. // FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64. // FIXME: Compare with StackWalk64.
...@@ -457,8 +457,8 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { ...@@ -457,8 +457,8 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
PopStackFrames(pc_location); PopStackFrames(pc_location);
} }
void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context, void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
uptr max_depth) { uptr max_depth) {
CONTEXT ctx = *(CONTEXT *)context; CONTEXT ctx = *(CONTEXT *)context;
STACKFRAME64 stack_frame; STACKFRAME64 stack_frame;
memset(&stack_frame, 0, sizeof(stack_frame)); memset(&stack_frame, 0, sizeof(stack_frame));
...@@ -481,7 +481,7 @@ void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context, ...@@ -481,7 +481,7 @@ void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
&stack_frame, &ctx, NULL, &SymFunctionTableAccess64, &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
&SymGetModuleBase64, NULL) && &SymGetModuleBase64, NULL) &&
size < Min(max_depth, kStackTraceMax)) { size < Min(max_depth, kStackTraceMax)) {
trace[size++] = (uptr)stack_frame.AddrPC.Offset; trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
} }
} }
#endif // #if !SANITIZER_GO #endif // #if !SANITIZER_GO
......
...@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) ...@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS) AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
AM_CXXFLAGS += -std=c++11
ACLOCAL_AMFLAGS = -I m4 ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la toolexeclib_LTLIBRARIES = libtsan.la
......
...@@ -276,7 +276,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) ...@@ -276,7 +276,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \ -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \ -fomit-frame-pointer -funwind-tables -fvisibility=hidden \
-Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -std=c++11
ACLOCAL_AMFLAGS = -I m4 ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la toolexeclib_LTLIBRARIES = libtsan.la
tsan_files = \ tsan_files = \
......
...@@ -41,7 +41,6 @@ const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit. ...@@ -41,7 +41,6 @@ const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 42; const int kClkBits = 42;
const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1; const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024; const uptr kShadowStackSize = 64 * 1024;
const uptr kTraceStackSize = 256;
#ifdef TSAN_SHADOW_COUNT #ifdef TSAN_SHADOW_COUNT
# if TSAN_SHADOW_COUNT == 2 \ # if TSAN_SHADOW_COUNT == 2 \
...@@ -172,7 +171,6 @@ struct Context; ...@@ -172,7 +171,6 @@ struct Context;
struct ReportStack; struct ReportStack;
class ReportDesc; class ReportDesc;
class RegionAlloc; class RegionAlloc;
class StackTrace;
// Descriptor of user's memory block. // Descriptor of user's memory block.
struct MBlock { struct MBlock {
......
...@@ -46,7 +46,8 @@ static bool bogusfd(int fd) { ...@@ -46,7 +46,8 @@ static bool bogusfd(int fd) {
} }
static FdSync *allocsync(ThreadState *thr, uptr pc) { static FdSync *allocsync(ThreadState *thr, uptr pc) {
FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync)); FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
false);
atomic_store(&s->rc, 1, memory_order_relaxed); atomic_store(&s->rc, 1, memory_order_relaxed);
return s; return s;
} }
...@@ -63,7 +64,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) { ...@@ -63,7 +64,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) {
CHECK_NE(s, &fdctx.globsync); CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync); CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync); CHECK_NE(s, &fdctx.socksync);
user_free(thr, pc, s); user_free(thr, pc, s, false);
} }
} }
} }
...@@ -76,13 +77,13 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) { ...@@ -76,13 +77,13 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
if (l1 == 0) { if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc); uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it. // We need this to reside in user memory to properly catch races on it.
void *p = user_alloc(thr, pc, size); void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size); internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size); MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel)) if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
l1 = (uptr)p; l1 = (uptr)p;
else else
user_free(thr, pc, p); user_free(thr, pc, p, false);
} }
return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
} }
......
...@@ -97,6 +97,7 @@ void InitializeFlags(Flags *f, const char *env) { ...@@ -97,6 +97,7 @@ void InitializeFlags(Flags *f, const char *env) {
cf->allow_addr2line = true; cf->allow_addr2line = true;
cf->detect_deadlocks = true; cf->detect_deadlocks = true;
cf->print_suppressions = false; cf->print_suppressions = false;
cf->stack_trace_format = " #%n %f %S %M";
// Let a frontend override. // Let a frontend override.
ParseFlags(f, __tsan_default_options()); ParseFlags(f, __tsan_default_options());
......
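A hedged rendering example for the new TSan default above (frame contents invented): with stack_trace_format set to " #%n %f %S %M", a frame in function main at a.cc:5 inside module a.out at offset 0x1234 would print roughly as:
 #0 main a.cc:5 (a.out+0x1234)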
...@@ -52,7 +52,7 @@ class ScopedAnnotation { ...@@ -52,7 +52,7 @@ class ScopedAnnotation {
StatInc(thr, StatAnnotation); \ StatInc(thr, StatAnnotation); \
StatInc(thr, Stat##typ); \ StatInc(thr, Stat##typ); \
ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \ ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \ (void)pc; \
/**/ /**/
...@@ -452,4 +452,6 @@ const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) { ...@@ -452,4 +452,6 @@ const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
void INTERFACE_ATTRIBUTE void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {} AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
} // extern "C" } // extern "C"
...@@ -472,7 +472,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) { ...@@ -472,7 +472,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
#define SCOPED_ATOMIC(func, ...) \ #define SCOPED_ATOMIC(func, ...) \
const uptr callpc = (uptr)__builtin_return_address(0); \ const uptr callpc = (uptr)__builtin_return_address(0); \
uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \ uptr pc = StackTrace::GetCurrentPc(); \
mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
ThreadState *const thr = cur_thread(); \ ThreadState *const thr = cur_thread(); \
if (thr->ignore_interceptors) \ if (thr->ignore_interceptors) \
......
...@@ -59,7 +59,7 @@ static JavaContext *jctx; ...@@ -59,7 +59,7 @@ static JavaContext *jctx;
#define SCOPED_JAVA_FUNC(func) \ #define SCOPED_JAVA_FUNC(func) \
ThreadState *thr = cur_thread(); \ ThreadState *thr = cur_thread(); \
const uptr caller_pc = GET_CALLER_PC(); \ const uptr caller_pc = GET_CALLER_PC(); \
const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \ (void)pc; \
ScopedJavaFunc scoped(thr, caller_pc); \ ScopedJavaFunc scoped(thr, caller_pc); \
/**/ /**/
......
...@@ -64,17 +64,17 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) { ...@@ -64,17 +64,17 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 || if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
!flags()->report_signal_unsafe) !flags()->report_signal_unsafe)
return; return;
StackTrace stack; VarSizeStackTrace stack;
stack.ObtainCurrent(thr, pc); ObtainCurrentStack(thr, pc, &stack);
ThreadRegistryLock l(ctx->thread_registry); ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe); ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) { if (!IsFiredSuppression(ctx, rep, stack)) {
rep.AddStack(&stack, true); rep.AddStack(stack, true);
OutputReport(thr, rep); OutputReport(thr, rep);
} }
} }
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) { void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40))) if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return AllocatorReturnNull(); return AllocatorReturnNull();
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align); void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
...@@ -82,15 +82,17 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) { ...@@ -82,15 +82,17 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
return 0; return 0;
if (ctx && ctx->initialized) if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true); OnUserAlloc(thr, pc, (uptr)p, sz, true);
SignalUnsafeCall(thr, pc); if (signal)
SignalUnsafeCall(thr, pc);
return p; return p;
} }
void user_free(ThreadState *thr, uptr pc, void *p) { void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
if (ctx && ctx->initialized) if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true); OnUserFree(thr, pc, (uptr)p, true);
allocator()->Deallocate(&thr->alloc_cache, p); allocator()->Deallocate(&thr->alloc_cache, p);
SignalUnsafeCall(thr, pc); if (signal)
SignalUnsafeCall(thr, pc);
} }
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) { void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
......
...@@ -24,9 +24,9 @@ void AllocatorPrintStats(); ...@@ -24,9 +24,9 @@ void AllocatorPrintStats();
// For user allocations. // For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
uptr align = kDefaultAlignment); uptr align = kDefaultAlignment, bool signal = true);
// Does not accept NULL. // Does not accept NULL.
void user_free(ThreadState *thr, uptr pc, void *p); void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz); void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align); void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
uptr user_alloc_usable_size(const void *p); uptr user_alloc_usable_size(const void *p);
......
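A hedged sketch of how the new 'signal' parameter declared above is meant to be used (thr, pc and size stand for values already in scope): ordinary allocations keep the default and still go through SignalUnsafeCall(), while runtime-internal bookkeeping such as the FD tables earlier in this diff opts out, so the bookkeeping itself can never trigger a signal-unsafe report.
void *p = user_alloc(thr, pc, size);             // default: signal = true
void *q = user_alloc(thr, pc, size, kDefaultAlignment, /*signal=*/false);
user_free(thr, pc, q, /*signal=*/false);         // matching opt-out on free
user_free(thr, pc, p);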
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
// //
// This file is a part of ThreadSanitizer (TSan), a race detector. // This file is a part of ThreadSanitizer (TSan), a race detector.
// //
// Linux-specific code. // Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h" #include "tsan_platform.h"
#include "tsan_rtl.h" #include "tsan_rtl.h"
#include "tsan_flags.h" #include "tsan_flags.h"
...@@ -60,6 +61,9 @@ void *__libc_stack_end = 0; ...@@ -60,6 +61,9 @@ void *__libc_stack_end = 0;
namespace __tsan { namespace __tsan {
static uptr g_data_start;
static uptr g_data_end;
const uptr kPageSize = 4096; const uptr kPageSize = 4096;
enum { enum {
...@@ -74,22 +78,26 @@ enum { ...@@ -74,22 +78,26 @@ enum {
MemCount = 8, MemCount = 8,
}; };
void FillProfileCallback(uptr start, uptr rss, bool file, void FillProfileCallback(uptr p, uptr rss, bool file,
uptr *mem, uptr stats_size) { uptr *mem, uptr stats_size) {
mem[MemTotal] += rss; mem[MemTotal] += rss;
start >>= 40; if (p >= kShadowBeg && p < kShadowEnd)
if (start < 0x10)
mem[MemShadow] += rss; mem[MemShadow] += rss;
else if (start >= 0x20 && start < 0x30) else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
mem[file ? MemFile : MemMmap] += rss;
else if (start >= 0x30 && start < 0x40)
mem[MemMeta] += rss; mem[MemMeta] += rss;
else if (start >= 0x7e) #ifndef TSAN_GO
else if (p >= kHeapMemBeg && p < kHeapMemEnd)
mem[MemHeap] += rss;
else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
mem[file ? MemFile : MemMmap] += rss; mem[file ? MemFile : MemMmap] += rss;
else if (start >= 0x60 && start < 0x62) else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
mem[file ? MemFile : MemMmap] += rss;
#else
else if (p >= kAppMemBeg && p < kAppMemEnd)
mem[file ? MemFile : MemMmap] += rss;
#endif
else if (p >= kTraceMemBeg && p < kTraceMemEnd)
mem[MemTrace] += rss; mem[MemTrace] += rss;
else if (start >= 0x7d && start < 0x7e)
mem[MemHeap] += rss;
else else
mem[MemOther] += rss; mem[MemOther] += rss;
} }
...@@ -97,12 +105,14 @@ void FillProfileCallback(uptr start, uptr rss, bool file, ...@@ -97,12 +105,14 @@ void FillProfileCallback(uptr start, uptr rss, bool file,
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
uptr mem[MemCount] = {}; uptr mem[MemCount] = {};
__sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7); __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
StackDepotStats *stacks = StackDepotGetStats();
internal_snprintf(buf, buf_size, internal_snprintf(buf, buf_size,
"RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd" "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
" trace:%zd heap:%zd other:%zd nthr=%zd/%zd\n", " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
mem[MemHeap] >> 20, mem[MemOther] >> 20, mem[MemHeap] >> 20, mem[MemOther] >> 20,
stacks->allocated >> 20, stacks->n_uniq_ids,
nlive, nthread); nlive, nthread);
} }
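A hedged example of a line produced by the extended profile format above (all numbers invented); the new stacks=<MB>[<unique ids>] field comes from StackDepotGetStats():
RSS 512 MB: shadow:300 meta:40 file:8 mmap:60 trace:32 heap:48 other:24 stacks=5[12345] nthr=3/10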
...@@ -137,7 +147,7 @@ uptr GetRSS() { ...@@ -137,7 +147,7 @@ uptr GetRSS() {
void FlushShadowMemoryCallback( void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list, const SuspendedThreadsList &suspended_threads_list,
void *argument) { void *argument) {
FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg); FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
} }
#endif #endif
...@@ -218,12 +228,12 @@ static void MapRodata() { ...@@ -218,12 +228,12 @@ static void MapRodata() {
void InitializeShadowMemory() { void InitializeShadowMemory() {
// Map memory shadow. // Map memory shadow.
uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg, uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
kLinuxShadowEnd - kLinuxShadowBeg); kShadowEnd - kShadowBeg);
if (shadow != kLinuxShadowBeg) { if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and " Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg); "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
Die(); Die();
} }
// This memory range is used for thread stacks and large user mmaps. // This memory range is used for thread stacks and large user mmaps.
...@@ -235,78 +245,42 @@ void InitializeShadowMemory() { ...@@ -235,78 +245,42 @@ void InitializeShadowMemory() {
0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE); 0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE);
#endif #endif
DPrintf("memory shadow: %zx-%zx (%zuGB)\n", DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
kLinuxShadowBeg, kLinuxShadowEnd, kShadowBeg, kShadowEnd,
(kLinuxShadowEnd - kLinuxShadowBeg) >> 30); (kShadowEnd - kShadowBeg) >> 30);
// Map meta shadow. // Map meta shadow.
if (MemToMeta(kLinuxAppMemBeg) < (u32*)kMetaShadow) { uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
Printf("ThreadSanitizer: bad meta shadow (%p -> %p < %p)\n", uptr meta = (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size);
kLinuxAppMemBeg, MemToMeta(kLinuxAppMemBeg), kMetaShadow); if (meta != kMetaShadowBeg) {
Die();
}
if (MemToMeta(kLinuxAppMemEnd) >= (u32*)(kMetaShadow + kMetaSize)) {
Printf("ThreadSanitizer: bad meta shadow (%p -> %p >= %p)\n",
kLinuxAppMemEnd, MemToMeta(kLinuxAppMemEnd), kMetaShadow + kMetaSize);
Die();
}
uptr meta = (uptr)MmapFixedNoReserve(kMetaShadow, kMetaSize);
if (meta != kMetaShadow) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and " Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie (%p, %p).\n", meta, kMetaShadow); "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
Die(); Die();
} }
DPrintf("meta shadow: %zx-%zx (%zuGB)\n", DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
kMetaShadow, kMetaShadow + kMetaSize, kMetaSize >> 30); meta, meta + meta_size, meta_size >> 30);
// Protect gaps.
const uptr kClosedLowBeg = 0x200000;
const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
const uptr kClosedMidEnd = min(min(kLinuxAppMemBeg, kTraceMemBegin),
kMetaShadow);
ProtectRange(kClosedLowBeg, kClosedLowEnd);
ProtectRange(kClosedMidBeg, kClosedMidEnd);
VPrintf(2, "kClosedLow %zx-%zx (%zuGB)\n",
kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
VPrintf(2, "kClosedMid %zx-%zx (%zuGB)\n",
kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
VPrintf(2, "app mem: %zx-%zx (%zuGB)\n",
kLinuxAppMemBeg, kLinuxAppMemEnd,
(kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
VPrintf(2, "stack: %zx\n", (uptr)&shadow);
MapRodata(); MapRodata();
} }
#endif
static uptr g_data_start;
static uptr g_data_end;
#ifndef TSAN_GO
static void CheckPIE() {
// Ensure that the binary is indeed compiled with -pie.
MemoryMappingLayout proc_maps(true);
uptr start, end;
if (proc_maps.Next(&start, &end,
/*offset*/0, /*filename*/0, /*filename_size*/0,
/*protection*/0)) {
if ((u64)start < kLinuxAppMemBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
"something is mapped at 0x%zx < 0x%zx)\n",
start, kLinuxAppMemBeg);
Printf("FATAL: Make sure to compile with -fPIE"
" and to link with -pie.\n");
Die();
}
}
}
static void InitDataSeg() { static void InitDataSeg() {
MemoryMappingLayout proc_maps(true); MemoryMappingLayout proc_maps(true);
uptr start, end, offset; uptr start, end, offset;
char name[128]; char name[128];
#if SANITIZER_FREEBSD
// On FreeBSD, BSS is usually the last block allocated within the
// low range, and the heap is the last block allocated within the range
// 0x800000000-0x8ffffffff.
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
/*protection*/ 0)) {
DPrintf("%p-%p %p %s\n", start, end, offset, name);
if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
name[0] == '\0') {
g_data_start = start;
g_data_end = end;
}
}
#else
bool prev_is_data = false; bool prev_is_data = false;
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
/*protection*/ 0)) { /*protection*/ 0)) {
...@@ -322,12 +296,35 @@ static void InitDataSeg() { ...@@ -322,12 +296,35 @@ static void InitDataSeg() {
g_data_end = end; g_data_end = end;
prev_is_data = is_data; prev_is_data = is_data;
} }
#endif
DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end); DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
CHECK_LT(g_data_start, g_data_end); CHECK_LT(g_data_start, g_data_end);
CHECK_GE((uptr)&g_data_start, g_data_start); CHECK_GE((uptr)&g_data_start, g_data_start);
CHECK_LT((uptr)&g_data_start, g_data_end); CHECK_LT((uptr)&g_data_start, g_data_end);
} }
static void CheckAndProtect() {
// Ensure that the binary is indeed compiled with -pie.
MemoryMappingLayout proc_maps(true);
uptr p, end;
while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) {
if (IsAppMem(p))
continue;
if (p >= kHeapMemEnd &&
p < kHeapMemEnd + PrimaryAllocator::AdditionalSize())
continue;
if (p >= 0xf000000000000000ull) // vdso
break;
Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
Die();
}
ProtectRange(kLoAppMemEnd, kShadowBeg);
ProtectRange(kShadowEnd, kMetaShadowBeg);
ProtectRange(kMetaShadowEnd, kTraceMemBeg);
ProtectRange(kTraceMemEnd, kHeapMemBeg);
ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg);
}
#endif // #ifndef TSAN_GO #endif // #ifndef TSAN_GO
void InitializePlatform() { void InitializePlatform() {
...@@ -363,7 +360,7 @@ void InitializePlatform() { ...@@ -363,7 +360,7 @@ void InitializePlatform() {
} }
#ifndef TSAN_GO #ifndef TSAN_GO
CheckPIE(); CheckAndProtect();
InitTlsSize(); InitTlsSize();
InitDataSeg(); InitDataSeg();
#endif #endif
...@@ -426,4 +423,4 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, ...@@ -426,4 +423,4 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
} // namespace __tsan } // namespace __tsan
#endif // SANITIZER_LINUX #endif // SANITIZER_LINUX || SANITIZER_FREEBSD
...@@ -54,20 +54,20 @@ uptr GetRSS() { ...@@ -54,20 +54,20 @@ uptr GetRSS() {
#ifndef TSAN_GO #ifndef TSAN_GO
void InitializeShadowMemory() { void InitializeShadowMemory() {
uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg, uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
kLinuxShadowEnd - kLinuxShadowBeg); kShadowEnd - kShadowBeg);
if (shadow != kLinuxShadowBeg) { if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and " Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie.\n"); "to link with -pie.\n");
Die(); Die();
} }
DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n", DPrintf("kShadow %zx-%zx (%zuGB)\n",
kLinuxShadowBeg, kLinuxShadowEnd, kShadowBeg, kShadowEnd,
(kLinuxShadowEnd - kLinuxShadowBeg) >> 30); (kShadowEnd - kShadowBeg) >> 30);
DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n", DPrintf("kAppMem %zx-%zx (%zuGB)\n",
kLinuxAppMemBeg, kLinuxAppMemEnd, kAppMemBeg, kAppMemEnd,
(kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30); (kAppMemEnd - kAppMemBeg) >> 30);
} }
#endif #endif
...@@ -75,10 +75,6 @@ void InitializePlatform() { ...@@ -75,10 +75,6 @@ void InitializePlatform() {
DisableCoreDumperIfNecessary(); DisableCoreDumperIfNecessary();
} }
void FinalizePlatform() {
fflush(0);
}
#ifndef TSAN_GO #ifndef TSAN_GO
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
void *abstime), void *c, void *m, void *abstime, void *abstime), void *c, void *m, void *abstime,
......
...@@ -36,10 +36,6 @@ uptr GetRSS() { ...@@ -36,10 +36,6 @@ uptr GetRSS() {
void InitializePlatform() { void InitializePlatform() {
} }
void FinalizePlatform() {
fflush(0);
}
} // namespace __tsan } // namespace __tsan
#endif // SANITIZER_WINDOWS #endif // SANITIZER_WINDOWS
...@@ -11,10 +11,30 @@ ...@@ -11,10 +11,30 @@
#include "tsan_report.h" #include "tsan_report.h"
#include "tsan_platform.h" #include "tsan_platform.h"
#include "tsan_rtl.h" #include "tsan_rtl.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
namespace __tsan { namespace __tsan {
ReportStack::ReportStack() : next(nullptr), info(), suppressable(false) {}
ReportStack *ReportStack::New(uptr addr) {
void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
ReportStack *res = new(mem) ReportStack();
res->info.address = addr;
return res;
}
ReportLocation::ReportLocation(ReportLocationType type)
: type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
fd(0), suppressable(false), stack(nullptr) {}
ReportLocation *ReportLocation::New(ReportLocationType type) {
void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
return new(mem) ReportLocation(type);
}
class Decorator: public __sanitizer::SanitizerCommonDecorator { class Decorator: public __sanitizer::SanitizerCommonDecorator {
public: public:
Decorator() : SanitizerCommonDecorator() { } Decorator() : SanitizerCommonDecorator() { }
...@@ -68,6 +88,8 @@ static const char *ReportTypeString(ReportType typ) { ...@@ -68,6 +88,8 @@ static const char *ReportTypeString(ReportType typ) {
return "data race on vptr (ctor/dtor vs virtual call)"; return "data race on vptr (ctor/dtor vs virtual call)";
if (typ == ReportTypeUseAfterFree) if (typ == ReportTypeUseAfterFree)
return "heap-use-after-free"; return "heap-use-after-free";
if (typ == ReportTypeVptrUseAfterFree)
return "heap-use-after-free (virtual call vs free)";
if (typ == ReportTypeThreadLeak) if (typ == ReportTypeThreadLeak)
return "thread leak"; return "thread leak";
if (typ == ReportTypeMutexDestroyLocked) if (typ == ReportTypeMutexDestroyLocked)
...@@ -94,14 +116,11 @@ void PrintStack(const ReportStack *ent) { ...@@ -94,14 +116,11 @@ void PrintStack(const ReportStack *ent) {
Printf(" [failed to restore the stack]\n\n"); Printf(" [failed to restore the stack]\n\n");
return; return;
} }
for (int i = 0; ent; ent = ent->next, i++) { for (int i = 0; ent && ent->info.address; ent = ent->next, i++) {
Printf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line); InternalScopedString res(2 * GetPageSizeCached());
if (ent->col) RenderFrame(&res, common_flags()->stack_trace_format, i, ent->info,
Printf(":%d", ent->col); common_flags()->strip_path_prefix, "__interceptor_");
if (ent->module && ent->offset) Printf("%s\n", res.data());
Printf(" (%s+%p)\n", ent->module, (void*)ent->offset);
else
Printf(" (%p)\n", (void*)ent->pc);
} }
Printf("\n"); Printf("\n");
} }
...@@ -143,12 +162,15 @@ static void PrintLocation(const ReportLocation *loc) { ...@@ -143,12 +162,15 @@ static void PrintLocation(const ReportLocation *loc) {
bool print_stack = false; bool print_stack = false;
Printf("%s", d.Location()); Printf("%s", d.Location());
if (loc->type == ReportLocationGlobal) { if (loc->type == ReportLocationGlobal) {
const DataInfo &global = loc->global;
Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n", Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
loc->name, loc->size, loc->addr, loc->module, loc->offset); global.name, global.size, global.start,
StripModuleName(global.module), global.module_offset);
} else if (loc->type == ReportLocationHeap) { } else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize]; char thrbuf[kThreadBufSize];
Printf(" Location is heap block of size %zu at %p allocated by %s:\n", Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
loc->size, loc->addr, thread_name(thrbuf, loc->tid)); loc->heap_chunk_size, loc->heap_chunk_start,
thread_name(thrbuf, loc->tid));
print_stack = true; print_stack = true;
} else if (loc->type == ReportLocationStack) { } else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid)); Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
...@@ -301,8 +323,10 @@ void PrintReport(const ReportDesc *rep) { ...@@ -301,8 +323,10 @@ void PrintReport(const ReportDesc *rep) {
if (rep->typ == ReportTypeThreadLeak && rep->count > 1) if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
Printf(" And %d more similar thread leaks.\n\n", rep->count - 1); Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep))) if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep))) {
ReportErrorSummary(rep_typ_str, ent->file, ent->line, ent->func); const AddressInfo &info = ent->info;
ReportErrorSummary(rep_typ_str, info.file, info.line, info.function);
}
Printf("==================\n"); Printf("==================\n");
} }
...@@ -317,8 +341,9 @@ void PrintStack(const ReportStack *ent) { ...@@ -317,8 +341,9 @@ void PrintStack(const ReportStack *ent) {
return; return;
} }
for (int i = 0; ent; ent = ent->next, i++) { for (int i = 0; ent; ent = ent->next, i++) {
Printf(" %s()\n %s:%d +0x%zx\n", const AddressInfo &info = ent->info;
ent->func, ent->file, ent->line, (void*)ent->offset); Printf(" %s()\n %s:%d +0x%zx\n", info.function, info.file, info.line,
(void *)info.module_offset);
} }
} }
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#ifndef TSAN_REPORT_H #ifndef TSAN_REPORT_H
#define TSAN_REPORT_H #define TSAN_REPORT_H
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h" #include "tsan_defs.h"
#include "tsan_vector.h" #include "tsan_vector.h"
...@@ -20,6 +21,7 @@ enum ReportType { ...@@ -20,6 +21,7 @@ enum ReportType {
ReportTypeRace, ReportTypeRace,
ReportTypeVptrRace, ReportTypeVptrRace,
ReportTypeUseAfterFree, ReportTypeUseAfterFree,
ReportTypeVptrUseAfterFree,
ReportTypeThreadLeak, ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked, ReportTypeMutexDestroyLocked,
ReportTypeMutexDoubleLock, ReportTypeMutexDoubleLock,
...@@ -33,14 +35,12 @@ enum ReportType { ...@@ -33,14 +35,12 @@ enum ReportType {
struct ReportStack { struct ReportStack {
ReportStack *next; ReportStack *next;
char *module; AddressInfo info;
uptr offset;
uptr pc;
char *func;
char *file;
int line;
int col;
bool suppressable; bool suppressable;
static ReportStack *New(uptr addr);
private:
ReportStack();
}; };
struct ReportMopMutex { struct ReportMopMutex {
...@@ -70,17 +70,17 @@ enum ReportLocationType { ...@@ -70,17 +70,17 @@ enum ReportLocationType {
struct ReportLocation { struct ReportLocation {
ReportLocationType type; ReportLocationType type;
uptr addr; DataInfo global;
uptr size; uptr heap_chunk_start;
char *module; uptr heap_chunk_size;
uptr offset;
int tid; int tid;
int fd; int fd;
char *name;
char *file;
int line;
bool suppressable; bool suppressable;
ReportStack *stack; ReportStack *stack;
static ReportLocation *New(ReportLocationType type);
private:
explicit ReportLocation(ReportLocationType type);
}; };
struct ReportThread { struct ReportThread {
......
...@@ -259,8 +259,8 @@ void MapShadow(uptr addr, uptr size) { ...@@ -259,8 +259,8 @@ void MapShadow(uptr addr, uptr size) {
void MapThreadTrace(uptr addr, uptr size) { void MapThreadTrace(uptr addr, uptr size) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size); DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
CHECK_GE(addr, kTraceMemBegin); CHECK_GE(addr, kTraceMemBeg);
CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize); CHECK_LE(addr + size, kTraceMemEnd);
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
uptr addr1 = (uptr)MmapFixedNoReserve(addr, size); uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
if (addr1 != addr) { if (addr1 != addr) {
...@@ -270,6 +270,28 @@ void MapThreadTrace(uptr addr, uptr size) { ...@@ -270,6 +270,28 @@ void MapThreadTrace(uptr addr, uptr size) {
} }
} }
static void CheckShadowMapping() {
for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
const uptr beg = UserRegions[i];
const uptr end = UserRegions[i + 1];
VPrintf(3, "checking shadow region %p-%p\n", beg, end);
for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
for (int x = -1; x <= 1; x++) {
const uptr p = p0 + x;
if (p < beg || p >= end)
continue;
const uptr s = MemToShadow(p);
VPrintf(3, " checking pointer %p -> %p\n", p, s);
CHECK(IsAppMem(p));
CHECK(IsShadowMem(s));
CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
const uptr m = (uptr)MemToMeta(p);
CHECK(IsMetaMem(m));
}
}
}
}
void Initialize(ThreadState *thr) { void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist. // Thread safe because done before all threads exist.
static bool is_initialized = false; static bool is_initialized = false;
...@@ -289,6 +311,7 @@ void Initialize(ThreadState *thr) { ...@@ -289,6 +311,7 @@ void Initialize(ThreadState *thr) {
InitializeAllocator(); InitializeAllocator();
#endif #endif
InitializeInterceptors(); InitializeInterceptors();
CheckShadowMapping();
InitializePlatform(); InitializePlatform();
InitializeMutex(); InitializeMutex();
InitializeDynamicAnnotations(); InitializeDynamicAnnotations();
...@@ -437,8 +460,8 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) { ...@@ -437,8 +460,8 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos[0] = pc; thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++; thr->shadow_stack_pos++;
} }
u32 id = StackDepotPut(thr->shadow_stack, u32 id = StackDepotPut(
thr->shadow_stack_pos - thr->shadow_stack); StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
if (pc != 0) if (pc != 0)
thr->shadow_stack_pos--; thr->shadow_stack_pos--;
return id; return id;
...@@ -451,7 +474,7 @@ void TraceSwitch(ThreadState *thr) { ...@@ -451,7 +474,7 @@ void TraceSwitch(ThreadState *thr) {
unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
TraceHeader *hdr = &thr_trace->headers[trace]; TraceHeader *hdr = &thr_trace->headers[trace];
hdr->epoch0 = thr->fast_state.epoch(); hdr->epoch0 = thr->fast_state.epoch();
hdr->stack0.ObtainCurrent(thr, 0); ObtainCurrentStack(thr, 0, &hdr->stack0);
hdr->mset0 = thr->mset; hdr->mset0 = thr->mset;
thr->nomalloc--; thr->nomalloc--;
} }
...@@ -690,6 +713,8 @@ ALWAYS_INLINE ...@@ -690,6 +713,8 @@ ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) { bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4 #if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write); bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
// NOTE: this check can fail if the shadow is concurrently mutated
// by other threads.
DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write)); DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
return res; return res;
#else #else
......
...@@ -51,11 +51,8 @@ ...@@ -51,11 +51,8 @@
namespace __tsan { namespace __tsan {
#ifndef TSAN_GO #ifndef TSAN_GO
const uptr kAllocatorSpace = 0x7d0000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
struct MapUnmapCallback; struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0, typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator; DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator; typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
...@@ -499,9 +496,9 @@ class ScopedReport { ...@@ -499,9 +496,9 @@ class ScopedReport {
explicit ScopedReport(ReportType typ); explicit ScopedReport(ReportType typ);
~ScopedReport(); ~ScopedReport();
void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack, void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
const MutexSet *mset); const MutexSet *mset);
void AddStack(const StackTrace *stack, bool suppressable = false); void AddStack(StackTrace stack, bool suppressable = false);
void AddThread(const ThreadContext *tctx, bool suppressable = false); void AddThread(const ThreadContext *tctx, bool suppressable = false);
void AddThread(int unique_tid, bool suppressable = false); void AddThread(int unique_tid, bool suppressable = false);
void AddUniqueTid(int unique_tid); void AddUniqueTid(int unique_tid);
...@@ -525,7 +522,20 @@ class ScopedReport { ...@@ -525,7 +522,20 @@ class ScopedReport {
void operator = (const ScopedReport&); void operator = (const ScopedReport&);
}; };
void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset); void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
MutexSet *mset);
template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
uptr size = thr->shadow_stack_pos - thr->shadow_stack;
uptr start = 0;
if (size + !!toppc > kStackTraceMax) {
start = size + !!toppc - kStackTraceMax;
size = kStackTraceMax - !!toppc;
}
stack->Init(&thr->shadow_stack[start], size, toppc);
}
void StatAggregate(u64 *dst, u64 *src); void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat); void StatOutput(u64 *stat);
...@@ -552,9 +562,8 @@ void ForkChildAfter(ThreadState *thr, uptr pc); ...@@ -552,9 +562,8 @@ void ForkChildAfter(ThreadState *thr, uptr pc);
void ReportRace(ThreadState *thr); void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep); bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
const ScopedReport &srep, StackTrace trace);
const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size); bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces(); void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame); bool FrameIsInternal(const ReportStack *frame);
...@@ -575,7 +584,7 @@ ReportStack *SkipTsanInternalFrames(ReportStack *ent); ...@@ -575,7 +584,7 @@ ReportStack *SkipTsanInternalFrames(ReportStack *ent);
u32 CurrentStackId(ThreadState *thr, uptr pc); u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id); ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc); void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(); // uses libunwind void PrintCurrentStackSlow(uptr pc); // uses libunwind
void Initialize(ThreadState *thr); void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr); int Finalize(ThreadState *thr);
......
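The ObtainCurrentStack template introduced above trims the shadow stack so that the copied frames plus an optional top PC never exceed kStackTraceMax, dropping the oldest frames first. A self-contained worked example of that capping arithmetic, with a hypothetical cap of 4:

#include <cassert>
int main() {
  // Hypothetical values mirroring ObtainCurrentStack: 6 shadow-stack frames,
  // a non-zero toppc, and kStackTraceMax assumed to be 4 for illustration.
  unsigned long kStackTraceMax = 4;
  unsigned long size = 6, toppc = 0xdeadbeef, start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;  // 3: skip the three oldest frames
    size = kStackTraceMax - !!toppc;          // 3: keep the newest, reserve one slot
  }
  assert(start == 3 && size == 3);            // Init() then stores 3 + 1 == 4 PCs
  return 0;
}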
...@@ -170,10 +170,15 @@ setjmp: ...@@ -170,10 +170,15 @@ setjmp:
CFI_ADJUST_CFA_OFFSET(8) CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0) CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp // obtain %rsp
#if defined(__FreeBSD__)
lea 8(%rsp), %rdi
mov %rdi, %rsi
#else
lea 16(%rsp), %rdi lea 16(%rsp), %rdi
mov %rdi, %rsi mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi rol $0x11, %rsi
#endif
// call tsan interceptor // call tsan interceptor
call __tsan_setjmp call __tsan_setjmp
// restore env parameter // restore env parameter
...@@ -197,10 +202,15 @@ _setjmp: ...@@ -197,10 +202,15 @@ _setjmp:
CFI_ADJUST_CFA_OFFSET(8) CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0) CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp // obtain %rsp
#if defined(__FreeBSD__)
lea 8(%rsp), %rdi
mov %rdi, %rsi
#else
lea 16(%rsp), %rdi lea 16(%rsp), %rdi
mov %rdi, %rsi mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi rol $0x11, %rsi
#endif
// call tsan interceptor // call tsan interceptor
call __tsan_setjmp call __tsan_setjmp
// restore env parameter // restore env parameter
...@@ -231,10 +241,15 @@ sigsetjmp: ...@@ -231,10 +241,15 @@ sigsetjmp:
sub $8, %rsp sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8) CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp // obtain %rsp
#if defined(__FreeBSD__)
lea 24(%rsp), %rdi
mov %rdi, %rsi
#else
lea 32(%rsp), %rdi lea 32(%rsp), %rdi
mov %rdi, %rsi mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi rol $0x11, %rsi
#endif
// call tsan interceptor // call tsan interceptor
call __tsan_setjmp call __tsan_setjmp
// unalign stack frame // unalign stack frame
...@@ -272,10 +287,15 @@ __sigsetjmp: ...@@ -272,10 +287,15 @@ __sigsetjmp:
sub $8, %rsp sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8) CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp // obtain %rsp
#if defined(__FreeBSD__)
lea 24(%rsp), %rdi
mov %rdi, %rsi
#else
lea 32(%rsp), %rdi lea 32(%rsp), %rdi
mov %rdi, %rsi mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi rol $0x11, %rsi
#endif
// call tsan interceptor // call tsan interceptor
call __tsan_setjmp call __tsan_setjmp
// unalign stack frame // unalign stack frame
...@@ -296,7 +316,7 @@ __sigsetjmp: ...@@ -296,7 +316,7 @@ __sigsetjmp:
CFI_ENDPROC CFI_ENDPROC
.size __sigsetjmp, .-__sigsetjmp .size __sigsetjmp, .-__sigsetjmp
#ifdef __linux__ #if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */ /* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits .section .note.GNU-stack,"",@progbits
#endif #endif
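The new #if branches reflect a libc difference: glibc stores the saved %rsp in the jmp_buf pointer-mangled, so the Linux path reproduces that transform before calling __tsan_setjmp, while FreeBSD stores it as-is and only the stack offset changes. A hedged C++ sketch of the mangling step performed on the Linux path (the guard value lives at %fs:0x30 in glibc):

#include <cstdint>
// Illustration only: the PTR_MANGLE-style transform mirrored by the asm above.
static inline uint64_t MangleSp(uint64_t sp, uint64_t guard) {
  uint64_t v = sp ^ guard;                    // xor %fs:0x30, %rsi
  return (v << 0x11) | (v >> (64 - 0x11));    // rol $0x11, %rsi
}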
...@@ -57,9 +57,9 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ, ...@@ -57,9 +57,9 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
ThreadRegistryLock l(ctx->thread_registry); ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ); ScopedReport rep(typ);
rep.AddMutex(mid); rep.AddMutex(mid);
StackTrace trace; VarSizeStackTrace trace;
trace.ObtainCurrent(thr, pc); ObtainCurrentStack(thr, pc, &trace);
rep.AddStack(&trace, true); rep.AddStack(trace, true);
rep.AddLocation(addr, 1); rep.AddLocation(addr, 1);
OutputReport(thr, rep); OutputReport(thr, rep);
} }
...@@ -122,12 +122,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { ...@@ -122,12 +122,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
ThreadRegistryLock l(ctx->thread_registry); ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked); ScopedReport rep(ReportTypeMutexDestroyLocked);
rep.AddMutex(mid); rep.AddMutex(mid);
StackTrace trace; VarSizeStackTrace trace;
trace.ObtainCurrent(thr, pc); ObtainCurrentStack(thr, pc, &trace);
rep.AddStack(&trace); rep.AddStack(trace);
FastState last(last_lock); FastState last(last_lock);
RestoreStack(last.tid(), last.epoch(), &trace, 0); RestoreStack(last.tid(), last.epoch(), &trace, 0);
rep.AddStack(&trace, true); rep.AddStack(trace, true);
rep.AddLocation(addr, 1); rep.AddLocation(addr, 1);
OutputReport(thr, rep); OutputReport(thr, rep);
} }
...@@ -470,21 +470,17 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) { ...@@ -470,21 +470,17 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
rep.AddUniqueTid((int)r->loop[i].thr_ctx); rep.AddUniqueTid((int)r->loop[i].thr_ctx);
rep.AddThread((int)r->loop[i].thr_ctx); rep.AddThread((int)r->loop[i].thr_ctx);
} }
InternalScopedBuffer<StackTrace> stacks(2 * DDReport::kMaxLoopSize);
uptr dummy_pc = 0x42; uptr dummy_pc = 0x42;
for (int i = 0; i < r->n; i++) { for (int i = 0; i < r->n; i++) {
uptr size;
for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) { for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
u32 stk = r->loop[i].stk[j]; u32 stk = r->loop[i].stk[j];
if (stk) { if (stk) {
const uptr *trace = StackDepotGet(stk, &size); rep.AddStack(StackDepotGet(stk), true);
stacks[i].Init(const_cast<uptr *>(trace), size);
} else { } else {
// Sometimes we fail to extract the stack trace (FIXME: investigate), // Sometimes we fail to extract the stack trace (FIXME: investigate),
// but we should still produce some stack trace in the report. // but we should still produce some stack trace in the report.
stacks[i].Init(&dummy_pc, 1); rep.AddStack(StackTrace(&dummy_pc, 1), true);
} }
rep.AddStack(&stacks[i], true);
} }
} }
OutputReport(thr, rep); OutputReport(thr, rep);
......
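The mutex-report hunks and the deadlock path above now share one pattern: capture or fetch a stack trace as a value type and pass it straight to ScopedReport::AddStack, with no intermediate buffers. A condensed, illustrative sketch of that flow (not a drop-in; names as used in this merge):

// Current-thread stack: fill a VarSizeStackTrace and hand it to the report.
VarSizeStackTrace trace;
ObtainCurrentStack(thr, pc, &trace);
rep.AddStack(trace, /*suppressable=*/true);
// Historical stack: a depot id converts directly into a StackTrace view.
if (u32 stk = r->loop[i].stk[j])
  rep.AddStack(StackDepotGet(stk), /*suppressable=*/true);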
...@@ -8,103 +8,37 @@ ...@@ -8,103 +8,37 @@
// This file is a part of ThreadSanitizer (TSan), a race detector. // This file is a part of ThreadSanitizer (TSan), a race detector.
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
//#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_stack_trace.h" #include "tsan_stack_trace.h"
#include "tsan_rtl.h" #include "tsan_rtl.h"
#include "tsan_mman.h" #include "tsan_mman.h"
namespace __tsan { namespace __tsan {
StackTrace::StackTrace() VarSizeStackTrace::VarSizeStackTrace()
: n_() : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
, s_()
, c_() {
}
StackTrace::StackTrace(uptr *buf, uptr cnt)
: n_()
, s_(buf)
, c_(cnt) {
CHECK_NE(buf, 0);
CHECK_NE(cnt, 0);
}
StackTrace::~StackTrace() {
Reset();
}
void StackTrace::Reset() { VarSizeStackTrace::~VarSizeStackTrace() {
if (s_ && !c_) { ResizeBuffer(0);
CHECK_NE(n_, 0);
internal_free(s_);
s_ = 0;
}
n_ = 0;
} }
void StackTrace::Init(const uptr *pcs, uptr cnt) { void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
Reset(); if (trace_buffer) {
if (cnt == 0) internal_free(trace_buffer);
return;
if (c_) {
CHECK_NE(s_, 0);
CHECK_LE(cnt, c_);
} else {
s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
}
n_ = cnt;
internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
Reset();
n_ = thr->shadow_stack_pos - thr->shadow_stack;
if (n_ + !!toppc == 0)
return;
uptr start = 0;
if (c_) {
CHECK_NE(s_, 0);
if (n_ + !!toppc > c_) {
start = n_ - c_ + !!toppc;
n_ = c_ - !!toppc;
}
} else {
// Cap potentially huge stacks.
if (n_ + !!toppc > kTraceStackSize) {
start = n_ - kTraceStackSize + !!toppc;
n_ = kTraceStackSize - !!toppc;
}
s_ = (uptr*)internal_alloc(MBlockStackTrace,
(n_ + !!toppc) * sizeof(s_[0]));
}
for (uptr i = 0; i < n_; i++)
s_[i] = thr->shadow_stack[start + i];
if (toppc) {
s_[n_] = toppc;
n_++;
} }
} trace_buffer =
(new_size > 0)
void StackTrace::CopyFrom(const StackTrace& other) { ? (uptr *)internal_alloc(MBlockStackTrace,
Reset(); new_size * sizeof(trace_buffer[0]))
Init(other.Begin(), other.Size()); : nullptr;
} trace = trace_buffer;
size = new_size;
bool StackTrace::IsEmpty() const { }
return n_ == 0;
} void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
ResizeBuffer(cnt + !!extra_top_pc);
uptr StackTrace::Size() const { internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
return n_; if (extra_top_pc)
} trace_buffer[cnt] = extra_top_pc;
uptr StackTrace::Get(uptr i) const {
CHECK_LT(i, n_);
return s_[i];
}
const uptr *StackTrace::Begin() const {
return s_;
} }
} // namespace __tsan } // namespace __tsan
...@@ -11,40 +11,25 @@ ...@@ -11,40 +11,25 @@
#ifndef TSAN_STACK_TRACE_H #ifndef TSAN_STACK_TRACE_H
#define TSAN_STACK_TRACE_H #define TSAN_STACK_TRACE_H
//#include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_stacktrace.h"
//#include "sanitizer_common/sanitizer_common.h"
//#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h" #include "tsan_defs.h"
//#include "tsan_clock.h"
//#include "tsan_mutex.h"
//#include "tsan_dense_alloc.h"
namespace __tsan { namespace __tsan {
class StackTrace { // StackTrace which calls malloc/free to allocate the buffer for
public: // addresses in stack traces.
StackTrace(); struct VarSizeStackTrace : public StackTrace {
// Initialized the object in "static mode", uptr *trace_buffer; // Owned.
// in this mode it never calls malloc/free but uses the provided buffer.
StackTrace(uptr *buf, uptr cnt); VarSizeStackTrace();
~StackTrace(); ~VarSizeStackTrace();
void Reset(); void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
void Init(const uptr *pcs, uptr cnt);
void ObtainCurrent(ThreadState *thr, uptr toppc);
bool IsEmpty() const;
uptr Size() const;
uptr Get(uptr i) const;
const uptr *Begin() const;
void CopyFrom(const StackTrace& other);
private: private:
uptr n_; void ResizeBuffer(uptr new_size);
uptr *s_;
const uptr c_;
StackTrace(const StackTrace&); VarSizeStackTrace(const VarSizeStackTrace &);
void operator = (const StackTrace&); void operator=(const VarSizeStackTrace &);
}; };
} // namespace __tsan } // namespace __tsan
......
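VarSizeStackTrace now derives from sanitizer_common's StackTrace and owns its PC buffer through internal_alloc/internal_free; Init copies the given PCs and can append one extra top PC. A small usage sketch under those assumptions:

// Illustrative use of the new interface (PC values are made up).
uptr pcs[2] = {0x1000, 0x2000};
VarSizeStackTrace vst;
vst.Init(pcs, 2, /*extra_top_pc=*/0x3000);
// vst.trace now points at an owned 3-element buffer {0x1000, 0x2000, 0x3000}
// and vst.size == 3; the buffer is released in ~VarSizeStackTrace().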
...@@ -58,6 +58,8 @@ SuppressionType conv(ReportType typ) { ...@@ -58,6 +58,8 @@ SuppressionType conv(ReportType typ) {
return SuppressionRace; return SuppressionRace;
else if (typ == ReportTypeUseAfterFree) else if (typ == ReportTypeUseAfterFree)
return SuppressionRace; return SuppressionRace;
else if (typ == ReportTypeVptrUseAfterFree)
return SuppressionRace;
else if (typ == ReportTypeThreadLeak) else if (typ == ReportTypeThreadLeak)
return SuppressionThread; return SuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked) else if (typ == ReportTypeMutexDestroyLocked)
...@@ -89,13 +91,14 @@ uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) { ...@@ -89,13 +91,14 @@ uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
return 0; return 0;
Suppression *s; Suppression *s;
for (const ReportStack *frame = stack; frame; frame = frame->next) { for (const ReportStack *frame = stack; frame; frame = frame->next) {
if (SuppressionContext::Get()->Match(frame->func, stype, &s) || const AddressInfo &info = frame->info;
SuppressionContext::Get()->Match(frame->file, stype, &s) || if (SuppressionContext::Get()->Match(info.function, stype, &s) ||
SuppressionContext::Get()->Match(frame->module, stype, &s)) { SuppressionContext::Get()->Match(info.file, stype, &s) ||
SuppressionContext::Get()->Match(info.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ); DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++; s->hit_count++;
*sp = s; *sp = s;
return frame->pc; return info.address;
} }
} }
return 0; return 0;
...@@ -109,13 +112,13 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) { ...@@ -109,13 +112,13 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
if (stype == SuppressionNone) if (stype == SuppressionNone)
return 0; return 0;
Suppression *s; Suppression *s;
if (SuppressionContext::Get()->Match(loc->name, stype, &s) || const DataInfo &global = loc->global;
SuppressionContext::Get()->Match(loc->file, stype, &s) || if (SuppressionContext::Get()->Match(global.name, stype, &s) ||
SuppressionContext::Get()->Match(loc->module, stype, &s)) { SuppressionContext::Get()->Match(global.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ); DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++; s->hit_count++;
*sp = s; *sp = s;
return loc->addr; return global.start;
} }
return 0; return 0;
} }
......
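Suppression matching now pulls the symbolized function, file, and module out of frame->info (an AddressInfo) instead of flat ReportStack fields; the suppression-file syntax itself is unchanged. A hedged example of entries these checks would consume (patterns and names are illustrative):

# Hypothetical tsan.supp, passed via TSAN_OPTIONS=suppressions=tsan.supp
race:LockedQueue::Push        # matched against info.function
race:third_party/legacy.cc    # matched against info.file
thread:libplugin.so           # thread-leak report matched against info.module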
...@@ -34,38 +34,6 @@ void ExitSymbolizer() { ...@@ -34,38 +34,6 @@ void ExitSymbolizer() {
thr->ignore_interceptors--; thr->ignore_interceptors--;
} }
ReportStack *NewReportStackEntry(uptr addr) {
ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
sizeof(ReportStack));
internal_memset(ent, 0, sizeof(*ent));
ent->pc = addr;
return ent;
}
static ReportStack *NewReportStackEntry(const AddressInfo &info) {
ReportStack *ent = NewReportStackEntry(info.address);
ent->module = StripModuleName(info.module);
ent->offset = info.module_offset;
if (info.function)
ent->func = internal_strdup(info.function);
if (info.file)
ent->file = internal_strdup(info.file);
ent->line = info.line;
ent->col = info.column;
return ent;
}
ReportStack *next;
char *module;
uptr offset;
uptr pc;
char *func;
char *file;
int line;
int col;
// Denotes fake PC values that come from JIT/JAVA/etc. // Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called. // For such PC values __tsan_symbolize_external() will be called.
const uptr kExternalPCBit = 1ULL << 60; const uptr kExternalPCBit = 1ULL << 60;
...@@ -93,16 +61,14 @@ ReportStack *SymbolizeCode(uptr addr) { ...@@ -93,16 +61,14 @@ ReportStack *SymbolizeCode(uptr addr) {
static char func_buf[1024]; static char func_buf[1024];
static char file_buf[1024]; static char file_buf[1024];
int line, col; int line, col;
ReportStack *ent = ReportStack::New(addr);
if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf),
file_buf, sizeof(file_buf), &line, &col)) file_buf, sizeof(file_buf), &line, &col))
return NewReportStackEntry(addr); return ent;
ReportStack *ent = NewReportStackEntry(addr); ent->info.function = internal_strdup(func_buf);
ent->module = 0; ent->info.file = internal_strdup(file_buf);
ent->offset = 0; ent->info.line = line;
ent->func = internal_strdup(func_buf); ent->info.column = col;
ent->file = internal_strdup(file_buf);
ent->line = line;
ent->col = col;
return ent; return ent;
} }
static const uptr kMaxAddrFrames = 16; static const uptr kMaxAddrFrames = 16;
...@@ -112,13 +78,12 @@ ReportStack *SymbolizeCode(uptr addr) { ...@@ -112,13 +78,12 @@ ReportStack *SymbolizeCode(uptr addr) {
uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC( uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames); addr, addr_frames.data(), kMaxAddrFrames);
if (addr_frames_num == 0) if (addr_frames_num == 0)
return NewReportStackEntry(addr); return ReportStack::New(addr);
ReportStack *top = 0; ReportStack *top = 0;
ReportStack *bottom = 0; ReportStack *bottom = 0;
for (uptr i = 0; i < addr_frames_num; i++) { for (uptr i = 0; i < addr_frames_num; i++) {
ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]); ReportStack *cur_entry = ReportStack::New(addr);
CHECK(cur_entry); cur_entry->info = addr_frames[i];
addr_frames[i].Clear();
if (i == 0) if (i == 0)
top = cur_entry; top = cur_entry;
else else
...@@ -132,16 +97,8 @@ ReportLocation *SymbolizeData(uptr addr) { ...@@ -132,16 +97,8 @@ ReportLocation *SymbolizeData(uptr addr) {
DataInfo info; DataInfo info;
if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0; return 0;
ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack, ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
sizeof(ReportLocation)); ent->global = info;
internal_memset(ent, 0, sizeof(*ent));
ent->type = ReportLocationGlobal;
ent->module = StripModuleName(info.module);
ent->offset = info.module_offset;
if (info.name)
ent->name = internal_strdup(info.name);
ent->addr = info.start;
ent->size = info.size;
return ent; return ent;
} }
......
...@@ -40,21 +40,15 @@ enum EventType { ...@@ -40,21 +40,15 @@ enum EventType {
typedef u64 Event; typedef u64 Event;
struct TraceHeader { struct TraceHeader {
StackTrace stack0; // Start stack for the trace.
u64 epoch0; // Start epoch for the trace.
MutexSet mset0;
#ifndef TSAN_GO
uptr stack0buf[kTraceStackSize];
#endif
TraceHeader()
#ifndef TSAN_GO #ifndef TSAN_GO
: stack0(stack0buf, kTraceStackSize) BufferedStackTrace stack0; // Start stack for the trace.
#else #else
: stack0() VarSizeStackTrace stack0;
#endif #endif
, epoch0() { u64 epoch0; // Start epoch for the trace.
} MutexSet mset0;
TraceHeader() : stack0(), epoch0() {}
}; };
struct Trace { struct Trace {
......
...@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) ...@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS) AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
AM_CXXFLAGS += -std=c++11
ACLOCAL_AMFLAGS = -I m4 ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libubsan.la toolexeclib_LTLIBRARIES = libubsan.la
......
...@@ -256,7 +256,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) ...@@ -256,7 +256,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \ -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \ -fomit-frame-pointer -funwind-tables -fvisibility=hidden \
-Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -std=c++11
ACLOCAL_AMFLAGS = -I m4 ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libubsan.la toolexeclib_LTLIBRARIES = libubsan.la
ubsan_files = \ ubsan_files = \
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include "ubsan_flags.h" #include "ubsan_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h" #include "sanitizer_common/sanitizer_symbolizer.h"
#include <stdio.h> #include <stdio.h>
...@@ -31,7 +32,7 @@ static void MaybePrintStackTrace(uptr pc, uptr bp) { ...@@ -31,7 +32,7 @@ static void MaybePrintStackTrace(uptr pc, uptr bp) {
// under ASan). // under ASan).
if (StackTrace::WillUseFastUnwind(false)) if (StackTrace::WillUseFastUnwind(false))
return; return;
StackTrace stack; BufferedStackTrace stack;
stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false); stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false);
stack.Print(); stack.Print();
} }
...@@ -44,12 +45,12 @@ static void MaybeReportErrorSummary(Location Loc) { ...@@ -44,12 +45,12 @@ static void MaybeReportErrorSummary(Location Loc) {
if (Loc.isSourceLocation()) { if (Loc.isSourceLocation()) {
SourceLocation SLoc = Loc.getSourceLocation(); SourceLocation SLoc = Loc.getSourceLocation();
if (!SLoc.isInvalid()) { if (!SLoc.isInvalid()) {
ReportErrorSummary("runtime-error", SLoc.getFilename(), SLoc.getLine(), ReportErrorSummary("undefined-behavior", SLoc.getFilename(),
""); SLoc.getLine(), "");
return; return;
} }
} }
ReportErrorSummary("runtime-error"); ReportErrorSummary("undefined-behavior");
} }
namespace { namespace {
...@@ -127,14 +128,16 @@ static void renderLocation(Location Loc) { ...@@ -127,14 +128,16 @@ static void renderLocation(Location Loc) {
if (SLoc.isInvalid()) if (SLoc.isInvalid())
LocBuffer.append("<unknown>"); LocBuffer.append("<unknown>");
else else
PrintSourceLocation(&LocBuffer, SLoc.getFilename(), SLoc.getLine(), RenderSourceLocation(&LocBuffer, SLoc.getFilename(), SLoc.getLine(),
SLoc.getColumn()); SLoc.getColumn(), common_flags()->strip_path_prefix);
break; break;
} }
case Location::LK_Module: case Location::LK_Module: {
PrintModuleAndOffset(&LocBuffer, Loc.getModuleLocation().getModuleName(), ModuleLocation MLoc = Loc.getModuleLocation();
Loc.getModuleLocation().getOffset()); RenderModuleLocation(&LocBuffer, MLoc.getModuleName(), MLoc.getOffset(),
common_flags()->strip_path_prefix);
break; break;
}
case Location::LK_Memory: case Location::LK_Memory:
LocBuffer.append("%p", Loc.getMemoryLocation()); LocBuffer.append("%p", Loc.getMemoryLocation());
break; break;
......
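renderLocation now delegates to the shared stacktrace printer: RenderSourceLocation for file:line:column (honoring strip_path_prefix) and RenderModuleLocation for module+offset. Roughly what the three location kinds render as, with hypothetical values (the exact formatting belongs to sanitizer_common):

// LK_Source: "src/foo.cc:12:7"      (path prefix stripped per strip_path_prefix)
// LK_Module: "(libfoo.so+0x1a2b)"   (module name plus byte offset)
// LK_Memory: "0x7f00dead0000"       (raw pointer via LocBuffer.append("%p", ...))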
...@@ -28,10 +28,10 @@ static bool ignoreReport(SourceLocation SLoc, ReportOptions Opts) { ...@@ -28,10 +28,10 @@ static bool ignoreReport(SourceLocation SLoc, ReportOptions Opts) {
} }
namespace __ubsan { namespace __ubsan {
const char *TypeCheckKinds[] = { const char *TypeCheckKinds[] = {
"load of", "store to", "reference binding to", "member access within", "load of", "store to", "reference binding to", "member access within",
"member call on", "constructor call on", "downcast of", "downcast of" "member call on", "constructor call on", "downcast of", "downcast of",
}; "upcast of", "cast to virtual base of"};
} }
static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
......
...@@ -113,7 +113,8 @@ __ubsan::__ubsan_vptr_type_cache[__ubsan::VptrTypeCacheSize]; ...@@ -113,7 +113,8 @@ __ubsan::__ubsan_vptr_type_cache[__ubsan::VptrTypeCacheSize];
/// \brief Determine whether \p Derived has a \p Base base class subobject at /// \brief Determine whether \p Derived has a \p Base base class subobject at
/// offset \p Offset. /// offset \p Offset.
static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived, static bool isDerivedFromAtOffset(sptr Object,
const abi::__class_type_info *Derived,
const abi::__class_type_info *Base, const abi::__class_type_info *Base,
sptr Offset) { sptr Offset) {
if (Derived->__type_name == Base->__type_name) if (Derived->__type_name == Base->__type_name)
...@@ -121,7 +122,7 @@ static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived, ...@@ -121,7 +122,7 @@ static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
if (const abi::__si_class_type_info *SI = if (const abi::__si_class_type_info *SI =
dynamic_cast<const abi::__si_class_type_info*>(Derived)) dynamic_cast<const abi::__si_class_type_info*>(Derived))
return isDerivedFromAtOffset(SI->__base_type, Base, Offset); return isDerivedFromAtOffset(Object, SI->__base_type, Base, Offset);
const abi::__vmi_class_type_info *VTI = const abi::__vmi_class_type_info *VTI =
dynamic_cast<const abi::__vmi_class_type_info*>(Derived); dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
...@@ -136,13 +137,13 @@ static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived, ...@@ -136,13 +137,13 @@ static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
sptr OffsetHere = VTI->base_info[base].__offset_flags >> sptr OffsetHere = VTI->base_info[base].__offset_flags >>
abi::__base_class_type_info::__offset_shift; abi::__base_class_type_info::__offset_shift;
if (VTI->base_info[base].__offset_flags & if (VTI->base_info[base].__offset_flags &
abi::__base_class_type_info::__virtual_mask) abi::__base_class_type_info::__virtual_mask) {
// For now, just punt on virtual bases and say 'yes'. sptr VTable = *reinterpret_cast<const sptr *>(Object);
// FIXME: OffsetHere is the offset in the vtable of the virtual base OffsetHere = *reinterpret_cast<const sptr *>(VTable + OffsetHere);
// offset. Read the vbase offset out of the vtable and use it. }
return true; if (isDerivedFromAtOffset(Object + OffsetHere,
if (isDerivedFromAtOffset(VTI->base_info[base].__base_type, VTI->base_info[base].__base_type, Base,
Base, Offset - OffsetHere)) Offset - OffsetHere))
return true; return true;
} }
...@@ -151,14 +152,15 @@ static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived, ...@@ -151,14 +152,15 @@ static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
/// \brief Find the derived-most dynamic base class of \p Derived at offset /// \brief Find the derived-most dynamic base class of \p Derived at offset
/// \p Offset. /// \p Offset.
static const abi::__class_type_info *findBaseAtOffset( static const abi::__class_type_info *
const abi::__class_type_info *Derived, sptr Offset) { findBaseAtOffset(sptr Object, const abi::__class_type_info *Derived,
sptr Offset) {
if (!Offset) if (!Offset)
return Derived; return Derived;
if (const abi::__si_class_type_info *SI = if (const abi::__si_class_type_info *SI =
dynamic_cast<const abi::__si_class_type_info*>(Derived)) dynamic_cast<const abi::__si_class_type_info*>(Derived))
return findBaseAtOffset(SI->__base_type, Offset); return findBaseAtOffset(Object, SI->__base_type, Offset);
const abi::__vmi_class_type_info *VTI = const abi::__vmi_class_type_info *VTI =
dynamic_cast<const abi::__vmi_class_type_info*>(Derived); dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
...@@ -170,12 +172,13 @@ static const abi::__class_type_info *findBaseAtOffset( ...@@ -170,12 +172,13 @@ static const abi::__class_type_info *findBaseAtOffset(
sptr OffsetHere = VTI->base_info[base].__offset_flags >> sptr OffsetHere = VTI->base_info[base].__offset_flags >>
abi::__base_class_type_info::__offset_shift; abi::__base_class_type_info::__offset_shift;
if (VTI->base_info[base].__offset_flags & if (VTI->base_info[base].__offset_flags &
abi::__base_class_type_info::__virtual_mask) abi::__base_class_type_info::__virtual_mask) {
// FIXME: Can't handle virtual bases yet. sptr VTable = *reinterpret_cast<const sptr *>(Object);
continue; OffsetHere = *reinterpret_cast<const sptr *>(VTable + OffsetHere);
if (const abi::__class_type_info *Base = }
findBaseAtOffset(VTI->base_info[base].__base_type, if (const abi::__class_type_info *Base = findBaseAtOffset(
Offset - OffsetHere)) Object + OffsetHere, VTI->base_info[base].__base_type,
Offset - OffsetHere))
return Base; return Base;
} }
...@@ -227,7 +230,8 @@ bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) { ...@@ -227,7 +230,8 @@ bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) {
return false; return false;
abi::__class_type_info *Base = (abi::__class_type_info*)Type; abi::__class_type_info *Base = (abi::__class_type_info*)Type;
if (!isDerivedFromAtOffset(Derived, Base, -Vtable->Offset)) if (!isDerivedFromAtOffset(reinterpret_cast<sptr>(Object), Derived, Base,
-Vtable->Offset))
return false; return false;
// Success. Cache this result. // Success. Cache this result.
...@@ -241,8 +245,9 @@ __ubsan::DynamicTypeInfo __ubsan::getDynamicTypeInfo(void *Object) { ...@@ -241,8 +245,9 @@ __ubsan::DynamicTypeInfo __ubsan::getDynamicTypeInfo(void *Object) {
if (!Vtable) if (!Vtable)
return DynamicTypeInfo(0, 0, 0); return DynamicTypeInfo(0, 0, 0);
const abi::__class_type_info *ObjectType = findBaseAtOffset( const abi::__class_type_info *ObjectType = findBaseAtOffset(
static_cast<const abi::__class_type_info*>(Vtable->TypeInfo), reinterpret_cast<sptr>(Object),
-Vtable->Offset); static_cast<const abi::__class_type_info *>(Vtable->TypeInfo),
-Vtable->Offset);
return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset, return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset,
ObjectType ? ObjectType->__type_name : "<unknown>"); ObjectType ? ObjectType->__type_name : "<unknown>");
} }
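The virtual-base handling above replaces the old punt: for a virtual base, __offset_flags encodes an offset into the vtable where the real vbase offset is stored, so the code loads the object's vtable pointer and reads the actual offset from that slot before recursing with an adjusted Object. A hedged sketch of just that lookup (Itanium C++ ABI layout assumed; sptr as used in the file):

// Object points at a subobject whose first word is its vtable pointer.
static sptr VirtualBaseOffset(sptr Object, sptr OffsetIntoVtable) {
  sptr VTable = *reinterpret_cast<const sptr *>(Object);             // load the vptr
  return *reinterpret_cast<const sptr *>(VTable + OffsetIntoVtable); // vbase offset entry
}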