Commit 2660d12d authored by Kostya Serebryany, committed by Kostya Serebryany

libsanitizer merge from upstream r173241

From-SVN: r195404
parent b3996898
2013-01-23  Kostya Serebryany  <kcc@google.com>

	* config/darwin.h: remove dependency on CoreFoundation (asan on Mac OS).

2013-01-23  Jakub Jelinek  <jakub@redhat.com>

	PR target/49069
......
......@@ -178,7 +178,7 @@ extern GTY(()) int darwin_ms_struct;
%{L*} %(link_libgcc) %o %{fprofile-arcs|fprofile-generate*|coverage:-lgcov} \
%{fopenmp|ftree-parallelize-loops=*: \
%{static|static-libgcc|static-libstdc++|static-libgfortran: libgomp.a%s; : -lgomp } } \
%{fsanitize=address: -framework CoreFoundation -lasan } \
%{fsanitize=address: -lasan } \
%{fgnu-tm: \
%{static|static-libgcc|static-libstdc++|static-libgfortran: libitm.a%s; : -litm } } \
%{!nostdlib:%{!nodefaultlibs:\
......
2013-01-23  Kostya Serebryany  <kcc@google.com>

	PR sanitizer/55989
	* All source files: Merge from upstream r173241.
	* merge.sh: Support merging .inc files.

2013-01-16  Jakub Jelinek  <jakub@redhat.com>

	* sanitizer_common/Makefile.am (AM_CXXFLAGS): Remove
......
171973
173241
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
......@@ -27,7 +27,6 @@
#if ASAN_ALLOCATOR_VERSION == 1
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_report.h"
......@@ -35,6 +34,7 @@
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
......@@ -227,7 +227,7 @@ class MallocInfo {
AsanChunk *m = 0;
AsanChunk **fl = &free_lists_[size_class];
{
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
for (uptr i = 0; i < n_chunks; i++) {
if (!(*fl)) {
*fl = GetNewChunks(size_class);
......@@ -245,7 +245,7 @@ class MallocInfo {
void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
bool eat_free_lists) {
CHECK(flags()->quarantine_size > 0);
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
AsanChunkFifoList *q = &x->quarantine_;
if (q->size() > 0) {
quarantine_.PushList(q);
......@@ -269,18 +269,18 @@ class MallocInfo {
}
void BypassThreadLocalQuarantine(AsanChunk *chunk) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
quarantine_.Push(chunk);
}
AsanChunk *FindChunkByAddr(uptr addr) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
return FindChunkByAddrUnlocked(addr);
}
uptr AllocationSize(uptr ptr) {
if (!ptr) return 0;
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
// Make sure this is our chunk and |ptr| actually points to the beginning
// of the allocated memory.
......@@ -303,7 +303,7 @@ class MallocInfo {
}
void PrintStatus() {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
uptr malloced = 0;
Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
......@@ -321,7 +321,7 @@ class MallocInfo {
}
PageGroup *FindPageGroup(uptr addr) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
return FindPageGroupUnlocked(addr);
}
......@@ -479,7 +479,7 @@ class MallocInfo {
AsanChunk *free_lists_[kNumberOfSizeClasses];
AsanChunkFifoList quarantine_;
AsanLock mu_;
BlockingMutex mu_;
PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
atomic_uint32_t n_page_groups_;
......
......@@ -20,8 +20,14 @@
// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macrozz.
#define ASAN_ALLOCATOR_VERSION 1
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
# if ASAN_LINUX && !ASAN_ANDROID
# define ASAN_ALLOCATOR_VERSION 2
# else
# define ASAN_ALLOCATOR_VERSION 1
# endif
#endif // ASAN_ALLOCATOR_VERSION
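For context, a minimal sketch of how the selection above plays out at build time. It mirrors the defaults chosen by this hunk (version 2 on Linux, non-Android; version 1 elsewhere) and shows the override via -DASAN_ALLOCATOR_VERSION=<n>; the use of the standard __linux__/__ANDROID__ predefines in place of the ASAN_LINUX/ASAN_ANDROID helpers and the printf demo are assumptions for illustration, not part of this commit.
// Illustration only.
#include <cstdio>

#ifndef ASAN_ALLOCATOR_VERSION
# if defined(__linux__) && !defined(__ANDROID__)
#  define ASAN_ALLOCATOR_VERSION 2   // new allocator on Linux (non-Android)
# else
#  define ASAN_ALLOCATOR_VERSION 1   // old allocator elsewhere
# endif
#endif

int main() {
  // Override example:  g++ -DASAN_ALLOCATOR_VERSION=1 demo.cc
  std::printf("building against ASan allocator version %d\n",
              ASAN_ALLOCATOR_VERSION);
  return 0;
}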
namespace __asan {
......@@ -96,17 +102,21 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
struct AsanThreadLocalMallocStorage {
explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
: quarantine_(x) { }
#if ASAN_ALLOCATOR_VERSION == 1
: quarantine_(x)
#endif
{ }
AsanThreadLocalMallocStorage() {
CHECK(REAL(memset));
REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
}
AsanChunkFifoList quarantine_;
#if ASAN_ALLOCATOR_VERSION == 1
AsanChunkFifoList quarantine_;
AsanChunk *free_lists_[kNumberOfSizeClasses];
#else
uptr allocator2_cache[1024]; // Opaque.
uptr quarantine_cache[16];
uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
#endif
void CommitBack();
};
......
......@@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
namespace __asan {
......@@ -90,15 +91,6 @@ static const uptr kMaxThreadLocalQuarantine =
static const uptr kReturnOnZeroMalloc = 2048; // Zero page is protected.
static int inited = 0;
static void Init() {
if (inited) return;
__asan_init();
inited = true; // this must happen before any threads are created.
allocator.Init();
}
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
......@@ -244,31 +236,26 @@ void AsanChunkView::GetFreeStack(StackTrace *stack) {
chunk_->FreeStackSize());
}
class Quarantine: public AsanChunkFifoList {
public:
void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
AsanChunkFifoList *q = &ms->quarantine_;
if (!q->size()) return;
SpinMutexLock l(&mutex_);
PushList(q);
PopAndDeallocateLoop(ms);
}
struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
void BypassThreadLocalQuarantine(AsanChunk *m) {
SpinMutexLock l(&mutex_);
Push(m);
}
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
private:
void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
while (size() > (uptr)flags()->quarantine_size) {
PopAndDeallocate(ms);
}
struct QuarantineCallback {
explicit QuarantineCallback(AllocatorCache *cache)
: cache_(cache) {
}
void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
CHECK_GT(size(), 0);
AsanChunk *m = Pop();
CHECK(m);
void Recycle(AsanChunk *m) {
CHECK(m->chunk_state == CHUNK_QUARANTINE);
m->chunk_state = CHUNK_AVAILABLE;
CHECK_NE(m->alloc_tid, kInvalidTid);
......@@ -288,34 +275,27 @@ class Quarantine: public AsanChunkFifoList {
thread_stats.real_frees++;
thread_stats.really_freed += m->UsedSize();
allocator.Deallocate(GetAllocatorCache(ms), p);
allocator.Deallocate(cache_, p);
}
SpinMutex mutex_;
};
static Quarantine quarantine;
void *Allocate(uptr size) {
return allocator.Allocate(cache_, size, 1, false);
}
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
CHECK(q->size() > 0);
size_ += q->size();
append_back(q);
q->clear();
}
void Deallocate(void *p) {
allocator.Deallocate(cache_, p);
}
void AsanChunkFifoList::Push(AsanChunk *n) {
push_back(n);
size_ += n->UsedSize();
}
AllocatorCache *cache_;
};
// Interesting performance observation: this function takes up to 15% of overal
// allocator time. That's because *first_ has been evicted from cache long time
// ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
CHECK(first_);
AsanChunk *res = front();
size_ -= res->UsedSize();
pop_front();
return res;
static void Init() {
static int inited = 0;
if (inited) return;
__asan_init();
inited = true; // this must happen before any threads are created.
allocator.Init();
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
......@@ -355,9 +335,18 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
}
AsanThread *t = asanThreadRegistry().GetCurrent();
AllocatorCache *cache = t ? GetAllocatorCache(&t->malloc_storage()) : 0;
void *allocated = allocator.Allocate(cache, needed_size, 8, false);
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated = allocator.Allocate(cache, needed_size, 8, false);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
// Clear the first allocated word (an old kMemalignMagic may still be there).
reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
uptr user_beg = beg_plus_redzone;
......@@ -432,7 +421,7 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
// Flip the chunk_state atomically to avoid race on double-free.
u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
memory_order_acq_rel);
memory_order_relaxed);
if (old_chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
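The double-free check above hinges on a single atomic exchange of chunk_state: whichever free() flips the state to CHUNK_QUARANTINE first wins, and a racing second free observes CHUNK_QUARANTINE and reports. A self-contained sketch of the same idiom, using std::atomic instead of the sanitizer's own atomics (the enum values, MarkFreed and the demo main() are illustrative):
#include <atomic>
#include <cstdio>

enum ChunkState : unsigned char {   // values are illustrative
  CHUNK_AVAILABLE  = 0,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

struct Chunk {
  std::atomic<unsigned char> state{CHUNK_ALLOCATED};
};

// Returns true only for the single legitimate free of the chunk.
bool MarkFreed(Chunk *c) {
  unsigned char old =
      c->state.exchange(CHUNK_QUARANTINE, std::memory_order_relaxed);
  if (old == CHUNK_QUARANTINE) {
    std::fprintf(stderr, "double-free detected\n");
    return false;
  }
  return true;
}

int main() {
  Chunk c;
  MarkFreed(&c);   // ok
  MarkFreed(&c);   // reports double-free
  return 0;
}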
......@@ -466,13 +455,15 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
// Push into quarantine.
if (t) {
AsanChunkFifoList &q = t->malloc_storage().quarantine_;
q.Push(m);
if (q.size() > kMaxThreadLocalQuarantine)
quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
AllocatorCache *ac = GetAllocatorCache(ms);
quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
m, m->UsedSize());
} else {
quarantine.BypassThreadLocalQuarantine(m);
SpinMutexLock l(&fallback_mutex);
AllocatorCache *ac = &fallback_allocator_cache;
quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
m, m->UsedSize());
}
ASAN_FREE_HOOK(ptr);
......@@ -584,7 +575,8 @@ AsanChunkView FindHeapChunkByAddress(uptr addr) {
}
void AsanThreadLocalMallocStorage::CommitBack() {
quarantine.SwallowThreadLocalQuarantine(this);
AllocatorCache *ac = GetAllocatorCache(this);
quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
allocator.SwallowCache(GetAllocatorCache(this));
}
......@@ -681,16 +673,18 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
}
bool __asan_get_ownership(const void *p) {
return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
uptr ptr = reinterpret_cast<uptr>(p);
return (ptr == kReturnOnZeroMalloc) || (AllocationSize(ptr) > 0);
}
uptr __asan_get_allocated_size(const void *p) {
if (p == 0) return 0;
uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
if (allocated_size == 0 && ptr != kReturnOnZeroMalloc) {
GET_STACK_TRACE_FATAL_HERE;
ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
......
......@@ -11,13 +11,13 @@
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
......@@ -28,7 +28,7 @@ struct ListOfGlobals {
ListOfGlobals *next;
};
static AsanLock mu_for_globals(LINKER_INITIALIZED);
static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_globals;
static ListOfGlobals *list_of_all_globals;
static ListOfGlobals *list_of_dynamic_init_globals;
......@@ -53,14 +53,9 @@ void PoisonRedZones(const Global &g) {
}
}
static uptr GetAlignedSize(uptr size) {
return ((size + kGlobalAndStackRedzone - 1) / kGlobalAndStackRedzone)
* kGlobalAndStackRedzone;
}
bool DescribeAddressIfGlobal(uptr addr) {
if (!flags()->report_globals) return false;
ScopedLock lock(&mu_for_globals);
BlockingMutexLock lock(&mu_for_globals);
bool res = false;
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
......@@ -140,23 +135,10 @@ static void UnpoisonGlobal(const Global *g) {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
// Register one global with a default redzone.
void __asan_register_global(uptr addr, uptr size,
const char *name) {
if (!flags()->report_globals) return;
ScopedLock lock(&mu_for_globals);
Global *g = (Global *)allocator_for_globals.Allocate(sizeof(Global));
g->beg = addr;
g->size = size;
g->size_with_redzone = GetAlignedSize(size) + kGlobalAndStackRedzone;
g->name = name;
RegisterGlobal(g);
}
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
ScopedLock lock(&mu_for_globals);
BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
RegisterGlobal(&globals[i]);
}
......@@ -166,7 +148,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
// We must do this when a shared objects gets dlclosed.
void __asan_unregister_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
ScopedLock lock(&mu_for_globals);
BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
UnregisterGlobal(&globals[i]);
}
......@@ -179,7 +161,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
if (!flags()->check_initialization_order) return;
CHECK(list_of_dynamic_init_globals);
ScopedLock lock(&mu_for_globals);
BlockingMutexLock lock(&mu_for_globals);
bool from_current_tu = false;
// The list looks like:
// a => ... => b => last_addr => ... => first_addr => c => ...
......@@ -200,7 +182,7 @@ void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order) return;
ScopedLock lock(&mu_for_globals);
BlockingMutexLock lock(&mu_for_globals);
for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next)
UnpoisonGlobal(l->g);
}
......@@ -16,6 +16,8 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include <stdarg.h>
using __sanitizer::uptr;
// Use macro to describe if specific function should be
......@@ -40,10 +42,8 @@ using __sanitizer::uptr;
#if defined(__linux__)
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
# define ASAN_INTERCEPT_PRCTL 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
# define ASAN_INTERCEPT_PRCTL 0
#endif
#if !defined(__APPLE__)
......@@ -105,7 +105,7 @@ DECLARE_FUNCTION_AND_WRAPPER(void, siglongjmp, void *env, int value);
# endif
# if ASAN_INTERCEPT___CXA_THROW
DECLARE_FUNCTION_AND_WRAPPER(void, __cxa_throw, void *a, void *b, void *c);
#endif
# endif
// string.h / strings.h
DECLARE_FUNCTION_AND_WRAPPER(int, memcmp,
......@@ -139,9 +139,9 @@ DECLARE_FUNCTION_AND_WRAPPER(char*, strdup, const char *s);
# if ASAN_INTERCEPT_STRNLEN
DECLARE_FUNCTION_AND_WRAPPER(uptr, strnlen, const char *s, uptr maxlen);
# endif
#if ASAN_INTERCEPT_INDEX
# if ASAN_INTERCEPT_INDEX
DECLARE_FUNCTION_AND_WRAPPER(char*, index, const char *string, int c);
#endif
# endif
// stdlib.h
DECLARE_FUNCTION_AND_WRAPPER(int, atoi, const char *nptr);
......@@ -165,6 +165,13 @@ DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread64, int fd, void *buf,
SIZE_T count, OFF64_T offset);
# endif
# if SANITIZER_INTERCEPT_WRITE
DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, write, int fd, void *ptr, SIZE_T count);
# endif
# if SANITIZER_INTERCEPT_PWRITE
DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count);
# endif
# if ASAN_INTERCEPT_MLOCKX
// mlock/munlock
DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, SIZE_T len);
......@@ -186,7 +193,18 @@ DECLARE_FUNCTION_AND_WRAPPER(int, pthread_create,
void *(*start_routine)(void*), void *arg);
# endif
#if defined(__APPLE__)
DECLARE_FUNCTION_AND_WRAPPER(int, vscanf, const char *format, va_list ap);
DECLARE_FUNCTION_AND_WRAPPER(int, vsscanf, const char *str, const char *format,
va_list ap);
DECLARE_FUNCTION_AND_WRAPPER(int, vfscanf, void *stream, const char *format,
va_list ap);
DECLARE_FUNCTION_AND_WRAPPER(int, scanf, const char *format, ...);
DECLARE_FUNCTION_AND_WRAPPER(int, fscanf,
void* stream, const char *format, ...);
DECLARE_FUNCTION_AND_WRAPPER(int, sscanf, // NOLINT
const char *str, const char *format, ...);
# if defined(__APPLE__)
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;
......@@ -196,8 +214,6 @@ typedef void* dispatch_source_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);
typedef void* CFStringRef;
typedef void* CFAllocatorRef;
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_async_f,
dispatch_queue_t dq,
......@@ -215,11 +231,7 @@ DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async_f,
dispatch_group_t group, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
DECLARE_FUNCTION_AND_WRAPPER(void, __CFInitialize, void);
DECLARE_FUNCTION_AND_WRAPPER(CFStringRef, CFStringCreateCopy,
CFAllocatorRef alloc, CFStringRef str);
DECLARE_FUNCTION_AND_WRAPPER(void, free, void* ptr);
#if MAC_INTERPOSE_FUNCTIONS && !defined(MISSING_BLOCKS_SUPPORT)
# if MAC_INTERPOSE_FUNCTIONS && !defined(MISSING_BLOCKS_SUPPORT)
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async,
dispatch_group_t dg,
dispatch_queue_t dq, void (^work)(void));
......@@ -231,9 +243,35 @@ DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_event_handler,
dispatch_source_t ds, void (^work)(void));
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void (^work)(void));
#endif // MAC_INTERPOSE_FUNCTIONS
#endif // __APPLE__
# endif // MAC_INTERPOSE_FUNCTIONS
typedef void malloc_zone_t;
typedef size_t vm_size_t;
DECLARE_FUNCTION_AND_WRAPPER(malloc_zone_t *, malloc_create_zone,
vm_size_t start_size, unsigned flags);
DECLARE_FUNCTION_AND_WRAPPER(malloc_zone_t *, malloc_default_zone, void);
DECLARE_FUNCTION_AND_WRAPPER(
malloc_zone_t *, malloc_default_purgeable_zone, void);
DECLARE_FUNCTION_AND_WRAPPER(void, malloc_make_purgeable, void *ptr);
DECLARE_FUNCTION_AND_WRAPPER(int, malloc_make_nonpurgeable, void *ptr);
DECLARE_FUNCTION_AND_WRAPPER(void, malloc_set_zone_name,
malloc_zone_t *zone, const char *name);
DECLARE_FUNCTION_AND_WRAPPER(void *, malloc, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(void, free, void *ptr);
DECLARE_FUNCTION_AND_WRAPPER(void *, realloc, void *ptr, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(void *, calloc, size_t nmemb, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(void *, valloc, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(size_t, malloc_good_size, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(int, posix_memalign,
void **memptr, size_t alignment, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_prepare, void);
DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_parent, void);
DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_child, void);
# endif // __APPLE__
} // extern "C"
#endif
#endif // defined(__APPLE__) || (defined(_WIN32) && !defined(_DLL))
#endif // ASAN_INTERCEPTED_FUNCTIONS_H
......@@ -73,15 +73,30 @@ static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
return internal_strnlen(s, maxlen);
}
void SetThreadName(const char *name) {
AsanThread *t = asanThreadRegistry().GetCurrent();
if (t)
t->summary()->set_name(name);
}
} // namespace __asan
// ---------------------- Wrappers ---------------- {{{1
using namespace __asan; // NOLINT
#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) ASAN_WRITE_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) ASAN_READ_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_ENTER(func, ...) ENSURE_ASAN_INITED()
#include "sanitizer_common/sanitizer_common_interceptors.h"
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
do { \
ctx = 0; \
(void)ctx; \
ENSURE_ASAN_INITED(); \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) do { } while (false)
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) do { } while (false)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg;
......@@ -122,6 +137,18 @@ DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
#endif // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if ASAN_INTERCEPT_SWAPCONTEXT
static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
// Align to page size.
uptr PageSize = GetPageSizeCached();
uptr bottom = stack & ~(PageSize - 1);
ssize += stack - bottom;
ssize = RoundUpTo(ssize, PageSize);
static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb
if (ssize && ssize <= kMaxSaneContextStackSize) {
PoisonShadow(bottom, ssize, 0);
}
}
INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
struct ucontext_t *ucp) {
static bool reported_warning = false;
......@@ -132,16 +159,18 @@ INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
}
// Clear shadow memory for new context (it may share stack
// with current context).
ClearShadowMemoryForContext(ucp);
uptr stack, ssize;
ReadContextStack(ucp, &stack, &ssize);
ClearShadowMemoryForContextStack(stack, ssize);
int res = REAL(swapcontext)(oucp, ucp);
// swapcontext technically does not return, but program may swap context to
// "oucp" later, that would look as if swapcontext() returned 0.
// We need to clear shadow for ucp once again, as it may be in arbitrary
// state.
ClearShadowMemoryForContext(ucp);
ClearShadowMemoryForContextStack(stack, ssize);
return res;
}
#endif
#endif // ASAN_INTERCEPT_SWAPCONTEXT
INTERCEPTOR(void, longjmp, void *env, int val) {
__asan_handle_no_return();
......@@ -162,25 +191,6 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
}
#endif
#if ASAN_INTERCEPT_PRCTL
#define PR_SET_NAME 15
INTERCEPTOR(int, prctl, int option,
unsigned long arg2, unsigned long arg3, // NOLINT
unsigned long arg4, unsigned long arg5) { // NOLINT
int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
if (option == PR_SET_NAME) {
AsanThread *t = asanThreadRegistry().GetCurrent();
if (t) {
char buff[17];
internal_strncpy(buff, (char*)arg2, 16);
buff[16] = 0;
t->summary()->set_name(buff);
}
}
return res;
}
#endif
#if ASAN_INTERCEPT___CXA_THROW
INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
CHECK(REAL(__cxa_throw));
......@@ -727,9 +737,6 @@ void InitializeAsanInterceptors() {
#if ASAN_INTERCEPT_SIGLONGJMP
ASAN_INTERCEPT_FUNC(siglongjmp);
#endif
#if ASAN_INTERCEPT_PRCTL
ASAN_INTERCEPT_FUNC(prctl);
#endif
// Intercept exception handling functions.
#if ASAN_INTERCEPT___CXA_THROW
......
......@@ -114,7 +114,7 @@ bool AsanInterceptsSignal(int signum);
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
void InstallSignalHandlers();
void ClearShadowMemoryForContext(void *context);
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
// Wrapper for TLS/TSD.
......
......@@ -13,7 +13,6 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_libc.h"
......@@ -100,26 +99,6 @@ void AsanPlatformThreadInit() {
// Nothing here for now.
}
AsanLock::AsanLock(LinkerInitialized) {
// We assume that pthread_mutex_t initialized to all zeroes is a valid
// unlocked mutex. We can not use PTHREAD_MUTEX_INITIALIZER as it triggers
// a gcc warning:
// extended initializer lists only available with -std=c++0x or -std=gnu++0x
}
void AsanLock::Lock() {
CHECK(sizeof(pthread_mutex_t) <= sizeof(opaque_storage_));
pthread_mutex_lock((pthread_mutex_t*)&opaque_storage_);
CHECK(!owner_);
owner_ = (uptr)pthread_self();
}
void AsanLock::Unlock() {
CHECK(owner_ == (uptr)pthread_self());
owner_ = 0;
pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
#if defined(__arm__) || \
defined(__powerpc__) || defined(__powerpc64__) || \
......@@ -139,19 +118,13 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
}
#if !ASAN_ANDROID
void ClearShadowMemoryForContext(void *context) {
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
ucontext_t *ucp = (ucontext_t*)context;
uptr sp = (uptr)ucp->uc_stack.ss_sp;
uptr size = ucp->uc_stack.ss_size;
// Align to page size.
uptr PageSize = GetPageSizeCached();
uptr bottom = sp & ~(PageSize - 1);
size += sp - bottom;
size = RoundUpTo(size, PageSize);
PoisonShadow(bottom, size, 0);
*stack = (uptr)ucp->uc_stack.ss_sp;
*ssize = ucp->uc_stack.ss_size;
}
#else
void ClearShadowMemoryForContext(void *context) {
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
#endif
......
//===-- asan_lock.h ---------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// A wrapper for a simple lock.
//===----------------------------------------------------------------------===//
#ifndef ASAN_LOCK_H
#define ASAN_LOCK_H
#include "sanitizer_common/sanitizer_mutex.h"
#include "asan_internal.h"
// The locks in ASan are global objects and they are never destroyed to avoid
// at-exit races (that is, a lock is being used by other threads while the main
// thread is doing atexit destructors).
// We define the class using opaque storage to avoid including system headers.
namespace __asan {
class AsanLock {
public:
explicit AsanLock(LinkerInitialized);
void Lock();
void Unlock();
bool IsLocked() { return owner_ != 0; }
private:
uptr opaque_storage_[10];
uptr owner_; // for debugging and for malloc_introspection_t interface
};
typedef GenericScopedLock<AsanLock> ScopedLock;
} // namespace __asan
#endif // ASAN_LOCK_H
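asan_lock.h above is deleted by this merge; every former AsanLock/ScopedLock pair becomes a BlockingMutex/BlockingMutexLock from sanitizer_common, as the earlier hunks show. A minimal sketch of the replacement pattern (the mutex name and the wrapped function are illustrative):
#include "sanitizer_common/sanitizer_mutex.h"

namespace __asan {

// Global and linker-initialized, so it is never destroyed and stays usable
// from atexit handlers -- the same property the old AsanLock relied on.
static __sanitizer::BlockingMutex mu_for_example(__sanitizer::LINKER_INITIALIZED);

void DoSomethingLocked() {
  // RAII scoped lock: locks in the constructor, unlocks when the scope ends.
  __sanitizer::BlockingMutexLock lock(&mu_for_example);
  // ... critical section ...
}

}  // namespace __asan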
......@@ -34,7 +34,6 @@
#include <stdlib.h> // for free()
#include <unistd.h>
#include <libkern/OSAtomic.h>
#include <CoreFoundation/CFString.h>
namespace __asan {
......@@ -129,33 +128,6 @@ bool AsanInterceptsSignal(int signum) {
}
void AsanPlatformThreadInit() {
// For the first program thread, we can't replace the allocator before
// __CFInitialize() has been called. If it hasn't, we'll call
// MaybeReplaceCFAllocator() later on this thread.
// For other threads __CFInitialize() has been called before their creation.
// See also asan_malloc_mac.cc.
if (((CFRuntimeBase*)kCFAllocatorSystemDefault)->_cfisa) {
MaybeReplaceCFAllocator();
}
}
AsanLock::AsanLock(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
void AsanLock::Lock() {
CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
CHECK(OS_SPINLOCK_INIT == 0);
CHECK(owner_ != (uptr)pthread_self());
OSSpinLockLock((OSSpinLock*)&opaque_storage_);
CHECK(!owner_);
owner_ = (uptr)pthread_self();
}
void AsanLock::Unlock() {
CHECK(owner_ == (uptr)pthread_self());
owner_ = 0;
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
......@@ -170,7 +142,7 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
}
}
void ClearShadowMemoryForContext(void *context) {
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
......@@ -254,9 +226,6 @@ mach_error_t __interception_deallocate_island(void *ptr) {
// The implementation details are at
// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;
typedef void* dispatch_group_t;
typedef void* dispatch_queue_t;
typedef void* dispatch_source_t;
......@@ -287,9 +256,6 @@ void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
int pthread_workqueue_additem_np(pthread_workqueue_t workq,
void *(*workitem_func)(void *), void * workitem_arg,
pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
} // extern "C"
static ALWAYS_INLINE
......@@ -444,66 +410,6 @@ INTERCEPTOR(void, dispatch_source_set_event_handler,
}
#endif
// The following stuff has been extremely helpful while looking for the
// unhandled functions that spawned jobs on Chromium shutdown. If the verbosity
// level is 2 or greater, we wrap pthread_workqueue_additem_np() in order to
// find the points of worker thread creation (each of such threads may be used
// to run several tasks, that's why this is not enough to support the whole
// libdispatch API.
extern "C"
void *wrap_workitem_func(void *arg) {
if (flags()->verbosity >= 2) {
Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
}
asan_block_context_t *ctxt = (asan_block_context_t*)arg;
worker_t fn = (worker_t)(ctxt->func);
void *result = fn(ctxt->block);
GET_STACK_TRACE_THREAD;
asan_free(arg, &stack, FROM_MALLOC);
return result;
}
INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
void *(*workitem_func)(void *), void * workitem_arg,
pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt =
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
asan_ctxt->block = workitem_arg;
asan_ctxt->func = (dispatch_function_t)workitem_func;
asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
if (flags()->verbosity >= 2) {
Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
PRINT_CURRENT_STACK();
}
return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
asan_ctxt, itemhandlep,
gencountp);
}
// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
int __CFStrIsConstant(CFStringRef str) {
CFRuntimeBase *base = (CFRuntimeBase*)str;
#if __LP64__
return base->_rc == 0;
#else
return (base->_cfinfo[CF_RC_BITS]) == 0;
#endif
}
INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
CFStringRef str) {
if (__CFStrIsConstant(str)) {
return str;
} else {
return REAL(CFStringCreateCopy)(alloc, str);
}
}
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
DECLARE_REAL_AND_INTERCEPTOR(void, __CFInitialize, void)
namespace __asan {
void InitializeMacInterceptors() {
......@@ -512,26 +418,6 @@ void InitializeMacInterceptors() {
CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
// We don't need to intercept pthread_workqueue_additem_np() to support the
// libdispatch API, but it helps us to debug the unsupported functions. Let's
// intercept it only during verbose runs.
if (flags()->verbosity >= 2) {
CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
}
// Normally CFStringCreateCopy should not copy constant CF strings.
// Replacing the default CFAllocator causes constant strings to be copied
// rather than just returned, which leads to bugs in big applications like
// Chromium and WebKit, see
// http://code.google.com/p/address-sanitizer/issues/detail?id=10
// Until this problem is fixed we need to check that the string is
// non-constant before calling CFStringCreateCopy.
CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
// Some of the library functions call free() directly, so we have to
// intercept it.
CHECK(INTERCEPT_FUNCTION(free));
if (flags()->replace_cfallocator) {
CHECK(INTERCEPT_FUNCTION(__CFInitialize));
}
}
} // namespace __asan
......
......@@ -109,6 +109,10 @@ static inline bool AddrIsInShadow(uptr a) {
}
static inline bool AddrIsInShadowGap(uptr a) {
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
if (SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
......
......@@ -25,8 +25,9 @@ void ReplaceOperatorsNewAndDelete() { }
using namespace __asan; // NOLINT
// On Android new() goes through malloc interceptors.
#if !ASAN_ANDROID
// On Mac and Android new() goes through malloc interceptors.
// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
#if !ASAN_ANDROID && !ASAN_MAC
// Fake std::nothrow_t to avoid including <new>.
namespace std {
......
......@@ -23,7 +23,7 @@ void PoisonShadow(uptr addr, uptr size, u8 value) {
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size);
uptr shadow_end = MemToShadow(addr + size - SHADOW_GRANULARITY) + 1;
CHECK(REAL(memset) != 0);
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
}
......
......@@ -12,7 +12,6 @@
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
......@@ -140,10 +139,12 @@ void InitializeFlags(Flags *f, const char *env) {
f->allow_reexec = true;
f->print_full_thread_history = true;
f->log_path = 0;
f->fast_unwind_on_fatal = true;
f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
f->poison_heap = true;
f->alloc_dealloc_mismatch = true;
// Turn off alloc/dealloc mismatch checker on Mac for now.
// TODO(glider): Fix known issues and enable this back.
f->alloc_dealloc_mismatch = (ASAN_MAC == 0);
f->use_stack_depot = true; // Only affects allocator2.
// Override from user-specified string.
......@@ -228,7 +229,6 @@ static NOINLINE void force_interface_symbols() {
case 8: __asan_report_store4(0); break;
case 9: __asan_report_store8(0); break;
case 10: __asan_report_store16(0); break;
case 11: __asan_register_global(0, 0, 0); break;
case 12: __asan_register_globals(0, 0); break;
case 13: __asan_unregister_globals(0, 0); break;
case 14: __asan_set_death_callback(0); break;
......
......@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_stats.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
......@@ -53,13 +52,13 @@ void AsanStats::Print() {
malloc_large, malloc_small_slow);
}
static AsanLock print_lock(LINKER_INITIALIZED);
static BlockingMutex print_lock(LINKER_INITIALIZED);
static void PrintAccumulatedStats() {
AsanStats stats;
asanThreadRegistry().GetAccumulatedStats(&stats);
// Use lock to keep reports from mixing up.
ScopedLock lock(&print_lock);
BlockingMutexLock lock(&print_lock);
stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats();
Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
......
......@@ -72,7 +72,7 @@ void AsanThread::Destroy() {
void AsanThread::Init() {
SetThreadStackTopAndBottom();
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStack();
if (flags()->verbosity >= 1) {
int local = 0;
......
......@@ -42,7 +42,7 @@ void AsanThreadRegistry::Init() {
}
void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
u32 tid = n_threads_;
n_threads_++;
CHECK(n_threads_ < kMaxNumberOfThreads);
......@@ -54,7 +54,7 @@ void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
}
void AsanThreadRegistry::UnregisterThread(AsanThread *thread) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
FlushToAccumulatedStatsUnlocked(&thread->stats());
AsanThreadSummary *summary = thread->summary();
CHECK(summary);
......@@ -103,13 +103,13 @@ AsanStats &AsanThreadRegistry::GetCurrentThreadStats() {
}
void AsanThreadRegistry::GetAccumulatedStats(AsanStats *stats) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
internal_memcpy(stats, &accumulated_stats_, sizeof(accumulated_stats_));
}
uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr malloced = accumulated_stats_.malloced;
uptr freed = accumulated_stats_.freed;
......@@ -119,13 +119,13 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
}
uptr AsanThreadRegistry::GetHeapSize() {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
}
uptr AsanThreadRegistry::GetFreeBytes() {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr total_free = accumulated_stats_.mmaped
- accumulated_stats_.munmaped
......@@ -141,7 +141,7 @@ uptr AsanThreadRegistry::GetFreeBytes() {
// Return several stats counters with a single call to
// UpdateAccumulatedStatsUnlocked().
void AsanThreadRegistry::FillMallocStatistics(AsanMallocStats *malloc_stats) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
malloc_stats->blocks_in_use = accumulated_stats_.mallocs;
malloc_stats->size_in_use = accumulated_stats_.malloced;
......@@ -156,7 +156,7 @@ AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) {
}
AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread();
if (!t || !(t->fake_stack().StackSize())) continue;
......
......@@ -13,10 +13,10 @@
#ifndef ASAN_THREAD_REGISTRY_H
#define ASAN_THREAD_REGISTRY_H
#include "asan_lock.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
......@@ -71,7 +71,7 @@ class AsanThreadRegistry {
// per-thread AsanStats.
uptr max_malloced_memory_;
u32 n_threads_;
AsanLock mu_;
BlockingMutex mu_;
bool inited_;
};
......
......@@ -15,18 +15,16 @@
#include <dbghelp.h>
#include <stdlib.h>
#include <new> // FIXME: temporarily needed for placement new in AsanLock.
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
static AsanLock dbghelp_lock(LINKER_INITIALIZED);
static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib")
......@@ -54,42 +52,6 @@ void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
stack->trace[i] = (uptr)tmp[i + offset];
}
// ---------------------- AsanLock ---------------- {{{1
enum LockState {
LOCK_UNINITIALIZED = 0,
LOCK_READY = -1,
};
AsanLock::AsanLock(LinkerInitialized li) {
// FIXME: see comments in AsanLock::Lock() for the details.
CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
owner_ = LOCK_READY;
}
void AsanLock::Lock() {
if (owner_ == LOCK_UNINITIALIZED) {
// FIXME: hm, global AsanLock objects are not initialized?!?
// This might be a side effect of the clang+cl+link Frankenbuild...
new(this) AsanLock((LinkerInitialized)(LINKER_INITIALIZED + 1));
// FIXME: If it turns out the linker doesn't invoke our
// constructors, we should probably manually Lock/Unlock all the global
// locks while we're starting in one thread to avoid double-init races.
}
EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
CHECK(owner_ == LOCK_READY);
owner_ = GetThreadSelf();
}
void AsanLock::Unlock() {
CHECK(owner_ == GetThreadSelf());
owner_ = LOCK_READY;
LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}
// ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false;
......@@ -138,7 +100,7 @@ void AsanPlatformThreadInit() {
// Nothing here for now.
}
void ClearShadowMemoryForContext(void *context) {
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
......@@ -150,7 +112,7 @@ using namespace __asan; // NOLINT
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
ScopedLock lock(&dbghelp_lock);
BlockingMutexLock lock(&dbghelp_lock);
if (!dbghelp_initialized) {
SymSetOptions(SYMOPT_DEFERRED_LOADS |
SYMOPT_UNDNAME |
......
......@@ -99,9 +99,19 @@ const interpose_substitution substitutions[]
INTERPOSE_FUNCTION(signal),
INTERPOSE_FUNCTION(sigaction),
INTERPOSE_FUNCTION(__CFInitialize),
INTERPOSE_FUNCTION(CFStringCreateCopy),
INTERPOSE_FUNCTION(malloc_create_zone),
INTERPOSE_FUNCTION(malloc_default_zone),
INTERPOSE_FUNCTION(malloc_default_purgeable_zone),
INTERPOSE_FUNCTION(malloc_make_purgeable),
INTERPOSE_FUNCTION(malloc_make_nonpurgeable),
INTERPOSE_FUNCTION(malloc_set_zone_name),
INTERPOSE_FUNCTION(malloc),
INTERPOSE_FUNCTION(free),
INTERPOSE_FUNCTION(realloc),
INTERPOSE_FUNCTION(calloc),
INTERPOSE_FUNCTION(valloc),
INTERPOSE_FUNCTION(malloc_good_size),
INTERPOSE_FUNCTION(posix_memalign),
};
} // namespace __asan
......
......@@ -26,11 +26,6 @@ extern "C" {
// before any instrumented code is executed and before any call to malloc.
void __asan_init() SANITIZER_INTERFACE_ATTRIBUTE;
// This function should be called by the instrumented code.
// 'addr' is the address of a global variable called 'name' of 'size' bytes.
void __asan_register_global(uptr addr, uptr size, const char *name)
SANITIZER_INTERFACE_ATTRIBUTE;
// This structure describes an instrumented global variable.
struct __asan_global {
uptr beg; // The address of the global.
......
......@@ -23,6 +23,8 @@
// the standard system types (e.g. SSIZE_T instead of ssize_t)
typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T;
typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T;
typedef __sanitizer::u64 OFF_T;
typedef __sanitizer::u64 OFF64_T;
......
......@@ -16,7 +16,7 @@ get_current_rev() {
}
list_files() {
(cd $1; ls *.{cc,h} 2> /dev/null)
(cd $1; ls *.{cc,h,inc} 2> /dev/null)
}
......
......@@ -39,6 +39,7 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
......@@ -54,6 +55,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
......
......@@ -70,6 +70,7 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
......@@ -85,6 +86,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
......
......@@ -21,10 +21,16 @@ uptr GetPageSizeCached() {
return PageSize;
}
// By default, dump to stderr. If report_fd is kInvalidFd, try to obtain file
// descriptor by opening file in report_path.
static bool log_to_file = false; // Set to true by __sanitizer_set_report_path
// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
// isn't equal to the current PID, try to obtain file descriptor by opening
// file "report_path_prefix.<PID>".
static fd_t report_fd = kStderrFd;
static char report_path[4096]; // Set via __sanitizer_set_report_path.
static char report_path_prefix[4096]; // Set via __sanitizer_set_report_path.
// PID of process that opened |report_fd|. If a fork() occurs, the PID of the
// child thread will be different from |report_fd_pid|.
static int report_fd_pid = 0;
static void (*DieCallback)(void);
void SetDieCallback(void (*callback)(void)) {
......@@ -48,21 +54,29 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
if (CheckFailedCallback) {
CheckFailedCallback(file, line, cond, v1, v2);
}
Report("Sanitizer CHECK failed: %s:%d %s (%zd, %zd)\n", file, line, cond,
v1, v2);
Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
v1, v2);
Die();
}
static void MaybeOpenReportFile() {
if (report_fd != kInvalidFd)
return;
fd_t fd = internal_open(report_path, true);
if (!log_to_file || (report_fd_pid == GetPid())) return;
InternalScopedBuffer<char> report_path_full(4096);
internal_snprintf(report_path_full.data(), report_path_full.size(),
"%s.%d", report_path_prefix, GetPid());
fd_t fd = internal_open(report_path_full.data(), true);
if (fd == kInvalidFd) {
report_fd = kStderrFd;
Report("ERROR: Can't open file: %s\n", report_path);
log_to_file = false;
Report("ERROR: Can't open file: %s\n", report_path_full.data());
Die();
}
if (report_fd != kInvalidFd) {
// We're in the child. Close the parent's log.
internal_close(report_fd);
}
report_fd = fd;
report_fd_pid = GetPid();
}
bool PrintsToTty() {
......@@ -182,14 +196,16 @@ extern "C" {
void __sanitizer_set_report_path(const char *path) {
if (!path) return;
uptr len = internal_strlen(path);
if (len > sizeof(report_path) - 100) {
if (len > sizeof(report_path_prefix) - 100) {
Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
path[0], path[1], path[2], path[3],
path[4], path[5], path[6], path[7]);
Die();
}
internal_snprintf(report_path, sizeof(report_path), "%s.%d", path, GetPid());
internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
report_path_prefix[len] = '\0';
report_fd = kInvalidFd;
log_to_file = true;
}
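Usage sketch for the updated interface: after the call below, reports are written to "<prefix>.<pid>" instead of stderr, and because MaybeOpenReportFile() compares report_fd_pid against the current PID, a forked child transparently reopens its own "<prefix>.<child pid>" file. The main() harness and the /tmp path are illustrative:
extern "C" void __sanitizer_set_report_path(const char *path);

int main() {
  // All subsequent sanitizer output goes to /tmp/asan_log.<pid>.
  __sanitizer_set_report_path("/tmp/asan_log");
  // ... run instrumented code; after a fork(), the child gets its own log file ...
  return 0;
}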
void __sanitizer_set_report_fd(int fd) {
......
//===-- sanitizer_common_interceptors.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// This file should be included into the tool's interceptor file,
// which has to define it's own macros:
// COMMON_INTERCEPTOR_ENTER
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_INTERCEPTORS_H
#define SANITIZER_COMMON_INTERCEPTORS_H
#include "interception/interception.h"
#include "sanitizer_platform_interceptors.h"
#if SANITIZER_INTERCEPT_READ
INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
COMMON_INTERCEPTOR_ENTER(read, fd, ptr, count);
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_PREAD
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
COMMON_INTERCEPTOR_ENTER(pread, fd, ptr, count, offset);
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_PREAD64
INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
COMMON_INTERCEPTOR_ENTER(pread64, fd, ptr, count, offset);
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
return res;
}
#endif
#if SANITIZER_INTERCEPT_READ
# define INIT_READ INTERCEPT_FUNCTION(read)
#else
# define INIT_READ
#endif
#if SANITIZER_INTERCEPT_PREAD
# define INIT_PREAD INTERCEPT_FUNCTION(pread)
#else
# define INIT_PREAD
#endif
#if SANITIZER_INTERCEPT_PREAD64
# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
#else
# define INIT_PREAD64
#endif
#define SANITIZER_COMMON_INTERCEPTORS_INIT \
INIT_READ; \
INIT_PREAD; \
INIT_PREAD64; \
#endif // SANITIZER_COMMON_INTERCEPTORS_H
//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// This file should be included into the tool's interceptor file,
// which has to define it's own macros:
// COMMON_INTERCEPTOR_ENTER
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
// COMMON_INTERCEPTOR_FD_ACQUIRE
// COMMON_INTERCEPTOR_FD_RELEASE
// COMMON_INTERCEPTOR_SET_THREAD_NAME
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_platform_interceptors.h"
#include <stdarg.h>
#if SANITIZER_INTERCEPT_READ
INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
# define INIT_READ INTERCEPT_FUNCTION(read)
#else
# define INIT_READ
#endif
#if SANITIZER_INTERCEPT_PREAD
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
# define INIT_PREAD INTERCEPT_FUNCTION(pread)
#else
# define INIT_PREAD
#endif
#if SANITIZER_INTERCEPT_PREAD64
INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
#else
# define INIT_PREAD64
#endif
#if SANITIZER_INTERCEPT_WRITE
INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
if (fd >= 0)
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(write)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
# define INIT_WRITE INTERCEPT_FUNCTION(write)
#else
# define INIT_WRITE
#endif
#if SANITIZER_INTERCEPT_PWRITE
INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count);
if (fd >= 0)
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(pwrite)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
# define INIT_PWRITE INTERCEPT_FUNCTION(pwrite)
#else
# define INIT_PWRITE
#endif
#if SANITIZER_INTERCEPT_PWRITE64
INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count);
if (fd >= 0)
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(pwrite64)(fd, ptr, count);
if (res > 0)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
# define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64)
#else
# define INIT_PWRITE64
#endif
#if SANITIZER_INTERCEPT_PRCTL
INTERCEPTOR(int, prctl, int option,
unsigned long arg2, unsigned long arg3, // NOLINT
unsigned long arg4, unsigned long arg5) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
static const int PR_SET_NAME = 15;
int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
if (option == PR_SET_NAME) {
char buff[16];
internal_strncpy(buff, (char*)arg2, 15);
buff[15] = 0;
COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
}
return res;
}
# define INIT_PRCTL INTERCEPT_FUNCTION(prctl)
#else
# define INIT_PRCTL
#endif // SANITIZER_INTERCEPT_PRCTL
#if SANITIZER_INTERCEPT_SCANF
#include "sanitizer_common_interceptors_scanf.inc"
INTERCEPTOR(int, vscanf, const char *format, va_list ap) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, vscanf, format, ap);
scanf_common(ctx, format, ap);
int res = REAL(vscanf)(format, ap); // NOLINT
return res;
}
INTERCEPTOR(int, vsscanf, const char *str, const char *format, // NOLINT
va_list ap) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, vsscanf, str, format, ap);
scanf_common(ctx, format, ap);
int res = REAL(vsscanf)(str, format, ap); // NOLINT
// FIXME: read of str
return res;
}
INTERCEPTOR(int, vfscanf, void *stream, const char *format, // NOLINT
va_list ap) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, vfscanf, stream, format, ap);
scanf_common(ctx, format, ap);
int res = REAL(vfscanf)(stream, format, ap); // NOLINT
return res;
}
INTERCEPTOR(int, scanf, const char *format, ...) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scanf, format);
va_list ap;
va_start(ap, format);
int res = vscanf(format, ap); // NOLINT
va_end(ap);
return res;
}
INTERCEPTOR(int, fscanf, void* stream, const char *format, ...) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fscanf, stream, format);
va_list ap;
va_start(ap, format);
int res = vfscanf(stream, format, ap); // NOLINT
va_end(ap);
return res;
}
INTERCEPTOR(int, sscanf, const char *str, const char *format, ...) { // NOLINT
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sscanf, str, format); // NOLINT
va_list ap;
va_start(ap, format);
int res = vsscanf(str, format, ap); // NOLINT
va_end(ap);
return res;
}
#define INIT_SCANF \
INTERCEPT_FUNCTION(scanf); \
INTERCEPT_FUNCTION(sscanf); /* NOLINT */ \
INTERCEPT_FUNCTION(fscanf); \
INTERCEPT_FUNCTION(vscanf); \
INTERCEPT_FUNCTION(vsscanf); \
INTERCEPT_FUNCTION(vfscanf)
#else
#define INIT_SCANF
#endif
#define SANITIZER_COMMON_INTERCEPTORS_INIT \
INIT_READ; \
INIT_PREAD; \
INIT_PREAD64; \
INIT_PRCTL; \
INIT_WRITE; \
INIT_SCANF;
//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Scanf implementation for use in *Sanitizer interceptors.
//
//===----------------------------------------------------------------------===//
#include <stdarg.h>
#ifdef _WIN32
#define va_copy(dst, src) ((dst) = (src))
#endif // _WIN32
struct ScanfSpec {
char c;
unsigned size;
};
// One-letter specs.
static const ScanfSpec scanf_specs[] = {
{'p', sizeof(void *)},
{'e', sizeof(float)},
{'E', sizeof(float)},
{'a', sizeof(float)},
{'f', sizeof(float)},
{'g', sizeof(float)},
{'d', sizeof(int)},
{'i', sizeof(int)},
{'o', sizeof(int)},
{'u', sizeof(int)},
{'x', sizeof(int)},
{'X', sizeof(int)},
{'n', sizeof(int)},
{'t', sizeof(PTRDIFF_T)},
{'z', sizeof(SIZE_T)},
{'j', sizeof(INTMAX_T)},
{'h', sizeof(short)}
};
static const unsigned scanf_specs_cnt =
sizeof(scanf_specs) / sizeof(scanf_specs[0]);
// %ll?, %L?, %q? specs
static const ScanfSpec scanf_llspecs[] = {
{'e', sizeof(long double)},
{'f', sizeof(long double)},
{'g', sizeof(long double)},
{'d', sizeof(long long)},
{'i', sizeof(long long)},
{'o', sizeof(long long)},
{'u', sizeof(long long)},
{'x', sizeof(long long)}
};
static const unsigned scanf_llspecs_cnt =
sizeof(scanf_llspecs) / sizeof(scanf_llspecs[0]);
// %l? specs
static const ScanfSpec scanf_lspecs[] = {
{'e', sizeof(double)},
{'f', sizeof(double)},
{'g', sizeof(double)},
{'d', sizeof(long)},
{'i', sizeof(long)},
{'o', sizeof(long)},
{'u', sizeof(long)},
{'x', sizeof(long)},
{'X', sizeof(long)},
};
static const unsigned scanf_lspecs_cnt =
sizeof(scanf_lspecs) / sizeof(scanf_lspecs[0]);
static unsigned match_spec(const struct ScanfSpec *spec, unsigned n, char c) {
for (unsigned i = 0; i < n; ++i)
if (spec[i].c == c)
return spec[i].size;
return 0;
}
static void scanf_common(void *ctx, const char *format, va_list ap_const) {
va_list aq;
va_copy(aq, ap_const);
const char *p = format;
unsigned size;
while (*p) {
if (*p != '%') {
++p;
continue;
}
++p;
if (*p == '*' || *p == '%' || *p == 0) {
++p;
continue;
}
if (*p == '0' || (*p >= '1' && *p <= '9')) {
size = internal_atoll(p);
// +1 for the \0 at the end
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size + 1);
++p;
continue;
}
if (*p == 'L' || *p == 'q') {
++p;
size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
continue;
}
if (*p == 'l') {
++p;
if (*p == 'l') {
++p;
size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
continue;
} else {
size = match_spec(scanf_lspecs, scanf_lspecs_cnt, *p);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
continue;
}
}
if (*p == 'h' && *(p + 1) == 'h') {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), sizeof(char));
p += 2;
continue;
}
size = match_spec(scanf_specs, scanf_specs_cnt, *p);
if (size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
++p;
continue;
}
}
va_end(aq);
}
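// --- Illustrative sketch (not part of the upstream merge) --------------------
// The table-driven matching in scanf_common() boils down to: walk the format
// string and, for each recognized one-letter conversion, report sizeof(its
// argument type) as the written range. A minimal standalone C++ analog with a
// trimmed spec table; all names here are illustrative only.
#include <cstdio>
namespace scanf_sketch {
struct Spec { char c; unsigned size; };
const Spec kSpecs[] = {
    {'d', sizeof(int)}, {'u', sizeof(int)}, {'x', sizeof(int)},
    {'f', sizeof(float)}, {'p', sizeof(void *)}};
unsigned MatchSpec(char c) {
  for (const Spec &s : kSpecs)
    if (s.c == c) return s.size;
  return 0;  // unknown conversion: no write range reported
}
}  // namespace scanf_sketch
int main() {
  const char *fmt = "%d %x %f %p";
  for (const char *p = fmt; *p; ++p) {
    if (*p != '%') continue;
    ++p;
    if (*p == '\0') break;
    if (unsigned size = scanf_sketch::MatchSpec(*p))
      std::printf("%%%c -> %u-byte write range\n", *p, size);
  }
  return 0;
}
// -----------------------------------------------------------------------------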
......@@ -36,6 +36,7 @@ using namespace __sanitizer; // NOLINT
# define UNLIKELY(x) (x)
# define UNUSED
# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
# define ALWAYS_INLINE __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
......@@ -49,6 +50,12 @@ using namespace __sanitizer; // NOLINT
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
# define UNUSED __attribute__((unused))
# define USED __attribute__((used))
# if defined(__i386__) || defined(__x86_64__)
// __builtin_prefetch(x) generates prefetcht0 on x86
# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
# else
# define PREFETCH(x) __builtin_prefetch(x)
# endif
#endif // _MSC_VER
#if defined(_WIN32)
......
//===-- sanitizer_lfstack.h -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Lock-free stack.
// Uses 32/17 bits as an ABA counter on 32-/64-bit platforms.
// The memory passed to Push() must never be munmap'ed.
// The type T must contain a T *next field.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LFSTACK_H
#define SANITIZER_LFSTACK_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_atomic.h"
namespace __sanitizer {
template<typename T>
struct LFStack {
void Clear() {
atomic_store(&head_, 0, memory_order_relaxed);
}
bool Empty() const {
return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;
}
void Push(T *p) {
u64 cmp = atomic_load(&head_, memory_order_relaxed);
for (;;) {
u64 cnt = (cmp & kCounterMask) + kCounterInc;
u64 xch = (u64)(uptr)p | cnt;
p->next = (T*)(uptr)(cmp & kPtrMask);
if (atomic_compare_exchange_weak(&head_, &cmp, xch,
memory_order_release))
break;
}
}
T *Pop() {
u64 cmp = atomic_load(&head_, memory_order_acquire);
for (;;) {
T *cur = (T*)(uptr)(cmp & kPtrMask);
if (cur == 0)
return 0;
T *nxt = cur->next;
u64 cnt = (cmp & kCounterMask);
u64 xch = (u64)(uptr)nxt | cnt;
if (atomic_compare_exchange_weak(&head_, &cmp, xch,
memory_order_acquire))
return cur;
}
}
// private:
static const int kCounterBits = FIRST_32_SECOND_64(32, 17);
static const u64 kPtrMask = ((u64)-1) >> kCounterBits;
static const u64 kCounterMask = ~kPtrMask;
static const u64 kCounterInc = kPtrMask + 1;
atomic_uint64_t head_;
};
}
#endif // #ifndef SANITIZER_LFSTACK_H
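// --- Illustrative sketch (not part of the upstream merge) --------------------
// A simplified std::atomic analog of the pointer|counter packing used by
// LFStack: the low bits hold the node pointer, the high bits an ABA counter
// bumped on every Push. Assumes a 64-bit platform whose user-space pointers
// fit in the low 47 bits; the 17-bit split mirrors the 64-bit case above, but
// the class and its names are illustrative only.
#include <atomic>
#include <cstdint>
#include <cstdio>
struct Node { Node *next; int value; };
class TaggedStack {
 public:
  void Push(Node *p) {
    uint64_t cmp = head_.load(std::memory_order_relaxed);
    for (;;) {
      uint64_t cnt = (cmp & kCounterMask) + kCounterInc;   // bump ABA counter
      uint64_t xch = reinterpret_cast<uint64_t>(p) | cnt;
      p->next = reinterpret_cast<Node *>(cmp & kPtrMask);
      if (head_.compare_exchange_weak(cmp, xch, std::memory_order_release))
        return;
    }
  }
  Node *Pop() {
    uint64_t cmp = head_.load(std::memory_order_acquire);
    for (;;) {
      Node *cur = reinterpret_cast<Node *>(cmp & kPtrMask);
      if (!cur) return nullptr;
      uint64_t xch = reinterpret_cast<uint64_t>(cur->next) | (cmp & kCounterMask);
      if (head_.compare_exchange_weak(cmp, xch, std::memory_order_acquire))
        return cur;
    }
  }
 private:
  static const int kCounterBits = 17;
  static const uint64_t kPtrMask = ~0ULL >> kCounterBits;
  static const uint64_t kCounterMask = ~kPtrMask;
  static const uint64_t kCounterInc = kPtrMask + 1;
  std::atomic<uint64_t> head_{0};
};
int main() {
  TaggedStack s;
  Node a = {nullptr, 1}, b = {nullptr, 2};
  s.Push(&a);
  s.Push(&b);
  while (Node *n = s.Pop()) std::printf("%d\n", n->value);  // prints 2, then 1
  return 0;
}
// -----------------------------------------------------------------------------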
......@@ -32,6 +32,7 @@
#include <unwind.h>
#include <errno.h>
#include <sys/prctl.h>
#include <linux/futex.h>
// Are we using 32-bit or 64-bit syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
......@@ -198,24 +199,31 @@ const char *GetEnv(const char *name) {
return 0; // Not found.
}
void ReExec() {
static const int kMaxArgv = 100;
InternalScopedBuffer<char*> argv(kMaxArgv + 1);
static char *buff;
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
uptr buff_size = 0;
ReadFileToBuffer("/proc/self/cmdline", &buff, &buff_size, 1024 * 1024);
argv[0] = buff;
int argc, i;
for (argc = 1, i = 1; ; i++) {
*arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024);
(*arr)[0] = buff;
int count, i;
for (count = 1, i = 1; ; i++) {
if (buff[i] == 0) {
if (buff[i+1] == 0) break;
argv[argc] = &buff[i+1];
CHECK_LE(argc, kMaxArgv); // FIXME: make this more flexible.
argc++;
(*arr)[count] = &buff[i+1];
CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible.
count++;
}
}
argv[argc] = 0;
execv(argv[0], argv.data());
(*arr)[count] = 0;
}
void ReExec() {
static const int kMaxArgv = 100, kMaxEnvp = 1000;
char **argv, **envp;
ReadNullSepFileToArray("/proc/self/cmdline", &argv, kMaxArgv);
ReadNullSepFileToArray("/proc/self/environ", &envp, kMaxEnvp);
execve(argv[0], argv, envp);
}
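// --- Illustrative sketch (not part of the upstream merge) --------------------
// /proc/self/cmdline and /proc/self/environ are NUL-separated lists; the loop
// in ReadNullSepFileToArray() splits them in place before ReExec() calls
// execve(). The same parsing done with the standard library instead of
// MmapOrDie/ReadFileToBuffer (Linux-only, illustrative names):
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>
int main() {
  std::ifstream f("/proc/self/cmdline", std::ios::binary);
  std::string buff((std::istreambuf_iterator<char>(f)),
                   std::istreambuf_iterator<char>());
  std::vector<std::string> args;
  for (std::string::size_type pos = 0; pos < buff.size();) {
    std::string::size_type end = buff.find('\0', pos);
    if (end == std::string::npos) end = buff.size();
    args.push_back(buff.substr(pos, end - pos));  // one NUL-terminated entry
    pos = end + 1;
  }
  for (const std::string &arg : args) std::cout << arg << '\n';
  return 0;
}
// -----------------------------------------------------------------------------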
void PrepareForSandboxing() {
......@@ -366,16 +374,24 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
}
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
#else
return false;
#endif
}
bool SanitizerGetThreadName(char *name, int max_len) {
#ifdef PR_GET_NAME
char buff[17];
if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
return false;
internal_strncpy(name, buff, max_len);
name[max_len] = 0;
return true;
#else
return false;
#endif
}
#ifndef SANITIZER_GO
......@@ -434,6 +450,32 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
#endif // #ifndef SANITIZER_GO
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
MtxSleeping = 2
};
BlockingMutex::BlockingMutex(LinkerInitialized) {
CHECK_EQ(owner_, 0);
}
void BlockingMutex::Lock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
}
void BlockingMutex::Unlock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
CHECK_NE(v, MtxUnlocked);
if (v == MtxSleeping)
syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
}
} // namespace __sanitizer
#endif // __linux__
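// --- Illustrative sketch (not part of the upstream merge) --------------------
// The futex protocol above uses three states: Unlocked, Locked (uncontended)
// and Sleeping (contended). A minimal standalone analog using the raw futex
// syscall; Linux-only, single-lock, and the names are illustrative. The cast
// of &mu to unsigned* assumes std::atomic<unsigned> is layout-compatible with
// a plain unsigned, which holds on common implementations.
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>
#include <cstdio>
namespace futex_sketch {
enum State { kUnlocked = 0, kLocked = 1, kSleeping = 2 };
std::atomic<unsigned> mu(kUnlocked);
long Futex(int op, unsigned val) {
  return syscall(__NR_futex, reinterpret_cast<unsigned *>(&mu), op, val, 0, 0, 0);
}
void Lock() {
  // Fast path: uncontended acquisition.
  if (mu.exchange(kLocked, std::memory_order_acquire) == kUnlocked) return;
  // Slow path: mark the lock contended and sleep until it is released.
  while (mu.exchange(kSleeping, std::memory_order_acquire) != kUnlocked)
    Futex(FUTEX_WAIT, kSleeping);
}
void Unlock() {
  // Wake one sleeper only if somebody actually went to sleep.
  if (mu.exchange(kUnlocked, std::memory_order_release) == kSleeping)
    Futex(FUTEX_WAKE, 1);
}
}  // namespace futex_sketch
int main() {
  futex_sketch::Lock();
  std::puts("in the critical section");
  futex_sketch::Unlock();
  return 0;
}
// -----------------------------------------------------------------------------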
......@@ -70,6 +70,8 @@ struct IntrusiveList {
void append_front(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
if (l->empty())
return;
if (empty()) {
*this = *l;
} else if (!l->empty()) {
......@@ -82,6 +84,8 @@ struct IntrusiveList {
void append_back(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
if (l->empty())
return;
if (empty()) {
*this = *l;
} else {
......
......@@ -28,6 +28,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>
namespace __sanitizer {
......@@ -265,6 +266,25 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
BlockingMutex::BlockingMutex(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
void BlockingMutex::Lock() {
CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
CHECK(OS_SPINLOCK_INIT == 0);
CHECK(owner_ != (uptr)pthread_self());
OSSpinLockLock((OSSpinLock*)&opaque_storage_);
CHECK(!owner_);
owner_ = (uptr)pthread_self();
}
void BlockingMutex::Unlock() {
CHECK(owner_ == (uptr)pthread_self());
owner_ = 0;
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
} // namespace __sanitizer
#endif // __APPLE__
......@@ -25,11 +25,15 @@ class StaticSpinMutex {
}
void Lock() {
if (atomic_exchange(&state_, 1, memory_order_acquire) == 0)
if (TryLock())
return;
LockSlow();
}
bool TryLock() {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
void Unlock() {
atomic_store(&state_, 0, memory_order_release);
}
......@@ -61,6 +65,16 @@ class SpinMutex : public StaticSpinMutex {
void operator=(const SpinMutex&);
};
class BlockingMutex {
public:
explicit BlockingMutex(LinkerInitialized);
void Lock();
void Unlock();
private:
uptr opaque_storage_[10];
uptr owner_; // for debugging
};
template<typename MutexType>
class GenericScopedLock {
public:
......@@ -100,6 +114,7 @@ class GenericScopedReadLock {
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
} // namespace __sanitizer
......
......@@ -13,15 +13,24 @@
#include "sanitizer_internal_defs.h"
#if !defined(_WIN32)
# define SANITIZER_INTERCEPT_READ 1
# define SANITIZER_INTERCEPT_PREAD 1
# define SI_NOT_WINDOWS 1
#else
# define SANITIZER_INTERCEPT_READ 0
# define SANITIZER_INTERCEPT_PREAD 0
# define SI_NOT_WINDOWS 0
#endif
#if defined(__linux__) && !defined(ANDROID)
# define SANITIZER_INTERCEPT_PREAD64 1
# define SI_LINUX_NOT_ANDROID 1
#else
# define SANITIZER_INTERCEPT_PREAD64 0
# define SI_LINUX_NOT_ANDROID 0
#endif
# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PRCTL SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCANF 0
//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"
namespace __sanitizer {
template<typename Node> class QuarantineCache;
struct QuarantineBatch {
static const uptr kSize = 1024;
QuarantineBatch *next;
uptr size;
uptr count;
void *batch[kSize];
};
// The callback interface is:
// void cb.Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
template<typename Callback, typename Node>
class Quarantine {
public:
typedef QuarantineCache<Callback> Cache;
explicit Quarantine(LinkerInitialized)
: cache_(LINKER_INITIALIZED) {
}
void Init(uptr size, uptr cache_size) {
max_size_ = size;
min_size_ = size / 10 * 9; // 90% of max size.
max_cache_size_ = cache_size;
}
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
c->Enqueue(cb, ptr, size);
if (c->Size() > max_cache_size_)
Drain(c, cb);
}
void NOINLINE Drain(Cache *c, Callback cb) {
{
SpinMutexLock l(&cache_mutex_);
cache_.Transfer(c);
}
if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
Recycle(cb);
}
private:
// Read-only data.
char pad0_[kCacheLineSize];
uptr max_size_;
uptr min_size_;
uptr max_cache_size_;
char pad1_[kCacheLineSize];
SpinMutex cache_mutex_;
SpinMutex recycle_mutex_;
Cache cache_;
char pad2_[kCacheLineSize];
void NOINLINE Recycle(Callback cb) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);
while (cache_.Size() > min_size_) {
QuarantineBatch *b = cache_.DequeueBatch();
tmp.EnqueueBatch(b);
}
}
recycle_mutex_.Unlock();
DoRecycle(&tmp, cb);
}
void NOINLINE DoRecycle(Cache *c, Callback cb) {
while (QuarantineBatch *b = c->DequeueBatch()) {
const uptr kPrefetch = 16;
for (uptr i = 0; i < kPrefetch; i++)
PREFETCH(b->batch[i]);
for (uptr i = 0; i < b->count; i++) {
PREFETCH(b->batch[i + kPrefetch]);
cb.Recycle((Node*)b->batch[i]);
}
cb.Deallocate(b);
}
}
};
// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
public:
explicit QuarantineCache(LinkerInitialized) {
}
QuarantineCache()
: size_() {
list_.clear();
}
uptr Size() const {
return atomic_load(&size_, memory_order_relaxed);
}
void Enqueue(Callback cb, void *ptr, uptr size) {
if (list_.empty() || list_.back()->count == QuarantineBatch::kSize)
AllocBatch(cb);
QuarantineBatch *b = list_.back();
b->batch[b->count++] = ptr;
b->size += size;
SizeAdd(size);
}
void Transfer(QuarantineCache *c) {
list_.append_back(&c->list_);
SizeAdd(c->Size());
atomic_store(&c->size_, 0, memory_order_relaxed);
}
void EnqueueBatch(QuarantineBatch *b) {
list_.push_back(b);
SizeAdd(b->size);
}
QuarantineBatch *DequeueBatch() {
if (list_.empty())
return 0;
QuarantineBatch *b = list_.front();
list_.pop_front();
SizeAdd(-b->size);
return b;
}
private:
IntrusiveList<QuarantineBatch> list_;
atomic_uintptr_t size_;
void SizeAdd(uptr add) {
atomic_store(&size_, Size() + add, memory_order_relaxed);
}
QuarantineBatch *NOINLINE AllocBatch(Callback cb) {
QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
b->count = 0;
b->size = 0;
list_.push_back(b);
return b;
}
};
}
#endif // #ifndef SANITIZER_QUARANTINE_H
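// --- Illustrative sketch (not part of the upstream merge) --------------------
// The flow described in the header comment, reduced to its essentials: defer
// frees into a per-thread cache, drain the cache into a global FIFO once it
// grows, and recycle (actually free) the oldest blocks once the FIFO exceeds
// its limit. A single-threaded toy built on the standard library; all names
// are illustrative, and the real class is batch-based and mutex-protected.
#include <cstdlib>
#include <deque>
#include <utility>
class ToyQuarantine {
 public:
  ToyQuarantine(std::size_t max_size, std::size_t max_cache)
      : max_size_(max_size), max_cache_(max_cache),
        cache_bytes_(0), global_bytes_(0) {}
  // Analog of Quarantine::Put(): defer freeing of |p|.
  void Put(void *p, std::size_t size) {
    cache_.push_back(std::make_pair(p, size));
    cache_bytes_ += size;
    if (cache_bytes_ > max_cache_) Drain();
  }
 private:
  typedef std::pair<void *, std::size_t> Block;
  void Drain() {  // analog of Drain() followed by Recycle()
    for (std::size_t i = 0; i < cache_.size(); i++) {
      global_.push_back(cache_[i]);
      global_bytes_ += cache_[i].second;
    }
    cache_.clear();
    cache_bytes_ = 0;
    while (global_bytes_ > max_size_) {  // oldest memory is recycled first
      Block b = global_.front();
      global_.pop_front();
      global_bytes_ -= b.second;
      std::free(b.first);
    }
  }
  std::size_t max_size_, max_cache_;
  std::deque<Block> cache_, global_;
  std::size_t cache_bytes_, global_bytes_;
};
int main() {
  ToyQuarantine q(4096 /*max_size*/, 1024 /*max_cache*/);
  for (int i = 0; i < 64; i++) q.Put(std::malloc(256), 256);
  return 0;  // blocks still quarantined at exit are deliberately left alone
}
// -----------------------------------------------------------------------------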
......@@ -66,7 +66,18 @@ static const char *ExtractInt(const char *str, const char *delims,
char *buff;
const char *ret = ExtractToken(str, delims, &buff);
if (buff != 0) {
*result = internal_atoll(buff);
*result = (int)internal_atoll(buff);
}
InternalFree(buff);
return ret;
}
static const char *ExtractUptr(const char *str, const char *delims,
uptr *result) {
char *buff;
const char *ret = ExtractToken(str, delims, &buff);
if (buff != 0) {
*result = (uptr)internal_atoll(buff);
}
InternalFree(buff);
return ret;
......@@ -96,66 +107,15 @@ class ExternalSymbolizer {
CHECK_NE(output_fd_, kInvalidFd);
}
// Returns the number of frames for a given address, or zero if
// symbolization failed.
uptr SymbolizeCode(uptr addr, const char *module_name, uptr module_offset,
AddressInfo *frames, uptr max_frames) {
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
CHECK(module_name);
// FIXME: Make sure this buffer always has sufficient size to hold
// large debug info.
static const int kMaxBufferSize = 4096;
InternalScopedBuffer<char> buffer(kMaxBufferSize);
char *buffer_data = buffer.data();
internal_snprintf(buffer_data, kMaxBufferSize, "%s 0x%zx\n",
module_name, module_offset);
if (!writeToSymbolizer(buffer_data, internal_strlen(buffer_data)))
internal_snprintf(buffer_, kBufferSize, "%s%s 0x%zx\n",
is_data ? "DATA " : "", module_name, module_offset);
if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
return 0;
if (!readFromSymbolizer(buffer_data, kMaxBufferSize))
if (!readFromSymbolizer(buffer_, kBufferSize))
return 0;
const char *str = buffer_data;
uptr frame_id;
CHECK_GT(max_frames, 0);
for (frame_id = 0; frame_id < max_frames; frame_id++) {
AddressInfo *info = &frames[frame_id];
char *function_name = 0;
str = ExtractToken(str, "\n", &function_name);
CHECK(function_name);
if (function_name[0] == '\0') {
// There are no more frames.
break;
}
info->Clear();
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
info->function = function_name;
// Parse <file>:<line>:<column> buffer.
char *file_line_info = 0;
str = ExtractToken(str, "\n", &file_line_info);
CHECK(file_line_info);
const char *line_info = ExtractToken(file_line_info, ":", &info->file);
line_info = ExtractInt(line_info, ":", &info->line);
line_info = ExtractInt(line_info, "", &info->column);
InternalFree(file_line_info);
// Functions and filenames can be "??", in which case we write 0
// to address info to mark that names are unknown.
if (0 == internal_strcmp(info->function, "??")) {
InternalFree(info->function);
info->function = 0;
}
if (0 == internal_strcmp(info->file, "??")) {
InternalFree(info->file);
info->file = 0;
}
}
if (frame_id == 0) {
// Make sure we return at least one frame.
AddressInfo *info = &frames[0];
info->Clear();
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
frame_id = 1;
}
return frame_id;
return buffer_;
}
bool Restart() {
......@@ -189,6 +149,7 @@ class ExternalSymbolizer {
}
return true;
}
bool writeToSymbolizer(const char *buffer, uptr length) {
if (length == 0)
return true;
......@@ -204,6 +165,9 @@ class ExternalSymbolizer {
int input_fd_;
int output_fd_;
static const uptr kBufferSize = 16 * 1024;
char buffer_[kBufferSize];
static const uptr kMaxTimesRestarted = 5;
uptr times_restarted_;
};
......@@ -220,30 +184,8 @@ class Symbolizer {
return 0;
const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address();
uptr actual_frames = 0;
if (external_symbolizer_ == 0) {
ReportExternalSymbolizerError(
"WARNING: Trying to symbolize code, but external "
"symbolizer is not initialized!\n");
} else {
while (true) {
actual_frames = external_symbolizer_->SymbolizeCode(
addr, module_name, module_offset, frames, max_frames);
if (actual_frames > 0) {
// Symbolization was successful.
break;
}
// Try to restart symbolizer subprocess. If we don't succeed, forget
// about it and don't try to use it later.
if (!external_symbolizer_->Restart()) {
ReportExternalSymbolizerError(
"WARNING: Failed to use and restart external symbolizer!\n");
external_symbolizer_ = 0;
break;
}
}
}
if (external_symbolizer_ == 0) {
const char *str = SendCommand(false, module_name, module_offset);
if (str == 0) {
// External symbolizer was not initialized or failed. Fill only data
// about module name and offset.
AddressInfo *info = &frames[0];
......@@ -251,17 +193,66 @@ class Symbolizer {
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
return 1;
}
// Otherwise, the data was filled by external symbolizer.
return actual_frames;
uptr frame_id = 0;
for (frame_id = 0; frame_id < max_frames; frame_id++) {
AddressInfo *info = &frames[frame_id];
char *function_name = 0;
str = ExtractToken(str, "\n", &function_name);
CHECK(function_name);
if (function_name[0] == '\0') {
// There are no more frames.
break;
}
info->Clear();
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
info->function = function_name;
// Parse <file>:<line>:<column> buffer.
char *file_line_info = 0;
str = ExtractToken(str, "\n", &file_line_info);
CHECK(file_line_info);
const char *line_info = ExtractToken(file_line_info, ":", &info->file);
line_info = ExtractInt(line_info, ":", &info->line);
line_info = ExtractInt(line_info, "", &info->column);
InternalFree(file_line_info);
// Functions and filenames can be "??", in which case we write 0
// to address info to mark that names are unknown.
if (0 == internal_strcmp(info->function, "??")) {
InternalFree(info->function);
info->function = 0;
}
if (0 == internal_strcmp(info->file, "??")) {
InternalFree(info->file);
info->file = 0;
}
}
if (frame_id == 0) {
// Make sure we return at least one frame.
AddressInfo *info = &frames[0];
info->Clear();
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
frame_id = 1;
}
return frame_id;
}
bool SymbolizeData(uptr addr, AddressInfo *frame) {
bool SymbolizeData(uptr addr, DataInfo *info) {
LoadedModule *module = FindModuleForAddress(addr);
if (module == 0)
return false;
const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address();
frame->FillAddressAndModuleInfo(addr, module_name, module_offset);
internal_memset(info, 0, sizeof(*info));
info->address = addr;
info->module = internal_strdup(module_name);
info->module_offset = module_offset;
const char *str = SendCommand(true, module_name, module_offset);
if (str == 0)
return true;
str = ExtractToken(str, "\n", &info->name);
str = ExtractUptr(str, " ", &info->start);
str = ExtractUptr(str, "\n", &info->size);
info->start += module->base_address();
return true;
}
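// --- Illustrative sketch (not part of the upstream merge) --------------------
// Judging by the Extract* calls in SymbolizeData() above, a DATA reply from
// the external symbolizer has the shape "<name>\n<start> <size>\n". Parsing
// such a reply in isolation; the sample reply string is assumed, not taken
// from a real symbolizer session.
#include <cstdio>
#include <string>
int main() {
  const std::string reply = "global_counter\n4096 16\n";  // assumed sample
  std::string::size_type nl = reply.find('\n');
  std::string name = reply.substr(0, nl);
  unsigned long start = 0, size = 0;
  std::sscanf(reply.c_str() + nl + 1, "%lu %lu", &start, &size);
  std::printf("%s: offset 0x%lx, %lu bytes\n", name.c_str(), start, size);
  return 0;
}
// -----------------------------------------------------------------------------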
......@@ -276,6 +267,29 @@ class Symbolizer {
}
private:
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
if (external_symbolizer_ == 0) {
ReportExternalSymbolizerError(
"WARNING: Trying to symbolize code, but external "
"symbolizer is not initialized!\n");
return 0;
}
for (;;) {
char *reply = external_symbolizer_->SendCommand(is_data, module_name,
module_offset);
if (reply)
return reply;
// Try to restart symbolizer subprocess. If we don't succeed, forget
// about it and don't try to use it later.
if (!external_symbolizer_->Restart()) {
ReportExternalSymbolizerError(
"WARNING: Failed to use and restart external symbolizer!\n");
external_symbolizer_ = 0;
return 0;
}
}
}
LoadedModule *FindModuleForAddress(uptr address) {
if (modules_ == 0) {
modules_ = (LoadedModule*)(symbolizer_allocator.Allocate(
......@@ -316,8 +330,8 @@ uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
return symbolizer.SymbolizeCode(address, frames, max_frames);
}
bool SymbolizeData(uptr address, AddressInfo *frame) {
return symbolizer.SymbolizeData(address, frame);
bool SymbolizeData(uptr address, DataInfo *info) {
return symbolizer.SymbolizeData(address, info);
}
bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
......
......@@ -51,12 +51,21 @@ struct AddressInfo {
}
};
struct DataInfo {
uptr address;
char *module;
uptr module_offset;
char *name;
uptr start;
uptr size;
};
// Fills at most "max_frames" elements of "frames" with descriptions
// for a given address (in all inlined functions). Returns the number
// of descriptions actually filled.
// This function should NOT be called from two threads simultaneously.
uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
bool SymbolizeData(uptr address, AddressInfo *frame);
bool SymbolizeData(uptr address, DataInfo *info);
// Attempts to demangle the provided C++ mangled name.
const char *Demangle(const char *Name);
......
......@@ -18,6 +18,8 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
......@@ -224,6 +226,42 @@ int internal_sched_yield() {
return 0;
}
// ---------------------- BlockingMutex ---------------- {{{1
enum LockState {
LOCK_UNINITIALIZED = 0,
LOCK_READY = -1,
};
BlockingMutex::BlockingMutex(LinkerInitialized li) {
// FIXME: see comments in BlockingMutex::Lock() for the details.
CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
owner_ = LOCK_READY;
}
void BlockingMutex::Lock() {
if (owner_ == LOCK_UNINITIALIZED) {
// FIXME: hm, global BlockingMutex objects are not initialized?!?
// This might be a side effect of the clang+cl+link Frankenbuild...
new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));
// FIXME: If it turns out the linker doesn't invoke our
// constructors, we should probably manually Lock/Unlock all the global
// locks while we're starting in one thread to avoid double-init races.
}
EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
CHECK(owner_ == LOCK_READY);
owner_ = GetThreadSelf();
}
void BlockingMutex::Unlock() {
CHECK(owner_ == GetThreadSelf());
owner_ = LOCK_READY;
LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}
} // namespace __sanitizer
#endif // _WIN32
......@@ -162,6 +162,12 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
MemoryRead8Byte(thr, pc, (uptr)d);
}
void FdAccess(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
MemoryRead8Byte(thr, pc, (uptr)d);
}
void FdClose(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
......
......@@ -39,6 +39,7 @@ namespace __tsan {
void FdInit();
void FdAcquire(ThreadState *thr, uptr pc, int fd);
void FdRelease(ThreadState *thr, uptr pc, int fd);
void FdAccess(ThreadState *thr, uptr pc, int fd);
void FdClose(ThreadState *thr, uptr pc, int fd);
void FdFileCreate(ThreadState *thr, uptr pc, int fd);
void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd);
......
......@@ -1239,33 +1239,6 @@ TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
return res;
}
TSAN_INTERCEPTOR(long_t, read, int fd, void *buf, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(read, fd, buf, sz);
int res = REAL(read)(fd, buf, sz);
if (res >= 0 && fd >= 0) {
FdAcquire(thr, pc, fd);
}
return res;
}
TSAN_INTERCEPTOR(long_t, pread, int fd, void *buf, long_t sz, unsigned off) {
SCOPED_TSAN_INTERCEPTOR(pread, fd, buf, sz, off);
int res = REAL(pread)(fd, buf, sz, off);
if (res >= 0 && fd >= 0) {
FdAcquire(thr, pc, fd);
}
return res;
}
TSAN_INTERCEPTOR(long_t, pread64, int fd, void *buf, long_t sz, u64 off) {
SCOPED_TSAN_INTERCEPTOR(pread64, fd, buf, sz, off);
int res = REAL(pread64)(fd, buf, sz, off);
if (res >= 0 && fd >= 0) {
FdAcquire(thr, pc, fd);
}
return res;
}
TSAN_INTERCEPTOR(long_t, readv, int fd, void *vec, int cnt) {
SCOPED_TSAN_INTERCEPTOR(readv, fd, vec, cnt);
int res = REAL(readv)(fd, vec, cnt);
......@@ -1284,30 +1257,6 @@ TSAN_INTERCEPTOR(long_t, preadv64, int fd, void *vec, int cnt, u64 off) {
return res;
}
TSAN_INTERCEPTOR(long_t, write, int fd, void *buf, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(write, fd, buf, sz);
if (fd >= 0)
FdRelease(thr, pc, fd);
int res = REAL(write)(fd, buf, sz);
return res;
}
TSAN_INTERCEPTOR(long_t, pwrite, int fd, void *buf, long_t sz, unsigned off) {
SCOPED_TSAN_INTERCEPTOR(pwrite, fd, buf, sz, off);
if (fd >= 0)
FdRelease(thr, pc, fd);
int res = REAL(pwrite)(fd, buf, sz, off);
return res;
}
TSAN_INTERCEPTOR(long_t, pwrite64, int fd, void *buf, long_t sz, u64 off) {
SCOPED_TSAN_INTERCEPTOR(pwrite64, fd, buf, sz, off);
if (fd >= 0)
FdRelease(thr, pc, fd);
int res = REAL(pwrite64)(fd, buf, sz, off);
return res;
}
TSAN_INTERCEPTOR(long_t, writev, int fd, void *vec, int cnt) {
SCOPED_TSAN_INTERCEPTOR(writev, fd, vec, cnt);
if (fd >= 0)
......@@ -1449,6 +1398,8 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
FdRelease(thr, pc, epfd);
}
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
if (fd >= 0)
FdAccess(thr, pc, fd);
return res;
}
......@@ -1641,6 +1592,33 @@ TSAN_INTERCEPTOR(int, fork, int fake) {
return pid;
}
struct TsanInterceptorContext {
ThreadState *thr;
const uptr caller_pc;
const uptr pc;
};
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext*)ctx)->thr, \
((TsanInterceptorContext*)ctx)->pc, \
(uptr)ptr, size, true)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext*)ctx)->thr, \
((TsanInterceptorContext*)ctx)->pc, \
(uptr)ptr, size, false)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__) \
TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
ctx = (void*)&_ctx; \
(void)ctx;
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
FdAcquire(((TsanInterceptorContext*)ctx)->thr, pc, fd)
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
FdRelease(((TsanInterceptorContext*)ctx)->thr, pc, fd)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
ThreadSetName(((TsanInterceptorContext*)ctx)->thr, name)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
namespace __tsan {
void ProcessPendingSignals(ThreadState *thr) {
......@@ -1675,6 +1653,7 @@ void ProcessPendingSignals(ThreadState *thr) {
(uptr)sigactions[sig].sa_sigaction :
(uptr)sigactions[sig].sa_handler;
stack.Init(&pc, 1);
Lock l(&ctx->thread_mtx);
ScopedReport rep(ReportTypeErrnoInSignal);
if (!IsFiredSuppression(ctx, rep, stack)) {
rep.AddStack(&stack);
......@@ -1703,6 +1682,8 @@ void InitializeInterceptors() {
REAL(memcpy) = internal_memcpy;
REAL(memcmp) = internal_memcmp;
SANITIZER_COMMON_INTERCEPTORS_INIT;
TSAN_INTERCEPT(longjmp);
TSAN_INTERCEPT(siglongjmp);
......@@ -1806,14 +1787,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
TSAN_INTERCEPT(read);
TSAN_INTERCEPT(pread);
TSAN_INTERCEPT(pread64);
TSAN_INTERCEPT(readv);
TSAN_INTERCEPT(preadv64);
TSAN_INTERCEPT(write);
TSAN_INTERCEPT(pwrite);
TSAN_INTERCEPT(pwrite64);
TSAN_INTERCEPT(writev);
TSAN_INTERCEPT(pwritev64);
TSAN_INTERCEPT(send);
......
......@@ -46,6 +46,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
Context *ctx = CTX();
StackTrace stack;
stack.ObtainCurrent(thr, pc);
Lock l(&ctx->thread_mtx);
ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) {
rep.AddStack(&stack);
......
......@@ -102,16 +102,17 @@ static void PrintMop(const ReportMop *mop, bool first) {
static void PrintLocation(const ReportLocation *loc) {
char thrbuf[kThreadBufSize];
if (loc->type == ReportLocationGlobal) {
Printf(" Location is global '%s' of size %zu at %zx %s:%d (%s+%p)\n\n",
loc->name, loc->size, loc->addr, loc->file, loc->line,
loc->module, loc->offset);
Printf(" Location is global '%s' of size %zu at %zx (%s+%p)\n\n",
loc->name, loc->size, loc->addr, loc->module, loc->offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
loc->size, loc->addr, thread_name(thrbuf, loc->tid));
PrintStack(loc->stack);
} else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s\n\n", thread_name(thrbuf, loc->tid));
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
} else if (loc->type == ReportLocationTLS) {
Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
} else if (loc->type == ReportLocationFD) {
Printf(" Location is file descriptor %d created by %s at:\n",
loc->fd, thread_name(thrbuf, loc->tid));
......
......@@ -56,6 +56,7 @@ enum ReportLocationType {
ReportLocationGlobal,
ReportLocationHeap,
ReportLocationStack,
ReportLocationTLS,
ReportLocationFD
};
......
......@@ -53,6 +53,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
s->is_broken = true;
Lock l(&ctx->thread_mtx);
ScopedReport rep(ReportTypeMutexDestroyLocked);
rep.AddMutex(s);
StackTrace trace;
......
......@@ -119,6 +119,7 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) {
ScopedReport::ScopedReport(ReportType typ) {
ctx_ = CTX();
ctx_->thread_mtx.CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
......@@ -185,15 +186,37 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
#ifndef TSAN_GO
static ThreadContext *FindThread(int unique_id) {
CTX()->thread_mtx.CheckLocked();
Context *ctx = CTX();
ctx->thread_mtx.CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
ThreadContext *tctx = CTX()->threads[i];
ThreadContext *tctx = ctx->threads[i];
if (tctx && tctx->unique_id == unique_id) {
return tctx;
}
}
return 0;
}
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
Context *ctx = CTX();
ctx->thread_mtx.CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
ThreadContext *tctx = ctx->threads[i];
if (tctx == 0 || tctx->status != ThreadStatusRunning)
continue;
ThreadState *thr = tctx->thr;
CHECK(thr);
if (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) {
*is_stack = true;
return tctx;
}
if (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size) {
*is_stack = false;
return tctx;
}
}
return 0;
}
#endif
void ScopedReport::AddMutex(const SyncVar *s) {
......@@ -274,25 +297,21 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
AddThread(tctx);
return;
}
#endif
ReportStack *symb = SymbolizeData(addr);
if (symb) {
bool is_stack = false;
if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
loc->type = ReportLocationGlobal;
loc->addr = addr;
loc->size = size;
loc->module = symb->module ? internal_strdup(symb->module) : 0;
loc->offset = symb->offset;
loc->tid = 0;
loc->name = symb->func ? internal_strdup(symb->func) : 0;
loc->file = symb->file ? internal_strdup(symb->file) : 0;
loc->line = symb->line;
loc->stack = 0;
internal_free(symb);
loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
loc->tid = tctx->tid;
AddThread(tctx);
}
ReportLocation *loc = SymbolizeData(addr);
if (loc) {
rep_->locs.PushBack(loc);
return;
}
#endif
}
#ifndef TSAN_GO
......@@ -386,7 +405,7 @@ static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
uptr addr_min, uptr addr_max) {
Context *ctx = CTX();
bool equal_stack = false;
RacyStacks hash = {};
RacyStacks hash;
if (flags()->suppress_equal_stacks) {
hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
......
......@@ -202,6 +202,7 @@ void StatOutput(u64 *stat) {
name[StatInt_pipe] = " pipe ";
name[StatInt_pipe2] = " pipe2 ";
name[StatInt_read] = " read ";
name[StatInt_prctl] = " prctl ";
name[StatInt_pread] = " pread ";
name[StatInt_pread64] = " pread64 ";
name[StatInt_readv] = " readv ";
......@@ -233,6 +234,12 @@ void StatOutput(u64 *stat) {
name[StatInt_nanosleep] = " nanosleep ";
name[StatInt_gettimeofday] = " gettimeofday ";
name[StatInt_fork] = " fork ";
name[StatInt_vscanf] = " vscanf ";
name[StatInt_vsscanf] = " vsscanf ";
name[StatInt_vfscanf] = " vfscanf ";
name[StatInt_scanf] = " scanf ";
name[StatInt_sscanf] = " sscanf ";
name[StatInt_fscanf] = " fscanf ";
name[StatAnnotation] = "Dynamic annotations ";
name[StatAnnotateHappensBefore] = " HappensBefore ";
......
......@@ -197,6 +197,7 @@ enum StatType {
StatInt_pipe,
StatInt_pipe2,
StatInt_read,
StatInt_prctl,
StatInt_pread,
StatInt_pread64,
StatInt_readv,
......@@ -232,6 +233,12 @@ enum StatType {
StatInt_nanosleep,
StatInt_gettimeofday,
StatInt_fork,
StatInt_vscanf,
StatInt_vsscanf,
StatInt_vfscanf,
StatInt_scanf,
StatInt_sscanf,
StatInt_fscanf,
// Dynamic annotations.
StatAnnotation,
......
......@@ -27,21 +27,24 @@ ReportStack *NewReportStackEntry(uptr addr) {
return ent;
}
// Strip module path to make output shorter.
static char *StripModuleName(const char *module) {
if (module == 0)
return 0;
const char *short_module_name = internal_strrchr(module, '/');
if (short_module_name)
short_module_name += 1;
else
short_module_name = module;
return internal_strdup(short_module_name);
}
static ReportStack *NewReportStackEntry(const AddressInfo &info) {
ReportStack *ent = NewReportStackEntry(info.address);
if (info.module) {
// Strip module path to make output shorter.
const char *short_module_name = internal_strrchr(info.module, '/');
if (short_module_name)
short_module_name += 1;
else
short_module_name = info.module;
ent->module = internal_strdup(short_module_name);
}
ent->module = StripModuleName(info.module);
ent->offset = info.module_offset;
if (info.function) {
if (info.function)
ent->func = internal_strdup(info.function);
}
if (info.file)
ent->file = internal_strdup(info.file);
ent->line = info.line;
......@@ -76,14 +79,23 @@ ReportStack *SymbolizeCode(uptr addr) {
return SymbolizeCodeAddr2Line(addr);
}
ReportStack *SymbolizeData(uptr addr) {
if (flags()->external_symbolizer_path[0]) {
AddressInfo frame;
if (!__sanitizer::SymbolizeData(addr, &frame))
return 0;
return NewReportStackEntry(frame);
}
return SymbolizeDataAddr2Line(addr);
ReportLocation *SymbolizeData(uptr addr) {
if (flags()->external_symbolizer_path[0] == 0)
return 0;
DataInfo info;
if (!__sanitizer::SymbolizeData(addr, &info))
return 0;
ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack,
sizeof(ReportLocation));
internal_memset(ent, 0, sizeof(*ent));
ent->type = ReportLocationGlobal;
ent->module = StripModuleName(info.module);
ent->offset = info.module_offset;
if (info.name)
ent->name = internal_strdup(info.name);
ent->addr = info.start;
ent->size = info.size;
return ent;
}
} // namespace __tsan
......@@ -17,10 +17,9 @@
namespace __tsan {
ReportStack *SymbolizeCode(uptr addr);
ReportStack *SymbolizeData(uptr addr);
ReportLocation *SymbolizeData(uptr addr);
ReportStack *SymbolizeCodeAddr2Line(uptr addr);
ReportStack *SymbolizeDataAddr2Line(uptr addr);
ReportStack *NewReportStackEntry(uptr addr);
......