Commit 10189819 by Maxim Ostapenko Committed by Maxim Ostapenko

All source files: Merge from upstream 285547.

libsanitizer/

	* All source files: Merge from upstream 285547.
	* configure.tgt (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): New
	variable.
	* configure.ac (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): Handle it.
	* asan/Makefile.am (asan_files): Add new files.
	* asan/Makefile.in: Regenerate.
	* ubsan/Makefile.in: Likewise.
	* lsan/Makefile.in: Likewise.
	* tsan/Makefile.am (tsan_files): Add new files.
	* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
	(EXTRA_libsanitizer_common_la_SOURCES): Define.
	(libsanitizer_common_la_LIBADD): Likewise.
	(libsanitizer_common_la_DEPENDENCIES): Likewise.
	* sanitizer_common/Makefile.in: Regenerate.
	* interception/Makefile.in: Likewise.
	* libbacktrace/Makefile.in: Likewise.
	* Makefile.in: Likewise.
	* configure: Likewise.
	* merge.sh: Handle builtins/assembly.h merging.
	* builtins/assembly.h: New file.
	* asan/libtool-version: Bump the libasan SONAME.

From-SVN: r241977
parent f31d9224
2016-11-09 Maxim Ostapenko <m.ostapenko@samsung.com>
* All source files: Merge from upstream 285547.
* configure.tgt (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): New
variable.
* configure.ac (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): Handle it.
* asan/Makefile.am (asan_files): Add new files.
* asan/Makefile.in: Regenerate.
* ubsan/Makefile.in: Likewise.
* lsan/Makefile.in: Likewise.
* tsan/Makefile.am (tsan_files): Add new files.
* tsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
(EXTRA_libsanitizer_common_la_SOURCES): Define.
(libsanitizer_common_la_LIBADD): Likewise.
(libsanitizer_common_la_DEPENDENCIES): Likewise.
* sanitizer_common/Makefile.in: Regenerate.
* interception/Makefile.in: Likewise.
* libbacktrace/Makefile.in: Likewise.
* Makefile.in: Likewise.
* configure: Likewise.
* merge.sh: Handle builtins/assembly.h merging.
* builtins/assembly.h: New file.
* asan/libtool-version: Bump the libasan SONAME.
2016-09-21 Jakub Jelinek <jakub@redhat.com>
PR sanitizer/77567
......
253555
285547
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
......@@ -210,6 +210,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
......
......@@ -19,6 +19,8 @@ asan_files = \
asan_activation.cc \
asan_allocator.cc \
asan_debugging.cc \
asan_descriptions.cc \
asan_errors.cc \
asan_fake_stack.cc \
asan_flags.cc \
asan_globals.cc \
......@@ -28,6 +30,7 @@ asan_files = \
asan_malloc_linux.cc \
asan_malloc_mac.cc \
asan_malloc_win.cc \
asan_memory_profile.cc \
asan_new_delete.cc \
asan_poisoning.cc \
asan_posix.cc \
......
......@@ -112,9 +112,10 @@ libasan_la_DEPENDENCIES = \
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1)
am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
asan_fake_stack.lo asan_flags.lo asan_globals.lo \
asan_interceptors.lo asan_linux.lo asan_mac.lo \
asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
asan_descriptions.lo asan_errors.lo asan_fake_stack.lo \
asan_flags.lo asan_globals.lo asan_interceptors.lo \
asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
asan_malloc_mac.lo asan_malloc_win.lo asan_memory_profile.lo \
asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
asan_suppressions.lo asan_thread.lo asan_win.lo \
......@@ -219,6 +220,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
......@@ -308,6 +310,8 @@ asan_files = \
asan_activation.cc \
asan_allocator.cc \
asan_debugging.cc \
asan_descriptions.cc \
asan_errors.cc \
asan_fake_stack.cc \
asan_flags.cc \
asan_globals.cc \
......@@ -317,6 +321,7 @@ asan_files = \
asan_malloc_linux.cc \
asan_malloc_mac.cc \
asan_malloc_win.cc \
asan_memory_profile.cc \
asan_new_delete.cc \
asan_poisoning.cc \
asan_posix.cc \
......@@ -454,6 +459,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_descriptions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_errors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
......@@ -463,6 +470,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_memory_profile.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_new_delete.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_poisoning.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_posix.Plo@am__quote@
......
......@@ -45,6 +45,7 @@ static struct AsanDeactivatedFlags {
FlagParser parser;
RegisterActivationFlags(&parser, &f, &cf);
cf.SetDefaults();
// Copy the current activation flags.
allocator_options.CopyTo(&f, &cf);
cf.malloc_context_size = malloc_context_size;
......@@ -59,12 +60,7 @@ static struct AsanDeactivatedFlags {
parser.ParseString(env);
}
// Override from getprop asan.options.
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
parser.ParseString(buf);
SetVerbosity(cf.verbosity);
InitializeCommonFlags(&cf);
if (Verbosity()) ReportUnrecognizedFlags();
......
......@@ -221,7 +221,7 @@ void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 1UL << 40);
FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
......@@ -264,9 +264,43 @@ struct Allocator {
SharedInitCode(options);
}
void RePoisonChunk(uptr chunk) {
// This could a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
uptr beg = ac->Beg();
uptr end = ac->Beg() + ac->UsedSize(true);
uptr chunk_end = chunk + allocated_size;
if (chunk < beg && beg < end && end <= chunk_end) {
// Looks like a valid AsanChunk. Or maybe not. Be conservative and only
// poison the redzones.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
} else {
// This can not be an AsanChunk. Poison everything. It may be reused as
// AsanChunk later.
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
}
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
SharedInitCode(options);
// Poison all existing allocation's redzones.
if (CanPoisonMemory()) {
allocator.ForceLock();
allocator.ForEachChunk(
[](uptr chunk, void *alloc) {
((Allocator *)alloc)->RePoisonChunk(chunk);
},
this);
allocator.ForceUnlock();
}
}
void GetOptions(AllocatorOptions *options) const {
......@@ -354,7 +388,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
return allocator.ReturnNullOrDie();
return allocator.ReturnNullOrDieOnBadRequest();
}
AsanThread *t = GetCurrentThread();
......@@ -371,8 +405,7 @@ struct Allocator {
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
}
if (!allocated)
return allocator.ReturnNullOrDie();
if (!allocated) return allocator.ReturnNullOrDieOnOOM();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
......@@ -455,29 +488,28 @@ struct Allocator {
return res;
}
void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
// Set quarantine flag if chunk is allocated, issue ASan error report on
// available and quarantined chunks. Return true on success, false otherwise.
bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
CHUNK_QUARANTINE, memory_order_acquire))
if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
CHUNK_QUARANTINE,
memory_order_acquire)) {
ReportInvalidFree(ptr, old_chunk_state, stack);
// It's not safe to push a chunk in quarantine on invalid free.
return false;
}
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
return true;
}
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
// AtomicallySetQuarantineFlagIfAllocated.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
if (m->alloc_type != alloc_type) {
if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
(AllocType)alloc_type);
}
}
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
......@@ -514,13 +546,24 @@ struct Allocator {
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
// Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
if (m->alloc_type != alloc_type) {
if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
(AllocType)alloc_type);
}
}
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
QuarantineChunk(m, ptr, stack, alloc_type);
}
......@@ -551,7 +594,7 @@ struct Allocator {
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDie();
return allocator.ReturnNullOrDieOnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
......@@ -642,6 +685,8 @@ struct Allocator {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
void ReleaseToOS() { allocator.ReleaseToOS(); }
};
static Allocator instance(LINKER_INITIALIZED);
......@@ -653,11 +698,17 @@ static AsanAllocator &get_allocator() {
bool AsanChunkView::IsValid() {
return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() {
return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() {
return (AllocType)chunk_->alloc_type;
}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
......@@ -666,16 +717,22 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
StackTrace AsanChunkView::GetAllocStack() {
return GetStackTraceFromId(chunk_->alloc_context_id);
return GetStackTraceFromId(GetAllocStackId());
}
StackTrace AsanChunkView::GetFreeStack() {
return GetStackTraceFromId(chunk_->free_context_id);
return GetStackTraceFromId(GetFreeStackId());
}
void ReleaseToOS() { instance.ReleaseToOS(); }
void InitializeAllocator(const AllocatorOptions &options) {
instance.Initialize(options);
SetAllocatorReleaseToOSCallback(ReleaseToOS);
}
void ReInitializeAllocator(const AllocatorOptions &options) {
......@@ -689,6 +746,9 @@ void GetAllocatorOptions(AllocatorOptions *options) {
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}
void AsanThreadLocalMallocStorage::CommitBack() {
instance.CommitBack(this);
......@@ -752,7 +812,7 @@ int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
if (!ptr) return 0;
uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
if (flags()->check_malloc_usable_size && (usable_size == 0)) {
......
......@@ -47,16 +47,20 @@ void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
bool IsValid(); // Checks if AsanChunkView points to a valid allocated
// or quarantined chunk.
uptr Beg(); // First byte of user memory.
uptr End(); // Last byte of user memory.
uptr UsedSize(); // Size requested by the user.
bool IsValid(); // Checks if AsanChunkView points to a valid allocated
// or quarantined chunk.
bool IsAllocated(); // Checks if the memory is currently allocated.
uptr Beg(); // First byte of user memory.
uptr End(); // Last byte of user memory.
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
u32 GetAllocStackId();
u32 GetFreeStackId();
StackTrace GetAllocStack();
StackTrace GetFreeStack();
AllocType GetAllocType();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
......@@ -85,6 +89,7 @@ class AsanChunkView {
};
AsanChunkView FindHeapChunkByAddress(uptr address);
AsanChunkView FindHeapChunkByAllocBeg(uptr address);
// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
......@@ -112,18 +117,36 @@ struct AsanMapUnmapCallback {
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__aarch64__) && SANITIZER_ANDROID
const uptr kAllocatorSpace = 0x3000000000ULL;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// AArch64/SANITIZER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
# endif
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
typedef AsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
......@@ -169,7 +192,7 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
......
......@@ -12,74 +12,39 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_descriptions.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
namespace __asan {
void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
descr->name[0] = 0;
descr->region_address = 0;
descr->region_size = 0;
descr->region_kind = "stack";
namespace {
using namespace __asan;
AsanThread::StackFrameAccess access;
if (!t->GetStackFrameAccessByAddr(addr, &access))
return;
static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
char *name, uptr name_size,
uptr &region_address, uptr &region_size) {
InternalMmapVector<StackVarDescr> vars(16);
if (!ParseFrameDescription(access.frame_descr, &vars)) {
if (!ParseFrameDescription(frame_descr, &vars)) {
return;
}
for (uptr i = 0; i < vars.size(); i++) {
if (access.offset <= vars[i].beg + vars[i].size) {
internal_strncat(descr->name, vars[i].name_pos,
Min(descr->name_size, vars[i].name_len));
descr->region_address = addr - (access.offset - vars[i].beg);
descr->region_size = vars[i].size;
if (offset <= vars[i].beg + vars[i].size) {
// We use name_len + 1 because strlcpy will guarantee a \0 at the end, so
// if we're limiting the copy due to name_len, we add 1 to ensure we copy
// the whole name and then terminate with '\0'.
internal_strlcpy(name, vars[i].name_pos,
Min(name_size, vars[i].name_len + 1));
region_address = addr - (offset - vars[i].beg);
region_size = vars[i].size;
return;
}
}
}
void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
descr->name[0] = 0;
descr->region_address = 0;
descr->region_size = 0;
if (!chunk.IsValid()) {
descr->region_kind = "heap-invalid";
return;
}
descr->region_address = chunk.Beg();
descr->region_size = chunk.UsedSize();
descr->region_kind = "heap";
}
void AsanLocateAddress(uptr addr, AddressDescription *descr) {
if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
return;
}
if (GetInfoForAddressIfGlobal(addr, descr)) {
return;
}
asanThreadRegistry().Lock();
AsanThread *thread = FindThreadByStackAddress(addr);
asanThreadRegistry().Unlock();
if (thread) {
GetInfoForStackVar(addr, descr, thread);
return;
}
GetInfoForHeapAddress(addr, descr);
}
static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
......@@ -106,18 +71,58 @@ static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
return 0;
}
} // namespace __asan
using namespace __asan;
} // namespace
SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
uptr *region_address, uptr *region_size) {
AddressDescription descr = { name, name_size, 0, 0, nullptr };
AsanLocateAddress(addr, &descr);
if (region_address) *region_address = descr.region_address;
if (region_size) *region_size = descr.region_size;
return descr.region_kind;
uptr *region_address_ptr,
uptr *region_size_ptr) {
AddressDescription descr(addr);
uptr region_address = 0;
uptr region_size = 0;
const char *region_kind = nullptr;
if (name && name_size > 0) name[0] = 0;
if (auto shadow = descr.AsShadow()) {
// region_{address,size} are already 0
switch (shadow->kind) {
case kShadowKindLow:
region_kind = "low shadow";
break;
case kShadowKindGap:
region_kind = "shadow gap";
break;
case kShadowKindHigh:
region_kind = "high shadow";
break;
}
} else if (auto heap = descr.AsHeap()) {
region_kind = "heap";
region_address = heap->chunk_access.chunk_begin;
region_size = heap->chunk_access.chunk_size;
} else if (auto stack = descr.AsStack()) {
region_kind = "stack";
if (!stack->frame_descr) {
// region_{address,size} are already 0
} else {
FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
name_size, region_address, region_size);
}
} else if (auto global = descr.AsGlobal()) {
region_kind = "global";
auto &g = global->globals[0];
internal_strlcpy(name, g.name, name_size);
region_address = g.beg;
region_size = g.size;
} else {
// region_{address,size} are already 0
region_kind = "heap-invalid";
}
CHECK(region_kind);
if (region_address_ptr) *region_address_ptr = region_address;
if (region_size_ptr) *region_size_ptr = region_size;
return region_kind;
}
SANITIZER_INTERFACE_ATTRIBUTE
......
//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_descriptions.cc.
// TODO(filcab): Most struct definitions should move to the interface headers.
//===----------------------------------------------------------------------===//
#ifndef ASAN_DESCRIPTIONS_H
#define ASAN_DESCRIPTIONS_H
#include "asan_allocator.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
namespace __asan {
void DescribeThread(AsanThreadContext *context);
static inline void DescribeThread(AsanThread *t) {
if (t) DescribeThread(t->context());
}
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
uptr buff_len);
const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() {}
const char *Access() { return Blue(); }
const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
const char *EndLocation() { return Default(); }
const char *Allocation() { return Magenta(); }
const char *EndAllocation() { return Default(); }
const char *ShadowByte(u8 byte) {
switch (byte) {
case kAsanHeapLeftRedzoneMagic:
case kAsanArrayCookieMagic:
return Red();
case kAsanHeapFreeMagic:
return Magenta();
case kAsanStackLeftRedzoneMagic:
case kAsanStackMidRedzoneMagic:
case kAsanStackRightRedzoneMagic:
return Red();
case kAsanStackAfterReturnMagic:
return Magenta();
case kAsanInitializationOrderMagic:
return Cyan();
case kAsanUserPoisonedMemoryMagic:
case kAsanContiguousContainerOOBMagic:
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
return Blue();
case kAsanStackUseAfterScopeMagic:
return Magenta();
case kAsanGlobalRedzoneMagic:
return Red();
case kAsanInternalHeapMagic:
return Yellow();
case kAsanIntraObjectRedzone:
return Yellow();
default:
return Default();
}
}
const char *EndShadowByte() { return Default(); }
const char *MemoryByte() { return Magenta(); }
const char *EndMemoryByte() { return Default(); }
};
enum ShadowKind : u8 {
kShadowKindLow,
kShadowKindGap,
kShadowKindHigh,
};
static const char *const ShadowNames[] = {"low shadow", "shadow gap",
"high shadow"};
struct ShadowAddressDescription {
uptr addr;
ShadowKind kind;
u8 shadow_byte;
void Print() const;
};
bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
bool DescribeAddressIfShadow(uptr addr);
enum AccessType {
kAccessTypeLeft,
kAccessTypeRight,
kAccessTypeInside,
kAccessTypeUnknown, // This means we have an AddressSanitizer bug!
};
struct ChunkAccess {
uptr bad_addr;
sptr offset;
uptr chunk_begin;
uptr chunk_size;
u32 access_type : 2;
u32 alloc_type : 2;
};
struct HeapAddressDescription {
uptr addr;
uptr alloc_tid;
uptr free_tid;
u32 alloc_stack_id;
u32 free_stack_id;
ChunkAccess chunk_access;
void Print() const;
};
bool GetHeapAddressInformation(uptr addr, uptr access_size,
HeapAddressDescription *descr);
bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);
struct StackAddressDescription {
uptr addr;
uptr tid;
uptr offset;
uptr frame_pc;
uptr access_size;
const char *frame_descr;
void Print() const;
};
bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr);
struct GlobalAddressDescription {
uptr addr;
// Assume address is close to at most four globals.
static const int kMaxGlobals = 4;
__asan_global globals[kMaxGlobals];
u32 reg_sites[kMaxGlobals];
uptr access_size;
u8 size;
void Print(const char *bug_type = "") const;
};
bool GetGlobalAddressInformation(uptr addr, uptr access_size,
GlobalAddressDescription *descr);
bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);
// General function to describe an address. Will try to describe the address as
// a shadow, global (variable), stack, or heap address.
// bug_type is optional and is used for checking if we're reporting an
// initialization-order-fiasco
// The proper access_size should be passed for stack, global, and heap
// addresses. Defaults to 1.
// Each of the *AddressDescription functions has its own Print() member, which
// may take access_size and bug_type parameters if needed.
void PrintAddressDescription(uptr addr, uptr access_size = 1,
const char *bug_type = "");
enum AddressKind {
kAddressKindWild,
kAddressKindShadow,
kAddressKindHeap,
kAddressKindStack,
kAddressKindGlobal,
};
class AddressDescription {
struct AddressDescriptionData {
AddressKind kind;
union {
ShadowAddressDescription shadow;
HeapAddressDescription heap;
StackAddressDescription stack;
GlobalAddressDescription global;
uptr addr;
};
};
AddressDescriptionData data;
public:
AddressDescription() = default;
// shouldLockThreadRegistry allows us to skip locking if we're sure we already
// have done it.
AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
: AddressDescription(addr, 1, shouldLockThreadRegistry) {}
AddressDescription(uptr addr, uptr access_size,
bool shouldLockThreadRegistry = true);
uptr Address() const {
switch (data.kind) {
case kAddressKindWild:
return data.addr;
case kAddressKindShadow:
return data.shadow.addr;
case kAddressKindHeap:
return data.heap.addr;
case kAddressKindStack:
return data.stack.addr;
case kAddressKindGlobal:
return data.global.addr;
}
UNREACHABLE("AddressInformation kind is invalid");
}
void Print(const char *bug_descr = nullptr) const {
switch (data.kind) {
case kAddressKindWild:
Printf("Address %p is a wild pointer.\n", data.addr);
return;
case kAddressKindShadow:
return data.shadow.Print();
case kAddressKindHeap:
return data.heap.Print();
case kAddressKindStack:
return data.stack.Print();
case kAddressKindGlobal:
// initialization-order-fiasco has a special Print()
return data.global.Print(bug_descr);
}
UNREACHABLE("AddressInformation kind is invalid");
}
void StoreTo(AddressDescriptionData *dst) const { *dst = data; }
const ShadowAddressDescription *AsShadow() const {
return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
}
const HeapAddressDescription *AsHeap() const {
return data.kind == kAddressKindHeap ? &data.heap : nullptr;
}
const StackAddressDescription *AsStack() const {
return data.kind == kAddressKindStack ? &data.stack : nullptr;
}
const GlobalAddressDescription *AsGlobal() const {
return data.kind == kAddressKindGlobal ? &data.global : nullptr;
}
};
} // namespace __asan
#endif // ASAN_DESCRIPTIONS_H
......@@ -29,7 +29,7 @@ ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (class_id <= 6) {
for (uptr i = 0; i < (1U << class_id); i++) {
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
shadow[i] = magic;
// Make sure this does not become memset.
SanitizerBreakOptimization(nullptr);
......@@ -98,7 +98,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
// if the signal arrives between checking and setting flags[pos], the
// signal handler's fake stack will start from a different hint_position
// and so will not touch this particular byte. So, it is safe to do this
// with regular non-atimic load and store (at least I was not able to make
// with regular non-atomic load and store (at least I was not able to make
// this code crash).
if (flags[pos]) continue;
flags[pos] = 1;
......@@ -119,7 +119,7 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr class_id = (ptr - beg) >> stack_size_log;
uptr base = beg + (class_id << stack_size_log);
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (1UL << stack_size_log));
CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
uptr res = base + pos * BytesInSizeClass(class_id);
*frame_end = res + BytesInSizeClass(class_id);
......
......@@ -50,7 +50,7 @@ struct FakeFrame {
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
// frames in round robin fasion to maximize the delay between a deallocation
// frames in round robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
......@@ -67,12 +67,12 @@ class FakeStack {
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
return 1UL << (stack_size_log + 1 - kMinStackFrameSizeLog);
return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
}
// Each size class occupies stack_size bytes.
static uptr SizeRequiredForFrames(uptr stack_size_log) {
return (1ULL << stack_size_log) * kNumberOfSizeClasses;
return (((uptr)1) << stack_size_log) * kNumberOfSizeClasses;
}
// Number of bytes requires for the whole object.
......@@ -89,20 +89,20 @@ class FakeStack {
// and so on.
static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
uptr t = kNumberOfSizeClasses - 1 - class_id;
const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
const uptr all_ones = (((uptr)1) << (kNumberOfSizeClasses - 1)) - 1;
return ((all_ones >> t) << t) << (stack_size_log - 15);
}
static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
return 1UL << (stack_size_log - kMinStackFrameSizeLog - class_id);
return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
// Divide n by the numbe of frames in size class.
// Divide n by the number of frames in size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
// The the pointer to the flags of the given class_id.
// The pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);
......@@ -112,7 +112,8 @@ class FakeStack {
u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
SizeRequiredForFlags(stack_size_log) +
(1 << stack_size_log) * class_id + BytesInSizeClass(class_id) * pos;
(((uptr)1) << stack_size_log) * class_id +
BytesInSizeClass(class_id) * pos;
}
// Allocate the fake frame.
......@@ -135,7 +136,7 @@ class FakeStack {
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
return 1UL << (class_id + kMinStackFrameSizeLog);
return ((uptr)1) << (class_id + kMinStackFrameSizeLog);
}
// The fake frame is guaranteed to have a right redzone.
......@@ -157,7 +158,7 @@ class FakeStack {
static const uptr kFlagsOffset = 4096; // This is were the flags begin.
// Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
COMPILER_CHECK(kNumberOfSizeClasses == 11);
static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;
uptr hint_position_[kNumberOfSizeClasses];
uptr stack_size_log_;
......
......@@ -114,15 +114,7 @@ void InitializeFlags() {
ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif
// Let activation flags override current settings. On Android they come
// from a system property. On other platforms this is no-op.
if (!flags()->start_deactivated) {
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
asan_parser.ParseString(buf);
}
SetVerbosity(common_flags()->verbosity);
InitializeCommonFlags();
// TODO(eugenis): dump all flags at verbosity>=2?
if (Verbosity()) ReportUnrecognizedFlags();
......@@ -165,6 +157,14 @@ void InitializeFlags() {
(ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");
}
if (!f->replace_str && common_flags()->intercept_strchr) {
Report("WARNING: strchr* interceptors are enabled even though "
"replace_str=0. Use intercept_strchr=0 to disable them.");
}
}
} // namespace __asan
......
......@@ -41,10 +41,7 @@ ASAN_FLAG(
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
"If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
ASAN_FLAG(bool, mac_ignore_invalid_free, false,
"Ignore invalid free() calls to work around some bugs. Used on OS X "
"only.")
"If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
ASAN_FLAG(bool, detect_stack_use_after_return, false,
"Enables stack-use-after-return checking at run-time.")
ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
......@@ -78,6 +75,8 @@ ASAN_FLAG(bool, print_stats, false,
"Print various statistics after printing an error message or if "
"atexit=1.")
ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
ASAN_FLAG(bool, print_scariness, false,
"Print the scariness score. Experimental.")
ASAN_FLAG(bool, atexit, false,
"If set, prints ASan exit stats even after program terminates "
"successfully.")
......@@ -97,15 +96,15 @@ ASAN_FLAG(bool, poison_array_cookie, true,
"Poison (or not) the array cookie after operator new[].")
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://code.google.com/p/address-sanitizer/issues/detail?id=131
// https://code.google.com/p/address-sanitizer/issues/detail?id=309
// https://github.com/google/sanitizers/issues/131
// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
(SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
!SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
"Report errors on mismatch betwen size of new and delete.")
"Report errors on mismatch between size of new and delete.")
ASAN_FLAG(
bool, strict_init_order, false,
"If true, assume that dynamic initializers can never access globals from "
......@@ -124,8 +123,8 @@ ASAN_FLAG(
"The bigger the value the harder we try.")
ASAN_FLAG(
bool, detect_container_overflow, true,
"If true, honor the container overflow annotations. "
"See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
"If true, honor the container overflow annotations. See "
"https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow")
ASAN_FLAG(int, detect_odr_violation, 2,
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
......@@ -136,3 +135,5 @@ ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "
"(WARNING: USE AT YOUR OWN RISK!)")
ASAN_FLAG(bool, use_odr_indicator, false,
"Use special ODR indicator symbol for ODR violation detection")
......@@ -23,6 +23,7 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
......@@ -121,16 +122,68 @@ int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
return res;
}
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
Global g = {};
if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
internal_strncpy(descr->name, g.name, descr->name_size);
descr->region_address = g.beg;
descr->region_size = g.size;
descr->region_kind = "global";
return true;
enum GlobalSymbolState {
UNREGISTERED = 0,
REGISTERED = 1
};
// Check ODR violation for given global G via special ODR indicator. We use
// this method in case compiler instruments global variables through their
// local aliases.
static void CheckODRViolationViaIndicator(const Global *g) {
u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
if (*odr_indicator == UNREGISTERED) {
*odr_indicator = REGISTERED;
return;
}
// If *odr_indicator is DEFINED, some module have already registered
// externally visible symbol with the same name. This is an ODR violation.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->odr_indicator == l->g->odr_indicator &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
// Check ODR violation for given global G by checking if it's already poisoned.
// We use this method in case compiler doesn't use private aliases for global
// variables.
static void CheckODRViolationViaPoisoning(const Global *g) {
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger
// the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
return false;
}
// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
// cheaply detect ODR violation: if we try to poison an already poisoned
// global, we have ODR violation error.
// In latter case, we poison each symbol exactly once, so we use special
// indicator symbol to perform similar check.
// In either case, compiler provides a special odr_indicator field to Global
// structure, that can contain two kinds of values:
// 1) Non-zero value. In this case, odr_indicator is an address of
// corresponding indicator variable for given global.
// 2) Zero. This means that we don't use private aliases for global variables
// and can freely check ODR violation with the first method.
//
// This routine chooses between two different methods of ODR violation
// detection.
static inline bool UseODRIndicator(const Global *g) {
// Use ODR indicator method iff use_odr_indicator flag is set and
// indicator symbol address is not 0.
return flags()->use_odr_indicator && g->odr_indicator > 0;
}
// Register a global variable.
......@@ -142,24 +195,24 @@ static void RegisterGlobal(const Global *g) {
ReportGlobal(*g, "Added");
CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
if (!AddrIsAlignedByGranularity(g->beg)) {
Report("The following global variable is not properly aligned.\n");
Report("This may happen if another global with the same name\n");
Report("resides in another non-instrumented module.\n");
Report("Or the global comes from a C file built w/o -fno-common.\n");
Report("In either case this is likely an ODR violation bug,\n");
Report("but AddressSanitizer can not provide more details.\n");
ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g));
CHECK(AddrIsAlignedByGranularity(g->beg));
}
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
// This "ODR violation" detection is fundamentally incompatible with
// how GCC registers globals. Disable as useless until rewritten upstream.
if (0 && flags()->detect_odr_violation) {
if (flags()->detect_odr_violation) {
// Try detecting ODR (One Definition Rule) violation, i.e. the situation
// where two globals with the same name are defined in different modules.
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger
// the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
else
CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
......@@ -190,6 +243,12 @@ static void UnregisterGlobal(const Global *g) {
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
// implementation. It might not be worth doing anyway.
// Release ODR indicator.
if (UseODRIndicator(g)) {
u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
*odr_indicator = UNREGISTERED;
}
}
void StopInitOrderChecking() {
......@@ -207,11 +266,70 @@ void StopInitOrderChecking() {
}
}
static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
const char *MaybeDemangleGlobalName(const char *name) {
// We can spoil names of globals with C linkage, so use an heuristic
// approach to check if the name should be demangled.
bool should_demangle = false;
if (name[0] == '_' && name[1] == 'Z')
should_demangle = true;
else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
should_demangle = true;
return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
unsigned char c = *(unsigned char *)p;
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char *)(g.beg + g.size - 1) != '\0') return;
str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
(char *)g.beg);
}
static const char *GlobalFilename(const __asan_global &g) {
const char *res = g.module_name;
// Prefer the filename from source location, if is available.
if (g.location) res = g.location->filename;
CHECK(res);
return res;
}
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
str->append("%s", GlobalFilename(g));
if (!g.location) return;
if (g.location->line_no) str->append(":%d", g.location->line_no);
if (g.location->column_no) str->append(":%d", g.location->column_no);
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
// Apply __asan_register_globals to all globals found in the same loaded
// executable or shared library as `flag'. The flag tracks whether globals have
// already been registered or not for this image.
void __asan_register_image_globals(uptr *flag) {
if (*flag)
return;
AsanApplyToGlobals(__asan_register_globals, flag);
*flag = 1;
}
// This mirrors __asan_register_image_globals.
void __asan_unregister_image_globals(uptr *flag) {
if (!*flag)
return;
AsanApplyToGlobals(__asan_unregister_globals, flag);
*flag = 0;
}
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
......
......@@ -17,16 +17,20 @@ extern "C" {
// Every time the ASan ABI changes we also change the version number in the
// __asan_init function name. Objects built with incompatible ASan ABI
// versions will not link with run-time.
//
// Changes between ABI versions:
// v1=>v2: added 'module_name' to __asan_global
// v2=>v3: stack frame description (created by the compiler)
// contains the function PC as the 3-rd field (see
// DescribeAddressIfStack).
// v3=>v4: added '__asan_global_source_location' to __asan_global.
// contains the function PC as the 3rd field (see
// DescribeAddressIfStack)
// v3=>v4: added '__asan_global_source_location' to __asan_global
// v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
// __asan_stack_free_ functions.
// __asan_stack_free_ functions
// v5=>v6: changed the name of the version check symbol
#define __asan_version_mismatch_check __asan_version_mismatch_check_v6
// v6=>v7: added 'odr_indicator' to __asan_global
// v7=>v8: added '__asan_(un)register_image_globals' functions for dead
// stripping support on Mach-O platforms
#define __asan_version_mismatch_check __asan_version_mismatch_check_v8
}
#endif // ASAN_INIT_VERSION_H
......@@ -19,6 +19,7 @@
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#if SANITIZER_POSIX
......@@ -108,7 +109,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
} while (0)
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN
#if SANITIZER_INTERCEPT_STRNLEN
if (REAL(strnlen)) {
return REAL(strnlen)(s, maxlen);
}
......@@ -141,6 +142,8 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
(void) ctx; \
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
ASAN_INTERCEPT_FUNC_VER(name, ver)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
......@@ -176,7 +179,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
// Strict init-order checking is dlopen-hostile:
// https://code.google.com/p/address-sanitizer/issues/detail?id=178
// https://github.com/google/sanitizers/issues/178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
if (flags()->strict_init_order) { \
StopInitOrderChecking(); \
......@@ -193,6 +196,10 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} else { \
*begin = *end = 0; \
}
// Asan needs custom handling of these:
#undef SANITIZER_INTERCEPT_MEMSET
#undef SANITIZER_INTERCEPT_MEMMOVE
#undef SANITIZER_INTERCEPT_MEMCPY
#include "sanitizer_common/sanitizer_common_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
......@@ -216,6 +223,7 @@ struct ThreadStartParam {
atomic_uintptr_t is_registered;
};
#if ASAN_INTERCEPT_PTHREAD_CREATE
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
AsanThread *t = nullptr;
......@@ -226,7 +234,6 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
return t->ThreadStart(GetTid(), &param->is_registered);
}
#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
EnsureMainThreadIDIsCorrect();
......@@ -240,7 +247,17 @@ INTERCEPTOR(int, pthread_create, void *thread,
ThreadStartParam param;
atomic_store(&param.t, 0, memory_order_relaxed);
atomic_store(&param.is_registered, 0, memory_order_relaxed);
int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
int result;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
// objects, the latter are calculated by obscure pointer arithmetic.
#if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
}
if (result == 0) {
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
......@@ -269,7 +286,8 @@ DEFINE_REAL_PTHREAD_FUNCTIONS
#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
if (!IsHandledDeadlySignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(bsd_signal)(signum, handler);
}
return 0;
......@@ -277,7 +295,8 @@ INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
#endif
INTERCEPTOR(void*, signal, int signum, void *handler) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
if (!IsHandledDeadlySignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return nullptr;
......@@ -285,7 +304,8 @@ INTERCEPTOR(void*, signal, int signum, void *handler) {
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
if (!IsHandledDeadlySignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
......@@ -451,25 +471,6 @@ INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(ctx, block, c, size);
}
INTERCEPTOR(char*, strchr, const char *str, int c) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strchr);
if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
// strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
// used.
if (asan_init_is_running) {
return REAL(strchr)(str, c);
}
ENSURE_ASAN_INITED();
char *result = REAL(strchr)(str, c);
if (flags()->replace_str) {
uptr len = REAL(strlen)(str);
uptr bytes_read = (result ? result - str : len) + 1;
ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read);
}
return result;
}
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
......@@ -547,7 +548,6 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
return REAL(strcpy)(to, from); // NOLINT
}
#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
......@@ -562,29 +562,28 @@ INTERCEPTOR(char*, strdup, const char *s) {
REAL(memcpy)(new_mem, s, length + 1);
return reinterpret_cast<char*>(new_mem);
}
#endif
INTERCEPTOR(SIZE_T, strlen, const char *s) {
#if ASAN_INTERCEPT___STRDUP
INTERCEPTOR(char*, __strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strlen);
if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
return REAL(strlen)(s);
}
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED();
SIZE_T length = REAL(strlen)(s);
uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
}
return length;
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
REAL(memcpy)(new_mem, s, length + 1);
return reinterpret_cast<char*>(new_mem);
}
#endif // ASAN_INTERCEPT___STRDUP
INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
SIZE_T length = REAL(wcslen)(s);
SIZE_T length = internal_wcslen(s);
if (!asan_init_is_running) {
ENSURE_ASAN_INITED();
ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
......@@ -605,19 +604,6 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
return REAL(strncpy)(to, from, size);
}
#if ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
ENSURE_ASAN_INITED();
uptr length = REAL(strnlen)(s, maxlen);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
}
return length;
}
#endif // ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
char **endptr, int base) {
void *ctx;
......@@ -700,12 +686,12 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
#if ASAN_INTERCEPT___CXA_ATEXIT
static void AtCxaAtexit(void *unused) {
(void)unused;
StopInitOrderChecking();
}
#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
#if SANITIZER_MAC
......@@ -732,7 +718,7 @@ INTERCEPTOR(int, fork, void) {
namespace __asan {
void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(was_called_once == false);
CHECK(!was_called_once);
was_called_once = true;
InitializeCommonInterceptors();
......@@ -740,22 +726,22 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(memmove);
ASAN_INTERCEPT_FUNC(memset);
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
// In asan, REAL(memmove) is not used, but it is used in msan.
ASAN_INTERCEPT_FUNC(memcpy);
} else {
ASSIGN_REAL(memcpy, memmove);
}
CHECK(REAL(memcpy));
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
ASAN_INTERCEPT_FUNC(strchr);
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
ASAN_INTERCEPT_FUNC(strlen);
ASAN_INTERCEPT_FUNC(wcslen);
ASAN_INTERCEPT_FUNC(strncat);
ASAN_INTERCEPT_FUNC(strncpy);
#if ASAN_INTERCEPT_STRDUP
ASAN_INTERCEPT_FUNC(strdup);
#endif
#if ASAN_INTERCEPT_STRNLEN
ASAN_INTERCEPT_FUNC(strnlen);
#if ASAN_INTERCEPT___STRDUP
ASAN_INTERCEPT_FUNC(__strdup);
#endif
#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
ASAN_INTERCEPT_FUNC(index);
......
......@@ -21,14 +21,12 @@
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_FORK 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_FORK 0
......@@ -40,12 +38,6 @@
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
#if !SANITIZER_MAC
# define ASAN_INTERCEPT_STRNLEN 1
#else
# define ASAN_INTERCEPT_STRNLEN 0
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
......@@ -78,6 +70,12 @@
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT___STRDUP 1
#else
# define ASAN_INTERCEPT___STRDUP 0
#endif
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)
......
......@@ -21,6 +21,8 @@
#include "asan_init_version.h"
using __sanitizer::uptr;
using __sanitizer::u64;
using __sanitizer::u32;
extern "C" {
// This function should be called at the very beginning of the process,
......@@ -52,8 +54,17 @@ extern "C" {
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
__asan_global_source_location *location; // Source location of a global,
// or NULL if it is unknown.
uptr odr_indicator; // The address of the ODR indicator symbol.
};
// These functions can be called on some platforms to find globals in the same
// loaded image as `flag' and apply __asan_(un)register_globals to them,
// filtering out redundant calls.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_register_image_globals(uptr *flag);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unregister_image_globals(uptr *flag);
// These two functions should be called by the instrumented code.
// 'globals' is an array of structures describing 'n' globals.
SANITIZER_INTERFACE_ATTRIBUTE
......@@ -68,6 +79,20 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_after_dynamic_init();
// Sets bytes of the given range of the shadow memory into specific value.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_00(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f1(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f2(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f3(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f5(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f8(uptr addr, uptr size);
// These two functions are used by instrumented code in the
// use-after-scope mode. They mark memory for local variables as
// unaddressable when they leave scope and addressable before the
......@@ -145,6 +170,9 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
......
......@@ -34,9 +34,9 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
#if SANITIZER_WORDSIZE == 32
# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
# define ASAN_LOW_MEMORY 1
#else
# else
# define ASAN_LOW_MEMORY 0
# endif
#endif
......@@ -60,6 +60,12 @@ using __sanitizer::StackTrace;
void AsanInitFromRtl();
// asan_win.cc
void InitializePlatformExceptionHandlers();
// asan_win.cc / asan_posix.cc
const char *DescribeSignalOrException(int signo);
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
......@@ -71,10 +77,15 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
// Support function for __asan_(un)register_image_globals. Searches for the
// loaded image containing `needle' and then enumerates all global metadata
// structures declared in that image, applying `op' (e.g.,
// __asan_(un)register_globals) to them.
typedef void (*globals_op_fptr)(__asan_global *, uptr);
void AsanApplyToGlobals(globals_op_fptr op, const void *needle);
void AsanOnDeadlySignal(int, void *siginfo, void *context);
void DisableReexec();
void MaybeReexec();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void StopInitOrderChecking();
......@@ -95,16 +106,24 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
(PlatformHasDifferentMemcpyAndMemmove())
#elif SANITIZER_WINDOWS64
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
#else
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // SANITIZER_MAC
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size)
#define ASAN_FREE_HOOK(ptr) \
if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr)
#define ASAN_MALLOC_HOOK(ptr, size) \
do { \
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \
RunMallocHooks(ptr, size); \
} while (false)
#define ASAN_FREE_HOOK(ptr) \
do { \
if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \
RunFreeHooks(ptr); \
} while (false)
#define ASAN_ON_ERROR() \
if (&__asan_on_error) __asan_on_error()
......@@ -112,15 +131,12 @@ extern int asan_inited;
// Used to avoid infinite recursion in __asan_init().
extern bool asan_init_is_running;
extern void (*death_callback)(void);
// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
const int kAsanHeapRightRedzoneMagic = 0xfb;
const int kAsanHeapFreeMagic = 0xfd;
const int kAsanStackLeftRedzoneMagic = 0xf1;
const int kAsanStackMidRedzoneMagic = 0xf2;
const int kAsanStackRightRedzoneMagic = 0xf3;
const int kAsanStackPartialRedzoneMagic = 0xf4;
const int kAsanStackAfterReturnMagic = 0xf5;
const int kAsanInitializationOrderMagic = 0xf6;
const int kAsanUserPoisonedMemoryMagic = 0xf7;
......
......@@ -67,20 +67,17 @@ asan_rt_version_t __asan_rt_version;
namespace __asan {
void InitializePlatformInterceptors() {}
void DisableReexec() {
// No need to re-exec on Linux.
}
void MaybeReexec() {
// No need to re-exec on Linux.
}
void InitializePlatformExceptionHandlers() {}
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
return &_DYNAMIC; // defined in link.h
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
#if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
......
......@@ -22,18 +22,11 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mac.h"
#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
#else
extern "C" {
extern char ***_NSGetArgv(void);
}
#endif
#include <dlfcn.h> // for dladdr()
#include <dlfcn.h>
#include <fcntl.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach-o/loader.h>
#include <pthread.h>
#include <stdlib.h> // for free()
......@@ -43,193 +36,26 @@ extern "C" {
#include <sys/ucontext.h>
#include <unistd.h>
// from <crt_externs.h>, but we don't have that file on iOS
extern "C" {
extern char ***_NSGetArgv(void);
extern char ***_NSGetEnviron(void);
}
namespace __asan {
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
// into memmove$VARIANT$sse42.
// See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
// See also https://github.com/google/sanitizers/issues/34.
// TODO(glider): need to check dynamically that memcpy() and memmove() are
// actually the same function.
return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
}
extern "C"
void __asan_init();
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;
// Change the value of the env var |name|, leaking the original value.
// If |name_value| is NULL, the variable is deleted from the environment,
// otherwise the corresponding "NAME=value" string is replaced with
// |name_value|.
void LeakyResetEnv(const char *name, const char *name_value) {
char **env = GetEnviron();
uptr name_len = internal_strlen(name);
while (*env != 0) {
uptr len = internal_strlen(*env);
if (len > name_len) {
const char *p = *env;
if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
// Match.
if (name_value) {
// Replace the old value with the new one.
*env = const_cast<char*>(name_value);
} else {
// Shift the subsequent pointers back.
char **del = env;
do {
del[0] = del[1];
} while (*del++);
}
}
}
env++;
}
}
// Set by DisableReexec() to make MaybeReexec() below a no-op.
static bool reexec_disabled = false;

// Public hook allowing a client to opt out of the exec-based relaunch that
// MaybeReexec() may perform.
void DisableReexec() {
  reexec_disabled = true;
}
extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
static const double kMinDyldVersionWithAutoInterposition = 360.0;
// Returns true if dyld will not interpose our functions unless
// DYLD_INSERT_LIBRARIES is set, i.e. a re-exec is still required for the
// wrappers to take effect.
bool DyldNeedsEnvVariable() {
  // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
  // still may want to use them on older systems. On older Darwin platforms,
  // dyld doesn't export the dyldVersionNumber symbol and we simply return
  // true.
  if (!&dyldVersionNumber) return true;
  // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
  // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
  // GetMacosVersion() doesn't work for the simulator. Let's instead check
  // `dyldVersionNumber`, which is exported by dyld, against a known version
  // number from the first OS release where this appeared.
  return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
}
// Ensures ASan's interceptors actually work. If dyld requires
// DYLD_INSERT_LIBRARIES (see DyldNeedsEnvVariable()) and the runtime dylib
// is not in it, sets the variable and re-execs the process. If the dylib is
// already in the variable, filters it back out so that child processes do
// not inherit it.
void MaybeReexec() {
  if (reexec_disabled) return;
  // Make sure the dynamic ASan runtime library is preloaded so that the
  // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
  // ourselves.
  Dl_info info;
  CHECK(dladdr((void*)((uptr)__asan_init), &info));
  char *dyld_insert_libraries =
      const_cast<char*>(GetEnv(kDyldInsertLibraries));
  uptr old_env_len = dyld_insert_libraries ?
      internal_strlen(dyld_insert_libraries) : 0;
  uptr fname_len = internal_strlen(info.dli_fname);
  const char *dylib_name = StripModuleName(info.dli_fname);
  uptr dylib_name_len = internal_strlen(dylib_name);
  bool lib_is_in_env =
      dyld_insert_libraries && REAL(strstr)(dyld_insert_libraries, dylib_name);
  if (DyldNeedsEnvVariable() && !lib_is_in_env) {
    // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
    // library.
    char program_name[1024];
    uint32_t buf_size = sizeof(program_name);
    _NSGetExecutablePath(program_name, &buf_size);
    char *new_env = const_cast<char*>(info.dli_fname);
    if (dyld_insert_libraries) {
      // Append the runtime dylib name to the existing value of
      // DYLD_INSERT_LIBRARIES.
      new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
      internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
      new_env[old_env_len] = ':';
      // Copy fname_len and add a trailing zero.
      internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
                       fname_len + 1);
      // Ok to use setenv() since the wrappers don't depend on the value of
      // asan_inited.
      setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
    } else {
      // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
      setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
    }
    VReport(1, "exec()-ing the program with\n");
    VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
    VReport(1, "to enable ASan wrappers.\n");
    execv(program_name, *_NSGetArgv());
    // We get here only if execv() failed.
    Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
           "which is required for ASan to work. ASan tried to set the "
           "environment variable and re-execute itself, but execv() failed, "
           "possibly because of sandbox restrictions. Make sure to launch the "
           "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
    CHECK("execv failed" && 0);
  }
  if (!lib_is_in_env)
    return;
  // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
  // the dylib from the environment variable, because interceptors are installed
  // and we don't want our children to inherit the variable.
  uptr env_name_len = internal_strlen(kDyldInsertLibraries);
  // Allocate memory to hold the previous env var name, its value, the '='
  // sign and the '\0' char.
  char *new_env = (char*)allocator_for_env.Allocate(
      old_env_len + 2 + env_name_len);
  CHECK(new_env);
  internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
  internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
  new_env[env_name_len] = '=';
  char *new_env_pos = new_env + env_name_len + 1;
  // Iterate over colon-separated pieces of |dyld_insert_libraries|,
  // copying every piece except the one naming our own dylib.
  char *piece_start = dyld_insert_libraries;
  char *piece_end = NULL;
  char *old_env_end = dyld_insert_libraries + old_env_len;
  do {
    if (piece_start[0] == ':') piece_start++;
    piece_end = REAL(strchr)(piece_start, ':');
    if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
    if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
    uptr piece_len = piece_end - piece_start;
    // Compare only the basename of the piece against our dylib name.
    char *filename_start =
        (char *)internal_memrchr(piece_start, '/', piece_len);
    uptr filename_len = piece_len;
    if (filename_start) {
      filename_start += 1;
      filename_len = piece_len - (filename_start - piece_start);
    } else {
      filename_start = piece_start;
    }
    // If the current piece isn't the runtime library name,
    // append it to new_env.
    if ((dylib_name_len != filename_len) ||
        (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
      if (new_env_pos != new_env + env_name_len + 1) {
        new_env_pos[0] = ':';
        new_env_pos++;
      }
      internal_strncpy(new_env_pos, piece_start, piece_len);
      new_env_pos += piece_len;
    }
    // Move on to the next piece.
    piece_start = piece_end;
  } while (piece_start < old_env_end);
  // Can't use setenv() here, because it requires the allocator to be
  // initialized.
  // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
  // a separate function called after InitializeAllocator().
  if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
  LeakyResetEnv(kDyldInsertLibraries, new_env);
}
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
return 0;
......@@ -241,6 +67,30 @@ void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}
// Applies |op| to the __asan_globals section of the Mach-O image that
// contains the address |needle|.
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
  // Find the Mach-O header for the image containing the needle
  Dl_info info;
  int err = dladdr(needle, &info);
  if (err == 0) return;  // dladdr() returns 0 on failure.

#if __LP64__
  const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase;
#else
  const struct mach_header *mh = (struct mach_header *)info.dli_fbase;
#endif

  // Look up the __asan_globals section in that image and register its globals
  unsigned long size = 0;
  __asan_global *globals = (__asan_global *)getsectiondata(
      mh,
      "__DATA", "__asan_globals",
      &size);

  if (!globals) return;
  // Ignore a section whose size is not a whole number of __asan_global
  // records.
  if (size % sizeof(__asan_global) != 0) return;
  op(globals, size / sizeof(__asan_global));
}
// Extracting the stack bounds from a ucontext is not supported on this
// platform.
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
  UNIMPLEMENTED();
}
......
......@@ -76,7 +76,13 @@ INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
void *new_ptr = asan_malloc(size, &stack);
void *new_ptr;
if (UNLIKELY(!asan_inited)) {
new_ptr = AllocateFromLocalPool(size);
} else {
copy_size = size;
new_ptr = asan_malloc(copy_size, &stack);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
......@@ -96,7 +102,7 @@ INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
// Intercepts __libc_memalign(): serves the allocation from ASan's heap and
// notifies the DTLS machinery so dynamic TLS blocks allocated this way are
// tracked.
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
  GET_STACK_TRACE_MALLOC;
  void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
  // Register only the actual allocation size; the stale pre-merge
  // "size * boundary" call over-estimated the block and is removed.
  DTLS_on_libc_memalign(res, size);
  return res;
}
......
......@@ -52,10 +52,6 @@ using namespace __asan;
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
GET_STACK_TRACE_FREE; \
ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
#define COMMON_MALLOC_IGNORE_INVALID_FREE flags()->mac_ignore_invalid_free
#define COMMON_MALLOC_REPORT_FREE_UNALLOCATED(ptr, zone_ptr, zone_name) \
GET_STACK_TRACE_FREE; \
WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
#define COMMON_MALLOC_NAMESPACE __asan
#include "sanitizer_common/sanitizer_malloc_mac.inc"
......
......@@ -12,6 +12,8 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include "asan_allocator.h"
#include "asan_interceptors.h"
......@@ -47,6 +49,11 @@ void _free_dbg(void *ptr, int) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
// CRT's lower-level _free_base entry point; forwards to the ASan-replaced
// free() above.
void _free_base(void *ptr) {
  free(ptr);
}
ALLOCATION_FUNCTION_ATTRIBUTE
// cfree() must never be reached in a Windows process; crash loudly if it is.
void cfree(void *ptr) {
  CHECK(!"cfree() should not be used on Windows");
}
......@@ -58,6 +65,11 @@ void *malloc(size_t size) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
// CRT's lower-level _malloc_base entry point; forwards to the ASan-replaced
// malloc().
void *_malloc_base(size_t size) {
  return malloc(size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_malloc_dbg(size_t size, int, const char *, int) {
return malloc(size);
}
......@@ -69,6 +81,11 @@ void *calloc(size_t nmemb, size_t size) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
// CRT's lower-level _calloc_base entry point; forwards to the ASan-replaced
// calloc().
void *_calloc_base(size_t nmemb, size_t size) {
  return calloc(nmemb, size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
return calloc(nmemb, size);
}
......@@ -91,6 +108,11 @@ void *_realloc_dbg(void *ptr, size_t size, int) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
// CRT's lower-level _realloc_base entry point; forwards to the ASan-replaced
// realloc().
void *_realloc_base(void *ptr, size_t size) {
  return realloc(ptr, size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_recalloc(void *p, size_t n, size_t elem_size) {
if (!p)
return calloc(n, elem_size);
......@@ -101,7 +123,12 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
// CRT's lower-level _recalloc_base entry point; forwards to _recalloc above.
// (A stale, unterminated pre-merge "size_t _msize(void *ptr) {" line that was
// left between the attribute and this definition has been removed.)
void *_recalloc_base(void *p, size_t n, size_t elem_size) {
  return _recalloc(p, n, elem_size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(const void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
......@@ -137,38 +164,90 @@ int _CrtSetReportMode(int, int) {
}
} // extern "C"
// ASan replacement for win32 HeapAlloc(): |hHeap| is ignored and the request
// is served from ASan's allocator. Only flags 0 and HEAP_ZERO_MEMORY are
// supported.
INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags,
                   SIZE_T dwBytes) {
  GET_STACK_TRACE_MALLOC;
  void *p = asan_malloc(dwBytes, &stack);
  // Reading MSDN suggests that the *entire* usable allocation is zeroed out.
  // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY.
  // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083
  if (dwFlags == HEAP_ZERO_MEMORY)
    internal_memset(p, 0, asan_mz_size(p));
  else
    CHECK(dwFlags == 0 && "unsupported heap flags");
  return p;
}
// ASan replacement for win32 HeapFree(): |hHeap| is ignored; the block is
// returned to ASan's allocator.
INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) {
  CHECK(dwFlags == 0 && "unsupported heap flags");
  GET_STACK_TRACE_FREE;
  asan_free(lpMem, &stack, FROM_MALLOC);
  return true;
}
// ASan replacement for win32 HeapReAlloc(): |hHeap| is ignored; the block is
// reallocated through ASan's allocator.
INTERCEPTOR_WINAPI(LPVOID, HeapReAlloc, HANDLE hHeap, DWORD dwFlags,
                   LPVOID lpMem, SIZE_T dwBytes) {
  GET_STACK_TRACE_MALLOC;
  // Realloc should never reallocate in place.
  if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY)
    return nullptr;
  CHECK(dwFlags == 0 && "unsupported heap flags");
  return asan_realloc(lpMem, dwBytes, &stack);
}
// ASan replacement for win32 HeapSize(): reports the usable size tracked by
// ASan's allocator; |hHeap| is ignored.
INTERCEPTOR_WINAPI(SIZE_T, HeapSize, HANDLE hHeap, DWORD dwFlags,
                   LPCVOID lpMem) {
  CHECK(dwFlags == 0 && "unsupported heap flags");
  GET_CURRENT_PC_BP_SP;
  (void)sp;
  return asan_malloc_usable_size(lpMem, pc, bp);
}
namespace __asan {
// Best-effort override of the CRT symbol |fname| with |new_func|, logging
// (at verbosity >= 2) instead of failing when the symbol is absent.
static void TryToOverrideFunction(const char *fname, uptr new_func) {
  // Failure here is not fatal. The CRT may not be present, and different CRT
  // versions use different symbols.
  if (!__interception::OverrideFunction(fname, new_func))
    VPrintf(2, "Failed to override function %s\n", fname);
}
// Redirects the CRT heap entry points (and, for recent ucrt versions, the
// lower-level win32 Heap* imports) to ASan's allocator. Only meaningful for
// the dynamic runtime (ASAN_DYNAMIC). The stale pre-merge block of raw
// __interception::OverrideFunction calls and operator new/delete overrides,
// left behind by the diff rendering, is removed: the TryToOverrideFunction
// calls below are the post-merge replacement.
void ReplaceSystemMalloc() {
#if defined(ASAN_DYNAMIC)
  TryToOverrideFunction("free", (uptr)free);
  TryToOverrideFunction("_free_base", (uptr)free);
  TryToOverrideFunction("malloc", (uptr)malloc);
  TryToOverrideFunction("_malloc_base", (uptr)malloc);
  TryToOverrideFunction("_malloc_crt", (uptr)malloc);
  TryToOverrideFunction("calloc", (uptr)calloc);
  TryToOverrideFunction("_calloc_base", (uptr)calloc);
  TryToOverrideFunction("_calloc_crt", (uptr)calloc);
  TryToOverrideFunction("realloc", (uptr)realloc);
  TryToOverrideFunction("_realloc_base", (uptr)realloc);
  TryToOverrideFunction("_realloc_crt", (uptr)realloc);
  TryToOverrideFunction("_recalloc", (uptr)_recalloc);
  TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
  TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
  TryToOverrideFunction("_msize", (uptr)_msize);
  TryToOverrideFunction("_expand", (uptr)_expand);
  TryToOverrideFunction("_expand_base", (uptr)_expand);

  // Recent versions of ucrtbase.dll appear to be built with PGO and LTCG,
  // which enable cross-module inlining. This means our _malloc_base hook
  // won't catch all CRT allocations. This code here patches the import table
  // of ucrtbase.dll so that all attempts to use the lower-level win32 heap
  // allocation API will be directed to ASan's heap. We don't currently
  // intercept all calls to HeapAlloc. If we did, we would have to check on
  // HeapFree whether the pointer came from ASan or from the system.
#define INTERCEPT_UCRT_FUNCTION(func)                                         \
  if (!INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll",                           \
                                    "api-ms-win-core-heap-l1-1-0.dll", func)) \
    VPrintf(2, "Failed to intercept ucrtbase.dll import %s\n", #func);
  INTERCEPT_UCRT_FUNCTION(HeapAlloc);
  INTERCEPT_UCRT_FUNCTION(HeapFree);
  INTERCEPT_UCRT_FUNCTION(HeapReAlloc);
  INTERCEPT_UCRT_FUNCTION(HeapSize);
#undef INTERCEPT_UCRT_FUNCTION
#endif
}
} // namespace __asan
......
......@@ -15,7 +15,7 @@
#include "asan_internal.h"
// The full explanation of the memory mapping could be found here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000:
// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem ||
......@@ -85,6 +85,20 @@
// || `[0x08000000000, 0x08fffffffff]` || lowshadow ||
// || `[0x00000000000, 0x07fffffffff]` || lowmem ||
//
// Default Linux/S390 mapping:
// || `[0x30000000, 0x7fffffff]` || HighMem ||
// || `[0x26000000, 0x2fffffff]` || HighShadow ||
// || `[0x24000000, 0x25ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/SystemZ mapping:
// || `[0x14000000000000, 0x1fffffffffffff]` || HighMem ||
// || `[0x12800000000000, 0x13ffffffffffff]` || HighShadow ||
// || `[0x12000000000000, 0x127fffffffffff]` || ShadowGap ||
// || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow ||
// || `[0x00000000000000, 0x0fffffffffffff]` || LowMem ||
//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
......@@ -109,17 +123,19 @@
// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kIosShadowOffset64 = 0x130000000;
static const u64 kIosShadowOffset64 = 0x120200000;
static const u64 kIosSimShadowOffset32 = 1ULL << 30;
static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
......@@ -136,28 +152,36 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# elif SANITIZER_WINDOWS
# define SHADOW_OFFSET kWindowsShadowOffset32
# elif SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset32
# elif SANITIZER_IOS
# define SHADOW_OFFSET kIosShadowOffset32
# if SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset32
# else
# define SHADOW_OFFSET kIosShadowOffset32
# endif
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
#else
# if defined(__aarch64__)
# if SANITIZER_IOS
# if SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset64
# else
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# endif
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__)
# define SHADOW_OFFSET kPPC64_ShadowOffset64
# elif defined(__s390x__)
# define SHADOW_OFFSET kSystemZ_ShadowOffset64
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# elif defined(__mips64)
# define SHADOW_OFFSET kMIPS64_ShadowOffset64
# elif SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset64
# elif SANITIZER_IOS
# define SHADOW_OFFSET kIosShadowOffset64
# elif SANITIZER_WINDOWS64
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
......@@ -243,9 +267,25 @@ static inline bool AddrIsInMidMem(uptr a) {
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
}
// Returns true if |a| lies inside a shadow-gap region (addresses the
// application must not access).
static inline bool AddrIsInShadowGap(uptr a) {
  PROFILE_ASAN_MAPPING();
  if (kMidMemBeg) {
    // With mid-memory enabled there are three separate gap ranges.
    if (a <= kShadowGapEnd)
      return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
    return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
           (a >= kShadowGap3Beg && a <= kShadowGap3End);
  }
  // In zero-based shadow mode we treat addresses near zero as addresses
  // in shadow gap as well.
  if (SHADOW_OFFSET == 0)
    return a <= kShadowGapEnd;
  return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
// Returns true if |a| is an application memory address. When the shadow gap
// is left unprotected (flags()->protect_shadow_gap == 0), gap addresses are
// also treated as application memory. The stale pre-merge duplicate return
// statement (which made the protect_shadow_gap branch unreachable) is
// removed.
static inline bool AddrIsInMem(uptr a) {
  PROFILE_ASAN_MAPPING();
  return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
      (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}
static inline uptr MemToShadow(uptr p) {
......@@ -269,21 +309,6 @@ static inline bool AddrIsInShadow(uptr a) {
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}
static inline bool AddrIsInShadowGap(uptr a) {
PROFILE_ASAN_MAPPING();
if (kMidMemBeg) {
if (a <= kShadowGapEnd)
return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
(a >= kShadowGap3Beg && a <= kShadowGap3End);
}
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
if (SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
static inline bool AddrIsAlignedByGranularity(uptr a) {
PROFILE_ASAN_MAPPING();
return (a & (SHADOW_GRANULARITY - 1)) == 0;
......
//===-- asan_memory_profile.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file implements __sanitizer_print_memory_profile.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "lsan/lsan_common.h"
#include "asan/asan_allocator.h"
#if CAN_SANITIZE_LEAKS
namespace __asan {
// Aggregated live-heap statistics for a single allocation stack.
struct AllocationSite {
  u32 id;           // Stack depot id of the allocation stack.
  uptr total_size;  // Total live bytes allocated from this site.
  uptr count;       // Number of live allocations from this site.
};
// Aggregates live heap allocations by allocation-stack id and prints the
// largest consumers.
class HeapProfile {
 public:
  HeapProfile() : allocations_(1024) {}
  // Accounts one live allocation of |size| bytes attributed to stack |id|.
  void Insert(u32 id, uptr size) {
    total_allocated_ += size;
    total_count_++;
    // Linear lookup will be good enough for most cases (although not all).
    for (uptr i = 0; i < allocations_.size(); i++) {
      if (allocations_[i].id == id) {
        allocations_[i].total_size += size;
        allocations_[i].count++;
        return;
      }
    }
    allocations_.push_back({id, size, 1});
  }
  // Prints allocation sites, largest first, stopping once more than
  // |top_percent| percent of all allocated bytes have been shown.
  void Print(uptr top_percent) {
    // Sort by total_size, descending.
    InternalSort(&allocations_, allocations_.size(),
                 [](const AllocationSite &a, const AllocationSite &b) {
                   return a.total_size > b.total_size;
                 });
    CHECK(total_allocated_);  // Guards the divisions below.
    uptr total_shown = 0;
    Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
           "showing top %zd%%\n", total_allocated_, total_count_, top_percent);
    for (uptr i = 0; i < allocations_.size(); i++) {
      auto &a = allocations_[i];
      Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
             a.total_size * 100 / total_allocated_, a.count);
      StackDepotGet(a.id).Print();
      total_shown += a.total_size;
      if (total_shown * 100 / total_allocated_ > top_percent)
        break;
    }
  }
 private:
  uptr total_allocated_ = 0;  // Sum of sizes of all live allocations seen.
  uptr total_count_ = 0;      // Number of live allocations seen.
  InternalMmapVector<AllocationSite> allocations_;
};
// Per-chunk visitor: records each live allocated chunk in the HeapProfile
// passed via |arg|.
static void ChunkCallback(uptr chunk, void *arg) {
  HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
  AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
  if (!cv.IsAllocated()) return;
  u32 id = cv.GetAllocStackId();
  if (!id) return;  // Skip chunks without a recorded allocation stack.
  hp->Insert(id, cv.UsedSize());
}
// Stop-the-world callback: builds the heap profile over all chunks and
// prints it. |argument| carries the top_percent value cast to a pointer.
static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
                            void *argument) {
  HeapProfile hp;
  __lsan::ForEachChunk(ChunkCallback, &hp);
  hp.Print(reinterpret_cast<uptr>(argument));
}
} // namespace __asan
extern "C" {
// Public entry point: suspends all threads and prints the live-heap profile,
// covering allocation sites up to |top_percent| percent of the heap.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_memory_profile(uptr top_percent) {
  __sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
}
}  // extern "C"
#endif // CAN_SANITIZE_LEAKS
......@@ -18,9 +18,25 @@
#include <stddef.h>
// C++ operators can't have visibility attributes on Windows.
// C++ operators can't have dllexport attributes on Windows. We export them
// anyway by passing extra -export flags to the linker, which is exactly that
// dllexport would normally do. We need to export them in order to make the
// VS2015 dynamic CRT (MD) work.
#if SANITIZER_WINDOWS
# define CXX_OPERATOR_ATTRIBUTE
# ifdef _WIN64
# pragma comment(linker, "/export:??2@YAPEAX_K@Z") // operator new
# pragma comment(linker, "/export:??3@YAXPEAX@Z") // operator delete
# pragma comment(linker, "/export:??3@YAXPEAX_K@Z") // sized operator delete
# pragma comment(linker, "/export:??_U@YAPEAX_K@Z") // operator new[]
# pragma comment(linker, "/export:??_V@YAXPEAX@Z") // operator delete[]
# else
# pragma comment(linker, "/export:??2@YAPAXI@Z") // operator new
# pragma comment(linker, "/export:??3@YAXPAX@Z") // operator delete
# pragma comment(linker, "/export:??3@YAXPAXI@Z") // sized operator delete
# pragma comment(linker, "/export:??_U@YAPAXI@Z") // operator new[]
# pragma comment(linker, "/export:??_V@YAXPAX@Z") // operator delete[]
# endif
#else
# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
#endif
......@@ -37,7 +53,7 @@ using namespace __asan; // NOLINT
#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
// This code has issues on OSX.
// See https://code.google.com/p/address-sanitizer/issues/detail?id=131.
// See https://github.com/google/sanitizers/issues/131.
// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
namespace std {
......
......@@ -67,7 +67,7 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg);
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
......@@ -100,7 +100,7 @@ using namespace __asan; // NOLINT
// that user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure asan shadow
// mapping invariant is preserved (see detailed mapping description here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
// at least [left, AlignDown(right)).
......@@ -115,9 +115,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
CHECK(beg.offset < end.offset);
CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
CHECK(value == end.value);
CHECK_EQ(value, end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
if (value > 0 && value <= end.offset) {
......@@ -129,7 +129,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
CHECK(beg.chunk < end.chunk);
CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
// Mark bytes from beg.offset as unaddressable.
if (beg.value == 0) {
......@@ -155,9 +155,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
CHECK(beg.offset < end.offset);
CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
CHECK(value == end.value);
CHECK_EQ(value, end.value);
// We unpoison memory bytes up to end.offset if they are not
// unpoisoned already.
if (value != 0) {
......@@ -165,7 +165,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
CHECK(beg.chunk < end.chunk);
CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
*beg.chunk = 0;
beg.chunk++;
......@@ -312,6 +312,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
}
// __asan_set_shadow_XX(): instrumentation callbacks that fill the shadow
// range [addr, addr+size) with the given shadow byte value via the real
// (un-intercepted) memset.
void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
......@@ -341,7 +365,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
&stack);
}
CHECK_LE(end - beg,
FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
......@@ -352,7 +376,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
// Make a quick sanity check that we are indeed in this state.
//
// FIXME: Two of these three checks are disabled until we fix
// https://code.google.com/p/address-sanitizer/issues/detail?id=258.
// https://github.com/google/sanitizers/issues/258.
// if (d1 != d2)
// CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
......
......@@ -84,7 +84,7 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
// Calls __sanitizer::FlushUnneededShadowMemory() on
// Calls __sanitizer::ReleaseMemoryToOS() on
// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
void FlushUnneededASanShadowMemory(uptr p, uptr size);
......
......@@ -31,17 +31,39 @@
namespace __asan {
// Maps a deadly signal number to the short tag used in ASan reports.
// Every signal other than FPE/ILL/ABRT is reported as "SEGV".
const char *DescribeSignalOrException(int signo) {
  if (signo == SIGFPE) return "FPE";
  if (signo == SIGILL) return "ILL";
  if (signo == SIGABRT) return "ABRT";
  return "SEGV";
}
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using the bullet-proof write.
if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die();
// Write the first message using fd=2, just in case.
// It may actually fail to write in case stderr is closed.
internal_write(2, "ASAN:DEADLYSIGNAL\n", 18);
SignalContext sig = SignalContext::Create(siginfo, context);
// Access at a reasonable offset above SP, or slightly below it (to account
// for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
// probably a stack overflow.
#ifdef __s390__
// On s390, the fault address in siginfo points to start of the page, not
// to the precise word that was accessed. Mask off the low bits of sp to
// take it into account.
bool IsStackAccess = sig.addr >= (sig.sp & ~0xFFF) &&
sig.addr < sig.sp + 0xFFFF;
#else
bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
#endif
#if __powerpc__
// Large stack frames can be allocated with e.g.
......@@ -73,10 +95,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
// unaligned memory access.
if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(sig);
else if (signo == SIGFPE)
ReportDeadlySignal("FPE", sig);
else
ReportDeadlySignal("SEGV", sig);
ReportDeadlySignal(signo, sig);
}
// ---------------------- TSD ---------------- {{{1
......
......@@ -23,34 +23,28 @@ struct StackVarDescr {
uptr name_len;
};
struct AddressDescription {
char *name;
uptr name_size;
uptr region_address;
uptr region_size;
const char *region_kind;
};
// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
int max_globals);
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
const char *MaybeDemangleGlobalName(const char *name);
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
bool in_shadow, const char *after = "\n");
// The following functions prints address description depending
// on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size);
bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
bool print = true);
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal);
void ReportStackOverflow(const SignalContext &sig);
void ReportDeadlySignal(const char *description, const SignalContext &sig);
void ReportDeadlySignal(int signo, const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
......@@ -75,8 +69,6 @@ void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2);
// Mac-specific errors and warnings.
void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
BufferedStackTrace *stack);
void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
const char *zone_name,
BufferedStackTrace *stack);
......
......@@ -30,6 +30,7 @@
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
......@@ -84,8 +85,8 @@ void ShowStatsAndAbort() {
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
CHECK_EQ((beg % GetMmapGranularity()), 0);
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size, name);
......@@ -261,6 +262,7 @@ static NOINLINE void force_interface_symbols() {
volatile int fake_condition = 0; // prevent dead condition elimination.
// __asan_report_* functions are noreturn, so we need a switch to prevent
// the compiler from removing any of them.
// clang-format off
switch (fake_condition) {
case 1: __asan_report_load1(0); break;
case 2: __asan_report_load2(0); break;
......@@ -300,7 +302,14 @@ static NOINLINE void force_interface_symbols() {
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
case 40: __asan_set_shadow_00(0, 0); break;
case 41: __asan_set_shadow_f1(0, 0); break;
case 42: __asan_set_shadow_f2(0, 0); break;
case 43: __asan_set_shadow_f3(0, 0); break;
case 44: __asan_set_shadow_f5(0, 0); break;
case 45: __asan_set_shadow_f8(0, 0); break;
}
// clang-format on
}
static void asan_atexit() {
......@@ -318,26 +327,39 @@ static void InitializeHighMemEnd() {
kHighMemEnd = GetMaxVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
kHighMemEnd |= SHADOW_GRANULARITY * GetPageSizeCached() - 1;
kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
}
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap)
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
// is actually using this memory. Which means it needs a shadow...
uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
uptr GapShadowEnd =
RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
if (Verbosity())
Printf("protect_shadow_gap=0:"
" not protecting shadow gap, allocating gap's shadow\n"
"|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
GapShadowEnd);
ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
"unprotected gap shadow");
return;
void *res = MmapNoAccess(addr, size, "shadow gap");
}
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == kZeroBaseShadowStart) {
uptr step = GetPageSizeCached();
uptr step = GetMmapGranularity();
while (size > step && addr < kZeroBaseMaxShadowStart) {
addr += step;
size -= step;
void *res = MmapNoAccess(addr, size, "shadow gap");
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
}
......@@ -413,10 +435,13 @@ static void AsanInitInternal() {
AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();
AvoidCVE_2016_2143();
SetCanPoisonMemory(flags()->poison_heap);
SetMallocContextSize(common_flags()->malloc_context_size);
InitializePlatformExceptionHandlers();
InitializeHighMemEnd();
// Make sure we are not statically linked.
......@@ -429,7 +454,6 @@ static void AsanInitInternal() {
__sanitizer_set_report_path(common_flags()->log_path);
// Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
......@@ -448,7 +472,30 @@ static void AsanInitInternal() {
ReplaceSystemMalloc();
// Set the shadow memory address to uninitialized.
__asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
uptr shadow_start = kLowShadowBeg;
// Detect if a dynamic shadow address must be used and find an available
// location when necessary.
// when necessary. When dynamic address is used, the macro |kLowShadowBeg|
// expands to |__asan_shadow_memory_dynamic_address| which is
// |kDefaultShadowSentinel|.
if (shadow_start == kDefaultShadowSentinel) {
__asan_shadow_memory_dynamic_address = 0;
CHECK_EQ(0, kLowShadowBeg);
uptr granularity = GetMmapGranularity();
uptr alignment = 8 * granularity;
uptr left_padding = granularity;
uptr space_size = kHighShadowEnd + left_padding;
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
CHECK_NE((uptr)0, shadow_start);
CHECK(IsAligned(shadow_start, alignment));
}
// Update the shadow memory address (potentially) used by instrumentation.
__asan_shadow_memory_dynamic_address = shadow_start;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
......@@ -537,12 +584,12 @@ static void AsanInitInternal() {
force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
if (CAN_SANITIZE_LEAKS) {
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
}
#endif // CAN_SANITIZE_LEAKS
#if CAN_SANITIZE_UB
__ubsan::InitAsPlugin();
......@@ -550,6 +597,15 @@ static void AsanInitInternal() {
InitializeSuppressions();
if (CAN_SANITIZE_LEAKS) {
// LateInitialize() calls dlsym, which can allocate an error string buffer
// in the TLS. Let's ignore the allocation to avoid reporting a leak.
__lsan::ScopedInterceptorDisabler disabler;
Symbolizer::LateInitialize();
} else {
Symbolizer::LateInitialize();
}
VReport(1, "AddressSanitizer Init done\n");
}
......@@ -579,6 +635,9 @@ static AsanInitializer asan_initializer;
using namespace __asan; // NOLINT
void NOINLINE __asan_handle_no_return() {
if (asan_init_is_running)
return;
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
uptr PageSize = GetPageSizeCached();
......@@ -603,7 +662,7 @@ void NOINLINE __asan_handle_no_return() {
"stack top: %p; bottom %p; size: %p (%zd)\n"
"False positive error reports may follow\n"
"For details see "
"http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
"https://github.com/google/sanitizers/issues/189\n",
top, bottom, top - bottom, top - bottom);
return;
}
......
//===-- asan_scariness_score.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Compute the level of scariness of the error message.
// Don't expect any deep science here, just a set of heuristics that suggest
// that e.g. 1-byte-read-global-buffer-overflow is less scary than
// 8-byte-write-stack-use-after-return.
//
// Every error report has one or more features, such as memory access size,
// type (read or write), type of accessed memory (e.g. free-d heap, or a global
// redzone), etc. Every such feature has an int score and a string description.
// The overall score is the sum of all feature scores and the description
// is a concatenation of feature descriptions.
// Examples:
// 17 (4-byte-read-heap-buffer-overflow)
// 65 (multi-byte-write-stack-use-after-return)
// 10 (null-deref)
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_SCARINESS_SCORE_H
#define ASAN_SCARINESS_SCORE_H
#include "asan_flags.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
// Accumulates an integer "scariness" score for an error report together with
// a '-'-separated textual description of the contributing features.
// POD-like on purpose (no constructor) so instances can live in global
// storage; callers must invoke Clear() before first use.
struct ScarinessScoreBase {
  // Reset the score and the description; required before the first Scare().
  void Clear() {
    descr[0] = 0;
    score = 0;
  }
  // Add |add_to_score| points and append |reason| to the description,
  // separating it from previous reasons with '-'.
  // Fix: dropped the stray ';' after the member body (-Wextra-semi).
  void Scare(int add_to_score, const char *reason) {
    if (descr[0])
      internal_strlcat(descr, "-", sizeof(descr));
    internal_strlcat(descr, reason, sizeof(descr));
    score += add_to_score;
  }
  int GetScore() const { return score; }
  const char *GetDescription() const { return descr; }
  // Print the accumulated score, but only if it is non-zero and the
  // print_scariness flag is set.
  void Print() {
    if (score && flags()->print_scariness)
      Printf("SCARINESS: %d (%s)\n", score, descr);
  }
  // Convenience helper: print a one-shot score/description pair without the
  // caller having to manage an instance.
  static void PrintSimple(int score, const char *descr) {
    ScarinessScoreBase SSB;
    SSB.Clear();
    SSB.Scare(score, descr);
    SSB.Print();
  }

 private:
  int score;
  char descr[1024];
};
// Self-initializing variant of ScarinessScoreBase for automatic variables:
// the constructor performs the Clear() that the base requires before use.
struct ScarinessScore : ScarinessScoreBase {
  ScarinessScore() { Clear(); }
};
} // namespace __asan
#endif // ASAN_SCARINESS_SCORE_H
......@@ -46,7 +46,10 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
uptr stack_top = t->stack_top();
uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t);
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom,
fast);
}
} else if (!t && !fast) {
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */
stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
......
......@@ -87,6 +87,7 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {
if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
CHECK(frames);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
const char *function_name = cur->info.function;
if (!function_name) {
......
......@@ -118,6 +118,77 @@ void AsanThread::Destroy() {
DTLS_Destroy();
}
// Begin a user-level stack (fiber) switch: record the bounds of the stack we
// are about to run on and publish the "switch in progress" flag so stack
// queries go through GetStackBounds(). Nested switches are a fatal error.
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  // Release-store after the next_* fields are written, so a reader that
  // observes the flag also observes consistent bounds.
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber will die; destroy its fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}
// Complete a fiber switch started by StartSwitchFiber(): restore the saved
// fake stack (if any), report the previous stack bounds via
// |bottom_old|/|size_old|, and install the new bounds. Calling this without
// a pending switch is a fatal error.
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  // Clear the flag only after the bounds are consistent again; readers that
  // race with this (see GetStackBounds) check the next_* fields first.
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}
// Return the bounds of the stack the calling code is currently running on,
// remaining correct (async-signal-safely) while a fiber switch is in flight.
// A local variable's address is used to decide which stack we are on.
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire))
    return StackBounds{stack_bottom_, stack_top_};  // NOLINT
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: need to check next stack first, because FinishSwitchFiber
  // may be in process of overwriting stack_top_/bottom_. But in such case
  // we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return StackBounds{next_stack_bottom_, next_stack_top_};  // NOLINT
  return StackBounds{stack_bottom_, stack_top_};  // NOLINT
}
// Fiber-aware accessors for the current stack: each one routes through
// GetStackBounds() so it stays correct during a stack switch.
uptr AsanThread::stack_top() {
  const StackBounds bounds = GetStackBounds();
  return bounds.top;
}

uptr AsanThread::stack_bottom() {
  const StackBounds bounds = GetStackBounds();
  return bounds.bottom;
}

uptr AsanThread::stack_size() {
  const StackBounds bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}
// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known and the procedure has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
......@@ -148,6 +219,8 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
}
void AsanThread::Init() {
next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release);
fake_stack_ = nullptr; // Will be initialized lazily if needed.
CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls();
......@@ -193,10 +266,12 @@ thread_return_t AsanThread::ThreadStart(
void AsanThread::SetThreadStackAndTls() {
uptr tls_size = 0;
GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size_, &tls_begin_,
&tls_size);
stack_top_ = stack_bottom_ + stack_size_;
uptr stack_size = 0;
GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
const_cast<uptr *>(&stack_size), &tls_begin_, &tls_size);
stack_top_ = stack_bottom_ + stack_size;
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
int local;
CHECK(AddrIsInStack((uptr)&local));
......@@ -247,6 +322,11 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
return true;
}
// Check whether |addr| lies within the currently active stack; uses
// GetStackBounds() so the answer is correct even mid fiber-switch.
bool AsanThread::AddrIsInStack(uptr addr) {
  const StackBounds bounds = GetStackBounds();
  return bounds.bottom <= addr && addr < bounds.top;
}
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
void *addr) {
AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
......@@ -269,7 +349,7 @@ AsanThread *GetCurrentThread() {
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
if (ThreadStackContainsAddress(tctx, &context)) {
if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}
......@@ -320,8 +400,8 @@ __asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) {
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t) return false;
*stack_begin = t->stack_bottom();
......@@ -331,6 +411,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
// ASan doesn't keep allocator caches in TLS, so these are unused.
*cache_begin = 0;
*cache_end = 0;
*dtls = t->dtls();
return true;
}
......@@ -353,3 +434,33 @@ void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
extern "C" {
// Public interface: notify ASan that the current thread is about to switch to
// a different stack (fiber). |fakestacksave| receives the thread's current
// fake stack so __sanitizer_finish_switch_fiber() can restore it; passing
// null tells the runtime the old fiber is going away.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    // Fix: the diagnostic previously named "__asan_start_switch_fiber",
    // which is not this function's name.
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}
// Public interface: complete a fiber switch begun with
// __sanitizer_start_switch_fiber(). |fakestack| is the value saved there;
// the old stack's bounds are returned through |bottom_old|/|size_old|.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    // Fix: the diagnostic previously named "__asan_finish_switch_fiber",
    // which is not this function's name.
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack *)fakestack,
                       (uptr *)bottom_old,
                       (uptr *)size_old);
}
}
......@@ -21,6 +21,10 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __sanitizer {
struct DTLS;
} // namespace __sanitizer
namespace __asan {
const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
......@@ -60,11 +64,12 @@ class AsanThread {
thread_return_t ThreadStart(uptr os_id,
atomic_uintptr_t *signal_thread_is_registered);
uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }
uptr stack_size() { return stack_size_; }
uptr stack_top();
uptr stack_bottom();
uptr stack_size();
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
DTLS *dtls() { return dtls_; }
u32 tid() { return context_->tid; }
AsanThreadContext *context() { return context_; }
void set_context(AsanThreadContext *context) { context_ = context; }
......@@ -76,9 +81,7 @@ class AsanThread {
};
bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
}
bool AddrIsInStack(uptr addr);
void DeleteFakeStack(int tid) {
if (!fake_stack_) return;
......@@ -88,13 +91,20 @@ class AsanThread {
t->Destroy(tid);
}
void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
uptr *size_old);
bool has_fake_stack() {
return (reinterpret_cast<uptr>(fake_stack_) > 1);
return !atomic_load(&stack_switching_, memory_order_relaxed) &&
(reinterpret_cast<uptr>(fake_stack_) > 1);
}
FakeStack *fake_stack() {
if (!__asan_option_detect_stack_use_after_return)
return nullptr;
if (atomic_load(&stack_switching_, memory_order_relaxed))
return nullptr;
if (!has_fake_stack())
return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_;
......@@ -120,16 +130,27 @@ class AsanThread {
void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack();
struct StackBounds {
uptr bottom;
uptr top;
};
StackBounds GetStackBounds() const;
AsanThreadContext *context_;
thread_callback_t start_routine_;
void *arg_;
uptr stack_top_;
uptr stack_bottom_;
// stack_size_ == stack_top_ - stack_bottom_;
// It needs to be set in a async-signal-safe manner.
uptr stack_size_;
// these variables are used when the thread is about to switch stack
uptr next_stack_top_;
uptr next_stack_bottom_;
// true if switching is in progress
atomic_uint8_t stack_switching_;
uptr tls_begin_;
uptr tls_end_;
DTLS *dtls_;
FakeStack *fake_stack_;
AsanThreadLocalMallocStorage malloc_storage_;
......
......@@ -10,16 +10,16 @@
// This file defines a family of thunks that should be statically linked into
// the DLLs that have ASan instrumentation in order to delegate the calls to the
// shared runtime that lives in the main binary.
// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
// details.
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
// Only compile this code when buidling asan_dll_thunk.lib
// Only compile this code when building asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "asan_init_version.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// ---------- Function interception helper functions and macros ----------- {{{1
extern "C" {
......@@ -28,6 +28,8 @@ void *__stdcall GetProcAddress(void *module, const char *proc_name);
void abort();
}
using namespace __sanitizer;
static uptr getRealProcAddressOrDie(const char *name) {
uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
......@@ -196,9 +198,11 @@ static void InterceptHooks();
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
extern "C" {
int __asan_option_detect_stack_use_after_return;
uptr __asan_shadow_memory_dynamic_address;
// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.
......@@ -212,7 +216,8 @@ extern "C" {
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
__asan_shadow_memory_dynamic_address =
(uptr)__asan_get_shadow_memory_dynamic_address();
InterceptHooks();
}
}
......@@ -255,6 +260,13 @@ INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);
INTERFACE_FUNCTION(__asan_set_shadow_00);
INTERFACE_FUNCTION(__asan_set_shadow_f1);
INTERFACE_FUNCTION(__asan_set_shadow_f2);
INTERFACE_FUNCTION(__asan_set_shadow_f3);
INTERFACE_FUNCTION(__asan_set_shadow_f5);
INTERFACE_FUNCTION(__asan_set_shadow_f8);
INTERFACE_FUNCTION(__asan_alloca_poison);
INTERFACE_FUNCTION(__asan_allocas_unpoison);
......@@ -309,8 +321,6 @@ INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
......@@ -324,6 +334,8 @@ INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
......@@ -333,6 +345,7 @@ INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
......@@ -340,23 +353,31 @@ INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
// TODO(timurrrr): Add more interface functions on the as-needed basis.
// ----------------- Memory allocation functions ---------------------
WRAP_V_W(free)
WRAP_V_W(_free_base)
WRAP_V_WW(_free_dbg)
WRAP_W_W(malloc)
WRAP_W_W(_malloc_base)
WRAP_W_WWWW(_malloc_dbg)
WRAP_W_WW(calloc)
WRAP_W_WW(_calloc_base)
WRAP_W_WWWWW(_calloc_dbg)
WRAP_W_WWW(_calloc_impl)
WRAP_W_WW(realloc)
WRAP_W_WW(_realloc_base)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_WWW(_recalloc_base)
WRAP_W_W(_msize)
WRAP_W_W(_expand)
......@@ -369,6 +390,10 @@ WRAP_W_W(_expand_dbg)
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
#ifdef _WIN64
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
#else
INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
// _except_handler4 checks -GS cookie which is different for each module, so we
......@@ -377,10 +402,13 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}
#endif
INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
#if SANITIZER_INTERCEPT_MEMCHR
INTERCEPT_LIBRARY_FUNCTION(memchr);
#endif
INTERCEPT_LIBRARY_FUNCTION(memcmp);
INTERCEPT_LIBRARY_FUNCTION(memcpy);
INTERCEPT_LIBRARY_FUNCTION(memmove);
......@@ -390,12 +418,14 @@ INTERCEPT_LIBRARY_FUNCTION(strchr);
INTERCEPT_LIBRARY_FUNCTION(strcmp);
INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
INTERCEPT_LIBRARY_FUNCTION(strcspn);
INTERCEPT_LIBRARY_FUNCTION(strdup);
INTERCEPT_LIBRARY_FUNCTION(strlen);
INTERCEPT_LIBRARY_FUNCTION(strncat);
INTERCEPT_LIBRARY_FUNCTION(strncmp);
INTERCEPT_LIBRARY_FUNCTION(strncpy);
INTERCEPT_LIBRARY_FUNCTION(strnlen);
INTERCEPT_LIBRARY_FUNCTION(strpbrk);
INTERCEPT_LIBRARY_FUNCTION(strrchr);
INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtol);
......@@ -405,7 +435,9 @@ INTERCEPT_LIBRARY_FUNCTION(wcslen);
// is defined.
void InterceptHooks() {
INTERCEPT_HOOKS();
#ifndef _WIN64
INTERCEPT_FUNCTION(_except_handler4);
#endif
}
// We want to call __asan_init before C/C++ initializers/constructors are
......
//===-- asan_win_uar_thunk.cc ---------------------------------------------===//
//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
......@@ -14,11 +14,11 @@
// This includes:
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
// - installing a custom SEH handlerx
// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
// Only compile this code when buidling asan_dynamic_runtime_thunk.lib
// Only compile this code when building asan_dynamic_runtime_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
......@@ -27,7 +27,7 @@
// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XIZ", long, read) // NOLINT
#pragma section(".CRT$XCAB", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT
......@@ -40,12 +40,16 @@
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone the a variable that is
// constant after initialization anyways.
// just to work around this issue, let's clone the variable that is constant
// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
void* __asan_shadow_memory_dynamic_address =
__asan_get_shadow_memory_dynamic_address();
}
////////////////////////////////////////////////////////////////////////////////
......@@ -57,6 +61,7 @@ int __asan_option_detect_stack_use_after_return =
// using atexit() that calls a small subset of C terminators
// where LLVM global_dtors is placed. Fingers crossed, no other C terminators
// are there.
extern "C" int __cdecl atexit(void (__cdecl *f)(void));
extern "C" void __cdecl _initterm(void *a, void *b);
namespace {
......@@ -70,6 +75,7 @@ void UnregisterGlobals() {
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
} // namespace
// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
......@@ -78,8 +84,6 @@ int ScheduleUnregisterGlobals() {
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
} // namespace
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
......@@ -90,7 +94,8 @@ static int SetSEHFilter() { return __asan_set_seh_filter(); }
// Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
SetSEHFilter;
}
#endif // ASAN_DYNAMIC_RUNTIME_THUNK
......@@ -3,4 +3,4 @@
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
3:0:0
4:0:0
/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
 *
 * The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file defines macros for use in compiler-rt assembler source.
 * This file is not part of the interface of this library.
 *
 * ===----------------------------------------------------------------------===
 */

#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H

/* Token used to pack several assembler statements onto the single line that
 * the DEFINE_* macros below expand to. PowerPC targets use '@' instead of
 * ';' here. */
#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
#define SEPARATOR @
#else
#define SEPARATOR ;
#endif

/* Object-format-specific spellings of the directives used by the DEFINE_*
 * macros: Mach-O (__APPLE__), ELF (__ELF__), or everything else. */
#if defined(__APPLE__)
#define HIDDEN(name) .private_extern name
#define LOCAL_LABEL(name) L_##name
// tell linker it can break up file at label boundaries
#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
#define SYMBOL_IS_FUNC(name)
#define CONST_SECTION .const

#define NO_EXEC_STACK_DIRECTIVE

#elif defined(__ELF__)
#define HIDDEN(name) .hidden name
#define LOCAL_LABEL(name) .L_##name
#define FILE_LEVEL_DIRECTIVE
/* ARM ELF assemblers spell the symbol type as %function rather than
 * @function. */
#if defined(__arm__)
#define SYMBOL_IS_FUNC(name) .type name,%function
#else
#define SYMBOL_IS_FUNC(name) .type name,@function
#endif
#define CONST_SECTION .section .rodata

/* Emit the note section that marks the stack as non-executable on targets
 * whose linkers look for it. */
#if defined(__GNU__) || defined(__ANDROID__) || defined(__FreeBSD__)
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif

#else // !__APPLE__ && !__ELF__

#define HIDDEN(name)
#define LOCAL_LABEL(name) .L ## name
#define FILE_LEVEL_DIRECTIVE
#define SYMBOL_IS_FUNC(name) \
  .def name SEPARATOR \
  .scl 2 SEPARATOR \
  .type 32 SEPARATOR \
  .endef
#define CONST_SECTION .section .rdata,"rd"

#define NO_EXEC_STACK_DIRECTIVE

#endif

/* ---- ARM-specific instruction-selection helpers. ---- */
#if defined(__arm__)

/* ARMv4T and ARMv5+ provide the BX interworking branch. */
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
#if !defined(__ARM_FEATURE_CLZ) && \
    (__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif

/* Jump through a register, with or without BX; JMPc takes a condition code
 * suffix. */
#ifdef ARM_HAS_BX
#define JMP(r) bx r
#define JMPc(r, c) bx##c r
#else
#define JMP(r) mov pc, r
#define JMPc(r, c) mov##c pc, r
#endif

// pop {pc} can't switch Thumb mode on ARMv4T
#if __ARM_ARCH >= 5
#define POP_PC() pop {pc}
#else
#define POP_PC() \
  pop {ip}; \
  JMP(ip)
#endif

/* IT/ITT conditional-execution prefixes exist only in Thumb-2; expand to
 * nothing elsewhere. */
#if __ARM_ARCH_ISA_THUMB == 2
#define IT(cond) it cond
#define ITT(cond) itt cond
#else
#define IT(cond)
#define ITT(cond)
#endif

/* Force the 32-bit (.w) encoding of an instruction under Thumb-2. */
#if __ARM_ARCH_ISA_THUMB == 2
#define WIDE(op) op.w
#else
#define WIDE(op) op
#endif
#endif

/* Apply the platform's user-label prefix (__USER_LABEL_PREFIX__) to a C-level
 * symbol name. The GLUE indirection forces macro expansion of the prefix. */
#define GLUE2(a, b) a##b
#define GLUE(a, b) GLUE2(a, b)
#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)

#ifdef VISIBILITY_HIDDEN
#define DECLARE_SYMBOL_VISIBILITY(name) \
  HIDDEN(SYMBOL_NAME(name)) SEPARATOR
#else
#define DECLARE_SYMBOL_VISIBILITY(name)
#endif

/* Open the definition of a global compiler-rt function: emits the .globl,
 * symbol-type, and visibility directives followed by the label. */
#define DEFINE_COMPILERRT_FUNCTION(name) \
  FILE_LEVEL_DIRECTIVE SEPARATOR \
  .globl SYMBOL_NAME(name) SEPARATOR \
  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
  DECLARE_SYMBOL_VISIBILITY(name) \
  SYMBOL_NAME(name):

/* Same, but additionally marks the symbol as a Thumb entry point. */
#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
  FILE_LEVEL_DIRECTIVE SEPARATOR \
  .globl SYMBOL_NAME(name) SEPARATOR \
  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
  DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
  .thumb_func SEPARATOR \
  SYMBOL_NAME(name):

/* Global but hidden: visible across objects within the library only. */
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
  FILE_LEVEL_DIRECTIVE SEPARATOR \
  .globl SYMBOL_NAME(name) SEPARATOR \
  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
  HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
  SYMBOL_NAME(name):

/* As above, but |name| is used verbatim without the user-label prefix. */
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
  .globl name SEPARATOR \
  SYMBOL_IS_FUNC(name) SEPARATOR \
  HIDDEN(name) SEPARATOR \
  name:

/* Define |name| as an alias for |target| via .set.
 * NOTE(review): DECLARE_SYMBOL_VISIBILITY itself applies SYMBOL_NAME, so
 * passing SYMBOL_NAME(name) here mangles the name twice when
 * VISIBILITY_HIDDEN is set — confirm on targets with a non-empty
 * __USER_LABEL_PREFIX__. */
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
  .globl SYMBOL_NAME(name) SEPARATOR \
  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
  DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
  .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR

/* On ARM EABI targets, also export the __aeabi_* alias of a helper. */
#if defined(__ARM_EABI__)
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
  DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
#else
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
#endif

/* Record the function's size on ELF; other formats have no equivalent. */
#ifdef __ELF__
#define END_COMPILERRT_FUNCTION(name) \
  .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#else
#define END_COMPILERRT_FUNCTION(name)
#endif

#endif /* COMPILERRT_ASSEMBLY_H */
......@@ -604,6 +604,7 @@ ac_subst_vars='am__EXEEXT_FALSE
am__EXEEXT_TRUE
LTLIBOBJS
LIBOBJS
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS
TSAN_TARGET_DEPENDENT_OBJECTS
LIBBACKTRACE_SUPPORTED_FALSE
LIBBACKTRACE_SUPPORTED_TRUE
......@@ -12027,7 +12028,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
#line 12030 "configure"
#line 12031 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
......@@ -12133,7 +12134,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
#line 12136 "configure"
#line 12137 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
......@@ -16498,6 +16499,7 @@ fi
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
......
......@@ -375,5 +375,6 @@ _EOF
fi
AC_SUBST([TSAN_TARGET_DEPENDENT_OBJECTS])
AC_SUBST([SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS])
AC_OUTPUT
......@@ -20,12 +20,14 @@
# Filter out unsupported systems.
TSAN_TARGET_DEPENDENT_OBJECTS=
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=
case "${target}" in
x86_64-*-linux* | i?86-*-linux*)
if test x$ac_cv_sizeof_void_p = x8; then
TSAN_SUPPORTED=yes
LSAN_SUPPORTED=yes
TSAN_TARGET_DEPENDENT_OBJECTS=tsan_rtl_amd64.lo
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=sanitizer_linux_x86_64.lo
fi
;;
powerpc*-*-linux*)
......
......@@ -57,6 +57,23 @@ extern "C" {
deallocation of "ptr". */
void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
void __sanitizer_free_hook(const volatile void *ptr);
/* Installs a pair of hooks for malloc/free.
Several (currently, 5) hook pairs may be installed, they are executed
in the order they were installed and after calling
__sanitizer_malloc_hook/__sanitizer_free_hook.
Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be
chained and do not rely on weak symbols working on the platform, but
require __sanitizer_install_malloc_and_free_hooks to be called at startup
and thus will not be called on malloc/free very early in the process.
Returns the number of hooks currently installed or 0 on failure.
Not thread-safe, should be called in the main thread before starting
other threads.
*/
int __sanitizer_install_malloc_and_free_hooks(
void (*malloc_hook)(const volatile void *, size_t),
void (*free_hook)(const volatile void *));
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -39,6 +39,9 @@ extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the provided file descriptor
// (casted to void *).
void __sanitizer_set_report_fd(void *fd);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
......@@ -112,6 +115,16 @@ extern "C" {
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
// Symbolizes the supplied 'pc' using the format string 'fmt'.
// Outputs at most 'out_buf_size' bytes into 'out_buf'.
// The format syntax is described in
// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
size_t out_buf_size);
// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
char *out_buf, size_t out_buf_size);
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __sanitizer_set_death_callback(void (*callback)(void));
......@@ -123,9 +136,50 @@ extern "C" {
// to know what is being passed to libc functions, e.g. memcmp.
// FIXME: implement more hooks.
void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
const void *s2, size_t n);
const void *s2, size_t n, int result);
void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
const char *s2, size_t n);
const char *s2, size_t n, int result);
void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
const char *s2, size_t n, int result);
void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
const char *s2, int result);
void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
const char *s2, int result);
void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
const char *s2, char *result);
void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
const char *s2, char *result);
void __sanitizer_weak_hook_memmem(void *called_pc,
const void *s1, size_t len1,
const void *s2, size_t len2, void *result);
// Prints stack traces for all live heap allocations ordered by total
// allocation size until `top_percent` of total live heap is shown.
// `top_percent` should be between 1 and 100.
// Experimental feature currently available only with asan on Linux/x86_64.
void __sanitizer_print_memory_profile(size_t top_percent);
// Fiber annotation interface.
// Before switching to a different stack, one must call
// __sanitizer_start_switch_fiber with a pointer to the bottom of the
// destination stack and its size. When code starts running on the new stack,
// it must call __sanitizer_finish_switch_fiber to finalize the switch.
// The start_switch function takes a void** to store the current fake stack if
// there is one (it is needed when detect_stack_use_after_return is enabled).
// When restoring a stack, this pointer must be given to the finish_switch
// function. In most cases, this void* can be stored on the stack just before
// switching. When leaving a fiber definitely, null must be passed as first
// argument to the start_switch function so that the fake stack is destroyed.
// If you do not want support for stack use-after-return detection, you can
// always pass null to these two functions.
// Note that the fake stack mechanism is disabled during fiber switch, so if a
// signal callback runs during the switch, it will not benefit from the stack
// use-after-return detection.
void __sanitizer_start_switch_fiber(void **fake_stack_save,
const void *bottom, size_t size);
void __sanitizer_finish_switch_fiber(void *fake_stack_save,
const void **bottom_old,
size_t *size_old);
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -56,6 +56,7 @@ extern "C" {
// __sanitizer_get_number_of_counters bytes long and 8-aligned.
uintptr_t
__sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
#ifdef __cplusplus
} // extern "C"
#endif
......
//===-- sanitizer/esan_interface.h ------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of EfficiencySanitizer, a family of performance tuners.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ESAN_INTERFACE_H
#define SANITIZER_ESAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
// We declare our interface routines as weak to allow the user to avoid
// ifdefs and instead use this pattern to allow building the same sources
// with and without our runtime library:
// if (__esan_report)
// __esan_report();
#ifdef _MSC_VER
/* selectany is as close to weak as we'll get. */
#define COMPILER_RT_WEAK __declspec(selectany)
#elif __GNUC__
#define COMPILER_RT_WEAK __attribute__((weak))
#else
/* Unknown compiler: no weak-linkage support; the "if (__esan_report)"
   pattern above will not work and calls resolve normally. */
#define COMPILER_RT_WEAK
#endif
#ifdef __cplusplus
extern "C" {
#endif
// This function can be called mid-run (or at the end of a run for
// a server process that doesn't shut down normally) to request that
// data for that point in the run be reported from the tool.
void COMPILER_RT_WEAK __esan_report();
// This function returns the number of samples that the esan tool has collected
// to this point. This is useful for testing.
unsigned int COMPILER_RT_WEAK __esan_get_sample_count();
#ifdef __cplusplus
} // extern "C"
#endif
#endif // SANITIZER_ESAN_INTERFACE_H
......@@ -1833,6 +1833,17 @@
__sanitizer_syscall_pre_impl_vfork()
#define __sanitizer_syscall_post_vfork(res) \
__sanitizer_syscall_post_impl_vfork(res)
#define __sanitizer_syscall_pre_sigaction(signum, act, oldact) \
__sanitizer_syscall_pre_impl_sigaction((long)signum, (long)act, (long)oldact)
#define __sanitizer_syscall_post_sigaction(res, signum, act, oldact) \
__sanitizer_syscall_post_impl_sigaction(res, (long)signum, (long)act, \
(long)oldact)
#define __sanitizer_syscall_pre_rt_sigaction(signum, act, oldact, sz) \
__sanitizer_syscall_pre_impl_rt_sigaction((long)signum, (long)act, \
(long)oldact, (long)sz)
#define __sanitizer_syscall_post_rt_sigaction(res, signum, act, oldact, sz) \
__sanitizer_syscall_post_impl_rt_sigaction(res, (long)signum, (long)act, \
(long)oldact, (long)sz)
// And now a few syscalls we don't handle yet.
#define __sanitizer_syscall_pre_afs_syscall(...)
......@@ -1887,7 +1898,6 @@
#define __sanitizer_syscall_pre_query_module(...)
#define __sanitizer_syscall_pre_readahead(...)
#define __sanitizer_syscall_pre_readdir(...)
#define __sanitizer_syscall_pre_rt_sigaction(...)
#define __sanitizer_syscall_pre_rt_sigreturn(...)
#define __sanitizer_syscall_pre_rt_sigsuspend(...)
#define __sanitizer_syscall_pre_security(...)
......@@ -1901,7 +1911,6 @@
#define __sanitizer_syscall_pre_setreuid32(...)
#define __sanitizer_syscall_pre_set_thread_area(...)
#define __sanitizer_syscall_pre_setuid32(...)
#define __sanitizer_syscall_pre_sigaction(...)
#define __sanitizer_syscall_pre_sigaltstack(...)
#define __sanitizer_syscall_pre_sigreturn(...)
#define __sanitizer_syscall_pre_sigsuspend(...)
......@@ -1969,7 +1978,6 @@
#define __sanitizer_syscall_post_query_module(res, ...)
#define __sanitizer_syscall_post_readahead(res, ...)
#define __sanitizer_syscall_post_readdir(res, ...)
#define __sanitizer_syscall_post_rt_sigaction(res, ...)
#define __sanitizer_syscall_post_rt_sigreturn(res, ...)
#define __sanitizer_syscall_post_rt_sigsuspend(res, ...)
#define __sanitizer_syscall_post_security(res, ...)
......@@ -1983,7 +1991,6 @@
#define __sanitizer_syscall_post_setreuid32(res, ...)
#define __sanitizer_syscall_post_set_thread_area(res, ...)
#define __sanitizer_syscall_post_setuid32(res, ...)
#define __sanitizer_syscall_post_sigaction(res, ...)
#define __sanitizer_syscall_post_sigaltstack(res, ...)
#define __sanitizer_syscall_post_sigreturn(res, ...)
#define __sanitizer_syscall_post_sigsuspend(res, ...)
......@@ -3060,7 +3067,13 @@ void __sanitizer_syscall_pre_impl_fork();
void __sanitizer_syscall_post_impl_fork(long res);
void __sanitizer_syscall_pre_impl_vfork();
void __sanitizer_syscall_post_impl_vfork(long res);
void __sanitizer_syscall_pre_impl_sigaction(long signum, long act, long oldact);
void __sanitizer_syscall_post_impl_sigaction(long res, long signum, long act,
long oldact);
void __sanitizer_syscall_pre_impl_rt_sigaction(long signum, long act,
long oldact, long sz);
void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
long oldact, long sz);
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -169,6 +169,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
......
......@@ -90,8 +90,8 @@ typedef __sanitizer::OFF64_T OFF64_T;
// Just a pair of pointers.
struct interpose_substitution {
const uptr replacement;
const uptr original;
const __sanitizer::uptr replacement;
const __sanitizer::uptr original;
};
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
......@@ -156,10 +156,12 @@ const interpose_substitution substitution_##func_name[] \
namespace __interception { \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // __APPLE__
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define ASSIGN_REAL(x, y)
#endif // __APPLE__
#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
......
......@@ -32,6 +32,31 @@ bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
uptr InternalGetProcAddress(void *module, const char *func_name);
// Overrides a function only when it is called from a specific DLL. For example,
// this is used to override calls to HeapAlloc/HeapFree from ucrtbase without
// affecting other third party libraries.
bool OverrideImportedFunction(const char *module_to_patch,
const char *imported_module,
const char *function_name, uptr new_function,
uptr *orig_old_func);
#if !SANITIZER_WINDOWS64
// Exposed for unittests
bool OverrideFunctionWithDetour(
uptr old_func, uptr new_func, uptr *orig_old_func);
#endif
// Exposed for unittests
bool OverrideFunctionWithRedirectJump(
uptr old_func, uptr new_func, uptr *orig_old_func);
bool OverrideFunctionWithHotPatch(
uptr old_func, uptr new_func, uptr *orig_old_func);
bool OverrideFunctionWithTrampoline(
uptr old_func, uptr new_func, uptr *orig_old_func);
// Exposed for unittests
void TestOnlyReleaseTrampolineRegions();
} // namespace __interception
#if defined(INTERCEPTION_DYNAMIC_CRT)
......@@ -48,5 +73,10 @@ uptr InternalGetProcAddress(void *module, const char *func_name);
#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)
#define INTERCEPT_FUNCTION_DLLIMPORT(user_dll, provider_dll, func) \
::__interception::OverrideImportedFunction( \
user_dll, provider_dll, #func, (::__interception::uptr)WRAP(func), \
(::__interception::uptr *)&REAL(func))
#endif // INTERCEPTION_WIN_H
#endif // _WIN32
......@@ -211,6 +211,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
......
......@@ -210,6 +210,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
......
......@@ -41,6 +41,7 @@ static void InitializeFlags() {
cf.CopyFrom(*common_flags());
cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf.malloc_context_size = 30;
cf.intercept_tls_get_addr = true;
cf.detect_leaks = true;
cf.exitcode = 23;
OverrideCommonFlags(cf);
......@@ -69,6 +70,7 @@ extern "C" void __lsan_init() {
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
CacheBinaryName();
AvoidCVE_2016_2143();
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
......
......@@ -22,8 +22,11 @@
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
/* context */ 0, stack_top, stack_bottom, fast); \
if (!SANITIZER_MIPS || \
IsValidFrame(GET_CURRENT_FRAME(), stack_top, stack_bottom)) { \
stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
/* context */ 0, stack_top, stack_bottom, fast); \
} \
}
#define GET_STACK_TRACE_FATAL \
......
......@@ -41,10 +41,17 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = 0x600000000000ULL;
static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
......@@ -97,11 +104,13 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
memset(p, 0, size);
RegisterAllocation(stack, p, size);
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
RunMallocHooks(p, size);
return p;
}
void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
allocator.Deallocate(&cache, p);
}
......
......@@ -21,6 +21,7 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#if CAN_SANITIZE_LEAKS
namespace __lsan {
......@@ -29,8 +30,17 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
if (!disable_counter && common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
disable_counter--;
}
Flags lsan_flags;
......@@ -183,9 +193,10 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
&cache_begin, &cache_end);
&cache_begin, &cache_end, &dtls);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
......@@ -209,9 +220,18 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack). Again, consider the entire stack
// range to be reachable.
// signal handler on alternate stack, or swapcontext was used).
// Again, consider the entire stack range to be reachable.
LOG_THREADS("WARNING: stack pointer not in stack range.\n");
uptr page_size = GetPageSizeCached();
int skipped = 0;
while (stack_begin < stack_end &&
!IsAccessibleMemoryRange(stack_begin, 1)) {
skipped++;
stack_begin += page_size;
}
LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
skipped, stack_begin, stack_end);
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
......@@ -236,6 +256,17 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (tls_end > cache_end)
ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
}
if (dtls) {
for (uptr j = 0; j < dtls->dtv_size; ++j) {
uptr dtls_beg = dtls->dtv[j].beg;
uptr dtls_end = dtls_beg + dtls->dtv[j].size;
if (dtls_beg < dtls_end) {
LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
kReachable);
}
}
}
}
}
}
......@@ -414,6 +445,11 @@ static bool CheckForLeaks() {
if (!param.success) {
Report("LeakSanitizer has encountered a fatal error.\n");
Report(
"HINT: For debugging, try setting environment variable "
"LSAN_OPTIONS=verbosity=1:log_threads=1\n");
Report(
"HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
Die();
}
param.leak_report.ApplySuppressions();
......@@ -615,6 +651,13 @@ uptr LeakReport::UnsuppressedLeakCount() {
}
} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS
using namespace __lsan; // NOLINT
......@@ -680,18 +723,14 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
__lsan::disable_counter++;
__lsan::DisableInThisThread();
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
if (!__lsan::disable_counter && common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
__lsan::disable_counter--;
__lsan::EnableInThisThread();
#endif
}
......
......@@ -29,6 +29,7 @@
namespace __sanitizer {
class FlagParser;
struct DTLS;
}
namespace __lsan {
......@@ -116,6 +117,16 @@ void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function.
struct ScopedInterceptorDisabler {
ScopedInterceptorDisabler() { DisableInThisThread(); }
~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
// Special case for "new T[0]" where T is a type with DTOR.
// new T[0] will allocate one word for the array size (0) and store a pointer
// to the end of allocated chunk.
......@@ -139,8 +150,8 @@ bool WordIsPoisoned(uptr addr);
void LockThreadRegistry();
void UnlockThreadRegistry();
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end);
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls);
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void *arg);
// If called from the main thread, updates the main thread's TID in the thread
......
......@@ -24,9 +24,8 @@
namespace __lsan {
static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's more
// than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;
static bool IsLinker(const char* full_name) {
......@@ -34,20 +33,24 @@ static bool IsLinker(const char* full_name) {
}
void InitializePlatformSpecificModules() {
internal_memset(linker_placeholder, 0, sizeof(linker_placeholder));
uptr num_matches = GetListOfModules(
reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker);
if (num_matches == 1) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
return;
ListOfModules modules;
modules.init();
for (LoadedModule &module : modules) {
if (!IsLinker(module.full_name())) continue;
if (linker == nullptr) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
*linker = module;
module = LoadedModule();
} else {
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
linker->clear();
linker = nullptr;
return;
}
}
if (num_matches == 0)
VReport(1, "LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
else if (num_matches > 1)
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
linker = nullptr;
VReport(1, "LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
}
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
......@@ -66,7 +69,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
if (begin <= allocator_begin && allocator_begin < end) {
CHECK_LE(allocator_begin, allocator_end);
CHECK_LT(allocator_end, end);
CHECK_LE(allocator_end, end);
if (begin < allocator_begin)
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
kReachable);
......@@ -98,6 +101,7 @@ static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
struct ProcessPlatformAllocParam {
Frontier *frontier;
StackDepotReverseMap *stack_depot_reverse_map;
bool skip_linker_allocations;
};
// ForEachChunk callback. Identifies unreachable chunks which must be treated as
......@@ -115,7 +119,8 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
// it as reachable, as we can't properly report its allocation stack anyway.
if (caller_pc == 0 || linker->containsAddress(caller_pc)) {
if (caller_pc == 0 || (param->skip_linker_allocations &&
linker->containsAddress(caller_pc))) {
m.set_tag(kReachable);
param->frontier->push_back(chunk);
}
......@@ -140,10 +145,12 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
StackDepotReverseMap stack_depot_reverse_map;
ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
ProcessPlatformAllocParam arg;
arg.frontier = frontier;
arg.stack_depot_reverse_map = &stack_depot_reverse_map;
arg.skip_linker_allocations =
flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
......
......@@ -32,6 +32,10 @@ LSAN_FLAG(bool, use_tls, true,
"Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
"Root set: include regions added via __lsan_register_root_region().")
LSAN_FLAG(bool, use_ld_allocations, true,
"Root set: mark as reachable all allocations made from dynamic "
"linker. This was the old way to handle dynamic TLS, and will "
"be removed soon. Do not use this flag.")
LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
LSAN_FLAG(bool, use_poisoned, false,
......
......@@ -18,8 +18,10 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
using namespace __lsan;
......@@ -102,6 +104,14 @@ INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
return 0;
}
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
DTLS_on_libc_memalign(res, size);
return res;
}
INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
......@@ -172,11 +182,6 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
OPERATOR_DELETE_BODY;
}
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
ALIAS(WRAPPER_NAME(memalign));
///// Thread initialization and finalization. /////
static unsigned g_thread_finalize_key;
......@@ -235,7 +240,15 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
int res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
int res;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
// objects, the latter are calculated by obscure pointer arithmetic.
ScopedInterceptorDisabler disabler;
res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
}
if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
CHECK_NE(tid, 0);
......
......@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"
namespace __lsan {
......@@ -33,7 +34,7 @@ static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;
void InitializeThreadRegistry() {
static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
......@@ -47,18 +48,20 @@ void SetCurrentThread(u32 tid) {
}
ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
stack_end_(0),
cache_begin_(0),
cache_end_(0),
tls_begin_(0),
tls_end_(0) {}
: ThreadContextBase(tid),
stack_begin_(0),
stack_end_(0),
cache_begin_(0),
cache_end_(0),
tls_begin_(0),
tls_end_(0),
dtls_(nullptr) {}
struct OnStartedArgs {
uptr stack_begin, stack_end,
cache_begin, cache_end,
tls_begin, tls_end;
DTLS *dtls;
};
void ThreadContext::OnStarted(void *arg) {
......@@ -69,10 +72,12 @@ void ThreadContext::OnStarted(void *arg) {
tls_end_ = args->tls_end;
cache_begin_ = args->cache_begin;
cache_end_ = args->cache_end;
dtls_ = args->dtls;
}
void ThreadContext::OnFinished() {
AllocatorThreadFinish();
DTLS_Destroy();
}
u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
......@@ -89,6 +94,7 @@ void ThreadStart(u32 tid, uptr os_id) {
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
thread_registry->StartThread(tid, os_id, &args);
}
......@@ -129,8 +135,8 @@ void EnsureMainThreadIDIsCorrect() {
///// Interface to the common LSan module. /////
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) {
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
ThreadContext *context = static_cast<ThreadContext *>(
thread_registry->FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
......@@ -140,6 +146,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
*tls_end = context->tls_end();
*cache_begin = context->cache_begin();
*cache_end = context->cache_end();
*dtls = context->dtls();
return true;
}
......
......@@ -15,6 +15,10 @@
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __sanitizer {
struct DTLS;
}
namespace __lsan {
class ThreadContext : public ThreadContextBase {
......@@ -28,10 +32,13 @@ class ThreadContext : public ThreadContextBase {
uptr tls_end() { return tls_end_; }
uptr cache_begin() { return cache_begin_; }
uptr cache_end() { return cache_end_; }
DTLS *dtls() { return dtls_; }
private:
uptr stack_begin_, stack_end_,
cache_begin_, cache_end_,
tls_begin_, tls_end_;
DTLS *dtls_;
};
void InitializeThreadRegistry();
......
......@@ -72,6 +72,10 @@ merge lib/sanitizer_common sanitizer_common
merge lib/interception interception
merge lib/ubsan ubsan
# Need to merge lib/builtins/assembly.h file:
mkdir -p builtins
cp -v upstream/lib/builtins/assembly.h builtins/assembly.h
rm -rf upstream
# Update the MERGE file.
......
......@@ -32,6 +32,7 @@ sanitizer_common_files = \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_linux_s390.cc \
sanitizer_mac.cc \
sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
......@@ -55,6 +56,7 @@ sanitizer_common_files = \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_termination.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_unwind_linux_libcdep.cc \
......@@ -62,6 +64,9 @@ sanitizer_common_files = \
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S
libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
......
......@@ -79,7 +79,7 @@ CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__DEPENDENCIES_1 =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_common_libcdep.lo sanitizer_coverage_libcdep.lo \
sanitizer_coverage_mapping_libcdep.lo \
......@@ -87,8 +87,8 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
sanitizer_flag_parser.lo sanitizer_libc.lo \
sanitizer_libignore.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_persistent_allocator.lo \
sanitizer_linux_libcdep.lo sanitizer_linux_s390.lo \
sanitizer_mac.lo sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
......@@ -102,15 +102,20 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \
sanitizer_win.lo
sanitizer_symbolizer_win.lo sanitizer_termination.lo \
sanitizer_thread_registry.lo sanitizer_tls_get_addr.lo \
sanitizer_unwind_linux_libcdep.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
depcomp = $(SHELL) $(top_srcdir)/../depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
LTCPPASCOMPILE = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
......@@ -120,7 +125,17 @@ CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
SOURCES = $(libsanitizer_common_la_SOURCES)
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
CCLD = $(CC)
LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
SOURCES = $(libsanitizer_common_la_SOURCES) \
$(EXTRA_libsanitizer_common_la_SOURCES)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
......@@ -198,6 +213,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
......@@ -296,6 +312,7 @@ sanitizer_common_files = \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_linux_s390.cc \
sanitizer_mac.cc \
sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
......@@ -319,12 +336,16 @@ sanitizer_common_files = \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_termination.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_unwind_linux_libcdep.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S
libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
......@@ -368,7 +389,7 @@ MAKEOVERRIDES =
all: all-am
.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
.SUFFIXES: .S .cc .lo .o .obj
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
......@@ -430,6 +451,9 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_mips64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_s390.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_x86_64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
......@@ -453,11 +477,33 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_termination.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.S.o:
@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ $<
.S.obj:
@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.S.lo:
@am__fastdepCCAS_TRUE@ $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCCAS_FALSE@ $(LTCPPASCOMPILE) -c -o $@ $<
.cc.o:
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
......
......@@ -11,39 +11,72 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
# define __libc_malloc malloc
# if !SANITIZER_GO
// Emulation of glibc's __libc_memalign for platforms that lack it, built on
// posix_memalign. Returns nullptr on failure instead of an error code so the
// callers above can treat it like the glibc variant.
static void *__libc_memalign(uptr alignment, uptr size) {
  void *res = nullptr;
  if (posix_memalign(&res, alignment, size) != 0)
    return nullptr;
  return res;
}
# endif
# define __libc_realloc realloc
# define __libc_free free
# endif
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
return __libc_memalign(alignment, size);
#else
// Windows does not provide __libc_memalign/posix_memalign. It provides
// __aligned_malloc, but the allocated blocks can't be passed to free,
// they need to be passed to __aligned_free. InternalAlloc interface does
// not account for such a requirement. Alignment does not seem to be used
// anywhere in runtime, so just call __libc_malloc for now.
DCHECK_EQ(alignment, 0);
return __libc_malloc(size);
#endif
}
static void *RawInternalRealloc(void *ptr, uptr size,
InternalAllocatorCache *cache) {
(void)cache;
return LIBC_MALLOC(size);
return __libc_realloc(ptr, size);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
(void)cache;
LIBC_FREE(ptr);
__libc_free(ptr);
}
// In the libc-malloc configuration (Go / SANITIZER_USE_MALLOC) there is no
// custom internal allocator instance; callers in this configuration must not
// dereference the result. Use nullptr rather than 0 for the null pointer,
// consistent with the rest of this file.
InternalAllocator *internal_allocator() {
  return nullptr;
}
#else // SANITIZER_GO
#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
......@@ -66,13 +99,26 @@ InternalAllocator *internal_allocator() {
return internal_allocator_instance;
}
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
if (alignment == 0) alignment = 8;
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Allocate(&internal_allocator_cache, size,
alignment, false);
}
return internal_allocator()->Allocate(cache, size, alignment, false);
}
static void *RawInternalRealloc(void *ptr, uptr size,
InternalAllocatorCache *cache) {
uptr alignment = 8;
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
false);
return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
size, alignment);
}
return internal_allocator()->Allocate(cache, size, 8, false);
return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
......@@ -83,20 +129,42 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
#endif // SANITIZER_GO
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
if (size + sizeof(u64) < size)
return nullptr;
void *p = RawInternalAlloc(size + sizeof(u64), cache);
void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
if (!p)
return nullptr;
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}
// Reallocates a block previously returned by InternalAlloc, preserving the
// hidden u64 magic header that precedes every user block.
void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
// realloc(nullptr, size) semantics: behave like a fresh allocation.
if (!addr)
return InternalAlloc(size, cache);
// Reject sizes whose header-adjusted total would overflow uptr.
if (size + sizeof(u64) < size)
return nullptr;
// Step back to the real start of the block: the magic header lives
// immediately before the user-visible pointer.
addr = (char*)addr - sizeof(u64);
size = size + sizeof(u64);
// Sanity-check that the block really came from InternalAlloc.
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
void *p = RawInternalRealloc(addr, size, cache);
if (!p)
return nullptr;
// Return the user-visible pointer just past the header (magic was moved
// along with the data by the raw realloc).
return (char*)p + sizeof(u64);
}
// Allocates zero-initialized storage for |count| objects of |size| bytes each.
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
// Refuse products that would overflow; whether that returns null or dies
// is the allocator's policy (allocator_may_return_null).
if (CallocShouldReturnNullDueToOverflow(count, size))
return internal_allocator()->ReturnNullOrDieOnBadRequest();
void *p = InternalAlloc(count * size, cache);
// Only zero the buffer when the allocation actually succeeded.
if (p) internal_memset(p, 0, count * size);
return p;
}
void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (!addr)
return;
......@@ -138,7 +206,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
void NORETURN ReportAllocatorCannotReturnNull() {
static atomic_uint8_t reporting_out_of_memory = {0};
// True once ReportAllocatorCannotReturnNull() has been entered with
// out_of_memory set; the flag is not cleared anywhere in the visible code.
bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment