Commit eac97531 by Martin Liska

backport: All source files: Merge from upstream 345033.

Merge from upstream 345033.

2018-10-31  Martin Liska  <mliska@suse.cz>

	* All source files: Merge from upstream 345033.

From-SVN: r265665
parent 95fba530
+2018-10-31  Martin Liska  <mliska@suse.cz>
+
+	* All source files: Merge from upstream 345033.
+
 2018-10-31  Martin Liska  <mliska@suse.cz>
 
 	* HOWTO_MERGE: Enhance documentation.
 	* merge.sh: Add support for git as well.
......
-315899
+345033
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
@@ -14,8 +14,10 @@
 #include "asan_allocator.h"
 #include "asan_flags.h"
 #include "asan_internal.h"
+#include "asan_mapping.h"
 #include "asan_poisoning.h"
 #include "asan_stack.h"
+#include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
 
 namespace __asan {
@@ -108,8 +110,9 @@ void AsanDeactivate() {
   AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
   disabled.quarantine_size_mb = 0;
   disabled.thread_local_quarantine_size_kb = 0;
-  disabled.min_redzone = 16;  // Redzone must be at least 16 bytes long.
-  disabled.max_redzone = 16;
+  // Redzone must be at least Max(16, granularity) bytes long.
+  disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY);
+  disabled.max_redzone = disabled.min_redzone;
   disabled.alloc_dealloc_mismatch = false;
   disabled.may_return_null = true;
   ReInitializeAllocator(disabled);
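Aside on the clamp above: each shadow byte encodes the state of 2^SHADOW_SCALE application bytes, so a redzone smaller than that granularity cannot be represented in the shadow. A minimal standalone sketch (the helper name is invented for illustration):

#include <algorithm>

// Illustrative only: with the default scale 3 the granularity is 8 and the
// classic 16-byte minimum already suffices; with scale 5 (Myriad2) the
// granularity is 32, so a 16-byte redzone would be unrepresentable.
constexpr int MinRedzoneForScale(int shadow_scale) {
  const int granularity = 1 << shadow_scale;  // app bytes per shadow byte
  return std::max(16, granularity);
}
static_assert(MinRedzoneForScale(3) == 16, "default targets keep 16");
static_assert(MinRedzoneForScale(5) == 32, "scale-5 targets need 32");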
......
@@ -56,6 +56,7 @@ class AsanChunkView {
   uptr Beg() const;       // First byte of user memory.
   uptr End() const;       // Last byte of user memory.
   uptr UsedSize() const;  // Size requested by the user.
+  u32 UserRequestedAlignment() const;  // Originally requested alignment.
   uptr AllocTid() const;
   uptr FreeTid() const;
   bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
@@ -126,7 +127,8 @@ const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
 typedef DefaultSizeClassMap SizeClassMap;
 # elif defined(__aarch64__) && SANITIZER_ANDROID
-const uptr kAllocatorSpace = 0x3000000000ULL;
+// Android needs to support 39, 42 and 48 bit VMA.
+const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
 typedef VeryCompactSizeClassMap SizeClassMap;
 # elif defined(__aarch64__)
@@ -195,8 +197,8 @@ struct AsanThreadLocalMallocStorage {
 void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type);
 void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
-void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
-                     AllocType alloc_type);
+void asan_delete(void *ptr, uptr size, uptr alignment,
+                 BufferedStackTrace *stack, AllocType alloc_type);
 
 void *asan_malloc(uptr size, BufferedStackTrace *stack);
 void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
@@ -204,6 +206,7 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
 void *asan_valloc(uptr size, BufferedStackTrace *stack);
 void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
+void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         BufferedStackTrace *stack);
 uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
......
@@ -25,7 +25,8 @@ using namespace __asan;
 static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
                                 char *name, uptr name_size,
                                 uptr &region_address, uptr &region_size) {
-  InternalMmapVector<StackVarDescr> vars(16);
+  InternalMmapVector<StackVarDescr> vars;
+  vars.reserve(16);
   if (!ParseFrameDescription(frame_descr, &vars)) {
     return;
   }
......
@@ -18,23 +18,25 @@
 namespace __asan {
 
-// Return " (thread_name) " or an empty string if the name is empty.
-const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
-                                      uptr buff_len) {
-  const char *name = t->name;
-  if (name[0] == '\0') return "";
-  buff[0] = 0;
-  internal_strncat(buff, " (", 3);
-  internal_strncat(buff, name, buff_len - 4);
-  internal_strncat(buff, ")", 2);
-  return buff;
+AsanThreadIdAndName::AsanThreadIdAndName(AsanThreadContext *t) {
+  Init(t->tid, t->name);
 }
 
-const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) {
-  if (tid == kInvalidTid) return "";
-  asanThreadRegistry().CheckLocked();
-  AsanThreadContext *t = GetThreadContextByTidLocked(tid);
-  return ThreadNameWithParenthesis(t, buff, buff_len);
+AsanThreadIdAndName::AsanThreadIdAndName(u32 tid) {
+  if (tid == kInvalidTid) {
+    Init(tid, "");
+  } else {
+    asanThreadRegistry().CheckLocked();
+    AsanThreadContext *t = GetThreadContextByTidLocked(tid);
+    Init(tid, t->name);
+  }
+}
+
+void AsanThreadIdAndName::Init(u32 tid, const char *tname) {
+  int len = internal_snprintf(name, sizeof(name), "T%d", tid);
+  CHECK(((unsigned int)len) < sizeof(name));
+  if (tname[0] != '\0')
+    internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname);
 }
 
 void DescribeThread(AsanThreadContext *context) {
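To see what the new helper produces, here is a rough standalone mimic of AsanThreadIdAndName::Init that uses plain snprintf in place of internal_snprintf (the tid/name values are invented):

#include <cstdio>

struct ThreadIdAndName {
  char name[128];
  ThreadIdAndName(unsigned tid, const char *tname) {
    // "T%tid", optionally followed by " (%name)", matching the class above.
    int len = std::snprintf(name, sizeof(name), "T%u", tid);
    if (tname && tname[0] != '\0')
      std::snprintf(name + len, sizeof(name) - len, " (%s)", tname);
  }
};

int main() {
  std::printf("%s\n", ThreadIdAndName(0, "main").name);  // prints "T0 (main)"
  std::printf("%s\n", ThreadIdAndName(7, "").name);      // prints "T7"
}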
@@ -45,18 +47,15 @@ void DescribeThread(AsanThreadContext *context) {
     return;
   }
   context->announced = true;
-  char tname[128];
   InternalScopedString str(1024);
-  str.append("Thread T%d%s", context->tid,
-             ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
+  str.append("Thread %s", AsanThreadIdAndName(context).c_str());
   if (context->parent_tid == kInvalidTid) {
     str.append(" created by unknown thread\n");
     Printf("%s", str.data());
     return;
   }
-  str.append(
-      " created by T%d%s here:\n", context->parent_tid,
-      ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
+  str.append(" created by %s here:\n",
+             AsanThreadIdAndName(context->parent_tid).c_str());
   Printf("%s", str.data());
   StackDepotGet(context->stack_id).Print();
   // Recursively described parent thread if needed.
@@ -120,6 +119,7 @@ static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
   }
   descr->chunk_begin = chunk.Beg();
   descr->chunk_size = chunk.UsedSize();
+  descr->user_requested_alignment = chunk.UserRequestedAlignment();
   descr->alloc_type = chunk.GetAllocType();
 }
@@ -355,10 +355,9 @@ bool GlobalAddressDescription::PointsInsideTheSameVariable(
 void StackAddressDescription::Print() const {
   Decorator d;
-  char tname[128];
   Printf("%s", d.Location());
-  Printf("Address %p is located in stack of thread T%d%s", addr, tid,
-         ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+  Printf("Address %p is located in stack of thread %s", addr,
+         AsanThreadIdAndName(tid).c_str());
 
   if (!frame_descr) {
     Printf("%s\n", d.Default());
@@ -377,7 +376,8 @@ void StackAddressDescription::Print() const {
   StackTrace alloca_stack(&frame_pc, 1);
   alloca_stack.Print();
 
-  InternalMmapVector<StackVarDescr> vars(16);
+  InternalMmapVector<StackVarDescr> vars;
+  vars.reserve(16);
   if (!ParseFrameDescription(frame_descr, &vars)) {
     Printf(
         "AddressSanitizer can't parse the stack frame "
@@ -399,7 +399,7 @@ void StackAddressDescription::Print() const {
   }
   Printf(
       "HINT: this may be a false positive if your program uses "
-      "some custom stack unwind mechanism or swapcontext\n");
+      "some custom stack unwind mechanism, swapcontext or vfork\n");
   if (SANITIZER_WINDOWS)
     Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
   else
@@ -415,26 +415,19 @@ void HeapAddressDescription::Print() const {
   AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
   StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);
-  char tname[128];
   Decorator d;
   AsanThreadContext *free_thread = nullptr;
   if (free_tid != kInvalidTid) {
     free_thread = GetThreadContextByTidLocked(free_tid);
-    Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
-           free_thread->tid,
-           ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
-           d.Default());
+    Printf("%sfreed by thread %s here:%s\n", d.Allocation(),
+           AsanThreadIdAndName(free_thread).c_str(), d.Default());
     StackTrace free_stack = GetStackTraceFromId(free_stack_id);
     free_stack.Print();
-    Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
-           alloc_thread->tid,
-           ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
-           d.Default());
+    Printf("%spreviously allocated by thread %s here:%s\n", d.Allocation(),
+           AsanThreadIdAndName(alloc_thread).c_str(), d.Default());
   } else {
-    Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
-           alloc_thread->tid,
-           ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
-           d.Default());
+    Printf("%sallocated by thread %s here:%s\n", d.Allocation(),
+           AsanThreadIdAndName(alloc_thread).c_str(), d.Default());
   }
   alloc_stack.Print();
   DescribeThread(GetCurrentThread());
......
@@ -24,9 +24,20 @@ void DescribeThread(AsanThreadContext *context);
 static inline void DescribeThread(AsanThread *t) {
   if (t) DescribeThread(t->context());
 }
-const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
-                                      uptr buff_len);
-const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
+
+class AsanThreadIdAndName {
+ public:
+  explicit AsanThreadIdAndName(AsanThreadContext *t);
+  explicit AsanThreadIdAndName(u32 tid);
+
+  // Contains "T%tid (%name)" or "T%tid" if the name is empty.
+  const char *c_str() const { return &name[0]; }
+
+ private:
+  void Init(u32 tid, const char *tname);
+
+  char name[128];
+};
 
 class Decorator : public __sanitizer::SanitizerCommonDecorator {
  public:
@@ -100,6 +111,7 @@ struct ChunkAccess {
   sptr offset;
   uptr chunk_begin;
   uptr chunk_size;
+  u32 user_requested_alignment : 12;
   u32 access_type : 2;
   u32 alloc_type : 2;
 };
......
@@ -26,9 +26,9 @@ static const u64 kAllocaRedzoneMask = 31UL;
 // For small size classes inline PoisonShadow for better performance.
 ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
-  CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
   u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
-  if (class_id <= 6) {
+  if (SHADOW_SCALE == 3 && class_id <= 6) {
+    // This code expects SHADOW_SCALE=3.
     for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
       shadow[i] = magic;
       // Make sure this does not become memset.
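For context, a minimal sketch of the shadow-address mapping these routines depend on (the offset value is the default short 64-bit one that appears later in this patch; names are illustrative):

#include <cstdint>

// ASan's core mapping: each 2^kScale bytes of application memory are
// described by one shadow byte at (addr >> kScale) + kOffset.
constexpr unsigned kScale = 3;             // default SHADOW_SCALE
constexpr uintptr_t kOffset = 0x7FFF8000;  // default short 64-bit offset
constexpr uintptr_t MemToShadowSketch(uintptr_t addr) {
  return (addr >> kScale) + kOffset;
}
// With scale 3, 8 application bytes share one shadow byte, which is why
// SetShadow can write whole u64s of shadow to poison a 64-byte region.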
......
@@ -31,10 +31,7 @@ static const char *MaybeCallAsanDefaultOptions() {
 static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
 #ifdef ASAN_DEFAULT_OPTIONS
-  // Stringize the macro value.
-# define ASAN_STRINGIZE(x) #x
-# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
-  return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
+  return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS);
 #else
   return "";
 #endif
@@ -146,6 +143,9 @@ void InitializeFlags() {
             SanitizerToolName);
     Die();
   }
+  // Ensure that redzone is at least SHADOW_GRANULARITY.
+  if (f->redzone < (int)SHADOW_GRANULARITY)
+    f->redzone = SHADOW_GRANULARITY;
   // Make "strict_init_order" imply "check_initialization_order".
   // TODO(samsonov): Use a single runtime flag for an init-order checker.
   if (f->strict_init_order) {
@@ -158,6 +158,10 @@ void InitializeFlags() {
   CHECK_LE(f->max_redzone, 2048);
   CHECK(IsPowerOfTwo(f->redzone));
   CHECK(IsPowerOfTwo(f->max_redzone));
+  if (SANITIZER_RTEMS) {
+    CHECK(!f->unmap_shadow_on_exit);
+    CHECK(!f->protect_shadow_gap);
+  }
 
   // quarantine_size is deprecated but we still honor it.
   // quarantine_size can not be used together with quarantine_size_mb.
......
@@ -86,7 +86,8 @@ ASAN_FLAG(bool, check_malloc_usable_size, true,
           "295.*.")
 ASAN_FLAG(bool, unmap_shadow_on_exit, false,
           "If set, explicitly unmaps the (huge) shadow at exit.")
-ASAN_FLAG(bool, protect_shadow_gap, true, "If set, mprotect the shadow gap")
+ASAN_FLAG(bool, protect_shadow_gap, !SANITIZER_RTEMS,
+          "If set, mprotect the shadow gap")
 ASAN_FLAG(bool, print_stats, false,
           "Print various statistics after printing an error message or if "
           "atexit=1.")
......
@@ -26,7 +26,7 @@
 namespace __asan {
 
 // The system already set up the shadow memory for us.
-// __sanitizer::GetMaxVirtualAddress has already been called by
+// __sanitizer::GetMaxUserVirtualAddress has already been called by
 // AsanInitInternal->InitializeHighMemEnd (asan_rtl.cc).
 // Just do some additional sanity checks here.
 void InitializeShadowMemory() {
......
@@ -147,6 +147,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }
 
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -194,6 +211,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
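The fallback's premise, sketched as a toy model (invented names; the real check consults the actual shadow and the globals list shown above): registering a global poisons its redzone, so finding the region already poisoned at registration time suggests another instrumented copy of the same global is live.

#include <cstdint>
#include <cstdio>
#include <map>

// Toy stand-in for the shadow: remembers which global ranges are poisoned.
static std::map<uintptr_t, uintptr_t> poisoned;  // beg -> size_with_redzone

void RegisterGlobalToy(uintptr_t beg, uintptr_t size) {
  if (poisoned.count(beg))  // region already "poisoned": duplicate definition
    std::printf("possible ODR violation at %#llx\n", (unsigned long long)beg);
  poisoned[beg] = size;     // "poison" the redzone for future registrations
}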
@@ -203,8 +222,9 @@ static void RegisterGlobal(const Global *g) {
   list_of_all_globals = l;
   if (g->has_dynamic_init) {
     if (!dynamic_init_globals) {
-      dynamic_init_globals = new(allocator_for_globals)
-          VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
+      dynamic_init_globals =
+          new (allocator_for_globals) VectorOfGlobals;  // NOLINT
+      dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity);
     }
     DynInitGlobal dyn_global = { *g, false };
     dynamic_init_globals->push_back(dyn_global);
@@ -337,9 +357,11 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
   GET_STACK_TRACE_MALLOC;
   u32 stack_id = StackDepotPut(stack);
   BlockingMutexLock lock(&mu_for_globals);
-  if (!global_registration_site_vector)
+  if (!global_registration_site_vector) {
     global_registration_site_vector =
-        new(allocator_for_globals) GlobalRegistrationSiteVector(128);
+        new (allocator_for_globals) GlobalRegistrationSiteVector;  // NOLINT
+    global_registration_site_vector->reserve(128);
+  }
   GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
   global_registration_site_vector->push_back(site);
   if (flags()->report_globals >= 2) {
......
@@ -17,9 +17,9 @@ namespace __asan {
 #pragma section(".ASAN$GA", read, write)  // NOLINT
 #pragma section(".ASAN$GZ", read, write)  // NOLINT
 extern "C" __declspec(allocate(".ASAN$GA"))
-__asan_global __asan_globals_start = {};
+ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_start = {};
 extern "C" __declspec(allocate(".ASAN$GZ"))
-__asan_global __asan_globals_end = {};
+ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_end = {};
 #pragma comment(linker, "/merge:.ASAN=.data")
 
 static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
static void call_on_globals(void (*hook)(__asan_global *, uptr)) { static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
@@ -27,7 +27,7 @@ static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
   __asan_global *end = &__asan_globals_end;
   uptr bytediff = (uptr)end - (uptr)start;
   if (bytediff % sizeof(__asan_global) != 0) {
-#ifdef SANITIZER_DLL_THUNK
+#if defined(SANITIZER_DLL_THUNK) || defined(SANITIZER_DYNAMIC_RUNTIME_THUNK)
     __debugbreak();
 #else
     CHECK("corrupt asan global array");
......
@@ -13,6 +13,8 @@
 #ifndef ASAN_INIT_VERSION_H
 #define ASAN_INIT_VERSION_H
 
+#include "sanitizer_common/sanitizer_platform.h"
+
 extern "C" {
   // Every time the ASan ABI changes we also change the version number in the
   // __asan_init function name.  Objects built with incompatible ASan ABI
@@ -30,7 +32,12 @@ extern "C" {
   // v6=>v7: added 'odr_indicator' to __asan_global
   // v7=>v8: added '__asan_(un)register_image_globals' functions for dead
   //         stripping support on Mach-O platforms
+#if SANITIZER_WORDSIZE == 32 && SANITIZER_ANDROID
+  // v8=>v9: 32-bit Android switched to dynamic shadow
+  #define __asan_version_mismatch_check __asan_version_mismatch_check_v9
+#else
   #define __asan_version_mismatch_check __asan_version_mismatch_check_v8
+#endif
 }
 
 #endif  // ASAN_INIT_VERSION_H
@@ -22,15 +22,20 @@
 #include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_libc.h"
 
-// There is no general interception at all on Fuchsia.
+// There is no general interception at all on Fuchsia and RTEMS.
 // Only the functions in asan_interceptors_memintrinsics.cc are
 // really defined to replace libc functions.
-#if !SANITIZER_FUCHSIA
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
 
 #if SANITIZER_POSIX
 #include "sanitizer_common/sanitizer_posix.h"
 #endif
 
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \
+    ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION
+#include <unwind.h>
+#endif
+
 #if defined(__i386) && SANITIZER_LINUX
 #define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
 #elif defined(__mips__) && SANITIZER_LINUX
@@ -176,6 +181,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
     (void)(s);                                  \
   } while (false)
 #include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
 
 struct ThreadStartParam {
   atomic_uintptr_t t;
@@ -324,6 +330,32 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
 }
 #endif
 
+#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION
+INTERCEPTOR(void, __cxa_rethrow_primary_exception, void *a) {
+  CHECK(REAL(__cxa_rethrow_primary_exception));
+  __asan_handle_no_return();
+  REAL(__cxa_rethrow_primary_exception)(a);
+}
+#endif
+
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
+INTERCEPTOR(_Unwind_Reason_Code, _Unwind_RaiseException,
+            _Unwind_Exception *object) {
+  CHECK(REAL(_Unwind_RaiseException));
+  __asan_handle_no_return();
+  return REAL(_Unwind_RaiseException)(object);
+}
+#endif
+
+#if ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION
+INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException,
+            _Unwind_Exception *object) {
+  CHECK(REAL(_Unwind_SjLj_RaiseException));
+  __asan_handle_no_return();
+  return REAL(_Unwind_SjLj_RaiseException)(object);
+}
+#endif
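All three interceptors above follow one pattern: call __asan_handle_no_return() before a control transfer that skips ordinary function returns, so that stack poisoning left behind by unwound frames is cleared. A hypothetical interceptor in the same shape (names and types simplified; only __asan_handle_no_return is a real runtime entry point):

extern "C" void __asan_handle_no_return();  // real ASan runtime hook

typedef int (*raise_fn_t)(void *exception_object);
static raise_fn_t real_raise;  // would be resolved by interception machinery

int RaiseInterceptorSketch(void *object) {
  // Frames unwound past never run their epilogues, so their redzones would
  // stay poisoned; unpoison the current stack before raising.
  __asan_handle_no_return();
  return real_raise(object);
}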
 #if ASAN_INTERCEPT_INDEX
 # if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
 INTERCEPTOR(char*, index, const char *string, int c)
@@ -546,14 +578,6 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
 }
 #endif  // ASAN_INTERCEPT___CXA_ATEXIT
 
-#if ASAN_INTERCEPT_FORK
-INTERCEPTOR(int, fork, void) {
-  ENSURE_ASAN_INITED();
-  int pid = REAL(fork)();
-  return pid;
-}
-#endif  // ASAN_INTERCEPT_FORK
-
 // ---------------------- InitializeAsanInterceptors ---------------- {{{1
 namespace __asan {
 void InitializeAsanInterceptors() {
@@ -604,6 +628,17 @@ void InitializeAsanInterceptors() {
 #if ASAN_INTERCEPT___CXA_THROW
   ASAN_INTERCEPT_FUNC(__cxa_throw);
 #endif
+#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION
+  ASAN_INTERCEPT_FUNC(__cxa_rethrow_primary_exception);
+#endif
+  // Indirectly intercept std::rethrow_exception.
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
+  INTERCEPT_FUNCTION(_Unwind_RaiseException);
+#endif
+  // Indirectly intercept std::rethrow_exception.
+#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
+  INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException);
+#endif
 
   // Intercept threading-related functions
 #if ASAN_INTERCEPT_PTHREAD_CREATE
@@ -620,10 +655,6 @@ void InitializeAsanInterceptors() {
   ASAN_INTERCEPT_FUNC(__cxa_atexit);
 #endif
 
-#if ASAN_INTERCEPT_FORK
-  ASAN_INTERCEPT_FUNC(fork);
-#endif
-
   InitializePlatformInterceptors();
 
   VReport(1, "AddressSanitizer: libc interceptors initialized\n");
......
@@ -32,10 +32,10 @@ void InitializePlatformInterceptors();
 } // namespace __asan
 
-// There is no general interception at all on Fuchsia.
+// There is no general interception at all on Fuchsia and RTEMS.
 // Only the functions in asan_interceptors_memintrinsics.h are
 // really defined to replace libc functions.
-#if !SANITIZER_FUCHSIA
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
 
 // Use macro to describe if specific function should be
 // intercepted on a given platform.
@@ -44,22 +44,21 @@ void InitializePlatformInterceptors();
 # define ASAN_INTERCEPT__LONGJMP 1
 # define ASAN_INTERCEPT_INDEX 1
 # define ASAN_INTERCEPT_PTHREAD_CREATE 1
-# define ASAN_INTERCEPT_FORK 1
 #else
 # define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
 # define ASAN_INTERCEPT__LONGJMP 0
 # define ASAN_INTERCEPT_INDEX 0
 # define ASAN_INTERCEPT_PTHREAD_CREATE 0
-# define ASAN_INTERCEPT_FORK 0
 #endif
 
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+    SANITIZER_SOLARIS
 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
 #else
 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
 #endif
 
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_SOLARIS
 # define ASAN_INTERCEPT_SWAPCONTEXT 1
 #else
 # define ASAN_INTERCEPT_SWAPCONTEXT 0
@@ -77,12 +76,20 @@ void InitializePlatformInterceptors();
 # define ASAN_INTERCEPT___LONGJMP_CHK 0
 #endif
 
-// Android bug: https://code.google.com/p/android/issues/detail?id=61799
-#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \
-    !(SANITIZER_ANDROID && defined(__i386))
+#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
+    !SANITIZER_NETBSD
 # define ASAN_INTERCEPT___CXA_THROW 1
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
+# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
+#  define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
+# else
+#  define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 1
+# endif
 #else
 # define ASAN_INTERCEPT___CXA_THROW 0
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
+# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 0
+# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 0
 #endif
 
 #if !SANITIZER_WINDOWS
@@ -103,9 +110,6 @@ DECLARE_REAL(SIZE_T, strlen, const char *s)
 DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
 DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
 DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
-struct sigaction;
-DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
-             struct sigaction *oldact)
 
 #if !SANITIZER_MAC
 #define ASAN_INTERCEPT_FUNC(name) \
......
@@ -29,14 +29,14 @@ void *__asan_memmove(void *to, const void *from, uptr size) {
   ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
 }
 
-#if SANITIZER_FUCHSIA
+#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
 
-// Fuchsia doesn't use sanitizer_common_interceptors.inc, but the only
-// things there it wants are these three.  Just define them as aliases
-// here rather than repeating the contents.
+// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but
+// the only things there it wants are these three.  Just define them
+// as aliases here rather than repeating the contents.
 
-decltype(memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
-decltype(memmove) memmove[[gnu::alias("__asan_memmove")]];
-decltype(memset) memset[[gnu::alias("__asan_memset")]];
+extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
+extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
+extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
 
-#endif  // SANITIZER_FUCHSIA
+#endif  // SANITIZER_FUCHSIA || SANITIZER_RTEMS
@@ -131,15 +131,22 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
                                  const char *offset2, uptr length2) {
   return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
 }
 
-#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \
-  const char *offset1 = (const char*)_offset1; \
-  const char *offset2 = (const char*)_offset2; \
-  if (RangesOverlap(offset1, length1, offset2, length2)) { \
-    GET_STACK_TRACE_FATAL_HERE; \
-    ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
-                                            offset2, length2, &stack); \
-  } \
-} while (0)
+#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2)   \
+  do {                                                                     \
+    const char *offset1 = (const char *)_offset1;                          \
+    const char *offset2 = (const char *)_offset2;                          \
+    if (RangesOverlap(offset1, length1, offset2, length2)) {               \
+      GET_STACK_TRACE_FATAL_HERE;                                          \
+      bool suppressed = IsInterceptorSuppressed(name);                     \
+      if (!suppressed && HaveStackTraceBasedSuppressions()) {              \
+        suppressed = IsStackTraceSuppressed(&stack);                       \
+      }                                                                    \
+      if (!suppressed) {                                                   \
+        ReportStringFunctionMemoryRangesOverlap(name, offset1, length1,    \
+                                                offset2, length2, &stack); \
+      }                                                                    \
+    }                                                                      \
+  } while (0)
 
 }  // namespace __asan
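The overlap predicate treats ranges as half-open intervals, so ranges that merely touch are not flagged. A quick standalone check (buffer and offsets invented):

#include <cassert>

// Same logic as RangesOverlap above, on plain pointers.
static bool Overlap(const char *p1, unsigned long n1,
                    const char *p2, unsigned long n2) {
  return !((p1 + n1 <= p2) || (p2 + n2 <= p1));
}

int main() {
  char buf[16];
  assert(Overlap(buf, 8, buf + 4, 8));   // [0,8) and [4,12) overlap
  assert(!Overlap(buf, 4, buf + 4, 4));  // [0,4) and [4,8) only touch
}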
......
@@ -34,7 +34,7 @@
 // If set, values like allocator chunk size, as well as defaults for some flags
 // will be changed towards less memory overhead.
 #ifndef ASAN_LOW_MEMORY
-# if SANITIZER_IOS || SANITIZER_ANDROID
+# if SANITIZER_IOS || SANITIZER_ANDROID || SANITIZER_RTEMS
 #  define ASAN_LOW_MEMORY 1
 # else
 #  define ASAN_LOW_MEMORY 0
@@ -76,7 +76,7 @@ void InitializeShadowMemory();
 // asan_malloc_linux.cc / asan_malloc_mac.cc
 void ReplaceSystemMalloc();
 
-// asan_linux.cc / asan_mac.cc / asan_win.cc
+// asan_linux.cc / asan_mac.cc / asan_rtems.cc / asan_win.cc
 uptr FindDynamicShadowStart();
 void *AsanDoesNotSupportStaticLinkage();
 void AsanCheckDynamicRTPrereqs();
@@ -145,6 +145,9 @@ const int kAsanArrayCookieMagic = 0xac;
 const int kAsanIntraObjectRedzone = 0xbb;
 const int kAsanAllocaLeftMagic = 0xca;
 const int kAsanAllocaRightMagic = 0xcb;
+// Used to populate the shadow gap for systems without memory
+// protection there (i.e. Myriad).
+const int kAsanShadowGap = 0xcc;
 
 static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
 static const uptr kRetiredStackFrameMagic = 0x45E0360E;
......
@@ -11,10 +11,12 @@
 //===----------------------------------------------------------------------===//
 
 #include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+    SANITIZER_SOLARIS
 
 #include "asan_interceptors.h"
 #include "asan_internal.h"
+#include "asan_premap_shadow.h"
 #include "asan_thread.h"
 #include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_freebsd.h"
@@ -28,6 +30,7 @@
 #include <sys/types.h>
 #include <dlfcn.h>
 #include <fcntl.h>
+#include <limits.h>
 #include <pthread.h>
 #include <stdio.h>
 #include <unistd.h>
@@ -37,7 +40,11 @@
 #include <sys/link_elf.h>
 #endif
 
-#if SANITIZER_ANDROID || SANITIZER_FREEBSD
+#if SANITIZER_SOLARIS
+#include <link.h>
+#endif
+
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
 #include <ucontext.h>
 extern "C" void* _DYNAMIC;
 #elif SANITIZER_NETBSD
@@ -79,9 +86,51 @@ void *AsanDoesNotSupportStaticLinkage() {
   return &_DYNAMIC;  // defined in link.h
 }
 
+static void UnmapFromTo(uptr from, uptr to) {
+  CHECK(to >= from);
+  if (to == from) return;
+  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+  if (UNLIKELY(internal_iserror(res))) {
+    Report(
+        "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
+        "%p\n",
+        to - from, to - from, from);
+    CHECK("unable to unmap" && 0);
+  }
+}
+
+#if ASAN_PREMAP_SHADOW
+uptr FindPremappedShadowStart() {
+  uptr granularity = GetMmapGranularity();
+  uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
+  uptr premap_shadow_size = PremapShadowSize();
+  uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+  // We may have mapped too much. Release extra memory.
+  UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
+  return shadow_start;
+}
+#endif
+
 uptr FindDynamicShadowStart() {
-  UNREACHABLE("FindDynamicShadowStart is not available");
-  return 0;
+#if ASAN_PREMAP_SHADOW
+  if (!PremapShadowFailed())
+    return FindPremappedShadowStart();
+#endif
+
+  uptr granularity = GetMmapGranularity();
+  uptr alignment = granularity * 8;
+  uptr left_padding = granularity;
+  uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+  uptr map_size = shadow_size + left_padding + alignment;
+
+  uptr map_start = (uptr)MmapNoAccess(map_size);
+  CHECK_NE(map_start, ~(uptr)0);
+
+  uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
+  UnmapFromTo(map_start, shadow_start - left_padding);
+  UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
+
+  return shadow_start;
 }
 
 void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
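The trick in FindDynamicShadowStart above is worth spelling out: reserve more than needed, then trim both ends so an aligned region remains. A standalone sketch of the same over-map-and-trim idea using plain mmap/munmap (simplified; asserts stand in for real error handling):

#include <cassert>
#include <cstdint>
#include <sys/mman.h>

// Reserve `size` bytes of address space aligned to `alignment` (a power of
// two) by over-mapping and unmapping the excess on both sides.
uintptr_t ReserveAligned(size_t size, size_t alignment) {
  size_t map_size = size + alignment;
  void *map = mmap(nullptr, map_size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  assert(map != MAP_FAILED);
  uintptr_t start = (uintptr_t)map;
  uintptr_t aligned = (start + alignment - 1) & ~(alignment - 1);
  if (aligned > start)                       // trim the unaligned head
    munmap((void *)start, aligned - start);
  uintptr_t end = start + map_size;
  if (end > aligned + size)                  // trim the leftover tail
    munmap((void *)(aligned + size), end - (aligned + size));
  return aligned;
}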
@@ -95,6 +144,9 @@ void AsanCheckIncompatibleRT() {}
 #else
 static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
                                 void *data) {
+  VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n",
+          info->dlpi_name, info->dlpi_addr);
+
   // Continue until the first dynamic library is found
   if (!info->dlpi_name || info->dlpi_name[0] == 0)
     return 0;
@@ -103,7 +155,7 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
   if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
     return 0;
 
-#if SANITIZER_NETBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
   // Ignore first entry (the main program)
   char **p = (char **)data;
   if (!(*p)) {
@@ -112,6 +164,12 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
   }
 #endif
 
+#if SANITIZER_SOLARIS
+  // Ignore executable on Solaris
+  if (info->dlpi_addr == 0)
+    return 0;
+#endif
+
   *(const char **)data = info->dlpi_name;
   return 1;
 }
@@ -155,7 +213,7 @@ void AsanCheckIncompatibleRT() {
     // the functions in dynamic ASan runtime instead of the functions in
     // system libraries, causing crashes later in ASan initialization.
     MemoryMappingLayout proc_maps(/*cache_enabled*/true);
-    char filename[128];
+    char filename[PATH_MAX];
     MemoryMappedSegment segment(filename, sizeof(filename));
     while (proc_maps.Next(&segment)) {
       if (IsDynamicRTName(segment.filename)) {
@@ -190,4 +248,5 @@ void *AsanDlSymNext(const char *sym) {
 } // namespace __asan
 
-#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
+        // SANITIZER_SOLARIS
@@ -60,16 +60,36 @@ uptr FindDynamicShadowStart() {
   uptr space_size = kHighShadowEnd + left_padding;
 
   uptr largest_gap_found = 0;
-  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
-                                               granularity, &largest_gap_found);
+  uptr max_occupied_addr = 0;
+  VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+  uptr shadow_start =
+      FindAvailableMemoryRange(space_size, alignment, granularity,
+                               &largest_gap_found, &max_occupied_addr);
   // If the shadow doesn't fit, restrict the address space to make it fit.
   if (shadow_start == 0) {
+    VReport(
+        2,
+        "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
+        largest_gap_found, max_occupied_addr);
     uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
+    if (new_max_vm < max_occupied_addr) {
+      Report("Unable to find a memory range for dynamic shadow.\n");
+      Report(
+          "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
+          "new_max_vm = %p\n",
+          space_size, largest_gap_found, max_occupied_addr, new_max_vm);
+      CHECK(0 && "cannot place shadow");
+    }
     RestrictMemoryToMaxAddress(new_max_vm);
     kHighMemEnd = new_max_vm - 1;
     space_size = kHighShadowEnd + left_padding;
-    shadow_start =
-        FindAvailableMemoryRange(space_size, alignment, granularity, nullptr);
+    VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+    shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
+                                            nullptr, nullptr);
+    if (shadow_start == 0) {
+      Report("Unable to find a memory range after restricting VM.\n");
+      CHECK(0 && "cannot place shadow after restricting vm");
+    }
   }
   CHECK_NE((uptr)0, shadow_start);
   CHECK(IsAligned(shadow_start, alignment));
......
@@ -14,19 +14,23 @@
 #include "sanitizer_common/sanitizer_platform.h"
 #if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \
-    SANITIZER_NETBSD
+    SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS
 
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_errno.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
 #include "asan_allocator.h"
 #include "asan_interceptors.h"
 #include "asan_internal.h"
+#include "asan_malloc_local.h"
 #include "asan_stack.h"
 
 // ---------------------- Replacement functions ---------------- {{{1
 using namespace __asan;  // NOLINT
 
 static uptr allocated_for_dlsym;
-static const uptr kDlsymAllocPoolSize = 1024;
+static uptr last_dlsym_alloc_size_in_words;
+static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
 static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
 
 static INLINE bool IsInDlsymAllocPool(const void *ptr) {
@@ -37,21 +41,73 @@ static INLINE bool IsInDlsymAllocPool(const void *ptr) {
 static void *AllocateFromLocalPool(uptr size_in_bytes) {
   uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
   void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym];
+  last_dlsym_alloc_size_in_words = size_in_words;
   allocated_for_dlsym += size_in_words;
   CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
   return mem;
 }
 
+static void DeallocateFromLocalPool(const void *ptr) {
+  // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store
+  // error messages and instead uses malloc followed by free. To avoid pool
+  // exhaustion due to long object filenames, handle that special case here.
+  uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words;
+  void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset];
+  if (prev_mem == ptr) {
+    REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize);
+    allocated_for_dlsym = prev_offset;
+    last_dlsym_alloc_size_in_words = 0;
+  }
+}
+
+static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
+                                      uptr size_in_bytes) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment)))
+    return errno_EINVAL;
+  CHECK(alignment >= kWordSize);
+
+  uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym];
+  uptr aligned_addr = RoundUpTo(addr, alignment);
+  uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize);
+
+  uptr *end_mem = (uptr*)(aligned_addr + aligned_size);
+  uptr allocated = end_mem - alloc_memory_for_dlsym;
+  if (allocated >= kDlsymAllocPoolSize)
+    return errno_ENOMEM;
+
+  allocated_for_dlsym = allocated;
+  *memptr = (void*)aligned_addr;
+  return 0;
+}
+
+#if SANITIZER_RTEMS
+void* MemalignFromLocalPool(uptr alignment, uptr size) {
+  void *ptr = nullptr;
+  alignment = Max(alignment, kWordSize);
+  PosixMemalignFromLocalPool(&ptr, alignment, size);
+  return ptr;
+}
+
+bool IsFromLocalPool(const void *ptr) {
+  return IsInDlsymAllocPool(ptr);
+}
+#endif
+
 static INLINE bool MaybeInDlsym() {
   // Fuchsia doesn't use dlsym-based interceptors.
   return !SANITIZER_FUCHSIA && asan_init_is_running;
 }
 
+static INLINE bool UseLocalPool() {
+  return EarlyMalloc() || MaybeInDlsym();
+}
+
 static void *ReallocFromLocalPool(void *ptr, uptr size) {
   const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
   const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
   void *new_ptr;
-  if (UNLIKELY(MaybeInDlsym())) {
+  if (UNLIKELY(UseLocalPool())) {
     new_ptr = AllocateFromLocalPool(size);
   } else {
     ENSURE_ASAN_INITED();
@@ -64,8 +120,10 @@ static void *ReallocFromLocalPool(void *ptr, uptr size) {
 INTERCEPTOR(void, free, void *ptr) {
   GET_STACK_TRACE_FREE;
-  if (UNLIKELY(IsInDlsymAllocPool(ptr)))
+  if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+    DeallocateFromLocalPool(ptr);
     return;
+  }
   asan_free(ptr, &stack, FROM_MALLOC);
 }
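The dlsym pool above is a bump allocator with one twist: it can roll back exactly the most recent allocation, which is all the glibc 2.27 malloc-then-free pattern needs. A toy version with invented names and a fixed word type:

#include <cstring>

// Toy bump pool with single-slot rollback, mirroring AllocateFromLocalPool /
// DeallocateFromLocalPool above (sizes in machine words, zero-initialized).
static unsigned long pool[1024];
static unsigned long used_words, last_words;

void *PoolAlloc(unsigned long bytes) {
  unsigned long words = (bytes + sizeof(long) - 1) / sizeof(long);
  void *mem = &pool[used_words];
  last_words = words;  // remember for a possible rollback
  used_words += words;
  return mem;
}

void PoolFree(void *ptr) {
  // Only the most recent block can be reclaimed; anything older is leaked
  // on purpose -- the pool only lives until the real allocator is ready.
  if (ptr == &pool[used_words - last_words]) {
    used_words -= last_words;
    std::memset(&pool[used_words], 0, last_words * sizeof(long));
    last_words = 0;
  }
}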
@@ -79,7 +137,7 @@ INTERCEPTOR(void, cfree, void *ptr) {
 #endif // SANITIZER_INTERCEPT_CFREE
 
 INTERCEPTOR(void*, malloc, uptr size) {
-  if (UNLIKELY(MaybeInDlsym()))
+  if (UNLIKELY(UseLocalPool()))
     // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
     return AllocateFromLocalPool(size);
   ENSURE_ASAN_INITED();
@@ -88,7 +146,7 @@ INTERCEPTOR(void*, malloc, uptr size) {
 }
 
 INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
-  if (UNLIKELY(MaybeInDlsym()))
+  if (UNLIKELY(UseLocalPool()))
     // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
     return AllocateFromLocalPool(nmemb * size);
   ENSURE_ASAN_INITED();
@@ -99,7 +157,7 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
 INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
   if (UNLIKELY(IsInDlsymAllocPool(ptr)))
     return ReallocFromLocalPool(ptr, size);
-  if (UNLIKELY(MaybeInDlsym()))
+  if (UNLIKELY(UseLocalPool()))
     return AllocateFromLocalPool(size);
   ENSURE_ASAN_INITED();
   GET_STACK_TRACE_MALLOC;
@@ -120,10 +178,12 @@ INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
 }
 #endif // SANITIZER_INTERCEPT_MEMALIGN
 
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
 INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
   GET_STACK_TRACE_MALLOC;
-  return asan_memalign(boundary, size, &stack, FROM_MALLOC);
+  return asan_aligned_alloc(boundary, size, &stack);
 }
+#endif // SANITIZER_INTERCEPT_ALIGNED_ALLOC
 
 INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
   GET_CURRENT_PC_BP_SP;
@@ -152,8 +212,9 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
 #endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
 
 INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+  if (UNLIKELY(UseLocalPool()))
+    return PosixMemalignFromLocalPool(memptr, alignment, size);
   GET_STACK_TRACE_MALLOC;
-  // Printf("posix_memalign: %zx %zu\n", alignment, size);
   return asan_posix_memalign(memptr, alignment, size, &stack);
 }
@@ -234,4 +295,4 @@ void ReplaceSystemMalloc() {
 #endif  // SANITIZER_ANDROID
 #endif  // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX ||
-        // SANITIZER_NETBSD
+        // SANITIZER_NETBSD || SANITIZER_SOLARIS
//===-- asan_malloc_local.h -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Provide interfaces to check for and handle local pool memory allocation.
//===----------------------------------------------------------------------===//
#ifndef ASAN_MALLOC_LOCAL_H
#define ASAN_MALLOC_LOCAL_H
#include "sanitizer_common/sanitizer_platform.h"
#include "asan_internal.h"
// On RTEMS, we use the local pool to handle memory allocation when the ASan
// run-time is not up.
static INLINE bool EarlyMalloc() {
return SANITIZER_RTEMS && (!__asan::asan_inited ||
__asan::asan_init_is_running);
}
void* MemalignFromLocalPool(uptr alignment, uptr size);
#if SANITIZER_RTEMS
bool IsFromLocalPool(const void *ptr);
#define ALLOCATE_FROM_LOCAL_POOL UNLIKELY(EarlyMalloc())
#define IS_FROM_LOCAL_POOL(ptr) UNLIKELY(IsFromLocalPool(ptr))
#else // SANITIZER_RTEMS
#define ALLOCATE_FROM_LOCAL_POOL 0
#define IS_FROM_LOCAL_POOL(ptr) 0
#endif // SANITIZER_RTEMS
#endif // ASAN_MALLOC_LOCAL_H
@@ -36,6 +36,9 @@ using namespace __asan;
 #define COMMON_MALLOC_CALLOC(count, size) \
   GET_STACK_TRACE_MALLOC; \
   void *p = asan_calloc(count, size, &stack);
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+  GET_STACK_TRACE_MALLOC; \
+  int res = asan_posix_memalign(memptr, alignment, size, &stack);
 #define COMMON_MALLOC_VALLOC(size) \
   GET_STACK_TRACE_MALLOC; \
   void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
......
...@@ -12,8 +12,17 @@ ...@@ -12,8 +12,17 @@
#include "sanitizer_common/sanitizer_platform.h" #include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS #if SANITIZER_WINDOWS
#define WIN32_LEAN_AND_MEAN // Intentionally not including windows.h here, to avoid the risk of
#include <windows.h> // pulling in conflicting declarations of these functions. (With mingw-w64,
// there's a risk of windows.h pulling in stdint.h.)
typedef int BOOL;
typedef void *HANDLE;
typedef const void *LPCVOID;
typedef void *LPVOID;
#define HEAP_ZERO_MEMORY 0x00000008
#define HEAP_REALLOC_IN_PLACE_ONLY 0x00000010
#include "asan_allocator.h" #include "asan_allocator.h"
#include "asan_interceptors.h" #include "asan_interceptors.h"
...@@ -123,7 +132,7 @@ void *_recalloc_base(void *p, size_t n, size_t elem_size) { ...@@ -123,7 +132,7 @@ void *_recalloc_base(void *p, size_t n, size_t elem_size) {
} }
ALLOCATION_FUNCTION_ATTRIBUTE ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(const void *ptr) { size_t _msize(void *ptr) {
GET_CURRENT_PC_BP_SP; GET_CURRENT_PC_BP_SP;
(void)sp; (void)sp;
return asan_malloc_usable_size(ptr, pc, bp); return asan_malloc_usable_size(ptr, pc, bp);
......
...@@ -120,6 +120,13 @@ ...@@ -120,6 +120,13 @@
// || `[0x400000000000, 0x47ffffffffff]` || LowShadow || // || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
// || `[0x000000000000, 0x3fffffffffff]` || LowMem || // || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
// //
// Shadow mapping on NetBSD/i386 with SHADOW_OFFSET == 0x40000000:
// || `[0x60000000, 0xfffff000]` || HighMem ||
// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
//
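// Worked example for the NetBSD/i386 layout above (SHADOW_SCALE == 3):
// MEM_TO_SHADOW(a) == (a >> 3) + 0x40000000, so
// a == 0x7fff1234 (HighMem) -> 0x0fffe246 + 0x40000000 == 0x4fffe246,
// which lands in HighShadow [0x4c000000, 0x5fffffff], while
// a == 0x3fffffff (last LowMem byte) -> 0x47ffffff, exactly LowShadowEnd.
//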
// Default Windows/i386 mapping: // Default Windows/i386 mapping:
// (the exact location of HighShadow/HighMem may vary depending // (the exact location of HighShadow/HighMem may vary depending
// on WoW64, /LARGEADDRESSAWARE, etc). // on WoW64, /LARGEADDRESSAWARE, etc).
...@@ -128,12 +135,23 @@ ...@@ -128,12 +135,23 @@
// || `[0x36000000, 0x39ffffff]` || ShadowGap || // || `[0x36000000, 0x39ffffff]` || ShadowGap ||
// || `[0x30000000, 0x35ffffff]` || LowShadow || // || `[0x30000000, 0x35ffffff]` || LowShadow ||
// || `[0x00000000, 0x2fffffff]` || LowMem || // || `[0x00000000, 0x2fffffff]` || LowMem ||
//
static const u64 kDefaultShadowScale = 3; // Shadow mapping on Myriad2 (for shadow scale 5):
// || `[0x9ff80000, 0x9fffffff]` || ShadowGap ||
// || `[0x9f000000, 0x9ff7ffff]` || LowShadow ||
// || `[0x80000000, 0x9effffff]` || LowMem ||
// || `[0x00000000, 0x7fffffff]` || Ignored ||
#if defined(ASAN_SHADOW_SCALE)
static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
#else
static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 5 : 3;
#endif
static const u64 kDefaultShadowSentinel = ~(uptr)0; static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44; static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G. static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
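// Worked example: with the default scale of 3,
//   (~0xFFFULL << 3) == 0xFFFFFFFFFFFF8000, and
//   0x7FFFFFFF & 0xFFFFFFFFFFFF8000 == 0x7FFF8000,
// reproducing the previous hard-coded constant; a scale of 4 yields
// 0x7FFF0000 instead, keeping the low SHADOW_SCALE + 12 bits clear so the
// offset stays page- and granule-aligned below 2G.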
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kIosShadowOffset64 = 0x120200000; static const u64 kIosShadowOffset64 = 0x120200000;
static const u64 kIosSimShadowOffset32 = 1ULL << 30; static const u64 kIosSimShadowOffset32 = 1ULL << 30;
...@@ -141,24 +159,36 @@ static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64; ...@@ -141,24 +159,36 @@ static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
static const u64 kMyriadMemoryOffset32 = 0x80000000ULL;
static const u64 kMyriadMemorySize32 = 0x20000000ULL;
static const u64 kMyriadMemoryEnd32 =
kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1;
static const u64 kMyriadShadowOffset32 =
(kMyriadMemoryOffset32 + kMyriadMemorySize32 -
(kMyriadMemorySize32 >> kDefaultShadowScale));
static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
#define SHADOW_SCALE kDefaultShadowScale #define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_FUCHSIA #if SANITIZER_FUCHSIA
# define SHADOW_OFFSET (0) # define SHADOW_OFFSET (0)
#elif SANITIZER_WORDSIZE == 32 #elif SANITIZER_WORDSIZE == 32
# if SANITIZER_ANDROID # if SANITIZER_ANDROID
# define SHADOW_OFFSET (0) # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# elif defined(__mips__) # elif defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32 # define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD # elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32 # define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# elif SANITIZER_NETBSD
# define SHADOW_OFFSET kNetBSD_ShadowOffset32
# elif SANITIZER_WINDOWS # elif SANITIZER_WINDOWS
# define SHADOW_OFFSET kWindowsShadowOffset32 # define SHADOW_OFFSET kWindowsShadowOffset32
# elif SANITIZER_IOS # elif SANITIZER_IOS
...@@ -167,6 +197,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 ...@@ -167,6 +197,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
# else # else
# define SHADOW_OFFSET kIosShadowOffset32 # define SHADOW_OFFSET kIosShadowOffset32
# endif # endif
# elif SANITIZER_MYRIAD2
# define SHADOW_OFFSET kMyriadShadowOffset32
# else # else
# define SHADOW_OFFSET kDefaultShadowOffset32 # define SHADOW_OFFSET kDefaultShadowOffset32
# endif # endif
...@@ -198,7 +230,46 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 ...@@ -198,7 +230,46 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
# endif # endif
#endif #endif
#if SANITIZER_ANDROID && defined(__arm__)
# define ASAN_PREMAP_SHADOW 1
#else
# define ASAN_PREMAP_SHADOW 0
#endif
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) #define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
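// Example: with the default SHADOW_SCALE of 3, SHADOW_GRANULARITY is 8, so
// one shadow byte describes one 8-byte granule of application memory:
// shadow value 0 means all 8 bytes are addressable, a value k in [1, 7]
// means only the first k bytes are, and negative values mark the different
// kinds of redzones.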
#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
#if DO_ASAN_MAPPING_PROFILE
# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
#else
# define PROFILE_ASAN_MAPPING()
#endif
// If 1, all shadow boundaries are constants.
// Don't set to 1 other than for testing.
#define ASAN_FIXED_MAPPING 0
namespace __asan {
extern uptr AsanMappingProfile[];
#if ASAN_FIXED_MAPPING
// Fixed mapping for 64-bit Linux. Mostly used for performance comparison
// with non-fixed mapping. As of r175253 (Feb 2013) the performance
// difference between fixed and non-fixed mapping is below the noise level.
static uptr kHighMemEnd = 0x7fffffffffffULL;
static uptr kMidMemBeg = 0x3000000000ULL;
static uptr kMidMemEnd = 0x4fffffffffULL;
#else
extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
#endif
} // namespace __asan
#if SANITIZER_MYRIAD2
#include "asan_mapping_myriad.h"
#else
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET)) #define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
#define kLowMemBeg 0 #define kLowMemBeg 0
...@@ -230,36 +301,11 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 ...@@ -230,36 +301,11 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) #define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) #define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
#if DO_ASAN_MAPPING_PROFILE
# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
#else
# define PROFILE_ASAN_MAPPING()
#endif
// If 1, all shadow boundaries are constants.
// Don't set to 1 other than for testing.
#define ASAN_FIXED_MAPPING 0
namespace __asan { namespace __asan {
extern uptr AsanMappingProfile[];
#if ASAN_FIXED_MAPPING
// Fixed mapping for 64-bit Linux. Mostly used for performance comparison
// with non-fixed mapping. As of r175253 (Feb 2013) the performance
// difference between fixed and non-fixed mapping is below the noise level.
static uptr kHighMemEnd = 0x7fffffffffffULL;
static uptr kMidMemBeg = 0x3000000000ULL;
static uptr kMidMemEnd = 0x4fffffffffULL;
#else
extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
#endif
static inline bool AddrIsInLowMem(uptr a) { static inline bool AddrIsInLowMem(uptr a) {
PROFILE_ASAN_MAPPING(); PROFILE_ASAN_MAPPING();
return a < kLowMemEnd; return a <= kLowMemEnd;
} }
static inline bool AddrIsInLowShadow(uptr a) { static inline bool AddrIsInLowShadow(uptr a) {
...@@ -267,14 +313,24 @@ static inline bool AddrIsInLowShadow(uptr a) { ...@@ -267,14 +313,24 @@ static inline bool AddrIsInLowShadow(uptr a) {
return a >= kLowShadowBeg && a <= kLowShadowEnd; return a >= kLowShadowBeg && a <= kLowShadowEnd;
} }
static inline bool AddrIsInMidMem(uptr a) {
PROFILE_ASAN_MAPPING();
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
}
static inline bool AddrIsInMidShadow(uptr a) {
PROFILE_ASAN_MAPPING();
return kMidMemBeg && a >= kMidShadowBeg && a <= kMidShadowEnd;
}
static inline bool AddrIsInHighMem(uptr a) { static inline bool AddrIsInHighMem(uptr a) {
PROFILE_ASAN_MAPPING(); PROFILE_ASAN_MAPPING();
return a >= kHighMemBeg && a <= kHighMemEnd; return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd;
} }
static inline bool AddrIsInMidMem(uptr a) { static inline bool AddrIsInHighShadow(uptr a) {
PROFILE_ASAN_MAPPING(); PROFILE_ASAN_MAPPING();
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd; return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd;
} }
static inline bool AddrIsInShadowGap(uptr a) { static inline bool AddrIsInShadowGap(uptr a) {
...@@ -292,6 +348,12 @@ static inline bool AddrIsInShadowGap(uptr a) { ...@@ -292,6 +348,12 @@ static inline bool AddrIsInShadowGap(uptr a) {
return a >= kShadowGapBeg && a <= kShadowGapEnd; return a >= kShadowGapBeg && a <= kShadowGapEnd;
} }
} // namespace __asan
#endif // SANITIZER_MYRIAD2
namespace __asan {
static inline bool AddrIsInMem(uptr a) { static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING(); PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) || return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
...@@ -304,16 +366,6 @@ static inline uptr MemToShadow(uptr p) { ...@@ -304,16 +366,6 @@ static inline uptr MemToShadow(uptr p) {
return MEM_TO_SHADOW(p); return MEM_TO_SHADOW(p);
} }
static inline bool AddrIsInHighShadow(uptr a) {
PROFILE_ASAN_MAPPING();
return a >= kHighShadowBeg && a <= kHighMemEnd;
}
static inline bool AddrIsInMidShadow(uptr a) {
PROFILE_ASAN_MAPPING();
return kMidMemBeg && a >= kMidShadowBeg && a <= kMidMemEnd;
}
static inline bool AddrIsInShadow(uptr a) { static inline bool AddrIsInShadow(uptr a) {
PROFILE_ASAN_MAPPING(); PROFILE_ASAN_MAPPING();
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a); return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
...@@ -326,6 +378,8 @@ static inline bool AddrIsAlignedByGranularity(uptr a) { ...@@ -326,6 +378,8 @@ static inline bool AddrIsAlignedByGranularity(uptr a) {
static inline bool AddressIsPoisoned(uptr a) { static inline bool AddressIsPoisoned(uptr a) {
PROFILE_ASAN_MAPPING(); PROFILE_ASAN_MAPPING();
if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a))
return false;
const uptr kAccessSize = 1; const uptr kAccessSize = 1;
u8 *shadow_address = (u8*)MEM_TO_SHADOW(a); u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
s8 shadow_value = *shadow_address; s8 shadow_value = *shadow_address;
......
//===-- asan_mapping_myriad.h -----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Myriad-specific definitions for ASan memory mapping.
//===----------------------------------------------------------------------===//
#ifndef ASAN_MAPPING_MYRIAD_H
#define ASAN_MAPPING_MYRIAD_H
#define RAW_ADDR(mem) ((mem) & ~kMyriadCacheBitMask32)
#define MEM_TO_SHADOW(mem) \
(((RAW_ADDR(mem) - kLowMemBeg) >> SHADOW_SCALE) + (SHADOW_OFFSET))
#define kLowMemBeg kMyriadMemoryOffset32
#define kLowMemEnd (SHADOW_OFFSET - 1)
#define kLowShadowBeg SHADOW_OFFSET
#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
#define kHighMemBeg 0
#define kHighShadowBeg 0
#define kHighShadowEnd 0
#define kMidShadowBeg 0
#define kMidShadowEnd 0
#define kShadowGapBeg (kLowShadowEnd + 1)
#define kShadowGapEnd kMyriadMemoryEnd32
#define kShadowGap2Beg 0
#define kShadowGap2End 0
#define kShadowGap3Beg 0
#define kShadowGap3End 0
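// Worked example (SHADOW_SCALE == 5 on Myriad):
//   SHADOW_OFFSET == 0x80000000 + 0x20000000 - (0x20000000 >> 5)
//                 == 0x9f000000, and
//   MEM_TO_SHADOW(0x80000000) == ((0x80000000 - 0x80000000) >> 5) + 0x9f000000
//                             == 0x9f000000 (LowShadowBeg),
//   MEM_TO_SHADOW(0x9effffff) == 0x9ff7ffff (LowShadowEnd),
// matching the Myriad2 layout table in asan_mapping.h. RAW_ADDR strips the
// cache bit, so the cached alias 0xc0001000 shadows to the same byte as
// 0x80001000.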
namespace __asan {
static inline bool AddrIsInLowMem(uptr a) {
PROFILE_ASAN_MAPPING();
a = RAW_ADDR(a);
return a >= kLowMemBeg && a <= kLowMemEnd;
}
static inline bool AddrIsInLowShadow(uptr a) {
PROFILE_ASAN_MAPPING();
a = RAW_ADDR(a);
return a >= kLowShadowBeg && a <= kLowShadowEnd;
}
static inline bool AddrIsInMidMem(uptr a) {
PROFILE_ASAN_MAPPING();
return false;
}
static inline bool AddrIsInMidShadow(uptr a) {
PROFILE_ASAN_MAPPING();
return false;
}
static inline bool AddrIsInHighMem(uptr a) {
PROFILE_ASAN_MAPPING();
return false;
}
static inline bool AddrIsInHighShadow(uptr a) {
PROFILE_ASAN_MAPPING();
return false;
}
static inline bool AddrIsInShadowGap(uptr a) {
PROFILE_ASAN_MAPPING();
a = RAW_ADDR(a);
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
} // namespace __asan
#endif // ASAN_MAPPING_MYRIAD_H
...@@ -29,9 +29,9 @@ struct AllocationSite { ...@@ -29,9 +29,9 @@ struct AllocationSite {
class HeapProfile { class HeapProfile {
public: public:
HeapProfile() : allocations_(1024) {} HeapProfile() { allocations_.reserve(1024); }
void ProcessChunk(const AsanChunkView& cv) { void ProcessChunk(const AsanChunkView &cv) {
if (cv.IsAllocated()) { if (cv.IsAllocated()) {
total_allocated_user_size_ += cv.UsedSize(); total_allocated_user_size_ += cv.UsedSize();
total_allocated_count_++; total_allocated_count_++;
...@@ -47,10 +47,10 @@ class HeapProfile { ...@@ -47,10 +47,10 @@ class HeapProfile {
} }
void Print(uptr top_percent, uptr max_number_of_contexts) { void Print(uptr top_percent, uptr max_number_of_contexts) {
InternalSort(&allocations_, allocations_.size(), Sort(allocations_.data(), allocations_.size(),
[](const AllocationSite &a, const AllocationSite &b) { [](const AllocationSite &a, const AllocationSite &b) {
return a.total_size > b.total_size; return a.total_size > b.total_size;
}); });
CHECK(total_allocated_user_size_); CHECK(total_allocated_user_size_);
uptr total_shown = 0; uptr total_shown = 0;
Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: " Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
......
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
#include "asan_allocator.h" #include "asan_allocator.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_malloc_local.h"
#include "asan_report.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "interception/interception.h" #include "interception/interception.h"
...@@ -22,7 +24,7 @@ ...@@ -22,7 +24,7 @@
// anyway by passing extra -export flags to the linker, which is exactly that // anyway by passing extra -export flags to the linker, which is exactly that
// dllexport would normally do. We need to export them in order to make the // dllexport would normally do. We need to export them in order to make the
// VS2015 dynamic CRT (MD) work. // VS2015 dynamic CRT (MD) work.
#if SANITIZER_WINDOWS #if SANITIZER_WINDOWS && defined(_MSC_VER)
#define CXX_OPERATOR_ATTRIBUTE #define CXX_OPERATOR_ATTRIBUTE
#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym)) #define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym))
#ifdef _WIN64 #ifdef _WIN64
...@@ -65,16 +67,28 @@ struct nothrow_t {}; ...@@ -65,16 +67,28 @@ struct nothrow_t {};
enum class align_val_t: size_t {}; enum class align_val_t: size_t {};
} // namespace std } // namespace std
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. // TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM.
// For local pool allocation, align to SHADOW_GRANULARITY to match asan
// allocator behavior.
#define OPERATOR_NEW_BODY(type, nothrow) \ #define OPERATOR_NEW_BODY(type, nothrow) \
if (ALLOCATE_FROM_LOCAL_POOL) {\
void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size);\
if (!nothrow) CHECK(res);\
return res;\
}\
GET_STACK_TRACE_MALLOC;\ GET_STACK_TRACE_MALLOC;\
void *res = asan_memalign(0, size, &stack, type);\ void *res = asan_memalign(0, size, &stack, type);\
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res; return res;
#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \ #define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
if (ALLOCATE_FROM_LOCAL_POOL) {\
void *res = MemalignFromLocalPool((uptr)align, size);\
if (!nothrow) CHECK(res);\
return res;\
}\
GET_STACK_TRACE_MALLOC;\ GET_STACK_TRACE_MALLOC;\
void *res = asan_memalign((uptr)align, size, &stack, type);\ void *res = asan_memalign((uptr)align, size, &stack, type);\
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res; return res;
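// Call-site sketch (illustration only): a plain `new T` expands through
// OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/), so a failed allocation
// ends in ReportOutOfMemory, whereas `new (std::nothrow) T` passes
// nothrow == true and may return nullptr; on the early local-pool path the
// throwing form CHECKs the result instead.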
// On OS X it's not enough to just provide our own 'operator new' and // On OS X it's not enough to just provide our own 'operator new' and
...@@ -123,77 +137,73 @@ INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { ...@@ -123,77 +137,73 @@ INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/);
} }
#endif #endif // !SANITIZER_MAC
#define OPERATOR_DELETE_BODY(type) \ #define OPERATOR_DELETE_BODY(type) \
if (IS_FROM_LOCAL_POOL(ptr)) return;\
GET_STACK_TRACE_FREE;\
asan_delete(ptr, 0, 0, &stack, type);
#define OPERATOR_DELETE_BODY_SIZE(type) \
if (IS_FROM_LOCAL_POOL(ptr)) return;\
GET_STACK_TRACE_FREE;\
asan_delete(ptr, size, 0, &stack, type);
#define OPERATOR_DELETE_BODY_ALIGN(type) \
if (IS_FROM_LOCAL_POOL(ptr)) return;\
GET_STACK_TRACE_FREE;\
asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type);
#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \
if (IS_FROM_LOCAL_POOL(ptr)) return;\
GET_STACK_TRACE_FREE;\ GET_STACK_TRACE_FREE;\
asan_free(ptr, &stack, type); asan_delete(ptr, size, static_cast<uptr>(align), &stack, type);
#if !SANITIZER_MAC #if !SANITIZER_MAC
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { void operator delete(void *ptr) NOEXCEPT
OPERATOR_DELETE_BODY(FROM_NEW); { OPERATOR_DELETE_BODY(FROM_NEW); }
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr) NOEXCEPT { void operator delete[](void *ptr) NOEXCEPT
OPERATOR_DELETE_BODY(FROM_NEW_BR); { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { void operator delete(void *ptr, std::nothrow_t const&)
OPERATOR_DELETE_BODY(FROM_NEW); { OPERATOR_DELETE_BODY(FROM_NEW); }
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) { void operator delete[](void *ptr, std::nothrow_t const&)
OPERATOR_DELETE_BODY(FROM_NEW_BR); { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size) NOEXCEPT { void operator delete(void *ptr, size_t size) NOEXCEPT
GET_STACK_TRACE_FREE; { OPERATOR_DELETE_BODY_SIZE(FROM_NEW); }
asan_sized_free(ptr, size, &stack, FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size) NOEXCEPT { void operator delete[](void *ptr, size_t size) NOEXCEPT
GET_STACK_TRACE_FREE; { OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); }
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t) NOEXCEPT { void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
OPERATOR_DELETE_BODY(FROM_NEW); { OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t) NOEXCEPT { void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
OPERATOR_DELETE_BODY(FROM_NEW_BR); { OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) { void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
OPERATOR_DELETE_BODY(FROM_NEW); { OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) { void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
OPERATOR_DELETE_BODY(FROM_NEW_BR); { OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT { void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
GET_STACK_TRACE_FREE; { OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); }
asan_sized_free(ptr, size, &stack, FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT { void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
GET_STACK_TRACE_FREE; { OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); }
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
#else // SANITIZER_MAC #else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) { INTERCEPTOR(void, _ZdlPv, void *ptr)
OPERATOR_DELETE_BODY(FROM_NEW); { OPERATOR_DELETE_BODY(FROM_NEW); }
} INTERCEPTOR(void, _ZdaPv, void *ptr)
INTERCEPTOR(void, _ZdaPv, void *ptr) { { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
OPERATOR_DELETE_BODY(FROM_NEW_BR); INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
} { OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
OPERATOR_DELETE_BODY(FROM_NEW); { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
} #endif // !SANITIZER_MAC
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
...@@ -30,7 +30,7 @@ bool CanPoisonMemory() { ...@@ -30,7 +30,7 @@ bool CanPoisonMemory() {
} }
void PoisonShadow(uptr addr, uptr size, u8 value) { void PoisonShadow(uptr addr, uptr size, u8 value) {
if (!CanPoisonMemory()) return; if (value && !CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr)); CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size)); CHECK(AddrIsAlignedByGranularity(addr + size));
...@@ -180,8 +180,15 @@ int __asan_address_is_poisoned(void const volatile *addr) { ...@@ -180,8 +180,15 @@ int __asan_address_is_poisoned(void const volatile *addr) {
uptr __asan_region_is_poisoned(uptr beg, uptr size) { uptr __asan_region_is_poisoned(uptr beg, uptr size) {
if (!size) return 0; if (!size) return 0;
uptr end = beg + size; uptr end = beg + size;
if (!AddrIsInMem(beg)) return beg; if (SANITIZER_MYRIAD2) {
if (!AddrIsInMem(end)) return end; // On Myriad, addresses not in the DRAM range need to be treated as
// unpoisoned.
if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0;
if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0;
} else {
if (!AddrIsInMem(beg)) return beg;
if (!AddrIsInMem(end)) return end;
}
CHECK_LT(beg, end); CHECK_LT(beg, end);
uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY); uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY); uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
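// Example of the rounding above with SHADOW_GRANULARITY == 8: a region with
// beg == 0x1003 and end == 0x1020 gives aligned_b == 0x1008 and
// aligned_e == 0x1020; the whole granules in [aligned_b, aligned_e) can be
// scanned one shadow byte at a time, while the leading partial granule
// [0x1003, 0x1008) must be checked against its shadow value, as
// AddressIsPoisoned does in asan_mapping.h.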
......
...@@ -36,7 +36,7 @@ void PoisonShadowPartialRightRedzone(uptr addr, ...@@ -36,7 +36,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
// performance-critical code with care. // performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) { u8 value) {
DCHECK(CanPoisonMemory()); DCHECK(!value || CanPoisonMemory());
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW( uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
...@@ -49,6 +49,9 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, ...@@ -49,6 +49,9 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
// changed at all. It doesn't currently have an efficient means // changed at all. It doesn't currently have an efficient means
// to zero a bunch of pages, but maybe we should add one. // to zero a bunch of pages, but maybe we should add one.
SANITIZER_FUCHSIA == 1 || SANITIZER_FUCHSIA == 1 ||
// RTEMS doesn't have pages, let alone a fast way to zero
// them, so default to memset.
SANITIZER_RTEMS == 1 ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) { shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg); REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} else { } else {
......
...@@ -23,7 +23,6 @@ ...@@ -23,7 +23,6 @@
#include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_procmaps.h"
#include <pthread.h> #include <pthread.h>
#include <signal.h>
#include <stdlib.h> #include <stdlib.h>
#include <sys/time.h> #include <sys/time.h>
#include <sys/resource.h> #include <sys/resource.h>
......
//===-- asan_premap_shadow.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Reserve shadow memory with an ifunc resolver.
//===----------------------------------------------------------------------===//
#include "asan_mapping.h"
#if ASAN_PREMAP_SHADOW
#include "asan_premap_shadow.h"
#include "sanitizer_common/sanitizer_posix.h"
namespace __asan {
// The code in this file needs to run in an unrelocated binary. It may not
// access any external symbol, including its own non-hidden globals.
// Conservative upper limit.
uptr PremapShadowSize() {
uptr granularity = GetMmapGranularity();
return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity);
}
// Returns an address aligned to 8 pages, such that one page on the left and
// PremapShadowSize() bytes on the right of it are mapped no-access.
uptr PremapShadow() {
uptr granularity = GetMmapGranularity();
uptr alignment = granularity * 8;
uptr left_padding = granularity;
uptr shadow_size = PremapShadowSize();
uptr map_size = shadow_size + left_padding + alignment;
uptr map_start = (uptr)MmapNoAccess(map_size);
CHECK_NE(map_start, ~(uptr)0);
uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
uptr shadow_end = shadow_start + shadow_size;
internal_munmap(reinterpret_cast<void *>(map_start),
shadow_start - left_padding - map_start);
internal_munmap(reinterpret_cast<void *>(shadow_end),
map_start + map_size - shadow_end);
return shadow_start;
}
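// Worked example of the trimming above, assuming a 4K mmap granularity (so
// alignment == 32K and left_padding == 4K): if MmapNoAccess returns
// map_start == 0x7f0000001000, then
//   shadow_start == RoundUpTo(0x7f0000002000, 0x8000) == 0x7f0000008000;
// the leading slice [map_start, shadow_start - 4K) and the trailing slice
// [shadow_start + shadow_size, map_start + map_size) are unmapped, leaving
// one inaccessible guard page immediately below the shadow.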
bool PremapShadowFailed() {
uptr shadow = reinterpret_cast<uptr>(&__asan_shadow);
uptr resolver = reinterpret_cast<uptr>(&__asan_premap_shadow);
// shadow == resolver is how Android KitKat and older handles ifunc.
// shadow == 0 just in case.
if (shadow == 0 || shadow == resolver)
return true;
return false;
}
} // namespace __asan
extern "C" {
decltype(__asan_shadow)* __asan_premap_shadow() {
// The resolver may be called multiple times. Map the shadow just once.
static uptr premapped_shadow = 0;
if (!premapped_shadow) premapped_shadow = __asan::PremapShadow();
return reinterpret_cast<decltype(__asan_shadow)*>(premapped_shadow);
}
// __asan_shadow is a "function" that has the same address as the first byte of
// the shadow mapping.
INTERFACE_ATTRIBUTE __attribute__((ifunc("__asan_premap_shadow"))) void
__asan_shadow();
}
#endif // ASAN_PREMAP_SHADOW
//===-- asan_premap_shadow.h ------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Premap shadow range with an ifunc resolver.
//===----------------------------------------------------------------------===//
#ifndef ASAN_PREMAP_SHADOW_H
#define ASAN_PREMAP_SHADOW_H
#if ASAN_PREMAP_SHADOW
namespace __asan {
// Conservative upper limit.
uptr PremapShadowSize();
bool PremapShadowFailed();
}
#endif
extern "C" INTERFACE_ATTRIBUTE void __asan_shadow();
extern "C" decltype(__asan_shadow)* __asan_premap_shadow();
#endif // ASAN_PREMAP_SHADOW_H
...@@ -82,7 +82,7 @@ static void PrintZoneForPointer(uptr ptr, uptr zone_ptr, ...@@ -82,7 +82,7 @@ static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
bool ParseFrameDescription(const char *frame_descr, bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars) { InternalMmapVector<StackVarDescr> *vars) {
CHECK(frame_descr); CHECK(frame_descr);
char *p; const char *p;
// This string is created by the compiler and has the following form: // This string is created by the compiler and has the following form:
// "n alloc_1 alloc_2 ... alloc_n" // "n alloc_1 alloc_2 ... alloc_n"
// where alloc_i looks like "offset size len ObjectName" // where alloc_i looks like "offset size len ObjectName"
...@@ -132,6 +132,10 @@ class ScopedInErrorReport { ...@@ -132,6 +132,10 @@ class ScopedInErrorReport {
} }
~ScopedInErrorReport() { ~ScopedInErrorReport() {
if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
asanThreadRegistry().Unlock();
return;
}
ASAN_ON_ERROR(); ASAN_ON_ERROR();
if (current_error_.IsValid()) current_error_.Print(); if (current_error_.IsValid()) current_error_.Print();
...@@ -150,7 +154,7 @@ class ScopedInErrorReport { ...@@ -150,7 +154,7 @@ class ScopedInErrorReport {
// Copy the message buffer so that we can start logging without holding a // Copy the message buffer so that we can start logging without holding a
// lock that gets acquired during printing. // lock that gets acquired during printing.
InternalScopedBuffer<char> buffer_copy(kErrorMessageBufferSize); InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
{ {
BlockingMutexLock l(&error_message_buf_mutex); BlockingMutexLock l(&error_message_buf_mutex);
internal_memcpy(buffer_copy.data(), internal_memcpy(buffer_copy.data(),
...@@ -200,7 +204,7 @@ class ScopedInErrorReport { ...@@ -200,7 +204,7 @@ class ScopedInErrorReport {
bool halt_on_error_; bool halt_on_error_;
}; };
ErrorDescription ScopedInErrorReport::current_error_; ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);
void ReportDeadlySignal(const SignalContext &sig) { void ReportDeadlySignal(const SignalContext &sig) {
ScopedInErrorReport in_report(/*fatal*/ true); ScopedInErrorReport in_report(/*fatal*/ true);
...@@ -214,11 +218,12 @@ void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { ...@@ -214,11 +218,12 @@ void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
in_report.ReportError(error); in_report.ReportError(error);
} }
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
uptr delete_alignment,
BufferedStackTrace *free_stack) { BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report; ScopedInErrorReport in_report;
ErrorNewDeleteSizeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
delete_size); delete_size, delete_alignment);
in_report.ReportError(error); in_report.ReportError(error);
} }
...@@ -251,6 +256,62 @@ void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, ...@@ -251,6 +256,62 @@ void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
in_report.ReportError(error); in_report.ReportError(error);
} }
void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
in_report.ReportError(error);
}
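// Caller-side sketch (an assumption about asan_allocator.cc, not shown in
// this hunk): calloc funnels into this hook only when nmemb * size would
// wrap around and the allocator is not allowed to return null.
//
//   void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
//     if (UNLIKELY(size && nmemb > (uptr)-1 / size)) {  // nmemb * size wraps
//       if (AllocatorMayReturnNull()) return nullptr;
//       ReportCallocOverflow(nmemb, size, stack);       // fatal, no return
//     }
//     // ... normal allocation path ...
//   }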
void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
in_report.ReportError(error);
}
void ReportInvalidAllocationAlignment(uptr alignment,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
alignment);
in_report.ReportError(error);
}
void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
size, alignment);
in_report.ReportError(error);
}
void ReportInvalidPosixMemalignAlignment(uptr alignment,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
alignment);
in_report.ReportError(error);
}
void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
total_size, max_size);
in_report.ReportError(error);
}
void ReportRssLimitExceeded(BufferedStackTrace *stack) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
in_report.ReportError(error);
}
void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
in_report.ReportError(error);
}
void ReportStringFunctionMemoryRangesOverlap(const char *function, void ReportStringFunctionMemoryRangesOverlap(const char *function,
const char *offset1, uptr length1, const char *offset1, uptr length1,
const char *offset2, uptr length2, const char *offset2, uptr length2,
......
...@@ -10,6 +10,9 @@ ...@@ -10,6 +10,9 @@
// ASan-private header for error reporting functions. // ASan-private header for error reporting functions.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifndef ASAN_REPORT_H
#define ASAN_REPORT_H
#include "asan_allocator.h" #include "asan_allocator.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_thread.h" #include "asan_thread.h"
...@@ -45,7 +48,8 @@ bool ParseFrameDescription(const char *frame_descr, ...@@ -45,7 +48,8 @@ bool ParseFrameDescription(const char *frame_descr,
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write, void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal); uptr access_size, u32 exp, bool fatal);
void ReportDeadlySignal(const SignalContext &sig); void ReportDeadlySignal(const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
uptr delete_alignment,
BufferedStackTrace *free_stack); BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack); void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack); void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
...@@ -55,6 +59,18 @@ void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, ...@@ -55,6 +59,18 @@ void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack); void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack);
void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
BufferedStackTrace *stack); BufferedStackTrace *stack);
void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack);
void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack);
void ReportInvalidAllocationAlignment(uptr alignment,
BufferedStackTrace *stack);
void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
BufferedStackTrace *stack);
void ReportInvalidPosixMemalignAlignment(uptr alignment,
BufferedStackTrace *stack);
void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
BufferedStackTrace *stack);
void ReportRssLimitExceeded(BufferedStackTrace *stack);
void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack);
void ReportStringFunctionMemoryRangesOverlap(const char *function, void ReportStringFunctionMemoryRangesOverlap(const char *function,
const char *offset1, uptr length1, const char *offset1, uptr length1,
const char *offset2, uptr length2, const char *offset2, uptr length2,
...@@ -77,3 +93,4 @@ void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, ...@@ -77,3 +93,4 @@ void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr,
BufferedStackTrace *stack); BufferedStackTrace *stack);
} // namespace __asan } // namespace __asan
#endif // ASAN_REPORT_H
//===-- asan_rtems.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// RTEMS-specific details.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_rtems.h"
#if SANITIZER_RTEMS
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include <pthread.h>
#include <stdlib.h>
namespace __asan {
static void ResetShadowMemory() {
uptr shadow_start = SHADOW_OFFSET;
uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32);
uptr gap_start = MEM_TO_SHADOW(shadow_start);
uptr gap_end = MEM_TO_SHADOW(shadow_end);
REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start);
REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start);
}
void InitializeShadowMemory() {
kHighMemEnd = 0;
kMidMemBeg = 0;
kMidMemEnd = 0;
ResetShadowMemory();
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
void InitializeAsanInterceptors() {}
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
// RTEMS only supports static linking; it suffices to return with no
// error.
void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
UNIMPLEMENTED();
}
void EarlyInit() {
// Provide early initialization of shadow memory so that
// instrumented code running before full initialization will not
// report spurious errors.
ResetShadowMemory();
}
// We can use a plain thread_local variable for TSD.
static thread_local void *per_thread;
void *AsanTSDGet() { return per_thread; }
void AsanTSDSet(void *tsd) { per_thread = tsd; }
// There's no initialization needed, and the passed-in destructor
// will never be called. Instead, our own thread destruction hook
// (below) will call AsanThread::TSDDtor directly.
void AsanTSDInit(void (*destructor)(void *tsd)) {
DCHECK(destructor == &PlatformTSDDtor);
}
void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
//
// Thread registration. We provide an API similar to the Fuchsia port.
//
struct AsanThread::InitOptions {
uptr stack_bottom, stack_size, tls_bottom, tls_size;
};
// Shared setup between thread creation and startup for the initial thread.
static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
uptr user_id, bool detached,
uptr stack_bottom, uptr stack_size,
uptr tls_bottom, uptr tls_size) {
// In lieu of AsanThread::Create.
AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__);
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
// On other systems, AsanThread::Init() is called from the new
// thread itself. But on RTEMS we already know the stack address
// range beforehand, so we can do most of the setup right now.
const AsanThread::InitOptions options = {stack_bottom, stack_size,
tls_bottom, tls_size};
thread->Init(&options);
return thread;
}
// This gets the same arguments passed to Init by CreateAsanThread, above.
// We're in the creator thread before the new thread is actually started, but
// its stack and tls address range are already known.
void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
DCHECK_NE(GetCurrentThread(), this);
DCHECK_NE(GetCurrentThread(), nullptr);
CHECK_NE(options->stack_bottom, 0);
CHECK_NE(options->stack_size, 0);
stack_bottom_ = options->stack_bottom;
stack_top_ = options->stack_bottom + options->stack_size;
tls_begin_ = options->tls_bottom;
tls_end_ = options->tls_bottom + options->tls_size;
}
// Called by __asan::AsanInitInternal (asan_rtl.cc). Unlike other ports, the
// main thread on RTEMS does not require special treatment; its AsanThread is
// already created by the provided hooks. This function simply looks up and
// returns the created thread.
AsanThread *CreateMainThread() {
return GetThreadContextByTidLocked(0)->thread;
}
// This is called before each thread creation is attempted. So, in
// its first call, the calling thread is the initial and sole thread.
static void *BeforeThreadCreateHook(uptr user_id, bool detached,
uptr stack_bottom, uptr stack_size,
uptr tls_bottom, uptr tls_size) {
EnsureMainThreadIDIsCorrect();
// Strict init-order checking is thread-hostile.
if (flags()->strict_init_order) StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
u32 parent_tid = GetCurrentTidOrInvalid();
return CreateAsanThread(&stack, parent_tid, user_id, detached,
stack_bottom, stack_size, tls_bottom, tls_size);
}
// This is called after creating a new thread (in the creating thread),
// with the pointer returned by BeforeThreadCreateHook (above).
static void ThreadCreateHook(void *hook, bool aborted) {
AsanThread *thread = static_cast<AsanThread *>(hook);
if (!aborted) {
// The thread was created successfully.
// ThreadStartHook is already running in the new thread.
} else {
// The thread wasn't created after all.
// Clean up everything we set up in BeforeThreadCreateHook.
asanThreadRegistry().FinishThread(thread->tid());
UnmapOrDie(thread, sizeof(AsanThread));
}
}
// This is called (1) in the newly-created thread before it runs anything else,
// with the pointer returned by BeforeThreadCreateHook (above). (2) before a
// thread restart.
static void ThreadStartHook(void *hook, uptr os_id) {
if (!hook)
return;
AsanThread *thread = static_cast<AsanThread *>(hook);
SetCurrentThread(thread);
ThreadStatus status =
asanThreadRegistry().GetThreadLocked(thread->tid())->status;
DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning);
// Determine whether we are starting or restarting the thread.
if (status == ThreadStatusCreated)
// In lieu of AsanThread::ThreadStart.
asanThreadRegistry().StartThread(thread->tid(), os_id,
/*workerthread*/ false, nullptr);
else {
// In a thread restart, a thread may resume execution at an
// arbitrary function entry point, with its stack and TLS state
// reset. We unpoison the stack in that case.
PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0);
}
}
// Each thread runs this just before it exits,
// with the pointer returned by BeforeThreadCreateHook (above).
// All per-thread destructors have already been called.
static void ThreadExitHook(void *hook, uptr os_id) {
AsanThread *thread = static_cast<AsanThread *>(hook);
if (thread)
AsanThread::TSDDtor(thread->context());
}
static void HandleExit() {
// Disable ASan by setting it to uninitialized. Also reset the
// shadow memory to avoid reporting errors after the run-time has
// been destroyed.
if (asan_inited) {
asan_inited = false;
ResetShadowMemory();
}
}
} // namespace __asan
// These are declared (in extern "C") by <some_path/sanitizer.h>.
// The system runtime will call our definitions directly.
extern "C" {
void __sanitizer_early_init() {
__asan::EarlyInit();
}
void *__sanitizer_before_thread_create_hook(uptr thread, bool detached,
const char *name,
void *stack_base, size_t stack_size,
void *tls_base, size_t tls_size) {
return __asan::BeforeThreadCreateHook(
thread, detached,
reinterpret_cast<uptr>(stack_base), stack_size,
reinterpret_cast<uptr>(tls_base), tls_size);
}
void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) {
__asan::ThreadCreateHook(handle, status != 0);
}
void __sanitizer_thread_start_hook(void *handle, uptr self) {
__asan::ThreadStartHook(handle, self);
}
void __sanitizer_thread_exit_hook(void *handle, uptr self) {
__asan::ThreadExitHook(handle, self);
}
void __sanitizer_exit() {
__asan::HandleExit();
}
} // "C"
#endif // SANITIZER_RTEMS
...@@ -54,7 +54,8 @@ static void AsanDie() { ...@@ -54,7 +54,8 @@ static void AsanDie() {
UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg); UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg);
UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd); UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd);
} else { } else {
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); if (kHighShadowEnd)
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
} }
} }
} }
...@@ -63,8 +64,14 @@ static void AsanCheckFailed(const char *file, int line, const char *cond, ...@@ -63,8 +64,14 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) { u64 v1, u64 v2) {
Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
line, cond, (uptr)v1, (uptr)v2); line, cond, (uptr)v1, (uptr)v2);
// FIXME: check for infinite recursion without a thread-local counter here.
PRINT_CURRENT_STACK_CHECK(); // Print a stack trace the first time we come here. Otherwise, we probably
// failed a CHECK during symbolization.
static atomic_uint32_t num_calls;
if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
PRINT_CURRENT_STACK_CHECK();
}
Die(); Die();
} }
...@@ -138,6 +145,8 @@ ASAN_REPORT_ERROR_N(load, false) ...@@ -138,6 +145,8 @@ ASAN_REPORT_ERROR_N(load, false)
ASAN_REPORT_ERROR_N(store, true) ASAN_REPORT_ERROR_N(store, true)
#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \ #define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \
return; \
uptr sp = MEM_TO_SHADOW(addr); \ uptr sp = MEM_TO_SHADOW(addr); \
uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
: *reinterpret_cast<u16 *>(sp); \ : *reinterpret_cast<u16 *>(sp); \
...@@ -304,20 +313,24 @@ static void asan_atexit() { ...@@ -304,20 +313,24 @@ static void asan_atexit() {
} }
static void InitializeHighMemEnd() { static void InitializeHighMemEnd() {
#if !SANITIZER_MYRIAD2
#if !ASAN_FIXED_MAPPING #if !ASAN_FIXED_MAPPING
kHighMemEnd = GetMaxVirtualAddress(); kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly // Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg: // aligned together with kHighMemBeg:
kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1; kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
#endif // !ASAN_FIXED_MAPPING #endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0); CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
#endif // !SANITIZER_MYRIAD2
} }
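// Worked example of the alignment above: with SHADOW_GRANULARITY == 8 and a
// 4K mmap granularity the OR-mask is 8 * 4096 - 1 == 0x7fff, so kHighMemEnd
// gets its low 15 bits set; kHighMemEnd + 1 is then a multiple of 0x8000,
// which keeps both the HighMem boundary and its 8x-smaller shadow
// page-aligned.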
void PrintAddressSpaceLayout() { void PrintAddressSpaceLayout() {
Printf("|| `[%p, %p]` || HighMem ||\n", if (kHighMemBeg) {
(void*)kHighMemBeg, (void*)kHighMemEnd); Printf("|| `[%p, %p]` || HighMem ||\n",
Printf("|| `[%p, %p]` || HighShadow ||\n", (void*)kHighMemBeg, (void*)kHighMemEnd);
(void*)kHighShadowBeg, (void*)kHighShadowEnd); Printf("|| `[%p, %p]` || HighShadow ||\n",
(void*)kHighShadowBeg, (void*)kHighShadowEnd);
}
if (kMidMemBeg) { if (kMidMemBeg) {
Printf("|| `[%p, %p]` || ShadowGap3 ||\n", Printf("|| `[%p, %p]` || ShadowGap3 ||\n",
(void*)kShadowGap3Beg, (void*)kShadowGap3End); (void*)kShadowGap3Beg, (void*)kShadowGap3End);
...@@ -336,11 +349,14 @@ void PrintAddressSpaceLayout() { ...@@ -336,11 +349,14 @@ void PrintAddressSpaceLayout() {
Printf("|| `[%p, %p]` || LowMem ||\n", Printf("|| `[%p, %p]` || LowMem ||\n",
(void*)kLowMemBeg, (void*)kLowMemEnd); (void*)kLowMemBeg, (void*)kLowMemEnd);
} }
Printf("MemToShadow(shadow): %p %p %p %p", Printf("MemToShadow(shadow): %p %p",
(void*)MEM_TO_SHADOW(kLowShadowBeg), (void*)MEM_TO_SHADOW(kLowShadowBeg),
(void*)MEM_TO_SHADOW(kLowShadowEnd), (void*)MEM_TO_SHADOW(kLowShadowEnd));
(void*)MEM_TO_SHADOW(kHighShadowBeg), if (kHighMemBeg) {
(void*)MEM_TO_SHADOW(kHighShadowEnd)); Printf(" %p %p",
(void*)MEM_TO_SHADOW(kHighShadowBeg),
(void*)MEM_TO_SHADOW(kHighShadowEnd));
}
if (kMidMemBeg) { if (kMidMemBeg) {
Printf(" %p %p", Printf(" %p %p",
(void*)MEM_TO_SHADOW(kMidShadowBeg), (void*)MEM_TO_SHADOW(kMidShadowBeg),
...@@ -372,6 +388,7 @@ static void AsanInitInternal() { ...@@ -372,6 +388,7 @@ static void AsanInitInternal() {
asan_init_is_running = true; asan_init_is_running = true;
CacheBinaryName(); CacheBinaryName();
CheckASLR();
// Initialize flags. This must be done early, because most of the // Initialize flags. This must be done early, because most of the
// initialization steps look at flags(). // initialization steps look at flags().
...@@ -405,6 +422,7 @@ static void AsanInitInternal() { ...@@ -405,6 +422,7 @@ static void AsanInitInternal() {
MaybeReexec(); MaybeReexec();
// Setup internal allocator callback. // Setup internal allocator callback.
SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
SetLowLevelAllocateCallback(OnLowLevelAllocate); SetLowLevelAllocateCallback(OnLowLevelAllocate);
InitializeAsanInterceptors(); InitializeAsanInterceptors();
...@@ -523,6 +541,9 @@ void NOINLINE __asan_handle_no_return() { ...@@ -523,6 +541,9 @@ void NOINLINE __asan_handle_no_return() {
if (curr_thread) { if (curr_thread) {
top = curr_thread->stack_top(); top = curr_thread->stack_top();
bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
} else if (SANITIZER_RTEMS) {
// Give up on RTEMS.
return;
} else { } else {
CHECK(!SANITIZER_FUCHSIA); CHECK(!SANITIZER_FUCHSIA);
// If we haven't seen this thread, try asking the OS for stack bounds. // If we haven't seen this thread, try asking the OS for stack bounds.
......
...@@ -12,8 +12,9 @@ ...@@ -12,8 +12,9 @@
#include "sanitizer_common/sanitizer_platform.h" #include "sanitizer_common/sanitizer_platform.h"
// asan_fuchsia.cc has its own InitializeShadowMemory implementation. // asan_fuchsia.cc and asan_rtems.cc have their own
#if !SANITIZER_FUCHSIA // InitializeShadowMemory implementation.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_mapping.h" #include "asan_mapping.h"
...@@ -28,8 +29,7 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { ...@@ -28,8 +29,7 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1; uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size, name); if (!MmapFixedNoReserve(beg, size, name)) {
if (res != (void *)beg) {
Report( Report(
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n", "Perhaps you're using ulimit -v\n",
...@@ -97,17 +97,21 @@ void InitializeShadowMemory() { ...@@ -97,17 +97,21 @@ void InitializeShadowMemory() {
// when necessary. When dynamic address is used, the macro |kLowShadowBeg| // when necessary. When dynamic address is used, the macro |kLowShadowBeg|
// expands to |__asan_shadow_memory_dynamic_address| which is // expands to |__asan_shadow_memory_dynamic_address| which is
// |kDefaultShadowSentinel|. // |kDefaultShadowSentinel|.
bool full_shadow_is_available = false;
if (shadow_start == kDefaultShadowSentinel) { if (shadow_start == kDefaultShadowSentinel) {
__asan_shadow_memory_dynamic_address = 0; __asan_shadow_memory_dynamic_address = 0;
CHECK_EQ(0, kLowShadowBeg); CHECK_EQ(0, kLowShadowBeg);
shadow_start = FindDynamicShadowStart(); shadow_start = FindDynamicShadowStart();
if (SANITIZER_LINUX) full_shadow_is_available = true;
} }
// Update the shadow memory address (potentially) used by instrumentation. // Update the shadow memory address (potentially) used by instrumentation.
__asan_shadow_memory_dynamic_address = shadow_start; __asan_shadow_memory_dynamic_address = shadow_start;
if (kLowShadowBeg) shadow_start -= GetMmapGranularity(); if (kLowShadowBeg) shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd); if (!full_shadow_is_available)
full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \ #if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
!ASAN_FIXED_MAPPING !ASAN_FIXED_MAPPING
...@@ -156,4 +160,4 @@ void InitializeShadowMemory() { ...@@ -156,4 +160,4 @@ void InitializeShadowMemory() {
} // namespace __asan } // namespace __asan
#endif // !SANITIZER_FUCHSIA #endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
...@@ -29,9 +29,8 @@ u32 GetMallocContextSize(); ...@@ -29,9 +29,8 @@ u32 GetMallocContextSize();
// The pc will be in the position 0 of the resulting stack trace. // The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame. // The bp may refer to the current frame or to the caller's frame.
ALWAYS_INLINE ALWAYS_INLINE
void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, void GetStackTrace(BufferedStackTrace *stack, uptr max_depth, uptr pc, uptr bp,
uptr pc, uptr bp, void *context, void *context, bool fast) {
bool fast) {
#if SANITIZER_WINDOWS #if SANITIZER_WINDOWS
stack->Unwind(max_depth, pc, bp, context, 0, 0, fast); stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
#else #else
...@@ -60,32 +59,29 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, ...@@ -60,32 +59,29 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
// as early as possible (in functions exposed to the user), as we generally // as early as possible (in functions exposed to the user), as we generally
// don't want stack trace to contain functions from ASan internals. // don't want stack trace to contain functions from ASan internals.
#define GET_STACK_TRACE(max_size, fast) \ #define GET_STACK_TRACE(max_size, fast) \
BufferedStackTrace stack; \ BufferedStackTrace stack; \
if (max_size <= 2) { \ if (max_size <= 2) { \
stack.size = max_size; \ stack.size = max_size; \
if (max_size > 0) { \ if (max_size > 0) { \
stack.top_frame_bp = GET_CURRENT_FRAME(); \ stack.top_frame_bp = GET_CURRENT_FRAME(); \
stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \ stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
if (max_size > 1) \ if (max_size > 1) stack.trace_buffer[1] = GET_CALLER_PC(); \
stack.trace_buffer[1] = GET_CALLER_PC(); \ } \
} \ } else { \
} else { \ GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \
GetStackTraceWithPcBpAndContext(&stack, max_size, \ GET_CURRENT_FRAME(), 0, fast); \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), 0, fast); \
} }
#define GET_STACK_TRACE_FATAL(pc, bp) \ #define GET_STACK_TRACE_FATAL(pc, bp) \
BufferedStackTrace stack; \ BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \ GetStackTrace(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal) common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_SIGNAL(sig) \ #define GET_STACK_TRACE_SIGNAL(sig) \
BufferedStackTrace stack; \ BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \ GetStackTrace(&stack, kStackTraceMax, (sig).pc, (sig).bp, (sig).context, \
(sig).pc, (sig).bp, (sig).context, \ common_flags()->fast_unwind_on_fatal)
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \ #define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal) GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
......
...@@ -219,22 +219,25 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { ...@@ -219,22 +219,25 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
void AsanThread::Init(const InitOptions *options) { void AsanThread::Init(const InitOptions *options) {
next_stack_top_ = next_stack_bottom_ = 0; next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release); atomic_store(&stack_switching_, false, memory_order_release);
fake_stack_ = nullptr; // Will be initialized lazily if needed.
CHECK_EQ(this->stack_size(), 0U); CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls(options); SetThreadStackAndTls(options);
CHECK_GT(this->stack_size(), 0U); CHECK_GT(this->stack_size(), 0U);
CHECK(AddrIsInMem(stack_bottom_)); CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1)); CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS(); ClearShadowForThreadStackAndTLS();
fake_stack_ = nullptr;
if (__asan_option_detect_stack_use_after_return)
AsyncSignalSafeLazyInitFakeStack();
int local = 0; int local = 0;
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(), VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_, (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
&local); &local);
} }
// Fuchsia doesn't use ThreadStart. // Fuchsia and RTEMS don't use ThreadStart.
// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls. // asan_fuchsia.c/asan_rtems.c define CreateMainThread and
#if !SANITIZER_FUCHSIA // SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
thread_return_t AsanThread::ThreadStart( thread_return_t AsanThread::ThreadStart(
tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) { tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
...@@ -294,12 +297,17 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) { ...@@ -294,12 +297,17 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
CHECK(AddrIsInStack((uptr)&local)); CHECK(AddrIsInStack((uptr)&local));
} }
#endif // !SANITIZER_FUCHSIA #endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
void AsanThread::ClearShadowForThreadStackAndTLS() { void AsanThread::ClearShadowForThreadStackAndTLS() {
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0); PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_) if (tls_begin_ != tls_end_) {
PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0); uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
tls_end_ - tls_begin_aligned,
tls_end_aligned - tls_end_, 0);
}
} }
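As a rough worked example of the new TLS handling (numbers are made up): with SHADOW_GRANULARITY of 8 and a TLS segment spanning [0x1005, 0x2003), the rounded bounds are 0x1000 and 0x2008; the helper clears shadow for the fully covered granules and encodes the final granule as only 3 of 8 bytes addressable (when poison_partial is on). A plain PoisonShadow call CHECKs that both bounds are granularity-aligned, which an unaligned TLS segment like this would trip.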
bool AsanThread::GetStackFrameAccessByAddr(uptr addr, bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
...@@ -384,6 +392,9 @@ static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base, ...@@ -384,6 +392,9 @@ static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
} }
AsanThread *GetCurrentThread() { AsanThread *GetCurrentThread() {
if (SANITIZER_RTEMS && !asan_inited)
return nullptr;
AsanThreadContext *context = AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet()); reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
if (!context) { if (!context) {
...@@ -475,6 +486,11 @@ void UnlockThreadRegistry() { ...@@ -475,6 +486,11 @@ void UnlockThreadRegistry() {
__asan::asanThreadRegistry().Unlock(); __asan::asanThreadRegistry().Unlock();
} }
ThreadRegistry *GetThreadRegistryLocked() {
__asan::asanThreadRegistry().CheckLocked();
return &__asan::asanThreadRegistry();
}
void EnsureMainThreadIDIsCorrect() { void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect(); __asan::EnsureMainThreadIDIsCorrect();
} }
......
...@@ -157,6 +157,14 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread, ...@@ -157,6 +157,14 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
namespace __asan { namespace __asan {
void InitializePlatformInterceptors() { void InitializePlatformInterceptors() {
// The interceptors were not designed to be removable, so we have to keep this
// module alive for the life of the process.
HMODULE pinned;
CHECK(GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
GET_MODULE_HANDLE_EX_FLAG_PIN,
(LPCWSTR)&InitializePlatformInterceptors,
&pinned));
ASAN_INTERCEPT_FUNC(CreateThread); ASAN_INTERCEPT_FUNC(CreateThread);
ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter); ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);
...@@ -220,8 +228,8 @@ uptr FindDynamicShadowStart() { ...@@ -220,8 +228,8 @@ uptr FindDynamicShadowStart() {
uptr alignment = 8 * granularity; uptr alignment = 8 * granularity;
uptr left_padding = granularity; uptr left_padding = granularity;
uptr space_size = kHighShadowEnd + left_padding; uptr space_size = kHighShadowEnd + left_padding;
uptr shadow_start = uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
FindAvailableMemoryRange(space_size, alignment, granularity, nullptr); granularity, nullptr, nullptr);
CHECK_NE((uptr)0, shadow_start); CHECK_NE((uptr)0, shadow_start);
CHECK(IsAligned(shadow_start, alignment)); CHECK(IsAligned(shadow_start, alignment));
return shadow_start; return shadow_start;
...@@ -263,11 +271,6 @@ ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) { ...@@ -263,11 +271,6 @@ ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
// Determine the address of the page that is being accessed. // Determine the address of the page that is being accessed.
uptr page = RoundDownTo(addr, page_size); uptr page = RoundDownTo(addr, page_size);
// Query the existing page.
MEMORY_BASIC_INFORMATION mem_info = {};
if (::VirtualQuery((LPVOID)page, &mem_info, sizeof(mem_info)) == 0)
return EXCEPTION_CONTINUE_SEARCH;
// Commit the page. // Commit the page.
uptr result = uptr result =
(uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE); (uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE);
......
...@@ -97,7 +97,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { ...@@ -97,7 +97,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
} }
#endif #endif
// Window specific functions not included in asan_interface.inc. // Windows specific functions not included in asan_interface.inc.
INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return) INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return)
INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address) INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter) INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter)
......
...@@ -30,7 +30,7 @@ extern "C" { ...@@ -30,7 +30,7 @@ extern "C" {
size_t __sanitizer_get_allocated_size(const volatile void *p); size_t __sanitizer_get_allocated_size(const volatile void *p);
/* Number of bytes, allocated and not yet freed by the application. */ /* Number of bytes, allocated and not yet freed by the application. */
size_t __sanitizer_get_current_allocated_bytes(); size_t __sanitizer_get_current_allocated_bytes(void);
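The () to (void) conversions running through these headers matter in C: an empty parameter list declares a function with unspecified parameters, while (void) declares one taking no arguments. A minimal illustration (the declarations are made up):

#include <stddef.h>

size_t old_style();      /* unspecified parameters: old_style(1, 2) still compiles in C */
size_t with_proto(void); /* zero parameters: with_proto(1) is rejected at compile time */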
/* Number of bytes, mmaped by the allocator to fulfill allocation requests. /* Number of bytes, mmaped by the allocator to fulfill allocation requests.
Generally, for request of X bytes, allocator can reserve and add to free Generally, for request of X bytes, allocator can reserve and add to free
...@@ -38,17 +38,17 @@ extern "C" { ...@@ -38,17 +38,17 @@ extern "C" {
All these chunks count toward the heap size. Currently, allocator never All these chunks count toward the heap size. Currently, allocator never
releases memory to OS (instead, it just puts freed chunks to free releases memory to OS (instead, it just puts freed chunks to free
lists). */ lists). */
size_t __sanitizer_get_heap_size(); size_t __sanitizer_get_heap_size(void);
/* Number of bytes, mmaped by the allocator, which can be used to fulfill /* Number of bytes, mmaped by the allocator, which can be used to fulfill
allocation requests. When a user program frees memory chunk, it can first allocation requests. When a user program frees memory chunk, it can first
fall into quarantine and will count toward __sanitizer_get_free_bytes() fall into quarantine and will count toward __sanitizer_get_free_bytes()
later. */ later. */
size_t __sanitizer_get_free_bytes(); size_t __sanitizer_get_free_bytes(void);
/* Number of bytes in unmapped pages, that are released to OS. Currently, /* Number of bytes in unmapped pages, that are released to OS. Currently,
always returns 0. */ always returns 0. */
size_t __sanitizer_get_unmapped_bytes(); size_t __sanitizer_get_unmapped_bytes(void);
/* Malloc hooks that may be optionally provided by user. /* Malloc hooks that may be optionally provided by user.
__sanitizer_malloc_hook(ptr, size) is called immediately after __sanitizer_malloc_hook(ptr, size) is called immediately after
...@@ -74,6 +74,12 @@ extern "C" { ...@@ -74,6 +74,12 @@ extern "C" {
void (*malloc_hook)(const volatile void *, size_t), void (*malloc_hook)(const volatile void *, size_t),
void (*free_hook)(const volatile void *)); void (*free_hook)(const volatile void *));
/* Drains allocator quarantines (calling thread's and global ones), returns
freed memory back to the OS and releases other non-essential internal
allocator resources in an attempt to reduce process RSS.
Currently available with ASan only.
*/
void __sanitizer_purge_allocator(void);
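A minimal usage sketch for this new entry point (the callback name is ours, not the library's):

#include <sanitizer/allocator_interface.h>

/* Illustrative low-memory callback: drain quarantines and return cached
   allocator memory to the OS to shrink RSS. ASan-only at this point. */
static void on_memory_pressure(void) {
  __sanitizer_purge_allocator();
}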
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif #endif
......
...@@ -62,19 +62,19 @@ extern "C" { ...@@ -62,19 +62,19 @@ extern "C" {
// Useful for calling from a debugger to get information about an ASan error. // Useful for calling from a debugger to get information about an ASan error.
// Returns 1 if an error has been (or is being) reported, otherwise returns 0. // Returns 1 if an error has been (or is being) reported, otherwise returns 0.
int __asan_report_present(); int __asan_report_present(void);
// Useful for calling from a debugger to get information about an ASan error. // Useful for calling from a debugger to get information about an ASan error.
// If an error has been (or is being) reported, the following functions return // If an error has been (or is being) reported, the following functions return
// the pc, bp, sp, address, access type (0 = read, 1 = write), access size and // the pc, bp, sp, address, access type (0 = read, 1 = write), access size and
// bug description (e.g. "heap-use-after-free"). Otherwise they return 0. // bug description (e.g. "heap-use-after-free"). Otherwise they return 0.
void *__asan_get_report_pc(); void *__asan_get_report_pc(void);
void *__asan_get_report_bp(); void *__asan_get_report_bp(void);
void *__asan_get_report_sp(); void *__asan_get_report_sp(void);
void *__asan_get_report_address(); void *__asan_get_report_address(void);
int __asan_get_report_access_type(); int __asan_get_report_access_type(void);
size_t __asan_get_report_access_size(); size_t __asan_get_report_access_size(void);
const char *__asan_get_report_description(); const char *__asan_get_report_description(void);
// Useful for calling from the debugger to get information about a pointer. // Useful for calling from the debugger to get information about a pointer.
// Returns the category of the given pointer as a constant string. // Returns the category of the given pointer as a constant string.
...@@ -116,21 +116,21 @@ extern "C" { ...@@ -116,21 +116,21 @@ extern "C" {
// User may provide function that would be called right when ASan detects // User may provide function that would be called right when ASan detects
// an error. This can be used to notice cases when ASan detects an error, but // an error. This can be used to notice cases when ASan detects an error, but
// the program crashes before ASan report is printed. // the program crashes before ASan report is printed.
void __asan_on_error(); void __asan_on_error(void);
// Prints accumulated stats to stderr. Used for debugging. // Prints accumulated stats to stderr. Used for debugging.
void __asan_print_accumulated_stats(); void __asan_print_accumulated_stats(void);
// This function may be optionally provided by user and should return // This function may be optionally provided by user and should return
// a string containing ASan runtime options. See asan_flags.h for details. // a string containing ASan runtime options. See asan_flags.h for details.
const char* __asan_default_options(); const char* __asan_default_options(void);
// The following 2 functions facilitate garbage collection in presence of // The following 2 functions facilitate garbage collection in presence of
// asan's fake stack. // asan's fake stack.
// Returns an opaque handler to be used later in __asan_addr_is_in_fake_stack. // Returns an opaque handler to be used later in __asan_addr_is_in_fake_stack.
// Returns NULL if the current thread does not have a fake stack. // Returns NULL if the current thread does not have a fake stack.
void *__asan_get_current_fake_stack(); void *__asan_get_current_fake_stack(void);
// If fake_stack is non-NULL and addr belongs to a fake frame in // If fake_stack is non-NULL and addr belongs to a fake frame in
// fake_stack, returns the address on real stack that corresponds to // fake_stack, returns the address on real stack that corresponds to
......
...@@ -63,6 +63,11 @@ extern "C" { ...@@ -63,6 +63,11 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x); void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x); void __sanitizer_unaligned_store64(void *p, uint64_t x);
// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
// to ensure only one report is printed when multiple errors occur
// simultaneously.
int __sanitizer_acquire_crash_state();
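A sketch of the intended use (the handler is hypothetical, not from the patch):

#include <sanitizer/common_interface_defs.h>

/* Hypothetical fatal-error hook: when several threads crash at once, only
   the first caller to acquire the crash state prints a report. */
static void my_fatal_handler(void) {
  if (!__sanitizer_acquire_crash_state())
    return;  /* another thread already owns the report */
  __sanitizer_print_stack_trace();
}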
// Annotate the current state of a contiguous container, such as // Annotate the current state of a contiguous container, such as
// std::vector, std::string or similar. // std::vector, std::string or similar.
// A contiguous container is a container that keeps all of its elements // A contiguous container is a container that keeps all of its elements
...@@ -113,10 +118,16 @@ extern "C" { ...@@ -113,10 +118,16 @@ extern "C" {
const void *beg, const void *mid, const void *end); const void *beg, const void *mid, const void *end);
// Print the stack trace leading to this call. Useful for debugging user code. // Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace(); void __sanitizer_print_stack_trace(void);
// Symbolizes the supplied 'pc' using the format string 'fmt'. // Symbolizes the supplied 'pc' using the format string 'fmt'.
// Outputs at most 'out_buf_size' bytes into 'out_buf'. // Outputs at most 'out_buf_size' bytes into 'out_buf'.
// If 'out_buf' is not empty then the output is zero or more non-empty
// C strings followed by a single empty C string. Multiple strings can be
// returned if the PC corresponds to an inlined function. Inlined frames are
// printed in order from "most-inlined" to "least-inlined", so the last frame
// is the non-inlined function.
// Inlined frames can be removed with 'symbolize_inline_frames=0'.
// The format syntax is described in // The format syntax is described in
// lib/sanitizer_common/sanitizer_stacktrace_printer.h. // lib/sanitizer_common/sanitizer_stacktrace_printer.h.
void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf, void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
......
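Consuming that multi-string output could look like this (a sketch; the buffer size is arbitrary, and the format directives are the ones documented in sanitizer_stacktrace_printer.h):

#include <sanitizer/common_interface_defs.h>
#include <stdio.h>
#include <string.h>

/* Walks the frames reported for one PC: zero or more non-empty C strings,
   terminated by an empty one; most-inlined frames come first. */
static void print_frames(void *pc) {
  char buf[1024];
  __sanitizer_symbolize_pc(pc, "%p %F %L", buf, sizeof(buf));
  for (const char *s = buf; *s; s += strlen(s) + 1)
    printf("%s\n", s);
}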
...@@ -18,10 +18,10 @@ extern "C" { ...@@ -18,10 +18,10 @@ extern "C" {
#endif #endif
// Record and dump coverage info. // Record and dump coverage info.
void __sanitizer_cov_dump(); void __sanitizer_cov_dump(void);
// Clear collected coverage info. // Clear collected coverage info.
void __sanitizer_cov_reset(); void __sanitizer_cov_reset(void);
// Dump collected coverage info. Sorts pcs by module into individual .sancov // Dump collected coverage info. Sorts pcs by module into individual .sancov
// files. // files.
......
...@@ -35,11 +35,11 @@ extern "C" { ...@@ -35,11 +35,11 @@ extern "C" {
// This function can be called mid-run (or at the end of a run for // This function can be called mid-run (or at the end of a run for
// a server process that doesn't shut down normally) to request that // a server process that doesn't shut down normally) to request that
// data for that point in the run be reported from the tool. // data for that point in the run be reported from the tool.
void COMPILER_RT_WEAK __esan_report(); void COMPILER_RT_WEAK __esan_report(void);
// This function returns the number of samples that the esan tool has collected // This function returns the number of samples that the esan tool has collected
// to this point. This is useful for testing. // to this point. This is useful for testing.
unsigned int COMPILER_RT_WEAK __esan_get_sample_count(); unsigned int COMPILER_RT_WEAK __esan_get_sample_count(void);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
......
//===-- sanitizer/hwasan_interface.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_HWASAN_INTERFACE_H
#define SANITIZER_HWASAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
// Initialize shadow but not the rest of the runtime.
// Does not call libc unless there is an error.
// Can be called multiple times, or not at all (in which case shadow will
// be initialized in compiler-inserted __hwasan_init() call).
void __hwasan_shadow_init(void);
// This function may be optionally provided by user and should return
// a string containing HWASan runtime options. See asan_flags.h for details.
const char* __hwasan_default_options(void);
void __hwasan_enable_allocator_tagging(void);
void __hwasan_disable_allocator_tagging(void);
// Mark region of memory with the given tag. Both address and size need to be
// 16-byte aligned.
void __hwasan_tag_memory(const volatile void *p, unsigned char tag,
size_t size);
/// Set pointer tag. Previous tag is lost.
void *__hwasan_tag_pointer(const volatile void *p, unsigned char tag);
// Set memory tag from the current SP address to the given address to zero.
// This is meant to annotate longjmp and other non-local jumps.
// This function needs to know the (almost) exact destination frame address;
// clearing shadow for the entire thread stack like __asan_handle_no_return
// does would cause false reports.
void __hwasan_handle_longjmp(const void *sp_dst);
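A rough sketch of how the tagging entry points compose (the function and tag value are illustrative):

#include <stddef.h>
#include <sanitizer/hwasan_interface.h>

/* Illustrative allocator hook: tag a 16-byte-aligned region, then return a
   pointer carrying the same tag so accesses through a stale tag trap. */
static void *retag_region(void *p, size_t size) {
  unsigned char tag = 0x2a;  /* arbitrary example tag */
  __hwasan_tag_memory(p, tag, size);
  return __hwasan_tag_pointer(p, tag);
}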
// Libc hook for thread creation. Should be called in the child thread before
// any instrumented code.
void __hwasan_thread_enter();
// Libc hook for thread destruction. No instrumented code should run after
// this call.
void __hwasan_thread_exit();
// Print shadow and origin for the memory range to stderr in a human-readable
// format.
void __hwasan_print_shadow(const volatile void *x, size_t size);
// Print one-line report about the memory usage of the current process.
void __hwasan_print_memory_usage();
int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
void * __sanitizer_memalign(size_t alignment, size_t size);
void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
void * __sanitizer___libc_memalign(size_t alignment, size_t size);
void * __sanitizer_valloc(size_t size);
void * __sanitizer_pvalloc(size_t size);
void __sanitizer_free(void *ptr);
void __sanitizer_cfree(void *ptr);
size_t __sanitizer_malloc_usable_size(const void *ptr);
struct mallinfo __sanitizer_mallinfo();
int __sanitizer_mallopt(int cmd, int value);
void __sanitizer_malloc_stats(void);
void * __sanitizer_calloc(size_t nmemb, size_t size);
void * __sanitizer_realloc(void *ptr, size_t size);
void * __sanitizer_malloc(size_t size);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // SANITIZER_HWASAN_INTERFACE_H
...@@ -19,8 +19,8 @@ extern "C" { ...@@ -19,8 +19,8 @@ extern "C" {
#endif #endif
// Allocations made between calls to __lsan_disable() and __lsan_enable() will // Allocations made between calls to __lsan_disable() and __lsan_enable() will
// be treated as non-leaks. Disable/enable pairs may be nested. // be treated as non-leaks. Disable/enable pairs may be nested.
void __lsan_disable(); void __lsan_disable(void);
void __lsan_enable(); void __lsan_enable(void);
// The heap object into which p points will be treated as a non-leak. // The heap object into which p points will be treated as a non-leak.
void __lsan_ignore_object(const void *p); void __lsan_ignore_object(const void *p);
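For instance, a deliberately leaked allocation can be exempted either with a disable/enable pair or per object (a sketch, not from the patch):

#include <sanitizer/lsan_interface.h>
#include <stdlib.h>

static void *intentional_leak(size_t n) {
  __lsan_disable();   /* allocations below are treated as non-leaks */
  void *p = malloc(n);
  __lsan_enable();    /* disable/enable pairs may nest */
  return p;           /* or keep checks on and call __lsan_ignore_object(p) */
}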
...@@ -47,7 +47,7 @@ extern "C" { ...@@ -47,7 +47,7 @@ extern "C" {
// the time of first invocation of this function. // the time of first invocation of this function.
// By calling this function early during process shutdown, you can instruct // By calling this function early during process shutdown, you can instruct
// LSan to ignore shutdown-only leaks which happen later on. // LSan to ignore shutdown-only leaks which happen later on.
void __lsan_do_leak_check(); void __lsan_do_leak_check(void);
// Check for leaks now. Returns zero if no leaks have been found or if leak // Check for leaks now. Returns zero if no leaks have been found or if leak
// detection is disabled, non-zero otherwise. // detection is disabled, non-zero otherwise.
...@@ -56,7 +56,7 @@ extern "C" { ...@@ -56,7 +56,7 @@ extern "C" {
// terminate the process. It does not affect the behavior of // terminate the process. It does not affect the behavior of
// __lsan_do_leak_check() or the end-of-process leak check, and is not // __lsan_do_leak_check() or the end-of-process leak check, and is not
// affected by them. // affected by them.
int __lsan_do_recoverable_leak_check(); int __lsan_do_recoverable_leak_check(void);
// The user may optionally provide this function to disallow leak checking // The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This // for the program it is linked into (if the return value is non-zero). This
...@@ -64,15 +64,15 @@ extern "C" { ...@@ -64,15 +64,15 @@ extern "C" {
// that is unsupported. // that is unsupported.
// To avoid dead stripping, you may need to define this function with // To avoid dead stripping, you may need to define this function with
// __attribute__((used)) // __attribute__((used))
int __lsan_is_turned_off(); int __lsan_is_turned_off(void);
// This function may be optionally provided by user and should return // This function may be optionally provided by user and should return
// a string containing LSan runtime options. See lsan_flags.inc for details. // a string containing LSan runtime options. See lsan_flags.inc for details.
const char *__lsan_default_options(); const char *__lsan_default_options(void);
// This function may be optionally provided by the user and should return // This function may be optionally provided by the user and should return
// a string containing LSan suppressions. // a string containing LSan suppressions.
const char *__lsan_default_suppressions(); const char *__lsan_default_suppressions(void);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
......
...@@ -29,10 +29,10 @@ extern "C" { ...@@ -29,10 +29,10 @@ extern "C" {
int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id); int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
/* Returns non-zero if tracking origins. */ /* Returns non-zero if tracking origins. */
int __msan_get_track_origins(); int __msan_get_track_origins(void);
/* Returns the origin id of the latest UMR in the calling thread. */ /* Returns the origin id of the latest UMR in the calling thread. */
uint32_t __msan_get_umr_origin(); uint32_t __msan_get_umr_origin(void);
/* Make memory region fully initialized (without changing its contents). */ /* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size); void __msan_unpoison(const volatile void *a, size_t size);
...@@ -80,7 +80,7 @@ extern "C" { ...@@ -80,7 +80,7 @@ extern "C" {
void __msan_dump_shadow(const volatile void *x, size_t size); void __msan_dump_shadow(const volatile void *x, size_t size);
/* Returns true if running under a dynamic tool (DynamoRio-based). */ /* Returns true if running under a dynamic tool (DynamoRio-based). */
int __msan_has_dynamic_component(); int __msan_has_dynamic_component(void);
/* Tell MSan about newly allocated memory (ex.: custom allocator). /* Tell MSan about newly allocated memory (ex.: custom allocator).
Memory will be marked uninitialized, with origin at the call site. */ Memory will be marked uninitialized, with origin at the call site. */
...@@ -91,7 +91,7 @@ extern "C" { ...@@ -91,7 +91,7 @@ extern "C" {
/* This function may be optionally provided by user and should return /* This function may be optionally provided by user and should return
a string containing Msan runtime options. See msan_flags.h for details. */ a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options(); const char* __msan_default_options(void);
/* Deprecated. Call __sanitizer_set_death_callback instead. */ /* Deprecated. Call __sanitizer_set_death_callback instead. */
void __msan_set_death_callback(void (*callback)(void)); void __msan_set_death_callback(void (*callback)(void));
...@@ -102,6 +102,14 @@ extern "C" { ...@@ -102,6 +102,14 @@ extern "C" {
copy. Source and destination regions can overlap. */ copy. Source and destination regions can overlap. */
void __msan_copy_shadow(const volatile void *dst, const volatile void *src, void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
size_t size); size_t size);
/* Disables uninitialized memory checks in interceptors. */
void __msan_scoped_disable_interceptor_checks(void);
/* Re-enables uninitialized memory checks in interceptors after a previous
call to __msan_scoped_disable_interceptor_checks. */
void __msan_scoped_enable_interceptor_checks(void);
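A sketch of the new scoped toggles (the wrapper and its use of write() are an arbitrary example):

#include <stddef.h>
#include <unistd.h>
#include <sanitizer/msan_interface.h>

/* Hypothetical wrapper: hand a partly-uninitialized buffer to an intercepted
   libc call without triggering an uninitialized-memory report. */
static void write_raw(int fd, const char *buf, size_t len) {
  __msan_scoped_disable_interceptor_checks();
  (void)write(fd, buf, len);
  __msan_scoped_enable_interceptor_checks();
}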
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif #endif
......
This source diff could not be displayed because it is too large.
//===-- sanitizer/scudo_interface.h -----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// Public Scudo interface header.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_SCUDO_INTERFACE_H_
#define SANITIZER_SCUDO_INTERFACE_H_
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
// This function may be optionally provided by a user and should return
// a string containing Scudo runtime options. See scudo_flags.h for details.
const char* __scudo_default_options(void);
// This function allows setting the RSS limit at runtime. This can be either
// the hard limit (HardLimit=1) or the soft limit (HardLimit=0). The limit
// can be removed by setting LimitMb to 0. This function's parameters should
// be fully trusted to avoid security mishaps.
void __scudo_set_rss_limit(size_t LimitMb, int HardLimit);
// This function outputs various allocator statistics for both the Primary
// and Secondary allocators, including memory usage, number of allocations
// and deallocations.
void __scudo_print_stats(void);
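A sketch of the RSS-limit knob (the values are arbitrary):

#include <sanitizer/scudo_interface.h>

int main(void) {
  __scudo_set_rss_limit(2048, /*HardLimit=*/0);  /* 2 GB soft limit */
  /* ... application work ... */
  __scudo_set_rss_limit(0, 0);                   /* remove the limit */
  __scudo_print_stats();
  return 0;
}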
#ifdef __cplusplus
} // extern "C"
#endif
#endif // SANITIZER_SCUDO_INTERFACE_H_
...@@ -42,6 +42,11 @@ const unsigned __tsan_mutex_linker_init = 1 << 0; ...@@ -42,6 +42,11 @@ const unsigned __tsan_mutex_linker_init = 1 << 0;
const unsigned __tsan_mutex_write_reentrant = 1 << 1; const unsigned __tsan_mutex_write_reentrant = 1 << 1;
// Mutex is read reentrant. // Mutex is read reentrant.
const unsigned __tsan_mutex_read_reentrant = 1 << 2; const unsigned __tsan_mutex_read_reentrant = 1 << 2;
// Mutex does not have static storage duration, and must not be used after
// its destructor runs. The opposite of __tsan_mutex_linker_init.
// If this flag is passed to __tsan_mutex_destroy, then the destruction
// is ignored unless this flag was previously set on the mutex.
const unsigned __tsan_mutex_not_static = 1 << 8;
// Mutex operation flags: // Mutex operation flags:
...@@ -68,6 +73,7 @@ void __tsan_mutex_create(void *addr, unsigned flags); ...@@ -68,6 +73,7 @@ void __tsan_mutex_create(void *addr, unsigned flags);
// Annotate destruction of a mutex. // Annotate destruction of a mutex.
// Supported flags: // Supported flags:
// - __tsan_mutex_linker_init // - __tsan_mutex_linker_init
// - __tsan_mutex_not_static
void __tsan_mutex_destroy(void *addr, unsigned flags); void __tsan_mutex_destroy(void *addr, unsigned flags);
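For a custom lock without static storage duration, the new flag is passed at both creation and destruction so TSan can diagnose use after the destructor runs (a sketch; the mutex type is illustrative):

#include <sanitizer/tsan_interface.h>

typedef struct { int held; } my_mutex;  /* illustrative lock type */

static void my_mutex_init(my_mutex *m) {
  m->held = 0;
  __tsan_mutex_create(m, __tsan_mutex_not_static);
}

static void my_mutex_destroy(my_mutex *m) {
  /* Honored because __tsan_mutex_not_static was set at creation. */
  __tsan_mutex_destroy(m, __tsan_mutex_not_static);
}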
// Annotate start of lock operation. // Annotate start of lock operation.
......
...@@ -13,19 +13,21 @@ ...@@ -13,19 +13,21 @@
#ifndef INTERCEPTION_H #ifndef INTERCEPTION_H
#define INTERCEPTION_H #define INTERCEPTION_H
#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__APPLE__) && \ #include "sanitizer_common/sanitizer_internal_defs.h"
!defined(__NetBSD__) && !defined(_WIN32) && !defined(__Fuchsia__)
#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
!SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_WINDOWS && \
!SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_SOLARIS
# error "Interception doesn't work on this operating system." # error "Interception doesn't work on this operating system."
#endif #endif
#include "sanitizer_common/sanitizer_internal_defs.h"
// These typedefs should be used only in the interceptor definitions to replace // These typedefs should be used only in the interceptor definitions to replace
// the standard system types (e.g. SSIZE_T instead of ssize_t) // the standard system types (e.g. SSIZE_T instead of ssize_t)
typedef __sanitizer::uptr SIZE_T; typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T; typedef __sanitizer::sptr SSIZE_T;
typedef __sanitizer::sptr PTRDIFF_T; typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T; typedef __sanitizer::s64 INTMAX_T;
typedef __sanitizer::u64 UINTMAX_T;
typedef __sanitizer::OFF_T OFF_T; typedef __sanitizer::OFF_T OFF_T;
typedef __sanitizer::OFF64_T OFF64_T; typedef __sanitizer::OFF64_T OFF64_T;
...@@ -85,7 +87,7 @@ typedef __sanitizer::OFF64_T OFF64_T; ...@@ -85,7 +87,7 @@ typedef __sanitizer::OFF64_T OFF64_T;
// As it's decided at compile time which functions are to be intercepted on Mac, // As it's decided at compile time which functions are to be intercepted on Mac,
// INTERCEPT_FUNCTION() is effectively a no-op on this system. // INTERCEPT_FUNCTION() is effectively a no-op on this system.
#if defined(__APPLE__) #if SANITIZER_MAC
#include <sys/cdefs.h> // For __DARWIN_ALIAS_C(). #include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
// Just a pair of pointers. // Just a pair of pointers.
...@@ -119,7 +121,7 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -119,7 +121,7 @@ const interpose_substitution substitution_##func_name[] \
# define INTERCEPTOR_ATTRIBUTE # define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...) # define DECLARE_WRAPPER(ret_type, func, ...)
#elif defined(_WIN32) #elif SANITIZER_WINDOWS
# define WRAP(x) __asan_wrap_##x # define WRAP(x) __asan_wrap_##x
# define WRAPPER_NAME(x) "__asan_wrap_"#x # define WRAPPER_NAME(x) "__asan_wrap_"#x
# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport) # define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
...@@ -127,7 +129,12 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -127,7 +129,12 @@ const interpose_substitution substitution_##func_name[] \
extern "C" ret_type func(__VA_ARGS__); extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \ # define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__); extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
#elif defined(__FreeBSD__) || defined(__NetBSD__) #elif SANITIZER_RTEMS
# define WRAP(x) x
# define WRAPPER_NAME(x) #x
# define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
# define WRAP(x) __interceptor_ ## x # define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x # define WRAPPER_NAME(x) "__interceptor_" #x
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default"))) # define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
...@@ -137,7 +144,7 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -137,7 +144,7 @@ const interpose_substitution substitution_##func_name[] \
# define DECLARE_WRAPPER(ret_type, func, ...) \ # define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) \ extern "C" ret_type func(__VA_ARGS__) \
__attribute__((alias("__interceptor_" #func), visibility("default"))); __attribute__((alias("__interceptor_" #func), visibility("default")));
#elif !defined(__Fuchsia__) #elif !SANITIZER_FUCHSIA
# define WRAP(x) __interceptor_ ## x # define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x # define WRAPPER_NAME(x) "__interceptor_" #x
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default"))) # define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
...@@ -146,7 +153,7 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -146,7 +153,7 @@ const interpose_substitution substitution_##func_name[] \
__attribute__((weak, alias("__interceptor_" #func), visibility("default"))); __attribute__((weak, alias("__interceptor_" #func), visibility("default")));
#endif #endif
#if defined(__Fuchsia__) #if SANITIZER_FUCHSIA
// There is no general interception at all on Fuchsia. // There is no general interception at all on Fuchsia.
// Sanitizer runtimes just define functions directly to preempt them, // Sanitizer runtimes just define functions directly to preempt them,
// and have bespoke ways to access the underlying libc functions. // and have bespoke ways to access the underlying libc functions.
...@@ -154,10 +161,14 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -154,10 +161,14 @@ const interpose_substitution substitution_##func_name[] \
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default"))) # define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# define REAL(x) __unsanitized_##x # define REAL(x) __unsanitized_##x
# define DECLARE_REAL(ret_type, func, ...) # define DECLARE_REAL(ret_type, func, ...)
#elif !defined(__APPLE__) #elif SANITIZER_RTEMS
# define REAL(x) __real_ ## x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type REAL(func)(__VA_ARGS__);
#elif !SANITIZER_MAC
# define PTR_TO_REAL(x) real_##x # define PTR_TO_REAL(x) real_##x
# define REAL(x) __interception::PTR_TO_REAL(x) # define REAL(x) __interception::PTR_TO_REAL(x)
# define FUNC_TYPE(x) x##_f # define FUNC_TYPE(x) x##_type
# define DECLARE_REAL(ret_type, func, ...) \ # define DECLARE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
...@@ -165,14 +176,14 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -165,14 +176,14 @@ const interpose_substitution substitution_##func_name[] \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \ extern FUNC_TYPE(func) PTR_TO_REAL(func); \
} }
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src) # define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // __APPLE__ #else // SANITIZER_MAC
# define REAL(x) x # define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \ # define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__); extern "C" ret_type func(__VA_ARGS__);
# define ASSIGN_REAL(x, y) # define ASSIGN_REAL(x, y)
#endif // __APPLE__ #endif // SANITIZER_MAC
#if !defined(__Fuchsia__) #if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \ #define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
DECLARE_REAL(ret_type, func, __VA_ARGS__) \ DECLARE_REAL(ret_type, func, __VA_ARGS__) \
extern "C" ret_type WRAP(func)(__VA_ARGS__); extern "C" ret_type WRAP(func)(__VA_ARGS__);
...@@ -184,7 +195,7 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -184,7 +195,7 @@ const interpose_substitution substitution_##func_name[] \
// macros does its job. In exceptional cases you may need to call REAL(foo) // macros does its job. In exceptional cases you may need to call REAL(foo)
// without defining INTERCEPTOR(..., foo, ...). For example, if you override // without defining INTERCEPTOR(..., foo, ...). For example, if you override
// foo with an interceptor for other function. // foo with an interceptor for other function.
#if !defined(__APPLE__) && !defined(__Fuchsia__) #if !SANITIZER_MAC && !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
# define DEFINE_REAL(ret_type, func, ...) \ # define DEFINE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \ namespace __interception { \
...@@ -194,7 +205,7 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -194,7 +205,7 @@ const interpose_substitution substitution_##func_name[] \
# define DEFINE_REAL(ret_type, func, ...) # define DEFINE_REAL(ret_type, func, ...)
#endif #endif
#if defined(__Fuchsia__) #if SANITIZER_FUCHSIA
// We need to define the __interceptor_func name just to get // We need to define the __interceptor_func name just to get
// sanitizer_common/scripts/gen_dynamic_list.py to export func. // sanitizer_common/scripts/gen_dynamic_list.py to export func.
...@@ -204,7 +215,7 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -204,7 +215,7 @@ const interpose_substitution substitution_##func_name[] \
__interceptor_##func(__VA_ARGS__); \ __interceptor_##func(__VA_ARGS__); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__) extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)
#elif !defined(__APPLE__) #elif !SANITIZER_MAC
#define INTERCEPTOR(ret_type, func, ...) \ #define INTERCEPTOR(ret_type, func, ...) \
DEFINE_REAL(ret_type, func, __VA_ARGS__) \ DEFINE_REAL(ret_type, func, __VA_ARGS__) \
...@@ -217,7 +228,7 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -217,7 +228,7 @@ const interpose_substitution substitution_##func_name[] \
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \ #define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
INTERCEPTOR(ret_type, func, __VA_ARGS__) INTERCEPTOR(ret_type, func, __VA_ARGS__)
#else // __APPLE__ #else // SANITIZER_MAC
#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \ #define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) suffix; \ extern "C" ret_type func(__VA_ARGS__) suffix; \
...@@ -236,7 +247,7 @@ const interpose_substitution substitution_##func_name[] \ ...@@ -236,7 +247,7 @@ const interpose_substitution substitution_##func_name[] \
INTERPOSER_2(overridee, WRAP(overrider)) INTERPOSER_2(overridee, WRAP(overrider))
#endif #endif
#if defined(_WIN32) #if SANITIZER_WINDOWS
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \ # define INTERCEPTOR_WINAPI(ret_type, func, ...) \
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \ typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \ namespace __interception { \
...@@ -262,17 +273,19 @@ typedef unsigned long uptr; // NOLINT ...@@ -262,17 +273,19 @@ typedef unsigned long uptr; // NOLINT
#define INCLUDED_FROM_INTERCEPTION_LIB #define INCLUDED_FROM_INTERCEPTION_LIB
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
# include "interception_linux.h" # include "interception_linux.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \ # define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif defined(__APPLE__) #elif SANITIZER_MAC
# include "interception_mac.h" # include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func) # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \ # define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_MAC(func, symver) INTERCEPT_FUNCTION_VER_MAC(func, symver)
#elif defined(_WIN32) #elif SANITIZER_WINDOWS
# include "interception_win.h" # include "interception_win.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func) # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \ # define INTERCEPT_FUNCTION_VER(func, symver) \
......
...@@ -10,32 +10,44 @@ ...@@ -10,32 +10,44 @@
// Linux-specific interception methods. // Linux-specific interception methods.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
#include "interception.h" #include "interception.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
#include <dlfcn.h> // for dlsym() and dlvsym() #include <dlfcn.h> // for dlsym() and dlvsym()
#ifdef __NetBSD__ #if SANITIZER_NETBSD
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#endif #endif
namespace __interception { namespace __interception {
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr, bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
uptr real, uptr wrapper) { uptr real, uptr wrapper) {
#ifdef __NetBSD__ #if SANITIZER_NETBSD
// XXX: Find a better way to handle renames // XXX: Find a better way to handle renames
if (internal_strcmp(func_name, "sigaction") == 0) func_name = "__sigaction14"; if (internal_strcmp(func_name, "sigaction") == 0) func_name = "__sigaction14";
#endif #endif
*func_addr = (uptr)dlsym(RTLD_NEXT, func_name); *func_addr = (uptr)dlsym(RTLD_NEXT, func_name);
if (!*func_addr) {
// If the lookup using RTLD_NEXT failed, the sanitizer runtime library is
// later in the library search order than the DSO that we are trying to
// intercept, which means that we cannot intercept this function. We still
// want the address of the real definition, though, so look it up using
// RTLD_DEFAULT.
*func_addr = (uptr)dlsym(RTLD_DEFAULT, func_name);
}
return real == wrapper; return real == wrapper;
} }
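The lookup-order fallback can be reproduced in isolation (a standalone sketch, not the library's code; _GNU_SOURCE is needed on glibc for RTLD_NEXT and RTLD_DEFAULT):

#define _GNU_SOURCE
#include <dlfcn.h>

/* Prefer the next definition after the caller in search order; if the
   runtime sits after the target DSO, fall back to the default global
   lookup so the real address is still found. */
static void *lookup_real(const char *name) {
  void *addr = dlsym(RTLD_NEXT, name);
  return addr ? addr : dlsym(RTLD_DEFAULT, name);
}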
#if !defined(__ANDROID__) // android does not have dlvsym // Android, Solaris and OpenBSD do not have dlvsym
#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
void *GetFuncAddrVer(const char *func_name, const char *ver) { void *GetFuncAddrVer(const char *func_name, const char *ver) {
return dlvsym(RTLD_NEXT, func_name, ver); return dlvsym(RTLD_NEXT, func_name, ver);
} }
#endif // !defined(__ANDROID__) #endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
} // namespace __interception } // namespace __interception
#endif // __linux__ || __FreeBSD__ || __NetBSD__ #endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
// SANITIZER_OPENBSD || SANITIZER_SOLARIS
...@@ -10,7 +10,8 @@ ...@@ -10,7 +10,8 @@
// Linux-specific interception methods. // Linux-specific interception methods.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB) #if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only" # error "interception_linux.h should be included from interception library only"
...@@ -32,14 +33,16 @@ void *GetFuncAddrVer(const char *func_name, const char *ver); ...@@ -32,14 +33,16 @@ void *GetFuncAddrVer(const char *func_name, const char *ver);
(::__interception::uptr) & (func), \ (::__interception::uptr) & (func), \
(::__interception::uptr) & WRAP(func)) (::__interception::uptr) & WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym // Android, Solaris and OpenBSD do not have dlvsym
#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ #define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
(::__interception::real_##func = (func##_f)( \ (::__interception::real_##func = (func##_type)( \
unsigned long)::__interception::GetFuncAddrVer(#func, symver)) unsigned long)::__interception::GetFuncAddrVer(#func, symver))
#else #else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ #define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif // !defined(__ANDROID__) #endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
#endif // INTERCEPTION_LINUX_H #endif // INTERCEPTION_LINUX_H
#endif // __linux__ || __FreeBSD__ || __NetBSD__ #endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
// SANITIZER_OPENBSD || SANITIZER_SOLARIS
...@@ -10,9 +10,8 @@ ...@@ -10,9 +10,8 @@
// Mac-specific interception methods. // Mac-specific interception methods.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifdef __APPLE__
#include "interception.h" #include "interception.h"
#if SANITIZER_MAC
#endif // __APPLE__ #endif // SANITIZER_MAC
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
// Mac-specific interception methods. // Mac-specific interception methods.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifdef __APPLE__ #if SANITIZER_MAC
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB) #if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_mac.h should be included from interception.h only" # error "interception_mac.h should be included from interception.h only"
...@@ -23,4 +23,4 @@ ...@@ -23,4 +23,4 @@
#define INTERCEPT_FUNCTION_VER_MAC(func, symver) #define INTERCEPT_FUNCTION_VER_MAC(func, symver)
#endif // INTERCEPTION_MAC_H #endif // INTERCEPTION_MAC_H
#endif // __APPLE__ #endif // SANITIZER_MAC
...@@ -10,9 +10,10 @@ ...@@ -10,9 +10,10 @@
// Compile-time tests of the internal type definitions. // Compile-time tests of the internal type definitions.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#if defined(__linux__) || defined(__APPLE__)
#include "interception.h" #include "interception.h"
#if SANITIZER_LINUX || SANITIZER_MAC
#include <sys/types.h> #include <sys/types.h>
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
...@@ -22,14 +23,14 @@ COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t)); ...@@ -22,14 +23,14 @@ COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t)); COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t)); COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
#ifndef __APPLE__ #if !SANITIZER_MAC
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t)); COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif #endif
// The following are the cases when pread (and friends) is used instead of // The following are the cases when pread (and friends) is used instead of
// pread64. In those cases we need OFF_T to match off_t. We don't care about the // pread64. In those cases we need OFF_T to match off_t. We don't care about the
// rest (they depend on _FILE_OFFSET_BITS setting when building an application). // rest (they depend on _FILE_OFFSET_BITS setting when building an application).
# if defined(__ANDROID__) || !defined _FILE_OFFSET_BITS || \ # if SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \
_FILE_OFFSET_BITS != 64 _FILE_OFFSET_BITS != 64
COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t)); COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
# endif # endif
......
...@@ -123,9 +123,9 @@ ...@@ -123,9 +123,9 @@
// addr2: .bytes <body> // addr2: .bytes <body>
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifdef _WIN32
#include "interception.h" #include "interception.h"
#if SANITIZER_WINDOWS
#include "sanitizer_common/sanitizer_platform.h" #include "sanitizer_common/sanitizer_platform.h"
#define WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN
#include <windows.h> #include <windows.h>
...@@ -221,8 +221,8 @@ static bool IsMemoryPadding(uptr address, uptr size) { ...@@ -221,8 +221,8 @@ static bool IsMemoryPadding(uptr address, uptr size) {
return true; return true;
} }
static const u8 kHintNop9Bytes[] = { static const u8 kHintNop8Bytes[] = {
0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00
}; };
template<class T> template<class T>
...@@ -237,8 +237,8 @@ static bool FunctionHasPrefix(uptr address, const T &pattern) { ...@@ -237,8 +237,8 @@ static bool FunctionHasPrefix(uptr address, const T &pattern) {
static bool FunctionHasPadding(uptr address, uptr size) { static bool FunctionHasPadding(uptr address, uptr size) {
if (IsMemoryPadding(address - size, size)) if (IsMemoryPadding(address - size, size))
return true; return true;
if (size <= sizeof(kHintNop9Bytes) && if (size <= sizeof(kHintNop8Bytes) &&
FunctionHasPrefix(address, kHintNop9Bytes)) FunctionHasPrefix(address, kHintNop8Bytes))
return true; return true;
return false; return false;
} }
...@@ -451,6 +451,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { ...@@ -451,6 +451,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
} }
switch (*(u16*)(address)) { switch (*(u16*)(address)) {
case 0x018A: // 8A 01 : mov al, byte ptr [ecx]
case 0xFF8B: // 8B FF : mov edi, edi case 0xFF8B: // 8B FF : mov edi, edi
case 0xEC8B: // 8B EC : mov ebp, esp case 0xEC8B: // 8B EC : mov ebp, esp
case 0xc889: // 89 C8 : mov eax, ecx case 0xc889: // 89 C8 : mov eax, ecx
...@@ -551,7 +552,10 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { ...@@ -551,7 +552,10 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
return 5; return 5;
case 0x24648348: // 48 83 64 24 XX : and QWORD PTR [rsp + XX], YY
return 6;
} }
#else #else
...@@ -830,6 +834,7 @@ bool OverrideFunction( ...@@ -830,6 +834,7 @@ bool OverrideFunction(
static void **InterestingDLLsAvailable() { static void **InterestingDLLsAvailable() {
static const char *InterestingDLLs[] = { static const char *InterestingDLLs[] = {
"kernel32.dll", "kernel32.dll",
"msvcr100.dll", // VS2010
"msvcr110.dll", // VS2012 "msvcr110.dll", // VS2012
"msvcr120.dll", // VS2013 "msvcr120.dll", // VS2013
"vcruntime140.dll", // VS2015 "vcruntime140.dll", // VS2015
...@@ -1007,4 +1012,4 @@ bool OverrideImportedFunction(const char *module_to_patch, ...@@ -1007,4 +1012,4 @@ bool OverrideImportedFunction(const char *module_to_patch,
} // namespace __interception } // namespace __interception
#endif // _WIN32 #endif // SANITIZER_WINDOWS
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
// Windows-specific interception methods. // Windows-specific interception methods.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifdef _WIN32 #if SANITIZER_WINDOWS
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB) #if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_win.h should be included from interception library only" # error "interception_win.h should be included from interception library only"
...@@ -79,4 +79,4 @@ void TestOnlyReleaseTrampolineRegions(); ...@@ -79,4 +79,4 @@ void TestOnlyReleaseTrampolineRegions();
(::__interception::uptr *)&REAL(func)) (::__interception::uptr *)&REAL(func))
#endif // INTERCEPTION_WIN_H #endif // INTERCEPTION_WIN_H
#endif // _WIN32 #endif // SANITIZER_WINDOWS
...@@ -64,16 +64,17 @@ static void InitializeFlags() { ...@@ -64,16 +64,17 @@ static void InitializeFlags() {
if (Verbosity()) ReportUnrecognizedFlags(); if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions(); if (common_flags()->help) parser.PrintFlagDescriptions();
__sanitizer_set_report_path(common_flags()->log_path);
} }
static void OnStackUnwind(const SignalContext &sig, const void *, static void OnStackUnwind(const SignalContext &sig, const void *,
BufferedStackTrace *stack) { BufferedStackTrace *stack) {
GetStackTraceWithPcBpAndContext(stack, kStackTraceMax, sig.pc, sig.bp, GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context,
sig.context, common_flags()->fast_unwind_on_fatal);
common_flags()->fast_unwind_on_fatal);
} }
void LsanOnDeadlySignal(int signo, void *siginfo, void *context) { static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind, HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
nullptr); nullptr);
} }
......
...@@ -16,9 +16,8 @@ ...@@ -16,9 +16,8 @@
#define GET_STACK_TRACE(max_size, fast) \ #define GET_STACK_TRACE(max_size, fast) \
__sanitizer::BufferedStackTrace stack; \ __sanitizer::BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, max_size, \ GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \
StackTrace::GetCurrentPc(), \ GET_CURRENT_FRAME(), nullptr, fast);
GET_CURRENT_FRAME(), nullptr, fast);
#define GET_STACK_TRACE_FATAL \ #define GET_STACK_TRACE_FATAL \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal) GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
...@@ -44,10 +43,9 @@ void ReplaceSystemMalloc(); ...@@ -44,10 +43,9 @@ void ReplaceSystemMalloc();
// The pc will be in the position 0 of the resulting stack trace. // The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame. // The bp may refer to the current frame or to the caller's frame.
ALWAYS_INLINE ALWAYS_INLINE
void GetStackTraceWithPcBpAndContext(__sanitizer::BufferedStackTrace *stack, void GetStackTrace(__sanitizer::BufferedStackTrace *stack,
__sanitizer::uptr max_depth, __sanitizer::uptr max_depth, __sanitizer::uptr pc,
__sanitizer::uptr pc, __sanitizer::uptr bp, __sanitizer::uptr bp, void *context, bool fast) {
void *context, bool fast) {
uptr stack_top = 0, stack_bottom = 0; uptr stack_top = 0, stack_bottom = 0;
ThreadContext *t; ThreadContext *t;
if (fast && (t = CurrentThreadContext())) { if (fast && (t = CurrentThreadContext())) {
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stackdepot.h"
...@@ -68,15 +69,27 @@ static void RegisterDeallocation(void *p) { ...@@ -68,15 +69,27 @@ static void RegisterDeallocation(void *p) {
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed); atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
} }
static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
if (AllocatorMayReturnNull()) {
Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
return nullptr;
}
ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}
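
Every new failure path in this file follows the same allocator_may_return_null contract: warn and hand back null when the flag permits it, otherwise die with a specific report. The policy distilled into a standalone sketch (illustrative names; plain libc stands in for the sanitizer's Report/Die):

#include <cstdio>
#include <cstdlib>

static bool may_return_null = false;  // mirrors allocator_may_return_null

void *AllocateChecked(size_t size, size_t max_size) {
  if (size > max_size) {
    if (may_return_null) {
      fprintf(stderr, "WARNING: failed to allocate 0x%zx bytes\n", size);
      return nullptr;                // soft failure: caller sees null
    }
    fprintf(stderr, "ERROR: 0x%zx exceeds the 0x%zx limit\n", size, max_size);
    abort();                         // hard failure: never returns
  }
  return malloc(size);
}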
void *Allocate(const StackTrace &stack, uptr size, uptr alignment, void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared) { bool cleared) {
if (size == 0) if (size == 0)
size = 1; size = 1;
if (size > kMaxAllowedMallocSize) { if (size > kMaxAllowedMallocSize)
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size); return ReportAllocationSizeTooBig(size, stack);
return Allocator::FailureHandler::OnBadRequest();
}
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment); void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
if (UNLIKELY(!p)) {
SetAllocatorOutOfMemory();
if (AllocatorMayReturnNull())
return nullptr;
ReportOutOfMemory(size, &stack);
}
// Do not rely on the allocator to clear the memory (it's slow). // Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p)) if (cleared && allocator.FromPrimary(p))
memset(p, 0, size); memset(p, 0, size);
...@@ -87,8 +100,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment, ...@@ -87,8 +100,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
} }
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) { static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
return Allocator::FailureHandler::OnBadRequest(); if (AllocatorMayReturnNull())
return nullptr;
ReportCallocOverflow(nmemb, size, &stack);
}
size *= nmemb; size *= nmemb;
return Allocate(stack, size, 1, true); return Allocate(stack, size, 1, true);
} }
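
CheckForCallocOverflow guards the nmemb * size multiplication that follows it. The equivalent test written out (division sidesteps the overflowing multiply itself):

#include <cstddef>
#include <cstdint>

bool CallocOverflows(size_t nmemb, size_t size) {
  if (nmemb == 0) return false;      // 0 * size never overflows
  return size > SIZE_MAX / nmemb;    // true iff nmemb * size would wrap
}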
...@@ -104,9 +120,8 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size, ...@@ -104,9 +120,8 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) { uptr alignment) {
RegisterDeallocation(p); RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) { if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
allocator.Deallocate(GetAllocatorCache(), p); allocator.Deallocate(GetAllocatorCache(), p);
return Allocator::FailureHandler::OnBadRequest(); return ReportAllocationSizeTooBig(new_size, stack);
} }
p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment); p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size); RegisterAllocation(stack, p, new_size);
...@@ -124,10 +139,38 @@ uptr GetMallocUsableSize(const void *p) { ...@@ -124,10 +139,38 @@ uptr GetMallocUsableSize(const void *p) {
return m->requested_size; return m->requested_size;
} }
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
const StackTrace &stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
if (AllocatorMayReturnNull())
return errno_EINVAL;
ReportInvalidPosixMemalignAlignment(alignment, &stack);
}
void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
if (UNLIKELY(!ptr))
// OOM error is already taken care of by Allocate.
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
}
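
The wrapper above encodes the posix_memalign contract the interceptor must honor: failures are reported through the return value (errno_EINVAL / errno_ENOMEM), errno itself is left untouched, and the alignment must be a power of two that is also a multiple of sizeof(void *). The caller-side view:

#include <cstdio>
#include <cstdlib>

int main() {
  void *p = nullptr;
  // 64 is a power of two and a multiple of sizeof(void *).
  int rc = posix_memalign(&p, 64, 1024);
  if (rc != 0) {                     // error code returned, errno untouched
    fprintf(stderr, "posix_memalign: error %d\n", rc);
    return 1;
  }
  printf("64-byte-aligned block at %p\n", p);
  free(p);
  return 0;
}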
void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
errno = errno_EINVAL;
if (AllocatorMayReturnNull())
return nullptr;
ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
}
return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) { void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) { if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL; errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest(); if (AllocatorMayReturnNull())
return nullptr;
ReportInvalidAllocationAlignment(alignment, &stack);
} }
return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory)); return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
} }
...@@ -153,6 +196,19 @@ void *lsan_valloc(uptr size, const StackTrace &stack) { ...@@ -153,6 +196,19 @@ void *lsan_valloc(uptr size, const StackTrace &stack) {
Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory)); Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
} }
void *lsan_pvalloc(uptr size, const StackTrace &stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
if (AllocatorMayReturnNull())
return nullptr;
ReportPvallocOverflow(size, &stack);
}
// pvalloc(0) should allocate one page.
size = size ? RoundUpTo(size, PageSize) : PageSize;
return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}
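
lsan_pvalloc implements the documented pvalloc behavior: round the request up to a whole number of pages, with pvalloc(0) yielding one page; CheckForPvallocOverflow rejects sizes where that rounding would wrap. The rounding itself, as a sketch assuming a power-of-two page size:

#include <unistd.h>

unsigned long PvallocSize(unsigned long size) {
  unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);
  if (size == 0) return page;                // pvalloc(0) = one page
  return (size + page - 1) & ~(page - 1);    // RoundUpTo(size, page)
}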
uptr lsan_mz_size(const void *p) { uptr lsan_mz_size(const void *p) {
return GetMallocUsableSize(p); return GetMallocUsableSize(p);
} }
......
...@@ -66,9 +66,16 @@ struct AP32 { ...@@ -66,9 +66,16 @@ struct AP32 {
}; };
typedef SizeClassAllocator32<AP32> PrimaryAllocator; typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__) || defined(__powerpc64__) #elif defined(__x86_64__) || defined(__powerpc64__)
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# endif
struct AP64 { // Allocator64 parameters. Deliberately using a short name. struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = 0x600000000000ULL; static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = 0x40000000000ULL; // 4T. static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = sizeof(ChunkMetadata); static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef DefaultSizeClassMap SizeClassMap; typedef DefaultSizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback; typedef NoOpMapUnmapCallback MapUnmapCallback;
...@@ -81,12 +88,16 @@ typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; ...@@ -81,12 +88,16 @@ typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
AllocatorCache *GetAllocatorCache(); AllocatorCache *GetAllocatorCache();
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
const StackTrace &stack);
void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack);
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack); void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
void *lsan_malloc(uptr size, const StackTrace &stack); void *lsan_malloc(uptr size, const StackTrace &stack);
void lsan_free(void *p); void lsan_free(void *p);
void *lsan_realloc(void *p, uptr size, const StackTrace &stack); void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack); void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
void *lsan_valloc(uptr size, const StackTrace &stack); void *lsan_valloc(uptr size, const StackTrace &stack);
void *lsan_pvalloc(uptr size, const StackTrace &stack);
uptr lsan_mz_size(const void *p); uptr lsan_mz_size(const void *p);
} // namespace __lsan } // namespace __lsan
......
...@@ -13,14 +13,15 @@ ...@@ -13,14 +13,15 @@
#include "lsan_common.h" #include "lsan_common.h"
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h" #include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h" #include "sanitizer_common/sanitizer_tls_get_addr.h"
#if CAN_SANITIZE_LEAKS #if CAN_SANITIZE_LEAKS
...@@ -102,7 +103,7 @@ InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; } ...@@ -102,7 +103,7 @@ InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
void InitializeRootRegions() { void InitializeRootRegions() {
CHECK(!root_regions); CHECK(!root_regions);
ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)]; ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1); root_regions = new (placeholder) InternalMmapVector<RootRegion>(); // NOLINT
} }
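
InitializeRootRegions uses the runtime's usual trick for lazily constructed globals: placement-new into aligned static storage, so no C++ static initializer runs at load time. The idiom in isolation (ALIGNED(64) is the runtime's alignas macro; Registry here is an illustrative stand-in):

#include <new>

struct Registry { int count = 0; };

alignas(64) static char placeholder[sizeof(Registry)];
static Registry *registry = nullptr;

void InitRegistry() {
  registry = new (placeholder) Registry();  // constructs in-place, no heap
}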
const char *MaybeCallLsanDefaultOptions() { const char *MaybeCallLsanDefaultOptions() {
...@@ -212,9 +213,10 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { ...@@ -212,9 +213,10 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
// Scans thread data (stacks and TLS) for heap pointers. // Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads, static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) { Frontier *frontier) {
InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount()); InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
uptr registers_begin = reinterpret_cast<uptr>(registers.data()); uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end = registers_begin + registers.size(); uptr registers_end =
reinterpret_cast<uptr>(registers.data() + registers.size());
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i)); tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id); LOG_THREADS("Processing thread %d.\n", os_id);
...@@ -409,8 +411,9 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) { ...@@ -409,8 +411,9 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
} }
} }
// On Linux, handles dynamically allocated TLS blocks by treating all chunks // On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// allocated from ld-linux.so as reachable. // covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, and the like.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules. // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init() // They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
...@@ -441,7 +444,7 @@ void ProcessPC(Frontier *frontier) { ...@@ -441,7 +444,7 @@ void ProcessPC(Frontier *frontier) {
// Sets the appropriate tag on each chunk. // Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier. // Holds the flood fill frontier.
Frontier frontier(1); Frontier frontier;
ForEachChunk(CollectIgnoredCb, &frontier); ForEachChunk(CollectIgnoredCb, &frontier);
ProcessGlobalRegions(&frontier); ProcessGlobalRegions(&frontier);
...@@ -503,7 +506,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) { ...@@ -503,7 +506,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
} }
static void PrintMatchedSuppressions() { static void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1); InternalMmapVector<Suppression *> matched;
GetSuppressionContext()->GetMatched(&matched); GetSuppressionContext()->GetMatched(&matched);
if (!matched.size()) if (!matched.size())
return; return;
...@@ -522,11 +525,36 @@ struct CheckForLeaksParam { ...@@ -522,11 +525,36 @@ struct CheckForLeaksParam {
LeakReport leak_report; LeakReport leak_report;
}; };
static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
const InternalMmapVector<tid_t> &suspended_threads =
*(const InternalMmapVector<tid_t> *)arg;
if (tctx->status == ThreadStatusRunning) {
uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
tctx->os_id, CompareLess<int>());
if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
Report("Running thread %d was not suspended. False leaks are possible.\n",
tctx->os_id);
}
}
static void ReportUnsuspendedThreads(
const SuspendedThreadsList &suspended_threads) {
InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
threads[i] = suspended_threads.GetThreadID(i);
Sort(threads.data(), threads.size());
GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
&ReportIfNotSuspended, &threads);
}
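
ReportUnsuspendedThreads sorts the suspended IDs once and then binary-searches each running thread, making the pass O(n log n) instead of quadratic. The same membership test with the standard library, for comparison (the real code uses the runtime's InternalLowerBound rather than std algorithms):

#include <algorithm>
#include <cstdio>
#include <vector>

void ReportUnsuspended(std::vector<unsigned long> suspended,
                       const std::vector<unsigned long> &running) {
  std::sort(suspended.begin(), suspended.end());
  for (unsigned long tid : running)
    if (!std::binary_search(suspended.begin(), suspended.end(), tid))
      printf("Running thread %lu was not suspended.\n", tid);
}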
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
void *arg) { void *arg) {
CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg); CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
CHECK(param); CHECK(param);
CHECK(!param->success); CHECK(!param->success);
ReportUnsuspendedThreads(suspended_threads);
ClassifyAllChunks(suspended_threads); ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report); ForEachChunk(CollectLeaksCb, &param->leak_report);
// Clean up for subsequent leak checks. This assumes we did not overwrite any // Clean up for subsequent leak checks. This assumes we did not overwrite any
...@@ -681,7 +709,7 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { ...@@ -681,7 +709,7 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
uptr unsuppressed_count = UnsuppressedLeakCount(); uptr unsuppressed_count = UnsuppressedLeakCount();
if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
Printf("The %zu top leak(s):\n", num_leaks_to_report); Printf("The %zu top leak(s):\n", num_leaks_to_report);
InternalSort(&leaks_, leaks_.size(), LeakComparator); Sort(leaks_.data(), leaks_.size(), &LeakComparator);
uptr leaks_reported = 0; uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) { for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue; if (leaks_[i].is_suppressed) continue;
......
...@@ -25,9 +25,9 @@ ...@@ -25,9 +25,9 @@
// because of "small" (4 bytes) pointer size that leads to high false negative // because of "small" (4 bytes) pointer size that leads to high false negative
// ratio on large leaks. But we still want to have it for some 32 bit arches // ratio on large leaks. But we still want to have it for some 32 bit arches
// (e.g. x86), see https://github.com/google/sanitizers/issues/403. // (e.g. x86), see https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on new architecture, one need to implement // To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust TLS machinery for // internal_clone function as well as (probably) adjust the TLS machinery for
// new architecture inside sanitizer library. // the new architecture inside the sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \ #if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
(SANITIZER_WORDSIZE == 64) && \ (SANITIZER_WORDSIZE == 64) && \
(defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
namespace __sanitizer { namespace __sanitizer {
class FlagParser; class FlagParser;
class ThreadRegistry;
struct DTLS; struct DTLS;
} }
...@@ -93,7 +94,7 @@ struct LeakedObject { ...@@ -93,7 +94,7 @@ struct LeakedObject {
// Aggregates leaks by stack trace prefix. // Aggregates leaks by stack trace prefix.
class LeakReport { class LeakReport {
public: public:
LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {} LeakReport() {}
void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size, void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
ChunkTag tag); ChunkTag tag);
void ReportTopLeaks(uptr max_leaks); void ReportTopLeaks(uptr max_leaks);
...@@ -101,12 +102,11 @@ class LeakReport { ...@@ -101,12 +102,11 @@ class LeakReport {
void ApplySuppressions(); void ApplySuppressions();
uptr UnsuppressedLeakCount(); uptr UnsuppressedLeakCount();
private: private:
void PrintReportForLeak(uptr index); void PrintReportForLeak(uptr index);
void PrintLeakedObjectsForLeak(uptr index); void PrintLeakedObjectsForLeak(uptr index);
u32 next_id_; u32 next_id_ = 0;
InternalMmapVector<Leak> leaks_; InternalMmapVector<Leak> leaks_;
InternalMmapVector<LeakedObject> leaked_objects_; InternalMmapVector<LeakedObject> leaked_objects_;
}; };
...@@ -203,6 +203,7 @@ bool WordIsPoisoned(uptr addr); ...@@ -203,6 +203,7 @@ bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access. // Wrappers for ThreadRegistry access.
void LockThreadRegistry(); void LockThreadRegistry();
void UnlockThreadRegistry(); void UnlockThreadRegistry();
ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls); uptr *cache_end, DTLS **dtls);
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_getauxval.h"
#include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stackdepot.h"
...@@ -28,8 +29,12 @@ static const char kLinkerName[] = "ld"; ...@@ -28,8 +29,12 @@ static const char kLinkerName[] = "ld";
static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64); static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr; static LoadedModule *linker = nullptr;
static bool IsLinker(const char* full_name) { static bool IsLinker(const LoadedModule& module) {
return LibraryNameIs(full_name, kLinkerName); #if SANITIZER_USE_GETAUXVAL
return module.base_address() == getauxval(AT_BASE);
#else
return LibraryNameIs(module.full_name(), kLinkerName);
#endif // SANITIZER_USE_GETAUXVAL
} }
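
The new IsLinker compares each module's base address against AT_BASE, the interpreter load address the kernel passes in the auxiliary vector; that survives distributions renaming the linker binary, which the old name match did not. Querying it directly (Linux/glibc only):

#include <stdio.h>
#include <sys/auxv.h>

int main() {
  unsigned long base = getauxval(AT_BASE);  // 0 for static executables
  printf("dynamic linker loaded at 0x%lx\n", base);
  return 0;
}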
__attribute__((tls_model("initial-exec"))) __attribute__((tls_model("initial-exec")))
...@@ -47,22 +52,25 @@ void InitializePlatformSpecificModules() { ...@@ -47,22 +52,25 @@ void InitializePlatformSpecificModules() {
ListOfModules modules; ListOfModules modules;
modules.init(); modules.init();
for (LoadedModule &module : modules) { for (LoadedModule &module : modules) {
if (!IsLinker(module.full_name())) continue; if (!IsLinker(module))
continue;
if (linker == nullptr) { if (linker == nullptr) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder); linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
*linker = module; *linker = module;
module = LoadedModule(); module = LoadedModule();
} else { } else {
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". " VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName); "TLS and other allocations originating from linker might be "
"falsely reported as leaks.\n", kLinkerName);
linker->clear(); linker->clear();
linker = nullptr; linker = nullptr;
return; return;
} }
} }
if (linker == nullptr) { if (linker == nullptr) {
VReport(1, "LeakSanitizer: Dynamic linker not found. " VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
"TLS will not be handled correctly.\n"); "allocations originating from linker might be falsely reported "
"as leaks.\n");
} }
} }
......
...@@ -117,7 +117,8 @@ void ProcessGlobalRegions(Frontier *frontier) { ...@@ -117,7 +117,8 @@ void ProcessGlobalRegions(Frontier *frontier) {
for (auto name : kSkippedSecNames) CHECK(ARRAY_SIZE(name) < kMaxSegName); for (auto name : kSkippedSecNames) CHECK(ARRAY_SIZE(name) < kMaxSegName);
MemoryMappingLayout memory_mapping(false); MemoryMappingLayout memory_mapping(false);
InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128); InternalMmapVector<LoadedModule> modules;
modules.reserve(128);
memory_mapping.DumpListOfModules(&modules); memory_mapping.DumpListOfModules(&modules);
for (uptr i = 0; i < modules.size(); ++i) { for (uptr i = 0; i < modules.size(); ++i) {
// Even when global scanning is disabled, we still need to scan // Even when global scanning is disabled, we still need to scan
...@@ -139,12 +140,6 @@ void ProcessGlobalRegions(Frontier *frontier) { ...@@ -139,12 +140,6 @@ void ProcessGlobalRegions(Frontier *frontier) {
} }
void ProcessPlatformSpecificAllocations(Frontier *frontier) { void ProcessPlatformSpecificAllocations(Frontier *frontier) {
mach_port_name_t port;
if (task_for_pid(mach_task_self(), internal_getpid(), &port)
!= KERN_SUCCESS) {
return;
}
unsigned depth = 1; unsigned depth = 1;
vm_size_t size = 0; vm_size_t size = 0;
vm_address_t address = 0; vm_address_t address = 0;
...@@ -155,7 +150,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { ...@@ -155,7 +150,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
while (err == KERN_SUCCESS) { while (err == KERN_SUCCESS) {
struct vm_region_submap_info_64 info; struct vm_region_submap_info_64 info;
err = vm_region_recurse_64(port, &address, &size, &depth, err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
(vm_region_info_t)&info, &count); (vm_region_info_t)&info, &count);
uptr end_address = address + size; uptr end_address = address + size;
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include "interception/interception.h" #include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
...@@ -84,9 +85,7 @@ INTERCEPTOR(void*, realloc, void *q, uptr size) { ...@@ -84,9 +85,7 @@ INTERCEPTOR(void*, realloc, void *q, uptr size) {
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED; ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC; GET_STACK_TRACE_MALLOC;
*memptr = lsan_memalign(alignment, size, stack); return lsan_posix_memalign(memptr, alignment, size, stack);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
} }
INTERCEPTOR(void*, valloc, uptr size) { INTERCEPTOR(void*, valloc, uptr size) {
...@@ -121,7 +120,7 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) { ...@@ -121,7 +120,7 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) { INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
ENSURE_LSAN_INITED; ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC; GET_STACK_TRACE_MALLOC;
return lsan_memalign(alignment, size, stack); return lsan_aligned_alloc(alignment, size, stack);
} }
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc) #define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
#else #else
...@@ -164,13 +163,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) { ...@@ -164,13 +163,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
INTERCEPTOR(void*, pvalloc, uptr size) { INTERCEPTOR(void*, pvalloc, uptr size) {
ENSURE_LSAN_INITED; ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC; GET_STACK_TRACE_MALLOC;
uptr PageSize = GetPageSizeCached(); return lsan_pvalloc(size, stack);
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
} }
#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc) #define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
#else #else
...@@ -200,21 +193,21 @@ INTERCEPTOR(int, mprobe, void *ptr) { ...@@ -200,21 +193,21 @@ INTERCEPTOR(int, mprobe, void *ptr) {
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. // TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
#define OPERATOR_NEW_BODY(nothrow) \ #define OPERATOR_NEW_BODY(nothrow)\
ENSURE_LSAN_INITED; \ ENSURE_LSAN_INITED;\
GET_STACK_TRACE_MALLOC; \ GET_STACK_TRACE_MALLOC;\
void *res = lsan_malloc(size, stack); \ void *res = lsan_malloc(size, stack);\
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res; return res;
#define OPERATOR_NEW_BODY_ALIGN(nothrow) \ #define OPERATOR_NEW_BODY_ALIGN(nothrow)\
ENSURE_LSAN_INITED; \ ENSURE_LSAN_INITED;\
GET_STACK_TRACE_MALLOC; \ GET_STACK_TRACE_MALLOC;\
void *res = lsan_memalign((uptr)align, size, stack); \ void *res = lsan_memalign((uptr)align, size, stack);\
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res; return res;
#define OPERATOR_DELETE_BODY \ #define OPERATOR_DELETE_BODY\
ENSURE_LSAN_INITED; \ ENSURE_LSAN_INITED;\
lsan_free(ptr); lsan_free(ptr);
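
These macros encode the standard operator-new contract: the plain overload must never return null (LSan still dies on OOM here; the TODO above is to throw bad_alloc instead), while the nothrow overload may. A conforming pair, stripped of the sanitizer plumbing, as a sketch:

#include <cstdlib>
#include <new>

void *operator new(std::size_t size) {
  if (size == 0) size = 1;             // new must return a unique pointer
  void *p = std::malloc(size);
  if (!p) throw std::bad_alloc();      // throwing overload: never null
  return p;
}

void *operator new(std::size_t size, const std::nothrow_t &) noexcept {
  if (size == 0) size = 1;
  return std::malloc(size);            // nothrow overload: null is allowed
}

void operator delete(void *p) noexcept { std::free(p); }
void operator delete(void *p, const std::nothrow_t &) noexcept {
  std::free(p);
}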
// On OS X it's not enough to just provide our own 'operator new' and // On OS X it's not enough to just provide our own 'operator new' and
...@@ -307,6 +300,7 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) ...@@ -307,6 +300,7 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
///// Thread initialization and finalization. ///// ///// Thread initialization and finalization. /////
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static unsigned g_thread_finalize_key; static unsigned g_thread_finalize_key;
static void thread_finalize(void *v) { static void thread_finalize(void *v) {
...@@ -320,6 +314,29 @@ static void thread_finalize(void *v) { ...@@ -320,6 +314,29 @@ static void thread_finalize(void *v) {
} }
ThreadFinish(); ThreadFinish();
} }
#endif
#if SANITIZER_NETBSD
INTERCEPTOR(void, _lwp_exit) {
ENSURE_LSAN_INITED;
ThreadFinish();
REAL(_lwp_exit)();
}
#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit)
#else
#define LSAN_MAYBE_INTERCEPT__LWP_EXIT
#endif
#if SANITIZER_INTERCEPT_THR_EXIT
INTERCEPTOR(void, thr_exit, tid_t *state) {
ENSURE_LSAN_INITED;
ThreadFinish();
REAL(thr_exit)(state);
}
#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
#else
#define LSAN_MAYBE_INTERCEPT_THR_EXIT
#endif
struct ThreadParam { struct ThreadParam {
void *(*callback)(void *arg); void *(*callback)(void *arg);
...@@ -333,11 +350,13 @@ extern "C" void *__lsan_thread_start_func(void *arg) { ...@@ -333,11 +350,13 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
void *param = p->param; void *param = p->param;
// Wait until the last iteration to maximize the chance that we are the last // Wait until the last iteration to maximize the chance that we are the last
// destructor to run. // destructor to run.
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_setspecific(g_thread_finalize_key, if (pthread_setspecific(g_thread_finalize_key,
(void*)GetPthreadDestructorIterations())) { (void*)GetPthreadDestructorIterations())) {
Report("LeakSanitizer: failed to set thread key.\n"); Report("LeakSanitizer: failed to set thread key.\n");
Die(); Die();
} }
#endif
int tid = 0; int tid = 0;
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield(); internal_sched_yield();
...@@ -425,10 +444,15 @@ void InitializeInterceptors() { ...@@ -425,10 +444,15 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(pthread_join); INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(_exit); INTERCEPT_FUNCTION(_exit);
LSAN_MAYBE_INTERCEPT__LWP_EXIT;
LSAN_MAYBE_INTERCEPT_THR_EXIT;
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n"); Report("LeakSanitizer: failed to create thread key.\n");
Die(); Die();
} }
#endif
} }
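
__lsan_thread_start_func stores GetPthreadDestructorIterations() in the finalize key so that thread_finalize can re-arm itself until the final destructor pass, maximizing the chance that ThreadFinish runs after every other key's destructor. The countdown trick in isolation (sketch; key setup via pthread_key_create elided):

#include <pthread.h>
#include <cstdint>
#include <cstdio>

static pthread_key_t key;  // created with pthread_key_create(&key, finalize)

static void finalize(void *v) {
  uintptr_t iterations = (uintptr_t)v;
  if (iterations > 1) {
    // Re-setting the slot makes pthreads invoke this destructor again on
    // the next pass, pushing our cleanup toward the last one.
    pthread_setspecific(key, (void *)(iterations - 1));
    return;
  }
  puts("last destructor pass: safe to finalize the thread");
}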
} // namespace __lsan } // namespace __lsan
...@@ -35,6 +35,9 @@ using namespace __lsan; ...@@ -35,6 +35,9 @@ using namespace __lsan;
#define COMMON_MALLOC_CALLOC(count, size) \ #define COMMON_MALLOC_CALLOC(count, size) \
GET_STACK_TRACE_MALLOC; \ GET_STACK_TRACE_MALLOC; \
void *p = lsan_calloc(count, size, stack) void *p = lsan_calloc(count, size, stack)
#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
GET_STACK_TRACE_MALLOC; \
int res = lsan_posix_memalign(memptr, alignment, size, stack)
#define COMMON_MALLOC_VALLOC(size) \ #define COMMON_MALLOC_VALLOC(size) \
GET_STACK_TRACE_MALLOC; \ GET_STACK_TRACE_MALLOC; \
void *p = lsan_valloc(size, stack) void *p = lsan_valloc(size, stack)
......
...@@ -153,4 +153,9 @@ void UnlockThreadRegistry() { ...@@ -153,4 +153,9 @@ void UnlockThreadRegistry() {
thread_registry->Unlock(); thread_registry->Unlock();
} }
ThreadRegistry *GetThreadRegistryLocked() {
thread_registry->CheckLocked();
return thread_registry;
}
} // namespace __lsan } // namespace __lsan
.type __start___sancov_guards,@object
.globl __start___sancov_guards
.section __sancov_guards,"aw",@progbits
.p2align 2
__start___sancov_guards:
.type __stop___sancov_guards,@object
.globl __stop___sancov_guards
.section __sancov_guards,"aw",@progbits
.p2align 2
__stop___sancov_guards:
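
These two assembly stubs pin __start___sancov_guards and __stop___sancov_guards at the boundaries of the __sancov_guards section, so the runtime can walk every 32-bit coverage guard placed there by instrumentation. A consumer-side sketch (links only when some object actually provides the section):

#include <cstdint>
#include <cstdio>

extern "C" uint32_t __start___sancov_guards[];
extern "C" uint32_t __stop___sancov_guards[];

void DumpGuards() {
  for (uint32_t *g = __start___sancov_guards; g != __stop___sancov_guards; ++g)
    printf("guard at %p = %u\n", (void *)g, *g);
}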
...@@ -19,6 +19,10 @@ ...@@ -19,6 +19,10 @@
namespace __sanitizer { namespace __sanitizer {
// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";
// ThreadSanitizer for Go uses libc malloc/free. // ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC) #if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID # if SANITIZER_LINUX && !SANITIZER_ANDROID
...@@ -134,12 +138,19 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) { ...@@ -134,12 +138,19 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull; const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
"0x%zx bytes\n", SanitizerToolName, requested_size);
Die();
}
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) { void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
if (size + sizeof(u64) < size) if (size + sizeof(u64) < size)
return nullptr; return nullptr;
void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment); void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
if (!p) if (UNLIKELY(!p))
return nullptr; ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
((u64*)p)[0] = kBlockMagic; ((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64); return (char*)p + sizeof(u64);
} }
...@@ -153,16 +164,21 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) { ...@@ -153,16 +164,21 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
size = size + sizeof(u64); size = size + sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]); CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
void *p = RawInternalRealloc(addr, size, cache); void *p = RawInternalRealloc(addr, size, cache);
if (!p) if (UNLIKELY(!p))
return nullptr; ReportInternalAllocatorOutOfMemory(size);
return (char*)p + sizeof(u64); return (char*)p + sizeof(u64);
} }
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) { void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (UNLIKELY(CheckForCallocOverflow(count, size))) if (UNLIKELY(CheckForCallocOverflow(count, size))) {
return InternalAllocator::FailureHandler::OnBadRequest(); Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
"cannot be represented in type size_t\n", SanitizerToolName, count,
size);
Die();
}
void *p = InternalAlloc(count * size, cache); void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size); if (LIKELY(p))
internal_memset(p, 0, count * size);
return p; return p;
} }
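
InternalAlloc prepends the kBlockMagic u64 to every block so InternalFree can assert the pointer really came from the internal allocator. The header scheme in miniature, with libc standing in for the raw allocator (illustrative names):

#include <cstdint>
#include <cstdlib>
#include <cstring>

static const uint64_t kMagic = 0x6A6CB03ABCEBC041ull;

void *MiniInternalAlloc(size_t size) {
  if (size + sizeof(uint64_t) < size) return nullptr;  // size overflow
  char *p = (char *)malloc(size + sizeof(uint64_t));
  if (!p) return nullptr;
  memcpy(p, &kMagic, sizeof(kMagic));                  // stamp the header
  return p + sizeof(uint64_t);                         // user data follows it
}

void MiniInternalFree(void *addr) {
  if (!addr) return;
  char *p = (char *)addr - sizeof(uint64_t);
  uint64_t magic;
  memcpy(&magic, p, sizeof(magic));
  if (magic != kMagic) abort();                        // not ours / corrupted
  free(p);
}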
...@@ -176,11 +192,13 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) { ...@@ -176,11 +192,13 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
} }
// LowLevelAllocator // LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback; static LowLevelAllocateCallback low_level_alloc_callback;
void *LowLevelAllocator::Allocate(uptr size) { void *LowLevelAllocator::Allocate(uptr size) {
// Align allocation size. // Align allocation size.
size = RoundUpTo(size, 8); size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) { if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = Max(size, GetPageSizeCached()); uptr size_to_allocate = Max(size, GetPageSizeCached());
allocated_current_ = allocated_current_ =
...@@ -197,10 +215,17 @@ void *LowLevelAllocator::Allocate(uptr size) { ...@@ -197,10 +215,17 @@ void *LowLevelAllocator::Allocate(uptr size) {
return res; return res;
} }
void SetLowLevelAllocateMinAlignment(uptr alignment) {
CHECK(IsPowerOfTwo(alignment));
low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) { void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
low_level_alloc_callback = callback; low_level_alloc_callback = callback;
} }
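
Note that SetLowLevelAllocateMinAlignment can only raise the alignment (the Max), for callers needing stronger guarantees than the 8-byte default. The bump-pointer core of LowLevelAllocator::Allocate, sketched with the page-mapping refill elided:

#include <cstdint>

static uintptr_t alloc_cur, alloc_end;   // current carve-out [cur, end)
static uintptr_t min_align = 8;          // raised via the setter, never lowered

void *BumpAlloc(uintptr_t size) {
  size = (size + min_align - 1) & ~(min_align - 1);  // RoundUpTo
  if (alloc_end - alloc_cur < size) return nullptr;  // real code maps pages here
  void *res = (void *)alloc_cur;
  alloc_cur += size;
  return res;
}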
// Allocator's OOM and other errors handling support.
static atomic_uint8_t allocator_out_of_memory = {0}; static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0}; static atomic_uint8_t allocator_may_return_null = {0};
...@@ -208,13 +233,8 @@ bool IsAllocatorOutOfMemory() { ...@@ -208,13 +233,8 @@ bool IsAllocatorOutOfMemory() {
return atomic_load_relaxed(&allocator_out_of_memory); return atomic_load_relaxed(&allocator_out_of_memory);
} }
// Prints error message and kills the program. void SetAllocatorOutOfMemory() {
void NORETURN ReportAllocatorCannotReturnNull() { atomic_store_relaxed(&allocator_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
CHECK(0);
Die();
} }
bool AllocatorMayReturnNull() { bool AllocatorMayReturnNull() {
...@@ -226,26 +246,9 @@ void SetAllocatorMayReturnNull(bool may_return_null) { ...@@ -226,26 +246,9 @@ void SetAllocatorMayReturnNull(bool may_return_null) {
memory_order_relaxed); memory_order_relaxed);
} }
void *ReturnNullOrDieOnFailure::OnBadRequest() { void PrintHintAllocatorCannotReturnNull() {
if (AllocatorMayReturnNull()) Report("HINT: if you don't care about these errors you may set "
return nullptr; "allocator_may_return_null=1\n");
ReportAllocatorCannotReturnNull();
}
void *ReturnNullOrDieOnFailure::OnOOM() {
atomic_store_relaxed(&allocator_out_of_memory, 1);
if (AllocatorMayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull();
}
void NORETURN *DieOnFailure::OnBadRequest() {
ReportAllocatorCannotReturnNull();
}
void NORETURN *DieOnFailure::OnOOM() {
atomic_store_relaxed(&allocator_out_of_memory, 1);
ReportAllocatorCannotReturnNull();
} }
} // namespace __sanitizer } // namespace __sanitizer
...@@ -22,28 +22,23 @@ ...@@ -22,28 +22,23 @@
namespace __sanitizer { namespace __sanitizer {
// Allows the tools to name their allocations appropriately.
extern const char *PrimaryAllocatorName;
extern const char *SecondaryAllocatorName;
// Since flags are immutable and allocator behavior can be changed at runtime // Since flags are immutable and allocator behavior can be changed at runtime
// (unit tests or ASan on Android are some examples), allocator_may_return_null // (unit tests or ASan on Android are some examples), allocator_may_return_null
// flag value is cached here and can be altered later. // flag value is cached here and can be altered later.
bool AllocatorMayReturnNull(); bool AllocatorMayReturnNull();
void SetAllocatorMayReturnNull(bool may_return_null); void SetAllocatorMayReturnNull(bool may_return_null);
// Allocator failure handling policies:
// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
// dies otherwise.
struct ReturnNullOrDieOnFailure {
static void *OnBadRequest();
static void *OnOOM();
};
// Always dies on the failure.
struct DieOnFailure {
static void NORETURN *OnBadRequest();
static void NORETURN *OnOOM();
};
// Returns true if allocator detected OOM condition. Can be used to avoid memory // Returns true if allocator detected OOM condition. Can be used to avoid memory
// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called. // hungry operations.
bool IsAllocatorOutOfMemory(); bool IsAllocatorOutOfMemory();
// Should be called by a particular allocator when OOM is detected.
void SetAllocatorOutOfMemory();
void PrintHintAllocatorCannotReturnNull();
// Allocators call these callbacks on mmap/munmap. // Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback { struct NoOpMapUnmapCallback {
...@@ -54,6 +49,21 @@ struct NoOpMapUnmapCallback { ...@@ -54,6 +49,21 @@ struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks. // Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg); typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
INLINE u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
return (*state = *state * 1103515245 + 12345) >> 16;
}
INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
template<typename T>
INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
if (n <= 1) return;
u32 state = *rand_state;
for (u32 i = n - 1; i > 0; i--)
Swap(a[i], a[RandN(&state, i + 1)]);
*rand_state = state;
}
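
Rand is a textbook ANSI C linear congruential generator whose state is caller-owned, so sequences are reproducible from the seed, and RandomShuffle is a Fisher-Yates shuffle on top of it. The same computation spelled out as a usage example:

#include <cstdio>

int main() {
  unsigned state = 42;                     // caller-owned seed/state
  int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  for (unsigned i = 8 - 1; i > 0; i--) {   // Fisher-Yates, as RandomShuffle
    state = state * 1103515245u + 12345u;  // Rand: advance the LCG
    unsigned j = (state >> 16) % (i + 1);  // RandN(&state, i + 1)
    int tmp = a[i]; a[i] = a[j]; a[j] = tmp;
  }
  for (int v : a) printf("%d ", v);
  printf("\n");
  return 0;
}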
#include "sanitizer_allocator_size_class_map.h" #include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h" #include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h" #include "sanitizer_allocator_primary64.h"
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
template<u64 kSize> template<u64 kSize>
class FlatByteMap { class FlatByteMap {
public: public:
void TestOnlyInit() { void Init() {
internal_memset(map_, 0, sizeof(map_)); internal_memset(map_, 0, sizeof(map_));
} }
...@@ -42,7 +42,7 @@ class FlatByteMap { ...@@ -42,7 +42,7 @@ class FlatByteMap {
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback> template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap { class TwoLevelByteMap {
public: public:
void TestOnlyInit() { void Init() {
internal_memset(map1_, 0, sizeof(map1_)); internal_memset(map1_, 0, sizeof(map1_));
mu_.Init(); mu_.Init();
} }
......
...@@ -42,16 +42,18 @@ INLINE void *SetErrnoOnNull(void *ptr) { ...@@ -42,16 +42,18 @@ INLINE void *SetErrnoOnNull(void *ptr) {
// of alignment. // of alignment.
INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) { INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
#if SANITIZER_POSIX #if SANITIZER_POSIX
return IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0; return alignment != 0 && IsPowerOfTwo(alignment) &&
(size & (alignment - 1)) == 0;
#else #else
return size % alignment == 0; return alignment != 0 && size % alignment == 0;
#endif #endif
} }
// Checks posix_memalign() parameters, verifies that alignment is a power of two // Checks posix_memalign() parameters, verifies that alignment is a power of two
// and a multiple of sizeof(void *). // and a multiple of sizeof(void *).
INLINE bool CheckPosixMemalignAlignment(uptr alignment) { INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0; // NOLINT return alignment != 0 && IsPowerOfTwo(alignment) &&
(alignment % sizeof(void *)) == 0; // NOLINT
} }
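
Both predicates now reject alignment == 0 explicitly because IsPowerOfTwo, implemented as (x & (x - 1)) == 0, wrongly accepts zero. The tests written out (illustrative names):

static bool IsPow2(unsigned long x) {
  return x != 0 && (x & (x - 1)) == 0;   // the explicit zero check matters
}

// aligned_alloc: size must be a multiple of alignment; when alignment is a
// power of two, size % alignment equals size & (alignment - 1).
bool ValidAlignedAlloc(unsigned long alignment, unsigned long size) {
  return IsPow2(alignment) && (size & (alignment - 1)) == 0;
}

// posix_memalign additionally requires a multiple of sizeof(void *).
bool ValidPosixMemalign(unsigned long alignment) {
  return IsPow2(alignment) && alignment % sizeof(void *) == 0;
}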
// Returns true if calloc(size, n) call overflows on size*n calculation. // Returns true if calloc(size, n) call overflows on size*n calculation.
......
...@@ -22,8 +22,6 @@ template <class PrimaryAllocator, class AllocatorCache, ...@@ -22,8 +22,6 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT class SecondaryAllocator> // NOLINT
class CombinedAllocator { class CombinedAllocator {
public: public:
typedef typename SecondaryAllocator::FailureHandler FailureHandler;
void InitLinkerInitialized(s32 release_to_os_interval_ms) { void InitLinkerInitialized(s32 release_to_os_interval_ms) {
primary_.Init(release_to_os_interval_ms); primary_.Init(release_to_os_interval_ms);
secondary_.InitLinkerInitialized(); secondary_.InitLinkerInitialized();
...@@ -40,8 +38,12 @@ class CombinedAllocator { ...@@ -40,8 +38,12 @@ class CombinedAllocator {
// Returning 0 on malloc(0) may break a lot of code. // Returning 0 on malloc(0) may break a lot of code.
if (size == 0) if (size == 0)
size = 1; size = 1;
if (size + alignment < size) if (size + alignment < size) {
return FailureHandler::OnBadRequest(); Report("WARNING: %s: CombinedAllocator allocation overflow: "
"0x%zx bytes with 0x%zx alignment requested\n",
SanitizerToolName, size, alignment);
return nullptr;
}
uptr original_size = size; uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator // If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than // rather than by the primary or secondary, passing an alignment lower than
...@@ -60,8 +62,6 @@ class CombinedAllocator { ...@@ -60,8 +62,6 @@ class CombinedAllocator {
res = cache->Allocate(&primary_, primary_.ClassID(size)); res = cache->Allocate(&primary_, primary_.ClassID(size));
else else
res = secondary_.Allocate(&stats_, original_size, alignment); res = secondary_.Allocate(&stats_, original_size, alignment);
if (!res)
return FailureHandler::OnOOM();
if (alignment > 8) if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0); CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
return res; return res;
...@@ -75,6 +75,10 @@ class CombinedAllocator { ...@@ -75,6 +75,10 @@ class CombinedAllocator {
primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms); primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
} }
void ForceReleaseToOS() {
primary_.ForceReleaseToOS();
}
void Deallocate(AllocatorCache *cache, void *p) { void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return; if (!p) return;
if (primary_.PointerIsMine(p)) if (primary_.PointerIsMine(p))
......
...@@ -37,6 +37,9 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE ...@@ -37,6 +37,9 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr); void __sanitizer_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_purge_allocator();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts); __sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
} // extern "C" } // extern "C"
......
...@@ -44,9 +44,12 @@ typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator; ...@@ -44,9 +44,12 @@ typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator> typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache; InternalAllocatorCache;
typedef LargeMmapAllocator<NoOpMapUnmapCallback,
LargeMmapAllocatorPtrArrayStatic>
SecondaryInternalAllocator;
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache, typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> SecondaryInternalAllocator> InternalAllocator;
> InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr, void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
uptr alignment = 0); uptr alignment = 0);
...@@ -57,15 +60,6 @@ void *InternalCalloc(uptr countr, uptr size, ...@@ -57,15 +60,6 @@ void *InternalCalloc(uptr countr, uptr size,
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr); void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
InternalAllocator *internal_allocator(); InternalAllocator *internal_allocator();
enum InternalAllocEnum {
INTERNAL_ALLOC
};
} // namespace __sanitizer } // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
__sanitizer::InternalAllocEnum) {
return __sanitizer::InternalAlloc(size);
}
#endif // SANITIZER_ALLOCATOR_INTERNAL_H #endif // SANITIZER_ALLOCATOR_INTERNAL_H
...@@ -17,8 +17,7 @@ ...@@ -17,8 +17,7 @@
// object per thread in TLS, it has to be POD. // object per thread in TLS, it has to be POD.
template<class SizeClassAllocator> template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache struct SizeClassAllocatorLocalCache
: SizeClassAllocator::AllocatorCache { : SizeClassAllocator::AllocatorCache {};
};
// Cache used by SizeClassAllocator64. // Cache used by SizeClassAllocator64.
template <class SizeClassAllocator> template <class SizeClassAllocator>
...@@ -44,13 +43,12 @@ struct SizeClassAllocator64LocalCache { ...@@ -44,13 +43,12 @@ struct SizeClassAllocator64LocalCache {
if (UNLIKELY(c->count == 0)) { if (UNLIKELY(c->count == 0)) {
if (UNLIKELY(!Refill(c, allocator, class_id))) if (UNLIKELY(!Refill(c, allocator, class_id)))
return nullptr; return nullptr;
DCHECK_GT(c->count, 0);
} }
stats_.Add(AllocatorStatAllocated, c->class_size);
CHECK_GT(c->count, 0);
CompactPtrT chunk = c->chunks[--c->count]; CompactPtrT chunk = c->chunks[--c->count];
void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer( stats_.Add(AllocatorStatAllocated, c->class_size);
return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
allocator->GetRegionBeginBySizeClass(class_id), chunk)); allocator->GetRegionBeginBySizeClass(class_id), chunk));
return res;
} }
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) { void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
...@@ -58,20 +56,19 @@ struct SizeClassAllocator64LocalCache { ...@@ -58,20 +56,19 @@ struct SizeClassAllocator64LocalCache {
CHECK_LT(class_id, kNumClasses); CHECK_LT(class_id, kNumClasses);
// If the first allocator call on a new thread is a deallocation, then // If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure. // max_count will be zero, leading to check failure.
InitCache();
PerClass *c = &per_class_[class_id]; PerClass *c = &per_class_[class_id];
stats_.Sub(AllocatorStatAllocated, c->class_size); InitCache(c);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count)) if (UNLIKELY(c->count == c->max_count))
Drain(c, allocator, class_id, c->max_count / 2); Drain(c, allocator, class_id, c->max_count / 2);
CompactPtrT chunk = allocator->PointerToCompactPtr( CompactPtrT chunk = allocator->PointerToCompactPtr(
allocator->GetRegionBeginBySizeClass(class_id), allocator->GetRegionBeginBySizeClass(class_id),
reinterpret_cast<uptr>(p)); reinterpret_cast<uptr>(p));
c->chunks[c->count++] = chunk; c->chunks[c->count++] = chunk;
stats_.Sub(AllocatorStatAllocated, c->class_size);
} }
void Drain(SizeClassAllocator *allocator) { void Drain(SizeClassAllocator *allocator) {
for (uptr i = 0; i < kNumClasses; i++) { for (uptr i = 1; i < kNumClasses; i++) {
PerClass *c = &per_class_[i]; PerClass *c = &per_class_[i];
while (c->count > 0) while (c->count > 0)
Drain(c, allocator, i, c->count); Drain(c, allocator, i, c->count);
...@@ -92,20 +89,22 @@ struct SizeClassAllocator64LocalCache { ...@@ -92,20 +89,22 @@ struct SizeClassAllocator64LocalCache {
PerClass per_class_[kNumClasses]; PerClass per_class_[kNumClasses];
AllocatorStats stats_; AllocatorStats stats_;
void InitCache() { void InitCache(PerClass *c) {
if (LIKELY(per_class_[1].max_count)) if (LIKELY(c->max_count))
return; return;
for (uptr i = 0; i < kNumClasses; i++) { for (uptr i = 1; i < kNumClasses; i++) {
PerClass *c = &per_class_[i]; PerClass *c = &per_class_[i];
c->max_count = 2 * SizeClassMap::MaxCachedHint(i); const uptr size = Allocator::ClassIdToSize(i);
c->class_size = Allocator::ClassIdToSize(i); c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
c->class_size = size;
} }
DCHECK_NE(c->max_count, 0UL);
} }
NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator, NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
uptr class_id) { uptr class_id) {
InitCache(); InitCache(c);
uptr num_requested_chunks = c->max_count / 2; const uptr num_requested_chunks = c->max_count / 2;
if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks, if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
num_requested_chunks))) num_requested_chunks)))
return false; return false;
...@@ -115,9 +114,8 @@ struct SizeClassAllocator64LocalCache { ...@@ -115,9 +114,8 @@ struct SizeClassAllocator64LocalCache {
NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id, NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
uptr count) { uptr count) {
InitCache();
CHECK_GE(c->count, count); CHECK_GE(c->count, count);
uptr first_idx_to_drain = c->count - count; const uptr first_idx_to_drain = c->count - count;
c->count -= count; c->count -= count;
allocator->ReturnToAllocator(&stats_, class_id, allocator->ReturnToAllocator(&stats_, class_id,
&c->chunks[first_idx_to_drain], count); &c->chunks[first_idx_to_drain], count);
...@@ -162,12 +160,13 @@ struct SizeClassAllocator32LocalCache { ...@@ -162,12 +160,13 @@ struct SizeClassAllocator32LocalCache {
CHECK_LT(class_id, kNumClasses); CHECK_LT(class_id, kNumClasses);
PerClass *c = &per_class_[class_id]; PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0)) { if (UNLIKELY(c->count == 0)) {
if (UNLIKELY(!Refill(allocator, class_id))) if (UNLIKELY(!Refill(c, allocator, class_id)))
return nullptr; return nullptr;
DCHECK_GT(c->count, 0);
} }
stats_.Add(AllocatorStatAllocated, c->class_size);
void *res = c->batch[--c->count]; void *res = c->batch[--c->count];
PREFETCH(c->batch[c->count - 1]); PREFETCH(c->batch[c->count - 1]);
stats_.Add(AllocatorStatAllocated, c->class_size);
return res; return res;
} }
@@ -176,20 +175,19 @@ struct SizeClassAllocator32LocalCache {
     CHECK_LT(class_id, kNumClasses);
     // If the first allocator call on a new thread is a deallocation, then
     // max_count will be zero, leading to check failure.
-    InitCache();
     PerClass *c = &per_class_[class_id];
-    stats_.Sub(AllocatorStatAllocated, c->class_size);
-    CHECK_NE(c->max_count, 0UL);
+    InitCache(c);
     if (UNLIKELY(c->count == c->max_count))
-      Drain(allocator, class_id);
+      Drain(c, allocator, class_id);
     c->batch[c->count++] = p;
+    stats_.Sub(AllocatorStatAllocated, c->class_size);
   }
   void Drain(SizeClassAllocator *allocator) {
-    for (uptr i = 0; i < kNumClasses; i++) {
+    for (uptr i = 1; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
       while (c->count > 0)
-        Drain(allocator, i);
+        Drain(c, allocator, i);
     }
   }
@@ -214,15 +212,16 @@ struct SizeClassAllocator32LocalCache {
   PerClass per_class_[kNumClasses];
   AllocatorStats stats_;
-  void InitCache() {
-    if (LIKELY(per_class_[1].max_count))
+  void InitCache(PerClass *c) {
+    if (LIKELY(c->max_count))
       return;
     const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
-    for (uptr i = 0; i < kNumClasses; i++) {
+    for (uptr i = 1; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
-      uptr max_cached = TransferBatch::MaxCached(i);
+      const uptr size = Allocator::ClassIdToSize(i);
+      const uptr max_cached = TransferBatch::MaxCached(size);
       c->max_count = 2 * max_cached;
-      c->class_size = Allocator::ClassIdToSize(i);
+      c->class_size = size;
       // Precompute the class id to use to store batches for the current class
       // id. 0 means the class size is large enough to store a batch within one
       // of the chunks. If using a separate size class, it will always be
@@ -230,16 +229,17 @@ struct SizeClassAllocator32LocalCache {
       if (kUseSeparateSizeClassForBatch) {
         c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
       } else {
-        c->batch_class_id = (c->class_size <
+        c->batch_class_id = (size <
          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
          batch_class_id : 0;
       }
     }
+    DCHECK_NE(c->max_count, 0UL);
   }
-  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
-    InitCache();
-    PerClass *c = &per_class_[class_id];
+  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
+                       uptr class_id) {
+    InitCache(c);
     TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
     if (UNLIKELY(!b))
       return false;
@@ -250,20 +250,21 @@ struct SizeClassAllocator32LocalCache {
     return true;
   }
-  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
-    InitCache();
-    PerClass *c = &per_class_[class_id];
-    uptr cnt = Min(c->max_count / 2, c->count);
-    uptr first_idx_to_drain = c->count - cnt;
+  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
+                      uptr class_id) {
+    const uptr count = Min(c->max_count / 2, c->count);
+    const uptr first_idx_to_drain = c->count - count;
     TransferBatch *b = CreateBatch(
         class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
     // Failure to allocate a batch while releasing memory is non recoverable.
     // TODO(alekseys): Figure out how to do it without allocating a new batch.
-    if (UNLIKELY(!b))
-      DieOnFailure::OnOOM();
+    if (UNLIKELY(!b)) {
+      Report("FATAL: Internal error: %s's allocator failed to allocate a "
+             "transfer batch.\n", SanitizerToolName);
+      Die();
+    }
-    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
-                    &c->batch[first_idx_to_drain], cnt);
-    c->count -= cnt;
+    b->SetFromArray(&c->batch[first_idx_to_drain], count);
+    c->count -= count;
     allocator->DeallocateBatch(&stats_, class_id, b);
   }
 };
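Both local caches now rely on the same lazy-initialization trick: max_count == 0 marks a never-touched cache, so the first Allocate or Deallocate on a thread pays a one-time setup of every class, and every later call decides the fast path with a single load. A self-contained sketch of the pattern (the sizes and helpers below are stand-ins, not the real size-class map):

#include <cstddef>

constexpr std::size_t kNumClasses = 8;

// Illustrative stand-ins for the real size-class map.
constexpr std::size_t ClassIdToSize(std::size_t id) { return 16u << id; }
constexpr std::size_t MaxCachedHint(std::size_t size) {
  return size <= 256 ? 32 : 4;
}

struct PerClass { std::size_t max_count = 0, class_size = 0, count = 0; };

struct LocalCache {
  PerClass per_class_[kNumClasses];
  // max_count == 0 doubles as "not initialized yet"; every real entry gets a
  // non-zero max_count, so the common case is one predictable branch.
  void InitCache(PerClass *c) {
    if (c->max_count) return;                   // already initialized
    for (std::size_t i = 1; i < kNumClasses; i++) {  // class 0 is unused
      per_class_[i].class_size = ClassIdToSize(i);
      per_class_[i].max_count = 2 * MaxCachedHint(ClassIdToSize(i));
    }
  }
};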
@@ -61,9 +61,9 @@ class SizeClassAllocator32 {
   struct TransferBatch {
     static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
-    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
+    void SetFromArray(void *batch[], uptr count) {
+      DCHECK_LE(count, kMaxNumCached);
       count_ = count;
-      CHECK_LE(count_, kMaxNumCached);
       for (uptr i = 0; i < count; i++)
         batch_[i] = batch[i];
     }
@@ -71,9 +71,9 @@ class SizeClassAllocator32 {
     void Clear() { count_ = 0; }
     void Add(void *ptr) {
       batch_[count_++] = ptr;
-      CHECK_LE(count_, kMaxNumCached);
+      DCHECK_LE(count_, kMaxNumCached);
     }
-    void CopyToArray(void *to_batch[]) {
+    void CopyToArray(void *to_batch[]) const {
       for (uptr i = 0, n = Count(); i < n; i++)
         to_batch[i] = batch_[i];
     }
@@ -82,8 +82,8 @@ class SizeClassAllocator32 {
     static uptr AllocationSizeRequiredForNElements(uptr n) {
       return sizeof(uptr) * 2 + sizeof(void *) * n;
     }
-    static uptr MaxCached(uptr class_id) {
-      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
+    static uptr MaxCached(uptr size) {
+      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
     }
     TransferBatch *next;
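A quick worked check of AllocationSizeRequiredForNElements, assuming an LP64 target where sizeof(uptr) == sizeof(void *) == 8: a batch carrying, say, 52 pointers needs 2*8 + 8*52 = 432 bytes, so any size class of at least 432 bytes can host its own batches inline (the batch_class_id == 0 case above). As a sketch:

#include <cstdint>

using uptr = std::uintptr_t;

constexpr uptr AllocationSizeRequiredForNElements(uptr n) {
  return sizeof(uptr) * 2 + sizeof(void *) * n;  // header + pointer array
}
// Holds on LP64; the concrete numbers are for illustration only.
static_assert(AllocationSizeRequiredForNElements(52) == 432,
              "a 52-element batch occupies 432 bytes on LP64");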
@@ -106,7 +106,7 @@ class SizeClassAllocator32 {
   typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
   void Init(s32 release_to_os_interval_ms) {
-    possible_regions.TestOnlyInit();
+    possible_regions.Init();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
   }
@@ -118,8 +118,12 @@ class SizeClassAllocator32 {
     // This is empty here. Currently only implemented in 64-bit allocator.
   }
+  void ForceReleaseToOS() {
+    // Currently implemented in 64-bit allocator only.
+  }
   void *MapWithCallback(uptr size) {
-    void *res = MmapOrDie(size, "SizeClassAllocator32");
+    void *res = MmapOrDie(size, PrimaryAllocatorName);
     MapUnmapCallback().OnMap((uptr)res, size);
     return res;
   }
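MapUnmapCallback is a policy type the allocator is templated on: every region the primary allocator maps or returns to the OS is announced through OnMap/OnUnmap, which lets a tool maintain shadow memory or metadata at those points. A minimal conforming callback (logging only; the type name is illustrative):

#include <cstdint>
#include <cstdio>

using uptr = std::uintptr_t;

// Illustrative policy: the allocator calls OnMap/OnUnmap around each region
// it maps or unmaps; a real tool would poison shadow or update metadata here.
struct LoggingMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    std::fprintf(stderr, "map   %p (%zu bytes)\n", (void *)p, (size_t)size);
  }
  void OnUnmap(uptr p, uptr size) const {
    std::fprintf(stderr, "unmap %p (%zu bytes)\n", (void *)p, (size_t)size);
  }
};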
@@ -147,13 +151,14 @@ class SizeClassAllocator32 {
   NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                         uptr class_id) {
-    CHECK_LT(class_id, kNumClasses);
+    DCHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
-    if (sci->free_list.empty() &&
-        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
-      return nullptr;
-    CHECK(!sci->free_list.empty());
+    if (sci->free_list.empty()) {
+      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+        return nullptr;
+      DCHECK(!sci->free_list.empty());
+    }
     TransferBatch *b = sci->free_list.front();
     sci->free_list.pop_front();
     return b;
@@ -161,15 +166,13 @@ class SizeClassAllocator32 {
   NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                 TransferBatch *b) {
-    CHECK_LT(class_id, kNumClasses);
+    DCHECK_LT(class_id, kNumClasses);
     CHECK_GT(b->Count(), 0);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
     sci->free_list.push_front(b);
   }
-  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
   bool PointerIsMine(const void *p) {
     uptr mem = reinterpret_cast<uptr>(p);
     if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
@@ -245,12 +248,9 @@ class SizeClassAllocator32 {
     }
   }
-  void PrintStats() {
-  }
+  void PrintStats() {}
-  static uptr AdditionalSize() {
-    return 0;
-  }
+  static uptr AdditionalSize() { return 0; }
   typedef SizeClassMap SizeClassMapT;
   static const uptr kNumClasses = SizeClassMap::kNumClasses;
@@ -259,16 +259,15 @@ class SizeClassAllocator32 {
   static const uptr kRegionSize = 1 << kRegionSizeLog;
   static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
-  struct SizeClassInfo {
-    SpinMutex mutex;
+  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
+    StaticSpinMutex mutex;
     IntrusiveList<TransferBatch> free_list;
-    char padding[kCacheLineSize - sizeof(uptr) -
-                 sizeof(IntrusiveList<TransferBatch>)];
+    u32 rand_state;
   };
-  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
   uptr ComputeRegionId(uptr mem) {
-    uptr res = mem >> kRegionSizeLog;
+    const uptr res = mem >> kRegionSizeLog;
     CHECK_LT(res, kNumPossibleRegions);
     return res;
   }
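The manual padding array is gone: an alignment attribute now guarantees each SizeClassInfo begins on a cache-line boundary, so the compile-time check only needs the size to be a multiple of the line rather than exactly one line, and adding rand_state cannot silently break it. The portable equivalent of the trick, with a 64-byte line assumed for illustration:

#include <cstddef>

constexpr std::size_t kCacheLineSize = 64;  // assumption for this sketch

// alignas pads the struct so consecutive array elements never share a cache
// line, avoiding false sharing between the per-class mutexes.
struct alignas(kCacheLineSize) SizeClassInfoSketch {
  int mutex_placeholder;   // stand-in for StaticSpinMutex
  void *free_list_head;    // stand-in for IntrusiveList<TransferBatch>
  unsigned rand_state;
};
static_assert(sizeof(SizeClassInfoSketch) % kCacheLineSize == 0,
              "each element occupies a whole number of cache lines");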
@@ -278,9 +277,9 @@ class SizeClassAllocator32 {
   }
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
-    CHECK_LT(class_id, kNumClasses);
-    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
-        kRegionSize, kRegionSize, "SizeClassAllocator32"));
+    DCHECK_LT(class_id, kNumClasses);
+    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+        kRegionSize, kRegionSize, PrimaryAllocatorName));
     if (UNLIKELY(!res))
       return 0;
     MapUnmapCallback().OnMap(res, kRegionSize);
@@ -291,33 +290,66 @@ class SizeClassAllocator32 {
   }
   SizeClassInfo *GetSizeClassInfo(uptr class_id) {
-    CHECK_LT(class_id, kNumClasses);
+    DCHECK_LT(class_id, kNumClasses);
     return &size_class_info_array[class_id];
   }
-  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
-                        SizeClassInfo *sci, uptr class_id) {
-    uptr size = ClassIdToSize(class_id);
-    uptr reg = AllocateRegion(stat, class_id);
-    if (UNLIKELY(!reg))
-      return false;
-    uptr n_chunks = kRegionSize / (size + kMetadataSize);
-    uptr max_count = TransferBatch::MaxCached(class_id);
-    CHECK_GT(max_count, 0);
-    TransferBatch *b = nullptr;
-    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
+  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
+                       TransferBatch **current_batch, uptr max_count,
+                       uptr *pointers_array, uptr count) {
+    // If using a separate class for batches, we do not need to shuffle it.
+    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
+        class_id != SizeClassMap::kBatchClassID))
+      RandomShuffle(pointers_array, count, &sci->rand_state);
+    TransferBatch *b = *current_batch;
+    for (uptr i = 0; i < count; i++) {
       if (!b) {
-        b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
         if (UNLIKELY(!b))
          return false;
         b->Clear();
       }
-      b->Add((void*)i);
+      b->Add((void*)pointers_array[i]);
       if (b->Count() == max_count) {
         sci->free_list.push_back(b);
         b = nullptr;
       }
     }
+    *current_batch = b;
+    return true;
+  }
+  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+                        SizeClassInfo *sci, uptr class_id) {
+    const uptr region = AllocateRegion(stat, class_id);
+    if (UNLIKELY(!region))
+      return false;
+    if (kRandomShuffleChunks)
+      if (UNLIKELY(sci->rand_state == 0))
+        // The random state is initialized from ASLR (PIE) and time.
+        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
+    const uptr size = ClassIdToSize(class_id);
+    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
+    const uptr max_count = TransferBatch::MaxCached(size);
+    DCHECK_GT(max_count, 0);
+    TransferBatch *b = nullptr;
+    constexpr uptr kShuffleArraySize = 48;
+    uptr shuffle_array[kShuffleArraySize];
+    uptr count = 0;
+    for (uptr i = region; i < region + n_chunks * size; i += size) {
+      shuffle_array[count++] = i;
+      if (count == kShuffleArraySize) {
+        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
+                                      shuffle_array, count)))
+          return false;
+        count = 0;
+      }
+    }
+    if (count) {
+      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
+                                    shuffle_array, count)))
+        return false;
+    }
     if (b) {
       CHECK_GT(b->Count(), 0);
       sci->free_list.push_back(b);
......
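The new shuffling path is the substantive change in this file: chunk addresses are buffered 48 at a time and passed through RandomShuffle, so a fresh region no longer hands out chunks in strictly ascending address order, which makes heap layouts harder to predict. A self-contained Fisher-Yates sketch with a toy xorshift generator (both illustrative; the real RandomShuffle lives in sanitizer_common):

#include <cstdint>

// Tiny xorshift step; illustrative stand-in for the allocator's PRNG.
// The state must be seeded non-zero, mirroring the rand_state == 0 guard.
static uint32_t XorshiftNext(uint32_t *state) {
  uint32_t x = *state;
  x ^= x << 13;
  x ^= x >> 17;
  x ^= x << 5;
  return *state = x;
}

// Fisher-Yates: after the loop, every permutation of a[0..n) is equally
// likely (up to PRNG quality), in O(n) time with no extra memory.
static void ShuffleChunks(uintptr_t *a, uint32_t n, uint32_t *rand_state) {
  if (n < 2) return;
  for (uint32_t i = n - 1; i > 0; i--) {
    uint32_t j = XorshiftNext(rand_state) % (i + 1);  // pick from a[0..i]
    uintptr_t tmp = a[i];
    a[i] = a[j];
    a[j] = tmp;
  }
}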
//===-- sanitizer_allocator_report.cc ---------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
#include "sanitizer_allocator_report.h"
#include "sanitizer_common.h"
#include "sanitizer_report_decorator.h"

namespace __sanitizer {

class ScopedAllocatorErrorReport {
 public:
  ScopedAllocatorErrorReport(const char *error_summary_,
                             const StackTrace *stack_)
      : error_summary(error_summary_),
        stack(stack_) {
    Printf("%s", d.Error());
  }
  ~ScopedAllocatorErrorReport() {
    Printf("%s", d.Default());
    stack->Print();
    PrintHintAllocatorCannotReturnNull();
    ReportErrorSummary(error_summary, stack);
  }

 private:
  ScopedErrorReportLock lock;
  const char *error_summary;
  const StackTrace* const stack;
  const SanitizerCommonDecorator d;
};

void NORETURN ReportCallocOverflow(uptr count, uptr size,
                                   const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("calloc-overflow", stack);
    Report("ERROR: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
  }
  Die();
}

void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("pvalloc-overflow", stack);
    Report("ERROR: %s: pvalloc parameters overflow: size 0x%zx rounded up to "
           "system page size 0x%zx cannot be represented in type size_t\n",
           SanitizerToolName, size, GetPageSizeCached());
  }
  Die();
}

void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
                                               const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-allocation-alignment", stack);
    Report("ERROR: %s: invalid allocation alignment: %zd, alignment must be a "
           "power of two\n", SanitizerToolName, alignment);
  }
  Die();
}

void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                                 const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-aligned-alloc-alignment", stack);
#if SANITIZER_POSIX
    Report("ERROR: %s: invalid alignment requested in "
           "aligned_alloc: %zd, alignment must be a power of two and the "
           "requested size 0x%zx must be a multiple of alignment\n",
           SanitizerToolName, alignment, size);
#else
    Report("ERROR: %s: invalid alignment requested in aligned_alloc: %zd, "
           "the requested size 0x%zx must be a multiple of alignment\n",
           SanitizerToolName, alignment, size);
#endif
  }
  Die();
}

void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
                                                  const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-posix-memalign-alignment",
                                      stack);
    Report("ERROR: %s: invalid alignment requested in "
           "posix_memalign: %zd, alignment must be a power of two and a "
           "multiple of sizeof(void*) == %zd\n", SanitizerToolName, alignment,
           sizeof(void*));  // NOLINT
  }
  Die();
}

void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
                                         const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("allocation-size-too-big", stack);
    Report("ERROR: %s: requested allocation size 0x%zx exceeds maximum "
           "supported size of 0x%zx\n", SanitizerToolName, user_size, max_size);
  }
  Die();
}

void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("out-of-memory", stack);
    Report("ERROR: %s: allocator is out of memory trying to allocate 0x%zx "
           "bytes\n", SanitizerToolName, requested_size);
  }
  Die();
}

}  // namespace __sanitizer
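Each of these helpers is the terminal step of a check performed in a tool's interceptor: the interceptor detects the bad request, returns null when allocator_may_return_null permits, and otherwise calls the matching NORETURN reporter. A hedged sketch of the calloc side, assuming the CheckForCallocOverflow helper from sanitizer_allocator_checks.h (ToolCalloc and RawAlloc are illustrative names, not part of the interface):

// Illustrative calloc wrapper: detect count * size overflow before
// multiplying, then fail softly or die loudly depending on flags.
void *ToolCalloc(uptr nmemb, uptr size, const StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;                          // allocator_may_return_null=1
    ReportCallocOverflow(nmemb, size, stack);  // NORETURN: prints, then Die()
  }
  return RawAlloc(nmemb * size);  // hypothetical backend; product cannot wrap
}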