Commit dee5ea7a by Kostya Serebryany, committed by Kostya Serebryany

libsanitizer merge from upstream r209283

From-SVN: r210743
parent b9559136
2014-05-22 Kostya Serebryany <kcc@google.com>
* c-c++-common/tsan/mutexset1.c: Update the test to match
upstream r209283.
* g++.dg/asan/symbolize-callback-1.C: Delete the deprecated test.
2014-05-21 Marek Polacek <polacek@redhat.com>
PR sanitizer/61272
......
......@@ -35,6 +35,6 @@ int main() {
/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
/* { dg-output " Read of size 4 at 0x\[0-9a-f\]+ by thread T1 \\(mutexes: write M\[0-9\]\\):.*" } */
/* { dg-output " Previous write of size 4 at 0x\[0-9a-f\]+ by thread T2:.*" } */
/* { dg-output " Mutex M\[0-9\] created at:.*" } */
/* { dg-output " Mutex M\[0-9\] \\(0x.*\\) created at:.*" } */
/* { dg-output " #0 pthread_mutex_init.*" } */
/* { dg-output " #1 main (.*mutexset1.c|\\?{2}):\[0-9]+.*" } */
// { dg-do run }
// { dg-skip-if "" { *-*-* } { "*" } { "-O2" } }
// { dg-options "-fno-builtin-malloc -fno-builtin-free" }
// { dg-shouldfail "asan" }
#include <stdio.h>
#include <stdlib.h>
extern "C"
bool __asan_symbolize(const void *, char *out_buffer, int out_size) {
snprintf(out_buffer, out_size, "MySymbolizer");
return true;
}
int main() {
char *x = (char*)malloc(10);
free(x);
return x[5];
}
// { dg-output "MySymbolizer" }
2014-05-22 Kostya Serebryany <kcc@google.com>
* All source files: Merge from upstream r209283.
* asan/Makefile.am (asan_files): Added new files.
* asan/Makefile.in: Regenerate.
* tsan/Makefile.am (tsan_files): Added new files.
* tsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.am (sanitizer_common_files): Added new files.
* sanitizer_common/Makefile.in: Regenerate.
2014-05-14 Yury Gribov <y.gribov@samsung.com>
PR sanitizer/61100
......
196489
209283
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
......@@ -15,6 +15,7 @@ toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
asan_activation.cc \
asan_allocator2.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
......
......@@ -88,12 +88,13 @@ libasan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1)
am__objects_1 = asan_allocator2.lo asan_dll_thunk.lo \
asan_fake_stack.lo asan_globals.lo asan_interceptors.lo \
asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
asan_malloc_mac.lo asan_malloc_win.lo asan_new_delete.lo \
asan_poisoning.lo asan_posix.lo asan_report.lo asan_rtl.lo \
asan_stack.lo asan_stats.lo asan_thread.lo asan_win.lo
am__objects_1 = asan_activation.lo asan_allocator2.lo \
asan_dll_thunk.lo asan_fake_stack.lo asan_globals.lo \
asan_interceptors.lo asan_linux.lo asan_mac.lo \
asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
asan_thread.lo asan_win.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
......@@ -272,6 +273,7 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
asan_activation.cc \
asan_allocator2.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
......@@ -412,6 +414,7 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_dll_thunk.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
......
//===-- asan_activation.cc --------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan activation/deactivation logic.
//===----------------------------------------------------------------------===//
#include "asan_activation.h"
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static struct AsanDeactivatedFlags {
int quarantine_size;
int max_redzone;
int malloc_context_size;
bool poison_heap;
} asan_deactivated_flags;
static bool asan_is_deactivated;
void AsanStartDeactivated() {
VReport(1, "Deactivating ASan\n");
// Save flag values.
asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
asan_deactivated_flags.max_redzone = flags()->max_redzone;
asan_deactivated_flags.poison_heap = flags()->poison_heap;
asan_deactivated_flags.malloc_context_size =
common_flags()->malloc_context_size;
flags()->quarantine_size = 0;
flags()->max_redzone = 16;
flags()->poison_heap = false;
common_flags()->malloc_context_size = 0;
asan_is_deactivated = true;
}
void AsanActivate() {
if (!asan_is_deactivated) return;
VReport(1, "Activating ASan\n");
// Restore flag values.
// FIXME: this is not atomic, and there may be other threads alive.
flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
flags()->max_redzone = asan_deactivated_flags.max_redzone;
flags()->poison_heap = asan_deactivated_flags.poison_heap;
common_flags()->malloc_context_size =
asan_deactivated_flags.malloc_context_size;
ParseExtraActivationFlags();
ReInitializeAllocator();
asan_is_deactivated = false;
VReport(
1,
"quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size "
"%d\n",
flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
common_flags()->malloc_context_size);
}
} // namespace __asan
//===-- asan_activation.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan activation/deactivation logic.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ACTIVATION_H
#define ASAN_ACTIVATION_H
namespace __asan {
void AsanStartDeactivated();
void AsanActivate();
} // namespace __asan
#endif // ASAN_ACTIVATION_H
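A minimal sketch of the intended lifecycle of the pair declared above, assuming a process started with start_deactivated=1; the driver function here is hypothetical, since in the real runtime these calls are made internally during init and activation:

#include "asan_activation.h"

// Hypothetical driver; only the two __asan calls are real.
void HypotheticalStartup() {
  // Early in startup: dampen the tool (no quarantine, minimal redzones,
  // unpoisoned heap) while saving the configured flag values.
  __asan::AsanStartDeactivated();
  // Later, once full checking is wanted: restore the saved flags and
  // re-initialize the allocator accordingly.
  __asan::AsanActivate();
}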
......@@ -15,6 +15,7 @@
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
namespace __asan {
......@@ -29,6 +30,7 @@ static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
void InitializeAllocator();
void ReInitializeAllocator();
class AsanChunkView {
public:
......@@ -40,6 +42,7 @@ class AsanChunkView {
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
void GetAllocStack(StackTrace *stack);
void GetFreeStack(StackTrace *stack);
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
......@@ -88,9 +91,46 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
uptr size_;
};
struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const;
void OnUnmap(uptr p, uptr size) const;
};
#if SANITIZER_CAN_USE_ALLOCATOR64
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<kNumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
# endif
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
SizeClassMap, kRegionSizeLog,
ByteMap,
AsanMapUnmapCallback> PrimaryAllocator;
#endif // SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
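The 32-bit fallback above keeps one byte of size-class state per 2^kRegionSizeLog-byte region of the address space. A small self-contained sketch of that region arithmetic, with the byte map reduced to a plain array standing in for FlatByteMap/TwoLevelByteMap:

#include <cassert>
#include <cstdint>

// Mirrors the constants above: 1 MiB regions over a 32-bit address space.
static const uint64_t kRegionSizeLog = 20;
static const uint64_t kAddressSpace = 1ULL << 32;
static const uint64_t kNumRegions = kAddressSpace >> kRegionSizeLog;  // 4096

// Stand-in for the byte map: one size-class byte per region.
static uint8_t region_class[kNumRegions];

uint8_t SizeClassOfAddr(uint64_t addr) {
  assert(addr < kAddressSpace);
  return region_class[addr >> kRegionSizeLog];  // region index = addr / 1 MiB
}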
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
AllocatorCache allocator2_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
......
......@@ -17,8 +17,8 @@
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
......@@ -28,65 +28,30 @@
namespace __asan {
struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
void OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
// Since asan's mapping is compacting, the shadow chunk may be
// not page-aligned, so we only flush the page-aligned portion.
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
};
#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
SizeClassMap, kRegionSizeLog,
FlatByteMap<kFlatByteMapSize>,
AsanMapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
FlushUnneededASanShadowMemory(p, size);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
return &ms->allocator2_cache;
}
static Allocator allocator;
......@@ -132,7 +97,8 @@ static uptr ComputeRZLog(uptr user_requested_size) {
user_requested_size <= (1 << 14) - 256 ? 4 :
user_requested_size <= (1 << 15) - 512 ? 5 :
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
return Max(rz_log, RZSize2Log(flags()->redzone));
return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
RZSize2Log(flags()->max_redzone));
}
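A worked instance of the new clamp, assuming the runtime's usual encoding where a redzone log l corresponds to 16 << l bytes (16 bytes at log 0 up to 2048 at log 7):

// Assume RZSize2Log(16 << l) == l, as in the runtime's encoding:
//   user_requested_size = 1 << 20  -> rz_log = 7  (2048-byte redzone before)
//   flags: redzone = 16 (log 0), max_redzone = 256 (log 4)
//   Min(Max(7, 0), 4) == 4         -> 256-byte redzone after this change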
// The memory chunk allocated from the underlying allocator looks like this:
......@@ -307,10 +273,14 @@ void InitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
void ReInitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
AllocType alloc_type, bool can_fill) {
if (!asan_inited)
__asan_init();
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
......@@ -355,6 +325,16 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
// chunk. This is possible if flags()->poison_heap was disabled for some
// time, for example, due to flags()->start_deactivated.
// Anyway, poison the block before using it for anything else.
uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
}
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
......@@ -708,8 +688,12 @@ uptr PointsIntoChunk(void* p) {
__asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
m->AddrIsInside(addr, /*locked_version=*/true))
if (m->chunk_state != __asan::CHUNK_ALLOCATED)
return 0;
if (m->AddrIsInside(addr, /*locked_version=*/true))
return chunk;
if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
addr))
return chunk;
return 0;
}
......@@ -778,7 +762,7 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
return size;
}
bool __asan_get_ownership(const void *p) {
int __asan_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return (AllocationSize(ptr) > 0);
}
......
......@@ -18,6 +18,7 @@
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "sanitizer_common/sanitizer_interception.h"
// ----------------- Helper functions and macros --------------------- {{{1
extern "C" {
......@@ -113,7 +114,50 @@ static void *getRealProcAddressOrDie(const char *name) {
}
// }}}
// --------- Interface interception helper functions and macros ----------- {{{1
// We need to intercept the ASan interface exported by the DLL thunk and forward
// all the functions to the runtime in the main module.
// However, we don't want to keep two lists of interface functions.
// To avoid that, the list of interface functions should be defined using the
// INTERFACE_FUNCTION macro. Then the entire interface can be intercepted at
// once by calling INTERCEPT_ASAN_INTERFACE().
// Use macro+template magic to automatically generate the list of interface
// functions. Each interface function at line LINE defines a template class
// with a static InterfaceInterceptor<LINE>::Execute() method intercepting the
// function. The default implementation of InterfaceInterceptor<LINE> is to
// call the Execute() method corresponding to the previous line.
template<int LINE>
struct InterfaceInterceptor {
static void Execute() { InterfaceInterceptor<LINE-1>::Execute(); }
};
// There shouldn't be any interface function with a negative line number.
template<>
struct InterfaceInterceptor<0> {
static void Execute() {}
};
#define INTERFACE_FUNCTION(name) \
extern "C" void name() { __debugbreak(); } \
template<> struct InterfaceInterceptor<__LINE__> { \
static void Execute() { \
void *wrapper = getRealProcAddressOrDie(#name); \
if (!__interception::OverrideFunction((uptr)name, (uptr)wrapper, 0)) \
abort(); \
InterfaceInterceptor<__LINE__-1>::Execute(); \
} \
};
// INTERCEPT_ASAN_INTERFACE must be used after the last INTERFACE_FUNCTION.
#define INTERCEPT_ASAN_INTERFACE InterfaceInterceptor<__LINE__>::Execute
static void InterceptASanInterface();
// }}}
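The __LINE__-indexed template chain above is easy to misread, so here is a minimal standalone sketch of the same trick with generic names and no interception; a single Execute() call walks every registered entry because each specialization links to the one on the previous line:

#include <cstdio>

template <int Line>
struct Chain {
  // Default: defer to the previous line until a specialization is found.
  static void Execute() { Chain<Line - 1>::Execute(); }
};

template <>
struct Chain<0> {
  static void Execute() {}  // Base case: nothing registered before line 0.
};

#define REGISTER(msg)                         \
  template <>                                 \
  struct Chain<__LINE__> {                    \
    static void Execute() {                   \
      std::puts(msg);                         \
      Chain<__LINE__ - 1>::Execute();         \
    }                                         \
  };

REGISTER("first")
REGISTER("second")

int main() { Chain<__LINE__>::Execute(); }  // prints "second", then "first"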
// ----------------- ASan own interface functions --------------------
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
extern "C" {
......@@ -123,54 +167,75 @@ extern "C" {
// __asan_option_detect_stack_use_after_return afterwards.
void __asan_init_v3() {
typedef void (*fntype)();
static fntype fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
static fntype fn = 0;
if (fn) return;
fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
InterceptASanInterface();
}
}
WRAP_V_V(__asan_handle_no_return)
WRAP_V_W(__asan_report_store1)
WRAP_V_W(__asan_report_store2)
WRAP_V_W(__asan_report_store4)
WRAP_V_W(__asan_report_store8)
WRAP_V_W(__asan_report_store16)
WRAP_V_WW(__asan_report_store_n)
WRAP_V_W(__asan_report_load1)
WRAP_V_W(__asan_report_load2)
WRAP_V_W(__asan_report_load4)
WRAP_V_W(__asan_report_load8)
WRAP_V_W(__asan_report_load16)
WRAP_V_WW(__asan_report_load_n)
WRAP_V_WW(__asan_register_globals)
WRAP_V_WW(__asan_unregister_globals)
WRAP_W_WW(__asan_stack_malloc_0)
WRAP_W_WW(__asan_stack_malloc_1)
WRAP_W_WW(__asan_stack_malloc_2)
WRAP_W_WW(__asan_stack_malloc_3)
WRAP_W_WW(__asan_stack_malloc_4)
WRAP_W_WW(__asan_stack_malloc_5)
WRAP_W_WW(__asan_stack_malloc_6)
WRAP_W_WW(__asan_stack_malloc_7)
WRAP_W_WW(__asan_stack_malloc_8)
WRAP_W_WW(__asan_stack_malloc_9)
WRAP_W_WW(__asan_stack_malloc_10)
WRAP_V_WWW(__asan_stack_free_0)
WRAP_V_WWW(__asan_stack_free_1)
WRAP_V_WWW(__asan_stack_free_2)
WRAP_V_WWW(__asan_stack_free_4)
WRAP_V_WWW(__asan_stack_free_5)
WRAP_V_WWW(__asan_stack_free_6)
WRAP_V_WWW(__asan_stack_free_7)
WRAP_V_WWW(__asan_stack_free_8)
WRAP_V_WWW(__asan_stack_free_9)
WRAP_V_WWW(__asan_stack_free_10)
INTERFACE_FUNCTION(__asan_handle_no_return)
INTERFACE_FUNCTION(__asan_report_store1)
INTERFACE_FUNCTION(__asan_report_store2)
INTERFACE_FUNCTION(__asan_report_store4)
INTERFACE_FUNCTION(__asan_report_store8)
INTERFACE_FUNCTION(__asan_report_store16)
INTERFACE_FUNCTION(__asan_report_store_n)
INTERFACE_FUNCTION(__asan_report_load1)
INTERFACE_FUNCTION(__asan_report_load2)
INTERFACE_FUNCTION(__asan_report_load4)
INTERFACE_FUNCTION(__asan_report_load8)
INTERFACE_FUNCTION(__asan_report_load16)
INTERFACE_FUNCTION(__asan_report_load_n)
INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);
INTERFACE_FUNCTION(__asan_register_globals)
INTERFACE_FUNCTION(__asan_unregister_globals)
INTERFACE_FUNCTION(__asan_before_dynamic_init)
INTERFACE_FUNCTION(__asan_after_dynamic_init)
INTERFACE_FUNCTION(__asan_poison_stack_memory)
INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
INTERFACE_FUNCTION(__asan_poison_memory_region)
INTERFACE_FUNCTION(__asan_unpoison_memory_region)
INTERFACE_FUNCTION(__asan_get_current_fake_stack)
INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
INTERFACE_FUNCTION(__asan_stack_malloc_0)
INTERFACE_FUNCTION(__asan_stack_malloc_1)
INTERFACE_FUNCTION(__asan_stack_malloc_2)
INTERFACE_FUNCTION(__asan_stack_malloc_3)
INTERFACE_FUNCTION(__asan_stack_malloc_4)
INTERFACE_FUNCTION(__asan_stack_malloc_5)
INTERFACE_FUNCTION(__asan_stack_malloc_6)
INTERFACE_FUNCTION(__asan_stack_malloc_7)
INTERFACE_FUNCTION(__asan_stack_malloc_8)
INTERFACE_FUNCTION(__asan_stack_malloc_9)
INTERFACE_FUNCTION(__asan_stack_malloc_10)
INTERFACE_FUNCTION(__asan_stack_free_0)
INTERFACE_FUNCTION(__asan_stack_free_1)
INTERFACE_FUNCTION(__asan_stack_free_2)
INTERFACE_FUNCTION(__asan_stack_free_4)
INTERFACE_FUNCTION(__asan_stack_free_5)
INTERFACE_FUNCTION(__asan_stack_free_6)
INTERFACE_FUNCTION(__asan_stack_free_7)
INTERFACE_FUNCTION(__asan_stack_free_8)
INTERFACE_FUNCTION(__asan_stack_free_9)
INTERFACE_FUNCTION(__asan_stack_free_10)
// TODO(timurrrr): Add more interface functions on an as-needed basis.
......@@ -190,7 +255,16 @@ WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_W(_msize)
WRAP_W_W(_expand)
WRAP_W_W(_expand_dbg)
// TODO(timurrrr): Might want to add support for _aligned_* allocation
// functions to detect a few more bugs. Those functions seem to wrap malloc().
// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
void InterceptASanInterface() {
INTERCEPT_ASAN_INTERFACE();
}
#endif // ASAN_DLL_THUNK
......@@ -40,21 +40,32 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
stack_size_log = kMinStackSizeLog;
if (stack_size_log > kMaxStackSizeLog)
stack_size_log = kMaxStackSizeLog;
uptr size = RequiredSize(stack_size_log);
FakeStack *res = reinterpret_cast<FakeStack *>(
MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
: MmapOrDie(size, "FakeStack"));
res->stack_size_log_ = stack_size_log;
if (common_flags()->verbosity) {
u8 *p = reinterpret_cast<u8 *>(res);
Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
GetCurrentTidOrInvalid(), p,
p + FakeStack::RequiredSize(stack_size_log), stack_size_log);
}
u8 *p = reinterpret_cast<u8 *>(res);
VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
"mmapped %zdK, noreserve=%d \n",
GetCurrentTidOrInvalid(), p,
p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
size >> 10, flags()->uar_noreserve);
return res;
}
void FakeStack::Destroy() {
void FakeStack::Destroy(int tid) {
PoisonAll(0);
UnmapOrDie(this, RequiredSize(stack_size_log_));
if (common_flags()->verbosity >= 2) {
InternalScopedString str(kNumberOfSizeClasses * 50);
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
NumberOfFrames(stack_size_log(), class_id));
Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
}
uptr size = RequiredSize(stack_size_log_);
FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
UnmapOrDie(this, size);
}
void FakeStack::PoisonAll(u8 magic) {
......@@ -91,7 +102,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
return 0; // We are out of fake stack.
}
uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
......@@ -101,7 +112,10 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (1UL << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
return base + pos * BytesInSizeClass(class_id);
uptr res = base + pos * BytesInSizeClass(class_id);
*frame_end = res + BytesInSizeClass(class_id);
*frame_beg = res + sizeof(FakeFrame);
return res;
}
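A worked instance of the frame lookup above, assuming kMinStackFrameSizeLog == 6 and BytesInSizeClass(c) == 1 << (kMinStackFrameSizeLog + c), as declared in the header further down:

// class_id = 2  -> frames are 1 << (6 + 2) = 256 bytes each
//   ptr = base + 700                      // lands inside frame #2
//   pos = 700 >> (6 + 2) = 2              // frame index
//   res = base + 2 * 256 = base + 512     // frame start, returned to caller
//   *frame_beg = res + sizeof(FakeFrame)  // payload begins after the header
//   *frame_end = res + 256                // one past the end of the frame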
void FakeStack::HandleNoReturn() {
......@@ -195,14 +209,15 @@ ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
__asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
return __asan::OnMalloc(class_id, size, real_stack); \
return OnMalloc(class_id, size, real_stack); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
uptr ptr, uptr size, uptr real_stack) { \
__asan::OnFree(ptr, class_id, size, real_stack); \
OnFree(ptr, class_id, size, real_stack); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
......@@ -216,3 +231,23 @@ DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end) {
FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
if (!fs) return 0;
uptr frame_beg, frame_end;
FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
if (!frame) return 0;
if (frame->magic != kCurrentStackFrameMagic)
return 0;
if (beg) *beg = reinterpret_cast<void*>(frame_beg);
if (end) *end = reinterpret_cast<void*>(frame_end);
return reinterpret_cast<void*>(frame->real_stack);
}
} // extern "C"
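The two exported functions above form a small public API for inspecting fake stacks. A hedged usage sketch; the declarations are copied from the interface, while the wrapper is hypothetical:

extern "C" {
void *__asan_get_current_fake_stack();
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr,
                                   void **beg, void **end);
}

// Hypothetical helper: true (and [*beg, *end) filled in) iff `addr` lies in
// a live fake frame of the current thread's fake stack.
bool AddrIsOnFakeStack(void *addr, void **beg, void **end) {
  void *fs = __asan_get_current_fake_stack();  // may be null
  return fs && __asan_addr_is_in_fake_stack(fs, addr, beg, end) != nullptr;
}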
......@@ -63,7 +63,7 @@ class FakeStack {
// CTOR: create the FakeStack as a single mmap-ed object.
static FakeStack *Create(uptr stack_size_log);
void Destroy();
void Destroy(int tid);
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
......@@ -127,7 +127,11 @@ class FakeStack {
void PoisonAll(u8 magic);
// Return the beginning of the FakeFrame or 0 if the address is not ours.
uptr AddrIsInFakeStack(uptr addr);
uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
USED uptr AddrIsInFakeStack(uptr addr) {
uptr t1, t2;
return AddrIsInFakeStack(addr, &t1, &t2);
}
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
......
......@@ -26,88 +26,42 @@
namespace __asan {
struct Flags {
// Size (in bytes) of quarantine used to detect use-after-free errors.
// Lower value may reduce memory usage but increase the chance of
// false negatives.
// Flag descriptions are in asan_rtl.cc.
int quarantine_size;
// Size (in bytes) of redzones around heap objects.
// Requirement: redzone >= 32, is a power of two.
int redzone;
// If set, prints some debugging information and does additional checks.
int max_redzone;
bool debug;
// Controls the way to handle globals (0 - don't detect buffer overflow
// on globals, 1 - detect buffer overflow, 2 - print data about registered
// globals).
int report_globals;
// If set, attempts to catch initialization order issues.
bool check_initialization_order;
// If set, uses custom wrappers and replacements for libc string functions
// to find more errors.
bool replace_str;
// If set, uses custom wrappers for memset/memcpy/memmove intinsics.
bool replace_intrin;
// Used on Mac only.
bool mac_ignore_invalid_free;
// Enables stack-use-after-return checking at run-time.
bool detect_stack_use_after_return;
// The minimal fake stack size log.
int uar_stack_size_log;
// ASan allocator flag. max_malloc_fill_size is the maximal amount of bytes
// that will be filled with malloc_fill_byte on malloc.
int min_uar_stack_size_log;
int max_uar_stack_size_log;
bool uar_noreserve;
int max_malloc_fill_size, malloc_fill_byte;
// Override exit status if something was reported.
int exitcode;
// If set, user may manually mark memory regions as poisoned or unpoisoned.
bool allow_user_poisoning;
// Number of seconds to sleep between printing an error report and
// terminating application. Useful for debug purposes (when one needs
// to attach gdb, for example).
int sleep_before_dying;
// If set, registers ASan custom segv handler.
bool handle_segv;
// If set, allows user register segv handler even if ASan registers one.
bool allow_user_segv_handler;
// If set, uses alternate stack for signal handling.
bool use_sigaltstack;
// Allow the users to work around the bug in Nvidia drivers prior to 295.*.
bool check_malloc_usable_size;
// If set, explicitly unmaps (huge) shadow at exit.
bool unmap_shadow_on_exit;
// If set, calls abort() instead of _exit() after printing an error report.
bool abort_on_error;
// Print various statistics after printing an error message or if atexit=1.
bool print_stats;
// Print the legend for the shadow bytes.
bool print_legend;
// If set, prints ASan exit stats even after program terminates successfully.
bool atexit;
// If set, coverage information will be dumped at shutdown time if the
// appropriate instrumentation was enabled.
bool coverage;
// By default, disable core dumper on 64-bit - it makes little sense
// to dump 16T+ core.
bool disable_core;
// Allow the tool to re-exec the program. This may interfere badly with the
// debugger.
bool allow_reexec;
// If set, prints not only thread creation stacks for threads in error report,
// but also thread creation stacks for threads that created those threads,
// etc. up to main thread.
bool print_full_thread_history;
// Poison (or not) the heap memory on [de]allocation. Zero value is useful
// for benchmarking the allocator or instrumentator.
bool poison_heap;
// If true, poison partially addressable 8-byte aligned words (default=true).
// This flag affects heap and global buffers, but not stack buffers.
bool poison_partial;
// Report errors on malloc/delete, new/free, new/delete[], etc.
bool alloc_dealloc_mismatch;
// If true, assume that memcmp(p1, p2, n) always reads n bytes before
// comparing p1 and p2.
bool strict_memcmp;
// If true, assume that dynamic initializers can never access globals from
// other modules, even if the latter are already initialized.
bool strict_init_order;
bool start_deactivated;
int detect_invalid_pointer_pairs;
bool detect_container_overflow;
int detect_odr_violation;
};
extern Flags asan_flags_dont_use_directly;
......
......@@ -90,6 +90,19 @@ static void RegisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->detect_odr_violation) {
// Try detecting ODR (One Definition Rule) violation, i.e. the situation
// where two globals with the same name are defined in different modules.
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger,
// the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size))
ReportODRViolation(g, l->g);
}
}
}
if (flags()->poison_heap)
PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
......
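The new check fires when two instrumented modules register a global at the same address, which is what an ODR violation looks like at run time. A minimal two-module example of the situation it targets (file names hypothetical; both objects must be built with -fsanitize=address and end up in one process, e.g. via shared libraries):

// liba.so:
int g_table[16];   // registered at load time via __asan_register_globals

// libb.so, loaded into the same process:
int g_table[32];   // same symbol; when both registrations resolve to the same
                   // address, the second one finds the redzone already
                   // poisoned and RegisterGlobal() reports an ODR violation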
//===-- asan_intercepted_functions.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header containing prototypes for wrapper functions and wrappers
//===----------------------------------------------------------------------===//
#ifndef ASAN_INTERCEPTED_FUNCTIONS_H
#define ASAN_INTERCEPTED_FUNCTIONS_H
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Use macro to describe if specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_MLOCKX 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_MLOCKX 0
#endif
#if SANITIZER_LINUX
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
#if !SANITIZER_MAC
# define ASAN_INTERCEPT_STRNLEN 1
#else
# define ASAN_INTERCEPT_STRNLEN 0
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
# define ASAN_INTERCEPT_SWAPCONTEXT 0
#endif
#if !SANITIZER_ANDROID && !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
#else
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGLONGJMP 1
#else
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_THROW 1
#else
# define ASAN_INTERCEPT___CXA_THROW 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_ATEXIT 1
#else
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif
#endif // ASAN_INTERCEPTED_FUNCTIONS_H
......@@ -13,7 +13,68 @@
#define ASAN_INTERCEPTORS_H
#include "asan_internal.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Use macro to describe if specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_MLOCKX 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_MLOCKX 0
#endif
#if SANITIZER_FREEBSD || SANITIZER_LINUX
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
#if !SANITIZER_MAC
# define ASAN_INTERCEPT_STRNLEN 1
#else
# define ASAN_INTERCEPT_STRNLEN 0
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
# define ASAN_INTERCEPT_SWAPCONTEXT 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
#else
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGLONGJMP 1
#else
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_THROW 1
#else
# define ASAN_INTERCEPT___CXA_THROW 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_ATEXIT 1
#else
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
......@@ -31,6 +92,13 @@ namespace __asan {
void InitializeAsanInterceptors();
#define ENSURE_ASAN_INITED() do { \
CHECK(!asan_init_is_running); \
if (UNLIKELY(!asan_inited)) { \
AsanInitFromRtl(); \
} \
} while (0)
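ENSURE_ASAN_INITED() centralizes the lazy-init pattern that this patch substitutes for open-coded `if (!asan_inited) __asan_init();` checks (see the malloc_mac changes later in this diff). A sketch of its use in an interceptor; the interceptor itself is illustrative, not one from the patch:

INTERCEPTOR(void *, some_alloc_fn, size_t size) {
  ENSURE_ASAN_INITED();   // CHECKs we are not mid-init, lazily inits otherwise
  GET_STACK_TRACE_MALLOC;
  return asan_malloc(size, &stack);
}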
} // namespace __asan
#endif // ASAN_INTERCEPTORS_H
......@@ -20,7 +20,7 @@ using __sanitizer::uptr;
extern "C" {
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
// Every time the asan ABI changes we also change the version number in this
// name. Objects built with an incompatible asan ABI version
// will not link with the run-time.
// Changes between ABI versions:
......@@ -75,7 +75,7 @@ extern "C" {
void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
bool __asan_address_is_poisoned(void const volatile *addr);
int __asan_address_is_poisoned(void const volatile *addr);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_region_is_poisoned(uptr beg, uptr size);
......@@ -85,7 +85,7 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_report_error(uptr pc, uptr bp, uptr sp,
uptr addr, bool is_write, uptr access_size);
uptr addr, int is_write, uptr access_size);
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_set_error_exit_code(int exit_code);
......@@ -97,14 +97,10 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_on_error();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
int out_size);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE bool __asan_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE int __asan_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_heap_size();
......@@ -123,6 +119,29 @@ extern "C" {
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr *__asan_test_only_reported_buggy_pointer;
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_memcpy(void *dst, const void *src, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_memmove(void* dest, const void* src, uptr n);
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
......@@ -28,26 +28,11 @@
// Build-time configuration options.
// If set, asan will install its own SEGV signal handler.
#ifndef ASAN_NEEDS_SEGV
# if SANITIZER_ANDROID == 1
# define ASAN_NEEDS_SEGV 0
# else
# define ASAN_NEEDS_SEGV 1
# endif
#endif
// If set, asan will intercept C++ exception api call(s).
#ifndef ASAN_HAS_EXCEPTIONS
# define ASAN_HAS_EXCEPTIONS 1
#endif
// If set, asan uses the values of SHADOW_SCALE and SHADOW_OFFSET
// provided by the instrumented objects. Otherwise constants are used.
#ifndef ASAN_FLEXIBLE_MAPPING_AND_OFFSET
# define ASAN_FLEXIBLE_MAPPING_AND_OFFSET 0
#endif
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
......@@ -62,32 +47,41 @@
# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID)
#endif
#ifndef ASAN_DYNAMIC
# ifdef PIC
# define ASAN_DYNAMIC 1
# else
# define ASAN_DYNAMIC 0
# endif
#endif
// All internal functions in asan reside inside the __asan namespace
// to avoid namespace collisions with the user programs.
// Separate namespace also makes it simpler to distinguish the asan run-time
// functions from the instrumented user code in a profile.
namespace __asan {
class AsanThread;
using __sanitizer::StackTrace;
void AsanInitFromRtl();
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
void ReplaceOperatorsNewAndDelete();
// asan_malloc_linux.cc / asan_malloc_mac.cc
void ReplaceSystemMalloc();
// asan_linux.cc / asan_mac.cc / asan_win.cc
void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
void AsanOnSIGSEGV(int, void *siginfo, void *context);
void MaybeReexec();
bool AsanInterceptsSignal(int signum);
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
void InstallSignalHandlers();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
......@@ -100,7 +94,9 @@ void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
void ParseExtraActivationFlags();
// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
......
......@@ -11,11 +11,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
......@@ -30,12 +31,41 @@
#include <unistd.h>
#include <unwind.h>
#if !SANITIZER_ANDROID
// FIXME: where to get ucontext on Android?
#include <sys/ucontext.h>
#if SANITIZER_FREEBSD
#include <sys/link_elf.h>
#endif
#if SANITIZER_ANDROID || SANITIZER_FREEBSD
#include <ucontext.h>
extern "C" void* _DYNAMIC;
#else
#include <sys/ucontext.h>
#include <dlfcn.h>
#include <link.h>
#endif
// x86_64 FreeBSD 9.2 and older define 64-bit register names in both 64-bit
// and 32-bit modes.
#if SANITIZER_FREEBSD
#include <sys/param.h>
# if __FreeBSD_version <= 902001 // v9.2
# define mc_eip mc_rip
# define mc_ebp mc_rbp
# define mc_esp mc_rsp
# endif
#endif
typedef enum {
ASAN_RT_VERSION_UNDEFINED = 0,
ASAN_RT_VERSION_DYNAMIC,
ASAN_RT_VERSION_STATIC,
} asan_rt_version_t;
// FIXME: perhaps also store abi version here?
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
asan_rt_version_t __asan_rt_version;
}
namespace __asan {
......@@ -48,38 +78,115 @@ void *AsanDoesNotSupportStaticLinkage() {
return &_DYNAMIC; // defined in link.h
}
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if SANITIZER_ANDROID
*pc = *sp = *bp = 0;
#elif defined(__arm__)
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
#else
static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
void *data) {
// Continue until the first dynamic library is found
if (!info->dlpi_name || info->dlpi_name[0] == 0)
return 0;
*(const char **)data = info->dlpi_name;
return 1;
}
static bool IsDynamicRTName(const char *libname) {
return internal_strstr(libname, "libclang_rt.asan") ||
internal_strstr(libname, "libasan.so");
}
static void ReportIncompatibleRT() {
Report("Your application is linked against incompatible ASan runtimes.\n");
Die();
}
void AsanCheckDynamicRTPrereqs() {
// Ensure that dynamic RT is the first DSO in the list
const char *first_dso_name = 0;
dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
Report("ASan runtime does not come first in initial library list; "
"you should either link runtime to your application or "
"manually preload it with LD_PRELOAD.\n");
Die();
}
}
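The prerequisite check above leans on dl_iterate_phdr reporting objects in load order, with the main executable first (empty name). A standalone sketch of the same enumeration using the glibc API, printing instead of checking:

#include <link.h>
#include <cstdio>

// Callback signature required by dl_iterate_phdr; returning nonzero stops.
static int PrintDSO(struct dl_phdr_info *info, size_t, void *) {
  std::printf("loaded: %s\n",
              info->dlpi_name && info->dlpi_name[0] ? info->dlpi_name
                                                    : "(main executable)");
  return 0;  // keep iterating
}

int main() {
  dl_iterate_phdr(PrintDSO, nullptr);  // walks DSOs in load order
}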
void AsanCheckIncompatibleRT() {
if (ASAN_DYNAMIC) {
if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
__asan_rt_version = ASAN_RT_VERSION_DYNAMIC;
} else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) {
ReportIncompatibleRT();
}
} else {
if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
// Ensure that dynamic runtime is not present. We should detect it
// as early as possible, otherwise ASan interceptors could bind to
// the functions in dynamic ASan runtime instead of the functions in
// system libraries, causing crashes later in ASan initialization.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
char filename[128];
while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) {
if (IsDynamicRTName(filename)) {
Report("Your application is linked against "
"incompatible ASan runtimes.\n");
Die();
}
}
__asan_rt_version = ASAN_RT_VERSION_STATIC;
} else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) {
ReportIncompatibleRT();
}
}
}
#endif // SANITIZER_ANDROID
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.arm_pc;
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
# elif defined(__hppa__)
#elif defined(__aarch64__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.regs[29];
*sp = ucontext->uc_mcontext.sp;
#elif defined(__hppa__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
/* GCC uses %r3 whenever a frame pointer is needed. */
*bp = ucontext->uc_mcontext.sc_gr[3];
*sp = ucontext->uc_mcontext.sc_gr[30];
# elif defined(__x86_64__)
#elif defined(__x86_64__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.mc_rip;
*bp = ucontext->uc_mcontext.mc_rbp;
*sp = ucontext->uc_mcontext.mc_rsp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
*bp = ucontext->uc_mcontext.gregs[REG_RBP];
*sp = ucontext->uc_mcontext.gregs[REG_RSP];
# elif defined(__i386__)
# endif
#elif defined(__i386__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.mc_eip;
*bp = ucontext->uc_mcontext.mc_ebp;
*sp = ucontext->uc_mcontext.mc_esp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_EIP];
*bp = ucontext->uc_mcontext.gregs[REG_EBP];
*sp = ucontext->uc_mcontext.gregs[REG_ESP];
# elif defined(__powerpc__) || defined(__powerpc64__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.regs->nip;
*sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
// The powerpc{,64}-linux ABIs do not specify r31 as the frame
// pointer, but GCC always uses r31 when we need a frame pointer.
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
# elif defined(__sparc__)
# endif
#elif defined(__sparc__)
ucontext_t *ucontext = (ucontext_t*)context;
uptr *stk_ptr;
# if defined (__arch64__)
......@@ -93,7 +200,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
stk_ptr = (uptr *) *sp;
*bp = stk_ptr[15];
# endif
# elif defined(__mips__)
#elif defined(__mips__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[31];
*bp = ucontext->uc_mcontext.gregs[30];
......@@ -104,7 +211,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
}
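GetPcSpBp is only meaningful inside a signal handler installed with SA_SIGINFO, where the third handler argument is the ucontext_t that the per-arch code above decodes. A minimal sketch of such a handler, with a stub standing in for the real function:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

// Stand-in for __asan::GetPcSpBp; the real per-arch decoding is above.
static void GetPcSpBpStub(void *context, unsigned long *pc,
                          unsigned long *sp, unsigned long *bp) {
  (void)context;           // real code casts this to ucontext_t*
  *pc = *sp = *bp = 0;
}

static void SegvHandler(int, siginfo_t *, void *context) {
  unsigned long pc, sp, bp;
  GetPcSpBpStub(context, &pc, &sp, &bp);
  fprintf(stderr, "SEGV at pc=%lx sp=%lx bp=%lx\n", pc, sp, bp);
  _exit(1);                // async-signal-safe exit
}

int main() {
  struct sigaction sa = {};
  sa.sa_sigaction = SegvHandler;  // three-argument handler form
  sa.sa_flags = SA_SIGINFO;       // deliver siginfo_t + ucontext
  sigaction(SIGSEGV, &sa, nullptr);
}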
bool AsanInterceptsSignal(int signum) {
return signum == SIGSEGV && flags()->handle_segv;
return signum == SIGSEGV && common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
......@@ -125,4 +232,4 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
} // namespace __asan
#endif // SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
......@@ -15,12 +15,12 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mac.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mac.h"
#include <crt_externs.h> // for _NSGetArgv
#include <dlfcn.h> // for dladdr()
......@@ -51,43 +51,6 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif // SANITIZER_WORDSIZE
}
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
MacosVersion GetMacosVersionInternal() {
int mib[2] = { CTL_KERN, KERN_OSRELEASE };
char version[100];
uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
// Get the version length.
CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
CHECK_LT(len, maxlen);
CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
switch (version[0]) {
case '9': return MACOS_VERSION_LEOPARD;
case '1': {
switch (version[1]) {
case '0': return MACOS_VERSION_SNOW_LEOPARD;
case '1': return MACOS_VERSION_LION;
case '2': return MACOS_VERSION_MOUNTAIN_LION;
case '3': return MACOS_VERSION_MAVERICKS;
default: return MACOS_VERSION_UNKNOWN;
}
}
default: return MACOS_VERSION_UNKNOWN;
}
}
MacosVersion GetMacosVersion() {
atomic_uint32_t *cache =
reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
MacosVersion result =
static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
if (result == MACOS_VERSION_UNINITIALIZED) {
result = GetMacosVersionInternal();
atomic_store(cache, result, memory_order_release);
}
return result;
}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
......@@ -172,12 +135,10 @@ void MaybeReexec() {
// Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
}
if (common_flags()->verbosity >= 1) {
Report("exec()-ing the program with\n");
Report("%s=%s\n", kDyldInsertLibraries, new_env);
Report("to enable ASan wrappers.\n");
Report("Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
}
VReport(1, "exec()-ing the program with\n");
VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
VReport(1, "to enable ASan wrappers.\n");
VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
execv(program_name, *_NSGetArgv());
} else {
// DYLD_INSERT_LIBRARIES is set and contains the runtime library.
......@@ -236,8 +197,15 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
// No-op. Mac does not support static linkage anyway.
void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}
bool AsanInterceptsSignal(int signum) {
return (signum == SIGSEGV || signum == SIGBUS) && flags()->handle_segv;
return (signum == SIGSEGV || signum == SIGBUS) &&
common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
......@@ -309,11 +277,10 @@ extern "C"
void asan_dispatch_call_block_and_release(void *block) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *context = (asan_block_context_t*)block;
if (common_flags()->verbosity >= 2) {
Report("asan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
block, pthread_self());
}
VReport(2,
"asan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
block, pthread_self());
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
context->func(context->block);
......@@ -347,10 +314,10 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
if (common_flags()->verbosity >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
PRINT_CURRENT_STACK(); \
} \
return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
asan_dispatch_call_block_and_release); \
}
INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
......@@ -386,7 +353,6 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
#if !defined(MISSING_BLOCKS_SUPPORT)
extern "C" {
// FIXME: consolidate these declarations with asan_intercepted_functions.h.
void dispatch_async(dispatch_queue_t dq, void(^work)(void));
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
void(^work)(void));
......
//===-- asan_mac.h ----------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Mac-specific ASan definitions.
//===----------------------------------------------------------------------===//
#ifndef ASAN_MAC_H
#define ASAN_MAC_H
// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
// and subject to change in further CoreFoundation versions. Apple does not
// guarantee any binary compatibility from release to release.
// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
#if defined(__BIG_ENDIAN__)
#define CF_RC_BITS 0
#endif
#if defined(__LITTLE_ENDIAN__)
#define CF_RC_BITS 3
#endif
// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
typedef struct __CFRuntimeBase {
uptr _cfisa;
u8 _cfinfo[4];
#if __LP64__
u32 _rc;
#endif
} CFRuntimeBase;
enum MacosVersion {
MACOS_VERSION_UNINITIALIZED = 0,
MACOS_VERSION_UNKNOWN,
MACOS_VERSION_LEOPARD,
MACOS_VERSION_SNOW_LEOPARD,
MACOS_VERSION_LION,
MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS
};
// Used by asan_malloc_mac.cc and asan_mac.cc
extern "C" void __CFInitialize();
namespace __asan {
MacosVersion GetMacosVersion();
void MaybeReplaceCFAllocator();
} // namespace __asan
#endif // ASAN_MAC_H
......@@ -13,8 +13,9 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
......@@ -74,7 +75,7 @@ INTERCEPTOR(void*, malloc, uptr size) {
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
......@@ -99,8 +100,12 @@ INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
ALIAS("memalign");
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
DTLS_on_libc_memalign(res, size * boundary);
return res;
}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
GET_CURRENT_PC_BP_SP;
......@@ -146,4 +151,4 @@ INTERCEPTOR(void, malloc_stats, void) {
__asan_print_accumulated_stats();
}
#endif // SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
......@@ -22,10 +22,10 @@
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mac.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_mac.h"
// Similar code is used in Google Perftools,
// http://code.google.com/p/google-perftools.
......@@ -39,7 +39,7 @@ static malloc_zone_t asan_zone;
INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
vm_size_t start_size, unsigned zone_flags) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
uptr page_size = GetPageSizeCached();
uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size);
......@@ -58,34 +58,34 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
}
INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
return &asan_zone;
}
INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
// FIXME: ASan should support purgeable allocations.
// https://code.google.com/p/address-sanitizer/issues/detail?id=139
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
return &asan_zone;
}
INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
// FIXME: ASan should support purgeable allocations. Ignoring them is fine
// for now.
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
}
INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
// FIXME: ASan should support purgeable allocations. Ignoring them is fine
// for now.
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
// Must return 0 if the contents were not purged since the last call to
// malloc_make_purgeable().
return 0;
}
INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
// Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
size_t buflen = 6 + (name ? internal_strlen(name) : 0);
InternalScopedBuffer<char> new_name(buflen);
......@@ -100,44 +100,44 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
}
INTERCEPTOR(void *, malloc, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
void *res = asan_malloc(size, &stack);
return res;
}
INTERCEPTOR(void, free, void *ptr) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
if (!ptr) return;
GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void *, valloc, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}
INTERCEPTOR(size_t, malloc_good_size, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
return asan_zone.introspect->good_size(&asan_zone, size);
}
INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
CHECK(memptr);
GET_STACK_TRACE_MALLOC;
void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
......@@ -157,7 +157,7 @@ size_t mz_size(malloc_zone_t* zone, const void* ptr) {
}
void *mz_malloc(malloc_zone_t *zone, size_t size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
}
......@@ -166,7 +166,7 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
}
void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
......@@ -182,7 +182,7 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
}
void *mz_valloc(malloc_zone_t *zone, size_t size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
}
......@@ -240,7 +240,7 @@ void mz_destroy(malloc_zone_t* zone) {
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
}
......
......@@ -17,7 +17,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_interception.h"
#include <stddef.h>
......@@ -101,6 +101,21 @@ size_t _msize(void *ptr) {
return asan_malloc_usable_size(ptr, pc, bp);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *_expand(void *memblock, size_t size) {
// _expand is used in realloc-like functions to resize the buffer if possible.
// We don't want memory to stand still while buffers are resized (a moved
// allocation catches more bugs), so always return 0, i.e. refuse to expand.
return 0;
}
SANITIZER_INTERFACE_ATTRIBUTE
void *_expand_dbg(void *memblock, size_t size) {
return 0;
}
// TODO(timurrrr): Might want to add support for _aligned_* allocation
// functions to detect a bit more bugs. Those functions seem to wrap malloc().
int _CrtDbgReport(int, const char*, int,
const char*, const char*, ...) {
ShowStatsAndAbort();
......
......@@ -41,54 +41,81 @@
// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow ||
// || `[0x000000000000, 0x00007fff7fff]` || LowMem ||
//
// Default Linux/i386 mapping:
// Default Linux/i386 mapping on x86_64 machine:
// || `[0x40000000, 0xffffffff]` || HighMem ||
// || `[0x28000000, 0x3fffffff]` || HighShadow ||
// || `[0x24000000, 0x27ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/i386 mapping on i386 machine
// (addresses starting with 0xc0000000 are reserved
// for kernel and thus not sanitized):
// || `[0x38000000, 0xbfffffff]` || HighMem ||
// || `[0x27000000, 0x37ffffff]` || HighShadow ||
// || `[0x24000000, 0x26ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/MIPS mapping:
// || `[0x2aaa8000, 0xffffffff]` || HighMem ||
// || `[0x0fffd000, 0x2aaa7fff]` || HighShadow ||
// || `[0x0bffd000, 0x0fffcfff]` || ShadowGap ||
// || `[0x0aaa8000, 0x0bffcfff]` || LowShadow ||
// || `[0x00000000, 0x0aaa7fff]` || LowMem ||
//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap ||
// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
//
// Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000:
// || `[0x60000000, 0xffffffff]` || HighMem ||
// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
# define SHADOW_SCALE (__asan_mapping_scale)
# define SHADOW_OFFSET (__asan_mapping_offset)
#define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
#else
# define SHADOW_SCALE kDefaultShadowScale
# if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
# if SANITIZER_WORDSIZE == 32
# if defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# else
# if SANITIZER_IOS
# define SHADOW_OFFSET kIosShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# endif
# else
# if SANITIZER_WORDSIZE == 32
# if defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# if defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# else
# if defined(__powerpc64__)
# define SHADOW_OFFSET kPPC64_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# endif
#endif // ASAN_FLEXIBLE_MAPPING_AND_OFFSET
#endif
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
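// Worked example of MEM_TO_SHADOW above, assuming the default Linux/x86_64
// layout (SHADOW_SCALE == 3, SHADOW_OFFSET == kDefaultShort64bitShadowOffset):
//   MEM_TO_SHADOW(0x700000000000)
//     == (0x700000000000 >> 3) + 0x7FFF8000
//     == 0x0E0000000000 + 0x7FFF8000
//     == 0x0E007FFF8000   // falls inside HighShadow
// Each shadow byte thus describes the state of SHADOW_GRANULARITY (8)
// application bytes.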
......
......@@ -14,20 +14,14 @@
#include "asan_internal.h"
#include "asan_stack.h"
#include <stddef.h>
#include "sanitizer_common/sanitizer_interception.h"
namespace __asan {
// This function is a no-op. We need it to make sure that the object file
// with our replacements will actually be loaded from the static ASan
// run-time library at link time.
void ReplaceOperatorsNewAndDelete() { }
}
#include <stddef.h>
using namespace __asan; // NOLINT
// On Android new() goes through malloc interceptors.
// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
#if !SANITIZER_ANDROID
// This code has issues on OSX.
// See https://code.google.com/p/address-sanitizer/issues/detail?id=131.
// Fake std::nothrow_t to avoid including <new>.
namespace std {
......@@ -46,6 +40,15 @@ struct nothrow_t {};
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.
// http://svnweb.freebsd.org/base?view=revision&revision=232261
#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
#include <sys/param.h>
#if __FreeBSD_version <= 902001 // v9.2
#define size_t unsigned
#endif // __FreeBSD_version
#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
......@@ -78,15 +81,21 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
void operator delete(void *ptr) throw() {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
void operator delete[](void *ptr) throw() {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW); }
void operator delete(void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
......@@ -102,5 +111,3 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
#endif
......@@ -11,6 +11,8 @@
//===----------------------------------------------------------------------===//
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
......@@ -48,6 +50,15 @@ struct ShadowSegmentEndpoint {
}
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
......@@ -67,10 +78,8 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
if (common_flags()->verbosity >= 1) {
Printf("Trying to poison memory region [%p, %p)\n",
(void*)beg_addr, (void*)end_addr);
}
VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
(void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
......@@ -109,10 +118,8 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
if (common_flags()->verbosity >= 1) {
Printf("Trying to unpoison memory region [%p, %p)\n",
(void*)beg_addr, (void*)end_addr);
}
VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
(void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
......@@ -137,7 +144,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
}
bool __asan_address_is_poisoned(void const volatile *addr) {
int __asan_address_is_poisoned(void const volatile *addr) {
return __asan::AddressIsPoisoned((uptr)addr);
}
......@@ -146,6 +153,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
uptr end = beg + size;
if (!AddrIsInMem(beg)) return beg;
if (!AddrIsInMem(end)) return end;
CHECK_LT(beg, end);
uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
uptr shadow_beg = MemToShadow(aligned_b);
......@@ -243,14 +251,12 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
if (common_flags()->verbosity > 0)
Report("poisoning: %p %zx\n", (void*)addr, size);
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
}
void __asan_unpoison_stack_memory(uptr addr, uptr size) {
if (common_flags()->verbosity > 0)
Report("unpoisoning: %p %zx\n", (void*)addr, size);
VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, false);
}
......@@ -258,33 +264,40 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
const void *end_p,
const void *old_mid_p,
const void *new_mid_p) {
if (common_flags()->verbosity >= 2)
Printf("contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
new_mid_p);
if (!flags()->detect_container_overflow) return;
VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
new_mid_p);
uptr beg = reinterpret_cast<uptr>(beg_p);
uptr end= reinterpret_cast<uptr>(end_p);
uptr end = reinterpret_cast<uptr>(end_p);
uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
uptr granularity = SHADOW_GRANULARITY;
CHECK(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
IsAligned(beg, granularity));
if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
IsAligned(beg, granularity))) {
GET_STACK_TRACE_FATAL_HERE;
ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
&stack);
}
CHECK_LE(end - beg,
FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
uptr d1 = RoundDownTo(old_mid, granularity);
uptr d2 = RoundUpTo(old_mid, granularity);
// uptr d2 = RoundUpTo(old_mid, granularity);
// Currently we should be in this state:
// [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
// Make a quick sanity check that we are indeed in this state.
if (d1 != d2)
CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
//
// FIXME: Two of these three checks are disabled until we fix
// https://code.google.com/p/address-sanitizer/issues/detail?id=258.
// if (d1 != d2)
// CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
CHECK_EQ(*(u8*)MemToShadow(a), 0);
if (d2 + granularity <= c && c <= end)
CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
kAsanContiguousContainerOOBMagic);
// if (d2 + granularity <= c && c <= end)
// CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
// kAsanContiguousContainerOOBMagic);
uptr b1 = RoundDownTo(new_mid, granularity);
uptr b2 = RoundUpTo(new_mid, granularity);
......@@ -297,3 +310,42 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
*(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
}
}
int __sanitizer_verify_contiguous_container(const void *beg_p,
const void *mid_p,
const void *end_p) {
if (!flags()->detect_container_overflow) return 1;
uptr beg = reinterpret_cast<uptr>(beg_p);
uptr end = reinterpret_cast<uptr>(end_p);
uptr mid = reinterpret_cast<uptr>(mid_p);
CHECK_LE(beg, mid);
CHECK_LE(mid, end);
// Check some bytes starting from beg, some bytes around mid, and some bytes
// ending with end.
uptr kMaxRangeToCheck = 32;
uptr r1_beg = beg;
uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
uptr r2_end = Min(end, mid + kMaxRangeToCheck);
uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
uptr r3_end = end;
for (uptr i = r1_beg; i < r1_end; i++)
if (AddressIsPoisoned(i))
return 0;
for (uptr i = r2_beg; i < mid; i++)
if (AddressIsPoisoned(i))
return 0;
for (uptr i = mid; i < r2_end; i++)
if (!AddressIsPoisoned(i))
return 0;
for (uptr i = r3_beg; i < r3_end; i++)
if (!AddressIsPoisoned(i))
return 0;
return 1;
}
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}
......@@ -13,6 +13,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
......@@ -32,10 +33,35 @@ void PoisonShadowPartialRightRedzone(uptr addr,
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
DCHECK(flags()->poison_heap);
uptr PageSize = GetPageSizeCached();
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
// FIXME: Page states are different on Windows, so using the same interface
// for mapping shadow and zeroing out pages doesn't "just work"; we should
// probably provide a higher-level interface for these operations.
// For now, just memset on Windows.
if (value ||
SANITIZER_WINDOWS == 1 ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} else {
uptr page_beg = RoundUpTo(shadow_beg, PageSize);
uptr page_end = RoundDownTo(shadow_end, PageSize);
if (page_beg >= page_end) {
REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
} else {
if (page_beg != shadow_beg) {
REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
}
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
CHECK_EQ(page_beg, res);
}
}
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
......@@ -55,4 +81,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
// Calls __sanitizer::FlushUnneededShadowMemory() on
// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
void FlushUnneededASanShadowMemory(uptr p, uptr size);
} // namespace __asan
......@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_MAC
#if SANITIZER_POSIX
#include "asan_internal.h"
#include "asan_interceptors.h"
......@@ -28,70 +28,27 @@
#include <sys/resource.h>
#include <unistd.h>
static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
namespace __asan {
static void MaybeInstallSigaction(int signum,
void (*handler)(int, siginfo_t *, void *)) {
if (!AsanInterceptsSignal(signum))
return;
struct sigaction sigact;
REAL(memset)(&sigact, 0, sizeof(sigact));
sigact.sa_sigaction = handler;
sigact.sa_flags = SA_SIGINFO;
if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
if (common_flags()->verbosity >= 1) {
Report("Installed the sigaction for signal %d\n", signum);
}
}
static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
uptr addr = (uptr)siginfo->si_addr;
void AsanOnSIGSEGV(int, void *siginfo, void *context) {
uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using the bullet-proof write.
if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
uptr pc, sp, bp;
GetPcSpBp(context, &pc, &sp, &bp);
ReportSIGSEGV(pc, sp, bp, addr);
}
void SetAlternateSignalStack() {
stack_t altstack, oldstack;
CHECK_EQ(0, sigaltstack(0, &oldstack));
// If the alternate stack is already in place, do nothing.
if ((oldstack.ss_flags & SS_DISABLE) == 0) return;
// TODO(glider): the mapped stack should have the MAP_STACK flag in the
// future. It is not required by man 2 sigaltstack now (they're using
// malloc()).
void* base = MmapOrDie(kAltStackSize, __FUNCTION__);
altstack.ss_sp = base;
altstack.ss_flags = 0;
altstack.ss_size = kAltStackSize;
CHECK_EQ(0, sigaltstack(&altstack, 0));
if (common_flags()->verbosity > 0) {
Report("Alternative stack for T%d set: [%p,%p)\n",
GetCurrentTidOrInvalid(),
altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
}
}
void UnsetAlternateSignalStack() {
stack_t altstack, oldstack;
altstack.ss_sp = 0;
altstack.ss_flags = SS_DISABLE;
altstack.ss_size = 0;
CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
void InstallSignalHandlers() {
// Set the alternate signal stack for the main thread.
// This will cause SetAlternateSignalStack to be called twice, but the stack
// will be actually set only once.
if (flags()->use_sigaltstack) SetAlternateSignalStack();
MaybeInstallSigaction(SIGSEGV, ASAN_OnSIGSEGV);
MaybeInstallSigaction(SIGBUS, ASAN_OnSIGSEGV);
// Access at a reasonable offset above SP, or slightly below it (to account
// for x86_64 redzone, ARM push of multiple registers, etc) is probably a
// stack overflow.
// We also check si_code to filter out SEGVs caused by something other
// than hitting the guard page or unmapped memory, for example an
// unaligned memory access.
if (addr + 128 > sp && addr < sp + 0xFFFF &&
(code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(pc, sp, bp, context, addr);
else
ReportSIGSEGV(pc, sp, bp, context, addr);
}
// ---------------------- TSD ---------------- {{{1
......@@ -125,4 +82,4 @@ void PlatformTSDDtor(void *tsd) {
}
} // namespace __asan
#endif // SANITIZER_LINUX || SANITIZER_MAC
#endif // SANITIZER_POSIX
......@@ -30,7 +30,10 @@ void DescribeAddress(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
void NORETURN
ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
void NORETURN
ReportSIGSEGV(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
......@@ -43,6 +46,14 @@ void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,
void NORETURN ReportStringFunctionMemoryRangesOverlap(
const char *function, const char *offset1, uptr length1,
const char *offset2, uptr length2, StackTrace *stack);
void NORETURN
ReportStringFunctionSizeOverflow(uptr offset, uptr size, StackTrace *stack);
void NORETURN
ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, uptr old_mid,
uptr new_mid, StackTrace *stack);
void NORETURN
ReportODRViolation(const __asan_global *g1, const __asan_global *g2);
// Mac-specific errors and warnings.
void WarnMacFreeUnallocated(
......
......@@ -10,40 +10,10 @@
// Code for ASan stack trace.
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
#include "asan_flags.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
int out_size) {
return (&__asan_symbolize) ? __asan_symbolize(pc, out_buffer, out_size)
: false;
}
void PrintStack(const uptr *trace, uptr size) {
StackTrace::PrintStack(trace, size, MaybeCallAsanSymbolize);
}
void PrintStack(StackTrace *stack) {
PrintStack(stack->trace, stack->size);
}
} // namespace __asan
// ------------------ Interface -------------- {{{1
// Provide default implementation of __asan_symbolize that does nothing
// and may be overriden by user if he wants to use his own symbolization.
// ASan on Windows has its own implementation of this.
#if !SANITIZER_WINDOWS && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
return false;
}
#endif
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
......
......@@ -19,49 +19,62 @@
namespace __asan {
void PrintStack(StackTrace *stack);
void PrintStack(const uptr *trace, uptr size);
} // namespace __asan
// Get the stack trace with the given pc and bp.
// The pc will be in position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
ALWAYS_INLINE
void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
uptr bp, void *context, bool fast) {
#if SANITIZER_WINDOWS
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
stack.Unwind(max_s, pc, bp, 0, 0, fast)
stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
#else
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
{ \
AsanThread *t; \
stack.size = 0; \
if (asan_inited) { \
if ((t = GetCurrentThread()) && !t->isUnwinding()) { \
uptr stack_top = t->stack_top(); \
uptr stack_bottom = t->stack_bottom(); \
ScopedUnwinding unwind_scope(t); \
stack.Unwind(max_s, pc, bp, stack_top, stack_bottom, fast); \
} else if (t == 0 && !fast) { \
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */ \
stack.Unwind(max_s, pc, bp, 0, 0, false); \
} \
} \
AsanThread *t;
stack->size = 0;
if (LIKELY(asan_inited)) {
if ((t = GetCurrentThread()) && !t->isUnwinding()) {
uptr stack_top = t->stack_top();
uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t);
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
} else if (t == 0 && !fast) {
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */
stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
}
}
#endif // SANITIZER_WINDOWS
}
} // namespace __asan
// NOTE: A rule of thumb is to retrieve the stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally
// don't want the stack trace to contain functions from ASan internals.
#define GET_STACK_TRACE(max_size, fast) \
GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)
#define GET_STACK_TRACE(max_size, fast) \
StackTrace stack; \
if (max_size <= 2) { \
stack.size = max_size; \
if (max_size > 0) { \
stack.top_frame_bp = GET_CURRENT_FRAME(); \
stack.trace[0] = StackTrace::GetCurrentPc(); \
if (max_size > 1) \
stack.trace[1] = GET_CALLER_PC(); \
} \
} else { \
GetStackTraceWithPcBpAndContext(&stack, max_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), 0, fast); \
}
#define GET_STACK_TRACE_FATAL(pc, bp) \
GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL(pc, bp) \
StackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \
StackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
......@@ -78,7 +91,7 @@ void PrintStack(const uptr *trace, uptr size);
#define PRINT_CURRENT_STACK() \
{ \
GET_STACK_TRACE_FATAL_HERE; \
PrintStack(&stack); \
stack.Print(); \
}
#endif // ASAN_STACK_H
......@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"
namespace __asan {
......@@ -76,7 +77,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
void *arg) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
......@@ -85,28 +86,27 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
if (common_flags()->verbosity >= 1)
Report("T%d TSDDtor\n", context->tid);
VReport(1, "T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
}
void AsanThread::Destroy() {
if (common_flags()->verbosity >= 1) {
Report("T%d exited\n", tid());
}
int tid = this->tid();
VReport(1, "T%d exited\n", tid);
malloc_storage().CommitBack();
if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
asanThreadRegistry().FinishThread(tid());
if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
asanThreadRegistry().FinishThread(tid);
FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStackAndTLS();
DeleteFakeStack();
DeleteFakeStack(tid);
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
DTLS_Destroy();
}
// We want to create the FakeStack lazily on the first use, but not earlier
......@@ -121,13 +121,16 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
// 1 -- being initialized
// ptr -- initialized
// This CAS checks if the state was 0 and if so changes it to state 1,
// if that was successfull, it initilizes the pointer.
// if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
if (flags()->uar_stack_size_log)
stack_size_log = static_cast<uptr>(flags()->uar_stack_size_log);
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
stack_size_log =
Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
stack_size_log =
Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
SetTLSFakeStack(fake_stack_);
return fake_stack_;
......@@ -140,12 +143,10 @@ void AsanThread::Init() {
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS();
if (common_flags()->verbosity >= 1) {
int local = 0;
Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
tid(), (void*)stack_bottom_, (void*)stack_top_,
stack_top_ - stack_bottom_, &local);
}
int local = 0;
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
&local);
fake_stack_ = 0; // Will be initialized lazily if needed.
AsanPlatformThreadInit();
}
......@@ -153,7 +154,7 @@ void AsanThread::Init() {
thread_return_t AsanThread::ThreadStart(uptr os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, 0);
if (flags()->use_sigaltstack) SetAlternateSignalStack();
if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
......@@ -265,10 +266,8 @@ AsanThread *GetCurrentThread() {
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
if (common_flags()->verbosity >= 2) {
Report("SetCurrentThread: %p for thread %p\n",
t->context(), (void*)GetThreadSelf());
}
VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
(void *)GetThreadSelf());
// Make sure we do not reset the current AsanThread.
CHECK_EQ(0, AsanTSDGet());
AsanTSDSet(t->context());
......
......@@ -15,7 +15,6 @@
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_fake_stack.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
......@@ -76,12 +75,12 @@ class AsanThread {
return addr >= stack_bottom_ && addr < stack_top_;
}
void DeleteFakeStack() {
void DeleteFakeStack(int tid) {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = 0;
SetTLSFakeStack(0);
t->Destroy();
t->Destroy(tid);
}
bool has_fake_stack() {
......
......@@ -33,11 +33,6 @@ extern "C" {
namespace __asan {
// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib")
// ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false;
......@@ -73,17 +68,9 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
void SetAlternateSignalStack() {
// FIXME: Decide what to do on Windows.
}
void AsanCheckDynamicRTPrereqs() { UNIMPLEMENTED(); }
void UnsetAlternateSignalStack() {
// FIXME: Decide what to do on Windows.
}
void InstallSignalHandlers() {
// FIXME: Decide what to do on Windows.
}
void AsanCheckIncompatibleRT() {}
void AsanPlatformThreadInit() {
// Nothing here for now.
......@@ -93,54 +80,10 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
BlockingMutexLock lock(&dbghelp_lock);
if (!dbghelp_initialized) {
SymSetOptions(SYMOPT_DEFERRED_LOADS |
SYMOPT_UNDNAME |
SYMOPT_LOAD_LINES);
CHECK(SymInitialize(GetCurrentProcess(), 0, TRUE));
// FIXME: We don't call SymCleanup() on exit yet - should we?
dbghelp_initialized = true;
}
// See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
symbol->MaxNameLen = MAX_SYM_NAME;
DWORD64 offset = 0;
BOOL got_objname = SymFromAddr(GetCurrentProcess(),
(DWORD64)addr, &offset, symbol);
if (!got_objname)
return false;
DWORD unused;
IMAGEHLP_LINE64 info;
info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(),
(DWORD64)addr, &unused, &info);
int written = 0;
out_buffer[0] = '\0';
// FIXME: it might be useful to print out 'obj' or 'obj+offset' info too.
if (got_fileline) {
written += internal_snprintf(out_buffer + written, buffer_size - written,
" %s %s:%d", symbol->Name,
info.FileName, info.LineNumber);
} else {
written += internal_snprintf(out_buffer + written, buffer_size - written,
" %s+0x%p", symbol->Name, offset);
}
return true;
void AsanOnSIGSEGV(int, void *siginfo, void *context) {
UNIMPLEMENTED();
}
} // extern "C"
} // namespace __asan
#endif // _WIN32
......@@ -48,9 +48,10 @@ extern "C" {
((void)(addr), (void)(size))
#endif
// Returns true iff addr is poisoned (i.e. 1-byte read/write access to this
// Returns 1 if addr is poisoned (i.e. 1-byte read/write access to this
// address will result in an error report from AddressSanitizer).
bool __asan_address_is_poisoned(void const volatile *addr);
// Otherwise returns 0.
int __asan_address_is_poisoned(void const volatile *addr);
// If at least one byte in [beg, beg+size) is poisoned, return the address
// of the first such byte. Otherwise return 0.
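// A hedged usage sketch of the poisoning queries above; the sizes are
// illustrative, and the poisoning entry points used here are the ones
// whose definitions appear in asan_poisoning.cc.
#include <sanitizer/asan_interface.h>
#include <stdlib.h>
static void poisoning_demo() {
  char *p = (char *)malloc(64);
  __asan_poison_memory_region(p + 32, 32);           // user-poison upper half
  int in_bad  = __asan_address_is_poisoned(p + 40);  // 1: inside poisoned range
  int in_good = __asan_address_is_poisoned(p + 8);   // 0: still addressable
  (void)in_bad; (void)in_good;
  __asan_unpoison_memory_region(p + 32, 32);         // make free() safe again
  free(p);
}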
......@@ -63,7 +64,7 @@ extern "C" {
// However it is still a part of the interface because users may want to
// set a breakpoint on this function in a debugger.
void __asan_report_error(void *pc, void *bp, void *sp,
void *addr, bool is_write, size_t access_size);
void *addr, int is_write, size_t access_size);
// Sets the exit code to use when reporting an error.
// Returns the old value.
......@@ -80,22 +81,14 @@ extern "C" {
// the program crashes before ASan report is printed.
void __asan_on_error();
// User may provide its own implementation for symbolization function.
// It should print the description of instruction at address "pc" to
// "out_buffer". Description should be at most "out_size" bytes long.
// User-specified function should return true if symbolization was
// successful.
bool __asan_symbolize(const void *pc, char *out_buffer,
int out_size);
// Returns the estimated number of bytes that will be reserved by allocator
// for request of "size" bytes. If ASan allocator can't allocate that much
// memory, returns the maximal possible allocation size, otherwise returns
// "size".
size_t __asan_get_estimated_allocated_size(size_t size);
// Returns true if p was returned by the ASan allocator and
// is not yet freed.
bool __asan_get_ownership(const void *p);
// Returns 1 if p was returned by the ASan allocator and is not yet freed.
// Otherwise returns 0.
int __asan_get_ownership(const void *p);
// Returns the number of bytes reserved for the pointer p.
// Requires (get_ownership(p) == true) or (p == 0).
size_t __asan_get_allocated_size(const void *p);
......@@ -128,6 +121,24 @@ extern "C" {
// deallocation of "ptr".
void __asan_malloc_hook(void *ptr, size_t size);
void __asan_free_hook(void *ptr);
// The following 2 functions facilitate garbage collection in the presence
// of asan's fake stack.
// Returns an opaque handle to be used later in __asan_addr_is_in_fake_stack.
// Returns NULL if the current thread does not have a fake stack.
void *__asan_get_current_fake_stack();
// If fake_stack is non-NULL and addr belongs to a fake frame in
// fake_stack, returns the address on the real stack that corresponds to
// the fake frame and sets beg/end to the boundaries of this fake frame.
// Otherwise returns NULL and does not touch beg/end.
// If beg/end are NULL, they are not touched.
// This function may be called from a thread other than the owner of
// fake_stack, but the owner thread needs to be alive.
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end);
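// A hedged sketch of a GC-side helper built on the two entry points above;
// the function name and the fallback policy are illustrative.
#include <sanitizer/asan_interface.h>
static void *NormalizeScannedAddress(void *addr) {
  void *fake_stack = __asan_get_current_fake_stack();  // may be NULL
  void *frame_beg = 0, *frame_end = 0;
  if (void *real = __asan_addr_is_in_fake_stack(fake_stack, addr,
                                                &frame_beg, &frame_end))
    return real;  // addr lived in the fake frame [frame_beg, frame_end)
  return addr;    // ordinary address; scan it as-is
}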
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -22,13 +22,28 @@
#ifdef __cplusplus
extern "C" {
#endif
// Arguments for __sanitizer_sandbox_on_notify() below.
typedef struct {
// Enable sandbox support in sanitizer coverage.
int coverage_sandboxed;
// File descriptor to write coverage data to. If -1 is passed, a file will
// be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
// effect if coverage_sandboxed == 0.
intptr_t coverage_fd;
// If non-zero, split the coverage data into well-formed blocks. This is
// useful when coverage_fd is a socket descriptor. Each block will contain
// a header, allowing data from multiple processes to be sent over the same
// socket.
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
void __sanitizer_sandbox_on_notify(void *reserved);
void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
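// A hedged usage sketch: a process about to sandbox itself hands coverage
// parameters to the runtime first. Field values here are illustrative.
#include <sanitizer/common_interface_defs.h>
static void notify_sanitizers_before_sandboxing(int coverage_socket_fd) {
  __sanitizer_sandbox_arguments args = {};
  args.coverage_sandboxed = 1;
  args.coverage_fd = coverage_socket_fd;  // pre-opened by the embedder
  args.coverage_max_block_size = 4096;    // well-formed blocks over the socket
  __sanitizer_sandbox_on_notify(&args);
}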
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
......@@ -45,6 +60,8 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
// Initialize coverage.
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
......@@ -54,7 +71,7 @@ extern "C" {
// in a contiguous region of memory. The container owns the region of memory
// [beg, end); the memory [beg, mid) is used to store the current elements
// and the memory [mid, end) is reserved for future elements;
// end <= mid <= end. For example, in "std::vector<> v"
// beg <= mid <= end. For example, in "std::vector<> v"
// beg = &v[0];
// end = beg + v.capacity() * sizeof(v[0]);
// mid = beg + v.size() * sizeof(v[0]);
......@@ -82,6 +99,14 @@ extern "C" {
const void *end,
const void *old_mid,
const void *new_mid);
// Returns true if the contiguous container [beg, end) is properly poisoned
// (e.g. with __sanitizer_annotate_contiguous_container), i.e. if
// - [beg, mid) is addressable,
// - [mid, end) is unaddressable.
// Full verification requires O(end-beg) time; this function tries to avoid
// such complexity by touching only parts of the container around beg/mid/end.
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
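// A hedged sketch tying the two container entry points above together for a
// vector-like buffer; names and sizes are illustrative. `storage` must be
// SHADOW_GRANULARITY-aligned (e.g. obtained from malloc).
#include <sanitizer/common_interface_defs.h>
#include <stddef.h>
static void shrink_container(char *storage, size_t capacity,
                             size_t old_size, size_t new_size) {
  // The container shrinks: [storage+new_size, storage+old_size) becomes
  // unaddressable.
  __sanitizer_annotate_contiguous_container(storage, storage + capacity,
                                            storage + old_size,
                                            storage + new_size);
  // Cheap self-check around beg/mid/end; returns 1 iff [beg, mid) is
  // addressable and [mid, end) is not.
  (void)__sanitizer_verify_contiguous_container(storage, storage + new_size,
                                                storage + capacity);
}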
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
......
......@@ -37,6 +37,9 @@ struct dfsan_label_info {
void *userdata;
};
/// Signature of the callback argument to dfsan_set_write_callback().
typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
/// Computes the union of \c l1 and \c l2, possibly creating a union label in
/// the process.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
......@@ -72,6 +75,14 @@ int dfsan_has_label(dfsan_label label, dfsan_label elem);
/// that label, else returns 0.
dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
/// Returns the number of labels allocated.
size_t dfsan_get_label_count(void);
/// Sets a callback to be invoked on calls to write(). The callback is invoked
/// before the write is done. The write is not guaranteed to succeed when the
/// callback executes. Pass in NULL to remove any callback.
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
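/// A hedged usage sketch for the callback above; the counter is illustrative.
/// Note the callback runs before the write, so doing I/O inside it could
/// re-enter the hook.
#include <sanitizer/dfsan_interface.h>
static size_t bytes_about_to_be_written;
static void count_writes(int fd, const void *buf, size_t count) {
  (void)fd; (void)buf;
  bytes_about_to_be_written += count;
}
static void install_write_hook(void) {
  dfsan_set_write_callback(count_writes);
  // dfsan_set_write_callback(NULL) removes the hook again.
}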
#ifdef __cplusplus
} // extern "C"
......
......@@ -21,13 +21,24 @@ extern "C" {
// be treated as non-leaks. Disable/enable pairs may be nested.
void __lsan_disable();
void __lsan_enable();
// The heap object into which p points will be treated as a non-leak.
void __lsan_ignore_object(const void *p);
// The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
int __lsan_is_turned_off();
// Memory regions registered through this interface will be treated as sources
// of live pointers during leak checking. Useful if you store pointers in
// mapped memory.
// Points of note:
// - __lsan_unregister_root_region() must be called with the same pointer and
// size that have earlier been passed to __lsan_register_root_region()
// - LSan will skip any inaccessible memory when scanning a root region. E.g.,
// if you map memory within a larger region that you have mprotect'ed, you can
// register the entire large region.
// - the implementation is not optimized for performance. This interface is
// intended to be used for a small number of relatively static regions.
void __lsan_register_root_region(const void *p, size_t size);
void __lsan_unregister_root_region(const void *p, size_t size);
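// A hedged usage sketch for the pair above; the arena setup is illustrative.
#include <sanitizer/lsan_interface.h>
#include <sys/mman.h>
static void *create_scanned_arena(size_t size) {
  void *arena = mmap(0, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  __lsan_register_root_region(arena, size);  // pointers stored here stay live
  return arena;
}
static void destroy_scanned_arena(void *arena, size_t size) {
  // Must use the exact pointer/size that were registered.
  __lsan_unregister_root_region(arena, size);
  munmap(arena, size);
}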
// Calling this function makes LSan enter the leak checking phase immediately.
// Use this if normal end-of-process leak checking happens too late (e.g. if
// you have intentional memory leaks in your shutdown code). Calling this
......@@ -35,6 +46,16 @@ extern "C" {
// most once per process. This function will terminate the process if there
// are memory leaks and the exit_code flag is non-zero.
void __lsan_do_leak_check();
// The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
int __lsan_is_turned_off();
// This function may be optionally provided by the user and should return
// a string containing LSan suppressions.
const char *__lsan_default_suppressions();
#ifdef __cplusplus
} // extern "C"
......
......@@ -17,13 +17,10 @@
#ifdef __cplusplus
extern "C" {
#endif
#if __has_feature(memory_sanitizer)
/* Returns a string describing a stack origin.
Return NULL if the origin is invalid, or is not a stack origin. */
const char *__msan_get_origin_descr_if_stack(uint32_t id);
/* Set raw origin for the memory range. */
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
......@@ -39,6 +36,10 @@ extern "C" {
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
/* Make a null-terminated string fully initialized (without changing its
contents). */
void __msan_unpoison_string(const volatile char *a);
/* Make memory region fully uninitialized (without changing its contents). */
void __msan_poison(const volatile void *a, size_t size);
......@@ -51,6 +52,10 @@ extern "C" {
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
/* Checks that memory range is fully initialized, and reports an error if it
* is not. */
void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
/* Set exit code when error(s) were detected.
Value of 0 means don't change the program exit code. */
void __msan_set_exit_code(int exit_code);
......@@ -67,13 +72,13 @@ extern "C" {
modules that were compiled without the corresponding compiler flag. */
void __msan_set_keep_going(int keep_going);
/* Print shadow and origin for the memory range to stdout in a human-readable
/* Print shadow and origin for the memory range to stderr in a human-readable
format. */
void __msan_print_shadow(const volatile void *x, size_t size);
/* Print current function arguments shadow and origin to stdout in a
/* Print shadow for the memory range to stderr in a minimalistic
human-readable format. */
void __msan_print_param_shadow();
void __msan_dump_shadow(const volatile void *x, size_t size);
/* Returns true if running under a dynamic tool (DynamoRio-based). */
int __msan_has_dynamic_component();
......@@ -86,6 +91,9 @@ extern "C" {
a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __msan_set_death_callback(void (*callback)(void));
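/* A hedged usage sketch for the death callback above; the handler body is
   illustrative. */
#include <sanitizer/msan_interface.h>
static void flush_logs_before_death(void) {
  /* Last-chance cleanup; MSan terminates the process after this returns. */
}
static void install_death_callback(void) {
  __msan_set_death_callback(flush_logs_before_death);
  /* __msan_set_death_callback(0) unsets it again. */
}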
/***********************************/
/* Allocator statistics interface. */
......@@ -132,27 +140,6 @@ extern "C" {
deallocation of "ptr". */
void __msan_malloc_hook(const volatile void *ptr, size_t size);
void __msan_free_hook(const volatile void *ptr);
#else // __has_feature(memory_sanitizer)
#define __msan_get_origin_descr_if_stack(id) ((const char*)0)
#define __msan_set_origin(a, size, origin)
#define __msan_get_origin(a) ((uint32_t)-1)
#define __msan_get_track_origins() (0)
#define __msan_get_umr_origin() ((uint32_t)-1)
#define __msan_unpoison(a, size)
#define __msan_poison(a, size)
#define __msan_partial_poison(data, shadow, size)
#define __msan_test_shadow(x, size) ((intptr_t)-1)
#define __msan_set_exit_code(exit_code)
#define __msan_set_expect_umr(expect_umr)
#define __msan_print_shadow(x, size)
#define __msan_print_param_shadow()
#define __msan_has_dynamic_component() (0)
#define __msan_allocated_memory(data, size)
#endif // __has_feature(memory_sanitizer)
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -13,7 +13,8 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
#if !defined(__linux__) && !defined(__FreeBSD__) && \
!defined(__APPLE__) && !defined(_WIN32)
# error "Interception doesn't work on this operating system."
#endif
......@@ -233,11 +234,11 @@ typedef unsigned long uptr; // NOLINT
#define INCLUDED_FROM_INTERCEPTION_LIB
#if defined(__linux__)
#if defined(__linux__) || defined(__FreeBSD__)
# include "interception_linux.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX(func, symver)
INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif defined(__APPLE__)
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
......
......@@ -10,10 +10,10 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#if defined(__linux__) || defined(__FreeBSD__)
#include "interception.h"
#include <dlfcn.h> // for dlsym
#include <dlfcn.h> // for dlsym() and dlvsym()
namespace __interception {
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
......@@ -31,4 +31,4 @@ void *GetFuncAddrVer(const char *func_name, const char *ver) {
} // namespace __interception
#endif // __linux__
#endif // __linux__ || __FreeBSD__
......@@ -10,7 +10,7 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#if defined(__linux__) || defined(__FreeBSD__)
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only"
......@@ -26,20 +26,20 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
void *GetFuncAddrVer(const char *func_name, const char *ver);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX(func) \
::__interception::GetRealFunctionAddress( \
#func, (::__interception::uptr*)&REAL(func), \
(::__interception::uptr)&(func), \
(::__interception::uptr)&WRAP(func))
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::GetRealFunctionAddress( \
#func, (::__interception::uptr *)&__interception::PTR_TO_REAL(func), \
(::__interception::uptr) & (func), \
(::__interception::uptr) & WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym
# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, symver)
#else
# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
INTERCEPT_FUNCTION_LINUX(func)
# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H
#endif // __linux__
#endif // __linux__ || __FreeBSD__
......@@ -17,13 +17,13 @@
#include <stddef.h>
#include <stdint.h>
COMPILER_CHECK(sizeof(SIZE_T) == sizeof(size_t));
COMPILER_CHECK(sizeof(SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(INTMAX_T) == sizeof(intmax_t));
COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));
COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
#ifndef __APPLE__
COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif
// The following are the cases when pread (and friends) is used instead of
......@@ -31,7 +31,7 @@ COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
# if defined(__ANDROID__) || !defined _FILE_OFFSET_BITS || \
_FILE_OFFSET_BITS != 64
COMPILER_CHECK(sizeof(OFF_T) == sizeof(off_t));
COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
# endif
#endif
......@@ -54,91 +54,136 @@ static void WriteJumpInstruction(char *jmp_from, char *to) {
*(ptrdiff_t*)(jmp_from + 1) = offset;
}
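// Worked example of the instruction written above: a 5-byte "E9 rel32" jump
// whose rel32 is measured from the end of the instruction. Addresses are
// illustrative.
//   jmp_from = 0x00401000  (entry of the function being patched)
//   to       = 0x00501000  (the wrapper)
//   offset   = to - jmp_from - 5 = 0x000FFFFB
//   bytes written at 0x00401000: E9 FB FF 0F 00   (little-endian rel32)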
bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
#ifdef _WIN64
# error OverrideFunction was not tested on x64
#endif
// Basic idea:
// We write 5 bytes (jmp-to-new_func) at the beginning of the 'old_func'
// to override it. We want to be able to execute the original 'old_func' from
// the wrapper, so we need to keep the leading 5+ bytes ('head') of the
// original instructions somewhere with a "jmp old_func+head".
// We call these 'head'+5 bytes of instructions a "trampoline".
static char *GetMemoryForTrampoline(size_t size) {
// Trampolines are allocated from a common pool.
const int POOL_SIZE = 1024;
static char *pool = NULL;
static size_t pool_used = 0;
if (pool == NULL) {
pool = (char*)VirtualAlloc(NULL, POOL_SIZE,
MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
// FIXME: set PAGE_EXECUTE_READ access after setting all interceptors?
if (pool == NULL)
return false;
if (!pool) {
pool = (char *)VirtualAlloc(NULL, POOL_SIZE, MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
// FIXME: Might want to apply PAGE_EXECUTE_READ access after all the
// interceptors are in place.
if (!pool)
return NULL;
_memset(pool, 0xCC /* int 3 */, POOL_SIZE);
}
char* old_bytes = (char*)old_func;
char* trampoline = pool + pool_used;
if (pool_used + size > POOL_SIZE)
return NULL;
// Find out the number of bytes of the instructions we need to copy to the
// island and store it in 'head'.
size_t head = 0;
while (head < 5) {
switch (old_bytes[head]) {
char *ret = pool + pool_used;
pool_used += size;
return ret;
}
// Returns 0 on error.
static size_t RoundUpToInstrBoundary(size_t size, char *code) {
size_t cursor = 0;
while (cursor < size) {
switch (code[cursor]) {
case '\x51': // push ecx
case '\x52': // push edx
case '\x53': // push ebx
case '\x54': // push esp
case '\x55': // push ebp
case '\x56': // push esi
case '\x57': // push edi
head++;
case '\x5D': // pop ebp
cursor++;
continue;
case '\x6A': // 6A XX = push XX
cursor += 2;
continue;
case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
cursor += 5;
continue;
}
switch (*(unsigned short*)(old_bytes + head)) { // NOLINT
switch (*(unsigned short*)(code + cursor)) { // NOLINT
case 0xFF8B: // 8B FF = mov edi, edi
case 0xEC8B: // 8B EC = mov ebp, esp
case 0xC033: // 33 C0 = xor eax, eax
head += 2;
cursor += 2;
continue;
case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
case 0x5D8B: // 8B 5D XX = mov ebx, dword ptr [ebp+XXh]
case 0xEC83: // 83 EC XX = sub esp, XX
head += 3;
cursor += 3;
continue;
case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
head += 6;
cursor += 6;
continue;
case 0x3D83: // 83 3D XX YY ZZ WW TT = cmp dword ptr [WWZZYYXX], TT
cursor += 7;
continue;
}
switch (0x00FFFFFF & *(unsigned int*)(old_bytes + head)) {
switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
case 0x24448A: // 8A 44 24 XX = mov al, byte ptr [esp+XXh]
case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
case 0x24748B: // 8B 74 24 XX = mov esi, dword ptr [esp+XXh]
case 0x247C8B: // 8B 7C 24 XX = mov edi, dword ptr [esp+XXh]
head += 4;
cursor += 4;
continue;
}
// Unknown instruction!
return false;
// FIXME: Unknown instruction failures might happen when we add a new
// interceptor or a new compiler version. In either case, they should result
// in visible and readable error messages. However, merely calling abort()
// or __debugbreak() leads to an infinite recursion in CheckFailed.
// Do we have a good way to abort with an error message here?
return 0;
}
if (pool_used + head + 5 > POOL_SIZE)
return false;
return cursor;
}
bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
#ifdef _WIN64
#error OverrideFunction is not yet supported on x64
#endif
// Function overriding works basically like this:
// We write "jmp <new_func>" (5 bytes) at the beginning of the 'old_func'
// to override it.
// We might want to be able to execute the original 'old_func' from the
// wrapper, in this case we need to keep the leading 5+ bytes ('head')
// of the original code somewhere with a "jmp <old_func+head>".
// We call these 'head'+5 bytes of instructions a "trampoline".
char *old_bytes = (char *)old_func;
// We'll need at least 5 bytes for a 'jmp'.
size_t head = 5;
if (orig_old_func) {
// Find out the number of bytes of the instructions we need to copy
// to the trampoline and store it in 'head'.
head = RoundUpToInstrBoundary(head, old_bytes);
if (!head)
return false;
// Put the needed instructions into the trampoline bytes.
char *trampoline = GetMemoryForTrampoline(head + 5);
if (!trampoline)
return false;
_memcpy(trampoline, old_bytes, head);
WriteJumpInstruction(trampoline + head, old_bytes + head);
*orig_old_func = (uptr)trampoline;
}
// Now put the "jump to trampoline" instruction into the original code.
// Now put the "jmp <new_func>" instruction at the original code location.
// We should preserve the EXECUTE flag as some of our own code might be
// located in the same page (sic!). FIXME: might consider putting the
// __interception code into a separate section or something?
DWORD old_prot, unused_prot;
if (!VirtualProtect((void*)old_func, head, PAGE_EXECUTE_READWRITE,
if (!VirtualProtect((void *)old_bytes, head, PAGE_EXECUTE_READWRITE,
&old_prot))
return false;
// Put the needed instructions into the trampoline bytes.
_memcpy(trampoline, old_bytes, head);
WriteJumpInstruction(trampoline + head, old_bytes + head);
*orig_old_func = (uptr)trampoline;
pool_used += head + 5;
// Intercept the 'old_func'.
WriteJumpInstruction(old_bytes, (char*)new_func);
WriteJumpInstruction(old_bytes, (char *)new_func);
_memset(old_bytes + 5, 0xCC /* int 3 */, head - 5);
if (!VirtualProtect((void*)old_func, head, old_prot, &unused_prot))
// Restore the original permissions.
if (!VirtualProtect((void *)old_bytes, head, old_prot, &unused_prot))
return false; // not clear if this failure bothers us.
return true;
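WriteJumpInstruction is used above but defined outside this hunk. As a hedged sketch of what it presumably emits -- a 5-byte "E9 rel32" jump, matching the E9 case the instruction scanner above already decodes -- the helper name below is invented for illustration:
#include <cstddef>  // ptrdiff_t
// Sketch only, assuming the real WriteJumpInstruction writes an E9 jump.
static void WriteJumpInstructionSketch(char *jmp_from, char *to) {
  // E9 rel32: the 32-bit displacement is relative to the *next*
  // instruction, i.e. to jmp_from + 5.
  ptrdiff_t offset = to - jmp_from - 5;
  *jmp_from = '\xE9';
  *(unsigned int *)(jmp_from + 1) = (unsigned int)offset;
}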
......
......@@ -33,6 +33,11 @@ static void InitializeCommonFlags() {
ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
}
///// Interface to the common LSan module. /////
bool WordIsPoisoned(uptr addr) {
return false;
}
} // namespace __lsan
using namespace __lsan; // NOLINT
......@@ -53,12 +58,7 @@ extern "C" void __lsan_init() {
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
// Start symbolizer process if necessary.
if (common_flags()->symbolize) {
Symbolizer::Init(common_flags()->external_symbolizer_path);
} else {
Symbolizer::Disable();
}
Symbolizer::Init(common_flags()->external_symbolizer_path);
InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
......
......@@ -141,7 +141,11 @@ uptr PointsIntoChunk(void* p) {
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
if (m->allocated && addr < chunk + m->requested_size)
if (!m->allocated)
return 0;
if (addr < chunk + m->requested_size)
return chunk;
if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
return chunk;
return 0;
}
......
......@@ -19,7 +19,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#if SANITIZER_LINUX && defined(__x86_64__)
#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
......@@ -49,6 +49,8 @@ struct Flags {
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
// Print matched suppressions after leak checking.
bool print_suppressions;
// Suppressions file name.
const char* suppressions;
......@@ -61,12 +63,13 @@ struct Flags {
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
// Regions added via __lsan_register_root_region().
bool use_root_regions;
// Consider unaligned pointers valid.
bool use_unaligned;
// User-visible verbosity.
int verbosity;
// Consider pointers found in poisoned memory to be valid.
bool use_poisoned;
// Debug logging.
bool log_pointers;
......@@ -77,6 +80,7 @@ extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
struct Leak {
u32 id;
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
......@@ -84,17 +88,31 @@ struct Leak {
bool is_suppressed;
};
struct LeakedObject {
u32 leak_id;
uptr addr;
uptr size;
};
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
LeakReport() : leaks_(1) {}
void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag);
void PrintLargest(uptr max_leaks);
LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
ChunkTag tag);
void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
bool IsEmpty() { return leaks_.size() == 0; }
uptr ApplySuppressions();
void ApplySuppressions();
uptr UnsuppressedLeakCount();
private:
void PrintReportForLeak(uptr index);
void PrintLeakedObjectsForLeak(uptr index);
u32 next_id_;
InternalMmapVector<Leak> leaks_;
InternalMmapVector<LeakedObject> leaked_objects_;
};
typedef InternalMmapVector<uptr> Frontier;
......@@ -119,6 +137,15 @@ void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
// Special case for "new T[0]" where T is a type with DTOR.
// new T[0] will allocate one word for the array size (0) and store a pointer
// to the end of allocated chunk.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
uptr addr) {
return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
*reinterpret_cast<uptr *>(chunk_beg) == 0;
}
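As a quick standalone illustration of the layout this check assumes (the cookie variable and the demo wrapper below are fabricated for the example; 'uptr' is spelled out so the snippet compiles alone):
#include <cstdio>
typedef unsigned long uptr;
// Mirrors IsSpecialCaseOfOperatorNew0 above, for illustration only.
static bool IsSpecialCaseOfOperatorNew0Demo(uptr chunk_beg, uptr chunk_size,
                                            uptr addr) {
  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg) == 0;
}
int main() {
  uptr cookie = 0;  // the stored array length for "new T[0]"
  uptr chunk = reinterpret_cast<uptr>(&cookie);
  // The user pointer points one word past the chunk start.
  std::printf("%d\n", IsSpecialCaseOfOperatorNew0Demo(chunk, sizeof(uptr),
                                                      chunk + sizeof(uptr)));
  return 0;
}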
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
......@@ -127,6 +154,8 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
......
......@@ -17,6 +17,7 @@
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
......@@ -41,11 +42,11 @@ void InitializePlatformSpecificModules() {
return;
}
if (num_matches == 0)
Report("LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
VReport(1, "LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
else if (num_matches > 1)
Report("LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
linker = 0;
}
......@@ -81,6 +82,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
if (!flags()->use_globals) return;
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
// reentrant, so we should be able to fix this by acquiring the lock before
......@@ -127,6 +129,21 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
......
......@@ -10,11 +10,11 @@
//
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
......@@ -32,19 +32,19 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
#define GET_STACK_TRACE \
StackTrace stack; \
{ \
uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \
bool fast = common_flags()->fast_unwind_on_malloc; \
if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
#define GET_STACK_TRACE \
StackTrace stack; \
{ \
uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \
bool fast = common_flags()->fast_unwind_on_malloc; \
if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), 0, \
stack_top, stack_bottom, fast); \
}
#define ENSURE_LSAN_INITED do { \
......@@ -150,7 +150,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
INTERCEPTOR(void, cfree, void *p) ALIAS("free");
INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
#define OPERATOR_NEW_BODY \
ENSURE_LSAN_INITED; \
......@@ -171,9 +171,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
......@@ -183,7 +183,8 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
ALIAS(WRAPPER_NAME(memalign));
///// Thread initialization and finalization. /////
......@@ -236,7 +237,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
pthread_attr_init(&myattr);
attr = &myattr;
}
AdjustStackSizeLinux(attr);
AdjustStackSize(attr);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
......
......@@ -22,6 +22,8 @@ sanitizer_common_files = \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
......@@ -30,20 +32,23 @@ sanitizer_common_files = \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
sanitizer_procmaps_linux.cc \
sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
......
......@@ -65,19 +65,23 @@ LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_common_libcdep.lo sanitizer_coverage.lo \
sanitizer_flags.lo sanitizer_libc.lo sanitizer_libignore.lo \
sanitizer_linux.lo sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_deadlock_detector1.lo \
sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix_libcdep.lo \
sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
sanitizer_stacktrace_libcdep.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_suppressions.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_symbolizer.lo \
sanitizer_suppressions.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo sanitizer_thread_registry.lo \
sanitizer_win.lo
sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
sanitizer_tls_get_addr.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
......@@ -253,6 +257,8 @@ sanitizer_common_files = \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
......@@ -261,20 +267,23 @@ sanitizer_common_files = \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
sanitizer_procmaps_linux.cc \
sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
......@@ -374,6 +383,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
......@@ -385,6 +396,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
......@@ -396,6 +409,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.cc.o:
......
//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H
#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"
namespace __sanitizer {
// Concurrent uptr->T hashmap.
// T must be a POD type; kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
// Map::Handle h(&m, addr);
// use h.operator->() to access the data
// if h.created() then the element was just created, and the current thread
// has exclusive access to it
// otherwise the current thread has only read access to the data
// }
// {
// Map::Handle h(&m, addr, true);
// this will remove the data from the map in Handle dtor
// the current thread has exclusive access to the data
// if !h.exists() then the element never existed
// }
template<typename T, uptr kSize>
class AddrHashMap {
private:
struct Cell {
atomic_uintptr_t addr;
T val;
};
struct AddBucket {
uptr cap;
uptr size;
Cell cells[1]; // variable len
};
static const uptr kBucketSize = 3;
struct Bucket {
RWMutex mtx;
atomic_uintptr_t add;
Cell cells[kBucketSize];
};
public:
AddrHashMap();
class Handle {
public:
Handle(AddrHashMap<T, kSize> *map, uptr addr);
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
~Handle();
T *operator->();
bool created() const;
bool exists() const;
private:
friend AddrHashMap<T, kSize>;
AddrHashMap<T, kSize> *map_;
Bucket *bucket_;
Cell *cell_;
uptr addr_;
uptr addidx_;
bool created_;
bool remove_;
bool create_;
};
private:
friend class Handle;
Bucket *table_;
void acquire(Handle *h);
void release(Handle *h);
uptr calcHash(uptr addr);
};
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
map_ = map;
addr_ = addr;
remove_ = false;
create_ = true;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
bool remove) {
map_ = map;
addr_ = addr;
remove_ = remove;
create_ = true;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
bool remove, bool create) {
map_ = map;
addr_ = addr;
remove_ = remove;
create_ = create;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
map_->release(this);
}
template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
return &cell_->val;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
return cell_ != 0;
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
h->created_ = false;
h->addidx_ = -1U;
h->bucket_ = b;
h->cell_ = 0;
// If we want to remove the element, we need exclusive access to the bucket,
// so skip the lock-free phase.
if (h->remove_)
goto locked;
retry:
// First try to find an existing element w/o read mutex.
CHECK(!h->remove_);
// Check the embed cells.
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
if (addr1 == addr) {
h->cell_ = c;
return;
}
}
// Check the add cells with read lock.
if (atomic_load(&b->add, memory_order_relaxed)) {
b->mtx.ReadLock();
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
for (uptr i = 0; i < add->size; i++) {
Cell *c = &add->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
h->addidx_ = i;
h->cell_ = c;
return;
}
}
b->mtx.ReadUnlock();
}
locked:
// Re-check existence under write lock.
// Embed cells.
b->mtx.Lock();
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
if (h->remove_) {
h->cell_ = c;
return;
}
b->mtx.Unlock();
goto retry;
}
}
// Add cells.
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
if (add) {
for (uptr i = 0; i < add->size; i++) {
Cell *c = &add->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
if (h->remove_) {
h->addidx_ = i;
h->cell_ = c;
return;
}
b->mtx.Unlock();
goto retry;
}
}
}
// The element does not exist; there is nothing to do if we came to remove
// it or were asked not to create it.
if (h->remove_ || !h->create_) {
b->mtx.Unlock();
return;
}
// Now try to create it under the mutex.
h->created_ = true;
// See if we have a free embed cell.
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == 0) {
h->cell_ = c;
return;
}
}
// Store in the add cells.
if (add == 0) {
// Allocate a new add array.
const uptr kInitSize = 64;
add = (AddBucket*)InternalAlloc(kInitSize);
internal_memset(add, 0, kInitSize);
add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
add->size = 0;
atomic_store(&b->add, (uptr)add, memory_order_relaxed);
}
if (add->size == add->cap) {
// Grow existing add array.
uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
uptr newsize = oldsize * 2;
AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
internal_memset(add1, 0, newsize);
add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
add1->size = add->size;
internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
InternalFree(add);
atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
add = add1;
}
// Store.
uptr i = add->size++;
Cell *c = &add->cells[i];
CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
h->addidx_ = i;
h->cell_ = c;
}
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
if (h->cell_ == 0)
return;
Bucket *b = h->bucket_;
Cell *c = h->cell_;
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (h->created_) {
// Denote completion of insertion.
CHECK_EQ(addr1, 0);
// After the following store, the element becomes available
// for lock-free reads.
atomic_store(&c->addr, h->addr_, memory_order_release);
b->mtx.Unlock();
} else if (h->remove_) {
// Denote that the cell is empty now.
CHECK_EQ(addr1, h->addr_);
atomic_store(&c->addr, 0, memory_order_release);
// See if we need to compact the bucket.
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
if (h->addidx_ == -1U) {
// Removed from embed array, move an add element into the freed cell.
if (add && add->size != 0) {
uptr last = --add->size;
Cell *c1 = &add->cells[last];
c->val = c1->val;
uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
atomic_store(&c->addr, addr1, memory_order_release);
atomic_store(&c1->addr, 0, memory_order_release);
}
} else {
// Removed from add array, compact it.
uptr last = --add->size;
Cell *c1 = &add->cells[last];
if (c != c1) {
*c = *c1;
atomic_store(&c1->addr, 0, memory_order_relaxed);
}
}
if (add && add->size == 0) {
// FIXME(dvyukov): free add?
}
b->mtx.Unlock();
} else {
CHECK_EQ(addr1, h->addr_);
if (h->addidx_ != -1U)
b->mtx.ReadUnlock();
}
}
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
addr += addr << 10;
addr ^= addr >> 6;
return addr % kSize;
}
} // namespace __sanitizer
#endif // SANITIZER_ADDRHASHMAP_H
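To make the Handle protocol described at the top of this header concrete, here is a hypothetical usage sketch (the payload type, map size, and function names are invented for illustration, and the header above is assumed to be in scope):
struct RefCount { int refs; };                // T must be POD
typedef AddrHashMap<RefCount, 31051> RefMap;  // kSize: preferably prime
static RefMap ref_map;

void OnAcquire(uptr addr) {
  RefMap::Handle h(&ref_map, addr);
  if (h.created())
    h->refs = 1;  // freshly inserted: exclusive access until ~Handle
}

void OnDestroy(uptr addr) {
  RefMap::Handle h(&ref_map, addr, /*remove=*/true);  // erased in ~Handle
  if (!h.exists())
    return;  // the element was never in the map
}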
......@@ -17,7 +17,7 @@
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO)
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
......@@ -115,7 +115,7 @@ void *LowLevelAllocator::Allocate(uptr size) {
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = Max(size, GetPageSizeCached());
allocated_current_ =
(char*)MmapOrDie(size_to_allocate, __FUNCTION__);
(char*)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
low_level_alloc_callback((uptr)allocated_current_,
......
......@@ -22,14 +22,13 @@ namespace __sanitizer {
typedef CompactSizeClassMap InternalSizeClassMap;
static const uptr kInternalAllocatorSpace = 0;
static const u64 kInternalAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
#if SANITIZER_WORDSIZE == 32
static const u64 kInternalAllocatorSize = (1ULL << 32);
static const uptr kInternalAllocatorRegionSizeLog = 20;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
static const u64 kInternalAllocatorSize = (1ULL << 47);
static const uptr kInternalAllocatorRegionSizeLog = 24;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
......@@ -46,10 +45,10 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
// LargeMmapAllocator.
struct CrashOnMapUnmap {
void OnMap(uptr p, uptr size) const {
RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!");
RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!\n");
}
void OnUnmap(uptr p, uptr size) const {
RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!");
RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!\n");
}
};
......
......@@ -42,7 +42,8 @@ struct atomic_uint32_t {
struct atomic_uint64_t {
typedef u64 Type;
volatile Type val_dont_use;
// On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
volatile ALIGNED(8) Type val_dont_use;
};
struct atomic_uintptr_t {
......
......@@ -13,8 +13,26 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
#if defined(__i386__) || defined(__x86_64__)
# include "sanitizer_atomic_clang_x86.h"
#else
# include "sanitizer_atomic_clang_other.h"
#endif
namespace __sanitizer {
// We would like to just use compiler builtin atomic operations
// for loads and stores, but they are mostly broken in clang:
// - they lead to vastly inefficient code generation
// (http://llvm.org/bugs/show_bug.cgi?id=17281)
// - 64-bit atomic operations are not implemented on x86_32
// (http://llvm.org/bugs/show_bug.cgi?id=15034)
// - they are not implemented on ARM
// error: undefined reference to '__atomic_load_4'
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
INLINE void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}
......@@ -23,59 +41,6 @@ INLINE void atomic_thread_fence(memory_order) {
__sync_synchronize();
}
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
#endif
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
INLINE typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME:
// 64-bit atomic operations are not atomic on 32-bit platforms.
// The implementation lacks necessary memory fences on ARM/PPC.
// We would like to use compiler builtin atomic operations,
// but they are mostly broken:
// - they lead to vastly inefficient code generation
// (http://llvm.org/bugs/show_bug.cgi?id=17281)
// - 64-bit atomic operations are not implemented on x86_32
// (http://llvm.org/bugs/show_bug.cgi?id=15034)
// - they are not implemented on ARM
// error: undefined reference to '__atomic_load_4'
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
atomic_signal_fence(memory_order_seq_cst);
v = a->val_dont_use;
atomic_signal_fence(memory_order_seq_cst);
}
return v;
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
atomic_signal_fence(memory_order_seq_cst);
a->val_dont_use = v;
atomic_signal_fence(memory_order_seq_cst);
}
if (mo == memory_order_seq_cst)
atomic_thread_fence(memory_order_seq_cst);
}
template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
......
//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H
namespace __sanitizer {
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
INLINE typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned loads are atomic.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else if (mo == memory_order_consume) {
// Assume that processor respects data dependencies
// (and that compiler won't break them).
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
} else if (mo == memory_order_acquire) {
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__sync_synchronize();
} else { // seq_cst
// E.g. on POWER we need a hw fence even before the load.
__sync_synchronize();
v = a->val_dont_use;
__sync_synchronize();
}
} else {
// 64-bit load on 32-bit platform.
// Gross, but simple and reliable.
// Assume that it is not in read-only memory.
v = __sync_fetch_and_add(
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
}
return v;
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned stores are atomic.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else if (mo == memory_order_release) {
__sync_synchronize();
a->val_dont_use = v;
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
__sync_synchronize();
a->val_dont_use = v;
__sync_synchronize();
}
} else {
// 64-bit store on 32-bit platform.
// Gross, but simple and reliable.
typename T::Type cmp = a->val_dont_use;
typename T::Type cur;
for (;;) {
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
if (cur == cmp)  // The CAS succeeded: the 64-bit value is now v.
break;
cmp = cur;
}
}
}
} // namespace __sanitizer
#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H
namespace __sanitizer {
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
INLINE typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned loads are atomic.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else if (mo == memory_order_consume) {
// Assume that processor respects data dependencies
// (and that compiler won't break them).
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
} else if (mo == memory_order_acquire) {
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
// On x86 loads are implicitly acquire.
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
// On x86 a plain MOV is enough for a seq_cst load.
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
}
} else {
// 64-bit load on 32-bit platform.
__asm__ __volatile__(
"movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
"movq %%mm0, %0;" // (ptr could be read-only)
"emms;" // Empty mmx state/Reset FP regs
: "=m" (v)
: "m" (a->val_dont_use)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif // #ifdef __MMX__
"memory");
}
return v;
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned stores are atomic.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else if (mo == memory_order_release) {
// On x86 stores are implicitly release.
__asm__ __volatile__("" ::: "memory");
a->val_dont_use = v;
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
// On x86 stores are implicitly release.
__asm__ __volatile__("" ::: "memory");
a->val_dont_use = v;
__sync_synchronize();
}
} else {
// 64-bit store on 32-bit platform.
__asm__ __volatile__(
"movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
"movq %%mm0, %0;"
"emms;" // Empty mmx state/Reset FP regs
: "=m" (a->val_dont_use)
: "m" (v)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif // #ifdef __MMX__
"memory");
if (mo == memory_order_seq_cst)
__sync_synchronize();
}
}
} // namespace __sanitizer
#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
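Taken together, the branches above implement the standard x86 mapping referenced earlier (the cl.cam.ac.uk table). As an illustration of the header's intent rather than additional API, and assuming the surrounding sanitizer_atomic.h definitions are in scope, a 32-bit flag reduces to roughly the instructions in the comments:
atomic_uint32_t flag;
// seq_cst store: plain MOV followed by a full fence (__sync_synchronize).
atomic_store(&flag, 1, memory_order_seq_cst);
// seq_cst load: plain MOV bracketed only by compiler barriers.
u32 v = atomic_load(&flag, memory_order_seq_cst);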
......@@ -22,8 +22,20 @@ extern "C" void _mm_pause();
extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
extern "C" short _InterlockedCompareExchange16( // NOLINT
short volatile *Destination, // NOLINT
short Exchange, short Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange16)
extern "C"
long long _InterlockedCompareExchange64( // NOLINT
long long volatile *Destination, // NOLINT
long long Exchange, long long Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange64)
#ifdef _WIN64
extern "C" long long _InterlockedExchangeAdd64( // NOLINT
long long volatile * Addend, long long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd64)
extern "C" void *_InterlockedCompareExchangePointer(
void *volatile *Destination,
void *Exchange, void *Comparand);
......@@ -106,6 +118,40 @@ INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
(volatile long*)&a->val_dont_use, (long)v); // NOLINT
}
INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
return (uptr)_InterlockedExchangeAdd64(
(volatile long long*)&a->val_dont_use, (long long)v); // NOLINT
#else
return (uptr)_InterlockedExchangeAdd(
(volatile long*)&a->val_dont_use, (long)v); // NOLINT
#endif
}
INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u32)_InterlockedExchangeAdd(
(volatile long*)&a->val_dont_use, -(long)v); // NOLINT
}
INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
return (uptr)_InterlockedExchangeAdd64(
(volatile long long*)&a->val_dont_use, -(long long)v); // NOLINT
#else
return (uptr)_InterlockedExchangeAdd(
(volatile long*)&a->val_dont_use, -(long)v); // NOLINT
#endif
}
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) {
(void)mo;
......@@ -166,6 +212,45 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
u16 *cmp,
u16 xchg,
memory_order mo) {
u16 cmpv = *cmp;
u16 prev = (u16)_InterlockedCompareExchange16(
(volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
u32 *cmp,
u32 xchg,
memory_order mo) {
u32 cmpv = *cmp;
u32 prev = (u32)_InterlockedCompareExchange(
(volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
u64 *cmp,
u64 xchg,
memory_order mo) {
u64 cmpv = *cmp;
u64 prev = (u64)_InterlockedCompareExchange64(
(volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
......
//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specializer BitVector implementation.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_BITVECTOR_H
#define SANITIZER_BITVECTOR_H
#include "sanitizer_common.h"
namespace __sanitizer {
// Fixed size bit vector based on a single basic integer.
template <class basic_int_t = uptr>
class BasicBitVector {
public:
enum SizeEnum { kSize = sizeof(basic_int_t) * 8 };
uptr size() const { return kSize; }
// No CTOR.
void clear() { bits_ = 0; }
void setAll() { bits_ = ~(basic_int_t)0; }
bool empty() const { return bits_ == 0; }
// Returns true if the bit has changed from 0 to 1.
bool setBit(uptr idx) {
basic_int_t old = bits_;
bits_ |= mask(idx);
return bits_ != old;
}
// Returns true if the bit has changed from 1 to 0.
bool clearBit(uptr idx) {
basic_int_t old = bits_;
bits_ &= ~mask(idx);
return bits_ != old;
}
bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }
uptr getAndClearFirstOne() {
CHECK(!empty());
uptr idx = LeastSignificantSetBitIndex(bits_);
clearBit(idx);
return idx;
}
// Do "this |= v" and return whether new bits have been added.
bool setUnion(const BasicBitVector &v) {
basic_int_t old = bits_;
bits_ |= v.bits_;
return bits_ != old;
}
// Do "this &= v" and return whether any bits have been removed.
bool setIntersection(const BasicBitVector &v) {
basic_int_t old = bits_;
bits_ &= v.bits_;
return bits_ != old;
}
// Do "this &= ~v" and return whether any bits have been removed.
bool setDifference(const BasicBitVector &v) {
basic_int_t old = bits_;
bits_ &= ~v.bits_;
return bits_ != old;
}
void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }
// Returns true if 'this' intersects with 'v'.
bool intersectsWith(const BasicBitVector &v) const {
return (bits_ & v.bits_) != 0;
}
// for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
// uptr idx = it.next();
// use(idx);
// }
class Iterator {
public:
Iterator() { }
explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}
bool hasNext() const { return !bv_.empty(); }
uptr next() { return bv_.getAndClearFirstOne(); }
void clear() { bv_.clear(); }
private:
BasicBitVector bv_;
};
private:
basic_int_t mask(uptr idx) const {
CHECK_LT(idx, size());
return (basic_int_t)1UL << idx;
}
basic_int_t bits_;
};
// Fixed size bit vector of (kLevel1Size*BV::kSize**2) bits.
// The implementation is optimized for better performance on
// sparse bit vectors, i.e. those with few set bits.
template <uptr kLevel1Size = 1, class BV = BasicBitVector<> >
class TwoLevelBitVector {
// This is essentially a 2-level bit vector.
// Set bit in the first level BV indicates that there are set bits
// in the corresponding BV of the second level.
// This structure allows O(kLevel1Size) time for clear() and empty(),
// as well fast handling of sparse BVs.
public:
enum SizeEnum { kSize = BV::kSize * BV::kSize * kLevel1Size };
// No CTOR.
uptr size() const { return kSize; }
void clear() {
for (uptr i = 0; i < kLevel1Size; i++)
l1_[i].clear();
}
void setAll() {
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
l1_[i0].setAll();
for (uptr i1 = 0; i1 < BV::kSize; i1++)
l2_[i0][i1].setAll();
}
}
bool empty() const {
for (uptr i = 0; i < kLevel1Size; i++)
if (!l1_[i].empty())
return false;
return true;
}
// Returns true if the bit has changed from 0 to 1.
bool setBit(uptr idx) {
check(idx);
uptr i0 = idx0(idx);
uptr i1 = idx1(idx);
uptr i2 = idx2(idx);
if (!l1_[i0].getBit(i1)) {
l1_[i0].setBit(i1);
l2_[i0][i1].clear();
}
bool res = l2_[i0][i1].setBit(i2);
// Printf("%s: %zd => %zd %zd %zd; %d\n", __func__,
// idx, i0, i1, i2, res);
return res;
}
bool clearBit(uptr idx) {
check(idx);
uptr i0 = idx0(idx);
uptr i1 = idx1(idx);
uptr i2 = idx2(idx);
bool res = false;
if (l1_[i0].getBit(i1)) {
res = l2_[i0][i1].clearBit(i2);
if (l2_[i0][i1].empty())
l1_[i0].clearBit(i1);
}
return res;
}
bool getBit(uptr idx) const {
check(idx);
uptr i0 = idx0(idx);
uptr i1 = idx1(idx);
uptr i2 = idx2(idx);
// Printf("%s: %zd => %zd %zd %zd\n", __func__, idx, i0, i1, i2);
return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);
}
uptr getAndClearFirstOne() {
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
if (l1_[i0].empty()) continue;
uptr i1 = l1_[i0].getAndClearFirstOne();
uptr i2 = l2_[i0][i1].getAndClearFirstOne();
if (!l2_[i0][i1].empty())
l1_[i0].setBit(i1);
uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;
// Printf("getAndClearFirstOne: %zd %zd %zd => %zd\n", i0, i1, i2, res);
return res;
}
CHECK(0);
return 0;
}
// Do "this |= v" and return whether new bits have been added.
bool setUnion(const TwoLevelBitVector &v) {
bool res = false;
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
BV t = v.l1_[i0];
while (!t.empty()) {
uptr i1 = t.getAndClearFirstOne();
if (l1_[i0].setBit(i1))
l2_[i0][i1].clear();
if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))
res = true;
}
}
return res;
}
// Do "this &= v" and return whether any bits have been removed.
bool setIntersection(const TwoLevelBitVector &v) {
bool res = false;
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
if (l1_[i0].setIntersection(v.l1_[i0]))
res = true;
if (!l1_[i0].empty()) {
BV t = l1_[i0];
while (!t.empty()) {
uptr i1 = t.getAndClearFirstOne();
if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))
res = true;
if (l2_[i0][i1].empty())
l1_[i0].clearBit(i1);
}
}
}
return res;
}
// Do "this &= ~v" and return whether any bits have been removed.
bool setDifference(const TwoLevelBitVector &v) {
bool res = false;
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
BV t = l1_[i0];
t.setIntersection(v.l1_[i0]);
while (!t.empty()) {
uptr i1 = t.getAndClearFirstOne();
if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))
res = true;
if (l2_[i0][i1].empty())
l1_[i0].clearBit(i1);
}
}
return res;
}
void copyFrom(const TwoLevelBitVector &v) {
clear();
setUnion(v);
}
// Returns true if 'this' intersects with 'v'.
bool intersectsWith(const TwoLevelBitVector &v) const {
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
BV t = l1_[i0];
t.setIntersection(v.l1_[i0]);
while (!t.empty()) {
uptr i1 = t.getAndClearFirstOne();
if (!v.l1_[i0].getBit(i1)) continue;
if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))
return true;
}
}
return false;
}
// for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
// uptr idx = it.next();
// use(idx);
// }
class Iterator {
public:
Iterator() { }
explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {
it1_.clear();
it2_.clear();
}
bool hasNext() const {
if (it1_.hasNext()) return true;
for (uptr i = i0_; i < kLevel1Size; i++)
if (!bv_.l1_[i].empty()) return true;
return false;
}
uptr next() {
// Printf("++++: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
// it2_.hasNext(), kSize);
if (!it1_.hasNext() && !it2_.hasNext()) {
for (; i0_ < kLevel1Size; i0_++) {
if (bv_.l1_[i0_].empty()) continue;
it1_ = typename BV::Iterator(bv_.l1_[i0_]);
// Printf("+i0: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
// it2_.hasNext(), kSize);
break;
}
}
if (!it2_.hasNext()) {
CHECK(it1_.hasNext());
i1_ = it1_.next();
it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);
// Printf("++i1: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
// it2_.hasNext(), kSize);
}
CHECK(it2_.hasNext());
uptr i2 = it2_.next();
uptr res = i0_ * BV::kSize * BV::kSize + i1_ * BV::kSize + i2;
// Printf("+ret: %zd %zd; %d %d; size %zd; res: %zd\n", i0_, i1_,
// it1_.hasNext(), it2_.hasNext(), kSize, res);
if (!it1_.hasNext() && !it2_.hasNext())
i0_++;
return res;
}
private:
const TwoLevelBitVector &bv_;
uptr i0_, i1_;
typename BV::Iterator it1_, it2_;
};
private:
void check(uptr idx) const { CHECK_LT(idx, size()); }
uptr idx0(uptr idx) const {
uptr res = idx / (BV::kSize * BV::kSize);
CHECK_LE(res, kLevel1Size);
return res;
}
uptr idx1(uptr idx) const {
uptr res = (idx / BV::kSize) % BV::kSize;
CHECK_LE(res, BV::kSize);
return res;
}
uptr idx2(uptr idx) const {
uptr res = idx % BV::kSize;
CHECK_LE(res, BV::kSize);
return res;
}
BV l1_[kLevel1Size];
BV l2_[kLevel1Size][BV::kSize];
};
} // namespace __sanitizer
#endif // SANITIZER_BITVECTOR_H
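A small usage sketch of the iterator protocol shown in the class comments above (the function name is invented; the header is assumed to be in scope). Note that Iterator holds a copy of the vector, so iteration consumes only that copy, and indices come back lowest-first:
void VisitSetBits() {
  BasicBitVector<> bv;
  bv.clear();  // no CTOR: must be cleared before first use
  bv.setBit(3);
  bv.setBit(17);
  for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
    uptr idx = it.next();  // yields 3, then 17
    // use(idx);
  }
  // bv itself still has bits 3 and 17 set here.
}
// For TwoLevelBitVector the same protocol applies; a flat index decomposes
// as idx = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2 (see idx0/idx1/idx2).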