Commit ef1b3fda authored by Kostya Serebryany, committed by Kostya Serebryany

libsanitizer merge from upstream r191666

This may break gcc-asan on Mac, will follow up separately.

From-SVN: r204368
parent fd5564d3
2013-11-04 Kostya Serebryany <kcc@google.com>
Update to match the changed asan API.
* asan.c (asan_function_start): New function.
(asan_emit_stack_protection): Update the string stored in the
stack red zone to match new API. Store the PC of the current
function in the red zone.
(asan_global_struct): Update the __asan_global definition to match
the new API.
(asan_add_global): Ditto.
* asan.h (asan_function_start): New prototype.
* final.c (final_start_function): Call asan_function_start.
* sanitizer.def (BUILT_IN_ASAN_INIT): Rename __asan_init_v1 to __asan_init_v3.
2013-11-04 Wei Mi <wmi@google.com>
* gcc/config/i386/i386-c.c (ix86_target_macros_internal): Separate
......@@ -59,11 +59,13 @@ along with GCC; see the file COPYING3. If not see
if ((X & 7) + N - 1 >= ShadowValue)
__asan_report_loadN(X);
Stores are instrumented similarly, but using __asan_report_storeN functions.
A call to __asan_init() is inserted to the list of module CTORs.
A call to __asan_init_vN() is inserted to the list of module CTORs.
N is the version number of the AddressSanitizer API. The changes between the
API versions are listed in libsanitizer/asan/asan_interface_internal.h.
The run-time library redefines malloc (so that redzones are inserted around
the allocated memory) and free (so that reuse of freed memory is delayed),
provides __asan_report* and __asan_init functions.
provides __asan_report* and __asan_init_vN functions.
Read more:
http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
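To make the instrumentation concrete, here is a hedged, source-level sketch of the check described above for a 4-byte load from address X. The real pass emits GIMPLE/RTL rather than C, and the shadow offset value and helper name below are illustrative; only __asan_report_load4 is a real run-time entry point.

    typedef unsigned long uptr;
    extern "C" void __asan_report_load4(uptr);
    static const uptr kShadowOffset = 0x7fff8000;  // illustrative, target-dependent

    static inline void check_load4(uptr X) {
      signed char ShadowValue = *(signed char *)((X >> 3) + kShadowOffset);
      if (ShadowValue != 0 && (signed char)((X & 7) + 4 - 1) >= ShadowValue)
        __asan_report_load4(X);
    }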
......@@ -125,9 +127,11 @@ along with GCC; see the file COPYING3. If not see
where '(...){n}' means the content inside the parenthesis occurs 'n'
times, with 'n' being the number of variables on the stack.
3/ The following 8 bytes contain the PC of the current function which
will be used by the run-time library to print an error message.
3/ The following 16 bytes of the red zone have no particular
format.
4/ The following 8 bytes are reserved for internal use by the run-time.
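For orientation, a hedged sketch of the first words of the frame's left red zone after this change, matching the stores emitted in asan_emit_stack_protection below (64-bit target assumed; the struct tag and the example description string are illustrative):

    struct asan_stack_frame_redzone {   // first 32 bytes of the left red zone
      unsigned long long magic;         // ASAN_STACK_FRAME_MAGIC
      const char        *descr;         // e.g. "foo 1 32 8 3 buf": function name, variable
                                        // count, then offset/size/name data per variable
      const void        *pc;            // address of the LASANPC<N> label, i.e. the function PC
      unsigned long long reserved;      // internal use by the run-time
    };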
The shadow memory for that stack layout is going to look like this:
......@@ -205,6 +209,9 @@ along with GCC; see the file COPYING3. If not see
// Name of the global variable.
const void *__name;
// Name of the module where the global variable is declared.
const void *__module_name;
// This is always set to NULL for now.
uptr __has_dynamic_init;
}
......@@ -914,6 +921,15 @@ asan_clear_shadow (rtx shadow_mem, HOST_WIDE_INT len)
add_int_reg_note (jump, REG_BR_PROB, REG_BR_PROB_BASE * 80 / 100);
}
void
asan_function_start (void)
{
section *fnsec = function_section (current_function_decl);
switch_to_section (fnsec);
ASM_OUTPUT_DEBUG_LABEL (asm_out_file, "LASANPC",
current_function_funcdef_no);
}
/* Insert code to protect stack vars. The prologue sequence should be emitted
directly, epilogue sequence returned. BASE is the register holding the
stack base, against which OFFSETS array offsets are relative to, OFFSETS
......@@ -929,12 +945,13 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls,
int length)
{
rtx shadow_base, shadow_mem, ret, mem;
char buf[30];
unsigned char shadow_bytes[4];
HOST_WIDE_INT base_offset = offsets[length - 1], offset, prev_offset;
HOST_WIDE_INT last_offset, last_size;
int l;
unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
tree str_cst;
tree str_cst, decl, id;
if (shadow_ptr_types[0] == NULL_TREE)
asan_init_shadow_ptr_types ();
......@@ -942,11 +959,6 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls,
/* First of all, prepare the description string. */
pretty_printer asan_pp;
if (DECL_NAME (current_function_decl))
pp_tree_identifier (&asan_pp, DECL_NAME (current_function_decl));
else
pp_string (&asan_pp, "<unknown>");
pp_space (&asan_pp);
pp_decimal_int (&asan_pp, length / 2 - 1);
pp_space (&asan_pp);
for (l = length - 2; l; l -= 2)
......@@ -976,6 +988,20 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls,
emit_move_insn (mem, gen_int_mode (ASAN_STACK_FRAME_MAGIC, ptr_mode));
mem = adjust_address (mem, VOIDmode, GET_MODE_SIZE (ptr_mode));
emit_move_insn (mem, expand_normal (str_cst));
mem = adjust_address (mem, VOIDmode, GET_MODE_SIZE (ptr_mode));
ASM_GENERATE_INTERNAL_LABEL (buf, "LASANPC", current_function_funcdef_no);
id = get_identifier (buf);
decl = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
VAR_DECL, id, char_type_node);
SET_DECL_ASSEMBLER_NAME (decl, id);
TREE_ADDRESSABLE (decl) = 1;
TREE_READONLY (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
TREE_STATIC (decl) = 1;
TREE_PUBLIC (decl) = 0;
TREE_USED (decl) = 1;
emit_move_insn (mem, expand_normal (build_fold_addr_expr (decl)));
shadow_base = expand_binop (Pmode, lshr_optab, base,
GEN_INT (ASAN_SHADOW_SHIFT),
NULL_RTX, 1, OPTAB_DIRECT);
......@@ -1924,20 +1950,21 @@ transform_statements (void)
uptr __size;
uptr __size_with_redzone;
const void *__name;
const void *__module_name;
uptr __has_dynamic_init;
} type. */
static tree
asan_global_struct (void)
{
static const char *field_names[5]
static const char *field_names[6]
= { "__beg", "__size", "__size_with_redzone",
"__name", "__has_dynamic_init" };
tree fields[5], ret;
"__name", "__module_name", "__has_dynamic_init" };
tree fields[6], ret;
int i;
ret = make_node (RECORD_TYPE);
for (i = 0; i < 5; i++)
for (i = 0; i < 6; i++)
{
fields[i]
= build_decl (UNKNOWN_LOCATION, FIELD_DECL,
......@@ -1962,21 +1989,20 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v)
{
tree init, uptr = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type)));
unsigned HOST_WIDE_INT size;
tree str_cst, refdecl = decl;
tree str_cst, module_name_cst, refdecl = decl;
vec<constructor_elt, va_gc> *vinner = NULL;
pretty_printer asan_pp;
pretty_printer asan_pp, module_name_pp;
if (DECL_NAME (decl))
pp_tree_identifier (&asan_pp, DECL_NAME (decl));
else
pp_string (&asan_pp, "<unknown>");
pp_space (&asan_pp);
pp_left_paren (&asan_pp);
pp_string (&asan_pp, main_input_filename);
pp_right_paren (&asan_pp);
str_cst = asan_pp_string (&asan_pp);
pp_string (&module_name_pp, main_input_filename);
module_name_cst = asan_pp_string (&module_name_pp);
if (asan_needs_local_alias (decl))
{
char buf[20];
......@@ -2004,6 +2030,8 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v)
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, size));
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
fold_convert (const_ptr_type_node, str_cst));
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
fold_convert (const_ptr_type_node, module_name_cst));
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, 0));
init = build_constructor (type, vinner);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init);
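Putting the pieces together, the descriptor asan_add_global builds for one protected global corresponds to roughly the following C-level aggregate. This is a hedged sketch: the struct tag, the 10/64 sizes and the "foo.c" file name are illustrative; the final 0 matches the build_int_cst (uptr, 0) element above.

    typedef unsigned long uptr;          // stands in for the target uptr
    struct asan_global_descr {           // field order as in the __asan_global comment above
      uptr beg;                          // address of the global (or its local alias)
      uptr size;                         // original size
      uptr size_with_redzone;            // size including the trailing red zone
      const void *name;                  // name string built via asan_pp
      const void *module_name;           // main_input_filename built via module_name_pp
      uptr has_dynamic_init;             // emitted as 0 by this version of GCC
    };
    static char buf[10];
    static asan_global_descr buf_descr = {
      (uptr) &buf, 10, 64, "buf", "foo.c", 0
    };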
......@@ -2158,7 +2186,7 @@ add_string_csts (void **slot, void *data)
static GTY(()) tree asan_ctor_statements;
/* Module-level instrumentation.
- Insert __asan_init() into the list of CTORs.
- Insert __asan_init_vN() into the list of CTORs.
- TODO: insert redzones around globals.
*/
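A hedged sketch of what the emitted module constructor amounts to; asan_finish_file builds the equivalent as GIMPLE, and the constructor name and descriptor array below are illustrative:

    extern "C" void __asan_init_v3();
    extern "C" void __asan_register_globals(void *globals, unsigned long n);
    static void *this_modules_descriptors[1];   // stands in for this module's descriptor array

    __attribute__((constructor)) static void asan_module_ctor() {
      __asan_init_v3();                          // bring the run-time up first
      __asan_register_globals(this_modules_descriptors, 1);
    }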
......
......@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef TREE_ASAN
#define TREE_ASAN
extern void asan_function_start (void);
extern void asan_finish_file (void);
extern rtx asan_emit_stack_protection (rtx, HOST_WIDE_INT *, tree *, int);
extern bool asan_protect_global (tree);
......
......@@ -78,6 +78,7 @@ along with GCC; see the file COPYING3. If not see
#include "cfgloop.h"
#include "params.h"
#include "tree-pretty-print.h" /* for dump_function_header */
#include "asan.h"
#ifdef XCOFF_DEBUGGING_INFO
#include "xcoffout.h" /* Needed for external data
......@@ -1738,6 +1739,9 @@ final_start_function (rtx first, FILE *file,
high_block_linenum = high_function_linenum = last_linenum;
if (flag_sanitize & SANITIZE_ADDRESS)
asan_function_start ();
if (!DECL_IGNORED_P (current_function_decl))
debug_hooks->begin_prologue (last_linenum, last_filename);
......
......@@ -27,7 +27,7 @@ along with GCC; see the file COPYING3. If not see
for other FEs by asan.c. */
/* Address Sanitizer */
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v1",
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v3",
BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
/* Do not reorder the BUILT_IN_ASAN_REPORT* builtins, e.g. cfgcleanup.c
relies on this order. */
......
2013-11-04 Kostya Serebryany <kcc@google.com>
* g++.dg/asan/asan_test.cc: Update the test
to match the fresh asan run-time.
* c-c++-common/asan/stack-overflow-1.c: Ditto.
2013-11-04 Ian Lance Taylor <iant@google.com>
* g++.dg/ext/sync-4.C: New test.
......
......@@ -19,4 +19,5 @@ int main() {
/* { dg-output "READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output " #0 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*stack-overflow-1.c:16|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*Address 0x\[0-9a-f\]+ is\[^\n\r]*frame <main>" } */
/* { dg-output "\[^\n\r]*Address 0x\[0-9a-f\]+ is located in stack of thread T0.*(\n|\r\n|\r)" */
/* { dg-output "\[^\n\r]*in main.*stack-overflow-1.c.*(\n|\r\n|\r)" */
......@@ -204,16 +204,6 @@ TEST(AddressSanitizer, BitFieldNegativeTest) {
delete Ident(x);
}
TEST(AddressSanitizer, OutOfMemoryTest) {
size_t size = SANITIZER_WORDSIZE == 64 ? (size_t)(1ULL << 48) : (0xf0000000);
EXPECT_EQ(0, realloc(0, size));
EXPECT_EQ(0, realloc(0, ~Ident(0)));
EXPECT_EQ(0, malloc(size));
EXPECT_EQ(0, malloc(~Ident(0)));
EXPECT_EQ(0, calloc(1, size));
EXPECT_EQ(0, calloc(1, ~Ident(0)));
}
#if ASAN_NEEDS_SEGV
namespace {
......@@ -497,42 +487,6 @@ TEST(AddressSanitizer, ManyStackObjectsTest) {
EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ");
}
NOINLINE static void Frame0(int frame, char *a, char *b, char *c) {
char d[4] = {0};
char *D = Ident(d);
switch (frame) {
case 3: a[5]++; break;
case 2: b[5]++; break;
case 1: c[5]++; break;
case 0: D[5]++; break;
}
}
NOINLINE static void Frame1(int frame, char *a, char *b) {
char c[4] = {0}; Frame0(frame, a, b, c);
break_optimization(0);
}
NOINLINE static void Frame2(int frame, char *a) {
char b[4] = {0}; Frame1(frame, a, b);
break_optimization(0);
}
NOINLINE static void Frame3(int frame) {
char a[4] = {0}; Frame2(frame, a);
break_optimization(0);
}
TEST(AddressSanitizer, GuiltyStackFrame0Test) {
EXPECT_DEATH(Frame3(0), "located .*in frame <.*Frame0");
}
TEST(AddressSanitizer, GuiltyStackFrame1Test) {
EXPECT_DEATH(Frame3(1), "located .*in frame <.*Frame1");
}
TEST(AddressSanitizer, GuiltyStackFrame2Test) {
EXPECT_DEATH(Frame3(2), "located .*in frame <.*Frame2");
}
TEST(AddressSanitizer, GuiltyStackFrame3Test) {
EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3");
}
NOINLINE void LongJmpFunc1(jmp_buf buf) {
// create three red zones for these two stack objects.
int a;
......
2013-11-04 Kostya Serebryany <kcc@google.com>
* All source files: Merge from upstream r191666.
* merge.sh: Added lsan.
* configure.ac (AC_CONFIG_FILES): Added lsan.
* Makefile.am (SUBDIRS): Added lsan.
* sanitizer_common/Makefile.am (sanitizer_common_files): Added new files.
* asan/Makefile.am (asan_files): Added new files.
(libasan_la_LIBADD): Added a dependency on lsan.
* lsan/Makefile.am: New file.
* asan/Makefile.in: Regenerate.
* lsan/Makefile.in: Regenerate.
* Makefile.in: Regenerate.
* configure: Regenerate.
* sanitizer_common/Makefile.in: Regenerate.
2013-09-20 Alan Modra <amodra@gmail.com>
* configure: Regenerate.
......
175733
191666
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
ACLOCAL_AMFLAGS = -I .. -I ../config
if TSAN_SUPPORTED
SUBDIRS = interception sanitizer_common asan tsan ubsan
SUBDIRS = interception sanitizer_common lsan asan tsan ubsan
else
SUBDIRS = interception sanitizer_common asan ubsan
SUBDIRS = interception sanitizer_common lsan asan ubsan
endif
if USING_MAC_INTERPOSE
SUBDIRS = sanitizer_common asan ubsan
SUBDIRS = sanitizer_common lsan asan ubsan
endif
# Work around what appears to be a GNU make bug handling MAKEFLAGS
......
......@@ -76,7 +76,7 @@ AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
$(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS
ETAGS = etags
CTAGS = ctags
DIST_SUBDIRS = interception sanitizer_common asan ubsan tsan
DIST_SUBDIRS = interception sanitizer_common lsan asan ubsan tsan
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AR = @AR@
......@@ -209,9 +209,9 @@ top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
ACLOCAL_AMFLAGS = -I .. -I ../config
@TSAN_SUPPORTED_FALSE@SUBDIRS = interception sanitizer_common asan ubsan
@TSAN_SUPPORTED_TRUE@SUBDIRS = interception sanitizer_common asan tsan ubsan
@USING_MAC_INTERPOSE_TRUE@SUBDIRS = sanitizer_common asan ubsan
@TSAN_SUPPORTED_FALSE@SUBDIRS = interception sanitizer_common lsan asan ubsan
@TSAN_SUPPORTED_TRUE@SUBDIRS = interception sanitizer_common lsan asan tsan ubsan
@USING_MAC_INTERPOSE_TRUE@SUBDIRS = sanitizer_common lsan asan ubsan
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
......
......@@ -15,32 +15,31 @@ toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
asan_allocator.cc \
asan_allocator2.cc \
asan_interceptors.cc \
asan_mac.cc \
asan_malloc_mac.cc \
asan_new_delete.cc \
asan_posix.cc \
asan_rtl.cc \
asan_stats.cc \
asan_thread_registry.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_linux.cc \
asan_mac.cc \
asan_malloc_linux.cc \
asan_malloc_mac.cc \
asan_malloc_win.cc \
asan_new_delete.cc \
asan_poisoning.cc \
asan_posix.cc \
asan_report.cc \
asan_rtl.cc \
asan_stack.cc \
asan_stats.cc \
asan_thread.cc \
asan_win.cc
libasan_la_SOURCES = $(asan_files)
if USING_MAC_INTERPOSE
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/lsan/libsanitizer_lsan.la
else
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/lsan/libsanitizer_lsan.la $(top_builddir)/interception/libinterception.la
endif
libasan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS)
......
......@@ -81,17 +81,18 @@ am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \
LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
am__DEPENDENCIES_1 =
@USING_MAC_INTERPOSE_FALSE@libasan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_FALSE@ $(top_builddir)/lsan/libsanitizer_lsan.la \
@USING_MAC_INTERPOSE_FALSE@ $(top_builddir)/interception/libinterception.la \
@USING_MAC_INTERPOSE_FALSE@ $(am__DEPENDENCIES_1)
@USING_MAC_INTERPOSE_TRUE@libasan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_TRUE@ $(top_builddir)/lsan/libsanitizer_lsan.la \
@USING_MAC_INTERPOSE_TRUE@ $(am__DEPENDENCIES_1)
am__objects_1 = asan_allocator.lo asan_allocator2.lo \
asan_interceptors.lo asan_mac.lo asan_malloc_mac.lo \
asan_new_delete.lo asan_posix.lo asan_rtl.lo asan_stats.lo \
asan_thread_registry.lo asan_fake_stack.lo asan_globals.lo \
asan_linux.lo asan_malloc_linux.lo asan_malloc_win.lo \
asan_poisoning.lo asan_report.lo asan_stack.lo asan_thread.lo \
asan_win.lo
am__objects_1 = asan_allocator2.lo asan_dll_thunk.lo \
asan_fake_stack.lo asan_globals.lo asan_interceptors.lo \
asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
asan_malloc_mac.lo asan_malloc_win.lo asan_new_delete.lo \
asan_poisoning.lo asan_posix.lo asan_report.lo asan_rtl.lo \
asan_stack.lo asan_stats.lo asan_thread.lo asan_win.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
......@@ -260,32 +261,33 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
asan_allocator.cc \
asan_allocator2.cc \
asan_interceptors.cc \
asan_mac.cc \
asan_malloc_mac.cc \
asan_new_delete.cc \
asan_posix.cc \
asan_rtl.cc \
asan_stats.cc \
asan_thread_registry.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_linux.cc \
asan_mac.cc \
asan_malloc_linux.cc \
asan_malloc_mac.cc \
asan_malloc_win.cc \
asan_new_delete.cc \
asan_poisoning.cc \
asan_posix.cc \
asan_report.cc \
asan_rtl.cc \
asan_stack.cc \
asan_stats.cc \
asan_thread.cc \
asan_win.cc
libasan_la_SOURCES = $(asan_files)
@USING_MAC_INTERPOSE_FALSE@libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_FALSE@ $(top_builddir)/lsan/libsanitizer_lsan.la \
@USING_MAC_INTERPOSE_FALSE@ $(top_builddir)/interception/libinterception.la \
@USING_MAC_INTERPOSE_FALSE@ $(LIBSTDCXX_RAW_CXX_LDFLAGS)
@USING_MAC_INTERPOSE_TRUE@libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_TRUE@ $(top_builddir)/lsan/libsanitizer_lsan.la \
@USING_MAC_INTERPOSE_TRUE@ $(LIBSTDCXX_RAW_CXX_LDFLAGS)
libasan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` -lpthread -ldl
......@@ -402,8 +404,8 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_dll_thunk.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
......@@ -420,7 +422,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@
.cc.o:
......
......@@ -7,7 +7,7 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
// ASan-private header for asan_allocator2.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H
......@@ -17,18 +17,6 @@
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_list.h"
// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
# if (ASAN_LINUX && !ASAN_ANDROID) || ASAN_MAC || ASAN_WINDOWS
# define ASAN_ALLOCATOR_VERSION 2
# else
# define ASAN_ALLOCATOR_VERSION 1
# endif
#endif // ASAN_ALLOCATOR_VERSION
namespace __asan {
enum AllocType {
......@@ -101,109 +89,17 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
struct AsanThreadLocalMallocStorage {
explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
#if ASAN_ALLOCATOR_VERSION == 1
: quarantine_(x)
#endif
{ }
AsanThreadLocalMallocStorage() {
CHECK(REAL(memset));
REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
}
#if ASAN_ALLOCATOR_VERSION == 1
AsanChunkFifoList quarantine_;
AsanChunk *free_lists_[kNumberOfSizeClasses];
#else
uptr quarantine_cache[16];
uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
#endif
void CommitBack();
};
// Fake stack frame contains local variables of one function.
// This struct should fit into a stack redzone (32 bytes).
struct FakeFrame {
uptr magic; // Modified by the instrumented code.
uptr descr; // Modified by the instrumented code.
FakeFrame *next;
u64 real_stack : 48;
u64 size_minus_one : 16;
};
struct FakeFrameFifo {
public:
void FifoPush(FakeFrame *node);
FakeFrame *FifoPop();
private:
FakeFrame *first_, *last_;
};
class FakeFrameLifo {
public:
void LifoPush(FakeFrame *node) {
node->next = top_;
top_ = node;
}
void LifoPop() {
CHECK(top_);
top_ = top_->next;
}
FakeFrame *top() { return top_; }
private:
FakeFrame *top_;
};
// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator so that when a function exits the fake stack
// is not poped but remains there for quite some time until gets used again.
// So, we poison the objects on the fake stack when function returns.
// It helps us find use-after-return bugs.
// We can not rely on __asan_stack_free being called on every function exit,
// so we maintain a lifo list of all current fake frames and update it on every
// call to __asan_stack_malloc.
class FakeStack {
public:
FakeStack();
explicit FakeStack(LinkerInitialized) {}
void Init(uptr stack_size);
void StopUsingFakeStack() { alive_ = false; }
void Cleanup();
uptr AllocateStack(uptr size, uptr real_stack);
static void OnFree(uptr ptr, uptr size, uptr real_stack);
// Return the bottom of the maped region.
uptr AddrIsInFakeStack(uptr addr);
bool StackSize() { return stack_size_; }
private:
static const uptr kMinStackFrameSizeLog = 9; // Min frame is 512B.
static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
static const uptr kNumberOfSizeClasses =
kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
bool AddrIsInSizeClass(uptr addr, uptr size_class);
// Each size class should be large enough to hold all frames.
uptr ClassMmapSize(uptr size_class);
uptr ClassSize(uptr size_class) {
return 1UL << (size_class + kMinStackFrameSizeLog);
}
void DeallocateFrame(FakeFrame *fake_frame);
uptr ComputeSizeClass(uptr alloc_size);
void AllocateOneSizeClass(uptr size_class);
uptr stack_size_;
bool alive_;
uptr allocated_size_classes_[kNumberOfSizeClasses];
FakeFrameFifo size_classes_[kNumberOfSizeClasses];
FakeFrameLifo call_stack_;
};
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type);
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
......
//===-- asan_dll_thunk.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file defines a family of thunks that should be statically linked into
// the DLLs that have ASan instrumentation in order to delegate the calls to the
// shared runtime that lives in the main binary.
// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
// details.
//===----------------------------------------------------------------------===//
// Only compile this code when buidling asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
// ----------------- Helper functions and macros --------------------- {{{1
extern "C" {
void *__stdcall GetModuleHandleA(const char *module_name);
void *__stdcall GetProcAddress(void *module, const char *proc_name);
void abort();
}
static void *getRealProcAddressOrDie(const char *name) {
void *ret = GetProcAddress(GetModuleHandleA(0), name);
if (!ret)
abort();
return ret;
}
#define WRAP_V_V(name) \
extern "C" void name() { \
typedef void (*fntype)(); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(); \
}
#define WRAP_V_W(name) \
extern "C" void name(void *arg) { \
typedef void (*fntype)(void *arg); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg); \
}
#define WRAP_V_WW(name) \
extern "C" void name(void *arg1, void *arg2) { \
typedef void (*fntype)(void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2); \
}
#define WRAP_V_WWW(name) \
extern "C" void name(void *arg1, void *arg2, void *arg3) { \
typedef void *(*fntype)(void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2, arg3); \
}
#define WRAP_W_V(name) \
extern "C" void *name() { \
typedef void *(*fntype)(); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(); \
}
#define WRAP_W_W(name) \
extern "C" void *name(void *arg) { \
typedef void *(*fntype)(void *arg); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg); \
}
#define WRAP_W_WW(name) \
extern "C" void *name(void *arg1, void *arg2) { \
typedef void *(*fntype)(void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2); \
}
#define WRAP_W_WWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
typedef void *(*fntype)(void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3); \
}
#define WRAP_W_WWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
typedef void *(*fntype)(void *, void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4); \
}
#define WRAP_W_WWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5) { \
typedef void *(*fntype)(void *, void *, void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5); \
}
#define WRAP_W_WWWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5, void *arg6) { \
typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
}
// }}}
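For example, WRAP_V_W(__asan_report_load1) below expands (modulo whitespace) to the thunk shown here: the first call resolves the real entry point out of the main executable via getRealProcAddressOrDie, caches it in a function-local static, and every call forwards to it.

    extern "C" void __asan_report_load1(void *arg) {
      typedef void (*fntype)(void *arg);
      static fntype fn = (fntype)getRealProcAddressOrDie("__asan_report_load1");
      fn(arg);
    }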
// ----------------- ASan own interface functions --------------------
WRAP_W_V(__asan_should_detect_stack_use_after_return)
extern "C" {
int __asan_option_detect_stack_use_after_return;
// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.
void __asan_init_v3() {
typedef void (*fntype)();
static fntype fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
}
}
WRAP_V_W(__asan_report_store1)
WRAP_V_W(__asan_report_store2)
WRAP_V_W(__asan_report_store4)
WRAP_V_W(__asan_report_store8)
WRAP_V_W(__asan_report_store16)
WRAP_V_WW(__asan_report_store_n)
WRAP_V_W(__asan_report_load1)
WRAP_V_W(__asan_report_load2)
WRAP_V_W(__asan_report_load4)
WRAP_V_W(__asan_report_load8)
WRAP_V_W(__asan_report_load16)
WRAP_V_WW(__asan_report_load_n)
WRAP_V_WW(__asan_register_globals)
WRAP_V_WW(__asan_unregister_globals)
WRAP_W_WW(__asan_stack_malloc_0)
WRAP_W_WW(__asan_stack_malloc_1)
WRAP_W_WW(__asan_stack_malloc_2)
WRAP_W_WW(__asan_stack_malloc_3)
WRAP_W_WW(__asan_stack_malloc_4)
WRAP_W_WW(__asan_stack_malloc_5)
WRAP_W_WW(__asan_stack_malloc_6)
WRAP_W_WW(__asan_stack_malloc_7)
WRAP_W_WW(__asan_stack_malloc_8)
WRAP_W_WW(__asan_stack_malloc_9)
WRAP_W_WW(__asan_stack_malloc_10)
WRAP_V_WWW(__asan_stack_free_0)
WRAP_V_WWW(__asan_stack_free_1)
WRAP_V_WWW(__asan_stack_free_2)
WRAP_V_WWW(__asan_stack_free_4)
WRAP_V_WWW(__asan_stack_free_5)
WRAP_V_WWW(__asan_stack_free_6)
WRAP_V_WWW(__asan_stack_free_7)
WRAP_V_WWW(__asan_stack_free_8)
WRAP_V_WWW(__asan_stack_free_9)
WRAP_V_WWW(__asan_stack_free_10)
// TODO(timurrrr): Add more interface functions on an as-needed basis.
// ----------------- Memory allocation functions ---------------------
WRAP_V_W(free)
WRAP_V_WW(_free_dbg)
WRAP_W_W(malloc)
WRAP_W_WWWW(_malloc_dbg)
WRAP_W_WW(calloc)
WRAP_W_WWWWW(_calloc_dbg)
WRAP_W_WWW(_calloc_impl)
WRAP_W_WW(realloc)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_W(_msize)
// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
#endif // ASAN_DLL_THUNK
//===-- asan_fake_stack.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_fake_stack.cc, implements FakeStack.
//===----------------------------------------------------------------------===//
#ifndef ASAN_FAKE_STACK_H
#define ASAN_FAKE_STACK_H
#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
// Fake stack frame contains local variables of one function.
struct FakeFrame {
uptr magic; // Modified by the instrumented code.
uptr descr; // Modified by the instrumented code.
uptr pc; // Modified by the instrumented code.
uptr real_stack;
};
// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator so that when a function exits the fake stack
// is not popped but remains there for quite some time until it gets used again.
// So, we poison the objects on the fake stack when function returns.
// It helps us find use-after-return bugs.
//
// The FakeStack object is allocated by a single mmap call and has no other
// pointers. The size of the fake stack depends on the actual thread stack size
// and thus can not be a constant.
// stack_size is a power of two greater or equal to the thread's stack size;
// we store it as its logarithm (stack_size_log).
// FakeStack has kNumberOfSizeClasses (11) size classes, each size class
// is a power of two, starting from 64 bytes. Each size class occupies
// stack_size bytes and thus can allocate
// NumberOfFrames=(stack_size/BytesInSizeClass) fake frames (also a power of 2).
// For each size class we have NumberOfFrames allocation flags,
// each flag indicates whether the given frame is currently allocated.
// All flags for size classes 0 .. 10 are stored in a single contiguous region
// followed by another contiguous region which contains the actual memory for
// size classes. The addresses are computed by GetFlags and GetFrame without
// any memory accesses solely based on 'this' and stack_size_log.
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
// frames in round-robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
public:
static const uptr kNumberOfSizeClasses =
kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
// CTOR: create the FakeStack as a single mmap-ed object.
static FakeStack *Create(uptr stack_size_log);
void Destroy();
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
return 1UL << (stack_size_log + 1 - kMinStackFrameSizeLog);
}
// Each size class occupies stack_size bytes.
static uptr SizeRequiredForFrames(uptr stack_size_log) {
return (1ULL << stack_size_log) * kNumberOfSizeClasses;
}
// Number of bytes required for the whole object.
static uptr RequiredSize(uptr stack_size_log) {
return kFlagsOffset + SizeRequiredForFlags(stack_size_log) +
SizeRequiredForFrames(stack_size_log);
}
// Offset of the given flag from the first flag.
// The flags for class 0 begin at offset 000000000
// The flags for class 1 begin at offset 100000000
// ....................2................ 110000000
// ....................3................ 111000000
// and so on.
static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
uptr t = kNumberOfSizeClasses - 1 - class_id;
const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
return ((all_ones >> t) << t) << (stack_size_log - 15);
}
static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
return 1UL << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
// Divide n by the number of frames in the size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
// Return the pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);
}
// Get frame by class_id and pos.
u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
SizeRequiredForFlags(stack_size_log) +
(1 << stack_size_log) * class_id + BytesInSizeClass(class_id) * pos;
}
// Allocate the fake frame.
FakeFrame *Allocate(uptr stack_size_log, uptr class_id, uptr real_stack);
// Deallocate the fake frame: read the saved flag address and write 0 there.
static void Deallocate(uptr x, uptr class_id) {
**SavedFlagPtr(x, class_id) = 0;
}
// Poison the entire FakeStack's shadow with the magic value.
void PoisonAll(u8 magic);
// Return the beginning of the FakeFrame or 0 if the address is not ours.
uptr AddrIsInFakeStack(uptr addr);
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
return 1UL << (class_id + kMinStackFrameSizeLog);
}
// The fake frame is guaranteed to have a right redzone.
// We use the last word of that redzone to store the address of the flag
// that corresponds to the current frame to make faster deallocation.
static u8 **SavedFlagPtr(uptr x, uptr class_id) {
return reinterpret_cast<u8 **>(x + BytesInSizeClass(class_id) - sizeof(x));
}
uptr stack_size_log() const { return stack_size_log_; }
void HandleNoReturn();
void GC(uptr real_stack);
private:
FakeStack() { }
static const uptr kFlagsOffset = 4096; // This is where the flags begin.
// Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
COMPILER_CHECK(kNumberOfSizeClasses == 11);
static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
uptr hint_position_[kNumberOfSizeClasses];
uptr stack_size_log_;
// a bit is set if something was allocated from the corresponding size class.
bool needs_gc_;
};
FakeStack *GetTLSFakeStack();
void SetTLSFakeStack(FakeStack *fs);
} // namespace __asan
#endif // ASAN_FAKE_STACK_H
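To make the size formulas in this header concrete, here is a worked example (my own arithmetic reproducing the functions above, not part of the source) for stack_size_log == 20, i.e. a 1 MiB thread stack:

    // Reproduces SizeRequiredForFlags/SizeRequiredForFrames/RequiredSize/NumberOfFrames
    // with kMinStackFrameSizeLog = 6, kNumberOfSizeClasses = 11, kFlagsOffset = 4096.
    constexpr unsigned long kMinLog = 6, kClasses = 11, kFlagsOff = 4096;
    constexpr unsigned long FlagsBytes(unsigned long log) { return 1UL << (log + 1 - kMinLog); }
    constexpr unsigned long FrameBytes(unsigned long log) { return (1UL << log) * kClasses; }
    constexpr unsigned long Required(unsigned long log)   { return kFlagsOff + FlagsBytes(log) + FrameBytes(log); }
    constexpr unsigned long Frames(unsigned long log, unsigned long cls) { return 1UL << (log - kMinLog - cls); }

    static_assert(FlagsBytes(20) == 32768,            "32 KiB of allocation flags");
    static_assert(FrameBytes(20) == 11u * (1u << 20), "each of the 11 classes gets 1 MiB of frames");
    static_assert(Required(20)   == 11571200,         "roughly 11 MiB mapped per thread");
    static_assert(Frames(20, 0)  == 16384,            "16384 frames of 64 bytes in class 0");
    static_assert(Frames(20, 10) == 16,               "16 frames of 64 KiB in class 10");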
......@@ -30,8 +30,6 @@ struct Flags {
// Lower value may reduce memory usage but increase the chance of
// false negatives.
int quarantine_size;
// If set, uses in-process symbolizer from common sanitizer runtime.
bool symbolize;
// Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
// Size (in bytes) of redzones around heap objects.
......@@ -45,8 +43,6 @@ struct Flags {
int report_globals;
// If set, attempts to catch initialization order issues.
bool check_initialization_order;
// Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
// If set, uses custom wrappers and replacements for libc string functions
// to find more errors.
bool replace_str;
......@@ -54,11 +50,13 @@ struct Flags {
bool replace_intrin;
// Used on Mac only.
bool mac_ignore_invalid_free;
// ASan allocator flag. See asan_allocator.cc.
bool use_fake_stack;
// ASan allocator flag. Sets the maximal size of allocation request
// that would return memory filled with zero bytes.
int max_malloc_fill_size;
// Enables stack-use-after-return checking at run-time.
bool detect_stack_use_after_return;
// The minimal fake stack size log.
int uar_stack_size_log;
// ASan allocator flag. max_malloc_fill_size is the maximal amount of bytes
// that will be filled with malloc_fill_byte on malloc.
int max_malloc_fill_size, malloc_fill_byte;
// Override exit status if something was reported.
int exitcode;
// If set, user may manually mark memory regions as poisoned or unpoisoned.
......@@ -69,6 +67,8 @@ struct Flags {
int sleep_before_dying;
// If set, registers ASan custom segv handler.
bool handle_segv;
// If set, allows user register segv handler even if ASan registers one.
bool allow_user_segv_handler;
// If set, uses alternate stack for signal handling.
bool use_sigaltstack;
// Allow the users to work around the bug in Nvidia drivers prior to 295.*.
......@@ -89,18 +89,10 @@ struct Flags {
// Allow the tool to re-exec the program. This may interfere badly with the
// debugger.
bool allow_reexec;
// Strips this prefix from file paths in error reports.
const char *strip_path_prefix;
// If set, prints not only thread creation stacks for threads in error report,
// but also thread creation stacks for threads that created those threads,
// etc. up to main thread.
bool print_full_thread_history;
// ASan will write logs to "log_path.pid" instead of stderr.
const char *log_path;
// Use fast (frame-pointer-based) unwinder on fatal errors (if available).
bool fast_unwind_on_fatal;
// Use fast (frame-pointer-based) unwinder on malloc/free (if available).
bool fast_unwind_on_malloc;
// Poison (or not) the heap memory on [de]allocation. Zero value is useful
// for benchmarking the allocator or instrumentator.
bool poison_heap;
......@@ -108,9 +100,18 @@ struct Flags {
bool alloc_dealloc_mismatch;
// Use stack depot instead of storing stacks in the redzones.
bool use_stack_depot;
// If true, assume that memcmp(p1, p2, n) always reads n bytes before
// comparing p1 and p2.
bool strict_memcmp;
// If true, assume that dynamic initializers can never access globals from
// other modules, even if the latter are already initialized.
bool strict_init_order;
};
Flags *flags();
extern Flags asan_flags_dont_use_directly;
inline Flags *flags() {
return &asan_flags_dont_use_directly;
}
void InitializeFlags(Flags *f, const char *env);
} // namespace __asan
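These fields are normally configured through the ASAN_OPTIONS environment variable (e.g. ASAN_OPTIONS=verbosity=1:detect_stack_use_after_return=1 ./app) or baked into the binary via the weak __asan_default_options() hook declared in asan_interface_internal.h. A hedged sketch, assuming the option keys parsed by InitializeFlags track the field names above:

    extern "C" const char *__asan_default_options() {
      // Illustrative defaults; each key corresponds to a field of struct Flags above.
      return "verbosity=1:detect_stack_use_after_return=1:strict_init_order=1";
    }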
......
......@@ -12,11 +12,14 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
namespace __asan {
......@@ -30,15 +33,26 @@ struct ListOfGlobals {
static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_globals;
static ListOfGlobals *list_of_all_globals;
static ListOfGlobals *list_of_dynamic_init_globals;
void PoisonRedZones(const Global &g) {
static const int kDynamicInitGlobalsInitialCapacity = 512;
struct DynInitGlobal {
Global g;
bool initialized;
};
typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals;
// Lazy-initialized and never deleted.
static VectorOfGlobals *dynamic_init_globals;
ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
FastPoisonShadow(g->beg, g->size_with_redzone, value);
}
ALWAYS_INLINE void PoisonRedZones(const Global &g) {
uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY);
PoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
kAsanGlobalRedzoneMagic);
FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
kAsanGlobalRedzoneMagic);
if (g.size != aligned_size) {
// partial right redzone
PoisonShadowPartialRightRedzone(
FastPoisonShadowPartialRightRedzone(
g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY),
g.size % SHADOW_GRANULARITY,
SHADOW_GRANULARITY,
......@@ -46,6 +60,12 @@ void PoisonRedZones(const Global &g) {
}
}
static void ReportGlobal(const Global &g, const char *prefix) {
Report("%s Global: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
prefix, (void*)g.beg, g.size, g.size_with_redzone, g.name,
g.module_name, g.has_dynamic_init);
}
bool DescribeAddressIfGlobal(uptr addr, uptr size) {
if (!flags()->report_globals) return false;
BlockingMutexLock lock(&mu_for_globals);
......@@ -53,8 +73,7 @@ bool DescribeAddressIfGlobal(uptr addr, uptr size) {
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
if (flags()->report_globals >= 2)
Report("Search Global: beg=%p size=%zu name=%s\n",
(void*)g.beg, g.size, (char*)g.name);
ReportGlobal(g, "Search");
res |= DescribeAddressRelativeToGlobal(addr, size, g);
}
return res;
......@@ -66,24 +85,26 @@ bool DescribeAddressIfGlobal(uptr addr, uptr size) {
static void RegisterGlobal(const Global *g) {
CHECK(asan_inited);
if (flags()->report_globals >= 2)
Report("Added Global: beg=%p size=%zu/%zu name=%s dyn.init=%zu\n",
(void*)g->beg, g->size, g->size_with_redzone, g->name,
g->has_dynamic_init);
ReportGlobal(*g, "Added");
CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
PoisonRedZones(*g);
if (flags()->poison_heap)
PoisonRedZones(*g);
ListOfGlobals *l =
(ListOfGlobals*)allocator_for_globals.Allocate(sizeof(ListOfGlobals));
l->g = g;
l->next = list_of_all_globals;
list_of_all_globals = l;
if (g->has_dynamic_init) {
l = (ListOfGlobals*)allocator_for_globals.Allocate(sizeof(ListOfGlobals));
l->g = g;
l->next = list_of_dynamic_init_globals;
list_of_dynamic_init_globals = l;
if (dynamic_init_globals == 0) {
void *mem = allocator_for_globals.Allocate(sizeof(VectorOfGlobals));
dynamic_init_globals = new(mem)
VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
}
DynInitGlobal dyn_global = { *g, false };
dynamic_init_globals->push_back(dyn_global);
}
}
......@@ -93,34 +114,26 @@ static void UnregisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
PoisonShadow(g->beg, g->size_with_redzone, 0);
if (flags()->poison_heap)
PoisonShadowForGlobal(g, 0);
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
// implementation. It might not be worth doing anyway.
}
// Poison all shadow memory for a single global.
static void PoisonGlobalAndRedzones(const Global *g) {
CHECK(asan_inited);
CHECK(flags()->check_initialization_order);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->report_globals >= 3)
Printf("DynInitPoison : %s\n", g->name);
PoisonShadow(g->beg, g->size_with_redzone, kAsanInitializationOrderMagic);
}
static void UnpoisonGlobal(const Global *g) {
CHECK(asan_inited);
CHECK(flags()->check_initialization_order);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->report_globals >= 3)
Printf("DynInitUnpoison: %s\n", g->name);
PoisonShadow(g->beg, g->size_with_redzone, 0);
PoisonRedZones(*g);
void StopInitOrderChecking() {
BlockingMutexLock lock(&mu_for_globals);
if (!flags()->check_initialization_order || !dynamic_init_globals)
return;
flags()->check_initialization_order = false;
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
// Unpoison the whole global.
PoisonShadowForGlobal(g, 0);
// Poison redzones back.
PoisonRedZones(*g);
}
}
} // namespace __asan
......@@ -151,31 +164,47 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// when all dynamically initialized globals are unpoisoned. This method
// poisons all global variables not defined in this TU, so that a dynamic
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
if (!flags()->check_initialization_order) return;
CHECK(list_of_dynamic_init_globals);
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
!flags()->poison_heap)
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals);
CHECK(module_name);
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
bool from_current_tu = false;
// The list looks like:
// a => ... => b => last_addr => ... => first_addr => c => ...
// The globals of the current TU reside between last_addr and first_addr.
for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next) {
if (l->g->beg == last_addr)
from_current_tu = true;
if (!from_current_tu)
PoisonGlobalAndRedzones(l->g);
if (l->g->beg == first_addr)
from_current_tu = false;
if (flags()->report_globals >= 3)
Printf("DynInitPoison module: %s\n", module_name);
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
if (dyn_g.initialized)
continue;
if (g->module_name != module_name)
PoisonShadowForGlobal(g, kAsanInitializationOrderMagic);
else if (!strict_init_order)
dyn_g.initialized = true;
}
CHECK(!from_current_tu);
}
// This method runs immediately after dynamic initialization in each TU, when
// all dynamically initialized globals except for those defined in the current
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order) return;
if (!flags()->check_initialization_order ||
!flags()->poison_heap)
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next)
UnpoisonGlobal(l->g);
// FIXME: Optionally report that we're unpoisoning globals from a module.
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
if (!dyn_g.initialized) {
// Unpoison the whole global.
PoisonShadowForGlobal(g, 0);
// Poison redzones back.
PoisonRedZones(*g);
}
}
}
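From the instrumented side, the calling protocol around a TU's dynamic initializers is roughly the following hedged sketch (generated by the compiler in reality; the module name literal, Init() and the wrapper function are illustrative):

    extern "C" void __asan_before_dynamic_init(const char *module_name);
    extern "C" void __asan_after_dynamic_init();

    static int Init() { return 42; }            // some dynamic initializer in this TU
    static int global_in_this_tu;

    static void run_this_tus_dynamic_initializers() {
      __asan_before_dynamic_init("src/foo.cc"); // poison globals of other modules
      global_in_this_tu = Init();               // may legally touch only this TU's globals
      __asan_after_dynamic_init();              // unpoison everything again
    }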
......@@ -23,8 +23,13 @@ extern "C" {
// Every time the asan ABI changes we also change the version number in this
// name. Objects built with an incompatible asan ABI version
// will not link with the run-time.
void __asan_init_v1() SANITIZER_INTERFACE_ATTRIBUTE;
#define __asan_init __asan_init_v1
// Changes between ABI versions:
// v1=>v2: added 'module_name' to __asan_global
// v2=>v3: stack frame description (created by the compiler)
// contains the function PC as the 3-rd field (see
// DescribeAddressIfStack).
SANITIZER_INTERFACE_ATTRIBUTE void __asan_init_v3();
#define __asan_init __asan_init_v3
// This structure describes an instrumented global variable.
struct __asan_global {
......@@ -32,102 +37,92 @@ extern "C" {
uptr size; // The original size of the global.
uptr size_with_redzone; // The size with the redzone.
const char *name; // Name as a C string.
const char *module_name; // Module name as a C string. This pointer is a
// unique identifier of a module.
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
};
// These two functions should be called by the instrumented code.
// 'globals' is an array of structures describing 'n' globals.
void __asan_register_globals(__asan_global *globals, uptr n)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_unregister_globals(__asan_global *globals, uptr n)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_register_globals(__asan_global *globals, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unregister_globals(__asan_global *globals, uptr n);
// These two functions should be called before and after dynamic initializers
// run, respectively. They should be called with parameters describing all
// dynamically initialized globals defined in the calling TU.
void __asan_before_dynamic_init(uptr first_addr, uptr last_addr)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_after_dynamic_init()
SANITIZER_INTERFACE_ATTRIBUTE;
// These two functions are used by the instrumented code in the
// use-after-return mode. __asan_stack_malloc allocates size bytes of
// fake stack and __asan_stack_free poisons it. real_stack is a pointer to
// the real stack region.
uptr __asan_stack_malloc(uptr size, uptr real_stack)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_stack_free(uptr ptr, uptr size, uptr real_stack)
SANITIZER_INTERFACE_ATTRIBUTE;
// of a single module run, respectively.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_before_dynamic_init(const char *module_name);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_after_dynamic_init();
// These two functions are used by instrumented code in the
// use-after-scope mode. They mark memory for local variables as
// unaddressable when they leave scope and addressable before the
// function exits.
void __asan_poison_stack_memory(uptr addr, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_unpoison_stack_memory(uptr addr, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_stack_memory(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_stack_memory(uptr addr, uptr size);
// Performs cleanup before a NoReturn function. Must be called before things
// like _exit and execl to avoid false positives on stack.
void __asan_handle_no_return() SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE void __asan_handle_no_return();
void __asan_poison_memory_region(void const volatile *addr, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_unpoison_memory_region(void const volatile *addr, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_memory_region(void const volatile *addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
bool __asan_address_is_poisoned(void const volatile *addr)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
bool __asan_address_is_poisoned(void const volatile *addr);
uptr __asan_region_is_poisoned(uptr beg, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_region_is_poisoned(uptr beg, uptr size);
void __asan_describe_address(uptr addr)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_describe_address(uptr addr);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_report_error(uptr pc, uptr bp, uptr sp,
uptr addr, bool is_write, uptr access_size)
SANITIZER_INTERFACE_ATTRIBUTE;
uptr addr, bool is_write, uptr access_size);
int __asan_set_error_exit_code(int exit_code)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_set_death_callback(void (*callback)(void))
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_set_error_report_callback(void (*callback)(const char*))
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_set_error_exit_code(int exit_code);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_death_callback(void (*callback)(void));
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_error_report_callback(void (*callback)(const char*));
/* OPTIONAL */ void __asan_on_error()
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_on_error();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
int out_size)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_estimated_allocated_size(uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
bool __asan_get_ownership(const void *p)
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_allocated_size(const void *p)
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_current_allocated_bytes()
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_heap_size()
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_free_bytes()
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_unmapped_bytes()
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_print_accumulated_stats()
SANITIZER_INTERFACE_ATTRIBUTE;
/* OPTIONAL */ const char* __asan_default_options()
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
/* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
/* OPTIONAL */ void __asan_free_hook(void *ptr)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
int out_size);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE bool __asan_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_free_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_unmapped_bytes();
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_free_hook(void *ptr);
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
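As a usage example for the poisoning entry points declared above, a hedged sketch assuming the public <sanitizer/asan_interface.h> wrapper header, which declares the same __asan_poison_memory_region/__asan_unpoison_memory_region functions (the helper name is illustrative):

    #include <sanitizer/asan_interface.h>

    void quarantine_tail(char *buf, unsigned long n) {
      __asan_poison_memory_region(buf + n / 2, n - n / 2);    // any access now reports an error
      // ... code that must not touch buf[n/2 .. n-1] ...
      __asan_unpoison_memory_region(buf + n / 2, n - n / 2);  // make it addressable again
    }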
......@@ -19,39 +19,8 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_libc.h"
#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
# error "This operating system is not supported by AddressSanitizer"
#endif
#define ASAN_DEFAULT_FAILURE_EXITCODE 1
#if defined(__linux__)
# define ASAN_LINUX 1
#else
# define ASAN_LINUX 0
#endif
#if defined(__APPLE__)
# define ASAN_MAC 1
#else
# define ASAN_MAC 0
#endif
#if defined(_WIN32)
# define ASAN_WINDOWS 1
#else
# define ASAN_WINDOWS 0
#endif
#if defined(__ANDROID__) || defined(ANDROID)
# define ASAN_ANDROID 1
#else
# define ASAN_ANDROID 0
#endif
#define ASAN_POSIX (ASAN_LINUX || ASAN_MAC)
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
# error "The AddressSanitizer run-time should not be"
" instrumented by AddressSanitizer"
......@@ -61,7 +30,7 @@
// If set, asan will install its own SEGV signal handler.
#ifndef ASAN_NEEDS_SEGV
# if ASAN_ANDROID == 1
# if SANITIZER_ANDROID == 1
# define ASAN_NEEDS_SEGV 0
# else
# define ASAN_NEEDS_SEGV 1
......@@ -90,7 +59,7 @@
#endif
#ifndef ASAN_USE_PREINIT_ARRAY
# define ASAN_USE_PREINIT_ARRAY (ASAN_LINUX && !ASAN_ANDROID)
# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID)
#endif
// All internal functions in asan reside inside the __asan namespace
......@@ -121,6 +90,7 @@ void UnsetAlternateSignalStack();
void InstallSignalHandlers();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
// Wrapper for TLS/TSD.
void AsanTSDInit(void (*destructor)(void *tsd));
......@@ -129,24 +99,14 @@ void AsanTSDSet(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
// asan_poisoning.cc
// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);
// Poisons the shadow memory for "redzone_size" bytes starting from
// "addr + size".
void PoisonShadowPartialRightRedzone(uptr addr,
uptr size,
uptr redzone_size,
u8 value);
// Platform-specific options.
#ifdef __APPLE__
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
(PlatformHasDifferentMemcpyAndMemmove())
#else
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // __APPLE__
#endif // SANITIZER_MAC
// Add convenient macro for interface functions that may be represented as
// weak hooks.
......
......@@ -9,12 +9,13 @@
//
// Linux-specific details.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
......@@ -29,7 +30,7 @@
#include <unistd.h>
#include <unwind.h>
#if !ASAN_ANDROID
#if !SANITIZER_ANDROID
// FIXME: where to get ucontext on Android?
#include <sys/ucontext.h>
#endif
......@@ -48,7 +49,7 @@ void *AsanDoesNotSupportStaticLinkage() {
}
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if ASAN_ANDROID
#if SANITIZER_ANDROID
*pc = *sp = *bp = 0;
#elif defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
......@@ -86,6 +87,11 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
stk_ptr = (uptr *) *sp;
*bp = stk_ptr[15];
# endif
# elif defined(__mips__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[31];
*bp = ucontext->uc_mcontext.gregs[30];
*sp = ucontext->uc_mcontext.gregs[29];
#else
# error "Unsupported arch"
#endif
......@@ -99,25 +105,7 @@ void AsanPlatformThreadInit() {
// Nothing here for now.
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
#if defined(__arm__) || \
defined(__powerpc__) || defined(__powerpc64__) || \
defined(__sparc__)
fast = false;
#endif
if (!fast)
return stack->SlowUnwindStack(pc, max_s);
stack->size = 0;
stack->trace[0] = pc;
if (max_s > 1) {
stack->max_size = max_s;
if (!asan_inited) return;
if (AsanThread *t = asanThreadRegistry().GetCurrent())
stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom());
}
}
#if !ASAN_ANDROID
#if !SANITIZER_ANDROID
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
ucontext_t *ucp = (ucontext_t*)context;
*stack = (uptr)ucp->uc_stack.ss_sp;
......@@ -131,4 +119,4 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
} // namespace __asan
#endif // __linux__
#endif // SANITIZER_LINUX
......@@ -10,7 +10,8 @@
// Mac-specific details.
//===----------------------------------------------------------------------===//
#ifdef __APPLE__
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include "asan_interceptors.h"
#include "asan_internal.h"
......@@ -18,7 +19,7 @@
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include <crt_externs.h> // for _NSGetArgv
......@@ -50,15 +51,17 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif // SANITIZER_WORDSIZE
}
int GetMacosVersion() {
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
MacosVersion GetMacosVersionInternal() {
int mib[2] = { CTL_KERN, KERN_OSRELEASE };
char version[100];
uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
// Get the version length.
CHECK(sysctl(mib, 2, 0, &len, 0, 0) != -1);
CHECK(len < maxlen);
CHECK(sysctl(mib, 2, version, &len, 0, 0) != -1);
CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
CHECK_LT(len, maxlen);
CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
switch (version[0]) {
case '9': return MACOS_VERSION_LEOPARD;
case '1': {
......@@ -66,6 +69,7 @@ int GetMacosVersion() {
case '0': return MACOS_VERSION_SNOW_LEOPARD;
case '1': return MACOS_VERSION_LION;
case '2': return MACOS_VERSION_MOUNTAIN_LION;
case '3': return MACOS_VERSION_MAVERICKS;
default: return MACOS_VERSION_UNKNOWN;
}
}
......@@ -73,6 +77,18 @@ int GetMacosVersion() {
}
}
MacosVersion GetMacosVersion() {
atomic_uint32_t *cache =
reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
MacosVersion result =
static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
if (result == MACOS_VERSION_UNINITIALIZED) {
result = GetMacosVersionInternal();
atomic_store(cache, result, memory_order_release);
}
return result;
}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
// into memmove$VARIANT$sse42.
......@@ -227,18 +243,6 @@ bool AsanInterceptsSignal(int signum) {
void AsanPlatformThreadInit() {
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
(void)fast;
stack->size = 0;
stack->trace[0] = pc;
if ((max_s) > 1) {
stack->max_size = max_s;
if (!asan_inited) return;
if (AsanThread *t = asanThreadRegistry().GetCurrent())
stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom());
}
}
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
......@@ -286,32 +290,16 @@ typedef struct {
u32 parent_tid;
} asan_block_context_t;
// We use extern declarations of libdispatch functions here instead
// of including <dispatch/dispatch.h>. This header is not present on
// Mac OS X Leopard and earlier, and although we don't expect ASan to
// work on legacy systems, it's bad to break the build of
// LLVM compiler-rt there.
extern "C" {
void dispatch_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
} // extern "C"
static ALWAYS_INLINE
ALWAYS_INLINE
void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
AsanThread *t = asanThreadRegistry().GetCurrent();
AsanThread *t = GetCurrentThread();
if (!t) {
t = AsanThread::Create(parent_tid, 0, 0, stack);
asanThreadRegistry().RegisterThread(t);
t = AsanThread::Create(0, 0);
CreateThreadContextArgs args = { t, stack };
asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args);
t->Init();
asanThreadRegistry().SetCurrent(t);
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
}
}
......@@ -345,7 +333,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
asan_ctxt->block = ctxt;
asan_ctxt->func = func;
asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
asan_ctxt->parent_tid = GetCurrentTidOrInvalid();
return asan_ctxt;
}
......@@ -411,7 +399,7 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
#define GET_ASAN_BLOCK(work) \
void (^asan_block)(void); \
int parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); \
int parent_tid = GetCurrentTidOrInvalid(); \
asan_block = ^(void) { \
GET_STACK_TRACE_THREAD; \
asan_register_worker_thread(parent_tid, &stack); \
......@@ -449,4 +437,4 @@ INTERCEPTOR(void, dispatch_source_set_event_handler,
}
#endif
#endif // __APPLE__
#endif // SANITIZER_MAC
......@@ -34,12 +34,14 @@ typedef struct __CFRuntimeBase {
#endif
} CFRuntimeBase;
enum {
MACOS_VERSION_UNKNOWN = 0,
enum MacosVersion {
MACOS_VERSION_UNINITIALIZED = 0,
MACOS_VERSION_UNKNOWN,
MACOS_VERSION_LEOPARD,
MACOS_VERSION_SNOW_LEOPARD,
MACOS_VERSION_LION,
MACOS_VERSION_MOUNTAIN_LION
MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS
};
// Used by asan_malloc_mac.cc and asan_mac.cc
......@@ -47,7 +49,7 @@ extern "C" void __CFInitialize();
namespace __asan {
int GetMacosVersion();
MacosVersion GetMacosVersion();
void MaybeReplaceCFAllocator();
} // namespace __asan
......
......@@ -11,15 +11,16 @@
// We simply define functions like malloc, free, realloc, etc.
// They will replace the corresponding libc functions automagically.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
#include "asan_thread_registry.h"
#if ASAN_ANDROID
#if SANITIZER_ANDROID
DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
DECLARE_REAL_AND_INTERCEPTOR(void*, calloc, uptr nmemb, uptr size)
......@@ -144,4 +145,4 @@ INTERCEPTOR(void, malloc_stats, void) {
__asan_print_accumulated_stats();
}
#endif // __linux__
#endif // SANITIZER_LINUX
......@@ -10,12 +10,14 @@
// Mac-specific malloc interception.
//===----------------------------------------------------------------------===//
#ifdef __APPLE__
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include <AvailabilityMacros.h>
#include <CoreFoundation/CFBase.h>
#include <dlfcn.h>
#include <malloc/malloc.h>
#include <sys/mman.h>
#include "asan_allocator.h"
#include "asan_interceptors.h"
......@@ -24,7 +26,6 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread_registry.h"
// Similar code is used in Google Perftools,
// http://code.google.com/p/google-perftools.
......@@ -40,10 +41,19 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
vm_size_t start_size, unsigned zone_flags) {
if (!asan_inited) __asan_init();
GET_STACK_TRACE_MALLOC;
uptr page_size = GetPageSizeCached();
uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size);
malloc_zone_t *new_zone =
(malloc_zone_t*)asan_malloc(sizeof(asan_zone), &stack);
(malloc_zone_t*)asan_memalign(page_size, allocated_size,
&stack, FROM_MALLOC);
internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone));
new_zone->zone_name = NULL; // The name will be changed anyway.
if (GetMacosVersion() >= MACOS_VERSION_LION) {
// Prevent the client app from overwriting the zone contents.
// Library functions that need to modify the zone will set PROT_WRITE on it.
// This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
mprotect(new_zone, allocated_size, PROT_READ);
}
return new_zone;
}
......@@ -282,7 +292,7 @@ void mi_force_unlock(malloc_zone_t *zone) {
void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
AsanMallocStats malloc_stats;
asanThreadRegistry().FillMallocStatistics(&malloc_stats);
FillMallocStatistics(&malloc_stats);
CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats));
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}
......@@ -344,4 +354,4 @@ void ReplaceSystemMalloc() {
}
} // namespace __asan
#endif // __APPLE__
#endif // SANITIZER_MAC
......@@ -9,7 +9,9 @@
//
// Windows-specific malloc interception.
//===----------------------------------------------------------------------===//
#ifdef _WIN32
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#include "asan_allocator.h"
#include "asan_interceptors.h"
......@@ -28,11 +30,13 @@ using namespace __asan; // NOLINT
// revisited in the future.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void free(void *ptr) {
GET_STACK_TRACE_FREE;
return asan_free(ptr, &stack, FROM_MALLOC);
}
SANITIZER_INTERFACE_ATTRIBUTE
void _free_dbg(void* ptr, int) {
free(ptr);
}
......@@ -41,38 +45,46 @@ void cfree(void *ptr) {
CHECK(!"cfree() should not be used on Windows?");
}
SANITIZER_INTERFACE_ATTRIBUTE
void *malloc(size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
void* _malloc_dbg(size_t size, int , const char*, int) {
return malloc(size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *calloc(size_t nmemb, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
void* _calloc_dbg(size_t n, size_t size, int, const char*, int) {
return calloc(n, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
return calloc(nmemb, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *realloc(void *ptr, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
CHECK(!"_realloc_dbg should not exist!");
return 0;
}
SANITIZER_INTERFACE_ATTRIBUTE
void* _recalloc(void* p, size_t n, size_t elem_size) {
if (!p)
return calloc(n, elem_size);
......@@ -82,6 +94,7 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
return realloc(p, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
size_t _msize(void *ptr) {
GET_STACK_TRACE_MALLOC;
return asan_malloc_usable_size(ptr, &stack);
......
......@@ -47,6 +47,20 @@
// || `[0x24000000, 0x27ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/MIPS mapping:
// || `[0x2aaa8000, 0xffffffff]` || HighMem ||
// || `[0x0fffd000, 0x2aaa7fff]` || HighShadow ||
// || `[0x0bffd000, 0x0fffcfff]` || ShadowGap ||
// || `[0x0aaa8000, 0x0bffcfff]` || LowShadow ||
// || `[0x00000000, 0x0aaa7fff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29;
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
......@@ -54,22 +68,23 @@ extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
# define SHADOW_SCALE (__asan_mapping_scale)
# define SHADOW_OFFSET (__asan_mapping_offset)
#else
# if ASAN_ANDROID
# define SHADOW_SCALE (3)
# define SHADOW_SCALE kDefaultShadowScale
# if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
# else
# define SHADOW_SCALE (3)
# if SANITIZER_WORDSIZE == 32
# define SHADOW_OFFSET (1 << 29)
# if defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# else
# if defined(__powerpc64__)
# define SHADOW_OFFSET (1ULL << 41)
# define SHADOW_OFFSET kPPC64_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# else
# if ASAN_MAC
# define SHADOW_OFFSET (1ULL << 44)
# else
# define SHADOW_OFFSET 0x7fff8000ULL
# endif
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# endif
# endif
......@@ -131,7 +146,6 @@ static uptr kHighMemEnd = 0x7fffffffffffULL;
static uptr kMidMemBeg = 0x3000000000ULL;
static uptr kMidMemEnd = 0x4fffffffffULL;
#else
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
#endif
......
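For reference, the constants above feed the usual ASan address translation, Shadow = (Mem >> SHADOW_SCALE) + SHADOW_OFFSET. Below is a minimal sketch (not part of the patch; the function name and the sample address are illustrative) using the default 64-bit "short" offset defined in this hunk:

#include <cstdint>
#include <cstdio>

// Mirrors kDefaultShadowScale and kDefaultShort64bitShadowOffset above.
static const uint64_t kScale = 3;               // 1 shadow byte per 8 app bytes
static const uint64_t kOffset = 0x7fff8000ULL;  // < 2G

static uint64_t MemToShadowSketch(uint64_t mem) {
  return (mem >> kScale) + kOffset;
}

int main() {
  uint64_t addr = 0x602000000010ULL;  // arbitrary heap-like address
  std::printf("shadow(0x%llx) = 0x%llx\n",
              (unsigned long long)addr,
              (unsigned long long)MemToShadowSketch(addr));
  return 0;
}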
......@@ -27,7 +27,7 @@ using namespace __asan; // NOLINT
// On Android new() goes through malloc interceptors.
// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
#if !ASAN_ANDROID
#if !SANITIZER_ANDROID
// Fake std::nothrow_t to avoid including <new>.
namespace std {
......@@ -38,6 +38,14 @@ struct nothrow_t {};
GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack, type);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// runtime dylib, and the main executable will depend on both the runtime
// dylib and libstdc++, each of those'll have its implementation of new and
// delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
......@@ -49,10 +57,26 @@ INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void *, _Znwm, size_t size) {
OPERATOR_NEW_BODY(FROM_NEW);
}
INTERCEPTOR(void *, _Znam, size_t size) {
OPERATOR_NEW_BODY(FROM_NEW_BR);
}
INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(FROM_NEW);
}
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(FROM_NEW_BR);
}
#endif
#define OPERATOR_DELETE_BODY(type) \
GET_STACK_TRACE_FREE;\
asan_free(ptr, &stack, type);
#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
......@@ -64,4 +88,19 @@ INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR(void, _ZdaPv, void *ptr) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
#endif
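The comment in this hunk explains why the OS X build intercepts the mangled operator names directly. As a hedged aside (not part of the patch; purely for checking which operators those symbols denote), the names listed in the interceptors above can be demangled with the standard __cxa_demangle helper:

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
  // Mangled names taken from the INTERCEPTOR() declarations above.
  const char *names[] = { "_Znwm", "_Znam", "_ZdlPv", "_ZdaPv" };
  for (const char *n : names) {
    int status = 0;
    char *demangled = abi::__cxa_demangle(n, 0, 0, &status);
    std::printf("%s -> %s\n", n, status == 0 ? demangled : "<demangle failed>");
    std::free(demangled);
  }
  return 0;
}

On a 64-bit Itanium-ABI toolchain this prints operator new(unsigned long), operator new[](unsigned long), operator delete(void*) and operator delete[](void*), which are exactly the operators the interceptors above cover.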
......@@ -10,9 +10,7 @@
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
......@@ -20,11 +18,11 @@ namespace __asan {
void PoisonShadow(uptr addr, uptr size, u8 value) {
if (!flags()->poison_heap) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size - SHADOW_GRANULARITY) + 1;
CHECK(REAL(memset) != 0);
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
CHECK(REAL(memset));
FastPoisonShadow(addr, size, value);
}
void PoisonShadowPartialRightRedzone(uptr addr,
......@@ -33,20 +31,10 @@ void PoisonShadowPartialRightRedzone(uptr addr,
u8 value) {
if (!flags()->poison_heap) return;
CHECK(AddrIsAlignedByGranularity(addr));
u8 *shadow = (u8*)MemToShadow(addr);
for (uptr i = 0; i < redzone_size;
i += SHADOW_GRANULARITY, shadow++) {
if (i + SHADOW_GRANULARITY <= size) {
*shadow = 0; // fully addressable
} else if (i >= size) {
*shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
} else {
*shadow = size - i; // first size-i bytes are addressable
}
}
CHECK(AddrIsInMem(addr));
FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}
struct ShadowSegmentEndpoint {
u8 *chunk;
s8 offset; // in [0, SHADOW_GRANULARITY)
......@@ -179,6 +167,55 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
return 0;
}
#define CHECK_SMALL_REGION(p, size, isWrite) \
do { \
uptr __p = reinterpret_cast<uptr>(p); \
uptr __size = size; \
if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \
__asan::AddressIsPoisoned(__p + __size - 1))) { \
GET_CURRENT_PC_BP_SP; \
uptr __bad = __asan_region_is_poisoned(__p, __size); \
__asan_report_error(pc, bp, sp, __bad, isWrite, __size);\
} \
} while (false); \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
CHECK_SMALL_REGION(p, sizeof(*p), false);
return *p;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
CHECK_SMALL_REGION(p, sizeof(*p), false);
return *p;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
CHECK_SMALL_REGION(p, sizeof(*p), false);
return *p;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
CHECK_SMALL_REGION(p, sizeof(*p), true);
*p = x;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
CHECK_SMALL_REGION(p, sizeof(*p), true);
*p = x;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
CHECK_SMALL_REGION(p, sizeof(*p), true);
*p = x;
}
// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that left border of region to be poisoned is properly aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
......
//===-- asan_poisoning.h ----------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
namespace __asan {
// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);
// Poisons the shadow memory for "redzone_size" bytes starting from
// "addr + size".
void PoisonShadowPartialRightRedzone(uptr addr,
uptr size,
uptr redzone_size,
u8 value);
// Fast versions of PoisonShadow and PoisonShadowPartialRightRedzone that
// assume that memory addresses are properly aligned. Use in
// performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
DCHECK(flags()->poison_heap);
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
DCHECK(flags()->poison_heap);
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
if (i + SHADOW_GRANULARITY <= size) {
*shadow = 0; // fully addressable
} else if (i >= size) {
*shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
} else {
// first size-i bytes are addressable
*shadow = static_cast<u8>(size - i);
}
}
}
} // namespace __asan
......@@ -9,14 +9,15 @@
//
// Posix-specific details.
//===----------------------------------------------------------------------===//
#if defined(__linux__) || defined(__APPLE__)
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_MAC
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
......@@ -40,7 +41,7 @@ static void MaybeInstallSigaction(int signum,
sigact.sa_sigaction = handler;
sigact.sa_flags = SA_SIGINFO;
if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
CHECK(0 == REAL(sigaction)(signum, &sigact, 0));
CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
if (flags()->verbosity >= 1) {
Report("Installed the sigaction for signal %d\n", signum);
}
......@@ -57,7 +58,7 @@ static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
void SetAlternateSignalStack() {
stack_t altstack, oldstack;
CHECK(0 == sigaltstack(0, &oldstack));
CHECK_EQ(0, sigaltstack(0, &oldstack));
// If the alternate stack is already in place, do nothing.
if ((oldstack.ss_flags & SS_DISABLE) == 0) return;
// TODO(glider): the mapped stack should have the MAP_STACK flag in the
......@@ -67,10 +68,10 @@ void SetAlternateSignalStack() {
altstack.ss_sp = base;
altstack.ss_flags = 0;
altstack.ss_size = kAltStackSize;
CHECK(0 == sigaltstack(&altstack, 0));
CHECK_EQ(0, sigaltstack(&altstack, 0));
if (flags()->verbosity > 0) {
Report("Alternative stack for T%d set: [%p,%p)\n",
asanThreadRegistry().GetCurrentTidOrInvalid(),
GetCurrentTidOrInvalid(),
altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
}
}
......@@ -80,7 +81,7 @@ void UnsetAlternateSignalStack() {
altstack.ss_sp = 0;
altstack.ss_flags = SS_DISABLE;
altstack.ss_size = 0;
CHECK(0 == sigaltstack(&altstack, &oldstack));
CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
......@@ -100,7 +101,7 @@ static bool tsd_key_inited = false;
void AsanTSDInit(void (*destructor)(void *tsd)) {
CHECK(!tsd_key_inited);
tsd_key_inited = true;
CHECK(0 == pthread_key_create(&tsd_key, destructor));
CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}
void *AsanTSDGet() {
......@@ -115,4 +116,4 @@ void AsanTSDSet(void *tsd) {
} // namespace __asan
#endif // __linux__ || __APPLE_
#endif // SANITIZER_LINUX || SANITIZER_MAC
......@@ -16,9 +16,11 @@
// On Linux, we force __asan_init to be called before anyone else
// by placing it into .preinit_array section.
// FIXME: do we have anything like this on Mac?
// The symbol is called __local_asan_preinit, because it's not intended to be
// exported.
__attribute__((section(".preinit_array"), used))
void (*__asan_preinit)(void) =__asan_init;
#elif defined(_WIN32) && defined(_DLL)
void (*__local_asan_preinit)(void) = __asan_init;
#elif SANITIZER_WINDOWS && defined(_DLL)
// On Windows, when using dynamic CRT (/MD), we can put a pointer
// to __asan_init into the global list of C initializers.
// See crt0dat.c in the CRT sources for the details.
......
......@@ -27,7 +27,7 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size);
// Determines memory type on its own.
void DescribeAddress(uptr addr, uptr access_size);
void DescribeThread(AsanThreadSummary *summary);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
......
......@@ -12,6 +12,7 @@
#include "asan_internal.h"
#include "asan_flags.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
......@@ -22,8 +23,8 @@ static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
}
void PrintStack(StackTrace *stack) {
stack->PrintStack(stack->trace, stack->size, flags()->symbolize,
flags()->strip_path_prefix, MaybeCallAsanSymbolize);
stack->PrintStack(stack->trace, stack->size, common_flags()->symbolize,
common_flags()->strip_path_prefix, MaybeCallAsanSymbolize);
}
} // namespace __asan
......@@ -33,8 +34,8 @@ void PrintStack(StackTrace *stack) {
// Provide default implementation of __asan_symbolize that does nothing
// and may be overridden by the user to provide their own symbolization.
// ASan on Windows has its own implementation of this.
#if !defined(_WIN32) && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
#if !SANITIZER_WINDOWS && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
return false;
}
......
......@@ -12,12 +12,13 @@
#ifndef ASAN_STACK_H
#define ASAN_STACK_H
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "asan_flags.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
namespace __asan {
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast);
void PrintStack(StackTrace *stack);
} // namespace __asan
......@@ -25,10 +26,24 @@ void PrintStack(StackTrace *stack);
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
// fast_unwind is currently unused.
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
GetStackTrace(&stack, max_s, pc, bp, fast)
#if SANITIZER_WINDOWS
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
GetStackTrace(&stack, max_s, pc, bp, 0, 0, fast)
#else
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
{ \
AsanThread *t; \
stack.size = 0; \
if (asan_inited && (t = GetCurrentThread()) && !t->isUnwinding()) { \
uptr stack_top = t->stack_top(); \
uptr stack_bottom = t->stack_bottom(); \
ScopedUnwinding unwind_scope(t); \
GetStackTrace(&stack, max_s, pc, bp, stack_top, stack_bottom, fast); \
} \
}
#endif // SANITIZER_WINDOWS
// NOTE: A rule of thumb is to retrieve the stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally
......@@ -40,24 +55,24 @@ void PrintStack(StackTrace *stack);
#define GET_STACK_TRACE_FATAL(pc, bp) \
GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
flags()->fast_unwind_on_fatal)
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_THREAD \
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(flags()->malloc_context_size, \
flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(common_flags()->malloc_context_size, \
common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
#define PRINT_CURRENT_STACK() \
{ \
GET_STACK_TRACE(kStackTraceMax, \
flags()->fast_unwind_on_fatal); \
common_flags()->fast_unwind_on_fatal); \
PrintStack(&stack); \
}
......
......@@ -12,13 +12,18 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stats.h"
#include "asan_thread_registry.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
AsanStats::AsanStats() {
CHECK(REAL(memset) != 0);
Clear();
}
void AsanStats::Clear() {
CHECK(REAL(memset));
REAL(memset)(this, 0, sizeof(AsanStats));
}
......@@ -51,11 +56,73 @@ void AsanStats::Print() {
malloc_large, malloc_small_slow);
}
void AsanStats::MergeFrom(const AsanStats *stats) {
uptr *dst_ptr = reinterpret_cast<uptr*>(this);
const uptr *src_ptr = reinterpret_cast<const uptr*>(stats);
uptr num_fields = sizeof(*this) / sizeof(uptr);
for (uptr i = 0; i < num_fields; i++)
dst_ptr[i] += src_ptr[i];
}
static BlockingMutex print_lock(LINKER_INITIALIZED);
static AsanStats unknown_thread_stats(LINKER_INITIALIZED);
static AsanStats dead_threads_stats(LINKER_INITIALIZED);
static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED);
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread AsanStats.
static uptr max_malloced_memory;
static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
AsanStats *accumulated_stats = reinterpret_cast<AsanStats*>(arg);
AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
if (AsanThread *t = tctx->thread)
accumulated_stats->MergeFrom(&t->stats());
}
static void GetAccumulatedStats(AsanStats *stats) {
stats->Clear();
{
ThreadRegistryLock l(&asanThreadRegistry());
asanThreadRegistry()
.RunCallbackForEachThreadLocked(MergeThreadStats, stats);
}
stats->MergeFrom(&unknown_thread_stats);
{
BlockingMutexLock lock(&dead_threads_stats_lock);
stats->MergeFrom(&dead_threads_stats);
}
// This is not very accurate: we may miss allocation peaks that happen
// between two updates of accumulated_stats_. For more accurate bookkeeping
// the maximum should be updated on every malloc(), which is unacceptable.
if (max_malloced_memory < stats->malloced) {
max_malloced_memory = stats->malloced;
}
}
void FlushToDeadThreadStats(AsanStats *stats) {
BlockingMutexLock lock(&dead_threads_stats_lock);
dead_threads_stats.MergeFrom(stats);
stats->Clear();
}
void FillMallocStatistics(AsanMallocStats *malloc_stats) {
AsanStats stats;
GetAccumulatedStats(&stats);
malloc_stats->blocks_in_use = stats.mallocs;
malloc_stats->size_in_use = stats.malloced;
malloc_stats->max_size_in_use = max_malloced_memory;
malloc_stats->size_allocated = stats.mmaped;
}
AsanStats &GetCurrentThreadStats() {
AsanThread *t = GetCurrentThread();
return (t) ? t->stats() : unknown_thread_stats;
}
static void PrintAccumulatedStats() {
AsanStats stats;
asanThreadRegistry().GetAccumulatedStats(&stats);
GetAccumulatedStats(&stats);
// Use lock to keep reports from mixing up.
BlockingMutexLock lock(&print_lock);
stats.Print();
......@@ -71,15 +138,33 @@ static void PrintAccumulatedStats() {
using namespace __asan; // NOLINT
uptr __asan_get_current_allocated_bytes() {
return asanThreadRegistry().GetCurrentAllocatedBytes();
AsanStats stats;
GetAccumulatedStats(&stats);
uptr malloced = stats.malloced;
uptr freed = stats.freed;
// Return sane value if malloced < freed due to racy
// way we update accumulated stats.
return (malloced > freed) ? malloced - freed : 1;
}
uptr __asan_get_heap_size() {
return asanThreadRegistry().GetHeapSize();
AsanStats stats;
GetAccumulatedStats(&stats);
return stats.mmaped - stats.munmaped;
}
uptr __asan_get_free_bytes() {
return asanThreadRegistry().GetFreeBytes();
AsanStats stats;
GetAccumulatedStats(&stats);
uptr total_free = stats.mmaped
- stats.munmaped
+ stats.really_freed
+ stats.really_freed_redzones;
uptr total_used = stats.malloced
+ stats.malloced_redzones;
// Return sane value if total_free < total_used due to racy
// way we update accumulated stats.
return (total_free > total_used) ? total_free - total_used : 1;
}
uptr __asan_get_unmapped_bytes() {
......
......@@ -50,10 +50,17 @@ struct AsanStats {
// Default ctor for thread-local stats.
AsanStats();
// Prints formatted stats to stderr.
void Print();
void Print(); // Prints formatted stats to stderr.
void Clear();
void MergeFrom(const AsanStats *stats);
};
// Returns stats for GetCurrentThread(), or stats for fake "unknown thread"
// if GetCurrentThread() returns 0.
AsanStats &GetCurrentThreadStats();
// Flushes a given stats into accumulated stats of dead threads.
void FlushToDeadThreadStats(AsanStats *stats);
// A cross-platform equivalent of malloc_statistics_t on Mac OS.
struct AsanMallocStats {
uptr blocks_in_use;
......@@ -62,6 +69,8 @@ struct AsanMallocStats {
uptr size_allocated;
};
void FillMallocStatistics(AsanMallocStats *malloc_stats);
} // namespace __asan
#endif // ASAN_STATS_H
......@@ -14,99 +14,148 @@
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_fake_stack.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __asan {
const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
const u32 kMaxNumberOfThreads = (1 << 22); // 4M
class AsanThread;
// These objects are created for every thread and are never deleted,
// so we can find them by tid even if the thread is long dead.
class AsanThreadSummary {
class AsanThreadContext : public ThreadContextBase {
public:
explicit AsanThreadSummary(LinkerInitialized) { } // for T0.
void Init(u32 parent_tid, StackTrace *stack) {
parent_tid_ = parent_tid;
announced_ = false;
tid_ = kInvalidTid;
if (stack) {
internal_memcpy(&stack_, stack, sizeof(*stack));
}
thread_ = 0;
name_[0] = 0;
explicit AsanThreadContext(int tid)
: ThreadContextBase(tid),
announced(false),
thread(0) {
internal_memset(&stack, 0, sizeof(stack));
}
u32 tid() { return tid_; }
void set_tid(u32 tid) { tid_ = tid; }
u32 parent_tid() { return parent_tid_; }
bool announced() { return announced_; }
void set_announced(bool announced) { announced_ = announced; }
StackTrace *stack() { return &stack_; }
AsanThread *thread() { return thread_; }
void set_thread(AsanThread *thread) { thread_ = thread; }
static void TSDDtor(void *tsd);
void set_name(const char *name) {
internal_strncpy(name_, name, sizeof(name_) - 1);
}
const char *name() { return name_; }
bool announced;
StackTrace stack;
AsanThread *thread;
private:
u32 tid_;
u32 parent_tid_;
bool announced_;
StackTrace stack_;
AsanThread *thread_;
char name_[128];
void OnCreated(void *arg);
void OnFinished();
};
// AsanThreadSummary objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadSummary) <= 4094);
// AsanThreadContext objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadContext) <= 4096);
// AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread {
public:
explicit AsanThread(LinkerInitialized); // for T0.
static AsanThread *Create(u32 parent_tid, thread_callback_t start_routine,
void *arg, StackTrace *stack);
static AsanThread *Create(thread_callback_t start_routine, void *arg);
static void TSDDtor(void *tsd);
void Destroy();
void Init(); // Should be called from the thread itself.
thread_return_t ThreadStart();
thread_return_t ThreadStart(uptr os_id);
uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }
uptr stack_size() { return stack_top_ - stack_bottom_; }
u32 tid() { return summary_->tid(); }
AsanThreadSummary *summary() { return summary_; }
void set_summary(AsanThreadSummary *summary) { summary_ = summary; }
uptr stack_size() { return stack_size_; }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
u32 tid() { return context_->tid; }
AsanThreadContext *context() { return context_; }
void set_context(AsanThreadContext *context) { context_ = context; }
const char *GetFrameNameByAddr(uptr addr, uptr *offset);
const char *GetFrameNameByAddr(uptr addr, uptr *offset, uptr *frame_pc);
bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
}
FakeStack &fake_stack() { return fake_stack_; }
void DeleteFakeStack() {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = 0;
SetTLSFakeStack(0);
t->Destroy();
}
bool has_fake_stack() {
return (reinterpret_cast<uptr>(fake_stack_) > 1);
}
FakeStack *fake_stack() {
if (!__asan_option_detect_stack_use_after_return)
return 0;
if (!has_fake_stack())
return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_;
}
// True if this thread is currently unwinding the stack (i.e. collecting a stack
// trace). Used to prevent deadlocks on platforms where libc unwinder calls
// malloc internally. See PR17116 for more details.
bool isUnwinding() const { return unwinding; }
void setUnwinding(bool b) { unwinding = b; }
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; }
private:
void SetThreadStackTopAndBottom();
void ClearShadowForThreadStack();
AsanThreadSummary *summary_;
AsanThread() : unwinding(false) {}
void SetThreadStackAndTls();
void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack();
AsanThreadContext *context_;
thread_callback_t start_routine_;
void *arg_;
uptr stack_top_;
uptr stack_bottom_;
// stack_size_ == stack_top_ - stack_bottom_;
// It needs to be set in a async-signal-safe manner.
uptr stack_size_;
uptr tls_begin_;
uptr tls_end_;
FakeStack fake_stack_;
FakeStack *fake_stack_;
AsanThreadLocalMallocStorage malloc_storage_;
AsanStats stats_;
bool unwinding;
};
// ScopedUnwinding is a scope for stacktracing member of a context
class ScopedUnwinding {
public:
explicit ScopedUnwinding(AsanThread *t) : thread(t) {
t->setUnwinding(true);
}
~ScopedUnwinding() { thread->setUnwinding(false); }
private:
AsanThread *thread;
};
struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};
// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();
// Must be called under ThreadRegistryLock.
AsanThreadContext *GetThreadContextByTidLocked(u32 tid);
// Get the current thread. May return 0.
AsanThread *GetCurrentThread();
void SetCurrentThread(AsanThread *t);
u32 GetCurrentTidOrInvalid();
AsanThread *FindThreadByStackAddress(uptr addr);
// Used to handle fork().
void EnsureMainThreadIDIsCorrect();
} // namespace __asan
#endif // ASAN_THREAD_H
//===-- asan_thread_registry.cc -------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// AsanThreadRegistry-related code. AsanThreadRegistry is a container
// for summaries of all created threads.
//===----------------------------------------------------------------------===//
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
static AsanThreadRegistry asan_thread_registry(LINKER_INITIALIZED);
AsanThreadRegistry &asanThreadRegistry() {
return asan_thread_registry;
}
AsanThreadRegistry::AsanThreadRegistry(LinkerInitialized x)
: main_thread_(x),
main_thread_summary_(x),
accumulated_stats_(x),
max_malloced_memory_(x),
mu_(x) { }
void AsanThreadRegistry::Init() {
AsanTSDInit(AsanThreadSummary::TSDDtor);
main_thread_.set_summary(&main_thread_summary_);
main_thread_summary_.set_thread(&main_thread_);
RegisterThread(&main_thread_);
SetCurrent(&main_thread_);
// At this point only one thread exists.
inited_ = true;
}
void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
BlockingMutexLock lock(&mu_);
u32 tid = n_threads_;
n_threads_++;
CHECK(n_threads_ < kMaxNumberOfThreads);
AsanThreadSummary *summary = thread->summary();
CHECK(summary != 0);
summary->set_tid(tid);
thread_summaries_[tid] = summary;
}
void AsanThreadRegistry::UnregisterThread(AsanThread *thread) {
BlockingMutexLock lock(&mu_);
FlushToAccumulatedStatsUnlocked(&thread->stats());
AsanThreadSummary *summary = thread->summary();
CHECK(summary);
summary->set_thread(0);
}
AsanThread *AsanThreadRegistry::GetMain() {
return &main_thread_;
}
AsanThread *AsanThreadRegistry::GetCurrent() {
AsanThreadSummary *summary = (AsanThreadSummary *)AsanTSDGet();
if (!summary) {
#if ASAN_ANDROID
// On Android, libc constructor is called _after_ asan_init, and cleans up
// TSD. Try to figure out if this is still the main thread by the stack
// address. We are not entirely sure that we have correct main thread
// limits, so only do this magic on Android, and only if the found thread is
// the main thread.
AsanThread* thread = FindThreadByStackAddress((uptr)&summary);
if (thread && thread->tid() == 0) {
SetCurrent(thread);
return thread;
}
#endif
return 0;
}
return summary->thread();
}
void AsanThreadRegistry::SetCurrent(AsanThread *t) {
CHECK(t->summary());
if (flags()->verbosity >= 2) {
Report("SetCurrent: %p for thread %p\n",
t->summary(), (void*)GetThreadSelf());
}
// Make sure we do not reset the current AsanThread.
CHECK(AsanTSDGet() == 0);
AsanTSDSet(t->summary());
CHECK(AsanTSDGet() == t->summary());
}
AsanStats &AsanThreadRegistry::GetCurrentThreadStats() {
AsanThread *t = GetCurrent();
return (t) ? t->stats() : main_thread_.stats();
}
void AsanThreadRegistry::GetAccumulatedStats(AsanStats *stats) {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
internal_memcpy(stats, &accumulated_stats_, sizeof(accumulated_stats_));
}
uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr malloced = accumulated_stats_.malloced;
uptr freed = accumulated_stats_.freed;
// Return sane value if malloced < freed due to racy
// way we update accumulated stats.
return (malloced > freed) ? malloced - freed : 1;
}
uptr AsanThreadRegistry::GetHeapSize() {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
}
uptr AsanThreadRegistry::GetFreeBytes() {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr total_free = accumulated_stats_.mmaped
- accumulated_stats_.munmaped
+ accumulated_stats_.really_freed
+ accumulated_stats_.really_freed_redzones;
uptr total_used = accumulated_stats_.malloced
+ accumulated_stats_.malloced_redzones;
// Return sane value if total_free < total_used due to racy
// way we update accumulated stats.
return (total_free > total_used) ? total_free - total_used : 1;
}
// Return several stats counters with a single call to
// UpdateAccumulatedStatsUnlocked().
void AsanThreadRegistry::FillMallocStatistics(AsanMallocStats *malloc_stats) {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
malloc_stats->blocks_in_use = accumulated_stats_.mallocs;
malloc_stats->size_in_use = accumulated_stats_.malloced;
malloc_stats->max_size_in_use = max_malloced_memory_;
malloc_stats->size_allocated = accumulated_stats_.mmaped;
}
AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) {
CHECK(tid < n_threads_);
CHECK(thread_summaries_[tid]);
return thread_summaries_[tid];
}
AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) {
BlockingMutexLock lock(&mu_);
for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread();
if (!t || !(t->fake_stack().StackSize())) continue;
if (t->fake_stack().AddrIsInFakeStack(addr) || t->AddrIsInStack(addr)) {
return t;
}
}
return 0;
}
void AsanThreadRegistry::UpdateAccumulatedStatsUnlocked() {
for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread();
if (t != 0) {
FlushToAccumulatedStatsUnlocked(&t->stats());
}
}
// This is not very accurate: we may miss allocation peaks that happen
// between two updates of accumulated_stats_. For more accurate bookkeeping
// the maximum should be updated on every malloc(), which is unacceptable.
if (max_malloced_memory_ < accumulated_stats_.malloced) {
max_malloced_memory_ = accumulated_stats_.malloced;
}
}
void AsanThreadRegistry::FlushToAccumulatedStatsUnlocked(AsanStats *stats) {
// AsanStats consists of variables of type uptr only.
uptr *dst = (uptr*)&accumulated_stats_;
uptr *src = (uptr*)stats;
uptr num_fields = sizeof(AsanStats) / sizeof(uptr);
for (uptr i = 0; i < num_fields; i++) {
dst[i] += src[i];
src[i] = 0;
}
}
} // namespace __asan
//===-- asan_thread_registry.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_thread_registry.cc
//===----------------------------------------------------------------------===//
#ifndef ASAN_THREAD_REGISTRY_H
#define ASAN_THREAD_REGISTRY_H
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
// Stores summaries of all created threads, returns current thread,
// thread by tid, thread by stack address. There is a single instance
// of AsanThreadRegistry for the whole program.
// AsanThreadRegistry is thread-safe.
class AsanThreadRegistry {
public:
explicit AsanThreadRegistry(LinkerInitialized);
void Init();
void RegisterThread(AsanThread *thread);
void UnregisterThread(AsanThread *thread);
AsanThread *GetMain();
// Get the current thread. May return 0.
AsanThread *GetCurrent();
void SetCurrent(AsanThread *t);
u32 GetCurrentTidOrInvalid() {
if (!inited_) return 0;
AsanThread *t = GetCurrent();
return t ? t->tid() : kInvalidTid;
}
// Returns stats for GetCurrent(), or stats for
// T0 if GetCurrent() returns 0.
AsanStats &GetCurrentThreadStats();
// Flushes all thread-local stats to accumulated stats, and makes
// a copy of accumulated stats.
void GetAccumulatedStats(AsanStats *stats);
uptr GetCurrentAllocatedBytes();
uptr GetHeapSize();
uptr GetFreeBytes();
void FillMallocStatistics(AsanMallocStats *malloc_stats);
AsanThreadSummary *FindByTid(u32 tid);
AsanThread *FindThreadByStackAddress(uptr addr);
private:
void UpdateAccumulatedStatsUnlocked();
// Adds values of all counters in "stats" to accumulated stats,
// and fills "stats" with zeroes.
void FlushToAccumulatedStatsUnlocked(AsanStats *stats);
static const u32 kMaxNumberOfThreads = (1 << 22); // 4M
AsanThreadSummary *thread_summaries_[kMaxNumberOfThreads];
AsanThread main_thread_;
AsanThreadSummary main_thread_summary_;
AsanStats accumulated_stats_;
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread AsanStats.
uptr max_malloced_memory_;
u32 n_threads_;
BlockingMutex mu_;
bool inited_;
};
// Returns a single instance of registry.
AsanThreadRegistry &asanThreadRegistry();
} // namespace __asan
#endif // ASAN_THREAD_REGISTRY_H
......@@ -9,7 +9,9 @@
//
// Windows-specific details.
//===----------------------------------------------------------------------===//
#ifdef _WIN32
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#include <windows.h>
#include <dbghelp.h>
......@@ -21,6 +23,14 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_should_detect_stack_use_after_return() {
__asan_init();
return __asan_option_detect_stack_use_after_return;
}
}
namespace __asan {
// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
......@@ -28,30 +38,6 @@ static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib")
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
(void)fast;
stack->max_size = max_s;
void *tmp[kStackTraceMax];
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
// FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
uptr cs_ret = CaptureStackBackTrace(1, stack->max_size, tmp, 0);
uptr offset = 0;
// Skip the RTL frames by searching for the PC in the stacktrace.
// FIXME: this doesn't work well for the malloc/free stacks yet.
for (uptr i = 0; i < cs_ret; i++) {
if (pc != (uptr)tmp[i])
continue;
offset = i;
break;
}
stack->size = cs_ret - offset;
for (uptr i = 0; i < stack->size; i++)
stack->trace[i] = (uptr)tmp[i + offset];
}
// ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false;
......
# This file is used to maintain libtool version info for libmudflap. See
# This file is used to maintain libtool version info for libasan. See
# the libtool manual to understand the meaning of the fields. This is
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
0:0:0
1:0:0
......@@ -14549,7 +14549,7 @@ fi
ac_config_files="$ac_config_files Makefile"
ac_config_files="$ac_config_files interception/Makefile sanitizer_common/Makefile asan/Makefile ubsan/Makefile"
ac_config_files="$ac_config_files interception/Makefile sanitizer_common/Makefile lsan/Makefile asan/Makefile ubsan/Makefile"
if test "x$TSAN_SUPPORTED" = "xyes"; then
......@@ -15679,6 +15679,7 @@ do
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"interception/Makefile") CONFIG_FILES="$CONFIG_FILES interception/Makefile" ;;
"sanitizer_common/Makefile") CONFIG_FILES="$CONFIG_FILES sanitizer_common/Makefile" ;;
"lsan/Makefile") CONFIG_FILES="$CONFIG_FILES lsan/Makefile" ;;
"asan/Makefile") CONFIG_FILES="$CONFIG_FILES asan/Makefile" ;;
"ubsan/Makefile") CONFIG_FILES="$CONFIG_FILES ubsan/Makefile" ;;
"tsan/Makefile") CONFIG_FILES="$CONFIG_FILES tsan/Makefile" ;;
......@@ -17035,6 +17036,17 @@ _EOF
. ${multi_basedir}/config-ml.in
{ ml_norecursion=; unset ml_norecursion;}
;;
"lsan/Makefile":F) cat > vpsed$$ << \_EOF
s!`test -f '$<' || echo '$(srcdir)/'`!!
_EOF
sed -f vpsed$$ $ac_file > tmp$$
mv tmp$$ $ac_file
rm vpsed$$
echo 'MULTISUBDIR =' >> $ac_file
ml_norecursion=yes
. ${multi_basedir}/config-ml.in
{ ml_norecursion=; unset ml_norecursion;}
;;
"asan/Makefile":F) cat > vpsed$$ << \_EOF
s!`test -f '$<' || echo '$(srcdir)/'`!!
_EOF
......
......@@ -89,7 +89,7 @@ AM_CONDITIONAL(USING_MAC_INTERPOSE, $MAC_INTERPOSE)
AC_CONFIG_FILES([Makefile])
AC_CONFIG_FILES(AC_FOREACH([DIR], [interception sanitizer_common asan ubsan], [DIR/Makefile ]),
AC_CONFIG_FILES(AC_FOREACH([DIR], [interception sanitizer_common lsan asan ubsan], [DIR/Makefile ]),
[cat > vpsed$$ << \_EOF
s!`test -f '$<' || echo '$(srcdir)/'`!!
_EOF
......
......@@ -39,6 +39,16 @@ extern "C" {
// the error message. This function can be overridden by the client.
void __sanitizer_report_error_summary(const char *error_summary);
// Some of the sanitizers (e.g. asan/tsan) may miss bugs that happen
// in unaligned loads/stores. In order to find such bugs reliably one needs
// to replace plain unaligned loads/stores with these calls.
uint16_t __sanitizer_unaligned_load16(const void *p);
uint32_t __sanitizer_unaligned_load32(const void *p);
uint64_t __sanitizer_unaligned_load64(const void *p);
void __sanitizer_unaligned_store16(void *p, uint16_t x);
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
#ifdef __cplusplus
} // extern "C"
#endif
......
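Since the header hunk above only declares the unaligned-access hooks, here is a minimal usage sketch (not part of the patch; the function name and buffer are illustrative) showing how a plain unaligned read would be routed through the hook so the sanitizer checks the whole 4-byte access:

#include <cstddef>
#include <cstdint>
#include <sanitizer/common_interface_defs.h>

// Read a 32-bit value at an arbitrary (possibly unaligned) offset.
// Using the hook instead of a raw cast lets the sanitizer validate the access.
uint32_t read_u32_at(const char *buf, size_t offset) {
  return __sanitizer_unaligned_load32(buf + offset);
}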
//===-- dfsan_interface.h -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of DataFlowSanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef DFSAN_INTERFACE_H
#define DFSAN_INTERFACE_H
#include <stddef.h>
#include <stdint.h>
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef uint16_t dfsan_label;
/// Stores information associated with a specific label identifier. A label
/// may be a base label created using dfsan_create_label, with associated
/// text description and user data, or an automatically created union label,
/// which represents the union of two label identifiers (which may themselves
/// be base or union labels).
struct dfsan_label_info {
// Fields for union labels, set to 0 for base labels.
dfsan_label l1;
dfsan_label l2;
// Fields for base labels.
const char *desc;
void *userdata;
};
/// Computes the union of \c l1 and \c l2, possibly creating a union label in
/// the process.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
/// Creates and returns a base label with the given description and user data.
dfsan_label dfsan_create_label(const char *desc, void *userdata);
/// Sets the label for each address in [addr,addr+size) to \c label.
void dfsan_set_label(dfsan_label label, void *addr, size_t size);
/// Sets the label for each address in [addr,addr+size) to the union of the
/// current label for that address and \c label.
void dfsan_add_label(dfsan_label label, void *addr, size_t size);
/// Retrieves the label associated with the given data.
///
/// The type of 'data' is arbitrary. The function accepts a value of any type,
/// which can be truncated or extended (implicitly or explicitly) as necessary.
/// The truncation/extension operations will preserve the label of the original
/// value.
dfsan_label dfsan_get_label(long data);
/// Retrieves the label associated with the data at the given address.
dfsan_label dfsan_read_label(const void *addr, size_t size);
/// Retrieves a pointer to the dfsan_label_info struct for the given label.
const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label);
/// Returns whether the given label label contains the label elem.
int dfsan_has_label(dfsan_label label, dfsan_label elem);
/// If the given label label contains a label with the description desc, returns
/// that label, else returns 0.
dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
#ifdef __cplusplus
} // extern "C"
template <typename T>
void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
dfsan_set_label(label, (void *)&data, sizeof(T));
}
#endif
#endif // DFSAN_INTERFACE_H
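A minimal end-to-end sketch of the interface above (not part of the patch; requires building with clang's -fsanitize=dataflow): create a label, attach it to a variable, and check that it propagates through a computation.

#include <cassert>
#include <sanitizer/dfsan_interface.h>

int main() {
  int i = 1;
  dfsan_label i_label = dfsan_create_label("i", 0);
  dfsan_set_label(i_label, &i, sizeof(i));

  int j = i + 2;  // the label propagates through the addition
  assert(dfsan_has_label(dfsan_get_label(j), i_label));
  return 0;
}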
//===-- sanitizer/lsan_interface.h ------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LSAN_INTERFACE_H
#define SANITIZER_LSAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
// Allocations made between calls to __lsan_disable() and __lsan_enable() will
// be treated as non-leaks. Disable/enable pairs may be nested.
void __lsan_disable();
void __lsan_enable();
// The heap object into which p points will be treated as a non-leak.
void __lsan_ignore_object(const void *p);
// The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
int __lsan_is_turned_off();
// Calling this function makes LSan enter the leak checking phase immediately.
// Use this if normal end-of-process leak checking happens too late (e.g. if
// you have intentional memory leaks in your shutdown code). Calling this
// function overrides end-of-process leak checking; it must be called at
// most once per process. This function will terminate the process if there
// are memory leaks and the exit_code flag is non-zero.
void __lsan_do_leak_check();
#ifdef __cplusplus
} // extern "C"
namespace __lsan {
class ScopedDisabler {
public:
ScopedDisabler() { __lsan_disable(); }
~ScopedDisabler() { __lsan_enable(); }
};
} // namespace __lsan
#endif
#endif // SANITIZER_LSAN_INTERFACE_H
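A short usage sketch for the interface above (not part of the patch; the allocations are illustrative): excluding an intentional leak from the report and suppressing leak tracking around a region of code.

#include <cstdlib>
#include <sanitizer/lsan_interface.h>

int main() {
  void *intentional = std::malloc(16);  // deliberately never freed
  __lsan_ignore_object(intentional);    // will not be reported as a leak

  __lsan_disable();                     // allocations below are treated as non-leaks
  void *scratch = std::malloc(32);
  __lsan_enable();

  (void)scratch;
  return 0;  // end-of-process leak check reports nothing for these objects
}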
//===-- msan_interface.h --------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef MSAN_INTERFACE_H
#define MSAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
#if __has_feature(memory_sanitizer)
/* Returns a string describing a stack origin.
   Returns NULL if the origin is invalid or is not a stack origin. */
const char *__msan_get_origin_descr_if_stack(uint32_t id);
/* Set raw origin for the memory range. */
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
/* Get raw origin for an address. */
uint32_t __msan_get_origin(const volatile void *a);
/* Returns non-zero if tracking origins. */
int __msan_get_track_origins();
/* Returns the origin id of the latest UMR in the calling thread. */
uint32_t __msan_get_umr_origin();
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
/* Make memory region fully uninitialized (without changing its contents). */
void __msan_poison(const volatile void *a, size_t size);
/* Make memory region partially uninitialized (without changing its contents).
*/
void __msan_partial_poison(const volatile void *data, void *shadow,
size_t size);
/* Returns the offset of the first (at least partially) poisoned byte in the
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
/* Set exit code when error(s) were detected.
Value of 0 means don't change the program exit code. */
void __msan_set_exit_code(int exit_code);
/* For testing:
__msan_set_expect_umr(1);
... some buggy code ...
__msan_set_expect_umr(0);
The last line will verify that a UMR happened. */
void __msan_set_expect_umr(int expect_umr);
/* Change the value of the keep_going flag. A non-zero value means don't
   terminate program execution when an error is detected. This will not affect
   errors in modules that were compiled without the corresponding compiler
   flag. */
void __msan_set_keep_going(int keep_going);
/* Print shadow and origin for the memory range to stdout in a human-readable
format. */
void __msan_print_shadow(const volatile void *x, size_t size);
/* Print current function arguments shadow and origin to stdout in a
human-readable format. */
void __msan_print_param_shadow();
/* Returns true if running under a dynamic tool (DynamoRIO-based). */
int __msan_has_dynamic_component();
/* Tell MSan about newly allocated memory (e.g. by a custom allocator).
Memory will be marked uninitialized, with origin at the call site. */
void __msan_allocated_memory(const volatile void* data, size_t size);
/* This function may be optionally provided by the user and should return
   a string containing MSan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
/***********************************/
/* Allocator statistics interface. */
/* Returns the estimated number of bytes that will be reserved by the allocator
   for a request of "size" bytes. If the MSan allocator cannot allocate that
   much memory, returns the maximal possible allocation size; otherwise returns
   "size". */
size_t __msan_get_estimated_allocated_size(size_t size);
/* Returns true if p was returned by the MSan allocator and
   is not yet freed. */
int __msan_get_ownership(const volatile void *p);
/* Returns the number of bytes reserved for the pointer p.
Requires (get_ownership(p) == true) or (p == 0). */
size_t __msan_get_allocated_size(const volatile void *p);
/* Number of bytes allocated and not yet freed by the application. */
size_t __msan_get_current_allocated_bytes();
/* Number of bytes mmap-ed by the MSan allocator to fulfill allocation requests.
   Generally, for a request of X bytes, the allocator can reserve and add to
   free lists a large number of chunks of size X to use for future requests.
   All these chunks count toward the heap size. Currently, the allocator never
   releases memory to the OS (instead, it just puts freed chunks on free
   lists). */
size_t __msan_get_heap_size();
/* Number of bytes mmap-ed by the MSan allocator which can be used to fulfill
   allocation requests. When a user program frees a memory chunk, it can first
   fall into quarantine and will count toward __msan_get_free_bytes() only
   later. */
size_t __msan_get_free_bytes();
/* Number of bytes in unmapped pages that have been released to the OS.
   Currently always returns 0. */
size_t __msan_get_unmapped_bytes();
/* Malloc hooks that may optionally be provided by the user.
__msan_malloc_hook(ptr, size) is called immediately after
allocation of "size" bytes, which returned "ptr".
__msan_free_hook(ptr) is called immediately before
deallocation of "ptr". */
void __msan_malloc_hook(const volatile void *ptr, size_t size);
void __msan_free_hook(const volatile void *ptr);
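/* Example usage (a minimal sketch; these calls are only effective when the
   program is built with -fsanitize=memory):

     char buf[16];                               // uninitialized, so poisoned
     intptr_t off = __msan_test_shadow(buf, sizeof(buf));  // typically 0
     __msan_unpoison(buf, sizeof(buf));          // mark as fully initialized
     off = __msan_test_shadow(buf, sizeof(buf)); // now -1: whole range is good

   A custom allocator would instead call __msan_allocated_memory(p, size)
   right after carving out a fresh chunk, so that later reads of the chunk are
   reported as uses of uninitialized memory until it is written to. */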
#else // __has_feature(memory_sanitizer)
#define __msan_get_origin_descr_if_stack(id) ((const char*)0)
#define __msan_set_origin(a, size, origin)
#define __msan_get_origin(a) ((uint32_t)-1)
#define __msan_get_track_origins() (0)
#define __msan_get_umr_origin() ((uint32_t)-1)
#define __msan_unpoison(a, size)
#define __msan_poison(a, size)
#define __msan_partial_poison(data, shadow, size)
#define __msan_test_shadow(x, size) ((intptr_t)-1)
#define __msan_set_exit_code(exit_code)
#define __msan_set_expect_umr(expect_umr)
#define __msan_print_shadow(x, size)
#define __msan_print_param_shadow()
#define __msan_has_dynamic_component() (0)
#define __msan_allocated_memory(data, size)
#endif // __has_feature(memory_sanitizer)
#ifdef __cplusplus
} // extern "C"
#endif
#endif
......@@ -21,27 +21,20 @@
// These typedefs should be used only in the interceptor definitions to replace
// the standard system types (e.g. SSIZE_T instead of ssize_t)
typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T;
typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T;
// WARNING: OFF_T may be different from OS type off_t, depending on the value of
// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
// like pread and mmap, as opposed to pread64 and mmap64.
// Mac and Linux/x86-64 are special.
#if defined(__APPLE__) || (defined(__linux__) && defined(__x86_64__))
typedef __sanitizer::u64 OFF_T;
#else
typedef __sanitizer::uptr OFF_T;
#endif
typedef __sanitizer::u64 OFF64_T;
typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T;
typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T;
typedef __sanitizer::OFF_T OFF_T;
typedef __sanitizer::OFF64_T OFF64_T;
// How to add an interceptor:
// Suppose you need to wrap/replace a system function (generally, from libc):
// int foo(const char *bar, double baz);
// You'll need to:
// 1) define INTERCEPTOR(int, foo, const char *bar, double baz) { ... } in
// your source file.
// your source file. See the notes below for cases when
// INTERCEPTOR_WITH_SUFFIX(...) should be used instead.
// 2) Call "INTERCEPT_FUNCTION(foo)" prior to the first call of "foo".
// INTERCEPT_FUNCTION(foo) evaluates to "true" iff the function was
// intercepted successfully.
......@@ -55,15 +48,20 @@ typedef __sanitizer::u64 OFF64_T;
// 3b) add DECLARE_REAL_AND_INTERCEPTOR(int, foo, const char*, double)
// to a header file.
// Notes: 1. Things may not work properly if macro INTERCEPT(...) {...} or
// Notes: 1. Things may not work properly if macro INTERCEPTOR(...) {...} or
// DECLARE_REAL(...) are located inside namespaces.
// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo);" to
// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo)" to
// effectively redirect calls from "foo" to "zoo". In this case
// you aren't required to implement
// INTERCEPTOR(int, foo, const char *bar, double baz) {...}
// but instead you'll have to add
// DEFINE_REAL(int, foo, const char *bar, double baz) in your
// DECLARE_REAL(int, foo, const char *bar, double baz) in your
// source file (to define a pointer to the overridden function).
// 3. Some Mac functions have symbol variants discriminated by
// additional suffixes, e.g. _$UNIX2003 (see
// https://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/index.html
// for more details). To intercept such functions you need to use the
// INTERCEPTOR_WITH_SUFFIX(...) macro.
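// Example (a sketch of steps 1-2 above; "foo" is the placeholder used
// throughout these notes, not a function actually intercepted here):
//   INTERCEPTOR(int, foo, const char *bar, double baz) {
//     // ... do something before the real call ...
//     int res = REAL(foo)(bar, baz);
//     // ... do something after ...
//     return res;
//   }
//   ...
//   INTERCEPT_FUNCTION(foo);  // once, before the first call to foo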
// How it works:
// To replace system functions on Linux we just need to declare functions
......@@ -73,6 +71,7 @@ typedef __sanitizer::u64 OFF64_T;
// we intercept. To resolve this we declare our interceptors with __interceptor_
// prefix, and then make actual interceptors weak aliases to __interceptor_
// functions.
//
// This is not so on Mac OS, where the two-level namespace makes
// our replacement functions invisible to other libraries. This may be overcome
// by using DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
......@@ -82,12 +81,43 @@ typedef __sanitizer::u64 OFF64_T;
// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
// the calls to interposed functions done through stubs to the wrapper
// functions.
// As it's decided at compile time which functions are to be intercepted on Mac,
// INTERCEPT_FUNCTION() is effectively a no-op on this system.
#if defined(__APPLE__)
#include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
// Just a pair of pointers.
struct interpose_substitution {
const uptr replacement;
const uptr original;
};
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
// the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
#define INTERPOSER(func_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
{ reinterpret_cast<const uptr>(WRAP(func_name)), \
reinterpret_cast<const uptr>(func_name) } \
}
// For a function foo() and a wrapper function bar() create a global pair
// of pointers { bar, foo } in the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to bar() at runtime.
#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
{ reinterpret_cast<const uptr>(wrapper_name), \
reinterpret_cast<const uptr>(func_name) } \
}
# define WRAP(x) wrap_##x
# define WRAPPER_NAME(x) "wrap_"#x
# define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif defined(_WIN32)
# if defined(_DLL) // DLL CRT
# define WRAP(x) x
......@@ -98,7 +128,10 @@ typedef __sanitizer::u64 OFF64_T;
# define WRAPPER_NAME(x) "wrap_"#x
# define INTERCEPTOR_ATTRIBUTE
# endif
# define DECLARE_WRAPPER(ret_type, func, ...)
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
#else
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
......@@ -142,6 +175,7 @@ typedef __sanitizer::u64 OFF64_T;
# define DEFINE_REAL(ret_type, func, ...)
#endif
#if !defined(__APPLE__)
#define INTERCEPTOR(ret_type, func, ...) \
DEFINE_REAL(ret_type, func, __VA_ARGS__) \
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
......@@ -149,13 +183,36 @@ typedef __sanitizer::u64 OFF64_T;
INTERCEPTOR_ATTRIBUTE \
ret_type WRAP(func)(__VA_ARGS__)
// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
INTERCEPTOR(ret_type, func, __VA_ARGS__)
#else // __APPLE__
#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) suffix; \
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
INTERPOSER(func); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
#define INTERCEPTOR(ret_type, func, ...) \
INTERCEPTOR_ZZZ(/*no symbol variants*/, ret_type, func, __VA_ARGS__)
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
INTERCEPTOR_ZZZ(__DARWIN_ALIAS_C(func), ret_type, func, __VA_ARGS__)
// Override |overridee| with |overrider|.
#define OVERRIDE_FUNCTION(overridee, overrider) \
INTERPOSER_2(overridee, WRAP(overrider))
#endif
#if defined(_WIN32)
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \
FUNC_TYPE(func) PTR_TO_REAL(func); \
} \
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
DECLARE_WRAPPER_WINAPI(ret_type, func, __VA_ARGS__) \
extern "C" \
INTERCEPTOR_ATTRIBUTE \
ret_type __stdcall WRAP(func)(__VA_ARGS__)
......@@ -181,8 +238,6 @@ typedef unsigned long uptr; // NOLINT
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
#elif defined(__APPLE__)
# include "interception_mac.h"
# define OVERRIDE_FUNCTION(old_func, new_func) \
OVERRIDE_FUNCTION_MAC(old_func, new_func)
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
#else // defined(_WIN32)
# include "interception_win.h"
......
......@@ -13,7 +13,6 @@
#ifdef __linux__
#include "interception.h"
#include <stddef.h> // for NULL
#include <dlfcn.h> // for dlsym
namespace __interception {
......@@ -22,6 +21,13 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
*func_addr = (uptr)dlsym(RTLD_NEXT, func_name);
return real == wrapper;
}
#if !defined(__ANDROID__) // android does not have dlvsym
void *GetFuncAddrVer(const char *func_name, const char *ver) {
return dlvsym(RTLD_NEXT, func_name, ver);
}
#endif // !defined(__ANDROID__)
} // namespace __interception
......
......@@ -23,6 +23,7 @@ namespace __interception {
// returns true if a function with the given name was found.
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
uptr real, uptr wrapper);
void *GetFuncAddrVer(const char *func_name, const char *ver);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX(func) \
......@@ -31,5 +32,11 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
(::__interception::uptr)&(func), \
(::__interception::uptr)&WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym
#define INTERCEPT_FUNCTION_VER(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, #symver)
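// Illustrative use (the function/version pair below is an assumption for the
// example, not necessarily one used by this library):
//   INTERCEPT_FUNCTION_VER(pthread_cond_wait, GLIBC_2.3.2)
// binds real_pthread_cond_wait to the versioned symbol
// pthread_cond_wait@GLIBC_2.3.2 via dlvsym(), instead of whichever version
// dlsym(RTLD_NEXT, ...) would happen to return.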
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H
#endif // __linux__
AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir)
# May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_lsan.la
sanitizer_lsan_files = \
lsan_common.cc \
lsan_common_linux.cc
libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
# friends when we are called from the top level Makefile.
AM_MAKEFLAGS = \
"AR_FLAGS=$(AR_FLAGS)" \
"CC_FOR_BUILD=$(CC_FOR_BUILD)" \
"CFLAGS=$(CFLAGS)" \
"CXXFLAGS=$(CXXFLAGS)" \
"CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
"CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
"INSTALL=$(INSTALL)" \
"INSTALL_DATA=$(INSTALL_DATA)" \
"INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
"INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
"JC1FLAGS=$(JC1FLAGS)" \
"LDFLAGS=$(LDFLAGS)" \
"LIBCFLAGS=$(LIBCFLAGS)" \
"LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
"MAKE=$(MAKE)" \
"MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
"PICFLAG=$(PICFLAG)" \
"PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
"SHELL=$(SHELL)" \
"RUNTESTFLAGS=$(RUNTESTFLAGS)" \
"exec_prefix=$(exec_prefix)" \
"infodir=$(infodir)" \
"libdir=$(libdir)" \
"prefix=$(prefix)" \
"includedir=$(includedir)" \
"AR=$(AR)" \
"AS=$(AS)" \
"LD=$(LD)" \
"LIBCFLAGS=$(LIBCFLAGS)" \
"NM=$(NM)" \
"PICFLAG=$(PICFLAG)" \
"RANLIB=$(RANLIB)" \
"DESTDIR=$(DESTDIR)"
MAKEOVERRIDES=
## ################################################################
# This file is used to maintain libtool version info for libsanitizer. See
# the libtool manual to understand the meaning of the fields. This is
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
0:0:0
//=-- lsan.cc -------------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Standalone LSan RTL.
//
//===----------------------------------------------------------------------===//
#include "lsan.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
namespace __lsan {
static void InitializeCommonFlags() {
CommonFlags *cf = common_flags();
cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf->symbolize = true;
cf->strip_path_prefix = "";
cf->fast_unwind_on_malloc = true;
cf->malloc_context_size = 30;
cf->detect_leaks = true;
cf->leak_check_at_exit = true;
ParseCommonFlagsFromString(GetEnv("LSAN_OPTIONS"));
}
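// The defaults above can be overridden at run time via LSAN_OPTIONS, e.g.
// (hypothetical values):
//   LSAN_OPTIONS=malloc_context_size=15:symbolize=0 ./a.out
// since ParseCommonFlagsFromString() is called after the defaults are set.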
void Init() {
static bool inited;
if (inited)
return;
inited = true;
SanitizerToolName = "LeakSanitizer";
InitializeCommonFlags();
InitializeAllocator();
InitTlsSize();
InitializeInterceptors();
InitializeThreadRegistry();
u32 tid = ThreadCreate(0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
// Start symbolizer process if necessary.
if (common_flags()->symbolize) {
getSymbolizer()
->InitializeExternal(common_flags()->external_symbolizer_path);
}
InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
}
} // namespace __lsan
//=-- lsan.h --------------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private header for standalone LSan RTL.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
namespace __lsan {
void Init();
void InitializeInterceptors();
} // namespace __lsan
//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"
namespace __lsan {
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
struct ChunkMetadata {
bool allocated : 8; // Must be first.
ChunkTag tag : 2;
uptr requested_size : 54;
u32 stack_trace_id;
};
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
allocator.Init();
}
void AllocatorThreadFinish() {
allocator.SwallowCache(&cache);
}
static ChunkMetadata *Metadata(void *p) {
return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
m->requested_size = size;
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}
static void RegisterDeallocation(void *p) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared) {
if (size == 0)
size = 1;
if (size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return 0;
}
void *p = allocator.Allocate(&cache, size, alignment, cleared);
RegisterAllocation(stack, p, size);
return p;
}
void Deallocate(void *p) {
RegisterDeallocation(p);
allocator.Deallocate(&cache, p);
}
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) {
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
allocator.Deallocate(&cache, p);
return 0;
}
p = allocator.Reallocate(&cache, p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
return p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*begin = (uptr)&cache;
*end = *begin + sizeof(cache);
}
uptr GetMallocUsableSize(void *p) {
ChunkMetadata *m = Metadata(p);
if (!m) return 0;
return m->requested_size;
}
///// Interface to the common LSan module. /////
void LockAllocator() {
allocator.ForceLock();
}
void UnlockAllocator() {
allocator.ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*begin = (uptr)&allocator;
*end = *begin + sizeof(allocator);
}
uptr PointsIntoChunk(void* p) {
uptr addr = reinterpret_cast<uptr>(p);
uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
if (!chunk) return 0;
// LargeMmapAllocator considers pointers to the meta-region of a chunk to be
// valid, but we don't want that.
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
if (m->allocated && addr < chunk + m->requested_size)
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
return chunk;
}
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = Metadata(reinterpret_cast<void *>(chunk));
CHECK(metadata_);
}
bool LsanMetadata::allocated() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}
ChunkTag LsanMetadata::tag() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}
void LsanMetadata::set_tag(ChunkTag value) {
reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}
uptr LsanMetadata::requested_size() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}
u32 LsanMetadata::stack_trace_id() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
allocator.ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
void *chunk = allocator.GetBlockBegin(p);
if (!chunk || p < chunk) return kIgnoreObjectInvalid;
ChunkMetadata *m = Metadata(chunk);
CHECK(m);
if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
if (m->tag == kIgnored)
return kIgnoreObjectAlreadyIgnored;
m->tag = kIgnored;
return kIgnoreObjectSuccess;
} else {
return kIgnoreObjectInvalid;
}
}
} // namespace __lsan
//=-- lsan_allocator.h ----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Allocator for standalone LSan.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_ALLOCATOR_H
#define LSAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
namespace __lsan {
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared);
void Deallocate(void *p);
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment);
uptr GetMallocUsableSize(void *p);
template<typename Callable>
void ForEachChunk(const Callable &callback);
void GetAllocatorCacheRange(uptr *begin, uptr *end);
void AllocatorThreadFinish();
void InitializeAllocator();
} // namespace __lsan
#endif // LSAN_ALLOCATOR_H
//=-- lsan_common.h -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#if SANITIZER_LINUX && defined(__x86_64__)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
namespace __lsan {
// Chunk tags.
enum ChunkTag {
kDirectlyLeaked = 0, // default
kIndirectlyLeaked = 1,
kReachable = 2,
kIgnored = 3
};
struct Flags {
uptr pointer_alignment() const {
return use_unaligned ? 1 : sizeof(uptr);
}
// Print addresses of leaked objects after main leak report.
bool report_objects;
// Aggregate two objects into one leak if this many stack frames match. If
// zero, the entire stack trace must match.
int resolution;
// The maximum number of leaks reported.
int max_leaks;
// If nonzero, kill the process with this exit code upon finding leaks.
int exitcode;
// Suppressions file name.
const char* suppressions;
// Flags controlling the root set of reachable memory.
// Global variables (.data and .bss).
bool use_globals;
// Thread stacks.
bool use_stacks;
// Thread registers.
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
// Consider unaligned pointers valid.
bool use_unaligned;
// User-visible verbosity.
int verbosity;
// Debug logging.
bool log_pointers;
bool log_threads;
};
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
struct Leak {
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
bool is_directly_leaked;
bool is_suppressed;
};
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
LeakReport() : leaks_(1) {}
void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag);
void PrintLargest(uptr max_leaks);
void PrintSummary();
bool IsEmpty() { return leaks_.size() == 0; }
uptr ApplySuppressions();
private:
InternalMmapVector<Leak> leaks_;
};
typedef InternalMmapVector<uptr> Frontier;
// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag);
enum IgnoreObjectResult {
kIgnoreObjectSuccess,
kIgnoreObjectAlreadyIgnored,
kIgnoreObjectInvalid
};
// Functions called from the parent tool.
void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
// Wrapper for chunk metadata operations.
class LsanMetadata {
public:
// Constructor accepts address of user-visible chunk.
explicit LsanMetadata(uptr chunk);
bool allocated() const;
ChunkTag tag() const;
void set_tag(ChunkTag value);
uptr requested_size() const;
u32 stack_trace_id() const;
private:
void *metadata_;
};
} // namespace __lsan
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
} // extern "C"
#endif // LSAN_COMMON_H
//=-- lsan_common_linux.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Linux-specific code.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"
#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __lsan {
static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's more
// than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = 0;
static bool IsLinker(const char* full_name) {
return LibraryNameIs(full_name, kLinkerName);
}
void InitializePlatformSpecificModules() {
internal_memset(linker_placeholder, 0, sizeof(linker_placeholder));
uptr num_matches = GetListOfModules(
reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker);
if (num_matches == 1) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
return;
}
if (num_matches == 0)
Report("LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
else if (num_matches > 1)
Report("LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
linker = 0;
}
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
void *data) {
Frontier *frontier = reinterpret_cast<Frontier *>(data);
for (uptr j = 0; j < info->dlpi_phnum; j++) {
const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
// We're looking for .data and .bss sections, which reside in writeable,
// loadable segments.
if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
(phdr->p_memsz == 0))
continue;
uptr begin = info->dlpi_addr + phdr->p_vaddr;
uptr end = begin + phdr->p_memsz;
uptr allocator_begin = 0, allocator_end = 0;
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
if (begin <= allocator_begin && allocator_begin < end) {
CHECK_LE(allocator_begin, allocator_end);
CHECK_LT(allocator_end, end);
if (begin < allocator_begin)
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
kReachable);
if (allocator_end < end)
ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
kReachable);
} else {
ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
}
}
return 0;
}
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
// reentrant, so we should be able to fix this by acquiring the lock before
// suspending threads.
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
CHECK(stack_id);
uptr size = 0;
const uptr *trace = map->Get(stack_id, &size);
// The top frame is our malloc/calloc/etc. The next frame is the caller.
if (size >= 2)
return trace[1];
return 0;
}
struct ProcessPlatformAllocParam {
Frontier *frontier;
StackDepotReverseMap *stack_depot_reverse_map;
};
// ForEachChunk callback. Identifies unreachable chunks which must be treated as
// reachable. Marks them as reachable and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
CHECK(arg);
ProcessPlatformAllocParam *param =
reinterpret_cast<ProcessPlatformAllocParam *>(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) {
u32 stack_id = m.stack_trace_id();
uptr caller_pc = 0;
if (stack_id > 0)
caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
// it as reachable, as we can't properly report its allocation stack anyway.
if (caller_pc == 0 || linker->containsAddress(caller_pc)) {
m.set_tag(kReachable);
param->frontier->push_back(chunk);
}
}
}
// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
StackDepotReverseMap stack_depot_reverse_map;
ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
//=-- lsan_interceptors.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Interceptors for standalone LSan.
//
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"
using namespace __lsan;
extern "C" {
int pthread_attr_init(void *attr);
int pthread_attr_destroy(void *attr);
int pthread_attr_getdetachstate(void *attr, int *v);
int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
#define GET_STACK_TRACE \
StackTrace stack; \
{ \
uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \
bool fast = common_flags()->fast_unwind_on_malloc; \
if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
GetStackTrace(&stack, __sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
}
///// Malloc/free interceptors. /////
const bool kAlwaysClearMemory = true;
namespace std {
struct nothrow_t;
}
INTERCEPTOR(void*, malloc, uptr size) {
Init();
GET_STACK_TRACE;
return Allocate(stack, size, 1, kAlwaysClearMemory);
}
INTERCEPTOR(void, free, void *p) {
Init();
Deallocate(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
Init();
GET_STACK_TRACE;
size *= nmemb;
return Allocate(stack, size, 1, true);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
Init();
GET_STACK_TRACE;
return Reallocate(stack, q, size, 1);
}
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
Init();
GET_STACK_TRACE;
return Allocate(stack, size, alignment, kAlwaysClearMemory);
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
Init();
GET_STACK_TRACE;
*memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
}
INTERCEPTOR(void*, valloc, uptr size) {
Init();
GET_STACK_TRACE;
if (size == 0)
size = GetPageSizeCached();
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
Init();
return GetMallocUsableSize(ptr);
}
struct fake_mallinfo {
int x[10];
};
INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
struct fake_mallinfo res;
internal_memset(&res, 0, sizeof(res));
return res;
}
INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
INTERCEPTOR(void*, pvalloc, uptr size) {
Init();
GET_STACK_TRACE;
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
INTERCEPTOR(void, cfree, void *p) ALIAS("free");
#define OPERATOR_NEW_BODY \
Init(); \
GET_STACK_TRACE; \
return Allocate(stack, size, 1, kAlwaysClearMemory);
INTERCEPTOR_ATTRIBUTE
void *operator new(uptr size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
#define OPERATOR_DELETE_BODY \
Init(); \
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const &) {
OPERATOR_DELETE_BODY;
}
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
///// Thread initialization and finalization. /////
static unsigned g_thread_finalize_key;
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
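// Re-arm the key below until the iteration count drops to 1, so that
// ThreadFinish() runs on the last pass of pthread TSD destructors.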
if (iter > 1) {
if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
return;
}
ThreadFinish();
}
struct ThreadParam {
void *(*callback)(void *arg);
void *param;
atomic_uintptr_t tid;
};
// PTHREAD_DESTRUCTOR_ITERATIONS from glibc.
const uptr kPthreadDestructorIterations = 4;
extern "C" void *__lsan_thread_start_func(void *arg) {
ThreadParam *p = (ThreadParam*)arg;
void* (*callback)(void *arg) = p->callback;
void *param = p->param;
// Wait until the last iteration to maximize the chance that we are the last
// destructor to run.
if (pthread_setspecific(g_thread_finalize_key,
(void*)kPthreadDestructorIterations)) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
int tid = 0;
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
atomic_store(&p->tid, 0, memory_order_release);
SetCurrentThread(tid);
ThreadStart(tid, GetTid());
return callback(param);
}
INTERCEPTOR(int, pthread_create, void *th, void *attr,
void *(*callback)(void *), void *param) {
Init();
EnsureMainThreadIDIsCorrect();
__sanitizer_pthread_attr_t myattr;
if (attr == 0) {
pthread_attr_init(&myattr);
attr = &myattr;
}
AdjustStackSizeLinux(attr, 0);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
int res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
CHECK_NE(tid, 0);
atomic_store(&p.tid, tid, memory_order_release);
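// Wait until the child thread signals (by resetting p.tid to 0 in
// __lsan_thread_start_func above) that it no longer needs p, which lives
// on this stack frame.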
while (atomic_load(&p.tid, memory_order_acquire) != 0)
internal_sched_yield();
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
return res;
}
INTERCEPTOR(int, pthread_join, void *th, void **ret) {
Init();
int tid = ThreadTid((uptr)th);
int res = REAL(pthread_join)(th, ret);
if (res == 0)
ThreadJoin(tid);
return res;
}
namespace __lsan {
void InitializeInterceptors() {
INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
INTERCEPT_FUNCTION(cfree);
INTERCEPT_FUNCTION(calloc);
INTERCEPT_FUNCTION(realloc);
INTERCEPT_FUNCTION(memalign);
INTERCEPT_FUNCTION(posix_memalign);
INTERCEPT_FUNCTION(__libc_memalign);
INTERCEPT_FUNCTION(valloc);
INTERCEPT_FUNCTION(pvalloc);
INTERCEPT_FUNCTION(malloc_usable_size);
INTERCEPT_FUNCTION(mallinfo);
INTERCEPT_FUNCTION(mallopt);
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");
Die();
}
}
} // namespace __lsan
//=-- lsan_thread.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_thread.h for details.
//
//===----------------------------------------------------------------------===//
#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "lsan_allocator.h"
namespace __lsan {
const u32 kInvalidTid = (u32) -1;
static ThreadRegistry *thread_registry;
static THREADLOCAL u32 current_thread_tid = kInvalidTid;
static ThreadContextBase *CreateThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
return new(mem) ThreadContext(tid);
}
static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;
void InitializeThreadRegistry() {
static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
u32 GetCurrentThread() {
return current_thread_tid;
}
void SetCurrentThread(u32 tid) {
current_thread_tid = tid;
}
ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
stack_end_(0),
cache_begin_(0),
cache_end_(0),
tls_begin_(0),
tls_end_(0) {}
struct OnStartedArgs {
uptr stack_begin, stack_end,
cache_begin, cache_end,
tls_begin, tls_end;
};
void ThreadContext::OnStarted(void *arg) {
OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg);
stack_begin_ = args->stack_begin;
stack_end_ = args->stack_end;
tls_begin_ = args->tls_begin;
tls_end_ = args->tls_end;
cache_begin_ = args->cache_begin;
cache_end_ = args->cache_end;
}
void ThreadContext::OnFinished() {
AllocatorThreadFinish();
}
u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
return thread_registry->CreateThread(user_id, detached, parent_tid,
/* arg */ 0);
}
void ThreadStart(u32 tid, uptr os_id) {
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
&args.tls_begin, &tls_size);
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
thread_registry->StartThread(tid, os_id, &args);
}
void ThreadFinish() {
thread_registry->FinishThread(GetCurrentThread());
}
ThreadContext *CurrentThreadContext() {
if (!thread_registry) return 0;
if (GetCurrentThread() == kInvalidTid)
return 0;
// No lock needed when getting current thread.
return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
}
static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
uptr uid = (uptr)arg;
if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
return true;
}
return false;
}
u32 ThreadTid(uptr uid) {
return thread_registry->FindThread(FindThreadByUid, (void*)uid);
}
void ThreadJoin(u32 tid) {
CHECK_NE(tid, kInvalidTid);
thread_registry->JoinThread(tid, /* arg */0);
}
void EnsureMainThreadIDIsCorrect() {
if (GetCurrentThread() == 0)
CurrentThreadContext()->os_id = GetTid();
}
///// Interface to the common LSan module. /////
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) {
ThreadContext *context = static_cast<ThreadContext *>(
thread_registry->FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
*stack_begin = context->stack_begin();
*stack_end = context->stack_end();
*tls_begin = context->tls_begin();
*tls_end = context->tls_end();
*cache_begin = context->cache_begin();
*cache_end = context->cache_end();
return true;
}
void LockThreadRegistry() {
thread_registry->Lock();
}
void UnlockThreadRegistry() {
thread_registry->Unlock();
}
} // namespace __lsan