Commit 6e644a50 by Martin Liska Committed by Martin Liska

Make red zone size more flexible for stack variables (PR sanitizer/81715).

2018-11-30  Martin Liska  <mliska@suse.cz>

	PR sanitizer/81715
	* asan.c (asan_shadow_cst): Remove, partially transform
	into flush_redzone_payload.
	(RZ_BUFFER_SIZE): New.
	(struct asan_redzone_buffer): New.
	(asan_redzone_buffer::emit_redzone_byte): Likewise.
	(asan_redzone_buffer::flush_redzone_payload): Likewise.
	(asan_redzone_buffer::flush_if_full): Likewise.
	(asan_emit_stack_protection): Use asan_redzone_buffer class
	that is responsible for proper aligned stores and flushing
	of shadow memory payload.
	* asan.h (ASAN_MIN_RED_ZONE_SIZE): New.
	(asan_var_and_redzone_size): Likewise.
	* cfgexpand.c (expand_stack_vars): Use smaller alignment
	(ASAN_MIN_RED_ZONE_SIZE) in order to make shadow memory
	for automatic variables more compact.
2018-11-30  Martin Liska  <mliska@suse.cz>

	PR sanitizer/81715
	* c-c++-common/asan/asan-stack-small.c: New test.

From-SVN: r266664
parent b49f1a7e
2018-11-30 Martin Liska <mliska@suse.cz>
PR sanitizer/81715
* asan.c (asan_shadow_cst): Remove, partially transform
into flush_redzone_payload.
(RZ_BUFFER_SIZE): New.
(struct asan_redzone_buffer): New.
(asan_redzone_buffer::emit_redzone_byte): Likewise.
(asan_redzone_buffer::flush_redzone_payload): Likewise.
(asan_redzone_buffer::flush_if_full): Likewise.
(asan_emit_stack_protection): Use asan_redzone_buffer class
that is responsible for proper aligned stores and flushing
of shadow memory payload.
* asan.h (ASAN_MIN_RED_ZONE_SIZE): New.
(asan_var_and_redzone_size): Likewise.
* cfgexpand.c (expand_stack_vars): Use smaller alignment
(ASAN_MIN_RED_ZONE_SIZE) in order to make shadow memory
for automatic variables more compact.
2018-11-30 Alan Modra <amodra@gmail.com>
* config/rs6000/predicates.md (easy_fp_constant): Avoid long
......@@ -1155,20 +1155,6 @@ asan_pp_string (pretty_printer *pp)
return build1 (ADDR_EXPR, shadow_ptr_types[0], ret);
}
/* Return a CONST_INT representing 4 subsequent shadow memory bytes.
   The bytes in SHADOW_BYTES are packed into a single SImode constant,
   honoring the target byte order.  */

static rtx
asan_shadow_cst (unsigned char shadow_bytes[4])
{
  /* The packing below only works when word and byte order agree.  */
  gcc_assert (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN);

  unsigned HOST_WIDE_INT val = 0;
  for (int i = 0; i < 4; i++)
    {
      unsigned char b = shadow_bytes[BYTES_BIG_ENDIAN ? 3 - i : i];
      val |= (unsigned HOST_WIDE_INT) b << (BITS_PER_UNIT * i);
    }
  return gen_int_mode (val, SImode);
}
/* Clear shadow memory at SHADOW_MEM, LEN bytes. Can't call a library call here
though. */
......@@ -1235,6 +1221,136 @@ shadow_mem_size (unsigned HOST_WIDE_INT size)
return ROUND_UP (size, ASAN_SHADOW_GRANULARITY) / ASAN_SHADOW_GRANULARITY;
}
/* Always emit 4 bytes at a time.  */
#define RZ_BUFFER_SIZE 4

/* ASAN redzone buffer container that handles emission of shadow bytes.
   Shadow bytes are accumulated via emit_redzone_byte and written out
   as aligned RZ_BUFFER_SIZE-byte stores by flush_redzone_payload.  */
struct asan_redzone_buffer
{
  /* Constructor.  SHADOW_MEM is the shadow memory we will emit to and
     PREV_OFFSET the relative offset it corresponds to.  */
  asan_redzone_buffer (rtx shadow_mem, HOST_WIDE_INT prev_offset):
    m_shadow_mem (shadow_mem), m_prev_offset (prev_offset),
    m_original_offset (prev_offset), m_shadow_bytes (RZ_BUFFER_SIZE)
  {}

  /* Emit VALUE shadow byte at a given OFFSET.  */
  void emit_redzone_byte (HOST_WIDE_INT offset, unsigned char value);

  /* Emit RTX emission of the content of the buffer.  */
  void flush_redzone_payload (void);

private:
  /* Flush if the content of the buffer is full
     (equal to RZ_BUFFER_SIZE).  */
  void flush_if_full (void);

  /* Memory where we last emitted a redzone payload.  */
  rtx m_shadow_mem;

  /* Relative offset where we last emitted a redzone payload.  */
  HOST_WIDE_INT m_prev_offset;

  /* Relative original offset.  Used for checking only.  */
  HOST_WIDE_INT m_original_offset;

public:
  /* Buffer with redzone payload.  Kept public so that callers can
     assert it has been fully flushed (asan_emit_stack_protection
     checks m_shadow_bytes.is_empty ()).  */
  auto_vec<unsigned char> m_shadow_bytes;
};
/* Emit VALUE shadow byte at a given OFFSET.  Consecutive bytes are
   buffered; a gap forces a flush and a re-alignment of the buffer.  */

void
asan_redzone_buffer::emit_redzone_byte (HOST_WIDE_INT offset,
					unsigned char value)
{
  /* Offsets must be shadow-granularity aligned and monotonically
     increasing.  */
  gcc_assert ((offset & (ASAN_SHADOW_GRANULARITY - 1)) == 0);
  gcc_assert (offset >= m_prev_offset);

  /* Offset covered by the bytes already sitting in the buffer.  */
  HOST_WIDE_INT off
    = m_prev_offset + ASAN_SHADOW_GRANULARITY * m_shadow_bytes.length ();
  if (off == offset)
    {
      /* Consecutive shadow memory byte.  */
      m_shadow_bytes.safe_push (value);
      flush_if_full ();
    }
  else
    {
      /* Non-consecutive offset: emit whatever is pending first.  */
      if (!m_shadow_bytes.is_empty ())
	flush_redzone_payload ();

      /* Maybe start earlier in order to use aligned store.  */
      HOST_WIDE_INT align = (offset - m_prev_offset) % ASAN_RED_ZONE_SIZE;
      if (align)
	{
	  offset -= align;
	  /* NOTE(review): the pushed zeros pad the buffer back to the
	     aligned start; the divisor BITS_PER_UNIT happens to equal
	     ASAN_SHADOW_GRANULARITY (8) on usual targets -- confirm the
	     intended divisor is ASAN_SHADOW_GRANULARITY.  */
	  for (unsigned i = 0; i < align / BITS_PER_UNIT; i++)
	    m_shadow_bytes.safe_push (0);
	}

      /* Adjust m_prev_offset and m_shadow_mem.  */
      HOST_WIDE_INT diff = offset - m_prev_offset;
      m_shadow_mem = adjust_address (m_shadow_mem, VOIDmode,
				     diff >> ASAN_SHADOW_SHIFT);
      m_prev_offset = offset;
      m_shadow_bytes.safe_push (value);
      flush_if_full ();
    }
}
/* Emit RTX emission of the content of the buffer: pad the pending
   shadow bytes to RZ_BUFFER_SIZE, pack them into one SImode constant
   (honoring target byte order) and store it to m_shadow_mem.  */

void
asan_redzone_buffer::flush_redzone_payload (void)
{
  /* The packing below only works when word and byte order agree.  */
  gcc_assert (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN);

  if (m_shadow_bytes.is_empty ())
    return;

  /* Be sure we always emit to an aligned address.  */
  gcc_assert (((m_prev_offset - m_original_offset)
	       & (ASAN_RED_ZONE_SIZE - 1)) == 0);

  /* Fill it to RZ_BUFFER_SIZE bytes with zeros if needed.  Start at the
     current length so that a full buffer is not grown past
     RZ_BUFFER_SIZE (the previous '<=' upper bound pushed one extra
     byte).  */
  unsigned l = m_shadow_bytes.length ();
  for (unsigned i = l; i < RZ_BUFFER_SIZE; i++)
    m_shadow_bytes.safe_push (0);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "Flushing rzbuffer at offset %" PRId64 " with: ", m_prev_offset);

  unsigned HOST_WIDE_INT val = 0;
  for (unsigned i = 0; i < RZ_BUFFER_SIZE; i++)
    {
      /* Mirror the byte order asan_shadow_cst used: on big-endian the
	 last payload byte has index RZ_BUFFER_SIZE - i - 1; without the
	 '- 1' the first iteration read one element past the payload.  */
      unsigned char v
	= m_shadow_bytes[BYTES_BIG_ENDIAN ? RZ_BUFFER_SIZE - i - 1 : i];
      val |= (unsigned HOST_WIDE_INT)v << (BITS_PER_UNIT * i);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "%02x ", v);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n");

  rtx c = gen_int_mode (val, SImode);
  m_shadow_mem = adjust_address (m_shadow_mem, SImode, 0);
  emit_move_insn (m_shadow_mem, c);
  m_shadow_bytes.truncate (0);
}
/* Flush the pending shadow bytes to memory when the buffer has filled
   up, i.e. holds exactly RZ_BUFFER_SIZE bytes.  */

void
asan_redzone_buffer::flush_if_full (void)
{
  bool buffer_full = m_shadow_bytes.length () == RZ_BUFFER_SIZE;
  if (buffer_full)
    flush_redzone_payload ();
}
/* Insert code to protect stack vars. The prologue sequence should be emitted
directly, epilogue sequence returned. BASE is the register holding the
stack base, against which OFFSETS array offsets are relative to, OFFSETS
......@@ -1256,7 +1372,6 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
rtx_code_label *lab;
rtx_insn *insns;
char buf[32];
unsigned char shadow_bytes[4];
HOST_WIDE_INT base_offset = offsets[length - 1];
HOST_WIDE_INT base_align_bias = 0, offset, prev_offset;
HOST_WIDE_INT asan_frame_size = offsets[0] - base_offset;
......@@ -1421,46 +1536,43 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
if (STRICT_ALIGNMENT)
set_mem_align (shadow_mem, (GET_MODE_ALIGNMENT (SImode)));
prev_offset = base_offset;
asan_redzone_buffer rz_buffer (shadow_mem, prev_offset);
for (l = length; l; l -= 2)
{
if (l == 2)
cur_shadow_byte = ASAN_STACK_MAGIC_RIGHT;
offset = offsets[l - 1];
if ((offset - base_offset) & (ASAN_RED_ZONE_SIZE - 1))
bool extra_byte = (offset - base_offset) & (ASAN_SHADOW_GRANULARITY - 1);
/* If a red-zone is not aligned to ASAN_SHADOW_GRANULARITY then
the previous stack variable has size % ASAN_SHADOW_GRANULARITY != 0.
In that case we have to emit one extra byte that will describe
how many bytes (out of ASAN_SHADOW_GRANULARITY) can be accessed. */
if (extra_byte)
{
int i;
HOST_WIDE_INT aoff
= base_offset + ((offset - base_offset)
& ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
shadow_mem = adjust_address (shadow_mem, VOIDmode,
(aoff - prev_offset)
>> ASAN_SHADOW_SHIFT);
prev_offset = aoff;
for (i = 0; i < 4; i++, aoff += ASAN_SHADOW_GRANULARITY)
if (aoff < offset)
{
if (aoff < offset - (HOST_WIDE_INT)ASAN_SHADOW_GRANULARITY + 1)
shadow_bytes[i] = 0;
else
shadow_bytes[i] = offset - aoff;
}
else
shadow_bytes[i] = ASAN_STACK_MAGIC_MIDDLE;
emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
offset = aoff;
& ~(ASAN_SHADOW_GRANULARITY - HOST_WIDE_INT_1));
rz_buffer.emit_redzone_byte (aoff, offset - aoff);
offset = aoff + ASAN_SHADOW_GRANULARITY;
}
while (offset <= offsets[l - 2] - ASAN_RED_ZONE_SIZE)
/* Calculate size of red zone payload. */
while (offset < offsets[l - 2])
{
shadow_mem = adjust_address (shadow_mem, VOIDmode,
(offset - prev_offset)
>> ASAN_SHADOW_SHIFT);
prev_offset = offset;
memset (shadow_bytes, cur_shadow_byte, 4);
emit_move_insn (shadow_mem, asan_shadow_cst (shadow_bytes));
offset += ASAN_RED_ZONE_SIZE;
rz_buffer.emit_redzone_byte (offset, cur_shadow_byte);
offset += ASAN_SHADOW_GRANULARITY;
}
cur_shadow_byte = ASAN_STACK_MAGIC_MIDDLE;
}
/* As the automatic variables are aligned to
ASAN_RED_ZONE_SIZE / ASAN_SHADOW_GRANULARITY, the buffer should be
flushed here. */
gcc_assert (rz_buffer.m_shadow_bytes.is_empty ());
do_pending_stack_adjust ();
/* Construct epilogue sequence. */
......@@ -1519,7 +1631,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
for (l = length; l; l -= 2)
{
offset = base_offset + ((offsets[l - 1] - base_offset)
& ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
& ~(ASAN_MIN_RED_ZONE_SIZE - HOST_WIDE_INT_1));
if (last_offset + last_size != offset)
{
shadow_mem = adjust_address (shadow_mem, VOIDmode,
......@@ -1531,7 +1643,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
last_size = 0;
}
last_size += base_offset + ((offsets[l - 2] - base_offset)
& ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
& ~(ASAN_MIN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
- offset;
/* Unpoison shadow memory that corresponds to a variable that is
......@@ -1552,7 +1664,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
"%s (%" PRId64 " B)\n", n, size);
}
last_size += size & ~(ASAN_RED_ZONE_SIZE - HOST_WIDE_INT_1);
last_size += size & ~(ASAN_MIN_RED_ZONE_SIZE - HOST_WIDE_INT_1);
}
}
}
......
......@@ -53,6 +53,11 @@ extern hash_set <tree> *asan_used_labels;
up to 2 * ASAN_RED_ZONE_SIZE - 1 bytes. */
#define ASAN_RED_ZONE_SIZE 32
/* Stack variables use more compact red zones.  The size also includes
the size of the variable itself. */
#define ASAN_MIN_RED_ZONE_SIZE 16
/* Shadow memory values for stack protection. Left is below protected vars,
the first pointer in stack corresponding to that offset contains
ASAN_STACK_FRAME_MAGIC word, the second pointer to a string describing
......@@ -102,6 +107,26 @@ asan_red_zone_size (unsigned int size)
return c ? 2 * ASAN_RED_ZONE_SIZE - c : ASAN_RED_ZONE_SIZE;
}
/* Return how much a stack variable occupies on a stack including a
   space for its red zone.  */

static inline unsigned HOST_WIDE_INT
asan_var_and_redzone_size (unsigned HOST_WIDE_INT size)
{
  /* Tiny variables get a fixed combined footprint.  */
  if (size <= 4)
    return 16;
  if (size <= 16)
    return 32;

  /* Larger variables get a red zone that grows with the variable
     size, capped at 256 bytes.  */
  unsigned HOST_WIDE_INT redzone;
  if (size <= 128)
    redzone = 32;
  else if (size <= 512)
    redzone = 64;
  else if (size <= 4096)
    redzone = 128;
  else
    redzone = 256;
  return size + redzone;
}
extern bool set_asan_shadow_offset (const char *);
extern void set_sanitized_sections (const char *);
......
......@@ -1125,13 +1125,17 @@ expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
&& stack_vars[i].size.is_constant ())
{
prev_offset = align_base (prev_offset,
MAX (alignb, ASAN_RED_ZONE_SIZE),
MAX (alignb, ASAN_MIN_RED_ZONE_SIZE),
!FRAME_GROWS_DOWNWARD);
tree repr_decl = NULL_TREE;
offset
= alloc_stack_frame_space (stack_vars[i].size
+ ASAN_RED_ZONE_SIZE,
MAX (alignb, ASAN_RED_ZONE_SIZE));
unsigned HOST_WIDE_INT size
= asan_var_and_redzone_size (stack_vars[i].size.to_constant ());
if (data->asan_vec.is_empty ())
size = MAX (size, ASAN_RED_ZONE_SIZE);
unsigned HOST_WIDE_INT alignment = MAX (alignb,
ASAN_MIN_RED_ZONE_SIZE);
offset = alloc_stack_frame_space (size, alignment);
data->asan_vec.safe_push (prev_offset);
/* Allocating a constant amount of space from a constant
......
2018-11-30 Martin Liska <mliska@suse.cz>
PR sanitizer/81715
* c-c++-common/asan/asan-stack-small.c: New test.
2018-11-30 Richard Biener <rguenther@suse.de>
* gcc.dg/gimplefe-34.c: New testcase.
......
/* { dg-do run } */
char *pa;
char *pb;
char *pc;
/* Store 'x' through PTR.  The pointee is volatile so the compiler
   cannot elide the store; ASAN instruments the memory access.  */
void access (volatile char *ptr)
{
  ptr[0] = 'x';
}
/* Exercise compact red zones for small stack variables (PR 81715).
   NOTE(review): the test relies on the exact stack layout produced by
   the compiler, so the order of the declarations below matters.  */
int main (int argc, char **argv)
{
  /* Three 1-byte locals; with ASAN_MIN_RED_ZONE_SIZE (16) each
     variable plus its red zone is presumably a 16-byte slot --
     confirm against asan_emit_stack_protection's layout.  */
  char a;
  char b;
  char c;
  /* Publish the addresses through globals so the accesses below
     cannot be optimized away or resolved at compile time.  */
  pa = &a;
  pb = &b;
  pc = &c;
  /* In-bounds accesses; must not trigger an ASAN report.  */
  access (pb);
  access (pc);
  // access 'b' here: &a + 32 is expected to land on 'b' given the
  // compact slots, so this access must also be considered valid.
  access (pa + 32);
  return 0;
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment