Commit 6a78b724 by DJ Delorie (committed by DJ Delorie)

common.opt (-fstrict-volatile-bitfields): new.

* common.opt (-fstrict-volatile-bitfields): new.
* doc/invoke.texi: Document it.
* fold-const.c (optimize_bit_field_compare): For volatile
bitfields, use the field's type to determine the mode, not the
field's size.
* expr.c (expand_assignment): Likewise.
(get_inner_reference): Likewise.
(expand_expr_real_1): Likewise.
* expmed.c (store_fixed_bit_field): Likewise.
(extract_bit_field_1): Likewise.
(extract_fixed_bit_field): Likewise.

* gcc.target/i386/volatile-bitfields-1.c: New.
* gcc.target/i386/volatile-bitfields-2.c: New.

From-SVN: r160865
parent f1071b12
gcc/ChangeLog:

2010-06-16  DJ Delorie  <dj@redhat.com>

* common.opt (-fstrict-volatile-bitfields): new.
* doc/invoke.texi: Document it.
* fold-const.c (optimize_bit_field_compare): For volatile
bitfields, use the field's type to determine the mode, not the
field's size.
* expr.c (expand_assignment): Likewise.
(get_inner_reference): Likewise.
(expand_expr_real_1): Likewise.
* expmed.c (store_fixed_bit_field): Likewise.
(extract_bit_field_1): Likewise.
(extract_fixed_bit_field): Likewise.

2010-06-16  Richard Guenther  <rguenther@suse.de>

* tree-inline.c (remap_gimple_op_r): Recurse using
......
gcc/common.opt:
@@ -629,6 +629,10 @@ floop-block
Common Report Var(flag_loop_block) Optimization
Enable Loop Blocking transformation
fstrict-volatile-bitfields
Common Report Var(flag_strict_volatile_bitfields) Init(-1)
Force bitfield accesses to match their type width
fguess-branch-probability
Common Report Var(flag_guess_branch_prob) Optimization
Enable guessing of branch probabilities
......
gcc/doc/invoke.texi:
@@ -17725,6 +17725,32 @@ the DSOs.
An overview of these techniques, their benefits and how to use them
is at @w{@uref{http://gcc.gnu.org/wiki/Visibility}}.
@item -fstrict-volatile-bitfields
This option should be used if accesses to volatile bitfields (or other
structure fields, although the compiler usually honors those types
anyway) should use a single access in a mode of the same size as the
container's type, aligned to a natural alignment if possible.  For
example, targets with memory-mapped peripheral registers might require
all such accesses to be 16 bits wide; with this flag the user could
declare all peripheral bitfields as ``unsigned short'' (assuming short
is 16 bits on these targets) to force GCC to use 16-bit accesses
instead of, perhaps, a more efficient 32-bit access.

If this option is disabled, the compiler will use the most efficient
instruction.  In the previous example, that might be a 32-bit load
instruction, even though that will access bytes that do not contain
any portion of the bitfield, or memory-mapped registers unrelated to
the one being updated.

If the target requires strict alignment, and honoring the container
type would require violating this alignment, a warning is issued.
However, the access happens as the user requested, under the
assumption that the user knows something about the target hardware
that GCC is unaware of.

The default value of this option is determined by the application binary
interface for the target processor.
@end table
@c man end
......
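
An illustrative example (editor's sketch, not part of this commit) of the memory-mapped peripheral case the documentation above describes; the register layout, field names, and address are hypothetical.

/* Hypothetical 16-bit status register of a memory-mapped peripheral.
   Declaring the bitfields with a 16-bit container type lets
   -fstrict-volatile-bitfields force single 16-bit accesses.  */
typedef struct {
  unsigned short ready : 1;
  unsigned short error : 1;
  unsigned short count : 14;
} dev_status_t;

#define DEV_STATUS (*(volatile dev_status_t *) 0x40001000)  /* made-up address */

int poll_ready (void)
{
  /* With -fstrict-volatile-bitfields this is one 16-bit load of the
     container; without it, GCC may emit a wider (e.g. 32-bit) load that
     also touches the adjacent bytes.  */
  return DEV_STATUS.ready;
}
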
gcc/expmed.c:
@@ -903,8 +903,14 @@ store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
if (GET_MODE_BITSIZE (mode) == 0
|| GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
mode = word_mode;
mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
if (MEM_VOLATILE_P (op0)
&& GET_MODE_BITSIZE (GET_MODE (op0)) > 0
&& flag_strict_volatile_bitfields > 0)
mode = GET_MODE (op0);
else
mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
{
@@ -1377,6 +1383,14 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
: mode);
/* If the bitfield is volatile, we need to make sure the access
remains on a type-aligned boundary. */
if (GET_CODE (op0) == MEM
&& MEM_VOLATILE_P (op0)
&& GET_MODE_BITSIZE (GET_MODE (op0)) > 0
&& flag_strict_volatile_bitfields > 0)
goto no_subreg_mode_swap;
if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
&& bitpos % BITS_PER_WORD == 0)
|| (mode1 != BLKmode
@@ -1729,8 +1743,19 @@ extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
includes the entire field. If such a mode would be larger than
a word, we won't be doing the extraction the normal way. */
mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
if (MEM_VOLATILE_P (op0)
&& flag_strict_volatile_bitfields > 0)
{
if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
mode = GET_MODE (op0);
else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
mode = GET_MODE (target);
else
mode = tmode;
}
else
mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
/* The only way this should occur is if the field spans word
@@ -1751,12 +1776,51 @@ extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
* BITS_PER_UNIT);
}
/* Get ref to an aligned byte, halfword, or word containing the field.
Adjust BITPOS to be position within a word,
and OFFSET to be the offset of that word.
Then alter OP0 to refer to that word. */
bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
offset -= (offset % (total_bits / BITS_PER_UNIT));
/* If we're accessing a volatile MEM, we can't do the next
alignment step if it results in a multi-word access where we
otherwise wouldn't have one. So, check for that case
here. */
if (MEM_P (op0)
&& MEM_VOLATILE_P (op0)
&& flag_strict_volatile_bitfields > 0
&& bitpos + bitsize <= total_bits
&& bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
{
if (STRICT_ALIGNMENT)
{
static bool informed_about_misalignment = false;
bool warned;
if (bitsize == total_bits)
warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
"mis-aligned access used for structure member");
else
warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
"mis-aligned access used for structure bitfield");
if (! informed_about_misalignment && warned)
{
informed_about_misalignment = true;
inform (input_location,
"When a volatile object spans multiple type-sized locations,"
" the compiler must choose between using a single mis-aligned access to"
" preserve the volatility, or using multiple aligned accesses to avoid"
" runtime faults. This code may fail at runtime if the hardware does"
" not allow this access.");
}
}
}
else
{
/* Get ref to an aligned byte, halfword, or word containing the field.
Adjust BITPOS to be position within a word,
and OFFSET to be the offset of that word.
Then alter OP0 to refer to that word. */
bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
offset -= (offset % (total_bits / BITS_PER_UNIT));
}
op0 = adjust_address (op0, mode, offset);
}
......
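
The warning added above fires when honoring the container type cannot be done with a single naturally aligned access.  A rough sketch (editor's illustration, not from the commit) of a layout that can hit that path on a strict-alignment target; whether the diagnostic actually triggers depends on the target's alignment rules and ABI.

/* With the packed attribute, f occupies bits 24..39 of the struct, so no
   naturally aligned 32-bit access covers it.  Honoring the int container
   type therefore needs a mis-aligned access, and on a STRICT_ALIGNMENT
   target GCC now warns before doing it anyway.  */
struct __attribute__ ((packed)) status
{
  char a, b, c;
  int f : 16;
};

volatile struct status s;

int read_f (void)
{
  return s.f;  /* may provoke "mis-aligned access used for structure bitfield" */
}
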
gcc/expr.c:
@@ -4229,6 +4229,13 @@ expand_assignment (tree to, tree from, bool nontemporal)
to_rtx = expand_normal (tem);
/* If the bitfield is volatile, we want to access it in the
field's mode, not the computed mode. */
if (volatilep
&& GET_CODE (to_rtx) == MEM
&& flag_strict_volatile_bitfields > 0)
to_rtx = adjust_address (to_rtx, mode1, 0);
if (offset != 0)
{
enum machine_mode address_mode;
@@ -5990,6 +5997,12 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
mode = DECL_MODE (field);
else if (DECL_MODE (field) == BLKmode)
blkmode_bitfield = true;
else if (TREE_THIS_VOLATILE (exp)
&& flag_strict_volatile_bitfields > 0)
/* Volatile bitfields should be accessed in the mode of the
field's type, not the mode computed based on the bit
size. */
mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
*punsignedp = DECL_UNSIGNED (field);
}
@@ -8966,6 +8979,14 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
|| modifier == EXPAND_STACK_PARM)
? modifier : EXPAND_NORMAL);
/* If the bitfield is volatile, we want to access it in the
field's mode, not the computed mode. */
if (volatilep
&& GET_CODE (op0) == MEM
&& flag_strict_volatile_bitfields > 0)
op0 = adjust_address (op0, mode1, 0);
mode2
= CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
@@ -9091,6 +9112,9 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER)
/* If the field is volatile, we always want an aligned
access. */
|| (volatilep && flag_strict_volatile_bitfields > 0)
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
|| (mode1 != BLKmode
......
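
The expr.c hunks above adjust the store path (expand_assignment), the load path (expand_expr_real_1), and the mode chosen by get_inner_reference, so that a volatile bitfield is accessed in the mode of its declared type.  A small sketch of the source-level effect, assuming a 16-bit unsigned short; the type and field names are illustrative only.

typedef struct {
  unsigned short mode : 2;
  unsigned short rate : 14;
} ctrl_reg_t;

volatile ctrl_reg_t ctrl;

void set_rate (unsigned int r)
{
  ctrl.rate = r;      /* read-modify-write performed with 16-bit container accesses */
}

unsigned int get_rate (void)
{
  return ctrl.rate;   /* single 16-bit load of the container */
}
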
gcc/fold-const.c:
@@ -3463,11 +3463,16 @@ optimize_bit_field_compare (location_t loc, enum tree_code code,
/* See if we can find a mode to refer to this field. We should be able to,
but fail if we can't. */
nmode = get_best_mode (lbitsize, lbitpos,
const_p ? TYPE_ALIGN (TREE_TYPE (linner))
: MIN (TYPE_ALIGN (TREE_TYPE (linner)),
TYPE_ALIGN (TREE_TYPE (rinner))),
word_mode, lvolatilep || rvolatilep);
if (lvolatilep
&& GET_MODE_BITSIZE (lmode) > 0
&& flag_strict_volatile_bitfields > 0)
nmode = lmode;
else
nmode = get_best_mode (lbitsize, lbitpos,
const_p ? TYPE_ALIGN (TREE_TYPE (linner))
: MIN (TYPE_ALIGN (TREE_TYPE (linner)),
TYPE_ALIGN (TREE_TYPE (rinner))),
word_mode, lvolatilep || rvolatilep);
if (nmode == VOIDmode)
return 0;
......
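
In fold-const.c, optimize_bit_field_compare folds comparisons involving bitfields; with the flag enabled it now keeps the access in the mode of the field's declared type rather than whatever narrower mode get_best_mode would pick.  A sketch of the kind of comparison affected (editor's illustration, names are made up):

typedef struct {
  unsigned int flag : 1;
  unsigned int kind : 3;
  unsigned int rest : 28;
} word_t;

volatile word_t w;

int is_kind_3 (void)
{
  /* With -fstrict-volatile-bitfields, the load feeding this compare is a
     full 32-bit access (the container type); otherwise a narrower byte
     access may be chosen.  */
  return w.kind == 3;
}
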
gcc/testsuite/ChangeLog:

2010-06-16  DJ Delorie  <dj@redhat.com>

* gcc.target/i386/volatile-bitfields-1.c: New.
* gcc.target/i386/volatile-bitfields-2.c: New.

2010-06-16  Jason Merrill  <jason@redhat.com>

* g++.dg/cpp0x/noexcept03.C: Test -Wnoexcept.
......
New file: gcc/testsuite/gcc.target/i386/volatile-bitfields-1.c

/* { dg-do compile } */
/* { dg-options "-O2 -fstrict-volatile-bitfields" } */

typedef struct {
  char a:1;
  char b:7;
  int c;
} BitStruct;

volatile BitStruct bits;

int foo ()
{
  return bits.b;
}

/* { dg-final { scan-assembler "movzbl.*bits" } } */
New file: gcc/testsuite/gcc.target/i386/volatile-bitfields-2.c

/* { dg-do compile } */
/* { dg-options "-O2 -fno-strict-volatile-bitfields" } */

typedef struct {
  char a:1;
  char b:7;
  int c;
} BitStruct;

volatile BitStruct bits;

int foo ()
{
  return bits.b;
}

/* { dg-final { scan-assembler "movl.*bits" } } */