Commit 67ae67ec authored by Eric Botcazou, committed by Eric Botcazou

fold-const.c (const_binop): Adjust comment.

	* fold-const.c (const_binop): Adjust comment.
	(extract_muldiv_1): Likewise.
	(fold_comparison): Likewise.
	* stor-layout.c (place_field): Fix typo.
	* tree.c (double_int_fits_to_tree_p): Remove obsolete comment.
	(force_fit_type_double): Likewise.  Fix long line.

From-SVN: r193812
parent 29be7510
2012-11-26  Eric Botcazou  <ebotcazou@adacore.com>

	* fold-const.c (const_binop): Adjust comment.
	(extract_muldiv_1): Likewise.
	(fold_comparison): Likewise.
	* stor-layout.c (place_field): Fix typo.
	* tree.c (double_int_fits_to_tree_p): Remove obsolete comment.
	(force_fit_type_double): Likewise.  Fix long line.

2012-11-26  Greta Yorsh  <Greta.Yorsh@arm.com>

	* config/arm/arm.h (TARGET_LDRD): Reject Thumb1 targets.
gcc/fold-const.c

@@ -1359,7 +1359,7 @@ const_binop (enum tree_code code, tree arg1, tree arg2)
   return NULL_TREE;
 }
 
-/* Create a size type INT_CST node with NUMBER sign extended.  KIND
+/* Create a sizetype INT_CST node with NUMBER sign extended.  KIND
    indicates which particular sizetype to create.  */
 
 tree
@@ -5809,11 +5809,9 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
       else
         break;
 
-      /* If we have an unsigned type is not a sizetype, we cannot widen
-         the operation since it will change the result if the original
-         computation overflowed.  */
-      if (TYPE_UNSIGNED (ctype)
-          && ctype != type)
+      /* If we have an unsigned type, we cannot widen the operation since it
+         will change the result if the original computation overflowed.  */
+      if (TYPE_UNSIGNED (ctype) && ctype != type)
         break;
 
       /* If we were able to eliminate our operation from the first side,
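A side note on the reasoning in the rewritten comment: if the original arithmetic was done in a narrower unsigned type and wrapped around, redoing it in a wider type gives a different answer, so extract_muldiv_1 must refuse to widen it. A self-contained C illustration of that effect, with made-up values (this is not GCC code):

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint16_t a = 50000;

  /* (a * 2) / 2 with every intermediate result truncated back to 16
     bits: the multiplication wraps modulo 65536, so the quotient is
     not a.  */
  uint16_t narrow = (uint16_t) ((uint16_t) (a * 2u) / 2u);

  /* The same expression widened to 32 bits first: nothing wraps, and
     the quotient is a again.  Widening therefore changes the result
     exactly when the narrow computation overflowed.  */
  uint32_t wide = ((uint32_t) a * 2u) / 2u;

  printf ("narrow = %u, wide = %u\n", (unsigned) narrow, (unsigned) wide);
  return 0;
}

With these values, narrow is 17232 while wide is 50000, which is why the guard in the hunk only lets the transformation proceed for signed types (where the middle end may assume overflow does not happen) or when no actual widening takes place (ctype == type).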
@@ -9006,7 +9004,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type,
            }
          /* We can simplify the comparison to a comparison of the variable
             offset parts if the constant offset parts are equal.
-            Be careful to use signed size type here because otherwise we
+            Be careful to use signed sizetype here because otherwise we
             mess with array offsets in the wrong way.  This is possible
             because pointer arithmetic is restricted to retain within an
             object and overflow on pointer differences is undefined as of

@@ -9016,11 +9014,11 @@ fold_comparison (location_t loc, enum tree_code code, tree type,
               || (indirect_base0 && DECL_P (base0))
               || POINTER_TYPE_OVERFLOW_UNDEFINED))
         {
-          /* By converting to signed size type we cover middle-end pointer
+          /* By converting to signed sizetype we cover middle-end pointer
             arithmetic which operates on unsigned pointer types of size
             type size and ARRAY_REF offsets which are properly sign or
             zero extended from their type in case it is narrower than
-            size type.  */
+            sizetype.  */
           if (offset0 == NULL_TREE)
             offset0 = build_int_cst (ssizetype, 0);
           else
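The point of forcing both offsets into ssizetype shows up in miniature below: a negative offset compared in an unsigned size type wraps to a huge value and the comparison flips. This is a plain C analogy only; ptrdiff_t and size_t merely stand in for ssizetype and sizetype.

#include <stddef.h>
#include <stdio.h>

int main (void)
{
  ptrdiff_t off0 = -4;  /* e.g. an ARRAY_REF offset reaching before the base */
  ptrdiff_t off1 = 8;

  /* Signed comparison of the variable offset parts: -4 < 8, as expected.  */
  printf ("signed:   %d\n", off0 < off1);

  /* The same offsets compared as unsigned sizes: (size_t) -4 is huge,
     so the result is inverted, which is the "wrong way" the comment
     warns about.  */
  printf ("unsigned: %d\n", (size_t) off0 < (size_t) off1);

  return 0;
}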
gcc/stor-layout.c

@@ -1374,7 +1374,7 @@ place_field (record_layout_info rli, tree field)
           normalize_rli (rli);
         }
 
-      /* If we're starting a new run of same size type bitfields
+      /* If we're starting a new run of same type size bitfields
          (or a run of non-bitfields), set up the "first of the run"
          fields.
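For context on the comment being fixed: the "run" bookkeeping in place_field is used for MS-style bitfield layout (e.g. with -mms-bitfields on targets that support it), where consecutive bitfields whose declared types have the same size share a storage unit. A hypothetical declaration showing what such runs look like in source (illustration only; actual layout is target-dependent):

#include <stdio.h>

struct s
{
  unsigned int  a : 3;  /* first of a run of int-sized bitfields   */
  unsigned int  b : 5;  /* same type size: continues the run       */
  unsigned char c : 2;  /* different type size: starts a new run   */
  int           d;      /* non-bitfield: any current run ends here */
};

int main (void)
{
  /* The exact size depends on the target's bitfield layout rules.  */
  printf ("sizeof (struct s) = %zu\n", sizeof (struct s));
  return 0;
}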
gcc/tree.c

@@ -1074,7 +1074,6 @@ double_int_to_tree (tree type, double_int cst)
 bool
 double_int_fits_to_tree_p (const_tree type, double_int cst)
 {
-  /* Size types *are* sign extended.  */
   bool sign_extended_type = !TYPE_UNSIGNED (type);
 
   double_int ext
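The check whose comment is being dropped works by truncating the constant to TYPE_PRECISION (type) bits, re-extending it (sign extension for signed types, zero extension for unsigned ones, hence sign_extended_type = !TYPE_UNSIGNED (type)), and testing that the round trip is lossless. A rough standalone C analogue restricted to 64-bit values; the helpers ext_to_precision and fits_in_precision are invented for this sketch and are not GCC's double_int API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Keep the low PREC bits of VAL (1 <= PREC <= 64) and extend them back
   to 64 bits, using sign extension when SIGN_EXTEND is set and zero
   extension otherwise.  Mirrors the spirit of cst.ext (prec, uns).  */
static int64_t
ext_to_precision (int64_t val, unsigned prec, bool sign_extend)
{
  uint64_t mask = prec >= 64 ? ~UINT64_C (0) : (UINT64_C (1) << prec) - 1;
  uint64_t low = (uint64_t) val & mask;

  if (sign_extend && prec < 64 && (low >> (prec - 1)) & 1)
    return (int64_t) (low | ~mask);   /* value is negative in PREC bits */
  return (int64_t) low;
}

/* VAL fits a PREC-bit type iff truncating and re-extending it gives VAL
   back: the same idea as double_int_fits_to_tree_p.  */
static bool
fits_in_precision (int64_t val, unsigned prec, bool sign_extended_type)
{
  return ext_to_precision (val, prec, sign_extended_type) == val;
}

int main (void)
{
  /* 200 fits an unsigned 8-bit type but not a signed one; -56 is the
     other way around.  Prints "0 1 1 0".  */
  printf ("%d %d %d %d\n",
          fits_in_precision (200, 8, true),
          fits_in_precision (200, 8, false),
          fits_in_precision (-56, 8, true),
          fits_in_precision (-56, 8, false));
  return 0;
}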
@@ -1102,10 +1101,7 @@ tree
 force_fit_type_double (tree type, double_int cst, int overflowable,
                        bool overflowed)
 {
-  bool sign_extended_type;
-
-  /* Size types *are* sign extended.  */
-  sign_extended_type = !TYPE_UNSIGNED (type);
+  bool sign_extended_type = !TYPE_UNSIGNED (type);
 
   /* If we need to set overflow flags, return a new unshared node.  */
   if (overflowed || !double_int_fits_to_tree_p (type, cst))
@@ -1115,8 +1111,8 @@ force_fit_type_double (tree type, double_int cst, int overflowable,
           || (overflowable > 0 && sign_extended_type))
         {
           tree t = make_node (INTEGER_CST);
-          TREE_INT_CST (t) = cst.ext (TYPE_PRECISION (type),
-                                      !sign_extended_type);
+          TREE_INT_CST (t)
+            = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
           TREE_TYPE (t) = type;
           TREE_OVERFLOW (t) = 1;
           return t;
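The context lines above show what happens when the constant does not fit (or overflow was already signalled): a fresh, unshared INTEGER_CST is built holding the value extended to the type's precision, and TREE_OVERFLOW is set on it. A compressed sketch of that decision, meant to be appended to the previous example and reusing its hypothetical helpers; only the last arm of the guard is visible in the hunk, the first two are recalled from the surrounding GCC source, and the full OVERFLOWABLE semantics are not modelled here:

/* A value plus an overflow flag, standing in for an INTEGER_CST node.  */
struct fake_cst
{
  int64_t value;
  bool overflow;
};

static struct fake_cst
force_fit_sketch (int64_t val, unsigned prec, bool sign_extended_type,
                  int overflowable, bool overflowed)
{
  struct fake_cst c = { val, false };

  if (overflowed || !fits_in_precision (val, prec, sign_extended_type))
    {
      /* When overflow must be recorded, produce a new constant with the
         flag set and the value folded into the type's precision.  */
      if (overflowed
          || overflowable < 0
          || (overflowable > 0 && sign_extended_type))
        {
          c.value = ext_to_precision (val, prec, sign_extended_type);
          c.overflow = true;
        }
    }

  /* Otherwise fall through; the real function returns the ordinary
     shared constant via double_int_to_tree.  */
  return c;
}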