Commit 48063b9d
authored Jun 04, 2013 by Ian Bolton
Committed by Ian Bolton, Jun 04, 2013
AArch64 - Improve MOVI handling (3/5)
From-SVN: r199656
parent 3ea63f60
Showing 4 changed files with 90 additions and 73 deletions (+90, -73)
gcc/ChangeLog                          +15  -0
gcc/config/aarch64/aarch64-protos.h     +2  -2
gcc/config/aarch64/aarch64.c           +71 -68
gcc/config/aarch64/constraints.md       +2  -3
gcc/ChangeLog (View file @ 48063b9d)

+2013-06-04  Ian Bolton  <ian.bolton@arm.com>
+
+	* config/aarch64/aarch64.c (simd_immediate_info): Struct to hold
+	information completed by aarch64_simd_valid_immediate.
+	(aarch64_legitimate_constant_p): Update arguments.
+	(aarch64_simd_valid_immediate): Work with struct rather than many
+	pointers.
+	(aarch64_simd_scalar_immediate_valid_for_move): Update arguments.
+	(aarch64_simd_make_constant): Update arguments.
+	(aarch64_output_simd_mov_immediate): Work with struct rather than
+	many pointers.  Output immediate directly rather than as operand.
+	* config/aarch64/aarch64-protos.h (aarch64_simd_valid_immediate):
+	Update prototype.
+	* config/aarch64/constraints.md (Dn): Update arguments.
+
 2013-06-04  Ian Bolton  <ian.bolton@arm.com>

 	* config/aarch64/aarch64.c (aarch64_simd_valid_immediate): No
 	longer static.
 	(aarch64_simd_immediate_valid_for_move): Remove.
 ...
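For background, the immediates this series deals with are the AdvSIMD MOVI/MVNI forms: an 8-bit pattern replicated into every lane, optionally shifted left by a multiple of eight bits, with MVNI covering values whose bitwise inverse fits that shape. Below is a minimal, self-contained sketch of that recognition idea for a single 32-bit lane; it is illustrative only, and the names and the simplified rule are not GCC's (the real code also handles MSL, byte-mask and floating-point forms).

    /* Illustrative only: recognise a 32-bit lane value that MOVI/MVNI can
       encode as an 8-bit immediate shifted left by 0, 8, 16 or 24 bits.
       The names here are hypothetical and not part of GCC.  */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct movi_info
    {
      uint8_t imm8;   /* the 8-bit pattern                     */
      int shift;      /* left shift applied to the pattern     */
      bool mvn;       /* true if the inverted value matched    */
    };

    static bool
    movi_encodable_si (uint32_t lane, struct movi_info *info)
    {
      for (int inverted = 0; inverted < 2; inverted++)
        {
          uint32_t v = inverted ? ~lane : lane;
          for (int shift = 0; shift < 32; shift += 8)
            if ((v & ~((uint32_t) 0xff << shift)) == 0)
              {
                info->imm8 = (uint8_t) (v >> shift);
                info->shift = shift;
                info->mvn = inverted != 0;
                return true;
              }
        }
      return false;
    }

    int
    main (void)
    {
      struct movi_info info;
      if (movi_encodable_si (0x00ab0000, &info))
        printf ("%s #0x%x, lsl #%d\n",
                info.mvn ? "mvni" : "movi", info.imm8, info.shift);
      return 0;
    }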
gcc/config/aarch64/aarch64-protos.h (View file @ 48063b9d)
...
@@ -157,8 +157,8 @@ bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
 bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
 bool aarch64_simd_scalar_immediate_valid_for_move (rtx, enum machine_mode);
 bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
-bool aarch64_simd_valid_immediate (rtx, enum machine_mode, int, rtx *,
-                                   int *, unsigned char *, int *, int *);
+bool aarch64_simd_valid_immediate (rtx, enum machine_mode, bool,
+                                   struct simd_immediate_info *);
 bool aarch64_symbolic_address_p (rtx);
 bool aarch64_symbolic_constant_p (rtx, enum aarch64_symbol_context,
                                   enum aarch64_symbol_type *);
...
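The prototype change above replaces five output pointers with a single struct pointer. A hedged before/after sketch of a call site follows; the caller and the variables x and mode are illustrative and not part of this commit, and the fragment assumes the declarations above are in scope.

    /* Before: each property came back through its own pointer.  */
    rtx modconst;
    int elementwidth, mvn, shift;
    unsigned char elementchar;
    if (aarch64_simd_valid_immediate (x, mode, 0, &modconst, &elementwidth,
                                      &elementchar, &mvn, &shift))
      { /* use modconst, elementwidth, elementchar, mvn, shift */ }

    /* After: one struct collects the same information.  */
    struct simd_immediate_info info;
    if (aarch64_simd_valid_immediate (x, mode, false, &info))
      { /* use info.value, info.element_width, info.element_char,
           info.mvn, info.shift */ }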
gcc/config/aarch64/aarch64.c (View file @ 48063b9d)
...
@@ -87,6 +87,15 @@ struct aarch64_address_info {
   enum aarch64_symbol_type symbol_type;
 };

+struct simd_immediate_info
+{
+  rtx value;
+  int shift;
+  int element_width;
+  unsigned char element_char;
+  bool mvn;
+};
+
 /* The current code model.  */
 enum aarch64_code_model aarch64_cmodel;
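As an illustrative example (not from the commit) of what this struct ends up carrying: a V4HI constant whose lanes are all 0x4700 can be emitted as MOVI v0.4h, 0x47, lsl 8, so aarch64_simd_valid_immediate would describe it roughly as below (field values are illustrative).

    /* Illustrative values only, assuming a V4HI constant with all lanes 0x4700.  */
    struct simd_immediate_info example = {
      /* value         */ GEN_INT (0x47),
      /* shift         */ 8,
      /* element_width */ 16,     /* lane width in bits                 */
      /* element_char  */ 'h',    /* printed as the .4h lane suffix     */
      /* mvn           */ false
    };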
...
@@ -5150,8 +5159,7 @@ aarch64_legitimate_constant_p (enum machine_mode mode, rtx x)
   /* This could probably go away because
      we now decompose CONST_INTs according to expand_mov_immediate.  */
   if ((GET_CODE (x) == CONST_VECTOR
-       && aarch64_simd_valid_immediate (x, mode, false,
-                                        NULL, NULL, NULL, NULL, NULL))
+       && aarch64_simd_valid_immediate (x, mode, false, NULL))
       || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x))
        return !targetm.cannot_force_const_mem (mode, x);
...
@@ -6144,10 +6152,8 @@ aarch64_vect_float_const_representable_p (rtx x)
 /* Return true for valid and false for invalid.  */
 bool
-aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
-                              rtx *modconst, int *elementwidth,
-                              unsigned char *elementchar,
-                              int *mvn, int *shift)
+aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse,
+                              struct simd_immediate_info *info)
 {
 #define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG)	\
   matches = 1;						\
...
@@ -6181,17 +6187,14 @@ aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
            || aarch64_vect_float_const_representable_p (op)))
        return false;

-      if (modconst)
-        *modconst = CONST_VECTOR_ELT (op, 0);
-
-      if (elementwidth)
-        *elementwidth = elem_width;
-
-      if (elementchar)
-        *elementchar = sizetochar (elem_width);
-
-      if (shift)
-        *shift = 0;
+      if (info)
+        {
+          info->value = CONST_VECTOR_ELT (op, 0);
+          info->element_width = elem_width;
+          info->element_char = sizetochar (elem_width);
+          info->mvn = false;
+          info->shift = 0;
+        }

       return true;
     }
...
@@ -6293,21 +6296,13 @@ aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
           || immtype == 18)
        return false;

-  if (elementwidth)
-    *elementwidth = elsize;
-
-  if (elementchar)
-    *elementchar = elchar;
-
-  if (mvn)
-    *mvn = emvn;
-
-  if (shift)
-    *shift = eshift;
-
-  if (modconst)
+  if (info)
     {
+      info->element_width = elsize;
+      info->element_char = elchar;
+      info->mvn = emvn != 0;
+      info->shift = eshift;
+
       unsigned HOST_WIDE_INT imm = 0;

       /* Un-invert bytes of recognized vector, if necessary.  */
...
@@ -6324,26 +6319,24 @@ aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
            imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
                   << (i * BITS_PER_UNIT);

-          *modconst = GEN_INT (imm);
+          info->value = GEN_INT (imm);
        }
       else
        {
-          unsigned HOST_WIDE_INT imm = 0;
          for (i = 0; i < elsize / BITS_PER_UNIT; i++)
            imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);

          /* Construct 'abcdefgh' because the assembler cannot handle
             generic constants.  */
-          gcc_assert (shift != NULL && mvn != NULL);
-          if (*mvn)
+          if (info->mvn)
            imm = ~imm;
-          imm = (imm >> *shift) & 0xff;
-          *modconst = GEN_INT (imm);
+          imm = (imm >> info->shift) & 0xff;
+          info->value = GEN_INT (imm);
        }
     }

-  return (immtype >= 0);
+  return true;

 #undef CHECK
 }
...
@@ -6451,8 +6444,7 @@ aarch64_simd_scalar_immediate_valid_for_move (rtx op, enum machine_mode mode)
   gcc_assert (!VECTOR_MODE_P (mode));
   vmode = aarch64_preferred_simd_mode (mode);
   rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
-  return aarch64_simd_valid_immediate (op_v, vmode, 0, NULL,
-                                       NULL, NULL, NULL, NULL);
+  return aarch64_simd_valid_immediate (op_v, vmode, false, NULL);
 }

 /* Construct and return a PARALLEL RTX vector.  */
...
@@ -6680,8 +6672,7 @@ aarch64_simd_make_constant (rtx vals)
     gcc_unreachable ();

   if (const_vec != NULL_RTX
-      && aarch64_simd_valid_immediate (const_vec, mode, 0, NULL,
-                                       NULL, NULL, NULL, NULL))
+      && aarch64_simd_valid_immediate (const_vec, mode, false, NULL))
     /* Load using MOVI/MVNI.  */
     return const_vec;
   else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
...
@@ -7244,45 +7235,57 @@ aarch64_output_simd_mov_immediate (rtx *const_vector,
                                    unsigned width)
 {
   bool is_valid;
-  unsigned char widthc;
-  int lane_width_bits;
   static char templ[40];
-  int shift = 0, mvn = 0;
   const char *mnemonic;
   unsigned int lane_count = 0;

+  struct simd_immediate_info info;
+
   /* This will return true to show const_vector is legal for use as either
-     a AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate.  It
-     writes back various values via the int pointers and it modifies the
-     operand pointed to by CONST_VECTOR in-place, if required.  */
-  is_valid =
-    aarch64_simd_valid_immediate (*const_vector, mode, 0,
-                                  const_vector, &lane_width_bits, &widthc,
-                                  &mvn, &shift);
+     a AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate.  It will
+     also update INFO to show how the immediate should be generated.  */
+  is_valid = aarch64_simd_valid_immediate (*const_vector, mode, false, &info);
   gcc_assert (is_valid);

+  gcc_assert (info.element_width != 0);
+  lane_count = width / info.element_width;
+
   mode = GET_MODE_INNER (mode);
   if (mode == SFmode || mode == DFmode)
     {
-      bool zero_p =
-        aarch64_float_const_zero_rtx_p (*const_vector);
-      gcc_assert (shift == 0);
-      mnemonic = zero_p ? "movi" : "fmov";
+      gcc_assert (info.shift == 0 && ! info.mvn);
+      if (aarch64_float_const_zero_rtx_p (info.value))
+        info.value = GEN_INT (0);
+      else
+        {
+#define buf_size 20
+          REAL_VALUE_TYPE r;
+          REAL_VALUE_FROM_CONST_DOUBLE (r, info.value);
+          char float_buf[buf_size] = {'\0'};
+          real_to_decimal_for_mode (float_buf, &r, buf_size, buf_size, 1, mode);
+#undef buf_size
+
+          if (lane_count == 1)
+            snprintf (templ, sizeof (templ), "fmov\t%%d0, %s", float_buf);
+          else
+            snprintf (templ, sizeof (templ), "fmov\t%%0.%d%c, %s",
+                      lane_count, info.element_char, float_buf);
+          return templ;
+        }
     }
-  else
-    mnemonic = mvn ? "mvni" : "movi";

-  gcc_assert (lane_width_bits != 0);
-  lane_count = width / lane_width_bits;
+  mnemonic = info.mvn ? "mvni" : "movi";

   if (lane_count == 1)
-    snprintf (templ, sizeof (templ), "%s\t%%d0, %%1", mnemonic);
-  else if (shift)
-    snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1, lsl %d",
-              mnemonic, lane_count, widthc, shift);
+    snprintf (templ, sizeof (templ), "%s\t%%d0, " HOST_WIDE_INT_PRINT_HEX,
+              mnemonic, UINTVAL (info.value));
+  else if (info.shift)
+    snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
+              ", lsl %d", mnemonic, lane_count, info.element_char,
+              UINTVAL (info.value), info.shift);
   else
-    snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1",
-              mnemonic, lane_count, widthc);
+    snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX,
+              mnemonic, lane_count, info.element_char, UINTVAL (info.value));
   return templ;
 }
...
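One visible effect of the rewrite of aarch64_output_simd_mov_immediate is that the immediate is now printed into the template directly (via HOST_WIDE_INT_PRINT_HEX and info.value) instead of being left as the %1 operand. The following standalone sketch shows the three integer template shapes with sample values; it is illustrative only, not GCC output, and uses plain %#lx where GCC uses HOST_WIDE_INT_PRINT_HEX.

    /* Illustrative only: the three integer output shapes with sample values.  */
    #include <stdio.h>

    int
    main (void)
    {
      char templ[40];
      const char *mnemonic = "movi";   /* or "mvni" when info.mvn is set      */
      int lane_count = 4;              /* e.g. 128-bit vector, 32-bit lanes   */
      char element_char = 's';
      unsigned long value = 0xab;
      int shift = 8;

      /* lane_count == 1: scalar D-register form.  */
      snprintf (templ, sizeof (templ), "%s\t%%d0, %#lx", mnemonic, value);
      puts (templ);                    /* prints: movi  %d0, 0xab             */

      /* Shifted vector form.  */
      snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %#lx, lsl %d",
                mnemonic, lane_count, element_char, value, shift);
      puts (templ);                    /* prints: movi  %0.4s, 0xab, lsl 8    */

      /* Unshifted vector form.  */
      snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %#lx",
                mnemonic, lane_count, element_char, value);
      puts (templ);                    /* prints: movi  %0.4s, 0xab           */

      return 0;
    }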
gcc/config/aarch64/constraints.md (View file @ 48063b9d)
...
@@ -143,9 +143,8 @@
   "@internal
  A constraint that matches vector of immediates."
  (and (match_code "const_vector")
-      (match_test "aarch64_simd_valid_immediate (op, GET_MODE (op), 0,
-                                                 NULL, NULL, NULL,
-                                                 NULL, NULL)")))
+      (match_test "aarch64_simd_valid_immediate (op, GET_MODE (op),
+                                                 false, NULL)")))

 (define_constraint "Dh"
   "@internal
...