Commit c6fc9e43 by Yufeng Zhang Committed by Yufeng Zhang

aarch64.c (aarch64_simd_mangle_map_entry): New typedef.

gcc/

2012-12-05  Yufeng Zhang  <yufeng.zhang@arm.com>

	* config/aarch64/aarch64.c (aarch64_simd_mangle_map_entry): New
	typedef.
	(aarch64_simd_mangle_map): New table.
	(aarch64_mangle_type): Locate and return the mangled name for
	a given AdvSIMD vector type.

gcc/testsuite/

2012-12-05  Yufeng Zhang  <yufeng.zhang@arm.com>

	* g++.dg/abi/mangle-neon-aarch64.C: New test.

From-SVN: r194208
parent 18a2ad5d
2012-12-05  Yufeng Zhang  <yufeng.zhang@arm.com>
* config/aarch64/aarch64.c (aarch64_simd_mangle_map_entry): New
typedef.
(aarch64_simd_mangle_map): New table.
(aarch64_mangle_type): Locate and return the mangled name for
a given AdvSIMD vector type.
2012-12-05 Yufeng Zhang <yufeng.zhang@arm.com>
	* config/aarch64/aarch64.c (aarch64_mangle_type): New function.
	(TARGET_MANGLE_TYPE): Define.
@@ -5862,6 +5862,50 @@ aarch64_preferred_simd_mode (enum machine_mode mode)
   return word_mode;
 }
/* A table to help perform AArch64-specific name mangling for AdvSIMD
vector types in order to conform to the AAPCS64 (see "Procedure
Call Standard for the ARM 64-bit Architecture", Appendix A). To
qualify for emission with the mangled names defined in that document,
a vector type must not only be of the correct mode but also be
composed of AdvSIMD vector element types (e.g.
__builtin_aarch64_simd_qi); these types are registered by
aarch64_init_simd_builtins (). In other words, vector types defined
in other ways e.g. via vector_size attribute will get default
mangled names. */
typedef struct
{
enum machine_mode mode;  /* Vector mode this entry applies to.  */
const char *element_type_name;  /* DECL_NAME of the AdvSIMD element type's
				   TYPE_DECL, as registered by
				   aarch64_init_simd_builtins ().  */
const char *mangled_name;  /* AAPCS64 mangling, Itanium-ABI encoded as
			      <length><name> (e.g. "10__Int8x8_t").  */
} aarch64_simd_mangle_map_entry;
/* Mapping from (vector mode, element-type name) to the AAPCS64 mangled
   name.  Note that mode alone is not unique: e.g. V8QImode covers the
   signed, unsigned and polynomial 8x8 types, which is why the element
   type name is also matched.  Terminated by a VOIDmode sentinel.  */
static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
/* 64-bit containerized types. */
{ V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" },
{ V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" },
{ V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" },
{ V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" },
{ V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" },
{ V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" },
{ V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" },
{ V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" },
{ V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
/* 128-bit containerized types. */
{ V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" },
{ V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" },
{ V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" },
{ V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" },
{ V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" },
{ V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" },
{ V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" },
{ V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" },
{ V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" },
{ V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
{ V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
{ V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
/* Sentinel: end of table.  */
{ VOIDmode, NULL, NULL }
};
/* Implement TARGET_MANGLE_TYPE.  */
const char *
@@ -5872,6 +5916,26 @@ aarch64_mangle_type (const_tree type)
  if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
    return "St9__va_list";
/* Check the mode of the vector type, and the name of the vector
element type, against the table.  On a hit, return the table's
AAPCS64 mangled name; otherwise fall through to default mangling.  */
if (TREE_CODE (type) == VECTOR_TYPE)
{
/* Linear scan of the sentinel-terminated mangle map; the table is
   small, so no faster lookup is needed.  */
aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;
while (pos->mode != VOIDmode)
{
tree elt_type = TREE_TYPE (type);
/* Match both the whole-vector mode and the DECL_NAME of the
   element type's TYPE_DECL; only element types registered by
   aarch64_init_simd_builtins () carry the expected names, so
   vector_size-attribute vectors deliberately fail this test.
   The TYPE_DECL check guards the DECL_NAME access below.  */
if (pos->mode == TYPE_MODE (type)
&& TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
&& !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
pos->element_type_name))
return pos->mangled_name;
pos++;
}
}
  /* Use the default mangling.  */
  return NULL;
}
......
2012-12-05  Yufeng Zhang  <yufeng.zhang@arm.com>
* g++.dg/abi/mangle-neon-aarch64.C: New test.
2012-12-05 Yufeng Zhang <yufeng.zhang@arm.com>
	* g++.dg/abi/arm_va_list.C: Also test on aarch64*-*-*.
2012-12-05  James Greenhalgh  <james.greenhalgh@arm.com>
......
// Test that AArch64 AdvSIMD (NEON) vector types have their names mangled
// correctly.
// { dg-do compile { target { aarch64*-*-* } } }
#include <arm_neon.h>
// Each function below exists solely so its mangled symbol can be matched
// by the scan-assembler directives that follow; bodies are intentionally
// empty and parameter types are the <arm_neon.h> AdvSIMD typedefs.

// 64-bit (D-register) vector types.
void f0 (int8x8_t a) {}
void f1 (int16x4_t a) {}
void f2 (int32x2_t a) {}
void f3 (uint8x8_t a) {}
void f4 (uint16x4_t a) {}
void f5 (uint32x2_t a) {}
void f6 (float32x2_t a) {}
void f7 (poly8x8_t a) {}
void f8 (poly16x4_t a) {}
// 128-bit (Q-register) vector types.
void f9 (int8x16_t a) {}
void f10 (int16x8_t a) {}
void f11 (int32x4_t a) {}
void f12 (int64x2_t a) {}
void f13 (uint8x16_t a) {}
void f14 (uint16x8_t a) {}
void f15 (uint32x4_t a) {}
void f16 (uint64x2_t a) {}
void f17 (float32x4_t a) {}
void f18 (float64x2_t a) {}
void f19 (poly8x16_t a) {}
void f20 (poly16x8_t a) {}
// Repeated parameter type: checks Itanium-ABI substitution ("S_").
void f21 (int8x16_t, int8x16_t) {}
// { dg-final { scan-assembler "_Z2f010__Int8x8_t:" } }
// { dg-final { scan-assembler "_Z2f111__Int16x4_t:" } }
// { dg-final { scan-assembler "_Z2f211__Int32x2_t:" } }
// { dg-final { scan-assembler "_Z2f311__Uint8x8_t:" } }
// { dg-final { scan-assembler "_Z2f412__Uint16x4_t:" } }
// { dg-final { scan-assembler "_Z2f512__Uint32x2_t:" } }
// { dg-final { scan-assembler "_Z2f613__Float32x2_t:" } }
// { dg-final { scan-assembler "_Z2f711__Poly8x8_t:" } }
// { dg-final { scan-assembler "_Z2f812__Poly16x4_t:" } }
// { dg-final { scan-assembler "_Z2f911__Int8x16_t:" } }
// { dg-final { scan-assembler "_Z3f1011__Int16x8_t:" } }
// { dg-final { scan-assembler "_Z3f1111__Int32x4_t:" } }
// { dg-final { scan-assembler "_Z3f1211__Int64x2_t:" } }
// { dg-final { scan-assembler "_Z3f1312__Uint8x16_t:" } }
// { dg-final { scan-assembler "_Z3f1412__Uint16x8_t:" } }
// { dg-final { scan-assembler "_Z3f1512__Uint32x4_t:" } }
// { dg-final { scan-assembler "_Z3f1612__Uint64x2_t:" } }
// { dg-final { scan-assembler "_Z3f1713__Float32x4_t:" } }
// { dg-final { scan-assembler "_Z3f1813__Float64x2_t:" } }
// { dg-final { scan-assembler "_Z3f1912__Poly8x16_t:" } }
// { dg-final { scan-assembler "_Z3f2012__Poly16x8_t:" } }
// { dg-final { scan-assembler "_Z3f2111__Int8x16_tS_:" } }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment