Commit 383f9b34 by Tejas Belagod Committed by Tejas Belagod

arm_neon.h (vmovn_high_s16, [...]): Fix source operand number and update copyright.

2013-01-07  Tejas Belagod  <tejas.belagod@arm.com>

        * config/aarch64/arm_neon.h (vmovn_high_s16, vmovn_high_s32,
        vmovn_high_s64, vmovn_high_u16, vmovn_high_u32, vmovn_high_u64,
        vqmovn_high_s16, vqmovn_high_s32, vqmovn_high_s64, vqmovn_high_u16,
        vqmovn_high_u32, vqmovn_high_u64, vqmovun_high_s16, vqmovun_high_s32,
        vqmovun_high_s64): Fix source operand number and update copyright.

From-SVN: r194977
parent 3626621a
2013-01-07 Tejas Belagod <tejas.belagod@arm.com>
* config/aarch64/arm_neon.h (vmovn_high_s16, vmovn_high_s32,
vmovn_high_s64, vmovn_high_u16, vmovn_high_u32, vmovn_high_u64,
vqmovn_high_s16, vqmovn_high_s32, vqmovn_high_s64, vqmovn_high_u16,
vqmovn_high_u32, vqmovn_high_u64, vqmovun_high_s16, vqmovun_high_s32,
vqmovun_high_s64): Fix source operand number and update copyright.
2013-01-07 Richard Biener <rguenther@suse.de> 2013-01-07 Richard Biener <rguenther@suse.de>
PR middle-end/55890 PR middle-end/55890
......
/* ARM NEON intrinsics include file. /* ARM NEON intrinsics include file.
Copyright (C) 2011, 2012 Free Software Foundation, Inc. Copyright (C) 2011-2013 Free Software Foundation, Inc.
Contributed by ARM Ltd. Contributed by ARM Ltd.
This file is part of GCC. This file is part of GCC.
...@@ -11647,7 +11647,7 @@ __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) ...@@ -11647,7 +11647,7 @@ __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vmovn_high_s16 (int8x8_t a, int16x8_t b) vmovn_high_s16 (int8x8_t a, int16x8_t b)
{ {
int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0))); int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
__asm__ ("xtn2 %0.16b,%2.8h" __asm__ ("xtn2 %0.16b,%1.8h"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -11658,7 +11658,7 @@ __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) ...@@ -11658,7 +11658,7 @@ __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmovn_high_s32 (int16x4_t a, int32x4_t b) vmovn_high_s32 (int16x4_t a, int32x4_t b)
{ {
int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0))); int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
__asm__ ("xtn2 %0.8h,%2.4s" __asm__ ("xtn2 %0.8h,%1.4s"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -11669,7 +11669,7 @@ __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) ...@@ -11669,7 +11669,7 @@ __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmovn_high_s64 (int32x2_t a, int64x2_t b) vmovn_high_s64 (int32x2_t a, int64x2_t b)
{ {
int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0))); int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
__asm__ ("xtn2 %0.4s,%2.2d" __asm__ ("xtn2 %0.4s,%1.2d"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -11680,7 +11680,7 @@ __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) ...@@ -11680,7 +11680,7 @@ __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vmovn_high_u16 (uint8x8_t a, uint16x8_t b) vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
{ {
uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0))); uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
__asm__ ("xtn2 %0.16b,%2.8h" __asm__ ("xtn2 %0.16b,%1.8h"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -11691,7 +11691,7 @@ __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) ...@@ -11691,7 +11691,7 @@ __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmovn_high_u32 (uint16x4_t a, uint32x4_t b) vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
{ {
uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0))); uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
__asm__ ("xtn2 %0.8h,%2.4s" __asm__ ("xtn2 %0.8h,%1.4s"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -11702,7 +11702,7 @@ __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) ...@@ -11702,7 +11702,7 @@ __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmovn_high_u64 (uint32x2_t a, uint64x2_t b) vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
{ {
uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0))); uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
__asm__ ("xtn2 %0.4s,%2.2d" __asm__ ("xtn2 %0.4s,%1.2d"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14121,7 +14121,7 @@ __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) ...@@ -14121,7 +14121,7 @@ __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqmovn_high_s16 (int8x8_t a, int16x8_t b) vqmovn_high_s16 (int8x8_t a, int16x8_t b)
{ {
int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0))); int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
__asm__ ("sqxtn2 %0.16b, %2.8h" __asm__ ("sqxtn2 %0.16b, %1.8h"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14132,7 +14132,7 @@ __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) ...@@ -14132,7 +14132,7 @@ __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqmovn_high_s32 (int16x4_t a, int32x4_t b) vqmovn_high_s32 (int16x4_t a, int32x4_t b)
{ {
int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0))); int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
__asm__ ("sqxtn2 %0.8h, %2.4s" __asm__ ("sqxtn2 %0.8h, %1.4s"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14143,7 +14143,7 @@ __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) ...@@ -14143,7 +14143,7 @@ __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqmovn_high_s64 (int32x2_t a, int64x2_t b) vqmovn_high_s64 (int32x2_t a, int64x2_t b)
{ {
int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0))); int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
__asm__ ("sqxtn2 %0.4s, %2.2d" __asm__ ("sqxtn2 %0.4s, %1.2d"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14154,7 +14154,7 @@ __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) ...@@ -14154,7 +14154,7 @@ __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqmovn_high_u16 (uint8x8_t a, uint16x8_t b) vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
{ {
uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0))); uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
__asm__ ("uqxtn2 %0.16b, %2.8h" __asm__ ("uqxtn2 %0.16b, %1.8h"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14165,7 +14165,7 @@ __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) ...@@ -14165,7 +14165,7 @@ __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqmovn_high_u32 (uint16x4_t a, uint32x4_t b) vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
{ {
uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0))); uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
__asm__ ("uqxtn2 %0.8h, %2.4s" __asm__ ("uqxtn2 %0.8h, %1.4s"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14176,7 +14176,7 @@ __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) ...@@ -14176,7 +14176,7 @@ __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqmovn_high_u64 (uint32x2_t a, uint64x2_t b) vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
{ {
uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0))); uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
__asm__ ("uqxtn2 %0.4s, %2.2d" __asm__ ("uqxtn2 %0.4s, %1.2d"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14187,7 +14187,7 @@ __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) ...@@ -14187,7 +14187,7 @@ __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqmovun_high_s16 (uint8x8_t a, int16x8_t b) vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
{ {
uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0))); uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
__asm__ ("sqxtun2 %0.16b, %2.8h" __asm__ ("sqxtun2 %0.16b, %1.8h"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14198,7 +14198,7 @@ __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) ...@@ -14198,7 +14198,7 @@ __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqmovun_high_s32 (uint16x4_t a, int32x4_t b) vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
{ {
uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0))); uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
__asm__ ("sqxtun2 %0.8h, %2.4s" __asm__ ("sqxtun2 %0.8h, %1.4s"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
...@@ -14209,7 +14209,7 @@ __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) ...@@ -14209,7 +14209,7 @@ __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqmovun_high_s64 (uint32x2_t a, int64x2_t b) vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
{ {
uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0))); uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
__asm__ ("sqxtun2 %0.4s, %2.2d" __asm__ ("sqxtun2 %0.4s, %1.2d"
: "+w"(result) : "+w"(result)
: "w"(b) : "w"(b)
: /* No clobbers */); : /* No clobbers */);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment