author     Bill Wendling <isanbard@gmail.com>   2013-11-25 05:39:42 +0000
committer  Bill Wendling <isanbard@gmail.com>   2013-11-25 05:39:42 +0000
commit     bc5de89da7dbd930c339757b1d01cb926be768fc (patch)
tree       a141bd6b0c1e9f7a8c00749bf9e8410a6843a0b8
parent     f27429d06ab3cea0e97ad9878c4440dfb06d2fe7 (diff)
Merging r195329:
------------------------------------------------------------------------
r195329 | apazos | 2013-11-21 00:15:01 -0800 (Thu, 21 Nov 2013) | 5 lines

Implemented Neon scalar vdup_lane intrinsics.

Fixed scalar dup alias and added test case.
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/cfe/branches/release_34@195614 91177308-0d34-0410-b5e6-96231b3b80d8
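For orientation before the diff: the intrinsics added here read a single
lane out of a Neon vector and return it as a scalar. A minimal usage
sketch in C (the intrinsic name and types come straight from the new
test file below; the wrapper function itself is illustrative only):

#include <arm_neon.h>

/* Illustrative wrapper: read lane 1 of a two-lane float vector.
   vdups_lane_f32 is one of the scalar vdup_lane intrinsics this commit
   implements; the lane index must be a compile-time constant. */
float32_t read_lane1(float32x2_t v) {
    return vdups_lane_f32(v, 1);
}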
-rw-r--r--  include/clang/Basic/arm_neon.td            |   2 ++
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp                  |  25 +++
-rw-r--r--  test/CodeGen/aarch64-neon-scalar-copy.c    | 173 ++++
3 files changed, 200 insertions(+), 0 deletions(-)
diff --git a/include/clang/Basic/arm_neon.td b/include/clang/Basic/arm_neon.td
index ef0e7daa53..b2da84803c 100644
--- a/include/clang/Basic/arm_neon.td
+++ b/include/clang/Basic/arm_neon.td
@@ -1271,4 +1271,6 @@ def SCALAR_SQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "ssji", "SsSi", OP_SCALAR_QD
def SCALAR_SQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "ssdi", "SsSi", OP_SCALAR_QRDMULH_LN>;
def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "ssji", "SsSi", OP_SCALAR_QRDMULH_LNQ>;
+def SCALAR_VDUP_LANE : IInst<"vdup_lane", "sdi", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
+def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "sji", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
}
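A quick gloss on the two new records, as a hedged reading of the
NeonEmitter conventions: in the prototype strings, 's' is a scalar of
the element type, 'd' the 64-bit source vector, 'j' its 128-bit
counterpart, and 'i' a constant-integer lane index; the type string
enumerates the supported scalar element types (signed, unsigned,
float/double, and poly variants). The emitter should therefore produce
declarations along these lines in arm_neon.h (signatures inferred from
the test file below; parameter names are hypothetical):

int8_t    vdupb_lane_s8(int8x8_t __vec, const int __lane);
uint16_t  vduph_lane_u16(uint16x4_t __vec, const int __lane);
float32_t vdups_laneq_f32(float32x4_t __vec, const int __lane);
poly8_t   vdupb_laneq_p8(poly8x16_t __vec, const int __lane);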
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 2d2fa092cd..d496c7ae46 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -1772,6 +1772,31 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
// argument that specifies the vector type, need to handle each case.
switch (BuiltinID) {
default: break;
+ case AArch64::BI__builtin_neon_vdups_lane_f32:
+ case AArch64::BI__builtin_neon_vdupd_lane_f64:
+ case AArch64::BI__builtin_neon_vdups_laneq_f32:
+ case AArch64::BI__builtin_neon_vdupd_laneq_f64: {
+ return CGF.Builder.CreateExtractElement(Ops[0], Ops[1], "vdup_lane");
+ }
+ case AArch64::BI__builtin_neon_vdupb_lane_i8:
+ case AArch64::BI__builtin_neon_vduph_lane_i16:
+ case AArch64::BI__builtin_neon_vdups_lane_i32:
+ case AArch64::BI__builtin_neon_vdupd_lane_i64:
+ case AArch64::BI__builtin_neon_vdupb_laneq_i8:
+ case AArch64::BI__builtin_neon_vduph_laneq_i16:
+ case AArch64::BI__builtin_neon_vdups_laneq_i32:
+ case AArch64::BI__builtin_neon_vdupd_laneq_i64: {
+ // The backend treats Neon scalar types as v1ix types, so first
+ // extract the requested lane from the source vector into a
+ // one-element vector with a shufflevector.
+ s = "vdup_lane";
+ Value *SV = llvm::ConstantVector::getSplat(1, cast<ConstantInt>(Ops[1]));
+ Value *Result = CGF.Builder.CreateShuffleVector(Ops[0], Ops[0], SV, s);
+ llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+ // Bitcast the one-element vector result to the scalar type the
+ // builtin is expected to return.
+ return CGF.Builder.CreateBitCast(Result, Ty, s);
+ }
case AArch64::BI__builtin_neon_vqdmlalh_lane_s16 :
case AArch64::BI__builtin_neon_vqdmlalh_laneq_s16 :
case AArch64::BI__builtin_neon_vqdmlals_lane_s32 :
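To summarize the two lowering paths added above: floating-point lane
reads become a single extractelement, while integer lane reads go
through a one-element shufflevector followed by a bitcast, because the
AArch64 backend models scalar Neon values as one-element vectors. A
hedged sketch in C of what each path looks like from the user's side
(function names are illustrative; the expected instruction is taken
from the test file below):

#include <arm_neon.h>

/* Floating-point case: lowered via a single extractelement. */
float32_t fp_lane(float32x4_t a) {
    return vdups_laneq_f32(a, 3);
}

/* Integer case: shufflevector to a <1 x i16> vector, then a bitcast
   to i16. The test file expects this to compile to a single
   "umov w0, v0.h[3]" lane move at -O3. */
int16_t int_lane(int16x4_t a) {
    return vduph_lane_s16(a, 3);
}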
diff --git a/test/CodeGen/aarch64-neon-scalar-copy.c b/test/CodeGen/aarch64-neon-scalar-copy.c
new file mode 100644
index 0000000000..33e97c792f
--- /dev/null
+++ b/test/CodeGen/aarch64-neon-scalar-copy.c
@@ -0,0 +1,173 @@
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
+// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
+
+
+#include <arm_neon.h>
+
+// CHECK: test_vdups_lane_f32
+float32_t test_vdups_lane_f32(float32x2_t a) {
+ return vdups_lane_f32(a, 1);
+// CHECK: ret
+// CHECK-NOT: dup {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+}
+
+
+// CHECK: test_vdupd_lane_f64
+float64_t test_vdupd_lane_f64(float64x1_t a) {
+ return vdupd_lane_f64(a, 0);
+// CHECK: ret
+// CHECK-NOT: dup {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+
+// CHECK: test_vdups_laneq_f32
+float32_t test_vdups_laneq_f32(float32x4_t a) {
+ return vdups_laneq_f32(a, 3);
+// CHECK: ret
+// CHECK-NOT: dup {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+}
+
+
+// CHECK: test_vdupd_laneq_f64
+float64_t test_vdupd_laneq_f64(float64x2_t a) {
+ return vdupd_laneq_f64(a, 1);
+// CHECK: ret
+// CHECK-NOT: dup {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+}
+
+
+// CHECK: test_vdupb_lane_s8
+int8_t test_vdupb_lane_s8(int8x8_t a) {
+ return vdupb_lane_s8(a, 7);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
+}
+
+
+// CHECK: test_vduph_lane_s16
+int16_t test_vduph_lane_s16(int16x4_t a) {
+ return vduph_lane_s16(a, 3);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[3]
+}
+
+
+// CHECK: test_vdups_lane_s32
+int32_t test_vdups_lane_s32(int32x2_t a) {
+ return vdups_lane_s32(a, 1);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
+}
+
+
+// CHECK: test_vdupd_lane_s64
+int64_t test_vdupd_lane_s64(int64x1_t a) {
+ return vdupd_lane_s64(a, 0);
+// CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+}
+
+
+// CHECK: test_vdupb_lane_u8
+uint8_t test_vdupb_lane_u8(uint8x8_t a) {
+ return vdupb_lane_u8(a, 7);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
+}
+
+
+// CHECK: test_vduph_lane_u16
+uint16_t test_vduph_lane_u16(uint16x4_t a) {
+ return vduph_lane_u16(a, 3);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[3]
+}
+
+
+// CHECK: test_vdups_lane_u32
+uint32_t test_vdups_lane_u32(uint32x2_t a) {
+ return vdups_lane_u32(a, 1);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
+}
+
+
+// CHECK: test_vdupd_lane_u64
+uint64_t test_vdupd_lane_u64(uint64x1_t a) {
+ return vdupd_lane_u64(a, 0);
+// CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+}
+
+// CHECK: test_vdupb_laneq_s8
+int8_t test_vdupb_laneq_s8(int8x16_t a) {
+ return vdupb_laneq_s8(a, 15);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[15]
+}
+
+
+// CHECK: test_vduph_laneq_s16
+int16_t test_vduph_laneq_s16(int16x8_t a) {
+ return vduph_laneq_s16(a, 7);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[7]
+}
+
+
+// CHECK: test_vdups_laneq_s32
+int32_t test_vdups_laneq_s32(int32x4_t a) {
+ return vdups_laneq_s32(a, 3);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.s[3]
+}
+
+
+// CHECK: test_vdupd_laneq_s64
+int64_t test_vdupd_laneq_s64(int64x2_t a) {
+ return vdupd_laneq_s64(a, 1);
+// CHECK: umov {{x[0-9]+}}, {{v[0-9]+}}.d[1]
+}
+
+
+// CHECK: test_vdupb_laneq_u8
+uint8_t test_vdupb_laneq_u8(uint8x16_t a) {
+ return vdupb_laneq_u8(a, 15);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[15]
+}
+
+
+// CHECK: test_vduph_laneq_u16
+uint16_t test_vduph_laneq_u16(uint16x8_t a) {
+ return vduph_laneq_u16(a, 7);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[7]
+}
+
+
+// CHECK: test_vdups_laneq_u32
+uint32_t test_vdups_laneq_u32(uint32x4_t a) {
+ return vdups_laneq_u32(a, 3);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.s[3]
+}
+
+
+// CHECK: test_vdupd_laneq_u64
+uint64_t test_vdupd_laneq_u64(uint64x2_t a) {
+ return vdupd_laneq_u64(a, 1);
+// CHECK: umov {{x[0-9]+}}, {{v[0-9]+}}.d[1]
+}
+
+// CHECK: test_vdupb_lane_p8
+poly8_t test_vdupb_lane_p8(poly8x8_t a) {
+ return vdupb_lane_p8(a, 7);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
+}
+
+// CHECK: test_vduph_lane_p16
+poly16_t test_vduph_lane_p16(poly16x4_t a) {
+ return vduph_lane_p16(a, 3);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[3]
+}
+
+// CHECK: test_vdupb_laneq_p8
+poly8_t test_vdupb_laneq_p8(poly8x16_t a) {
+ return vdupb_laneq_p8(a, 15);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[15]
+}
+
+// CHECK: test_vduph_laneq_p16
+poly16_t test_vduph_laneq_p16(poly16x8_t a) {
+ return vduph_laneq_p16(a, 7);
+// CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[7]
+}
+