diff options
author | Kevin Qin <Kevin.Qin@arm.com> | 2014-04-23 06:22:48 +0000 |
---|---|---|
committer | Kevin Qin <Kevin.Qin@arm.com> | 2014-04-23 06:22:48 +0000 |
commit | 81ea345894636edc6dc016b6b93ecb7d259b4ae6 (patch) | |
tree | fd8798fe21fab87056e468a180eda9248c919567 /test/CodeGen/ARM64 | |
parent | b0015735153741b6f0978127976002fda9503a3c (diff) | |
download | llvm-81ea345894636edc6dc016b6b93ecb7d259b4ae6.tar.gz llvm-81ea345894636edc6dc016b6b93ecb7d259b4ae6.tar.bz2 llvm-81ea345894636edc6dc016b6b93ecb7d259b4ae6.tar.xz |
[ARM64] Enable feature predicates for NEON / FP / CRYPTO.
AArch64 has feature predicates for NEON, FP and CRYPTO instructions.
This allows the compiler to generate code without using FP, NEON
or CRYPTO instructions.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206949 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/ARM64')
-rw-r--r-- | test/CodeGen/ARM64/complex-copy-noneon.ll | 21 | ||||
-rw-r--r-- | test/CodeGen/ARM64/crypto.ll | 2 | ||||
-rw-r--r-- | test/CodeGen/ARM64/reg-copy-noneon.ll | 20 |
3 files changed, 42 insertions, 1 deletion
diff --git a/test/CodeGen/ARM64/complex-copy-noneon.ll b/test/CodeGen/ARM64/complex-copy-noneon.ll new file mode 100644 index 0000000000..f65b116128 --- /dev/null +++ b/test/CodeGen/ARM64/complex-copy-noneon.ll @@ -0,0 +1,21 @@ +; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=-neon < %s + +; The DAG combiner decided to use a vector load/store for this struct copy +; previously. This probably shouldn't happen without NEON, but the most +; important thing is that it compiles. + +define void @store_combine() nounwind { + %src = alloca { double, double }, align 8 + %dst = alloca { double, double }, align 8 + + %src.realp = getelementptr inbounds { double, double }* %src, i32 0, i32 0 + %src.real = load double* %src.realp + %src.imagp = getelementptr inbounds { double, double }* %src, i32 0, i32 1 + %src.imag = load double* %src.imagp + + %dst.realp = getelementptr inbounds { double, double }* %dst, i32 0, i32 0 + %dst.imagp = getelementptr inbounds { double, double }* %dst, i32 0, i32 1 + store double %src.real, double* %dst.realp + store double %src.imag, double* %dst.imagp + ret void +} diff --git a/test/CodeGen/ARM64/crypto.ll b/test/CodeGen/ARM64/crypto.ll index 3804310287..0020865bcd 100644 --- a/test/CodeGen/ARM64/crypto.ll +++ b/test/CodeGen/ARM64/crypto.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -arm64-neon-syntax=apple -o - %s | FileCheck %s +; RUN: llc -march=arm64 -mattr=crypto -arm64-neon-syntax=apple -o - %s | FileCheck %s declare <16 x i8> @llvm.arm64.crypto.aese(<16 x i8> %data, <16 x i8> %key) declare <16 x i8> @llvm.arm64.crypto.aesd(<16 x i8> %data, <16 x i8> %key) diff --git a/test/CodeGen/ARM64/reg-copy-noneon.ll b/test/CodeGen/ARM64/reg-copy-noneon.ll new file mode 100644 index 0000000000..29255ef187 --- /dev/null +++ b/test/CodeGen/ARM64/reg-copy-noneon.ll @@ -0,0 +1,20 @@ +; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=-neon < %s | FileCheck %s + +define float @copy_FPR32(float %a, float %b) { +;CHECK-LABEL: copy_FPR32: +;CHECK: fmov s0, s1 + ret float %b; +} + +define double @copy_FPR64(double %a, double %b) { +;CHECK-LABEL: copy_FPR64: +;CHECK: fmov d0, d1 + ret double %b; +} + +define fp128 @copy_FPR128(fp128 %a, fp128 %b) { +;CHECK-LABEL: copy_FPR128: +;CHECK: str q1, [sp, #-16]! +;CHECK-NEXT: ldr q0, [sp, #16]! + ret fp128 %b; +}