path: root/test/CodeGen/ARM/neon_fpconv.ll
author    Arnold Schwaighofer <aschwaighofer@apple.com>  2013-02-19 15:27:05 +0000
committer Arnold Schwaighofer <aschwaighofer@apple.com>  2013-02-19 15:27:05 +0000
commit  2e750c12e91ab09949ef1617ab3af14e1b6cd239 (patch)
tree    57fb0f606bf5b772b6ad67016d2b51ad91dc417d /test/CodeGen/ARM/neon_fpconv.ll
parent  e5839d0fc9999a3d53659354a3cceb838cb87711 (diff)
ARM NEON: Merge a f32 bitcast of a v2i32 extractelt
A vectorized sitofp on doubles will get scalarized to a sequence of an extract_element of <2 x i32>, a bitcast to f32 and a sitofp. Due to the extract_element and the bitcast we will unnecessarily generate moves between scalar and vector registers. The patch fixes this by using a COPY_TO_REGCLASS and an EXTRACT_SUBREG to extract the element from the vector instead.

radar://13191881

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@175520 91177308-0d34-0410-b5e6-96231b3b80d8
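For reference, the IR shape named in the commit title looks roughly like the sketch below: one lane is pulled out of a <2 x i32> and reinterpreted as f32. The function and value names are illustrative only and are not taken from the patch; per the message above, this extract/bitcast pair previously forced a move between the scalar and vector register banks, while with the new COPY_TO_REGCLASS/EXTRACT_SUBREG selection it should reduce to a subregister read of the NEON register.

; Illustrative sketch (invented names): f32 bitcast of a v2i32 extractelt.
define float @lane0_as_float(<2 x i32> %v) {
  %elt = extractelement <2 x i32> %v, i32 0   ; extract_element of <2 x i32>
  %f = bitcast i32 %elt to float              ; bitcast to f32
  ret float %f
}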
Diffstat (limited to 'test/CodeGen/ARM/neon_fpconv.ll')
-rw-r--r--  test/CodeGen/ARM/neon_fpconv.ll  25
1 file changed, 25 insertions, 0 deletions
diff --git a/test/CodeGen/ARM/neon_fpconv.ll b/test/CodeGen/ARM/neon_fpconv.ll
index 1948ad8471..149f4c7770 100644
--- a/test/CodeGen/ARM/neon_fpconv.ll
+++ b/test/CodeGen/ARM/neon_fpconv.ll
@@ -15,3 +15,28 @@ define <2 x double> @vextend(<2 x float> %a) {
ret <2 x double> %ve
}
+; We used to generate vmovs between scalar and vfp/neon registers.
+; CHECK: vsitofp_double
+define void @vsitofp_double(<2 x i32>* %loadaddr,
+ <2 x double>* %storeaddr) {
+ %v0 = load <2 x i32>* %loadaddr
+; CHECK: vldr
+; CHECK-NEXT: vcvt.f64.s32
+; CHECK-NEXT: vcvt.f64.s32
+; CHECK-NEXT: vst
+ %r = sitofp <2 x i32> %v0 to <2 x double>
+ store <2 x double> %r, <2 x double>* %storeaddr
+ ret void
+}
+; CHECK: vuitofp_double
+define void @vuitofp_double(<2 x i32>* %loadaddr,
+ <2 x double>* %storeaddr) {
+ %v0 = load <2 x i32>* %loadaddr
+; CHECK: vldr
+; CHECK-NEXT: vcvt.f64.u32
+; CHECK-NEXT: vcvt.f64.u32
+; CHECK-NEXT: vst
+ %r = uitofp <2 x i32> %v0 to <2 x double>
+ store <2 x double> %r, <2 x double>* %storeaddr
+ ret void
+}