author    Evan Cheng <evan.cheng@apple.com>    2014-02-11 23:49:31 +0000
committer Evan Cheng <evan.cheng@apple.com>    2014-02-11 23:49:31 +0000
commit    9f5baa3c7e01e632722a0d7c37b3aa82591bc8a8 (patch)
tree      e99668f1d962ebed1f93617d766e50b8a49440f1 /test
parent    e48e9419eaa7f3cf18924ecf99da9fd84f7b94d8 (diff)
Tweak ARM fastcc by adopting these two AAPCS rules:
* CPRCs may be allocated to co-processor registers or the stack; they may never be allocated to core registers.
* When a CPRC is allocated to the stack, all other VFP registers should be marked as unavailable.

The difference is only noticeable in rare cases where there is a large number of floating-point arguments (e.g. 7 doubles plus additional float and double arguments). Even so, it is probably still better to avoid vmov, as it can cause stalls on some older ARM cores. The other, more subtle benefit is to minimize the differences between the various calling conventions.

rdar://16039676

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201193 91177308-0d34-0410-b5e6-96231b3b80d8
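To illustrate what the first rule means in practice, here is a minimal sketch, not part of the commit (the function name @sketch and its signature are made up for illustration): once the eight double arguments exhaust d0-d7, the trailing float is a CPRC and, under the tweaked fastcc, must be passed on the stack rather than moved through a core register such as r0.

    ; Hypothetical illustration, not from this commit: after the eight
    ; doubles fill d0-d7, the float %f is a CPRC that is passed on the
    ; stack -- it is never assigned to a core register.
    define fastcc float @sketch(double %d0, double %d1, double %d2,
                                double %d3, double %d4, double %d5,
                                double %d6, double %d7, float %f) {
    entry:
      %r = fadd float %f, %f   ; use the stack-passed argument
      ret float %r
    }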
Diffstat (limited to 'test')
-rw-r--r--    test/CodeGen/ARM/fastcc-vfp.ll    |    40
1 file changed, 40 insertions, 0 deletions
diff --git a/test/CodeGen/ARM/fastcc-vfp.ll b/test/CodeGen/ARM/fastcc-vfp.ll
new file mode 100644
index 0000000000..4c98150c70
--- /dev/null
+++ b/test/CodeGen/ARM/fastcc-vfp.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios -mattr=+vfp2 | FileCheck %s
+
+define fastcc double @t1(double %d0, double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, float %a, float %b) {
+entry:
+; CHECK-LABEL: t1:
+; CHECK-NOT: vmov
+; CHECK: vldr
+ %add = fadd float %a, %b
+ %conv = fpext float %add to double
+ ret double %conv
+}
+
+define fastcc double @t2(double %d0, double %d1, double %d2, double %d3, double %d4, double %d5, double %a, float %b, double %c) {
+entry:
+; CHECK-LABEL: t2:
+; CHECK-NOT: vmov
+; CHECK: vldr
+ %add = fadd double %a, %c
+ ret double %add
+}
+
+define fastcc float @t3(double %d0, double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, float %a, double %b, float %c) {
+entry:
+; CHECK-LABEL: t3:
+; CHECK: vldr
+ %add = fadd float %a, %c
+ ret float %add
+}
+
+define fastcc double @t4(double %a, double %b) #0 {
+entry:
+; CHECK-LABEL: t4:
+; CHECK: vstr
+ %add = fadd double %a, %b
+ %sub = fsub double %a, %b
+ %call = tail call fastcc double @x(double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double %add, float 0.000000e+00, double %sub) #2
+ ret double %call
+}
+
+declare fastcc double @x(double, double, double, double, double, double, double, float, double)
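For reference, the RUN line above can be exercised by hand outside the test harness; the commands below assume an LLVM source checkout with llc and FileCheck built and on PATH:

    llc < test/CodeGen/ARM/fastcc-vfp.ll -mtriple=armv7-apple-ios -mattr=+vfp2 | FileCheck test/CodeGen/ARM/fastcc-vfp.ll

Equivalently, assuming a built llvm-lit, the test runner performs the same substitution of %s with the test's path:

    llvm-lit test/CodeGen/ARM/fastcc-vfp.ll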