author    Bob Wilson <bob.wilson@apple.com>  2012-08-03 23:29:17 +0000
committer Bob Wilson <bob.wilson@apple.com>  2012-08-03 23:29:17 +0000
commit  53624a2df557b4a24f2ee98cfce1a69bf83243af (patch)
tree    ce940e202cc56262d4a9c96df2ac551def98d380 /test/CodeGen
parent  6ac8066ae49a5e9910f24e08af0b168210270946 (diff)
Refactor and check "onlyReadsMemory" before optimizing builtins.
This patch is mostly just refactoring a bunch of copy-and-pasted code, but it also adds a check that the call instructions are readnone or readonly. That check was already present for sin, cos, sqrt, log2, and exp2 calls, but it was missing for the rest of the builtins being handled in this code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@161282 91177308-0d34-0410-b5e6-96231b3b80d8
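For context, the refactoring funnels the copy-and-pasted cases through a single helper that performs the "onlyReadsMemory" check before a builtin call is lowered to a DAG node. The sketch below is a minimal illustration of that pattern, not the committed code; the helper name, structure, and surrounding details are assumptions:

    // Sketch: lower a unary floating-point libcall (fabs, fabsf, ...) to the
    // given ISD opcode, but only when it is provably safe to do so.
    bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
                                                  unsigned Opcode) {
      // Signature check: exactly one argument, floating-point, with the
      // result type matching the argument type.
      if (I.getNumArgOperands() != 1 ||
          !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
          I.getType() != I.getArgOperand(0)->getType() ||
          // The check this commit generalizes: onlyReadsMemory() holds for
          // calls marked readnone or readonly, so a call that might write
          // memory is never replaced by the optimized node.
          !I.onlyReadsMemory())
        return false;
      SDValue Tmp = getValue(I.getArgOperand(0));
      setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
                               Tmp.getValueType(), Tmp));
      return true;
    }

This is why each test below now tags its fabs/fabsf/fabsl/copysign call with readnone: without the attribute, the calls would no longer be matched to the optimized instruction sequences the CHECK lines expect.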
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/fabss.ll          | 2 +-
-rw-r--r--  test/CodeGen/ARM/fcopysign.ll      | 6 +++---
-rw-r--r--  test/CodeGen/ARM/fparith.ll        | 4 ++--
-rw-r--r--  test/CodeGen/ARM/vfp.ll            | 4 ++--
-rw-r--r--  test/CodeGen/CellSPU/fcmp32.ll     | 4 ++--
-rw-r--r--  test/CodeGen/CellSPU/fneg-fabs.ll  | 4 ++--
-rw-r--r--  test/CodeGen/Hexagon/opt-fabs.ll   | 2 +-
-rw-r--r--  test/CodeGen/PowerPC/fabs.ll       | 2 +-
-rw-r--r--  test/CodeGen/PowerPC/fnabs.ll      | 2 +-
-rw-r--r--  test/CodeGen/X86/fabs.ll           | 4 ++--
-rw-r--r--  test/CodeGen/X86/stack-align.ll    | 4 ++--
11 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/test/CodeGen/ARM/fabss.ll b/test/CodeGen/ARM/fabss.ll
index 45c322dce8..5c0db88407 100644
--- a/test/CodeGen/ARM/fabss.ll
+++ b/test/CodeGen/ARM/fabss.ll
@@ -6,7 +6,7 @@
define float @test(float %a, float %b) {
entry:
%dum = fadd float %a, %b
- %0 = tail call float @fabsf(float %dum)
+ %0 = tail call float @fabsf(float %dum) readnone
%dum1 = fadd float %0, %b
ret float %dum1
}
diff --git a/test/CodeGen/ARM/fcopysign.ll b/test/CodeGen/ARM/fcopysign.ll
index 27fa2b093d..5511d24cb2 100644
--- a/test/CodeGen/ARM/fcopysign.ll
+++ b/test/CodeGen/ARM/fcopysign.ll
@@ -11,7 +11,7 @@ entry:
; HARD: test1:
; HARD: vmov.i32 [[REG1:(d[0-9]+)]], #0x80000000
; HARD: vbsl [[REG1]], d
- %0 = tail call float @copysignf(float %x, float %y) nounwind
+ %0 = tail call float @copysignf(float %x, float %y) nounwind readnone
ret float %0
}
@@ -25,7 +25,7 @@ entry:
; HARD: vmov.i32 [[REG2:(d[0-9]+)]], #0x80000000
; HARD: vshl.i64 [[REG2]], [[REG2]], #32
; HARD: vbsl [[REG2]], d1, d0
- %0 = tail call double @copysign(double %x, double %y) nounwind
+ %0 = tail call double @copysign(double %x, double %y) nounwind readnone
ret double %0
}
@@ -36,7 +36,7 @@ entry:
; SOFT: vshl.i64 [[REG3]], [[REG3]], #32
; SOFT: vbsl [[REG3]],
%0 = fmul double %x, %y
- %1 = tail call double @copysign(double %0, double %z) nounwind
+ %1 = tail call double @copysign(double %0, double %z) nounwind readnone
ret double %1
}
diff --git a/test/CodeGen/ARM/fparith.ll b/test/CodeGen/ARM/fparith.ll
index ce6d6b29e9..a8bae3b951 100644
--- a/test/CodeGen/ARM/fparith.ll
+++ b/test/CodeGen/ARM/fparith.ll
@@ -84,7 +84,7 @@ define float @f11(float %a) {
;CHECK: f11:
;CHECK: bic
entry:
- %tmp1 = call float @fabsf( float %a ) ; <float> [#uses=1]
+ %tmp1 = call float @fabsf( float %a ) readnone ; <float> [#uses=1]
ret float %tmp1
}
@@ -94,7 +94,7 @@ define double @f12(double %a) {
;CHECK: f12:
;CHECK: vabs.f64
entry:
- %tmp1 = call double @fabs( double %a ) ; <double> [#uses=1]
+ %tmp1 = call double @fabs( double %a ) readnone ; <double> [#uses=1]
ret double %tmp1
}
diff --git a/test/CodeGen/ARM/vfp.ll b/test/CodeGen/ARM/vfp.ll
index 49a69827bc..bf0f3680c1 100644
--- a/test/CodeGen/ARM/vfp.ll
+++ b/test/CodeGen/ARM/vfp.ll
@@ -17,11 +17,11 @@ define void @test_abs(float* %P, double* %D) {
;CHECK: test_abs:
%a = load float* %P ; <float> [#uses=1]
;CHECK: vabs.f32
- %b = call float @fabsf( float %a ) ; <float> [#uses=1]
+ %b = call float @fabsf( float %a ) readnone ; <float> [#uses=1]
store float %b, float* %P
%A = load double* %D ; <double> [#uses=1]
;CHECK: vabs.f64
- %B = call double @fabs( double %A ) ; <double> [#uses=1]
+ %B = call double @fabs( double %A ) readnone ; <double> [#uses=1]
store double %B, double* %D
ret void
}
diff --git a/test/CodeGen/CellSPU/fcmp32.ll b/test/CodeGen/CellSPU/fcmp32.ll
index c14fd7ba4a..b2a7317c12 100644
--- a/test/CodeGen/CellSPU/fcmp32.ll
+++ b/test/CodeGen/CellSPU/fcmp32.ll
@@ -15,8 +15,8 @@ define i1 @fcmp_eq(float %arg1, float %arg2) {
define i1 @fcmp_mag_eq(float %arg1, float %arg2) {
; CHECK: fcmeq
; CHECK: bi $lr
- %1 = call float @fabsf(float %arg1)
- %2 = call float @fabsf(float %arg2)
+ %1 = call float @fabsf(float %arg1) readnone
+ %2 = call float @fabsf(float %arg2) readnone
%3 = fcmp oeq float %1, %2
ret i1 %3
}
diff --git a/test/CodeGen/CellSPU/fneg-fabs.ll b/test/CodeGen/CellSPU/fneg-fabs.ll
index 1e5e3b3414..6e01906dae 100644
--- a/test/CodeGen/CellSPU/fneg-fabs.ll
+++ b/test/CodeGen/CellSPU/fneg-fabs.ll
@@ -32,11 +32,11 @@ declare double @fabs(double)
declare float @fabsf(float)
define double @fabs_dp(double %X) {
- %Y = call double @fabs( double %X )
+ %Y = call double @fabs( double %X ) readnone
ret double %Y
}
define float @fabs_sp(float %X) {
- %Y = call float @fabsf( float %X )
+ %Y = call float @fabsf( float %X ) readnone
ret float %Y
}
diff --git a/test/CodeGen/Hexagon/opt-fabs.ll b/test/CodeGen/Hexagon/opt-fabs.ll
index 1cf0dd0cd9..f1a66f4e65 100644
--- a/test/CodeGen/Hexagon/opt-fabs.ll
+++ b/test/CodeGen/Hexagon/opt-fabs.ll
@@ -8,7 +8,7 @@ entry:
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
%0 = load float* %x.addr, align 4
- %call = call float @fabsf(float %0)
+ %call = call float @fabsf(float %0) readnone
ret float %call
}
diff --git a/test/CodeGen/PowerPC/fabs.ll b/test/CodeGen/PowerPC/fabs.ll
index 156e00b4e5..ddcce74508 100644
--- a/test/CodeGen/PowerPC/fabs.ll
+++ b/test/CodeGen/PowerPC/fabs.ll
@@ -2,6 +2,6 @@
define double @fabs(double %f) {
entry:
- %tmp2 = tail call double @fabs( double %f ) ; <double> [#uses=1]
+ %tmp2 = tail call double @fabs( double %f ) readnone ; <double> [#uses=1]
ret double %tmp2
}
diff --git a/test/CodeGen/PowerPC/fnabs.ll b/test/CodeGen/PowerPC/fnabs.ll
index bbd5c7159e..9fa2dcb290 100644
--- a/test/CodeGen/PowerPC/fnabs.ll
+++ b/test/CodeGen/PowerPC/fnabs.ll
@@ -3,7 +3,7 @@
declare double @fabs(double)
define double @test(double %X) {
- %Y = call double @fabs( double %X ) ; <double> [#uses=1]
+ %Y = call double @fabs( double %X ) readnone ; <double> [#uses=1]
%Z = fsub double -0.000000e+00, %Y ; <double> [#uses=1]
ret double %Z
}
diff --git a/test/CodeGen/X86/fabs.ll b/test/CodeGen/X86/fabs.ll
index 07a8d91185..2074f04d40 100644
--- a/test/CodeGen/X86/fabs.ll
+++ b/test/CodeGen/X86/fabs.ll
@@ -11,7 +11,7 @@ declare x86_fp80 @fabsl(x86_fp80)
; UNSAFE: test1:
; NOOPT: test1:
define float @test1(float %X) {
- %Y = call float @fabsf(float %X)
+ %Y = call float @fabsf(float %X) readnone
ret float %Y
}
; CHECK: {{^[ \t]+fabs$}}
@@ -42,7 +42,7 @@ define double @test2(double %X) {
; UNSAFE: test3:
; NOOPT: test3:
define x86_fp80 @test3(x86_fp80 %X) {
- %Y = call x86_fp80 @fabsl(x86_fp80 %X)
+ %Y = call x86_fp80 @fabsl(x86_fp80 %X) readnone
ret x86_fp80 %Y
}
; CHECK: {{^[ \t]+fabs$}}
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index f6c13ec0ad..0ddb2378ef 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -10,11 +10,11 @@ target triple = "i686-apple-darwin8"
define void @test({ double, double }* byval %z, double* %P) nounwind {
entry:
%tmp3 = load double* @G, align 16 ; <double> [#uses=1]
- %tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
+ %tmp4 = tail call double @fabs( double %tmp3 ) readnone ; <double> [#uses=1]
store volatile double %tmp4, double* %P
%tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
%tmp1 = load volatile double* %tmp, align 8 ; <double> [#uses=1]
- %tmp2 = tail call double @fabs( double %tmp1 ) ; <double> [#uses=1]
+ %tmp2 = tail call double @fabs( double %tmp1 ) readnone ; <double> [#uses=1]
; CHECK: andpd{{.*}}4(%esp), %xmm
%tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
store volatile double %tmp6, double* %P, align 8