author     Tanya Lattner <tonic@nondot.org>    2008-03-25 04:26:08 +0000
committer  Tanya Lattner <tonic@nondot.org>    2008-03-25 04:26:08 +0000
commit     6f729d601c8a6a9710356aadb42dc8d0efa95bf2 (patch)
tree       e662b3ee5539d7594ab49eda3eae140424ce499d /test/CodeGen/X86
parent     a2fb634defce316ec972aa6f3ca3a941b4656f5e (diff)
Byebye llvm-upgrade!
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@48762 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/X86')
-rw-r--r--  test/CodeGen/X86/2002-12-23-LocalRAProblem.llx  |  20
-rw-r--r--  test/CodeGen/X86/2002-12-23-SubProblem.llx  |  10
-rw-r--r--  test/CodeGen/X86/2003-08-03-CallArgLiveRanges.llx  |  17
-rw-r--r--  test/CodeGen/X86/2003-08-23-DeadBlockTest.llx  |  19
-rw-r--r--  test/CodeGen/X86/2003-11-03-GlobalBool.llx  |  5
-rw-r--r--  test/CodeGen/X86/2004-02-13-FrameReturnAddress.llx  |  20
-rw-r--r--  test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx  |  6
-rw-r--r--  test/CodeGen/X86/2004-02-22-Casts.llx  |  17
-rw-r--r--  test/CodeGen/X86/2004-03-30-Select-Max.llx  |  11
-rw-r--r--  test/CodeGen/X86/2004-04-09-SameValueCoalescing.llx  |  9
-rw-r--r--  test/CodeGen/X86/2004-04-13-FPCMOV-Crash.llx  |  11
-rw-r--r--  test/CodeGen/X86/2004-06-10-StackifierCrash.llx  |  8
-rw-r--r--  test/CodeGen/X86/2004-10-08-SelectSetCCFold.llx  |  10
-rw-r--r--  test/CodeGen/X86/2005-01-17-CycleInDAG.ll  |  17
-rw-r--r--  test/CodeGen/X86/2005-02-14-IllegalAssembler.ll  |  6
-rw-r--r--  test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll  |  79
-rw-r--r--  test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll  |  24
-rw-r--r--  test/CodeGen/X86/2006-03-01-InstrSchedBug.ll  |  19
-rw-r--r--  test/CodeGen/X86/2006-03-02-InstrSchedBug.ll  |  17
-rw-r--r--  test/CodeGen/X86/2006-04-04-CrossBlockCrash.ll  |  73
-rw-r--r--  test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll  |  52
-rw-r--r--  test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll  |  132
-rw-r--r--  test/CodeGen/X86/2006-05-02-InstrSched1.ll  |  35
-rw-r--r--  test/CodeGen/X86/2006-05-02-InstrSched2.ll  |  33
-rw-r--r--  test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll  |  32
-rw-r--r--  test/CodeGen/X86/2006-05-08-InstrSched.ll  |  39
-rw-r--r--  test/CodeGen/X86/2006-05-17-VectorArg.ll  |  19
-rw-r--r--  test/CodeGen/X86/2006-05-22-FPSetEQ.ll  |  13
-rw-r--r--  test/CodeGen/X86/2006-05-25-CycleInDAG.ll  |  35
-rw-r--r--  test/CodeGen/X86/2006-07-10-InlineAsmAConstraint.ll  |  9
-rw-r--r--  test/CodeGen/X86/2006-07-12-InlineAsmQConstraint.ll  |  18
-rw-r--r--  test/CodeGen/X86/2006-07-19-ATTAsm.ll  |  91
-rw-r--r--  test/CodeGen/X86/2006-07-20-InlineAsm.ll  |  31
-rw-r--r--  test/CodeGen/X86/2006-07-28-AsmPrint-Long-As-Pointer.ll  |  4
-rw-r--r--  test/CodeGen/X86/2006-07-31-SingleRegClass.ll  |  12
-rw-r--r--  test/CodeGen/X86/2006-10-09-CycleInDAG.ll  |  15
-rw-r--r--  test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll  |  45
-rw-r--r--  test/CodeGen/X86/2006-11-12-CSRetCC.ll  |  107
-rw-r--r--  test/CodeGen/X86/store_op_load_fold2.ll  |  67
39 files changed, 584 insertions, 603 deletions
diff --git a/test/CodeGen/X86/2002-12-23-LocalRAProblem.llx b/test/CodeGen/X86/2002-12-23-LocalRAProblem.llx
index f79781cb7b..df4a8f52f0 100644
--- a/test/CodeGen/X86/2002-12-23-LocalRAProblem.llx
+++ b/test/CodeGen/X86/2002-12-23-LocalRAProblem.llx
@@ -1,11 +1,15 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -regalloc=simple
+; RUN: llvm-as < %s | llc -march=x86 -regalloc=simple
-int %main() {
- %A = add int 0, 0 ; %A = 0
- %B = add int 0, 1 ; %B = 1
+define i32 @main() {
+ ; %A = 0
+ %A = add i32 0, 0 ; <i32> [#uses=1]
+ ; %B = 1
+ %B = add i32 0, 1 ; <i32> [#uses=2]
br label %bb1
-bb1:
- %X = mul int %A, %B ; %X = 0*1 = 0
- %R = sub int %B, 1 ; %r = 0
- ret int %R
+bb1: ; preds = %0
+ ; %X = 0*1 = 0
+ %X = mul i32 %A, %B ; <i32> [#uses=0]
+ ; %r = 0
+ %R = sub i32 %B, 1 ; <i32> [#uses=1]
+ ret i32 %R
}
diff --git a/test/CodeGen/X86/2002-12-23-SubProblem.llx b/test/CodeGen/X86/2002-12-23-SubProblem.llx
index 3d89378609..68200ff234 100644
--- a/test/CodeGen/X86/2002-12-23-SubProblem.llx
+++ b/test/CodeGen/X86/2002-12-23-SubProblem.llx
@@ -1,7 +1,7 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -regalloc=simple
+; RUN: llvm-as < %s | llc -march=x86 -regalloc=simple
-int %main(int %B) {
- ;%B = add int 0, 1
- %R = sub int %B, 1 ; %r = 0
- ret int %R
+define i32 @main(i32 %B) {
+ ;%B = add i32 0, 1;
+ %R = sub i32 %B, 1 ; %r = 0
+ ret i32 %R
}
diff --git a/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.llx b/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.llx
index 9a4541b717..2b4242aaa1 100644
--- a/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.llx
+++ b/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.llx
@@ -3,13 +3,16 @@
; it makes a ton of annoying overlapping live ranges. This code should not
; cause spills!
;
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -stats |& not grep spilled
+; RUN: llvm-as < %s | llc -march=x86 -stats |& not grep spilled
-target endian = little
-target pointersize = 32
+target datalayout = "e-p:32:32"
-int %test(int, int, int, int, int, int, int, int, int, int) { ret int 0 }
-int %main() {
- %X = call int %test(int 1, int 2, int 3, int 4, int 5, int 6, int 7, int 8, int 9, int 10)
- ret int %X
+define i32 @test(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
+ ret i32 0
}
+
+define i32 @main() {
+ %X = call i32 @test( i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10 ) ; <i32> [#uses=1]
+ ret i32 %X
+}
+
diff --git a/test/CodeGen/X86/2003-08-23-DeadBlockTest.llx b/test/CodeGen/X86/2003-08-23-DeadBlockTest.llx
index 48623b9012..a4d558949e 100644
--- a/test/CodeGen/X86/2003-08-23-DeadBlockTest.llx
+++ b/test/CodeGen/X86/2003-08-23-DeadBlockTest.llx
@@ -1,13 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
-implementation
-
-int %test() {
-entry: ret int 7
-Test: ; dead block!
- %A = call int %test()
- %B = call int %test()
- %C = add int %A, %B
- ret int %C
+define i32 @test() {
+entry:
+ ret i32 7
+Test: ; No predecessors!
+ %A = call i32 @test( ) ; <i32> [#uses=1]
+ %B = call i32 @test( ) ; <i32> [#uses=1]
+ %C = add i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %C
}
diff --git a/test/CodeGen/X86/2003-11-03-GlobalBool.llx b/test/CodeGen/X86/2003-11-03-GlobalBool.llx
index 150d6a9bb3..4de3c79fdc 100644
--- a/test/CodeGen/X86/2003-11-03-GlobalBool.llx
+++ b/test/CodeGen/X86/2003-11-03-GlobalBool.llx
@@ -1,5 +1,4 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | \
+; RUN: llvm-as < %s | llc -march=x86 | \
; RUN: not grep {.byte\[\[:space:\]\]*true}
-%X = global bool true
-
+@X = global i1 true ; <i1*> [#uses=0]
diff --git a/test/CodeGen/X86/2004-02-13-FrameReturnAddress.llx b/test/CodeGen/X86/2004-02-13-FrameReturnAddress.llx
index 366865ac71..f48b1d3adf 100644
--- a/test/CodeGen/X86/2004-02-13-FrameReturnAddress.llx
+++ b/test/CodeGen/X86/2004-02-13-FrameReturnAddress.llx
@@ -1,14 +1,16 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep {(%esp}
+; RUN: llvm-as < %s | llc -march=x86 | grep {(%esp}
-declare sbyte* %llvm.returnaddress(uint)
-declare sbyte* %llvm.frameaddress(uint)
+declare i8* @llvm.returnaddress(i32)
-sbyte *%test1() {
- %X = call sbyte* %llvm.returnaddress(uint 0)
- ret sbyte* %X
+declare i8* @llvm.frameaddress(i32)
+
+define i8* @test1() {
+ %X = call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
+ ret i8* %X
}
-sbyte *%test2() {
- %X = call sbyte* %llvm.frameaddress(uint 0)
- ret sbyte* %X
+define i8* @test2() {
+ %X = call i8* @llvm.frameaddress( i32 0 ) ; <i8*> [#uses=1]
+ ret i8* %X
}
+
diff --git a/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx b/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx
index a0196aa6cb..b25dfaf5d9 100644
--- a/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx
+++ b/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx
@@ -1,5 +1,5 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep -i ESP | not grep sub
+; RUN: llvm-as < %s | llc -march=x86 | grep -i ESP | not grep sub
-int %test(int %X) {
- ret int %X
+define i32 @test(i32 %X) {
+ ret i32 %X
}
diff --git a/test/CodeGen/X86/2004-02-22-Casts.llx b/test/CodeGen/X86/2004-02-22-Casts.llx
index 8f5f5f8671..40d5f39df6 100644
--- a/test/CodeGen/X86/2004-02-22-Casts.llx
+++ b/test/CodeGen/X86/2004-02-22-Casts.llx
@@ -1,11 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
-
-bool %test1(double %X) {
- %V = cast double %X to bool
- ret bool %V
+; RUN: llvm-as < %s | llc -march=x86
+define i1 @test1(double %X) {
+ %V = fcmp one double %X, 0.000000e+00 ; <i1> [#uses=1]
+ ret i1 %V
}
-double %test2(ulong %X) {
- %V = cast ulong %X to double
- ret double %V
+define double @test2(i64 %X) {
+ %V = uitofp i64 %X to double ; <double> [#uses=1]
+ ret double %V
}
+
+
diff --git a/test/CodeGen/X86/2004-03-30-Select-Max.llx b/test/CodeGen/X86/2004-03-30-Select-Max.llx
index bd7ab47d1e..5021fd89df 100644
--- a/test/CodeGen/X86/2004-03-30-Select-Max.llx
+++ b/test/CodeGen/X86/2004-03-30-Select-Max.llx
@@ -1,7 +1,8 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep {j\[lgbe\]}
+; RUN: llvm-as < %s | llc -march=x86 | not grep {j\[lgbe\]}
-int %max(int %A, int %B) {
- %gt = setgt int %A, %B
- %R = select bool %gt, int %A, int %B
- ret int %R
+define i32 @max(i32 %A, i32 %B) {
+ %gt = icmp sgt i32 %A, %B ; <i1> [#uses=1]
+ %R = select i1 %gt, i32 %A, i32 %B ; <i32> [#uses=1]
+ ret i32 %R
}
+
diff --git a/test/CodeGen/X86/2004-04-09-SameValueCoalescing.llx b/test/CodeGen/X86/2004-04-09-SameValueCoalescing.llx
index 52c5da4ede..633a615645 100644
--- a/test/CodeGen/X86/2004-04-09-SameValueCoalescing.llx
+++ b/test/CodeGen/X86/2004-04-09-SameValueCoalescing.llx
@@ -2,11 +2,12 @@
; overlapping live intervals. When two overlapping intervals have the same
; value, they can be joined though.
;
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -regalloc=linearscan | \
+; RUN: llvm-as < %s | llc -march=x86 -regalloc=linearscan | \
; RUN: not grep {mov %\[A-Z\]\\\{2,3\\\}, %\[A-Z\]\\\{2,3\\\}}
-long %test(long %x) {
+define i64 @test(i64 %x) {
entry:
- %tmp.1 = mul long %x, 4294967297 ; <long> [#uses=1]
- ret long %tmp.1
+ %tmp.1 = mul i64 %x, 4294967297 ; <i64> [#uses=1]
+ ret i64 %tmp.1
}
+
diff --git a/test/CodeGen/X86/2004-04-13-FPCMOV-Crash.llx b/test/CodeGen/X86/2004-04-13-FPCMOV-Crash.llx
index 5896c142ee..858605c231 100644
--- a/test/CodeGen/X86/2004-04-13-FPCMOV-Crash.llx
+++ b/test/CodeGen/X86/2004-04-13-FPCMOV-Crash.llx
@@ -1,8 +1,7 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
-implementation ; Functions:
-
-double %test(double %d) {
- %X = select bool false, double %d, double %d ; <double> [#uses=0]
- ret double %X
+define double @test(double %d) {
+ %X = select i1 false, double %d, double %d ; <double> [#uses=1]
+ ret double %X
}
+
diff --git a/test/CodeGen/X86/2004-06-10-StackifierCrash.llx b/test/CodeGen/X86/2004-06-10-StackifierCrash.llx
index 3df962b5a3..1a51bee404 100644
--- a/test/CodeGen/X86/2004-06-10-StackifierCrash.llx
+++ b/test/CodeGen/X86/2004-06-10-StackifierCrash.llx
@@ -1,6 +1,6 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
-bool %T(double %X) {
- %V = seteq double %X, %X
- ret bool %V
+define i1 @T(double %X) {
+ %V = fcmp oeq double %X, %X ; <i1> [#uses=1]
+ ret i1 %V
}
diff --git a/test/CodeGen/X86/2004-10-08-SelectSetCCFold.llx b/test/CodeGen/X86/2004-10-08-SelectSetCCFold.llx
index 6757be2782..9ee773c91a 100644
--- a/test/CodeGen/X86/2004-10-08-SelectSetCCFold.llx
+++ b/test/CodeGen/X86/2004-10-08-SelectSetCCFold.llx
@@ -1,8 +1,8 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
-bool %test(bool %C, bool %D, int %X, int %Y) {
- %E = setlt int %X, %Y
- %F = select bool %C, bool %D, bool %E
- ret bool %F
+define i1 @test(i1 %C, i1 %D, i32 %X, i32 %Y) {
+ %E = icmp slt i32 %X, %Y ; <i1> [#uses=1]
+ %F = select i1 %C, i1 %D, i1 %E ; <i1> [#uses=1]
+ ret i1 %F
}
diff --git a/test/CodeGen/X86/2005-01-17-CycleInDAG.ll b/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
index 74233ebda0..37cff57f30 100644
--- a/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
+++ b/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
@@ -3,14 +3,15 @@
; is invalid code (there is no correct way to order the instruction). Check
; that we do not fold the load into the sub.
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep sub.*GLOBAL
+; RUN: llvm-as < %s | llc -march=x86 | not grep sub.*GLOBAL
-%GLOBAL = external global int
+@GLOBAL = external global i32 ; <i32*> [#uses=1]
-int %test(int* %P1, int* %P2, int* %P3) {
- %L = load int* %GLOBAL
- store int 12, int* %P2
- %Y = load int* %P3
- %Z = sub int %Y, %L
- ret int %Z
+define i32 @test(i32* %P1, i32* %P2, i32* %P3) {
+ %L = load i32* @GLOBAL ; <i32> [#uses=1]
+ store i32 12, i32* %P2
+ %Y = load i32* %P3 ; <i32> [#uses=1]
+ %Z = sub i32 %Y, %L ; <i32> [#uses=1]
+ ret i32 %Z
}
+
diff --git a/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll b/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll
index 4547bff657..762047b7d8 100644
--- a/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll
+++ b/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll
@@ -1,5 +1,5 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep 18446744073709551612
+; RUN: llvm-as < %s | llc -march=x86 | not grep 18446744073709551612
-%A = external global int
+@A = external global i32 ; <i32*> [#uses=1]
+@Y = global i32* getelementptr (i32* @A, i32 -1) ; <i32**> [#uses=0]
-%Y = global int* getelementptr (int* %A, int -1)
diff --git a/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll b/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll
index 5a304db3fb..b5d215b205 100644
--- a/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll
+++ b/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll
@@ -1,49 +1,38 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=generic
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=generic
; Make sure LLC doesn't crash in the stackifier due to FP PHI nodes.
-void %radfg_() {
+define void @radfg_() {
entry:
- br bool false, label %no_exit.16.preheader, label %loopentry.0
-
-loopentry.0: ; preds = %entry
- ret void
-
-no_exit.16.preheader: ; preds = %entry
- br label %no_exit.16
-
-no_exit.16: ; preds = %no_exit.16, %no_exit.16.preheader
- br bool false, label %loopexit.16.loopexit, label %no_exit.16
-
-loopexit.16.loopexit: ; preds = %no_exit.16
- br label %no_exit.18
-
-no_exit.18: ; preds = %loopexit.20, %loopexit.16.loopexit
- %tmp.882 = add float 0.000000e+00, 0.000000e+00 ; <float> [#uses=2]
- br bool false, label %loopexit.19, label %no_exit.19.preheader
-
-no_exit.19.preheader: ; preds = %no_exit.18
- ret void
-
-loopexit.19: ; preds = %no_exit.18
- br bool false, label %loopexit.20, label %no_exit.20
-
-no_exit.20: ; preds = %loopexit.21, %loopexit.19
- %ai2.1122.tmp.3 = phi float [ %tmp.958, %loopexit.21 ], [ %tmp.882, %loopexit.19 ] ; <float> [#uses=1]
- %tmp.950 = mul float %tmp.882, %ai2.1122.tmp.3 ; <float> [#uses=1]
- %tmp.951 = sub float 0.000000e+00, %tmp.950 ; <float> [#uses=1]
- %tmp.958 = add float 0.000000e+00, 0.000000e+00 ; <float> [#uses=1]
- br bool false, label %loopexit.21, label %no_exit.21.preheader
-
-no_exit.21.preheader: ; preds = %no_exit.20
- ret void
-
-loopexit.21: ; preds = %no_exit.20
- br bool false, label %loopexit.20, label %no_exit.20
-
-loopexit.20: ; preds = %loopexit.21, %loopexit.19
- %ar2.1124.tmp.2 = phi float [ 0.000000e+00, %loopexit.19 ], [ %tmp.951, %loopexit.21 ] ; <float> [#uses=0]
- br bool false, label %loopexit.18.loopexit, label %no_exit.18
-
-loopexit.18.loopexit: ; preds = %loopexit.20
- ret void
+ br i1 false, label %no_exit.16.preheader, label %loopentry.0
+loopentry.0: ; preds = %entry
+ ret void
+no_exit.16.preheader: ; preds = %entry
+ br label %no_exit.16
+no_exit.16: ; preds = %no_exit.16, %no_exit.16.preheader
+ br i1 false, label %loopexit.16.loopexit, label %no_exit.16
+loopexit.16.loopexit: ; preds = %no_exit.16
+ br label %no_exit.18
+no_exit.18: ; preds = %loopexit.20, %loopexit.16.loopexit
+ %tmp.882 = add float 0.000000e+00, 0.000000e+00 ; <float> [#uses=2]
+ br i1 false, label %loopexit.19, label %no_exit.19.preheader
+no_exit.19.preheader: ; preds = %no_exit.18
+ ret void
+loopexit.19: ; preds = %no_exit.18
+ br i1 false, label %loopexit.20, label %no_exit.20
+no_exit.20: ; preds = %loopexit.21, %loopexit.19
+ %ai2.1122.tmp.3 = phi float [ %tmp.958, %loopexit.21 ], [ %tmp.882, %loopexit.19 ] ; <float> [#uses=1]
+ %tmp.950 = mul float %tmp.882, %ai2.1122.tmp.3 ; <float> [#uses=1]
+ %tmp.951 = sub float 0.000000e+00, %tmp.950 ; <float> [#uses=1]
+ %tmp.958 = add float 0.000000e+00, 0.000000e+00 ; <float> [#uses=1]
+ br i1 false, label %loopexit.21, label %no_exit.21.preheader
+no_exit.21.preheader: ; preds = %no_exit.20
+ ret void
+loopexit.21: ; preds = %no_exit.20
+ br i1 false, label %loopexit.20, label %no_exit.20
+loopexit.20: ; preds = %loopexit.21, %loopexit.19
+ %ar2.1124.tmp.2 = phi float [ 0.000000e+00, %loopexit.19 ], [ %tmp.951, %loopexit.21 ] ; <float> [#uses=0]
+ br i1 false, label %loopexit.18.loopexit, label %no_exit.18
+loopexit.18.loopexit: ; preds = %loopexit.20
+ ret void
}
+
diff --git a/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll b/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
index aa141d20cf..817b281243 100644
--- a/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
+++ b/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
@@ -1,16 +1,20 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | \
+; RUN: llvm-as < %s | llc -march=x86 | \
; RUN: grep shld | count 1
;
; Check that the isel does not fold the shld, which already folds a load
; and has two uses, into a store.
-%A = external global uint
-uint %test5(uint %B, ubyte %C) {
- %tmp.1 = load uint *%A;
- %tmp.2 = shl uint %tmp.1, ubyte %C
- %tmp.3 = sub ubyte 32, %C
- %tmp.4 = shr uint %B, ubyte %tmp.3
- %tmp.5 = or uint %tmp.4, %tmp.2
- store uint %tmp.5, uint* %A
- ret uint %tmp.5
+@A = external global i32 ; <i32*> [#uses=2]
+
+define i32 @test5(i32 %B, i8 %C) {
+ %tmp.1 = load i32* @A ; <i32> [#uses=1]
+ %shift.upgrd.1 = zext i8 %C to i32 ; <i32> [#uses=1]
+ %tmp.2 = shl i32 %tmp.1, %shift.upgrd.1 ; <i32> [#uses=1]
+ %tmp.3 = sub i8 32, %C ; <i8> [#uses=1]
+ %shift.upgrd.2 = zext i8 %tmp.3 to i32 ; <i32> [#uses=1]
+ %tmp.4 = lshr i32 %B, %shift.upgrd.2 ; <i32> [#uses=1]
+ %tmp.5 = or i32 %tmp.4, %tmp.2 ; <i32> [#uses=2]
+ store i32 %tmp.5, i32* @A
+ ret i32 %tmp.5
}
+
diff --git a/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll b/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
index b7f08cfc9c..51d2fb2fe2 100644
--- a/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
+++ b/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
@@ -1,11 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | not grep {subl.*%esp}
+; RUN: llvm-as < %s | llc -march=x86 | not grep {subl.*%esp}
-int %f(int %a, int %b) {
- %tmp.2 = mul int %a, %a
- %tmp.5 = shl int %a, ubyte 1
- %tmp.6 = mul int %tmp.5, %b
- %tmp.10 = mul int %b, %b
- %tmp.7 = add int %tmp.10, %tmp.2
- %tmp.11 = add int %tmp.7, %tmp.6
- ret int %tmp.11
+define i32 @f(i32 %a, i32 %b) {
+ %tmp.2 = mul i32 %a, %a ; <i32> [#uses=1]
+ %tmp.5 = shl i32 %a, 1 ; <i32> [#uses=1]
+ %tmp.6 = mul i32 %tmp.5, %b ; <i32> [#uses=1]
+ %tmp.10 = mul i32 %b, %b ; <i32> [#uses=1]
+ %tmp.7 = add i32 %tmp.10, %tmp.2 ; <i32> [#uses=1]
+ %tmp.11 = add i32 %tmp.7, %tmp.6 ; <i32> [#uses=1]
+ ret i32 %tmp.11
}
+
diff --git a/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll b/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll
index da063df46c..96a6ca3884 100644
--- a/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll
+++ b/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll
@@ -1,11 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -stats |& \
+; RUN: llvm-as < %s | llc -march=x86 -stats |& \
; RUN: grep asm-printer | grep 7
-int %g(int %a, int %b) {
- %tmp.1 = shl int %b, ubyte 1
- %tmp.3 = add int %tmp.1, %a
- %tmp.5 = mul int %tmp.3, %a
- %tmp.8 = mul int %b, %b
- %tmp.9 = add int %tmp.5, %tmp.8
- ret int %tmp.9
+define i32 @g(i32 %a, i32 %b) {
+ %tmp.1 = shl i32 %b, 1 ; <i32> [#uses=1]
+ %tmp.3 = add i32 %tmp.1, %a ; <i32> [#uses=1]
+ %tmp.5 = mul i32 %tmp.3, %a ; <i32> [#uses=1]
+ %tmp.8 = mul i32 %b, %b ; <i32> [#uses=1]
+ %tmp.9 = add i32 %tmp.5, %tmp.8 ; <i32> [#uses=1]
+ ret i32 %tmp.9
}
+
diff --git a/test/CodeGen/X86/2006-04-04-CrossBlockCrash.ll b/test/CodeGen/X86/2006-04-04-CrossBlockCrash.ll
index 5d380b5791..743790cad0 100644
--- a/test/CodeGen/X86/2006-04-04-CrossBlockCrash.ll
+++ b/test/CodeGen/X86/2006-04-04-CrossBlockCrash.ll
@@ -1,55 +1,50 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah
+; END.
-target endian = little
-target pointersize = 32
+target datalayout = "e-p:32:32"
target triple = "i686-apple-darwin8.6.1"
%struct.GLTColor4 = type { float, float, float, float }
%struct.GLTCoord3 = type { float, float, float }
- %struct.__GLIContextRec = type { { %struct.anon, { [24 x [16 x float]], [24 x [16 x float]] }, %struct.GLTColor4, { float, float, float, float, %struct.GLTCoord3, float } }, { float, float, float, float, float, float, float, float, [4 x uint], [4 x uint], [4 x uint] } }
- %struct.__GLvertex = type { %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTCoord3, float, %struct.GLTColor4, float, float, float, ubyte, ubyte, ubyte, ubyte, [4 x float], [2 x sbyte*], uint, uint, [16 x %struct.GLTColor4] }
+ %struct.__GLIContextRec = type { { %struct.anon, { [24 x [16 x float]], [24 x [16 x float]] }, %struct.GLTColor4, { float, float, float, float, %struct.GLTCoord3, float } }, { float, float, float, float, float, float, float, float, [4 x i32], [4 x i32], [4 x i32] } }
+ %struct.__GLvertex = type { %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTCoord3, float, %struct.GLTColor4, float, float, float, i8, i8, i8, i8, [4 x float], [2 x i8*], i32, i32, [16 x %struct.GLTColor4] }
%struct.anon = type { float, float, float, float, float, float, float, float }
-implementation ; Functions:
+declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8)
-declare <4 x float> %llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, sbyte)
+declare <4 x i32> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
-declare <4 x int> %llvm.x86.sse2.packssdw.128(<4 x int>, <4 x int>)
+declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>)
-declare int %llvm.x86.sse2.pmovmskb.128(<16 x sbyte>)
-
-void %gleLLVMVecInterpolateClip() {
+define void @gleLLVMVecInterpolateClip() {
entry:
- br bool false, label %cond_false, label %cond_false183
-
+ br i1 false, label %cond_false, label %cond_false183
cond_false: ; preds = %entry
- br bool false, label %cond_false183, label %cond_true69
-
+ br i1 false, label %cond_false183, label %cond_true69
cond_true69: ; preds = %cond_false
ret void
-
cond_false183: ; preds = %cond_false, %entry
- %vuizmsk.0.1 = phi <4 x int> [ < int -1, int -1, int -1, int 0 >, %entry ], [ < int -1, int 0, int 0, int 0 >, %cond_false ] ; <<4 x int>> [#uses=2]
- %tmp192 = extractelement <4 x int> %vuizmsk.0.1, uint 2 ; <int> [#uses=1]
- %tmp193 = extractelement <4 x int> %vuizmsk.0.1, uint 3 ; <int> [#uses=2]
- %tmp195 = insertelement <4 x int> zeroinitializer, int %tmp192, uint 1 ; <<4 x int>> [#uses=1]
- %tmp196 = insertelement <4 x int> %tmp195, int %tmp193, uint 2 ; <<4 x int>> [#uses=1]
- %tmp197 = insertelement <4 x int> %tmp196, int %tmp193, uint 3 ; <<4 x int>> [#uses=1]
- %tmp336 = and <4 x int> zeroinitializer, %tmp197 ; <<4 x int>> [#uses=1]
- %tmp337 = cast <4 x int> %tmp336 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp378 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp337, <4 x float> zeroinitializer, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp379 = cast <4 x float> %tmp378 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp388 = tail call <4 x int> %llvm.x86.sse2.packssdw.128( <4 x int> zeroinitializer, <4 x int> %tmp379 ) ; <<4 x int>> [#uses=1]
- %tmp392 = cast <4 x int> %tmp388 to <8 x short> ; <<8 x short>> [#uses=1]
- %tmp399 = extractelement <8 x short> %tmp392, uint 7 ; <short> [#uses=1]
- %tmp423 = insertelement <8 x short> zeroinitializer, short %tmp399, uint 7 ; <<8 x short>> [#uses=1]
- %tmp427 = cast <8 x short> %tmp423 to <16 x sbyte> ; <<16 x sbyte>> [#uses=1]
- %tmp428 = tail call int %llvm.x86.sse2.pmovmskb.128( <16 x sbyte> %tmp427 ) ; <int> [#uses=1]
- %tmp432 = cast int %tmp428 to sbyte ; <sbyte> [#uses=1]
- %tmp = and sbyte %tmp432, 42 ; <sbyte> [#uses=1]
- %tmp436 = cast sbyte %tmp to ubyte ; <ubyte> [#uses=1]
- %tmp446 = cast ubyte %tmp436 to uint ; <uint> [#uses=1]
- %tmp447 = shl uint %tmp446, ubyte 24 ; <uint> [#uses=1]
- %tmp449 = or uint 0, %tmp447 ; <uint> [#uses=1]
- store uint %tmp449, uint* null
+ %vuizmsk.0.1 = phi <4 x i32> [ < i32 -1, i32 -1, i32 -1, i32 0 >, %entry ], [ < i32 -1, i32 0, i32 0, i32 0 >, %cond_false ] ; <<4 x i32>> [#uses=2]
+ %tmp192 = extractelement <4 x i32> %vuizmsk.0.1, i32 2 ; <i32> [#uses=1]
+ %tmp193 = extractelement <4 x i32> %vuizmsk.0.1, i32 3 ; <i32> [#uses=2]
+ %tmp195 = insertelement <4 x i32> zeroinitializer, i32 %tmp192, i32 1 ; <<4 x i32>> [#uses=1]
+ %tmp196 = insertelement <4 x i32> %tmp195, i32 %tmp193, i32 2 ; <<4 x i32>> [#uses=1]
+ %tmp197 = insertelement <4 x i32> %tmp196, i32 %tmp193, i32 3 ; <<4 x i32>> [#uses=1]
+ %tmp336 = and <4 x i32> zeroinitializer, %tmp197 ; <<4 x i32>> [#uses=1]
+ %tmp337 = bitcast <4 x i32> %tmp336 to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp378 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp337, <4 x float> zeroinitializer, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp379 = bitcast <4 x float> %tmp378 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp388 = tail call <4 x i32> @llvm.x86.sse2.packssdw.128( <4 x i32> zeroinitializer, <4 x i32> %tmp379 ) ; <<4 x i32>> [#uses=1]
+ %tmp392 = bitcast <4 x i32> %tmp388 to <8 x i16> ; <<8 x i16>> [#uses=1]
+ %tmp399 = extractelement <8 x i16> %tmp392, i32 7 ; <i16> [#uses=1]
+ %tmp423 = insertelement <8 x i16> zeroinitializer, i16 %tmp399, i32 7 ; <<8 x i16>> [#uses=1]
+ %tmp427 = bitcast <8 x i16> %tmp423 to <16 x i8> ; <<16 x i8>> [#uses=1]
+ %tmp428 = tail call i32 @llvm.x86.sse2.pmovmskb.128( <16 x i8> %tmp427 ) ; <i32> [#uses=1]
+ %tmp432 = trunc i32 %tmp428 to i8 ; <i8> [#uses=1]
+ %tmp = and i8 %tmp432, 42 ; <i8> [#uses=1]
+ %tmp436 = bitcast i8 %tmp to i8 ; <i8> [#uses=1]
+ %tmp446 = zext i8 %tmp436 to i32 ; <i32> [#uses=1]
+ %tmp447 = shl i32 %tmp446, 24 ; <i32> [#uses=1]
+ %tmp449 = or i32 0, %tmp447 ; <i32> [#uses=1]
+ store i32 %tmp449, i32* null
ret void
}
diff --git a/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll b/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
index 03e7f7bd9c..1a72a20a4f 100644
--- a/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
+++ b/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
@@ -1,36 +1,32 @@
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
; RUN: llc -march=x86 -mtriple=i686-apple-darwin8 -relocation-model=static | \
; RUN: grep {movl _last} | count 1
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
; RUN: llc -march=x86 -mtriple=i686-apple-darwin8 -relocation-model=static | \
; RUN: grep {cmpl.*_last} | count 1
-%block = external global ubyte* ; <ubyte**> [#uses=1]
-%last = external global int ; <int*> [#uses=3]
+@block = external global i8* ; <i8**> [#uses=1]
+@last = external global i32 ; <i32*> [#uses=3]
-implementation ; Functions:
-
-bool %loadAndRLEsource_no_exit_2E_1_label_2E_0(int %tmp.21.reload, int %tmp.8) {
+define i1 @loadAndRLEsource_no_exit_2E_1_label_2E_0(i32 %tmp.21.reload, i32 %tmp.8) {
newFuncRoot:
- br label %label.0
-
-label.0.no_exit.1_crit_edge.exitStub: ; preds = %label.0
- ret bool true
-
-codeRepl5.exitStub: ; preds = %label.0
- ret bool false
-
-label.0: ; preds = %newFuncRoot
- %tmp.35 = load int* %last ; <int> [#uses=1]
- %inc.1 = add int %tmp.35, 1 ; <int> [#uses=2]
- store int %inc.1, int* %last
- %tmp.36 = load ubyte** %block ; <ubyte*> [#uses=1]
- %tmp.38 = getelementptr ubyte* %tmp.36, int %inc.1 ; <ubyte*> [#uses=1]
- %tmp.40 = cast int %tmp.21.reload to ubyte ; <ubyte> [#uses=1]
- store ubyte %tmp.40, ubyte* %tmp.38
- %tmp.910 = load int* %last ; <int> [#uses=1]
- %tmp.1111 = setlt int %tmp.910, %tmp.8 ; <bool> [#uses=1]
- %tmp.1412 = setne int %tmp.21.reload, 257 ; <bool> [#uses=1]
- %tmp.1613 = and bool %tmp.1111, %tmp.1412 ; <bool> [#uses=1]
- br bool %tmp.1613, label %label.0.no_exit.1_crit_edge.exitStub, label %codeRepl5.exitStub
+ br label %label.0
+label.0.no_exit.1_crit_edge.exitStub: ; preds = %label.0
+ ret i1 true
+codeRepl5.exitStub: ; preds = %label.0
+ ret i1 false
+label.0: ; preds = %newFuncRoot
+ %tmp.35 = load i32* @last ; <i32> [#uses=1]
+ %inc.1 = add i32 %tmp.35, 1 ; <i32> [#uses=2]
+ store i32 %inc.1, i32* @last
+ %tmp.36 = load i8** @block ; <i8*> [#uses=1]
+ %tmp.38 = getelementptr i8* %tmp.36, i32 %inc.1 ; <i8*> [#uses=1]
+ %tmp.40 = trunc i32 %tmp.21.reload to i8 ; <i8> [#uses=1]
+ store i8 %tmp.40, i8* %tmp.38
+ %tmp.910 = load i32* @last ; <i32> [#uses=1]
+ %tmp.1111 = icmp slt i32 %tmp.910, %tmp.8 ; <i1> [#uses=1]
+ %tmp.1412 = icmp ne i32 %tmp.21.reload, 257 ; <i1> [#uses=1]
+ %tmp.1613 = and i1 %tmp.1111, %tmp.1412 ; <i1> [#uses=1]
+ br i1 %tmp.1613, label %label.0.no_exit.1_crit_edge.exitStub, label %codeRepl5.exitStub
}
+
diff --git a/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll b/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
index f89cfe0449..f28366699c 100644
--- a/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
+++ b/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
@@ -1,74 +1,76 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah -stats |& \
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah -stats |& \
; RUN: not grep {Number of register spills}
+; END.
-int %foo(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
+
+define i32 @foo(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
%tmp44 = load <4 x float>* %a ; <<4 x float>> [#uses=9]
%tmp46 = load <4 x float>* %b ; <<4 x float>> [#uses=1]
%tmp48 = load <4 x float>* %c ; <<4 x float>> [#uses=1]
%tmp50 = load <4 x float>* %d ; <<4 x float>> [#uses=1]
- %tmp51 = cast <4 x float> %tmp44 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp = shufflevector <4 x int> %tmp51, <4 x int> undef, <4 x uint> < uint 3, uint 3, uint 3, uint 3 > ; <<4 x int>> [#uses=2]
- %tmp52 = cast <4 x int> %tmp to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp60 = xor <4 x int> %tmp, < int -2147483648, int -2147483648, int -2147483648, int -2147483648 > ; <<4 x int>> [#uses=1]
- %tmp61 = cast <4 x int> %tmp60 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp74 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp52, <4 x float> %tmp44, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp75 = cast <4 x float> %tmp74 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp88 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp61, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp89 = cast <4 x float> %tmp88 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp98 = tail call <4 x int> %llvm.x86.sse2.packssdw.128( <4 x int> %tmp75, <4 x int> %tmp89 ) ; <<4 x int>> [#uses=1]
- %tmp102 = cast <4 x int> %tmp98 to <8 x short> ; <<8 x short>> [#uses=1]
- %tmp = shufflevector <8 x short> %tmp102, <8 x short> undef, <8 x uint> < uint 0, uint 1, uint 2, uint 3, uint 6, uint 5, uint 4, uint 7 > ; <<8 x short>> [#uses=1]
- %tmp105 = shufflevector <8 x short> %tmp, <8 x short> undef, <8 x uint> < uint 2, uint 1, uint 0, uint 3, uint 4, uint 5, uint 6, uint 7 > ; <<8 x short>> [#uses=1]
- %tmp105 = cast <8 x short> %tmp105 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp105, <4 x float>* %a
- %tmp108 = cast <4 x float> %tmp46 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp109 = shufflevector <4 x int> %tmp108, <4 x int> undef, <4 x uint> < uint 3, uint 3, uint 3, uint 3 > ; <<4 x int>> [#uses=2]
- %tmp109 = cast <4 x int> %tmp109 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp119 = xor <4 x int> %tmp109, < int -2147483648, int -2147483648, int -2147483648, int -2147483648 > ; <<4 x int>> [#uses=1]
- %tmp120 = cast <4 x int> %tmp119 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp133 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp109, <4 x float> %tmp44, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp134 = cast <4 x float> %tmp133 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp147 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp120, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp148 = cast <4 x float> %tmp147 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp159 = tail call <4 x int> %llvm.x86.sse2.packssdw.128( <4 x int> %tmp134, <4 x int> %tmp148 ) ; <<4 x int>> [#uses=1]
- %tmp163 = cast <4 x int> %tmp159 to <8 x short> ; <<8 x short>> [#uses=1]
- %tmp164 = shufflevector <8 x short> %tmp163, <8 x short> undef, <8 x uint> < uint 0, uint 1, uint 2, uint 3, uint 6, uint 5, uint 4, uint 7 > ; <<8 x short>> [#uses=1]
- %tmp166 = shufflevector <8 x short> %tmp164, <8 x short> undef, <8 x uint> < uint 2, uint 1, uint 0, uint 3, uint 4, uint 5, uint 6, uint 7 > ; <<8 x short>> [#uses=1]
- %tmp166 = cast <8 x short> %tmp166 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp166, <4 x float>* %b
- %tmp169 = cast <4 x float> %tmp48 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp170 = shufflevector <4 x int> %tmp169, <4 x int> undef, <4 x uint> < uint 3, uint 3, uint 3, uint 3 > ; <<4 x int>> [#uses=2]
- %tmp170 = cast <4 x int> %tmp170 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp180 = xor <4 x int> %tmp170, < int -2147483648, int -2147483648, int -2147483648, int -2147483648 > ; <<4 x int>> [#uses=1]
- %tmp181 = cast <4 x int> %tmp180 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp194 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp170, <4 x float> %tmp44, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp195 = cast <4 x float> %tmp194 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp208 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp181, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp209 = cast <4 x float> %tmp208 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp220 = tail call <4 x int> %llvm.x86.sse2.packssdw.128( <4 x int> %tmp195, <4 x int> %tmp209 ) ; <<4 x int>> [#uses=1]
- %tmp224 = cast <4 x int> %tmp220 to <8 x short> ; <<8 x short>> [#uses=1]
- %tmp225 = shufflevector <8 x short> %tmp224, <8 x short> undef, <8 x uint> < uint 0, uint 1, uint 2, uint 3, uint 6, uint 5, uint 4, uint 7 > ; <<8 x short>> [#uses=1]
- %tmp227 = shufflevector <8 x short> %tmp225, <8 x short> undef, <8 x uint> < uint 2, uint 1, uint 0, uint 3, uint 4, uint 5, uint 6, uint 7 > ; <<8 x short>> [#uses=1]
- %tmp227 = cast <8 x short> %tmp227 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp227, <4 x float>* %c
- %tmp230 = cast <4 x float> %tmp50 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp231 = shufflevector <4 x int> %tmp230, <4 x int> undef, <4 x uint> < uint 3, uint 3, uint 3, uint 3 > ; <<4 x int>> [#uses=2]
- %tmp231 = cast <4 x int> %tmp231 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp241 = xor <4 x int> %tmp231, < int -2147483648, int -2147483648, int -2147483648, int -2147483648 > ; <<4 x int>> [#uses=1]
- %tmp242 = cast <4 x int> %tmp241 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp255 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp231, <4 x float> %tmp44, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp256 = cast <4 x float> %tmp255 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp269 = tail call <4 x float> %llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp242, sbyte 1 ) ; <<4 x float>> [#uses=1]
- %tmp270 = cast <4 x float> %tmp269 to <4 x int> ; <<4 x int>> [#uses=1]
- %tmp281 = tail call <4 x int> %llvm.x86.sse2.packssdw.128( <4 x int> %tmp256, <4 x int> %tmp270 ) ; <<4 x int>> [#uses=1]
- %tmp285 = cast <4 x int> %tmp281 to <8 x short> ; <<8 x short>> [#uses=1]
- %tmp286 = shufflevector <8 x short> %tmp285, <8 x short> undef, <8 x uint> < uint 0, uint 1, uint 2, uint 3, uint 6, uint 5, uint 4, uint 7 > ; <<8 x short>> [#uses=1]
- %tmp288 = shufflevector <8 x short> %tmp286, <8 x short> undef, <8 x uint> < uint 2, uint 1, uint 0, uint 3, uint 4, uint 5, uint 6, uint 7 > ; <<8 x short>> [#uses=1]
- %tmp288 = cast <8 x short> %tmp288 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp288, <4 x float>* %d
- ret int 0
+ %tmp51 = bitcast <4 x float> %tmp44 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp = shufflevector <4 x i32> %tmp51, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
+ %tmp52 = bitcast <4 x i32> %tmp to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp60 = xor <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
+ %tmp61 = bitcast <4 x i32> %tmp60 to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp74 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp52, <4 x float> %tmp44, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp75 = bitcast <4 x float> %tmp74 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp88 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp61, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp89 = bitcast <4 x float> %tmp88 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp98 = tail call <4 x i32> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp75, <4 x i32> %tmp89 ) ; <<4 x i32>> [#uses=1]
+ %tmp102 = bitcast <4 x i32> %tmp98 to <8 x i16> ; <<8 x i16>> [#uses=1]
+ %tmp.upgrd.1 = shufflevector <8 x i16> %tmp102, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
+ %tmp105 = shufflevector <8 x i16> %tmp.upgrd.1, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
+ %tmp105.upgrd.2 = bitcast <8 x i16> %tmp105 to <4 x float> ; <<4 x float>> [#uses=1]
+ store <4 x float> %tmp105.upgrd.2, <4 x float>* %a
+ %tmp108 = bitcast <4 x float> %tmp46 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp109 = shufflevector <4 x i32> %tmp108, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
+ %tmp109.upgrd.3 = bitcast <4 x i32> %tmp109 to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp119 = xor <4 x i32> %tmp109, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
+ %tmp120 = bitcast <4 x i32> %tmp119 to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp133 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp109.upgrd.3, <4 x float> %tmp44, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp134 = bitcast <4 x float> %tmp133 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp147 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp120, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp148 = bitcast <4 x float> %tmp147 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp159 = tail call <4 x i32> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp134, <4 x i32> %tmp148 ) ; <<4 x i32>> [#uses=1]
+ %tmp163 = bitcast <4 x i32> %tmp159 to <8 x i16> ; <<8 x i16>> [#uses=1]
+ %tmp164 = shufflevector <8 x i16> %tmp163, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
+ %tmp166 = shufflevector <8 x i16> %tmp164, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
+ %tmp166.upgrd.4 = bitcast <8 x i16> %tmp166 to <4 x float> ; <<4 x float>> [#uses=1]
+ store <4 x float> %tmp166.upgrd.4, <4 x float>* %b
+ %tmp169 = bitcast <4 x float> %tmp48 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp170 = shufflevector <4 x i32> %tmp169, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
+ %tmp170.upgrd.5 = bitcast <4 x i32> %tmp170 to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp180 = xor <4 x i32> %tmp170, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
+ %tmp181 = bitcast <4 x i32> %tmp180 to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp194 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp170.upgrd.5, <4 x float> %tmp44, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp195 = bitcast <4 x float> %tmp194 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp208 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp181, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp209 = bitcast <4 x float> %tmp208 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp220 = tail call <4 x i32> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp195, <4 x i32> %tmp209 ) ; <<4 x i32>> [#uses=1]
+ %tmp224 = bitcast <4 x i32> %tmp220 to <8 x i16> ; <<8 x i16>> [#uses=1]
+ %tmp225 = shufflevector <8 x i16> %tmp224, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
+ %tmp227 = shufflevector <8 x i16> %tmp225, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
+ %tmp227.upgrd.6 = bitcast <8 x i16> %tmp227 to <4 x float> ; <<4 x float>> [#uses=1]
+ store <4 x float> %tmp227.upgrd.6, <4 x float>* %c
+ %tmp230 = bitcast <4 x float> %tmp50 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp231 = shufflevector <4 x i32> %tmp230, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
+ %tmp231.upgrd.7 = bitcast <4 x i32> %tmp231 to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp241 = xor <4 x i32> %tmp231, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
+ %tmp242 = bitcast <4 x i32> %tmp241 to <4 x float> ; <<4 x float>> [#uses=1]
+ %tmp255 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp231.upgrd.7, <4 x float> %tmp44, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp256 = bitcast <4 x float> %tmp255 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp269 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp242, i8 1 ) ; <<4 x float>> [#uses=1]
+ %tmp270 = bitcast <4 x float> %tmp269 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp281 = tail call <4 x i32> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp256, <4 x i32> %tmp270 ) ; <<4 x i32>> [#uses=1]
+ %tmp285 = bitcast <4 x i32> %tmp281 to <8 x i16> ; <<8 x i16>> [#uses=1]
+ %tmp286 = shufflevector <8 x i16> %tmp285, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
+ %tmp288 = shufflevector <8 x i16> %tmp286, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
+ %tmp288.upgrd.8 = bitcast <8 x i16> %tmp288 to <4 x float> ; <<4 x float>> [#uses=1]
+ store <4 x float> %tmp288.upgrd.8, <4 x float>* %d
+ ret i32 0
}
-declare <4 x float> %llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, sbyte)
+declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8)
-declare <4 x int> %llvm.x86.sse2.packssdw.128(<4 x int>, <4 x int>)
+declare <4 x i32> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
diff --git a/test/CodeGen/X86/2006-05-02-InstrSched1.ll b/test/CodeGen/X86/2006-05-02-InstrSched1.ll
index 59a15f4ec9..1357419077 100644
--- a/test/CodeGen/X86/2006-05-02-InstrSched1.ll
+++ b/test/CodeGen/X86/2006-05-02-InstrSched1.ll
@@ -1,23 +1,24 @@
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
; RUN: llc -march=x86 -relocation-model=static -stats |& \
; RUN: grep asm-printer | grep 14
;
-%size20 = external global uint ; <uint*> [#uses=1]
-%in5 = external global ubyte* ; <ubyte**> [#uses=1]
+@size20 = external global i32 ; <i32*> [#uses=1]
+@in5 = external global i8* ; <i8**> [#uses=1]
-int %compare(sbyte* %a, sbyte* %b) {
- %tmp = cast sbyte* %a to uint* ; <uint*> [#uses=1]
- %tmp1 = cast sbyte* %b to uint* ; <uint*> [#uses=1]
- %tmp = load uint* %size20 ; <uint> [#uses=1]
- %tmp = load ubyte** %in5 ; <ubyte*> [#uses=2]
- %tmp3 = load uint* %tmp1 ; <uint> [#uses=1]
- %tmp4 = getelementptr ubyte* %tmp, uint %tmp3 ; <ubyte*> [#uses=1]
- %tmp7 = load uint* %tmp ; <uint> [#uses=1]
- %tmp8 = getelementptr ubyte* %tmp, uint %tmp7 ; <ubyte*> [#uses=1]
- %tmp8 = cast ubyte* %tmp8 to sbyte* ; <sbyte*> [#uses=1]
- %tmp4 = cast ubyte* %tmp4 to sbyte* ; <sbyte*> [#uses=1]
- %tmp = tail call int %memcmp( sbyte* %tmp8, sbyte* %tmp4, uint %tmp ) ; <int> [#uses=1]
- ret int %tmp
+define i32 @compare(i8* %a, i8* %b) {
+ %tmp = bitcast i8* %a to i32* ; <i32*> [#uses=1]
+ %tmp1 = bitcast i8* %b to i32* ; <i32*> [#uses=1]
+ %tmp.upgrd.1 = load i32* @size20 ; <i32> [#uses=1]
+ %tmp.upgrd.2 = load i8** @in5 ; <i8*> [#uses=2]
+ %tmp3 = load i32* %tmp1 ; <i32> [#uses=1]
+ %gep.upgrd.3 = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
+ %tmp4 = getelementptr i8* %tmp.upgrd.2, i64 %gep.upgrd.3 ; <i8*> [#uses=2]
+ %tmp7 = load i32* %tmp ; <i32> [#uses=1]
+ %gep.upgrd.4 = zext i32 %tmp7 to i64 ; <i64> [#uses=1]
+ %tmp8 = getelementptr i8* %tmp.upgrd.2, i64 %gep.upgrd.4 ; <i8*> [#uses=2]
+ %tmp.upgrd.5 = tail call i32 @memcmp( i8* %tmp8, i8* %tmp4, i32 %tmp.upgrd.1 ) ; <i32> [#uses=1]
+ ret i32 %tmp.upgrd.5
}
-declare int %memcmp(sbyte*, sbyte*, uint)
+declare i32 @memcmp(i8*, i8*, i32)
+
diff --git a/test/CodeGen/X86/2006-05-02-InstrSched2.ll b/test/CodeGen/X86/2006-05-02-InstrSched2.ll
index fb9c67cf93..6e1610e098 100644
--- a/test/CodeGen/X86/2006-05-02-InstrSched2.ll
+++ b/test/CodeGen/X86/2006-05-02-InstrSched2.ll
@@ -1,25 +1,24 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -stats |& \
+; RUN: llvm-as < %s | llc -march=x86 -stats |& \
; RUN: grep asm-printer | grep 13
-void %_ZN9__gnu_cxx9hashtableISt4pairIKPKciES3_NS_4hashIS3_EESt10_Select1stIS5_E5eqstrSaIiEE14find_or_insertERKS5__cond_true456.i(sbyte* %tmp435.i, uint* %tmp449.i.out) {
+define void @_ZN9__gnu_cxx9hashtableISt4pairIKPKciES3_NS_4hashIS3_EESt10_Select1stIS5_E5eqstrSaIiEE14find_or_insertERKS5__cond_true456.i(i8* %tmp435.i, i32* %tmp449.i.out) {
newFuncRoot:
br label %cond_true456.i
-
bb459.i.exitStub: ; preds = %cond_true456.i
- store uint %tmp449.i, uint* %tmp449.i.out
+ store i32 %tmp449.i, i32* %tmp449.i.out
ret void
-
cond_true456.i: ; preds = %cond_true456.i, %newFuncRoot
- %__s441.2.4.i = phi sbyte* [ %tmp451.i, %cond_true456.i ], [ %tmp435.i, %newFuncRoot ] ; <sbyte*> [#uses=2]
- %__h.2.4.i = phi uint [ %tmp449.i, %cond_true456.i ], [ 0, %newFuncRoot ] ; <uint> [#uses=1]
- %tmp446.i = mul uint %__h.2.4.i, 5 ; <uint> [#uses=1]
- %tmp.i = load sbyte* %__s441.2.4.i ; <sbyte> [#uses=1]
- %tmp448.i = cast sbyte %tmp.i to uint ; <uint> [#uses=1]
- %tmp449.i = add uint %tmp448.i, %tmp446.i ; <uint> [#uses=2]
- %tmp450.i = cast sbyte* %__s441.2.4.i to uint ; <uint> [#uses=1]
- %tmp451.i = add uint %tmp450.i, 1 ; <uint> [#uses=1]
- %tmp451.i = cast uint %tmp451.i to sbyte* ; <sbyte*> [#uses=2]
- %tmp45435.i = load sbyte* %tmp451.i ; <sbyte> [#uses=1]
- %tmp45536.i = seteq sbyte %tmp45435.i, 0 ; <bool> [#uses=1]
- br bool %tmp45536.i, label %bb459.i.exitStub, label %cond_true456.i
+ %__s441.2.4.i = phi i8* [ %tmp451.i.upgrd.1, %cond_true456.i ], [ %tmp435.i, %newFuncRoot ] ; <i8*> [#uses=2]
+ %__h.2.4.i = phi i32 [ %tmp449.i, %cond_true456.i ], [ 0, %newFuncRoot ] ; <i32> [#uses=1]
+ %tmp446.i = mul i32 %__h.2.4.i, 5 ; <i32> [#uses=1]
+ %tmp.i = load i8* %__s441.2.4.i ; <i8> [#uses=1]
+ %tmp448.i = sext i8 %tmp.i to i32 ; <i32> [#uses=1]
+ %tmp449.i = add i32 %tmp448.i, %tmp446.i ; <i32> [#uses=2]
+ %tmp450.i = ptrtoint i8* %__s441.2.4.i to i32 ; <i32> [#uses=1]
+ %tmp451.i = add i32 %tmp450.i, 1 ; <i32> [#uses=1]
+ %tmp451.i.upgrd.1 = inttoptr i32 %tmp451.i to i8* ; <i8*> [#uses=2]
+ %tmp45435.i = load i8* %tmp451.i.upgrd.1 ; <i8> [#uses=1]
+ %tmp45536.i = icmp eq i8 %tmp45435.i, 0 ; <i1> [#uses=1]
+ br i1 %tmp45536.i, label %bb459.i.exitStub, label %cond_true456.i
}
+
diff --git a/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll b/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
index e97e38743e..900abe55cd 100644
--- a/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
+++ b/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
@@ -1,23 +1,25 @@
; Coalescing from R32 to a subset R32_. Once another register coalescer bug is
; fixed, the movb should go away as well.
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -relocation-model=static | \
+; RUN: llvm-as < %s | llc -march=x86 -relocation-model=static | \
; RUN: grep movl
-%B = external global uint
-%C = external global ushort*
+@B = external global i32 ; <i32*> [#uses=2]
+@C = external global i16* ; <i16**> [#uses=2]
-void %test(uint %A) {
- %A = cast uint %A to ubyte
- %tmp2 = load uint* %B
- %tmp3 = and ubyte %A, 16
- %tmp4 = shl uint %tmp2, ubyte %tmp3
- store uint %tmp4, uint* %B
- %tmp6 = shr uint %A, ubyte 3
- %tmp = load ushort** %C
- %tmp8 = cast ushort* %tmp to uint
- %tmp9 = add uint %tmp8, %tmp6
- %tmp9 = cast uint %tmp9 to ushort*
- store ushort* %tmp9, ushort** %C
+define void @test(i32 %A) {
+ %A.upgrd.1 = trunc i32 %A to i8 ; <i8> [#uses=1]
+ %tmp2 = load i32* @B ; <i32> [#uses=1]
+ %tmp3 = and i8 %A.upgrd.1, 16 ; <i8> [#uses=1]
+ %shift.upgrd.2 = zext i8 %tmp3 to i32 ; <i32> [#uses=1]
+ %tmp4 = shl i32 %tmp2, %shift.upgrd.2 ; <i32> [#uses=1]
+ store i32 %tmp4, i32* @B
+ %tmp6 = lshr i32 %A, 3 ; <i32> [#uses=1]
+ %tmp = load i16** @C ; <i16*> [#uses=1]
+ %tmp8 = ptrtoint i16* %tmp to i32 ; <i32> [#uses=1]
+ %tmp9 = add i32 %tmp8, %tmp6 ; <i32> [#uses=1]
+ %tmp9.upgrd.3 = inttoptr i32 %tmp9 to i16* ; <i16*> [#uses=1]
+ store i16* %tmp9.upgrd.3, i16** @C
ret void
}
+
diff --git a/test/CodeGen/X86/2006-05-08-InstrSched.ll b/test/CodeGen/X86/2006-05-08-InstrSched.ll
index fd35f9f5cc..c39b377cc7 100644
--- a/test/CodeGen/X86/2006-05-08-InstrSched.ll
+++ b/test/CodeGen/X86/2006-05-08-InstrSched.ll
@@ -1,23 +1,26 @@
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
; RUN: llc -march=x86 -relocation-model=static | not grep {subl.*%esp}
-%A = external global ushort*
-%B = external global uint
-%C = external global uint
+@A = external global i16* ; <i16**> [#uses=1]
+@B = external global i32 ; <i32*> [#uses=1]
+@C = external global i32 ; <i32*> [#uses=2]
-void %test() {
- %tmp = load ushort** %A
- %tmp1 = getelementptr ushort* %tmp, int 1
- %tmp = load ushort* %tmp1
- %tmp3 = cast ushort %tmp to uint
- %tmp = load uint* %B
- %tmp4 = and uint %tmp, 16
- %tmp5 = load uint* %C
- %tmp6 = cast uint %tmp4 to ubyte
- %tmp7 = shl uint %tmp5, ubyte %tmp6
- %tmp9 = xor ubyte %tmp6, 16
- %tmp11 = shr uint %tmp3, ubyte %tmp9
- %tmp12 = or uint %tmp11, %tmp7
- store uint %tmp12, uint* %C
+define void @test() {
+ %tmp = load i16** @A ; <i16*> [#uses=1]
+ %tmp1 = getelementptr i16* %tmp, i32 1 ; <i16*> [#uses=1]
+ %tmp.upgrd.1 = load i16* %tmp1 ; <i16> [#uses=1]
+ %tmp3 = zext i16 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
+ %tmp.upgrd.2 = load i32* @B ; <i32> [#uses=1]
+ %tmp4 = and i32 %tmp.upgrd.2, 16 ; <i32> [#uses=1]
+ %tmp5 = load i32* @C ; <i32> [#uses=1]
+ %tmp6 = trunc i32 %tmp4 to i8 ; <i8> [#uses=2]
+ %shift.upgrd.3 = zext i8 %tmp6 to i32 ; <i32> [#uses=1]
+ %tmp7 = shl i32 %tmp5, %shift.upgrd.3 ; <i32> [#uses=1]
+ %tmp9 = xor i8 %tmp6, 16 ; <i8> [#uses=1]
+ %shift.upgrd.4 = zext i8 %tmp9 to i32 ; <i32> [#uses=1]
+ %tmp11 = lshr i32 %tmp3, %shift.upgrd.4 ; <i32> [#uses=1]
+ %tmp12 = or i32 %tmp11, %tmp7 ; <i32> [#uses=1]
+ store i32 %tmp12, i32* @C
ret void
}
+
diff --git a/test/CodeGen/X86/2006-05-17-VectorArg.ll b/test/CodeGen/X86/2006-05-17-VectorArg.ll
index 1f2af1473a..1d24b776db 100644
--- a/test/CodeGen/X86/2006-05-17-VectorArg.ll
+++ b/test/CodeGen/X86/2006-05-17-VectorArg.ll
@@ -1,14 +1,15 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2
-<4 x float> %opRSQ(<4 x float> %a) {
+define <4 x float> @opRSQ(<4 x float> %a) {
entry:
- %tmp2 = extractelement <4 x float> %a, uint 3
- %abscond = setge float %tmp2, -0.000000e+00
- %abs = select bool %abscond, float %tmp2, float 0.000000e+00
- %tmp3 = tail call float %llvm.sqrt.f32( float %abs )
- %tmp4 = div float 1.000000e+00, %tmp3
- %tmp11 = insertelement <4 x float> zeroinitializer, float %tmp4, uint 3
+ %tmp2 = extractelement <4 x float> %a, i32 3 ; <float> [#uses=2]
+ %abscond = fcmp oge float %tmp2, -0.000000e+00 ; <i1> [#uses=1]
+ %abs = select i1 %abscond, float %tmp2, float 0.000000e+00 ; <float> [#uses=1]
+ %tmp3 = tail call float @llvm.sqrt.f32( float %abs ) ; <float> [#uses=1]
+ %tmp4 = fdiv float 1.000000e+00, %tmp3 ; <float> [#uses=1]
+ %tmp11 = insertelement <4 x float> zeroinitializer, float %tmp4, i32 3 ; <<4 x float>> [#uses=1]
ret <4 x float> %tmp11
}
-declare float %llvm.sqrt.f32(float)
+declare float @llvm.sqrt.f32(float)
+
diff --git a/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
index 32281db624..ae18c90d8c 100644
--- a/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
+++ b/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
@@ -1,9 +1,10 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep setnp
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -enable-unsafe-fp-math | \
+; RUN: llvm-as < %s | llc -march=x86 | grep setnp
+; RUN: llvm-as < %s | llc -march=x86 -enable-unsafe-fp-math | \
; RUN: not grep setnp
-uint %test(float %f) {
- %tmp = seteq float %f, 0.000000e+00
- %tmp = cast bool %tmp to uint
- ret uint %tmp
+define i32 @test(float %f) {
+ %tmp = fcmp oeq float %f, 0.000000e+00 ; <i1> [#uses=1]
+ %tmp.upgrd.1 = zext i1 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %tmp.upgrd.1
}
+
diff --git a/test/CodeGen/X86/2006-05-25-CycleInDAG.ll b/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
index 8258f0b322..c9a004965f 100644
--- a/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
+++ b/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
@@ -1,21 +1,20 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
-int %test() {
- br bool false, label %cond_next33, label %cond_true12
-
-cond_true12:
- ret int 0
-
-cond_next33:
- %tmp44.i = call double %foo( double 0.000000e+00, int 32 )
- %tmp61.i = load ubyte* null
- %tmp61.i = cast ubyte %tmp61.i to int
- %tmp58.i = or int 0, %tmp61.i
- %tmp62.i = or int %tmp58.i, 0
- %tmp62.i = cast int %tmp62.i to double
- %tmp64.i = add double %tmp62.i, %tmp44.i
- %tmp68.i = call double %foo( double %tmp64.i, int 0 )
- ret int 0
+define i32 @test() {
+ br i1 false, label %cond_next33, label %cond_true12
+cond_true12: ; preds = %0
+ ret i32 0
+cond_next33: ; preds = %0
+ %tmp44.i = call double @foo( double 0.000000e+00, i32 32 ) ; <double> [#uses=1]
+ %tmp61.i = load i8* null ; <i8> [#uses=1]
+ %tmp61.i.upgrd.1 = zext i8 %tmp61.i to i32 ; <i32> [#uses=1]
+ %tmp58.i = or i32 0, %tmp61.i.upgrd.1 ; <i32> [#uses=1]
+ %tmp62.i = or i32 %tmp58.i, 0 ; <i32> [#uses=1]
+ %tmp62.i.upgrd.2 = sitofp i32 %tmp62.i to double ; <double> [#uses=1]
+ %tmp64.i = add double %tmp62.i.upgrd.2, %tmp44.i ; <double> [#uses=1]
+ %tmp68.i = call double @foo( double %tmp64.i, i32 0 ) ; <double> [#uses=0]
+ ret i32 0
}
-declare double %foo(double, int)
+declare double @foo(double, i32)
+
diff --git a/test/CodeGen/X86/2006-07-10-InlineAsmAConstraint.ll b/test/CodeGen/X86/2006-07-10-InlineAsmAConstraint.ll
index d044fd7153..760fe3650e 100644
--- a/test/CodeGen/X86/2006-07-10-InlineAsmAConstraint.ll
+++ b/test/CodeGen/X86/2006-07-10-InlineAsmAConstraint.ll
@@ -1,7 +1,8 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
; PR825
-long %test() {
- %tmp.i5 = call long asm sideeffect "rdtsc", "=A,~{dirflag},~{fpsr},~{flags}"( ) ; <long> [#uses=0]
- ret long %tmp.i5
+define i64 @test() {
+ %tmp.i5 = call i64 asm sideeffect "rdtsc", "=A,~{dirflag},~{fpsr},~{flags}"( ) ; <i64> [#uses=1]
+ ret i64 %tmp.i5
}
+
diff --git a/test/CodeGen/X86/2006-07-12-InlineAsmQConstraint.ll b/test/CodeGen/X86/2006-07-12-InlineAsmQConstraint.ll
index 1bacc16573..1db3921ecd 100644
--- a/test/CodeGen/X86/2006-07-12-InlineAsmQConstraint.ll
+++ b/test/CodeGen/X86/2006-07-12-InlineAsmQConstraint.ll
@@ -1,18 +1,12 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
; PR828
-target endian = little
-target pointersize = 32
+target datalayout = "e-p:32:32"
target triple = "i686-pc-linux-gnu"
-implementation ; Functions:
-
-void %_ZN5() {
-
-cond_true9: ; preds = %entry
- %tmp3.i.i = call int asm sideeffect "lock; cmpxchg $1,$2",
-"={ax},q,m,0,~{dirflag},~{fpsr},~{flags},~{memory}"( int 0, int* null, int 0 )
- ; <int> [#uses=0]
- ret void
+define void @_ZN5() {
+cond_true9:
+ %tmp3.i.i = call i32 asm sideeffect "lock; cmpxchg $1,$2", "={ax},q,m,0,~{dirflag},~{fpsr},~{flags},~{memory}"( i32 0, i32* null, i32 0 ) ; <i32> [#uses=0]
+ ret void
}
diff --git a/test/CodeGen/X86/2006-07-19-ATTAsm.ll b/test/CodeGen/X86/2006-07-19-ATTAsm.ll
index adfe88c9d1..1724f2b2ff 100644
--- a/test/CodeGen/X86/2006-07-19-ATTAsm.ll
+++ b/test/CodeGen/X86/2006-07-19-ATTAsm.ll
@@ -1,51 +1,48 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=att
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=att
; PR834
+; END.
-target endian = little
-target pointersize = 32
+target datalayout = "e-p:32:32"
target triple = "i386-unknown-freebsd6.1"
-
- %llvm.dbg.anchor.type = type { uint, uint }
- %llvm.dbg.basictype.type = type { uint, { }*, sbyte*, { }*, uint, ulong, ulong, ulong, uint, uint }
- %llvm.dbg.compile_unit.type = type { uint, { }*, uint, sbyte*, sbyte*, sbyte* }
- %llvm.dbg.global_variable.type = type { uint, { }*, { }*, sbyte*, sbyte*, { }*, uint, { }*, bool, bool, { }* }
-%x = global int 0 ; <int*> [#uses=1]
-%llvm.dbg.global_variable = internal constant %llvm.dbg.global_variable.type {
- uint 327732,
- { }* cast (%llvm.dbg.anchor.type* %llvm.dbg.global_variables to { }*),
- { }* cast (%llvm.dbg.compile_unit.type* %llvm.dbg.compile_unit to { }*),
- sbyte* getelementptr ([2 x sbyte]* %str, int 0, int 0),
- sbyte* null,
- { }* cast (%llvm.dbg.compile_unit.type* %llvm.dbg.compile_unit to { }*),
- uint 1,
- { }* cast (%llvm.dbg.basictype.type* %llvm.dbg.basictype to { }*),
- bool false,
- bool true,
- { }* cast (int* %x to { }*) }, section "llvm.metadata" ; <%llvm.dbg.global_variable.type*> [#uses=0]
-%llvm.dbg.global_variables = linkonce constant %llvm.dbg.anchor.type { uint 327680, uint 52 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
-%llvm.dbg.compile_unit = internal constant %llvm.dbg.compile_unit.type {
- uint 327697,
- { }* cast (%llvm.dbg.anchor.type* %llvm.dbg.compile_units to { }*),
- uint 4,
- sbyte* getelementptr ([10 x sbyte]* %str, int 0, int 0),
- sbyte* getelementptr ([32 x sbyte]* %str, int 0, int 0),
- sbyte* getelementptr ([45 x sbyte]* %str, int 0, int 0) }, section "llvm.metadata" ; <%llvm.dbg.compile_unit.type*> [#uses=1]
-%llvm.dbg.compile_units = linkonce constant %llvm.dbg.anchor.type { uint 327680, uint 17 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
-%str = internal constant [10 x sbyte] c"testb.cpp\00", section "llvm.metadata" ; <[10 x sbyte]*> [#uses=1]
-%str = internal constant [32 x sbyte] c"/Sources/Projects/DwarfTesting/\00", section "llvm.metadata" ; <[32 x sbyte]*> [#uses=1]
-%str = internal constant [45 x sbyte] c"4.0.1 LLVM (Apple Computer, Inc. build 5400)\00", section "llvm.metadata" ; <[45 x sbyte]*> [#uses=1]
-%str = internal constant [2 x sbyte] c"x\00", section "llvm.metadata" ; <[2 x sbyte]*> [#uses=1]
-%llvm.dbg.basictype = internal constant %llvm.dbg.basictype.type {
- uint 327716,
- { }* cast (%llvm.dbg.compile_unit.type* %llvm.dbg.compile_unit to { }*),
- sbyte* getelementptr ([4 x sbyte]* %str, int 0, int 0),
+ %llvm.dbg.anchor.type = type { i32, i32 }
+ %llvm.dbg.basictype.type = type { i32, { }*, i8*, { }*, i32, i64, i64, i64, i32, i32 }
+ %llvm.dbg.compile_unit.type = type { i32, { }*, i32, i8*, i8*, i8* }
+ %llvm.dbg.global_variable.type = type { i32, { }*, { }*, i8*, i8*, { }*, i32, { }*, i1, i1, { }* }
+@x = global i32 0 ; <i32*> [#uses=1]
+@llvm.dbg.global_variable = internal constant %llvm.dbg.global_variable.type {
+ i32 327732,
+ { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.global_variables to { }*),
+ { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
+ i8* getelementptr ([2 x i8]* @str, i64 0, i64 0),
+ i8* null,
+ { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
+ i32 1,
+ { }* bitcast (%llvm.dbg.basictype.type* @llvm.dbg.basictype to { }*),
+ i1 false,
+ i1 true,
+ { }* bitcast (i32* @x to { }*) }, section "llvm.metadata" ; <%llvm.dbg.global_variable.type*> [#uses=0]
+@llvm.dbg.global_variables = linkonce constant %llvm.dbg.anchor.type { i32 327680, i32 52 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
+@llvm.dbg.compile_unit = internal constant %llvm.dbg.compile_unit.type {
+ i32 327697,
+ { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.compile_units to { }*),
+ i32 4,
+ i8* getelementptr ([10 x i8]* @str1, i64 0, i64 0),
+ i8* getelementptr ([32 x i8]* @str2, i64 0, i64 0),
+ i8* getelementptr ([45 x i8]* @str3, i64 0, i64 0) }, section "llvm.metadata" ; <%llvm.dbg.compile_unit.type*> [#uses=1]
+@llvm.dbg.compile_units = linkonce constant %llvm.dbg.anchor.type { i32 327680, i32 17 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
+@str1 = internal constant [10 x i8] c"testb.cpp\00", section "llvm.metadata" ; <[10 x i8]*> [#uses=1]
+@str2 = internal constant [32 x i8] c"/Sources/Projects/DwarfTesting/\00", section "llvm.metadata" ; <[32 x i8]*> [#uses=1]
+@str3 = internal constant [45 x i8] c"4.0.1 LLVM (Apple Computer, Inc. build 5400)\00", section "llvm.metadata" ; <[45 x i8]*> [#uses=1]
+@str = internal constant [2 x i8] c"x\00", section "llvm.metadata" ; <[2 x i8]*> [#uses=1]
+@llvm.dbg.basictype = internal constant %llvm.dbg.basictype.type {
+ i32 327716,
+ { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
+ i8* getelementptr ([4 x i8]* @str4, i64 0, i64 0),
{ }* null,
- uint 0,
- ulong 32,
- ulong 32,
- ulong 0,
- uint 0,
- uint 5 }, section "llvm.metadata" ; <%llvm.dbg.basictype.type*> [#uses=1]
-%str = internal constant [4 x sbyte] c"int\00", section "llvm.metadata" ; <[4 x sbyte]*> [#uses=1]
-
-implementation ; Functions:
+ i32 0,
+ i64 32,
+ i64 32,
+ i64 0,
+ i32 0,
+ i32 5 }, section "llvm.metadata" ; <%llvm.dbg.basictype.type*> [#uses=1]
+@str4 = internal constant [4 x i8] c"int\00", section "llvm.metadata" ; <[4 x i8]*> [#uses=1]
diff --git a/test/CodeGen/X86/2006-07-20-InlineAsm.ll b/test/CodeGen/X86/2006-07-20-InlineAsm.ll
index 16ad579cce..08510a8a65 100644
--- a/test/CodeGen/X86/2006-07-20-InlineAsm.ll
+++ b/test/CodeGen/X86/2006-07-20-InlineAsm.ll
@@ -1,24 +1,23 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
; PR833
-%G = weak global int 0 ; <int*> [#uses=3]
+@G = weak global i32 0 ; <i32*> [#uses=3]
-implementation ; Functions:
-
-int %foo(int %X) {
+define i32 @foo(i32 %X) {
entry:
- %X_addr = alloca int ; <int*> [#uses=3]
- store int %X, int* %X_addr
- call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"( int* %G, int* %X_addr, int* %G, int %X )
- %tmp1 = load int* %X_addr ; <int> [#uses=1]
- ret int %tmp1
+ %X_addr = alloca i32 ; <i32*> [#uses=3]
+ store i32 %X, i32* %X_addr
+ call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"( i32* @G, i32* %X_addr, i32* @G, i32 %X )
+ %tmp1 = load i32* %X_addr ; <i32> [#uses=1]
+ ret i32 %tmp1
}
-int %foo2(int %X) {
+define i32 @foo2(i32 %X) {
entry:
- %X_addr = alloca int ; <int*> [#uses=3]
- store int %X, int* %X_addr
- call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"( int* %G, int* %X_addr, int %X )
- %tmp1 = load int* %X_addr ; <int> [#uses=1]
- ret int %tmp1
+ %X_addr = alloca i32 ; <i32*> [#uses=3]
+ store i32 %X, i32* %X_addr
+ call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"( i32* @G, i32* %X_addr, i32 %X )
+ %tmp1 = load i32* %X_addr ; <i32> [#uses=1]
+ ret i32 %tmp1
}
+
diff --git a/test/CodeGen/X86/2006-07-28-AsmPrint-Long-As-Pointer.ll b/test/CodeGen/X86/2006-07-28-AsmPrint-Long-As-Pointer.ll
index 26c71a312e..a82612b5a6 100644
--- a/test/CodeGen/X86/2006-07-28-AsmPrint-Long-As-Pointer.ll
+++ b/test/CodeGen/X86/2006-07-28-AsmPrint-Long-As-Pointer.ll
@@ -1,5 +1,5 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep -- 4294967240
+; RUN: llvm-as < %s | llc -march=x86 | grep -- 4294967240
; PR853
-%X = global int* cast (ulong 18446744073709551560 to int*)
+@X = global i32* inttoptr (i64 -56 to i32*) ; <i32**> [#uses=0]
diff --git a/test/CodeGen/X86/2006-07-31-SingleRegClass.ll b/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
index aa02bf7c96..561aff3307 100644
--- a/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
+++ b/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
@@ -1,11 +1,11 @@
; PR850
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=att | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=att | \
; RUN: grep {movl 4(%eax),%ebp}
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=att | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=att | \
; RUN: grep {movl 0(%eax), %ebx}
-int %foo(int %__s.i.i, int %tmp5.i.i, int %tmp6.i.i, int %tmp7.i.i, int %tmp8.i.i ) {
-
-%tmp9.i.i = call int asm sideeffect "push %ebp\0Apush %ebx\0Amovl 4($2),%ebp\0Amovl 0($2), %ebx\0Amovl $1,%eax\0Aint $$0x80\0Apop %ebx\0Apop %ebp", "={ax},i,0,{cx},{dx},{si},{di}"(int 192, int %__s.i.i, int %tmp5.i.i, int %tmp6.i.i, int %tmp7.i.i, int %tmp8.i.i )
- ret int %tmp9.i.i
+define i32 @foo(i32 %__s.i.i, i32 %tmp5.i.i, i32 %tmp6.i.i, i32 %tmp7.i.i, i32 %tmp8.i.i) {
+ %tmp9.i.i = call i32 asm sideeffect "push %ebp\0Apush %ebx\0Amovl 4($2),%ebp\0Amovl 0($2), %ebx\0Amovl $1,%eax\0Aint $$0x80\0Apop %ebx\0Apop %ebp", "={ax},i,0,{cx},{dx},{si},{di}"( i32 192, i32 %__s.i.i, i32 %tmp5.i.i, i32 %tmp6.i.i, i32 %tmp7.i.i, i32 %tmp8.i.i ) ; <i32> [#uses=1]
+ ret i32 %tmp9.i.i
}
+
diff --git a/test/CodeGen/X86/2006-10-09-CycleInDAG.ll b/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
index fbcc5cd078..d627d1bf21 100644
--- a/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
+++ b/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
@@ -1,10 +1,11 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86
+; RUN: llvm-as < %s | llc -march=x86
-void %_ZN13QFSFileEngine4readEPcx() {
- %tmp201 = load int* null
- %tmp201 = cast int %tmp201 to long
- %tmp202 = load long* null
- %tmp203 = add long %tmp201, %tmp202
- store long %tmp203, long* null
+define void @_ZN13QFSFileEngine4readEPcx() {
+ %tmp201 = load i32* null ; <i32> [#uses=1]
+ %tmp201.upgrd.1 = sext i32 %tmp201 to i64 ; <i64> [#uses=1]
+ %tmp202 = load i64* null ; <i64> [#uses=1]
+ %tmp203 = add i64 %tmp201.upgrd.1, %tmp202 ; <i64> [#uses=1]
+ store i64 %tmp203, i64* null
ret void
}
+
diff --git a/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll b/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
index 8baba8118c..5dc1cb3d9a 100644
--- a/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
+++ b/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
@@ -1,31 +1,28 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep shrl
+; RUN: llvm-as < %s | llc -march=x86 | grep shrl
 ; A bug in FindModifiedNodeSlot causes the tmp14 load to become a zextload, and the shr 31
 ; is then optimized away.
+@tree_code_type = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
-%tree_code_type = external global [0 x uint]
-
-void %copy_if_shared_r() {
- %tmp = load uint* null
- %tmp56 = and uint %tmp, 255
- %tmp8 = getelementptr [0 x uint]* %tree_code_type, int 0, uint %tmp56
- %tmp9 = load uint* %tmp8
- %tmp10 = add uint %tmp9, 4294967295
- %tmp = setgt uint %tmp10, 2
- %tmp14 = load uint* null
- %tmp15 = shr uint %tmp14, ubyte 31
- %tmp15 = cast uint %tmp15 to ubyte
- %tmp16 = setne ubyte %tmp15, 0
- br bool %tmp, label %cond_false25, label %cond_true
-
-cond_true:
- br bool %tmp16, label %cond_true17, label %cond_false
-
-cond_true17:
+define void @copy_if_shared_r() {
+ %tmp = load i32* null ; <i32> [#uses=1]
+ %tmp56 = and i32 %tmp, 255 ; <i32> [#uses=1]
+ %gep.upgrd.1 = zext i32 %tmp56 to i64 ; <i64> [#uses=1]
+ %tmp8 = getelementptr [0 x i32]* @tree_code_type, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %tmp9 = load i32* %tmp8 ; <i32> [#uses=1]
+ %tmp10 = add i32 %tmp9, -1 ; <i32> [#uses=1]
+ %tmp.upgrd.2 = icmp ugt i32 %tmp10, 2 ; <i1> [#uses=1]
+ %tmp14 = load i32* null ; <i32> [#uses=1]
+ %tmp15 = lshr i32 %tmp14, 31 ; <i32> [#uses=1]
+ %tmp15.upgrd.3 = trunc i32 %tmp15 to i8 ; <i8> [#uses=1]
+ %tmp16 = icmp ne i8 %tmp15.upgrd.3, 0 ; <i1> [#uses=1]
+ br i1 %tmp.upgrd.2, label %cond_false25, label %cond_true
+cond_true: ; preds = %0
+ br i1 %tmp16, label %cond_true17, label %cond_false
+cond_true17: ; preds = %cond_true
ret void
-
-cond_false:
+cond_false: ; preds = %cond_true
ret void
-
-cond_false25:
+cond_false25: ; preds = %0
ret void
}
+
diff --git a/test/CodeGen/X86/2006-11-12-CSRetCC.ll b/test/CodeGen/X86/2006-11-12-CSRetCC.ll
index 3917c099d3..1a92852f06 100644
--- a/test/CodeGen/X86/2006-11-12-CSRetCC.ll
+++ b/test/CodeGen/X86/2006-11-12-CSRetCC.ll
@@ -1,62 +1,59 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep {subl \$4, %esp}
+; RUN: llvm-as < %s | llc -march=x86 | grep {subl \$4, %esp}
target triple = "i686-pc-linux-gnu"
+@str = internal constant [9 x i8] c"%f+%f*i\0A\00" ; <[9 x i8]*> [#uses=1]
-%str = internal constant [9 x sbyte] c"%f+%f*i\0A\00" ; <[9 x sbyte]*> [#uses=1]
-
-implementation ; Functions:
-
-int %main() {
+define i32 @main() {
entry:
- %retval = alloca int, align 4 ; <int*> [#uses=1]
- %tmp = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
- %tmp1 = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
- %tmp2 = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=3]
- %pi = alloca double, align 8 ; <double*> [#uses=2]
- %z = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
- "alloca point" = cast int 0 to int ; <int> [#uses=0]
- store double 0x400921FB54442D18, double* %pi
- %tmp = load double* %pi ; <double> [#uses=1]
- %real = getelementptr { double, double }* %tmp1, uint 0, uint 0 ; <double*> [#uses=1]
- store double 0.000000e+00, double* %real
- %real3 = getelementptr { double, double }* %tmp1, uint 0, uint 1 ; <double*> [#uses=1]
- store double %tmp, double* %real3
- %tmp = getelementptr { double, double }* %tmp, uint 0, uint 0 ; <double*> [#uses=1]
- %tmp4 = getelementptr { double, double }* %tmp1, uint 0, uint 0 ; <double*> [#uses=1]
- %tmp5 = load double* %tmp4 ; <double> [#uses=1]
- store double %tmp5, double* %tmp
- %tmp6 = getelementptr { double, double }* %tmp, uint 0, uint 1 ; <double*> [#uses=1]
- %tmp7 = getelementptr { double, double }* %tmp1, uint 0, uint 1 ; <double*> [#uses=1]
- %tmp8 = load double* %tmp7 ; <double> [#uses=1]
- store double %tmp8, double* %tmp6
- %tmp = cast { double, double }* %tmp to { long, long }* ; <{ long, long }*> [#uses=1]
- %tmp = getelementptr { long, long }* %tmp, uint 0, uint 0 ; <long*> [#uses=1]
- %tmp = load long* %tmp ; <long> [#uses=1]
- %tmp9 = cast { double, double }* %tmp to { long, long }* ; <{ long, long }*> [#uses=1]
- %tmp10 = getelementptr { long, long }* %tmp9, uint 0, uint 1 ; <long*> [#uses=1]
- %tmp11 = load long* %tmp10 ; <long> [#uses=1]
- call csretcc void %cexp( { double, double }* %tmp2, long %tmp, long %tmp11 )
- %tmp12 = getelementptr { double, double }* %z, uint 0, uint 0 ; <double*> [#uses=1]
- %tmp13 = getelementptr { double, double }* %tmp2, uint 0, uint 0 ; <double*> [#uses=1]
- %tmp14 = load double* %tmp13 ; <double> [#uses=1]
- store double %tmp14, double* %tmp12
- %tmp15 = getelementptr { double, double }* %z, uint 0, uint 1 ; <double*> [#uses=1]
- %tmp16 = getelementptr { double, double }* %tmp2, uint 0, uint 1 ; <double*> [#uses=1]
- %tmp17 = load double* %tmp16 ; <double> [#uses=1]
- store double %tmp17, double* %tmp15
- %tmp18 = getelementptr { double, double }* %z, uint 0, uint 1 ; <double*> [#uses=1]
- %tmp19 = load double* %tmp18 ; <double> [#uses=1]
- %tmp20 = getelementptr { double, double }* %z, uint 0, uint 0 ; <double*> [#uses=1]
- %tmp21 = load double* %tmp20 ; <double> [#uses=1]
- %tmp = getelementptr [9 x sbyte]* %str, int 0, uint 0 ; <sbyte*> [#uses=1]
- %tmp = call int (sbyte*, ...)* %printf( sbyte* %tmp, double %tmp21, double %tmp19 ) ; <int> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- %retval = load int* %retval ; <int> [#uses=1]
- ret int %retval
+ %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ %tmp = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
+ %tmp1 = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
+ %tmp2 = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=3]
+ %pi = alloca double, align 8 ; <double*> [#uses=2]
+ %z = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store double 0x400921FB54442D18, double* %pi
+ %tmp.upgrd.1 = load double* %pi ; <double> [#uses=1]
+ %real = getelementptr { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
+ store double 0.000000e+00, double* %real
+ %real3 = getelementptr { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
+ store double %tmp.upgrd.1, double* %real3
+ %tmp.upgrd.2 = getelementptr { double, double }* %tmp, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp4 = getelementptr { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp5 = load double* %tmp4 ; <double> [#uses=1]
+ store double %tmp5, double* %tmp.upgrd.2
+ %tmp6 = getelementptr { double, double }* %tmp, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp7 = getelementptr { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp8 = load double* %tmp7 ; <double> [#uses=1]
+ store double %tmp8, double* %tmp6
+ %tmp.upgrd.3 = bitcast { double, double }* %tmp to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
+ %tmp.upgrd.4 = getelementptr { i64, i64 }* %tmp.upgrd.3, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.upgrd.5 = load i64* %tmp.upgrd.4 ; <i64> [#uses=1]
+ %tmp9 = bitcast { double, double }* %tmp to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
+ %tmp10 = getelementptr { i64, i64 }* %tmp9, i64 0, i32 1 ; <i64*> [#uses=1]
+ %tmp11 = load i64* %tmp10 ; <i64> [#uses=1]
+ call void @cexp( { double, double }* sret %tmp2, i64 %tmp.upgrd.5, i64 %tmp11 )
+ %tmp12 = getelementptr { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp13 = getelementptr { double, double }* %tmp2, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp14 = load double* %tmp13 ; <double> [#uses=1]
+ store double %tmp14, double* %tmp12
+ %tmp15 = getelementptr { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp16 = getelementptr { double, double }* %tmp2, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp17 = load double* %tmp16 ; <double> [#uses=1]
+ store double %tmp17, double* %tmp15
+ %tmp18 = getelementptr { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp19 = load double* %tmp18 ; <double> [#uses=1]
+ %tmp20 = getelementptr { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp21 = load double* %tmp20 ; <double> [#uses=1]
+ %tmp.upgrd.6 = getelementptr [9 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
+ %tmp.upgrd.7 = call i32 (i8*, ...)* @printf( i8* %tmp.upgrd.6, double %tmp21, double %tmp19 ) ; <i32> [#uses=0]
+ br label %return
+return: ; preds = %entry
+ %retval.upgrd.8 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval.upgrd.8
}
-declare csretcc void %cexp({ double, double }*, long, long)
+declare void @cexp({ double, double }* sret , i64, i64)
+
+declare i32 @printf(i8*, ...)
-declare int %printf(sbyte*, ...)
diff --git a/test/CodeGen/X86/store_op_load_fold2.ll b/test/CodeGen/X86/store_op_load_fold2.ll
index a7d719ee2e..09aaba155d 100644
--- a/test/CodeGen/X86/store_op_load_fold2.ll
+++ b/test/CodeGen/X86/store_op_load_fold2.ll
@@ -1,43 +1,34 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
; RUN: grep {and DWORD PTR} | count 2
-target endian = little
-target pointersize = 32
+target datalayout = "e-p:32:32"
+ %struct.Macroblock = type { i32, i32, i32, i32, i32, [8 x i32], %struct.Macroblock*, %struct.Macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], [16 x i8], [16 x i8], i32, i64, [4 x i32], [4 x i32], i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, double, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.Macroblock = type { int, int, int, int, int, [8 x int], %struct.Macroblock*, %struct.Macroblock*, int, [2 x [4 x [4 x [2 x int]]]], [16 x sbyte], [16 x sbyte], int, long, [4 x int], [4 x int], long, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, short, double, int, int, int, int, int, int, int, int, int }
-
-implementation ; Functions:
-
-internal fastcc int %dct_chroma(int %uv, int %cr_cbp) {
+define internal fastcc i32 @dct_chroma(i32 %uv, i32 %cr_cbp) {
entry:
- br bool true, label %cond_true2732.preheader, label %cond_true129
-
-cond_true129: ; preds = %entry
- ret int 0
-
-cond_true2732.preheader: ; preds = %bb2611
- %tmp2666 = getelementptr %struct.Macroblock* null, int 0, uint 13 ; <long*> [#uses=2]
- %tmp2674 = cast int 0 to ubyte ; <ubyte> [#uses=1]
- br bool true, label %cond_true2732.preheader.split.us, label %cond_true2732.preheader.split
-
-cond_true2732.preheader.split.us: ; preds = %cond_true2732.preheader
- br bool true, label %cond_true2732.outer.us.us, label %cond_true2732.outer.us
-
-cond_true2732.outer.us.us: ; preds = %cond_true2732.preheader.split.us
- %tmp2667.us.us = load long* %tmp2666 ; <long> [#uses=1]
- %tmp2670.us.us = load long* null ; <long> [#uses=1]
- %tmp2675.us.us = shl long %tmp2670.us.us, ubyte %tmp2674 ; <long> [#uses=1]
- %tmp2675not.us.us = xor long %tmp2675.us.us, -1 ; <long> [#uses=1]
- %tmp2676.us.us = and long %tmp2667.us.us, %tmp2675not.us.us ; <long> [#uses=1]
- store long %tmp2676.us.us, long* %tmp2666
- ret int 0
-
-cond_true2732.outer.us: ; preds = %cond_true2732.preheader.split.us
- ret int 0
-
-cond_true2732.preheader.split: ; preds = %cond_true2732.preheader
- ret int 0
-
-cond_next2752: ; preds = %bb2611
- ret int 0
+ br i1 true, label %cond_true2732.preheader, label %cond_true129
+cond_true129: ; preds = %entry
+ ret i32 0
+cond_true2732.preheader: ; preds = %entry
+ %tmp2666 = getelementptr %struct.Macroblock* null, i32 0, i32 13 ; <i64*> [#uses=2]
+ %tmp2674 = trunc i32 0 to i8 ; <i8> [#uses=1]
+ br i1 true, label %cond_true2732.preheader.split.us, label %cond_true2732.preheader.split
+cond_true2732.preheader.split.us: ; preds = %cond_true2732.preheader
+ br i1 true, label %cond_true2732.outer.us.us, label %cond_true2732.outer.us
+cond_true2732.outer.us.us: ; preds = %cond_true2732.preheader.split.us
+ %tmp2667.us.us = load i64* %tmp2666 ; <i64> [#uses=1]
+ %tmp2670.us.us = load i64* null ; <i64> [#uses=1]
+ %shift.upgrd.1 = zext i8 %tmp2674 to i64 ; <i64> [#uses=1]
+ %tmp2675.us.us = shl i64 %tmp2670.us.us, %shift.upgrd.1 ; <i64> [#uses=1]
+ %tmp2675not.us.us = xor i64 %tmp2675.us.us, -1 ; <i64> [#uses=1]
+ %tmp2676.us.us = and i64 %tmp2667.us.us, %tmp2675not.us.us ; <i64> [#uses=1]
+ store i64 %tmp2676.us.us, i64* %tmp2666
+ ret i32 0
+cond_true2732.outer.us: ; preds = %cond_true2732.preheader.split.us
+ ret i32 0
+cond_true2732.preheader.split: ; preds = %cond_true2732.preheader
+ ret i32 0
+cond_next2752: ; No predecessors!
+ ret i32 0
}
+