author     Hal Finkel <hfinkel@anl.gov>   2011-11-22 16:21:04 +0000
committer  Hal Finkel <hfinkel@anl.gov>   2011-11-22 16:21:04 +0000
commit     768c65f677af3f05c2e94982043f90a1bfaceda5 (patch)
tree       4e16ba3e65d2a908780a7c290cc12512b95de8e7 /test
parent     796c193768547459cd6cbd667c8a43fedd601022 (diff)
add basic PPC register-pressure feedback; adjust the vaarg test to match the new register-allocation pattern
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145065 91177308-0d34-0410-b5e6-96231b3b80d8
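
Note: the register-pressure feedback itself lives in the PPC backend sources and is not part of this diff, which is limited to 'test'. As a rough illustration only (not the code of this commit), a target typically feeds pressure information back to the scheduler by overriding TargetRegisterInfo::getRegPressureLimit; the sketch below uses the real PPCRegisterInfo class and PPC register-class IDs, but the limit values shown are placeholder assumptions.

    unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
      // Illustrative sketch, not this commit's code: report how many registers
      // of each class the scheduler may assume are available before spilling.
      switch (RC->getID()) {
      default:
        return 0;                   // 0 = no pressure feedback for this class
      case PPC::GPRCRegClassID:     // 32-bit general-purpose registers
        return 32 - 5;              // placeholder: assume a handful are reserved
      case PPC::F8RCRegClassID:     // floating-point registers
        return 32;                  // placeholder: all FPRs assumed allocatable
      }
    }
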
Diffstat (limited to 'test')
-rw-r--r--   test/CodeGen/PowerPC/ppc32-vaarg.ll | 182
1 file changed, 90 insertions, 92 deletions
diff --git a/test/CodeGen/PowerPC/ppc32-vaarg.ll b/test/CodeGen/PowerPC/ppc32-vaarg.ll
index 393800b434..725c106dd6 100644
--- a/test/CodeGen/PowerPC/ppc32-vaarg.ll
+++ b/test/CodeGen/PowerPC/ppc32-vaarg.ll
@@ -12,10 +12,9 @@ target triple = "powerpc-unknown-freebsd9.0"
define void @ppcvaargtest(%struct.__va_list_tag* %ap) nounwind {
entry:
%x = va_arg %struct.__va_list_tag* %ap, i64; Get from r5,r6
-; CHECK: lbz 4, 0(3)
-; CHECK-NEXT: rlwinm 5, 4, 0, 31, 31
-; CHECK-NEXT: cmplwi 0, 5, 0
-; CHECK-NEXT: addi 5, 4, 1
+; CHECK: addi 5, 4, 1
+; CHECK-NEXT: rlwinm 6, 4, 0, 31, 31
+; CHECK-NEXT: cmplwi 0, 6, 0
; CHECK-NEXT: stw 3, -4(1)
; CHECK-NEXT: stw 5, -8(1)
; CHECK-NEXT: stw 4, -12(1)
@@ -25,138 +24,137 @@ define void @ppcvaargtest(%struct.__va_list_tag* %ap) nounwind {
; CHECK-NEXT: stw 3, -8(1)
; CHECK-NEXT: .LBB0_2: # %entry
; CHECK-NEXT: lwz 3, -8(1)
-; CHECK-NEXT: slwi 4, 3, 2
+; CHECK-NEXT: addi 4, 3, 2
; CHECK-NEXT: lwz 5, -4(1)
; CHECK-NEXT: lwz 6, 4(5)
; CHECK-NEXT: lwz 7, 8(5)
-; CHECK-NEXT: add 4, 7, 4
+; CHECK-NEXT: stb 4, 0(5)
; CHECK-NEXT: cmpwi 0, 3, 8
+; CHECK-NEXT: addi 4, 6, 4
+; CHECK-NEXT: mr 8, 6
+; CHECK-NEXT: stw 7, -16(1)
+; CHECK-NEXT: stw 4, -20(1)
+; CHECK-NEXT: stw 3, -24(1)
+; CHECK-NEXT: stw 8, -28(1)
+; CHECK-NEXT: stw 6, -32(1)
; CHECK-NEXT: mfcr 0 # cr0
-; CHECK-NEXT: stw 0, -16(1)
-; CHECK-NEXT: stw 3, -20(1)
-; CHECK-NEXT: stw 4, -24(1)
-; CHECK-NEXT: stw 6, -28(1)
+; CHECK-NEXT: stw 0, -36(1)
; CHECK-NEXT: blt 0, .LBB0_4
; CHECK-NEXT: # BB#3: # %entry
-; CHECK-NEXT: lwz 3, -28(1)
-; CHECK-NEXT: stw 3, -24(1)
+; CHECK-NEXT: lwz 3, -20(1)
+; CHECK-NEXT: stw 3, -28(1)
; CHECK-NEXT: .LBB0_4: # %entry
+; CHECK-NEXT: lwz 3, -28(1)
+; CHECK-NEXT: lwz 4, -4(1)
+; CHECK-NEXT: stw 3, 4(4)
+ store i64 %x, i64* @var1, align 8
; CHECK-NEXT: lwz 3, -24(1)
-; CHECK-NEXT: lwz 4, -28(1)
-; CHECK-NEXT: addi 5, 4, 4
-; CHECK-NEXT: lwz 0, -16(1)
+; CHECK-NEXT: slwi 5, 3, 2
+; CHECK-NEXT: lwz 6, -16(1)
+; CHECK-NEXT: add 5, 6, 5
+; CHECK-NEXT: lwz 0, -36(1)
; CHECK-NEXT: mtcrf 128, 0
-; CHECK-NEXT: stw 4, -32(1)
-; CHECK-NEXT: stw 5, -36(1)
-; CHECK-NEXT: stw 3, -40(1)
+; CHECK-NEXT: stw 5, -40(1)
; CHECK-NEXT: blt 0, .LBB0_6
; CHECK-NEXT: # BB#5: # %entry
-; CHECK-NEXT: lwz 3, -36(1)
-; CHECK-NEXT: stw 3, -32(1)
-; CHECK-NEXT: .LBB0_6: # %entry
; CHECK-NEXT: lwz 3, -32(1)
-; CHECK-NEXT: lwz 4, -20(1)
-; CHECK-NEXT: addi 5, 4, 2
-; CHECK-NEXT: lwz 6, -4(1)
-; CHECK-NEXT: stb 5, 0(6)
-; CHECK-NEXT: stw 3, 4(6)
- store i64 %x, i64* @var1, align 8
+; CHECK-NEXT: stw 3, -40(1)
+; CHECK-NEXT: .LBB0_6: # %entry
; CHECK-NEXT: lwz 3, -40(1)
-; CHECK-NEXT: lwz 5, 0(3)
-; CHECK-NEXT: lwz 7, 4(3)
-; CHECK-NEXT: lis 8, var1@ha
-; CHECK-NEXT: la 9, var1@l(8)
-; CHECK-NEXT: stw 7, 4(9)
-; CHECK-NEXT: stw 5, var1@l(8)
+; CHECK-NEXT: lwz 4, 0(3)
+; CHECK-NEXT: lwz 3, 4(3)
+; CHECK-NEXT: lis 5, var1@ha
+; CHECK-NEXT: la 6, var1@l(5)
+; CHECK-NEXT: stw 3, 4(6)
+; CHECK-NEXT: stw 4, var1@l(5)
+; CHECK-NEXT: lwz 3, -4(1)
%y = va_arg %struct.__va_list_tag* %ap, double; From f1
-; CHECK-NEXT: lbz 5, 1(6)
-; CHECK-NEXT: lwz 7, 4(6)
-; CHECK-NEXT: lwz 8, 8(6)
-; CHECK-NEXT: slwi 9, 5, 3
-; CHECK-NEXT: add 8, 8, 9
-; CHECK-NEXT: cmpwi 0, 5, 8
-; CHECK-NEXT: addi 9, 7, 8
-; CHECK-NEXT: mr 10, 7
-; CHECK-NEXT: stw 9, -44(1)
+; CHECK-NEXT: lbz 4, 1(3)
+; CHECK-NEXT: lwz 5, 4(3)
+; CHECK-NEXT: lwz 6, 8(3)
+; CHECK-NEXT: addi 7, 4, 1
+; CHECK-NEXT: stb 7, 1(3)
+; CHECK-NEXT: cmpwi 0, 4, 8
+; CHECK-NEXT: addi 7, 5, 8
+; CHECK-NEXT: mr 8, 5
+; CHECK-NEXT: stw 5, -44(1)
; CHECK-NEXT: stw 7, -48(1)
+; CHECK-NEXT: stw 4, -52(1)
+; CHECK-NEXT: stw 6, -56(1)
+; CHECK-NEXT: stw 8, -60(1)
; CHECK-NEXT: mfcr 0 # cr0
-; CHECK-NEXT: stw 0, -52(1)
-; CHECK-NEXT: stw 5, -56(1)
-; CHECK-NEXT: stw 10, -60(1)
-; CHECK-NEXT: stw 8, -64(1)
+; CHECK-NEXT: stw 0, -64(1)
; CHECK-NEXT: blt 0, .LBB0_8
; CHECK-NEXT: # BB#7: # %entry
-; CHECK-NEXT: lwz 3, -44(1)
+; CHECK-NEXT: lwz 3, -48(1)
; CHECK-NEXT: stw 3, -60(1)
; CHECK-NEXT: .LBB0_8: # %entry
; CHECK-NEXT: lwz 3, -60(1)
-; CHECK-NEXT: lwz 4, -64(1)
-; CHECK-NEXT: addi 4, 4, 32
-; CHECK-NEXT: lwz 0, -52(1)
+; CHECK-NEXT: lwz 4, -4(1)
+; CHECK-NEXT: stw 3, 4(4)
+; CHECK-NEXT: lwz 3, -52(1)
+; CHECK-NEXT: slwi 5, 3, 3
+; CHECK-NEXT: lwz 6, -56(1)
+; CHECK-NEXT: add 5, 6, 5
+; CHECK-NEXT: addi 5, 5, 32
+; CHECK-NEXT: lwz 0, -64(1)
; CHECK-NEXT: mtcrf 128, 0
-; CHECK-NEXT: stw 4, -68(1)
-; CHECK-NEXT: stw 3, -72(1)
+; CHECK-NEXT: stw 5, -68(1)
; CHECK-NEXT: blt 0, .LBB0_10
; CHECK-NEXT: # BB#9: # %entry
-; CHECK-NEXT: lwz 3, -48(1)
+; CHECK-NEXT: lwz 3, -44(1)
; CHECK-NEXT: stw 3, -68(1)
; CHECK-NEXT: .LBB0_10: # %entry
; CHECK-NEXT: lwz 3, -68(1)
-; CHECK-NEXT: lwz 4, -56(1)
-; CHECK-NEXT: addi 5, 4, 1
-; CHECK-NEXT: lwz 6, -4(1)
-; CHECK-NEXT: stb 5, 1(6)
-; CHECK-NEXT: lwz 5, -72(1)
-; CHECK-NEXT: stw 5, 4(6)
; CHECK-NEXT: lfd 0, 0(3)
store double %y, double* @var2, align 8
; CHECK-NEXT: lis 3, var2@ha
; CHECK-NEXT: stfd 0, var2@l(3)
%z = va_arg %struct.__va_list_tag* %ap, i32; From r7
-; CHECK-NEXT: lbz 3, 0(6)
-; CHECK-NEXT: lwz 5, 4(6)
-; CHECK-NEXT: lwz 7, 8(6)
-; CHECK-NEXT: slwi 8, 3, 2
-; CHECK-NEXT: add 7, 7, 8
-; CHECK-NEXT: cmpwi 0, 3, 8
-; CHECK-NEXT: addi 8, 5, 4
-; CHECK-NEXT: mr 9, 5
-; CHECK-NEXT: stw 3, -76(1)
-; CHECK-NEXT: stw 7, -80(1)
-; CHECK-NEXT: stw 8, -84(1)
-; CHECK-NEXT: stw 5, -88(1)
-; CHECK-NEXT: stw 9, -92(1)
+; CHECK-NEXT: lwz 3, -4(1)
+; CHECK-NEXT: lbz 4, 0(3)
+; CHECK-NEXT: lwz 5, 4(3)
+; CHECK-NEXT: lwz 6, 8(3)
+; CHECK-NEXT: addi 7, 4, 1
+; CHECK-NEXT: stb 7, 0(3)
+; CHECK-NEXT: cmpwi 0, 4, 8
+; CHECK-NEXT: addi 7, 5, 4
+; CHECK-NEXT: mr 8, 5
+; CHECK-NEXT: stw 4, -72(1)
+; CHECK-NEXT: stw 6, -76(1)
; CHECK-NEXT: mfcr 0 # cr0
-; CHECK-NEXT: stw 0, -96(1)
+; CHECK-NEXT: stw 0, -80(1)
+; CHECK-NEXT: stw 5, -84(1)
+; CHECK-NEXT: stw 8, -88(1)
+; CHECK-NEXT: stw 7, -92(1)
; CHECK-NEXT: blt 0, .LBB0_12
; CHECK-NEXT: # BB#11: # %entry
-; CHECK-NEXT: lwz 3, -84(1)
-; CHECK-NEXT: stw 3, -92(1)
-; CHECK-NEXT: .LBB0_12: # %entry
; CHECK-NEXT: lwz 3, -92(1)
-; CHECK-NEXT: lwz 4, -80(1)
-; CHECK-NEXT: lwz 0, -96(1)
+; CHECK-NEXT: stw 3, -88(1)
+; CHECK-NEXT: .LBB0_12: # %entry
+; CHECK-NEXT: lwz 3, -88(1)
+; CHECK-NEXT: lwz 4, -4(1)
+; CHECK-NEXT: stw 3, 4(4)
+; CHECK-NEXT: lwz 3, -72(1)
+; CHECK-NEXT: slwi 5, 3, 2
+; CHECK-NEXT: lwz 6, -76(1)
+; CHECK-NEXT: add 5, 6, 5
+; CHECK-NEXT: lwz 0, -80(1)
; CHECK-NEXT: mtcrf 128, 0
-; CHECK-NEXT: stw 3, -100(1)
-; CHECK-NEXT: stw 4, -104(1)
+; CHECK-NEXT: stw 5, -96(1)
; CHECK-NEXT: blt 0, .LBB0_14
; CHECK-NEXT: # BB#13: # %entry
-; CHECK-NEXT: lwz 3, -88(1)
-; CHECK-NEXT: stw 3, -104(1)
+; CHECK-NEXT: lwz 3, -84(1)
+; CHECK-NEXT: stw 3, -96(1)
; CHECK-NEXT: .LBB0_14: # %entry
-; CHECK-NEXT: lwz 3, -104(1)
-; CHECK-NEXT: lwz 4, -76(1)
-; CHECK-NEXT: addi 5, 4, 1
-; CHECK-NEXT: lwz 6, -4(1)
-; CHECK-NEXT: stb 5, 0(6)
-; CHECK-NEXT: lwz 5, -100(1)
-; CHECK-NEXT: stw 5, 4(6)
+; CHECK-NEXT: lwz 3, -96(1)
; CHECK-NEXT: lwz 3, 0(3)
store i32 %z, i32* @var3, align 4
-; CHECK-NEXT: lis 5, var3@ha
-; CHECK-NEXT: stw 3, var3@l(5)
+; CHECK-NEXT: lis 4, var3@ha
+; CHECK-NEXT: stw 3, var3@l(4)
+; CHECK-NEXT: lwz 3, -4(1)
ret void
-; CHECK-NEXT: stw 6, -108(1)
+; CHECK-NEXT: stw 3, -100(1)
; CHECK-NEXT: blr
}