author     Richard Sandiford <rsandifo@linux.vnet.ibm.com>  2013-10-28 13:53:37 +0000
committer  Richard Sandiford <rsandifo@linux.vnet.ibm.com>  2013-10-28 13:53:37 +0000
commit     349baa60393c637f479ee31b25c65555b095a2ef (patch)
tree       80faee69092f3ce2360fd840c792a186dd74cfbc /test/CodeGen/SystemZ
parent     a7be36c8eb4717e44b05e00008544b883fc87de9 (diff)
[SystemZ] Set useAA to true
useAA significantly improves the handling of vector code that has TBAA information attached. It also helps other cases, as shown by the testsuite changes here.

The only real downside I've seen is that it interferes with MergeConsecutiveStores. The problem is that that optimization works top down, starting at the first store in the chain, and looks for cases where the chain result is only used by a single related store. These related stores don't alias, so useAA will have rewritten all the later stores to use a different chain input (typically the same one as the first store).

I think the advantages outweigh the disadvantages though, so for now I've just disabled alias analysis for the unaligned-01.ll test.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193521 91177308-0d34-0410-b5e6-96231b3b80d8
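For reference, the kind of pattern MergeConsecutiveStores targets is what unaligned-01.ll's f1 checks: four adjacent byte stores that should collapse into one word store. The sketch below is a hypothetical reduction in the era's typed-pointer IR syntax, not the verbatim test body; the stored values and offsets are illustrative only.

; Hypothetical reduction of the unaligned-01.ll f1 case: four consecutive
; i8 stores that the DAG combiner should merge into a single 32-bit store.
define void @f1(i8 *%ptr) {
  %off1 = getelementptr i8 *%ptr, i64 1
  %off2 = getelementptr i8 *%ptr, i64 2
  %off3 = getelementptr i8 *%ptr, i64 3
  store i8 0, i8 *%ptr
  store i8 1, i8 *%off1
  store i8 2, i8 *%off2
  store i8 3, i8 *%off3
  ret void
}

With useAA, later stores in such a chain no longer depend on the earlier ones, which is why the top-down scan in MergeConsecutiveStores can miss the group unless alias analysis is disabled for the test.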
Diffstat (limited to 'test/CodeGen/SystemZ')
-rw-r--r--  test/CodeGen/SystemZ/alias-01.ll     |  3 +--
-rw-r--r--  test/CodeGen/SystemZ/bswap-02.ll     |  3 +--
-rw-r--r--  test/CodeGen/SystemZ/int-add-09.ll   |  8 ++++----
-rw-r--r--  test/CodeGen/SystemZ/int-add-10.ll   | 18 +++++++++---------
-rw-r--r--  test/CodeGen/SystemZ/unaligned-01.ll |  5 ++++-
5 files changed, 19 insertions(+), 18 deletions(-)
diff --git a/test/CodeGen/SystemZ/alias-01.ll b/test/CodeGen/SystemZ/alias-01.ll
index e00d3c8b0a..8839aade7a 100644
--- a/test/CodeGen/SystemZ/alias-01.ll
+++ b/test/CodeGen/SystemZ/alias-01.ll
@@ -1,7 +1,6 @@
; Test 32-bit ANDs in which the second operand is variable.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -combiner-alias-analysis \
-; RUN: -combiner-global-alias-analysis | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
; Check that there are no spills.
define void @f1(<16 x i32> *%src1, <16 x float> *%dest) {
diff --git a/test/CodeGen/SystemZ/bswap-02.ll b/test/CodeGen/SystemZ/bswap-02.ll
index 4bb49ebdda..db69ea53df 100644
--- a/test/CodeGen/SystemZ/bswap-02.ll
+++ b/test/CodeGen/SystemZ/bswap-02.ll
@@ -1,7 +1,6 @@
; Test 32-bit byteswaps from memory to registers.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -combiner-alias-analysis \
-; RUN: -combiner-global-alias-analysis | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
declare i32 @llvm.bswap.i32(i32 %a)
diff --git a/test/CodeGen/SystemZ/int-add-09.ll b/test/CodeGen/SystemZ/int-add-09.ll
index 717fed00e2..fd151a7f97 100644
--- a/test/CodeGen/SystemZ/int-add-09.ll
+++ b/test/CodeGen/SystemZ/int-add-09.ll
@@ -7,7 +7,7 @@
define void @f1(i128 *%aptr) {
; CHECK-LABEL: f1:
; CHECK: algfi {{%r[0-5]}}, 1
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 128
@@ -20,7 +20,7 @@ define void @f1(i128 *%aptr) {
define void @f2(i128 *%aptr) {
; CHECK-LABEL: f2:
; CHECK: algfi {{%r[0-5]}}, 4294967295
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 128
@@ -33,7 +33,7 @@ define void @f2(i128 *%aptr) {
define void @f3(i128 *%aptr) {
; CHECK-LABEL: f3:
; CHECK: algr
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 128
@@ -46,7 +46,7 @@ define void @f3(i128 *%aptr) {
define void @f4(i128 *%aptr) {
; CHECK-LABEL: f4:
; CHECK: algr
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 128
diff --git a/test/CodeGen/SystemZ/int-add-10.ll b/test/CodeGen/SystemZ/int-add-10.ll
index 66a275bc6c..01d0a661ed 100644
--- a/test/CodeGen/SystemZ/int-add-10.ll
+++ b/test/CodeGen/SystemZ/int-add-10.ll
@@ -7,7 +7,7 @@
define void @f1(i128 *%aptr, i32 %b) {
; CHECK-LABEL: f1:
; CHECK: algfr {{%r[0-5]}}, %r3
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
@@ -21,7 +21,7 @@ define void @f1(i128 *%aptr, i32 %b) {
define void @f2(i128 *%aptr, i64 %b) {
; CHECK-LABEL: f2:
; CHECK: algfr {{%r[0-5]}}, %r3
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
@@ -37,7 +37,7 @@ define void @f2(i128 *%aptr, i64 %b) {
define void @f3(i128 *%aptr, i64 %b) {
; CHECK-LABEL: f3:
; CHECK: algfr {{%r[0-5]}}, %r3
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
@@ -52,7 +52,7 @@ define void @f3(i128 *%aptr, i64 %b) {
define void @f4(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f4:
; CHECK: algf {{%r[0-5]}}, 0(%r3)
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
@@ -67,7 +67,7 @@ define void @f4(i128 *%aptr, i32 *%bsrc) {
define void @f5(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f5:
; CHECK: algf {{%r[0-5]}}, 524284(%r3)
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
@@ -85,7 +85,7 @@ define void @f6(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f6:
; CHECK: agfi %r3, 524288
; CHECK: algf {{%r[0-5]}}, 0(%r3)
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
@@ -101,7 +101,7 @@ define void @f6(i128 *%aptr, i32 *%bsrc) {
define void @f7(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f7:
; CHECK: algf {{%r[0-5]}}, -4(%r3)
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
@@ -117,7 +117,7 @@ define void @f7(i128 *%aptr, i32 *%bsrc) {
define void @f8(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f8:
; CHECK: algf {{%r[0-5]}}, -524288(%r3)
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
@@ -135,7 +135,7 @@ define void @f9(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f9:
; CHECK: agfi %r3, -524292
; CHECK: algf {{%r[0-5]}}, 0(%r3)
-; CHECK: alcgr
+; CHECK: alcg
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
diff --git a/test/CodeGen/SystemZ/unaligned-01.ll b/test/CodeGen/SystemZ/unaligned-01.ll
index 621069d239..526a068100 100644
--- a/test/CodeGen/SystemZ/unaligned-01.ll
+++ b/test/CodeGen/SystemZ/unaligned-01.ll
@@ -1,7 +1,10 @@
; Check that unaligned accesses are allowed in general. We check the
; few exceptions (like CRL) in their respective test files.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; FIXME: -combiner-alias-analysis (the default for SystemZ) stops
+; f1 from being optimized.
+; RUN: llc < %s -mtriple=s390x-linux-gnu -combiner-alias-analysis=false \
+; RUN: | FileCheck %s
; Check that these four byte stores become a single word store.
define void @f1(i8 *%ptr) {