author     Ulrich Weigand <ulrich.weigand@de.ibm.com>  2013-03-19 19:52:04 +0000
committer  Ulrich Weigand <ulrich.weigand@de.ibm.com>  2013-03-19 19:52:04 +0000
commit     5882e3d82831710a7ea1fe8de4813350d4eecf05
tree       7f60f5158ec2ad7b5e6ed8e76bc784b122e6ab5f
parent     ec8d1a5b72b1cb2d230ba52b25a017231393b182
Rewrite pre-increment store patterns to use standard memory operands.
Currently, pre-increment store patterns are written to use two separate operands to represent address base and displacement:

    stwu $rS, $ptroff($ptrreg)

This causes problems when implementing the assembler parser, so this commit changes the patterns to use standard (complex) memory operands, as in all other memory access instruction patterns:

    stwu $rS, $dst

To still match those instructions against the appropriate pre_store SelectionDAG nodes, the patch uses the new feature that allows a Pat to match multiple DAG operands against a single (complex) instruction operand.

Approved by Hal Finkel.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@177429 91177308-0d34-0410-b5e6-96231b3b80d8
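To illustrate what the commit message describes, here is a hedged sketch of the before/after shape of such a definition in PPCInstrInfo.td. The record and operand names (DForm_1, ptr_rc, memri, iaddroff, pre_store) follow PowerPC backend conventions, but the snippet is illustrative rather than a verbatim excerpt of the patch:

    // Before (illustrative): base and displacement as two separate
    // instruction operands, spelled out in the assembly string.
    //   def STWU : DForm_1<37, (outs ptr_rc:$ea_res),
    //                      (ins gprc:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg),
    //                      "stwu $rS, $ptroff($ptrreg)", LdStStoreUpd, [...]>;

    // After (illustrative): a single standard (complex) memory operand $dst,
    // matching the style of all other memory access instruction patterns.
    def STWU : DForm_1<37, (outs ptr_rc:$ea_res), (ins gprc:$rS, memri:$dst),
                       "stwu $rS, $dst", LdStStoreUpd, []>,
               RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;

    // The pre_store SelectionDAG node still supplies base and offset as two
    // separate operands; a standalone Pat maps that pair onto the single
    // complex memri operand of the instruction.
    def : Pat<(pre_store i32:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
              (STWU $rS, iaddroff:$ptroff, $ptrreg)>;

The test below checks that this still selects the update-form stores: the stbu/sthu/stwu/stdu cases exercise the immediate-offset forms, while the *ux cases with a variable i64 %offset exercise the register-indexed forms. The i16/i32/i64 indexed cases additionally expect an sldi that scales the index by the element size; no scaling is needed for i8.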
Diffstat (limited to 'test/CodeGen/PowerPC/store-update.ll')
-rw-r--r--  test/CodeGen/PowerPC/store-update.ll  170
1 file changed, 170 insertions, 0 deletions
diff --git a/test/CodeGen/PowerPC/store-update.ll b/test/CodeGen/PowerPC/store-update.ll
new file mode 100644
index 0000000000..538ed24fbc
--- /dev/null
+++ b/test/CodeGen/PowerPC/store-update.ll
@@ -0,0 +1,170 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i8* @stbu(i8* %base, i8 zeroext %val) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds i8* %base, i64 16
+ store i8 %val, i8* %arrayidx, align 1
+ ret i8* %arrayidx
+}
+; CHECK: @stbu
+; CHECK: %entry
+; CHECK-NEXT: stbu
+; CHECK-NEXT: blr
+
+define i8* @stbux(i8* %base, i8 zeroext %val, i64 %offset) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds i8* %base, i64 %offset
+ store i8 %val, i8* %arrayidx, align 1
+ ret i8* %arrayidx
+}
+; CHECK: @stbux
+; CHECK: %entry
+; CHECK-NEXT: stbux
+; CHECK-NEXT: blr
+
+define i16* @sthu(i16* %base, i16 zeroext %val) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds i16* %base, i64 16
+ store i16 %val, i16* %arrayidx, align 2
+ ret i16* %arrayidx
+}
+; CHECK: @sthu
+; CHECK: %entry
+; CHECK-NEXT: sthu
+; CHECK-NEXT: blr
+
+define i16* @sthux(i16* %base, i16 zeroext %val, i64 %offset) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds i16* %base, i64 %offset
+ store i16 %val, i16* %arrayidx, align 2
+ ret i16* %arrayidx
+}
+; CHECK: @sthux
+; CHECK: %entry
+; CHECK-NEXT: sldi
+; CHECK-NEXT: sthux
+; CHECK-NEXT: blr
+
+define i32* @stwu(i32* %base, i32 zeroext %val) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds i32* %base, i64 16
+ store i32 %val, i32* %arrayidx, align 4
+ ret i32* %arrayidx
+}
+; CHECK: @stwu
+; CHECK: %entry
+; CHECK-NEXT: stwu
+; CHECK-NEXT: blr
+
+define i32* @stwux(i32* %base, i32 zeroext %val, i64 %offset) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds i32* %base, i64 %offset
+ store i32 %val, i32* %arrayidx, align 4
+ ret i32* %arrayidx
+}
+; CHECK: @stwux
+; CHECK: %entry
+; CHECK-NEXT: sldi
+; CHECK-NEXT: stwux
+; CHECK-NEXT: blr
+
+define i8* @stbu8(i8* %base, i64 %val) nounwind {
+entry:
+ %conv = trunc i64 %val to i8
+ %arrayidx = getelementptr inbounds i8* %base, i64 16
+ store i8 %conv, i8* %arrayidx, align 1
+ ret i8* %arrayidx
+}
+; CHECK: @stbu8
+; CHECK: %entry
+; CHECK-NEXT: stbu
+; CHECK-NEXT: blr
+
+define i8* @stbux8(i8* %base, i64 %val, i64 %offset) nounwind {
+entry:
+ %conv = trunc i64 %val to i8
+ %arrayidx = getelementptr inbounds i8* %base, i64 %offset
+ store i8 %conv, i8* %arrayidx, align 1
+ ret i8* %arrayidx
+}
+; CHECK: @stbux8
+; CHECK: %entry
+; CHECK-NEXT: stbux
+; CHECK-NEXT: blr
+
+define i16* @sthu8(i16* %base, i64 %val) nounwind {
+entry:
+ %conv = trunc i64 %val to i16
+ %arrayidx = getelementptr inbounds i16* %base, i64 16
+ store i16 %conv, i16* %arrayidx, align 2
+ ret i16* %arrayidx
+}
+; CHECK: @sthu8
+; CHECK: %entry
+; CHECK-NEXT: sthu
+; CHECK-NEXT: blr
+
+define i16* @sthux8(i16* %base, i64 %val, i64 %offset) nounwind {
+entry:
+ %conv = trunc i64 %val to i16
+ %arrayidx = getelementptr inbounds i16* %base, i64 %offset
+ store i16 %conv, i16* %arrayidx, align 2
+ ret i16* %arrayidx
+}
+; CHECK: @sthux8
+; CHECK: %entry
+; CHECK-NEXT: sldi
+; CHECK-NEXT: sthux
+; CHECK-NEXT: blr
+
+define i32* @stwu8(i32* %base, i64 %val) nounwind {
+entry:
+ %conv = trunc i64 %val to i32
+ %arrayidx = getelementptr inbounds i32* %base, i64 16
+ store i32 %conv, i32* %arrayidx, align 4
+ ret i32* %arrayidx
+}
+; CHECK: @stwu8
+; CHECK: %entry
+; CHECK-NEXT: stwu
+; CHECK-NEXT: blr
+
+define i32* @stwux8(i32* %base, i64 %val, i64 %offset) nounwind {
+entry:
+ %conv = trunc i64 %val to i32
+ %arrayidx = getelementptr inbounds i32* %base, i64 %offset
+ store i32 %conv, i32* %arrayidx, align 4
+ ret i32* %arrayidx
+}
+; CHECK: @stwux8
+; CHECK: %entry
+; CHECK-NEXT: sldi
+; CHECK-NEXT: stwux
+; CHECK-NEXT: blr
+
+define i64* @stdu(i64* %base, i64 %val) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds i64* %base, i64 16
+ store i64 %val, i64* %arrayidx, align 8
+ ret i64* %arrayidx
+}
+; CHECK: @stdu
+; CHECK: %entry
+; CHECK-NEXT: stdu
+; CHECK-NEXT: blr
+
+define i64* @stdux(i64* %base, i64 %val, i64 %offset) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds i64* %base, i64 %offset
+ store i64 %val, i64* %arrayidx, align 8
+ ret i64* %arrayidx
+}
+; CHECK: @stdux
+; CHECK: %entry
+; CHECK-NEXT: sldi
+; CHECK-NEXT: stdux
+; CHECK-NEXT: blr
+