author     Bob Wilson <bob.wilson@apple.com>    2013-11-15 19:09:27 +0000
committer  Bob Wilson <bob.wilson@apple.com>    2013-11-15 19:09:27 +0000
commit     cc7052343e5e955d4e2f48885c06360f9003390a (patch)
tree       8832fd14ebae789ee88bab5009cc33fbc89e3ec7 /test/CodeGen/X86
parent     5cddda6d13ab66c462ccbd61255ad6e6f95e9f6f (diff)
Avoid illegal integer promotion in fastisel
Stop folding constant adds into GEP when the type size doesn't match.
Otherwise, the adds' operands are effectively being promoted, changing the
conditions of an overflow.  Results are different when:

    sext(a) + sext(b) != sext(a + b)

Problem originally found on x86-64, but also fixed issues with ARM and PPC,
which used similar code.

<rdar://problem/15292280>

Patch by Duncan Exon Smith!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194840 91177308-0d34-0410-b5e6-96231b3b80d8
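For concreteness, the identity above can be checked with a minimal standalone IR sketch (not part of the commit; the function name @sext_mismatch is made up for illustration). With the i8 values used in the test below, adding first and then sign-extending gives -128, while sign-extending the operands first and then adding gives 128:

define i1 @sext_mismatch() {
entry:
  %sum8  = add i8 64, 64          ; 0x40 + 0x40 wraps to 0x80 = -128 in i8
  %wide  = sext i8 %sum8 to i64   ; sext(a + b)         = -128
  %sum64 = add i64 64, 64         ; sext(a) + sext(b)   =  128
  %diff  = icmp ne i64 %wide, %sum64
  ret i1 %diff                    ; true: the two results disagree
}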
Diffstat (limited to 'test/CodeGen/X86')
-rw-r--r--  test/CodeGen/X86/fastisel-gep-promote-before-add.ll  |  37
1 file changed, 37 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/X86/fastisel-gep-promote-before-add.ll b/test/CodeGen/X86/fastisel-gep-promote-before-add.ll
new file mode 100644
index 0000000000..f87a34c4ab
--- /dev/null
+++ b/test/CodeGen/X86/fastisel-gep-promote-before-add.ll
@@ -0,0 +1,37 @@
+; fastisel should not fold add with non-pointer bitwidth
+; sext(a) + sext(b) != sext(a + b)
+; RUN: llc -mtriple=x86_64-apple-darwin %s -O0 -o - | FileCheck %s
+
+define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+entry:
+ %ptr.addr = alloca i8*, align 8
+ %add = add i8 64, 64 ; 0x40 + 0x40
+ %0 = load i8** %ptr.addr, align 8
+
+ ; CHECK-LABEL: _gep_promotion:
+ ; CHECK: movzbl ({{.*}})
+ %arrayidx = getelementptr inbounds i8* %0, i8 %add
+
+ %1 = load i8* %arrayidx, align 1
+ ret i8 %1
+}
+
+define zeroext i8 @gep_promotion_nonconst(i8 %i, i8* %ptr) nounwind uwtable ssp {
+entry:
+ %i.addr = alloca i8, align 4
+ %ptr.addr = alloca i8*, align 8
+ store i8 %i, i8* %i.addr, align 4
+ store i8* %ptr, i8** %ptr.addr, align 8
+ %0 = load i8* %i.addr, align 4
+ ; CHECK-LABEL: _gep_promotion_nonconst:
+ ; CHECK: movzbl ({{.*}})
+ %xor = xor i8 %0, -128 ; %0 ^ 0x80
+ %add = add i8 %xor, -127 ; %xor + 0x81
+ %1 = load i8** %ptr.addr, align 8
+
+ %arrayidx = getelementptr inbounds i8* %1, i8 %add
+
+ %2 = load i8* %arrayidx, align 1
+ ret i8 %2
+}
+
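In GEP terms, a rough sketch of what the disabled fold would do (hypothetical function @illustrate_offsets, written in the same typed-pointer syntax as the test; it is not part of the commit): promoting the i8 operands before the add would index %ptr with +128 instead of the correct sext(i8 0x80) = -128, an address 256 bytes away.

define void @illustrate_offsets(i8* %ptr) {
entry:
  %correct = getelementptr inbounds i8* %ptr, i64 -128 ; sext of the i8 sum 0x80
  %folded  = getelementptr inbounds i8* %ptr, i64 128  ; operands promoted before the add
  ret void
}

The CHECK lines above accept only a bare register memory operand, movzbl ({{.*}}), so a lowering that folded the promoted constant into the address as a displacement (e.g. movzbl 128(%rax)) would fail the test.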