summaryrefslogtreecommitdiff
path: root/test/Transforms
diff options
context:
space:
mode:
authorRafael Espindola <rafael.espindola@gmail.com>2014-05-28 15:30:40 +0000
committerRafael Espindola <rafael.espindola@gmail.com>2014-05-28 15:30:40 +0000
commite8075c6877d1f2e1be6c2646135912406616249c (patch)
treed81d3c860f30aa2b7230f2b5a20d858f5324b5ae /test/Transforms
parentb8af23fe1ecc74021e2cc5a3ad1ecdcbbcb65c4e (diff)
downloadllvm-e8075c6877d1f2e1be6c2646135912406616249c.tar.gz
llvm-e8075c6877d1f2e1be6c2646135912406616249c.tar.bz2
llvm-e8075c6877d1f2e1be6c2646135912406616249c.tar.xz
InstCombine: Improvement to check if signed addition overflows.
This patch implements two things: 1. If we know one number is positive and another is negative, we return true as signed addition of two opposite signed numbers will never overflow. 2. Implemented TODO : If one of the operands only has one non-zero bit, and if the other operand has a known-zero bit in a more significant place than it (not including the sign bit) the ripple may go up to and fill the zero, but won't change the sign. e.x - (x & ~4) + 1 We make sure that we are ignoring 0 at MSB. Patch by Suyog Sarda. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209746 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/Transforms')
-rw-r--r--test/Transforms/InstCombine/AddOverflow.ll56
1 file changed, 56 insertions, 0 deletions
diff --git a/test/Transforms/InstCombine/AddOverflow.ll b/test/Transforms/InstCombine/AddOverflow.ll
new file mode 100644
index 0000000000..1bbd1fc59a
--- /dev/null
+++ b/test/Transforms/InstCombine/AddOverflow.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Positive case for the carry-ripple rule: %tmp1 = (x & -5) has bit 2 known
+; zero, and the addend 1 has only bit 0 set. Any carry ripple is absorbed by
+; the known-zero bit before reaching the sign bit, so InstCombine can prove
+; the (narrowed) i16 add never overflows and must tag it 'nsw'.
+; CHECK-LABEL: @ripple(
+; CHECK: add nsw i16 %tmp1, 1
+define i32 @ripple(i16 signext %x) {
+bb:
+  %tmp = sext i16 %x to i32
+  %tmp1 = and i32 %tmp, -5
+  %tmp2 = trunc i32 %tmp1 to i16
+  %tmp3 = sext i16 %tmp2 to i32
+  %tmp4 = add i32 %tmp3, 1
+  ret i32 %tmp4
+}
+
+; Negative case: (x & -3) only guarantees bit 1 is zero, but the addend 4 has
+; bit 2 set — the known-zero bit sits *below* the addend's bit, so a carry can
+; still ripple up to the sign bit. The add must stay without 'nsw'.
+; CHECK-LABEL: @ripplenot(
+; CHECK: add i32 %tmp3, 4
+define i32 @ripplenot(i16 signext %x) {
+bb:
+  %tmp = sext i16 %x to i32
+  %tmp1 = and i32 %tmp, -3
+  %tmp2 = trunc i32 %tmp1 to i16
+  %tmp3 = sext i16 %tmp2 to i32
+  %tmp4 = add i32 %tmp3, 4
+  ret i32 %tmp4
+}
+
+; Positive case for the opposite-signs rule: or'ing in 32768 (0x8000) forces
+; the i16 sign bit, so after trunc+sext the value is known negative, while the
+; addend 4 is positive. Adding values of opposite sign can never overflow
+; signed arithmetic, so the narrowed add must be tagged 'nsw'.
+; CHECK-LABEL: @oppositesign(
+; CHECK: add nsw i16 %tmp1, 4
+define i32 @oppositesign(i16 signext %x) {
+bb:
+  %tmp = sext i16 %x to i32
+  %tmp1 = or i32 %tmp, 32768
+  %tmp2 = trunc i32 %tmp1 to i16
+  %tmp3 = sext i16 %tmp2 to i32
+  %tmp4 = add i32 %tmp3, 4
+  ret i32 %tmp4
+}
+
+; Negative case with a variable addend: %tmp6 = (y | 2) has bit 1 known set
+; but its remaining bits are unknown, so neither operand has exactly one
+; non-zero bit; the ripple analysis cannot rule out signed overflow and the
+; add must stay without 'nsw'.
+; CHECK-LABEL: @ripplenot_var(
+; CHECK: add i32 %tmp6, %tmp7
+define i32 @ripplenot_var(i16 signext %x, i16 signext %y) {
+bb:
+  %tmp = sext i16 %x to i32
+  %tmp1 = and i32 %tmp, -5
+  %tmp2 = trunc i32 %tmp1 to i16
+  %tmp3 = sext i16 %y to i32
+  %tmp4 = or i32 %tmp3, 2
+  %tmp5 = trunc i32 %tmp4 to i16
+  %tmp6 = sext i16 %tmp5 to i32
+  %tmp7 = sext i16 %tmp2 to i32
+  %tmp8 = add i32 %tmp6, %tmp7
+  ret i32 %tmp8
+}