author     Rafael Espindola <rafael.espindola@gmail.com>  2014-05-28 21:43:52 +0000
committer  Rafael Espindola <rafael.espindola@gmail.com>  2014-05-28 21:43:52 +0000
commit     c81cf72ef34bfc846bc9f271f590e8e02e38061b (patch)
tree       697ee359ec2480718aa00f5782196bdab29faa9b
parent     836475b2e2b45d34bcc558497f47a87585157254 (diff)
download   llvm-c81cf72ef34bfc846bc9f271f590e8e02e38061b.tar.gz
           llvm-c81cf72ef34bfc846bc9f271f590e8e02e38061b.tar.bz2
           llvm-c81cf72ef34bfc846bc9f271f590e8e02e38061b.tar.xz
Revert "Revert "InstCombine: Improvement to check if signed addition overflows.""
This reverts commit r209762, bringing back r209746. It was not responsible
for the libc++ build failure.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209776 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp  50
-rw-r--r--  test/Transforms/InstCombine/AddOverflow.ll         56
2 files changed, 100 insertions, 6 deletions
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index c37a9cf2ef..eca4e4a787 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -889,11 +889,34 @@ static inline Value *dyn_castFoldableMul(Value *V, Constant *&CST) {
   return nullptr;
 }
 
+// If one of the operands only has one non-zero bit, and if the other
+// operand has a known-zero bit in a more significant place than it (not
+// including the sign bit) the ripple may go up to and fill the zero, but
+// won't change the sign. For example, (X & ~4) + 1.
+// FIXME: Handle case where LHS has a zero before the 1 in the RHS, but also
+// has one after.
+static bool CheckRippleForAdd(APInt Op0KnownZero, APInt Op0KnownOne,
+                              APInt Op1KnownZero, APInt Op1KnownOne) {
+  // Make sure that one of the operands has only one bit set to 1 and all
+  // other bits set to 0.
+  if ((~Op1KnownZero).countPopulation() == 1) {
+    int BitWidth = Op0KnownZero.getBitWidth();
+    // Ignore the sign bit.
+    Op0KnownZero.clearBit(BitWidth - 1);
+    int Op1OnePosition = BitWidth - Op1KnownOne.countLeadingZeros() - 1;
+    int Op0ZeroPosition = BitWidth - Op0KnownZero.countLeadingZeros() - 1;
+    if ((Op0ZeroPosition != (BitWidth - 1)) &&
+        (Op0ZeroPosition >= Op1OnePosition))
+      return true;
+  }
+  return false;
+}
+
 /// WillNotOverflowSignedAdd - Return true if we can prove that:
 ///   (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
 /// This basically requires proving that the add in the original type would not
 /// overflow to change the sign bit or have a carry out.
+/// TODO: Handle this for Vectors.
 bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
   // There are different heuristics we can use for this. Here are some simple
   // ones.
@@ -905,14 +928,29 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
   if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
     return true;
 
+  if (IntegerType *IT = dyn_cast<IntegerType>(LHS->getType())) {
-  // If one of the operands only has one non-zero bit, and if the other operand
-  // has a known-zero bit in a more significant place than it (not including the
-  // sign bit) the ripple may go up to and fill the zero, but won't change the
-  // sign. For example, (X & ~4) + 1.
-
-  // TODO: Implement.
+    int BitWidth = IT->getBitWidth();
+    APInt LHSKnownZero(BitWidth, 0, /*isSigned*/ true);
+    APInt LHSKnownOne(BitWidth, 0, /*isSigned*/ true);
+    computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
+    APInt RHSKnownZero(BitWidth, 0, /*isSigned*/ true);
+    APInt RHSKnownOne(BitWidth, 0, /*isSigned*/ true);
+    computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
+
+    // Addition of two 2's complement numbers having opposite signs will never
+    // overflow.
+    if ((LHSKnownOne[BitWidth - 1] && RHSKnownZero[BitWidth - 1]) ||
+        (LHSKnownZero[BitWidth - 1] && RHSKnownOne[BitWidth - 1]))
+      return true;
+
+    // Check if the carry bit of the addition will not cause overflow.
+    if (CheckRippleForAdd(LHSKnownZero, LHSKnownOne, RHSKnownZero, RHSKnownOne))
+      return true;
+    if (CheckRippleForAdd(RHSKnownZero, RHSKnownOne, LHSKnownZero, LHSKnownOne))
+      return true;
+  }
   return false;
 }
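
Aside (not part of the patch): the sketch below mirrors the new known-bits check on
16-bit values using plain integer masks instead of llvm::APInt, so the decision made
for the (X & ~4) + 1 example in the comment above can be followed concretely. The
names rippleCannotReachSignBit, highestSetBit and popCount are made up for this
illustration; any C++11 compiler should do.

// Standalone sketch. A set bit in a knownZero mask means that bit of the value
// is known to be 0; likewise for knownOne (the computeKnownBits convention).
#include <cassert>
#include <cstdint>

// Position of the highest set bit, or -1 if the value is zero.
static int highestSetBit(uint16_t v) {
  int pos = -1;
  for (int i = 0; i < 16; ++i)
    if (v & (1u << i))
      pos = i;
  return pos;
}

static int popCount(uint16_t v) {
  int n = 0;
  for (int i = 0; i < 16; ++i)
    n += (v >> i) & 1;
  return n;
}

// Simplified mirror of CheckRippleForAdd for i16: Op1 can start at most one
// carry, and Op0 has a known-zero bit at or above that position (below the
// sign bit), so the ripple is absorbed before it reaches the sign bit.
static bool rippleCannotReachSignBit(uint16_t op0KnownZero,
                                     uint16_t op1KnownZero,
                                     uint16_t op1KnownOne) {
  if (popCount(static_cast<uint16_t>(~op1KnownZero)) != 1)
    return false;                 // Op1 must have a single not-known-zero bit.
  op0KnownZero &= 0x7FFF;         // Ignore the sign bit of Op0.
  int op1OnePos = highestSetBit(op1KnownOne);
  int op0ZeroPos = highestSetBit(op0KnownZero);
  return op0ZeroPos != -1 && op0ZeroPos >= op1OnePos;
}

int main() {
  // (X & ~4) + 1: bit 2 of the LHS is known zero; the constant 1 has bit 0
  // known one and bits 1..15 known zero. Accepted (test @ripple below).
  assert(rippleCannotReachSignBit(1u << 2, 0xFFFE, 1u << 0));

  // (X & ~2) + 4: the known zero (bit 1) sits below the single set bit of the
  // constant 4 (bit 2), so the carry can pass it. Rejected (test @ripplenot).
  assert(!rippleCannotReachSignBit(1u << 1, 0xFFFB, 1u << 2));
  return 0;
}

A known zero at or above the single possible carry position, below the sign bit,
absorbs the ripple; that is what lets the transform add nsw for @ripple but not
for @ripplenot in the new test file below.
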
diff --git a/test/Transforms/InstCombine/AddOverflow.ll b/test/Transforms/InstCombine/AddOverflow.ll
new file mode 100644
index 0000000000..1bbd1fc59a
--- /dev/null
+++ b/test/Transforms/InstCombine/AddOverflow.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-LABEL: @ripple(
+; CHECK: add nsw i16 %tmp1, 1
+define i32 @ripple(i16 signext %x) {
+bb:
+ %tmp = sext i16 %x to i32
+ %tmp1 = and i32 %tmp, -5
+ %tmp2 = trunc i32 %tmp1 to i16
+ %tmp3 = sext i16 %tmp2 to i32
+ %tmp4 = add i32 %tmp3, 1
+ ret i32 %tmp4
+}
+
+; CHECK-LABEL: @ripplenot(
+; CHECK: add i32 %tmp3, 4
+define i32 @ripplenot(i16 signext %x) {
+bb:
+ %tmp = sext i16 %x to i32
+ %tmp1 = and i32 %tmp, -3
+ %tmp2 = trunc i32 %tmp1 to i16
+ %tmp3 = sext i16 %tmp2 to i32
+ %tmp4 = add i32 %tmp3, 4
+ ret i32 %tmp4
+}
+
+; CHECK-LABEL: @oppositesign(
+; CHECK: add nsw i16 %tmp1, 4
+define i32 @oppositesign(i16 signext %x) {
+bb:
+ %tmp = sext i16 %x to i32
+ %tmp1 = or i32 %tmp, 32768
+ %tmp2 = trunc i32 %tmp1 to i16
+ %tmp3 = sext i16 %tmp2 to i32
+ %tmp4 = add i32 %tmp3, 4
+ ret i32 %tmp4
+}
+
+; CHECK-LABEL: @ripplenot_var(
+; CHECK: add i32 %tmp6, %tmp7
+define i32 @ripplenot_var(i16 signext %x, i16 signext %y) {
+bb:
+ %tmp = sext i16 %x to i32
+ %tmp1 = and i32 %tmp, -5
+ %tmp2 = trunc i32 %tmp1 to i16
+ %tmp3 = sext i16 %y to i32
+ %tmp4 = or i32 %tmp3, 2
+ %tmp5 = trunc i32 %tmp4 to i16
+ %tmp6 = sext i16 %tmp5 to i32
+ %tmp7 = sext i16 %tmp2 to i32
+ %tmp8 = add i32 %tmp6, %tmp7
+ ret i32 %tmp8
+}
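
Aside (not part of the test suite): the standalone sketch below brute-forces, over
every i16 input, the dynamic property these CHECK lines rely on. The adds in
@ripple and @oppositesign can never signed-overflow in i16, so sign-extending
before or after the add gives the same i32 result and `add nsw` is sound, while
the @ripplenot pattern does have overflowing inputs, so no flag is added. The
helpers addThenSext and sextThenAdd are hypothetical names; two's-complement
narrowing is assumed.

// Standalone brute-force check of the overflow facts behind the tests above.
#include <cassert>
#include <cstdint>

// i16 add with wraparound, then sign-extend the truncated result to i32
// (models the trunc / add i16 / sext sequence in the tests).
static int32_t addThenSext(int16_t a, int16_t b) {
  uint16_t sum = static_cast<uint16_t>(static_cast<uint16_t>(a) +
                                       static_cast<uint16_t>(b));
  return static_cast<int16_t>(sum); // two's-complement narrowing assumed
}

// Sign-extend to i32 first, then add. Equal to addThenSext exactly when the
// i16 add did not signed-overflow, which is the property nsw asserts.
static int32_t sextThenAdd(int16_t a, int16_t b) {
  return static_cast<int32_t>(a) + static_cast<int32_t>(b);
}

int main() {
  bool ripplenotOverflows = false;
  for (int32_t i = INT16_MIN; i <= INT16_MAX; ++i) {
    int16_t x = static_cast<int16_t>(i);

    // @ripple: (x & ~4) + 1. Bit 2 is known zero and the carry from adding 1
    // starts below it, so the sign bit can never flip.
    int16_t r = static_cast<int16_t>(x & ~4);
    assert(addThenSext(r, 1) == sextThenAdd(r, 1));

    // @oppositesign: (x | 0x8000) is known negative, 4 is positive; adding
    // values of opposite signs cannot signed-overflow.
    int16_t o = static_cast<int16_t>(x | 0x8000);
    assert(addThenSext(o, 4) == sextThenAdd(o, 4));

    // @ripplenot: (x & ~2) + 4. The known zero (bit 1) sits below the set bit
    // of 4 (bit 2), so overflow is possible for some inputs.
    int16_t n = static_cast<int16_t>(x & ~2);
    if (addThenSext(n, 4) != sextThenAdd(n, 4))
      ripplenotOverflows = true;
  }
  assert(ripplenotOverflows && "some input overflows the @ripplenot pattern");
  return 0;
}

Compiled with any C++11 compiler (for example, clang++ -std=c++11), this should
run to completion with no assertion failures.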