Diffstat (limited to 'lib/Transforms/InstCombine')
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp            |  4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAndOrXor.cpp          | 38
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp             | 18
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp   |  2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineMulDivRem.cpp         |  4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineShifts.cpp            |  4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp  |  4
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp         |  4
8 files changed, 39 insertions, 39 deletions
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 99b62f8d05..874bb8f292 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -200,7 +200,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (dyn_castFoldableMul(RHS, C2) == LHS)
return BinaryOperator::CreateMul(LHS, AddOne(C2));
- // A+B --> A|B iff A and B have no bits set in common.
+ // A+B --> A|B if A and B have no bits set in common.
if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
APInt LHSKnownOne(IT->getBitWidth(), 0);
APInt LHSKnownZero(IT->getBitWidth(), 0);
@@ -216,7 +216,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
}
- // W*X + Y*Z --> W * (X+Z) iff W == Y
+ // W*X + Y*Z --> W * (X+Z) if W == Y
{
Value *W, *X, *Y, *Z;
if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
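The first hunk in this file touches the comment for the "A+B --> A|B" fold, which rests on a carry-free addition argument. A standalone sanity check of that identity (not part of the patch; the 8-bit loop bounds are an arbitrary choice) in C++:

    #include <cassert>

    int main() {
      // When A and B share no set bits, addition produces no carries,
      // so A + B and A | B agree.
      for (unsigned A = 0; A < 256; ++A)
        for (unsigned B = 0; B < 256; ++B)
          if ((A & B) == 0)                 // "no bits set in common"
            assert((A + B) == (A | B));
      return 0;
    }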
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 7d0af0d802..85e18a3591 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -315,7 +315,7 @@ Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
return Builder->CreateICmpUGT(Add, LowerBound);
}
-// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
+// isRunOfOnes - Returns true if Val consists of one contiguous run of 1s with
// any number of 0s on either side. The 1s are allowed to wrap from LSB to
// MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
// not, since all 1s are not contiguous.
@@ -335,9 +335,9 @@ static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
/// where isSub determines whether the operator is a sub. If we can fold one of
/// the following xforms:
///
-/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
-/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
-/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
+/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask if N&Mask == Mask
+/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask if N&Mask == 0
+/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask if N&Mask == 0
///
/// return (A +/- B).
///
@@ -752,7 +752,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
// (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
// where CMAX is the all ones value for the truncated type,
- // iff the lower bits of C2 and CA are zero.
+ // if the lower bits of C2 and CA are zero.
if (LHSCC == ICmpInst::ICMP_EQ && LHSCC == RHSCC &&
LHS->hasOneUse() && RHS->hasOneUse()) {
Value *V;
@@ -1062,9 +1062,9 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
break;
}
case Instruction::Add:
- // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
- // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
- // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
+ // ((A & N) + B) & AndRHS -> (A + B) & AndRHS if N&AndRHS == AndRHS.
+ // ((A | N) + B) & AndRHS -> (A + B) & AndRHS if N&AndRHS == 0
+ // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS if N&AndRHS == 0
if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
return BinaryOperator::CreateAnd(V, AndRHS);
if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
@@ -1072,13 +1072,13 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
break;
case Instruction::Sub:
- // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
- // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
- // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
+ // ((A & N) - B) & AndRHS -> (A - B) & AndRHS if N&AndRHS == AndRHS.
+ // ((A | N) - B) & AndRHS -> (A - B) & AndRHS if N&AndRHS == 0
+ // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS if N&AndRHS == 0
if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
return BinaryOperator::CreateAnd(V, AndRHS);
- // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
+ // (A - N) & AndRHS -> -N & AndRHS if A&AndRHS==0 and AndRHS
// has 1's for all bits that the subtraction with A might affect.
if (Op0I->hasOneUse() && !match(Op0LHS, m_Zero())) {
uint32_t BitWidth = AndRHSMask.getBitWidth();
@@ -1472,7 +1472,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
}
// (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
- // iff C2 + CA == C1.
+ // if C2 + CA == C1.
if (LHSCC == ICmpInst::ICMP_ULT && RHSCC == ICmpInst::ICMP_EQ) {
ConstantInt *AddCst;
if (match(Val, m_Add(m_Specific(Val2), m_ConstantInt(AddCst))))
@@ -1735,7 +1735,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
ConstantInt *C1 = 0; Value *X = 0;
// (X & C1) | C2 --> (X | C2) & (C1|C2)
- // iff (C1 & C2) == 0.
+ // if (C1 & C2) == 0.
if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
(RHS->getValue() & C1->getValue()) != 0 &&
Op0->hasOneUse()) {
@@ -1779,7 +1779,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return BSwap;
}
- // (X^C)|Y -> (X|Y)^C iff Y&C == 0
+ // (X^C)|Y -> (X|Y)^C if Y&C == 0
if (Op0->hasOneUse() &&
match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
MaskedValueIsZero(Op1, C1->getValue())) {
@@ -1788,7 +1788,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return BinaryOperator::CreateXor(NOr, C1);
}
- // Y|(X^C) -> (X|Y)^C iff Y&C == 0
+ // Y|(X^C) -> (X|Y)^C if Y&C == 0
if (Op1->hasOneUse() &&
match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
MaskedValueIsZero(Op0, C1->getValue())) {
@@ -1830,7 +1830,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if ((C1->getValue() & C2->getValue()) == 0) {
// ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
- // iff (C1&C2) == 0 and (N&~C1) == 0
+ // if (C1&C2) == 0 and (N&~C1) == 0
if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N)
(V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V)
@@ -1846,7 +1846,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
C1->getValue()|C2->getValue()));
// ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
- // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
+ // if (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
ConstantInt *C3 = 0, *C4 = 0;
if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
(C3->getValue() & ~C1->getValue()) == 0 &&
@@ -2146,7 +2146,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
}
} else if (Op0I->getOpcode() == Instruction::Or) {
- // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
+ // (X|C1)^C2 -> X^(C1|C2) if X&~C1 == 0
if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
// Anything in both C1 and C2 is known to be zero, remove it from
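Among the visitOr folds whose comments change above is "(X^C)|Y --> (X|Y)^C", valid only when Y&C == 0. A standalone exhaustive check of that precondition and identity over 4-bit values (not part of the patch; bit width chosen arbitrarily):

    #include <cassert>

    int main() {
      // C may only flip bit positions where Y contributes a 0, so the
      // xor can be hoisted past the or when Y & C == 0.
      for (unsigned X = 0; X < 16; ++X)
        for (unsigned Y = 0; Y < 16; ++Y)
          for (unsigned C = 0; C < 16; ++C)
            if ((Y & C) == 0)               // precondition from the comment
              assert(((X ^ C) | Y) == ((X | Y) ^ C));
      return 0;
    }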
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 555b4428d2..b8b61d721a 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -381,7 +381,7 @@ static bool CanEvaluateTruncated(Value *V, Type *Ty) {
break;
case Instruction::LShr:
// If this is a truncate of a logical shr, we can truncate it to a smaller
- // lshr iff we know that the bits we would otherwise be shifting in are
+ // lshr if we know that the bits we would otherwise be shifting in are
// already zeros.
if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
@@ -527,14 +527,14 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
return ReplaceInstUsesWith(CI, In);
}
- // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
- // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
- // zext (X == 1) to i32 --> X iff X has only the low bit set.
- // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
- // zext (X != 0) to i32 --> X iff X has only the low bit set.
- // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
- // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
- // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
+ // zext (X == 0) to i32 --> X^1 if X has only the low bit set.
+ // zext (X == 0) to i32 --> (X>>1)^1 if X has only the 2nd bit set.
+ // zext (X == 1) to i32 --> X if X has only the low bit set.
+ // zext (X == 2) to i32 --> X>>1 if X has only the 2nd bit set.
+ // zext (X != 0) to i32 --> X if X has only the low bit set.
+ // zext (X != 0) to i32 --> X>>1 if X has only the 2nd bit set.
+ // zext (X != 1) to i32 --> X^1 if X has only the low bit set.
+ // zext (X != 2) to i32 --> (X>>1)^1 if X has only the 2nd bit set.
if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
// This only works for EQ and NE
ICI->isEquality()) {
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index a446e427e5..89972f610c 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -368,7 +368,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
LI.setAlignment(EffectiveLoadAlign);
}
- // load (cast X) --> cast (load X) iff safe.
+ // load (cast X) --> cast (load X) if safe.
if (isa<CastInst>(Op))
if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
return Res;
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 3361a1e7fb..6d81d6dff8 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -550,7 +550,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
if (MaskedValueIsZero(Op0, Mask)) {
if (MaskedValueIsZero(Op1, Mask)) {
- // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
+ // X sdiv Y -> X udiv Y, if X and Y don't have sign bit set
return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
}
@@ -692,7 +692,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
if (I.getType()->isIntegerTy()) {
APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
- // X srem Y -> X urem Y, iff X and Y don't have sign bit set
+ // X srem Y -> X urem Y, if X and Y don't have sign bit set
return BinaryOperator::CreateURem(Op0, Op1, I.getName());
}
}
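Both hunks in this file concern the same fact: when the sign bit of both operands is known zero, signed and unsigned division (and remainder) agree. A standalone check over the non-negative i8 range (not part of the patch; range chosen arbitrarily):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int32_t X = 0; X < 128; ++X)       // sign bit clear
        for (int32_t Y = 1; Y < 128; ++Y) {   // sign bit clear, nonzero
          assert(uint32_t(X / Y) == uint32_t(X) / uint32_t(Y)); // sdiv == udiv
          assert(uint32_t(X % Y) == uint32_t(X) % uint32_t(Y)); // srem == urem
        }
      return 0;
    }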
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 4bb2403299..598b4d3bd8 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -37,7 +37,7 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
return Res;
- // X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2.
+ // X shift (A srem B) -> X shift (A and B-1) if B is a power of 2.
// Because shifts by negative values (which could occur if A were negative)
// are undefined.
Value *A; const APInt *B;
@@ -85,7 +85,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
// TODO: Check that the input bits are already zero with MaskedValueIsZero
#if 0
// If this is a truncate of a logical shr, we can truncate it to a smaller
- // lshr iff we know that the bits we would otherwise be shifting in are
+ // lshr if we know that the bits we would otherwise be shifting in are
// already zeros.
uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
uint32_t BitWidth = Ty->getScalarSizeInBits();
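The first hunk in this file is the "X shift (A srem B) -> X shift (A and B-1)" comment. For a power-of-two B and non-negative A the two shift amounts are equal; for negative A the srem result would be a negative shift amount, which is undefined anyway. A standalone check of the non-negative case (not part of the patch; B = 8 is an arbitrary power of two):

    #include <cassert>

    int main() {
      const int B = 8;                        // arbitrary power of two
      for (int A = 0; A < 256; ++A)
        assert((A % B) == (A & (B - 1)));     // srem == and with B-1
      return 0;
    }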
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 54be8ed3fa..9857f6ab5a 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -304,7 +304,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If all of the demanded bits are known to be zero on one side or the
// other, turn this into an *inclusive* or.
- // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+ // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) if C1&C2 == 0
if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
Instruction *Or =
BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
@@ -315,7 +315,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If all of the demanded bits on one side are known, and all of the set
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.
- // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
+ // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 if (C1&C2) == C2
if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
// all known
if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
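The first hunk above covers "(A & C1)^(B & C2) --> (A & C1)|(B & C2)" under C1&C2 == 0: the two masked terms then occupy disjoint bit positions, so xor and inclusive-or coincide. A standalone check with a pair of hypothetical disjoint masks (not part of the patch):

    #include <cassert>

    int main() {
      const unsigned C1 = 0x0F, C2 = 0xF0;    // arbitrary disjoint masks
      for (unsigned A = 0; A < 256; ++A)
        for (unsigned B = 0; B < 256; ++B)
          assert(((A & C1) ^ (B & C2)) == ((A & C1) | (B & C2)));
      return 0;
    }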
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index ff758c40af..faa51aa0ba 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1724,7 +1724,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
continue;
// At this point we know that LFilter has at least one element.
if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
- // Filter is a subset of LFilter iff Filter contains only zeros (as we
+ // Filter is a subset of LFilter if Filter contains only zeros (as we
// already know that Filter is not longer than LFilter).
if (isa<ConstantAggregateZero>(Filter)) {
assert(FElts <= LElts && "Should have handled this case earlier!");
@@ -1738,7 +1738,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
ConstantArray *LArray = cast<ConstantArray>(LFilter);
if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
// Since Filter is non-empty and contains only zeros, it is a subset of
- // LFilter iff LFilter contains a zero.
+ // LFilter if LFilter contains a zero.
assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
for (unsigned l = 0; l != LElts; ++l)
if (LArray->getOperand(l)->isNullValue()) {