author     Owen Anderson <resistor@mac.com>    2010-08-31 04:41:06 +0000
committer  Owen Anderson <resistor@mac.com>    2010-08-31 04:41:06 +0000
commit     c97fb52799eea845569425c0430b2b944cad7093 (patch)
tree       64dca7dfa0778734893fe9dfdd768bfb45b1dd4b /lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
parent     2e46e78c53011ddbc0587135c09b39077beb0c1e (diff)
Remove r111665, which implemented store-narrowing in InstCombine. Chris discovered a miscompilation in it, and it's not easily fixable at the optimizer level. I'll investigate reimplementing it in DAGCombine.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@112575 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp')
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp  47
1 file changed, 0 insertions(+), 47 deletions(-)
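The kind of code the removed transform targeted, sketched as a hypothetical C++ example (not taken from the patch or its tests): on a typical little-endian ABI, assigning to the low bitfield of a 32-bit word compiles to a wide load, constant masks that only affect the low byte, and a wide store back to the same address.

struct Flags {
  unsigned Kind : 8;   // occupies the low byte of a 32-bit word (little-endian ABI)
  unsigned Data : 24;
};

void setKind(Flags &F) {
  // Emits roughly: %v = load i32; mask %v with constants that touch only
  // the low 8 bits; store i32 back -- the store the removed code narrowed
  // to an i8 store, letting later instcombine iterations shrink the load
  // and masks via demanded bits.
  F.Kind = 42;
}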
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 87533d3fe2..b68fbc2db5 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -14,13 +14,11 @@
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
-#include "llvm/Support/PatternMatch.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
-using namespace PatternMatch;
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
@@ -475,51 +473,6 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (SI.isVolatile()) return 0; // Don't hack volatile stores.
- // Attempt to narrow sequences where we load a wide value, perform bitmasks
- // that only affect the low bits of it, and then store it back. This
- // typically arises from bitfield initializers in C++.
- ConstantInt *CI1 = 0, *CI2 = 0;
- Value *Ld = 0;
- if (getTargetData() &&
- match(SI.getValueOperand(),
- m_And(m_Or(m_Value(Ld), m_ConstantInt(CI1)), m_ConstantInt(CI2))) &&
- isa<LoadInst>(Ld) &&
- equivalentAddressValues(cast<LoadInst>(Ld)->getPointerOperand(), Ptr)) {
- APInt OrMask = CI1->getValue();
- APInt AndMask = CI2->getValue();
-
- // Compute the prefix of the value that is unmodified by the bitmasking.
- unsigned LeadingAndOnes = AndMask.countLeadingOnes();
- unsigned LeadingOrZeros = OrMask.countLeadingZeros();
- unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros);
- uint64_t NewWidth = AndMask.getBitWidth() - Prefix;
- while (NewWidth < AndMask.getBitWidth() &&
- getTargetData()->isIllegalInteger(NewWidth))
- NewWidth = NextPowerOf2(NewWidth);
-
- // If we can find a power-of-2 prefix (and if the values we're working with
- // are themselves POT widths), then we can narrow the store. We rely on
- // later iterations of instcombine to propagate the demanded bits to narrow
- // the other computations in the chain.
- if (NewWidth < AndMask.getBitWidth() &&
- getTargetData()->isLegalInteger(NewWidth)) {
- const Type *NewType = IntegerType::get(Ptr->getContext(), NewWidth);
- const Type *NewPtrType = PointerType::getUnqual(NewType);
-
- Value *NewVal = Builder->CreateTrunc(SI.getValueOperand(), NewType);
- Value *NewPtr = Builder->CreateBitCast(Ptr, NewPtrType);
-
- // On big endian targets, we need to offset from the original pointer
- // in order to store to the low-bit suffix.
- if (getTargetData()->isBigEndian()) {
- uint64_t GEPOffset = (AndMask.getBitWidth() - NewWidth) / 8;
- NewPtr = Builder->CreateConstGEP1_64(NewPtr, GEPOffset);
- }
-
- return new StoreInst(NewVal, NewPtr);
- }
- }
-
// store X, null -> turns into 'unreachable' in SimplifyCFG
if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
if (!isa<UndefValue>(Val)) {
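For reference, the width computation the removed code performed, restated as a self-contained sketch (my reconstruction: fixed 32-bit masks stand in for the APInts, and a power-of-two check stands in for TargetData::isLegalInteger):

#include <algorithm>
#include <bit>      // C++20: std::countl_one, std::countl_zero, std::bit_ceil
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t AndMask = 0xFFFFFF00u; // clears only the low 8 bits
  uint32_t OrMask  = 0x0000002Au; // sets bits only within the low 8 bits

  // Prefix of the wide value that (x | OrMask) & AndMask leaves unmodified.
  unsigned Prefix = std::min(std::countl_one(AndMask),
                             std::countl_zero(OrMask));
  unsigned NewWidth = 32 - Prefix; // bits that can actually change

  // Round up to a power of two, standing in for isIllegalInteger/NextPowerOf2.
  while (NewWidth < 32 && (NewWidth & (NewWidth - 1)) != 0)
    NewWidth = std::bit_ceil(NewWidth);

  // On a big-endian target the low-order bits sit at the highest address of
  // the word, so the narrow store must be offset from the original pointer.
  unsigned BEByteOffset = (32 - NewWidth) / 8;

  std::printf("narrow to i%u, big-endian offset %u bytes\n",
              NewWidth, BEByteOffset); // prints: narrow to i8, big-endian offset 3 bytes
}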