author    Eli Friedman <eli.friedman@gmail.com>  2011-08-17 22:22:24 +0000
committer Eli Friedman <eli.friedman@gmail.com>  2011-08-17 22:22:24 +0000
commit    56efe24431b045be120d1fd5f6b0aa43a6b01c48 (patch)
tree      61079456e621c36efec6f4efff6f91faaa0d8c76 /lib
parent    0dd80b0d695b94b3baa8ec9195e9b41f3493da4f (diff)
Atomic load/store handling for the passes using memdep (GVN, DSE, memcpyopt).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137888 91177308-0d34-0410-b5e6-96231b3b80d8
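
The change swaps the old single isVolatile() test for two finer-grained predicates. DSE only has to prove that a dead store's value is never observed, so it can accept atomic accesses with Unordered ordering via isUnordered(); GVN and memcpyopt rewrite the accesses themselves, so they keep the stricter isSimple() test. A minimal sketch of the two predicates, paraphrasing the LoadInst API of this era (the real definitions live in llvm/Instructions.h; these bodies are approximations, not verbatim copies):

    #include "llvm/Instructions.h"  // 2011-era header path (pre llvm/IR/)
    using namespace llvm;

    // Equivalent to L->isSimple(): a plain load, neither volatile nor atomic.
    static bool simpleLoad(const LoadInst *L) {
      return !L->isVolatile() && !L->isAtomic();
    }

    // Equivalent to L->isUnordered(): additionally admits atomic loads with
    // the weakest (Unordered) ordering, which may be deleted when dead but
    // not freely widened or rematerialized.
    static bool unorderedLoad(const LoadInst *L) {
      return !L->isVolatile() && L->getOrdering() <= Unordered;
    }
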
Diffstat (limited to 'lib')
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp  8
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                    6
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp        6
3 files changed, 11 insertions, 9 deletions
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index a645f92116..8559147b71 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -213,9 +213,9 @@ getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
- // Don't remove volatile stores.
+ // Don't remove volatile/atomic stores.
if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return !SI->isVolatile();
+ return SI->isUnordered();
IntrinsicInst *II = cast<IntrinsicInst>(I);
switch (II->getIntrinsicID()) {
@@ -447,7 +447,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
- SI->getOperand(0) == DepLoad && !SI->isVolatile()) {
+ SI->getOperand(0) == DepLoad && isRemovable(SI)) {
DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n "
<< "LOAD: " << *DepLoad << "\n STORE: " << *SI << '\n');
@@ -670,6 +670,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// If we encounter a use of the pointer, it is no longer considered dead
if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
+ if (!L->isUnordered()) // Be conservative with atomic/volatile load
+ break;
LoadedLoc = AA->getLocation(L);
} else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
LoadedLoc = AA->getLocation(V);
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 9785b31892..3b4c252990 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -982,8 +982,8 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
if (Offset+LoadSize > SrcValSize) {
- assert(!SrcVal->isVolatile() && "Cannot widen volatile load!");
- assert(isa<IntegerType>(SrcVal->getType())&&"Can't widen non-integer load");
+ assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
+ assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
// If we have a load/load clobber and DepLI can be widened to cover this
// load, then we should widen it to the next power of 2 size big enough!
unsigned NewLoadSize = Offset+LoadSize;
@@ -1669,7 +1669,7 @@ bool GVN::processLoad(LoadInst *L) {
if (!MD)
return false;
- if (L->isVolatile())
+ if (!L->isSimple())
return false;
if (L->use_empty()) {
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index ba5ee68ebb..a2ef70f12e 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -384,7 +384,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
// If this is a store, see if we can merge it in.
- if (NextStore->isVolatile()) break;
+ if (!NextStore->isSimple()) break;
// Check to see if this stored value is of the same byte-splattable value.
if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
@@ -479,7 +479,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
- if (SI->isVolatile()) return false;
+ if (!SI->isSimple()) return false;
if (TD == 0) return false;
@@ -487,7 +487,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// happen to be using a load-store pair to implement it, rather than
// a memcpy.
if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
- if (!LI->isVolatile() && LI->hasOneUse() &&
+ if (LI->isSimple() && LI->hasOneUse() &&
LI->getParent() == SI->getParent()) {
MemDepResult ldep = MD->getDependency(LI);
CallInst *C = 0;
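
For a concrete view of the distinction, the following standalone program (a hypothetical demo against a 2011-era LLVM tree, not part of this commit) builds one plain load and one Unordered atomic load and queries both predicates:

    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Function.h"
    #include "llvm/BasicBlock.h"
    #include "llvm/Instructions.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Support/IRBuilder.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("atomics-demo", Ctx);
      Type *I32 = Type::getInt32Ty(Ctx);
      FunctionType *FTy =
          FunctionType::get(Type::getVoidTy(Ctx), I32->getPointerTo(), false);
      Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
      IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

      Value *Ptr = F->arg_begin();
      LoadInst *Plain = B.CreateLoad(Ptr, "plain");
      LoadInst *Unord = B.CreateLoad(Ptr, "unord");
      Unord->setAtomic(Unordered);  // atomic load with the weakest ordering
      B.CreateRetVoid();

      // Plain load: isSimple() and isUnordered() both hold, so all three
      // passes may transform it. Unordered atomic load: only isUnordered()
      // holds, so after this patch DSE still reasons about it, while GVN
      // and memcpyopt leave the access alone.
      outs() << "plain: simple=" << (Plain->isSimple() ? "yes" : "no")
             << " unordered=" << (Plain->isUnordered() ? "yes" : "no") << "\n";
      outs() << "unord: simple=" << (Unord->isSimple() ? "yes" : "no")
             << " unordered=" << (Unord->isUnordered() ? "yes" : "no") << "\n";
      return 0;
    }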