author    Eli Friedman <eli.friedman@gmail.com>  2011-10-06 23:20:49 +0000
committer Eli Friedman <eli.friedman@gmail.com>  2011-10-06 23:20:49 +0000
commit    8540101252d3ff69f288e83821aa9f27b366227b (patch)
tree      0b070829642ea1fbc7b50f066de5c04430acda79 /lib/VMCore
parent    95ce2e9c52ea220f5090d9e8bacabf8e62f88d06 (diff)
Remove the old atomic intrinsics. Autoupgrade functionality is included with this patch.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@141333 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/VMCore')
-rw-r--r--   lib/VMCore/AutoUpgrade.cpp   91
1 file changed, 91 insertions, 0 deletions
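
For context, a minimal sketch (not part of this patch) of how a module consumer such as the bitcode reader typically drives this autoupgrade path. UpgradeCallsToIntrinsic and the llvm/AutoUpgrade.h header are the existing entry points at this revision; the loop and the function name below are illustrative assumptions.

    // Illustrative sketch only, not part of this commit.
    #include "llvm/AutoUpgrade.h"
    #include "llvm/Module.h"

    // Walk every function declaration in the module. For the old llvm.atomic.*
    // and llvm.memory.barrier intrinsics, the upgrade rewrites each call site
    // into the new cmpxchg/atomicrmw/fence instructions and then drops the
    // now-dead declaration.
    static void upgradeOldIntrinsics(llvm::Module &M) {
      for (llvm::Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ) {
        llvm::Function *F = &*FI;
        ++FI;  // Advance first: the upgrade may erase F from the module.
        llvm::UpgradeCallsToIntrinsic(F);
      }
    }
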
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
index 04221d461d..b849d3ef8d 100644
--- a/lib/VMCore/AutoUpgrade.cpp
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -43,6 +43,20 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
switch (Name[0]) {
default: break;
+ case 'a':
+ if (Name.startswith("atomic.cmp.swap") ||
+ Name.startswith("atomic.swap") ||
+ Name.startswith("atomic.load.add") ||
+ Name.startswith("atomic.load.sub") ||
+ Name.startswith("atomic.load.and") ||
+ Name.startswith("atomic.load.nand") ||
+ Name.startswith("atomic.load.or") ||
+ Name.startswith("atomic.load.xor") ||
+ Name.startswith("atomic.load.max") ||
+ Name.startswith("atomic.load.min") ||
+ Name.startswith("atomic.load.umax") ||
+ Name.startswith("atomic.load.umin"))
+ return true;
case 'i':
// This upgrades the old llvm.init.trampoline to the new
// llvm.init.trampoline and llvm.adjust.trampoline pair.
@@ -63,6 +77,9 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
FTy->getParamType(2), (Type *)0));
return true;
}
+ case 'm':
+ if (Name == "memory.barrier")
+ return true;
case 'p':
// This upgrades the llvm.prefetch intrinsic to accept one more parameter,
// which is a instruction / data cache identifier. The old version only
@@ -206,6 +223,80 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Remove intrinsic.
CI->eraseFromParent();
+ } else if (F->getName().startswith("llvm.atomic.cmp.swap")) {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+ Value *Val = Builder.CreateAtomicCmpXchg(CI->getArgOperand(0),
+ CI->getArgOperand(1),
+ CI->getArgOperand(2),
+ Monotonic);
+
+ // Replace intrinsic.
+ Val->takeName(CI);
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Val);
+ CI->eraseFromParent();
+ } else if (F->getName().startswith("llvm.atomic")) {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ AtomicRMWInst::BinOp Op;
+ if (F->getName().startswith("llvm.atomic.swap"))
+ Op = AtomicRMWInst::Xchg;
+ else if (F->getName().startswith("llvm.atomic.load.add"))
+ Op = AtomicRMWInst::Add;
+ else if (F->getName().startswith("llvm.atomic.load.sub"))
+ Op = AtomicRMWInst::Sub;
+ else if (F->getName().startswith("llvm.atomic.load.and"))
+ Op = AtomicRMWInst::And;
+ else if (F->getName().startswith("llvm.atomic.load.nand"))
+ Op = AtomicRMWInst::Nand;
+ else if (F->getName().startswith("llvm.atomic.load.or"))
+ Op = AtomicRMWInst::Or;
+ else if (F->getName().startswith("llvm.atomic.load.xor"))
+ Op = AtomicRMWInst::Xor;
+ else if (F->getName().startswith("llvm.atomic.load.max"))
+ Op = AtomicRMWInst::Max;
+ else if (F->getName().startswith("llvm.atomic.load.min"))
+ Op = AtomicRMWInst::Min;
+ else if (F->getName().startswith("llvm.atomic.load.umax"))
+ Op = AtomicRMWInst::UMax;
+ else if (F->getName().startswith("llvm.atomic.load.umin"))
+ Op = AtomicRMWInst::UMin;
+ else
+ llvm_unreachable("Unknown atomic");
+
+ Value *Val = Builder.CreateAtomicRMW(Op, CI->getArgOperand(0),
+ CI->getArgOperand(1),
+ Monotonic);
+
+ // Replace intrinsic.
+ Val->takeName(CI);
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Val);
+ CI->eraseFromParent();
+ } else if (F->getName() == "llvm.memory.barrier") {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // Note that this conversion ignores the "device" bit; it was not really
+ // well-defined, and got abused because nobody paid enough attention to
+ // get it right. In practice, this probably doesn't matter; application
+ // code generally doesn't need anything stronger than
+ // SequentiallyConsistent (and realistically, SequentiallyConsistent
+ // is lowered to a strong enough barrier for almost anything).
+
+ if (cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue())
+ Builder.CreateFence(SequentiallyConsistent);
+ else if (!cast<ConstantInt>(CI->getArgOperand(0))->getZExtValue())
+ Builder.CreateFence(Release);
+ else if (!cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue())
+ Builder.CreateFence(Acquire);
+ else
+ Builder.CreateFence(AcquireRelease);
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
} else {
llvm_unreachable("Unknown function for CallInst upgrade.");
}
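
As a usage note (illustrative, not part of the patch): frontends that previously emitted the removed intrinsic calls now produce the first-class atomic instructions that this upgrade path targets. Below is a minimal IRBuilder sketch of that replacement; the helper names are placeholders, and the header paths reflect this revision.

    // Illustrative sketch only; helper names are placeholders.
    #include "llvm/Support/IRBuilder.h"  // IRBuilder lived under Support/ at this revision.
    #include "llvm/Instructions.h"
    using namespace llvm;

    // Previously: e.g. call i32 @llvm.atomic.load.add.i32.p0i32(i32* %Ptr, i32 %Val)
    // Now:        atomicrmw add i32* %Ptr, i32 %Val monotonic
    static Value *emitAtomicAdd(IRBuilder<> &Builder, Value *Ptr, Value *Val) {
      return Builder.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Val, Monotonic);
    }

    // Previously: a call to @llvm.memory.barrier with all reordering bits set
    // Now:        fence seq_cst
    static void emitFullBarrier(IRBuilder<> &Builder) {
      Builder.CreateFence(SequentiallyConsistent);
    }
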