author    Tim Northover <Tim.Northover@arm.com>  2013-04-20 12:32:43 +0000
committer Tim Northover <Tim.Northover@arm.com>  2013-04-20 12:32:43 +0000
commit    8b71994fde0f0fcdf7a8260dc773fb7376b1231f (patch)
tree      d1f2a8fb857f69681fa26a311e1709a581cf6825
parent    6265d5c91a18b2fb6499eb581c488315880c044d (diff)
Remove unused ShouldFoldAtomicFences flag.
I think it's almost impossible to fold atomic fences profitably under
LLVM/C++11 semantics. As a result, this is now unused and just cluttering
up the target interface.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179940 91177308-0d34-0410-b5e6-96231b3b80d8
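For context, the pattern this flag let the combiner fold is the fence/atomic/fence sequence front ends emit for sequentially consistent operations. Under the C++11 model a standalone atomic_thread_fence orders all surrounding memory accesses, not just the one adjacent atomic, so the folded form is not generally equivalent to the original. A minimal sketch of the two forms (plain C++11 with illustrative names; not code from this commit):

    #include <atomic>

    std::atomic<int> X{0};

    // The shape the combiner used to fold when ShouldFoldAtomicFences
    // was set: explicit fences bracketing a relaxed access.
    void fence_store_fence() {
      std::atomic_thread_fence(std::memory_order_seq_cst);
      X.store(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    // The folded form. The standalone fences above additionally order
    // *other* relaxed accesses around them, which this single store does
    // not, so the fold is only sound in narrow cases.
    void folded() {
      X.store(1, std::memory_order_seq_cst);
    }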
-rw-r--r--  include/llvm/Target/TargetLowering.h        | 18 -
-rw-r--r--  lib/CodeGen/TargetLoweringBase.cpp          |  1 -
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.cpp  |  4 -
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp          |  2 -
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp          |  7 -
5 files changed, 0 insertions(+), 32 deletions(-)
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 1e7ccd8f8e..d5c9ebe0f2 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -810,13 +810,6 @@ public:
return PrefLoopAlignment;
}
- /// getShouldFoldAtomicFences - return whether the combiner should fold
- /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
- ///
- bool getShouldFoldAtomicFences() const {
- return ShouldFoldAtomicFences;
- }
-
/// getInsertFencesFor - return whether the DAG builder should automatically
/// insert fences and reduce ordering for atomics.
///
@@ -1101,12 +1094,6 @@ protected:
MinStackArgumentAlignment = Align;
}
- /// setShouldFoldAtomicFences - Set if the target's implementation of the
- /// atomic operation intrinsics includes locking. Default is false.
- void setShouldFoldAtomicFences(bool fold) {
- ShouldFoldAtomicFences = fold;
- }
-
/// setInsertFencesForAtomic - Set if the DAG builder should
/// automatically insert fences and reduce the order of atomic memory
/// operations to Monotonic.
@@ -1364,11 +1351,6 @@ private:
///
unsigned PrefLoopAlignment;
- /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
- /// be folded into the enclosed atomic intrinsic instruction by the
- /// combiner.
- bool ShouldFoldAtomicFences;
-
/// InsertFencesForAtomic - Whether the DAG builder should automatically
insert fences and reduce ordering for atomics. (This will be set for
most architectures with weak memory ordering.)
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 82bb37ef97..8074d167f4 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -647,7 +647,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
PrefFunctionAlignment = 0;
PrefLoopAlignment = 0;
MinStackArgumentAlignment = 1;
- ShouldFoldAtomicFences = false;
InsertFencesForAtomic = false;
SupportJumpTables = true;
MinimumJumpTableEntries = 4;
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6deae75488..786b1ba1d5 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -59,10 +59,6 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
computeRegisterProperties();
- // We have particularly efficient implementations of atomic fences if they can
- // be combined with nearby atomic loads and stores.
- setShouldFoldAtomicFences(true);
-
// We combine OR nodes for bitfield and NEON BSL operations.
setTargetDAGCombine(ISD::OR);
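The removed AArch64 comment refers to ARMv8's acquire/release instructions: a sequentially consistent access can be lowered to a single ldar or stlr rather than a plain access bracketed by dmb barriers, which is what made fence folding look attractive there. A rough illustration with typical codegen noted in comments (user-level C++ with illustrative names; not code from this commit):

    #include <atomic>

    std::atomic<long> Flag{0};

    long load_seq_cst() {
      // On AArch64 this typically lowers to one "ldar" (load-acquire)
      // rather than an ordinary "ldr" plus explicit "dmb" barriers.
      return Flag.load(std::memory_order_seq_cst);
    }

    void store_seq_cst(long V) {
      // Likewise typically a single "stlr" (store-release).
      Flag.store(V, std::memory_order_seq_cst);
    }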
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 698c8a7e3e..23d7ef1290 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -763,8 +763,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
// Unordered/Monotonic case.
setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
- // Since the libcalls include locking, fold in the fences
- setShouldFoldAtomicFences(true);
}
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
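The removed ARM comment covers older cores where atomic operations are expanded to __sync_* runtime library calls; those helpers already perform the required locking and barriers, so separate fences around the call were redundant. A hedged sketch of the situation (illustrative names; the exact libcall chosen depends on the core and runtime):

    #include <atomic>

    std::atomic<int> Lock{0};

    bool try_acquire() {
      int Expected = 0;
      // On cores without native atomics this compare-exchange may become
      // a call such as __sync_val_compare_and_swap; the helper itself
      // contains the locking and fencing, so no extra fences are needed
      // around it.
      return Lock.compare_exchange_strong(Expected, 1,
                                          std::memory_order_seq_cst);
    }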
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 94370ae4ee..b7ba0b8188 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -528,13 +528,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
- // On X86 and X86-64, atomic operations are lowered to locked instructions.
- // Locked instructions, in turn, have implicit fence semantics (all memory
- // operations are flushed before issuing the locked instruction, and they
- // are not buffered), so we can fold away the common pattern of
- // fence-atomic-fence.
- setShouldFoldAtomicFences(true);
-
// Expand certain atomics
for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
MVT VT = IntVTs[i];
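The removed X86 comment is the classic x86 story: lock-prefixed instructions act as full barriers, a sequentially consistent store is commonly lowered to an implicitly locked "xchg", and loads already have acquire semantics, so standalone mfence instructions next to such operations were foldable. A small illustration with typical codegen noted in comments (user-level C++ with illustrative names; not code from this commit):

    #include <atomic>

    std::atomic<int> Ready{0};

    void publish() {
      // On x86-64 this is commonly lowered to "xchg", which is
      // implicitly locked and acts as a full barrier, so an adjacent
      // standalone "mfence" adds nothing for this access.
      Ready.store(1, std::memory_order_seq_cst);
    }

    int consume() {
      // x86 loads already have acquire semantics; this is a plain "mov".
      return Ready.load(std::memory_order_seq_cst);
    }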