-rw-r--r--  include/llvm/Target/TargetLowering.h        18
-rw-r--r--  lib/CodeGen/TargetLoweringBase.cpp            1
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.cpp    4
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp            2
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp            7
5 files changed, 0 insertions, 32 deletions
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 1e7ccd8f8e..d5c9ebe0f2 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -810,13 +810,6 @@ public:
return PrefLoopAlignment;
}
- /// getShouldFoldAtomicFences - return whether the combiner should fold
- /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
- ///
- bool getShouldFoldAtomicFences() const {
- return ShouldFoldAtomicFences;
- }
-
/// getInsertFencesFor - return whether the DAG builder should automatically
/// insert fences and reduce ordering for atomics.
///
@@ -1101,12 +1094,6 @@ protected:
MinStackArgumentAlignment = Align;
}
- /// setShouldFoldAtomicFences - Set if the target's implementation of the
- /// atomic operation intrinsics includes locking. Default is false.
- void setShouldFoldAtomicFences(bool fold) {
- ShouldFoldAtomicFences = fold;
- }
-
/// setInsertFencesForAtomic - Set if the DAG builder should
/// automatically insert fences and reduce the order of atomic memory
/// operations to Monotonic.
@@ -1364,11 +1351,6 @@ private:
///
unsigned PrefLoopAlignment;
- /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
- /// be folded into the enclosed atomic intrinsic instruction by the
- /// combiner.
- bool ShouldFoldAtomicFences;
-
/// InsertFencesForAtomic - Whether the DAG builder should automatically
/// insert fences and reduce ordering for atomics. (This will be set
/// for most architectures with weak memory ordering.)
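The hook removed above was a plain getter/setter pair on TargetLowering. As a minimal sketch of how a target opted in before this change (MyTargetLowering is a hypothetical placeholder; the real opt-ins are the AArch64, ARM, and X86 constructors in the hunks below):

    // Hypothetical target constructor, prior to this commit (the API
    // no longer exists afterward):
    MyTargetLowering::MyTargetLowering(TargetMachine &TM)
        : TargetLowering(TM) {
      // Ask the combiner to fold fence (MEMBARRIER) nodes into adjacent
      // atomic intrinsics whose lowering already provides the barrier.
      setShouldFoldAtomicFences(true);
    }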
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 82bb37ef97..8074d167f4 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -647,7 +647,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
PrefFunctionAlignment = 0;
PrefLoopAlignment = 0;
MinStackArgumentAlignment = 1;
- ShouldFoldAtomicFences = false;
InsertFencesForAtomic = false;
SupportJumpTables = true;
MinimumJumpTableEntries = 4;
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6deae75488..786b1ba1d5 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -59,10 +59,6 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
computeRegisterProperties();
- // We have particularly efficient implementations of atomic fences if they can
- // be combined with nearby atomic loads and stores.
- setShouldFoldAtomicFences(true);
-
// We combine OR nodes for bitfield and NEON BSL operations.
setTargetDAGCombine(ISD::OR);
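The deleted AArch64 comment refers to combining fences with nearby atomic accesses. As a small illustration in standard C++ (this is an assumption about typical codegen, not output taken from this commit), a sequentially consistent store on AArch64 can be emitted as a single store-release rather than a plain store bracketed by dmb barriers:

    #include <atomic>

    std::atomic<int> Flag;

    // On AArch64 this store can lower to one 'stlr' instruction, so no
    // separate fence instructions are needed around it.
    void publish(int V) {
      Flag.store(V, std::memory_order_seq_cst);
    }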
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 698c8a7e3e..23d7ef1290 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -763,8 +763,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
// Unordered/Monotonic case.
setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
- // Since the libcalls include locking, fold in the fences
- setShouldFoldAtomicFences(true);
}
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
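The deleted ARM comment sits in the path where atomic operations become library calls. As a rough illustration in standard C++ (the __sync_* lowering is assumed for targets without native atomic instructions), the fetch_add below becomes a locking libcall, so explicit fences around the call were redundant and the removed flag let the combiner drop them:

    #include <atomic>

    std::atomic<int> Counter;

    // On targets where this lowers to a __sync_fetch_and_add libcall,
    // the call itself performs the necessary locking and barriers.
    int bump() {
      return Counter.fetch_add(1, std::memory_order_seq_cst);
    }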
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 94370ae4ee..b7ba0b8188 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -528,13 +528,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
- // On X86 and X86-64, atomic operations are lowered to locked instructions.
- // Locked instructions, in turn, have implicit fence semantics (all memory
- // operations are flushed before issuing the locked instruction, and they
- // are not buffered), so we can fold away the common pattern of
- // fence-atomic-fence.
- setShouldFoldAtomicFences(true);
-
// Expand certain atomics
for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
MVT VT = IntVTs[i];
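The deleted X86 comment describes the fence-atomic-fence pattern directly. As a hedged example in standard C++ (the folding rationale is taken from the deleted comment, not re-verified here), both fences below are redundant on x86 because the exchange compiles to an implicitly locked xchg with full-fence semantics:

    #include <atomic>

    std::atomic<int> X;

    // On x86, X.exchange(V) uses an implicitly locked 'xchg'; the
    // surrounding thread fences add no ordering beyond what the locked
    // instruction already guarantees, so they could be folded away.
    int swap(int V) {
      std::atomic_thread_fence(std::memory_order_seq_cst);
      int Old = X.exchange(V);
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return Old;
    }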