author     Robert Lytton <robert@xmos.com>    2014-02-11 10:36:18 +0000
committer  Robert Lytton <robert@xmos.com>    2014-02-11 10:36:18 +0000
commit     04a573a41f037eae148f70bf2038602f7e32dd71 (patch)
tree       abc5e9d880e786d285c0de09b66f491b00d30611 /lib
parent     e9d5f6e387bb56f3809a56fe8e41501a297cc14e (diff)
XCore target: Lower ATOMIC_LOAD & ATOMIC_STORE
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201143 91177308-0d34-0410-b5e6-96231b3b80d8
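Editorial note: with these hooks in place, naturally aligned 8-, 16- and 32-bit atomic loads and stores are lowered to ordinary (extending/truncating) loads and stores on XCore, with ordering handled by inserted fences. A minimal sketch of the kind of C++ source this covers (hypothetical example, not taken from the patch or its tests):

    #include <atomic>

    std::atomic<int> counter;    // 32-bit: lowered to an ordinary word load/store
    std::atomic<short> level;    // 16-bit: lowered via an extending load / truncating store

    int read_counter() {
      // Aligned atomic loads become plain loads; ordering is handled by the
      // fences that setInsertFencesForAtomic(true) places around them.
      return counter.load(std::memory_order_relaxed);
    }

    void set_level(short v) {
      level.store(v, std::memory_order_relaxed);
    }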
Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp  68
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.h      2
2 files changed, 70 insertions, 0 deletions
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index dd268c4cdd..82ad0d069a 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -159,7 +159,12 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
// Atomic operations
+ // We request fences for ATOMIC_* instructions, to reduce them to Monotonic.
+ // As we are always sequentially consistent, an ATOMIC_FENCE becomes a no-op.
+ setInsertFencesForAtomic(true);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
// TRAMPOLINE is custom lowered.
setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
@@ -223,6 +228,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
+ case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
+ case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
default:
llvm_unreachable("unimplemented operand");
}
@@ -964,6 +971,67 @@ LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}
+SDValue XCoreTargetLowering::
+LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
+ AtomicSDNode *N = cast<AtomicSDNode>(Op);
+ assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
+ assert(N->getOrdering() <= Monotonic &&
+ "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+ if (N->getMemoryVT() == MVT::i32) {
+ if (N->getAlignment() < 4)
+ report_fatal_error("atomic load must be aligned");
+ return DAG.getLoad(getPointerTy(), SDLoc(Op), N->getChain(),
+ N->getBasePtr(), N->getPointerInfo(),
+ N->isVolatile(), N->isNonTemporal(),
+ N->isInvariant(), N->getAlignment(),
+ N->getTBAAInfo(), N->getRanges());
+ }
+ if (N->getMemoryVT() == MVT::i16) {
+ if (N->getAlignment() < 2)
+ report_fatal_error("atomic load must be aligned");
+ return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+ N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ }
+ if (N->getMemoryVT() == MVT::i8)
+ return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+ N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ return SDValue();
+}
+
+SDValue XCoreTargetLowering::
+LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
+ AtomicSDNode *N = cast<AtomicSDNode>(Op);
+ assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
+ assert(N->getOrdering() <= Monotonic &&
+ "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+ if (N->getMemoryVT() == MVT::i32) {
+ if (N->getAlignment() < 4)
+ report_fatal_error("atomic store must be aligned");
+ return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
+ N->getBasePtr(), N->getPointerInfo(),
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ }
+ if (N->getMemoryVT() == MVT::i16) {
+ if (N->getAlignment() < 2)
+ report_fatal_error("atomic store must be aligned");
+ return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+ N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ }
+ if (N->getMemoryVT() == MVT::i8)
+ return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+ N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ return SDValue();
+}
+
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
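Editorial note on the Monotonic assertions in the hunk above (not part of the patch): they hold because setInsertFencesForAtomic(true) has the DAG builder wrap stronger atomic operations in explicit ATOMIC_FENCE nodes and hand the memory operation itself to the target with at most Monotonic ordering. A rough, hedged illustration of what the backend then sees for a defaulted (seq_cst) store:

    // Illustrative only; simplified, not an actual SelectionDAG dump.
    //
    //   C++ source:      flag.store(1);                    // seq_cst by default
    //   What reaches     ATOMIC_FENCE
    //   the backend:     ATOMIC_STORE flag, 1 (Monotonic)  -> LowerATOMIC_STORE
    //                    ATOMIC_FENCE
    //
    // Both fences go through LowerATOMIC_FENCE and become XCoreISD::MEMBARRIER,
    // which, per the comment added above, is effectively a no-op on XCore's
    // sequentially consistent memory model.
    #include <atomic>

    std::atomic<int> flag;

    void publish() { flag.store(1); }   // defaults to std::memory_order_seq_cst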
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index 05e2a86a9b..e05068f773 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -172,6 +172,8 @@ namespace llvm {
SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
// Inline asm support
std::pair<unsigned, const TargetRegisterClass*>