author    Jakob Stoklund Olesen <stoklund@2pi.dk>  2014-01-30 04:48:46 +0000
committer Jakob Stoklund Olesen <stoklund@2pi.dk>  2014-01-30 04:48:46 +0000
commit    17ca0f8f80b034131d05233076c85e958572ad4d (patch)
tree      f982ef8a73649e51f822f7263c2909473d2e3b0d /lib
parent    09f38a0ef13bf542f7b3f1862d718e33e7de587b (diff)
Implement SPARCv9 atomic_swap_64 with a pseudo.
The SWAP instruction only exists in a 32-bit variant, but the 64-bit atomic
swap can be implemented in terms of CASX, like the other atomic rmw primitives.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200453 91177308-0d34-0410-b5e6-96231b3b80d8
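For context, an atomic exchange can always be synthesized from compare-and-swap by retrying until the CAS succeeds against the value just read; that is the loop the custom inserter builds around CASX. A minimal sketch of the same idea in portable C++ (std::atomic is used purely for illustration and is not what the backend emits):

#include <atomic>
#include <cstdint>

// Exchange built from compare-and-swap: read the old value, then CAS the
// new value in, retrying whenever another thread changed the memory first.
uint64_t swap_via_cas(std::atomic<uint64_t> &Mem, uint64_t NewVal) {
  uint64_t Old = Mem.load(std::memory_order_relaxed);
  while (!Mem.compare_exchange_weak(Old, NewVal))
    ;  // On failure, Old is reloaded with the current contents.
  return Old;  // atomic_swap returns the previous value.
}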
Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/Sparc/SparcISelLowering.cpp  10
-rw-r--r--  lib/Target/Sparc/SparcInstr64Bit.td      8
2 files changed, 15 insertions, 3 deletions
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 75b0167acd..da5788d880 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1495,7 +1495,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
   if (Subtarget->is64Bit()) {
     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
-    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
+    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
   }
@@ -2874,6 +2874,9 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case SP::ATOMIC_LOAD_NAND_64:
     return expandAtomicRMW(MI, BB, SP::ANDXrr);
+  case SP::ATOMIC_SWAP_64:
+    return expandAtomicRMW(MI, BB, 0);
+
   case SP::ATOMIC_LOAD_MAX_32:
     return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
   case SP::ATOMIC_LOAD_MAX_64:
@@ -3012,7 +3015,8 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
   // Build the loop block.
   unsigned ValReg = MRI.createVirtualRegister(ValueRC);
-  unsigned UpdReg = MRI.createVirtualRegister(ValueRC);
+  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
+  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
   BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
     .addReg(Val0Reg).addMBB(MBB)
@@ -3024,7 +3028,7 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
     BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
-  } else {
+  } else if (Opcode) {
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg);
   }
diff --git a/lib/Target/Sparc/SparcInstr64Bit.td b/lib/Target/Sparc/SparcInstr64Bit.td
index 5f21334291..9146098a23 100644
--- a/lib/Target/Sparc/SparcInstr64Bit.td
+++ b/lib/Target/Sparc/SparcInstr64Bit.td
@@ -463,6 +463,14 @@ defm ATOMIC_LOAD_MAX : AtomicRMW<atomic_load_max_32, atomic_load_max_64>;
 defm ATOMIC_LOAD_UMIN : AtomicRMW<atomic_load_umin_32, atomic_load_umin_64>;
 defm ATOMIC_LOAD_UMAX : AtomicRMW<atomic_load_umax_32, atomic_load_umax_64>;
+// There is no 64-bit variant of SWAP, so use a pseudo.
+let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1,
+    Defs = [ICC], Predicates = [Is64Bit] in
+def ATOMIC_SWAP_64 : Pseudo<(outs I64Regs:$rd),
+                            (ins ptr_rc:$addr, I64Regs:$rs2), "",
+                            [(set i64:$rd,
+                                  (atomic_swap_64 iPTR:$addr, i64:$rs2))]>;
+
 // Global addresses, constant pool entries
 let Predicates = [Is64Bit] in {