author    Michael Liao <michael.liao@intel.com>  2012-09-21 03:18:52 +0000
committer Michael Liao <michael.liao@intel.com>  2012-09-21 03:18:52 +0000
commit    fe87c302aa600fdac6d80349cde21e06337f8bfe (patch)
tree      dfc8fa592e7d9929d76d05f13ca492ff68088169
parent    0838249a6a327ad0e1a909d1d4e2077b23b4a272 (diff)
Add missing i8 max/min/umax/umin support
- Fix PR5145 and turn on the 8-bit atomic op tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164358 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 53
-rw-r--r--  test/CodeGen/X86/atomic8.ll        |  1
-rw-r--r--  test/CodeGen/X86/pr5145.ll         | 35
3 files changed, 79 insertions(+), 10 deletions(-)
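
Before this patch, 8-bit atomicrmw max/min/umax/umin hit the llvm_unreachable("Not supported yet!") path removed below. Operationally, the lowering emits a compare-exchange loop; a minimal C++ analogue of what `atomicrmw max i8*` must compute (an illustrative sketch, not the backend code; the function name is ours):

```cpp
#include <algorithm>
#include <atomic>
#include <cstdint>

// Sketch of the semantics the backend implements for an 8-bit
// atomicrmw max: load the old value, pick the max, and retry with a
// compare-exchange until no other thread raced in -- the
// cmpb / cmovl / lock cmpxchgb / jne loop that pr5145.ll checks for.
int8_t atomic_max_i8(std::atomic<int8_t> &A, int8_t V) {
  int8_t Old = A.load(std::memory_order_acquire);
  while (!A.compare_exchange_weak(Old, std::max(Old, V),
                                  std::memory_order_acquire))
    ; // Old is refreshed by compare_exchange_weak on failure
  return Old; // atomicrmw yields the value seen before the update
}
```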
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 0e6e4a3294..b2b94794f2 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -11966,15 +11966,19 @@ static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc,
case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr;
case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr;
case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr;
+ case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr;
case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr;
case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr;
case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr;
+ case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr;
case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr;
case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr;
case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr;
+ case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr;
case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr;
case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr;
case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr;
+ case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr;
case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr;
case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr;
case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr;
@@ -12013,6 +12017,7 @@ static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc,
// Get pseudo CMOV opcode from the specified data type.
static unsigned getPseudoCMOVOpc(EVT VT) {
switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::i8: return X86::CMOV_GR8;
case MVT::i16: return X86::CMOV_GR16;
case MVT::i32: return X86::CMOV_GR32;
default:
@@ -12154,19 +12159,18 @@ X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI,
break;
}
case X86::ATOMMAX8:
- case X86::ATOMMIN8:
- case X86::ATOMUMAX8:
- case X86::ATOMUMIN8:
- llvm_unreachable("Not supported yet!");
case X86::ATOMMAX16:
case X86::ATOMMAX32:
case X86::ATOMMAX64:
+ case X86::ATOMMIN8:
case X86::ATOMMIN16:
case X86::ATOMMIN32:
case X86::ATOMMIN64:
+ case X86::ATOMUMAX8:
case X86::ATOMUMAX16:
case X86::ATOMUMAX32:
case X86::ATOMUMAX64:
+ case X86::ATOMUMIN8:
case X86::ATOMUMIN16:
case X86::ATOMUMIN32:
case X86::ATOMUMIN64: {
@@ -12178,13 +12182,40 @@ X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI,
.addReg(AccReg);
if (Subtarget->hasCMov()) {
- // Native support
- BuildMI(mainMBB, DL, TII->get(CMOVOpc), t1)
- .addReg(SrcReg)
- .addReg(AccReg);
+ if (VT != MVT::i8) {
+ // Native support
+ BuildMI(mainMBB, DL, TII->get(CMOVOpc), t1)
+ .addReg(SrcReg)
+ .addReg(AccReg);
+ } else {
+ // Promote i8 to i32 to use CMOV32
+ const TargetRegisterClass *RC32 = getRegClassFor(MVT::i32);
+ unsigned SrcReg32 = MRI.createVirtualRegister(RC32);
+ unsigned AccReg32 = MRI.createVirtualRegister(RC32);
+ unsigned t2 = MRI.createVirtualRegister(RC32);
+
+ unsigned Undef = MRI.createVirtualRegister(RC32);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Undef);
+
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32)
+ .addReg(Undef)
+ .addReg(SrcReg)
+ .addImm(X86::sub_8bit);
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32)
+ .addReg(Undef)
+ .addReg(AccReg)
+ .addImm(X86::sub_8bit);
+
+ BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2)
+ .addReg(SrcReg32)
+ .addReg(AccReg32);
+
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t1)
+ .addReg(t2, 0, X86::sub_8bit);
+ }
} else {
// Use pseudo select and lower them.
- assert((VT == MVT::i16 || VT == MVT::i32) &&
+ assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
"Invalid atomic-load-op transformation!");
unsigned SelOpc = getPseudoCMOVOpc(VT);
X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc);
@@ -13314,18 +13345,22 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::ATOMNAND32:
case X86::ATOMNAND64:
// Fall through
+ case X86::ATOMMAX8:
case X86::ATOMMAX16:
case X86::ATOMMAX32:
case X86::ATOMMAX64:
// Fall through
+ case X86::ATOMMIN8:
case X86::ATOMMIN16:
case X86::ATOMMIN32:
case X86::ATOMMIN64:
// Fall through
+ case X86::ATOMUMAX8:
case X86::ATOMUMAX16:
case X86::ATOMUMAX32:
case X86::ATOMUMAX64:
// Fall through
+ case X86::ATOMUMIN8:
case X86::ATOMUMIN16:
case X86::ATOMUMIN32:
case X86::ATOMUMIN64:
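
The notable part of the EmitAtomicLoadArith hunk above: x86 has no 8-bit CMOVcc encoding, so on CMOV-capable targets the byte operands are widened into 32-bit registers (IMPLICIT_DEF plus INSERT_SUBREG into sub_8bit), selected with a 32-bit CMOV keyed off the flags from the 8-bit compare, and the low byte is copied back. A rough scalar analogue (illustrative only; the real code builds MachineInstrs, and the name here is ours):

```cpp
#include <cstdint>

// TakeSrc stands in for the condition that CMP8rr computes; the select
// itself runs on 32-bit values whose upper 24 bits are don't-care,
// mirroring CMOVL32rr & friends operating on the widened GR32 copies.
static uint8_t select_via_cmov32(uint8_t Src, uint8_t Acc, bool TakeSrc) {
  uint32_t Src32 = Src;                    // INSERT_SUBREG into sub_8bit
  uint32_t Acc32 = Acc;                    // (upper bits undefined there)
  uint32_t Sel = TakeSrc ? Src32 : Acc32;  // the 32-bit CMOV
  return static_cast<uint8_t>(Sel);        // COPY of sub_8bit back to GR8
}
```

Targets without CMOV instead take the pseudo-select path (getPseudoCMOVOpc), which the new MVT::i8 case and the widened assert above also cover.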
diff --git a/test/CodeGen/X86/atomic8.ll b/test/CodeGen/X86/atomic8.ll
index 035a28dbff..412428406d 100644
--- a/test/CodeGen/X86/atomic8.ll
+++ b/test/CodeGen/X86/atomic8.ll
@@ -1,6 +1,5 @@
; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32
-; XFAIL: *
@sc8 = external global i8
diff --git a/test/CodeGen/X86/pr5145.ll b/test/CodeGen/X86/pr5145.ll
new file mode 100644
index 0000000000..06ecd7a145
--- /dev/null
+++ b/test/CodeGen/X86/pr5145.ll
@@ -0,0 +1,35 @@
+; RUN: llc -march=x86-64 < %s | FileCheck %s
+@sc8 = external global i8
+
+define void @atomic_maxmin_i8() {
+; CHECK: atomic_maxmin_i8
+ %1 = atomicrmw max i8* @sc8, i8 5 acquire
+; CHECK: [[LABEL:.LBB[0-9]+_[0-9]+]]
+; CHECK: cmpb
+; CHECK: cmovl
+; CHECK: lock
+; CHECK-NEXT: cmpxchgb
+; CHECK: jne [[LABEL]]
+ %2 = atomicrmw min i8* @sc8, i8 6 acquire
+; CHECK: [[LABEL:.LBB[0-9]+_[0-9]+]]
+; CHECK: cmpb
+; CHECK: cmovg
+; CHECK: lock
+; CHECK-NEXT: cmpxchgb
+; CHECK: jne [[LABEL]]
+ %3 = atomicrmw umax i8* @sc8, i8 7 acquire
+; CHECK: [[LABEL:.LBB[0-9]+_[0-9]+]]
+; CHECK: cmpb
+; CHECK: cmovb
+; CHECK: lock
+; CHECK-NEXT: cmpxchgb
+; CHECK: jne [[LABEL]]
+ %4 = atomicrmw umin i8* @sc8, i8 8 acquire
+; CHECK: [[LABEL:.LBB[0-9]+_[0-9]+]]
+; CHECK: cmpb
+; CHECK: cmova
+; CHECK: lock
+; CHECK-NEXT: cmpxchgb
+; CHECK: jne [[LABEL]]
+ ret void
+}