summary | refs | log | tree | commit | diff
path: root/lib
diff options:
  context:
  space:
  mode:
authorChris Lattner <sabre@nondot.org>2004-02-17 06:16:44 +0000
committerChris Lattner <sabre@nondot.org>2004-02-17 06:16:44 +0000
commit6e173a0d9c39add52bd291d754a80bcb40af5e6f (patch)
tree1951bfb8c1d93a976e5035abb88549d129a50f42 /lib
parent90c38c814171135c1c4d91dfb36d6799d4217eb6 (diff)
downloadllvm-6e173a0d9c39add52bd291d754a80bcb40af5e6f.tar.gz
llvm-6e173a0d9c39add52bd291d754a80bcb40af5e6f.tar.bz2
llvm-6e173a0d9c39add52bd291d754a80bcb40af5e6f.tar.xz
Rename MOVi[mr] instructions to MOV[rm]i
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@11527 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--lib/Target/X86/InstSelectSimple.cpp40
-rw-r--r--lib/Target/X86/PeepholeOptimizer.cpp16
-rw-r--r--lib/Target/X86/Printer.cpp10
-rw-r--r--lib/Target/X86/X86AsmPrinter.cpp10
-rw-r--r--lib/Target/X86/X86ISelSimple.cpp40
-rw-r--r--lib/Target/X86/X86InstrInfo.td12
-rw-r--r--lib/Target/X86/X86PeepholeOpt.cpp16
7 files changed, 78 insertions, 66 deletions
diff --git a/lib/Target/X86/InstSelectSimple.cpp b/lib/Target/X86/InstSelectSimple.cpp
index 14089b9abb..446479379e 100644
--- a/lib/Target/X86/InstSelectSimple.cpp
+++ b/lib/Target/X86/InstSelectSimple.cpp
@@ -306,7 +306,7 @@ namespace {
RegMap.erase(V); // Assign a new name to this constant if ref'd again
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Move the address of the global into the register
- BMI(MBB, IPt, X86::MOVir32, 1, Reg).addGlobalAddress(GV);
+ BMI(MBB, IPt, X86::MOVri32, 1, Reg).addGlobalAddress(GV);
RegMap.erase(V); // Assign a new name to this address if ref'd again
}
@@ -423,19 +423,19 @@ void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
if (Class == cLong) {
// Copy the value into the register pair.
uint64_t Val = cast<ConstantInt>(C)->getRawValue();
- BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(Val & 0xFFFFFFFF);
- BMI(MBB, IP, X86::MOVir32, 1, R+1).addZImm(Val >> 32);
+ BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(Val & 0xFFFFFFFF);
+ BMI(MBB, IP, X86::MOVri32, 1, R+1).addZImm(Val >> 32);
return;
}
assert(Class <= cInt && "Type not handled yet!");
static const unsigned IntegralOpcodeTab[] = {
- X86::MOVir8, X86::MOVir16, X86::MOVir32
+ X86::MOVri8, X86::MOVri16, X86::MOVri32
};
if (C->getType() == Type::BoolTy) {
- BMI(MBB, IP, X86::MOVir8, 1, R).addZImm(C == ConstantBool::True);
+ BMI(MBB, IP, X86::MOVri8, 1, R).addZImm(C == ConstantBool::True);
} else {
ConstantInt *CI = cast<ConstantInt>(C);
BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CI->getRawValue());
@@ -458,7 +458,7 @@ void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
} else if (isa<ConstantPointerNull>(C)) {
// Copy zero (null pointer) to the register.
- BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(0);
} else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
unsigned SrcReg = getReg(CPR->getValue(), MBB, IP);
BMI(MBB, IP, X86::MOVrr32, 1, R).addReg(SrcReg);
@@ -1211,7 +1211,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
}
} else {
// Values other than zero are not implemented yet.
- BuildMI(BB, X86::MOVir32, 1, TmpReg1).addZImm(0);
+ BuildMI(BB, X86::MOVri32, 1, TmpReg1).addZImm(0);
}
return;
@@ -1287,7 +1287,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(1);
}
- BuildMI(BB, X86::MOVir16, 1, X86::AX).addZImm((Val << 8) | Val);
+ BuildMI(BB, X86::MOVri16, 1, X86::AX).addZImm((Val << 8) | Val);
Opcode = X86::REP_STOSW;
break;
case 0: // DWORD aligned
@@ -1298,13 +1298,13 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(2);
}
Val = (Val << 8) | Val;
- BuildMI(BB, X86::MOVir32, 1, X86::EAX).addZImm((Val << 16) | Val);
+ BuildMI(BB, X86::MOVri32, 1, X86::EAX).addZImm((Val << 16) | Val);
Opcode = X86::REP_STOSD;
break;
case 1: // BYTE aligned
case 3: // BYTE aligned
CountReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::MOVir8, 1, X86::AL).addZImm(Val);
+ BuildMI(BB, X86::MOVri8, 1, X86::AL).addZImm(Val);
Opcode = X86::REP_STOSB;
break;
}
@@ -1532,12 +1532,12 @@ void ISel::doMultiplyConst(MachineBasicBlock *MBB,
}
// Most general case, emit a normal multiply...
- static const unsigned MOVirTab[] = {
- X86::MOVir8, X86::MOVir16, X86::MOVir32
+ static const unsigned MOVriTab[] = {
+ X86::MOVri8, X86::MOVri16, X86::MOVri32
};
unsigned TmpReg = makeAnotherReg(DestTy);
- BMI(MBB, IP, MOVirTab[Class], 1, TmpReg).addZImm(ConstRHS);
+ BMI(MBB, IP, MOVriTab[Class], 1, TmpReg).addZImm(ConstRHS);
// Emit a MUL to multiply the register holding the index by
// elementSize, putting the result in OffsetReg.
@@ -1647,7 +1647,7 @@ void ISel::emitDivRemOperation(MachineBasicBlock *BB,
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
- static const unsigned ClrOpcode[]={ X86::MOVir8, X86::MOVir16, X86::MOVir32 };
+ static const unsigned ClrOpcode[]={ X86::MOVri8, X86::MOVri16, X86::MOVri32 };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
static const unsigned DivOpcode[][4] = {
@@ -1742,12 +1742,12 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
if (isLeftShift) {
BMI(MBB, IP, X86::SHLir32, 2,
DestReg + 1).addReg(SrcReg).addZImm(Amount);
- BMI(MBB, IP, X86::MOVir32, 1,
+ BMI(MBB, IP, X86::MOVri32, 1,
DestReg).addZImm(0);
} else {
unsigned Opcode = isSigned ? X86::SARir32 : X86::SHRir32;
BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
- BMI(MBB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
}
}
} else {
@@ -1761,7 +1761,7 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
} else {
// Other shifts use a fixed zero value if the shift is more than 32
// bits.
- BMI(MBB, IP, X86::MOVir32, 1, TmpReg).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, TmpReg).addZImm(0);
}
// Initialize CL with the shift amount...
@@ -1989,7 +1989,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
if (isLong) { // Handle upper 32 bits as appropriate...
if (isUnsigned) // Zero out top bits...
- BMI(BB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
+ BMI(BB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
else // Sign extend bottom half...
BMI(BB, IP, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
}
@@ -2040,7 +2040,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
// Make a 64 bit temporary... and zero out the top of it...
unsigned TmpReg = makeAnotherReg(Type::LongTy);
BMI(BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
- BMI(BB, IP, X86::MOVir32, 1, TmpReg+1).addZImm(0);
+ BMI(BB, IP, X86::MOVri32, 1, TmpReg+1).addZImm(0);
SrcTy = Type::LongTy;
SrcClass = cLong;
SrcReg = TmpReg;
@@ -2093,7 +2093,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
addFrameReference(BMI(BB, IP, X86::MOVmr8, 4, HighPartOfCW), CWFrameIdx, 1);
// Set the high part to be round to zero...
- addFrameReference(BMI(BB, IP, X86::MOVim8, 5), CWFrameIdx, 1).addZImm(12);
+ addFrameReference(BMI(BB, IP, X86::MOVmi8, 5), CWFrameIdx, 1).addZImm(12);
// Reload the modified control word now...
addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
diff --git a/lib/Target/X86/PeepholeOptimizer.cpp b/lib/Target/X86/PeepholeOptimizer.cpp
index e2b4a46d0c..7bc2ec2a3c 100644
--- a/lib/Target/X86/PeepholeOptimizer.cpp
+++ b/lib/Target/X86/PeepholeOptimizer.cpp
@@ -179,9 +179,9 @@ bool PH::PeepholeOptimize(MachineBasicBlock &MBB,
return false;
#if 0
- case X86::MOVir32: Size++;
- case X86::MOVir16: Size++;
- case X86::MOVir8:
+ case X86::MOVri32: Size++;
+ case X86::MOVri16: Size++;
+ case X86::MOVri8:
// FIXME: We can only do this transformation if we know that flags are not
// used here, because XOR clobbers the flags!
if (MI->getOperand(1).isImmediate()) { // avoid mov EAX, <value>
@@ -373,7 +373,7 @@ bool SSAPH::OptimizeAddress(MachineInstr *MI, unsigned OpNo) {
// Attempt to fold instructions used by the base register into the instruction
if (MachineInstr *DefInst = getDefiningInst(BaseRegOp)) {
switch (DefInst->getOpcode()) {
- case X86::MOVir32:
+ case X86::MOVri32:
// If there is no displacement set for this instruction set one now.
// FIXME: If we can fold two immediates together, we should do so!
if (DisplacementOp.isImmediate() && !DisplacementOp.getImmedValue()) {
@@ -461,14 +461,14 @@ bool SSAPH::PeepholeOptimize(MachineBasicBlock &MBB,
// Register to memory stores. Format: <base,scale,indexreg,immdisp>, srcreg
case X86::MOVrm32: case X86::MOVrm16: case X86::MOVrm8:
- case X86::MOVim32: case X86::MOVim16: case X86::MOVim8:
+ case X86::MOVmi32: case X86::MOVmi16: case X86::MOVmi8:
// Check to see if we can fold the source instruction into this one...
if (MachineInstr *SrcInst = getDefiningInst(MI->getOperand(4))) {
switch (SrcInst->getOpcode()) {
// Fold the immediate value into the store, if possible.
- case X86::MOVir8: return Propagate(MI, 4, SrcInst, 1, X86::MOVim8);
- case X86::MOVir16: return Propagate(MI, 4, SrcInst, 1, X86::MOVim16);
- case X86::MOVir32: return Propagate(MI, 4, SrcInst, 1, X86::MOVim32);
+ case X86::MOVri8: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi8);
+ case X86::MOVri16: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi16);
+ case X86::MOVri32: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi32);
default: break;
}
}
diff --git a/lib/Target/X86/Printer.cpp b/lib/Target/X86/Printer.cpp
index f19b3422ae..1b70773b53 100644
--- a/lib/Target/X86/Printer.cpp
+++ b/lib/Target/X86/Printer.cpp
@@ -642,13 +642,19 @@ void Printer::printMachineInstruction(const MachineInstr *MI) {
// These instructions are the same as MRMDestReg, but instead of having a
// register reference for the mod/rm field, it's a memory reference.
//
- assert(isMem(MI, 0) && MI->getNumOperands() == 4+1 &&
- MI->getOperand(4).isRegister() && "Bad format for MRMDestMem!");
+ assert(isMem(MI, 0) &&
+ (MI->getNumOperands() == 4+1 ||
+ (MI->getNumOperands() == 4+2 && MI->getOperand(5).isImmediate()))
+ && "Bad format for MRMDestMem!");
O << TII.getName(MI->getOpcode()) << " " << sizePtr(Desc) << " ";
printMemReference(MI, 0);
O << ", ";
printOp(MI->getOperand(4));
+ if (MI->getNumOperands() == 4+2) {
+ O << ", ";
+ printOp(MI->getOperand(5));
+ }
O << "\n";
return;
}
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index f19b3422ae..1b70773b53 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -642,13 +642,19 @@ void Printer::printMachineInstruction(const MachineInstr *MI) {
// These instructions are the same as MRMDestReg, but instead of having a
// register reference for the mod/rm field, it's a memory reference.
//
- assert(isMem(MI, 0) && MI->getNumOperands() == 4+1 &&
- MI->getOperand(4).isRegister() && "Bad format for MRMDestMem!");
+ assert(isMem(MI, 0) &&
+ (MI->getNumOperands() == 4+1 ||
+ (MI->getNumOperands() == 4+2 && MI->getOperand(5).isImmediate()))
+ && "Bad format for MRMDestMem!");
O << TII.getName(MI->getOpcode()) << " " << sizePtr(Desc) << " ";
printMemReference(MI, 0);
O << ", ";
printOp(MI->getOperand(4));
+ if (MI->getNumOperands() == 4+2) {
+ O << ", ";
+ printOp(MI->getOperand(5));
+ }
O << "\n";
return;
}
diff --git a/lib/Target/X86/X86ISelSimple.cpp b/lib/Target/X86/X86ISelSimple.cpp
index 14089b9abb..446479379e 100644
--- a/lib/Target/X86/X86ISelSimple.cpp
+++ b/lib/Target/X86/X86ISelSimple.cpp
@@ -306,7 +306,7 @@ namespace {
RegMap.erase(V); // Assign a new name to this constant if ref'd again
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Move the address of the global into the register
- BMI(MBB, IPt, X86::MOVir32, 1, Reg).addGlobalAddress(GV);
+ BMI(MBB, IPt, X86::MOVri32, 1, Reg).addGlobalAddress(GV);
RegMap.erase(V); // Assign a new name to this address if ref'd again
}
@@ -423,19 +423,19 @@ void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
if (Class == cLong) {
// Copy the value into the register pair.
uint64_t Val = cast<ConstantInt>(C)->getRawValue();
- BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(Val & 0xFFFFFFFF);
- BMI(MBB, IP, X86::MOVir32, 1, R+1).addZImm(Val >> 32);
+ BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(Val & 0xFFFFFFFF);
+ BMI(MBB, IP, X86::MOVri32, 1, R+1).addZImm(Val >> 32);
return;
}
assert(Class <= cInt && "Type not handled yet!");
static const unsigned IntegralOpcodeTab[] = {
- X86::MOVir8, X86::MOVir16, X86::MOVir32
+ X86::MOVri8, X86::MOVri16, X86::MOVri32
};
if (C->getType() == Type::BoolTy) {
- BMI(MBB, IP, X86::MOVir8, 1, R).addZImm(C == ConstantBool::True);
+ BMI(MBB, IP, X86::MOVri8, 1, R).addZImm(C == ConstantBool::True);
} else {
ConstantInt *CI = cast<ConstantInt>(C);
BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CI->getRawValue());
@@ -458,7 +458,7 @@ void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
} else if (isa<ConstantPointerNull>(C)) {
// Copy zero (null pointer) to the register.
- BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(0);
} else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
unsigned SrcReg = getReg(CPR->getValue(), MBB, IP);
BMI(MBB, IP, X86::MOVrr32, 1, R).addReg(SrcReg);
@@ -1211,7 +1211,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
}
} else {
// Values other than zero are not implemented yet.
- BuildMI(BB, X86::MOVir32, 1, TmpReg1).addZImm(0);
+ BuildMI(BB, X86::MOVri32, 1, TmpReg1).addZImm(0);
}
return;
@@ -1287,7 +1287,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(1);
}
- BuildMI(BB, X86::MOVir16, 1, X86::AX).addZImm((Val << 8) | Val);
+ BuildMI(BB, X86::MOVri16, 1, X86::AX).addZImm((Val << 8) | Val);
Opcode = X86::REP_STOSW;
break;
case 0: // DWORD aligned
@@ -1298,13 +1298,13 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(2);
}
Val = (Val << 8) | Val;
- BuildMI(BB, X86::MOVir32, 1, X86::EAX).addZImm((Val << 16) | Val);
+ BuildMI(BB, X86::MOVri32, 1, X86::EAX).addZImm((Val << 16) | Val);
Opcode = X86::REP_STOSD;
break;
case 1: // BYTE aligned
case 3: // BYTE aligned
CountReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::MOVir8, 1, X86::AL).addZImm(Val);
+ BuildMI(BB, X86::MOVri8, 1, X86::AL).addZImm(Val);
Opcode = X86::REP_STOSB;
break;
}
@@ -1532,12 +1532,12 @@ void ISel::doMultiplyConst(MachineBasicBlock *MBB,
}
// Most general case, emit a normal multiply...
- static const unsigned MOVirTab[] = {
- X86::MOVir8, X86::MOVir16, X86::MOVir32
+ static const unsigned MOVriTab[] = {
+ X86::MOVri8, X86::MOVri16, X86::MOVri32
};
unsigned TmpReg = makeAnotherReg(DestTy);
- BMI(MBB, IP, MOVirTab[Class], 1, TmpReg).addZImm(ConstRHS);
+ BMI(MBB, IP, MOVriTab[Class], 1, TmpReg).addZImm(ConstRHS);
// Emit a MUL to multiply the register holding the index by
// elementSize, putting the result in OffsetReg.
@@ -1647,7 +1647,7 @@ void ISel::emitDivRemOperation(MachineBasicBlock *BB,
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
- static const unsigned ClrOpcode[]={ X86::MOVir8, X86::MOVir16, X86::MOVir32 };
+ static const unsigned ClrOpcode[]={ X86::MOVri8, X86::MOVri16, X86::MOVri32 };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
static const unsigned DivOpcode[][4] = {
@@ -1742,12 +1742,12 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
if (isLeftShift) {
BMI(MBB, IP, X86::SHLir32, 2,
DestReg + 1).addReg(SrcReg).addZImm(Amount);
- BMI(MBB, IP, X86::MOVir32, 1,
+ BMI(MBB, IP, X86::MOVri32, 1,
DestReg).addZImm(0);
} else {
unsigned Opcode = isSigned ? X86::SARir32 : X86::SHRir32;
BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
- BMI(MBB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
}
}
} else {
@@ -1761,7 +1761,7 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
} else {
// Other shifts use a fixed zero value if the shift is more than 32
// bits.
- BMI(MBB, IP, X86::MOVir32, 1, TmpReg).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, TmpReg).addZImm(0);
}
// Initialize CL with the shift amount...
@@ -1989,7 +1989,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
if (isLong) { // Handle upper 32 bits as appropriate...
if (isUnsigned) // Zero out top bits...
- BMI(BB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
+ BMI(BB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
else // Sign extend bottom half...
BMI(BB, IP, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
}
@@ -2040,7 +2040,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
// Make a 64 bit temporary... and zero out the top of it...
unsigned TmpReg = makeAnotherReg(Type::LongTy);
BMI(BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
- BMI(BB, IP, X86::MOVir32, 1, TmpReg+1).addZImm(0);
+ BMI(BB, IP, X86::MOVri32, 1, TmpReg+1).addZImm(0);
SrcTy = Type::LongTy;
SrcClass = cLong;
SrcReg = TmpReg;
@@ -2093,7 +2093,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
addFrameReference(BMI(BB, IP, X86::MOVmr8, 4, HighPartOfCW), CWFrameIdx, 1);
// Set the high part to be round to zero...
- addFrameReference(BMI(BB, IP, X86::MOVim8, 5), CWFrameIdx, 1).addZImm(12);
+ addFrameReference(BMI(BB, IP, X86::MOVmi8, 5), CWFrameIdx, 1).addZImm(12);
// Reload the modified control word now...
addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index b34ee6f13e..267c1fb36d 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -195,12 +195,12 @@ def REP_STOSD : X86Inst<"rep stosd", 0xAB, RawFrm, NoArg>, REP,
def MOVrr8 : X86Inst<"mov", 0x88, MRMDestReg, Arg8>, Pattern<(set R8 , R8 )>;
def MOVrr16 : X86Inst<"mov", 0x89, MRMDestReg, Arg16>, OpSize, Pattern<(set R16, R16)>;
def MOVrr32 : X86Inst<"mov", 0x89, MRMDestReg, Arg32>, Pattern<(set R32, R32)>;
-def MOVir8 : X86Inst<"mov", 0xB0, AddRegFrm , Arg8>, Pattern<(set R8 , imm )>;
-def MOVir16 : X86Inst<"mov", 0xB8, AddRegFrm , Arg16>, OpSize, Pattern<(set R16, imm)>;
-def MOVir32 : X86Inst<"mov", 0xB8, AddRegFrm , Arg32>, Pattern<(set R32, imm)>;
-def MOVim8 : X86Inst<"mov", 0xC6, MRMS0m , Arg8>; // [mem] = imm8
-def MOVim16 : X86Inst<"mov", 0xC7, MRMS0m , Arg16>, OpSize; // [mem] = imm16
-def MOVim32 : X86Inst<"mov", 0xC7, MRMS0m , Arg32>; // [mem] = imm32
+def MOVri8 : X86Inst<"mov", 0xB0, AddRegFrm , Arg8>, Pattern<(set R8 , imm )>;
+def MOVri16 : X86Inst<"mov", 0xB8, AddRegFrm , Arg16>, OpSize, Pattern<(set R16, imm)>;
+def MOVri32 : X86Inst<"mov", 0xB8, AddRegFrm , Arg32>, Pattern<(set R32, imm)>;
+def MOVmi8 : X86Inst<"mov", 0xC6, MRMS0m , Arg8>; // [mem] = imm8
+def MOVmi16 : X86Inst<"mov", 0xC7, MRMS0m , Arg16>, OpSize; // [mem] = imm16
+def MOVmi32 : X86Inst<"mov", 0xC7, MRMS0m , Arg32>; // [mem] = imm32
def MOVmr8 : X86Inst<"mov", 0x8A, MRMSrcMem , Arg8>; // R8 = [mem]
def MOVmr16 : X86Inst<"mov", 0x8B, MRMSrcMem , Arg16>, OpSize, // R16 = [mem]
diff --git a/lib/Target/X86/X86PeepholeOpt.cpp b/lib/Target/X86/X86PeepholeOpt.cpp
index e2b4a46d0c..7bc2ec2a3c 100644
--- a/lib/Target/X86/X86PeepholeOpt.cpp
+++ b/lib/Target/X86/X86PeepholeOpt.cpp
@@ -179,9 +179,9 @@ bool PH::PeepholeOptimize(MachineBasicBlock &MBB,
return false;
#if 0
- case X86::MOVir32: Size++;
- case X86::MOVir16: Size++;
- case X86::MOVir8:
+ case X86::MOVri32: Size++;
+ case X86::MOVri16: Size++;
+ case X86::MOVri8:
// FIXME: We can only do this transformation if we know that flags are not
// used here, because XOR clobbers the flags!
if (MI->getOperand(1).isImmediate()) { // avoid mov EAX, <value>
@@ -373,7 +373,7 @@ bool SSAPH::OptimizeAddress(MachineInstr *MI, unsigned OpNo) {
// Attempt to fold instructions used by the base register into the instruction
if (MachineInstr *DefInst = getDefiningInst(BaseRegOp)) {
switch (DefInst->getOpcode()) {
- case X86::MOVir32:
+ case X86::MOVri32:
// If there is no displacement set for this instruction set one now.
// FIXME: If we can fold two immediates together, we should do so!
if (DisplacementOp.isImmediate() && !DisplacementOp.getImmedValue()) {
@@ -461,14 +461,14 @@ bool SSAPH::PeepholeOptimize(MachineBasicBlock &MBB,
// Register to memory stores. Format: <base,scale,indexreg,immdisp>, srcreg
case X86::MOVrm32: case X86::MOVrm16: case X86::MOVrm8:
- case X86::MOVim32: case X86::MOVim16: case X86::MOVim8:
+ case X86::MOVmi32: case X86::MOVmi16: case X86::MOVmi8:
// Check to see if we can fold the source instruction into this one...
if (MachineInstr *SrcInst = getDefiningInst(MI->getOperand(4))) {
switch (SrcInst->getOpcode()) {
// Fold the immediate value into the store, if possible.
- case X86::MOVir8: return Propagate(MI, 4, SrcInst, 1, X86::MOVim8);
- case X86::MOVir16: return Propagate(MI, 4, SrcInst, 1, X86::MOVim16);
- case X86::MOVir32: return Propagate(MI, 4, SrcInst, 1, X86::MOVim32);
+ case X86::MOVri8: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi8);
+ case X86::MOVri16: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi16);
+ case X86::MOVri32: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi32);
default: break;
}
}