path: root/lib/Target/X86/PeepholeOptimizer.cpp
author     Chris Lattner <sabre@nondot.org>   2003-10-20 05:53:31 +0000
committer  Chris Lattner <sabre@nondot.org>   2003-10-20 05:53:31 +0000
commit     43a5ff8d402dcd71629d1ff9f32e8f46806ab8e3 (patch)
tree       9e1d1a4938da970b40dffe0d84d502cb7bd872e7 /lib/Target/X86/PeepholeOptimizer.cpp
parent     e738656c0b380e5059cb522927aeb49554d01e46 (diff)
download   llvm-43a5ff8d402dcd71629d1ff9f32e8f46806ab8e3.tar.gz
llvm-43a5ff8d402dcd71629d1ff9f32e8f46806ab8e3.tar.bz2
llvm-43a5ff8d402dcd71629d1ff9f32e8f46806ab8e3.tar.xz
Emit x86 instructions for: A = B op C, where A and B are 16- or 32-bit registers,
C is a constant which can be sign-extended from 8 bits without value loss, and op
is one of: add, sub, imul, and, or, xor. This allows the JIT to emit the one-byte
version of the constant instead of the two- or four-byte version. Because these
instructions are very common, this can save a LOT of code space. For example, I
sampled two benchmarks, 176.gcc and 254.gap:

BM        Old       New       Reduction
176.gcc   2673621   2548962   4.89%
254.gap   498261    475104    4.87%

Note that while the percentage is not spectacular, this did eliminate 124.6
_KILOBYTES_ of code space from gcc. Not bad.

Note that this doesn't affect the llc version at all, because the assembler
already does this optimization.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@9284 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/X86/PeepholeOptimizer.cpp')
-rw-r--r--   lib/Target/X86/PeepholeOptimizer.cpp   40
1 files changed, 40 insertions, 0 deletions
diff --git a/lib/Target/X86/PeepholeOptimizer.cpp b/lib/Target/X86/PeepholeOptimizer.cpp
index 559baeeae9..efb6cc3e6a 100644
--- a/lib/Target/X86/PeepholeOptimizer.cpp
+++ b/lib/Target/X86/PeepholeOptimizer.cpp
@@ -51,6 +51,46 @@ bool PH::PeepholeOptimize(MachineBasicBlock &MBB,
}
return false;
+ // A large number of X86 instructions have forms which take an 8-bit
+ // immediate despite the fact that the operands are 16 or 32 bits. Because
+ // this can save three bytes of code size (and icache space), we want to
+ // shrink them if possible.
+ case X86::ADDri16: case X86::ADDri32:
+ case X86::SUBri16: case X86::SUBri32:
+ case X86::IMULri16: case X86::IMULri32:
+ case X86::ANDri16: case X86::ANDri32:
+ case X86::ORri16: case X86::ORri32:
+ case X86::XORri16: case X86::XORri32:
+ assert(MI->getNumOperands() == 3 && "These should all have 3 operands!");
+ if (MI->getOperand(2).isImmediate()) {
+ int Val = MI->getOperand(2).getImmedValue();
+ // If the value is the same when signed extended from 8 bits...
+ if (Val == (signed int)(signed char)Val) {
+ unsigned Opcode;
+ switch (MI->getOpcode()) {
+ default: assert(0 && "Unknown opcode value!");
+ case X86::ADDri16: Opcode = X86::ADDri16b; break;
+ case X86::ADDri32: Opcode = X86::ADDri32b; break;
+ case X86::SUBri16: Opcode = X86::SUBri16b; break;
+ case X86::SUBri32: Opcode = X86::SUBri32b; break;
+ case X86::IMULri16: Opcode = X86::IMULri16b; break;
+ case X86::IMULri32: Opcode = X86::IMULri32b; break;
+ case X86::ANDri16: Opcode = X86::ANDri16b; break;
+ case X86::ANDri32: Opcode = X86::ANDri32b; break;
+ case X86::ORri16: Opcode = X86::ORri16b; break;
+ case X86::ORri32: Opcode = X86::ORri32b; break;
+ case X86::XORri16: Opcode = X86::XORri16b; break;
+ case X86::XORri32: Opcode = X86::XORri32b; break;
+ }
+ unsigned R0 = MI->getOperand(0).getReg();
+ unsigned R1 = MI->getOperand(1).getReg();
+ *I = BuildMI(Opcode, 2, R0).addReg(R1).addZImm((char)Val);
+ delete MI;
+ return true;
+ }
+ }
+ return false;
+
#if 0
case X86::MOVir32: Size++;
case X86::MOVir16: Size++;