author    Bill Wendling <isanbard@gmail.com>  2011-04-12 22:46:31 +0000
committer Bill Wendling <isanbard@gmail.com>  2011-04-12 22:46:31 +0000
commit    d5f323d70bd2d9bc8a63a68bfe439a69e0104bbf (patch)
tree      51593a18718d656918be4f28c4a5f5706e0a0366 /lib
parent    0b756349a718e046abba84c316877a682eb0ff2f (diff)
Remove the unaligned load intrinsics in favor of using native unaligned loads.
Now that we have a first-class way to represent unaligned loads, the unaligned load intrinsics are superfluous. First part of <rdar://problem/8460511>.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129401 91177308-0d34-0410-b5e6-96231b3b80d8
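In effect, bitcode that still calls one of the removed intrinsics is auto-upgraded into a plain load with alignment 1. A minimal before/after sketch in the LLVM IR syntax of the period (typed pointers; %p, %v, and the other names are hypothetical), mirroring the AutoUpgrade.cpp hunk below:

    ; Before: intrinsic form of an unaligned 16-byte load
    %v = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %p)

    ; After: native unaligned load: bitcast to i128, load with align 1,
    ; then bitcast the wide integer back to the vector type
    %cast = bitcast i8* %p to i128*
    %wide = load i128* %cast, align 1
    %vec  = bitcast i128 %wide to <4 x float>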
Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp                    5
-rw-r--r--  lib/Target/X86/X86InstrSSE.td                     26
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp    6
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp       3
-rw-r--r--  lib/VMCore/AutoUpgrade.cpp                        30
5 files changed, 30 insertions, 40 deletions
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 21df57c9cd..85ab916d71 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -916,7 +916,6 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
case X86::MOVSDrm:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
- case X86::MOVUPSrm_Int:
case X86::MOVAPDrm:
case X86::MOVDQArm:
case X86::MMX_MOVD64rm:
@@ -2845,11 +2844,9 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::FsMOVAPDrm:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
- case X86::MOVUPSrm_Int:
case X86::MOVAPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
- case X86::MOVDQUrm_Int:
break;
}
switch (Opc2) {
@@ -2869,11 +2866,9 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::FsMOVAPDrm:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
- case X86::MOVUPSrm_Int:
case X86::MOVAPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
- case X86::MOVDQUrm_Int:
break;
}
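With the _Int pseudo-instructions deleted, these opcode switches only have to list the ordinary unaligned-load instructions; instruction selection reaches the same MOVUPS/MOVDQU encodings from a native unaligned load. A hypothetical illustration in the IR syntax of the period (the exact instruction chosen can vary by subtarget):

    ; selects movups on SSE targets, with no intrinsic involved
    %v = load <4 x float>* %p, align 1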
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 8f08e68a26..c36e975e3c 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -329,15 +329,6 @@ def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
// Intrinsic forms of MOVUPS/D load and store
let isAsmParserOnly = 0 in {
- let canFoldAsLoad = 1, isReMaterializable = 1 in
- def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
- (ins f128mem:$src),
- "movups\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
- def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
- (ins f128mem:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
(ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
@@ -347,13 +338,6 @@ let isAsmParserOnly = 0 in {
"movupd\t{$src, $dst|$dst, $src}",
[(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
}
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movups\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
-def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
@@ -2229,22 +2213,12 @@ def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
// Intrinsic forms of MOVDQU load and store
let isAsmParserOnly = 0 in {
-let canFoldAsLoad = 1 in
-def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
- XS, VEX, Requires<[HasAVX]>;
def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
XS, VEX, Requires<[HasAVX]>;
}
-let canFoldAsLoad = 1 in
-def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "movdqu\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
- XS, Requires<[HasSSE2]>;
def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 875e9cae58..726105f75d 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -537,11 +537,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
- case Intrinsic::x86_sse_loadu_ps:
- case Intrinsic::x86_sse2_loadu_pd:
- case Intrinsic::x86_sse2_loadu_dq:
- // Turn PPC lvx -> load if the pointer is known aligned.
- // Turn X86 loadups -> load if the pointer is known aligned.
+ // Turn PPC lvx -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
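After this change only the Altivec loads take this path; the x86 loadu intrinsics can no longer reach InstCombine because AutoUpgrade rewrites them away first. A sketch of the remaining transform, using hypothetical names (the replacement load itself is emitted by code just past the end of this hunk):

    ; with %p known to be 16-byte aligned, the call
    %v = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %p)
    ; can be rewritten as an ordinary aligned load
    %cast = bitcast i8* %p to <4 x i32>*
    %val  = load <4 x i32>* %cast, align 16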
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 87e78fab82..f6e2c88bc6 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -572,9 +572,6 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::prefetch:
- case Intrinsic::x86_sse2_loadu_dq:
- case Intrinsic::x86_sse2_loadu_pd:
- case Intrinsic::x86_sse_loadu_ps:
case Intrinsic::x86_sse_storeu_ps:
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
index 9e551bb060..4541f381ed 100644
--- a/lib/VMCore/AutoUpgrade.cpp
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -527,6 +527,12 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
// or 0.
NewFn = 0;
return true;
+ } else if (Name.compare(5, 16, "x86.sse.loadu.ps", 16) == 0 ||
+ Name.compare(5, 17, "x86.sse2.loadu.dq", 17) == 0 ||
+ Name.compare(5, 17, "x86.sse2.loadu.pd", 17) == 0) {
+ // Calls to these intrinsics are transformed into native unaligned loads.
+ NewFn = 0;
+ return true;
} else if (Name.compare(5, 17, "x86.ssse3.pshuf.w", 17) == 0) {
// This is an SSE/MMX instruction.
const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
@@ -946,7 +952,29 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Remove upgraded instruction.
CI->eraseFromParent();
-
+
+ } else if (F->getName() == "llvm.x86.sse.loadu.ps" ||
+ F->getName() == "llvm.x86.sse2.loadu.dq" ||
+ F->getName() == "llvm.x86.sse2.loadu.pd") {
+ // Convert to a native, unaligned load.
+ const Type *VecTy = CI->getType();
+ const Type *IntTy = IntegerType::get(C, 128);
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ Value *BC = Builder.CreateBitCast(CI->getArgOperand(0),
+ PointerType::getUnqual(IntTy),
+ "cast");
+ LoadInst *LI = Builder.CreateLoad(BC, CI->getName());
+ LI->setAlignment(1); // Unaligned load.
+ BC = Builder.CreateBitCast(LI, VecTy, "new.cast");
+
+ // Fix up all the uses with our new load.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(BC);
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
} else {
llvm_unreachable("Unknown function for CallInst upgrade.");
}