path: root/lib/Target/ARM/ARMFastISel.cpp
author     Chad Rosier <mcrosier@apple.com>  2011-11-14 23:04:09 +0000
committer  Chad Rosier <mcrosier@apple.com>  2011-11-14 23:04:09 +0000
commit     2c42b8c912b62071c27454182cdef60e3b584083 (patch)
tree       55c848a18b5e5989b802529e96d4ffa962d792a3 /lib/Target/ARM/ARMFastISel.cpp
parent     ffc658b056b7cc0b3f6a2626694b6a4216ed728d (diff)
download   llvm-2c42b8c912b62071c27454182cdef60e3b584083.tar.gz
llvm-2c42b8c912b62071c27454182cdef60e3b584083.tar.bz2
llvm-2c42b8c912b62071c27454182cdef60e3b584083.tar.xz
Supporting inline memmove isn't going to be worthwhile. The only way to avoid
violating a dependency is to emit all loads prior to stores. This would likely
cause a great deal of spillage, offsetting any potential gains.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@144585 91177308-0d34-0410-b5e6-96231b3b80d8
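To illustrate the constraint the commit message describes (a hand-written C++ sketch, not code from the patch; the function names and the fixed 16-byte size are made up for illustration): when the source and destination may overlap, an inlined copy must issue every load before the first store, which keeps all loaded values live at once, whereas an inlined memcpy can pair each load directly with its store.

#include <cstdint>

// Hypothetical 16-byte inline memmove: every load is emitted before the
// first store so an overlapping destination cannot clobber unread source
// words. Four values are live at once, which is the register pressure /
// spill cost the commit message refers to.
void inline_memmove16(uint32_t *dst, const uint32_t *src) {
  uint32_t a = src[0], b = src[1], c = src[2], d = src[3];  // all loads first
  dst[0] = a; dst[1] = b; dst[2] = c; dst[3] = d;           // then all stores
}

// Hypothetical 16-byte inline memcpy: the ranges are assumed not to overlap,
// so each load is paired with its store and only one value is live at a time.
void inline_memcpy16(uint32_t *dst, const uint32_t *src) {
  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

With an overlapping pair such as dst one word past src, the interleaved form would read back a word it had already overwritten; that is the dependency violation the message mentions.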
Diffstat (limited to 'lib/Target/ARM/ARMFastISel.cpp')
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp  25
1 file changed, 9 insertions, 16 deletions
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 67ba14b6a3..4df084f34b 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -185,9 +185,8 @@ class ARMFastISel : public FastISel {
bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
bool ARMComputeAddress(const Value *Obj, Address &Addr);
void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
- bool ARMIsMemXferSmall(uint64_t Len);
- bool ARMTryEmitSmallMemXfer(Address Dest, Address Src, uint64_t Len,
- bool isMemCpy);
+ bool ARMIsMemCpySmall(uint64_t Len);
+ bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
unsigned ARMMaterializeInt(const Constant *C, EVT VT);
@@ -2196,19 +2195,13 @@ bool ARMFastISel::SelectCall(const Instruction *I,
return true;
}
-bool ARMFastISel::ARMIsMemXferSmall(uint64_t Len) {
+bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
return Len <= 16;
}
-bool ARMFastISel::ARMTryEmitSmallMemXfer(Address Dest, Address Src, uint64_t Len,
- bool isMemCpy) {
- // FIXME: Memmove's require a little more care because their source and
- // destination may overlap.
- if (!isMemCpy)
- return false;
-
+bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len) {
// Make sure we don't bloat code by inlining very large memcpy's.
- if (!ARMIsMemXferSmall(Len))
+ if (!ARMIsMemCpySmall(Len))
return false;
// We don't care about alignment here since we just emit integer accesses.
@@ -2254,15 +2247,15 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
// we would emit dead code because we don't currently handle memmoves.
bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
- // Small memcpy/memmove's are common enough that we want to do them
- // without a call if possible.
+ // Small memcpy's are common enough that we want to do them without a call
+ // if possible.
uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
- if (ARMIsMemXferSmall(Len)) {
+ if (ARMIsMemCpySmall(Len)) {
Address Dest, Src;
if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
!ARMComputeAddress(MTI.getRawSource(), Src))
return false;
- if (ARMTryEmitSmallMemXfer(Dest, Src, Len, isMemCpy))
+ if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
return true;
}
}
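For reference, the inline path that survives this change can be modelled in standalone C++ (a simplified sketch with my own naming, not the ARMFastISel code itself): the remaining length is consumed greedily with the widest integer access that still fits, and each load is immediately followed by its matching store, an interleaving that is only safe because memcpy guarantees non-overlapping operands.

#include <cstdint>
#include <cstring>

// Simplified model of lowering a small, constant-length memcpy inline:
// use 4-, 2-, then 1-byte accesses and emit a load/store pair for each
// chunk. std::memcpy on a fixed size stands in for a plain integer
// load/store and sidesteps alignment, matching the patch's comment that
// alignment is ignored for these integer accesses.
void emitSmallMemCpy(unsigned char *dst, const unsigned char *src, uint64_t len) {
  while (len) {
    if (len >= 4) {
      uint32_t v;
      std::memcpy(&v, src, 4);  // i32 load
      std::memcpy(dst, &v, 4);  // i32 store
      src += 4; dst += 4; len -= 4;
    } else if (len >= 2) {
      uint16_t v;
      std::memcpy(&v, src, 2);  // i16 load
      std::memcpy(dst, &v, 2);  // i16 store
      src += 2; dst += 2; len -= 2;
    } else {
      *dst = *src;              // i8 load/store
      ++src; ++dst; --len;
    }
  }
}

A caller would only reach this path for lengths that ARMIsMemCpySmall accepts (16 bytes or less), so at most a handful of load/store pairs are ever emitted.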