diff options
author | Jakob Stoklund Olesen <stoklund@2pi.dk> | 2012-05-23 22:37:27 +0000 |
---|---|---|
committer | Jakob Stoklund Olesen <stoklund@2pi.dk> | 2012-05-23 22:37:27 +0000 |
commit | d74d2847573df690b6a91254688ef3fd974f83f7 (patch) | |
tree | f793c5feb12ffc47c6b14a91936ff52d6d7b0630 /test/CodeGen/Thumb2/inflate-regs.ll | |
parent | dbb4e57a3c7fb18d5ff2d9504c5cacb5df20fab4 (diff) | |
download | llvm-d74d2847573df690b6a91254688ef3fd974f83f7.tar.gz llvm-d74d2847573df690b6a91254688ef3fd974f83f7.tar.bz2 llvm-d74d2847573df690b6a91254688ef3fd974f83f7.tar.xz |
Add a last resort tryInstructionSplit() to RAGreedy.
Live ranges with a constrained register class may benefit from splitting
around individual uses. It allows the remaining live range to use a
larger register class where it may allocate. This is like spilling to a
different register class.
This is only attempted on constrained register classes.
<rdar://problem/11438902>
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157354 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/Thumb2/inflate-regs.ll')
-rw-r--r-- | test/CodeGen/Thumb2/inflate-regs.ll | 22 |
1 file changed, 22 insertions, 0 deletions
diff --git a/test/CodeGen/Thumb2/inflate-regs.ll b/test/CodeGen/Thumb2/inflate-regs.ll new file mode 100644 index 0000000000..264863512e --- /dev/null +++ b/test/CodeGen/Thumb2/inflate-regs.ll @@ -0,0 +1,22 @@ +; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s +target triple = "thumbv7-apple-ios" + +; CHECK: local_split +; +; The load must go into d0-15 which are all clobbered by the asm. +; RAGreedy should split the range and use d16-d31 to avoid a spill. +; +; CHECK: vldr s +; CHECK-NOT: vstr +; CHECK: vadd.f32 +; CHECK-NOT: vstr +; CHECK: vorr +; CHECK: vstr s +define void @local_split(float* nocapture %p) nounwind ssp { +entry: + %x = load float* %p, align 4 + %a = fadd float %x, 1.0 + tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind + store float %a, float* %p, align 4 + ret void +} |