From bc511725f08c45984be6ff47d069c3773a2f2eb0 Mon Sep 17 00:00:00 2001
From: Dan Gohman
Date: Fri, 13 Feb 2009 00:26:43 +0000
Subject: Fix LSR's IV sorting function to explicitly sort by bitwidth after
 sorting by stride value. This prevents it from missing IV reuse
 opportunities in a host-sensitive manner.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@64415 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/lsr-sort.ll | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 test/CodeGen/X86/lsr-sort.ll

diff --git a/test/CodeGen/X86/lsr-sort.ll b/test/CodeGen/X86/lsr-sort.ll
new file mode 100644
index 0000000000..00e1d694ef
--- /dev/null
+++ b/test/CodeGen/X86/lsr-sort.ll
@@ -0,0 +1,22 @@
+; RUN: llvm-as < %s | llc -march=x86-64 > %t
+; RUN: grep inc %t | count 1
+; RUN: not grep incw %t
+
+@X = common global i16 0		; <i16*> [#uses=1]
+
+define void @foo(i32 %N) nounwind {
+entry:
+	%0 = icmp sgt i32 %N, 0		; <i1> [#uses=1]
+	br i1 %0, label %bb, label %return
+
+bb:		; preds = %bb, %entry
+	%i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ]		; <i32> [#uses=2]
+	%1 = trunc i32 %i.03 to i16		; <i16> [#uses=1]
+	volatile store i16 %1, i16* @X, align 2
+	%indvar.next = add i32 %i.03, 1		; <i32> [#uses=2]
+	%exitcond = icmp eq i32 %indvar.next, %N		; <i1> [#uses=1]
+	br i1 %exitcond, label %return, label %bb
+
+return:		; preds = %bb, %entry
+	ret void
+}
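
Note: the diff shown here contains only the new regression test; the comparator
change described in the commit message (presumably in LSR itself,
lib/Transforms/Scalar/LoopStrengthReduce.cpp) is not part of this excerpt. The
following standalone C++ sketch only illustrates the idea of the fix: sort
strides by constant value first, then break ties by bit width so that the
iteration order no longer depends on host-specific pointer values. The Stride
struct and StrideLess function are hypothetical stand-ins, not LLVM's actual
types.

// Illustrative sketch, not the actual LSR code: sort by stride value,
// then by bit width, so equal-valued strides of different widths always
// come out in the same order on every host.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Stride {
  int64_t Value;     // constant stride value, e.g. +1
  unsigned BitWidth; // bit width of the stride's type, e.g. 16 or 32
};

// Strict weak ordering: compare by value first, then by bit width.
// Without the bit-width tie-break, two strides with the same value but
// different widths compare "equal", and their relative order falls back
// to whatever the container or host happened to produce, so IV reuse
// decisions could differ from host to host.
static bool StrideLess(const Stride &LHS, const Stride &RHS) {
  if (LHS.Value != RHS.Value)
    return LHS.Value < RHS.Value;
  return LHS.BitWidth < RHS.BitWidth;
}

int main() {
  // Two strides with the same value (+1) but different widths, mirroring
  // the i32 counter and the i16 store in the test above.
  std::vector<Stride> Strides = {{1, 32}, {1, 16}};
  std::sort(Strides.begin(), Strides.end(), StrideLess);
  for (const Stride &S : Strides)
    std::cout << S.Value << " (i" << S.BitWidth << ")\n";
  // Deterministic output: "1 (i16)" then "1 (i32)", regardless of host.
  return 0;
}

With a deterministic order, the loop in the test is expected to reuse one
induction variable, which is what the RUN lines check: exactly one "inc" in the
generated x86-64 assembly and no separate 16-bit "incw".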