diff options
author | Dan Gohman <gohman@apple.com> | 2010-09-02 21:18:42 +0000 |
---|---|---|
committer | Dan Gohman <gohman@apple.com> | 2010-09-02 21:18:42 +0000 |
commit | 24bde5bce192119ee0fc4f94ef8757fd4031e5f6 (patch) | |
tree | 33efa1083f81d0600127fc08d38b6aac0ff7d819 /test/CodeGen/X86/store-narrow.ll | |
parent | 8e741ed2fa16f46ab586bf3a54526cd9c2338c5b (diff) | |
download | llvm-24bde5bce192119ee0fc4f94ef8757fd4031e5f6.tar.gz llvm-24bde5bce192119ee0fc4f94ef8757fd4031e5f6.tar.bz2 llvm-24bde5bce192119ee0fc4f94ef8757fd4031e5f6.tar.xz |
Don't narrow the load and store in a load+twiddle+store sequence unless
there are clearly no stores between the load and the store. This fixes
the miscompile reported as PR7833.
This breaks the test/CodeGen/X86/narrow_op-2.ll optimization, which is
safe, but awkward to prove safe. Move it to X86's README.txt.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@112861 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/X86/store-narrow.ll')
-rw-r--r-- | test/CodeGen/X86/store-narrow.ll | 31 |
1 file changed, 29 insertions, 2 deletions
diff --git a/test/CodeGen/X86/store-narrow.ll b/test/CodeGen/X86/store-narrow.ll
index 5682e7caf8..abc5174c98 100644
--- a/test/CodeGen/X86/store-narrow.ll
+++ b/test/CodeGen/X86/store-narrow.ll
@@ -1,6 +1,6 @@
 ; rdar://7860110
-; RUN: llc < %s | FileCheck %s -check-prefix=X64
-; RUN: llc -march=x86 < %s | FileCheck %s -check-prefix=X32
+; RUN: llc -asm-verbose=false < %s | FileCheck %s -check-prefix=X64
+; RUN: llc -march=x86 -asm-verbose=false < %s | FileCheck %s -check-prefix=X32
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.2"
@@ -125,3 +125,30 @@ entry:
 ; X32: movb %cl, 5(%{{.*}})
 }
+; PR7833
+
+@g_16 = internal global i32 -1
+
+; X64: test8:
+; X64-NEXT: movl _g_16(%rip), %eax
+; X64-NEXT: movl $0, _g_16(%rip)
+; X64-NEXT: orl $1, %eax
+; X64-NEXT: movl %eax, _g_16(%rip)
+; X64-NEXT: ret
+define void @test8() nounwind {
+  %tmp = load i32* @g_16
+  store i32 0, i32* @g_16
+  %or = or i32 %tmp, 1
+  store i32 %or, i32* @g_16
+  ret void
+}
+
+; X64: test9:
+; X64-NEXT: orb $1, _g_16(%rip)
+; X64-NEXT: ret
+define void @test9() nounwind {
+  %tmp = load i32* @g_16
+  %or = or i32 %tmp, 1
+  store i32 %or, i32* @g_16
+  ret void
+}