From 39ffcb7b62a75d186a7f14b38aacb1615593fdbd Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Mon, 20 Dec 2010 01:16:03 +0000
Subject: We lower setb to sbb with the hope that the and will go away; when
 it doesn't, we match it back to setb.

On a 64-bit version of the testcase, before we'd get:

    movq    %rdi, %rax
    addq    %rsi, %rax
    sbbb    %dl, %dl
    andb    $1, %dl
    ret

now we get:

    movq    %rdi, %rax
    addq    %rsi, %rax
    setb    %dl
    ret

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122217 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/add.ll | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'test/CodeGen/X86/add.ll')

diff --git a/test/CodeGen/X86/add.ll b/test/CodeGen/X86/add.ll
index 3991a6849f..1201c06fcc 100644
--- a/test/CodeGen/X86/add.ll
+++ b/test/CodeGen/X86/add.ll
@@ -92,3 +92,12 @@ define i64 @test6(i64 %A, i32 %B) nounwind {
 ; X64: ret
 }
 
+define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+  ret {i32, i1} %t
+}
+
+; X64: test7:
+; X64: addl %esi, %eax
+; X64-NEXT: setb %dl
+; X64-NEXT: ret
--
cgit v1.2.3
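
The "64-bit version of the testcase" quoted in the message is not part of this patch. A minimal sketch of what it would look like, assuming the i64 variant of the intrinsic and a hypothetical @test7_64 name:

    ; Hypothetical i64 analogue of @test7 above (not in this patch).
    declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64)

    define {i64, i1} @test7_64(i64 %v1, i64 %v2) nounwind {
      ; The i1 overflow flag is what used to lower to sbbb + andb
      ; and, with this change, is matched back to a single setb.
      %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
      ret {i64, i1} %t
    }

On x86-64 the sum of the returned {i64, i1} pair lands in %rax and the i1 overflow bit in %dl, which is why the before/after assembly in the message operates on %dl.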