; RUN: llc < %s -mtriple=x86_64-linux -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -march=x86 -O0 | FileCheck %s --check-prefix=X32

; GEP indices are interpreted as signed integers, so they
; should be sign-extended to 64 bits on 64-bit targets.
; PR3181
define i32 @test1(i32 %t3, i32* %t1) nounwind {
       %t9 = getelementptr i32* %t1, i32 %t3           ; <i32*> [#uses=1]
       %t15 = load i32* %t9            ; <i32> [#uses=1]
       ret i32 %t15
; X32-LABEL: test1:
; X32:  	movl	(%eax,%ecx,4), %eax
; X32:  	ret

; X64-LABEL: test1:
; X64:  	movslq	%e[[A0:di|cx]], %rax
; X64:  	movl	(%r[[A1:si|dx]],%rax,4), %eax
; X64:  	ret

}
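
; For reference, test1 is roughly "return p[i]" with a 32-bit i; a sketch of
; the addressing computation fast-isel has to synthesize (illustrative only,
; not part of the checked IR):
;   %idx = sext i32 %t3 to i64
;   %t9  = getelementptr i32* %t1, i64 %idx
; which is why the X64 checks expect a movslq before the folded load. test2
; below uses an i64 index directly, so no extension is required.
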
define i32 @test2(i64 %t3, i32* %t1) nounwind {
       %t9 = getelementptr i32* %t1, i64 %t3           ; <i32*> [#uses=1]
       %t15 = load i32* %t9            ; <i32> [#uses=1]
       ret i32 %t15
; X32-LABEL: test2:
; X32:  	movl	(%edx,%ecx,4), %e
; X32:  	ret

; X64-LABEL: test2:
; X64:  	movl	(%r[[A1]],%r[[A0]],4), %eax
; X64:  	ret
}



; PR4984
define i8 @test3(i8* %start) nounwind {
entry:
  %A = getelementptr i8* %start, i64 -2               ; <i8*> [#uses=1]
  %B = load i8* %A, align 1                       ; <i8> [#uses=1]
  ret i8 %B
  
  
; X32-LABEL: test3:
; X32:  	movl	4(%esp), %eax
; X32:  	movb	-2(%eax), %al
; X32:  	ret

; X64-LABEL: test3:
; X64:  	movb	-2(%r[[A0]]), %al
; X64:  	ret

}

define double @test4(i64 %x, double* %p) nounwind {
entry:
  %x.addr = alloca i64, align 8                   ; <i64*> [#uses=2]
  %p.addr = alloca double*, align 8               ; <double**> [#uses=2]
  store i64 %x, i64* %x.addr
  store double* %p, double** %p.addr
  %tmp = load i64* %x.addr                        ; <i64> [#uses=1]
  %add = add nsw i64 %tmp, 16                     ; <i64> [#uses=1]
  %tmp1 = load double** %p.addr                   ; <double*> [#uses=1]
  %arrayidx = getelementptr inbounds double* %tmp1, i64 %add ; <double*> [#uses=1]
  %tmp2 = load double* %arrayidx                  ; <double> [#uses=1]
  ret double %tmp2

; X32-LABEL: test4:
; X32: 128(%e{{.*}},%e{{.*}},8)
; X64-LABEL: test4:
; X64: 128(%r{{.*}},%r{{.*}},8)
}
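
; In test4 the constant part of the index folds into the displacement: the GEP
; index is %x + 16 and each double is 8 bytes, so 16 * 8 = 128 becomes the
; fixed offset in the scaled address checked above, 128(base,index,8).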

; PR8961 - Make sure the sext for the GEP addressing comes before the load that
; is folded.
define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
  %v8 = getelementptr i8* %A, i32 %I
  %v9 = bitcast i8* %v8 to i64*
  %v10 = load i64* %v9
  %v11 = add i64 %B, %v10
  ret i64 %v11
; X64-LABEL: test5:
; X64: movslq	%e[[A1]], %rax
; X64-NEXT: (%r[[A0]],%rax),
; X64: ret
}
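
; A sketch of what the i32-indexed GEP in test5 expands to (illustrative only,
; not part of the checked IR):
;   %idx = sext i32 %I to i64
;   %v8  = getelementptr i8* %A, i64 %idx
; The X64-NEXT check ties the folded load to the instruction right after the
; movslq, so the sign-extension cannot end up after the load that uses %rax as
; its index register.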

; PR9500, rdar://9156159 - Don't do non-local address mode folding,
; because it may require values which wouldn't otherwise be live out
; of their blocks.
define void @test6() {
if.end:                                           ; preds = %if.then, %invoke.cont
  %tmp15 = load i64* undef
  %dec = add i64 %tmp15, 13
  store i64 %dec, i64* undef
  %call17 = invoke i8* @_ZNK18G__FastAllocString4dataEv()
          to label %invoke.cont16 unwind label %lpad

invoke.cont16:                                    ; preds = %if.then14
  %arrayidx18 = getelementptr inbounds i8* %call17, i64 %dec
  store i8 0, i8* %arrayidx18
  unreachable

lpad:                                             ; preds = %if.end19, %if.then14, %if.end, %entry
  %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
            cleanup
  unreachable
}
declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind
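
; In test6, %dec is defined in %if.end but the GEP that uses it sits in
; %invoke.cont16, across an invoke edge. Folding the "add i64 %tmp15, 13" into
; the store's addressing mode in %invoke.cont16 would require %tmp15 to be
; available there, which is exactly the kind of non-local folding the comment
; above rules out.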


; PR10605 / rdar://9930964 - Don't fold loads incorrectly.  The load should
; happen before the store.  
define i32 @test7({i32,i32,i32}* %tmp1, i32 %tmp71, i32 %tmp63) nounwind  {
; X64-LABEL: test7:
; X64:    movl	8({{%rdi|%rcx}}), %eax
; X64:     movl	$4, 8({{%rdi|%rcx}})


  %tmp29 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
  %tmp30 = load i32* %tmp29, align 4

  %p2 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
  store i32 4, i32* %p2
  
  %tmp72 = or i32 %tmp71, %tmp30
  %tmp73 = icmp ne i32 %tmp63, 32
  br i1 %tmp73, label %T, label %F

T:
  ret i32 %tmp72

F:
  ret i32 4
}
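
; In test7 both GEPs compute the address of the same field (element 2, byte
; offset 8), so folding the load past the "store i32 4" would make it read the
; freshly stored 4 rather than the original value; the ordered X64 checks above
; require the load from 8(%reg) to come before the store of $4.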

declare i32 @__gxx_personality_v0(...)