summaryrefslogtreecommitdiff
path: root/test/CodeGen/X86/rotate4.ll
blob: b549a9bd935a5a964f62fb3e0ff14e5f079ade54 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s

; Check that we recognize this idiom for rotation too:
;    a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))

; Rotate-left idiom on i32: (a << (b & 31)) | (a >> ((0 - b) & 31)).
; Masking both shift amounts with 31 keeps them in range and makes the two
; amounts sum to 32 (mod 32), so DAGCombine should fold this to a single roll.
define i32 @rotate_left_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_left_32:
; CHECK: roll
entry:
  %and = and i32 %b, 31                 ; left-shift amount: b mod 32
  %shl = shl i32 %a, %and
  %0 = sub i32 0, %b                    ; negate b to form the complementary amount
  %and3 = and i32 %0, 31                ; right-shift amount: (-b) mod 32
  %shr = lshr i32 %a, %and3
  %or = or i32 %shl, %shr               ; combine halves -> rotate
  ret i32 %or
}

; Rotate-right idiom on i32: (a >> (b & 31)) | (a << ((0 - b) & 31)).
; Should fold to a single rorl. Value names were previously swapped
; (%shl held the lshr result and vice versa); renamed to match the
; operations — FileCheck matches only the emitted assembly, not IR names.
define i32 @rotate_right_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_right_32:
; CHECK: rorl
entry:
  %and = and i32 %b, 31                 ; right-shift amount: b mod 32
  %shr = lshr i32 %a, %and
  %0 = sub i32 0, %b                    ; negate b to form the complementary amount
  %and3 = and i32 %0, 31                ; left-shift amount: (-b) mod 32
  %shl = shl i32 %a, %and3
  %or = or i32 %shr, %shl               ; combine halves -> rotate
  ret i32 %or
}

; Rotate-left idiom on i64: (a << (b & 63)) | (a >> ((0 - b) & 63)).
; Same pattern as the 32-bit case with a 63 mask; should fold to a single rolq.
define i64 @rotate_left_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_left_64:
; CHECK: rolq
entry:
  %and = and i64 %b, 63                 ; left-shift amount: b mod 64
  %shl = shl i64 %a, %and
  %0 = sub i64 0, %b                    ; negate b to form the complementary amount
  %and3 = and i64 %0, 63                ; right-shift amount: (-b) mod 64
  %shr = lshr i64 %a, %and3
  %or = or i64 %shl, %shr               ; combine halves -> rotate
  ret i64 %or
}

; Rotate-right idiom on i64: (a >> (b & 63)) | (a << ((0 - b) & 63)).
; Should fold to a single rorq. Value names were previously swapped
; (%shl held the lshr result and vice versa); renamed to match the
; operations — FileCheck matches only the emitted assembly, not IR names.
define i64 @rotate_right_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_right_64:
; CHECK: rorq
entry:
  %and = and i64 %b, 63                 ; right-shift amount: b mod 64
  %shr = lshr i64 %a, %and
  %0 = sub i64 0, %b                    ; negate b to form the complementary amount
  %and3 = and i64 %0, 63                ; left-shift amount: (-b) mod 64
  %shl = shl i64 %a, %and3
  %or = or i64 %shr, %shl               ; combine halves -> rotate
  ret i64 %or
}

; Also check mem operand.

; Same rotate-left idiom as rotate_left_32 but with the value loaded from and
; stored back to memory: the rotate should use the memory operand directly
; (a single read-modify-write roll), so no separate mov may appear after it.
define void @rotate_left_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_left_m32:
; CHECK: roll
; no store:
; CHECK-NOT: mov
entry:
  %a = load i32* %pa, align 16
  %and = and i32 %b, 31                 ; left-shift amount: b mod 32
  %shl = shl i32 %a, %and
  %0 = sub i32 0, %b                    ; negate b to form the complementary amount
  %and3 = and i32 %0, 31                ; right-shift amount: (-b) mod 32
  %shr = lshr i32 %a, %and3
  %or = or i32 %shl, %shr               ; combine halves -> rotate
  store i32 %or, i32* %pa, align 32
  ret void
}

; Same rotate-right idiom as rotate_right_32 but through memory: should lower
; to a single read-modify-write rorl with no trailing mov for the store.
; Value names were previously swapped (%shl held the lshr result and vice
; versa); renamed to match the operations — FileCheck matches only the
; emitted assembly, not IR names.
define void @rotate_right_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_right_m32:
; CHECK: rorl
; no store:
; CHECK-NOT: mov
entry:
  %a = load i32* %pa, align 16
  %and = and i32 %b, 31                 ; right-shift amount: b mod 32
  %shr = lshr i32 %a, %and
  %0 = sub i32 0, %b                    ; negate b to form the complementary amount
  %and3 = and i32 %0, 31                ; left-shift amount: (-b) mod 32
  %shl = shl i32 %a, %and3
  %or = or i32 %shr, %shl               ; combine halves -> rotate
  store i32 %or, i32* %pa, align 32
  ret void
}

; Same rotate-left idiom as rotate_left_64 but through memory: should lower
; to a single read-modify-write rolq with no trailing mov for the store.
define void @rotate_left_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_left_m64:
; CHECK: rolq
; no store:
; CHECK-NOT: mov
entry:
  %a = load i64* %pa, align 16
  %and = and i64 %b, 63                 ; left-shift amount: b mod 64
  %shl = shl i64 %a, %and
  %0 = sub i64 0, %b                    ; negate b to form the complementary amount
  %and3 = and i64 %0, 63                ; right-shift amount: (-b) mod 64
  %shr = lshr i64 %a, %and3
  %or = or i64 %shl, %shr               ; combine halves -> rotate
  store i64 %or, i64* %pa, align 64
  ret void
}

; Same rotate-right idiom as rotate_right_64 but through memory: should lower
; to a single read-modify-write rorq with no trailing mov for the store.
; Value names were previously swapped (%shl held the lshr result and vice
; versa); renamed to match the operations — FileCheck matches only the
; emitted assembly, not IR names.
define void @rotate_right_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_right_m64:
; CHECK: rorq
; no store:
; CHECK-NOT: mov
entry:
  %a = load i64* %pa, align 16
  %and = and i64 %b, 63                 ; right-shift amount: b mod 64
  %shr = lshr i64 %a, %and
  %0 = sub i64 0, %b                    ; negate b to form the complementary amount
  %and3 = and i64 %0, 63                ; left-shift amount: (-b) mod 64
  %shl = shl i64 %a, %and3
  %or = or i64 %shr, %shl               ; combine halves -> rotate
  store i64 %or, i64* %pa, align 64
  ret void
}