author    Jingyue Wu <jingyue@google.com>    2014-06-05 22:07:33 +0000
committer Jingyue Wu <jingyue@google.com>    2014-06-05 22:07:33 +0000
commit    6156e562ef42ed25274c2894aedb22b909184771 (patch)
tree      037b8a91c77a8647d7a5d67d824397e58fadfe7c /test/Transforms
parent    d334d25a10f45fc996ba117f8c16495940127e39 (diff)
Fixed several correctness issues in SeparateConstOffsetFromGEP
Most issues are in the mishandling of s/zext.

Fixes:
1. When rebuilding new indices, s/zext should be distributed to
   sub-expressions. e.g., sext(a +nsw (b +nsw 5)) = sext(a) + sext(b) + 5,
   but not sext(a + b) + 5. This also affects the logic of recursively
   looking for a constant offset: we need to include the s/zext in the
   context of the search.
2. Function find should return the constant offset in its original bitwidth
   instead of always sign-extending it to i64.
3. Stop shortcutting zext'ed GEP indices. LLVM conceptually sign-extends GEP
   indices to pointer size before computing the address, therefore
   gep base, zext(a + b) != gep base, a + b

Improvements:
1. Add an optimization for splitting sext(a + b): if a + b is proven
   non-negative (e.g., used as an index of an inbounds GEP) and one of a, b
   is non-negative, then sext(a + b) = sext(a) + sext(b).
2. Function Distributable checks whether both sext and zext can be
   distributed to the operands of a binary operator. This helps us split
   zext(sext(a + b)) into zext(sext(a)) + zext(sext(b)) when a + b does not
   overflow, signed or unsigned.

Refactoring:
Merge some common logic of handling add/sub/or in find.

Testing:
Add many tests in split-gep.ll and split-gep-and-gvn.ll to verify the
changes we made.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210291 91177308-0d34-0410-b5e6-96231b3b80d8
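To make fix 3 concrete: GEP indices narrower than the pointer are sign-extended to pointer size before the address is computed, so zext(a + b) and a + b can name different elements. The following is a small self-contained sketch of that arithmetic, written in plain C++ rather than IR and with hypothetical values; it is an illustration, not code from this patch:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t a = -2, b = 1;
      int32_t s = a + b;                            // s = -1
      // An i32 GEP index is conceptually sign-extended to 64 bits:
      int64_t as_index = static_cast<int64_t>(s);   // -1
      // zext'ing the same sum instead yields a huge positive index:
      int64_t as_zext = static_cast<uint32_t>(s);   // 4294967295
      assert(as_index != as_zext); // gep base, zext(a + b) != gep base, a + b
      return 0;
    }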
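Improvement 2's distributability check can be sketched as follows. The function name matches the commit message, but the signature, the KnownNonNegative parameter, and the overall structure are illustrative assumptions, not the patch's actual code:

    #include "llvm/IR/InstrTypes.h"

    using namespace llvm;

    // May sext/zext be distributed to the operands of BO without changing
    // the value? sext(a op b) == sext(a) op sext(b) needs no signed wrap;
    // zext(a op b) == zext(a) op zext(b) needs no unsigned wrap.
    static bool Distributable(bool SignExtended, bool ZeroExtended,
                              BinaryOperator *BO, bool KnownNonNegative) {
      Instruction::BinaryOps Op = BO->getOpcode();
      if (Op != Instruction::Add && Op != Instruction::Sub)
        return false; // the pass also traces into no-common-bits 'or',
                      // omitted in this sketch
      if (SignExtended && !BO->hasNoSignedWrap()) {
        // Improvement 1: a plain add whose result and one operand are known
        // non-negative cannot have signed-wrapped, so sext still distributes.
        if (!(Op == Instruction::Add && KnownNonNegative))
          return false;
      }
      if (ZeroExtended && !BO->hasNoUnsignedWrap())
        return false;
      return true;
    }

With both flags set this is exactly the zext(sext(a + b)) case from the message: splitting is allowed only when the add carries both nsw and nuw.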
Diffstat (limited to 'test/Transforms')
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll |  55
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll         | 193
2 files changed, 193 insertions, 55 deletions
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
index 850fc4cde8..6b72bcdc0e 100644
--- a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -1,4 +1,3 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
; RUN: opt < %s -S -separate-const-offset-from-gep -gvn -dce | FileCheck %s --check-prefix=IR
@@ -20,20 +19,20 @@ target triple = "nvptx64-unknown-unknown"
define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
.preheader:
- %0 = zext i32 %y to i64
- %1 = zext i32 %x to i64
+ %0 = sext i32 %y to i64
+ %1 = sext i32 %x to i64
%2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
%4 = load float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add i32 %y, 1
- %7 = zext i32 %6 to i64
+ %7 = sext i32 %6 to i64
%8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
%9 = addrspacecast float addrspace(3)* %8 to float*
%10 = load float* %9, align 4
%11 = fadd float %5, %10
%12 = add i32 %x, 1
- %13 = zext i32 %12 to i64
+ %13 = sext i32 %12 to i64
%14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
%15 = addrspacecast float addrspace(3)* %14 to float*
%16 = load float* %15, align 4
@@ -45,7 +44,6 @@ define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
store float %21, float* %output, align 4
ret void
}
-
; PTX-LABEL: sum_of_array(
; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
@@ -53,7 +51,50 @@ define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
; IR-LABEL: @sum_of_array(
-; IR: [[BASE_PTR:%[0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i32 %x, i32 %y
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+
+; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
+; the order of "sext" and "add" when computing the array indices. @sum_of_array
+; computes add before sext, e.g., array[sext(x + 1)][sext(y + 1)], while
+; @sum_of_array2 computes sext before add,
+; e.g., array[sext(x) + 1][sext(y) + 1]. SeparateConstOffsetFromGEP should be
+; able to extract constant offsets from both forms.
+define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+ %0 = sext i32 %y to i64
+ %1 = sext i32 %x to i64
+ %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %3 = addrspacecast float addrspace(3)* %2 to float*
+ %4 = load float* %3, align 4
+ %5 = fadd float %4, 0.000000e+00
+ %6 = add i64 %0, 1
+ %7 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
+ %8 = addrspacecast float addrspace(3)* %7 to float*
+ %9 = load float* %8, align 4
+ %10 = fadd float %5, %9
+ %11 = add i64 %1, 1
+ %12 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
+ %13 = addrspacecast float addrspace(3)* %12 to float*
+ %14 = load float* %13, align 4
+ %15 = fadd float %10, %14
+ %16 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
+ %17 = addrspacecast float addrspace(3)* %16 to float*
+ %18 = load float* %17, align 4
+ %19 = fadd float %15, %18
+ store float %19, float* %output, align 4
+ ret void
+}
+; PTX-LABEL: sum_of_array2(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array2(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
index 2e50f5fd0c..31989dd691 100644
--- a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
@@ -23,71 +23,92 @@ entry:
%p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
ret double* %p
}
-; CHECK-LABEL: @struct
-; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i32 %i, i32 1
+; CHECK-LABEL: @struct(
+; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
-; We should be able to trace into sext/zext if it's directly used as a GEP
-; index.
-define float* @sext_zext(i32 %i, i32 %j) {
+; We should be able to trace into s/zext(a + b) if a + b is non-negative
+; (e.g., used as an index of an inbounds GEP) and one of a and b is
+; non-negative.
+define float* @sext_add(i32 %i, i32 %j) {
entry:
- %i1 = add i32 %i, 1
- %j2 = add i32 %j, 2
- %i1.ext = sext i32 %i1 to i64
- %j2.ext = zext i32 %j2 to i64
- %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i1.ext, i64 %j2.ext
+ %0 = add i32 %i, 1
+ %1 = sext i32 %0 to i64 ; inbound sext(i + 1) = sext(i) + 1
+ %2 = sub i32 %j, 2
+ ; However, inbound sext(j - 2) != sext(j) - 2, e.g., j = INT_MIN
+ %3 = sext i32 %2 to i64
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
ret float* %p
}
-; CHECK-LABEL: @sext_zext
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i32 %i, i32 %j
-; CHECK: getelementptr float* %{{[0-9]+}}, i64 34
+; CHECK-LABEL: @sext_add(
+; CHECK-NOT: = add
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 32
; We should be able to trace into sext/zext if it can be distributed to both
; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
+;
+; This test verifies we can transform
+; gep base, a + sext(b +nsw 1), c + zext(d +nuw 1)
+; to
+; gep base, a + sext(b), c + zext(d); gep ..., 1 * 32 + 1
define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
%b1 = add nsw i32 %b, 1
%b2 = sext i32 %b1 to i64
- %i = add i64 %a, %b2
+ %i = add i64 %a, %b2 ; i = a + sext(b +nsw 1)
%d1 = add nuw i32 %d, 1
%d2 = zext i32 %d1 to i64
- %j = add i64 %c, %d2
+ %j = add i64 %c, %d2 ; j = c + zext(d +nuw 1)
%p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
ret float* %p
}
-; CHECK-LABEL: @ext_add_no_overflow
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
+; CHECK-LABEL: @ext_add_no_overflow(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr float* [[BASE_PTR]], i64 33
-; Similar to @ext_add_no_overflow, we should be able to trace into sext/zext if
-; its operand is an "or" instruction.
-define float* @ext_or(i64 %a, i32 %b) {
+; Verifies we handle nested sext/zext correctly.
+define void @sext_zext(i32 %a, i32 %b, float** %out1, float** %out2) {
+entry:
+ %0 = add nsw nuw i32 %a, 1
+ %1 = sext i32 %0 to i48
+ %2 = zext i48 %1 to i64 ; zext(sext(a +nsw nuw 1)) = zext(sext(a)) + 1
+ %3 = add nsw i32 %b, 2
+ %4 = sext i32 %3 to i48
+ %5 = zext i48 %4 to i64 ; zext(sext(b +nsw 2)) != zext(sext(b)) + 2
+ %p1 = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
+ store float* %p1, float** %out1
+ %6 = add nuw i32 %a, 3
+ %7 = zext i32 %6 to i48
+ %8 = sext i48 %7 to i64 ; sext(zext(a +nuw 3)) = zext(a +nuw 3) = zext(a) + 3
+ %9 = add nsw i32 %b, 4
+ %10 = zext i32 %9 to i48
+ %11 = sext i48 %10 to i64 ; sext(zext(b +nsw 4)) != zext(b) + 4
+ %p2 = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
+ store float* %p2, float** %out2
+ ret void
+}
+; CHECK-LABEL: @sext_zext(
+; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR_1]], i64 32
+; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR_2]], i64 96
+
+; Similar to @ext_add_no_overflow, we should be able to trace into s/zext if
+; its operand is an OR and the two operands of the OR have no common bits.
+define float* @sext_or(i64 %a, i32 %b) {
entry:
%b1 = shl i32 %b, 2
- %b2 = or i32 %b1, 1
- %b3 = or i32 %b1, 2
- %b2.ext = sext i32 %b2 to i64
+ %b2 = or i32 %b1, 1 ; (b << 2) and 1 have no common bits
+ %b3 = or i32 %b1, 4 ; (b << 2) and 4 may have common bits
+ %b2.ext = zext i32 %b2 to i64
%b3.ext = sext i32 %b3 to i64
%i = add i64 %a, %b2.ext
%j = add i64 %a, %b3.ext
%p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
ret float* %p
}
-; CHECK-LABEL: @ext_or
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR]], i64 34
-
-; We should treat "or" with no common bits (%k) as "add", and leave "or" with
-; potentially common bits (%l) as is.
-define float* @or(i64 %i) {
-entry:
- %j = shl i64 %i, 2
- %k = or i64 %j, 3 ; no common bits
- %l = or i64 %j, 4 ; potentially common bits
- %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %k, i64 %l
- ret float* %p
-}
-; CHECK-LABEL: @or
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %j, i64 %l
-; CHECK: getelementptr float* [[BASE_PTR]], i64 96
+; CHECK-LABEL: @sext_or(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 32
; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
@@ -100,11 +121,28 @@ entry:
store i64 %b5, i64* %out
ret float* %p
}
-; CHECK-LABEL: @expr
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %0, i64 0
+; CHECK-LABEL: @expr(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
; CHECK: getelementptr float* [[BASE_PTR]], i64 160
; CHECK: store i64 %b5, i64* %out
+; d + sext(a +nsw (b +nsw (c +nsw 8))) => (d + sext(a) + sext(b) + sext(c)) + 8
+define float* @sext_expr(i32 %a, i32 %b, i32 %c, i64 %d) {
+entry:
+ %0 = add nsw i32 %c, 8
+ %1 = add nsw i32 %b, %0
+ %2 = add nsw i32 %a, %1
+ %3 = sext i32 %2 to i64
+ %i = add i64 %d, %3
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+ ret float* %p
+}
+; CHECK-LABEL: @sext_expr(
+; CHECK: sext i32
+; CHECK: sext i32
+; CHECK: sext i32
+; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 8
+
; Verifies we handle "sub" correctly.
define float* @sub(i64 %i, i64 %j) {
%i2 = sub i64 %i, 5 ; i - 5
@@ -112,9 +150,9 @@ define float* @sub(i64 %i, i64 %j) {
%p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
ret float* %p
}
-; CHECK-LABEL: @sub
-; CHECK: %[[j2:[0-9]+]] = sub i64 0, %j
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK-LABEL: @sub(
+; CHECK: %[[j2:[a-zA-Z0-9]+]] = sub i64 0, %j
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
; CHECK: getelementptr float* [[BASE_PTR]], i64 -155
%struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed
@@ -130,8 +168,67 @@ entry:
%arrayidx3 = getelementptr inbounds [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
ret i64* %arrayidx3
}
-; CHECK-LABEL: @packed_struct
-; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [1024 x %struct.Packed]* %s, i64 0, i32 %i, i32 1, i32 %j
-; CHECK: [[CASTED_PTR:%[0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
+; CHECK-LABEL: @packed_struct(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: [[CASTED_PTR:%[a-zA-Z0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
; CHECK: %uglygep = getelementptr i8* [[CASTED_PTR]], i64 100
; CHECK: bitcast i8* %uglygep to i64*
+
+; We shouldn't be able to extract the 8 from "zext(a +nuw (b + 8))",
+; because "zext(b + 8) != zext(b) + 8"
+define float* @zext_expr(i32 %a, i32 %b) {
+entry:
+ %0 = add i32 %b, 8
+ %1 = add nuw i32 %a, %0
+ %i = zext i32 %1 to i64
+ %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+ ret float* %p
+}
+; CHECK-LABEL: @zext_expr(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+
+; Per http://llvm.org/docs/LangRef.html#id181, the indices of an off-bound gep
+; should be considered sign-extended to the pointer size. Therefore,
+; gep base, (add i32 a, b) != gep (gep base, i32 a), i32 b
+; because
+; sext(a + b) != sext(a) + sext(b)
+;
+; This test verifies we do not illegitimately extract the 8 from
+; gep base, (i32 a + 8)
+define float* @i32_add(i32 %a) {
+entry:
+ %i = add i32 %a, 8
+ %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
+ ret float* %p
+}
+; CHECK-LABEL: @i32_add(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK-NOT: getelementptr
+
+; Verifies that we compute the correct constant offset when the index is
+; sign-extended and then zero-extended. The old version of our code failed to
+; handle this case because it simply computed the constant offset as the
+; sign-extended value of the constant part of the GEP index.
+define float* @apint(i1 %a) {
+entry:
+ %0 = add nsw nuw i1 %a, 1
+ %1 = sext i1 %0 to i4
+ %2 = zext i4 %1 to i64 ; zext (sext i1 1 to i4) to i64 = 15
+ %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
+ ret float* %p
+}
+; CHECK-LABEL: @apint(
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* [[BASE_PTR]], i64 15
+
+; Do not trace into binary operators other than ADD, SUB, and OR.
+define float* @and(i64 %a) {
+entry:
+ %0 = shl i64 %a, 2
+ %1 = and i64 %0, 1
+ %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
+ ret float* %p
+}
+; CHECK-LABEL: @and(
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array
+; CHECK-NOT: getelementptr