-rw-r--r--  lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp                    58
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll   41
-rw-r--r--  test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll           33
3 files changed, 128 insertions, 4 deletions
diff --git a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 7c65351ad1..62f2026b8d 100644
--- a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -268,8 +268,27 @@ class SeparateConstOffsetFromGEP : public FunctionPass {
/// This canonicalization is very likely already done in clang and
/// instcombine. Therefore, the program will probably remain the same.
///
+ /// Returns true if the module changes.
+ ///
/// Verified in @i32_add in split-gep.ll
bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
+ /// For each array index that is in the form of zext(a), convert it to sext(a)
+ /// if we can prove zext(a) <= max signed value of typeof(a). We prefer
+ /// sext(a) to zext(a), because in the special case where x + y >= 0 and
+ /// (x >= 0 or y >= 0), function CanTraceInto can split sext(x + y),
+ /// while no such case exists for zext(x + y).
+ ///
+ /// Note that
+ /// zext(x + y) = zext(x) + zext(y)
+ /// is wrong, e.g.,
+ /// zext i32(UINT_MAX + 1) to i64 !=
+ /// (zext i32 UINT_MAX to i64) + (zext i32 1 to i64)
+ ///
+ /// Returns true if the module changes.
+ ///
+ /// Verified in @inbounds_zext_add in split-gep.ll and @sum_of_array3 in
+ /// split-gep-and-gvn.ll
+ bool convertInBoundsZExtToSExt(GetElementPtrInst *GEP);
const DataLayout *DL;
};
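
As a side note (not part of the patch), the arithmetic facts the new doc comment relies on can be checked with a small standalone C++ program; the concrete widths (i32/i64 modeled as uint32_t/int64_t) and the sample values below are only illustrative.

#include <cassert>
#include <cstdint>

int main() {
  // zext does not distribute over add: the i32 addition wraps before the
  // extension, so zext i32 (UINT_MAX + 1) to i64 is 0, not 2^32.
  uint32_t X = UINT32_MAX, Y = 1;
  uint32_t Wrapped = X + Y;                                  // wraps to 0
  assert(uint64_t(Wrapped) == 0);
  assert(uint64_t(X) + uint64_t(Y) == (uint64_t(1) << 32));

  // sext does distribute in the special case the comment describes: if
  // x + y >= 0 and one of x, y is non-negative, the narrow add cannot have
  // overflowed, so sext(x + y) == sext(x) + sext(y).
  int32_t A = -5, B = 7;                                     // A + B >= 0, B >= 0
  assert(int64_t(int32_t(A + B)) == int64_t(A) + int64_t(B));
  return 0;
}

This is why the pass prefers sext over zext for inbounds indices: CanTraceInto can split the former but not the latter.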
@@ -403,7 +422,6 @@ APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
// sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
//
// Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
- // TODO: if zext(a) < 2 ^ (bitwidth(a) - 1), we can prove a >= 0.
ConstantOffset =
find(U->getOperand(0), /* SignExtended */ false,
/* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
@@ -595,6 +613,43 @@ bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
return Changed;
}
+bool
+SeparateConstOffsetFromGEP::convertInBoundsZExtToSExt(GetElementPtrInst *GEP) {
+ if (!GEP->isInBounds())
+ return false;
+
+ // TODO: consider alloca
+ GlobalVariable *UnderlyingObject =
+ dyn_cast<GlobalVariable>(GEP->getPointerOperand());
+ if (UnderlyingObject == nullptr)
+ return false;
+
+ uint64_t ObjectSize =
+ DL->getTypeAllocSize(UnderlyingObject->getType()->getElementType());
+ gep_type_iterator GTI = gep_type_begin(*GEP);
+ bool Changed = false;
+ for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end(); I != E;
+ ++I, ++GTI) {
+ if (isa<SequentialType>(*GTI)) {
+ if (ZExtInst *Extended = dyn_cast<ZExtInst>(*I)) {
+ unsigned SrcBitWidth =
+ cast<IntegerType>(Extended->getSrcTy())->getBitWidth();
+ // For GEP operand zext(a), if a <= max signed value of typeof(a), then
+ // the sign bit of a is zero and sext(a) = zext(a). Because the GEP is
+ // in bounds, we know a <= ObjectSize, so the condition can be reduced
+ // to ObjectSize <= max signed value of typeof(a).
+ if (ObjectSize <=
+ APInt::getSignedMaxValue(SrcBitWidth).getZExtValue()) {
+ *I = new SExtInst(Extended->getOperand(0), Extended->getType(),
+ Extended->getName(), GEP);
+ Changed = true;
+ }
+ }
+ }
+ }
+ return Changed;
+}
+
int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
bool &NeedsExtraction) {
@@ -631,6 +686,7 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
bool Changed = false;
Changed |= canonicalizeArrayIndicesToPointerSize(GEP);
+ Changed |= convertInBoundsZExtToSExt(GEP);
bool NeedsExtraction;
int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);
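
A minimal sketch, again outside the patch, of the guard convertInBoundsZExtToSExt applies above; the helper name is invented here, and 4096 stands in for the size of the shared arrays used by the tests below.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the check in convertInBoundsZExtToSExt: because
// the GEP is inbounds, an array index zext(a) can be at most the size of the
// underlying object, so whenever that size fits below the signed maximum of
// a's type, the sign bit of a is zero and zext(a) == sext(a).
static bool canRewriteZExtAsSExt(uint64_t ObjectSize, unsigned SrcBitWidth) {
  uint64_t SignedMax = (uint64_t{1} << (SrcBitWidth - 1)) - 1;
  return ObjectSize <= SignedMax;
}

int main() {
  // The arrays in the tests below are [32 x [32 x float]], i.e.
  // 32 * 32 * 4 = 4096 bytes.
  assert(canRewriteZExtAsSExt(4096, 32));    // i32 index: 4096 <= 2^31 - 1
  assert(!canRewriteZExtAsSExt(4096, 4));    // i4 index: 4096 > 7, keep the zext
  return 0;
}

The @inbounds_zext_add test added to split-gep.ll below exercises both sides of this check.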
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
index 6b72bcdc0e..c07440cd78 100644
--- a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -98,3 +98,44 @@ define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+
+; Similar to @sum_of_array, but extends array indices using zext instead of
+; sext, e.g., array[zext(x + 1)][zext(y + 1)].
+define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+ %0 = zext i32 %y to i64
+ %1 = zext i32 %x to i64
+ %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %3 = addrspacecast float addrspace(3)* %2 to float*
+ %4 = load float* %3, align 4
+ %5 = fadd float %4, 0.000000e+00
+ %6 = add i32 %y, 1
+ %7 = zext i32 %6 to i64
+ %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+ %9 = addrspacecast float addrspace(3)* %8 to float*
+ %10 = load float* %9, align 4
+ %11 = fadd float %5, %10
+ %12 = add i32 %x, 1
+ %13 = zext i32 %12 to i64
+ %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+ %15 = addrspacecast float addrspace(3)* %14 to float*
+ %16 = load float* %15, align 4
+ %17 = fadd float %11, %16
+ %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+ %19 = addrspacecast float addrspace(3)* %18 to float*
+ %20 = load float* %19, align 4
+ %21 = fadd float %17, %20
+ store float %21, float* %output, align 4
+ ret void
+}
+; PTX-LABEL: sum_of_array3(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array3(
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
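
Outside the patch itself: the constant offsets the IR and PTX checks above expect follow from plain row-major index arithmetic, sketched below in C++ (the x/y sample values are arbitrary in-range picks).

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t Cols = 32, EltBytes = 4;   // one row of [32 x float]
  auto Elt = [&](uint64_t X, uint64_t Y) { return X * Cols + Y; };

  uint64_t Base = Elt(5, 9);                // any in-range x and y
  // The four loads in @sum_of_array3 read the base element plus 0, 1, 32, and
  // 33 elements, which is why GVN can reuse one base pointer and why the PTX
  // checks expect byte offsets +4, +128, and +132.
  assert(Elt(5, 9 + 1) - Base == 1);
  assert(Elt(5 + 1, 9) - Base == 32);
  assert(Elt(5 + 1, 9 + 1) - Base == 33);
  assert(32 * EltBytes == 128 && 33 * EltBytes == 132);
  return 0;
}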
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
index 5ea90414d1..ed40c7e955 100644
--- a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
@@ -26,21 +26,23 @@ entry:
; CHECK-LABEL: @struct(
; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
-; We should be able to trace into s/zext(a + b) if a + b is non-negative
+; We should be able to trace into sext(a + b) if a + b is non-negative
; (e.g., used as an index of an inbounds GEP) and one of a and b is
; non-negative.
define float* @sext_add(i32 %i, i32 %j) {
entry:
%0 = add i32 %i, 1
%1 = sext i32 %0 to i64 ; inbound sext(i + 1) = sext(i) + 1
- %2 = sub i32 %j, 2
- ; However, inbound sext(j - 2) != sext(j) - 2, e.g., j = INT_MIN
+ %2 = add i32 %j, -2
+ ; However, inbound sext(j + -2) != sext(j) + -2, e.g., j = INT_MIN
%3 = sext i32 %2 to i64
%p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
ret float* %p
}
; CHECK-LABEL: @sext_add(
; CHECK-NOT: = add
+; CHECK: add i32 %j, -2
+; CHECK: sext
; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 32
@@ -232,3 +234,28 @@ entry:
; CHECK-LABEL: @and(
; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array
; CHECK-NOT: getelementptr
+
+; If zext(a + b) <= the max signed value of typeof(a + b), then we can prove
+; a + b >= 0 and zext(a + b) == sext(a + b). If we can further prove that a or
+; b is non-negative, we also have zext(a + b) == sext(a) + sext(b).
+define float* @inbounds_zext_add(i32 %i, i4 %j) {
+entry:
+ %0 = add i32 %i, 1
+ %1 = zext i32 %0 to i64
+ ; Because zext(i + 1) is an index of an in bounds GEP based on
+ ; float_2d_array, zext(i + 1) <= sizeof(float_2d_array) = 4096.
+ ; Furthermore, since typeof(i + 1) is i32 and 4096 < 2^31, we are sure the
+ ; sign bit of i + 1 is 0. This implies zext(i + 1) = sext(i + 1).
+ %2 = add i4 %j, 2
+ %3 = zext i4 %2 to i64
+ ; In this case, typeof(j + 2) is i4, so zext(j + 2) <= 4096 does not imply
+ ; the sign bit of j + 2 is 0.
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
+ ret float* %p
+}
+; CHECK-LABEL: @inbounds_zext_add(
+; CHECK-NOT: add
+; CHECK: add i4 %j, 2
+; CHECK: sext
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 32