author     Dan Gohman <gohman@apple.com>  2009-08-18 16:46:41 +0000
committer  Dan Gohman <gohman@apple.com>  2009-08-18 16:46:41 +0000
commit     c40f17b08774c2dcc5787fd83241e3c64ba82974
tree       ae3c3455972b1fca1b2c67e7381205bba7dc2843
parent     4d35fce60c5ac108d24428829e51a97eeca7836c
Generalize ScalarEvolution so that it can analyze GEPs when
TargetData is not present. It still uses TargetData when available.
This generalization also fixes some limitations in the TargetData
case; the attached testcase covers this.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@79344 91177308-0d34-0410-b5e6-96231b3b80d8
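For orientation, here is a minimal sketch (not part of the commit) of how a client might use the two new ScalarEvolution entry points introduced below to build a symbolic byte offset. SE, STy, ElemTy, and IdxSCEV are hypothetical names for an existing ScalarEvolution instance, a struct type, an array element type, and the SCEV of a loop index:

// Illustrative only: symbolic offset for something like &base->field1[i],
// computable even when TargetData is not available.
const SCEV *FieldOff = SE.getFieldOffsetExpr(STy, /*FieldNo=*/1); // offsetof(STy, 1)
const SCEV *ElemSize = SE.getAllocSizeExpr(ElemTy);               // sizeof(ElemTy)
const SCEV *Offset   = SE.getAddExpr(FieldOff,
                                     SE.getMulExpr(IdxSCEV, ElemSize));

With TargetData the two new calls fold to integer constants; without it they stay symbolic, and the updated SCEVExpander can still turn the result back into a GEP.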
-rw-r--r--  include/llvm/Analysis/ScalarEvolution.h               |   2
-rw-r--r--  include/llvm/Analysis/ScalarEvolutionExpander.h       |   4
-rw-r--r--  include/llvm/Analysis/ScalarEvolutionExpressions.h    |  92
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp                      | 200
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp              | 313
-rw-r--r--  test/Transforms/IndVarSimplify/preserve-gep-nested.ll |  75
6 files changed, 565 insertions(+), 121 deletions(-)
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index b98f535217..558cd011f5 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -433,6 +433,8 @@ namespace llvm {
const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getFieldOffsetExpr(const StructType *STy, unsigned FieldNo);
+ const SCEV *getAllocSizeExpr(const Type *AllocTy);
const SCEV *getUnknown(Value *V);
const SCEV *getCouldNotCompute();
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index 9266bf9713..cc0204b6fa 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -115,6 +115,10 @@ namespace llvm {
Value *visitUMaxExpr(const SCEVUMaxExpr *S);
+ Value *visitFieldOffsetExpr(const SCEVFieldOffsetExpr *S);
+
+ Value *visitAllocSizeExpr(const SCEVAllocSizeExpr *S);
+
Value *visitUnknown(const SCEVUnknown *S) {
return S->getValue();
}
diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 99df1dfefc..35372be126 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -26,8 +26,8 @@ namespace llvm {
// These should be ordered in terms of increasing complexity to make the
// folders simpler.
scConstant, scTruncate, scZeroExtend, scSignExtend, scAddExpr, scMulExpr,
- scUDivExpr, scAddRecExpr, scUMaxExpr, scSMaxExpr, scUnknown,
- scCouldNotCompute
+ scUDivExpr, scAddRecExpr, scUMaxExpr, scSMaxExpr,
+ scFieldOffset, scAllocSize, scUnknown, scCouldNotCompute
};
//===--------------------------------------------------------------------===//
@@ -488,6 +488,90 @@ namespace llvm {
}
};
+ //===--------------------------------------------------------------------===//
+ /// SCEVTargetDataConstant - This node is the base class for representing
+ /// target-dependent values in a target-independent way.
+ ///
+ class SCEVTargetDataConstant : public SCEV {
+ protected:
+ const Type *Ty;
+ SCEVTargetDataConstant(const FoldingSetNodeID &ID, enum SCEVTypes T,
+ const Type *ty) :
+ SCEV(ID, T), Ty(ty) {}
+
+ public:
+ virtual bool isLoopInvariant(const Loop *) const { return true; }
+ virtual bool hasComputableLoopEvolution(const Loop *) const {
+ return false; // not computable
+ }
+
+ virtual bool hasOperand(const SCEV *) const {
+ return false;
+ }
+
+ bool dominates(BasicBlock *, DominatorTree *) const {
+ return true;
+ }
+
+ virtual const Type *getType() const { return Ty; }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEVTargetDataConstant *S) { return true; }
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scFieldOffset ||
+ S->getSCEVType() == scAllocSize;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVFieldOffsetExpr - This node represents an offsetof expression.
+ ///
+ class SCEVFieldOffsetExpr : public SCEVTargetDataConstant {
+ friend class ScalarEvolution;
+
+ const StructType *STy;
+ unsigned FieldNo;
+ SCEVFieldOffsetExpr(const FoldingSetNodeID &ID, const Type *ty,
+ const StructType *sty, unsigned fieldno) :
+ SCEVTargetDataConstant(ID, scFieldOffset, ty),
+ STy(sty), FieldNo(fieldno) {}
+
+ public:
+ const StructType *getStructType() const { return STy; }
+ unsigned getFieldNo() const { return FieldNo; }
+
+ virtual void print(raw_ostream &OS) const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEVFieldOffsetExpr *S) { return true; }
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scFieldOffset;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVAllocSizeExpr - This node represents a sizeof expression.
+ ///
+ class SCEVAllocSizeExpr : public SCEVTargetDataConstant {
+ friend class ScalarEvolution;
+
+ const Type *AllocTy;
+ SCEVAllocSizeExpr(const FoldingSetNodeID &ID,
+ const Type *ty, const Type *allocty) :
+ SCEVTargetDataConstant(ID, scAllocSize, ty),
+ AllocTy(allocty) {}
+
+ public:
+ const Type *getAllocType() const { return AllocTy; }
+
+ virtual void print(raw_ostream &OS) const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEVAllocSizeExpr *S) { return true; }
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scAllocSize;
+ }
+ };
//===--------------------------------------------------------------------===//
/// SCEVUnknown - This means that we are dealing with an entirely unknown SCEV
@@ -552,6 +636,10 @@ namespace llvm {
return ((SC*)this)->visitSMaxExpr((const SCEVSMaxExpr*)S);
case scUMaxExpr:
return ((SC*)this)->visitUMaxExpr((const SCEVUMaxExpr*)S);
+ case scFieldOffset:
+ return ((SC*)this)->visitFieldOffsetExpr((const SCEVFieldOffsetExpr*)S);
+ case scAllocSize:
+ return ((SC*)this)->visitAllocSizeExpr((const SCEVAllocSizeExpr*)S);
case scUnknown:
return ((SC*)this)->visitUnknown((const SCEVUnknown*)S);
case scCouldNotCompute:
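As an aside, a small sketch (not part of the patch) of how the classof methods above let the new nodes take part in the usual isa/dyn_cast dispatch; S is assumed to be a const SCEV * obtained from ScalarEvolution:

// Illustrative only: inspect a SCEV that may be one of the new nodes.
if (const SCEVFieldOffsetExpr *FO = dyn_cast<SCEVFieldOffsetExpr>(S)) {
  const StructType *STy = FO->getStructType(); // struct being indexed
  unsigned FieldNo = FO->getFieldNo();         // symbolic offsetof(STy, FieldNo)
  (void)STy; (void)FieldNo;
} else if (const SCEVAllocSizeExpr *AS = dyn_cast<SCEVAllocSizeExpr>(S)) {
  const Type *AllocTy = AS->getAllocType();    // symbolic sizeof(AllocTy)
  (void)AllocTy;
}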
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 8ce812cc06..11feee7fac 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -307,6 +307,15 @@ void SCEVAddRecExpr::print(raw_ostream &OS) const {
OS << "}<" << L->getHeader()->getName() + ">";
}
+void SCEVFieldOffsetExpr::print(raw_ostream &OS) const {
+ // LLVM struct fields don't have names, so just print the field number.
+ OS << "offsetof(" << *STy << ", " << FieldNo << ")";
+}
+
+void SCEVAllocSizeExpr::print(raw_ostream &OS) const {
+ OS << "sizeof(" << *AllocTy << ")";
+}
+
bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
// All non-instruction values are loop invariant. All instructions are loop
// invariant if they are not contained in the specified loop.
@@ -335,6 +344,41 @@ void SCEVUnknown::print(raw_ostream &OS) const {
// SCEV Utilities
//===----------------------------------------------------------------------===//
+static bool CompareTypes(const Type *A, const Type *B) {
+ if (A->getTypeID() != B->getTypeID())
+ return A->getTypeID() < B->getTypeID();
+ if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
+ const IntegerType *BI = cast<IntegerType>(B);
+ return AI->getBitWidth() < BI->getBitWidth();
+ }
+ if (const PointerType *AI = dyn_cast<PointerType>(A)) {
+ const PointerType *BI = cast<PointerType>(B);
+ return CompareTypes(AI->getElementType(), BI->getElementType());
+ }
+ if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
+ const ArrayType *BI = cast<ArrayType>(B);
+ if (AI->getNumElements() != BI->getNumElements())
+ return AI->getNumElements() < BI->getNumElements();
+ return CompareTypes(AI->getElementType(), BI->getElementType());
+ }
+ if (const VectorType *AI = dyn_cast<VectorType>(A)) {
+ const VectorType *BI = cast<VectorType>(B);
+ if (AI->getNumElements() != BI->getNumElements())
+ return AI->getNumElements() < BI->getNumElements();
+ return CompareTypes(AI->getElementType(), BI->getElementType());
+ }
+ if (const StructType *AI = dyn_cast<StructType>(A)) {
+ const StructType *BI = cast<StructType>(B);
+ if (AI->getNumElements() != BI->getNumElements())
+ return AI->getNumElements() < BI->getNumElements();
+ for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
+ if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
+ CompareTypes(BI->getElementType(i), AI->getElementType(i)))
+ return CompareTypes(AI->getElementType(i), BI->getElementType(i));
+ }
+ return false;
+}
+
namespace {
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS. This comparator is used to canonicalize
@@ -447,6 +491,21 @@ namespace {
return operator()(LC->getOperand(), RC->getOperand());
}
+ // Compare offsetof expressions.
+ if (const SCEVFieldOffsetExpr *LA = dyn_cast<SCEVFieldOffsetExpr>(LHS)) {
+ const SCEVFieldOffsetExpr *RA = cast<SCEVFieldOffsetExpr>(RHS);
+ if (CompareTypes(LA->getStructType(), RA->getStructType()) ||
+ CompareTypes(RA->getStructType(), LA->getStructType()))
+ return CompareTypes(LA->getStructType(), RA->getStructType());
+ return LA->getFieldNo() < RA->getFieldNo();
+ }
+
+ // Compare sizeof expressions by the allocation type.
+ if (const SCEVAllocSizeExpr *LA = dyn_cast<SCEVAllocSizeExpr>(LHS)) {
+ const SCEVAllocSizeExpr *RA = cast<SCEVAllocSizeExpr>(RHS);
+ return CompareTypes(LA->getAllocType(), RA->getAllocType());
+ }
+
llvm_unreachable("Unknown SCEV kind!");
return false;
}
@@ -976,7 +1035,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
- const Type *Ty) {
+ const Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -2001,6 +2060,76 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
+const SCEV *ScalarEvolution::getFieldOffsetExpr(const StructType *STy,
+ unsigned FieldNo) {
+ // If we have TargetData we can determine the constant offset.
+ if (TD) {
+ const Type *IntPtrTy = TD->getIntPtrType(getContext());
+ const StructLayout &SL = *TD->getStructLayout(STy);
+ uint64_t Offset = SL.getElementOffset(FieldNo);
+ return getIntegerSCEV(Offset, IntPtrTy);
+ }
+
+ // Field 0 is always at offset 0.
+ if (FieldNo == 0) {
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
+ return getIntegerSCEV(0, Ty);
+ }
+
+ // Okay, it looks like we really DO need an offsetof expr. Check to see if we
+ // already have one, otherwise create a new one.
+ FoldingSetNodeID ID;
+ ID.AddInteger(scFieldOffset);
+ ID.AddPointer(STy);
+ ID.AddInteger(FieldNo);
+ void *IP = 0;
+ if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ SCEV *S = SCEVAllocator.Allocate<SCEVFieldOffsetExpr>();
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
+ new (S) SCEVFieldOffsetExpr(ID, Ty, STy, FieldNo);
+ UniqueSCEVs.InsertNode(S, IP);
+ return S;
+}
+
+const SCEV *ScalarEvolution::getAllocSizeExpr(const Type *AllocTy) {
+ // If we have TargetData we can determine the constant size.
+ if (TD && AllocTy->isSized()) {
+ const Type *IntPtrTy = TD->getIntPtrType(getContext());
+ return getIntegerSCEV(TD->getTypeAllocSize(AllocTy), IntPtrTy);
+ }
+
+ // Expand an array size into the element size times the number
+ // of elements.
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(AllocTy)) {
+ const SCEV *E = getAllocSizeExpr(ATy->getElementType());
+ return getMulExpr(
+ E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
+ ATy->getNumElements())));
+ }
+
+ // Expand a vector size into the element size times the number
+ // of elements.
+ if (const VectorType *VTy = dyn_cast<VectorType>(AllocTy)) {
+ const SCEV *E = getAllocSizeExpr(VTy->getElementType());
+ return getMulExpr(
+ E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
+ VTy->getNumElements())));
+ }
+
+ // Okay, it looks like we really DO need a sizeof expr. Check to see if we
+ // already have one, otherwise create a new one.
+ FoldingSetNodeID ID;
+ ID.AddInteger(scAllocSize);
+ ID.AddPointer(AllocTy);
+ void *IP = 0;
+ if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ SCEV *S = SCEVAllocator.Allocate<SCEVAllocSizeExpr>();
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ new (S) SCEVAllocSizeExpr(ID, Ty, AllocTy);
+ UniqueSCEVs.InsertNode(S, IP);
+ return S;
+}
+
const SCEV *ScalarEvolution::getUnknown(Value *V) {
// Don't attempt to do anything other than create a SCEVUnknown object
// here. createSCEV only calls getUnknown after checking for all other
@@ -2027,17 +2156,8 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
- // Integers are always SCEVable.
- if (Ty->isInteger())
- return true;
-
- // Pointers are SCEVable if TargetData information is available
- // to provide pointer size information.
- if (isa<PointerType>(Ty))
- return TD != NULL;
-
- // Otherwise it's not SCEVable.
- return false;
+ // Integers and pointers are always SCEVable.
+ return Ty->isInteger() || isa<PointerType>(Ty);
}
/// getTypeSizeInBits - Return the size in bits of the specified type,
@@ -2049,9 +2169,14 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
if (TD)
return TD->getTypeSizeInBits(Ty);
- // Otherwise, we support only integer types.
- assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
- return Ty->getPrimitiveSizeInBits();
+ // Integer types have fixed sizes.
+ if (Ty->isInteger())
+ return Ty->getPrimitiveSizeInBits();
+
+ // The only other supported type is pointer. Without TargetData, conservatively
+ // assume pointers are 64-bit.
+ assert(isa<PointerType>(Ty) && "isSCEVable permitted a non-SCEVable type!");
+ return 64;
}
/// getEffectiveSCEVType - Return a type with the same bitwidth as
@@ -2064,8 +2189,12 @@ const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
if (Ty->isInteger())
return Ty;
+ // The only other supported type is pointer.
assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
- return TD->getIntPtrType(getContext());
+ if (TD) return TD->getIntPtrType(getContext());
+
+ // Without TargetData, conservatively assume pointers are 64-bit.
+ return Type::getInt64Ty(getContext());
}
const SCEV *ScalarEvolution::getCouldNotCompute() {
@@ -2132,8 +2261,8 @@ const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
@@ -2149,8 +2278,8 @@ const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
@@ -2165,8 +2294,8 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot noop or zero extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrZeroExtend cannot truncate!");
@@ -2181,8 +2310,8 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot noop or sign extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrSignExtend cannot truncate!");
@@ -2198,8 +2327,8 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot noop or any extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrAnyExtend cannot truncate!");
@@ -2213,8 +2342,8 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
- (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
+ assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
+ (Ty->isInteger() || isa<PointerType>(Ty)) &&
"Cannot truncate or noop with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
"getTruncateOrNoop cannot extend!");
@@ -2433,7 +2562,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
///
const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) {
- const Type *IntPtrTy = TD->getIntPtrType(getContext());
+ const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
@@ -2447,19 +2576,16 @@ const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) {
// Compute the (potentially symbolic) offset in bytes for this index.
if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
- const StructLayout &SL = *TD->getStructLayout(STy);
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- uint64_t Offset = SL.getElementOffset(FieldNo);
- TotalOffset = getAddExpr(TotalOffset, getIntegerSCEV(Offset, IntPtrTy));
+ TotalOffset = getAddExpr(TotalOffset,
+ getFieldOffsetExpr(STy, FieldNo));
} else {
// For an array, add the element offset, explicitly scaled.
const SCEV *LocalOffset = getSCEV(Index);
if (!isa<PointerType>(LocalOffset->getType()))
// Getelementptr indices are signed.
LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
- LocalOffset =
- getMulExpr(LocalOffset,
- getIntegerSCEV(TD->getTypeAllocSize(*GTI), IntPtrTy));
+ LocalOffset = getMulExpr(LocalOffset, getAllocSizeExpr(*GTI));
TotalOffset = getAddExpr(TotalOffset, LocalOffset);
}
}
@@ -2952,7 +3078,6 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// expressions we handle are GEPs and address literals.
case Instruction::GetElementPtr:
- if (!TD) break; // Without TD we can't analyze pointers.
return createNodeForGEP(U);
case Instruction::PHI:
@@ -3947,6 +4072,9 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
return getTruncateExpr(Op, Cast->getType());
}
+ if (isa<SCEVTargetDataConstant>(V))
+ return V;
+
llvm_unreachable("Unknown SCEV type!");
return 0;
}
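To illustrate the array case in getAllocSizeExpr above, a sketch under assumed names (Context is an LLVMContext, SE a ScalarEvolution constructed without TargetData):

// Illustrative only: without TargetData, the size of [10 x double] is
// expressed as a product involving sizeof(double) instead of the constant 80.
const Type *DblTy = Type::getDoubleTy(Context);
const ArrayType *ATy = ArrayType::get(DblTy, 10);
const SCEV *Size = SE.getAllocSizeExpr(ATy); // conceptually 10 * sizeof(double)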
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 3ec6fe42d4..999fd55c86 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -158,53 +158,93 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder,
- const APInt &Factor,
- ScalarEvolution &SE) {
+ const SCEV *Factor,
+ ScalarEvolution &SE,
+ const TargetData *TD) {
// Everything is divisible by one.
- if (Factor == 1)
+ if (Factor->isOne())
+ return true;
+
+ // x/x == 1.
+ if (S == Factor) {
+ S = SE.getIntegerSCEV(1, S->getType());
return true;
+ }
// For a Constant, check for a multiple of the given factor.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
- ConstantInt *CI =
- ConstantInt::get(SE.getContext(), C->getValue()->getValue().sdiv(Factor));
- // If the quotient is zero and the remainder is non-zero, reject
- // the value at this scale. It will be considered for subsequent
- // smaller scales.
- if (C->isZero() || !CI->isZero()) {
- const SCEV *Div = SE.getConstant(CI);
- S = Div;
- Remainder =
- SE.getAddExpr(Remainder,
- SE.getConstant(C->getValue()->getValue().srem(Factor)));
+ // 0/x == 0.
+ if (C->isZero())
return true;
+ // Check for divisibility.
+ if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
+ ConstantInt *CI =
+ ConstantInt::get(SE.getContext(),
+ C->getValue()->getValue().sdiv(
+ FC->getValue()->getValue()));
+ // If the quotient is zero and the remainder is non-zero, reject
+ // the value at this scale. It will be considered for subsequent
+ // smaller scales.
+ if (!CI->isZero()) {
+ const SCEV *Div = SE.getConstant(CI);
+ S = Div;
+ Remainder =
+ SE.getAddExpr(Remainder,
+ SE.getConstant(C->getValue()->getValue().srem(
+ FC->getValue()->getValue())));
+ return true;
+ }
}
}
// In a Mul, check if there is a constant operand which is a multiple
// of the given factor.
- if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S))
- if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
- if (!C->getValue()->getValue().srem(Factor)) {
- const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
- SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
- MOperands.end());
- NewMulOps[0] =
- SE.getConstant(C->getValue()->getValue().sdiv(Factor));
- S = SE.getMulExpr(NewMulOps);
- return true;
+ if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
+ if (TD) {
+ // With TargetData, the size is known. Check if there is a constant
+ // operand which is a multiple of the given factor. If so, we can
+ // factor it.
+ const SCEVConstant *FC = cast<SCEVConstant>(Factor);
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
+ if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
+ const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
+ SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
+ MOperands.end());
+ NewMulOps[0] =
+ SE.getConstant(C->getValue()->getValue().sdiv(
+ FC->getValue()->getValue()));
+ S = SE.getMulExpr(NewMulOps);
+ return true;
+ }
+ } else {
+ // Without TargetData, check if Factor can be factored out of any of the
+ // Mul's operands. If so, we can just remove it.
+ for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
+ const SCEV *SOp = M->getOperand(i);
+ const SCEV *Remainder = SE.getIntegerSCEV(0, SOp->getType());
+ if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
+ Remainder->isZero()) {
+ const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
+ SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
+ MOperands.end());
+ NewMulOps[i] = SOp;
+ S = SE.getMulExpr(NewMulOps);
+ return true;
+ }
}
+ }
+ }
// In an AddRec, check if both start and step are divisible.
if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEV *Step = A->getStepRecurrence(SE);
const SCEV *StepRem = SE.getIntegerSCEV(0, Step->getType());
- if (!FactorOutConstant(Step, StepRem, Factor, SE))
+ if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
return false;
if (!StepRem->isZero())
return false;
const SCEV *Start = A->getStart();
- if (!FactorOutConstant(Start, Remainder, Factor, SE))
+ if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
return false;
S = SE.getAddRecExpr(Start, Step, A->getLoop());
return true;
@@ -213,9 +253,73 @@ static bool FactorOutConstant(const SCEV *&S,
return false;
}
+/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
+/// is the number of SCEVAddRecExprs present, which are kept at the end of
+/// the list.
+///
+static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
+ const Type *Ty,
+ ScalarEvolution &SE) {
+ unsigned NumAddRecs = 0;
+ for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
+ ++NumAddRecs;
+ // Group Ops into non-addrecs and addrecs.
+ SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
+ SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
+ // Let ScalarEvolution sort and simplify the non-addrecs list.
+ const SCEV *Sum = NoAddRecs.empty() ?
+ SE.getIntegerSCEV(0, Ty) :
+ SE.getAddExpr(NoAddRecs);
+ // If it returned an add, use the operands. Otherwise it simplified
+ // the sum into a single value, so just use that.
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
+ Ops = Add->getOperands();
+ else {
+ Ops.clear();
+ if (!Sum->isZero())
+ Ops.push_back(Sum);
+ }
+ // Then append the addrecs.
+ Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
+}
+
+/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
+/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
+/// This helps expose more opportunities for folding parts of the expressions
+/// into GEP indices.
+///
+static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
+ const Type *Ty,
+ ScalarEvolution &SE) {
+ // Find the addrecs.
+ SmallVector<const SCEV *, 8> AddRecs;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
+ const SCEV *Start = A->getStart();
+ if (Start->isZero()) break;
+ const SCEV *Zero = SE.getIntegerSCEV(0, Ty);
+ AddRecs.push_back(SE.getAddRecExpr(Zero,
+ A->getStepRecurrence(SE),
+ A->getLoop()));
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
+ Ops[i] = Zero;
+ Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
+ e += Add->getNumOperands();
+ } else {
+ Ops[i] = Start;
+ }
+ }
+ if (!AddRecs.empty()) {
+ // Add the addrecs onto the end of the list.
+ Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
+ // Resort the operand list, moving any constants to the front.
+ SimplifyAddOperands(Ops, Ty, SE);
+ }
+}
+
/// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
/// instead of using ptrtoint+arithmetic+inttoptr. This helps
-/// BasicAliasAnalysis analyze the result.
+/// BasicAliasAnalysis and other passes analyze the result.
///
/// Design note: This depends on ScalarEvolution not recognizing inttoptr
/// and ptrtoint operators, as they may introduce pointer arithmetic
@@ -246,52 +350,62 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
bool AnyNonZeroIndices = false;
+ // Split AddRecs up into parts as either of the parts may be usable
+ // without the other.
+ SplitAddRecs(Ops, Ty, SE);
+
// Descend down the pointer's type and attempt to convert the other
// operands into GEP indices, at each level. The first index in a GEP
// indexes into the array implied by the pointer operand; the rest of
// the indices index into the element or field type selected by the
// preceding index.
for (;;) {
- APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
- ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0);
- SmallVector<const SCEV *, 8> NewOps;
+ const SCEV *ElSize = SE.getAllocSizeExpr(ElTy);
+ // If the scale size is not 0, attempt to factor out a scale for
+ // array indexing.
SmallVector<const SCEV *, 8> ScaledOps;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- // Split AddRecs up into parts as either of the parts may be usable
- // without the other.
- if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i]))
- if (!A->getStart()->isZero()) {
- const SCEV *Start = A->getStart();
- Ops.push_back(SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
- A->getStepRecurrence(SE),
- A->getLoop()));
- Ops[i] = Start;
- ++e;
- }
- // If the scale size is not 0, attempt to factor out a scale.
- if (ElSize != 0) {
+ if (ElTy->isSized() && !ElSize->isZero()) {
+ SmallVector<const SCEV *, 8> NewOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
const SCEV *Op = Ops[i];
- const SCEV *Remainder = SE.getIntegerSCEV(0, Op->getType());
- if (FactorOutConstant(Op, Remainder, ElSize, SE)) {
- ScaledOps.push_back(Op); // Op now has ElSize factored out.
- NewOps.push_back(Remainder);
- continue;
+ const SCEV *Remainder = SE.getIntegerSCEV(0, Ty);
+ if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
+ // Op now has ElSize factored out.
+ ScaledOps.push_back(Op);
+ if (!Remainder->isZero())
+ NewOps.push_back(Remainder);
+ AnyNonZeroIndices = true;
+ } else {
+ // The operand was not divisible, so add it to the list of operands
+ // we'll scan next iteration.
+ NewOps.push_back(Ops[i]);
}
}
- // If the operand was not divisible, add it to the list of operands
- // we'll scan next iteration.
- NewOps.push_back(Ops[i]);
+ // If we made any changes, update Ops.
+ if (!ScaledOps.empty()) {
+ Ops = NewOps;
+ SimplifyAddOperands(Ops, Ty, SE);
+ }
}
- Ops = NewOps;
- AnyNonZeroIndices |= !ScaledOps.empty();
+
+ // Record the scaled array index for this level of the type. If
+ // we didn't find any operands that could be factored, tentatively
+ // assume that element zero was selected (since the zero offset
+ // would obviously be folded away).
Value *Scaled = ScaledOps.empty() ?
Constant::getNullValue(Ty) :
expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
GepIndices.push_back(Scaled);
// Collect struct field index operands.
- if (!Ops.empty())
- while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ bool FoundFieldNo = false;
+ // An empty struct has no fields.
+ if (STy->getNumElements() == 0) break;
+ if (SE.TD) {
+ // With TargetData, field offsets are known. See if a constant offset
+ // falls within any of the struct fields.
+ if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
const StructLayout &SL = *SE.TD->getStructLayout(STy);
@@ -304,25 +418,52 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
Ops[0] =
SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
AnyNonZeroIndices = true;
- continue;
+ FoundFieldNo = true;
}
}
- break;
+ } else {
+ // Without TargetData, just check for a SCEVFieldOffsetExpr of the
+ // appropriate struct type.
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ if (const SCEVFieldOffsetExpr *FO =
+ dyn_cast<SCEVFieldOffsetExpr>(Ops[i]))
+ if (FO->getStructType() == STy) {
+ unsigned FieldNo = FO->getFieldNo();
+ GepIndices.push_back(
+ ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
+ FieldNo));
+ ElTy = STy->getTypeAtIndex(FieldNo);
+ Ops[i] = SE.getConstant(Ty, 0);
+ AnyNonZeroIndices = true;
+ FoundFieldNo = true;
+ break;
+ }
+ }
+ // If no struct field offsets were found, tentatively assume that
+ // field zero was selected (since the zero offset would obviously
+ // be folded away).
+ if (!FoundFieldNo) {
+ ElTy = STy->getTypeAtIndex(0u);
+ GepIndices.push_back(
+ Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
}
+ }
- if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy)) {
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
ElTy = ATy->getElementType();
- continue;
- }
- break;
+ else
+ break;
}
// If none of the operands were convertible to proper GEP indices, cast
// the base to i8* and do an ugly getelementptr with that. It's still
// better than ptrtoint+arithmetic+inttoptr at least.
if (!AnyNonZeroIndices) {
+ // Cast the base to i8*.
V = InsertNoopCastOfTo(V,
Type::getInt8Ty(Ty->getContext())->getPointerTo(PTy->getAddressSpace()));
+
+ // Expand the operands for a plain byte offset.
Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
// Fold a GEP with constant operands.
@@ -345,7 +486,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
}
}
- Value *GEP = Builder.CreateGEP(V, Idx, "scevgep");
+ // Emit a GEP.
+ Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
InsertedValues.insert(GEP);
return GEP;
}
@@ -368,11 +510,10 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
// comments on expandAddToGEP for details.
- if (SE.TD)
- if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
- const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
- return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1], PTy, Ty, V);
- }
+ if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
+ const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
+ return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1], PTy, Ty, V);
+ }
V = InsertNoopCastOfTo(V, Ty);
@@ -484,21 +625,19 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
// comments on expandAddToGEP for details.
- if (SE.TD) {
- const SCEV *Base = S->getStart();
- const SCEV *RestArray[1] = { Rest };
- // Dig into the expression to find the pointer base for a GEP.
- ExposePointerBase(Base, RestArray[0], SE);
- // If we found a pointer, expand the AddRec with a GEP.
- if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
- // Make sure the Base isn't something exotic, such as a multiplied
- // or divided pointer value. In those cases, the result type isn't
- // actually a pointer type.
- if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
- Value *StartV = expand(Base);
- assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
- return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
- }
+ const SCEV *Base = S->getStart();
+ const SCEV *RestArray[1] = { Rest };
+ // Dig into the expression to find the pointer base for a GEP.
+ ExposePointerBase(Base, RestArray[0], SE);
+ // If we found a pointer, expand the AddRec with a GEP.
+ if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
+ // Make sure the Base isn't something exotic, such as a multiplied
+ // or divided pointer value. In those cases, the result type isn't
+ // actually a pointer type.
+ if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
+ Value *StartV = expand(Base);
+ assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
+ return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
}
}
@@ -656,6 +795,14 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
return LHS;
}
+Value *SCEVExpander::visitFieldOffsetExpr(const SCEVFieldOffsetExpr *S) {
+ return ConstantExpr::getOffsetOf(S->getStructType(), S->getFieldNo());
+}
+
+Value *SCEVExpander::visitAllocSizeExpr(const SCEVAllocSizeExpr *S) {
+ return ConstantExpr::getSizeOf(S->getAllocType());
+}
+
Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
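For reference, a brief sketch (not part of the patch) of the IR the two new visit methods above fall back to when a symbolic node must be materialized without TargetData; STy and DblTy are placeholder types:

// Illustrative only: symbolic nodes lower to target-independent constant
// expressions rather than baked-in integer constants.
Constant *Off  = ConstantExpr::getOffsetOf(STy, 1); // offset of field 1 of STy
Constant *Size = ConstantExpr::getSizeOf(DblTy);    // sizeof(DblTy)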
diff --git a/test/Transforms/IndVarSimplify/preserve-gep-nested.ll b/test/Transforms/IndVarSimplify/preserve-gep-nested.ll
new file mode 100644
index 0000000000..6959064d6a
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/preserve-gep-nested.ll
@@ -0,0 +1,75 @@
+; RUN: llvm-as < %s | opt -indvars | llvm-dis > %t
+; Exactly one getelementptr for each load+store.
+; RUN: grep getelementptr %t | count 6
+; Each getelementptr uses %struct.Q* %s as a base, not i8*.
+; RUN: grep {getelementptr \[%\]struct\\.Q\\* \[%\]s,} %t | count 6
+; No explicit integer multiplications!
+; RUN: not grep {= mul} %t
+; No i8* arithmetic or pointer casting anywhere!
+; RUN: not grep {i8\\*} %t
+; RUN: not grep bitcast %t
+; RUN: not grep inttoptr %t
+; RUN: not grep ptrtoint %t
+
+; FIXME: This test should pass with or without TargetData. Until opt
+; supports running tests without TargetData, just hardwire this in.
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+%struct.Q = type { [10 x %struct.N] }
+%struct.N = type { %struct.S }
+%struct.S = type { [100 x double], [100 x double] }
+
+define void @foo(%struct.Q* %s, i64 %n) nounwind {
+entry:
+ br label %bb1
+
+bb1:
+ %i = phi i64 [ 2, %entry ], [ %i.next, %bb ]
+ %j = phi i64 [ 0, %entry ], [ %j.next, %bb ]
+ %t5 = icmp slt i64 %i, %n
+ br i1 %t5, label %bb, label %return
+
+bb:
+ %t0 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 0, i32 0, i32 0, i64 %i
+ %t1 = load double* %t0, align 8
+ %t2 = fmul double %t1, 3.200000e+00
+ %t3 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 0, i32 0, i32 0, i64 %i
+ store double %t2, double* %t3, align 8
+
+ %s0 = getelementptr inbounds %struct.Q* %s, i64 13, i32 0, i64 7, i32 0, i32 1, i64 %i
+ %s1 = load double* %s0, align 8
+ %s2 = fmul double %s1, 3.200000e+00
+ %s3 = getelementptr inbounds %struct.Q* %s, i64 13, i32 0, i64 7, i32 0, i32 1, i64 %i
+ store double %s2, double* %s3, align 8
+
+ %u0 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 7, i32 0, i32 1, i64 %j
+ %u1 = load double* %u0, align 8
+ %u2 = fmul double %u1, 3.200000e+00
+ %u3 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 7, i32 0, i32 1, i64 %j
+ store double %u2, double* %u3, align 8
+
+ %v0 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 0, i32 0, i32 1, i64 %i
+ %v1 = load double* %v0, align 8
+ %v2 = fmul double %v1, 3.200000e+00
+ %v3 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 0, i32 0, i32 1, i64 %i
+ store double %v2, double* %v3, align 8
+
+ %w0 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 0, i32 0, i32 0, i64 %j
+ %w1 = load double* %w0, align 8
+ %w2 = fmul double %w1, 3.200000e+00
+ %w3 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 0, i32 0, i32 0, i64 %j
+ store double %w2, double* %w3, align 8
+
+ %x0 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 3, i32 0, i32 0, i64 %i
+ %x1 = load double* %x0, align 8
+ %x2 = fmul double %x1, 3.200000e+00
+ %x3 = getelementptr inbounds %struct.Q* %s, i64 0, i32 0, i64 3, i32 0, i32 0, i64 %i
+ store double %x2, double* %x3, align 8
+
+ %i.next = add i64 %i, 1
+ %j.next = add i64 %j, 1
+ br label %bb1
+
+return:
+ ret void
+}