author     Quentin Colombet <qcolombet@apple.com>   2013-09-17 22:01:26 +0000
committer  Quentin Colombet <qcolombet@apple.com>   2013-09-17 22:01:26 +0000
commit     5383a377476529e55e3c244e83ec8ad66159cc22 (patch)
tree       05ececd11dd9e04ef7046d1e8f87f02558adbded
parent     3168868bb91ac871dbb83c879e763d39a39e607e (diff)
download   llvm-5383a377476529e55e3c244e83ec8ad66159cc22.tar.gz
           llvm-5383a377476529e55e3c244e83ec8ad66159cc22.tar.bz2
           llvm-5383a377476529e55e3c244e83ec8ad66159cc22.tar.xz
Revert the load slicing done in r190870.
To avoid regressions with bitfield optimizations, this slicing should take place later, like ISel time.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@190891 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp  285
-rw-r--r--  test/Transforms/InstCombine/load-slice.ll                  330
2 files changed, 0 insertions, 615 deletions
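
For context, the reverted combine looked for a load whose uses are all trunc
or trunc(lshr) chains, the shape SROA produces when a small aggregate is
loaded as a single integer. Below is a minimal, hypothetical C++ source
sketch (modeled on the %class.Complex pattern in the deleted test, not taken
from this commit) whose lowering exhibits that shape:

    // Hypothetical example. After SROA, 'out[start]' is read as one i64;
    // each float is then extracted with trunc / trunc(lshr(..., 32)) --
    // exactly the pattern the reverted combine sliced into two
    // independent 32-bit loads.
    struct Complex { float re, im; };

    void accumulate(Complex *out, long start) {
      Complex c = out[start];      // one 64-bit chunk after SROA
      c.re += out[start + 8].re;
      c.im += out[start + 8].im;
      out[start] = c;
    }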
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 0579c27db8..88e16e9725 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -16,20 +16,10 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
-/// Hidden option to stress-test load slicing: when this option
-/// is enabled, load slicing bypasses most of its profitability guards.
-/// It will also generate an uncanonicalized form of slicing.
-static cl::opt<bool>
-StressLoadSlicing("instcombine-stress-load-slicing", cl::Hidden,
- cl::desc("Bypass the profitability model of load "
- "slicing"),
- cl::init(false));
-
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
@@ -347,274 +337,6 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
return 0;
}
-namespace {
- /// \brief Helper structure used to slice a load into smaller loads.
- struct LoadedSlice {
- // The last instruction that represents the slice. This should be a
- // truncate instruction.
- Instruction *Inst;
- // The original load instruction.
- LoadInst *Origin;
- // The right shift amount in bits from the original load.
- unsigned Shift;
-
- LoadedSlice(Instruction *Inst = NULL, LoadInst *Origin = NULL,
- unsigned Shift = 0)
- : Inst(Inst), Origin(Origin), Shift(Shift) {}
-
- LoadedSlice(const LoadedSlice& LS) : Inst(LS.Inst), Origin(LS.Origin),
- Shift(LS.Shift) {}
-
- /// \brief Get the bits used in a chunk of bits \p BitWidth large.
- /// \return Result is \p BitWidth bits wide and has the used bits set
- /// to 1 and the unused bits set to 0.
- APInt getUsedBits() const {
- // Reproduce the trunc(lshr) sequence:
- // - Start from the truncated value.
- // - Zero extend to the desired bit width.
- // - Shift left.
- assert(Origin && "No original load to compare against.");
- unsigned BitWidth = Origin->getType()->getPrimitiveSizeInBits();
- assert(Inst && "This slice is not bound to an instruction");
- assert(Inst->getType()->getPrimitiveSizeInBits() <= BitWidth &&
- "Extracted slice is smaller than the whole type!");
- APInt UsedBits(Inst->getType()->getPrimitiveSizeInBits(), 0);
- UsedBits.setAllBits();
- UsedBits = UsedBits.zext(BitWidth);
- UsedBits <<= Shift;
- return UsedBits;
- }
-
- /// \brief Get the size of the slice to be loaded in bytes.
- unsigned getLoadedSize() const {
- unsigned SliceSize = getUsedBits().countPopulation();
- assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
- return SliceSize / 8;
- }
-
- /// \brief Get the offset in bytes of this slice in the original chunk of
- /// bits, whose layout is defined by \p IsBigEndian.
- uint64_t getOffsetFromBase(bool IsBigEndian) const {
- assert(!(Shift & 0x7) && "Shifts not aligned on bytes are not supported.");
- uint64_t Offset = Shift / 8;
- unsigned TySizeInBytes = Origin->getType()->getPrimitiveSizeInBits() / 8;
- assert(!(Origin->getType()->getPrimitiveSizeInBits() & 0x7) &&
- "The size of the original loaded type is not a multiple of a"
- " byte.");
- // If Offset were bigger than TySizeInBytes, we would be loading only
- // zeros. That case should have been optimized away earlier in the process.
- assert(TySizeInBytes > Offset &&
- "Invalid shift amount for given loaded size");
- if (IsBigEndian)
- Offset = TySizeInBytes - Offset - getLoadedSize();
- return Offset;
- }
-
- /// \brief Generate the sequence of instructions to load the slice
- /// represented by this object and redirect the uses of this slice to
- /// this new sequence of instructions.
- /// \pre this->Inst && this->Origin are valid Instructions.
- /// \return The last instruction of the sequence used to load the slice.
- Instruction *loadSlice(InstCombiner::BuilderTy &Builder,
- bool IsBigEndian) const {
- assert(Inst && Origin && "Unable to replace a non-existing slice.");
- Value *BaseAddr = Origin->getOperand(0);
- unsigned Alignment = Origin->getAlignment();
- Builder.SetInsertPoint(Origin);
- // Assume we are looking at a chunk of bytes.
- // BaseAddr = (i8*)BaseAddr.
- BaseAddr = Builder.CreateBitCast(BaseAddr, Builder.getInt8PtrTy(),
- "raw_cast");
- // Get the offset in that chunk of bytes w.r.t. the endianness.
- uint64_t Offset = getOffsetFromBase(IsBigEndian);
- if (Offset) {
- APInt APOffset(64, Offset);
- // BaseAddr = BaseAddr + Offset.
- BaseAddr = Builder.CreateInBoundsGEP(BaseAddr, Builder.getInt(APOffset),
- "raw_idx");
- }
-
- // Create the type of the loaded slice according to its size.
- Type *SliceType =
- Type::getIntNTy(Origin->getContext(), getLoadedSize() * 8);
-
- // Bit cast the raw pointer to the pointer type of the slice.
- BaseAddr = Builder.CreateBitCast(BaseAddr, SliceType->getPointerTo(),
- "cast");
-
- // Compute the new alignment.
- if (Offset != 0)
- Alignment = MinAlign(Alignment, Alignment + Offset);
-
- // Create the load for the slice.
- Instruction *LastInst = Builder.CreateAlignedLoad(BaseAddr, Alignment,
- Inst->getName()+".val");
- // If the final type is not the same as the loaded type, this means that
- // we have to pad with zero. Create a zero extend for that.
- Type * FinalType = Inst->getType();
- if (SliceType != FinalType)
- LastInst = cast<Instruction>(Builder.CreateZExt(LastInst, FinalType));
-
- // Update the IR to reflect the new access to the slice.
- Inst->replaceAllUsesWith(LastInst);
-
- return LastInst;
- }
-
- /// \brief Check if it would be profitable to expand this slice as an
- /// independent load.
- bool isProfitable() const {
- // Slicing is assumed to be profitable iff the chains lead to arithmetic
- // operations.
- SmallVector<const Instruction *, 8> Uses;
- Uses.push_back(Inst);
- do {
- const Instruction *Use = Uses.pop_back_val();
- for (Value::const_use_iterator UseIt = Use->use_begin(),
- UseItEnd = Use->use_end(); UseIt != UseItEnd; ++UseIt) {
- const Instruction *UseOfUse = cast<Instruction>(*UseIt);
- // Consider these instructions as arithmetic operations.
- if (isa<BinaryOperator>(UseOfUse) ||
- isa<CastInst>(UseOfUse) ||
- isa<PHINode>(UseOfUse) ||
- isa<GetElementPtrInst>(UseOfUse))
- return true;
- // No need to check whether the Use has already been visited, as we do
- // not insert any PHINode.
- Uses.push_back(UseOfUse);
- }
- } while (!Uses.empty());
- DEBUG(dbgs() << "IC: Not a profitable slice " << *Inst << '\n');
- return false;
- }
- };
-}
-
-/// \brief Check the profitability of all involved LoadedSlice.
-/// Unless StressLoadSlicing is specified, this also returns false
-/// when slicing is not in the canonical form.
-/// The canonical form of a sliced load is (1) two loads,
-/// which are (2) next to each other in memory.
-///
-/// FIXME: We may want to allow more slices to be created but
-/// this means other passes should know how to deal with all those
-/// slices.
-/// FIXME: We may want to split loads to different types, e.g.,
-/// int vs. float.
-static bool
-isSlicingProfitable(const SmallVectorImpl<LoadedSlice> &LoadedSlices,
- const APInt &UsedBits) {
- unsigned NbOfSlices = LoadedSlices.size();
- // Check (1).
- if (!StressLoadSlicing && NbOfSlices != 2)
- return false;
-
- // Check (2).
- if (!StressLoadSlicing && !UsedBits.isAllOnesValue()) {
- // Get rid of the unused bits on the right.
- APInt MemoryLayout = UsedBits.lshr(UsedBits.countTrailingZeros());
- // Get rid of the unused bits on the left.
- if (MemoryLayout.countLeadingZeros())
- MemoryLayout = MemoryLayout.trunc(MemoryLayout.getActiveBits());
- // Check that the chunk of memory is completely used.
- if (!MemoryLayout.isAllOnesValue())
- return false;
- }
-
- unsigned NbOfProfitableSlices = 0;
- for (unsigned CurrSlice = 0; CurrSlice < NbOfSlices; ++CurrSlice) {
- if (LoadedSlices[CurrSlice].isProfitable())
- ++NbOfProfitableSlices;
- else if (!StressLoadSlicing)
- return false;
- }
- // In stress mode, we may end up with zero profitable slices.
- // Check for that here.
- // In non-stress mode, all the slices are profitable at this point.
- return NbOfProfitableSlices > 0;
-}
-
-/// \brief If the given load, \p LI, is used only by trunc or trunc(lshr)
-/// operations, split it into the various pieces being extracted.
-///
-/// This sort of thing is introduced by SROA.
-/// This slicing takes care not to insert overlapping loads.
-/// \pre LI is a simple load (i.e., not an atomic or volatile load).
-static Instruction *sliceUpLoadInst(LoadInst &LI,
- InstCombiner::BuilderTy &Builder,
- DataLayout &TD) {
- assert(LI.isSimple() && "We are trying to transform a non-simple load!");
-
- // FIXME: If we want to support floating point and vector types, we should
- // support bitcast and extract/insert element instructions.
- Type *LITy = LI.getType();
- if (!LITy->isIntegerTy()) return 0;
-
- // Keep track of already used bits to detect overlapping values.
- // In that case, we will just abort the transformation.
- APInt UsedBits(LITy->getPrimitiveSizeInBits(), 0);
-
- SmallVector<LoadedSlice, 4> LoadedSlices;
-
- // Check if this load is used as several smaller chunks of bits.
- // Basically, look for uses in trunc or trunc(lshr) and record a new chain
- // of computation for each trunc.
- for (Value::use_iterator UI = LI.use_begin(), UIEnd = LI.use_end();
- UI != UIEnd; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- unsigned Shift = 0;
-
- // Check if this is a trunc(lshr).
- if (User->getOpcode() == Instruction::LShr && User->hasOneUse() &&
- isa<ConstantInt>(User->getOperand(1))) {
- Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue();
- User = User->use_back();
- }
-
- // At this point, User is a TruncInst iff we encountered trunc or
- // trunc(lshr).
- if (!isa<TruncInst>(User))
- return 0;
-
- // The width of the type must be a power of 2 and at least 8 bits.
- // Otherwise the load cannot be represented in LLVM IR.
- // Moreover, if we shifted by an amount that is not a multiple of 8 bits,
- // the slice would not start on a byte boundary. We do not support that.
- unsigned Width = User->getType()->getPrimitiveSizeInBits();
- if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
- return 0;
-
- // Build the slice for this chain of computations.
- LoadedSlice LS(User, &LI, Shift);
- APInt CurrentUsedBits = LS.getUsedBits();
-
- // Check if this slice overlaps with another.
- if ((CurrentUsedBits & UsedBits) != 0)
- return 0;
- // Update the bits used globally.
- UsedBits |= CurrentUsedBits;
-
- // Record the slice.
- LoadedSlices.push_back(LS);
- }
-
- // Abort slicing if it does not seem to be profitable.
- if (!isSlicingProfitable(LoadedSlices, UsedBits))
- return 0;
-
- // Rewrite each chain to use an independent load.
- // By construction, each chain can be represented by a unique load.
- bool IsBigEndian = TD.isBigEndian();
- for (SmallVectorImpl<LoadedSlice>::const_iterator LSIt = LoadedSlices.begin(),
- LSItEnd = LoadedSlices.end(); LSIt != LSItEnd; ++LSIt) {
- Instruction *SliceInst = LSIt->loadSlice(Builder, IsBigEndian);
- (void)SliceInst;
- DEBUG(dbgs() << "IC: Replacing " << *LSIt->Inst << "\n"
- " with " << *SliceInst << '\n');
- }
- return 0; // Don't do anything with LI.
-}
-
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
Value *Op = LI.getOperand(0);
@@ -721,13 +443,6 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
}
}
}
-
- // Try to split a load into smaller non-overlapping loads to expose
- // independent chains of computation and get rid of trunc/lshr sequences.
- // The data layout is required for that operation, as code generation
- // will change with respect to endianness.
- if (TD)
- return sliceUpLoadInst(LI, *Builder, *TD);
return 0;
}
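
The deleted LoadedSlice helpers above are mostly bit accounting. The
following standalone sketch (plain C++ with fixed-width integers standing in
for APInt; the names mirror the deleted methods, but this is an illustrative
model, not the original code) reproduces how getUsedBits, getLoadedSize, and
getOffsetFromBase interact for the high slice of test t3 below,
trunc(lshr(i64, 48)) to i32:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Simplified model of a slice extracted as trunc(lshr(load, Shift)),
    // assuming a 64-bit original load so uint64_t can stand in for APInt.
    struct SliceModel {
      unsigned OriginBits; // size of the original loaded type, in bits
      unsigned SliceBits;  // size of the truncated type, in bits
      unsigned Shift;      // right-shift amount, in bits

      // Mask of the original load's bits consumed by this slice:
      // SliceBits ones shifted left by Shift (mirrors getUsedBits()).
      uint64_t usedBits() const {
        uint64_t Ones = (SliceBits == 64) ? ~0ULL : ((1ULL << SliceBits) - 1);
        return Ones << Shift; // bits shifted past bit 63 drop out
      }

      // Number of bytes actually read from memory (mirrors getLoadedSize()).
      unsigned loadedSize() const {
        unsigned Pop = __builtin_popcountll(usedBits());
        assert(!(Pop & 0x7) && "Size is not a multiple of a byte.");
        return Pop / 8;
      }

      // Byte offset of the slice from the base address, depending on
      // endianness (mirrors getOffsetFromBase()).
      uint64_t offsetFromBase(bool IsBigEndian) const {
        assert(!(Shift & 0x7) && "Shift must be byte aligned.");
        uint64_t Offset = Shift / 8;
        unsigned TySizeInBytes = OriginBits / 8;
        assert(TySizeInBytes > Offset && "Invalid shift for loaded size");
        if (IsBigEndian)
          Offset = TySizeInBytes - Offset - loadedSize();
        return Offset;
      }
    };

    int main() {
      SliceModel High{64, 32, 48}; // i32 slice, only 16 bits from memory
      std::printf("used bits: 0x%016llx\n",
                  (unsigned long long)High.usedBits()); // 0xffff000000000000
      std::printf("load size: %u bytes\n", High.loadedSize());      // 2
      std::printf("LE offset: %llu\n",
                  (unsigned long long)High.offsetFromBase(false));  // 6
      std::printf("BE offset: %llu\n",
                  (unsigned long long)High.offsetFromBase(true));   // 0
    }

These values match the expectations in t3: on little endian the 16-bit high
slice is loaded from base + 6 with alignment 2, on big endian from the base
itself with alignment 8.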
diff --git a/test/Transforms/InstCombine/load-slice.ll b/test/Transforms/InstCombine/load-slice.ll
deleted file mode 100644
index 8926653c88..0000000000
--- a/test/Transforms/InstCombine/load-slice.ll
+++ /dev/null
@@ -1,330 +0,0 @@
-; RUN: opt -default-data-layout="E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" -instcombine -instcombine-stress-load-slicing -S < %s -o - | FileCheck %s --check-prefix=BIG
-; RUN: opt -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" -instcombine -instcombine-stress-load-slicing -S < %s -o - | FileCheck %s --check-prefix=LITTLE
-;
-; <rdar://problem/14477220>
-
-%class.Complex = type { float, float }
-
-
-; Check that independent slices lead to independent loads.
-;
-; The 64-bit load should have been split into two 32-bit slices.
-; The big endian layout is:
-; MSB 7 6 5 4 | 3 2 1 0 LSB
-;      High        Low
-; The base address points to 7 and is 8-byte aligned.
-; Low slice starts at 3 (base + 4 bytes) and is 4-byte aligned.
-; High slice starts at 7 (base) and is 8-byte aligned.
-;
-; The little endian layout is:
-; LSB 0 1 2 3 | 4 5 6 7 MSB
-;      Low         High
-; The base address points to 0 and is 8-byte aligned.
-; Low slice starts at 0 (base) and is 8-byte aligned.
-; High slice starts at 4 (base + 4 bytes) and is 4-byte aligned.
-;
-define void @t1(%class.Complex* nocapture %out, i64 %out_start) {
-; BIG-LABEL: @t1
-; Original load should have been sliced.
-; BIG-NOT: load i64*
-; BIG-NOT: trunc i64
-; BIG-NOT: lshr i64
-;
-; First 32-bit slice.
-; BIG: [[HIGH_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start
-; BIG: [[HIGH_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[HIGH_SLICE_BASEADDR]] to i32*
-; BIG: [[HIGH_SLICE:%[a-zA-Z.0-9_]+]] = load i32* [[HIGH_SLICE_ADDR]], align 8
-;
-; Second 32-bit slice.
-; BIG: [[LOW_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start, i32 1
-; BIG: [[LOW_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast float* [[LOW_SLICE_BASEADDR]] to i32*
-; BIG: [[LOW_SLICE:%[a-zA-Z.0-9_]+]] = load i32* [[LOW_SLICE_ADDR]], align 4
-;
-; Cast to the final type.
-; BIG: [[LOW_SLICE_FLOAT:%[a-zA-Z.0-9_]+]] = bitcast i32 [[LOW_SLICE]] to float
-; BIG: [[HIGH_SLICE_FLOAT:%[a-zA-Z.0-9_]+]] = bitcast i32 [[HIGH_SLICE]] to float
-;
-; Uses of the slices.
-; BIG: fadd float {{%[a-zA-Z.0-9_]+}}, [[LOW_SLICE_FLOAT]]
-; BIG: fadd float {{%[a-zA-Z.0-9_]+}}, [[HIGH_SLICE_FLOAT]]
-;
-; LITTLE-LABEL: @t1
-; Original load should have been sliced.
-; LITTLE-NOT: load i64*
-; LITTLE-NOT: trunc i64
-; LITTLE-NOT: lshr i64
-;
-; LITTLE: [[BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start
-;
-; First 32-bit slice.
-; LITTLE: [[HIGH_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start, i32 1
-; LITTLE: [[HIGH_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast float* [[HIGH_SLICE_BASEADDR]] to i32*
-; LITTLE: [[HIGH_SLICE:%[a-zA-Z.0-9_]+]] = load i32* [[HIGH_SLICE_ADDR]], align 4
-;
-; Second 32-bit slice.
-; LITTLE: [[LOW_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[BASEADDR]] to i32*
-; LITTLE: [[LOW_SLICE:%[a-zA-Z.0-9_]+]] = load i32* [[LOW_SLICE_ADDR]], align 8
-;
-; Cast to the final type.
-; LITTLE: [[LOW_SLICE_FLOAT:%[a-zA-Z.0-9_]+]] = bitcast i32 [[LOW_SLICE]] to float
-; LITTLE: [[HIGH_SLICE_FLOAT:%[a-zA-Z.0-9_]+]] = bitcast i32 [[HIGH_SLICE]] to float
-;
-; Uses of the slices.
-; LITTLE: fadd float {{%[a-zA-Z.0-9_]+}}, [[LOW_SLICE_FLOAT]]
-; LITTLE: fadd float {{%[a-zA-Z.0-9_]+}}, [[HIGH_SLICE_FLOAT]]
-entry:
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
- %tmp = bitcast %class.Complex* %arrayidx to i64*
- %tmp1 = load i64* %tmp, align 8
- %t0.sroa.0.0.extract.trunc = trunc i64 %tmp1 to i32
- %tmp2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float
- %t0.sroa.2.0.extract.shift = lshr i64 %tmp1, 32
- %t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
- %tmp3 = bitcast i32 %t0.sroa.2.0.extract.trunc to float
- %add = add i64 %out_start, 8
- %arrayidx2 = getelementptr inbounds %class.Complex* %out, i64 %add
- %i.i = getelementptr inbounds %class.Complex* %arrayidx2, i64 0, i32 0
- %tmp4 = load float* %i.i, align 4
- %add.i = fadd float %tmp4, %tmp2
- %retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
- %r.i = getelementptr inbounds %class.Complex* %arrayidx2, i64 0, i32 1
- %tmp5 = load float* %r.i, align 4
- %add5.i = fadd float %tmp5, %tmp3
- %retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
- %ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>*
- store <2 x float> %retval.sroa.0.4.vec.insert.i, <2 x float>* %ref.tmp.sroa.0.0.cast, align 4
- ret void
-}
-
-; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
-
-; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-
-; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture)
-
-; Check that slices not involved in arithmetic are not split into independent loads.
-; BIG-LABEL: @t2
-; BIG: load i16*
-; BIG: trunc i16 {{%[a-zA-Z.0-9_]+}} to i8
-; BIG: lshr i16 {{%[a-zA-Z.0-9_]+}}, 8
-; BIG: trunc i16 {{%[a-zA-Z.0-9_]+}} to i8
-;
-; LITTLE-LABEL: @t2
-; LITTLE: load i16*
-; LITTLE: trunc i16 {{%[a-zA-Z.0-9_]+}} to i8
-; LITTLE: lshr i16 {{%[a-zA-Z.0-9_]+}}, 8
-; LITTLE: trunc i16 {{%[a-zA-Z.0-9_]+}} to i8
-define void @t2(%class.Complex* nocapture %out, i64 %out_start) {
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
- %bitcast = bitcast %class.Complex* %arrayidx to i16*
- %chunk16 = load i16* %bitcast, align 8
- %slice8_low = trunc i16 %chunk16 to i8
- %shift = lshr i16 %chunk16, 8
- %slice8_high = trunc i16 %shift to i8
- %vec = insertelement <2 x i8> undef, i8 %slice8_high, i32 0
- %vec1 = insertelement <2 x i8> %vec, i8 %slice8_low, i32 1
- %addr = bitcast %class.Complex* %arrayidx to <2 x i8>*
- store <2 x i8> %vec1, <2 x i8>* %addr, align 8
- ret void
-}
-
-; Check that we do not read outside of the chunk of bits of the original load.
-;
-; The 64-bit load should have been split into one 32-bit and one 16-bit slice.
-; The 16-bit slice should be zero-extended to match the final type.
-; The big endian layout is:
-; MSB 7 6 | 5 4 | 3 2 1 0 LSB
-;   High              Low
-; The base address points to 7 and is 8-byte aligned.
-; Low slice starts at 3 (base + 4 bytes) and is 4-byte aligned.
-; High slice starts at 7 (base) and is 8-byte aligned.
-;
-; The little endian layout is:
-; LSB 0 1 2 3 | 4 5 | 6 7 MSB
-;      Low            High
-; The base address points to 0 and is 8-byte aligned.
-; Low slice starts at 0 (base) and is 8-byte aligned.
-; High slice starts at 6 (base + 6 bytes) and is 2-byte aligned.
-;
-; BIG-LABEL: @t3
-; Original load should have been sliced.
-; BIG-NOT: load i64*
-; BIG-NOT: trunc i64
-; BIG-NOT: lshr i64
-;
-; First 32-bit slice, where only 16 bits come from memory.
-; BIG: [[HIGH_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start
-; BIG: [[HIGH_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[HIGH_SLICE_BASEADDR]] to i16*
-; BIG: [[HIGH_SLICE:%[a-zA-Z.0-9_]+]] = load i16* [[HIGH_SLICE_ADDR]], align 8
-; BIG: [[HIGH_SLICE_ZEXT:%[a-zA-Z.0-9_]+]] = zext i16 [[HIGH_SLICE]] to i32
-;
-; Second 32-bit slice.
-; BIG: [[LOW_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start, i32 1
-; BIG: [[LOW_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast float* [[LOW_SLICE_BASEADDR]] to i32*
-; BIG: [[LOW_SLICE:%[a-zA-Z.0-9_]+]] = load i32* [[LOW_SLICE_ADDR]], align 4
-;
-; Use of the slices.
-; BIG: add i32 [[HIGH_SLICE_ZEXT]], [[LOW_SLICE]]
-;
-; LITTLE-LABEL: @t3
-; Original load should have been sliced.
-; LITTLE-NOT: load i64*
-; LITTLE-NOT: trunc i64
-; LITTLE-NOT: lshr i64
-;
-; LITTLE: [[BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start
-;
-; First 32-bit slice, where only 16 bits come from memory.
-; LITTLE: [[HIGH_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[BASEADDR]] to i8*
-; LITTLE: [[HIGH_SLICE_ADDR_I8:%[a-zA-Z.0-9_]+]] = getelementptr inbounds i8* [[HIGH_SLICE_ADDR]], i64 6
-; LITTLE: [[HIGH_SLICE_ADDR_I16:%[a-zA-Z.0-9_]+]] = bitcast i8* [[HIGH_SLICE_ADDR_I8]] to i16*
-; LITTLE: [[HIGH_SLICE:%[a-zA-Z.0-9_]+]] = load i16* [[HIGH_SLICE_ADDR_I16]], align 2
-; LITTLE: [[HIGH_SLICE_ZEXT:%[a-zA-Z.0-9_]+]] = zext i16 [[HIGH_SLICE]] to i32
-;
-; Second 32-bit slice.
-; LITTLE: [[LOW_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[BASEADDR]] to i32*
-; LITTLE: [[LOW_SLICE:%[a-zA-Z.0-9_]+]] = load i32* [[LOW_SLICE_ADDR]], align 8
-;
-; Use of the slices.
-; LITTLE: add i32 [[HIGH_SLICE_ZEXT]], [[LOW_SLICE]]
-define i32 @t3(%class.Complex* nocapture %out, i64 %out_start) {
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
- %bitcast = bitcast %class.Complex* %arrayidx to i64*
- %chunk64 = load i64* %bitcast, align 8
- %slice32_low = trunc i64 %chunk64 to i32
- %shift48 = lshr i64 %chunk64, 48
- %slice32_high = trunc i64 %shift48 to i32
- %res = add i32 %slice32_high, %slice32_low
- ret i32 %res
-}
-
-; Check that we do not optimize overlapping slices.
-;
-; The 64-bit load should NOT have been split, as the slices overlap.
-; First slice uses bytes numbered 0 to 3.
-; Second slice uses bytes numbered 6 and 7.
-; Third slice uses bytes numbered 4 to 7.
-; BIG-LABEL: @t4
-; BIG: load i64* {{%[a-zA-Z.0-9_]+}}, align 8
-; BIG: trunc i64 {{%[a-zA-Z.0-9_]+}} to i32
-; BIG: lshr i64 {{%[a-zA-Z.0-9_]+}}, 48
-; BIG: trunc i64 {{%[a-zA-Z.0-9_]+}} to i32
-; BIG: lshr i64 {{%[a-zA-Z.0-9_]+}}, 32
-; BIG: trunc i64 {{%[a-zA-Z.0-9_]+}} to i32
-;
-; LITTLE-LABEL: @t4
-; LITTLE: load i64* {{%[a-zA-Z.0-9_]+}}, align 8
-; LITTLE: trunc i64 {{%[a-zA-Z.0-9_]+}} to i32
-; LITTLE: lshr i64 {{%[a-zA-Z.0-9_]+}}, 48
-; LITTLE: trunc i64 {{%[a-zA-Z.0-9_]+}} to i32
-; LITTLE: lshr i64 {{%[a-zA-Z.0-9_]+}}, 32
-; LITTLE: trunc i64 {{%[a-zA-Z.0-9_]+}} to i32
-define i32 @t4(%class.Complex* nocapture %out, i64 %out_start) {
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
- %bitcast = bitcast %class.Complex* %arrayidx to i64*
- %chunk64 = load i64* %bitcast, align 8
- %slice32_low = trunc i64 %chunk64 to i32
- %shift48 = lshr i64 %chunk64, 48
- %slice32_high = trunc i64 %shift48 to i32
- %shift32 = lshr i64 %chunk64, 32
- %slice32_lowhigh = trunc i64 %shift32 to i32
- %tmpres = add i32 %slice32_high, %slice32_low
- %res = add i32 %slice32_lowhigh, %tmpres
- ret i32 %res
-}
-
-; Check that we optimize when 3 slices are involved.
-; The 64-bit load should have been split into one 32-bit and two 16-bit slices.
-; The 16-bit high slice should be zero-extended to match the final type.
-; The big endian layout is:
-; MSB 7 6 | 5 4 | 3 2 1 0 LSB
-;   High  LowHigh      Low
-; The base address points to 7 and is 8-byte aligned.
-; Low slice starts at 3 (base + 4 bytes) and is 4-byte aligned.
-; High slice starts at 7 (base) and is 8-byte aligned.
-; LowHigh slice starts at 5 (base + 2 bytes) and is 2-byte aligned.
-;
-; The little endian layout is:
-; LSB 0 1 2 3 | 4 5 | 6 7 MSB
-;      Low    LowHigh  High
-; The base address points to 0 and is 8-byte aligned.
-; Low slice starts at 0 (base) and is 8-byte aligned.
-; High slice starts at 6 (base + 6 bytes) and is 2-byte aligned.
-; LowHigh slice starts at 4 (base + 4 bytes) and is 4-byte aligned.
-;
-; Original load should have been sliced.
-; BIG-LABEL: @t5
-; BIG-NOT: load i64*
-; BIG-NOT: trunc i64
-; BIG-NOT: lshr i64
-;
-; LowHigh 32-bit slice, where only 16 bits come from memory.
-; BIG: [[LOWHIGH_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start
-; BIG: [[LOWHIGH_SLICE_BASEADDR_I8:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[LOWHIGH_SLICE_BASEADDR]] to i8*
-; BIG: [[LOWHIGH_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds i8* [[LOWHIGH_SLICE_BASEADDR_I8]], i64 2
-; BIG: [[LOWHIGH_SLICE_ADDR_I16:%[a-zA-Z.0-9_]+]] = bitcast i8* [[LOWHIGH_SLICE_ADDR]] to i16*
-; BIG: [[LOWHIGH_SLICE:%[a-zA-Z.0-9_]+]] = load i16* [[LOWHIGH_SLICE_ADDR_I16]], align 2
-;
-; First 32-bit slice, where only 16 bits come from memory.
-; BIG: [[HIGH_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[LOWHIGH_SLICE_BASEADDR]] to i16*
-; BIG: [[HIGH_SLICE:%[a-zA-Z.0-9_]+]] = load i16* [[HIGH_SLICE_ADDR]], align 8
-; BIG: [[HIGH_SLICE_ZEXT:%[a-zA-Z.0-9_]+]] = zext i16 [[HIGH_SLICE]] to i32
-;
-; Second 32-bit slice.
-; BIG: [[LOW_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start, i32 1
-; BIG: [[LOW_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast float* [[LOW_SLICE_BASEADDR]] to i32*
-; BIG: [[LOW_SLICE:%[a-zA-Z.0-9_]+]] = load i32* [[LOW_SLICE_ADDR]], align 4
-;
-; Original sext is still here.
-; BIG: [[LOWHIGH_SLICE_SEXT:%[a-zA-Z.0-9_]+]] = sext i16 [[LOWHIGH_SLICE]] to i32
-;
-; Uses of the slices.
-; BIG: [[RES:%[a-zA-Z.0-9_]+]] = add i32 [[HIGH_SLICE_ZEXT]], [[LOW_SLICE]]
-; BIG: add i32 [[LOWHIGH_SLICE_SEXT]], [[RES]]
-;
-; LITTLE-LABEL: @t5
-; LITTLE-NOT: load i64*
-; LITTLE-NOT: trunc i64
-; LITTLE-NOT: lshr i64
-;
-; LITTLE: [[BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start
-;
-; LowHigh 32-bit slice, where only 16 bits come from memory.
-; LITTLE: [[LOWHIGH_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = getelementptr inbounds %class.Complex* %out, i64 %out_start, i32 1
-; LITTLE: [[LOWHIGH_SLICE_ADDR_I16:%[a-zA-Z.0-9_]+]] = bitcast float* [[LOWHIGH_SLICE_BASEADDR]] to i16*
-; LITTLE: [[LOWHIGH_SLICE:%[a-zA-Z.0-9_]+]] = load i16* [[LOWHIGH_SLICE_ADDR_I16]], align 4
-;
-; First 32-bit slice, where only 16 bits come from memory.
-; LITTLE: [[HIGH_SLICE_BASEADDR:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[BASEADDR]] to i8*
-; LITTLE: [[HIGH_SLICE_ADDR_I8:%[a-zA-Z.0-9_]+]] = getelementptr inbounds i8* [[HIGH_SLICE_BASEADDR]], i64 6
-; LITTLE: [[HIGH_SLICE_ADDR_I16:%[a-zA-Z.0-9_]+]] = bitcast i8* [[HIGH_SLICE_ADDR_I8]] to i16*
-; LITTLE: [[HIGH_SLICE:%[a-zA-Z.0-9_]+]] = load i16* [[HIGH_SLICE_ADDR_I16]], align 2
-; LITTLE: [[HIGH_SLICE_ZEXT:%[a-zA-Z.0-9_]+]] = zext i16 [[HIGH_SLICE]] to i32
-;
-; Second 32-bit slice.
-; LITTLE: [[LOW_SLICE_ADDR:%[a-zA-Z.0-9_]+]] = bitcast %class.Complex* [[BASEADDR]] to i32*
-; LITTLE: [[LOW_SLICE:%[a-zA-Z.0-9_]+]] = load i32* [[LOW_SLICE_ADDR]], align 8
-;
-; Original sext is still here.
-; LITTLE: [[LOWHIGH_SLICE_SEXT:%[a-zA-Z.0-9_]+]] = sext i16 [[LOWHIGH_SLICE]] to i32
-;
-; Uses of the slices.
-; LITTLE: [[RES:%[a-zA-Z.0-9_]+]] = add i32 [[HIGH_SLICE_ZEXT]], [[LOW_SLICE]]
-; LITTLE: add i32 [[LOWHIGH_SLICE_SEXT]], [[RES]]
-define i32 @t5(%class.Complex* nocapture %out, i64 %out_start) {
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
- %bitcast = bitcast %class.Complex* %arrayidx to i64*
- %chunk64 = load i64* %bitcast, align 8
- %slice32_low = trunc i64 %chunk64 to i32
- %shift48 = lshr i64 %chunk64, 48
- %slice32_high = trunc i64 %shift48 to i32
- %shift32 = lshr i64 %chunk64, 32
- %slice16_lowhigh = trunc i64 %shift32 to i16
- %slice32_lowhigh = sext i16 %slice16_lowhigh to i32
- %tmpres = add i32 %slice32_high, %slice32_low
- %res = add i32 %slice32_lowhigh, %tmpres
- ret i32 %res
-}
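
The "align N" values checked throughout these tests follow from the deleted
alignment update in loadSlice(): Alignment = MinAlign(Alignment, Alignment +
Offset). A small standalone check (MinAlign re-declared locally for
illustration; in LLVM it lives in include/llvm/Support/MathExtras.h)
reproducing the alignments expected above:

    #include <cstdint>
    #include <cstdio>

    // Same formula as llvm::MinAlign: the largest power of two dividing
    // both A and B, i.e. the lowest set bit of A | B.
    static uint64_t MinAlign(uint64_t A, uint64_t B) {
      return (A | B) & (1 + ~(A | B));
    }

    int main() {
      const uint64_t BaseAlign = 8; // the original loads are "align 8"
      // Offset 0 keeps the base alignment (loadSlice() skips the update).
      std::printf("offset 0 -> align %llu\n", (unsigned long long)BaseAlign);
      // t1: slice at base + 4 drops to align 4.
      std::printf("offset 4 -> align %llu\n",
                  (unsigned long long)MinAlign(BaseAlign, BaseAlign + 4));
      // t5: LowHigh slice at base + 2 (big endian) drops to align 2.
      std::printf("offset 2 -> align %llu\n",
                  (unsigned long long)MinAlign(BaseAlign, BaseAlign + 2));
      // t3/t5: high slice at base + 6 (little endian) drops to align 2.
      std::printf("offset 6 -> align %llu\n",
                  (unsigned long long)MinAlign(BaseAlign, BaseAlign + 6));
    }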