-//===-- Evaluator.h - LLVM IR evaluator -------------------------*- C++ -*-===//
+//===- Evaluator.h - LLVM IR evaluator --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/Constant.h"
#include "llvm/IR/GlobalVariable.h"
-
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
#include <deque>
#include <memory>
const TargetLibraryInfo *TLI;
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_TRANSFORMS_UTILS_EVALUATOR_H
#ifndef LLVM_TRANSFORMS_UTILS_MEM2REG_H
#define LLVM_TRANSFORMS_UTILS_MEM2REG_H
-#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
+
+class Function;
+
class PromotePass : public PassInfoMixin<PromotePass> {
public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-}
+
+} // end namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_MEM2REG_H
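// Illustrative usage sketch: driving a new-pass-manager pass declared like
// PromotePass above. The helper name and the assumption of an already
// populated FunctionAnalysisManager are hypothetical, not part of the header.
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Utils/Mem2Reg.h"

static void runMem2Reg(llvm::Function &F, llvm::FunctionAnalysisManager &FAM) {
  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::PromotePass()); // the pass declared in this header
  FPM.run(F, FAM);                  // analyses are fetched from FAM on demand
}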
namespace llvm {
class Module;
-class StringRef;
/// Splits the module M into N linkable partitions. The function ModuleCallback
/// is called N times passing each individual partition as the MPart argument.
function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback,
bool PreserveLocals = false);
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
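// Illustrative usage sketch for the SplitModule entry point declared above;
// the partition count and the callback body are hypothetical.
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/SplitModule.h"
#include <memory>
#include <utility>

static void splitIntoFourParts(std::unique_ptr<llvm::Module> M) {
  llvm::SplitModule(std::move(M), /*N=*/4,
                    [](std::unique_ptr<llvm::Module> MPart) {
                      // Called once per partition, e.g. to hand MPart to a
                      // separate code-generation thread.
                    },
                    /*PreserveLocals=*/false);
}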
#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
-// Needed because we can't forward-declare the nested struct
-// TargetTransformInfo::UnrollingPreferences
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
namespace llvm {
-class StringRef;
class AssumptionCache;
+class BasicBlock;
class DominatorTree;
class Loop;
class LoopInfo;
-class LPPassManager;
class MDNode;
-class Pass;
class OptimizationRemarkEmitter;
class ScalarEvolution;
-typedef SmallDenseMap<const Loop *, Loop *, 4> NewLoopsMap;
+using NewLoopsMap = SmallDenseMap<const Loop *, Loop *, 4>;
const Loop* addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
BasicBlock *ClonedBB, LoopInfo *LI,
DominatorTree *DT, AssumptionCache *AC, bool PreserveLCSSA);
MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
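// Illustrative usage sketch: querying loop metadata through GetUnrollMetadata
// declared above. The helper name is hypothetical; the metadata string is the
// one the unroller recognizes for disabling unrolling.
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

static bool unrollingDisabled(const llvm::Loop *L) {
  if (llvm::MDNode *LoopID = L->getLoopID())
    return llvm::GetUnrollMetadata(LoopID, "llvm.loop.unroll.disable") != nullptr;
  return false;
}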
namespace llvm {
-class Value;
+class Constant;
+class Function;
+class GlobalAlias;
+class GlobalVariable;
class Instruction;
-typedef ValueMap<const Value *, WeakTrackingVH> ValueToValueMapTy;
+class MDNode;
+class Metadata;
+class Type;
+class Value;
+
+using ValueToValueMapTy = ValueMap<const Value *, WeakTrackingVH>;
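// Illustrative sketch: how a ValueToValueMapTy is typically seeded before
// cloning. The function and value names are hypothetical.
static void seedMapping(llvm::Value &OldV, llvm::Value &NewV,
                        llvm::ValueToValueMapTy &VMap) {
  // operator[] stores a WeakTrackingVH, so the mapping follows RAUW and is
  // cleared if the mapped value is deleted.
  VMap[&OldV] = &NewV;
}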
/// This is a class that can be implemented by clients to remap types when
/// cloning constants and instructions.
virtual void anchor(); // Out of line method.
protected:
- ~ValueMaterializer() = default;
ValueMaterializer() = default;
ValueMaterializer(const ValueMaterializer &) = default;
ValueMaterializer &operator=(const ValueMaterializer &) = default;
+ ~ValueMaterializer() = default;
public:
/// This method can be implemented to generate a mapped Value on demand. For
-//===----------- LoopVersioningLICM.cpp - LICM Loop Versioning ------------===//
+//===- LoopVersioningLICM.cpp - LICM Loop Versioning ----------------------===//
//
// The LLVM Compiler Infrastructure
//
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
-#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Analysis/VectorUtils.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
-#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
-#include "llvm/IR/PatternMatch.h"
-#include "llvm/IR/PredIteratorCache.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
-#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <cassert>
+#include <memory>
+
+using namespace llvm;
#define DEBUG_TYPE "loop-versioning-licm"
-static const char *LICMVersioningMetaData = "llvm.loop.licm_versioning.disable";
-using namespace llvm;
+static const char *LICMVersioningMetaData = "llvm.loop.licm_versioning.disable";
/// Minimum allowed percentage of possible
/// invariant instructions in a loop.
}
namespace {
+
struct LoopVersioningLICM : public LoopPass {
static char ID;
+ LoopVersioningLICM()
+ : LoopPass(ID), LoopDepthThreshold(LVLoopDepthThreshold),
+ InvariantThreshold(LVInvarThreshold) {
+ initializeLoopVersioningLICMPass(*PassRegistry::getPassRegistry());
+ }
+
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<GlobalsAAWrapperPass>();
}
- LoopVersioningLICM()
- : LoopPass(ID), AA(nullptr), SE(nullptr), LAA(nullptr), LAI(nullptr),
- CurLoop(nullptr), LoopDepthThreshold(LVLoopDepthThreshold),
- InvariantThreshold(LVInvarThreshold), LoadAndStoreCounter(0),
- InvariantCounter(0), IsReadOnlyLoop(true) {
- initializeLoopVersioningLICMPass(*PassRegistry::getPassRegistry());
- }
StringRef getPassName() const override { return "Loop Versioning for LICM"; }
void reset() {
};
private:
- AliasAnalysis *AA; // Current AliasAnalysis information
- ScalarEvolution *SE; // Current ScalarEvolution
- LoopAccessLegacyAnalysis *LAA; // Current LoopAccessAnalysis
- const LoopAccessInfo *LAI; // Current Loop's LoopAccessInfo
+ // Current AliasAnalysis information
+ AliasAnalysis *AA = nullptr;
+
+ // Current ScalarEvolution
+ ScalarEvolution *SE = nullptr;
+
+ // Current LoopAccessAnalysis
+ LoopAccessLegacyAnalysis *LAA = nullptr;
+
+ // Current Loop's LoopAccessInfo
+ const LoopAccessInfo *LAI = nullptr;
+
+ // The current loop we are working on.
+ Loop *CurLoop = nullptr;
+
+ // AliasSet information for the current loop.
+ std::unique_ptr<AliasSetTracker> CurAST;
- Loop *CurLoop; // The current loop we are working on.
- std::unique_ptr<AliasSetTracker>
- CurAST; // AliasSet information for the current loop.
+ // Maximum loop nest threshold
+ unsigned LoopDepthThreshold;
- unsigned LoopDepthThreshold; // Maximum loop nest threshold
- float InvariantThreshold; // Minimum invariant threshold
- unsigned LoadAndStoreCounter; // Counter to track num of load & store
- unsigned InvariantCounter; // Counter to track num of invariant
- bool IsReadOnlyLoop; // Read only loop marker.
+ // Minimum invariant threshold
+ float InvariantThreshold;
+
+  // Counter to track the number of loads and stores
+ unsigned LoadAndStoreCounter = 0;
+
+  // Counter to track the number of invariant instructions
+ unsigned InvariantCounter = 0;
+
+ // Read only loop marker.
+ bool IsReadOnlyLoop = true;
bool isLegalForVersioning();
bool legalLoopStructure();
bool legalLoopInstructions();
bool legalLoopMemoryAccesses();
bool isLoopAlreadyVisited();
- void setNoAliasToLoop(Loop *);
- bool instructionSafeForVersioning(Instruction *);
+ void setNoAliasToLoop(Loop *VerLoop);
+ bool instructionSafeForVersioning(Instruction *I);
};
-}
+
+} // end anonymous namespace
/// \brief Check loop structure and confirm it's good for LoopVersioningLICM.
bool LoopVersioningLICM::legalLoopStructure() {
return false;
}
// Loop should be an innermost loop; if not, return false.
- if (CurLoop->getSubLoops().size()) {
+ if (!CurLoop->getSubLoops().empty()) {
DEBUG(dbgs() << " loop is not innermost\n");
return false;
}
}
char LoopVersioningLICM::ID = 0;
+
INITIALIZE_PASS_BEGIN(LoopVersioningLICM, "loop-versioning-licm",
"Loop Versioning For LICM", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
// - Code size is smaller, both because jumps are removed and because the
// encoding of a 2*n byte compare is smaller than that of two n-byte
// compares.
-
+//
//===----------------------------------------------------------------------===//
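// Illustrative example (assumed names, not from this file): the shape this
// pass targets. Two equality compares of adjacent fields,
//
//   struct S { int a; int b; };
//   bool eq(const S &x, const S &y) { return x.a == y.a && x.b == y.b; }
//
// chain two cmp blocks that can be answered by a single 8-byte comparison,
// conceptually
//
//   bool eq(const S &x, const S &y) { return std::memcmp(&x, &y, 8) == 0; }
//
// which is what the merged memcmp call expands to.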
-#include <algorithm>
-#include <numeric>
-#include <utility>
-#include <vector>
-#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <numeric>
+#include <utility>
+#include <vector>
using namespace llvm;
-namespace {
-
#define DEBUG_TYPE "mergeicmps"
+namespace {
+
// A BCE atom.
struct BCEAtom {
- BCEAtom() : GEP(nullptr), LoadI(nullptr), Offset() {}
+ BCEAtom() = default;
const Value *Base() const { return GEP ? GEP->getPointerOperand() : nullptr; }
return NameCmp < 0;
}
- GetElementPtrInst *GEP;
- LoadInst *LoadI;
+ GetElementPtrInst *GEP = nullptr;
+ LoadInst *LoadI = nullptr;
APInt Offset;
};
+} // end anonymous namespace
+
// If this value is a load from a constant offset w.r.t. a base address, and
// there are no other users of the load or address, returns the base address and
// the offset.
-BCEAtom visitICmpLoadOperand(Value *const Val) {
+static BCEAtom visitICmpLoadOperand(Value *const Val) {
BCEAtom Result;
if (auto *const LoadI = dyn_cast<LoadInst>(Val)) {
DEBUG(dbgs() << "load\n");
return Result;
}
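// Illustrative example (assumed IR names): the shape visitICmpLoadOperand
// accepts is a single-use GEP at a constant offset feeding a single-use load,
//
//   %gep = getelementptr inbounds i8, i8* %base, i64 8
//   %v   = load i8, i8* %gep
//
// giving a BCEAtom with Base() == %base, Offset == 8 and LoadI == the load.
// Extra users or a non-constant offset produce an invalid (empty) atom.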
+namespace {
+
// A basic block with a comparison between two BCE atoms.
// Note: the terminology is misleading: the comparison is symmetric, so there
// is no real {l/r}hs. What we want though is to have the same base on the
// left (resp. right), so that we can detect consecutive loads. To ensure this
// we put the smallest atom on the left.
class BCECmpBlock {
- public:
- BCECmpBlock() {}
+public:
+ BCECmpBlock() = default;
BCECmpBlock(BCEAtom L, BCEAtom R, int SizeBits)
: Lhs_(L), Rhs_(R), SizeBits_(SizeBits) {
// The basic block where this comparison happens.
BasicBlock *BB = nullptr;
+
// The ICMP for this comparison.
ICmpInst *CmpI = nullptr;
+
// The terminating branch.
BranchInst *BranchI = nullptr;
- private:
+private:
BCEAtom Lhs_;
BCEAtom Rhs_;
int SizeBits_ = 0;
};
+} // end anonymous namespace
+
bool BCECmpBlock::doesOtherWork() const {
AssertConsistent();
// TODO(courbet): Can we allow some other things? This is very conservative.
// Visit the given comparison. If this is a comparison between two valid
// BCE atoms, returns the comparison.
-BCECmpBlock visitICmp(const ICmpInst *const CmpI,
- const ICmpInst::Predicate ExpectedPredicate) {
+static BCECmpBlock visitICmp(const ICmpInst *const CmpI,
+ const ICmpInst::Predicate ExpectedPredicate) {
if (CmpI->getPredicate() == ExpectedPredicate) {
DEBUG(dbgs() << "cmp "
<< (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne")
// Visit the given comparison block. If this is a comparison between two valid
// BCE atoms, returns the comparison.
-BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
- const BasicBlock *const PhiBlock) {
+static BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
+ const BasicBlock *const PhiBlock) {
if (Block->empty()) return {};
auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator());
if (!BranchI) return {};
return {};
}
+namespace {
+
// A chain of comparisons.
class BCECmpChain {
- public:
+public:
BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi);
int size() const { return Comparisons_.size(); }
bool simplify(const TargetLibraryInfo *const TLI);
- private:
+private:
static bool IsContiguous(const BCECmpBlock &First,
const BCECmpBlock &Second) {
return First.Lhs().Base() == Second.Lhs().Base() &&
PHINode &Phi_;
std::vector<BCECmpBlock> Comparisons_;
+
// The original entry block (before sorting).
BasicBlock *EntryBlock_;
};
+} // end anonymous namespace
+
BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi)
: Phi_(Phi) {
// Now look inside blocks to check for BCE comparisons.
IRBuilder<> Builder(BB);
const auto &DL = Phi.getModule()->getDataLayout();
Value *const MemCmpCall = emitMemCmp(
- FirstComparison.Lhs().GEP, FirstComparison.Rhs().GEP, ConstantInt::get(DL.getIntPtrType(Context), TotalSize),
+ FirstComparison.Lhs().GEP, FirstComparison.Rhs().GEP,
+ ConstantInt::get(DL.getIntPtrType(Context), TotalSize),
Builder, DL, TLI);
Value *const MemCmpIsZero = Builder.CreateICmpEQ(
MemCmpCall, ConstantInt::get(Type::getInt32Ty(Context), 0));
}
}
-std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
- BasicBlock *const LastBlock,
- int NumBlocks) {
+static std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
+ BasicBlock *const LastBlock,
+ int NumBlocks) {
// Walk up from the last block to find other blocks.
std::vector<BasicBlock *> Blocks(NumBlocks);
BasicBlock *CurBlock = LastBlock;
return Blocks;
}
-bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) {
+static bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) {
DEBUG(dbgs() << "processPhi()\n");
if (Phi.getNumIncomingValues() <= 1) {
DEBUG(dbgs() << "skip: only one incoming value in phi\n");
return CmpChain.simplify(TLI);
}
+namespace {
+
class MergeICmps : public FunctionPass {
- public:
+public:
static char ID;
MergeICmps() : FunctionPass(ID) {
return !PA.areAllPreserved();
}
- private:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
}
+private:
PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
const TargetTransformInfo *TTI);
};
+} // end anonymous namespace
+
PreservedAnalyses MergeICmps::runImpl(Function &F, const TargetLibraryInfo *TLI,
const TargetTransformInfo *TTI) {
DEBUG(dbgs() << "MergeICmpsPass: " << F.getName() << "\n");
return PreservedAnalyses::all();
}
-} // namespace
-
char MergeICmps::ID = 0;
+
INITIALIZE_PASS_BEGIN(MergeICmps, "mergeicmps",
"Merge contiguous icmps into a memcmp", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
+//===- SeparateConstOffsetFromGEP.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
using namespace llvm;
using namespace llvm::PatternMatch;
"disable-separate-const-offset-from-gep", cl::init(false),
cl::desc("Do not separate the constant offset from a GEP instruction"),
cl::Hidden);
+
// Setting this flag may emit false positives when the input module already
// contains dead instructions. Therefore, we set it only in unit tests that are
// free of dead code.
/// garbage-collect unused instructions in UserChain.
static Value *Extract(Value *Idx, GetElementPtrInst *GEP,
User *&UserChainTail, const DominatorTree *DT);
+
/// Looks for a constant offset from the given GEP index without extracting
/// it. It returns the numeric value of the extracted constant offset (0 if
/// failed). The meaning of the arguments is the same as in Extract.
ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT)
: IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) {
}
+
/// Searches the expression that computes V for a non-zero constant C s.t.
/// V can be reassociated into the form V' + C. If the searching is
/// successful, returns C and updates UserChain as a def-use chain from C to V;
/// non-negative. Leveraging this, we can better split
/// inbounds GEPs.
APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);
+
/// A helper function to look into both operands of a binary operator.
APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
bool ZeroExtended);
+
/// After finding the constant offset C from the GEP index I, we build a new
/// index I' s.t. I' + C = I. This function builds and returns the new
/// index I' according to UserChain produced by function "find".
/// (sext(a) + sext(b)) + 5.
/// Given this form, we know I' is sext(a) + sext(b).
Value *rebuildWithoutConstOffset();
+
/// After the first step of rebuilding the GEP index without the constant
/// offset, distribute s/zext to the operands of all operators in UserChain.
/// e.g., zext(sext(a + (b + 5)) (assuming no overflow) =>
/// UserChain.size() - 1, and is decremented during
/// the recursion.
Value *distributeExtsAndCloneChain(unsigned ChainIndex);
+
/// Reassociates the GEP index to the form I' + C and returns I'.
Value *removeConstOffset(unsigned ChainIndex);
+
/// A helper function to apply ExtInsts, a list of s/zext, to value V.
/// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
/// returns "sext i32 (zext i16 V to i32) to i64".
///
/// This path helps to rebuild the new GEP index.
SmallVector<User *, 8> UserChain;
+
/// A data structure used in rebuildWithoutConstOffset. Contains all
/// sext/zext instructions along UserChain.
SmallVector<CastInst *, 16> ExtInsts;
- Instruction *IP; /// Insertion position of cloned instructions.
+
+ /// Insertion position of cloned instructions.
+ Instruction *IP;
+
const DataLayout &DL;
const DominatorTree *DT;
};
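// Illustrative example (assumed IR names) of what the extractor computes:
//
//   %idx = add nsw i64 %a, 5
//   %gep = getelementptr inbounds i32, i32* %p, i64 %idx
//
// find() reports the constant 5 buried in the index, Extract() rebuilds the
// index as plain %a, and the caller accounts for the element size when it
// accumulates the byte offset (5 * 4 = 20 bytes here).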
class SeparateConstOffsetFromGEP : public FunctionPass {
public:
static char ID;
+
SeparateConstOffsetFromGEP(const TargetMachine *TM = nullptr,
bool LowerGEP = false)
- : FunctionPass(ID), DL(nullptr), DT(nullptr), TM(TM), LowerGEP(LowerGEP) {
+ : FunctionPass(ID), TM(TM), LowerGEP(LowerGEP) {
initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
}
DL = &M.getDataLayout();
return false;
}
+
bool runOnFunction(Function &F) override;
private:
/// Tries to split the given GEP into a variadic base and a constant offset,
/// and returns true if the splitting succeeds.
bool splitGEP(GetElementPtrInst *GEP);
+
/// Lower a GEP with multiple indices into multiple GEPs with a single index.
/// Function splitGEP already split the original GEP into a variadic part and
/// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
/// \p AccumulativeByteOffset The constant offset.
void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
int64_t AccumulativeByteOffset);
+
/// Lower a GEP with multiple indices into ptrtoint+arithmetics+inttoptr form.
/// Function splitGEP already split the original GEP into a variadic part and
/// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
/// \p AccumulativeByteOffset The constant offset.
void lowerToArithmetics(GetElementPtrInst *Variadic,
int64_t AccumulativeByteOffset);
+
/// Finds the constant offset within each index and accumulates them. If
/// LowerGEP is true, it finds in indices of both sequential and structure
/// types, otherwise it only finds in sequential indices. The output
/// NeedsExtraction indicates whether we successfully find a non-zero constant
/// offset.
int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);
+
/// Canonicalize array indices to pointer-size integers. This helps to
/// simplify the logic of splitting a GEP. For example, if a + b is a
/// pointer-size integer, we have
///
/// Verified in @i32_add in split-gep.ll
bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
+
/// Optimize sext(a)+sext(b) to sext(a+b) when a+b can't sign overflow.
/// SeparateConstOffsetFromGEP distributes a sext to leaves before extracting
/// the constant offset. After extraction, it becomes desirable to reunite the
/// => constant extraction &a[sext(i) + sext(j)] + 5
/// => reunion &a[sext(i +nsw j)] + 5
bool reuniteExts(Function &F);
+
/// A helper that reunites sexts in an instruction.
bool reuniteExts(Instruction *I);
+
/// Find the closest dominator of <Dominatee> that is equivalent to <Key>.
Instruction *findClosestMatchingDominator(const SCEV *Key,
Instruction *Dominatee);
void verifyNoDeadCode(Function &F);
bool hasMoreThanOneUseInLoop(Value *v, Loop *L);
+
// Swap the index operand of two GEPs.
void swapGEPOperand(GetElementPtrInst *First, GetElementPtrInst *Second);
+
// Check if it is safe to swap the operands of two GEPs.
bool isLegalToSwapOperand(GetElementPtrInst *First, GetElementPtrInst *Second,
Loop *CurLoop);
- const DataLayout *DL;
- DominatorTree *DT;
+ const DataLayout *DL = nullptr;
+ DominatorTree *DT = nullptr;
ScalarEvolution *SE;
const TargetMachine *TM;
LoopInfo *LI;
TargetLibraryInfo *TLI;
+
/// Whether to lower a GEP with multiple indices into arithmetic operations or
/// multiple GEPs with a single index.
bool LowerGEP;
+
DenseMap<const SCEV *, SmallVector<Instruction *, 2>> DominatingExprs;
};
-} // anonymous namespace
+
+} // end anonymous namespace
char SeparateConstOffsetFromGEP::ID = 0;
+
INITIALIZE_PASS_BEGIN(
SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
"Split GEPs to a variadic base and a constant offset for better CSE", false,
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Evaluator.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include <iterator>
#define DEBUG_TYPE "evaluator"
bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
BasicBlock *&NextBB) {
// This is the main evaluation loop.
- while (1) {
+ while (true) {
Constant *InstResult = nullptr;
DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");
DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult
<< "\n");
} else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
-
if (!LI->isSimple()) {
DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n");
return false; // no volatile/atomic accesses.
return false; // Cannot handle array allocs.
}
Type *Ty = AI->getAllocatedType();
- AllocaTmps.push_back(
- make_unique<GlobalVariable>(Ty, false, GlobalValue::InternalLinkage,
- UndefValue::get(Ty), AI->getName()));
+ AllocaTmps.push_back(llvm::make_unique<GlobalVariable>(
+ Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty),
+ AI->getName()));
InstResult = AllocaTmps.back().get();
DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
} else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
BasicBlock::iterator CurInst = CurBB->begin();
- while (1) {
+ while (true) {
BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");
CurBB = NextBB;
}
}
-
-//===- FlatternCFG.cpp - Code to perform CFG flattening ---------------===//
+//===- FlattenCFG.cpp - Code to perform CFG flattening --------------------===//
//
// The LLVM Compiler Infrastructure
//
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
+
using namespace llvm;
#define DEBUG_TYPE "flattencfg"
namespace {
+
class FlattenCFGOpt {
AliasAnalysis *AA;
+
/// \brief Use parallel-and or parallel-or to generate conditions for
/// conditional branches.
bool FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder);
+
/// \brief If \param BB is the merge block of an if-region, attempt to merge
/// the if-region with an adjacent if-region upstream if two if-regions
/// contain identical instructions.
bool MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder);
+
/// \brief Compare a pair of blocks: \p Block1 and \p Block2, which
/// are from two if-regions whose entry blocks are \p Head1 and \p
/// Head2. \returns true if \p Block1 and \p Block2 contain identical
public:
FlattenCFGOpt(AliasAnalysis *AA) : AA(AA) {}
+
bool run(BasicBlock *BB);
};
-}
+
+} // end anonymous namespace
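// Illustrative example (assumed names): the shape FlattenParallelAndOr
// collapses. Nested single-use conditions such as
//
//   if (a)
//     if (b)
//       work();
//
// become one conditional block whose predicate combines both tests without
// short-circuiting (a "parallel and"):
//
//   if (a & b)
//     work();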
/// If \param [in] BB has more than one predecessor that is a conditional
/// branch, attempt to use parallel and/or for the branch condition. \returns
/// In Case 1, \param BB (BB4) has an unconditional branch (BB3) as
/// its predecessor. In Case 2, \param BB (BB3) only has conditional branches
/// as its predecessors.
-///
bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder) {
PHINode *PHI = dyn_cast<PHINode>(BB->begin());
if (PHI)
// Do branch inversion.
BasicBlock *CurrBlock = LastCondBlock;
bool EverChanged = false;
- for (;CurrBlock != FirstCondBlock;
- CurrBlock = CurrBlock->getSinglePredecessor()) {
+ for (; CurrBlock != FirstCondBlock;
+ CurrBlock = CurrBlock->getSinglePredecessor()) {
BranchInst *BI = dyn_cast<BranchInst>(CurrBlock->getTerminator());
CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
if (!CI)
/// in the 2nd if-region to compare. \returns true if \param Block1 and \param
/// Block2 have identical instructions and do not have memory reference alias
/// with \param Head2.
-///
bool FlattenCFGOpt::CompareIfRegionBlock(BasicBlock *Head1, BasicBlock *Head2,
BasicBlock *Block1,
BasicBlock *Block2) {
BasicBlock::iterator iter2 = Block2->begin();
BasicBlock::iterator end2 = Block2->getTerminator()->getIterator();
- while (1) {
+ while (true) {
if (iter1 == end1) {
if (iter2 != end2)
return false;
/// To:
/// if (a || b)
/// statement;
-///
bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) {
BasicBlock *IfTrue2, *IfFalse2;
Value *IfCond2 = GetIfCondition(BB, IfTrue2, IfFalse2);
/// FlattenCFG - This function is used to flatten a CFG. For
/// example, it uses parallel-and and parallel-or mode to collapse
-// if-conditions and merge if-regions with identical statements.
-///
+/// if-conditions and merge if-regions with identical statements.
bool llvm::FlattenCFG(BasicBlock *BB, AliasAnalysis *AA) {
return FlattenCFGOpt(AA).run(BB);
}
#include "llvm/Transforms/Utils/Mem2Reg.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
-#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"
+#include <vector>
+
using namespace llvm;
#define DEBUG_TYPE "mem2reg"
BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function
bool Changed = false;
- while (1) {
+ while (true) {
Allocas.clear();
// Find allocas that are safe to promote, by looking at all instructions in
}
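// Illustrative example (assumed IR names): the kind of entry-block alloca the
// loop above collects for promotion,
//
//   %x = alloca i32            ; used only by simple loads and stores
//   store i32 0, i32* %x
//   %v = load i32, i32* %x
//
// After PromoteMemToReg the alloca, store and load disappear; uses of %v read
// the stored value directly, with PHI nodes inserted at control-flow merges.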
namespace {
+
struct PromoteLegacyPass : public FunctionPass {
- static char ID; // Pass identification, replacement for typeid
+ // Pass identification, replacement for typeid
+ static char ID;
+
PromoteLegacyPass() : FunctionPass(ID) {
initializePromoteLegacyPassPass(*PassRegistry::getPassRegistry());
}
// runOnFunction - To run this pass, first we calculate the alloca
// instructions that are safe for promotion, then we promote each one.
- //
bool runOnFunction(Function &F) override {
if (skipFunction(F))
return false;
AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
}
- };
-} // end of anonymous namespace
+};
+
+} // end anonymous namespace
char PromoteLegacyPass::ID = 0;
+
INITIALIZE_PASS_BEGIN(PromoteLegacyPass, "mem2reg", "Promote Memory to "
"Register",
false, false)
false, false)
// createPromoteMemoryToRegister - Provide an entry point to create this pass.
-//
FunctionPass *llvm::createPromoteMemoryToRegisterPass() {
return new PromoteLegacyPass();
}
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <iterator>
#include <map>
#include <set>
+#include <tuple>
#include <utility>
#include <vector>
// The first field contains the value that the switch produces when a certain
// case group is selected, and the second field is a vector containing the
// cases composing the case group.
-typedef SmallVector<std::pair<Constant *, SmallVector<ConstantInt *, 4>>, 2>
- SwitchCaseResultVectorTy;
+using SwitchCaseResultVectorTy =
+ SmallVector<std::pair<Constant *, SmallVector<ConstantInt *, 4>>, 2>;
+
// The first field contains the phi node that generates a result of the switch
// and the second field contains the value generated for a certain case in the
// switch for that PHI.
-typedef SmallVector<std::pair<PHINode *, Constant *>, 4> SwitchCaseResultsTy;
+using SwitchCaseResultsTy = SmallVector<std::pair<PHINode *, Constant *>, 4>;
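// Illustrative example (assumed source): for
//
//   switch (x) { case 0: case 1: r = A; break; case 2: r = B; break; }
//
// the SwitchCaseResultVectorTy holds {A, {0, 1}} and {B, {2}} -- one entry per
// distinct result -- while SwitchCaseResultsTy pairs the PHI defining r with
// the constant it takes for a given case.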
/// ValueEqualityComparisonCase - Represents a case of a switch.
struct ValueEqualityComparisonCase {
/// fail.
struct ConstantComparesGatherer {
const DataLayout &DL;
- Value *CompValue; /// Value found for the switch comparison
- Value *Extra; /// Extra clause to be checked before the switch
- SmallVector<ConstantInt *, 8> Vals; /// Set of integers to match in switch
- unsigned UsedICmps; /// Number of comparisons matched in the and/or chain
+
+ /// Value found for the switch comparison
+ Value *CompValue = nullptr;
+
+ /// Extra clause to be checked before the switch
+ Value *Extra = nullptr;
+
+ /// Set of integers to match in switch
+ SmallVector<ConstantInt *, 8> Vals;
+
+ /// Number of comparisons matched in the and/or chain
+ unsigned UsedICmps = 0;
/// Construct and compute the result for the comparison instruction Cond
- ConstantComparesGatherer(Instruction *Cond, const DataLayout &DL)
- : DL(DL), CompValue(nullptr), Extra(nullptr), UsedICmps(0) {
+ ConstantComparesGatherer(Instruction *Cond, const DataLayout &DL) : DL(DL) {
gather(Cond);
}
- /// Prevent copy
ConstantComparesGatherer(const ConstantComparesGatherer &) = delete;
ConstantComparesGatherer &
operator=(const ConstantComparesGatherer &) = delete;
// (x & ~2^z) == y --> x == y || x == y|2^z
// This undoes a transformation done by instcombine to fuse 2 compares.
if (ICI->getPredicate() == (isEQ ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
-
// It's a little bit hard to see why the following transformations are
// correct. Here is a CVC3 program to verify them for 64-bit values:
ArrayRef<BasicBlock*> Blocks;
SmallVector<Instruction*,4> Insts;
bool Fail;
+
public:
- LockstepReverseIterator(ArrayRef<BasicBlock*> Blocks) :
- Blocks(Blocks) {
+ LockstepReverseIterator(ArrayRef<BasicBlock*> Blocks) : Blocks(Blocks) {
reset();
}
return !Fail;
}
- void operator -- () {
+ void operator--() {
if (Fail)
return;
for (auto *&Inst : Insts) {
// We model triangles as a type of diamond with a nullptr "true" block.
// Triangles are canonicalized so that the fallthrough edge is represented by
// a true condition, as in the diagram above.
- //
BasicBlock *PTB = PBI->getSuccessor(0);
BasicBlock *PFB = PBI->getSuccessor(1);
BasicBlock *QTB = QBI->getSuccessor(0);
/// dominated by the switch, if that would mean that some of the destination
/// blocks of the switch can be folded away. Return true if a change is made.
static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
- typedef DenseMap<PHINode *, SmallVector<int, 4>> ForwardingNodesMap;
+ using ForwardingNodesMap = DenseMap<PHINode *, SmallVector<int, 4>>;
+
ForwardingNodesMap ForwardingNodes;
BasicBlock *SwitchBlock = SI->getParent();
bool Changed = false;
} Kind;
// For SingleValueKind, this is the single value.
- Constant *SingleValue;
+ Constant *SingleValue = nullptr;
// For BitMapKind, this is the bitmap.
- ConstantInt *BitMap;
- IntegerType *BitMapElementTy;
+ ConstantInt *BitMap = nullptr;
+ IntegerType *BitMapElementTy = nullptr;
// For LinearMapKind, these are the constants used to derive the value.
- ConstantInt *LinearOffset;
- ConstantInt *LinearMultiplier;
+ ConstantInt *LinearOffset = nullptr;
+ ConstantInt *LinearMultiplier = nullptr;
// For ArrayKind, this is the array.
- GlobalVariable *Array;
+ GlobalVariable *Array = nullptr;
};
} // end anonymous namespace
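// Illustrative example (assumed case values) of LinearMapKind above: a switch
// whose results are {0 -> 10, 1 -> 13, 2 -> 16} needs no array. The table
// stores LinearOffset = 10 and LinearMultiplier = 3, and the lookup computes
// 10 + 3 * index instead of loading from a GlobalVariable (ArrayKind) or
// testing a bitmap (BitMapKind).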
SwitchLookupTable::SwitchLookupTable(
Module &M, uint64_t TableSize, ConstantInt *Offset,
const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values,
- Constant *DefaultValue, const DataLayout &DL, const StringRef &FuncName)
- : SingleValue(nullptr), BitMap(nullptr), BitMapElementTy(nullptr),
- LinearOffset(nullptr), LinearMultiplier(nullptr), Array(nullptr) {
+ Constant *DefaultValue, const DataLayout &DL, const StringRef &FuncName) {
assert(Values.size() && "Can't build lookup table without values!");
assert(TableSize >= Values.size() && "Can't fit values in table!");
User *PhiUser, BasicBlock *PhiBlock, BranchInst *RangeCheckBranch,
Constant *DefaultValue,
const SmallVectorImpl<std::pair<ConstantInt *, Constant *>> &Values) {
-
ICmpInst *CmpInst = dyn_cast<ICmpInst>(PhiUser);
if (!CmpInst)
return;
ConstantInt *MaxCaseVal = CI->getCaseValue();
BasicBlock *CommonDest = nullptr;
- typedef SmallVector<std::pair<ConstantInt *, Constant *>, 4> ResultListTy;
+
+ using ResultListTy = SmallVector<std::pair<ConstantInt *, Constant *>, 4>;
SmallDenseMap<PHINode *, ResultListTy> ResultLists;
+
SmallDenseMap<PHINode *, Constant *> DefaultResults;
SmallDenseMap<PHINode *, Type *> ResultTypes;
SmallVector<PHINode *, 4> PHIs;
MaxCaseVal = CaseVal;
// Resulting value at phi nodes for this case value.
- typedef SmallVector<std::pair<PHINode *, Constant *>, 4> ResultsTy;
+ using ResultsTy = SmallVector<std::pair<PHINode *, Constant *>, 4>;
ResultsTy Results;
if (!GetCaseResults(SI, CaseVal, CI->getCaseSuccessor(), &CommonDest,
Results, DL, TTI))
LandingPadInst *LPad2 = dyn_cast<LandingPadInst>(I);
if (!LPad2 || !LPad2->isIdenticalTo(LPad))
continue;
- for (++I; isa<DbgInfoIntrinsic>(I); ++I) {
- }
+ for (++I; isa<DbgInfoIntrinsic>(I); ++I)
+ ;
BranchInst *BI2 = dyn_cast<BranchInst>(I);
if (!BI2 || !BI2->isIdenticalTo(BI))
continue;
// See if we can merge an empty landing pad block with another which is
// equivalent.
if (LandingPadInst *LPad = dyn_cast<LandingPadInst>(I)) {
- for (++I; isa<DbgInfoIntrinsic>(I); ++I) {
- }
+ for (++I; isa<DbgInfoIntrinsic>(I); ++I)
+ ;
if (I->isTerminator() && TryToMergeLandingPad(LPad, BI, BB))
return true;
}
// Merge basic blocks into their predecessor if there is only one distinct
// pred, and if there is only one distinct successor of the predecessor, and
// if there are no PHI nodes.
- //
if (MergeBlockIntoPredecessor(BB))
return true;
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "split-module"
-
#include "llvm/Transforms/Utils/SplitModule.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
-#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalIndirectSymbol.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <memory>
#include <queue>
+#include <utility>
+#include <vector>
using namespace llvm;
+#define DEBUG_TYPE "split-module"
+
namespace {
-typedef EquivalenceClasses<const GlobalValue *> ClusterMapType;
-typedef DenseMap<const Comdat *, const GlobalValue *> ComdatMembersType;
-typedef DenseMap<const GlobalValue *, unsigned> ClusterIDMapType;
-}
+
+using ClusterMapType = EquivalenceClasses<const GlobalValue *>;
+using ComdatMembersType = DenseMap<const Comdat *, const GlobalValue *>;
+using ClusterIDMapType = DenseMap<const GlobalValue *, unsigned>;
+
+} // end anonymous namespace
static void addNonConstUser(ClusterMapType &GVtoClusterMap,
const GlobalValue *GV, const User *U) {
for (unsigned i = 0; i < N; ++i)
BalancinQueue.push(std::make_pair(i, 0));
- typedef std::pair<unsigned, ClusterMapType::iterator> SortType;
+ using SortType = std::pair<unsigned, ClusterMapType::iterator>;
+
SmallVector<SortType, 64> Sets;
SmallPtrSet<const GlobalValue *, 32> Visited;
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/ValueMapper.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <limits>
+#include <memory>
+#include <utility>
+
using namespace llvm;
// Out of line method to get vtable etc for class.
: VM(&VM), Materializer(Materializer) {}
};
-class MDNodeMapper;
class Mapper {
friend class MDNodeMapper;
/// Data about a node in \a UniquedGraph.
struct Data {
bool HasChanged = false;
- unsigned ID = ~0u;
+ unsigned ID = std::numeric_limits<unsigned>::max();
TempMDNode Placeholder;
};
void remapOperands(MDNode &N, OperandMapper mapOperand);
};
-} // end namespace
+} // end anonymous namespace
Value *Mapper::mapValue(const Value *V) {
ValueToValueMapTy::iterator I = getVM().find(V);
}
namespace {
+
/// An entry in the worklist for the post-order traversal.
struct POTWorklistEntry {
MDNode *N; ///< Current node.
POTWorklistEntry(MDNode &N) : N(&N), Op(N.op_begin()) {}
};
-} // end namespace
+
+} // end anonymous namespace
bool MDNodeMapper::createPOT(UniquedGraph &G, const MDNode &FirstN) {
assert(G.Info.empty() && "Expected a fresh traversal");
if (D.HasChanged)
continue;
- if (none_of(N->operands(), [&](const Metadata *Op) {
+ if (llvm::none_of(N->operands(), [&](const Metadata *Op) {
auto Where = Info.find(Op);
return Where != Info.end() && Where->second.HasChanged;
}))
MapMetadataDisabler(ValueToValueMapTy &VM) : VM(VM) {
VM.disableMapMetadata();
}
+
~MapMetadataDisabler() { VM.enableMapMetadata(); }
};
-} // end namespace
+} // end anonymous namespace
Optional<Metadata *> Mapper::mapSimpleMetadata(const Metadata *MD) {
// If the value already exists in the map, use it.
explicit FlushingMapper(void *pImpl) : M(*getAsMapper(pImpl)) {
assert(!M.hasWorkToDo() && "Expected to be flushed");
}
+
~FlushingMapper() { M.flush(); }
+
Mapper *operator->() const { return &M; }
};
-} // end namespace
+} // end anonymous namespace
ValueMapper::ValueMapper(ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,