-//===- ADCE.h - Aggressive dead code elimination --------------------------===//
+//===- ADCE.h - Aggressive dead code elimination ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
#ifndef LLVM_TRANSFORMS_SCALAR_ADCE_H
#define LLVM_TRANSFORMS_SCALAR_ADCE_H
-#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
+class Function;
+
/// A DCE pass that assumes instructions are dead until proven otherwise.
///
/// This pass eliminates dead code by optimistically assuming that all
struct ADCEPass : PassInfoMixin<ADCEPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};
-}
+
+} // end namespace llvm
#endif // LLVM_TRANSFORMS_SCALAR_ADCE_H
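// A minimal usage sketch (not part of this patch) of how a PassInfoMixin-based
// pass such as ADCEPass is typically scheduled under the new pass manager.
// runADCEExample is an illustrative name; the PassBuilder/PassManager calls are
// the standard ones, but the surrounding setup is only a sketch.
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Scalar/ADCE.h"

static void runADCEExample(llvm::Function &F) {
  llvm::FunctionAnalysisManager FAM;
  llvm::PassBuilder PB;
  PB.registerFunctionAnalyses(FAM); // make the standard function analyses available
  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::ADCEPass());    // the pass declared above
  FPM.run(F, FAM);                  // returns PreservedAnalyses
}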
-//===---- CorrelatedValuePropagation.h --------------------------*- C++ -*-===//
+//===- CorrelatedValuePropagation.h -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
#ifndef LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
#define LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
-#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
+class Function;
+
struct CorrelatedValuePropagationPass
: PassInfoMixin<CorrelatedValuePropagationPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
-}
+
+} // end namespace llvm
#endif // LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
-//===- DeadStoreElimination.h - Fast Dead Store Elimination -------------===//
+//===- DeadStoreElimination.h - Fast Dead Store Elimination -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TRANSFORMS_SCALAR_DSE_H
-#define LLVM_TRANSFORMS_SCALAR_DSE_H
+#ifndef LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
-#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
+class Function;
+
/// This class implements a trivial dead store elimination. We consider
/// only the redundant stores that are local to a single Basic Block.
class DSEPass : public PassInfoMixin<DSEPass> {
public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
-}
-#endif // LLVM_TRANSFORMS_SCALAR_DSE_H
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
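// The redundancy this pass targets, sketched in LLVM IR (illustrative only):
//   store i32 0, i32* %p   ; dead: overwritten below with no intervening read of %p
//   store i32 1, i32* %p   ; only this store survives DSE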
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
/// \file
/// This file provides the interface for a simple, fast CSE pass.
-///
+//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
#define LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
-#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
+class Function;
+
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
bool UseMemorySSA;
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
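// The UseMemorySSA member above parameterizes the pass between the classic and
// the MemorySSA-backed variant; a hedged usage sketch (the constructor taking
// this flag is assumed from the member, it is not shown in this hunk):
//   FPM.addPass(EarlyCSEPass(/*UseMemorySSA=*/true));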
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/ADCE.h"
-
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
+#include <cassert>
+#include <cstddef>
+#include <utility>
+
using namespace llvm;
#define DEBUG_TYPE "adce"
cl::Hidden);
namespace {
+
/// Information about Instructions
struct InstInfoType {
/// True if the associated instruction is live.
bool Live = false;
+
/// Quick access to information for block containing associated Instruction.
struct BlockInfoType *Block = nullptr;
};
struct BlockInfoType {
/// True when this block contains a live instruction.
bool Live = false;
+
/// True when this block ends in an unconditional branch.
bool UnconditionalBranch = false;
+
/// True when this block is known to have live PHI nodes.
bool HasLivePhiNodes = false;
+
/// Control dependence sources need to be live for this block.
bool CFLive = false;
/// Holds the value &InstInfo[Terminator].
InstInfoType *TerminatorLiveInfo = nullptr;
- bool terminatorIsLive() const { return TerminatorLiveInfo->Live; }
-
/// Corresponding BasicBlock.
BasicBlock *BB = nullptr;
/// Post-order numbering of reverse control flow graph.
unsigned PostOrder;
+
+ bool terminatorIsLive() const { return TerminatorLiveInfo->Live; }
};
class AggressiveDeadCodeElimination {
/// Instructions known to be live where we need to mark
/// reaching definitions as live.
SmallVector<Instruction *, 128> Worklist;
+
/// Debug info scopes around a live instruction.
SmallPtrSet<const Metadata *, 32> AliveScopes;
/// Set up auxiliary data structures for Instructions and BasicBlocks and
/// initialize the Worklist to the set of must-be-live Instructions.
void initialize();
+
/// Return true for operations which are always treated as live.
bool isAlwaysLive(Instruction &I);
+
/// Return true for instrumentation instructions for value profiling.
bool isInstrumentsConstant(Instruction &I);
/// Propagate liveness to reaching definitions.
void markLiveInstructions();
+
/// Mark an instruction as live.
void markLive(Instruction *I);
+
/// Mark a block as live.
void markLive(BlockInfoType &BB);
void markLive(BasicBlock *BB) { markLive(BlockInfo[BB]); }
void makeUnconditional(BasicBlock *BB, BasicBlock *Target);
public:
- AggressiveDeadCodeElimination(Function &F, DominatorTree &DT,
- PostDominatorTree &PDT)
- : F(F), DT(DT), PDT(PDT) {}
- bool performDeadCodeElimination();
+ AggressiveDeadCodeElimination(Function &F, DominatorTree &DT,
+ PostDominatorTree &PDT)
+ : F(F), DT(DT), PDT(PDT) {}
+
+ bool performDeadCodeElimination();
};
-}
+
+} // end anonymous namespace
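// A stripped-down sketch (illustrative only, not part of the patch) of the
// optimistic liveness propagation performed by markLiveInstructions() below;
// the real pass additionally tracks blocks, debug scopes and control
// dependences.  propagateLivenessSketch is a hypothetical helper name.
static void propagateLivenessSketch(SmallVectorImpl<Instruction *> &Worklist,
                                    DenseMap<Instruction *, bool> &Live) {
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (Use &U : I->operands())
      if (auto *Op = dyn_cast<Instruction>(U.get()))
        if (!Live[Op]) {          // first live use of Op discovered
          Live[Op] = true;        // mark the defining instruction live ...
          Worklist.push_back(Op); // ... and propagate to its own operands
        }
  }
}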
bool AggressiveDeadCodeElimination::performDeadCodeElimination() {
initialize();
}
void AggressiveDeadCodeElimination::initialize() {
-
auto NumBlocks = F.size();
// We will have an entry in the map for each block so we grow the
// to recording which nodes have been visited we also record whether
// a node is currently on the "stack" of active ancestors of the current
// node.
- typedef DenseMap<BasicBlock *, bool> StatusMap ;
+ using StatusMap = DenseMap<BasicBlock *, bool>;
+
class DFState : public StatusMap {
public:
std::pair<StatusMap::iterator, bool> insert(BasicBlock *BB) {
}
void AggressiveDeadCodeElimination::markLiveInstructions() {
-
// Propagate liveness backwards to operands.
do {
// Worklist holds newly discovered live instructions
}
void AggressiveDeadCodeElimination::markLive(Instruction *I) {
-
auto &Info = InstInfo[I];
if (Info.Live)
return;
}
void AggressiveDeadCodeElimination::markLiveBranchesFromControlDependences() {
-
if (BlocksWithDeadTerminators.empty())
return;
//
//===----------------------------------------------------------------------===//
bool AggressiveDeadCodeElimination::removeDeadInstructions() {
-
// Updates control and dataflow around dead blocks
updateDeadRegions();
// A dead region is the set of dead blocks with a common live post-dominator.
void AggressiveDeadCodeElimination::updateDeadRegions() {
-
DEBUG({
dbgs() << "final dead terminator blocks: " << '\n';
for (auto *BB : BlocksWithDeadTerminators)
// reverse top-sort order
void AggressiveDeadCodeElimination::computeReversePostOrder() {
-
// This provides a post-order numbering of the reverse control flow graph
// Note that it is incomplete in the presence of infinite loops but we don't
// need to number blocks which don't reach the end of the function since
}
namespace {
+
struct ADCELegacyPass : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
+
ADCELegacyPass() : FunctionPass(ID) {
initializeADCELegacyPassPass(*PassRegistry::getPassRegistry());
}
AU.addPreserved<GlobalsAAWrapperPass>();
}
};
-}
+
+} // end anonymous namespace
char ADCELegacyPass::ID = 0;
+
INITIALIZE_PASS_BEGIN(ADCELegacyPass, "adce",
"Aggressive Dead Code Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
+#include <utility>
+
using namespace llvm;
#define DEBUG_TYPE "correlated-value-propagation"
static cl::opt<bool> DontProcessAdds("cvp-dont-process-adds", cl::init(true));
namespace {
+
class CorrelatedValuePropagation : public FunctionPass {
public:
static char ID;
+
CorrelatedValuePropagation(): FunctionPass(ID) {
initializeCorrelatedValuePropagationPass(*PassRegistry::getPassRegistry());
}
AU.addPreserved<GlobalsAAWrapperPass>();
}
};
-}
+
+} // end anonymous namespace
char CorrelatedValuePropagation::ID = 0;
+
INITIALIZE_PASS_BEGIN(CorrelatedValuePropagation, "correlated-propagation",
"Value Propagation", false, false)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
}
static bool processAdd(BinaryOperator *AddOp, LazyValueInfo *LVI) {
- typedef OverflowingBinaryOperator OBO;
+ using OBO = OverflowingBinaryOperator;
if (DontProcessAdds)
return false;
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
#include <map>
+#include <utility>
+
using namespace llvm;
#define DEBUG_TYPE "dse"
cl::init(true), cl::Hidden,
cl::desc("Enable partial store merging in DSE"));
-
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
-typedef std::map<int64_t, int64_t> OverlapIntervalsTy;
-typedef DenseMap<Instruction *, OverlapIntervalsTy> InstOverlapIntervalsTy;
+using OverlapIntervalsTy = std::map<int64_t, int64_t>;
+using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
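// The typedef -> using conversions in this patch are behavior-preserving; one
// practical difference is that only 'using' can be templated.  A small sketch
// (InstMapTy is a hypothetical alias, not used by DSE itself):
template <typename T> using InstMapTy = DenseMap<Instruction *, T>;
// InstMapTy<OverlapIntervalsTy> names the same type as InstOverlapIntervalsTy.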
/// Delete this instruction. Before we do, go through and zero out all the
/// operands of this instruction. If any of them become dead, delete them and
case Intrinsic::init_trampoline:
// Always safe to remove init_trampoline.
return true;
-
case Intrinsic::memset:
case Intrinsic::memmove:
case Intrinsic::memcpy:
return false;
}
-
/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
}
namespace {
+
enum OverwriteResult {
OW_Begin,
OW_Complete,
OW_PartialEarlierWithFullLater,
OW_Unknown
};
-}
+
+} // end anonymous namespace
/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
if (!IsOverwriteEnd)
LaterOffset = int64_t(LaterOffset + LaterSize);
- if (!(llvm::isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
+ if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
!((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
return false;
}
namespace {
+
/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
+ static char ID; // Pass identification, replacement for typeid
+
DSELegacyPass() : FunctionPass(ID) {
initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
}
AU.addPreserved<GlobalsAAWrapperPass>();
AU.addPreserved<MemoryDependenceWrapperPass>();
}
-
- static char ID; // Pass identification, replacement for typeid
};
+
} // end anonymous namespace
char DSELegacyPass::ID = 0;
+
INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/EarlyCSE.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
#include <deque>
+#include <memory>
+#include <utility>
+
using namespace llvm;
using namespace llvm::PatternMatch;
//===----------------------------------------------------------------------===//
namespace {
+
/// \brief Struct representing the available values in the scoped hash table.
struct SimpleValue {
Instruction *Inst;
isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
}
};
-}
+
+} // end anonymous namespace
namespace llvm {
+
template <> struct DenseMapInfo<SimpleValue> {
static inline SimpleValue getEmptyKey() {
return DenseMapInfo<Instruction *>::getEmptyKey();
}
+
static inline SimpleValue getTombstoneKey() {
return DenseMapInfo<Instruction *>::getTombstoneKey();
}
+
static unsigned getHashValue(SimpleValue Val);
static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
-}
+
+} // end namespace llvm
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
Instruction *Inst = Val.Inst;
//===----------------------------------------------------------------------===//
namespace {
+
/// \brief Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
return true;
}
};
-}
+
+} // end anonymous namespace
namespace llvm {
+
template <> struct DenseMapInfo<CallValue> {
static inline CallValue getEmptyKey() {
return DenseMapInfo<Instruction *>::getEmptyKey();
}
+
static inline CallValue getTombstoneKey() {
return DenseMapInfo<Instruction *>::getTombstoneKey();
}
+
static unsigned getHashValue(CallValue Val);
static bool isEqual(CallValue LHS, CallValue RHS);
};
-}
+
+} // end namespace llvm
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
Instruction *Inst = Val.Inst;
//===----------------------------------------------------------------------===//
namespace {
+
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
const SimplifyQuery SQ;
MemorySSA *MSSA;
std::unique_ptr<MemorySSAUpdater> MSSAUpdater;
- typedef RecyclingAllocator<
- BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy;
- typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
- AllocatorTy> ScopedHTType;
+
+ using AllocatorTy =
+ RecyclingAllocator<BumpPtrAllocator,
+ ScopedHashTableVal<SimpleValue, Value *>>;
+ using ScopedHTType =
+ ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
+ AllocatorTy>;
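+
+  // How the scoped tables are used, in a hedged nutshell: entries inserted
+  // while a ScopeTy object is alive disappear when it is destroyed, which is
+  // how EarlyCSE drops availability information when it leaves a dominator-tree
+  // scope (assuming SimpleValue is constructible from an Instruction *, as in
+  // this pass):
+  //   ScopedHTType AvailableValues;
+  //   {
+  //     ScopedHTType::ScopeTy Scope(AvailableValues);
+  //     AvailableValues.insert(SimpleValue(I), V); // visible only in this scope
+  //   }                                            // popped here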
/// \brief A scoped hash table of the current values of all of our simple
/// scalar expressions.
/// present the table; it is the responsibility of the consumer to inspect
/// the atomicity/volatility if needed.
struct LoadValue {
- Instruction *DefInst;
- unsigned Generation;
- int MatchingId;
- bool IsAtomic;
- bool IsInvariant;
- LoadValue()
- : DefInst(nullptr), Generation(0), MatchingId(-1), IsAtomic(false),
- IsInvariant(false) {}
+ Instruction *DefInst = nullptr;
+ unsigned Generation = 0;
+ int MatchingId = -1;
+ bool IsAtomic = false;
+ bool IsInvariant = false;
+
+ LoadValue() = default;
LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
bool IsAtomic, bool IsInvariant)
: DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
IsAtomic(IsAtomic), IsInvariant(IsInvariant) {}
};
- typedef RecyclingAllocator<BumpPtrAllocator,
- ScopedHashTableVal<Value *, LoadValue>>
- LoadMapAllocator;
- typedef ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
- LoadMapAllocator> LoadHTType;
+
+ using LoadMapAllocator =
+ RecyclingAllocator<BumpPtrAllocator,
+ ScopedHashTableVal<Value *, LoadValue>>;
+ using LoadHTType =
+ ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
+ LoadMapAllocator>;
+
LoadHTType AvailableLoads;
/// \brief A scoped hash table of the current values of read-only call
/// values.
///
/// It uses the same generation count as loads.
- typedef ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>
- CallHTType;
+ using CallHTType =
+ ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
CallHTType AvailableCalls;
/// \brief This is the current generation of the memory value.
- unsigned CurrentGeneration;
+ unsigned CurrentGeneration = 0;
/// \brief Set up the EarlyCSE runner for a particular function.
EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
const TargetTransformInfo &TTI, DominatorTree &DT,
AssumptionCache &AC, MemorySSA *MSSA)
: TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
- MSSAUpdater(make_unique<MemorySSAUpdater>(MSSA)), CurrentGeneration(0) {
- }
+ MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}
bool run();
CallHTType &AvailableCalls)
: Scope(AvailableValues), LoadScope(AvailableLoads),
CallScope(AvailableCalls) {}
-
- private:
NodeScope(const NodeScope &) = delete;
- void operator=(const NodeScope &) = delete;
+ NodeScope &operator=(const NodeScope &) = delete;
+ private:
ScopedHTType::ScopeTy Scope;
LoadHTType::ScopeTy LoadScope;
CallHTType::ScopeTy CallScope;
CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
DomTreeNode::iterator child, DomTreeNode::iterator end)
: CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
- EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls),
- Processed(false) {}
+ EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls)
+ {}
+ StackNode(const StackNode &) = delete;
+ StackNode &operator=(const StackNode &) = delete;
// Accessors.
unsigned currentGeneration() { return CurrentGeneration; }
void childGeneration(unsigned generation) { ChildGeneration = generation; }
DomTreeNode *node() { return Node; }
DomTreeNode::iterator childIter() { return ChildIter; }
+
DomTreeNode *nextChild() {
DomTreeNode *child = *ChildIter;
++ChildIter;
return child;
}
+
DomTreeNode::iterator end() { return EndIter; }
bool isProcessed() { return Processed; }
void process() { Processed = true; }
private:
- StackNode(const StackNode &) = delete;
- void operator=(const StackNode &) = delete;
-
- // Members.
unsigned CurrentGeneration;
unsigned ChildGeneration;
DomTreeNode *Node;
DomTreeNode::iterator ChildIter;
DomTreeNode::iterator EndIter;
NodeScope Scopes;
- bool Processed;
+ bool Processed = false;
};
/// \brief Wrapper class to handle memory instructions, including loads,
class ParseMemoryInst {
public:
ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
- : IsTargetMemInst(false), Inst(Inst) {
+ : Inst(Inst) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
if (TTI.getTgtMemIntrinsic(II, Info))
IsTargetMemInst = true;
}
+
bool isLoad() const {
if (IsTargetMemInst) return Info.ReadMem;
return isa<LoadInst>(Inst);
}
+
bool isStore() const {
if (IsTargetMemInst) return Info.WriteMem;
return isa<StoreInst>(Inst);
}
+
bool isAtomic() const {
if (IsTargetMemInst)
return Info.Ordering != AtomicOrdering::NotAtomic;
return Inst->isAtomic();
}
+
bool isUnordered() const {
if (IsTargetMemInst)
return Info.isUnordered();
return (getPointerOperand() == Inst.getPointerOperand() &&
getMatchingId() == Inst.getMatchingId());
}
+
bool isValid() const { return getPointerOperand() != nullptr; }
// For regular (non-intrinsic) loads/stores, this is set to -1. For
if (IsTargetMemInst) return Info.MatchingId;
return -1;
}
+
Value *getPointerOperand() const {
if (IsTargetMemInst) return Info.PtrVal;
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
}
return nullptr;
}
+
bool mayReadFromMemory() const {
if (IsTargetMemInst) return Info.ReadMem;
return Inst->mayReadFromMemory();
}
+
bool mayWriteToMemory() const {
if (IsTargetMemInst) return Info.WriteMem;
return Inst->mayWriteToMemory();
}
private:
- bool IsTargetMemInst;
+ bool IsTargetMemInst = false;
MemIntrinsicInfo Info;
Instruction *Inst;
};
for (MemoryPhi *MP : PhisToCheck) {
MemoryAccess *FirstIn = MP->getIncomingValue(0);
- if (all_of(MP->incoming_values(),
- [=](Use &In) { return In == FirstIn; }))
+ if (llvm::all_of(MP->incoming_values(),
+ [=](Use &In) { return In == FirstIn; }))
WorkQueue.push_back(MP);
}
PhisToCheck.clear();
}
}
};
-}
+
+} // end anonymous namespace
/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
}
namespace {
+
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
AU.setPreservesCFG();
}
};
-}
+
+} // end anonymous namespace
using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils/Local.h"
-
-#include <stack>
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <memory>
+#include <utility>
+#include <vector>
+
using namespace llvm;
MaxHoistedThreshold("gvn-max-hoisted", cl::Hidden, cl::init(-1),
cl::desc("Max number of instructions to hoist "
"(default unlimited = -1)"));
+
static cl::opt<int> MaxNumberOfBBSInPath(
"gvn-hoist-max-bbs", cl::Hidden, cl::init(4),
cl::desc("Max number of basic blocks on the path between "
namespace llvm {
-typedef DenseMap<const BasicBlock *, bool> BBSideEffectsSet;
-typedef SmallVector<Instruction *, 4> SmallVecInsn;
-typedef SmallVectorImpl<Instruction *> SmallVecImplInsn;
+using BBSideEffectsSet = DenseMap<const BasicBlock *, bool>;
+using SmallVecInsn = SmallVector<Instruction *, 4>;
+using SmallVecImplInsn = SmallVectorImpl<Instruction *>;
+
// Each element of a hoisting list contains the basic block to hoist into and
// a list of instructions to be hoisted.
-typedef std::pair<BasicBlock *, SmallVecInsn> HoistingPointInfo;
-typedef SmallVector<HoistingPointInfo, 4> HoistingPointList;
+using HoistingPointInfo = std::pair<BasicBlock *, SmallVecInsn>;
+
+using HoistingPointList = SmallVector<HoistingPointInfo, 4>;
+
// A map from a pair of VNs to all the instructions with those VNs.
-typedef std::pair<unsigned, unsigned> VNType;
-typedef DenseMap<VNType, SmallVector<Instruction *, 4>> VNtoInsns;
+using VNType = std::pair<unsigned, unsigned>;
+
+using VNtoInsns = DenseMap<VNType, SmallVector<Instruction *, 4>>;
// CHI keeps information about values flowing out of a basic block. It is
// similar to PHI but in the inverse graph, and used for outgoing values on each
// instruction as well as the edge where the value is flowing to.
struct CHIArg {
VNType VN;
+
// Edge destination (shows the direction of flow); may not be the block containing I.
BasicBlock *Dest;
+
// The instruction (VN) which uses the values flowing out of CHI.
Instruction *I;
+
bool operator==(const CHIArg &A) { return VN == A.VN; }
bool operator!=(const CHIArg &A) { return !(*this == A); }
};
-typedef SmallVectorImpl<CHIArg>::iterator CHIIt;
-typedef iterator_range<CHIIt> CHIArgs;
-typedef DenseMap<BasicBlock *, SmallVector<CHIArg, 2>> OutValuesType;
-typedef DenseMap<BasicBlock *, SmallVector<std::pair<VNType, Instruction *>, 2>>
- InValuesType;
+using CHIIt = SmallVectorImpl<CHIArg>::iterator;
+using CHIArgs = iterator_range<CHIIt>;
+using OutValuesType = DenseMap<BasicBlock *, SmallVector<CHIArg, 2>>;
+using InValuesType =
+ DenseMap<BasicBlock *, SmallVector<std::pair<VNType, Instruction *>, 2>>;
// An invalid value number, used when inserting a single value number into
// VNtoInsns.
}
const VNtoInsns &getScalarVNTable() const { return VNtoCallsScalars; }
-
const VNtoInsns &getLoadVNTable() const { return VNtoCallsLoads; }
-
const VNtoInsns &getStoreVNTable() const { return VNtoCallsStores; }
};
GVNHoist(DominatorTree *DT, PostDominatorTree *PDT, AliasAnalysis *AA,
MemoryDependenceResults *MD, MemorySSA *MSSA)
: DT(DT), PDT(PDT), AA(AA), MD(MD), MSSA(MSSA),
- MSSAUpdater(make_unique<MemorySSAUpdater>(MSSA)),
- HoistingGeps(false) {}
+ MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}
bool run(Function &F) {
NumFuncArgs = F.arg_size();
int ChainLength = 0;
// FIXME: use lazy evaluation of VN to avoid the fix-point computation.
- while (1) {
+ while (true) {
if (MaxChainLength != -1 && ++ChainLength >= MaxChainLength)
return Res;
DenseMap<const Value *, unsigned> DFSNumber;
BBSideEffectsSet BBSideEffects;
DenseSet<const BasicBlock *> HoistBarrier;
-
SmallVector<BasicBlock *, 32> IDFBlocks;
unsigned NumFuncArgs;
- const bool HoistingGeps;
+ const bool HoistingGeps = false;
enum InsKind { Unknown, Scalar, Load, Store };
return false;
}
- /* Return true when I1 appears before I2 in the instructions of BB. */
+ // Return true when I1 appears before I2 in the instructions of BB.
bool firstInBB(const Instruction *I1, const Instruction *I2) {
assert(I1->getParent() == I2->getParent());
unsigned I1DFS = DFSNumber.lookup(I1);
// to NewPt.
bool safeToHoistLdSt(const Instruction *NewPt, const Instruction *OldPt,
MemoryUseOrDef *U, InsKind K, int &NBBsOnAllPaths) {
-
// In place hoisting is safe.
if (NewPt == OldPt)
return true;
for (auto CHI : C) {
BasicBlock *Dest = CHI.Dest;
// Find if all the edges have values flowing out of BB.
- bool Found = any_of(TI->successors(), [Dest](const BasicBlock *BB) {
+ bool Found = llvm::any_of(TI->successors(), [Dest](const BasicBlock *BB) {
return BB == Dest; });
if (!Found)
return false;
}
}
- typedef DenseMap<VNType, SmallVector<Instruction *, 2>> RenameStackType;
+ using RenameStackType = DenseMap<VNType, SmallVector<Instruction *, 2>>;
+
// Push all the VNs corresponding to BB into RenameStack.
void fillRenameStack(BasicBlock *BB, InValuesType &ValueBBs,
RenameStackType &RenameStack) {
Instruction *ClonedGep = Gep->clone();
for (unsigned i = 0, e = Gep->getNumOperands(); i != e; ++i)
if (Instruction *Op = dyn_cast<Instruction>(Gep->getOperand(i))) {
-
// Check whether the operand is already available.
if (DT->dominates(Op->getParent(), HoistPt))
continue;
for (MemoryPhi *Phi : UsePhis) {
auto In = Phi->incoming_values();
- if (all_of(In, [&](Use &U) { return U == NewMemAcc; })) {
+ if (llvm::all_of(In, [&](Use &U) { return U == NewMemAcc; })) {
Phi->replaceAllUsesWith(NewMemAcc);
MSSAUpdater->removeMemoryAccess(Phi);
}
// The order in which hoistings are done may influence the availability
// of operands.
if (!allOperandsAvailable(Repl, DestBB)) {
-
// When HoistingGeps there is nothing more we can do to make the
// operands available: just continue.
if (HoistingGeps)
NR += removeAndReplace(InstructionsToHoist, Repl, DestBB, MoveAccess);
-
if (isa<LoadInst>(Repl))
++NL;
else if (isa<StoreInst>(Repl))
AU.addPreserved<GlobalsAAWrapperPass>();
}
};
-} // namespace llvm
+
+} // end namespace llvm
PreservedAnalyses GVNHoistPass::run(Function &F, FunctionAnalysisManager &AM) {
DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
}
char GVNHoistLegacyPass::ID = 0;
+
INITIALIZE_PASS_BEGIN(GVNHoistLegacyPass, "gvn-hoist",
"Early GVN Hoisting of Expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
-//===- GVNSink.cpp - sink expressions into successors -------------------===//
+//===- GVNSink.cpp - sink expressions into successors ---------------------===//
//
// The LLVM Compiler Infrastructure
//
/// replace %a1 with %c1, will it contribute in an equivalent way to all
/// successive instructions?". The PostValueTable class in GVN provides this
/// mapping.
-///
+//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/GlobalsModRef.h"
-#include "llvm/Analysis/MemorySSA.h"
-#include "llvm/Analysis/PostDominators.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Verifier.h"
-#include "llvm/Support/MathExtras.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/GVNExpression.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
-#include <unordered_set>
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <utility>
+
using namespace llvm;
#define DEBUG_TYPE "gvn-sink"
dbgs() << "\n";
}
-}
-}
+} // end namespace GVNExpression
+} // end namespace llvm
namespace {
NumExtraPHIs) // PHIs are expensive, so make sure they're worth it.
- SplitEdgeCost;
}
+
bool operator>(const SinkingInstructionCandidate &Other) const {
return Cost > Other.Cost;
}
};
#ifndef NDEBUG
-llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
- const SinkingInstructionCandidate &C) {
+raw_ostream &operator<<(raw_ostream &OS, const SinkingInstructionCandidate &C) {
OS << "<Candidate Cost=" << C.Cost << " #Blocks=" << C.NumBlocks
<< " #Insts=" << C.NumInstructions << " #PHIs=" << C.NumPHIs << ">";
return OS;
SmallVector<BasicBlock *, 4> Blocks;
public:
- ModelledPHI() {}
+ ModelledPHI() = default;
+
ModelledPHI(const PHINode *PN) {
// BasicBlock comes first so we sort by basic block pointer order,
// then by value pointer order.
SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
Values.push_back(P.second);
}
}
+
/// Create a dummy ModelledPHI that will compare unequal to any other ModelledPHI
/// without the same ID.
/// \note This is specifically for DenseMapInfo - do not use this!
ArrayRef<Value *> getValues() const { return Values; }
bool areAllIncomingValuesSame() const {
- return all_of(Values, [&](Value *V) { return V == Values[0]; });
+ return llvm::all_of(Values, [&](Value *V) { return V == Values[0]; });
}
+
bool areAllIncomingValuesSameType() const {
- return all_of(
+ return llvm::all_of(
Values, [&](Value *V) { return V->getType() == Values[0]->getType(); });
}
+
bool areAnyIncomingValuesConstant() const {
- return any_of(Values, [&](Value *V) { return isa<Constant>(V); });
+ return llvm::any_of(Values, [&](Value *V) { return isa<Constant>(V); });
}
+
// Hash functor
unsigned hash() const {
return (unsigned)hash_combine_range(Values.begin(), Values.end());
}
+
bool operator==(const ModelledPHI &Other) const {
return Values == Other.Values && Blocks == Other.Blocks;
}
static ModelledPHI Dummy = ModelledPHI::createDummy(0);
return Dummy;
}
+
static inline ModelledPHI &getTombstoneKey() {
static ModelledPHI Dummy = ModelledPHI::createDummy(1);
return Dummy;
}
+
static unsigned getHashValue(const ModelledPHI &V) { return V.hash(); }
+
static bool isEqual(const ModelledPHI &LHS, const ModelledPHI &RHS) {
return LHS == RHS;
}
};
-typedef DenseSet<ModelledPHI, DenseMapInfo<ModelledPHI>> ModelledPHISet;
+using ModelledPHISet = DenseSet<ModelledPHI, DenseMapInfo<ModelledPHI>>;
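// With the DenseMapInfo specialization above, ModelledPHI can key a DenseSet
// directly; a hedged usage sketch:
//   ModelledPHISet NeededPHIs;
//   if (NeededPHIs.insert(PHI).second)
//     ; // first time this PHI shape has been seen among the candidate blocks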
//===----------------------------------------------------------------------===//
// ValueTable
op_push_back(U.getUser());
std::sort(op_begin(), op_end());
}
+
void setMemoryUseOrder(unsigned MUO) { MemoryUseOrder = MUO; }
void setVolatile(bool V) { Volatile = V; }
- virtual hash_code getHashValue() const {
+ hash_code getHashValue() const override {
return hash_combine(GVNExpression::BasicExpression::getHashValue(),
MemoryUseOrder, Volatile);
}
DenseMap<size_t, uint32_t> HashNumbering;
BumpPtrAllocator Allocator;
ArrayRecycler<Value *> Recycler;
- uint32_t nextValueNumber;
+ uint32_t nextValueNumber = 1;
/// Create an expression for I based on its opcode and its uses. If I
/// touches or reads memory, the expression is also based upon its memory
}
public:
+ ValueTable() = default;
+
/// Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t lookupOrAdd(Value *V) {
nextValueNumber = 1;
}
- ValueTable() : nextValueNumber(1) {}
-
/// \c Inst uses or touches memory. Return an ID describing the memory state
/// at \c Inst such that if getMemoryUseOrder(I1) == getMemoryUseOrder(I2),
/// the exact same memory operations happen after I1 and I2.
class GVNSink {
public:
- GVNSink() : VN() {}
+ GVNSink() = default;
+
bool run(Function &F) {
DEBUG(dbgs() << "GVNSink: running on function @" << F.getName() << "\n");
void foldPointlessPHINodes(BasicBlock *BB) {
auto I = BB->begin();
while (PHINode *PN = dyn_cast<PHINode>(I++)) {
- if (!all_of(PN->incoming_values(),
- [&](const Value *V) { return V == PN->getIncomingValue(0); }))
+ if (!llvm::all_of(PN->incoming_values(), [&](const Value *V) {
+ return V == PN->getIncomingValue(0);
+ }))
continue;
if (PN->getIncomingValue(0) != PN)
PN->replaceAllUsesWith(PN->getIncomingValue(0));
SmallVector<Value *, 4> NewOperands;
for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O) {
- bool NeedPHI = any_of(Insts, [&I0, O](const Instruction *I) {
+ bool NeedPHI = llvm::any_of(Insts, [&I0, O](const Instruction *I) {
return I->getOperand(O) != I0->getOperand(O);
});
if (!NeedPHI) {
AU.addPreserved<GlobalsAAWrapperPass>();
}
};
-} // namespace
+
+} // end anonymous namespace
PreservedAnalyses GVNSinkPass::run(Function &F, FunctionAnalysisManager &AM) {
GVNSink G;
}
char GVNSinkLegacyPass::ID = 0;
+
INITIALIZE_PASS_BEGIN(GVNSinkLegacyPass, "gvn-sink",
"Early GVN sinking of Expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)