/// Iterate the given function (typically something like doubling the width)
/// on Ty until we find a legal type for this operation.
LLT findLegalType(const InstrAspect &Aspect,
- std::function<LLT(LLT)> NextType) const {
+ function_ref<LLT(LLT)> NextType) const {
LegalizeAction Action;
const TypeMap &Map = Actions[Aspect.Opcode - FirstOp][Aspect.Idx];
LLT Ty = Aspect.Type;
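// A minimal usage sketch for the new signature (the caller and the widening
// lambda below are assumptions, not lines from this patch). findLegalType
// only invokes NextType during the call and never stores it, so a non-owning
// function_ref suffices and avoids the copy/allocation a std::function can
// incur:
//
//   LLT Widened = findLegalType(
//       Aspect, [](LLT Ty) { return LLT::scalar(Ty.getSizeInBits() * 2); });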
struct FlowStringValue : StringValue {
FlowStringValue() {}
- FlowStringValue(std::string Value) : StringValue(Value) {}
+ FlowStringValue(std::string Value) : StringValue(std::move(Value)) {}
};
template <> struct ScalarTraits<FlowStringValue> {
private:
typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
public:
- PBQPRAGraph(GraphMetadata Metadata) : BaseT(Metadata) {}
+ PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}
/// @brief Dump this graph to dbgs().
void dump() const;
std::function<void(SDNode *, SDNode *)> Callback;
DAGNodeDeletedListener(SelectionDAG &DAG,
std::function<void(SDNode *, SDNode *)> Callback)
- : DAGUpdateListener(DAG), Callback(Callback) {}
+ : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
};
return Data == Other.Data;
}
- bool operator!=(BinaryAnnotationIterator Other) const {
+ bool operator!=(const BinaryAnnotationIterator &Other) const {
return !(*this == Other);
}
TypeIndex getNextTypeIndex() const;
/// Records the name of a type, and reserves its type index.
- void recordType(StringRef Name, CVType Data);
+ void recordType(StringRef Name, const CVType &Data);
/// Saves the name in a StringSet and creates a stable StringRef.
StringRef saveTypeName(StringRef TypeName);
public:
IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
- : Callback(Callback) {}
+ : Callback(std::move(Callback)) {}
protected:
void InsertHelper(Instruction *I, const Twine &Name,
/// Create a local file system cache which uses the given cache directory and
/// file callback.
-NativeObjectCache localCache(std::string CacheDirectoryPath, AddFileFn AddFile);
+NativeObjectCache localCache(StringRef CacheDirectoryPath, AddFileFn AddFile);
} // namespace lto
} // namespace llvm
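// Hypothetical usage sketch (the directory name and the lambda body are
// assumptions, not part of this patch); AddFileFn is assumed to be the
// callback that receives the task number and the path of the finished cache
// entry. With a StringRef parameter, the string viewed by CacheDirectoryPath
// must stay alive for as long as the returned cache is used, since the lambda
// inside localCache captures it by value:
//
//   NativeObjectCache Cache = lto::localCache(
//       "lto.cache", [](unsigned Task, StringRef Path) {
//         // e.g. remember Path so the cached object can be added to the link
//       });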
bool parseEOL(const Twine &ErrMsg);
- bool parseMany(std::function<bool()> parseOne, bool hasComma = true);
+ bool parseMany(function_ref<bool()> parseOne, bool hasComma = true);
bool parseIntToken(int64_t &V, const Twine &ErrMsg);
return getParser().parseToken(T, Msg);
}
- bool parseMany(std::function<bool()> parseOne, bool hasComma = true) {
+ bool parseMany(function_ref<bool()> parseOne, bool hasComma = true) {
return getParser().parseMany(parseOne, hasComma);
}
protected: // Can only create subclasses.
LLVMTargetMachine(const Target &T, StringRef DataLayoutString,
const Triple &TargetTriple, StringRef CPU, StringRef FS,
- TargetOptions Options, Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
void initAsmInfo();
public:
public:
FunctionInfo(const Function &, const SmallVectorImpl<Value *> &,
- const ReachabilitySet &, AliasAttrMap);
+ const ReachabilitySet &, const AliasAttrMap &);
bool mayAlias(const Value *, uint64_t, const Value *, uint64_t) const;
const AliasSummary &getAliasSummary() const { return Summary; }
CFLAndersAAResult::FunctionInfo::FunctionInfo(
const Function &Fn, const SmallVectorImpl<Value *> &RetVals,
- const ReachabilitySet &ReachSet, AliasAttrMap AMap) {
+ const ReachabilitySet &ReachSet, const AliasAttrMap &AMap) {
populateAttrMap(AttrMap, AMap);
populateExternalAttributes(Summary.RetParamAttributes, Fn, RetVals, AMap);
populateAliasMap(AliasMap, ReachSet);
PlaceholderQueue &Placeholders, StringRef Blob,
unsigned &NextMetadataNo);
Error parseMetadataStrings(ArrayRef<uint64_t> Record, StringRef Blob,
- std::function<void(StringRef)> CallBack);
+ function_ref<void(StringRef)> CallBack);
Error parseGlobalObjectAttachment(GlobalObject &GO,
ArrayRef<uint64_t> Record);
Error parseMetadataKindRecord(SmallVectorImpl<uint64_t> &Record);
bool IsImporting)
: MetadataList(TheModule.getContext()), ValueList(ValueList),
Stream(Stream), Context(TheModule.getContext()), TheModule(TheModule),
- getTypeByID(getTypeByID), IsImporting(IsImporting) {}
+ getTypeByID(std::move(getTypeByID)), IsImporting(IsImporting) {}
Error parseMetadata(bool ModuleLevel);
Error MetadataLoader::MetadataLoaderImpl::parseMetadataStrings(
ArrayRef<uint64_t> Record, StringRef Blob,
- std::function<void(StringRef)> CallBack) {
+ function_ref<void(StringRef)> CallBack) {
// All the MDStrings in the block are emitted together in a single
// record. The strings are concatenated and stored in a blob along with
// their sizes.
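// Hypothetical caller sketch (the collecting lambda is an assumption, not part
// of this patch). The callback is invoked once per decoded string, and because
// it is only needed for the duration of the call, a non-owning function_ref is
// enough:
//
//   SmallVector<StringRef, 8> Strings;
//   if (Error Err = parseMetadataStrings(Record, Blob, [&](StringRef Str) {
//         Strings.push_back(Str);
//       }))
//     return Err;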
BitcodeReaderValueList &ValueList,
bool IsImporting,
std::function<Type *(unsigned)> getTypeByID)
- : Pimpl(llvm::make_unique<MetadataLoaderImpl>(Stream, TheModule, ValueList,
- getTypeByID, IsImporting)) {}
+ : Pimpl(llvm::make_unique<MetadataLoaderImpl>(
+ Stream, TheModule, ValueList, std::move(getTypeByID), IsImporting)) {}
Error MetadataLoader::parseMetadata(bool ModuleLevel) {
return Pimpl->parseMetadata(ModuleLevel);
void MachineIRBuilder::recordInsertions(
std::function<void(MachineInstr *)> Inserted) {
- InsertedInstr = Inserted;
+ InsertedInstr = std::move(Inserted);
}
void MachineIRBuilder::stopRecordingInsertions() {
LLVMTargetMachine::LLVMTargetMachine(const Target &T,
StringRef DataLayoutString,
const Triple &TT, StringRef CPU,
- StringRef FS, TargetOptions Options,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
: TargetMachine(T, DataLayoutString, TT, CPU, FS, Options) {
SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
SDValue reduceBuildVecToShuffle(SDNode *N);
- SDValue createBuildVecShuffle(SDLoc DL, SDNode *N, ArrayRef<int> VectorMask,
- SDValue VecIn1, SDValue VecIn2,
- unsigned LeftIdx);
+ SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
+ ArrayRef<int> VectorMask, SDValue VecIn1,
+ SDValue VecIn2, unsigned LeftIdx);
SDValue GetDemandedBits(SDValue V, const APInt &Mask);
return DAG.getNode(Opcode, DL, VT, BV);
}
-SDValue DAGCombiner::createBuildVecShuffle(SDLoc DL, SDNode *N,
+SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
ArrayRef<int> VectorMask,
SDValue VecIn1, SDValue VecIn2,
unsigned LeftIdx) {
SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
DILocalVariable *Variable,
DIExpression *Expr, int64_t Offset,
- DebugLoc dl,
+ const DebugLoc &dl,
unsigned DbgSDNodeOrder) {
SDDbgValue *SDV;
auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode());
/// Return the appropriate SDDbgValue based on N.
SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
- DIExpression *Expr, int64_t Offset, DebugLoc dl,
- unsigned DbgSDNodeOrder);
+ DIExpression *Expr, int64_t Offset,
+ const DebugLoc &dl, unsigned DbgSDNodeOrder);
};
/// RegsForValue - This struct represents the registers (physical or virtual)
}
/// Records the name of a type, and reserves its type index.
-void TypeDatabase::recordType(StringRef Name, CVType Data) {
+void TypeDatabase::recordType(StringRef Name, const CVType &Data) {
CVUDTNames.push_back(Name);
TypeRecords.push_back(Data);
}
}
}
-NativeObjectCache lto::localCache(std::string CacheDirectoryPath,
+NativeObjectCache lto::localCache(StringRef CacheDirectoryPath,
AddFileFn AddFile) {
return [=](unsigned Task, StringRef Key) -> AddStreamFn {
// First, see if we have a cache hit.
CacheStream(std::unique_ptr<raw_pwrite_stream> OS, AddFileFn AddFile,
std::string TempFilename, std::string EntryPath,
unsigned Task)
- : NativeObjectStream(std::move(OS)), AddFile(AddFile),
- TempFilename(TempFilename), EntryPath(EntryPath), Task(Task) {}
+ : NativeObjectStream(std::move(OS)), AddFile(std::move(AddFile)),
+ TempFilename(std::move(TempFilename)),
+ EntryPath(std::move(EntryPath)), Task(Task) {}
~CacheStream() {
// Make sure the file is closed before committing it.
return true;
}
-bool MCAsmParser::parseMany(std::function<bool()> parseOne, bool hasComma) {
+bool MCAsmParser::parseMany(function_ref<bool()> parseOne, bool hasComma) {
if (parseOptionalToken(AsmToken::EndOfStatement))
return false;
while (1) {
MachineInstrBuilder MIB;
};
-void AArch64CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
- SmallVectorImpl<ArgInfo> &SplitArgs,
- const DataLayout &DL,
- MachineRegisterInfo &MRI,
- SplitArgTy PerformArgSplit) const {
+void AArch64CallLowering::splitToValueTypes(
+ const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
+ const DataLayout &DL, MachineRegisterInfo &MRI,
+ const SplitArgTy &PerformArgSplit) const {
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
LLVMContext &Ctx = OrigArg.Ty->getContext();
void splitToValueTypes(const ArgInfo &OrigArgInfo,
SmallVectorImpl<ArgInfo> &SplitArgs,
const DataLayout &DL, MachineRegisterInfo &MRI,
- SplitArgTy SplitArg) const;
+ const SplitArgTy &SplitArg) const;
};
} // End of namespace llvm
#endif
return static_cast<MCELFStreamer &>(Streamer);
}
-void
-AMDGPUTargetELFStreamer::EmitAMDGPUNote(const MCExpr* DescSZ,
- PT_NOTE::NoteType Type,
- std::function<void(MCELFStreamer &)> EmitDesc) {
+void AMDGPUTargetELFStreamer::EmitAMDGPUNote(
+ const MCExpr *DescSZ, PT_NOTE::NoteType Type,
+ function_ref<void(MCELFStreamer &)> EmitDesc) {
auto &S = getStreamer();
auto &Context = S.getContext();
class AMDGPUTargetELFStreamer : public AMDGPUTargetStreamer {
MCStreamer &Streamer;
- void EmitAMDGPUNote(const MCExpr* DescSize,
- AMDGPU::PT_NOTE::NoteType Type,
- std::function<void(MCELFStreamer &)> EmitDesc);
+ void EmitAMDGPUNote(const MCExpr *DescSize, AMDGPU::PT_NOTE::NoteType Type,
+ function_ref<void(MCELFStreamer &)> EmitDesc);
public:
AMDGPUTargetELFStreamer(MCStreamer &S);
!shouldEmitGOTReloc(GA->getGlobal());
}
-static SDValue buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
- SDLoc DL, unsigned Offset, EVT PtrVT,
- unsigned GAFlags = SIInstrInfo::MO_NONE) {
+static SDValue
+buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
+ const SDLoc &DL, unsigned Offset, EVT PtrVT,
+ unsigned GAFlags = SIInstrInfo::MO_NONE) {
// In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
// lowered to the following code sequence:
//
DAG.getValueType(VT));
}
-static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
+static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
+ EVT VT) {
DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
"non-hsa intrinsic with hsa target",
DL.getDebugLoc());
return DAG.getUNDEF(VT);
}
-static SDValue emitRemovedIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
+static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
+ EVT VT) {
DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
"intrinsic not supported on subtarget",
DL.getDebugLoc());
ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
: CallLowering(&TLI) {}
-static bool isSupportedType(const DataLayout DL, const ARMTargetLowering &TLI,
+static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
Type *T) {
EVT VT = TLI.getValueType(DL, T);
if (!VT.isSimple() || !VT.isInteger() || VT.isVector())
}
static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG,
- EVT PtrVT, SDLoc dl) {
+ EVT PtrVT, const SDLoc &dl) {
// If we're creating a pool entry for a constant global with unnamed address,
// and the global is small enough, we can emit it inline into the constant pool
// to save ourselves an indirection.
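// For example (illustrative IR, not taken from this patch), a global such as
//
//   @.str = private unnamed_addr constant [4 x i8] c"abc\00"
//
// is the kind of candidate described above: it is constant, small, and its
// address is not significant, so its bytes can be emitted directly into the
// constant pool instead of being reached through a separate global.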
return SDValue();
}
-static Constant *getConstantVector(MVT VT, APInt SplatValue,
+static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
unsigned SplatBitSize, LLVMContext &C) {
unsigned ScalarSize = VT.getScalarSizeInBits();
unsigned NumElm = SplatBitSize / ScalarSize;
return Imm;
}
-static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
+static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
SelectionDAG &DAG) {
return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
}
//
// The function looks for a sub-mask in which the nonzero elements are in
// increasing order. If such a sub-mask exists, the function returns true.
-static bool isNonZeroElementsInOrder(const SmallBitVector Zeroable,
- ArrayRef<int> Mask,const EVT &VectorType,
+static bool isNonZeroElementsInOrder(const SmallBitVector &Zeroable,
+ ArrayRef<int> Mask, const EVT &VectorType,
bool &IsZeroSideLeft) {
int NextElement = -1;
// Check if the Mask's nonzero elements are in increasing order.
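// Worked example (assumed, not from this patch): if Zeroable marks lanes 0 and
// 2, a mask such as {0, 4, 0, 5} has its nonzero elements (4, then 5) in
// increasing order, so a sub-mask of the required shape exists and the
// predicate would hold.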
}
/// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
-static SDValue lowerV16F32VectorShuffle(SDLoc DL, ArrayRef<int> Mask,
+static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const SmallBitVector &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
}
explicit SimpleInliner(InlineParams Params)
- : LegacyInlinerBase(ID), Params(Params) {
+ : LegacyInlinerBase(ID), Params(std::move(Params)) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
namespace {
struct PartialInlinerImpl {
- PartialInlinerImpl(InlineFunctionInfo IFI) : IFI(IFI) {}
+ PartialInlinerImpl(InlineFunctionInfo IFI) : IFI(std::move(IFI)) {}
bool run(Module &M);
Function *unswitchFunction(Function *F);
}
void filterModule(
- Module *M, std::function<bool(const GlobalValue *)> ShouldKeepDefinition) {
+ Module *M, function_ref<bool(const GlobalValue *)> ShouldKeepDefinition) {
for (Function &F : *M) {
if (ShouldKeepDefinition(&F))
continue;
// bit widths (APInt operator- does not like that). If the value cannot be
// represented in uint64, we return an "empty" APInt. This is then interpreted
// as the value not being in range.
-static llvm::Optional<APInt> calculateOffsetDiff(APInt V1, APInt V2)
-{
+static llvm::Optional<APInt> calculateOffsetDiff(const APInt &V1,
+ const APInt &V2) {
llvm::Optional<APInt> Res = None;
unsigned BW = V1.getBitWidth() > V2.getBitWidth() ?
V1.getBitWidth() : V2.getBitWidth();
// Values of type BDVState form a lattice, and this function implements the meet
// operation.
-static BDVState meetBDVState(BDVState LHS, BDVState RHS) {
+static BDVState meetBDVState(const BDVState &LHS, const BDVState &RHS) {
BDVState Result = meetBDVStateImpl(LHS, RHS);
assert(Result == meetBDVStateImpl(RHS, LHS) &&
"Math is wrong: meet does not commute!");