}
void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
- getOpenMPRuntime().checkArchForUnifiedAddressing(*this, D);
+ getOpenMPRuntime().checkArchForUnifiedAddressing(D);
}
}
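For context, EmitOMPRequiresDecl fires once per OpenMP requires directive in the translation unit. A minimal source-level trigger for the unified-addressing check (illustrative only, not part of this patch):

  // Declares that devices share the host address space; Clang routes
  // the directive through CodeGenModule::EmitOMPRequiresDecl, which now
  // calls checkArchForUnifiedAddressing without re-passing CGM.
  #pragma omp requires unified_shared_memory

  int Data = 0;

  void foo() {
  #pragma omp target // the region may dereference host pointers directly
    Data += 1;
  }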
llvm::Function *CGOpenMPRuntime::emitReductionFunction(
- CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
- ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
+ SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps) {
ASTContext &C = CGM.getContext();
// void reduction_func(void *LHSArg, void *RHSArg);
// 2. Emit reduce_func().
llvm::Function *ReductionFn = emitReductionFunction(
- CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
- Privates, LHSExprs, RHSExprs, ReductionOps);
+ Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
+ LHSExprs, RHSExprs, ReductionOps);
// 3. Create static kmp_critical_name lock = { 0 };
std::string Name = getName({"reduction"});
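For reference, the Privates/LHSExprs/RHSExprs/ReductionOps lists feeding this call come from a reduction clause in the user's code; a minimal sketch (not from the patch):

  int sum = 0;
  #pragma omp parallel for reduction(+ : sum)
  for (int i = 0; i < 100; ++i)
    sum += i; // each ReductionOp has the form 'LHS binop RHS'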
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
- llvm::Function *emitReductionFunction(CodeGenModule &CGM, SourceLocation Loc,
+ llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
/// Perform a check on the requires decl to ensure that the target
/// architecture supports unified addressing.
- virtual void checkArchForUnifiedAddressing(CodeGenModule &CGM,
- const OMPRequiresDecl *D) const {}
+ virtual void checkArchForUnifiedAddressing(const OMPRequiresDecl *D) const {}
};
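Since the base-class default is now a no-op taking only the decl, an out-of-tree runtime would override the narrower signature along these lines (CGOpenMPRuntimeFoo is a hypothetical subclass, not part of the patch):

  class CGOpenMPRuntimeFoo final : public CGOpenMPRuntime {
  public:
    explicit CGOpenMPRuntimeFoo(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}

    // CGM is no longer a parameter; the member captured at construction
    // time is used instead, mirroring the NVPTX change below.
    void checkArchForUnifiedAddressing(const OMPRequiresDecl *D) const override;
  };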
/// Class supports emission of SIMD-only code.
llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
ReductionList.getPointer(), CGF.VoidPtrTy);
llvm::Function *ReductionFn = emitReductionFunction(
- CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
- Privates, LHSExprs, RHSExprs, ReductionOps);
+ Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
+ LHSExprs, RHSExprs, ReductionOps);
llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
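For intuition, the function returned by emitReductionFunction has roughly the shape below. This is an illustrative C++ rendering of the generated IR for a single reduction(+ : sum) over an int; the names and types here are assumptions, not code from the patch:

  // Sketch of the emitted reduce_func; LHSArg/RHSArg point to arrays of
  // pointers to the private copies (the ArgsType passed above).
  static void reduction_func(void *LHSArg, void *RHSArg) {
    int **LHS = (int **)LHSArg;
    int **RHS = (int **)RHSArg;
    *LHS[0] = *LHS[0] + *RHS[0]; // one 'LHS binop RHS' per ReductionOp
  }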
/// Check whether the target architecture supports unified addressing, which
/// is a restriction of the OpenMP requires clause "unified_shared_memory".
void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
- CodeGenModule &CGM, const OMPRequiresDecl *D) const {
+ const OMPRequiresDecl *D) const {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
switch (getCudaArch(CGM)) {
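      // The body of this switch is elided in the excerpt; a hypothetical
      // continuation (the architecture cutoff and diagnostic wording are
      // assumptions, not quotes from the patch) would look roughly like:
      case CudaArch::SM_20:
      case CudaArch::SM_21:
      case CudaArch::SM_30:
        // Architectures without hardware unified addressing: reject.
        CGM.Error(Clause->getBeginLoc(),
                  "target architecture does not support unified addressing");
        break;
      default:
        break;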
/// Perform a check on the requires decl to ensure that the target
/// architecture supports unified addressing.
- void checkArchForUnifiedAddressing(CodeGenModule &CGM,
- const OMPRequiresDecl *D) const override;
+ void checkArchForUnifiedAddressing(const OMPRequiresDecl *D) const override;
/// Returns the default address space for constant firstprivates (the
/// __constant__ address space by default).