template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPCopyinClause(OMPCopyinClause *C) {
TRY_TO(VisitOMPClauseList(C));
+ for (auto *E : C->source_exprs()) {
+ TRY_TO(TraverseStmt(E));
+ }
+ for (auto *E : C->destination_exprs()) {
+ TRY_TO(TraverseStmt(E));
+ }
+ for (auto *E : C->assignment_ops()) {
+ TRY_TO(TraverseStmt(E));
+ }
return true;
}
/// with the variables 'a' and 'b'.
///
class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> {
+ // The class has 3 additional tail-allocated arrays:
+ // 1. List of helper expressions for proper generation of the assignment
+ // operation required for the copyin clause. This list represents the sources.
+ // 2. List of helper expressions for proper generation of the assignment
+ // operation required for the copyin clause. This list represents the
+ // destinations.
+ // 3. List of helper expressions that represent the assignment operation:
+ // \code
+ // DstExprs = SrcExprs;
+ // \endcode
+ // Required for proper code generation of the propagation of the master
+ // thread's values of threadprivate variables to the local instances of those
+ // variables in all other implicit threads.
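+ //
+ // With N variables this means four consecutive arrays of N 'Expr *' follow
+ // the clause object: the variable references from OMPVarListClause, then the
+ // sources, destinations, and assignment operations; the accessors below
+ // simply index into that trailing storage.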
+
+ friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
SourceLocation(), SourceLocation(),
N) {}
+ /// \brief Set the list of helper expressions, required for proper codegen of
+ /// the clause. These expressions represent the source expressions in the
+ /// final assignment statements performed by the copyin clause.
+ void setSourceExprs(ArrayRef<Expr *> SrcExprs);
+
+ /// \brief Get the list of helper source expressions.
+ MutableArrayRef<Expr *> getSourceExprs() {
+ return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
+ }
+ ArrayRef<const Expr *> getSourceExprs() const {
+ return llvm::makeArrayRef(varlist_end(), varlist_size());
+ }
+
+ /// \brief Set the list of helper expressions, required for proper codegen of
+ /// the clause. These expressions represent the destination expressions in
+ /// the final assignment statements performed by the copyin clause.
+ void setDestinationExprs(ArrayRef<Expr *> DstExprs);
+
+ /// \brief Get the list of helper destination expressions.
+ MutableArrayRef<Expr *> getDestinationExprs() {
+ return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
+ }
+ ArrayRef<const Expr *> getDestinationExprs() const {
+ return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
+ }
+
+ /// \brief Set the list of helper assignment expressions, required for proper
+ /// codegen of the clause. These expressions assign the source helper
+ /// expressions to the corresponding destination helper expressions.
+ void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
+
+ /// \brief Get the list of helper assignment expressions.
+ MutableArrayRef<Expr *> getAssignmentOps() {
+ return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
+ }
+ ArrayRef<const Expr *> getAssignmentOps() const {
+ return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
+ }
+
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
+ /// \param SrcExprs List of helper expressions for proper generation of the
+ /// assignment operation required for the copyin clause. This list represents
+ /// the sources.
+ /// \param DstExprs List of helper expressions for proper generation of the
+ /// assignment operation required for the copyin clause. This list represents
+ /// the destinations.
+ /// \param AssignmentOps List of helper expressions that represent the
+ /// assignment operation:
+ /// \code
+ /// DstExprs = SrcExprs;
+ /// \endcode
+ /// Required for proper code generation of the propagation of the master
+ /// thread's values of threadprivate variables to the local instances of those
+ /// variables in all other implicit threads.
///
- static OMPCopyinClause *Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc, ArrayRef<Expr *> VL);
+ static OMPCopyinClause *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
///
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
+ typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
+ typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
+ typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
+ typedef llvm::iterator_range<helper_expr_const_iterator>
+ helper_expr_const_range;
+
+ helper_expr_const_range source_exprs() const {
+ return helper_expr_const_range(getSourceExprs().begin(),
+ getSourceExprs().end());
+ }
+ helper_expr_range source_exprs() {
+ return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
+ }
+ helper_expr_const_range destination_exprs() const {
+ return helper_expr_const_range(getDestinationExprs().begin(),
+ getDestinationExprs().end());
+ }
+ helper_expr_range destination_exprs() {
+ return helper_expr_range(getDestinationExprs().begin(),
+ getDestinationExprs().end());
+ }
+ helper_expr_const_range assignment_ops() const {
+ return helper_expr_const_range(getAssignmentOps().begin(),
+ getAssignmentOps().end());
+ }
+ helper_expr_range assignment_ops() {
+ return helper_expr_range(getAssignmentOps().begin(),
+ getAssignmentOps().end());
+ }
+
StmtRange children() {
return StmtRange(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPCopyinClause(OMPCopyinClause *C) {
TRY_TO(VisitOMPClauseList(C));
+ for (auto *E : C->source_exprs()) {
+ TRY_TO(TraverseStmt(E));
+ }
+ for (auto *E : C->destination_exprs()) {
+ TRY_TO(TraverseStmt(E));
+ }
+ for (auto *E : C->assignment_ops()) {
+ TRY_TO(TraverseStmt(E));
+ }
return true;
}
return new (Mem) OMPAlignedClause(NumVars);
}
-OMPCopyinClause *OMPCopyinClause::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc,
- ArrayRef<Expr *> VL) {
+void OMPCopyinClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
+ assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
+ "not the same as the "
+ "preallocated buffer");
+ std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
+}
+
+void OMPCopyinClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
+ assert(DstExprs.size() == varlist_size() && "Number of destination "
+ "expressions is not the same as "
+ "the preallocated buffer");
+ std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
+}
+
+void OMPCopyinClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
+ assert(AssignmentOps.size() == varlist_size() &&
+ "Number of assignment expressions is not the same as the preallocated "
+ "buffer");
+ std::copy(AssignmentOps.begin(), AssignmentOps.end(),
+ getDestinationExprs().end());
+}
+
+OMPCopyinClause *OMPCopyinClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
+ 4 * sizeof(Expr *) * VL.size());
OMPCopyinClause *Clause = new (Mem) OMPCopyinClause(StartLoc, LParenLoc,
EndLoc, VL.size());
Clause->setVarRefs(VL);
+ Clause->setSourceExprs(SrcExprs);
+ Clause->setDestinationExprs(DstExprs);
+ Clause->setAssignmentOps(AssignmentOps);
return Clause;
}
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
+ 4 * sizeof(Expr *) * N);
return new (Mem) OMPCopyinClause(N);
}
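Create and CreateEmpty allocate a single buffer holding the clause object followed by four trailing arrays of N Expr * each: the variable list plus the three helper lists. A minimal standalone sketch of that layout, using a hypothetical CopyinLayout type instead of the real clause classes:

#include <cstddef>
#include <new>

struct Expr; // opaque stand-in for clang::Expr

struct CopyinLayout {
  std::size_t NumVars; // the real code rounds sizeof up to alignof(Expr *);
                       // here sizeof(CopyinLayout) is already suitably aligned
  Expr **varlist() { return reinterpret_cast<Expr **>(this + 1); }
  Expr **sources() { return varlist() + NumVars; }      // after the variable refs
  Expr **destinations() { return sources() + NumVars; } // after the sources
  Expr **assignmentOps() { return destinations() + NumVars; }
};

CopyinLayout *createCopyinLayout(std::size_t N) {
  // One allocation: object + 4 * N trailing pointers, mirroring Create above.
  void *Mem = ::operator new(sizeof(CopyinLayout) + 4 * sizeof(Expr *) * N);
  return new (Mem) CopyinLayout{N};
}

int main() {
  CopyinLayout *C = createCopyinLayout(2);
  C->sources()[1] = nullptr; // each helper array has room for NumVars entries
  ::operator delete(C);
}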
}
void OMPClauseProfiler::VisitOMPCopyinClause(const OMPCopyinClause *C) {
VisitOMPClauseList(C);
+ for (auto *E : C->source_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->destination_exprs()) {
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->assignment_ops()) {
+ Profiler->VisitStmt(E);
+ }
}
void
OMPClauseProfiler::VisitOMPCopyprivateClause(const OMPCopyprivateClause *C) {
}
bool clang::isOpenMPThreadPrivate(OpenMPClauseKind Kind) {
- return Kind == OMPC_threadprivate ||
- Kind == OMPC_copyin; // TODO add next clauses like 'copyprivate'.
+ return Kind == OMPC_threadprivate || Kind == OMPC_copyin;
}
}
}
+bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
+ // threadprivate_var1 = master_threadprivate_var1;
+ // operator=(threadprivate_var2, master_threadprivate_var2);
+ // ...
+ // __kmpc_barrier(&loc, global_tid);
+ auto CopyinFilter = [](const OMPClause *C) -> bool {
+ return C->getClauseKind() == OMPC_copyin;
+ };
+ llvm::DenseSet<const VarDecl *> CopiedVars;
+ llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
+ for (OMPExecutableDirective::filtered_clause_iterator<decltype(CopyinFilter)>
+ I(D.clauses(), CopyinFilter);
+ I; ++I) {
+ auto *C = cast<OMPCopyinClause>(*I);
+ auto IRef = C->varlist_begin();
+ auto ISrcRef = C->source_exprs().begin();
+ auto IDestRef = C->destination_exprs().begin();
+ for (auto *AssignOp : C->assignment_ops()) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
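+ // Copy each threadprivate variable only once, even if it is listed in
+ // several copyin clauses on the directive.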
+ if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
+ // Get the address of the master variable.
+ auto *MasterAddr = VD->isStaticLocal()
+ ? CGM.getStaticLocalDeclAddress(VD)
+ : CGM.GetAddrOfGlobal(VD);
+ // Get the address of the threadprivate variable.
+ auto *PrivateAddr = EmitLValue(*IRef).getAddress();
+ if (CopiedVars.size() == 1) {
+ // First check whether the current thread is the master thread. If it is,
+ // there is no need to copy the data.
+ CopyBegin = createBasicBlock("copyin.not.master");
+ CopyEnd = createBasicBlock("copyin.not.master.end");
+ Builder.CreateCondBr(
+ Builder.CreateICmpNE(
+ Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
+ Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
+ CopyBegin, CopyEnd);
+ EmitBlock(CopyBegin);
+ }
+ auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
+ auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
+ EmitOMPCopy(*this, (*IRef)->getType(), PrivateAddr, MasterAddr, DestVD,
+ SrcVD, AssignOp);
+ }
+ ++IRef;
+ ++ISrcRef;
+ ++IDestRef;
+ }
+ }
+ if (CopyEnd) {
+ // Exit out of the copying procedure for the non-master threads.
+ EmitBlock(CopyEnd, /*IsFinished=*/true);
+ return true;
+ }
+ return false;
+}
+
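EmitOMPCopyinClause compares the master copy's address with the current thread's threadprivate address and performs the copy only when they differ; the caller then emits the synchronizing barrier. A small usage example of the behaviour being implemented (my own illustration, assuming an OpenMP-enabled build such as clang++ -fopenmp):

#include <cstdio>

static int counter = 10;
#pragma omp threadprivate(counter)

int main() {
  counter = 42; // only the master thread's threadprivate copy is updated
#pragma omp parallel copyin(counter)
  {
    // With copyin, every implicit thread starts from the master's value (42);
    // without it, the other threads would keep their own copies (still 10).
    std::printf("counter = %d\n", counter);
  }
  return 0;
}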
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
auto LastprivateFilter = [](const OMPClause *C) -> bool {
// Emit parallel region as a standalone region.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
OMPPrivateScope PrivateScope(CGF);
- if (CGF.EmitOMPFirstprivateClause(S, PrivateScope)) {
+ bool Copyins = CGF.EmitOMPCopyinClause(S);
+ bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
+ if (Copyins || Firstprivates) {
// Emit implicit barrier to synchronize threads and avoid data races on
- // initialization of firstprivate variables.
+ // initialization of firstprivate variables or the propagation of the master
+ // thread's values of threadprivate variables to the local instances of those
+ // variables in all other implicit threads.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
OMPD_unknown);
}
OMPPrivateScope &PrivateScope);
void EmitOMPPrivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
+ /// \brief Emit code for the copyin clause in directive \a D. The following
+ /// code is generated at the start of the outlined functions for the
+ /// directives:
+ /// \code
+ /// threadprivate_var1 = master_threadprivate_var1;
+ /// operator=(threadprivate_var2, master_threadprivate_var2);
+ /// ...
+ /// __kmpc_barrier(&loc, global_tid);
+ /// \endcode
+ ///
+ /// \param D OpenMP directive possibly with 'copyin' clause(s).
+ /// \returns true if at least one copyin variable is found, false otherwise.
+ bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
/// \brief Emit initial code for lastprivate variables. If some variable is
/// not also firstprivate, then the default initialization is used. Otherwise
/// initialization of this variable is performed by EmitOMPFirstprivateClause
VarDecl *VD = cast<VarDecl>(DE->getDecl());
SourceLocation ILoc = DE->getExprLoc();
+ QualType QType = VD->getType();
+ if (QType->isDependentType() || QType->isInstantiationDependentType()) {
+ // It will be analyzed later.
+ Vars.push_back(DE);
+ continue;
+ }
+
// OpenMP [2.9.2, Restrictions, C/C++, p.10]
// A threadprivate variable must not have an incomplete type.
if (RequireCompleteType(ILoc, VD->getType(),
SourceLocation LParenLoc,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> SrcExprs;
+ SmallVector<Expr *, 8> DstExprs;
+ SmallVector<Expr *, 8> AssignmentOps;
for (auto &RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP copyin clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
+ SrcExprs.push_back(nullptr);
+ DstExprs.push_back(nullptr);
+ AssignmentOps.push_back(nullptr);
continue;
}
if (Type->isDependentType() || Type->isInstantiationDependentType()) {
// It will be analyzed later.
Vars.push_back(DE);
+ SrcExprs.push_back(nullptr);
+ DstExprs.push_back(nullptr);
+ AssignmentOps.push_back(nullptr);
continue;
}
// A variable of class type (or array thereof) that appears in a
// copyin clause requires an accessible, unambiguous copy assignment
// operator for the class type.
- Type = Context.getBaseElementType(Type);
- CXXRecordDecl *RD =
- getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
- // FIXME This code must be replaced by actual assignment of the
- // threadprivate variable.
- if (RD) {
- CXXMethodDecl *MD = LookupCopyingAssignment(RD, 0, false, 0);
- DeclAccessPair FoundDecl = DeclAccessPair::make(MD, MD->getAccess());
- if (MD) {
- if (CheckMemberAccess(ELoc, RD, FoundDecl) == AR_inaccessible ||
- MD->isDeleted()) {
- Diag(ELoc, diag::err_omp_required_method)
- << getOpenMPClauseName(OMPC_copyin) << 2;
- bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
- VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
- Diag(RD->getLocation(), diag::note_previous_decl) << RD;
- continue;
- }
- MarkFunctionReferenced(ELoc, MD);
- DiagnoseUseOfDecl(MD, ELoc);
- }
- }
+ Type = Context.getBaseElementType(Type).getNonReferenceType();
+ auto *SrcVD = BuildVarDecl(*this, DE->getLocStart(),
+ Type.getUnqualifiedType(), ".copyin.src");
+ auto *PseudoSrcExpr = BuildDeclRefExpr(SrcVD, Type.getUnqualifiedType(),
+ VK_LValue, DE->getExprLoc())
+ .get();
+ auto *DstVD = BuildVarDecl(*this, DE->getLocStart(), Type, ".copyin.dst");
+ auto *PseudoDstExpr =
+ BuildDeclRefExpr(DstVD, Type, VK_LValue, DE->getExprLoc()).get();
+ // For arrays, generate an assignment operation for a single element and
+ // replace it with the original array element in CodeGen.
+ auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign,
+ PseudoDstExpr, PseudoSrcExpr);
+ if (AssignmentOp.isInvalid())
+ continue;
+ AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(),
+ /*DiscardedValue=*/true);
+ if (AssignmentOp.isInvalid())
+ continue;
DSAStack->addDSA(VD, DE, OMPC_copyin);
Vars.push_back(DE);
+ SrcExprs.push_back(PseudoSrcExpr);
+ DstExprs.push_back(PseudoDstExpr);
+ AssignmentOps.push_back(AssignmentOp.get());
}
if (Vars.empty())
return nullptr;
- return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+ return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
+ SrcExprs, DstExprs, AssignmentOps);
}
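For class types, the pseudo assignment built above is what now surfaces access problems through the regular C++ machinery, which is why the sema tests later in this patch expect "'operator=' is a private member of ..." rather than the old custom diagnostic. A small sketch of the rejected case, modelled on the S4 test class:

class Locked {
  int a;
  Locked &operator=(const Locked &); // private copy assignment
public:
  Locked() : a(0) {}
};

Locked obj;
#pragma omp threadprivate(obj)

void use() {
#pragma omp parallel copyin(obj) // error: 'operator=' is a private member of 'Locked'
  {
  }
}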
OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
void OMPClauseReader::VisitOMPCopyinClause(OMPCopyinClause *C) {
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
+ SmallVector<Expr *, 16> Exprs;
+ Exprs.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
- C->setVarRefs(Vars);
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setVarRefs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setSourceExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setDestinationExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Reader->Reader.ReadSubExpr());
+ C->setAssignmentOps(Exprs);
}
void OMPClauseReader::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
for (auto *VE : C->varlists())
Writer->Writer.AddStmt(VE);
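+ // Keep the order in sync with OMPClauseReader::VisitOMPCopyinClause:
+ // sources, then destinations, then assignment operations.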
+ for (auto *E : C->source_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->destination_exprs())
+ Writer->Writer.AddStmt(E);
+ for (auto *E : C->assignment_ops())
+ Writer->Writer.AddStmt(E);
}
void OMPClauseWriter::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
--- /dev/null
+// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenmp=libiomp5 -x c++ -std=c++11 -triple %itanium_abi_triple -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp=libiomp5 -x c++ -triple %itanium_abi_triple -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -std=c++11 -DLAMBDA -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
+// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -fblocks -DBLOCKS -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+volatile int g = 1212;
+#pragma omp threadprivate(g)
+
+template <class T>
+struct S {
+ T f;
+ S(T a) : f(a + g) {}
+ S() : f(g) {}
+ S &operator=(const S &) { return *this; };
+ operator T() { return T(); }
+ ~S() {}
+};
+
+// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
+// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
+// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
+
+
+// CHECK-DAG: [[T_VAR:@.+]] = internal global i{{[0-9]+}} 1122,
+// CHECK-DAG: [[VEC:@.+]] = internal global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 1, i{{[0-9]+}} 2],
+// CHECK-DAG: [[S_ARR:@.+]] = internal global [2 x [[S_FLOAT_TY]]] zeroinitializer,
+// CHECK-DAG: [[VAR:@.+]] = internal global [[S_FLOAT_TY]] zeroinitializer,
+// CHECK-DAG: [[TMAIN_T_VAR:@.+]] = linkonce_odr global i{{[0-9]+}} 333,
+// CHECK-DAG: [[TMAIN_VEC:@.+]] = linkonce_odr global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 3, i{{[0-9]+}} 3],
+// CHECK-DAG: [[TMAIN_S_ARR:@.+]] = linkonce_odr global [2 x [[S_INT_TY]]] zeroinitializer,
+// CHECK-DAG: [[TMAIN_VAR:@.+]] = linkonce_odr global [[S_INT_TY]] zeroinitializer,
+template <typename T>
+T tmain() {
+ S<T> test;
+ test = S<T>();
+ static T t_var = 333;
+ static T vec[] = {3, 3};
+ static S<T> s_arr[] = {1, 2};
+ static S<T> var(3);
+#pragma omp threadprivate(t_var, vec, s_arr, var)
+#pragma omp parallel copyin(t_var, vec, s_arr, var)
+ {
+ vec[0] = t_var;
+ s_arr[0] = var;
+ }
+#pragma omp parallel copyin(t_var)
+ {}
+ return T();
+}
+
+int main() {
+#ifdef LAMBDA
+ // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
+ // LAMBDA-LABEL: @main
+ // LAMBDA: call{{( x86_thiscallcc)?}} void [[OUTER_LAMBDA:@.+]](
+ [&]() {
+ // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
+ // LAMBDA: call void {{.+}}* @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8*
+#pragma omp parallel copyin(g)
+ {
+ // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
+
+ // threadprivate_g = g;
+ // LAMBDA: call i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
+ // LAMBDA: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
+ // LAMBDA: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[G]] to i{{[0-9]+}}), %{{.+}}
+ // LAMBDA: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+ // LAMBDA: [[NOT_MASTER]]
+ // LAMBDA: load i{{[0-9]+}}, i{{[0-9]+}}* [[G]],
+ // LAMBDA: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+ // LAMBDA: [[DONE]]
+
+ // LAMBDA: call i32 @__kmpc_cancel_barrier(
+ g = 1;
+ // LAMBDA: call{{( x86_thiscallcc)?}} void [[INNER_LAMBDA:@.+]](%{{.+}}*
+ [&]() {
+ // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
+ // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
+ g = 2;
+ // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
+ }();
+ }
+ }();
+ return 0;
+#elif defined(BLOCKS)
+ // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
+ // BLOCKS-LABEL: @main
+ // BLOCKS: call void {{%.+}}(i8*
+ ^{
+ // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
+ // BLOCKS: call void {{.+}}* @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8*
+#pragma omp parallel copyin(g)
+ {
+ // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
+
+ // threadprivate_g = g;
+ // BLOCKS: call i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
+ // BLOCKS: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
+ // BLOCKS: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[G]] to i{{[0-9]+}}), %{{.+}}
+ // BLOCKS: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+ // BLOCKS: [[NOT_MASTER]]
+ // BLOCKS: load i{{[0-9]+}}, i{{[0-9]+}}* [[G]],
+ // BLOCKS: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+ // BLOCKS: [[DONE]]
+
+ // BLOCKS: call i32 @__kmpc_cancel_barrier(
+ g = 1;
+ // BLOCKS: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}*
+ // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // BLOCKS: call void {{%.+}}(i8*
+ ^{
+ // BLOCKS: define {{.+}} void {{@.+}}(i8*
+ g = 2;
+ // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // BLOCKS: call i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
+ // BLOCKS: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}*
+ // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
+ // BLOCKS: ret
+ }();
+ }
+ }();
+ return 0;
+#else
+ S<float> test;
+ test = S<float>();
+ static int t_var = 1122;
+ static int vec[] = {1, 2};
+ static S<float> s_arr[] = {1, 2};
+ static S<float> var(3);
+#pragma omp threadprivate(t_var, vec, s_arr, var)
+#pragma omp parallel copyin(t_var, vec, s_arr, var)
+ {
+ vec[0] = t_var;
+ s_arr[0] = var;
+ }
+#pragma omp parallel copyin(t_var)
+ {}
+ return tmain<int>();
+#endif
+}
+
+// CHECK-LABEL: @main
+// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
+// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* [[TEST]], [[S_FLOAT_TY]]*
+// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...)* @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[MAIN_MICROTASK:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...)* @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[MAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
+// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
+// CHECK: ret
+//
+// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+
+// threadprivate_t_var = t_var;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[T_VAR]]
+// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
+// CHECK: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[T_VAR]] to i{{[0-9]+}}), %{{.+}}
+// CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// CHECK: [[NOT_MASTER]]
+// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR]],
+// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+
+// threadprivate_vec = vec;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[VEC]]
+// CHECK: call void @llvm.memcpy{{.*}}(i8* %{{.+}}, i8* bitcast ([2 x i{{[0-9]+}}]* [[VEC]] to i8*),
+
+// threadprivate_s_arr = s_arr;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[S_ARR]]
+// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* {{%.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
+// CHECK: [[S_ARR_PRIV_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_PRIV_BEGIN]], i{{[0-9]+}} 2
+// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_PRIV_BEGIN]], [[S_ARR_PRIV_END]]
+// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
+// CHECK: [[S_ARR_BODY]]
+// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
+// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
+
+// threadprivate_var = var;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[VAR]]
+// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{%.+}}, [[S_FLOAT_TY]]* {{.*}}[[VAR]])
+// CHECK: [[DONE]]
+
+// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// CHECK: ret void
+
+// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+
+// threadprivate_t_var = t_var;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[T_VAR]]
+// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
+// CHECK: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[T_VAR]] to i{{[0-9]+}}), %{{.+}}
+// CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// CHECK: [[NOT_MASTER]]
+// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR]],
+// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+// CHECK: [[DONE]]
+
+// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// CHECK: ret void
+
+// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
+// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
+// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[TEST]], [[S_INT_TY]]*
+// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...)* @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...)* @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
+// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
+// CHECK: ret
+//
+// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+
+// threadprivate_t_var = t_var;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_T_VAR]]
+// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
+// CHECK: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[TMAIN_T_VAR]] to i{{[0-9]+}}), %{{.+}}
+// CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// CHECK: [[NOT_MASTER]]
+// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[TMAIN_T_VAR]],
+// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+
+// threadprivate_vec = vec;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_VEC]]
+// CHECK: call void @llvm.memcpy{{.*}}(i8* %{{.+}}, i8* bitcast ([2 x i{{[0-9]+}}]* [[TMAIN_VEC]] to i8*),
+
+// threadprivate_s_arr = s_arr;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_S_ARR]]
+// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* {{%.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
+// CHECK: [[S_ARR_PRIV_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_PRIV_BEGIN]], i{{[0-9]+}} 2
+// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_PRIV_BEGIN]], [[S_ARR_PRIV_END]]
+// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
+// CHECK: [[S_ARR_BODY]]
+// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
+// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
+
+// threadprivate_var = var;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_VAR]]
+// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{%.+}}, [[S_INT_TY]]* {{.*}}[[TMAIN_VAR]])
+// CHECK: [[DONE]]
+
+// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// CHECK: ret void
+
+// CHECK: define internal void [[TMAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
+// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
+// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
+// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
+
+// threadprivate_t_var = t_var;
+// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_T_VAR]]
+// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
+// CHECK: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[TMAIN_T_VAR]] to i{{[0-9]+}}), %{{.+}}
+// CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
+// CHECK: [[NOT_MASTER]]
+// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[TMAIN_T_VAR]],
+// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
+// CHECK: [[DONE]]
+
+// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
+// CHECK: ret void
+
+#endif
+
S3():a(0) { }
S3 &operator =(S3 &s3) { return *this; }
};
-class S4 { // expected-note {{'S4' declared here}}
+class S4 {
int a;
S4();
- S4 &operator =(const S4 &s4);
+ S4 &operator =(const S4 &s4); // expected-note {{implicitly declared private here}}
public:
S4(int v):a(v) { }
};
-class S5 { // expected-note {{'S5' declared here}}
+class S5 {
int a;
S5():a(0) {}
- S5 &operator =(const S5 &s5) { return *this; }
+ S5 &operator =(const S5 &s5) { return *this; } // expected-note {{implicitly declared private here}}
public:
S5(int v):a(v) { }
};
S2 k;
S3 h;
-S4 l(3); // expected-note {{'l' defined here}}
-S5 m(4); // expected-note {{'m' defined here}}
+S4 l(3);
+S5 m(4);
#pragma omp threadprivate(h, k, l, m)
int main(int argc, char **argv) {
#pragma omp parallel copyin (k // expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp parallel copyin (h, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp parallel copyin (argc > 0 ? argv[1] : argv[2]) // expected-error {{expected variable name}}
- #pragma omp parallel copyin (l) // expected-error {{copyin variable must have an accessible, unambiguous copy assignment operator}}
+ #pragma omp parallel copyin (l) // expected-error {{'operator=' is a private member of 'S4'}}
#pragma omp parallel copyin (S1) // expected-error {{'S1' does not refer to a value}}
#pragma omp parallel copyin (argv[1]) // expected-error {{expected variable name}}
#pragma omp parallel copyin(i) // expected-error {{copyin variable must be threadprivate}}
- #pragma omp parallel copyin(m) // expected-error {{copyin variable must have an accessible, unambiguous copy assignment operator}}
+ #pragma omp parallel copyin(m) // expected-error {{'operator=' is a private member of 'S5'}}
#pragma omp parallel copyin(ST<int>::s) // expected-error {{copyin variable must be threadprivate}}
foo();
S3() : a(0) {}
S3 &operator=(S3 &s3) { return *this; }
};
-class S4 { // expected-note {{'S4' declared here}}
+class S4 {
int a;
S4();
- S4 &operator=(const S4 &s4);
+ S4 &operator=(const S4 &s4); // expected-note {{implicitly declared private here}}
public:
S4(int v) : a(v) {}
};
-class S5 { // expected-note {{'S5' declared here}}
+class S5 {
int a;
S5() : a(0) {}
- S5 &operator=(const S5 &s5) { return *this; }
+ S5 &operator=(const S5 &s5) { return *this; } // expected-note {{implicitly declared private here}}
public:
S5(int v) : a(v) {}
S2 k;
S3 h;
-S4 l(3); // expected-note {{'l' defined here}}
-S5 m(4); // expected-note {{'m' defined here}}
+S4 l(3);
+S5 m(4);
#pragma omp threadprivate(h, k, l, m)
int main(int argc, char **argv) {
#pragma omp parallel for copyin(argc > 0 ? argv[1] : argv[2]) // expected-error {{expected variable name}}
for (i = 0; i < argc; ++i)
foo();
-#pragma omp parallel for copyin(l) // expected-error {{copyin variable must have an accessible, unambiguous copy assignment operator}}
+#pragma omp parallel for copyin(l) // expected-error {{'operator=' is a private member of 'S4'}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp parallel for copyin(S1) // expected-error {{'S1' does not refer to a value}}
#pragma omp parallel for copyin(i) // expected-error {{copyin variable must be threadprivate}}
for (i = 0; i < argc; ++i)
foo();
-#pragma omp parallel for copyin(m) // expected-error {{copyin variable must have an accessible, unambiguous copy assignment operator}}
+#pragma omp parallel for copyin(m) // expected-error {{'operator=' is a private member of 'S5'}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp parallel for copyin(ST < int > ::s) // expected-error {{copyin variable must be threadprivate}}
S3() : a(0) {}
S3 &operator=(S3 &s3) { return *this; }
};
-class S4 { // expected-note {{'S4' declared here}}
+class S4 {
int a;
S4();
- S4 &operator=(const S4 &s4);
+ S4 &operator=(const S4 &s4); // expected-note {{implicitly declared private here}}
public:
S4(int v) : a(v) {}
};
-class S5 { // expected-note {{'S5' declared here}}
+class S5 {
int a;
S5() : a(0) {}
- S5 &operator=(const S5 &s5) { return *this; }
+ S5 &operator=(const S5 &s5) { return *this; } // expected-note {{implicitly declared private here}}
public:
S5(int v) : a(v) {}
S2 k;
S3 h;
-S4 l(3); // expected-note {{'l' defined here}}
-S5 m(4); // expected-note {{'m' defined here}}
+S4 l(3);
+S5 m(4);
#pragma omp threadprivate(h, k, l, m)
int main(int argc, char **argv) {
#pragma omp parallel for simd copyin(argc > 0 ? argv[1] : argv[2]) // expected-error {{expected variable name}}
for (i = 0; i < argc; ++i)
foo();
-#pragma omp parallel for simd copyin(l) // expected-error {{copyin variable must have an accessible, unambiguous copy assignment operator}}
+#pragma omp parallel for simd copyin(l) // expected-error {{'operator=' is a private member of 'S4'}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp parallel for simd copyin(S1) // expected-error {{'S1' does not refer to a value}}
#pragma omp parallel for simd copyin(i) // expected-error {{copyin variable must be threadprivate}}
for (i = 0; i < argc; ++i)
foo();
-#pragma omp parallel for simd copyin(m) // expected-error {{copyin variable must have an accessible, unambiguous copy assignment operator}}
+#pragma omp parallel for simd copyin(m) // expected-error {{'operator=' is a private member of 'S5'}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp parallel for simd copyin(ST < int > ::s) // expected-error {{copyin variable must be threadprivate}}
S3() : a(0) {}
S3 &operator=(S3 &s3) { return *this; }
};
-class S4 { // expected-note {{'S4' declared here}}
+class S4 {
int a;
S4();
- S4 &operator=(const S4 &s4);
+ S4 &operator=(const S4 &s4); // expected-note {{implicitly declared private here}}
public:
S4(int v) : a(v) {}
};
-class S5 { // expected-note {{'S5' declared here}}
+class S5 {
int a;
S5() : a(0) {}
- S5 &operator=(const S5 &s5) { return *this; }
+ S5 &operator=(const S5 &s5) { return *this; } // expected-note {{implicitly declared private here}}
public:
S5(int v) : a(v) {}
S2 k;
S3 h;
-S4 l(3); // expected-note {{'l' defined here}}
-S5 m(4); // expected-note {{'m' defined here}}
+S4 l(3);
+S5 m(4);
#pragma omp threadprivate(h, k, l, m)
int main(int argc, char **argv) {
{
foo();
}
-#pragma omp parallel sections copyin(l) // expected-error {{copyin variable must have an accessible, unambiguous copy assignment operator}}
+#pragma omp parallel sections copyin(l) // expected-error {{'operator=' is a private member of 'S4'}}
{
foo();
}
{
foo();
}
-#pragma omp parallel sections copyin(m) // expected-error {{copyin variable must have an accessible, unambiguous copy assignment operator}}
+#pragma omp parallel sections copyin(m) // expected-error {{'operator=' is a private member of 'S5'}}
{
foo();
}
}
void OMPClauseEnqueue::VisitOMPCopyinClause(const OMPCopyinClause *C) {
VisitOMPClauseList(C);
+ for (auto *E : C->source_exprs()) {
+ Visitor->AddStmt(E);
+ }
+ for (auto *E : C->destination_exprs()) {
+ Visitor->AddStmt(E);
+ }
+ for (auto *E : C->assignment_ops()) {
+ Visitor->AddStmt(E);
+ }
}
void
OMPClauseEnqueue::VisitOMPCopyprivateClause(const OMPCopyprivateClause *C) {