// OpenMP private variables should not be captured in outer scope, so
// just break here. Similarly, global variables that are captured in a
// target region should not be captured outside the scope of the region.
if (RSI->CapRegionKind == CR_OpenMP) {
bool IsOpenMPPrivateDecl = isOpenMPPrivateDecl(Var, RSI->OpenMPLevel);
- auto IsTargetCap = !IsOpenMPPrivateDecl &&
+ // If the variable is private (i.e. not captured) and has variably
+ // modified type, we still need to capture the type for correct
+ // codegen in all regions associated with the construct. Currently,
+ // it is captured in the innermost captured region only.
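+ // For example, given
+ //   void bazzzz(int n, int f[n]) {
+ //   #pragma omp target teams private(f)
+ //     ;
+ //   }
+ // 'f' itself is not captured, but the bound 'n' is part of its VLA type
+ // and is needed by every captured region that the combined 'target teams'
+ // directive expands to, not just the innermost one.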
+ if (IsOpenMPPrivateDecl && Var->getType()->isVariablyModifiedType()) {
+ QualType QTy = Var->getType();
+ if (ParmVarDecl *PVD = dyn_cast_or_null<ParmVarDecl>(Var))
+ QTy = PVD->getOriginalType();
+ for (int I = 1, E = getNumberOfConstructScopes(RSI->OpenMPLevel);
+ I < E; ++I) {
+ auto *OuterRSI = cast<CapturedRegionScopeInfo>(
+ FunctionScopes[FunctionScopesIndex - I]);
+ assert(RSI->OpenMPLevel == OuterRSI->OpenMPLevel &&
+ "Wrong number of captured regions associated with the "
+ "OpenMP construct.");
+ captureVariablyModifiedType(Context, QTy, OuterRSI);
+ }
+ }
+ bool IsTargetCap = !IsOpenMPPrivateDecl &&
isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel);
// When we detect target captures we are looking from inside the
// target region, so we need to propagate the capture from the
// enclosing region. Therefore, the capture is not initially nested.
// TCHECK: @{{.+}} = weak constant [[ENTTY]]
// TCHECK: @{{.+}} = weak constant [[ENTTY]]
// TCHECK: @{{.+}} = weak constant [[ENTTY]]
+// TCHECK: @{{.+}} = weak constant [[ENTTY]]
// TCHECK-NOT: @{{.+}} = weak constant [[ENTTY]]
// Check if offloading descriptor is created.
d.Y += 1;
}
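+ // Check the captures for a const scalar under combined constructs: the
+ // data-sharing clauses on each directive decide whether 'nn' is passed by
+ // value (i[[SZ]]) or by reference (i32*) to the outlined functions; see
+ // the CHECK lines for foo below.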
+ const int nn = 0;
+ #pragma omp target teams shared(nn)
+ #pragma omp parallel firstprivate(nn)
+ (void)nn;
+ #pragma omp target teams firstprivate(nn)
+ #pragma omp parallel shared(nn)
+ (void)nn;
return a;
}
// CHECK: define internal {{.*}}void [[OMP_OUTLINED4]](i32* noalias %.global_tid., i32* noalias %.bound_tid., i[[SZ]] %{{.+}}, [10 x float]* {{.+}}, i[[SZ]] %{{.+}}, float* {{.+}}, [5 x [10 x double]]* {{.+}}, i[[SZ]] %{{.+}}, i[[SZ]] %{{.+}}, double* {{.+}}, [[TT]]* {{.+}})
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
+// CHECK: define {{.*}}void @__omp_offloading_{{.*}}foo{{.*}}_l346(i[[SZ]] %{{.+}})
+// CHECK: define internal void {{@.+}}(i32* {{.+}}, i32* {{.+}}, i[[SZ]] %{{.+}})
+// CHECK: define {{.*}}void @__omp_offloading_{{.*}}foo{{.*}}_l349(i[[SZ]] %{{.+}})
+// CHECK: define internal void {{@.+}}(i32* {{.+}}, i32* {{.+}}, i32* dereferenceable{{.+}})
+
+void bazzzz(int n, int f[n]) {
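+// 'f' is private on the construct, so 'f' itself is not captured; only the
+// VLA bound 'n' must be captured and forwarded to the outlined teams
+// function, which is what the CHECK lines below verify.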
+// CHECK: define internal void @__omp_offloading_{{.+}}bazzzz{{.+}}_l501(i[[SZ]] %{{[^,]+}})
+// CHECK: [[VLA:%.+]] = load i[[SZ]], i[[SZ]]* %
+// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @{{.+}}, i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i[[SZ]])* @{{.+}} to void (i32*, i32*, ...)*), i[[SZ]] [[VLA]])
+#pragma omp target teams private(f)
+ ;
+}
+
template<typename tx>
tx ftemplate(int n) {
tx a = 0;
// CHECK: define internal {{.*}}void [[OMP_OUTLINED7]](i32* noalias %.global_tid., i32* noalias %.bound_tid., i[[SZ]] %{{.+}}, i[[SZ]] %{{.+}}, [10 x i32]* {{.+}})
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
-void foo1() {
- const int n = 0;
- #pragma omp target teams shared(n)
- #pragma omp parallel firstprivate(n)
- (void)n;
-}
-void foo() {
- const int n = 0;
- #pragma omp target teams firstprivate(n)
- #pragma omp parallel shared(n)
- (void)n;
-}
-
-// define {{.*}}void @__omp_offloading_{{.*}}foo1{{.*}}_l841(i[[SZ]] %{{.+}})
-// define internal void {{@.+}}(i32* {{.+}}, i32* {{.+}}, i[[SZ]] %{{.+}})
-// define {{.*}}void @__omp_offloading_{{.*}}foo1{{.*}}_l847(i[[SZ]] %{{.+}})
-// define internal void {{@.+}}(i32* {{.+}}, i32* {{.+}}, i32* dereferenceable{{.+}})
#endif