if (PrivateScope.Privatize())
// Emit implicit barrier to synchronize threads and avoid data races.
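+ // OMPD_unknown matches none of the worksharing kinds handled in
+ // emitBarrierCall, so the generic OMP_IDENT_BARRIER_IMPL flag is used.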
CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, Directive.getLocStart(),
- /*IsExplicit=*/false);
+ OMPD_unknown);
CGCapturedStmtInfo::EmitBody(CGF, S);
}
}
void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
- bool IsExplicit) {
+ OpenMPDirectiveKind Kind) {
// Build call __kmpc_cancel_barrier(loc, thread_id);
- auto Flags = static_cast<OpenMPLocationFlags>(
- OMP_IDENT_KMPC |
- (IsExplicit ? OMP_IDENT_BARRIER_EXPL : OMP_IDENT_BARRIER_IMPL));
+ OpenMPLocationFlags Flags = OMP_IDENT_KMPC;
+ if (Kind == OMPD_for) {
+ Flags =
+ static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL_FOR);
+ } else if (Kind == OMPD_sections) {
+ Flags = static_cast<OpenMPLocationFlags>(Flags |
+ OMP_IDENT_BARRIER_IMPL_SECTIONS);
+ } else if (Kind == OMPD_single) {
+ Flags =
+ static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL_SINGLE);
+ } else if (Kind == OMPD_barrier) {
+ Flags = static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_EXPL);
+ } else {
+ Flags = static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL);
+ }
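+ // The composed flag words are the constants the codegen tests expect:
+ // 66 (0x42, KMPC|IMPL_FOR; plain KMPC|IMPL composes the same value),
+ // 194 (0xc2, KMPC|IMPL_SECTIONS) and 322 (0x142, KMPC|IMPL_SINGLE).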
// Use __kmpc_cancel_barrier() instead of __kmpc_barrier(): it provides the
// same functionality and adds initial support for cancellation constructs
// introduced in OpenMP 4.0.
ArrayRef<const Expr *> DstExprs,
ArrayRef<const Expr *> AssignmentOps);
- /// \brief Emits explicit barrier for OpenMP threads.
- /// \param IsExplicit true, if it is explicitly specified barrier.
+ /// \brief Emit an implicit/explicit barrier for OpenMP threads.
+ /// \param Kind Directive for which this implicit barrier call must be
+ /// generated. Must be OMPD_barrier for explicit barrier generation.
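+ /// Worksharing directives pass their own kind (OMPD_for, OMPD_sections,
+ /// OMPD_single); any other kind selects the generic implicit-barrier flag.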
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
- bool IsExplicit = true);
+ OpenMPDirectiveKind Kind);
/// \brief Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
EmitOMPWorksharingLoop(S);
- // Emit an implicit barrier at the end.
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
- /*IsExplicit*/ false);
+ // Emit an implicit barrier at the end, unless a 'nowait' clause is present.
+ if (!S.getSingleClause(OMPC_nowait)) {
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
+ }
}
void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &) {
}
- // Emit an implicit barrier at the end.
- if (!S.getSingleClause(OMPC_nowait))
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
- /*IsExplicit=*/false);
+ // Emit an implicit barrier at the end, unless a 'nowait' clause is present.
+ if (!S.getSingleClause(OMPC_nowait)) {
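+ // A 'sections' region with a single sub-section is emitted as a 'single'
+ // construct, so its barrier must carry the 'single' location flag.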
+ CGM.getOpenMPRuntime().emitBarrierCall(
+ *this, S.getLocStart(),
+ (CS && CS->size() > 1) ? OMPD_sections : OMPD_single);
+ }
}
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
EnsureInsertPoint();
}, S.getLocStart(), CopyprivateVars, SrcExprs, DstExprs, AssignmentOps);
- // Emit an implicit barrier at the end.
- if (!S.getSingleClause(OMPC_nowait))
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
- /*IsExplicit=*/false);
+ // Emit an implicit barrier at the end, unless a 'nowait' clause is present.
+ if (!S.getSingleClause(OMPC_nowait)) {
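+ // Note that 'copyprivate' cannot be combined with 'nowait', so a 'single'
+ // with 'copyprivate' always reaches this implicit barrier.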
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_single);
+ }
}
void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
}
void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
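+ // An explicit '#pragma omp barrier' passes OMPD_barrier, which selects the
+ // OMP_IDENT_BARRIER_EXPL location flag.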
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart());
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
}
void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &) {
#define HEADER
// CHECK: [[IDENT_T_TY:%.+]] = type { i32, i32, i32, i32, i8* }
+// CHECK: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
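+// 66 == 0x42: OMP_IDENT_KMPC (0x2) | OMP_IDENT_BARRIER_IMPL_FOR (0x40).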
// CHECK-LABEL: define {{.*void}} @{{.*}}without_schedule_clause{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void without_schedule_clause(float *a, float *b, float *c, float *d) {
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
- #pragma omp for
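+// With 'nowait' there is no implicit barrier, so no __kmpc_cancel_barrier
+// call may be emitted for this loop (see the CHECK-NOT below).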
+ #pragma omp for nowait
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
}
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]])
-// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[DEFAULT_LOC_BARRIER:[@%].+]], i32 [[GTID]])
+// CHECK-NOT: __kmpc_cancel_barrier
// CHECK: ret void
}
}
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]])
-// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[DEFAULT_LOC_BARRIER:[@%].+]], i32 [[GTID]])
+// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
// CHECK: [[O_LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]])
-// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[DEFAULT_LOC_BARRIER:[@%].+]], i32 [[GTID]])
+// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
}
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
-// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[DEFAULT_LOC_BARRIER:[@%].+]], i32 [[GTID]])
+// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
}
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
-// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[DEFAULT_LOC_BARRIER:[@%].+]], i32 [[GTID]])
+// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
}
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
-// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[DEFAULT_LOC_BARRIER:[@%].+]], i32 [[GTID]])
+// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
}
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
-// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[DEFAULT_LOC_BARRIER:[@%].+]], i32 [[GTID]])
+// CHECK: call {{.+}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
#ifndef HEADER
#define HEADER
-
+// CHECK: [[IMPLICIT_BARRIER_SECTIONS_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 194, i32 0, i32 0, i8*
+// CHECK: [[IMPLICIT_BARRIER_SINGLE_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 322, i32 0, i32 0, i8*
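+// 194 == 0xc2: OMP_IDENT_KMPC (0x2) | OMP_IDENT_BARRIER_IMPL_SECTIONS (0xc0);
+// 322 == 0x142: OMP_IDENT_KMPC (0x2) | OMP_IDENT_BARRIER_IMPL_SINGLE (0x140).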
// CHECK-LABEL: foo
void foo() {};
// CHECK-LABEL: bar
float l = 0.0; // Used as a base point in checks.
// CHECK: [[GTID:%.+]] = call{{.*}} i32 @__kmpc_global_thread_num({{.*}})
// CHECK: store float
-#pragma omp sections nowait
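+// Without 'nowait' this construct must hit the implicit sections barrier
+// checked below; the 'nowait' variant further down must not.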
+#pragma omp sections
{
// CHECK: store i32 0, i32* [[LB_PTR:%.+]],
// CHECK: store i32 1, i32* [[UB_PTR:%.+]],
// CHECK: [[INNER_LOOP_END]]
}
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
+// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_SECTIONS_LOC]],
+#pragma omp sections nowait
+ {
+ foo();
+#pragma omp section
+ bar();
+ }
// CHECK-NOT: __kmpc_cancel_barrier
return tmain<int>();
}
// CHECK: call void @__kmpc_end_single(
// CHECK-NEXT: br label %[[END]]
// CHECK: [[END]]
-// CHECK-NEXT: call i32 @__kmpc_cancel_barrier(
+// CHECK-NEXT: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_SINGLE_LOC]],
// CHECK-NEXT: ret
// CHECK: [[TERM_LPAD]]
// CHECK: call void @__clang_call_terminate(i8*
// CHECK-DAG: [[TEST_CLASS_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK: [[IDENT_T_TY:%.+]] = type { i32, i32, i32, i32, i8* }
+// CHECK: [[IMPLICIT_BARRIER_SINGLE_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 322, i32 0, i32 0, i8*
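+// 322 == 0x142: OMP_IDENT_KMPC (0x2) | OMP_IDENT_BARRIER_IMPL_SINGLE (0x140).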
// CHECK: define void [[FOO:@.+]]()
// CHECK-NEXT: call void @__kmpc_end_single([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]])
// CHECK-NEXT: br label {{%?}}[[EXIT]]
// CHECK: [[EXIT]]
+// CHECK-NOT: __kmpc_cancel_barrier
#pragma omp single nowait
a = 2;
// CHECK: [[RES:%.+]] = call i32 @__kmpc_single([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]])
// CHECK: [[COPY_LIST_VOID_PTR:%.+]] = bitcast [3 x i8*]* [[COPY_LIST]] to i8*
// CHECK: [[DID_IT_VAL:%.+]] = load i32, i32* [[DID_IT]],
// CHECK: call void @__kmpc_copyprivate([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 24, i8* [[COPY_LIST_VOID_PTR]], void (i8*, i8*)* [[COPY_FUNC:@.+]], i32 [[DID_IT_VAL]])
-// CHECK: call{{.*}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* {{@.+}}, i32 [[GTID]])
+// CHECK: call{{.*}} @__kmpc_cancel_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_SINGLE_LOC]], i32 [[GTID]])
#pragma omp single copyprivate(a, c, tc)
foo();
// CHECK-NOT: call i32 @__kmpc_single