------------------------------------------------------------------------
r295474 | hahnfeld | 2017-02-17 10:32:58 -0800 (Fri, 17 Feb 2017) | 6 lines

[OpenMP] Fix cancellation point in task with no cancel

With tasks, the cancel may happen in another task. This has a different
region info, which means that we can't find it here.

Differential Revision: https://reviews.llvm.org/D30091
------------------------------------------------------------------------
git-svn-id: https://llvm.org/svn/llvm-project/cfe/branches/release_40@296139 91177308-0d34-0410-b5e6-96231b3b80d8
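
For illustration only (not part of the patch), a minimal OpenMP C sketch of the
scenario being fixed: the cancel is issued in one task, while the cancellation
point sits in a sibling task whose own region info carries no cancel. The
function and variable names below are made up for the example.

void process(int *data, int n) {
#pragma omp taskgroup
  {
    /* The cancel is requested in this task ... */
#pragma omp task
    {
      if (data[0] < 0) { /* placeholder error condition */
#pragma omp cancel taskgroup
      }
    }
    /* ... but the cancellation point sits in this sibling task, whose
       region info has no cancel of its own. */
#pragma omp task
    {
      for (int i = 0; i < n; ++i) {
#pragma omp cancellation point taskgroup
        data[i] *= 2;
      }
    }
  }
}

With the change below, the __kmpc_cancellationpoint call is emitted for the
second task even though its own region carries no cancel.
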
   // global_tid, kmp_int32 cncl_kind);
   if (auto *OMPRegionInfo =
           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
-    if (OMPRegionInfo->hasCancel()) {
+    // For 'cancellation point taskgroup', the task region info may not have a
+    // cancel. This may instead happen in another adjacent task.
+    if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
       llvm::Value *Args[] = {
           emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
           CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
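
For reference, the cncl_kind argument built here is the constant the CHECK
lines further down match against (i32 4 for taskgroup, i32 3 for sections).
A sketch of that mapping as assumed in this note; the enumerator names are
illustrative, not taken from the patch:

/* Assumed numbering of the third argument to __kmpc_cancellationpoint. */
enum cancel_kind {
  cancel_noreq     = 0,
  cancel_parallel  = 1,
  cancel_loop      = 2, /* worksharing loop */
  cancel_sections  = 3,
  cancel_taskgroup = 4  /* matched as "i32 4" in the new CHECK lines */
};
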
 /// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
 /// the specified properties.
-static const char *getLockFreeValue(unsigned TypeWidth, unsigned InlineWidth) {
+static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
+                                    unsigned InlineWidth) {
   // Fully-aligned, power-of-2 sizes no larger than the inline
   // width will be inlined as lock-free operations.
-  // Note: we do not need to check alignment since _Atomic(T) is always
-  // appropriately-aligned in clang.
-  if ((TypeWidth & (TypeWidth - 1)) == 0 && TypeWidth <= InlineWidth)
+  if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
+      TypeWidth <= InlineWidth)
     return "2"; // "always lock free"
   // We cannot be certain what operations the lib calls might be
   // able to implement as lock-free on future processors.
 #define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
   Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
                       getLockFreeValue(TI.get##Type##Width(), \
+                                       TI.get##Type##Align(), \
                                        InlineWidthBits));
   DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
   DEFINE_LOCK_FREE_MACRO(CHAR, Char);
   DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
   Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
                       getLockFreeValue(TI.getPointerWidth(0),
+                                       TI.getPointerAlign(0),
                                        InlineWidthBits));
 #undef DEFINE_LOCK_FREE_MACRO
}
}
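
For illustration only (not part of the patch), a small C sketch of what the new
alignment check changes, assuming an i386 target where long long is 8 bytes
wide but only 4-byte aligned: since width and alignment differ, the macro can
no longer report "always lock free".

#include <stdio.h>

int main(void) {
  /* On the assumed i386 ABI these differ (8 vs. 4), so
     __GCC_ATOMIC_LLONG_LOCK_FREE drops from 2 to 1; on x86-64 they match
     and the macro stays 2. */
  printf("sizeof(long long)   = %zu\n", sizeof(long long));
  printf("_Alignof(long long) = %zu\n", _Alignof(long long));
  printf("__GCC_ATOMIC_LLONG_LOCK_FREE = %d\n", __GCC_ATOMIC_LLONG_LOCK_FREE);
  return 0;
}

This mirrors the new #ifdef __i386__ expectation in the test change below.
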
// CHECK: call i8* @__kmpc_omp_task_alloc(
// CHECK: call i32 @__kmpc_omp_task(
+#pragma omp task
+{
+#pragma omp cancellation point taskgroup
+}
+// CHECK: call i8* @__kmpc_omp_task_alloc(
+// CHECK: call i32 @__kmpc_omp_task(
#pragma omp parallel sections
{
{
// CHECK: [[RETURN]]
// CHECK: ret i32 0
+// CHECK: define internal i32 @{{[^(]+}}(i32
+// CHECK: [[RES:%.+]] = call i32 @__kmpc_cancellationpoint(%ident_t* {{[^,]+}}, i32 {{[^,]+}}, i32 4)
+// CHECK: [[CMP:%.+]] = icmp ne i32 [[RES]], 0
+// CHECK: br i1 [[CMP]], label %[[EXIT:[^,]+]],
+// CHECK: [[EXIT]]
+// CHECK: br label %[[RETURN:.+]]
+// CHECK: [[RETURN]]
+// CHECK: ret i32 0
+
// CHECK: define internal void @{{[^(]+}}(i32* {{[^,]+}}, i32* {{[^,]+}})
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: [[RES:%.+]] = call i32 @__kmpc_cancellationpoint(%ident_t* {{[^,]+}}, i32 [[GTID:%.+]], i32 3)
_Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 2, "");
+#ifdef __i386__
+_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 1, "");
+#else
_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 2, "");
+#endif
_Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == 2, "");
_Static_assert(__c11_atomic_is_lock_free(1), "");