// Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
// global_tid, kmp_int32 cncl_kind);
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
- // For 'cancellation point taskgroup', the task region info may not have a
- // cancel. This may instead happen in another adjacent task.
- if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
+ if (OMPRegionInfo->hasCancel()) {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
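      // Illustrative note (not part of the patch): getCancellationKind maps
      // the cancelled construct to the runtime's cncl_kind argument, i.e.
      // parallel -> 1, loop -> 2, sections -> 3, taskgroup -> 4. This is why
      // the tests below check for "i32 3" (sections) and, before this change,
      // "i32 4" (taskgroup).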
/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
/// the specified properties.
-static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
- unsigned InlineWidth) {
+static const char *getLockFreeValue(unsigned TypeWidth, unsigned InlineWidth) {
// Fully-aligned, power-of-2 sizes no larger than the inline
// width will be inlined as lock-free operations.
- if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
- TypeWidth <= InlineWidth)
+ // Note: we do not need to check alignment since _Atomic(T) is always
+ // appropriately-aligned in clang.
+ if ((TypeWidth & (TypeWidth - 1)) == 0 && TypeWidth <= InlineWidth)
return "2"; // "always lock free"
  // We cannot be certain what operations the lib calls might be
  // able to implement as lock-free on future processors.
  return "1"; // "sometimes lock free"
}
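// Illustrative (assumes a 64-bit max inline atomic width, as on x86): the
// dropped TypeWidth == TypeAlign test only mattered for types wider than
// their ABI alignment. On i386, long long has a width of 64 bits but an ABI
// alignment of 32, so the old check yielded "1"; since _Atomic(long long) is
// 64-bit aligned in clang, the new check returns "2":
//   getLockFreeValue(/*TypeWidth=*/64, /*InlineWidth=*/64) -> "2"
//   getLockFreeValue(/*TypeWidth=*/48, /*InlineWidth=*/64) -> "1" (not a power of 2)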
#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
getLockFreeValue(TI.get##Type##Width(), \
- TI.get##Type##Align(), \
InlineWidthBits));
DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
DEFINE_LOCK_FREE_MACRO(CHAR, Char);
DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
getLockFreeValue(TI.getPointerWidth(0),
- TI.getPointerAlign(0),
InlineWidthBits));
#undef DEFINE_LOCK_FREE_MACRO
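  // Example of the resulting predefines on a typical x86-64 target (a sketch;
  // the actual values depend on TargetInfo):
  //   #define __GCC_ATOMIC_LLONG_LOCK_FREE 2
  //   #define __GCC_ATOMIC_POINTER_LOCK_FREE 2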
}
}
// CHECK: call i8* @__kmpc_omp_task_alloc(
// CHECK: call i32 @__kmpc_omp_task(
-#pragma omp task
-{
-#pragma omp cancellation point taskgroup
-}
-// CHECK: call i8* @__kmpc_omp_task_alloc(
-// CHECK: call i32 @__kmpc_omp_task(
#pragma omp parallel sections
{
  {
#pragma omp cancellation point sections
  }
}
// CHECK: [[RETURN]]
// CHECK: ret i32 0
-// CHECK: define internal i32 @{{[^(]+}}(i32
-// CHECK: [[RES:%.+]] = call i32 @__kmpc_cancellationpoint(%ident_t* {{[^,]+}}, i32 {{[^,]+}}, i32 4)
-// CHECK: [[CMP:%.+]] = icmp ne i32 [[RES]], 0
-// CHECK: br i1 [[CMP]], label %[[EXIT:[^,]+]],
-// CHECK: [[EXIT]]
-// CHECK: br label %[[RETURN:.+]]
-// CHECK: [[RETURN]]
-// CHECK: ret i32 0
-
// CHECK: define internal void @{{[^(]+}}(i32* {{[^,]+}}, i32* {{[^,]+}})
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: [[RES:%.+]] = call i32 @__kmpc_cancellationpoint(%ident_t* {{[^,]+}}, i32 [[GTID:%.+]], i32 3)
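// Note (illustrative): i32 3 is the 'sections' cancellation kind; the block
// deleted above checked the corresponding 'taskgroup' kind, i32 4.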
_Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 2, "");
-#ifdef __i386__
-_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 1, "");
-#else
_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 2, "");
-#endif
_Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == 2, "");
_Static_assert(__c11_atomic_is_lock_free(1), "");
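// Sketch (assumes the 64-bit inline atomic width of the test target): the "2"
// macro values above should make the lock-free builtin fold to true for
// power-of-2 sizes up to 8 bytes.
_Static_assert(__c11_atomic_is_lock_free(2), "");
_Static_assert(__c11_atomic_is_lock_free(4), "");
_Static_assert(__c11_atomic_is_lock_free(8), "");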