OMPRTL_NVPTX__kmpc_get_team_static_memory,
/// Call to void __kmpc_restore_team_static_memory(int16_t is_shared);
OMPRTL_NVPTX__kmpc_restore_team_static_memory,
- // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
+ /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
OMPRTL__kmpc_barrier,
+ /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
+ /// global_tid);
+ OMPRTL__kmpc_barrier_simple_spmd,
};
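For context, a minimal sketch of what the matching device-side entry point could look like is given below; the body and helper names are assumed for illustration only, since the real definition is provided by the NVPTX device runtime (libomptarget) and may differ.

```cuda
// Illustrative sketch only (not the actual device RTL source): a plausible
// device-side definition matching the declaration above. Both parameters are
// accepted for ABI compatibility but ignored; the barrier simply synchronizes
// all threads in the block, like the llvm.nvvm.barrier0 intrinsic it stands
// in for on the compiler side.
#include <cstdint>

struct ident_t; // opaque source-location descriptor, unused here

extern "C" __device__ void __kmpc_barrier_simple_spmd(ident_t * /*loc*/,
                                                      int32_t /*global_tid*/) {
  __syncthreads(); // CTA-wide barrier
}
```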
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
"nvptx_num_threads");
}
-/// Get barrier to synchronize all threads in a block.
-static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
- llvm::Function *F = llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0);
- F->addFnAttr(llvm::Attribute::Convergent);
- CGF.EmitRuntimeCall(F);
-}
-
-/// Synchronize all GPU threads in a block.
-static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }
-
/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
cast<llvm::Function>(RTLFn)->addFnAttr(llvm::Attribute::Convergent);
break;
}
+ case OMPRTL__kmpc_barrier_simple_spmd: {
+ // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn =
+ CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
+ cast<llvm::Function>(RTLFn)->addFnAttr(llvm::Attribute::Convergent);
+ break;
+ }
}
return RTLFn;
}
}
}
+void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
+ // Always emit simple barriers!
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
+ // This function does not use parameters, so we can emit just default values.
+ llvm::Value *Args[] = {
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(getIdentTyPointerTy())),
+ llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
+}
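With a null location descriptor and a zero thread id, the call emitted here takes the form call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) in the generated IR, which is the pattern the FileCheck lines in the tests below match.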
+
void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind Kind, bool,
bool requiresFullRuntime() const { return RequiresFullRuntime; }
+ /// Synchronize all GPU threads in a block.
+ void syncCTAThreads(CodeGenFunction &CGF);
+
/// Emit the worker function for the current target region.
void emitWorkerFunction(WorkerFunctionState &WST);
/// ========= In the worker function ========= ///
// CK1: {{.*}}define internal void @__omp_offloading{{.*}}test_ds{{.*}}_worker()
-// CK1: call void @llvm.nvvm.barrier0()
+// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CK1-NOT: call void @__kmpc_data_sharing_init_stack
/// ========= In the kernel function ========= ///
// CK1: [[SHARGSTMP2:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP1]], i64 0
// CK1: [[SHAREDVAR:%.+]] = bitcast i32* [[A]] to i8*
// CK1: store i8* [[SHAREDVAR]], i8** [[SHARGSTMP2]]
-// CK1: call void @llvm.nvvm.barrier0()
-// CK1: call void @llvm.nvvm.barrier0()
+// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CK1: call void @__kmpc_end_sharing_variables()
// CK1: store i32 100, i32* [[B]]
// CK1: call void @__kmpc_kernel_prepare_parallel({{.*}}, i16 1)
// CK1: [[SHARGSTMP12:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 1
// CK1: [[SHAREDVAR2:%.+]] = bitcast i32* [[A]] to i8*
// CK1: store i8* [[SHAREDVAR2]], i8** [[SHARGSTMP12]]
-// CK1: call void @llvm.nvvm.barrier0()
-// CK1: call void @llvm.nvvm.barrier0()
+// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CK1: call void @__kmpc_end_sharing_variables()
// CK1: [[SHARED_MEM_FLAG:%.+]] = load i16, i16* [[KERNEL_SHARED]],
// CK1: call void @__kmpc_restore_team_static_memory(i16 [[SHARED_MEM_FLAG]])
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[KPR:%.+]] = call i1 @__kmpc_kernel_parallel(i8** [[OMP_WORK_FN]]
// CHECK: [[KPRB:%.+]] = zext i1 [[KPR]] to i8
// store i8 [[KPRB]], i8* [[OMP_EXEC_STATUS]], align 1
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
// CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
// CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
// CHECK: call void @__kmpc_kernel_prepare_parallel(i8* bitcast (void (i16, i32)* [[PARALLEL_FN1]]_wrapper to i8*),
-// CHECK: call void @llvm.nvvm.barrier0()
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: call void @__kmpc_serialized_parallel(
// CHECK: {{call|invoke}} void [[PARALLEL_FN3:@.+]](
// CHECK: call void @__kmpc_end_serialized_parallel(
// CHECK: call void @__kmpc_kernel_prepare_parallel(i8* bitcast (void (i16, i32)* [[PARALLEL_FN2]]_wrapper to i8*),
-// CHECK: call void @llvm.nvvm.barrier0()
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK-64-DAG: load i32, i32* [[REF_A]]
// CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
// CHECK: br label {{%?}}[[TERMINATE:.+]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[KPR:%.+]] = call i1 @__kmpc_kernel_parallel(i8** [[OMP_WORK_FN]],
// CHECK: [[KPRB:%.+]] = zext i1 [[KPR]] to i8
// store i8 [[KPRB]], i8* [[OMP_EXEC_STATUS]], align 1
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[IF_THEN]]
// CHECK: call void @__kmpc_kernel_prepare_parallel(i8* bitcast (void (i16, i32)* [[PARALLEL_FN4]]_wrapper to i8*),
-// CHECK: call void @llvm.nvvm.barrier0()
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[IF_END:.+]]
//
// CHECK: [[IF_ELSE]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK-DAG: [[KERNEL_SHARED:@.+]] = internal unnamed_addr constant i16 1
// CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l12}}_worker()
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: call i1 @__kmpc_kernel_parallel(
// CHECK: call void @__omp_outlined___wrapper(
// CHECK: [[STACK:%.+]] = getelementptr inbounds i8, i8* [[KERNEL_RD]], i{{64|32}} 0
// CHECK: call void @__kmpc_kernel_prepare_parallel(
// CHECK: call void @__kmpc_begin_sharing_variables({{.*}}, i64 2)
-// CHECK: call void @llvm.nvvm.barrier0()
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: call void @__kmpc_end_sharing_variables()
// CHECK: [[IS_SHARED:%.+]] = load i16, i16* [[KERNEL_SHARED]],
// CHECK: call void @__kmpc_restore_team_static_memory(i16 [[IS_SHARED]])
// CHECK: [[ARG_PTR2:%.+]] = getelementptr inbounds i8*, i8** [[ARG_PTRS]], i{{[0-9]+}} 1
// CHECK: [[BC:%.+]] = bitcast i32** [[PTR2_REF]] to i8*
// CHECK: store i8* [[BC]], i8** [[ARG_PTR2]],
-// CHECK: call void @llvm.nvvm.barrier0()
-// CHECK: call void @llvm.nvvm.barrier0()
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: call void @__kmpc_end_sharing_variables()
void targetBar(int *Ptr1, int *Ptr2) {
#pragma omp target map(Ptr1[:0], Ptr2)
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: [[REF:%.+]] = getelementptr inbounds i8*, i8** [[SHARED]], i{{64|32}} 0
// CHECK: [[F_REF:%.+]] = bitcast i32* [[F_PTR]] to i8*
// CHECK: store i8* [[F_REF]], i8** [[REF]],
- // CHECK: call void @llvm.nvvm.barrier0()
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: call void @__kmpc_end_sharing_variables()
// CHECK: br label
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
// CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
// CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[KPR:%.+]] = call i1 @__kmpc_kernel_parallel(i8** [[OMP_WORK_FN]], i16 1)
// CHECK: [[KPRB:%.+]] = zext i1 [[KPR]] to i8
// store i8 [[KPRB]], i8* [[OMP_EXEC_STATUS]], align 1
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
//
// CHECK: [[AWAIT_WORK]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: [[KPR:%.+]] = call i1 @__kmpc_kernel_parallel(i8** [[OMP_WORK_FN]], i16 1)
// CHECK: [[KPRB:%.+]] = zext i1 [[KPR]] to i8
// store i8 [[KPRB]], i8* [[OMP_EXEC_STATUS]], align 1
// CHECK: br label {{%?}}[[BAR_PARALLEL]]
//
// CHECK: [[BAR_PARALLEL]]
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[AWAIT_WORK]]
//
// CHECK: [[EXIT]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]
// CHECK: [[SHARED_VARS_BUF:%.+]] = load i8**, i8*** [[SHARED_VARS_PTR]],
// CHECK: [[I_ADDR_BC:%.+]] = bitcast i32* [[I_ADDR]] to i8*
// CHECK: store i8* [[I_ADDR_BC]], i8** [[SHARED_VARS_BUF]],
- // CHECK: call void @llvm.nvvm.barrier0()
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: call void @__kmpc_end_sharing_variables()
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: br label {{%?}}[[TERMINATE:.+]]
//
// CHECK: [[TERMINATE]]
// CHECK: call void @__kmpc_kernel_deinit(
- // CHECK: call void @llvm.nvvm.barrier0()
+ // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CHECK: br label {{%?}}[[EXIT]]
//
// CHECK: [[EXIT]]