/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
-class ExecutionModeRAII {
+class ExecutionRuntimeModesRAII {
private:
-  CGOpenMPRuntimeNVPTX::ExecutionMode SavedMode;
-  CGOpenMPRuntimeNVPTX::ExecutionMode &Mode;
+  CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode =
+      CGOpenMPRuntimeNVPTX::EM_Unknown;
+  CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode;
+  bool SavedRuntimeMode = false;
+  bool *RuntimeMode = nullptr;
public:
-  ExecutionModeRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &Mode, bool IsSPMD)
-      : Mode(Mode) {
-    SavedMode = Mode;
-    Mode = IsSPMD ? CGOpenMPRuntimeNVPTX::EM_SPMD
-                  : CGOpenMPRuntimeNVPTX::EM_NonSPMD;
+  /// Constructor for Non-SPMD mode.
+  ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode)
+      : ExecMode(ExecMode) {
+    SavedExecMode = ExecMode;
+    ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD;
+  }
+  /// Constructor for SPMD mode.
+  ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode,
+                            bool &RuntimeMode, bool FullRuntimeMode)
+      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
+    SavedExecMode = ExecMode;
+    SavedRuntimeMode = RuntimeMode;
+    ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD;
+    RuntimeMode = FullRuntimeMode;
+  }
+  ~ExecutionRuntimeModesRAII() {
+    ExecMode = SavedExecMode;
+    if (RuntimeMode)
+      *RuntimeMode = SavedRuntimeMode;
  }
-  ~ExecutionModeRAII() { Mode = SavedMode; }
};
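The guard follows the usual RAII pattern: the previous execution mode (and, for SPMD entries, the previous runtime-mode flag) is captured in the constructor and written back in the destructor, so nested emission and early exits always see a consistent state. A minimal standalone sketch of that pattern, using hypothetical names that are not part of this patch:

    #include <cassert>

    enum ExecutionMode { EM_SPMD, EM_NonSPMD, EM_Unknown };
    ExecutionMode CurrentMode = EM_Unknown;

    struct ModeGuard {                       // hypothetical stand-in for the RAII class above
      ExecutionMode &Mode;
      ExecutionMode Saved;
      ModeGuard(ExecutionMode &M, ExecutionMode New) : Mode(M), Saved(M) { M = New; }
      ~ModeGuard() { Mode = Saved; }         // restore on scope exit
    };

    void emitKernel() {
      ModeGuard Guard(CurrentMode, EM_SPMD); // this region is emitted in SPMD mode
      // ... nested directives consult CurrentMode here ...
    }

    int main() {
      emitKernel();
      assert(CurrentMode == EM_Unknown && "mode restored after the region");
    }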
/// GPU Configuration: This information can be derived from cuda registers,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
-  ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/false);
+  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
EntryFunctionState EST;
WorkerFunctionState WST(CGM, D.getBeginLoc());
Work.clear();
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
-  ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/true);
+  ExecutionRuntimeModesRAII ModeRAII(
+      CurrentExecutionMode, RequiresFullRuntime,
+      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
+          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;
  // Emit target region as a standalone region.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");
-  // Initialize the OMP state in the runtime; called by all active threads.
-  bool RequiresFullRuntime = CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
-                             !supportsLightweightRuntime(CGF.getContext(), D);
  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/
                         Bld.getInt16(RequiresFullRuntime ? 1 : 0),
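As the inline comment indicates, RequiresFullRuntime feeds the RequiresOMPRuntime argument of __kmpc_spmd_kernel_init, which the updated tests below expect as `i16 0` (lightweight runtime) or `i16 1` (full runtime). A hedged sketch of the decision with hypothetical parameter names (the real predicate is supportsLightweightRuntime in this file):

    // Sketch only: mirrors the condition passed to ModeRAII above.
    static bool computeRequiresFullRuntime(bool ForceFullRuntime /* -fopenmp-cuda-force-full-runtime */,
                                           bool DirectiveSupportsLightweightRT) {
      // Full runtime is required when forced by the user or when the target
      // directive cannot run on the simplified (lightweight) runtime.
      return ForceFullRuntime || !DirectiveSupportsLightweightRT;
    }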
} // anonymous namespace
unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
-  return UndefinedMode;
+  switch (getExecutionMode()) {
+  case EM_SPMD:
+    if (requiresFullRuntime())
+      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
+    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
+  case EM_NonSPMD:
+    assert(requiresFullRuntime() && "Expected full runtime.");
+    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
+  case EM_Unknown:
+    return UndefinedMode;
+  }
+  llvm_unreachable("Unknown flags are requested.");
}
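For reference, with the bit values assumed here (KMP_IDENT_SPMD_MODE = 0x01, KMP_IDENT_SIMPLE_RT_MODE = 0x02, and bitwise-not masked to those two bits, as with an LLVM bitmask enum), the switch above yields reserved_2 = 3 for SPMD with the lightweight runtime, 1 for SPMD with the full runtime, 0 for non-SPMD, and 2 (UndefinedMode) when the mode is unknown; these match the third i32 field of the ident_t constants checked in the tests below. A small self-contained check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    // Bit values assumed for illustration; they are consistent with the
    // reserved_2 constants (3, 1, 0, 2) expected by the tests below.
    constexpr uint32_t SPMD = 0x01;      // KMP_IDENT_SPMD_MODE
    constexpr uint32_t SIMPLE_RT = 0x02; // KMP_IDENT_SIMPLE_RT_MODE
    constexpr uint32_t MASK = SPMD | SIMPLE_RT;

    int main() {
      assert(((SPMD | SIMPLE_RT) & MASK) == 3);            // SPMD, lightweight runtime
      assert((SPMD & (~SIMPLE_RT & MASK)) == 1);           // SPMD, full runtime
      assert(((~SPMD & MASK) & (~SIMPLE_RT & MASK)) == 0); // non-SPMD, full runtime
      assert(((~SPMD & MASK) & SIMPLE_RT) == 2);           // UndefinedMode (unknown)
    }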
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
#define HEADER
// CHECK-NOT: @__omp_offloading_{{.+}}_exec_mode = weak constant i8 1
-// CHECK-DAG: private unnamed_addr constant %struct.ident_t { i32 0, i32 2050, i32 2, i32 0, i8* getelementptr inbounds
-// CHECK-DAG: private unnamed_addr constant %struct.ident_t { i32 0, i32 514, i32 2, i32 0, i8* getelementptr inbounds
-// CHECK-DAG: private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, i8* getelementptr inbounds
-// CHECK-DAG: private unnamed_addr constant %struct.ident_t { i32 0, i32 66, i32 2, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: [[DISTR_LIGHT:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2050, i32 3, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: [[FOR_LIGHT:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 514, i32 3, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: [[LIGHT:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 3, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: [[DISTR_FULL:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2050, i32 1, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: [[FULL:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 1, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: [[BAR_LIGHT:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 66, i32 3, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: [[BAR_FULL:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 66, i32 1, i32 0, i8* getelementptr inbounds
// CHECK-NOT: @__omp_offloading_{{.+}}_exec_mode = weak constant i8 1
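For readers decoding these literals: the third i32 of each ident_t constant is the reserved_2 field that now carries the mode bits (3 = SPMD + lightweight runtime, 1 = SPMD + full runtime), whereas the old checks above all had reserved_2 == 2 (UndefinedMode). The second i32 (2050, 514, 66, 2) is the pre-existing flags field, which this patch leaves unchanged. A sketch of the layout as assumed by these checks (field names follow the OpenMP runtime's ident_t):

    #include <cstdint>

    // Layout assumed by the checks above, e.g.
    //   { i32 0, i32 2050, i32 3, i32 0, i8* getelementptr inbounds ... }
    struct ident_t {
      int32_t reserved_1;  // i32 0
      int32_t flags;       // 2050 / 514 / 66 / 2: existing kmpc flags, unchanged here
      int32_t reserved_2;  // 3 = SPMD + lightweight RT, 1 = SPMD + full RT
      int32_t reserved_3;  // i32 0
      const char *psource; // pointer to the ";file;function;line;column;;" location string
    };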
void foo() {
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < 10; ++i)
;
;
int a;
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
#pragma omp target teams distribute parallel for lastprivate(a)
for (int i = 0; i < 10; ++i)
a = i;
for (int i = 0; i < 10; ++i)
;
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
#pragma omp target teams
{
int b;
for (int i = 0; i < 10; ++i)
;
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
#pragma omp target teams
#pragma omp distribute parallel for
for (int i = 0; i < 10; ++i)
for (int i = 0; i < 10; ++i)
;
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[DISTR_LIGHT]]
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[DISTR_FULL]]
+// CHECK-DAG: [[FULL]]
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for
for (int i = 0; i < 10; ++i)
;
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
#pragma omp target parallel for
for (int i = 0; i < 10; ++i)
;
for (int i = 0; i < 10; ++i)
;
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
+// CHECK-DAG: [[BAR_LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
+// CHECK-DAG: [[BAR_LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
+// CHECK-DAG: [[BAR_LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
#pragma omp target parallel
#pragma omp for simd
for (int i = 0; i < 10; ++i)
for (int i = 0; i < 10; ++i)
;
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
+// CHECK-DAG: [[BAR_LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
+// CHECK-DAG: [[BAR_LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
+// CHECK-DAG: [[BAR_FULL]]
#pragma omp target
#pragma omp parallel
#pragma omp for simd ordered
for (int i = 0; i < 10; ++i)
;
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 0, i16 0)
+// CHECK-DAG: [[FOR_LIGHT]]
+// CHECK-DAG: [[LIGHT]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
// CHECK: call void @__kmpc_spmd_kernel_init(i32 {{.+}}, i16 1, i16 {{.+}})
+// CHECK-DAG: [[FULL]]
#pragma omp target
#pragma omp parallel for
for (int i = 0; i < 10; ++i)
#define HEADER
// Check that the execution mode of all 6 target regions is set to Generic Mode.
-// CHECK-DAG: {{@__omp_offloading_.+l103}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l180}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l290}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l328}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l346}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l311}}_exec_mode = weak constant i8 1
+// CHECK-DAG: [[NONSPMD:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: [[UNKNOWN:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, i8* getelementptr inbounds
+// CHECK-DAG: {{@__omp_offloading_.+l105}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l182}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l292}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l330}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l348}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l313}}_exec_mode = weak constant i8 1
__thread int id;
double cn[5][n];
TT<long long, char> d;
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l103}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l105}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l103]]()
+ // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l105]]()
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
{
}
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l180}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l182}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l180]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]], i[[SZ:32|64]] [[ID:%[a-zA-Z_]+]])
+ // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l182]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]], i[[SZ:32|64]] [[ID:%[a-zA-Z_]+]])
// CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]],
// CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]],
// CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
id = aa;
}
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l290}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l292}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l290]](i[[SZ]]
+ // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l292]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_B:%.+]] = alloca [10 x float]*
return f;
}
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+328}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+330}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l328]](i[[SZ]]
+ // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l330]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l346}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l348}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
- // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t*
+ // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[NONSPMD]]
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
// CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l346]](
+ // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l348]](
// Create local storage for each capture.
// CHECK: [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]*
// CHECK: [[LOCAL_B:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_F_PTR:%.+]] = alloca i32,
// CHECK: [[ZERO_ADDR:%.+]] = alloca i32,
// CHECK: store i32 0, i32* [[ZERO_ADDR]]
- // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t*
- // CHECK: [[PAR_LEVEL:%.+]] = call i16 @__kmpc_parallel_level(%struct.ident_t* @0, i32 [[GTID]])
+ // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[UNKNOWN]]
+ // CHECK: [[PAR_LEVEL:%.+]] = call i16 @__kmpc_parallel_level(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]])
// CHECK: [[IS_TTD:%.+]] = icmp eq i16 %1, 0
// CHECK: [[RES:%.+]] = call i8 @__kmpc_is_spmd_exec_mode()
// CHECK: [[IS_SPMD:%.+]] = icmp ne i8 [[RES]], 0
// CHECK: icmp ne i8 [[RES]], 0
// CHECK: br i1
- // CHECK: [[RES:%.+]] = call i16 @__kmpc_parallel_level(%struct.ident_t* @{{.+}}, i32 [[GTID]])
+ // CHECK: [[RES:%.+]] = call i16 @__kmpc_parallel_level(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]])
// CHECK: icmp ne i16 [[RES]], 0
// CHECK: br i1
- // CHECK: call void @__kmpc_serialized_parallel(%struct.ident_t* @{{.+}}, i32 [[GTID]])
+ // CHECK: call void @__kmpc_serialized_parallel(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]])
// CHECK: call void [[OUTLINED:@.+]](i32* [[ZERO_ADDR]], i32* [[ZERO_ADDR]], i32* [[F_PTR]], double* %{{.+}})
- // CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @{{.+}}, i32 [[GTID]])
+ // CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]])
// CHECK: br label
// CHECK: call void @__kmpc_kernel_prepare_parallel(i8* bitcast (void (i16, i32)* @{{.+}} to i8*), i16 1)
// CHECK: ret i32 [[RES]]
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l311}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l313}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l311]](i[[SZ]]
+ // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l313]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]