diag::err_thread_non_global)
<< DeclSpec::getSpecifierName(TSCS);
else if (!Context.getTargetInfo().isTLSSupported()) {
- if (getLangOpts().CUDA) {
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
// Postpone error emission until we've collected attributes required to
// figure out whether it's a host or device variable and whether the
// error should be ignored.
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
- if (getLangOpts().CUDA) {
- if (EmitTLSUnsupportedError && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD))
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
+ if (EmitTLSUnsupportedError &&
+ ((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
+ (getLangOpts().OpenMPIsDevice &&
+ NewVD->hasAttr<OMPDeclareTargetDeclAttr>())))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
// CUDA B.2.5: "__shared__ and __constant__ variables have implied static
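The effect of the new condition, stated informally: during the OpenMP device compilation pass for a target without TLS support (such as NVPTX), err_thread_unsupported is deferred until attributes have been processed and is only raised for variables that are actually device variables, i.e. those carrying OMPDeclareTargetDeclAttr, mirroring the existing CUDA-side check. A minimal sketch of the two cases (hypothetical example, not taken from the patch; the exact diagnostic placement depends on when the declare-target attribute is attached):

// Hypothetical translation unit, compiled roughly as:
//   clang++ -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda example.cpp
__thread int host_tls;        // host-only TLS: no longer rejected by the device
                              // pass, since it has no OMPDeclareTargetDeclAttr
#pragma omp declare target
thread_local int device_tls;  // still expected to trigger err_thread_unsupported
                              // ("thread-local storage is not supported for the
                              // current target") when the device lacks TLS
#pragma omp end declare target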
#define HEADER
// Check that the execution mode of all 6 target regions is set to Generic Mode.
-// CHECK-DAG: {{@__omp_offloading_.+l98}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l175}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l284}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l321}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l339}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l304}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l100}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l177}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l287}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l324}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l342}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l307}}_exec_mode = weak constant i8 1
+
+__thread int id;
template<typename tx, typename ty>
struct TT{
double cn[5][n];
TT<long long, char> d;
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l98}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l100}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l98]]()
+ // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l100]]()
// CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
// CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
// CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
{
}
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l175}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l177}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l175]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]])
+ // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l177]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]], i[[SZ:32|64]] [[ID:%[a-zA-Z_]+]])
// CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]],
// CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]],
// CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
#pragma omp target if(1)
{
aa += 1;
+ id = aa;
}
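The extra [[ID]] parameter checked in the T2 signature above is the observable effect of the new __thread int id global: it is referenced inside the target region but is not a declare-target variable, so it is captured like any other scalar and forwarded to the offload entry as an additional by-value argument. A stand-alone sketch of that pattern (hypothetical names; assumes the default implicit firstprivate capture of scalars in a target region):

__thread int tls_id;   // plays the role of 'id' in the test
void bump() {
  short aa = 0;
#pragma omp target if(1)
  {
    aa += 1;
    tls_id = aa;       // updates the captured device-side copy of tls_id
  }
}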
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l284}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l287}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l284]](i[[SZ]]
+ // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l287]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_B:%.+]] = alloca [10 x float]*
return a;
}
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+321}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+l324}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l321]](i[[SZ]]
+ // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l324]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l339}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l342}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l339]](
+ // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l342]](
// Create local storage for each capture.
// CHECK: [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]*
// CHECK: [[LOCAL_B:%.+]] = alloca i[[SZ]]
- // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l304}}_worker()
+ // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l307}}_worker()
// CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
// CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
// CHECK: store i8* null, i8** [[OMP_WORK_FN]],
// CHECK: [[EXIT]]
// CHECK: ret void
- // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l304]](i[[SZ]]
+ // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l307]](i[[SZ]]
// Create local storage for each capture.
// CHECK: [[LOCAL_A:%.+]] = alloca i[[SZ]]
// CHECK: [[LOCAL_AA:%.+]] = alloca i[[SZ]]