}
if (T->isVariableArrayType() && !Context.getTargetInfo().isVLASupported()) {
- if (getLangOpts().CUDA) {
- // CUDA device code doesn't support VLAs.
- CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget();
- } else if (!getLangOpts().OpenMP ||
- shouldDiagnoseTargetSupportFromOpenMP()) {
- // Some targets don't support VLAs.
- Diag(Loc, diag::err_vla_unsupported);
- return QualType();
- }
+ // CUDA device code and some other targets don't support VLAs.
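+    // Note: targetDiag defers this diagnostic in CUDA/OpenMP device
+    // compilations until the enclosing function is known to be emitted
+    // for the device; on other targets it is emitted immediately.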
+    bool IsCUDADevice = (getLangOpts().CUDA && getLangOpts().CUDAIsDevice);
+    targetDiag(Loc,
+               IsCUDADevice ? diag::err_cuda_vla : diag::err_vla_unsupported)
+        << (IsCUDADevice ? CurrentCUDATarget() : CFT_InvalidTarget);
}
// If this is not C99, extwarn about VLA's and C99 array size modifiers.
#pragma omp target
{
#ifdef NO_VLA
- // expected-error@+2 {{variable length arrays are not supported for the current target}}
+ // expected-error@+2 2 {{variable length arrays are not supported for the current target}}
#endif
T vla[arg];
}
}
}
+#ifdef NO_VLA
+ // expected-note@+2 {{in instantiation of function template specialization 'target_template<long>' requested here}}
+#endif
target_template<long>(arg);
}
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -verify %s
-// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -verify -DHOST %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -verify -DHOST %s
+
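+// When compiling for the host with an x86_64 triple, VLAs are supported,
+// so no diagnostics are expected.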
+#ifndef __CUDA_ARCH__
+// expected-no-diagnostics
+#endif
#include "Inputs/cuda.h"
}
__device__ void device(int n) {
- int x[n]; // expected-error {{cannot use variable-length arrays in __device__ functions}}
+ int x[n];
+#ifdef __CUDA_ARCH__
+ // expected-error@-2 {{cannot use variable-length arrays in __device__ functions}}
+#endif
}
__host__ __device__ void hd(int n) {