From 248e5bf6ef4ecf292928b99bf755b26b6ebd3c6f Mon Sep 17 00:00:00 2001
From: Yaxun Liu
Date: Tue, 29 Jan 2019 13:20:23 +0000
Subject: [PATCH] [CUDA][HIP] Do not diagnose use of _Float16

r352221 caused regressions in CUDA/HIP, since a device function may use
_Float16 even when the host does not support it. In that case, host
compilation should not diagnose the use of _Float16 in device functions
or variables.

For now, just do not diagnose _Float16 for CUDA/HIP. In the future we
should have a more precise check.

Differential Revision: https://reviews.llvm.org/D57369

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@352488 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Lex/LiteralSupport.cpp | 7 +++++--
 lib/Sema/SemaType.cpp      | 5 ++++-
 test/SemaCUDA/float16.cu   | 7 +++++++
 3 files changed, 16 insertions(+), 3 deletions(-)
 create mode 100644 test/SemaCUDA/float16.cu

diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index b9ee3190f5..2108408377 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -616,8 +616,11 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
       if (isHalf || isFloat || isLong || isFloat128)
         break; // HF, FF, LF, QF invalid.
 
-      if (PP.getTargetInfo().hasFloat16Type() && s + 2 < ThisTokEnd &&
-          s[1] == '1' && s[2] == '6') {
+      // CUDA host and device may have different _Float16 support, therefore
+      // allows f16 literals to avoid false alarm.
+      // ToDo: more precise check for CUDA.
+      if ((PP.getTargetInfo().hasFloat16Type() || PP.getLangOpts().CUDA) &&
+          s + 2 < ThisTokEnd && s[1] == '1' && s[2] == '6') {
         s += 2; // success, eat up 2 characters.
         isFloat16 = true;
         continue;
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index 321bed63d9..090d9431be 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -1442,7 +1442,10 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
     Result = Context.Int128Ty;
     break;
   case DeclSpec::TST_float16:
-    if (!S.Context.getTargetInfo().hasFloat16Type())
+    // CUDA host and device may have different _Float16 support, therefore
+    // do not diagnose _Float16 usage to avoid false alarm.
+    // ToDo: more precise diagnostics for CUDA.
+    if (!S.Context.getTargetInfo().hasFloat16Type() && !S.getLangOpts().CUDA)
       S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
         << "_Float16";
     Result = Context.Float16Ty;
diff --git a/test/SemaCUDA/float16.cu b/test/SemaCUDA/float16.cu
new file mode 100644
index 0000000000..a9cbe87f32
--- /dev/null
+++ b/test/SemaCUDA/float16.cu
@@ -0,0 +1,7 @@
+// RUN: %clang_cc1 -fsyntax-only -triple x86_64 -aux-triple amdgcn -verify %s
+// expected-no-diagnostics
+#include "Inputs/cuda.h"
+
+__device__ void f(_Float16 x);
+
+__device__ _Float16 x = 1.0f16;
-- 
2.40.0
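
For illustration, a minimal sketch (hypothetical file, not part of the
patch) of the pattern this change unblocks: _Float16 used only in device
code, compiled for a host triple without _Float16 support and an amdgcn
device, mirroring the -triple/-aux-triple setup of the new test. The
file and function names below are made up; only _Float16, the f16
literal suffix, __device__, and Inputs/cuda.h come from the patch.

    // sketch.cu -- hypothetical example, assumes the Inputs/cuda.h
    // shim that defines the __device__ attribute macro.
    #include "Inputs/cuda.h"

    // With this patch, host-side Sema no longer emits
    // err_type_unsupported for these device-only uses of _Float16,
    // and the literal parser accepts the f16 suffix under CUDA.
    __device__ _Float16 g = 0.5f16;

    __device__ _Float16 twice(_Float16 v) {
      return v + v;  // arithmetic only ever lowered for the device
    }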