From f69b9c5a06575236f75d3e543efe43db22de4c3c Mon Sep 17 00:00:00 2001
From: Carey Williams
Date: Fri, 22 Mar 2019 16:20:45 +0000
Subject: [PATCH] [ARM] Fix bug 39982 - pcs("aapcs-vfp") is not consistent

Correctly handle homogeneous aggregates when a function's ABI is
specified via the pcs attribute.

Bug: https://bugs.llvm.org/show_bug.cgi?id=39982
Differential Revision: https://reviews.llvm.org/D59094

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@356776 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/TargetInfo.cpp  | 52 +++++++++++++++++++++++++------------
 test/CodeGenCXX/arm-pcs.cpp | 51 ++++++++++++++++++++++++++++++++++++
 2 files changed, 87 insertions(+), 16 deletions(-)
 create mode 100644 test/CodeGenCXX/arm-pcs.cpp

diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index f48f19966e..d9bbf594ed 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -5597,8 +5597,10 @@ public:
   ABIKind getABIKind() const { return Kind; }
 
 private:
-  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
-  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
+  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
+                                unsigned functionCallConv) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
+                                  unsigned functionCallConv) const;
   ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                           uint64_t Members) const;
   ABIArgInfo coerceIllegalVector(QualType Ty) const;
@@ -5608,6 +5610,8 @@ private:
   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                          uint64_t Members) const override;
 
+  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
+
   void computeInfo(CGFunctionInfo &FI) const override;
 
   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -5728,11 +5732,13 @@ void WindowsARMTargetCodeGenInfo::setTargetAttributes(
 
 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
   if (!::classifyReturnType(getCXXABI(), FI, *this))
-    FI.getReturnInfo() =
-        classifyReturnType(FI.getReturnType(), FI.isVariadic());
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
+                                            FI.getCallingConvention());
 
   for (auto &I : FI.arguments())
-    I.info = classifyArgumentType(I.type, FI.isVariadic());
+    I.info = classifyArgumentType(I.type, FI.isVariadic(),
+                                  FI.getCallingConvention());
+
 
   // Always honor user-specified calling convention.
   if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -5811,8 +5817,8 @@ ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
   return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
 }
 
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
-                                            bool isVariadic) const {
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
+                                            unsigned functionCallConv) const {
   // 6.1.2.1 The following argument types are VFP CPRCs:
   //   A single-precision floating-point type (including promoted
   //   half-precision types); A double-precision floating-point type;
@@ -5820,7 +5826,9 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
   //   with a Base Type of a single- or double-precision floating-point type,
   //   64-bit containerized vectors or 128-bit containerized vectors with one
   //   to four Elements.
-  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
+  // Variadic functions should always marshal to the base standard.
+  bool IsAAPCS_VFP =
+      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
 
   Ty = useFirstFieldIfTransparentUnion(Ty);
 
@@ -5833,7 +5841,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
   // half type natively, and does not need to interwork with AAPCS code.
   if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
       !getContext().getLangOpts().NativeHalfArgsAndReturns) {
-    llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+    llvm::Type *ResType = IsAAPCS_VFP ?
       llvm::Type::getFloatTy(getVMContext()) :
       llvm::Type::getInt32Ty(getVMContext());
     return ABIArgInfo::getDirect(ResType);
@@ -5857,7 +5865,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
   if (isEmptyRecord(getContext(), Ty, true))
     return ABIArgInfo::getIgnore();
 
-  if (IsEffectivelyAAPCS_VFP) {
+  if (IsAAPCS_VFP) {
     // Homogeneous Aggregates need to be expanded when we can fit the aggregate
     // into VFP registers.
     const Type *Base = nullptr;
@@ -6014,10 +6022,12 @@ static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
   return true;
 }
 
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
-                                          bool isVariadic) const {
-  bool IsEffectivelyAAPCS_VFP =
-      (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
+                                          unsigned functionCallConv) const {
+
+  // Variadic functions should always marshal to the base standard.
+  bool IsAAPCS_VFP =
+      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
 
   if (RetTy->isVoidType())
     return ABIArgInfo::getIgnore();
@@ -6038,7 +6048,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
   // half type natively, and does not need to interwork with AAPCS code.
   if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
       !getContext().getLangOpts().NativeHalfArgsAndReturns) {
-    llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+    llvm::Type *ResType = IsAAPCS_VFP ?
       llvm::Type::getFloatTy(getVMContext()) :
       llvm::Type::getInt32Ty(getVMContext());
     return ABIArgInfo::getDirect(ResType);
@@ -6087,7 +6097,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
     return ABIArgInfo::getIgnore();
 
   // Check for homogeneous aggregates with AAPCS-VFP.
-  if (IsEffectivelyAAPCS_VFP) {
+  if (IsAAPCS_VFP) {
     const Type *Base = nullptr;
     uint64_t Members = 0;
     if (isHomogeneousAggregate(RetTy, Base, Members))
@@ -6192,6 +6202,16 @@ bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
   return Members <= 4;
 }
 
+bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
+                                        bool acceptHalf) const {
+  // Give precedence to user-specified calling conventions.
+  if (callConvention != llvm::CallingConv::C)
+    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
+  else
+    return (getABIKind() == AAPCS_VFP) ||
+           (acceptHalf && (getABIKind() == AAPCS16_VFP));
+}
+
 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType Ty) const {
   CharUnits SlotSize = CharUnits::fromQuantity(4);
diff --git a/test/CodeGenCXX/arm-pcs.cpp b/test/CodeGenCXX/arm-pcs.cpp
new file mode 100644
index 0000000000..1d327d794b
--- /dev/null
+++ b/test/CodeGenCXX/arm-pcs.cpp
@@ -0,0 +1,51 @@
+// Covers a bug fix for ABI selection with homogenous aggregates:
+// See: https://bugs.llvm.org/show_bug.cgi?id=39982
+
+// REQUIRES: arm-registered-target
+// RUN: %clang -mfloat-abi=hard --target=armv7-unknown-linux-gnueabi -O3 -S -o - %s | FileCheck %s -check-prefixes=HARD,CHECK
+// RUN: %clang -mfloat-abi=softfp --target=armv7-unknown-linux-gnueabi -O3 -S -o - %s | FileCheck %s -check-prefixes=SOFTFP,CHECK
+// RUN: %clang -mfloat-abi=soft --target=armv7-unknown-linux-gnueabi -O3 -S -o - %s | FileCheck %s -check-prefixes=SOFT,CHECK
+
+struct S {
+  float f;
+  float d;
+  float c;
+  float t;
+};
+
+// Variadic functions should always marshal for the base standard.
+// See section 5.5 (Parameter Passing) of the AAPCS.
+float __attribute__((pcs("aapcs-vfp"))) variadic(S s, ...) {
+  // CHECK-NOT: vmov s{{[0-9]+}}, s{{[0-9]+}}
+  // CHECK: mov r{{[0-9]+}}, r{{[0-9]+}}
+  return s.d;
+}
+
+float no_attribute(S s) {
+  // SOFT: mov r{{[0-9]+}}, r{{[0-9]+}}
+  // SOFTFP: mov r{{[0-9]+}}, r{{[0-9]+}}
+  // HARD: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+  return s.d;
+}
+
+float __attribute__((pcs("aapcs-vfp"))) baz(float x, float y) {
+  // CHECK-NOT: mov s{{[0-9]+}}, r{{[0-9]+}}
+  // SOFT: mov r{{[0-9]+}}, r{{[0-9]+}}
+  // SOFTFP: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+  // HARD: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+  return y;
+}
+
+float __attribute__((pcs("aapcs-vfp"))) foo(S s) {
+  // CHECK-NOT: mov s{{[0-9]+}}, r{{[0-9]+}}
+  // SOFT: mov r{{[0-9]+}}, r{{[0-9]+}}
+  // SOFTFP: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+  // HARD: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+  return s.d;
+}
+
+float __attribute__((pcs("aapcs"))) bar(S s) {
+  // CHECK-NOT: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+  // CHECK: mov r{{[0-9]+}}, r{{[0-9]+}}
+  return s.d;
+}
-- 
2.40.0