ABIKind getABIKind() const { return Kind; }
private:
- ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
+ ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const;
ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
uint64_t Members) const;
ABIArgInfo coerceIllegalVector(QualType Ty) const;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t Members) const override;
+ bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
+
void computeInfo(CGFunctionInfo &FI) const override;
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!::classifyReturnType(getCXXABI(), FI, *this))
- FI.getReturnInfo() =
- classifyReturnType(FI.getReturnType(), FI.isVariadic());
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
+ FI.getCallingConvention());
for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, FI.isVariadic());
+ I.info = classifyArgumentType(I.type, FI.isVariadic(),
+ FI.getCallingConvention());
+
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
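
For context on the dispatch above: an explicit __attribute__((pcs(...))) on a
declaration reaches CodeGen as a calling convention other than
llvm::CallingConv::C, which is why FI.getCallingConvention() is now threaded
into both classification helpers. A minimal source-level sketch (hypothetical
declarations, assuming an ARM target; not part of the patch):

/* pcs_sketch.c - illustrative only; the function names are made up. */
struct HFA { float a, b, c, d; };  /* a homogeneous aggregate of four floats */

/* Carried as llvm::CallingConv::ARM_AAPCS_VFP, independent of -mfloat-abi.  */
float __attribute__((pcs("aapcs-vfp"))) takes_hfa_vfp(struct HFA h);

/* Carried as llvm::CallingConv::ARM_AAPCS: marshal to the base standard.    */
float __attribute__((pcs("aapcs"))) takes_hfa_base(struct HFA h);
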
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
- bool isVariadic) const {
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
+ unsigned functionCallConv) const {
// 6.1.2.1 The following argument types are VFP CPRCs:
// A single-precision floating-point type (including promoted
// half-precision types); A double-precision floating-point type;
// A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
// with a Base Type of a single- or double-precision floating-point type,
// 64-bit containerized vectors or 128-bit containerized vectors with one
// to four Elements.
- bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
+ // Variadic functions should always marshal to the base standard.
+ bool IsAAPCS_VFP =
+ !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
Ty = useFirstFieldIfTransparentUnion(Ty);
// half type natively, and does not need to interwork with AAPCS code.
if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
!getContext().getLangOpts().NativeHalfArgsAndReturns) {
- llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+ llvm::Type *ResType = IsAAPCS_VFP ?
llvm::Type::getFloatTy(getVMContext()) :
llvm::Type::getInt32Ty(getVMContext());
return ABIArgInfo::getDirect(ResType);
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
- if (IsEffectivelyAAPCS_VFP) {
+ if (IsAAPCS_VFP) {
// Homogeneous Aggregates need to be expanded when we can fit the aggregate
// into VFP registers.
const Type *Base = nullptr;
return true;
}
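
The homogeneous-aggregate path above implements the VFP CPRC rule quoted in
the comment: every member must (recursively) share one base type of float,
double, or a 64-/128-bit containerized vector, with at most four elements in
total. A rough illustration of what does and does not qualify (the struct
names are invented for this note and assume the AAPCS-VFP rules):

struct HFA2   { float x, y; };      /* HFA: two floats, candidates for s0-s1   */
struct HFA4d  { double d[4]; };     /* HFA: four doubles, candidates for d0-d3 */
struct Mixed  { float x; int n; };  /* mixed base types       -> not an HFA    */
struct TooBig { float f[5]; };      /* five elements (> four) -> not an HFA    */
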
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
- bool isVariadic) const {
- bool IsEffectivelyAAPCS_VFP =
- (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const {
+
+ // Variadic functions should always marshal to the base standard.
+ bool IsAAPCS_VFP =
+ !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// half type natively, and does not need to interwork with AAPCS code.
if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
!getContext().getLangOpts().NativeHalfArgsAndReturns) {
- llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+ llvm::Type *ResType = IsAAPCS_VFP ?
llvm::Type::getFloatTy(getVMContext()) :
llvm::Type::getInt32Ty(getVMContext());
return ABIArgInfo::getDirect(ResType);
return ABIArgInfo::getIgnore();
// Check for homogeneous aggregates with AAPCS-VFP.
- if (IsEffectivelyAAPCS_VFP) {
+ if (IsAAPCS_VFP) {
const Type *Base = nullptr;
uint64_t Members = 0;
if (isHomogeneousAggregate(RetTy, Base, Members))
return Members <= 4;
}
+bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
+ bool acceptHalf) const {
+ // Give precedence to user-specified calling conventions.
+ if (callConvention != llvm::CallingConv::C)
+ return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
+ else
+ return (getABIKind() == AAPCS_VFP) ||
+ (acceptHalf && (getABIKind() == AAPCS16_VFP));
+}
+
Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
CharUnits SlotSize = CharUnits::fromQuantity(4);
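
Taken together, the new isEffectivelyAAPCS_VFP helper says: an explicit
calling convention wins outright, and only in its absence does the target's
float ABI (the ABIKind) decide, with AAPCS16_VFP accepted only where the
caller passes acceptHalf. Both callers additionally require the function to be
non-variadic. Below is a self-contained sketch of that decision, with
simplified enums standing in for Clang's types; it models the logic and is not
the Clang code:

#include <stdbool.h>
#include <stdio.h>

enum CallConv { CC_C, CC_ARM_AAPCS, CC_ARM_AAPCS_VFP };  /* stand-ins */
enum ABIKind  { APCS, AAPCS, AAPCS_VFP, AAPCS16_VFP };

static bool effectively_aapcs_vfp(enum CallConv cc, enum ABIKind abi,
                                  bool accept_half) {
  if (cc != CC_C)                       /* explicit pcs attribute wins */
    return cc == CC_ARM_AAPCS_VFP;
  return abi == AAPCS_VFP ||            /* otherwise follow the float ABI */
         (accept_half && abi == AAPCS16_VFP);
}

int main(void) {
  /* pcs("aapcs-vfp") on a soft-float target: VFP marshalling is still used. */
  printf("%d\n", effectively_aapcs_vfp(CC_ARM_AAPCS_VFP, AAPCS, false));  /* 1 */
  /* pcs("aapcs") on a hard-float target: marshal to the base standard.    */
  printf("%d\n", effectively_aapcs_vfp(CC_ARM_AAPCS, AAPCS_VFP, false));  /* 0 */
  return 0;
}
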
--- /dev/null
+// Covers a bug fix for ABI selection with homogeneous aggregates:
+// See: https://bugs.llvm.org/show_bug.cgi?id=39982
+
+// REQUIRES: arm-registered-target
+// RUN: %clang -mfloat-abi=hard --target=armv7-unknown-linux-gnueabi -O3 -S -o - %s | FileCheck %s -check-prefixes=HARD,CHECK
+// RUN: %clang -mfloat-abi=softfp --target=armv7-unknown-linux-gnueabi -O3 -S -o - %s | FileCheck %s -check-prefixes=SOFTFP,CHECK
+// RUN: %clang -mfloat-abi=soft --target=armv7-unknown-linux-gnueabi -O3 -S -o - %s | FileCheck %s -check-prefixes=SOFT,CHECK
+
+struct S {
+ float f;
+ float d;
+ float c;
+ float t;
+};
+
+// Variadic functions should always marshal for the base standard.
+// See section 5.5 (Parameter Passing) of the AAPCS.
+float __attribute__((pcs("aapcs-vfp"))) variadic(S s, ...) {
+ // CHECK-NOT: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+ // CHECK: mov r{{[0-9]+}}, r{{[0-9]+}}
+ return s.d;
+}
+
+float no_attribute(S s) {
+ // SOFT: mov r{{[0-9]+}}, r{{[0-9]+}}
+ // SOFTFP: mov r{{[0-9]+}}, r{{[0-9]+}}
+ // HARD: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+ return s.d;
+}
+
+float __attribute__((pcs("aapcs-vfp"))) baz(float x, float y) {
+ // CHECK-NOT: mov s{{[0-9]+}}, r{{[0-9]+}}
+ // SOFT: mov r{{[0-9]+}}, r{{[0-9]+}}
+ // SOFTFP: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+ // HARD: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+ return y;
+}
+
+float __attribute__((pcs("aapcs-vfp"))) foo(S s) {
+ // CHECK-NOT: mov s{{[0-9]+}}, r{{[0-9]+}}
+ // SOFT: mov r{{[0-9]+}}, r{{[0-9]+}}
+ // SOFTFP: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+ // HARD: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+ return s.d;
+}
+
+float __attribute__((pcs("aapcs"))) bar(S s) {
+ // CHECK-NOT: vmov.f32 s{{[0-9]+}}, s{{[0-9]+}}
+ // CHECK: mov r{{[0-9]+}}, r{{[0-9]+}}
+ return s.d;
+}
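
As a reading aid for the CHECK lines: under the VFP variant, struct S is a
homogeneous aggregate of four floats and is passed in s0-s3, so s.d is already
in s1 and returning it only needs a vmov.f32 between s-registers; under the
base standard (and in the variadic case) the same 16 bytes travel in r0-r3, so
s.d arrives in r1 and the result is produced with a core-register mov. The
caller below is purely illustrative and is not part of the test:

struct S { float f, d, c, t; };

float __attribute__((pcs("aapcs-vfp"))) foo(struct S s);  /* same functions as */
float __attribute__((pcs("aapcs")))     bar(struct S s);  /* defined above     */

float call_both(struct S s) {
  /* foo: on the softfp/hard runs s is passed in s0-s3 and the float result
   * comes back in s0; with -mfloat-abi=soft no VFP registers are available,
   * so core registers are used despite the attribute.                      */
  float a = foo(s);
  /* bar: base-standard marshalling, s occupies r0-r3 and the result r0.    */
  float b = bar(s);
  return a + b;
}
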