llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
-
llvm::Value *DSTr, *DSTi;
if (LHSr->getType()->isFloatingPointTy()) {
- // If we have a complex operand on the RHS, we delegate to a libcall to
- // handle all of the complexities and minimize underflow/overflow cases.
+ // If we have a complex operand on the RHS and FastMath is not allowed, we
+ // delegate to a libcall to handle all of the complexities and minimize
+ // underflow/overflow cases. When FastMath is allowed we construct the
+ // divide inline using the same algorithm as for integer operands.
//
// FIXME: We would be able to avoid the libcall in many places if we
// supported imaginary types in addition to complex types.
- if (RHSi) {
+ if (RHSi && !CGF.getLangOpts().FastMath) {
BinOpInfo LibCallOp = Op;
// If LHS was a real, supply a null imaginary part.
    if (!LHSi)
      LibCallOp.LHS.second = llvm::Constant::getNullValue(LHSr->getType());

    switch (LHSr->getType()->getTypeID()) {
    default:
      llvm_unreachable("Unsupported floating point type!");
    case llvm::Type::HalfTyID:
      return EmitComplexBinOpLibCall("__divhc3", LibCallOp);
    case llvm::Type::FloatTyID:
      return EmitComplexBinOpLibCall("__divsc3", LibCallOp);
    case llvm::Type::DoubleTyID:
      return EmitComplexBinOpLibCall("__divdc3", LibCallOp);
    case llvm::Type::X86_FP80TyID:
      return EmitComplexBinOpLibCall("__divxc3", LibCallOp);
    case llvm::Type::FP128TyID:
      return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
    case llvm::Type::PPC_FP128TyID:
      return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
    }
}
- }
- assert(LHSi && "Can have at most one non-complex operand!");
+ } else if (RHSi) {
+ if (!LHSi)
+ LHSi = llvm::Constant::getNullValue(RHSi->getType());
+
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *AC = Builder.CreateFMul(LHSr, RHSr); // a*c
+ llvm::Value *BD = Builder.CreateFMul(LHSi, RHSi); // b*d
+ llvm::Value *ACpBD = Builder.CreateFAdd(AC, BD); // ac+bd
+
+ llvm::Value *CC = Builder.CreateFMul(RHSr, RHSr); // c*c
+ llvm::Value *DD = Builder.CreateFMul(RHSi, RHSi); // d*d
+ llvm::Value *CCpDD = Builder.CreateFAdd(CC, DD); // cc+dd
- DSTr = Builder.CreateFDiv(LHSr, RHSr);
- DSTi = Builder.CreateFDiv(LHSi, RHSr);
+ llvm::Value *BC = Builder.CreateFMul(LHSi, RHSr); // b*c
+ llvm::Value *AD = Builder.CreateFMul(LHSr, RHSi); // a*d
+ llvm::Value *BCmAD = Builder.CreateFSub(BC, AD); // bc-ad
+
+ DSTr = Builder.CreateFDiv(ACpBD, CCpDD);
+ DSTi = Builder.CreateFDiv(BCmAD, CCpDD);
+ } else {
+ assert(LHSi && "Can have at most one non-complex operand!");
+
+ DSTr = Builder.CreateFDiv(LHSr, RHSr);
+ DSTi = Builder.CreateFDiv(LHSi, RHSr);
+ }
} else {
assert(Op.LHS.second && Op.RHS.second &&
"Both operands of integer complex operators must be complex!");
// RUN: %clang_cc1 %s -O1 -emit-llvm -triple armv7-none-linux-gnueabi -o - | FileCheck %s --check-prefix=ARM
// RUN: %clang_cc1 %s -O1 -emit-llvm -triple armv7-none-linux-gnueabihf -o - | FileCheck %s --check-prefix=ARMHF
// RUN: %clang_cc1 %s -O1 -emit-llvm -triple thumbv7k-apple-watchos2.0 -o - -target-abi aapcs16 | FileCheck %s --check-prefix=ARM7K
+// RUN: %clang_cc1 %s -O1 -emit-llvm -triple aarch64-unknown-unknown -ffast-math -o - | FileCheck %s --check-prefix=AARCH64-FASTMATH
float _Complex div_float_rc(float a, float _Complex b) {
// X86-LABEL: @div_float_rc(
// X86-NOT: fdiv
// X86: call {{.*}} @__divsc3(
// X86: ret
+
+ // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD))
+ // AARCH64-FASTMATH-LABEL: @div_float_rc(float %a, [2 x float] %b.coerce)
+ // A = a
+ // B = 0
+ // AARCH64-FASTMATH: [[C:%.*]] = extractvalue [2 x float] %b.coerce, 0
+ // AARCH64-FASTMATH: [[D:%.*]] = extractvalue [2 x float] %b.coerce, 1
+ //
+ // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast float [[C]], %a
+ // BD = 0
+ // ACpBD = AC
+ //
+ // AARCH64-FASTMATH: [[CC:%.*]] = fmul fast float [[C]], [[C]]
+ // AARCH64-FASTMATH: [[DD:%.*]] = fmul fast float [[D]], [[D]]
+ // AARCH64-FASTMATH: [[CCpDD:%.*]] = fadd fast float [[CC]], [[DD]]
+ //
+ // BC = 0
+ // AARCH64-FASTMATH: [[AD:%.*]] = fmul fast float [[D]], %a
+ // AARCH64-FASTMATH: [[BCmAD:%.*]] = fsub fast float -0.000000e+00, [[AD]]
+ //
+ // AARCH64-FASTMATH: fdiv fast float [[AC]], [[CCpDD]]
+ // AARCH64-FASTMATH: fdiv fast float [[BCmAD]], [[CCpDD]]
+ // AARCH64-FASTMATH: ret
return a / b;
}
float _Complex div_float_cc(float _Complex a, float _Complex b) {
// X86-NOT: fdiv
// X86: call {{.*}} @__divsc3(
// X86: ret
+
+ // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD))
+ // AARCH64-FASTMATH-LABEL: @div_float_cc([2 x float] %a.coerce, [2 x float] %b.coerce)
+ // AARCH64-FASTMATH: [[A:%.*]] = extractvalue [2 x float] %a.coerce, 0
+ // AARCH64-FASTMATH: [[B:%.*]] = extractvalue [2 x float] %a.coerce, 1
+ // AARCH64-FASTMATH: [[C:%.*]] = extractvalue [2 x float] %b.coerce, 0
+ // AARCH64-FASTMATH: [[D:%.*]] = extractvalue [2 x float] %b.coerce, 1
+ //
+ // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast float [[C]], [[A]]
+ // AARCH64-FASTMATH: [[BD:%.*]] = fmul fast float [[D]], [[B]]
+ // AARCH64-FASTMATH: [[ACpBD:%.*]] = fadd fast float [[AC]], [[BD]]
+ //
+ // AARCH64-FASTMATH: [[CC:%.*]] = fmul fast float [[C]], [[C]]
+ // AARCH64-FASTMATH: [[DD:%.*]] = fmul fast float [[D]], [[D]]
+ // AARCH64-FASTMATH: [[CCpDD:%.*]] = fadd fast float [[CC]], [[DD]]
+ //
+ // AARCH64-FASTMATH: [[BC:%.*]] = fmul fast float [[C]], [[B]]
+ // AARCH64-FASTMATH: [[AD:%.*]] = fmul fast float [[D]], [[A]]
+ // AARCH64-FASTMATH: [[BCmAD:%.*]] = fsub fast float [[BC]], [[AD]]
+ //
+ // AARCH64-FASTMATH: fdiv fast float [[ACpBD]], [[CCpDD]]
+ // AARCH64-FASTMATH: fdiv fast float [[BCmAD]], [[CCpDD]]
+ // AARCH64-FASTMATH: ret
return a / b;
}
// X86-NOT: fdiv
// X86: call {{.*}} @__divdc3(
// X86: ret
+
+ // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD))
+ // AARCH64-FASTMATH-LABEL: @div_double_rc(double %a, [2 x double] %b.coerce)
+ // A = a
+ // B = 0
+ // AARCH64-FASTMATH: [[C:%.*]] = extractvalue [2 x double] %b.coerce, 0
+ // AARCH64-FASTMATH: [[D:%.*]] = extractvalue [2 x double] %b.coerce, 1
+ //
+ // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast double [[C]], %a
+ // BD = 0
+ // ACpBD = AC
+ //
+ // AARCH64-FASTMATH: [[CC:%.*]] = fmul fast double [[C]], [[C]]
+ // AARCH64-FASTMATH: [[DD:%.*]] = fmul fast double [[D]], [[D]]
+ // AARCH64-FASTMATH: [[CCpDD:%.*]] = fadd fast double [[CC]], [[DD]]
+ //
+ // BC = 0
+ // AARCH64-FASTMATH: [[AD:%.*]] = fmul fast double [[D]], %a
+ // AARCH64-FASTMATH: [[BCmAD:%.*]] = fsub fast double -0.000000e+00, [[AD]]
+ //
+ // AARCH64-FASTMATH: fdiv fast double [[AC]], [[CCpDD]]
+ // AARCH64-FASTMATH: fdiv fast double [[BCmAD]], [[CCpDD]]
+ // AARCH64-FASTMATH: ret
return a / b;
}
double _Complex div_double_cc(double _Complex a, double _Complex b) {
// X86-NOT: fdiv
// X86: call {{.*}} @__divdc3(
// X86: ret
+
+ // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD))
+ // AARCH64-FASTMATH-LABEL: @div_double_cc([2 x double] %a.coerce, [2 x double] %b.coerce)
+ // AARCH64-FASTMATH: [[A:%.*]] = extractvalue [2 x double] %a.coerce, 0
+ // AARCH64-FASTMATH: [[B:%.*]] = extractvalue [2 x double] %a.coerce, 1
+ // AARCH64-FASTMATH: [[C:%.*]] = extractvalue [2 x double] %b.coerce, 0
+ // AARCH64-FASTMATH: [[D:%.*]] = extractvalue [2 x double] %b.coerce, 1
+ //
+ // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast double [[C]], [[A]]
+ // AARCH64-FASTMATH: [[BD:%.*]] = fmul fast double [[D]], [[B]]
+ // AARCH64-FASTMATH: [[ACpBD:%.*]] = fadd fast double [[AC]], [[BD]]
+ //
+ // AARCH64-FASTMATH: [[CC:%.*]] = fmul fast double [[C]], [[C]]
+ // AARCH64-FASTMATH: [[DD:%.*]] = fmul fast double [[D]], [[D]]
+ // AARCH64-FASTMATH: [[CCpDD:%.*]] = fadd fast double [[CC]], [[DD]]
+ //
+ // AARCH64-FASTMATH: [[BC:%.*]] = fmul fast double [[C]], [[B]]
+ // AARCH64-FASTMATH: [[AD:%.*]] = fmul fast double [[D]], [[A]]
+ // AARCH64-FASTMATH: [[BCmAD:%.*]] = fsub fast double [[BC]], [[AD]]
+ //
+ // AARCH64-FASTMATH: fdiv fast double [[ACpBD]], [[CCpDD]]
+ // AARCH64-FASTMATH: fdiv fast double [[BCmAD]], [[CCpDD]]
+ // AARCH64-FASTMATH: ret
return a / b;
}
// PPC-NOT: fdiv
// PPC: call {{.*}} @__divtc3(
// PPC: ret
+
+ // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD))
+ // AARCH64-FASTMATH-LABEL: @div_long_double_rc(fp128 %a, [2 x fp128] %b.coerce)
+ // A = a
+ // B = 0
+ // AARCH64-FASTMATH: [[C:%.*]] = extractvalue [2 x fp128] %b.coerce, 0
+ // AARCH64-FASTMATH: [[D:%.*]] = extractvalue [2 x fp128] %b.coerce, 1
+ //
+ // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast fp128 [[C]], %a
+ // BD = 0
+ // ACpBD = AC
+ //
+ // AARCH64-FASTMATH: [[CC:%.*]] = fmul fast fp128 [[C]], [[C]]
+ // AARCH64-FASTMATH: [[DD:%.*]] = fmul fast fp128 [[D]], [[D]]
+ // AARCH64-FASTMATH: [[CCpDD:%.*]] = fadd fast fp128 [[CC]], [[DD]]
+ //
+ // BC = 0
+ // AARCH64-FASTMATH: [[AD:%.*]] = fmul fast fp128 [[D]], %a
+ // AARCH64-FASTMATH: [[BCmAD:%.*]] = fsub fast fp128 0xL00000000000000008000000000000000, [[AD]]
+ //
+ // AARCH64-FASTMATH: fdiv fast fp128 [[AC]], [[CCpDD]]
+ // AARCH64-FASTMATH: fdiv fast fp128 [[BCmAD]], [[CCpDD]]
+ // AARCH64-FASTMATH: ret
return a / b;
}
long double _Complex div_long_double_cc(long double _Complex a, long double _Complex b) {
// PPC-NOT: fdiv
// PPC: call {{.*}} @__divtc3(
// PPC: ret
+
+ // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD))
+ // AARCH64-FASTMATH-LABEL: @div_long_double_cc([2 x fp128] %a.coerce, [2 x fp128] %b.coerce)
+ // AARCH64-FASTMATH: [[A:%.*]] = extractvalue [2 x fp128] %a.coerce, 0
+ // AARCH64-FASTMATH: [[B:%.*]] = extractvalue [2 x fp128] %a.coerce, 1
+ // AARCH64-FASTMATH: [[C:%.*]] = extractvalue [2 x fp128] %b.coerce, 0
+ // AARCH64-FASTMATH: [[D:%.*]] = extractvalue [2 x fp128] %b.coerce, 1
+ //
+ // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast fp128 [[C]], [[A]]
+ // AARCH64-FASTMATH: [[BD:%.*]] = fmul fast fp128 [[D]], [[B]]
+ // AARCH64-FASTMATH: [[ACpBD:%.*]] = fadd fast fp128 [[AC]], [[BD]]
+ //
+ // AARCH64-FASTMATH: [[CC:%.*]] = fmul fast fp128 [[C]], [[C]]
+ // AARCH64-FASTMATH: [[DD:%.*]] = fmul fast fp128 [[D]], [[D]]
+ // AARCH64-FASTMATH: [[CCpDD:%.*]] = fadd fast fp128 [[CC]], [[DD]]
+ //
+ // AARCH64-FASTMATH: [[BC:%.*]] = fmul fast fp128 [[C]], [[B]]
+ // AARCH64-FASTMATH: [[AD:%.*]] = fmul fast fp128 [[D]], [[A]]
+ // AARCH64-FASTMATH: [[BCmAD:%.*]] = fsub fast fp128 [[BC]], [[AD]]
+ //
+ // AARCH64-FASTMATH: fdiv fast fp128 [[ACpBD]], [[CCpDD]]
+ // AARCH64-FASTMATH: fdiv fast fp128 [[BCmAD]], [[CCpDD]]
+ // AARCH64-FASTMATH: ret
return a / b;
}