If a function that has an ``sspstrong`` attribute is inlined into a
function that doesn't have an ``sspstrong`` attribute, then the
resulting function will have an ``sspstrong`` attribute.
+``strictfp``
+ This attribute indicates that the function was called from a scope that
+ requires strict floating point semantics. LLVM will not attempt any
+ optimizations that require assumptions about the floating point rounding
+ mode or that might alter the state of floating point status flags that
+ might otherwise be set or cleared by calling this function.
``"thunk"``
This attribute indicates that the function will delegate to some other
function with a tail call. The prototype of a thunk should not be used for
ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50,
ATTR_KIND_ALLOC_SIZE = 51,
ATTR_KIND_WRITEONLY = 52,
- ATTR_KIND_SPECULATABLE = 53
+ ATTR_KIND_SPECULATABLE = 53,
+ ATTR_KIND_STRICT_FP = 54,
};
enum ComdatSelectionKindCodes {
/// Strong Stack protection.
def StackProtectStrong : EnumAttr<"sspstrong">;
+/// Function was called in a scope requiring strict floating point semantics.
+def StrictFP : EnumAttr<"strictfp">;
+
/// Hidden pointer to structure to return.
def StructRet : EnumAttr<"sret">;
CALLSITE_DELEGATE_GETTER(isNoBuiltin());
}
+  /// Return true if the call requires strict floating point semantics,
+  /// i.e. the callee carries the "strictfp" attribute. Delegates to the
+  /// underlying call or invoke instruction.
+  bool isStrictFP() const {
+    CALLSITE_DELEGATE_GETTER(isStrictFP());
+  }
+
+
/// Return true if the call should not be inlined.
bool isNoInline() const {
CALLSITE_DELEGATE_GETTER(isNoInline());
!hasFnAttrImpl(Attribute::Builtin);
}
+  /// Determine if the call requires strict floating point semantics, i.e.
+  /// whether the StrictFP ("strictfp") function attribute is present.
+  bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
+
/// Return true if the call should not be inlined.
bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
void setIsNoInline() {
!hasFnAttrImpl(Attribute::Builtin);
}
+  /// Determine if the call requires strict floating point semantics, i.e.
+  /// whether the StrictFP ("strictfp") function attribute is present.
+  bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
+
/// Return true if the call should not be inlined.
bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
void setIsNoInline() {
Value *optimizeSqrt(CallInst *CI, IRBuilder<> &B);
Value *optimizeSinCosPi(CallInst *CI, IRBuilder<> &B);
Value *optimizeTan(CallInst *CI, IRBuilder<> &B);
+  // Wrapper dispatching all floating point library call optimizations.
+  Value *optimizeFloatingPointLibCall(CallInst *CI, LibFunc Func,
+                                      IRBuilder<> &B);
// Integer Library Call Optimizations
Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
//
bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
- if (CS.isNoBuiltin())
+ if (CS.isNoBuiltin() || CS.isStrictFP())
return false;
switch (F->getIntrinsicID()) {
case Intrinsic::fabs:
llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI) {
- if (CS.isNoBuiltin())
+ if (CS.isNoBuiltin() || CS.isStrictFP())
return nullptr;
if (!F->hasName())
return nullptr;
bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {
// FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
// (and to some extent ConstantFoldScalarCall).
- if (CS.isNoBuiltin())
+ if (CS.isNoBuiltin() || CS.isStrictFP())
return false;
Function *F = CS.getCalledFunction();
if (!F)
KEYWORD(ssp);
KEYWORD(sspreq);
KEYWORD(sspstrong);
+ KEYWORD(strictfp);
KEYWORD(safestack);
KEYWORD(sanitize_address);
KEYWORD(sanitize_thread);
B.addAttribute(Attribute::SanitizeThread); break;
case lltok::kw_sanitize_memory:
B.addAttribute(Attribute::SanitizeMemory); break;
+ case lltok::kw_strictfp: B.addAttribute(Attribute::StrictFP); break;
case lltok::kw_uwtable: B.addAttribute(Attribute::UWTable); break;
case lltok::kw_writeonly: B.addAttribute(Attribute::WriteOnly); break;
case lltok::kw_sspreq:
case lltok::kw_sspstrong:
case lltok::kw_safestack:
+ case lltok::kw_strictfp:
case lltok::kw_uwtable:
HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
break;
case lltok::kw_sspreq:
case lltok::kw_sspstrong:
case lltok::kw_safestack:
+ case lltok::kw_strictfp:
case lltok::kw_uwtable:
HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
break;
kw_sret,
kw_sanitize_thread,
kw_sanitize_memory,
+ kw_strictfp,
kw_swifterror,
kw_swiftself,
kw_uwtable,
case Attribute::SwiftError: return 1ULL << 52;
case Attribute::WriteOnly: return 1ULL << 53;
case Attribute::Speculatable: return 1ULL << 54;
+ case Attribute::StrictFP: return 1ULL << 55;
case Attribute::Dereferenceable:
llvm_unreachable("dereferenceable attribute not supported in raw format");
break;
return Attribute::StackProtectStrong;
case bitc::ATTR_KIND_SAFESTACK:
return Attribute::SafeStack;
+ case bitc::ATTR_KIND_STRICT_FP:
+ return Attribute::StrictFP;
case bitc::ATTR_KIND_STRUCT_RET:
return Attribute::StructRet;
case bitc::ATTR_KIND_SANITIZE_ADDRESS:
return bitc::ATTR_KIND_STACK_PROTECT_STRONG;
case Attribute::SafeStack:
return bitc::ATTR_KIND_SAFESTACK;
+ case Attribute::StrictFP:
+ return bitc::ATTR_KIND_STRICT_FP;
case Attribute::StructRet:
return bitc::ATTR_KIND_STRUCT_RET;
case Attribute::SanitizeAddress:
// Check for well-known libc/libm calls. If the function is internal, it
// can't be a library call. Don't do the check if marked as nobuiltin for
- // some reason.
+ // some reason or the call site requires strict floating point semantics.
LibFunc Func;
- if (!I.isNoBuiltin() && !F->hasLocalLinkage() && F->hasName() &&
- LibInfo->getLibFunc(*F, Func) &&
+ if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
+ F->hasName() && LibInfo->getLibFunc(*F, Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
switch (Func) {
default: break;
return "sspstrong";
if (hasAttribute(Attribute::SafeStack))
return "safestack";
+ if (hasAttribute(Attribute::StrictFP))
+ return "strictfp";
if (hasAttribute(Attribute::StructRet))
return "sret";
if (hasAttribute(Attribute::SanitizeThread))
case Attribute::InaccessibleMemOrArgMemOnly:
case Attribute::AllocSize:
case Attribute::Speculatable:
+ case Attribute::StrictFP:
return true;
default:
break;
.Case("ssp", Attribute::StackProtect)
.Case("sspreq", Attribute::StackProtectReq)
.Case("sspstrong", Attribute::StackProtectStrong)
+ .Case("strictfp", Attribute::StrictFP)
.Case("uwtable", Attribute::UWTable)
.Default(Attribute::None);
}
return nullptr;
}
+// Dispatch a floating point libm call to the matching optimization routine.
+// Returns the simplified value, or nullptr when no simplification applies.
+// Calls carrying the strictfp attribute are never simplified here, since
+// these optimizations may assume the default rounding mode or alter the
+// floating point status flags.
+Value *LibCallSimplifier::optimizeFloatingPointLibCall(CallInst *CI,
+                                                       LibFunc Func,
+                                                       IRBuilder<> &Builder) {
+  // Don't optimize calls that require strict floating point semantics.
+  if (CI->isStrictFP())
+    return nullptr;
+
+  switch (Func) {
+  case LibFunc_cosf:
+  case LibFunc_cos:
+  case LibFunc_cosl:
+    return optimizeCos(CI, Builder);
+  case LibFunc_sinpif:
+  case LibFunc_sinpi:
+  case LibFunc_cospif:
+  case LibFunc_cospi:
+    return optimizeSinCosPi(CI, Builder);
+  case LibFunc_powf:
+  case LibFunc_pow:
+  case LibFunc_powl:
+    return optimizePow(CI, Builder);
+  case LibFunc_exp2l:
+  case LibFunc_exp2:
+  case LibFunc_exp2f:
+    return optimizeExp2(CI, Builder);
+  case LibFunc_fabsf:
+  case LibFunc_fabs:
+  case LibFunc_fabsl:
+    return replaceUnaryCall(CI, Builder, Intrinsic::fabs);
+  case LibFunc_sqrtf:
+  case LibFunc_sqrt:
+  case LibFunc_sqrtl:
+    return optimizeSqrt(CI, Builder);
+  case LibFunc_log:
+  case LibFunc_log10:
+  case LibFunc_log1p:
+  case LibFunc_log2:
+  case LibFunc_logb:
+    return optimizeLog(CI, Builder);
+  case LibFunc_tan:
+  case LibFunc_tanf:
+  case LibFunc_tanl:
+    return optimizeTan(CI, Builder);
+  // Rounding-style libm calls are replaced with the matching LLVM intrinsic.
+  case LibFunc_ceil:
+    return replaceUnaryCall(CI, Builder, Intrinsic::ceil);
+  case LibFunc_floor:
+    return replaceUnaryCall(CI, Builder, Intrinsic::floor);
+  case LibFunc_round:
+    return replaceUnaryCall(CI, Builder, Intrinsic::round);
+  case LibFunc_nearbyint:
+    return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
+  case LibFunc_rint:
+    return replaceUnaryCall(CI, Builder, Intrinsic::rint);
+  case LibFunc_trunc:
+    return replaceUnaryCall(CI, Builder, Intrinsic::trunc);
+  // No direct simplification for the functions below; when unsafe FP
+  // shrinking is enabled and a float version of the routine exists, narrow
+  // the double-precision call to the single-precision variant.
+  case LibFunc_acos:
+  case LibFunc_acosh:
+  case LibFunc_asin:
+  case LibFunc_asinh:
+  case LibFunc_atan:
+  case LibFunc_atanh:
+  case LibFunc_cbrt:
+  case LibFunc_cosh:
+  case LibFunc_exp:
+  case LibFunc_exp10:
+  case LibFunc_expm1:
+  case LibFunc_sin:
+  case LibFunc_sinh:
+  case LibFunc_tanh:
+    if (UnsafeFPShrink && hasFloatVersion(CI->getCalledFunction()->getName()))
+      return optimizeUnaryDoubleFP(CI, Builder, true);
+    return nullptr;
+  case LibFunc_copysign:
+    if (hasFloatVersion(CI->getCalledFunction()->getName()))
+      return optimizeBinaryDoubleFP(CI, Builder);
+    return nullptr;
+  case LibFunc_fminf:
+  case LibFunc_fmin:
+  case LibFunc_fminl:
+  case LibFunc_fmaxf:
+  case LibFunc_fmax:
+  case LibFunc_fmaxl:
+    return optimizeFMinFMax(CI, Builder);
+  default:
+    return nullptr;
+  }
+}
+
+
Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
+  // TODO: Split out the code below that operates on FP calls so that
+  //       we can allow non-FP calls with the StrictFP attribute to be
+  //       optimized.
if (CI->isNoBuiltin())
return nullptr;
LibFunc Func;
Function *Callee = CI->getCalledFunction();
- StringRef FuncName = Callee->getName();
SmallVector<OperandBundleDef, 2> OpBundles;
CI->getOperandBundlesAsDefs(OpBundles);
bool isCallingConvC = isCallingConvCCompatible(CI);
// Command-line parameter overrides instruction attribute.
+ // This can't be moved to optimizeFloatingPointLibCall() because it may be
+ // used by the intrinsic optimizations.
if (EnableUnsafeFPShrink.getNumOccurrences() > 0)
UnsafeFPShrink = EnableUnsafeFPShrink;
else if (isa<FPMathOperator>(CI) && CI->hasUnsafeAlgebra())
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
if (!isCallingConvC)
return nullptr;
+ // The FP intrinsics have corresponding constrained versions so we don't
+ // need to check for the StrictFP attribute here.
switch (II->getIntrinsicID()) {
case Intrinsic::pow:
return optimizePow(CI, Builder);
return nullptr;
if (Value *V = optimizeStringMemoryLibCall(CI, Builder))
return V;
+ if (Value *V = optimizeFloatingPointLibCall(CI, Func, Builder))
+ return V;
switch (Func) {
- case LibFunc_cosf:
- case LibFunc_cos:
- case LibFunc_cosl:
- return optimizeCos(CI, Builder);
- case LibFunc_sinpif:
- case LibFunc_sinpi:
- case LibFunc_cospif:
- case LibFunc_cospi:
- return optimizeSinCosPi(CI, Builder);
- case LibFunc_powf:
- case LibFunc_pow:
- case LibFunc_powl:
- return optimizePow(CI, Builder);
- case LibFunc_exp2l:
- case LibFunc_exp2:
- case LibFunc_exp2f:
- return optimizeExp2(CI, Builder);
- case LibFunc_fabsf:
- case LibFunc_fabs:
- case LibFunc_fabsl:
- return replaceUnaryCall(CI, Builder, Intrinsic::fabs);
- case LibFunc_sqrtf:
- case LibFunc_sqrt:
- case LibFunc_sqrtl:
- return optimizeSqrt(CI, Builder);
case LibFunc_ffs:
case LibFunc_ffsl:
case LibFunc_ffsll:
return optimizeFWrite(CI, Builder);
case LibFunc_fputs:
return optimizeFPuts(CI, Builder);
- case LibFunc_log:
- case LibFunc_log10:
- case LibFunc_log1p:
- case LibFunc_log2:
- case LibFunc_logb:
- return optimizeLog(CI, Builder);
case LibFunc_puts:
return optimizePuts(CI, Builder);
- case LibFunc_tan:
- case LibFunc_tanf:
- case LibFunc_tanl:
- return optimizeTan(CI, Builder);
case LibFunc_perror:
return optimizeErrorReporting(CI, Builder);
case LibFunc_vfprintf:
return optimizeErrorReporting(CI, Builder, 0);
case LibFunc_fputc:
return optimizeErrorReporting(CI, Builder, 1);
- case LibFunc_ceil:
- return replaceUnaryCall(CI, Builder, Intrinsic::ceil);
- case LibFunc_floor:
- return replaceUnaryCall(CI, Builder, Intrinsic::floor);
- case LibFunc_round:
- return replaceUnaryCall(CI, Builder, Intrinsic::round);
- case LibFunc_nearbyint:
- return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
- case LibFunc_rint:
- return replaceUnaryCall(CI, Builder, Intrinsic::rint);
- case LibFunc_trunc:
- return replaceUnaryCall(CI, Builder, Intrinsic::trunc);
- case LibFunc_acos:
- case LibFunc_acosh:
- case LibFunc_asin:
- case LibFunc_asinh:
- case LibFunc_atan:
- case LibFunc_atanh:
- case LibFunc_cbrt:
- case LibFunc_cosh:
- case LibFunc_exp:
- case LibFunc_exp10:
- case LibFunc_expm1:
- case LibFunc_sin:
- case LibFunc_sinh:
- case LibFunc_tanh:
- if (UnsafeFPShrink && hasFloatVersion(FuncName))
- return optimizeUnaryDoubleFP(CI, Builder, true);
- return nullptr;
- case LibFunc_copysign:
- if (hasFloatVersion(FuncName))
- return optimizeBinaryDoubleFP(CI, Builder);
- return nullptr;
- case LibFunc_fminf:
- case LibFunc_fmin:
- case LibFunc_fminl:
- case LibFunc_fmaxf:
- case LibFunc_fmax:
- case LibFunc_fmaxl:
- return optimizeFMinFMax(CI, Builder);
default:
return nullptr;
}
; CHECK: declare void @f.inaccessiblememonly() #33
declare void @f.inaccessiblemem_or_argmemonly() inaccessiblemem_or_argmemonly
; CHECK: declare void @f.inaccessiblemem_or_argmemonly() #34
+declare void @f.strictfp() #35
; Functions -- section
declare void @f.section() section "80"
call void @f.nobuiltin() builtin
; CHECK: call void @f.nobuiltin() #42
+ call void @f.strictfp() strictfp
+ ; CHECK: call void @f.strictfp() #43
+
call fastcc noalias i32* @f.noalias() noinline
; CHECK: call fastcc noalias i32* @f.noalias() #12
tail call ghccc nonnull i32* @f.nonnull() minsize
; CHECK: attributes #40 = { writeonly }
; CHECK: attributes #41 = { speculatable }
; CHECK: attributes #42 = { builtin }
+; CHECK: attributes #43 = { strictfp }
;; Metadata
; CHECK-NEXT: %cos3 = call double @cos(double 0.000000e+00)
%cos3 = call double @cos(double 0.000000e+00) nobuiltin
+; cos(1) with strictfp must not be folded: the call may set FP status flags.
+; CHECK-NEXT: %cos4 = call double @cos(double 1.000000e+00)
+ %cos4 = call double @cos(double 1.000000e+00) strictfp
+
; pow(0, 1) is 0
%pow1 = call double @pow(double 0x7FF0000000000000, double 1.000000e+00)
ret double %pi
}
+; Check that we don't constant fold calls marked with the nobuiltin attribute.
+
define double @test_acos_nobuiltin() {
; CHECK-LABEL: @test_acos_nobuiltin
%pi = call double @acos(double -1.000000e+00) nobuiltin
; CHECK: call double @acos(double -1.000000e+00)
ret double %pi
}
+
+; Check that we don't constant fold strictfp results that require rounding.
+
+define double @test_acos_strictfp() {
+; CHECK-LABEL: @test_acos_strictfp
+ %pi = call double @acos(double -1.000000e+00) strictfp
+; CHECK: call double @acos(double -1.000000e+00)
+ ret double %pi
+}
ret i8* %ret
}
+; Verify that the strictfp attr doesn't block this optimization.
+
+define i8* @test_simplify2(i8* %mem1, i8* %mem2, i32 %size) {
+; CHECK-LABEL: @test_simplify2(
+ %ret = call i8* @memcpy(i8* %mem1, i8* %mem2, i32 %size) strictfp
+; CHECK: call void @llvm.memcpy
+ ret i8* %ret
+; CHECK: ret i8* %mem1
+}
\ ssp
\ sspreq
\ sspstrong
+ \ strictfp
\ swiftcc
\ tail
\ target