As with other floating-point rounding builtins that can be optimized
when built with -fno-math-errno, this patch adds support for lrint
and llrint. It currently only optimizes for the AArch64 backend.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D62019
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@361878 91177308-0d34-0410-b5e6-96231b3b80d8
case Builtin::BI__builtin_llroundl:
return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llround));
+ case Builtin::BIlrint:
+ case Builtin::BIlrintf:
+ case Builtin::BIlrintl:
+ case Builtin::BI__builtin_lrint:
+ case Builtin::BI__builtin_lrintf:
+ case Builtin::BI__builtin_lrintl:
+ return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lrint));
+
+ case Builtin::BIllrint:
+ case Builtin::BIllrintf:
+ case Builtin::BIllrintl:
+ case Builtin::BI__builtin_llrint:
+ case Builtin::BI__builtin_llrintf:
+ case Builtin::BI__builtin_llrintl:
+ return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llrint));
+
default:
break;
}
resli = __builtin_lroundl (LD);
// CHECK: call i64 @llvm.lround.i64.f80
+
+ resli = __builtin_lrintf (F);
+ // CHECK: call i64 @llvm.lrint.i64.f32
+
+ resli = __builtin_lrint (D);
+ // CHECK: call i64 @llvm.lrint.i64.f64
+
+ resli = __builtin_lrintl (LD);
+ // CHECK: call i64 @llvm.lrint.i64.f80
}
// __builtin_longjmp isn't supported on all platforms, so only test it on X86.
__builtin_llrint(f); __builtin_llrintf(f); __builtin_llrintl(f);
-// NO__ERRNO: declare i64 @llrint(double) [[READNONE]]
-// NO__ERRNO: declare i64 @llrintf(float) [[READNONE]]
-// NO__ERRNO: declare i64 @llrintl(x86_fp80) [[READNONE]]
+// NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare i64 @llrint(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @llrintf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @llrintl(x86_fp80) [[NOT_READNONE]]
__builtin_lrint(f); __builtin_lrintf(f); __builtin_lrintl(f);
-// NO__ERRNO: declare i64 @lrint(double) [[READNONE]]
-// NO__ERRNO: declare i64 @lrintf(float) [[READNONE]]
-// NO__ERRNO: declare i64 @lrintl(x86_fp80) [[READNONE]]
+// NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare i64 @lrint(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @lrintf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @lrintl(x86_fp80) [[NOT_READNONE]]
llrint(f); llrintf(f); llrintl(f);
-// NO__ERRNO: declare i64 @llrint(double) [[READNONE]]
-// NO__ERRNO: declare i64 @llrintf(float) [[READNONE]]
-// NO__ERRNO: declare i64 @llrintl(x86_fp80) [[READNONE]]
+// NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare i64 @llrint(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @llrintf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @llrintl(x86_fp80) [[NOT_READNONE]]
lrint(f); lrintf(f); lrintl(f);
-// NO__ERRNO: declare i64 @lrint(double) [[READNONE]]
-// NO__ERRNO: declare i64 @lrintf(float) [[READNONE]]
-// NO__ERRNO: declare i64 @lrintl(x86_fp80) [[READNONE]]
+// NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare i64 @lrint(double) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @lrintf(float) [[NOT_READNONE]]
// HAS_ERRNO: declare i64 @lrintl(x86_fp80) [[NOT_READNONE]]