mode argument is only intended as information to the compiler.
+'``llvm.experimental.constrained.lrint``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare <inttype>
+      @llvm.experimental.constrained.lrint(<fptype> <op1>,
+                                           metadata <rounding mode>,
+                                           metadata <exception behavior>)
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.constrained.lrint``' intrinsic returns the first
+operand rounded to an integer according to the current rounding direction.
+An inexact floating-point exception is raised if the operand is not already
+an integer. An invalid exception is raised if the result is too large to fit
+into a supported integer type, and in this case the result is undefined.
+
+Arguments:
+""""""""""
+
+The first argument is a floating-point number. The return value is of
+integer type. Not all types are supported on all targets. The supported
+types are the same as for the ``llvm.lrint`` intrinsic and the ``lrint``
+libm functions.
+
+The second and third arguments specify the rounding mode and exception
+behavior as described above.
+
+Semantics:
+""""""""""
+
+This function returns the same values as the libm ``lrint`` functions
+would, and handles error conditions in the same way.
+
+The rounding mode is described, not determined, by the rounding mode
+argument. The actual rounding mode is determined by the runtime floating-point
+environment. The rounding mode argument is only intended as information
+to the compiler.
+
+If the runtime floating-point environment is using the default rounding mode,
+then the results will be the same as those of the ``llvm.lrint`` intrinsic.
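+
+For illustration, a call rounding a hypothetical ``double`` value ``%x`` to
+an ``i64`` result (one possible type combination; the supported types vary
+by target) might look like:
+
+::
+
+      ; Result depends on the runtime rounding mode (hypothetical %x, %res).
+      %res = call i64 @llvm.experimental.constrained.lrint.i64.f64(
+                          double %x,
+                          metadata !"round.dynamic",
+                          metadata !"fpexcept.strict")
+
+Because the rounding mode operand is ``"round.dynamic"``, the actual rounding
+direction is taken from the runtime floating-point environment.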
+
+
+'``llvm.experimental.constrained.llrint``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare <inttype>
+      @llvm.experimental.constrained.llrint(<fptype> <op1>,
+                                            metadata <rounding mode>,
+                                            metadata <exception behavior>)
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.constrained.llrint``' intrinsic returns the first
+operand rounded to an integer according to the current rounding direction.
+An inexact floating-point exception is raised if the operand is not already
+an integer. An invalid exception is raised if the result is too large to fit
+into a supported integer type, and in this case the result is undefined.
+
+Arguments:
+""""""""""
+
+The first argument is a floating-point number. The return value is of
+integer type. Not all types are supported on all targets. The supported
+types are the same as for the ``llvm.llrint`` intrinsic and the ``llrint``
+libm functions.
+
+The second and third arguments specify the rounding mode and exception
+behavior as described above.
+
+Semantics:
+""""""""""
+
+This function returns the same values as the libm ``llrint`` functions
+would, and handles error conditions in the same way.
+
+The rounding mode is described, not determined, by the rounding mode
+argument. The actual rounding mode is determined by the runtime floating-point
+environment. The rounding mode argument is only intended as information
+to the compiler.
+
+If the runtime floating-point environment is using the default rounding mode,
+then the results will be the same as those of the ``llvm.llrint`` intrinsic.
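+
+For illustration, a call rounding a hypothetical ``float`` value ``%x`` to an
+``i64`` result (one possible type combination; the supported types vary by
+target) might look like:
+
+::
+
+      ; Result depends on the runtime rounding mode (hypothetical %x, %res).
+      %res = call i64 @llvm.experimental.constrained.llrint.i64.f32(
+                          float %x,
+                          metadata !"round.dynamic",
+                          metadata !"fpexcept.strict")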
+
+
'``llvm.experimental.constrained.nearbyint``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
would and handles error conditions in the same way.
+'``llvm.experimental.constrained.lround``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare <inttype>
+      @llvm.experimental.constrained.lround(<fptype> <op1>,
+                                            metadata <exception behavior>)
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.constrained.lround``' intrinsic returns the first
+operand rounded to the nearest integer with ties away from zero. It will
+raise an inexact floating-point exception if the operand is not an integer.
+An invalid exception is raised if the result is too large to fit into a
+supported integer type, and in this case the result is undefined.
+
+Arguments:
+""""""""""
+
+The first argument is a floating-point number. The return value is of
+integer type. Not all types are supported on all targets. The supported
+types are the same as for the ``llvm.lround`` intrinsic and the ``lround``
+libm functions.
+
+The second argument specifies the exception behavior as described above.
+
+Semantics:
+""""""""""
+
+This function returns the same values as the libm ``lround`` functions
+would and handles error conditions in the same way.
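+
+For illustration, a call rounding a hypothetical ``double`` value ``%x`` to an
+``i32`` result (one possible type combination; the supported types vary by
+target) might look like:
+
+::
+
+      ; Ties round away from zero regardless of the runtime rounding mode.
+      %res = call i32 @llvm.experimental.constrained.lround.i32.f64(
+                          double %x,
+                          metadata !"fpexcept.strict")
+
+Note that there is no rounding mode operand: halfway cases are always rounded
+away from zero.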
+
+
+'``llvm.experimental.constrained.llround``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare <inttype>
+      @llvm.experimental.constrained.llround(<fptype> <op1>,
+                                             metadata <exception behavior>)
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.constrained.llround``' intrinsic returns the first
+operand rounded to the nearest integer with ties away from zero. It will
+raise an inexact floating-point exception if the operand is not an integer.
+An invalid exception is raised if the result is too large to fit into a
+supported integer type, and in this case the result is undefined.
+
+Arguments:
+""""""""""
+
+The first argument is a floating-point number. The return value is of
+integer type. Not all types are supported on all targets. The supported
+types are the same as for the ``llvm.llround`` intrinsic and the ``llround``
+libm functions.
+
+The second argument specifies the exception behavior as described above.
+
+Semantics:
+""""""""""
+
+This function returns the same values as the libm ``llround`` functions
+would and handles error conditions in the same way.
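+
+For illustration, a call rounding a hypothetical ``double`` value ``%x`` to an
+``i64`` result (one possible type combination; the supported types vary by
+target) might look like:
+
+::
+
+      ; Ties round away from zero regardless of the runtime rounding mode.
+      %res = call i64 @llvm.experimental.constrained.llround.i64.f64(
+                          double %x,
+                          metadata !"fpexcept.strict")
+
+As with ``llvm.experimental.constrained.lround``, there is no rounding mode
+operand; halfway cases are always rounded away from zero.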
+
+
'``llvm.experimental.constrained.trunc``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
STRICT_FEXP, STRICT_FEXP2, STRICT_FLOG, STRICT_FLOG10, STRICT_FLOG2,
STRICT_FRINT, STRICT_FNEARBYINT, STRICT_FMAXNUM, STRICT_FMINNUM,
STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND, STRICT_FTRUNC,
+ STRICT_LROUND, STRICT_LLROUND, STRICT_LRINT, STRICT_LLRINT,
/// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or
/// unsigned integer. These have the same semantics as fptosi and fptoui
case ISD::STRICT_FLOG:
case ISD::STRICT_FLOG10:
case ISD::STRICT_FLOG2:
+ case ISD::STRICT_LRINT:
+ case ISD::STRICT_LLRINT:
case ISD::STRICT_FRINT:
case ISD::STRICT_FNEARBYINT:
case ISD::STRICT_FMAXNUM:
case ISD::STRICT_FMINNUM:
case ISD::STRICT_FCEIL:
case ISD::STRICT_FFLOOR:
+ case ISD::STRICT_LROUND:
+ case ISD::STRICT_LLROUND:
case ISD::STRICT_FROUND:
case ISD::STRICT_FTRUNC:
case ISD::STRICT_FP_TO_SINT:
case ISD::STRICT_FLOG: EqOpc = ISD::FLOG; break;
case ISD::STRICT_FLOG10: EqOpc = ISD::FLOG10; break;
case ISD::STRICT_FLOG2: EqOpc = ISD::FLOG2; break;
+ case ISD::STRICT_LRINT: EqOpc = ISD::LRINT; break;
+ case ISD::STRICT_LLRINT: EqOpc = ISD::LLRINT; break;
case ISD::STRICT_FRINT: EqOpc = ISD::FRINT; break;
case ISD::STRICT_FNEARBYINT: EqOpc = ISD::FNEARBYINT; break;
case ISD::STRICT_FMAXNUM: EqOpc = ISD::FMAXNUM; break;
case ISD::STRICT_FMINNUM: EqOpc = ISD::FMINNUM; break;
case ISD::STRICT_FCEIL: EqOpc = ISD::FCEIL; break;
case ISD::STRICT_FFLOOR: EqOpc = ISD::FFLOOR; break;
+ case ISD::STRICT_LROUND: EqOpc = ISD::LROUND; break;
+ case ISD::STRICT_LLROUND: EqOpc = ISD::LLROUND; break;
case ISD::STRICT_FROUND: EqOpc = ISD::FROUND; break;
case ISD::STRICT_FTRUNC: EqOpc = ISD::FTRUNC; break;
case ISD::STRICT_FP_TO_SINT: EqOpc = ISD::FP_TO_SINT; break;
case Intrinsic::experimental_constrained_log:
case Intrinsic::experimental_constrained_log10:
case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_maxnum:
case Intrinsic::experimental_constrained_minnum:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_trunc:
return true;
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_lrint : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_llrint : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
def int_experimental_constrained_maxnum : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
LLVMMatchType<0>,
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_lround : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty ]>;
+ def int_experimental_constrained_llround : Intrinsic<[ llvm_anyint_ty ],
+ [ llvm_anyfloat_ty,
+ llvm_metadata_ty ]>;
def int_experimental_constrained_round : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
llvm_metadata_ty,
SDTFPUnaryOp, [SDNPHasChain]>;
def strict_frint : SDNode<"ISD::STRICT_FRINT",
SDTFPUnaryOp, [SDNPHasChain]>;
+def strict_lrint : SDNode<"ISD::STRICT_LRINT",
+ SDTFPToIntOp, [SDNPHasChain]>;
+def strict_llrint : SDNode<"ISD::STRICT_LLRINT",
+ SDTFPToIntOp, [SDNPHasChain]>;
def strict_fnearbyint : SDNode<"ISD::STRICT_FNEARBYINT",
SDTFPUnaryOp, [SDNPHasChain]>;
def strict_fceil : SDNode<"ISD::STRICT_FCEIL",
SDTFPUnaryOp, [SDNPHasChain]>;
def strict_ffloor : SDNode<"ISD::STRICT_FFLOOR",
SDTFPUnaryOp, [SDNPHasChain]>;
+def strict_lround : SDNode<"ISD::STRICT_LROUND",
+ SDTFPToIntOp, [SDNPHasChain]>;
+def strict_llround : SDNode<"ISD::STRICT_LLROUND",
+ SDTFPToIntOp, [SDNPHasChain]>;
def strict_fround : SDNode<"ISD::STRICT_FROUND",
SDTFPUnaryOp, [SDNPHasChain]>;
def strict_ftrunc : SDNode<"ISD::STRICT_FTRUNC",
def any_frint : PatFrags<(ops node:$src),
[(strict_frint node:$src),
(frint node:$src)]>;
+def any_lrint : PatFrags<(ops node:$src),
+ [(strict_lrint node:$src),
+ (lrint node:$src)]>;
+def any_llrint : PatFrags<(ops node:$src),
+ [(strict_llrint node:$src),
+ (llrint node:$src)]>;
def any_fnearbyint : PatFrags<(ops node:$src),
[(strict_fnearbyint node:$src),
(fnearbyint node:$src)]>;
def any_ffloor : PatFrags<(ops node:$src),
[(strict_ffloor node:$src),
(ffloor node:$src)]>;
+def any_lround : PatFrags<(ops node:$src),
+ [(strict_lround node:$src),
+ (lround node:$src)]>;
+def any_llround : PatFrags<(ops node:$src),
+ [(strict_llround node:$src),
+ (llround node:$src)]>;
def any_fround : PatFrags<(ops node:$src),
[(strict_fround node:$src),
(fround node:$src)]>;
return;
}
break;
+ case ISD::STRICT_LRINT:
+ case ISD::STRICT_LLRINT:
+ case ISD::STRICT_LROUND:
+ case ISD::STRICT_LLROUND:
+ // These pseudo-ops are the same as the other STRICT_ ops except
+ // they are registered with setOperationAction() using the input type
+ // instead of the output type.
+ Action = TLI.getStrictFPOperationAction(Node->getOpcode(),
+ Node->getOperand(1).getValueType());
+ break;
case ISD::SADDSAT:
case ISD::UADDSAT:
case ISD::SSUBSAT:
RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_F128,
RTLIB::Libcall Call_PPCF128) {
+ if (Node->isStrictFPOpcode())
+ Node = DAG.mutateStrictFPToFP(Node);
+
RTLIB::Libcall LC;
switch (Node->getOperand(0).getValueType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
return true;
}
break;
- case ISD::LROUND:
- Results.push_back(ExpandArgFPLibCall(Node, RTLIB::LROUND_F32,
- RTLIB::LROUND_F64, RTLIB::LROUND_F80,
- RTLIB::LROUND_F128,
- RTLIB::LROUND_PPCF128));
- break;
- case ISD::LLROUND:
- Results.push_back(ExpandArgFPLibCall(Node, RTLIB::LLROUND_F32,
- RTLIB::LLROUND_F64, RTLIB::LLROUND_F80,
- RTLIB::LLROUND_F128,
- RTLIB::LLROUND_PPCF128));
- break;
- case ISD::LRINT:
- Results.push_back(ExpandArgFPLibCall(Node, RTLIB::LRINT_F32,
- RTLIB::LRINT_F64, RTLIB::LRINT_F80,
- RTLIB::LRINT_F128,
- RTLIB::LRINT_PPCF128));
- break;
- case ISD::LLRINT:
- Results.push_back(ExpandArgFPLibCall(Node, RTLIB::LLRINT_F32,
- RTLIB::LLRINT_F64, RTLIB::LLRINT_F80,
- RTLIB::LLRINT_F128,
- RTLIB::LLRINT_PPCF128));
- break;
case ISD::VAARG:
Results.push_back(DAG.expandVAArg(Node));
Results.push_back(Results[0].getValue(1));
// the "strict" properties. For now, we just fall back to the non-strict
// version if that is legal on the target. The actual mutation of the
// operation will happen in SelectionDAGISel::DoInstructionSelection.
- if (TLI.getStrictFPOperationAction(Node->getOpcode(),
- Node->getValueType(0))
- == TargetLowering::Legal)
- return true;
+ switch (Node->getOpcode()) {
+ default:
+ if (TLI.getStrictFPOperationAction(Node->getOpcode(),
+ Node->getValueType(0))
+ == TargetLowering::Legal)
+ return true;
+ break;
+ case ISD::STRICT_LRINT:
+ case ISD::STRICT_LLRINT:
+ case ISD::STRICT_LROUND:
+ case ISD::STRICT_LLROUND:
+ // These are registered by the operand type instead of the value
+ // type. Reflect that here.
+ if (TLI.getStrictFPOperationAction(Node->getOpcode(),
+ Node->getOperand(1).getValueType())
+ == TargetLowering::Legal)
+ return true;
+ break;
+ }
}
// Replace the original node with the legalized result.
RTLIB::POW_F80, RTLIB::POW_F128,
RTLIB::POW_PPCF128));
break;
+ case ISD::LROUND:
+ case ISD::STRICT_LROUND:
+ Results.push_back(ExpandArgFPLibCall(Node, RTLIB::LROUND_F32,
+ RTLIB::LROUND_F64, RTLIB::LROUND_F80,
+ RTLIB::LROUND_F128,
+ RTLIB::LROUND_PPCF128));
+ break;
+ case ISD::LLROUND:
+ case ISD::STRICT_LLROUND:
+ Results.push_back(ExpandArgFPLibCall(Node, RTLIB::LLROUND_F32,
+ RTLIB::LLROUND_F64, RTLIB::LLROUND_F80,
+ RTLIB::LLROUND_F128,
+ RTLIB::LLROUND_PPCF128));
+ break;
+ case ISD::LRINT:
+ case ISD::STRICT_LRINT:
+ Results.push_back(ExpandArgFPLibCall(Node, RTLIB::LRINT_F32,
+ RTLIB::LRINT_F64, RTLIB::LRINT_F80,
+ RTLIB::LRINT_F128,
+ RTLIB::LRINT_PPCF128));
+ break;
+ case ISD::LLRINT:
+ case ISD::STRICT_LLRINT:
+ Results.push_back(ExpandArgFPLibCall(Node, RTLIB::LLRINT_F32,
+ RTLIB::LLRINT_F64, RTLIB::LLRINT_F80,
+ RTLIB::LLRINT_F128,
+ RTLIB::LLRINT_PPCF128));
+ break;
case ISD::FDIV:
Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
RTLIB::DIV_F80, RTLIB::DIV_F128,
case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; break;
case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; break;
case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; break;
+ case ISD::STRICT_LRINT: NewOpc = ISD::LRINT; break;
+ case ISD::STRICT_LLRINT: NewOpc = ISD::LLRINT; break;
case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; break;
case ISD::STRICT_FNEARBYINT: NewOpc = ISD::FNEARBYINT; break;
case ISD::STRICT_FMAXNUM: NewOpc = ISD::FMAXNUM; break;
case ISD::STRICT_FMINNUM: NewOpc = ISD::FMINNUM; break;
case ISD::STRICT_FCEIL: NewOpc = ISD::FCEIL; break;
case ISD::STRICT_FFLOOR: NewOpc = ISD::FFLOOR; break;
+ case ISD::STRICT_LROUND: NewOpc = ISD::LROUND; break;
+ case ISD::STRICT_LLROUND: NewOpc = ISD::LLROUND; break;
case ISD::STRICT_FROUND: NewOpc = ISD::FROUND; break;
case ISD::STRICT_FTRUNC: NewOpc = ISD::FTRUNC; break;
case ISD::STRICT_FP_ROUND: NewOpc = ISD::FP_ROUND; break;
case Intrinsic::experimental_constrained_log:
case Intrinsic::experimental_constrained_log10:
case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_maxnum:
case Intrinsic::experimental_constrained_minnum:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_trunc:
visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
case Intrinsic::experimental_constrained_log2:
Opcode = ISD::STRICT_FLOG2;
break;
+ case Intrinsic::experimental_constrained_lrint:
+ Opcode = ISD::STRICT_LRINT;
+ break;
+ case Intrinsic::experimental_constrained_llrint:
+ Opcode = ISD::STRICT_LLRINT;
+ break;
case Intrinsic::experimental_constrained_rint:
Opcode = ISD::STRICT_FRINT;
break;
case Intrinsic::experimental_constrained_floor:
Opcode = ISD::STRICT_FFLOOR;
break;
+ case Intrinsic::experimental_constrained_lround:
+ Opcode = ISD::STRICT_LROUND;
+ break;
+ case Intrinsic::experimental_constrained_llround:
+ Opcode = ISD::STRICT_LLROUND;
+ break;
case Intrinsic::experimental_constrained_round:
Opcode = ISD::STRICT_FROUND;
break;
case ISD::FP16_TO_FP: return "fp16_to_fp";
case ISD::FP_TO_FP16: return "fp_to_fp16";
case ISD::LROUND: return "lround";
+ case ISD::STRICT_LROUND: return "strict_lround";
case ISD::LLROUND: return "llround";
+ case ISD::STRICT_LLROUND: return "strict_llround";
case ISD::LRINT: return "lrint";
+ case ISD::STRICT_LRINT: return "strict_lrint";
case ISD::LLRINT: return "llrint";
+ case ISD::STRICT_LLRINT: return "strict_llrint";
// Control flow instructions
case ISD::BR: return "br";
setOperationAction(ISD::STRICT_FLOG, VT, Expand);
setOperationAction(ISD::STRICT_FLOG10, VT, Expand);
setOperationAction(ISD::STRICT_FLOG2, VT, Expand);
+ setOperationAction(ISD::STRICT_LRINT, VT, Expand);
+ setOperationAction(ISD::STRICT_LLRINT, VT, Expand);
setOperationAction(ISD::STRICT_FRINT, VT, Expand);
setOperationAction(ISD::STRICT_FNEARBYINT, VT, Expand);
setOperationAction(ISD::STRICT_FCEIL, VT, Expand);
setOperationAction(ISD::STRICT_FFLOOR, VT, Expand);
+ setOperationAction(ISD::STRICT_LROUND, VT, Expand);
+ setOperationAction(ISD::STRICT_LLROUND, VT, Expand);
setOperationAction(ISD::STRICT_FROUND, VT, Expand);
setOperationAction(ISD::STRICT_FTRUNC, VT, Expand);
setOperationAction(ISD::STRICT_FMAXNUM, VT, Expand);
case Intrinsic::experimental_constrained_log:
case Intrinsic::experimental_constrained_log10:
case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_trunc:
return true;
case Intrinsic::experimental_constrained_log:
case Intrinsic::experimental_constrained_log10:
case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_maxnum:
case Intrinsic::experimental_constrained_minnum:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_trunc:
visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
HasRoundingMD = true;
break;
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint: {
+ Assert((NumOperands == 3), "invalid arguments for constrained FP intrinsic",
+ &FPI);
+ Type *ValTy = FPI.getArgOperand(0)->getType();
+ Type *ResultTy = FPI.getType();
+ Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
+ "Intrinsic does not support vectors", &FPI);
+ HasExceptionMD = true;
+ HasRoundingMD = true;
+ }
+ break;
+
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround: {
+ Assert((NumOperands == 2), "invalid arguments for constrained FP intrinsic",
+ &FPI);
+ Type *ValTy = FPI.getArgOperand(0)->getType();
+ Type *ResultTy = FPI.getType();
+ Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
+ "Intrinsic does not support vectors", &FPI);
+ HasExceptionMD = true;
+ break;
+ }
+
case Intrinsic::experimental_constrained_fma:
Assert((NumOperands == 5), "invalid arguments for constrained FP intrinsic",
&FPI);
ret double %result
}
+; CHECK-LABEL: f23
+; COMMON: jmp lrint
+define i32 @f23(double %x) #0 {
+entry:
+ %result = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; CHECK-LABEL: f24
+; COMMON: jmp lrintf
+define i32 @f24(float %x) #0 {
+entry:
+ %result = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; CHECK-LABEL: f25
+; COMMON: jmp llrint
+define i64 @f25(double %x) #0 {
+entry:
+ %result = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
+; CHECK-LABEL: f26
+; COMMON: jmp llrintf
+define i64 @f26(float %x) #0 {
+entry:
+ %result = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
+; CHECK-LABEL: f27
+; COMMON: jmp lround
+define i32 @f27(double %x) #0 {
+entry:
+ %result = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x,
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; CHECK-LABEL: f28
+; COMMON: jmp lroundf
+define i32 @f28(float %x) #0 {
+entry:
+ %result = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x,
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; CHECK-LABEL: f29
+; COMMON: jmp llround
+define i64 @f29(double %x) #0 {
+entry:
+ %result = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x,
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
+; CHECK-LABEL: f30
+; COMMON: jmp llroundf
+define i64 @f30(float %x) #0 {
+entry:
+ %result = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x,
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
attributes #0 = { strictfp }
@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
ret double %result
}
+; Verify that lrint(42.1) isn't simplified when the rounding mode is unknown.
+; CHECK-LABEL: f22
+; CHECK: call i32 @llvm.experimental.constrained.lrint
+define i32 @f22() #0 {
+entry:
+ %result = call i32 @llvm.experimental.constrained.lrint.i32.f64(double 42.1,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; Verify that lrintf(42.0) isn't simplified when the rounding mode is unknown.
+; CHECK-LABEL: f23
+; CHECK: call i32 @llvm.experimental.constrained.lrint
+define i32 @f23() #0 {
+entry:
+ %result = call i32 @llvm.experimental.constrained.lrint.i32.f32(float 42.0,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; Verify that llrint(42.1) isn't simplified when the rounding mode is unknown.
+; CHECK-LABEL: f24
+; CHECK: call i64 @llvm.experimental.constrained.llrint
+define i64 @f24() #0 {
+entry:
+ %result = call i64 @llvm.experimental.constrained.llrint.i64.f64(double 42.1,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
+; Verify that llrintf(42.0) isn't simplified when the rounding mode is unknown.
+; CHECK-LABEL: f25
+; CHECK: call i64 @llvm.experimental.constrained.llrint
+define i64 @f25() #0 {
+entry:
+ %result = call i64 @llvm.experimental.constrained.llrint.i64.f32(float 42.0,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
+; Verify that lround(42.1) isn't simplified when the exception behavior is strict.
+; CHECK-LABEL: f26
+; CHECK: call i32 @llvm.experimental.constrained.lround
+define i32 @f26() #0 {
+entry:
+ %result = call i32 @llvm.experimental.constrained.lround.i32.f64(double 42.1,
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; Verify that lroundf(42.0) isn't simplified when the exception behavior is strict.
+; CHECK-LABEL: f27
+; CHECK: call i32 @llvm.experimental.constrained.lround
+define i32 @f27() #0 {
+entry:
+ %result = call i32 @llvm.experimental.constrained.lround.i32.f32(float 42.0,
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; Verify that llround(42.1) isn't simplified when the exception behavior is strict.
+; CHECK-LABEL: f28
+; CHECK: call i64 @llvm.experimental.constrained.llround
+define i64 @f28() #0 {
+entry:
+ %result = call i64 @llvm.experimental.constrained.llround.i64.f64(double 42.1,
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
+; Verify that llroundf(42.0) isn't simplified when the exception behavior is strict.
+; CHECK-LABEL: f29
+; CHECK: call i64 @llvm.experimental.constrained.llround
+define i64 @f29() #0 {
+entry:
+ %result = call i64 @llvm.experimental.constrained.llround.i64.f32(float 42.0,
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
attributes #0 = { strictfp }
@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)