else if (Pred == CmpInst::FCMP_TRUE)
MIRBuilder.buildCopy(
Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
- else
- MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
+ else {
+ auto FCmp = MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
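+ // MachineInstr::copyIRFlags mirrors the fcmp's IR-level flags (notably its
+ // fast-math flags) onto the generic G_FCMP.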
+ FCmp->copyIRFlags(*CI);
+ }
return true;
}
ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
- for (unsigned i = 0; i < ResRegs.size(); ++i)
- MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
+ const SelectInst &SI = cast<SelectInst>(U);
+ const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition());
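+ // When the select's condition is a floating-point compare, carry its
+ // fast-math flags over to each G_SELECT built below.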
+ for (unsigned i = 0; i < ResRegs.size(); ++i) {
+ auto Select =
+ MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
+ if (Cmp && isa<FPMathOperator>(Cmp)) {
+ Select->copyIRFlags(*Cmp);
+ }
+ }
return true;
}
return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
case Intrinsic::smul_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
- case Intrinsic::pow:
- MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
+ case Intrinsic::pow: {
+ auto Pow = MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)))
.addUse(getOrCreateVReg(*CI.getArgOperand(1)));
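+ // As with the other FP intrinsics handled below, forward the call's
+ // fast-math flags to the generic opcode.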
+ Pow->copyIRFlags(CI);
return true;
- case Intrinsic::exp:
- MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
+ }
+ case Intrinsic::exp: {
+ auto Exp = MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+ Exp->copyIRFlags(CI);
return true;
- case Intrinsic::exp2:
- MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
+ }
+ case Intrinsic::exp2: {
+ auto Exp2 = MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+ Exp2->copyIRFlags(CI);
return true;
- case Intrinsic::log:
- MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
+ }
+ case Intrinsic::log: {
+ auto Log = MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+ Log->copyIRFlags(CI);
return true;
- case Intrinsic::log2:
- MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
+ }
+ case Intrinsic::log2: {
+ auto Log2 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+ Log2->copyIRFlags(CI);
return true;
- case Intrinsic::log10:
- MIRBuilder.buildInstr(TargetOpcode::G_FLOG10)
+ }
+ case Intrinsic::log10: {
+ auto Log10 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG10)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+ Log10->copyIRFlags(CI);
return true;
- case Intrinsic::fabs:
- MIRBuilder.buildInstr(TargetOpcode::G_FABS)
+ }
+ case Intrinsic::fabs: {
+ auto Fabs = MIRBuilder.buildInstr(TargetOpcode::G_FABS)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+ Fabs->copyIRFlags(CI);
return true;
+ }
case Intrinsic::trunc:
MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
return true;
- case Intrinsic::fma:
- MIRBuilder.buildInstr(TargetOpcode::G_FMA)
+ case Intrinsic::fma: {
+ auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA)
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)))
.addUse(getOrCreateVReg(*CI.getArgOperand(1)))
.addUse(getOrCreateVReg(*CI.getArgOperand(2)));
+ FMA->copyIRFlags(CI);
return true;
+ }
case Intrinsic::fmuladd: {
const TargetMachine &TM = MF->getTarget();
const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
unsigned Dst = getOrCreateVReg(CI);
unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
    TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
// TODO: Revisit this to see if we should move this part of the
// lowering to the combiner.
- MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2});
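+ // Copy the call's fast-math flags whether we emit the fused G_FMA or the
+ // separate G_FMUL/G_FADD expansion below.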
+ auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2});
+ FMA->copyIRFlags(CI);
} else {
LLT Ty = getLLTForType(*CI.getType(), *DL);
auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1});
- MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2});
+ FMul->copyIRFlags(CI);
+ auto FAdd = MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2});
+ FAdd->copyIRFlags(CI);
}
return true;
}
; CHECK: [[BOOLADDR:%[0-9]+]]:_(p0) = COPY $x2
; CHECK: [[LHS:%[0-9]+]]:_(s32) = G_LOAD [[LHSADDR]](p0)
; CHECK: [[RHS:%[0-9]+]]:_(s32) = G_LOAD [[RHSADDR]](p0)
-; CHECK: [[TST:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
+; CHECK: [[TST:%[0-9]+]]:_(s1) = nnan ninf nsz arcp contract afn reassoc G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
; CHECK: G_STORE [[TST]](s1), [[BOOLADDR]](p0)
define void @float_comparison(float* %a.addr, float* %b.addr, i1* %bool.addr) {
%a = load float, float* %a.addr
%b = load float, float* %b.addr
- %res = fcmp oge float %a, %b
+ %res = fcmp nnan ninf nsz arcp contract afn reassoc oge float %a, %b
store i1 %res, i1* %bool.addr
ret void
}
; CHECK-LABEL: name: test_pow_intrin
; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $s0
; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $s1
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FPOW [[LHS]], [[RHS]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FPOW [[LHS]], [[RHS]]
; CHECK: $s0 = COPY [[RES]]
- %res = call float @llvm.pow.f32(float %l, float %r)
+ %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.pow.f32(float %l, float %r)
ret float %res
}
; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $s1
; CHECK: [[C:%[0-9]+]]:_(s32) = COPY $s2
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FMA [[A]], [[B]], [[C]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FMA [[A]], [[B]], [[C]]
; CHECK: $s0 = COPY [[RES]]
- %res = call float @llvm.fma.f32(float %a, float %b, float %c)
+ %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.fma.f32(float %a, float %b, float %c)
ret float %res
}
define float @test_exp_intrin(float %a) {
; CHECK-LABEL: name: test_exp_intrin
; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FEXP [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FEXP [[A]]
; CHECK: $s0 = COPY [[RES]]
- %res = call float @llvm.exp.f32(float %a)
+ %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.exp.f32(float %a)
ret float %res
}
define float @test_exp2_intrin(float %a) {
; CHECK-LABEL: name: test_exp2_intrin
; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FEXP2 [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FEXP2 [[A]]
; CHECK: $s0 = COPY [[RES]]
- %res = call float @llvm.exp2.f32(float %a)
+ %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.exp2.f32(float %a)
ret float %res
}
define float @test_log_intrin(float %a) {
; CHECK-LABEL: name: test_log_intrin
; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FLOG [[A]]
; CHECK: $s0 = COPY [[RES]]
- %res = call float @llvm.log.f32(float %a)
+ %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.log.f32(float %a)
ret float %res
}
define float @test_log10_intrin(float %a) {
; CHECK-LABEL: name: test_log10_intrin
; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG10 [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FLOG10 [[A]]
; CHECK: $s0 = COPY [[RES]]
- %res = call float @llvm.log10.f32(float %a)
+ %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.log10.f32(float %a)
ret float %res
}
define float @test_fabs_intrin(float %a) {
; CHECK-LABEL: name: test_fabs_intrin
; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
-; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FABS [[A]]
+; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FABS [[A]]
; CHECK: $s0 = COPY [[RES]]
- %res = call float @llvm.fabs.f32(float %a)
+ %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.fabs.f32(float %a)
ret float %res
}