LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
LegalizeResult lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult lowerFPTOUI(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult lowerMinMax(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
return lowerUITOFP(MI, TypeIdx, Ty);
case G_SITOFP:
return lowerSITOFP(MI, TypeIdx, Ty);
+ case G_FPTOUI:
+ return lowerFPTOUI(MI, TypeIdx, Ty);
case G_SMIN:
case G_SMAX:
case G_UMIN:
return UnableToLegalize;
}
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFPTOUI(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+ const LLT S64 = LLT::scalar(64);
+ const LLT S32 = LLT::scalar(32);
+
+ if (SrcTy != S64 && SrcTy != S32)
+ return UnableToLegalize;
+ if (DstTy != S32 && DstTy != S64)
+ return UnableToLegalize;
+
+ // FPTOSI gives the same result as FPTOUI for fp values that convert to a
+ // positive signed integer. FPTOUI additionally has to handle fp values that
+ // convert to unsigned integers greater than or equal to 2^31 for an s32
+ // destination or 2^63 for an s64 destination. For brevity, call this
+ // threshold 2^Exp.
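+ //
+ // In pseudocode, the sequence built below computes:
+ //
+ //   Dst = (Src ult 2^Exp) ? fptosi(Src)
+ //                         : fptosi(Src - 2^Exp) ^ SignMask  // i.e. + 2^Exp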
+
+ APInt TwoPExpInt = APInt::getSignMask(DstTy.getSizeInBits());
+ APFloat TwoPExpFP(SrcTy.getSizeInBits() == 32 ? APFloat::IEEEsingle()
+ : APFloat::IEEEdouble(),
+ APInt::getNullValue(SrcTy.getSizeInBits()));
+ TwoPExpFP.convertFromAPInt(TwoPExpInt, false, APFloat::rmNearestTiesToEven);
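+ // TwoPExpFP now holds 2^Exp as a constant of the source fp type; it is used
+ // as the comparison threshold below.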
+
+ MachineInstrBuilder FPTOSI = MIRBuilder.buildFPTOSI(DstTy, Src);
+
+ MachineInstrBuilder Threshold = MIRBuilder.buildFConstant(SrcTy, TwoPExpFP);
+ // For fp values greater than or equal to the Threshold (2^Exp), we use FPTOSI
+ // on (Value - 2^Exp) and add 2^Exp back by setting the highest bit of the
+ // result to 1.
+ MachineInstrBuilder FSub = MIRBuilder.buildFSub(SrcTy, Src, Threshold);
+ MachineInstrBuilder ResLowBits = MIRBuilder.buildFPTOSI(DstTy, FSub);
+ MachineInstrBuilder ResHighBit = MIRBuilder.buildConstant(DstTy, TwoPExpInt);
+ MachineInstrBuilder Res = MIRBuilder.buildXor(DstTy, ResLowBits, ResHighBit);
+
+ MachineInstrBuilder FCMP =
+ MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, DstTy, Src, Threshold);
+ MIRBuilder.buildSelect(Dst, FCMP, FPTOSI, Res);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
static CmpInst::Predicate minMaxToCompare(unsigned Opc) {
switch (Opc) {
case TargetOpcode::G_SMIN:
getActionDefinitionsBuilder(G_FPTOUI)
.libcallForCartesianProduct({s64}, {s64, s32})
+ .lowerForCartesianProduct({s32}, {s64, s32})
.minScalar(0, s32);
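+ // Note: with these rules, s64 results still go through a libcall, s32 results
+ // (from either s32 or s64 sources) use the G_FPTOUI lowering in
+ // LegalizerHelper, and narrower results are first widened to s32.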
// Int to FP conversion instructions
define void @f64toi16() {entry: ret void}
define void @f64toi8() {entry: ret void}
define void @f32tou64() {entry: ret void}
+ define void @f32tou32() {entry: ret void}
+ define void @f32tou16() {entry: ret void}
+ define void @f32tou8() {entry: ret void}
define void @f64tou64() {entry: ret void}
+ define void @f64tou32() {entry: ret void}
+ define void @f64tou16() {entry: ret void}
+ define void @f64tou8() {entry: ret void}
...
---
$v1 = COPY %3(s32)
RetRA implicit $v0, implicit $v1
+...
+---
+name: f32tou32
+alignment: 2
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $f12
+
+ ; FP32-LABEL: name: f32tou32
+ ; FP32: liveins: $f12
+ ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $f12
+ ; FP32: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
+ ; FP32: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x41E0000000000000
+ ; FP32: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C]]
+ ; FP32: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s32)
+ ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP32: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s32), [[C]]
+ ; FP32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP32: $v0 = COPY [[SELECT]](s32)
+ ; FP32: RetRA implicit $v0
+ ; FP64-LABEL: name: f32tou32
+ ; FP64: liveins: $f12
+ ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $f12
+ ; FP64: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
+ ; FP64: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x41E0000000000000
+ ; FP64: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C]]
+ ; FP64: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s32)
+ ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP64: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP64: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s32), [[C]]
+ ; FP64: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP64: $v0 = COPY [[SELECT]](s32)
+ ; FP64: RetRA implicit $v0
+ %0:_(s32) = COPY $f12
+ %1:_(s32) = G_FPTOUI %0(s32)
+ $v0 = COPY %1(s32)
+ RetRA implicit $v0
+
+...
+---
+name: f32tou16
+alignment: 2
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $f12
+
+ ; FP32-LABEL: name: f32tou16
+ ; FP32: liveins: $f12
+ ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $f12
+ ; FP32: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
+ ; FP32: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x41E0000000000000
+ ; FP32: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C]]
+ ; FP32: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s32)
+ ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP32: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s32), [[C]]
+ ; FP32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; FP32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+ ; FP32: $v0 = COPY [[AND]](s32)
+ ; FP32: RetRA implicit $v0
+ ; FP64-LABEL: name: f32tou16
+ ; FP64: liveins: $f12
+ ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $f12
+ ; FP64: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
+ ; FP64: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x41E0000000000000
+ ; FP64: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C]]
+ ; FP64: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s32)
+ ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP64: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP64: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s32), [[C]]
+ ; FP64: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP64: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; FP64: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+ ; FP64: $v0 = COPY [[AND]](s32)
+ ; FP64: RetRA implicit $v0
+ %0:_(s32) = COPY $f12
+ %1:_(s16) = G_FPTOUI %0(s32)
+ %2:_(s32) = G_ZEXT %1(s16)
+ $v0 = COPY %2(s32)
+ RetRA implicit $v0
+
+...
+---
+name: f32tou8
+alignment: 2
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $f12
+
+ ; FP32-LABEL: name: f32tou8
+ ; FP32: liveins: $f12
+ ; FP32: [[COPY:%[0-9]+]]:_(s32) = COPY $f12
+ ; FP32: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
+ ; FP32: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x41E0000000000000
+ ; FP32: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C]]
+ ; FP32: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s32)
+ ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP32: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s32), [[C]]
+ ; FP32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; FP32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+ ; FP32: $v0 = COPY [[AND]](s32)
+ ; FP32: RetRA implicit $v0
+ ; FP64-LABEL: name: f32tou8
+ ; FP64: liveins: $f12
+ ; FP64: [[COPY:%[0-9]+]]:_(s32) = COPY $f12
+ ; FP64: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
+ ; FP64: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x41E0000000000000
+ ; FP64: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C]]
+ ; FP64: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s32)
+ ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP64: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP64: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s32), [[C]]
+ ; FP64: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP64: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; FP64: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+ ; FP64: $v0 = COPY [[AND]](s32)
+ ; FP64: RetRA implicit $v0
+ %0:_(s32) = COPY $f12
+ %1:_(s8) = G_FPTOUI %0(s32)
+ %2:_(s32) = G_ZEXT %1(s8)
+ $v0 = COPY %2(s32)
+ RetRA implicit $v0
+
...
---
name: f64tou64
RetRA implicit $v0, implicit $v1
...
+---
+name: f64tou32
+alignment: 2
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $d6
+
+ ; FP32-LABEL: name: f64tou32
+ ; FP32: liveins: $d6
+ ; FP32: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; FP32: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
+ ; FP32: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x41E0000000000000
+ ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[C]]
+ ; FP32: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s64)
+ ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP32: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s64), [[C]]
+ ; FP32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP32: $v0 = COPY [[SELECT]](s32)
+ ; FP32: RetRA implicit $v0
+ ; FP64-LABEL: name: f64tou32
+ ; FP64: liveins: $d6
+ ; FP64: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; FP64: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
+ ; FP64: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x41E0000000000000
+ ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[C]]
+ ; FP64: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s64)
+ ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP64: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP64: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s64), [[C]]
+ ; FP64: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP64: $v0 = COPY [[SELECT]](s32)
+ ; FP64: RetRA implicit $v0
+ %0:_(s64) = COPY $d6
+ %1:_(s32) = G_FPTOUI %0(s64)
+ $v0 = COPY %1(s32)
+ RetRA implicit $v0
+
+...
+---
+name: f64tou16
+alignment: 2
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $d6
+
+ ; FP32-LABEL: name: f64tou16
+ ; FP32: liveins: $d6
+ ; FP32: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; FP32: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
+ ; FP32: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x41E0000000000000
+ ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[C]]
+ ; FP32: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s64)
+ ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP32: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s64), [[C]]
+ ; FP32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; FP32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+ ; FP32: $v0 = COPY [[AND]](s32)
+ ; FP32: RetRA implicit $v0
+ ; FP64-LABEL: name: f64tou16
+ ; FP64: liveins: $d6
+ ; FP64: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; FP64: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
+ ; FP64: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x41E0000000000000
+ ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[C]]
+ ; FP64: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s64)
+ ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP64: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP64: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s64), [[C]]
+ ; FP64: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP64: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; FP64: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+ ; FP64: $v0 = COPY [[AND]](s32)
+ ; FP64: RetRA implicit $v0
+ %0:_(s64) = COPY $d6
+ %1:_(s16) = G_FPTOUI %0(s64)
+ %2:_(s32) = G_ZEXT %1(s16)
+ $v0 = COPY %2(s32)
+ RetRA implicit $v0
+
+...
+---
+name: f64tou8
+alignment: 2
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $d6
+
+ ; FP32-LABEL: name: f64tou8
+ ; FP32: liveins: $d6
+ ; FP32: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; FP32: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
+ ; FP32: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x41E0000000000000
+ ; FP32: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[C]]
+ ; FP32: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s64)
+ ; FP32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP32: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP32: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s64), [[C]]
+ ; FP32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; FP32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; FP32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+ ; FP32: $v0 = COPY [[AND]](s32)
+ ; FP32: RetRA implicit $v0
+ ; FP64-LABEL: name: f64tou8
+ ; FP64: liveins: $d6
+ ; FP64: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; FP64: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
+ ; FP64: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x41E0000000000000
+ ; FP64: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[C]]
+ ; FP64: [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[FSUB]](s64)
+ ; FP64: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; FP64: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[FPTOSI1]], [[C1]]
+ ; FP64: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ult), [[COPY]](s64), [[C]]
+ ; FP64: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s32), [[FPTOSI]], [[XOR]]
+ ; FP64: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; FP64: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; FP64: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+ ; FP64: $v0 = COPY [[AND]](s32)
+ ; FP64: RetRA implicit $v0
+ %0:_(s64) = COPY $d6
+ %1:_(s8) = G_FPTOUI %0(s64)
+ %2:_(s32) = G_ZEXT %1(s8)
+ $v0 = COPY %2(s32)
+ RetRA implicit $v0
+
+...
ret i64 %conv
}
+define i32 @f32tou32(float %a) {
+; MIPS32-LABEL: f32tou32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: trunc.w.s $f0, $f12
+; MIPS32-NEXT: mfc1 $1, $f0
+; MIPS32-NEXT: lui $2, 20224
+; MIPS32-NEXT: mtc1 $2, $f0
+; MIPS32-NEXT: sub.s $f1, $f12, $f0
+; MIPS32-NEXT: trunc.w.s $f1, $f1
+; MIPS32-NEXT: mfc1 $2, $f1
+; MIPS32-NEXT: lui $3, 32768
+; MIPS32-NEXT: xor $2, $2, $3
+; MIPS32-NEXT: addiu $3, $zero, 1
+; MIPS32-NEXT: c.ult.s $f12, $f0
+; MIPS32-NEXT: movf $3, $zero, $fcc0
+; MIPS32-NEXT: movn $2, $1, $3
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+entry:
+ %conv = fptoui float %a to i32
+ ret i32 %conv
+}
+
+define zeroext i16 @f32tou16(float %a) {
+; MIPS32-LABEL: f32tou16:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: trunc.w.s $f0, $f12
+; MIPS32-NEXT: mfc1 $1, $f0
+; MIPS32-NEXT: lui $2, 20224
+; MIPS32-NEXT: mtc1 $2, $f0
+; MIPS32-NEXT: sub.s $f1, $f12, $f0
+; MIPS32-NEXT: trunc.w.s $f1, $f1
+; MIPS32-NEXT: mfc1 $2, $f1
+; MIPS32-NEXT: lui $3, 32768
+; MIPS32-NEXT: xor $2, $2, $3
+; MIPS32-NEXT: addiu $3, $zero, 1
+; MIPS32-NEXT: c.ult.s $f12, $f0
+; MIPS32-NEXT: movf $3, $zero, $fcc0
+; MIPS32-NEXT: movn $2, $1, $3
+; MIPS32-NEXT: ori $1, $zero, 65535
+; MIPS32-NEXT: and $2, $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+entry:
+ %conv = fptoui float %a to i16
+ ret i16 %conv
+}
+
+define zeroext i8 @f32tou8(float %a) {
+; MIPS32-LABEL: f32tou8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: trunc.w.s $f0, $f12
+; MIPS32-NEXT: mfc1 $1, $f0
+; MIPS32-NEXT: lui $2, 20224
+; MIPS32-NEXT: mtc1 $2, $f0
+; MIPS32-NEXT: sub.s $f1, $f12, $f0
+; MIPS32-NEXT: trunc.w.s $f1, $f1
+; MIPS32-NEXT: mfc1 $2, $f1
+; MIPS32-NEXT: lui $3, 32768
+; MIPS32-NEXT: xor $2, $2, $3
+; MIPS32-NEXT: addiu $3, $zero, 1
+; MIPS32-NEXT: c.ult.s $f12, $f0
+; MIPS32-NEXT: movf $3, $zero, $fcc0
+; MIPS32-NEXT: movn $2, $1, $3
+; MIPS32-NEXT: ori $1, $zero, 255
+; MIPS32-NEXT: and $2, $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+entry:
+ %conv = fptoui float %a to i8
+ ret i8 %conv
+}
+
define i64 @f64tou64(double %a) {
; MIPS32-LABEL: f64tou64:
; MIPS32: # %bb.0: # %entry
%conv = fptoui double %a to i64
ret i64 %conv
}
+
+define i32 @f64tou32(double %a) {
+; FP32-LABEL: f64tou32:
+; FP32: # %bb.0: # %entry
+; FP32-NEXT: trunc.w.d $f0, $f12
+; FP32-NEXT: mfc1 $1, $f0
+; FP32-NEXT: lui $2, 16864
+; FP32-NEXT: ori $3, $zero, 0
+; FP32-NEXT: mtc1 $3, $f2
+; FP32-NEXT: mtc1 $2, $f3
+; FP32-NEXT: sub.d $f4, $f12, $f2
+; FP32-NEXT: trunc.w.d $f0, $f4
+; FP32-NEXT: mfc1 $2, $f0
+; FP32-NEXT: lui $3, 32768
+; FP32-NEXT: xor $2, $2, $3
+; FP32-NEXT: addiu $3, $zero, 1
+; FP32-NEXT: c.ult.d $f12, $f2
+; FP32-NEXT: movf $3, $zero, $fcc0
+; FP32-NEXT: movn $2, $1, $3
+; FP32-NEXT: jr $ra
+; FP32-NEXT: nop
+;
+; FP64-LABEL: f64tou32:
+; FP64: # %bb.0: # %entry
+; FP64-NEXT: trunc.w.d $f0, $f12
+; FP64-NEXT: mfc1 $1, $f0
+; FP64-NEXT: lui $2, 16864
+; FP64-NEXT: ori $3, $zero, 0
+; FP64-NEXT: mtc1 $3, $f1
+; FP64-NEXT: mthc1 $2, $f1
+; FP64-NEXT: sub.d $f2, $f12, $f1
+; FP64-NEXT: trunc.w.d $f0, $f2
+; FP64-NEXT: mfc1 $2, $f0
+; FP64-NEXT: lui $3, 32768
+; FP64-NEXT: xor $2, $2, $3
+; FP64-NEXT: addiu $3, $zero, 1
+; FP64-NEXT: c.ult.d $f12, $f1
+; FP64-NEXT: movf $3, $zero, $fcc0
+; FP64-NEXT: movn $2, $1, $3
+; FP64-NEXT: jr $ra
+; FP64-NEXT: nop
+entry:
+ %conv = fptoui double %a to i32
+ ret i32 %conv
+}
+
+define zeroext i16 @f64tou16(double %a) {
+; FP32-LABEL: f64tou16:
+; FP32: # %bb.0: # %entry
+; FP32-NEXT: trunc.w.d $f0, $f12
+; FP32-NEXT: mfc1 $1, $f0
+; FP32-NEXT: lui $2, 16864
+; FP32-NEXT: ori $3, $zero, 0
+; FP32-NEXT: mtc1 $3, $f2
+; FP32-NEXT: mtc1 $2, $f3
+; FP32-NEXT: sub.d $f4, $f12, $f2
+; FP32-NEXT: trunc.w.d $f0, $f4
+; FP32-NEXT: mfc1 $2, $f0
+; FP32-NEXT: lui $3, 32768
+; FP32-NEXT: xor $2, $2, $3
+; FP32-NEXT: addiu $3, $zero, 1
+; FP32-NEXT: c.ult.d $f12, $f2
+; FP32-NEXT: movf $3, $zero, $fcc0
+; FP32-NEXT: movn $2, $1, $3
+; FP32-NEXT: ori $1, $zero, 65535
+; FP32-NEXT: and $2, $2, $1
+; FP32-NEXT: jr $ra
+; FP32-NEXT: nop
+;
+; FP64-LABEL: f64tou16:
+; FP64: # %bb.0: # %entry
+; FP64-NEXT: trunc.w.d $f0, $f12
+; FP64-NEXT: mfc1 $1, $f0
+; FP64-NEXT: lui $2, 16864
+; FP64-NEXT: ori $3, $zero, 0
+; FP64-NEXT: mtc1 $3, $f1
+; FP64-NEXT: mthc1 $2, $f1
+; FP64-NEXT: sub.d $f2, $f12, $f1
+; FP64-NEXT: trunc.w.d $f0, $f2
+; FP64-NEXT: mfc1 $2, $f0
+; FP64-NEXT: lui $3, 32768
+; FP64-NEXT: xor $2, $2, $3
+; FP64-NEXT: addiu $3, $zero, 1
+; FP64-NEXT: c.ult.d $f12, $f1
+; FP64-NEXT: movf $3, $zero, $fcc0
+; FP64-NEXT: movn $2, $1, $3
+; FP64-NEXT: ori $1, $zero, 65535
+; FP64-NEXT: and $2, $2, $1
+; FP64-NEXT: jr $ra
+; FP64-NEXT: nop
+entry:
+ %conv = fptoui double %a to i16
+ ret i16 %conv
+}
+
+define zeroext i8 @f64tou8(double %a) {
+; FP32-LABEL: f64tou8:
+; FP32: # %bb.0: # %entry
+; FP32-NEXT: trunc.w.d $f0, $f12
+; FP32-NEXT: mfc1 $1, $f0
+; FP32-NEXT: lui $2, 16864
+; FP32-NEXT: ori $3, $zero, 0
+; FP32-NEXT: mtc1 $3, $f2
+; FP32-NEXT: mtc1 $2, $f3
+; FP32-NEXT: sub.d $f4, $f12, $f2
+; FP32-NEXT: trunc.w.d $f0, $f4
+; FP32-NEXT: mfc1 $2, $f0
+; FP32-NEXT: lui $3, 32768
+; FP32-NEXT: xor $2, $2, $3
+; FP32-NEXT: addiu $3, $zero, 1
+; FP32-NEXT: c.ult.d $f12, $f2
+; FP32-NEXT: movf $3, $zero, $fcc0
+; FP32-NEXT: movn $2, $1, $3
+; FP32-NEXT: ori $1, $zero, 255
+; FP32-NEXT: and $2, $2, $1
+; FP32-NEXT: jr $ra
+; FP32-NEXT: nop
+;
+; FP64-LABEL: f64tou8:
+; FP64: # %bb.0: # %entry
+; FP64-NEXT: trunc.w.d $f0, $f12
+; FP64-NEXT: mfc1 $1, $f0
+; FP64-NEXT: lui $2, 16864
+; FP64-NEXT: ori $3, $zero, 0
+; FP64-NEXT: mtc1 $3, $f1
+; FP64-NEXT: mthc1 $2, $f1
+; FP64-NEXT: sub.d $f2, $f12, $f1
+; FP64-NEXT: trunc.w.d $f0, $f2
+; FP64-NEXT: mfc1 $2, $f0
+; FP64-NEXT: lui $3, 32768
+; FP64-NEXT: xor $2, $2, $3
+; FP64-NEXT: addiu $3, $zero, 1
+; FP64-NEXT: c.ult.d $f12, $f1
+; FP64-NEXT: movf $3, $zero, $fcc0
+; FP64-NEXT: movn $2, $1, $3
+; FP64-NEXT: ori $1, $zero, 255
+; FP64-NEXT: and $2, $2, $1
+; FP64-NEXT: jr $ra
+; FP64-NEXT: nop
+entry:
+ %conv = fptoui double %a to i8
+ ret i8 %conv
+}