It'll usually be immediately legalized back to a libcall, but occasionally
something can be done with it, so we may as well enable that flexibility from
the start.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294530 91177308-0d34-0410-b5e6-96231b3b80d8
let hasSideEffects = 0;
}
+// Floating point exponentiation.
+// Computes $dst = pow($src1, $src2). The result and both source operands
+// all share the same floating-point type (type0). Marked as having no
+// side effects so it is eligible for the usual generic-MI optimizations.
+def G_FPOW : Instruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
+ let hasSideEffects = 0;
+}
+
//------------------------------------------------------------------------------
// Memory ops
//------------------------------------------------------------------------------
/// Generic FP remainder.
HANDLE_TARGET_OPCODE(G_FREM)
+/// Generic FP exponentiation.
+HANDLE_TARGET_OPCODE(G_FPOW)
+
/// Generic FP extension.
/// NOTE(review): the original comment here read "Generic float to
/// signed-int conversion", which describes G_FPTOSI, not G_FPEXT —
/// corrected to match the opcode; confirm against the full opcode list.
HANDLE_TARGET_OPCODE(G_FPEXT)
return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
case Intrinsic::smul_with_overflow:
return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
+ case Intrinsic::pow:
+ MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
+ .addDef(getOrCreateVReg(CI))
+ .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
+ .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
+ return true;
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset:
call void @llvm.va_end(i8* %list)
ret void
}
+
+declare float @llvm.pow.f32(float, float)
+; Verify that a call to the llvm.pow intrinsic is translated by the IR
+; translator into the generic G_FPOW instruction operating on the two
+; copied (s32) arguments, with the result copied back to %s0.
+define float @test_pow_intrin(float %l, float %r) {
+; CHECK-LABEL: name: test_pow_intrin
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %s1
+; CHECK: [[RES:%[0-9]+]](s32) = G_FPOW [[LHS]], [[RHS]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.pow.f32(float %l, float %r)
+ ret float %res
+}