}
}
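+// Letting this return true for legal scalar types allows generic DAG combines
+// to turn integer and/xor on a bitcasted float back into fabs/fneg nodes,
+// which this target can then fold into free source modifiers.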
+bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
+  return isTypeLegal(VT.getScalarType());
+}
+
bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
// This currently forces unfolding various combinations of fsub into fma with
// free fneg'd operands. As long as we have fast FMA (controlled by
  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;
+
+  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool enableAggressiveFMAFusion(EVT VT) const override;
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;
ret void
}
-; GCN-LABEL: {{^}}fabs_fold:
+; FUNC-LABEL: {{^}}fabs_fold:
; SI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
; VI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
; GCN-NOT: and
ret void
}
+; Make sure we turn some integer operations back into fabs
+; FUNC-LABEL: {{^}}bitpreserve_fabs_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|, 1.0
+define amdgpu_kernel void @bitpreserve_fabs_f32(float addrspace(1)* %out, float %in) {
+  %in.bc = bitcast float %in to i32
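+  ; 0x7fffffff: clearing only the sign bit is the integer form of fabs.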
+  %int.abs = and i32 %in.bc, 2147483647
+  %bc = bitcast i32 %int.abs to float
+  %fadd = fadd float %bc, 1.0
+  store float %fadd, float addrspace(1)* %out
+  ret void
+}
+
declare float @fabs(float) readnone
declare float @llvm.fabs.f32(float) readnone
declare <2 x float> @llvm.fabs.v2f32(<2 x float>) readnone
store float %fmul, float addrspace(1)* %out
ret void
}
+
+; Make sure we turn some integer operations back into fneg
+; FUNC-LABEL: {{^}}bitpreserve_fneg_f32:
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -4.0
+define amdgpu_kernel void @bitpreserve_fneg_f32(float addrspace(1)* %out, float %in) {
+  %in.bc = bitcast float %in to i32
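+  ; 0x80000000: flipping only the sign bit is the integer form of fneg.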
+  %int.fneg = xor i32 %in.bc, 2147483648
+  %bc = bitcast i32 %int.fneg to float
+  %fmul = fmul float %bc, 4.0
+  store float %fmul, float addrspace(1)* %out
+  ret void
+}
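+
+; Hypothetical companion case, not part of the original patch: or'ing in the
+; sign bit is the integer form of fneg(fabs(x)). Sketch only; whether the
+; generic combine handles the or form here, and the exact CHECK line, are
+; assumptions rather than verified output.
+; FUNC-LABEL: {{^}}bitpreserve_fneg_fabs_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, -|s{{[0-9]+}}|, 1.0
+define amdgpu_kernel void @bitpreserve_fneg_fabs_f32(float addrspace(1)* %out, float %in) {
+  %in.bc = bitcast float %in to i32
+  ; or'ing in 0x80000000 sets the sign bit: fneg(fabs(x)) in integer form.
+  %int.fneg.fabs = or i32 %in.bc, 2147483648
+  %bc = bitcast i32 %int.fneg.fabs to float
+  %fadd = fadd float %bc, 1.0
+  store float %fadd, float addrspace(1)* %out
+  ret void
+}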