From: Stanislav Mekhanoshin Date: Wed, 25 Sep 2019 18:50:34 +0000 (+0000) Subject: [AMDGPU] Improve fma.f64 test. NFC. X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=336dc6d0e831674e239c2e97cff3565bda24b3bf;p=llvm [AMDGPU] Improve fma.f64 test. NFC. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@372908 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/test/CodeGen/AMDGPU/fma.f64.ll b/test/CodeGen/AMDGPU/fma.f64.ll index 907121f1cd4..4bc8f5ec463 100644 --- a/test/CodeGen/AMDGPU/fma.f64.ll +++ b/test/CodeGen/AMDGPU/fma.f64.ll @@ -4,7 +4,7 @@ declare double @llvm.fma.f64(double, double, double) nounwind readnone declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) nounwind readnone - +declare double @llvm.fabs.f64(double) nounwind readnone ; FUNC-LABEL: {{^}}fma_f64: ; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}} @@ -45,3 +45,156 @@ define amdgpu_kernel void @fma_v4f64(<4 x double> addrspace(1)* %out, <4 x doubl store <4 x double> %r3, <4 x double> addrspace(1)* %out ret void } + +; FUNC-LABEL: {{^}}fma_f64_abs_src0: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], \|v\[[0-9]+:[0-9]+\]\|, v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void @fma_f64_abs_src0(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fabs = call double @llvm.fabs.f64(double %r0) + %r3 = tail call double @llvm.fma.f64(double %fabs, double %r1, double %r2) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_abs_src1: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], \|v\[[0-9]+:[0-9]+\]\|, v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void 
@fma_f64_abs_src1(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fabs = call double @llvm.fabs.f64(double %r1) + %r3 = tail call double @llvm.fma.f64(double %r0, double %fabs, double %r2) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_abs_src2: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], \|v\[[0-9]+:[0-9]+\]\|}} +define amdgpu_kernel void @fma_f64_abs_src2(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fabs = call double @llvm.fabs.f64(double %r2) + %r3 = tail call double @llvm.fma.f64(double %r0, double %r1, double %fabs) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_neg_src0: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void @fma_f64_neg_src0(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fsub = fsub double -0.000000e+00, %r0 + %r3 = tail call double @llvm.fma.f64(double %fsub, double %r1, double %r2) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_neg_src1: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void @fma_f64_neg_src1(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { 
+ %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fsub = fsub double -0.000000e+00, %r1 + %r3 = tail call double @llvm.fma.f64(double %r0, double %fsub, double %r2) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_neg_src2: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void @fma_f64_neg_src2(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fsub = fsub double -0.000000e+00, %r2 + %r3 = tail call double @llvm.fma.f64(double %r0, double %r1, double %fsub) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_abs_neg_src0: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], -\|v\[[0-9]+:[0-9]+\]\|, v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void @fma_f64_abs_neg_src0(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fabs = call double @llvm.fabs.f64(double %r0) + %fsub = fsub double -0.000000e+00, %fabs + %r3 = tail call double @llvm.fma.f64(double %fsub, double %r1, double %r2) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_abs_neg_src1: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -\|v\[[0-9]+:[0-9]+\]\|, v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void @fma_f64_abs_neg_src1(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, 
double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fabs = call double @llvm.fabs.f64(double %r1) + %fsub = fsub double -0.000000e+00, %fabs + %r3 = tail call double @llvm.fma.f64(double %r0, double %fsub, double %r2) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_abs_neg_src2: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -\|v\[[0-9]+:[0-9]+\]\|}} +define amdgpu_kernel void @fma_f64_abs_neg_src2(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %fabs = call double @llvm.fabs.f64(double %r2) + %fsub = fsub double -0.000000e+00, %fabs + %r3 = tail call double @llvm.fma.f64(double %r0, double %r1, double %fsub) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_lit_src0: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], 2.0, v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void @fma_f64_lit_src0(double addrspace(1)* %out, + double addrspace(1)* %in2, double addrspace(1)* %in3) { + %r1 = load double, double addrspace(1)* %in2 + %r2 = load double, double addrspace(1)* %in3 + %r3 = tail call double @llvm.fma.f64(double +2.0, double %r1, double %r2) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_lit_src1: +; SI: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], 2.0, v\[[0-9]+:[0-9]+\]}} +define amdgpu_kernel void @fma_f64_lit_src1(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in3) { + %r0 = load double, double addrspace(1)* %in1 + %r2 = load double, double addrspace(1)* %in3 + %r3 = tail call double @llvm.fma.f64(double %r0, double +2.0, double %r2) + store double %r3, double addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}fma_f64_lit_src2: +; SI: 
v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], 2.0}} +define amdgpu_kernel void @fma_f64_lit_src2(double addrspace(1)* %out, double addrspace(1)* %in1, + double addrspace(1)* %in2) { + %r0 = load double, double addrspace(1)* %in1 + %r1 = load double, double addrspace(1)* %in2 + %r3 = tail call double @llvm.fma.f64(double %r0, double %r1, double +2.0) + store double %r3, double addrspace(1)* %out + ret void +}