From 9bd050eb9372a57228e48a4e45aef0998afb52ac Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 12 Jul 2018 22:14:10 +0000
Subject: [PATCH] [X86] Add AVX512 equivalents of some isel patterns so we
 get EVEX instructions.

These are the patterns for matching fceil, ffloor, and sqrt to intrinsic
instructions if they have a MOVSS/SD.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@336954 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrAVX512.td              | 41 ++++++++++++++++---
 lib/Target/X86/X86InstrSSE.td                 | 24 +++++------
 test/CodeGen/X86/sse-intrinsics-fast-isel.ll  | 13 ++++--
 .../CodeGen/X86/sse-intrinsics-x86-upgrade.ll | 13 ++++--
 test/CodeGen/X86/sse2-intrinsics-fast-isel.ll | 13 ++++--
 .../X86/sse2-intrinsics-x86-upgrade.ll        | 17 +++++---
 6 files changed, 86 insertions(+), 35 deletions(-)

diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 20a7dbd5c38..c703e610715 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -11484,13 +11484,13 @@ multiclass AVX512_scalar_math_fp_patterns<SDNode Op, string OpcPrefix, SDNode M
     def : Pat<(MoveNode (_.VT VR128X:$dst), (_.VT (scalar_to_vector
           (Op (_.EltVT (extractelt (_.VT VR128X:$dst), (iPTR 0))),
           _.FRC:$src)))),
-      (!cast<I>("V"#OpcPrefix#Zrr_Int) _.VT:$dst,
+      (!cast<Instruction>("V"#OpcPrefix#Zrr_Int) _.VT:$dst,
       (COPY_TO_REGCLASS _.FRC:$src, VR128X))>;
 
     // vector math op with insert via movss
     def : Pat<(_.VT (MoveNode (_.VT VR128X:$dst),
                     (Op (_.VT VR128X:$dst), (_.VT VR128X:$src)))),
-      (!cast<I>("V"#OpcPrefix#Zrr_Int) _.VT:$dst, _.VT:$src)>;
+      (!cast<Instruction>("V"#OpcPrefix#Zrr_Int) _.VT:$dst, _.VT:$src)>;
 
     // extracted masked scalar math op with insert via movss
     def : Pat<(MoveNode (_.VT VR128X:$src1),
@@ -11499,17 +11499,17 @@ multiclass AVX512_scalar_math_fp_patterns<SDNode Op, string OpcPrefix, SDNode M
                             (Op (_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
                                 _.FRC:$src2),
                             _.FRC:$src0))),
-      (!cast<I>("V"#OpcPrefix#Zrr_Intk) (COPY_TO_REGCLASS _.FRC:$src0, VR128X),
+      (!cast<Instruction>("V"#OpcPrefix#Zrr_Intk) (COPY_TO_REGCLASS _.FRC:$src0, VR128X),
       VK1WM:$mask, _.VT:$src1,
       (COPY_TO_REGCLASS _.FRC:$src2, VR128X))>;
-      
+
     // extracted masked scalar math op with insert via movss
     def : Pat<(MoveNode (_.VT VR128X:$src1),
               (scalar_to_vector
                (X86selects VK1WM:$mask,
                            (Op (_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
                                _.FRC:$src2), (_.EltVT ZeroFP)))),
-      (!cast<I>("V"#OpcPrefix#Zrr_Intkz)
+      (!cast<Instruction>("V"#OpcPrefix#Zrr_Intkz)
       VK1WM:$mask, _.VT:$src1,
       (COPY_TO_REGCLASS _.FRC:$src2, VR128X))>;
   }
@@ -11525,6 +11525,37 @@ defm : AVX512_scalar_math_fp_patterns<fsub, "SUBSD", X86Movsd, v2f64x_info, fp6
 defm : AVX512_scalar_math_fp_patterns<fmul, "MULSD", X86Movsd, v2f64x_info, fp64imm0>;
 defm : AVX512_scalar_math_fp_patterns<fdiv, "DIVSD", X86Movsd, v2f64x_info, fp64imm0>;
 
+multiclass AVX512_scalar_unary_math_patterns<SDNode OpNode, string OpcPrefix,
+                                             SDNode Move, X86VectorVTInfo _> {
+  let Predicates = [HasAVX512] in {
+    def : Pat<(_.VT (Move _.VT:$dst,
+                     (scalar_to_vector (OpNode (extractelt _.VT:$src, 0))))),
+              (!cast<Instruction>("V"#OpcPrefix#Zr_Int) _.VT:$dst, _.VT:$src)>;
+  }
+}
+
+defm : AVX512_scalar_unary_math_patterns<fsqrt, "SQRTSS", X86Movss, v4f32x_info>;
+defm : AVX512_scalar_unary_math_patterns<fsqrt, "SQRTSD", X86Movsd, v2f64x_info>;
+
+multiclass AVX512_scalar_unary_math_imm_patterns<SDNode OpNode, string OpcPrefix,
+                                                 SDNode Move, X86VectorVTInfo _,
+                                                 bits<8> ImmV> {
+  let Predicates = [HasAVX512] in {
+    def : Pat<(_.VT (Move _.VT:$dst,
+                     (scalar_to_vector (OpNode (extractelt _.VT:$src, 0))))),
+              (!cast<Instruction>("V"#OpcPrefix#Zr_Int) _.VT:$dst, _.VT:$src,
+                                  (i32 ImmV))>;
+  }
+}
+
+defm : AVX512_scalar_unary_math_imm_patterns<ffloor, "RNDSCALESS", X86Movss,
+                                             v4f32x_info, 0x01>;
+defm : AVX512_scalar_unary_math_imm_patterns<fceil, "RNDSCALESS", X86Movss,
+                                             v4f32x_info, 0x02>;
+defm : AVX512_scalar_unary_math_imm_patterns<ffloor, "RNDSCALESD", X86Movsd,
+                                             v2f64x_info, 0x01>;
+defm : AVX512_scalar_unary_math_imm_patterns<fceil, "RNDSCALESD", X86Movsd,
+                                             v2f64x_info, 0x02>;
+
 //===----------------------------------------------------------------------===//
 // AES instructions
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 69f71295300..b15ac4a378e 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -2647,13 +2647,13 @@ multiclass scalar_math_patterns<SDNode Op, string OpcPrefix, SDNode Move, Value
     def : Pat<(VT (Move (VT VR128:$dst), (VT (scalar_to_vector
           (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
           RC:$src))))),
-      (!cast<I>(OpcPrefix#rr_Int) VT:$dst,
+      (!cast<Instruction>(OpcPrefix#rr_Int) VT:$dst,
       (COPY_TO_REGCLASS RC:$src, VR128))>;
 
     // vector math op with insert via movss/movsd
     def : Pat<(VT (Move (VT VR128:$dst),
                         (Op (VT VR128:$dst), (VT VR128:$src)))),
-      (!cast<I>(OpcPrefix#rr_Int) VT:$dst, VT:$src)>;
+      (!cast<Instruction>(OpcPrefix#rr_Int) VT:$dst, VT:$src)>;
   }
 
   // Repeat for AVX versions of the instructions.
@@ -2662,13 +2662,13 @@ multiclass scalar_math_patterns<SDNode Op, string OpcPrefix, SDNode Move, Value
     def : Pat<(VT (Move (VT VR128:$dst), (VT (scalar_to_vector
           (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
           RC:$src))))),
-      (!cast<I>("V"#OpcPrefix#rr_Int) VT:$dst,
+      (!cast<Instruction>("V"#OpcPrefix#rr_Int) VT:$dst,
       (COPY_TO_REGCLASS RC:$src, VR128))>;
 
     // vector math op with insert via movss/movsd
     def : Pat<(VT (Move (VT VR128:$dst),
                         (Op (VT VR128:$dst), (VT VR128:$src)))),
-      (!cast<I>("V"#OpcPrefix#rr_Int) VT:$dst, VT:$src)>;
+      (!cast<Instruction>("V"#OpcPrefix#rr_Int) VT:$dst, VT:$src)>;
   }
 }
 
@@ -2927,14 +2927,14 @@ multiclass scalar_unary_math_patterns<SDNode OpNode, string OpcPrefix, SDNode M
   let Predicates = [BasePredicate] in {
     def : Pat<(VT (Move VT:$dst, (scalar_to_vector
                                   (OpNode (extractelt VT:$src, 0))))),
-              (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
+              (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
   }
 
   // Repeat for AVX versions of the instructions.
-  let Predicates = [HasAVX] in {
+  let Predicates = [UseAVX] in {
     def : Pat<(VT (Move VT:$dst, (scalar_to_vector
                                   (OpNode (extractelt VT:$src, 0))))),
-              (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
+              (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
   }
 }
 
@@ -2944,14 +2944,14 @@ multiclass scalar_unary_math_imm_patterns<SDNode OpNode, string OpcPrefix, SDNo
   let Predicates = [BasePredicate] in {
     def : Pat<(VT (Move VT:$dst, (scalar_to_vector
                                   (OpNode (extractelt VT:$src, 0))))),
-              (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src, (i32 ImmV))>;
+              (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src, (i32 ImmV))>;
   }
 
   // Repeat for AVX versions of the instructions.
-  let Predicates = [HasAVX] in {
+  let Predicates = [UseAVX] in {
     def : Pat<(VT (Move VT:$dst, (scalar_to_vector
                                   (OpNode (extractelt VT:$src, 0))))),
-              (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src, (i32 ImmV))>;
+              (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src, (i32 ImmV))>;
   }
 }
 
@@ -2963,13 +2963,13 @@ multiclass scalar_unary_math_intr_patterns<Intrinsic Intr, string OpcPrefix,
                                            SDNode Move, ValueType VT,
                                            Predicate BasePredicate> {
   let Predicates = [BasePredicate] in {
     def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
-              (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
+              (!cast<Instruction>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
   }
 
   // Repeat for AVX versions of the instructions.
   let Predicates = [HasAVX] in {
     def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
-              (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
+              (!cast<Instruction>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
   }
 }
diff --git a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
index fce52bf6699..009ea22400e 100644
--- a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -2558,10 +2558,15 @@ define <4 x float> @test_mm_sqrt_ss(<4 x float> %a0) {
 ; SSE-NEXT:    sqrtss %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x51,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
-; AVX-LABEL: test_mm_sqrt_ss:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x51,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+; AVX1-LABEL: test_mm_sqrt_ss:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x51,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512-LABEL: test_mm_sqrt_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x51,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %ext = extractelement <4 x float> %a0, i32 0
   %sqrt = call float @llvm.sqrt.f32(float %ext)
   %ins = insertelement <4 x float> %a0, float %sqrt, i32 0
diff --git a/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
index d153d2f42d6..cd593676f31 100644
--- a/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
@@ -34,10 +34,15 @@ define <4 x float> @test_x86_sse_sqrt_ss(<4 x float> %a0) {
 ; SSE-NEXT:    sqrtss %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x51,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse_sqrt_ss:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x51,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse_sqrt_ss:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x51,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse_sqrt_ss:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x51,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
diff --git a/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
index be389890bb7..e9af4ebca72 100644
--- a/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -4896,10 +4896,15 @@ define <2 x double> @test_mm_sqrt_sd(<2 x double> %a0, <2 x double> %a1) nounwin
 ; SSE-NEXT:    movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
-; AVX-LABEL: test_mm_sqrt_sd:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vsqrtsd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf3,0x51,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+; AVX1-LABEL: test_mm_sqrt_sd:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vsqrtsd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf3,0x51,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
+;
+; AVX512-LABEL: test_mm_sqrt_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vsqrtsd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf3,0x51,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
   %ext = extractelement <2 x double> %a0, i32 0
   %sqrt = call double @llvm.sqrt.f64(double %ext)
   %ins = insertelement <2 x double> %a1, double %sqrt, i32 0
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
index 54529b177e5..1c1900deafe 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
@@ -34,10 +34,15 @@ define <2 x double> @test_x86_sse2_sqrt_sd(<2 x double> %a0) {
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; AVX-LABEL: test_x86_sse2_sqrt_sd:
-; AVX:       ## %bb.0:
-; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
-; AVX-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+; AVX1-LABEL: test_x86_sse2_sqrt_sd:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_sqrt_sd:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -63,7 +68,7 @@ define <2 x double> @test_x86_sse2_sqrt_sd_vec_load(<2 x double>* %a0) {
 ; X86-AVX512:       ## %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512-NEXT:    vmovapd (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x00]
-; X86-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
+; X86-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
 ; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
@@ -81,7 +86,7 @@ define <2 x double> @test_x86_sse2_sqrt_sd_vec_load(<2 x double>* %a0) {
 ; X64-AVX512-LABEL: test_x86_sse2_sqrt_sd_vec_load:
 ; X64-AVX512:       ## %bb.0:
 ; X64-AVX512-NEXT:    vmovapd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x07]
-; X64-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
+; X64-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %a1 = load <2 x double>, <2 x double>* %a0, align 16
   %res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a1) ; <<2 x double>> [#uses=1]
-- 
2.50.1
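
A quick sketch of the shape these new patterns match (the function below is
hypothetical, not one of the committed tests): with
llc -mtriple=x86_64-unknown-unknown -mattr=+avx512f, an fceil applied to
element 0 and merged back with a movss-style insert should now select the
EVEX VRNDSCALESS pattern (immediate 0x02 = round up) instead of the VEX-only
VROUNDSS pattern, with the EVEX-to-VEX pass still free to compress the
encoding when no xmm16-xmm31 registers are involved, as the "EVEX TO VEX
Compression" comments in the updated tests show.

; ceil of lane 0, reinserted into the source vector
declare float @llvm.ceil.f32(float)

define <4 x float> @ceil_low_element(<4 x float> %x) {
  %ext = extractelement <4 x float> %x, i32 0    ; take lane 0
  %ceil = call float @llvm.ceil.f32(float %ext)  ; scalar fceil
  %ins = insertelement <4 x float> %x, float %ceil, i32 0 ; movss-style merge
  ret <4 x float> %ins
}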