From e53dbeb2addf8e19cad61227d29e117392e00994 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Wed, 25 Mar 2015 17:36:01 +0000
Subject: [PATCH] [X86, AVX] improve insertion into zero element of 256-bit
 vector

This patch allows AVX blend instructions to handle insertion into the low
element of a 256-bit vector for the appropriate data types.

For f32, instead of:

   vblendps $1, %xmm1, %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[1,2,3]
   vblendps $15, %ymm1, %ymm0, %ymm0 ## ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]

we get:

   vblendps $1, %ymm1, %ymm0, %ymm0 ## ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]

For f64, instead of:

   vmovsd %xmm1, %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[1]
   vblendpd $3, %ymm1, %ymm0, %ymm0 ## ymm0 = ymm1[0,1],ymm0[2,3]

we get:

   vblendpd $1, %ymm1, %ymm0, %ymm0 ## ymm0 = ymm1[0],ymm0[1,2,3]

For the hardware-neglected integer data types, I left a TODO comment in the
code and added regression tests for a follow-on patch.

Differential Revision: http://reviews.llvm.org/D8609

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@233199 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp | 14 +++++
 test/CodeGen/X86/avx-insertelt.ll  | 83 ++++++++++++++++++++++++++++++
 2 files changed, 97 insertions(+)
 create mode 100644 test/CodeGen/X86/avx-insertelt.ll

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 147864379a6..b21575e54ae 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -10551,6 +10551,20 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
   // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
   // into that, and then insert the subvector back into the result.
   if (VT.is256BitVector() || VT.is512BitVector()) {
+    // With a 256-bit vector, we can insert into the zero element efficiently
+    // using a blend if we have AVX or AVX2 and the right data type.
+    if (VT.is256BitVector() && IdxVal == 0) {
+      // TODO: It is worthwhile to cast integer to floating point and back
+      // and incur a domain crossing penalty if that's what we'll end up
+      // doing anyway after extracting to a 128-bit vector.
+      if ((Subtarget->hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
+          (Subtarget->hasAVX2() && EltVT == MVT::i32)) {
+        SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
+        N2 = DAG.getIntPtrConstant(1);
+        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
+      }
+    }
+
     // Get the desired 128-bit vector chunk.
     SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
 
diff --git a/test/CodeGen/X86/avx-insertelt.ll b/test/CodeGen/X86/avx-insertelt.ll
new file mode 100644
index 00000000000..c159d689451
--- /dev/null
+++ b/test/CodeGen/X86/avx-insertelt.ll
@@ -0,0 +1,83 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+
+define <8 x float> @insert_f32(<8 x float> %y, float %f, <8 x float> %x) {
+; ALL-LABEL: insert_f32:
+; ALL:       vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; ALL-NEXT:  retq
+  %i0 = insertelement <8 x float> %y, float %f, i32 0
+  ret <8 x float> %i0
+}
+
+define <4 x double> @insert_f64(<4 x double> %y, double %f, <4 x double> %x) {
+; ALL-LABEL: insert_f64:
+; ALL:       vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; ALL-NEXT:  retq
+  %i0 = insertelement <4 x double> %y, double %f, i32 0
+  ret <4 x double> %i0
+}
+
+define <32 x i8> @insert_i8(<32 x i8> %y, i8 %f, <32 x i8> %x) {
+; AVX-LABEL: insert_i8:
+; AVX:       # BB#0:
+; AVX-NEXT:  vpinsrb $0, %edi, %xmm0, %xmm1
+; AVX-NEXT:  vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT:  retq
+;
+; AVX2-LABEL: insert_i8:
+; AVX2:       # BB#0:
+; AVX2-NEXT:  vpinsrb $0, %edi, %xmm0, %xmm1
+; AVX2-NEXT:  vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:  retq
+  %i0 = insertelement <32 x i8> %y, i8 %f, i32 0
+  ret <32 x i8> %i0
+}
+
+define <16 x i16> @insert_i16(<16 x i16> %y, i16 %f, <16 x i16> %x) {
+; AVX-LABEL: insert_i16:
+; AVX:       # BB#0:
+; AVX-NEXT:  vpinsrw $0, %edi, %xmm0, %xmm1
+; AVX-NEXT:  vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT:  retq
+;
+; AVX2-LABEL: insert_i16:
+; AVX2:       # BB#0:
+; AVX2-NEXT:  vpinsrw $0, %edi, %xmm0, %xmm1
+; AVX2-NEXT:  vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:  retq
+  %i0 = insertelement <16 x i16> %y, i16 %f, i32 0
+  ret <16 x i16> %i0
+}
+
+define <8 x i32> @insert_i32(<8 x i32> %y, i32 %f, <8 x i32> %x) {
+; AVX-LABEL: insert_i32:
+; AVX:       # BB#0:
+; AVX-NEXT:  vpinsrd $0, %edi, %xmm0, %xmm1
+; AVX-NEXT:  vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT:  retq
+;
+; AVX2-LABEL: insert_i32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:  vmovd %edi, %xmm1
+; AVX2-NEXT:  vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; AVX2-NEXT:  retq
+  %i0 = insertelement <8 x i32> %y, i32 %f, i32 0
+  ret <8 x i32> %i0
+}
+
+define <4 x i64> @insert_i64(<4 x i64> %y, i64 %f, <4 x i64> %x) {
+; AVX-LABEL: insert_i64:
+; AVX:       # BB#0:
+; AVX-NEXT:  vpinsrq $0, %rdi, %xmm0, %xmm1
+; AVX-NEXT:  vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT:  retq
+;
+; AVX2-LABEL: insert_i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:  vpinsrq $0, %rdi, %xmm0, %xmm1
+; AVX2-NEXT:  vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:  retq
+  %i0 = insertelement <4 x i64> %y, i64 %f, i32 0
+  ret <4 x i64> %i0
+}
+
-- 
2.40.0
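
A supplementary sketch (editorial, not part of the patch): the same
insert-into-element-0 operation expressed with AVX C intrinsics, for readers
who want to reproduce the single-blend form outside of llc. The helper name
insert_f32_lane0 is invented for illustration, and the example assumes a
compiler with AVX enabled (e.g. clang++ -O2 -mavx). The immediate given to
_mm256_blend_ps acts like the $1 in the vblendps above: bit i set selects
element i from the second source.

   #include <immintrin.h>
   #include <cstdio>

   // Insert f into element 0 of y, i.e. the IR pattern
   // `insertelement <8 x float> %y, float %f, i32 0` targeted by the patch.
   static __m256 insert_f32_lane0(__m256 y, float f) {
     // Widen the scalar to 256 bits; the upper elements are don't-care...
     __m256 v = _mm256_castps128_ps256(_mm_set_ss(f));
     // ...then blend: immediate bit 0 set => element 0 comes from v,
     // elements 1-7 come from y. One ymm blend, no 128-bit extract/insert.
     return _mm256_blend_ps(y, v, 1);
   }

   int main() {
     __m256 y = _mm256_set1_ps(2.0f);
     __m256 r = insert_f32_lane0(y, 7.0f);
     float out[8];
     _mm256_storeu_ps(out, r);
     std::printf("%.1f %.1f\n", out[0], out[1]); // expect: 7.0 2.0
     return 0;
   }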