From 1f1c4b3f65079250cbb98a6b09a5d5235f4fcd25 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Mon, 23 Jan 2017 22:48:53 +0000
Subject: [PATCH] DAG: Don't fold vector extract into load if target doesn't
 want to

Fixes turning a 32-bit scalar load into an extending vector load for
AMDGPU when dynamically indexing a vector.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@292842 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp      |  5 +++
 test/CodeGen/AMDGPU/extract_vector_elt-i16.ll | 31 +++++++++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index dad94c58ce9..60038e2924c 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12560,6 +12560,11 @@ SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
   if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
     return SDValue();
 
+  ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ?
+    ISD::NON_EXTLOAD : ISD::EXTLOAD;
+  if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT))
+    return SDValue();
+
   Align = NewAlign;
 
   SDValue NewPtr = OriginalLoad->getBasePtr();
diff --git a/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll b/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
index eea44b8a006..c32c5fccc04 100644
--- a/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
+++ b/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
@@ -15,6 +15,34 @@ define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) #0
   ret void
 }
 
+; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_sgpr:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_lshr_b32 s{{[0-9]+}}, [[IDX]], 16
+; GCN: v_mov_b32_e32 [[VVEC:v[0-9]+]], [[VEC]]
+define void @extract_vector_elt_v2i16_dynamic_sgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %idx) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %elt = extractelement <2 x i16> %vec, i32 %idx
+  store i16 %elt, i16 addrspace(1)* %out, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_vgpr:
+; GCN: {{buffer|flat}}_load_dword [[IDX:v[0-9]+]]
+; GCN: buffer_load_dword [[VEC:v[0-9]+]]
+; GCN: v_lshrrev_b32_e32 [[ELT:v[0-9]+]], 16, [[VEC]]
+define void @extract_vector_elt_v2i16_dynamic_vgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds i16, i16 addrspace(1)* %out, i64 %tid.ext
+  %idx = load volatile i32, i32 addrspace(1)* %gep
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vec.ptr
+  %elt = extractelement <2 x i16> %vec, i32 %idx
+  store i16 %elt, i16 addrspace(1)* %out.gep, align 2
+  ret void
+}
+
 ; GCN-LABEL: {{^}}extract_vector_elt_v3i16:
 ; GCN: buffer_load_ushort
 ; GCN: buffer_store_short
@@ -80,4 +108,7 @@ define void @dynamic_extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16>
   ret void
 }
 
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
 attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
-- 
2.40.0
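
The new check gives targets a veto over this combine through the existing
TargetLowering::shouldReduceLoadWidth hook, whose default implementation
returns true. A minimal sketch of how a backend could opt out of producing
sub-dword (possibly extending) scalar loads from this path is shown below;
MyTargetLowering is a hypothetical target used for illustration, not AMDGPU's
actual override, and the 32-bit threshold is an assumption for the example.

    // In the target's *ISelLowering.cpp (illustrative only, not part of this
    // patch). Reject narrowing to anything smaller than a dword; accept
    // everything else, matching the default behavior of returning true.
    bool MyTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType ExtTy,
                                                 EVT NewVT) const {
      // NewVT is the type of the narrowed load the combiner wants to create;
      // ExtTy describes whether that load would be an extending load.
      if (NewVT.getStoreSizeInBits() < 32)
        return false;
      return true;
    }

With such an override in place, ReplaceExtractVectorEltOfLoadWithNarrowedLoad
bails out before rewriting the extract, which is what the two new dynamic-index
tests above check for on AMDGPU.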