From 51bfae79dd56d4fb985ec9d0d53e29ec2dbe7a8c Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Fri, 5 Jul 2019 17:31:25 +0000
Subject: [PATCH] [X86] Remove unnecessary isel pattern for MOVLPSmr.

This was identical to a pattern for MOVPQI2QImr with a bitcast as an
input. But we should be able to turn MOVPQI2QImr into MOVLPSmr in the
execution domain fixup pass, so we shouldn't need this pattern.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@365224 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrSSE.td | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index e27074fb27e..e4791eb3c32 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -657,11 +657,6 @@ def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
 } // SchedRW
 
 let Predicates = [UseSSE1] in {
-  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
-  def : Pat<(store (i64 (extractelt (bc_v2i64 (v4f32 VR128:$src2)),
-                                    (iPTR 0))), addr:$src1),
-            (MOVLPSmr addr:$src1, VR128:$src2)>;
-
   // This pattern helps select MOVLPS on SSE1 only targets. With SSE2 we'll
   // end up with a movsd or blend instead of shufp.
   // No need for aligned load, we're only loading 64-bits.
-- 
2.40.0
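
Note for readers without the tree at hand: below is a rough, paraphrased
sketch (not copied verbatim from X86InstrSSE.td) of the MOVPQI2QImr store
pattern the commit message refers to. It matches the same
(store (i64 (extractelt ..., (iPTR 0)))) DAG as the deleted pattern, except
that its source is natively v2i64, whereas the removed MOVLPSmr pattern
reached the same DAG through bc_v2i64 on a v4f32 operand:

  // Paraphrased sketch: select a store of the low 64 bits of a v2i64
  // through MOVPQI2QImr. The deleted MOVLPSmr pattern matched the same
  // DAG with the v4f32 source wrapped in bc_v2i64.
  def : Pat<(store (i64 (extractelt (v2i64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVPQI2QImr addr:$dst, VR128:$src)>;

Since the two instructions perform the same 64-bit store and differ mainly
in execution domain (integer vs. packed single), the execution domain fixup
pass can rewrite MOVPQI2QImr into MOVLPSmr when that is profitable, which is
why the duplicate isel pattern carries no value.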