From: Craig Topper
Date: Wed, 18 Jul 2018 05:10:53 +0000 (+0000)
Subject: [X86] Remove patterns that mix X86ISD::MOVLHPS/MOVHLPS with v2i64/v2f64 types.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=a65225ba1f48c0ffb9f3965f2f38c85cf71124f4;p=llvm

[X86] Remove patterns that mix X86ISD::MOVLHPS/MOVHLPS with v2i64/v2f64 types.

X86ISD::MOVLHPS/MOVHLPS should now be emitted only on SSE1-only targets. There, the v2i64/v2f64 types are illegal, so these patterns are no longer needed.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@337349 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 2035e49720f..b239f230915 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -6464,13 +6464,6 @@ defm VMOVLPDZ128 : avx512_mov_hilo_packed<0x12, "vmovlpd", X86Movsd,
                                           v2f64x_info>, EVEX_CD8<64, CD8VT1>, PD, VEX_W;
 
 let Predicates = [HasAVX512] in {
-  // VMOVHPS patterns
-  def : Pat<(X86Movlhps VR128X:$src1,
-                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
-            (VMOVHPSZ128rm VR128X:$src1, addr:$src2)>;
-  def : Pat<(X86Movlhps VR128X:$src1,
-                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
-            (VMOVHPSZ128rm VR128X:$src1, addr:$src2)>;
   // VMOVHPD patterns
   def : Pat<(v2f64 (X86Unpckl VR128X:$src1,
                      (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index af40b009d97..06a8799b13f 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -766,25 +766,12 @@ def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
 } // SchedRW
 
 let Predicates = [UseAVX] in {
-  // VMOVHPS patterns
-  def : Pat<(X86Movlhps VR128:$src1,
-                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
-            (VMOVHPSrm VR128:$src1, addr:$src2)>;
-  def : Pat<(X86Movlhps VR128:$src1,
-                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
-            (VMOVHPSrm VR128:$src1, addr:$src2)>;
-
   // Also handle an i64 load because that may get selected as a faster way to
   // load the data.
   def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                      (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
             (VMOVHPDrm VR128:$src1, addr:$src2)>;
 
-  def : Pat<(store (f64 (extractelt
-                     (bc_v2f64 (v4f32 (X86Movhlps VR128:$src, VR128:$src))),
-                     (iPTR 0))), addr:$dst),
-            (VMOVHPDmr addr:$dst, VR128:$src)>;
-
   def : Pat<(store (f64 (extractelt
                      (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
                      (iPTR 0))), addr:$dst),
@@ -792,14 +779,6 @@ let Predicates = [UseAVX] in {
 }
 
 let Predicates = [UseSSE1] in {
-  // MOVHPS patterns
-  def : Pat<(X86Movlhps VR128:$src1,
-                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
-            (MOVHPSrm VR128:$src1, addr:$src2)>;
-  def : Pat<(X86Movlhps VR128:$src1,
-                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
-            (MOVHPSrm VR128:$src1, addr:$src2)>;
-
   // This pattern helps select MOVHPS on SSE1 only targets. With SSE2 we'll
   // end up with a movsd or blend instead of shufp.
   // No need for aligned load, we're only loading 64-bits.
@@ -816,11 +795,6 @@ let Predicates = [UseSSE2] in {
                      (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
             (MOVHPDrm VR128:$src1, addr:$src2)>;
 
-  def : Pat<(store (f64 (extractelt
-                     (bc_v2f64 (v4f32 (X86Movhlps VR128:$src, VR128:$src))),
-                     (iPTR 0))), addr:$dst),
-            (MOVHPDmr addr:$dst, VR128:$src)>;
-
   def : Pat<(store (f64 (extractelt
                      (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
                      (iPTR 0))), addr:$dst),
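
For reference, IR of the shape below is what the removed patterns used to match: an i64 load bitcast to v4f32 and shuffled into the high half of a vector. This is a hypothetical reduced case, not a test from this commit. After this change, shuffle lowering on SSE2+ targets no longer produces X86Movlhps for it, so selection goes through the remaining MOVHPS/MOVHPD load patterns instead.

  ; Hypothetical reduced case: place a 64-bit load into the high half of %a.
  ; The shufflevector takes elements 0,1 of %a and 0,1 of the loaded value,
  ; i.e. a movlhps-shaped shuffle whose second operand comes from memory.
  define <4 x float> @load_high64(<4 x float> %a, i64* %p) {
    %ld  = load i64, i64* %p
    %vec = insertelement <2 x i64> undef, i64 %ld, i32 0
    %bc  = bitcast <2 x i64> %vec to <4 x float>
    %sh  = shufflevector <4 x float> %a, <4 x float> %bc,
                         <4 x i32> <i32 0, i32 1, i32 4, i32 5>
    ret <4 x float> %sh
  }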