From: Craig Topper
Date: Tue, 17 Jul 2018 23:26:20 +0000 (+0000)
Subject: [X86] Remove the vector alignment requirement from the patterns added in r337320.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=9722c06a8a8e373e821cdab7d6fe6e0906f2216a;p=llvm

[X86] Remove the vector alignment requirement from the patterns added in r337320.

The resulting instruction will only load 64 bits, so alignment isn't required.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@337334 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 3797d91fb31..c8ad7d9eabb 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -725,7 +725,8 @@ let Predicates = [UseSSE1] in {
 
   // This pattern helps select MOVLPS on SSE1 only targets. With SSE2 we'll
   // end up with a movsd or bleand instead of shufp.
-  def : Pat<(X86Shufp (memopv4f32 addr:$src2), VR128:$src1, (i8 -28)),
+  // No need for aligned load, we're only loading 64-bits.
+  def : Pat<(X86Shufp (loadv4f32 addr:$src2), VR128:$src1, (i8 -28)),
             (MOVLPSrm VR128:$src1, addr:$src2)>;
 }
 
@@ -801,7 +802,8 @@ let Predicates = [UseSSE1] in {
 
   // This pattern helps select MOVHPS on SSE1 only targets. With SSE2 we'll
   // end up with a movsd or bleand instead of shufp.
-  def : Pat<(X86Movlhps VR128:$src1, (memopv4f32 addr:$src2)),
+  // No need for aligned load, we're only loading 64-bits.
+  def : Pat<(X86Movlhps VR128:$src1, (loadv4f32 addr:$src2)),
             (MOVHPSrm VR128:$src1, addr:$src2)>;
 }
 
diff --git a/test/CodeGen/X86/vector-shuffle-sse1.ll b/test/CodeGen/X86/vector-shuffle-sse1.ll
index dda46e062d5..eb0f0b043e2 100644
--- a/test/CodeGen/X86/vector-shuffle-sse1.ll
+++ b/test/CodeGen/X86/vector-shuffle-sse1.ll
@@ -280,7 +280,7 @@ define <4 x float> @shuffle_mem_v4f32_0145(<4 x float> %a, <4 x float>* %pb) {
 ; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
 ; SSE1-NEXT:    retq
-  %b = load <4 x float>, <4 x float>* %pb, align 16
+  %b = load <4 x float>, <4 x float>* %pb, align 1
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   ret <4 x float> %shuffle
 }
@@ -300,7 +300,7 @@ define <4 x float> @shuffle_mem_v4f32_4523(<4 x float> %a, <4 x float>* %pb) {
 ; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
 ; SSE1-NEXT:    retq
-  %b = load <4 x float>, <4 x float>* %pb, align 16
+  %b = load <4 x float>, <4 x float>* %pb, align 1
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
   ret <4 x float> %shuffle
 }
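
For reference, a minimal IR sketch (not part of the patch) of what the relaxed MOVLPS pattern now matches when SSE2 is disabled, e.g. with llc -mtriple=x86_64-unknown-unknown -mattr=-sse2. The function name is made up and the body mirrors the updated shuffle_mem_v4f32_4523 test: only the low two floats of the loaded vector survive the shuffle, so a 64-bit movlps load is safe even for a 1-byte-aligned pointer.

define <4 x float> @movlps_unaligned(<4 x float> %a, <4 x float>* %p) {
  ; The load is only 1-byte aligned, but the shuffle mask <4, 5, 2, 3> uses
  ; just elements 0 and 1 of %b, so isel can emit movlps (a 64-bit load).
  %b = load <4 x float>, <4 x float>* %p, align 1
  %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  ret <4 x float> %shuffle
}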