From: Sanjay Patel
Date: Fri, 3 Mar 2017 16:58:51 +0000 (+0000)
Subject: [x86] regenerate checks; NFC
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=16776ebf7c5fa9b9020a951433a0c138da3ae0b9;p=llvm

[x86] regenerate checks; NFC

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@296883 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index 7e12a23c35f..645899a35a2 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X32
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X64
 
@@ -793,12 +794,12 @@ define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocap
 ; X32-LABEL: insertps_from_vector_load:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $48, (%{{...}}), {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_from_vector_load:
 ; X64: ## BB#0:
-; X64-NEXT: insertps $48, (%{{...}}), {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; X64-NEXT: retq
   %1 = load <4 x float>, <4 x float>* %pb, align 16
   %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
@@ -811,12 +812,12 @@ define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>
 ; X32-LABEL: insertps_from_vector_load_offset:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $32, 4(%{{...}}), {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_from_vector_load_offset:
 ; X64: ## BB#0:
-; X64-NEXT: insertps $32, 4(%{{...}}), {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; X64-NEXT: retq
   %1 = load <4 x float>, <4 x float>* %pb, align 16
   %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
@@ -830,13 +831,13 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x floa
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: shll $4, %ecx
-; X32-NEXT: insertps $0, 12(%{{...}},%{{...}}), {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
+; X32-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_from_vector_load_offset_2:
 ; X64: ## BB#0:
 ; X64-NEXT: shlq $4, %rsi
-; X64-NEXT: insertps $0, 12(%{{...}},%{{...}}), {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
+; X64-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
 ; X64-NEXT: retq
   %1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
   %2 = load <4 x float>, <4 x float>* %1, align 16
@@ -992,15 +993,14 @@ define void @insertps_pr20411(<4 x i32> %shuffle109, <4 x i32> %shuffle116, i32*
 
 define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
 ; X32-LABEL: insertps_4:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_4:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
 ; X64-NEXT: retq
-entry:
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
@@ -1012,15 +1012,14 @@ entry:
 
 define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
 ; X32-LABEL: insertps_5:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_5:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
 ; X64-NEXT: retq
-entry:
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %B, i32 1
@@ -1032,15 +1031,14 @@ entry:
 
 define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
 ; X32-LABEL: insertps_6:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
 ; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_6:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
 ; X64-NEXT: retq
-entry:
   %vecext = extractelement <4 x float> %A, i32 1
   %vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
   %vecext1 = extractelement <4 x float> %B, i32 2
@@ -1051,15 +1049,14 @@ entry:
 
 define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
 ; X32-LABEL: insertps_7:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_7:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
 ; X64-NEXT: retq
-entry:
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
@@ -1071,15 +1068,14 @@ entry:
 
 define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
 ; X32-LABEL: insertps_8:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_8:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
 ; X64-NEXT: retq
-entry:
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
   %vecext1 = extractelement <4 x float> %B, i32 0
@@ -1091,17 +1087,16 @@ entry:
 
 define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
 ; X32-LABEL: insertps_9:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
 ; X32-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
 ; X32-NEXT: movaps %xmm1, %xmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_9:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
 ; X64-NEXT: movaps %xmm1, %xmm0
 ; X64-NEXT: retq
-entry:
   %vecext = extractelement <4 x float> %A, i32 0
   %vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
   %vecext1 = extractelement <4 x float> %B, i32 2
@@ -1110,7 +1105,7 @@ entry:
   ret <4 x float> %vecinit3
 }
 
-define <4 x float> @insertps_10(<4 x float> %A)
+define <4 x float> @insertps_10(<4 x float> %A) {
 ; X32-LABEL: insertps_10:
 ; X32: ## BB#0:
 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
 ; X32-NEXT: retl
@@ -1120,7 +1115,6 @@ define <4 x float> @insertps_10(<4 x float> %A)
 ; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
 ; X64-NEXT: retq
-{
   %vecext = extractelement <4 x float> %A, i32 0
   %vecbuild1 = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %vecext, i32 0
   %vecbuild2 = insertelement <4 x float> %vecbuild1, float %vecext, i32 2
@@ -1129,17 +1123,16 @@ define <4 x float> @insertps_10(<4 x float> %A)
 
 define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
 ; X32-LABEL: build_vector_to_shuffle_1:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
 ; X32-NEXT: xorps %xmm1, %xmm1
 ; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: build_vector_to_shuffle_1:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
 ; X64-NEXT: xorps %xmm1, %xmm1
 ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
 ; X64-NEXT: retq
-entry:
   %vecext = extractelement <4 x float> %A, i32 1
   %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
@@ -1149,17 +1142,16 @@ entry:
 
 define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) {
 ; X32-LABEL: build_vector_to_shuffle_2:
-; X32: ## BB#0: ## %entry
+; X32: ## BB#0:
 ; X32-NEXT: xorps %xmm1, %xmm1
 ; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: build_vector_to_shuffle_2:
-; X64: ## BB#0: ## %entry
+; X64: ## BB#0:
 ; X64-NEXT: xorps %xmm1, %xmm1
 ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; X64-NEXT: retq
-entry:
   %vecext = extractelement <4 x float> %A, i32 1
   %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
   %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
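
For reference, the regenerated FileCheck lines in this patch come from the script named in the new NOTE line, utils/update_llc_test_checks.py. A typical invocation from the top of the LLVM source tree looks like the sketch below; the --llc-binary flag and the build/bin/llc path are assumptions about the local build layout rather than something this patch specifies, so consult the script's --help if they differ:

    python utils/update_llc_test_checks.py --llc-binary=build/bin/llc test/CodeGen/X86/sse41.ll

The script reads the RUN lines in the test, runs llc over the IR itself, and rewrites the X32/X64 check blocks in place, which is why every check block in this diff changes in the same mechanical way.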