; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64
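; AVX1 has no register-source broadcast instruction, so splats of values
; already in registers lower to shuffles plus vinsertf128; only splats fed
; from memory can use vbroadcastss/vbroadcastsd directly.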
define <32 x i8> @funcA(<32 x i8> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcA:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: ret{{[l|q]}}
entry:
%shuffle = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
  ret <32 x i8> %shuffle
}

define <16 x i16> @funcB(<16 x i16> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcB:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: ret{{[l|q]}}
entry:
%shuffle = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <16 x i16> %shuffle
}
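; On x86-64 the i64 argument arrives in %rdi and must be splatted with
; shuffles; on i686 it is passed on the stack, so it can be broadcast
; straight from memory.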
define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp {
-; CHECK-LABEL: funcC:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmovq %rdi, %xmm0
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X86-LABEL: funcC:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: funcC:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vmovq %rdi, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
entry:
%vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0
  %vecinit2.i = insertelement <4 x i64> %vecinit.i, i64 %q, i32 1
  %vecinit4.i = insertelement <4 x i64> %vecinit2.i, i64 %q, i32 2
  %vecinit6.i = insertelement <4 x i64> %vecinit4.i, i64 %q, i32 3
  ret <4 x i64> %vecinit6.i
}
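; Likewise for a double: on x86-64 %q is already in %xmm0; on i686 it is
; on the stack and can be broadcast in a single instruction.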
define <4 x double> @funcD(double %q) nounwind uwtable readnone ssp {
-; CHECK-LABEL: funcD:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X86-LABEL: funcD:
+; X86: # %bb.0: # %entry
+; X86-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: funcD:
+; X64: # %bb.0: # %entry
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
entry:
%vecinit.i = insertelement <4 x double> undef, double %q, i32 0
  %vecinit2.i = insertelement <4 x double> %vecinit.i, double %q, i32 1
  %vecinit4.i = insertelement <4 x double> %vecinit2.i, double %q, i32 2
  %vecinit6.i = insertelement <4 x double> %vecinit4.i, double %q, i32 3
  ret <4 x double> %vecinit6.i
}

; Test this turns into a broadcast:
;   shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
;
define <8 x float> @funcE() nounwind {
-; CHECK-LABEL: funcE:
-; CHECK: # %bb.0: # %allocas
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: # implicit-def: $ymm0
-; CHECK-NEXT: jne .LBB4_2
-; CHECK-NEXT: # %bb.1: # %load.i1247
-; CHECK-NEXT: pushq %rbp
-; CHECK-NEXT: movq %rsp, %rbp
-; CHECK-NEXT: andq $-32, %rsp
-; CHECK-NEXT: subq $1312, %rsp # imm = 0x520
-; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %ymm0
-; CHECK-NEXT: movq %rbp, %rsp
-; CHECK-NEXT: popq %rbp
-; CHECK-NEXT: .LBB4_2: # %__load_and_broadcast_32.exit1249
-; CHECK-NEXT: retq
+; X86-LABEL: funcE:
+; X86: # %bb.0: # %allocas
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: testb %al, %al
+; X86-NEXT: # implicit-def: $ymm0
+; X86-NEXT: jne .LBB4_2
+; X86-NEXT: # %bb.1: # %load.i1247
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-32, %esp
+; X86-NEXT: subl $1312, %esp # imm = 0x520
+; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .LBB4_2: # %__load_and_broadcast_32.exit1249
+; X86-NEXT: retl
+;
+; X64-LABEL: funcE:
+; X64: # %bb.0: # %allocas
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: testb %al, %al
+; X64-NEXT: # implicit-def: $ymm0
+; X64-NEXT: jne .LBB4_2
+; X64-NEXT: # %bb.1: # %load.i1247
+; X64-NEXT: pushq %rbp
+; X64-NEXT: movq %rsp, %rbp
+; X64-NEXT: andq $-32, %rsp
+; X64-NEXT: subq $1312, %rsp # imm = 0x520
+; X64-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %ymm0
+; X64-NEXT: movq %rbp, %rsp
+; X64-NEXT: popq %rbp
+; X64-NEXT: .LBB4_2: # %__load_and_broadcast_32.exit1249
+; X64-NEXT: retq
allocas:
%udx495 = alloca [18 x [18 x float]], align 32
br label %for_test505.preheader

for_test505.preheader:                            ; preds = %for_test505.preheader, %allocas
  br i1 undef, label %for_exit499, label %for_test505.preheader

for_exit499:                                      ; preds = %for_test505.preheader
  br i1 undef, label %__load_and_broadcast_32.exit1249, label %load.i1247

load.i1247:                                       ; preds = %for_exit499
  %ptr1227 = getelementptr [18 x [18 x float]], [18 x [18 x float]]* %udx495, i64 0, i64 1, i64 1
  %ptr.i1237 = bitcast float* %ptr1227 to i32*
  %val.i1238 = load i32, i32* %ptr.i1237, align 4
  %ret6.i1245 = insertelement <8 x i32> undef, i32 %val.i1238, i32 6
  %ret7.i1246 = insertelement <8 x i32> %ret6.i1245, i32 %val.i1238, i32 7
  %phitmp1291 = bitcast <8 x i32> %ret7.i1246 to <8 x float>
  br label %__load_and_broadcast_32.exit1249

__load_and_broadcast_32.exit1249:                 ; preds = %load.i1247, %for_exit499
  %load_broadcast12281250 = phi <8 x float> [ %phitmp1291, %load.i1247 ], [ undef, %for_exit499 ]
  ret <8 x float> %load_broadcast12281250
}
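; Only lanes 6 and 7 of the result are defined, so any lowering that places
; %val in those two lanes is acceptable.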
define <8 x float> @funcF(i32 %val) nounwind {
-; CHECK-LABEL: funcF:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vmovd %edi, %xmm0
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,0]
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X86-LABEL: funcF:
+; X86: # %bb.0:
+; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: funcF:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %edi, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,0]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
%ret6 = insertelement <8 x i32> undef, i32 %val, i32 6
%ret7 = insertelement <8 x i32> %ret6, i32 %val, i32 7
  %tmp = bitcast <8 x i32> %ret7 to <8 x float>
  ret <8 x float> %tmp
}

define <8 x float> @funcG(<8 x float> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcG:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; CHECK-NEXT: ret{{[l|q]}}
entry:
%shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
  ret <8 x float> %shuffle
}

define <8 x float> @funcH(<8 x float> %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: funcH:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,1,1,5,5,5,5]
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; CHECK-NEXT: retq
+; CHECK-NEXT: ret{{[l|q]}}
entry:
%shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x float> %shuffle
}
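; Splats fed directly by a load can use vbroadcastss/vbroadcastsd (or
; vmovddup for a 128-bit f64 splat) on both targets; i686 merely needs an
; extra movl to fetch the pointer argument from the stack.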
define <2 x double> @splat_load_2f64_11(<2 x double>* %ptr) {
-; CHECK-LABEL: splat_load_2f64_11:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; CHECK-NEXT: retq
+; X86-LABEL: splat_load_2f64_11:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X86-NEXT: retl
+;
+; X64-LABEL: splat_load_2f64_11:
+; X64: # %bb.0:
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT: retq
%x = load <2 x double>, <2 x double>* %ptr
%x1 = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 1>
ret <2 x double> %x1
}
define <4 x double> @splat_load_4f64_2222(<4 x double>* %ptr) {
-; CHECK-LABEL: splat_load_4f64_2222:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vbroadcastsd 16(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X86-LABEL: splat_load_4f64_2222:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vbroadcastsd 16(%eax), %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: splat_load_4f64_2222:
+; X64: # %bb.0:
+; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
+; X64-NEXT: retq
%x = load <4 x double>, <4 x double>* %ptr
%x1 = shufflevector <4 x double> %x, <4 x double> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
ret <4 x double> %x1
}
define <4 x float> @splat_load_4f32_0000(<4 x float>* %ptr) {
-; CHECK-LABEL: splat_load_4f32_0000:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
-; CHECK-NEXT: retq
+; X86-LABEL: splat_load_4f32_0000:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vbroadcastss (%eax), %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: splat_load_4f32_0000:
+; X64: # %bb.0:
+; X64-NEXT: vbroadcastss (%rdi), %xmm0
+; X64-NEXT: retq
%x = load <4 x float>, <4 x float>* %ptr
%x1 = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
ret <4 x float> %x1
}
define <8 x float> @splat_load_8f32_77777777(<8 x float>* %ptr) {
-; CHECK-LABEL: splat_load_8f32_77777777:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vbroadcastss 28(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X86-LABEL: splat_load_8f32_77777777:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vbroadcastss 28(%eax), %ymm0
+; X86-NEXT: retl
+;
+; X64-LABEL: splat_load_8f32_77777777:
+; X64: # %bb.0:
+; X64-NEXT: vbroadcastss 28(%rdi), %ymm0
+; X64-NEXT: retq
%x = load <8 x float>, <8 x float>* %ptr
%x1 = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
  ret <8 x float> %x1
}