%res = fptoui <2 x float> %src to <2 x i32>
ret <2 x i32> %res
}
+
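+; PR40146: shuffle that interleaves the bytes of the first two i64 elements of
+; %x with zero, via a bitcast to <32 x i8>.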
+define <32 x i8> @PR40146(<4 x i64> %x) {
+; CHECK-LABEL: PR40146:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpextrd $2, %xmm0, %eax
+; CHECK-NEXT: movzbl %ah, %ecx
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpinsrb $0, %eax, %xmm1, %xmm2
+; CHECK-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shrl $16, %ecx
+; CHECK-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2
+; CHECK-NEXT: vpextrd $3, %xmm0, %ecx
+; CHECK-NEXT: shrl $24, %eax
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; CHECK-NEXT: movzbl %ch, %eax
+; CHECK-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; CHECK-NEXT: movl %ecx, %eax
+; CHECK-NEXT: shrl $16, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: shrl $24, %ecx
+; CHECK-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2
+; CHECK-NEXT: movzbl %ah, %ecx
+; CHECK-NEXT: vpinsrb $0, %eax, %xmm1, %xmm1
+; CHECK-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shrl $16, %ecx
+; CHECK-NEXT: vpinsrb $4, %ecx, %xmm1, %xmm1
+; CHECK-NEXT: vpextrd $1, %xmm0, %ecx
+; CHECK-NEXT: shrl $24, %eax
+; CHECK-NEXT: vpinsrb $6, %eax, %xmm1, %xmm0
+; CHECK-NEXT: movzbl %ch, %eax
+; CHECK-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; CHECK-NEXT: movl %ecx, %eax
+; CHECK-NEXT: shrl $16, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; CHECK-NEXT: shrl $24, %ecx
+; CHECK-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: retl
+;
+; CHECK-WIDE-LABEL: PR40146:
+; CHECK-WIDE: ## %bb.0:
+; CHECK-WIDE-NEXT: vpextrd $2, %xmm0, %eax
+; CHECK-WIDE-NEXT: movzbl %ah, %ecx
+; CHECK-WIDE-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-WIDE-NEXT: vpinsrb $0, %eax, %xmm1, %xmm2
+; CHECK-WIDE-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2
+; CHECK-WIDE-NEXT: movl %eax, %ecx
+; CHECK-WIDE-NEXT: shrl $16, %ecx
+; CHECK-WIDE-NEXT: vpinsrb $4, %ecx, %xmm2, %xmm2
+; CHECK-WIDE-NEXT: vpextrd $3, %xmm0, %ecx
+; CHECK-WIDE-NEXT: shrl $24, %eax
+; CHECK-WIDE-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; CHECK-WIDE-NEXT: movzbl %ch, %eax
+; CHECK-WIDE-NEXT: vpinsrb $8, %ecx, %xmm2, %xmm2
+; CHECK-WIDE-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; CHECK-WIDE-NEXT: movl %ecx, %eax
+; CHECK-WIDE-NEXT: shrl $16, %eax
+; CHECK-WIDE-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; CHECK-WIDE-NEXT: vmovd %xmm0, %eax
+; CHECK-WIDE-NEXT: shrl $24, %ecx
+; CHECK-WIDE-NEXT: vpinsrb $14, %ecx, %xmm2, %xmm2
+; CHECK-WIDE-NEXT: movzbl %ah, %ecx
+; CHECK-WIDE-NEXT: vpinsrb $0, %eax, %xmm1, %xmm1
+; CHECK-WIDE-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
+; CHECK-WIDE-NEXT: movl %eax, %ecx
+; CHECK-WIDE-NEXT: shrl $16, %ecx
+; CHECK-WIDE-NEXT: vpinsrb $4, %ecx, %xmm1, %xmm1
+; CHECK-WIDE-NEXT: vpextrd $1, %xmm0, %ecx
+; CHECK-WIDE-NEXT: shrl $24, %eax
+; CHECK-WIDE-NEXT: vpinsrb $6, %eax, %xmm1, %xmm0
+; CHECK-WIDE-NEXT: movzbl %ch, %eax
+; CHECK-WIDE-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; CHECK-WIDE-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; CHECK-WIDE-NEXT: movl %ecx, %eax
+; CHECK-WIDE-NEXT: shrl $16, %eax
+; CHECK-WIDE-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; CHECK-WIDE-NEXT: shrl $24, %ecx
+; CHECK-WIDE-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; CHECK-WIDE-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-WIDE-NEXT: retl
+ %perm = shufflevector <4 x i64> %x, <4 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
+ %t1 = bitcast <4 x i64> %perm to <32 x i8>
+ %t2 = shufflevector <32 x i8> %t1, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, <32 x i32> <i32 0, i32 32, i32 1, i32 32, i32 2, i32 32, i32 3, i32 32, i32 4, i32 32, i32 5, i32 32, i32 6, i32 32, i32 7, i32 32, i32 16, i32 48, i32 17, i32 48, i32 18, i32 48, i32 19, i32 48, i32 20, i32 48, i32 21, i32 48, i32 22, i32 48, i32 23, i32 48>
+ ret <32 x i8> %t2
+}
+