define <4 x i32> @sext_inc_vec(<4 x i1> %x) nounwind {
; CHECK-LABEL: sext_inc_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%ext = sext <4 x i1> %x to <4 x i32>
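;
; A minimal IR sketch of the body behind these checks (the _sketch name is
; illustrative, not the verbatim test): sext of an i1 vector gives 0 or -1
; per lane, so adding 1 yields 1 or 0, i.e. NOT(x) AND 1, hence the single
; vandnps against the broadcast splat of 1.
define <4 x i32> @sext_inc_vec_sketch(<4 x i1> %x) nounwind {
  %ext = sext <4 x i1> %x to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}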
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK: # BB#0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp sgt <4 x i32> %x, %y
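;
; Sketch of the assumed shape (illustrative _sketch name): the compare
; produces the 0/-1 mask directly, and the sext-plus-1 again folds into a
; single PANDN against the splat of 1.
define <4 x i32> @cmpgt_sext_inc_vec_sketch(<4 x i32> %x, <4 x i32> %y) nounwind {
  %cmp = icmp sgt <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}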
; CHECK-LABEL: cmpgt_sext_inc_vec256:
; CHECK: # BB#0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%cmp = icmp sgt <4 x i64> %x, %y
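;
; The 256-bit variant is the same fold at <4 x i64>; vpcmpgtq on a ymm
; register requires AVX2. Illustrative sketch:
define <4 x i64> @cmpgt_sext_inc_vec256_sketch(<4 x i64> %x, <4 x i64> %y) nounwind {
  %cmp = icmp sgt <4 x i64> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i64>
  %add = add <4 x i64> %ext, <i64 1, i64 1, i64 1, i64 1>
  ret <4 x i64> %add
}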
; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpxor %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp1 = icmp ne <4 x i32> %a, %b
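;
; x86 has no vector not-equal compare, so icmp ne lowers to PCMPEQ followed
; by a NOT; the vpcmpeqd %xmm2, %xmm2, %xmm2 idiom above materializes the
; all-ones operand for that NOT without a constant-pool load. A sketch of
; the kind of IR that produces this pattern (names assumed):
define <4 x i32> @cmp_ne_sext_sketch(<4 x i32> %a, <4 x i32> %b) nounwind {
  %cmp1 = icmp ne <4 x i32> %a, %b
  %ext = sext <4 x i1> %cmp1 to <4 x i32>
  ret <4 x i32> %ext
}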
;
; AVX-LABEL: mul_v4i32c:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [117,117,117,117]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
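;
; Illustrative sketch: a multiply by a uniform constant, which the new
; constant comment makes visible as the [117,117,117,117] splat feeding
; vpmulld.
define <4 x i32> @mul_v4i32c_sketch(<4 x i32> %i) nounwind {
entry:
  %A = mul <4 x i32> %i, <i32 117, i32 117, i32 117, i32 117>
  ret <4 x i32> %A
}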
;
; AVX-LABEL: mul_v8i32c:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} ymm1 = [117,117,117,117,117,117,117,117]
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
;
; AVX-LABEL: mul_v4i64c:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [117,117,117,117]
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
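;
; AVX2 has no 64-bit vector multiply (vpmullq is AVX512DQ), so a <4 x i64>
; multiply is split into 32-bit partial products: with a splat below 2^32,
; x * 117 = lo(x) * 117 + (hi(x) * 117 << 32), which is the vpmuludq /
; vpsrlq $32 / vpmuludq sequence above (the shift-and-add tail is elided
; here). Illustrative sketch:
define <4 x i64> @mul_v4i64c_sketch(<4 x i64> %i) nounwind {
entry:
  %A = mul <4 x i64> %i, <i64 117, i64 117, i64 117, i64 117>
  ret <4 x i64> %A
}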
;
; AVX2-LABEL: gt_v4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
;
; AVX512-LABEL: gt_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
;
; AVX2-LABEL: lt_v4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
;
; AVX512-LABEL: lt_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
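;
; Pre-AVX512 x86 only has signed vector compares, so unsigned order is
; reduced to signed order by XORing both operands with the sign-bit splat
; (2147483648 = 0x80000000): x <u y iff (x ^ 0x80000000) <s (y ^ 0x80000000).
; gt uses vpcmpgtd directly; lt is the same sequence with the operands
; swapped, and the AVX512 runs here emit the same code. Illustrative sketch
; for the gt case:
define <4 x i32> @gt_v4i32_sketch(<4 x i32> %x, <4 x i32> %y) nounwind {
  %cmp = icmp ugt <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %ext
}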
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: movq (%rdi,%rsi,8), %rax
-; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm1
-; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [-0.5,-0.5,-0.5,-0.5]
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.5,0.5,0.5,0.5]
; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX2-NEXT: vmovupd %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
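;
; The vpslld $31 / vpmovsxdq pair moves an i1 into the sign bit and widens
; it to 64-bit lanes because vblendvpd selects on each quadword's sign bit;
; the two broadcasts are the select arms. A sketch under those assumptions
; (the mask source and store target are illustrative):
define void @select_half_sketch(<4 x i1> %m, <4 x double>* %p) nounwind {
  %sel = select <4 x i1> %m, <4 x double> <double -0.5, double -0.5, double -0.5, double -0.5>, <4 x double> <double 0.5, double 0.5, double 0.5, double 0.5>
  store <4 x double> %sel, <4 x double>* %p, align 8
  ret void
}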
;
; AVX2-LABEL: test3:
; AVX2: ## BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm3
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuldq %xmm4, %xmm5, %xmm4
; AVX2-NEXT: vpmuldq %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
; AVX2-NEXT: vpsrld $31, %xmm3, %xmm4
; AVX2-NEXT: vpaddd %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [3,3,3,3]
; AVX2-NEXT: vpmulld %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
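;
; Worked numbers for the divide-by-3 magic: 1431655766 = 0x55555556 =
; ceil(2^32 / 3), so the signed high-half product hi32(n * 0x55555556)
; approximates n / 3; vpsrld $31 / vpaddd add the quotient's sign bit to
; round toward zero for negative n, and the vpmulld by [3,3,3,3] plus
; vpsubd recover the remainder, so this presumably exercises srem by 3.
; Illustrative sketch:
define <4 x i32> @srem3_sketch(<4 x i32> %n) nounwind {
  %rem = srem <4 x i32> %n, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %rem
}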