define void @t(<4 x float> %A) {
; SSE-LABEL: t:
; SSE: # BB#0:
-; SSE-NEXT: xorps .LCPI0_0, %xmm0
+; SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
; SSE-NEXT: movaps %xmm0, 0
; SSE-NEXT: retl
;
; AVX-LABEL: t:
; AVX: # BB#0:
-; AVX-NEXT: vxorps .LCPI0_0, %xmm0, %xmm0
+; AVX-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, 0
; AVX-NEXT: retl
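+; The fsub from -0.0 below is a vector fneg; on x86 it lowers to an xorps
+; against a sign-bit mask loaded from the constant pool, which is why the
+; checks above match a .LCPI constant-pool label (loosened to a regex so
+; the test does not depend on the label's numbering).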
%tmp1277 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %A
store <4 x float> %tmp1277, <4 x float>* null
ret void
}
+
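+; andn_double_xor computes (~a) & (b ^ c). The checks below pin down the
+; current lowering, which materializes all-ones with pcmpeqd and then uses
+; pxor/pand; folding the not into a single pandn would presumably be a
+; follow-up combine.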
+define <2 x i64> @andn_double_xor(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+; SSE-LABEL: andn_double_xor:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE-NEXT: pxor %xmm3, %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: retl
+;
+; AVX-LABEL: andn_double_xor:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retl
+ %1 = xor <2 x i64> %a, <i64 -1, i64 -1>
+ %2 = xor <2 x i64> %b, %c
+ %3 = and <2 x i64> %1, %2
+ ret <2 x i64> %3
+}
+
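+; A minimal pair of RUN lines for exercising both check prefixes, assuming
+; the 32-bit triple implied by retl (the file's actual RUN lines live at the
+; top and may differ):
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX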