Depth + 1))
return true;
+ // Attempt to avoid multi-use ops if we don't need anything from them.
+ if (!DemandedSrcBits.isAllOnesValue() ||
+ !DemandedSrcElts.isAllOnesValue()) {
+ if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
+ Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
+ SDValue NewOp =
+ TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx);
+ return TLO.CombineTo(Op, NewOp);
+ }
+ }
+
Known = Known2;
if (BitWidth > EltBitWidth)
Known = Known.zext(BitWidth, false /* => any extend */);
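The hunk above is the heart of the change: when the extract demands only some bits or elements of its source, it asks SimplifyMultipleUseDemandedBits for a simplified source to use for this one extract, bypassing a multi-use producer without rewriting it for its other users. A minimal standalone sketch of the identity being exploited (hypothetical names, plain C++ rather than LLVM's API):

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Stand-in for a multi-use vector op: masks every lane of its input.
static std::array<uint32_t, 4> maskAllLanes(std::array<uint32_t, 4> V) {
  for (uint32_t &L : V)
    L &= 0xFF;
  return V;
}

int main() {
  std::array<uint32_t, 4> Src = {0x1234, 0x5678, 0x9ABC, 0xDEF0};
  // Extracting lane 1 and truncating to 8 bits demands only the low 8 bits
  // of that one lane, so the full-vector mask contributes nothing here and
  // the extract can read the original source directly.
  uint8_t ViaMask = static_cast<uint8_t>(maskAllLanes(Src)[1]);
  uint8_t Direct = static_cast<uint8_t>(Src[1]); // simplified form
  std::printf("%u %u\n", unsigned(ViaMask), unsigned(Direct)); // 120 120
  return 0;
}
```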
; CHECK-NEXT: mov v0.b[14], w8
; CHECK-NEXT: mov v0.b[15], w8
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: umov w8, v0.b[1]
-; CHECK-NEXT: umov w9, v0.b[0]
+; CHECK-NEXT: and v1.8b, v0.8b, v1.8b
+; CHECK-NEXT: umov w8, v1.b[1]
+; CHECK-NEXT: umov w9, v1.b[0]
; CHECK-NEXT: and w8, w9, w8
-; CHECK-NEXT: umov w9, v0.b[2]
+; CHECK-NEXT: umov w9, v1.b[2]
; CHECK-NEXT: and w8, w8, w9
-; CHECK-NEXT: umov w9, v0.b[3]
+; CHECK-NEXT: umov w9, v1.b[3]
; CHECK-NEXT: and w8, w8, w9
-; CHECK-NEXT: umov w9, v0.b[4]
+; CHECK-NEXT: umov w9, v1.b[4]
; CHECK-NEXT: and w8, w8, w9
-; CHECK-NEXT: umov w9, v0.b[5]
+; CHECK-NEXT: umov w9, v1.b[5]
; CHECK-NEXT: and w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[6]
; CHECK-NEXT: and w8, w8, w9
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov v0.s[3], w8
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: and v1.8b, v0.8b, v1.8b
; CHECK-NEXT: mov w8, v0.s[1]
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: fmov w9, s1
; CHECK-NEXT: and w0, w9, w8
; CHECK-NEXT: ret
%b = call i32 @llvm.experimental.vector.reduce.and.v3i32(<3 x i32> %a)
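Note the asymmetry these AArch64 checks now encode: lanes 1-5 are read from the AND result v1, but lane 6 is still read from v0. Lane 6's partner in the pairwise `ext`/`and` is one of the padding lanes written via `mov v0.b[14], w8` (all-ones padding, the identity element for a reduce-and), so `x & -1 == x` and the extract bypasses the AND. A hedged standalone sketch of that pairwise step (my own lane count and values, not the test's):

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

int main() {
  // Six data lanes widened to eight with 0xFF, the identity for AND.
  std::array<uint8_t, 8> V = {0x3F, 0x5A, 0x7E, 0xDB, 0xBD, 0xE7, 0xFF, 0xFF};
  // Pairwise halving step, mirroring ext+and: And[i] = V[i] & V[i+4].
  std::array<uint8_t, 4> And;
  for (int I = 0; I < 4; ++I)
    And[I] = V[I] & V[I + 4];
  // Lanes whose partner is the all-ones padding equal the source lane,
  // so an extract of them may read V directly instead of And.
  std::printf("%d %d\n", And[2] == V[2], And[3] == V[3]); // 1 1
  return 0;
}
```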
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple armv7 %s -o - | FileCheck %s
define float @f(<4 x i16>* nocapture %in) {
ret <4 x i32> %13
}
+; FIXME: The vmov.u + sxt can convert to a vmov.s
define float @i(<4 x i16>* nocapture %in) {
- ; FIXME: The vmov.u + sxt can convert to a vmov.s
; CHECK-LABEL: i:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
ret float %3
}
-define float @k(<8 x i8>* nocapture %in) {
; FIXME: The vmov.u + sxt can convert to a vmov.s
+define float @k(<8 x i8>* nocapture %in) {
; CHECK-LABEL: k:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
}
define float @KnownUpperZero(<4 x i16> %v) {
-; FIXME: uxtb are not required
; CHECK-LABEL: KnownUpperZero:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov.i16 d16, #0x3
-; CHECK-NEXT: vmov d17, r0, r1
-; CHECK-NEXT: vand d16, d17, d16
+; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vmov.u16 r0, d16[0]
; CHECK-NEXT: vmov.u16 r1, d16[3]
-; CHECK-NEXT: uxtb r0, r0
+; CHECK-NEXT: and r0, r0, #3
; CHECK-NEXT: vmov s0, r0
-; CHECK-NEXT: uxtb r0, r1
+; CHECK-NEXT: and r0, r1, #3
; CHECK-NEXT: vmov s2, r0
; CHECK-NEXT: vcvt.f32.s32 s0, s0
; CHECK-NEXT: vcvt.f32.s32 s2, s2
; NO-SIMD128-NOT: i8x16
; SIMD128-NEXT: .functype shl_vec_v16i8 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
-; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
-; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
-; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
+; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
; Skip 14 lanes
; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}}
-; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
-; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}}
+; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
+; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
+; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
+; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @shl_vec_v16i8(<16 x i8> %v, <16 x i8> %x) {
%a = shl <16 x i8> %v, %x
; NO-SIMD128-NOT: i8x16
; SIMD128-NEXT: .functype shr_s_vec_v16i8 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
-; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
-; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
; Skip 14 lanes
-; SIMD128: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 15{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 15{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 15, $pop[[L2]]{{$}}
+; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}}
+; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
+; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
+; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @shr_s_vec_v16i8(<16 x i8> %v, <16 x i8> %x) {
%a = ashr <16 x i8> %v, %x
; NO-SIMD128-NOT: i8x16
; SIMD128-NEXT: .functype shr_u_vec_v16i8 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
-; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
-; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
-; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
+; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
; Skip 14 lanes
; SIMD128: i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}}
-; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
-; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}}
+; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
+; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
+; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
+; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @shr_u_vec_v16i8(<16 x i8> %v, <16 x i8> %x) {
%a = lshr <16 x i8> %v, %x
; NO-SIMD128-NOT: i16x8
; SIMD128-NEXT: .functype shl_vec_v8i16 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
-; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
-; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
-; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
+; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
; Skip 6 lanes
; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}}
-; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
-; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}}
+; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
+; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
+; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
+; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @shl_vec_v8i16(<8 x i16> %v, <8 x i16> %x) {
%a = shl <8 x i16> %v, %x
; NO-SIMD128-NOT: i16x8
; SIMD128-NEXT: .functype shr_s_vec_v8i16 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
-; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
-; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
; Skip 6 lanes
-; SIMD128: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 7{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 7{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 7, $pop[[L2]]{{$}}
+; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}}
+; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
+; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
+; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @shr_s_vec_v8i16(<8 x i16> %v, <8 x i16> %x) {
%a = ashr <8 x i16> %v, %x
; NO-SIMD128-NOT: i16x8
; SIMD128-NEXT: .functype shr_u_vec_v8i16 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
-; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
-; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
-; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
+; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
+; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
; Skip 6 lanes
; SIMD128: i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}}
-; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
-; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}}
+; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
+; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
+; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
+; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @shr_u_vec_v8i16(<8 x i16> %v, <8 x i16> %x) {
%a = lshr <8 x i16> %v, %x
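Across all six WebAssembly hunks the improvement is the same: the old sequence splatted the mask constant (7 for i8 lanes, 15 for i16 lanes) into a vector, `v128.and`-ed the whole shift-amount vector, and re-extracted each lane through a `local.tee`; the new sequence extracts the amount lane first and masks it with a scalar `i32.and`. This is sound because masking commutes with lane extraction. A hedged standalone check of that commutation (plain C++, with a hypothetical `vandSplat` helper standing in for splat plus `v128.and`):

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Vector-wide form (old codegen): splat the mask, AND every lane.
static std::array<uint8_t, 16> vandSplat(std::array<uint8_t, 16> V,
                                         uint8_t Mask) {
  for (uint8_t &L : V)
    L &= Mask;
  return V;
}

int main() {
  std::array<uint8_t, 16> Amt{};
  for (int I = 0; I < 16; ++I)
    Amt[I] = static_cast<uint8_t>(I * 3);
  std::array<uint8_t, 16> Masked = vandSplat(Amt, 7);
  for (int I = 0; I < 16; ++I) {
    // Per-lane form (new codegen): extract first, then scalar AND with 7.
    uint8_t Lane = static_cast<uint8_t>(Amt[I] & 7);
    if (Lane != Masked[I]) {
      std::printf("mismatch at lane %d\n", I);
      return 1;
    }
  }
  std::printf("extract-then-mask == mask-then-extract for all 16 lanes\n");
  return 0;
}
```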
; CHECK: .functype foo (i32) -> ()
; CHECK-NEXT: i32.load8_u 0
; CHECK-NEXT: i32x4.splat
-; CHECK-NEXT: i32.load8_u 1
-; CHECK-NEXT: i32x4.replace_lane 1
-; CHECK-NEXT: i32.const 2
-; CHECK-NEXT: i32.add
-; CHECK-NEXT: i32.load8_u 0
-; CHECK-NEXT: i32x4.replace_lane 2
-; CHECK-NEXT: i32.const 3
-; CHECK-NEXT: i32.add
-; CHECK-NEXT: i32.load8_u 0
-; CHECK-NEXT: i32x4.replace_lane 3
; CHECK-NEXT: local.tee
; CHECK-NEXT: i8x16.extract_lane_s 0
; CHECK-NEXT: f64.convert_i32_s
; CHECK-NEXT: f64.add
; CHECK-NEXT: f32.demote_f64
; CHECK-NEXT: f32x4.splat
+; CHECK-NEXT: i32.load8_u 1
+; CHECK-NEXT: i32x4.replace_lane 1
+; CHECK-NEXT: local.tee
; CHECK-NEXT: i8x16.extract_lane_s 4
; CHECK-NEXT: f64.convert_i32_s
; CHECK-NEXT: f64.const 0x0p0
; CHECK-NEXT: f64.add
; CHECK-NEXT: f32.demote_f64
; CHECK-NEXT: f32x4.replace_lane 1
+; CHECK-NEXT: i32.const 2
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: i32.load8_u 0
+; CHECK-NEXT: i32x4.replace_lane 2
+; CHECK-NEXT: local.tee
; CHECK-NEXT: i8x16.extract_lane_s 8
; CHECK-NEXT: f64.convert_i32_s
; CHECK-NEXT: f64.const 0x0p0
; CHECK-NEXT: f64.add
; CHECK-NEXT: f32.demote_f64
; CHECK-NEXT: f32x4.replace_lane 2
+; CHECK-NEXT: i32.const 3
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: i32.load8_u 0
+; CHECK-NEXT: i32x4.replace_lane 3
; CHECK-NEXT: i8x16.extract_lane_s 12
; CHECK-NEXT: f64.convert_i32_s
; CHECK-NEXT: f64.const 0x0p0
define <3 x i16> @zext_i8(<3 x i8>) {
; SSE3-LABEL: zext_i8:
; SSE3: # %bb.0:
+; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %edx
-; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; SSE3-NEXT: movd %eax, %xmm0
-; SSE3-NEXT: pinsrw $1, %edx, %xmm0
-; SSE3-NEXT: pinsrw $2, %ecx, %xmm0
-; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-NEXT: # kill: def $dx killed $dx killed $edx
; SSE3-NEXT: # kill: def $cx killed $cx killed $ecx
; SSE2-NEXT: psrad $8, %xmm1
; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rdi)
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: movw %cx, 3(%rdi)
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: movw %cx, 6(%rdi)
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: movw %dx, 3(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: shrl $16, %ecx
-; SSE2-NEXT: movb %cl, 5(%rdi)
+; SSE2-NEXT: movb %cl, 8(%rdi)
+; SSE2-NEXT: shrl $16, %edx
+; SSE2-NEXT: movb %dl, 5(%rdi)
; SSE2-NEXT: movd %xmm5, %eax
; SSE2-NEXT: movw %ax, 9(%rdi)
-; SSE2-NEXT: movd %xmm6, %ecx
-; SSE2-NEXT: movw %cx, 6(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 11(%rdi)
-; SSE2-NEXT: shrl $16, %ecx
-; SSE2-NEXT: movb %cl, 8(%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-NEXT: psrad $8, %xmm1
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
; SSSE3-NEXT: psrad $31, %xmm4
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm4
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
; SSSE3-NEXT: por %xmm4, %xmm1
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: movw %ax, (%rdi)
-; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: movw %cx, 3(%rdi)
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %ecx
+; SSSE3-NEXT: movw %cx, 6(%rdi)
+; SSSE3-NEXT: movd %xmm2, %edx
+; SSSE3-NEXT: movw %dx, 3(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 2(%rdi)
; SSSE3-NEXT: shrl $16, %ecx
-; SSSE3-NEXT: movb %cl, 5(%rdi)
+; SSSE3-NEXT: movb %cl, 8(%rdi)
+; SSSE3-NEXT: shrl $16, %edx
+; SSSE3-NEXT: movb %dl, 5(%rdi)
; SSSE3-NEXT: movd %xmm5, %eax
; SSSE3-NEXT: movw %ax, 9(%rdi)
-; SSSE3-NEXT: movd %xmm6, %ecx
-; SSSE3-NEXT: movw %cx, 6(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 11(%rdi)
-; SSSE3-NEXT: shrl $16, %ecx
-; SSSE3-NEXT: movb %cl, 8(%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm5
+; SSE2-NEXT: pxor %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[3,1,2,3]
; SSE2-NEXT: psrld $24, %xmm1
; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm5, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rdi)
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: movw %cx, 3(%rdi)
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: movw %cx, 6(%rdi)
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: movw %dx, 3(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: shrl $16, %ecx
-; SSE2-NEXT: movb %cl, 5(%rdi)
-; SSE2-NEXT: movd %xmm5, %eax
+; SSE2-NEXT: movb %cl, 8(%rdi)
+; SSE2-NEXT: shrl $16, %edx
+; SSE2-NEXT: movb %dl, 5(%rdi)
+; SSE2-NEXT: movd %xmm6, %eax
; SSE2-NEXT: movw %ax, 9(%rdi)
-; SSE2-NEXT: movd %xmm6, %ecx
-; SSE2-NEXT: movw %cx, 6(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 11(%rdi)
-; SSE2-NEXT: shrl $16, %ecx
-; SSE2-NEXT: movb %cl, 8(%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
+; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5
+; SSSE3-NEXT: pxor %xmm5, %xmm3
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[3,1,2,3]
; SSSE3-NEXT: psrld $24, %xmm1
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1
-; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
-; SSSE3-NEXT: pxor %xmm4, %xmm3
-; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: pxor %xmm5, %xmm1
; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: movw %ax, (%rdi)
-; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: movw %cx, 3(%rdi)
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, %ecx
+; SSSE3-NEXT: movw %cx, 6(%rdi)
+; SSSE3-NEXT: movd %xmm2, %edx
+; SSSE3-NEXT: movw %dx, 3(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 2(%rdi)
; SSSE3-NEXT: shrl $16, %ecx
-; SSSE3-NEXT: movb %cl, 5(%rdi)
-; SSSE3-NEXT: movd %xmm5, %eax
+; SSSE3-NEXT: movb %cl, 8(%rdi)
+; SSSE3-NEXT: shrl $16, %edx
+; SSSE3-NEXT: movb %dl, 5(%rdi)
+; SSSE3-NEXT: movd %xmm6, %eax
; SSSE3-NEXT: movw %ax, 9(%rdi)
-; SSSE3-NEXT: movd %xmm6, %ecx
-; SSSE3-NEXT: movw %cx, 6(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 11(%rdi)
-; SSSE3-NEXT: shrl $16, %ecx
-; SSSE3-NEXT: movb %cl, 8(%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; X32-NEXT: pandn {{\.LCPI.*}}, %xmm0
-; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X32-NEXT: movd %xmm1, %ecx
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-NEXT: movd %xmm0, %edx
-; X32-NEXT: xorl $1, %edx
+; X32-NEXT: movd %xmm0, %ecx
; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: orl %ecx, %edx
+; X32-NEXT: cmpl $1, %ecx
; X32-NEXT: setne %al
; X32-NEXT: retl
;