BUILTIN(__builtin_altivec_vabsduh, "V8UsV8UsV8Us", "")
BUILTIN(__builtin_altivec_vabsduw, "V4UiV4UiV4Ui", "")
+// P9 vector shift built-ins.
+BUILTIN(__builtin_altivec_vslv, "V16UcV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vsrv, "V16UcV16UcV16Uc", "")
+
// VSX built-ins.
BUILTIN(__builtin_vsx_lxvd2x, "V2divC*", "")
#endif
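For reference, the prototype strings in Clang's Builtins*.def list the return type first and then the parameter types, so "V16UcV16UcV16Uc" declares a builtin taking two vectors of sixteen unsigned chars and returning one. A minimal sketch of calling the new builtin directly (illustrative only; a Power9 target is assumed, and the vec_slv/vec_srv wrappers added below are the intended interface):

// demo_vslv.c -- sketch only; build with something like:
//   clang -target powerpc64le-linux-gnu -mcpu=pwr9 -c demo_vslv.c
#include <altivec.h>

vector unsigned char demo(vector unsigned char v, vector unsigned char sh) {
  // Shift each byte of v left by the per-byte count in sh.
  return __builtin_altivec_vslv(v, sh);
}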
}
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_sld(vector bool long long __a, vector bool long long __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
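+  // On little-endian targets vperm numbers bytes from the opposite end, so
+  // the operands are swapped and mask element i becomes 16 + i - __d.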
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sld(vector signed long long __a, vector signed long long __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sld(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a,
+ vector double __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+#endif
+
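For readers of the patch, a model of what the permute masks above compute (a sketch of the big-endian definition, not part of the header): vec_sld(__a, __b, __c) selects 16 consecutive bytes of the 32-byte concatenation __a:__b starting at byte __c & 0x0F.

// Conceptual big-endian reference: result[i] = concat(a, b)[(c & 0x0F) + i].
static unsigned char sld_model(const unsigned char a[16],
                               const unsigned char b[16],
                               unsigned c, unsigned i) {
  unsigned idx = (c & 0x0F) + i;           // same masking as __c & 0x0F above
  return idx < 16 ? a[idx] : b[idx - 16];  // read across the a:b concatenation
}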
+/* vec_sldw */
+static __inline__ vector signed char __ATTRS_o_ai vec_sldw(
+ vector signed char __a, vector signed char __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sldw(vector unsigned char __a, vector unsigned char __b,
+ unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector signed short __ATTRS_o_ai vec_sldw(
+ vector signed short __a, vector signed short __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sldw(vector unsigned short __a, vector unsigned short __b,
+ unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw(
+ vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sldw(vector signed long long __a, vector signed long long __b,
+ unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sldw(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+#endif
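The vec_sldw wrappers only rescale the count, so each reduces to vec_sld: (__c << 2) & 0x0F converts a word count into a byte count modulo 16. A sketch assuming the definitions above:

// One 32-bit word is four bytes, so these select the same bytes:
vector signed int sldw_demo(vector signed int a, vector signed int b) {
  return vec_sldw(a, b, 1);  // same result as vec_sld(a, b, 4)
}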
+
+#ifdef __POWER9_VECTOR__
+/* vec_slv */
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_slv(vector unsigned char __a, vector unsigned char __b) {
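+  // ISA 3.0 vslv: each byte of __a is shifted left by the count in the low
+  // three bits of the corresponding byte of __b.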
+ return __builtin_altivec_vslv(__a, __b);
+}
+
+/* vec_srv */
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_srv(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vsrv(__a, __b);
+}
+#endif
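Unlike vec_sll below, which applies one count to the whole register, vslv and vsrv take an independent count per byte. A usage sketch (Power9 only; the values are illustrative):

// Shift byte i of `data` left by (i % 8) bits -- sketch only:
vector unsigned char slv_demo(vector unsigned char data) {
  vector unsigned char counts = {0, 1, 2, 3, 4, 5, 6, 7,
                                 0, 1, 2, 3, 4, 5, 6, 7};
  return vec_slv(data, counts);
}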
+
/* vec_vsldoi */
static __inline__ vector signed char __ATTRS_o_ai
(vector int)__b);
}
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sll(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sll(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+#endif
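The long long overloads of vec_sll only bit-cast to vector int: vsl shifts the entire 128-bit register left by 0-7 bits, so the element type does not matter. The count comes from the low three bits of each byte of the shift operand, and the ISA expects every byte to carry the same value. A sketch assuming VSX:

// Shift the whole register left by 3 bits:
vector unsigned long long sll_demo(vector unsigned long long v) {
  return vec_sll(v, vec_splats((unsigned char)3));
}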
+
/* vec_vsl */
static __inline__ vector signed char __ATTRS_o_ai
return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
}
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_slo(vector signed long long __a, vector signed char __b) {
+ return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_slo(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_slo(vector unsigned long long __a, vector signed char __b) {
+ return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_slo(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+#endif
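vec_slo shifts by whole bytes rather than bits: vslo reads a 4-bit byte count from bits 121:124 of the shift operand, which is why the usual idiom splats n << 3. A sketch under that reading, assuming VSX:

// Shift left by n whole bytes, 0..15:
vector unsigned long long slo_demo(vector unsigned long long v, unsigned n) {
  vector unsigned char sh = vec_splats((unsigned char)((n & 0x0F) << 3));
  return vec_slo(v, sh);
}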
+
/* vec_vslo */
static __inline__ vector signed char __ATTRS_o_ai
(vector int)__b);
}
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_srl(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_srl(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+#endif
+
/* vec_vsr */
static __inline__ vector signed char __ATTRS_o_ai
return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
}
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sro(vector signed long long __a, vector signed char __b) {
+ return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sro(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sro(vector unsigned long long __a, vector signed char __b) {
+ return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sro(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+#endif
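The vec_srl and vec_sro overloads above mirror the left-shift forms via vsr and vsro, and they compose the same way. A sketch with the same assumptions as the left-shift examples:

// Shift right by 2 whole bytes, then by 3 more bits:
vector unsigned long long srl_sro_demo(vector unsigned long long v) {
  v = vec_sro(v, vec_splats((unsigned char)(2 << 3)));
  return vec_srl(v, vec_splats((unsigned char)3));
}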
+
/* vec_vsro */
static __inline__ vector signed char __ATTRS_o_ai
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
+ /* vec_sldw */
+ res_vsc = vec_sldw(vsc, vsc, 0);
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+ // CHECK: @llvm.ppc.altivec.vperm
+ // CHECK-LE: sub nsw i32 16
+ // CHECK-LE: sub nsw i32 17
+ // CHECK-LE: sub nsw i32 18
+ // CHECK-LE: sub nsw i32 31
+ // CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vuc = vec_sldw(vuc, vuc, 0);
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+ // CHECK: @llvm.ppc.altivec.vperm
+ // CHECK-LE: sub nsw i32 16
+ // CHECK-LE: sub nsw i32 17
+ // CHECK-LE: sub nsw i32 18
+ // CHECK-LE: sub nsw i32 31
+ // CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vi = vec_sldw(vi, vi, 0);
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+ // CHECK: @llvm.ppc.altivec.vperm
+ // CHECK-LE: sub nsw i32 16
+ // CHECK-LE: sub nsw i32 17
+ // CHECK-LE: sub nsw i32 18
+ // CHECK-LE: sub nsw i32 31
+ // CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_sldw(vui, vui, 0);
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+ // CHECK: @llvm.ppc.altivec.vperm
+ // CHECK-LE: sub nsw i32 16
+ // CHECK-LE: sub nsw i32 17
+ // CHECK-LE: sub nsw i32 18
+ // CHECK-LE: sub nsw i32 31
+ // CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vs = vec_sldw(vs, vs, 0);
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+ // CHECK: @llvm.ppc.altivec.vperm
+ // CHECK-LE: sub nsw i32 16
+ // CHECK-LE: sub nsw i32 17
+ // CHECK-LE: sub nsw i32 18
+ // CHECK-LE: sub nsw i32 31
+ // CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vus = vec_sldw(vus, vus, 0);
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+ // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+ // CHECK: @llvm.ppc.altivec.vperm
+ // CHECK-LE: sub nsw i32 16
+ // CHECK-LE: sub nsw i32 17
+ // CHECK-LE: sub nsw i32 18
+ // CHECK-LE: sub nsw i32 31
+ // CHECK-LE: @llvm.ppc.altivec.vperm
+
res_vsc = vec_vsldoi(vsc, vsc, 0);
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK-LE-NEXT: ret i32
return vec_cnttz_lsbb (vuca);
}
-
vector unsigned int test63(void) {
// CHECK-BE: @llvm.ppc.altivec.vprtybw(<4 x i32>
// CHECK-BE-NEXT: ret <4 x i32>
// CHECK-NEXT: ret <4 x i32>
return vec_parity_lsbb (vuia);
}
-
vector unsigned int test64(void) {
// CHECK-BE: @llvm.ppc.altivec.vprtybw(<4 x i32>
// CHECK-BE-NEXT: ret <4 x i32>
// CHECK-NEXT: ret <4 x i32>
return vec_parity_lsbb (vsia);
}
-
vector unsigned long long test65(void) {
// CHECK-BE: @llvm.ppc.altivec.vprtybd(<2 x i64>
// CHECK-BE-NEXT: ret <2 x i64>
// CHECK-NEXT: ret <2 x i64>
return vec_parity_lsbb (vula);
}
-
vector unsigned long long test66(void) {
// CHECK-BE: @llvm.ppc.altivec.vprtybd(<2 x i64>
// CHECK-BE-NEXT: ret <2 x i64>
// CHECK-NEXT: ret <1 x i128>
return vec_parity_lsbb (vui128a);
}
-
vector unsigned __int128 test68(void) {
// CHECK-BE: @llvm.ppc.altivec.vprtybq(<1 x i128>
// CHECK-BE-NEXT: ret <1 x i128>
// CHECK-NEXT: ret <1 x i128>
return vec_parity_lsbb (vsi128a);
}
-
vector unsigned char test69(void) {
// CHECK-BE: call <16 x i8> @llvm.ppc.altivec.vabsdub(<16 x i8> {{.+}}, <16 x i8> {{.+}})
// CHECK: call <16 x i8> @llvm.ppc.altivec.vabsdub(<16 x i8> {{.+}}, <16 x i8> {{.+}})
// CHECK: call <4 x i32> @llvm.ppc.altivec.vabsduw(<4 x i32> {{.+}}, <4 x i32> {{.+}})
return vec_absd(vuia, vuib);
}
+vector unsigned char test72(void) {
+// CHECK-BE: @llvm.ppc.altivec.vslv(<16 x i8>
+// CHECK-BE-NEXT: ret <16 x i8>
+// CHECK: @llvm.ppc.altivec.vslv(<16 x i8>
+// CHECK-NEXT: ret <16 x i8>
+ return vec_slv (vuca, vucb);
+}
+vector unsigned char test73(void) {
+// CHECK-BE: @llvm.ppc.altivec.vsrv(<16 x i8>
+// CHECK-BE-NEXT: ret <16 x i8>
+// CHECK: @llvm.ppc.altivec.vsrv(<16 x i8>
+// CHECK-NEXT: ret <16 x i8>
+ return vec_srv (vuca, vucb);
+}
+
// CHECK-LE: fmul <2 x double>
res_vbll = vec_reve(vbll);
- // CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
- // CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
+// CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
+// CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
res_vsll = vec_reve(vsll);
- // CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
- // CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
+// CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
+// CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
res_vull = vec_reve(vull);
- // CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
- // CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
+// CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
+// CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
res_vd = vec_reve(vd);
- // CHECK: shufflevector <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
- // CHECK-LE: shufflevector <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
+// CHECK: shufflevector <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
+// CHECK-LE: shufflevector <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
res_vbll = vec_revb(vbll);
// CHECK: store <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8>, <16 x i8>* {{%.+}}, align 16
// CHECK-LE: store <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8>* {{%.+}}, align 16
// CHECK-LE: xor <16 x i8>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{%.+}}, <4 x i32> {{%.+}}, <16 x i8> {{%.+}})
+
+ res_vbll = vec_sld(vbll, vbll, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsll = vec_sld(vsll, vsll, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vull = vec_sld(vull, vull, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vd = vec_sld(vd, vd, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsll = vec_sldw(vsll, vsll, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vull = vec_sldw(vull, vull, 0);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsll = vec_sll(vsll, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+  res_vull = vec_sll(vull, vuc);
+// CHECK: @llvm.ppc.altivec.vsl
+// CHECK-LE: @llvm.ppc.altivec.vsl
+
+  res_vsll = vec_slo(vsll, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vsll = vec_slo(vsll, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vull = vec_slo(vull, vsc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vull = vec_slo(vull, vuc);
+// CHECK: @llvm.ppc.altivec.vslo
+// CHECK-LE: @llvm.ppc.altivec.vslo
+
+ res_vsll = vec_srl(vsll, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vull = vec_srl(vull, vuc);
+// CHECK: @llvm.ppc.altivec.vsr
+// CHECK-LE: @llvm.ppc.altivec.vsr
+
+ res_vsll = vec_sro(vsll, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vsll = vec_sro(vsll, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vull = vec_sro(vull, vsc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
+
+ res_vull = vec_sro(vull, vuc);
+// CHECK: @llvm.ppc.altivec.vsro
+// CHECK-LE: @llvm.ppc.altivec.vsro
}