return __builtin_altivec_vctuxs(__a, __b);
}
+/* vec_signed */
+
+/* Forward declaration: the vec_sld overloads are defined later in this
+   header, but vec_signede/vec_signedo below need this one already. */
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sld(vector signed int, vector signed int, unsigned const int __c);
+
+/* Convert each float element to a signed 32-bit integer using ordinary C
+   float-to-int conversion semantics. */
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signed(vector float __a) {
+ return __builtin_convertvector(__a, vector signed int);
+}
+
+#ifdef __VSX__
+/* VSX only: convert each double element to a signed 64-bit integer. */
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signed(vector double __a) {
+ return __builtin_convertvector(__a, vector signed long long);
+}
+
+/* Convert two vectors of doubles (4 values total) into one vector of four
+   signed 32-bit integers, element by element.  Not overloaded, hence plain
+   always_inline rather than __ATTRS_o_ai. */
+static __inline__ vector signed int __attribute__((__always_inline__))
+vec_signed2(vector double __a, vector double __b) {
+ return (vector signed int) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+/* "Even" conversion: xvcvdpsxws converts both doubles to signed words; on
+   little-endian the 12-byte vec_sld rotation moves the results into the
+   element positions that match the big-endian layout. */
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signede(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvdpsxws(__a);
+#endif
+}
+
+/* "Odd" conversion: mirror of vec_signede — the 12-byte rotation is applied
+   on big-endian instead of little-endian. */
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signedo(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvdpsxws(__a);
+#else
+ vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+#endif
+
+/* vec_unsigned */
+
+/* Forward declaration: the vec_sld overloads are defined later in this
+   header, but vec_unsignede/vec_unsignedo below need this one already. */
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sld(vector unsigned int, vector unsigned int, unsigned const int __c);
+
+/* Convert each float element to an unsigned 32-bit integer using ordinary C
+   float-to-int conversion semantics. */
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unsigned(vector float __a) {
+ return __builtin_convertvector(__a, vector unsigned int);
+}
+
+#ifdef __VSX__
+/* VSX only: convert each double element to an unsigned 64-bit integer. */
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_unsigned(vector double __a) {
+ return __builtin_convertvector(__a, vector unsigned long long);
+}
+
+/* Convert two vectors of doubles (4 values total) into one vector of four
+   unsigned 32-bit integers, element by element.  Not overloaded, hence plain
+   always_inline rather than __ATTRS_o_ai. */
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_unsigned2(vector double __a, vector double __b) {
+ return (vector unsigned int) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+/* "Even" conversion: xvcvdpuxws converts both doubles to unsigned words; on
+   little-endian the 12-byte vec_sld rotation moves the results into the
+   element positions that match the big-endian layout. */
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unsignede(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvdpuxws(__a);
+#endif
+}
+
+/* "Odd" conversion: mirror of vec_unsignede — the 12-byte rotation is
+   applied on big-endian instead of little-endian. */
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unsignedo(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvdpuxws(__a);
+#else
+ vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+#endif
+
+/* vec_float */
+
+/* Forward declaration: the vec_sld overloads are defined later in this
+   header, but vec_floate/vec_floato below need this one already. */
+static __inline__ vector float __ATTRS_o_ai
+vec_sld(vector float, vector float, unsigned const int __c);
+
+/* Convert each signed 32-bit integer element to float. */
+static __inline__ vector float __ATTRS_o_ai
+vec_float(vector signed int __a) {
+ return __builtin_convertvector(__a, vector float);
+}
+
+/* Convert each unsigned 32-bit integer element to float. */
+static __inline__ vector float __ATTRS_o_ai
+vec_float(vector unsigned int __a) {
+ return __builtin_convertvector(__a, vector float);
+}
+
+#ifdef __VSX__
+/* Narrow two vectors of signed 64-bit integers (4 values total) into one
+   vector of four floats, element by element. */
+static __inline__ vector float __ATTRS_o_ai
+vec_float2(vector signed long long __a, vector signed long long __b) {
+ return (vector float) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+/* Narrow two vectors of unsigned 64-bit integers (4 values total) into one
+   vector of four floats, element by element. */
+static __inline__ vector float __ATTRS_o_ai
+vec_float2(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector float) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+/* Narrow two vectors of doubles (4 values total) into one vector of four
+   floats, element by element. */
+static __inline__ vector float __ATTRS_o_ai
+vec_float2(vector double __a, vector double __b) {
+ return (vector float) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+/* "Even" conversion from signed 64-bit integers: xvcvsxdsp converts both
+   doublewords to single precision; on little-endian the 12-byte vec_sld
+   rotation moves the results into the element positions matching the
+   big-endian layout.  Same pattern for all vec_floate variants below. */
+static __inline__ vector float __ATTRS_o_ai
+vec_floate(vector signed long long __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvsxdsp(__a);
+#endif
+}
+
+/* "Even" conversion from unsigned 64-bit integers (xvcvuxdsp). */
+static __inline__ vector float __ATTRS_o_ai
+vec_floate(vector unsigned long long __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvuxdsp(__a);
+#endif
+}
+
+/* "Even" conversion from doubles (xvcvdpsp). */
+static __inline__ vector float __ATTRS_o_ai
+vec_floate(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector float __ret = __builtin_vsx_xvcvdpsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvdpsp(__a);
+#endif
+}
+
+/* "Odd" conversions: mirrors of the vec_floate variants — the 12-byte
+   rotation is applied on big-endian instead of little-endian. */
+static __inline__ vector float __ATTRS_o_ai
+vec_floato(vector signed long long __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvsxdsp(__a);
+#else
+ vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_floato(vector unsigned long long __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvuxdsp(__a);
+#else
+ vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_floato(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvdpsp(__a);
+#else
+ vector float __ret = __builtin_vsx_xvcvdpsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+#endif
+
/* vec_double */
#ifdef __VSX__
static __inline__ vector double __ATTRS_o_ai
vec_double(vector signed long long __a) {
+ return __builtin_convertvector(__a, vector double);
+}
+
+/* Convert each unsigned 64-bit integer element to double. */
+static __inline__ vector double __ATTRS_o_ai
+vec_double(vector unsigned long long __a) {
+ return __builtin_convertvector(__a, vector double);
+}
+
+/* "Even" widening conversion: on little-endian the input words are rotated
+   by 4 bytes first so xvcvsxwdp picks up the element positions matching the
+   big-endian layout.  Same pattern for all vec_doublee variants. */
+static __inline__ vector double __ATTRS_o_ai
+vec_doublee(vector signed int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
+#else
+ return __builtin_vsx_xvcvsxwdp(__a);
+#endif
+}
+
+/* "Even" widening conversion from unsigned words (xvcvuxwdp). */
+static __inline__ vector double __ATTRS_o_ai
+vec_doublee(vector unsigned int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
+#else
+ return __builtin_vsx_xvcvuxwdp(__a);
+#endif
+}
+
+/* "Even" widening conversion from floats (xvcvspdp). */
+static __inline__ vector double __ATTRS_o_ai
+vec_doublee(vector float __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
+#else
+ return __builtin_vsx_xvcvspdp(__a);
+#endif
+}
+
+/* vec_doubleh: widen the high half (elements 0 and 1) of the input vector
+   to two doubles.  Endian-neutral: element indexing is positional. */
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleh(vector signed int __a) {
 vector double __ret = {__a[0], __a[1]};
 return __ret;
}
static __inline__ vector double __ATTRS_o_ai
-vec_double(vector unsigned long long __a) {
+vec_doubleh(vector unsigned int __a) {
 vector double __ret = {__a[0], __a[1]};
 return __ret;
}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleh(vector float __a) {
+ vector double __ret = {__a[0], __a[1]};
+ return __ret;
+}
+
+/* vec_doublel: widen the low half (elements 2 and 3) of the input vector to
+   two doubles.  Endian-neutral: element indexing is positional. */
+static __inline__ vector double __ATTRS_o_ai
+vec_doublel(vector signed int __a) {
+ vector double __ret = {__a[2], __a[3]};
+ return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doublel(vector unsigned int __a) {
+ vector double __ret = {__a[2], __a[3]};
+ return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doublel(vector float __a) {
+ vector double __ret = {__a[2], __a[3]};
+ return __ret;
+}
+
+/* "Odd" widening conversions: mirrors of vec_doublee — the 4-byte vec_sld
+   rotation is applied on big-endian instead of little-endian. */
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleo(vector signed int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvsxwdp(__a);
+#else
+ return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleo(vector unsigned int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvuxwdp(__a);
+#else
+ return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleo(vector float __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvspdp(__a);
+#else
+ return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
+#endif
+}
#endif
/* vec_div */
// CHECK-LE: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>
+ res_vsll = vec_signed(vd);
+// CHECK: fptosi <2 x double>
+// CHECK-LE: fptosi <2 x double>
+
+ res_vsi = vec_signed2(vd, vd);
+// CHECK: extractelement <2 x double>
+// CHECK: fptosi double
+// CHECK: insertelement <4 x i32>
+// CHECK: extractelement <2 x double>
+// CHECK: fptosi double
+// CHECK: insertelement <4 x i32>
+// CHECK: extractelement <2 x double>
+// CHECK: fptosi double
+// CHECK: insertelement <4 x i32>
+// CHECK: extractelement <2 x double>
+// CHECK: fptosi double
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptosi double
+// CHECK-LE: insertelement <4 x i32>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptosi double
+// CHECK-LE: insertelement <4 x i32>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptosi double
+// CHECK-LE: insertelement <4 x i32>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptosi double
+// CHECK-LE: insertelement <4 x i32>
+
+ res_vsi = vec_signede(vd);
+// CHECK: @llvm.ppc.vsx.xvcvdpsxws(<2 x double>
+// CHECK-LE: @llvm.ppc.vsx.xvcvdpsxws(<2 x double>
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vsi = vec_signedo(vd);
+// CHECK: @llvm.ppc.vsx.xvcvdpsxws(<2 x double>
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.vsx.xvcvdpsxws(<2 x double>
+
+ res_vull = vec_unsigned(vd);
+// CHECK: fptoui <2 x double>
+// CHECK-LE: fptoui <2 x double>
+
+ res_vui = vec_unsigned2(vd, vd);
+// CHECK: extractelement <2 x double>
+// CHECK: fptoui double
+// CHECK: insertelement <4 x i32>
+// CHECK: extractelement <2 x double>
+// CHECK: fptoui double
+// CHECK: insertelement <4 x i32>
+// CHECK: extractelement <2 x double>
+// CHECK: fptoui double
+// CHECK: insertelement <4 x i32>
+// CHECK: extractelement <2 x double>
+// CHECK: fptoui double
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptoui double
+// CHECK-LE: insertelement <4 x i32>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptoui double
+// CHECK-LE: insertelement <4 x i32>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptoui double
+// CHECK-LE: insertelement <4 x i32>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptoui double
+// CHECK-LE: insertelement <4 x i32>
+
+ res_vui = vec_unsignede(vd);
+// CHECK: @llvm.ppc.vsx.xvcvdpuxws(<2 x double>
+// CHECK-LE: @llvm.ppc.vsx.xvcvdpuxws(<2 x double>
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vui = vec_unsignedo(vd);
+// CHECK: @llvm.ppc.vsx.xvcvdpuxws(<2 x double>
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.vsx.xvcvdpuxws(<2 x double>
+
+ res_vf = vec_float2(vsll, vsll);
+// CHECK: extractelement <2 x i64>
+// CHECK: sitofp i64
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x i64>
+// CHECK: sitofp i64
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x i64>
+// CHECK: sitofp i64
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x i64>
+// CHECK: sitofp i64
+// CHECK: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x i64>
+// CHECK-LE: sitofp i64
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x i64>
+// CHECK-LE: sitofp i64
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x i64>
+// CHECK-LE: sitofp i64
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x i64>
+// CHECK-LE: sitofp i64
+// CHECK-LE: insertelement <4 x float>
+
+ res_vf = vec_float2(vull, vull);
+// CHECK: extractelement <2 x i64>
+// CHECK: uitofp i64
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x i64>
+// CHECK: uitofp i64
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x i64>
+// CHECK: uitofp i64
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x i64>
+// CHECK: uitofp i64
+// CHECK: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x i64>
+// CHECK-LE: uitofp i64
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x i64>
+// CHECK-LE: uitofp i64
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x i64>
+// CHECK-LE: uitofp i64
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x i64>
+// CHECK-LE: uitofp i64
+// CHECK-LE: insertelement <4 x float>
+
+ res_vf = vec_float2(vd, vd);
+// CHECK: extractelement <2 x double>
+// CHECK: fptrunc double
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x double>
+// CHECK: fptrunc double
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x double>
+// CHECK: fptrunc double
+// CHECK: insertelement <4 x float>
+// CHECK: extractelement <2 x double>
+// CHECK: fptrunc double
+// CHECK: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptrunc double
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptrunc double
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptrunc double
+// CHECK-LE: insertelement <4 x float>
+// CHECK-LE: extractelement <2 x double>
+// CHECK-LE: fptrunc double
+// CHECK-LE: insertelement <4 x float>
+
+ res_vf = vec_floate(vsll);
+// CHECK: @llvm.ppc.vsx.xvcvsxdsp
+// CHECK-LE: @llvm.ppc.vsx.xvcvsxdsp
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_floate(vull);
+// CHECK: @llvm.ppc.vsx.xvcvuxdsp
+// CHECK-LE: @llvm.ppc.vsx.xvcvuxdsp
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_floate(vd);
+// CHECK: @llvm.ppc.vsx.xvcvdpsp
+// CHECK-LE: @llvm.ppc.vsx.xvcvdpsp
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vf = vec_floato(vsll);
+// CHECK: @llvm.ppc.vsx.xvcvsxdsp
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.vsx.xvcvsxdsp
+
+ res_vf = vec_floato(vull);
+// CHECK: @llvm.ppc.vsx.xvcvuxdsp
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.vsx.xvcvuxdsp
+
+ res_vf = vec_floato(vd);
+// CHECK: @llvm.ppc.vsx.xvcvdpsp
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.vsx.xvcvdpsp
+
+ res_vd = vec_double(vsll);
+// CHECK: sitofp <2 x i64>
+// CHECK-LE: sitofp <2 x i64>
+
+ res_vd = vec_double(vull);
+// CHECK: uitofp <2 x i64>
+// CHECK-LE: uitofp <2 x i64>
+
+ res_vd = vec_doublee(vsi);
+// CHECK: @llvm.ppc.vsx.xvcvsxwdp(<4 x i32
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.vsx.xvcvsxwdp(<4 x i32
+
+ res_vd = vec_doublee(vui);
+// CHECK: @llvm.ppc.vsx.xvcvuxwdp(<4 x i32
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.vsx.xvcvuxwdp(<4 x i32
+
+ res_vd = vec_doublee(vf);
+// CHECK: @llvm.ppc.vsx.xvcvspdp(<4 x float
+// CHECK-LE: sub nsw i32 16
+// CHECK-LE: sub nsw i32 17
+// CHECK-LE: sub nsw i32 18
+// CHECK-LE: sub nsw i32 31
+// CHECK-LE: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.vsx.xvcvspdp(<4 x float
+
+ res_vd = vec_doubleh(vsi);
+// CHECK: extractelement <4 x i32>
+// CHECK: sitofp i32
+// CHECK: insertelement <2 x double>
+// CHECK: extractelement <4 x i32>
+// CHECK: sitofp i32
+// CHECK: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x i32>
+// CHECK-LE: sitofp i32
+// CHECK-LE: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x i32>
+// CHECK-LE: sitofp i32
+// CHECK-LE: insertelement <2 x double>
+
+ res_vd = vec_doubleh(vui);
+// CHECK: extractelement <4 x i32>
+// CHECK: uitofp i32
+// CHECK: insertelement <2 x double>
+// CHECK: extractelement <4 x i32>
+// CHECK: uitofp i32
+// CHECK: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x i32>
+// CHECK-LE: uitofp i32
+// CHECK-LE: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x i32>
+// CHECK-LE: uitofp i32
+// CHECK-LE: insertelement <2 x double>
+
+ res_vd = vec_doubleh(vf);
+// CHECK: extractelement <4 x float>
+// CHECK: fpext float
+// CHECK: insertelement <2 x double>
+// CHECK: extractelement <4 x float>
+// CHECK: fpext float
+// CHECK: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x float>
+// CHECK-LE: fpext float
+// CHECK-LE: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x float>
+// CHECK-LE: fpext float
+// CHECK-LE: insertelement <2 x double>
+
+ res_vd = vec_doublel(vsi);
+// CHECK: extractelement <4 x i32>
+// CHECK: sitofp i32
+// CHECK: insertelement <2 x double>
+// CHECK: extractelement <4 x i32>
+// CHECK: sitofp i32
+// CHECK: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x i32>
+// CHECK-LE: sitofp i32
+// CHECK-LE: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x i32>
+// CHECK-LE: sitofp i32
+// CHECK-LE: insertelement <2 x double>
+
+ res_vd = vec_doublel(vui);
+// CHECK: extractelement <4 x i32>
+// CHECK: uitofp i32
+// CHECK: insertelement <2 x double>
+// CHECK: extractelement <4 x i32>
+// CHECK: uitofp i32
+// CHECK: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x i32>
+// CHECK-LE: uitofp i32
+// CHECK-LE: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x i32>
+// CHECK-LE: uitofp i32
+// CHECK-LE: insertelement <2 x double>
+
+ res_vd = vec_doublel(vf);
+// CHECK: extractelement <4 x float>
+// CHECK: fpext float
+// CHECK: insertelement <2 x double>
+// CHECK: extractelement <4 x float>
+// CHECK: fpext float
+// CHECK: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x float>
+// CHECK-LE: fpext float
+// CHECK-LE: insertelement <2 x double>
+// CHECK-LE: extractelement <4 x float>
+// CHECK-LE: fpext float
+// CHECK-LE: insertelement <2 x double>
+
+ res_vd = vec_doubleo(vsi);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.vsx.xvcvsxwdp(<4 x i32>
+// CHECK-LE: @llvm.ppc.vsx.xvcvsxwdp(<4 x i32>
+
+ res_vd = vec_doubleo(vui);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.vsx.xvcvuxwdp(<4 x i32>
+// CHECK-LE: @llvm.ppc.vsx.xvcvuxwdp(<4 x i32>
+
+ res_vd = vec_doubleo(vf);
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
+// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK: @llvm.ppc.vsx.xvcvspdp(<4 x float>
+// CHECK-LE: @llvm.ppc.vsx.xvcvspdp(<4 x float>
+
res_vbll = vec_reve(vbll);
// CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
// CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>