ret <4 x double> %min
}
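+; fptosi tests: constant <1-4 x float/double> vectors converted to signed
+; i32/i64 under fpexcept.strict; the lowering scalarizes to cvttss2si/cvttsd2si.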
+define <1 x i32> @constrained_vector_fptosi_v1i32_v1f32() {
+; CHECK-LABEL: constrained_vector_fptosi_v1i32_v1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v1i32_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(
+ <1 x float><float 42.0>,
+ metadata !"fpexcept.strict")
+ ret <1 x i32> %result
+}
+
+define <2 x i32> @constrained_vector_fptosi_v2i32_v2f32() {
+; CHECK-LABEL: constrained_vector_fptosi_v2i32_v2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v2i32_v2f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(
+ <2 x float><float 42.0, float 43.0>,
+ metadata !"fpexcept.strict")
+ ret <2 x i32> %result
+}
+
+define <3 x i32> @constrained_vector_fptosi_v3i32_v3f32() {
+; CHECK-LABEL: constrained_vector_fptosi_v3i32_v3f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v3i32_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f32(
+ <3 x float><float 42.0, float 43.0,
+ float 44.0>,
+ metadata !"fpexcept.strict")
+ ret <3 x i32> %result
+}
+
+define <4 x i32> @constrained_vector_fptosi_v4i32_v4f32() {
+; CHECK-LABEL: constrained_vector_fptosi_v4i32_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm2
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v4i32_v4f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(
+ <4 x float><float 42.0, float 43.0,
+ float 44.0, float 45.0>,
+ metadata !"fpexcept.strict")
+ ret <4 x i32> %result
+}
+
+define <1 x i64> @constrained_vector_fptosi_v1i64_v1f32() {
+; CHECK-LABEL: constrained_vector_fptosi_v1i64_v1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v1i64_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(
+ <1 x float><float 42.0>,
+ metadata !"fpexcept.strict")
+ ret <1 x i64> %result
+}
+
+define <2 x i64> @constrained_vector_fptosi_v2i64_v2f32() {
+; CHECK-LABEL: constrained_vector_fptosi_v2i64_v2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v2i64_v2f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(
+ <2 x float><float 42.0, float 43.0>,
+ metadata !"fpexcept.strict")
+ ret <2 x i64> %result
+}
+
+define <3 x i64> @constrained_vector_fptosi_v3i64_v3f32() {
+; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rdx
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rcx
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v3i64_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f32(
+ <3 x float><float 42.0, float 43.0,
+ float 44.0>,
+ metadata !"fpexcept.strict")
+ ret <3 x i64> %result
+}
+
+define <4 x i64> @constrained_vector_fptosi_v4i64_v4f32() {
+; CHECK-LABEL: constrained_vector_fptosi_v4i64_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm2
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v4i64_v4f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm2
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(
+ <4 x float><float 42.0, float 43.0,
+ float 44.0, float 45.0>,
+ metadata !"fpexcept.strict")
+ ret <4 x i64> %result
+}
+
+define <1 x i32> @constrained_vector_fptosi_v1i32_v1f64() {
+; CHECK-LABEL: constrained_vector_fptosi_v1i32_v1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v1i32_v1f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(
+ <1 x double><double 42.1>,
+ metadata !"fpexcept.strict")
+ ret <1 x i32> %result
+}
+
+define <2 x i32> @constrained_vector_fptosi_v2i32_v2f64() {
+; CHECK-LABEL: constrained_vector_fptosi_v2i32_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v2i32_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(
+ <2 x double><double 42.1, double 42.2>,
+ metadata !"fpexcept.strict")
+ ret <2 x i32> %result
+}
+
+define <3 x i32> @constrained_vector_fptosi_v3i32_v3f64() {
+; CHECK-LABEL: constrained_vector_fptosi_v3i32_v3f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v3i32_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f64(
+ <3 x double><double 42.1, double 42.2,
+ double 42.3>,
+ metadata !"fpexcept.strict")
+ ret <3 x i32> %result
+}
+
+define <4 x i32> @constrained_vector_fptosi_v4i32_v4f64() {
+; CHECK-LABEL: constrained_vector_fptosi_v4i32_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm2
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v4i32_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(
+ <4 x double><double 42.1, double 42.2,
+ double 42.3, double 42.4>,
+ metadata !"fpexcept.strict")
+ ret <4 x i32> %result
+}
+
+define <1 x i64> @constrained_vector_fptosi_v1i64_v1f64() {
+; CHECK-LABEL: constrained_vector_fptosi_v1i64_v1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v1i64_v1f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(
+ <1 x double><double 42.1>,
+ metadata !"fpexcept.strict")
+ ret <1 x i64> %result
+}
+
+define <2 x i64> @constrained_vector_fptosi_v2i64_v2f64() {
+; CHECK-LABEL: constrained_vector_fptosi_v2i64_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v2i64_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(
+ <2 x double><double 42.1, double 42.2>,
+ metadata !"fpexcept.strict")
+ ret <2 x i64> %result
+}
+
+define <3 x i64> @constrained_vector_fptosi_v3i64_v3f64() {
+; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rdx
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rcx
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v3i64_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f64(
+ <3 x double><double 42.1, double 42.2,
+ double 42.3>,
+ metadata !"fpexcept.strict")
+ ret <3 x i64> %result
+}
+
+define <4 x i64> @constrained_vector_fptosi_v4i64_v4f64() {
+; CHECK-LABEL: constrained_vector_fptosi_v4i64_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm2
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptosi_v4i64_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm2
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(
+ <4 x double><double 42.1, double 42.2,
+ double 42.3, double 42.4>,
+ metadata !"fpexcept.strict")
+ ret <4 x i64> %result
+}
+
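+; fptoui tests: the same vectors converted to unsigned i32/i64; the checks
+; below show the lowering reusing the signed cvttss2si/cvttsd2si forms.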
+define <1 x i32> @constrained_vector_fptoui_v1i32_v1f32() {
+; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v1i32_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(
+ <1 x float><float 42.0>,
+ metadata !"fpexcept.strict")
+ ret <1 x i32> %result
+}
+
+define <2 x i32> @constrained_vector_fptoui_v2i32_v2f32() {
+; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v2i32_v2f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(
+ <2 x float><float 42.0, float 43.0>,
+ metadata !"fpexcept.strict")
+ ret <2 x i32> %result
+}
+
+define <3 x i32> @constrained_vector_fptoui_v3i32_v3f32() {
+; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v3i32_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f32(
+ <3 x float><float 42.0, float 43.0,
+ float 44.0>,
+ metadata !"fpexcept.strict")
+ ret <3 x i32> %result
+}
+
+define <4 x i32> @constrained_vector_fptoui_v4i32_v4f32() {
+; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm2
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v4i32_v4f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(
+ <4 x float><float 42.0, float 43.0,
+ float 44.0, float 45.0>,
+ metadata !"fpexcept.strict")
+ ret <4 x i32> %result
+}
+
+define <1 x i64> @constrained_vector_fptoui_v1i64_v1f32() {
+; CHECK-LABEL: constrained_vector_fptoui_v1i64_v1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v1i64_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(
+ <1 x float><float 42.0>,
+ metadata !"fpexcept.strict")
+ ret <1 x i64> %result
+}
+
+define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() {
+; CHECK-LABEL: constrained_vector_fptoui_v2i64_v2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v2i64_v2f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(
+ <2 x float><float 42.0, float 43.0>,
+ metadata !"fpexcept.strict")
+ ret <2 x i64> %result
+}
+
+define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() {
+; CHECK-LABEL: constrained_vector_fptoui_v3i64_v3f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rdx
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rcx
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v3i64_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f32(
+ <3 x float><float 42.0, float 43.0,
+ float 44.0>,
+ metadata !"fpexcept.strict")
+ ret <3 x i64> %result
+}
+
+define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() {
+; CHECK-LABEL: constrained_vector_fptoui_v4i64_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm2
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v4i64_v4f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm2
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(
+ <4 x float><float 42.0, float 43.0,
+ float 44.0, float 45.0>,
+ metadata !"fpexcept.strict")
+ ret <4 x i64> %result
+}
+
+define <1 x i32> @constrained_vector_fptoui_v1i32_v1f64() {
+; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v1i32_v1f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(
+ <1 x double><double 42.1>,
+ metadata !"fpexcept.strict")
+ ret <1 x i32> %result
+}
+
+define <2 x i32> @constrained_vector_fptoui_v2i32_v2f64() {
+; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v2i32_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(
+ <2 x double><double 42.1, double 42.2>,
+ metadata !"fpexcept.strict")
+ ret <2 x i32> %result
+}
+
+define <3 x i32> @constrained_vector_fptoui_v3i32_v3f64() {
+; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v3i32_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f64(
+ <3 x double><double 42.1, double 42.2,
+ double 42.3>,
+ metadata !"fpexcept.strict")
+ ret <3 x i32> %result
+}
+
+define <4 x i32> @constrained_vector_fptoui_v4i32_v4f64() {
+; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm1
+; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm2
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v4i32_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
+; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(
+ <4 x double><double 42.1, double 42.2,
+ double 42.3, double 42.4>,
+ metadata !"fpexcept.strict")
+ ret <4 x i32> %result
+}
+
+define <1 x i64> @constrained_vector_fptoui_v1i64_v1f64() {
+; CHECK-LABEL: constrained_vector_fptoui_v1i64_v1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v1i64_v1f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(
+ <1 x double><double 42.1>,
+ metadata !"fpexcept.strict")
+ ret <1 x i64> %result
+}
+
+define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() {
+; CHECK-LABEL: constrained_vector_fptoui_v2i64_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v2i64_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(
+ <2 x double><double 42.1, double 42.2>,
+ metadata !"fpexcept.strict")
+ ret <2 x i64> %result
+}
+
+define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() {
+; CHECK-LABEL: constrained_vector_fptoui_v3i64_v3f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rdx
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rcx
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v3i64_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f64(
+ <3 x double><double 42.1, double 42.2,
+ double 42.3>,
+ metadata !"fpexcept.strict")
+ ret <3 x i64> %result
+}
+
+define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() {
+; CHECK-LABEL: constrained_vector_fptoui_v4i64_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm0
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm2
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: movq %rax, %xmm1
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptoui_v4i64_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm0
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX-NEXT: vmovq %rax, %xmm2
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(
+ <4 x double><double 42.1, double 42.2,
+ double 42.3, double 42.4>,
+ metadata !"fpexcept.strict")
+ ret <4 x i64> %result
+}
+
define <1 x float> @constrained_vector_fptrunc_v1f64() {
; CHECK-LABEL: constrained_vector_fptrunc_v1f64:
; CHECK: # %bb.0: # %entry
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(<2 x float>, metadata)
+declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata)
+declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
+declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
+declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float>, metadata)
+declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>, metadata)
+declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
+declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.maxnum.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.minnum.v1f32(<1 x float>, <1 x float>, metadata, metadata)
+declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(<1 x float>, metadata)
+declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(<1 x float>, metadata)
+declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata)
+declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata)
+declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(<1 x float>, metadata)
+declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(<1 x float>, metadata)
+declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata)
+declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata)
declare <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(<1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float>, metadata)
declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.maxnum.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.minnum.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.minnum.v3f64(<3 x double>, <3 x double>, metadata, metadata)
+declare <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f32(<3 x float>, metadata)
+declare <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f32(<3 x float>, metadata)
+declare <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f64(<3 x double>, metadata)
+declare <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f64(<3 x double>, metadata)
+declare <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f32(<3 x float>, metadata)
+declare <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f32(<3 x float>, metadata)
+declare <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f64(<3 x double>, metadata)
+declare <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f64(<3 x double>, metadata)
declare <3 x float> @llvm.experimental.constrained.fptrunc.v3f32.v3f64(<3 x double>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fpext.v3f64.v3f32(<3 x float>, metadata)
declare <3 x float> @llvm.experimental.constrained.ceil.v3f32(<3 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.maxnum.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.minnum.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
+declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata)
+declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(<4 x double>, metadata)
+declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double>, metadata)
+declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
+declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata)
+declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double>, metadata)
+declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(<4 x double>, metadata)
declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata, metadata)