}
static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
- bool Signed, SmallVectorImpl<Value *> &Ops) {
+ bool Signed, ArrayRef<Value *> Ops) {
+ assert((Ops.size() == 2 || Ops.size() == 4) &&
+ "Unexpected number of arguments");
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
Value *Cmp;
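  // (Elided here: CC encodings for always-false/always-true produce constant
  // results; any other CC is mapped to an ICmpInst::Predicate, signed or
  // unsigned per the Signed flag, for the compare below.)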
Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
}
- const auto *C = dyn_cast<Constant>(Ops.back());
- if (!C || !C->isAllOnesValue())
- Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, Ops.back(), NumElts));
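+  // Apply the mask only when one was passed and it is not known all-ones.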
+ if (Ops.size() == 4) {
+ const auto *C = dyn_cast<Constant>(Ops[3]);
+ if (!C || !C->isAllOnesValue())
+ Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, Ops[3], NumElts));
+ }
  if (NumElts < 8) {
    // Pad the <NumElts x i1> result out to <8 x i1>, drawing the extra lanes
    // from an all-zeros vector, so it can be bitcast to an integer mask.
    uint32_t Indices[8];
    for (unsigned i = 0; i != 8; ++i)
      Indices[i] = i < NumElts ? i : NumElts + i % NumElts;
    Cmp = CGF.Builder.CreateShuffleVector(
        Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
  }
  return CGF.Builder.CreateBitCast(Cmp,
                                   CGF.Builder.getIntNTy(std::max(NumElts, 8U)));
}
+static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
+ Value *Zero = Constant::getNullValue(In->getType());
+ return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
+}
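+// Sketch of the IR this produces for e.g. _mm_movepi16_mask (mirrors the
+// tests below; the value names are illustrative only):
+//   %cmp = icmp slt <8 x i16> %in, zeroinitializer
+//   %res = bitcast <8 x i1> %cmp to i8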
+
static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
llvm::Type *Ty = Ops[0]->getType();
case X86::BI__builtin_ia32_cvtmask2q512:
return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
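+  // The vpmov*2m builtins extract the sign bit of each vector element; emit
+  // them as a generic signed compare against zero rather than an intrinsic.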
+ case X86::BI__builtin_ia32_cvtb2mask128:
+ case X86::BI__builtin_ia32_cvtb2mask256:
+ case X86::BI__builtin_ia32_cvtb2mask512:
+ case X86::BI__builtin_ia32_cvtw2mask128:
+ case X86::BI__builtin_ia32_cvtw2mask256:
+ case X86::BI__builtin_ia32_cvtw2mask512:
+ case X86::BI__builtin_ia32_cvtd2mask128:
+ case X86::BI__builtin_ia32_cvtd2mask256:
+ case X86::BI__builtin_ia32_cvtd2mask512:
+ case X86::BI__builtin_ia32_cvtq2mask128:
+ case X86::BI__builtin_ia32_cvtq2mask256:
+ case X86::BI__builtin_ia32_cvtq2mask512:
+ return EmitX86ConvertToMask(*this, Ops[0]);
+
case X86::BI__builtin_ia32_movdqa32store128_mask:
case X86::BI__builtin_ia32_movdqa64store128_mask:
case X86::BI__builtin_ia32_storeaps128_mask:
__mmask64 test_mm512_movepi8_mask(__m512i __A) {
// CHECK-LABEL: @test_mm512_movepi8_mask
- // CHECK: @llvm.x86.avx512.cvtb2mask.512
+ // CHECK: [[CMP:%.*]] = icmp slt <64 x i8> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <64 x i1> [[CMP]] to i64
return _mm512_movepi8_mask(__A);
}
__mmask32 test_mm512_movepi16_mask(__m512i __A) {
// CHECK-LABEL: @test_mm512_movepi16_mask
- // CHECK: @llvm.x86.avx512.cvtw2mask.512
+ // CHECK: [[CMP:%.*]] = icmp slt <32 x i16> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <32 x i1> [[CMP]] to i32
return _mm512_movepi16_mask(__A);
}
__mmask16 test_mm512_movepi32_mask(__m512i __A) {
// CHECK-LABEL: @test_mm512_movepi32_mask
- // CHECK: @llvm.x86.avx512.cvtd2mask.512
+ // CHECK: [[CMP:%.*]] = icmp slt <16 x i32> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <16 x i1> [[CMP]] to i16
return _mm512_movepi32_mask(__A);
}
__mmask8 test_mm512_movepi64_mask(__m512i __A) {
// CHECK-LABEL: @test_mm512_movepi64_mask
- // CHECK: @llvm.x86.avx512.cvtq2mask.512
+ // CHECK: [[CMP:%.*]] = icmp slt <8 x i64> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <8 x i1> [[CMP]] to i8
return _mm512_movepi64_mask(__A);
}
__mmask16 test_mm_movepi8_mask(__m128i __A) {
// CHECK-LABEL: @test_mm_movepi8_mask
- // CHECK: @llvm.x86.avx512.cvtb2mask.128
+ // CHECK: [[CMP:%.*]] = icmp slt <16 x i8> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <16 x i1> [[CMP]] to i16
return _mm_movepi8_mask(__A);
}
__mmask32 test_mm256_movepi8_mask(__m256i __A) {
// CHECK-LABEL: @test_mm256_movepi8_mask
- // CHECK: @llvm.x86.avx512.cvtb2mask.256
+ // CHECK: [[CMP:%.*]] = icmp slt <32 x i8> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <32 x i1> [[CMP]] to i32
return _mm256_movepi8_mask(__A);
}
__mmask8 test_mm_movepi16_mask(__m128i __A) {
// CHECK-LABEL: @test_mm_movepi16_mask
- // CHECK: @llvm.x86.avx512.cvtw2mask.128
+ // CHECK: [[CMP:%.*]] = icmp slt <8 x i16> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <8 x i1> [[CMP]] to i8
return _mm_movepi16_mask(__A);
}
__mmask16 test_mm256_movepi16_mask(__m256i __A) {
// CHECK-LABEL: @test_mm256_movepi16_mask
- // CHECK: @llvm.x86.avx512.cvtw2mask.256
+ // CHECK: [[CMP:%.*]] = icmp slt <16 x i16> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <16 x i1> [[CMP]] to i16
return _mm256_movepi16_mask(__A);
}
__mmask8 test_mm_movepi32_mask(__m128i __A) {
// CHECK-LABEL: @test_mm_movepi32_mask
- // CHECK: @llvm.x86.avx512.cvtd2mask.128
+ // CHECK: [[CMP:%.*]] = icmp slt <4 x i32> %{{.*}}, zeroinitializer
+ // CHECK: [[SHUF:%.*]] = shufflevector <4 x i1> [[CMP]], <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ // CHECK: bitcast <8 x i1> [[SHUF]] to i8
return _mm_movepi32_mask(__A);
}
__mmask8 test_mm256_movepi32_mask(__m256i __A) {
// CHECK-LABEL: @test_mm256_movepi32_mask
- // CHECK: @llvm.x86.avx512.cvtd2mask.256
+ // CHECK: [[CMP:%.*]] = icmp slt <8 x i32> %{{.*}}, zeroinitializer
+ // CHECK: bitcast <8 x i1> [[CMP]] to i8
return _mm256_movepi32_mask(__A);
}
__mmask8 test_mm_movepi64_mask(__m128i __A) {
// CHECK-LABEL: @test_mm_movepi64_mask
- // CHECK: @llvm.x86.avx512.cvtq2mask.128
+ // CHECK: [[CMP:%.*]] = icmp slt <2 x i64> %{{.*}}, zeroinitializer
+ // CHECK: [[SHUF:%.*]] = shufflevector <2 x i1> [[CMP]], <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ // CHECK: bitcast <8 x i1> [[SHUF]] to i8
return _mm_movepi64_mask(__A);
}
__mmask8 test_mm256_movepi64_mask(__m256i __A) {
// CHECK-LABEL: @test_mm256_movepi64_mask
- // CHECK: @llvm.x86.avx512.cvtq2mask.256
+ // CHECK: [[CMP:%.*]] = icmp slt <4 x i64> %{{.*}}, zeroinitializer
+ // CHECK: [[SHUF:%.*]] = shufflevector <4 x i1> [[CMP]], <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ // CHECK: bitcast <8 x i1> [[SHUF]] to i8
return _mm256_movepi64_mask(__A);
}