Originally we were using a GCC builtin to lower this AVX2 vector intrinsic.
Instead we will now lower it directly to a vector shuffle.
This will not only allow LLVM to generate better code, but it will also allow
us to remove the corresponding GCC builtin.
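For reference, a minimal sketch of what the new lowering means (illustrative
only, not part of this commit; the typedefs and the function name are made up).
Clang's __builtin_shufflevector selects elements from the concatenation of its
two vector operands, so with the 128-bit input viewed as two 64-bit lanes, the
index list 0, 1, 0, 1 picks both lanes twice and replicates the whole 128-bit
value into both halves of the 256-bit result:

  /* Generic Clang vector types standing in for __m128i and __m256i. */
  typedef long long v2i64 __attribute__((vector_size(16)));
  typedef long long v4i64 __attribute__((vector_size(32)));

  static v4i64 broadcast128(v2i64 x) {
    /* The indices index into the concatenation x ++ x; 0, 1, 0, 1
       repeats both 64-bit lanes, i.e. the whole 128 bits. */
    return __builtin_shufflevector(x, x, 0, 1, 0, 1);
  }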
Reviewed by Andrea
This is related to rdar://problem/18742778.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@231081 91177308-0d34-0410-b5e6-96231b3b80d8
BUILTIN(__builtin_ia32_vbroadcastss_ps, "V4fV4f", "")
BUILTIN(__builtin_ia32_vbroadcastss_ps256, "V8fV4f", "")
BUILTIN(__builtin_ia32_vbroadcastsd_pd256, "V4dV2d", "")
-BUILTIN(__builtin_ia32_vbroadcastsi256, "V4LLiV2LLi", "")
BUILTIN(__builtin_ia32_pbroadcastb256, "V32cV16c", "")
BUILTIN(__builtin_ia32_pbroadcastw256, "V16sV8s", "")
BUILTIN(__builtin_ia32_pbroadcastd256, "V8iV4i", "")
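(In the builtin prototype encoding above, "V4LLiV2LLi" reads: returns a vector
of four long longs and takes a vector of two long longs, i.e. the removed
builtin had the signature __m256i (*)(__m128i).)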
Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
return Builder.CreateExtractValue(Call, 1);
}
- // AVX2 broadcast
- case X86::BI__builtin_ia32_vbroadcastsi256: {
- Value *VecTmp = CreateMemTemp(E->getArg(0)->getType());
- Builder.CreateStore(Ops[0], VecTmp);
- Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
- return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
- }
  // SSE comparison intrinsics
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpltps:
static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_broadcastsi128_si256(__m128i __X)
{
- return (__m256i)__builtin_ia32_vbroadcastsi256(__X);
+ return (__m256i)__builtin_shufflevector(__X, __X, 0, 1, 0, 1);
}
#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
}
__m256i test_mm256_broadcastsi128_si256(__m128i a) {
- // CHECK: @llvm.x86.avx2.vbroadcasti128
+ // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
return _mm256_broadcastsi128_si256(a);
}
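As a quick end-to-end sanity check of the new lowering (a hypothetical
standalone program, not part of this commit; build with something like
clang -O2 -mavx2 and run on an AVX2-capable machine):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    /* High 64-bit lane = 0x1111..., low lane = 0x2222... */
    __m128i x = _mm_set_epi64x(0x1111111111111111LL, 0x2222222222222222LL);
    __m256i y = _mm256_broadcastsi128_si256(x);
    unsigned long long out[4];
    _mm256_storeu_si256((__m256i *)out, y);
    /* Expect the two lanes repeated: 2222... 1111... 2222... 1111... */
    printf("%llx %llx %llx %llx\n", out[0], out[1], out[2], out[3]);
    return 0;
  }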