From: Jim Grosbach
Date: Wed, 15 May 2013 02:40:04 +0000 (+0000)
Subject: ARM: Improve codegen for vget_low_* and vget_high_* intrinsics.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=cd76539274cdc3907a61de28ba81a9e90f270a41;p=clang

ARM: Improve codegen for vget_low_* and vget_high_* intrinsics.

These intrinsics use the __builtin_shufflevector() function to extract the
low and high half, respectively, of a 128-bit NEON vector. Currently, they're
defined to use bitcasts to simplify the emitter, so we get code like:

uint16x4_t vget_low_u16(uint16x8_t __a) {
  return (uint16x4_t) __builtin_shufflevector((int64x2_t) __a, (int64x2_t) __a, 0);
}

While this works, it results in those bitcasts going all the way through to
the IR, resulting in code like:

%1 = bitcast <8 x i16> %in to <2 x i64>
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <1 x i32> zeroinitializer
%3 = bitcast <1 x i64> %2 to <4 x i16>

We can instead easily perform the operation directly on the input vector like:

uint16x4_t vget_low_u16(uint16x8_t __a) {
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}

Not only is that much easier to read on its own, it also results in cleaner
IR like:

%1 = shufflevector <8 x i16> %in, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>

This is both easier to read and easier for the back end to reason about
effectively, since the operation is no longer obfuscated by the bitcasts.

rdar://13894163

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@181865 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/arm-neon-vget.c b/test/CodeGen/arm-neon-vget.c
new file mode 100644
index 0000000000..4a710a2ad8
--- /dev/null
+++ b/test/CodeGen/arm-neon-vget.c
@@ -0,0 +1,124 @@
+// REQUIRES: arm-registered-target
+// RUN: %clang_cc1 -triple thumbv7-apple-darwin \
+// RUN:   -target-abi apcs-gnu \
+// RUN:   -target-cpu cortex-a8 \
+// RUN:   -mfloat-abi soft \
+// RUN:   -target-feature +soft-float-abi \
+// RUN:   -ffreestanding \
+// RUN:   -emit-llvm -w -O1 -o - %s | FileCheck %s
+
+#include <arm_neon.h>
+
+// Check that the vget_low/vget_high intrinsics generate a single shuffle
+// without any bitcasting.
+int8x8_t low_s8(int8x16_t a) {
+// CHECK: shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  return vget_low_s8(a);
+}
+
+uint8x8_t low_u8(uint8x16_t a) {
+// CHECK: shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  return vget_low_u8(a);
+}
+
+int16x4_t low_s16(int16x8_t a) {
+// CHECK: shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  return vget_low_s16(a);
+}
+
+uint16x4_t low_u16(uint16x8_t a) {
+// CHECK: shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  return vget_low_u16(a);
+}
+
+int32x2_t low_s32(int32x4_t a) {
+// CHECK: shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  return vget_low_s32(a);
+}
+
+uint32x2_t low_u32(uint32x4_t a) {
+// CHECK: shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  return vget_low_u32(a);
+}
+
+int64x1_t low_s64(int64x2_t a) {
+// CHECK: shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+  return vget_low_s64(a);
+}
+
+uint64x1_t low_u64(uint64x2_t a) {
+// CHECK: shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+  return vget_low_u64(a);
+}
+
+poly8x8_t low_p8(poly8x16_t a) {
+// CHECK: shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  return vget_low_p8(a);
+}
+
+poly16x4_t low_p16(poly16x8_t a) {
+// CHECK: shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  return vget_low_p16(a);
+}
+
+float32x2_t low_f32(float32x4_t a) {
+// CHECK: shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  return vget_low_f32(a);
+}
+
+
+int8x8_t high_s8(int8x16_t a) {
+// CHECK: shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  return vget_high_s8(a);
+}
+
+uint8x8_t high_u8(uint8x16_t a) {
+// CHECK: shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  return vget_high_u8(a);
+}
+
+int16x4_t high_s16(int16x8_t a) {
+// CHECK: shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  return vget_high_s16(a);
+}
+
+uint16x4_t high_u16(uint16x8_t a) {
+// CHECK: shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  return vget_high_u16(a);
+}
+
+int32x2_t high_s32(int32x4_t a) {
+// CHECK: shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  return vget_high_s32(a);
+}
+
+uint32x2_t high_u32(uint32x4_t a) {
+// CHECK: shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  return vget_high_u32(a);
+}
+
+int64x1_t high_s64(int64x2_t a) {
+// CHECK: shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+  return vget_high_s64(a);
+}
+
+uint64x1_t high_u64(uint64x2_t a) {
+// CHECK: shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+  return vget_high_u64(a);
+}
+
+poly8x8_t high_p8(poly8x16_t a) {
+// CHECK: shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  return vget_high_p8(a);
+}
+
+poly16x4_t high_p16(poly16x8_t a) {
+// CHECK: shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  return vget_high_p16(a);
+}
+
+float32x2_t high_f32(float32x4_t a) {
+// CHECK: shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+  return vget_high_f32(a);
+}
+
diff --git a/utils/TableGen/NeonEmitter.cpp b/utils/TableGen/NeonEmitter.cpp
index 34b955e8e9..05505c99c9 100644
--- a/utils/TableGen/NeonEmitter.cpp
+++ b/utils/TableGen/NeonEmitter.cpp
@@ -1410,12 +1410,17 @@ static std::string GenOpString(OpKind op, const std::string &proto,
     s += ", (int64x1_t)__b, 0, 1);";
     break;
   case OpHi:
-    s += "(" + ts +
-         ")__builtin_shufflevector((int64x2_t)__a, (int64x2_t)__a, 1);";
+    // nElts is for the result vector, so the source is twice that number.
+ s += "__builtin_shufflevector(__a, __a"; + for (unsigned i = nElts; i < nElts * 2; ++i) + s += ", " + utostr(i); + s+= ");"; break; case OpLo: - s += "(" + ts + - ")__builtin_shufflevector((int64x2_t)__a, (int64x2_t)__a, 0);"; + s += "__builtin_shufflevector(__a, __a"; + for (unsigned i = 0; i < nElts; ++i) + s += ", " + utostr(i); + s+= ");"; break; case OpDup: s += Duplicate(nElts, typestr, "__a") + ";";