From: Chad Rosier
Date: Thu, 14 Nov 2013 22:02:24 +0000 (+0000)
Subject: [AArch64] Add support for legacy AArch32 NEON scalar shift right by immediate
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=f46e56bf99384b742228a9be215f38bf107c1a3b;p=clang

[AArch64] Add support for legacy AArch32 NEON scalar shift right by immediate
and accumulate instructions.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@194732 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a2669fe8fe..363caedf31 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -2913,10 +2913,21 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                : Intrinsic::aarch64_neon_vsrshr;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n");
   case AArch64::BI__builtin_neon_vsra_n_v:
+    if (VTy->getElementType()->isIntegerTy(64)) {
+      Int = usgn ? Intrinsic::aarch64_neon_vsradu_n
+                 : Intrinsic::aarch64_neon_vsrads_n;
+      return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vsra_n");
+    }
     return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsra_n_v, E);
   case AArch64::BI__builtin_neon_vsraq_n_v:
     return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsraq_n_v, E);
   case AArch64::BI__builtin_neon_vrsra_n_v:
+    if (VTy->getElementType()->isIntegerTy(64)) {
+      Int = usgn ? Intrinsic::aarch64_neon_vrsradu_n
+                 : Intrinsic::aarch64_neon_vrsrads_n;
+      return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vrsra_n");
+    }
+    // fall through
   case AArch64::BI__builtin_neon_vrsraq_n_v: {
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
diff --git a/test/CodeGen/aarch64-neon-intrinsics.c b/test/CodeGen/aarch64-neon-intrinsics.c
index c73e87d6ac..4c2f3cc101 100644
--- a/test/CodeGen/aarch64-neon-intrinsics.c
+++ b/test/CodeGen/aarch64-neon-intrinsics.c
@@ -7538,24 +7538,48 @@ int64_t test_vsrad_n_s64(int64_t a, int64_t b) {
   return (int64_t)vsrad_n_s64(a, b, 63);
 }
 
+int64x1_t test_vsra_n_s64(int64x1_t a, int64x1_t b) {
+// CHECK: test_vsra_n_s64
+// CHECK: ssra d{{[0-9]+}}, d{{[0-9]+}}, #1
+  return vsra_n_s64(a, b, 1);
+}
+
 uint64_t test_vsrad_n_u64(uint64_t a, uint64_t b) {
 // CHECK-LABEL: test_vsrad_n_u64
 // CHECK: usra {{d[0-9]+}}, {{d[0-9]+}}, #63
   return (uint64_t)vsrad_n_u64(a, b, 63);
 }
 
+uint64x1_t test_vsra_n_u64(uint64x1_t a, uint64x1_t b) {
+// CHECK: test_vsra_n_u64
+// CHECK: usra d{{[0-9]+}}, d{{[0-9]+}}, #1
+  return vsra_n_u64(a, b, 1);
+}
+
 int64_t test_vrsrad_n_s64(int64_t a, int64_t b) {
 // CHECK-LABEL: test_vrsrad_n_s64
 // CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #63
   return (int64_t)vrsrad_n_s64(a, b, 63);
 }
 
+int64x1_t test_vrsra_n_s64(int64x1_t a, int64x1_t b) {
+// CHECK: test_vrsra_n_s64
+// CHECK: srsra d{{[0-9]+}}, d{{[0-9]+}}, #1
+  return vrsra_n_s64(a, b, 1);
+}
+
 uint64_t test_vrsrad_n_u64(uint64_t a, uint64_t b) {
 // CHECK-LABEL: test_vrsrad_n_u64
 // CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #63
   return (uint64_t)vrsrad_n_u64(a, b, 63);
 }
 
+uint64x1_t test_vrsra_n_u64(uint64x1_t a, uint64x1_t b) {
+// CHECK: test_vrsra_n_u64
+// CHECK: ursra d{{[0-9]+}}, d{{[0-9]+}}, #1
+  return vrsra_n_u64(a, b, 1);
+}
+
 int64_t test_vshld_n_s64(int64_t a) {
 // CHECK-LABEL: test_vshld_n_s64
 // CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #0
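
Usage sketch (not part of the commit; the function names below are
illustrative): with this change, Clang targeting AArch64 lowers the 64-bit
scalar forms of the shift-right-and-accumulate intrinsics directly instead
of deferring to the AArch32 builtin path, so C code like the following
selects the scalar ssra/ursra instructions the new tests check for.

#include <arm_neon.h>

/* acc + (v >> 2), signed shift-right-and-accumulate; selects ssra d, d, #2. */
int64x1_t shift_accumulate(int64x1_t acc, int64x1_t v) {
  return vsra_n_s64(acc, v, 2);
}

/* acc + round(v >> 2), unsigned rounding variant; selects ursra d, d, #2. */
uint64x1_t rounding_shift_accumulate(uint64x1_t acc, uint64x1_t v) {
  return vrsra_n_u64(acc, v, 2);
}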