From a665321a6885ff659ae6d75bb2ee2f083f78ddd7 Mon Sep 17 00:00:00 2001 From: Chad Rosier Date: Wed, 13 Nov 2013 20:05:44 +0000 Subject: [PATCH] [AArch64] Tests for legacy AArch32 NEON scalar shift by immediate instructions. A number of non-overloaded intrinsics have been replaced by their overloaded counterparts. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@194599 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/CGBuiltin.cpp | 18 +++--- test/CodeGen/aarch64-neon-intrinsics.c | 77 ++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 9 deletions(-) diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp index 5ced54360f..1c62615f81 100644 --- a/lib/CodeGen/CGBuiltin.cpp +++ b/lib/CodeGen/CGBuiltin.cpp @@ -2276,12 +2276,12 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF, s = "vushr"; OverloadInt = false; break; // Scalar Signed Rounding Shift Right (Immediate) case AArch64::BI__builtin_neon_vrshrd_n_s64: - Int = Intrinsic::aarch64_neon_vrshrds_n; - s = "vsrshr"; OverloadInt = false; break; + Int = Intrinsic::aarch64_neon_vsrshr; + s = "vsrshr"; OverloadInt = true; break; // Scalar Unsigned Rounding Shift Right (Immediate) case AArch64::BI__builtin_neon_vrshrd_n_u64: - Int = Intrinsic::aarch64_neon_vrshrdu_n; - s = "vurshr"; OverloadInt = false; break; + Int = Intrinsic::aarch64_neon_vurshr; + s = "vurshr"; OverloadInt = true; break; // Scalar Signed Shift Right and Accumulate (Immediate) case AArch64::BI__builtin_neon_vsrad_n_s64: Int = Intrinsic::aarch64_neon_vsrads_n; @@ -2322,18 +2322,18 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF, case AArch64::BI__builtin_neon_vqshluh_n_s16: case AArch64::BI__builtin_neon_vqshlus_n_s32: case AArch64::BI__builtin_neon_vqshlud_n_s64: - Int = Intrinsic::aarch64_neon_vqshlus_n; + Int = Intrinsic::aarch64_neon_vsqshlu; s = "vsqshlu"; OverloadInt = true; break; // Shift Right And Insert (Immediate) case AArch64::BI__builtin_neon_vsrid_n_s64: case 
AArch64::BI__builtin_neon_vsrid_n_u64: - Int = Intrinsic::aarch64_neon_vsrid_n; - s = "vsri"; OverloadInt = false; break; + Int = Intrinsic::aarch64_neon_vsri; + s = "vsri"; OverloadInt = true; break; // Shift Left And Insert (Immediate) case AArch64::BI__builtin_neon_vslid_n_s64: case AArch64::BI__builtin_neon_vslid_n_u64: - Int = Intrinsic::aarch64_neon_vslid_n; - s = "vsli"; OverloadInt = false; break; + Int = Intrinsic::aarch64_neon_vsli; + s = "vsli"; OverloadInt = true; break; // Signed Saturating Shift Right Narrow (Immediate) case AArch64::BI__builtin_neon_vqshrnh_n_s16: case AArch64::BI__builtin_neon_vqshrns_n_s32: diff --git a/test/CodeGen/aarch64-neon-intrinsics.c b/test/CodeGen/aarch64-neon-intrinsics.c index 3030bd96a2..c73e87d6ac 100644 --- a/test/CodeGen/aarch64-neon-intrinsics.c +++ b/test/CodeGen/aarch64-neon-intrinsics.c @@ -7490,24 +7490,48 @@ int64_t test_vshrd_n_s64(int64_t a) { return (int64_t)vshrd_n_s64(a, 1); } +int64x1_t test_vshr_n_s64(int64x1_t a) { +// CHECK-LABEL: test_vshr_n_s64 +// CHECK: sshr {{d[0-9]+}}, {{d[0-9]+}}, #1 + return vshr_n_s64(a, 1); +} + uint64_t test_vshrd_n_u64(uint64_t a) { // CHECK-LABEL: test_vshrd_n_u64 // CHECK: ushr {{d[0-9]+}}, {{d[0-9]+}}, #64 return (uint64_t)vshrd_n_u64(a, 64); } +uint64x1_t test_vshr_n_u64(uint64x1_t a) { +// CHECK-LABEL: test_vshr_n_u64 +// CHECK: ushr {{d[0-9]+}}, {{d[0-9]+}}, #1 + return vshr_n_u64(a, 1); +} + int64_t test_vrshrd_n_s64(int64_t a) { // CHECK-LABEL: test_vrshrd_n_s64 // CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #63 return (int64_t)vrshrd_n_s64(a, 63); } +int64x1_t test_vrshr_n_s64(int64x1_t a) { +// CHECK: test_vrshr_n_s64 +// CHECK: srshr d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vrshr_n_s64(a, 1); +} + uint64_t test_vrshrd_n_u64(uint64_t a) { // CHECK-LABEL: test_vrshrd_n_u64 // CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #63 return (uint64_t)vrshrd_n_u64(a, 63); } +uint64x1_t test_vrshr_n_u64(uint64x1_t a) { +// CHECK: test_vrshr_n_u64 +// CHECK: urshr d{{[0-9]+}}, d{{[0-9]+}}, 
#1 + return vrshr_n_u64(a, 1); +} + int64_t test_vsrad_n_s64(int64_t a, int64_t b) { // CHECK-LABEL: test_vsrad_n_s64 // CHECK: ssra {{d[0-9]+}}, {{d[0-9]+}}, #63 @@ -7537,6 +7561,11 @@ int64_t test_vshld_n_s64(int64_t a) { // CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #0 return (int64_t)vshld_n_s64(a, 0); } +int64x1_t test_vshl_n_s64(int64x1_t a) { +// CHECK: test_vshl_n_s64 +// CHECK: shl d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vshl_n_s64(a, 1); +} uint64_t test_vshld_n_u64(uint64_t a) { // CHECK-LABEL: test_vshld_n_u64 @@ -7544,6 +7573,12 @@ uint64_t test_vshld_n_u64(uint64_t a) { return (uint64_t)vshld_n_u64(a, 63); } +uint64x1_t test_vshl_n_u64(uint64x1_t a) { +// CHECK: test_vshl_n_u64 +// CHECK: shl d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vshl_n_u64(a, 1); +} + int8_t test_vqshlb_n_s8(int8_t a) { // CHECK-LABEL: test_vqshlb_n_s8 // CHECK: sqshl {{b[0-9]+}}, {{b[0-9]+}}, #7 @@ -7568,6 +7603,12 @@ int64_t test_vqshld_n_s64(int64_t a) { return (int64_t)vqshld_n_s64(a, 63); } +int64x1_t test_vqshl_n_s64(int64x1_t a) { +// CHECK: test_vqshl_n_s64 +// CHECK: sqshl d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vqshl_n_s64(a, 1); +} + uint8_t test_vqshlb_n_u8(uint8_t a) { // CHECK-LABEL: test_vqshlb_n_u8 // CHECK: uqshl {{b[0-9]+}}, {{b[0-9]+}}, #7 @@ -7592,6 +7633,12 @@ uint64_t test_vqshld_n_u64(uint64_t a) { return (uint64_t)vqshld_n_u64(a, 63); } +uint64x1_t test_vqshl_n_u64(uint64x1_t a) { +// CHECK: test_vqshl_n_u64 +// CHECK: uqshl d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vqshl_n_u64(a, 1); +} + int8_t test_vqshlub_n_s8(int8_t a) { // CHECK-LABEL: test_vqshlub_n_s8 // CHECK: sqshlu {{b[0-9]+}}, {{b[0-9]+}}, #7 @@ -7616,30 +7663,60 @@ int64_t test_vqshlud_n_s64(int64_t a) { return (int64_t)vqshlud_n_s64(a, 63); } +uint64x1_t test_vqshlu_n_s64(int64x1_t a) { +// CHECK: test_vqshlu_n_s64 +// CHECK: sqshlu d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vqshlu_n_s64(a, 1); +} + int64_t test_vsrid_n_s64(int64_t a, int64_t b) { // CHECK-LABEL: test_vsrid_n_s64 // CHECK: sri {{d[0-9]+}}, 
{{d[0-9]+}}, #63 return (int64_t)vsrid_n_s64(a, b, 63); } +int64x1_t test_vsri_n_s64(int64x1_t a, int64x1_t b) { +// CHECK: test_vsri_n_s64 +// CHECK: sri d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vsri_n_s64(a, b, 1); +} + uint64_t test_vsrid_n_u64(uint64_t a, uint64_t b) { // CHECK-LABEL: test_vsrid_n_u64 // CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63 return (uint64_t)vsrid_n_u64(a, b, 63); } +uint64x1_t test_vsri_n_u64(uint64x1_t a, uint64x1_t b) { +// CHECK: test_vsri_n_u64 +// CHECK: sri d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vsri_n_u64(a, b, 1); +} + int64_t test_vslid_n_s64(int64_t a, int64_t b) { // CHECK-LABEL: test_vslid_n_s64 // CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63 return (int64_t)vslid_n_s64(a, b, 63); } +int64x1_t test_vsli_n_s64(int64x1_t a, int64x1_t b) { +// CHECK: test_vsli_n_s64 +// CHECK: sli d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vsli_n_s64(a, b, 1); +} + uint64_t test_vslid_n_u64(uint64_t a, uint64_t b) { // CHECK-LABEL: test_vslid_n_u64 // CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63 return (uint64_t)vslid_n_u64(a, b, 63); } +uint64x1_t test_vsli_n_u64(uint64x1_t a, uint64x1_t b) { +// CHECK: test_vsli_n_u64 +// CHECK: sli d{{[0-9]+}}, d{{[0-9]+}}, #1 + return vsli_n_u64(a, b, 1); +} + int8_t test_vqshrnh_n_s16(int16_t a) { // CHECK-LABEL: test_vqshrnh_n_s16 // CHECK: sqshrn {{b[0-9]+}}, {{h[0-9]+}}, #15 -- 2.40.0