From c8f25fa5c02155367c2426eb8e4101050e2c385a Mon Sep 17 00:00:00 2001
From: Linfeng Zhang <linfengz@google.com>
Date: Wed, 14 Dec 2016 10:42:01 -0800
Subject: [PATCH] Clean hbd idct 4x4 neon functions and others

BUG=webm:1301

Change-Id: I387b7eae716a7df15c691dc6f368b07602df7342
---
 test/partial_idct_test.cc             | 30 ++++----
 vpx_dsp/arm/highbd_idct4x4_add_neon.c | 98 +++++++++++++--------------
 vpx_dsp/arm/idct8x8_add_neon.c        | 26 +++----
 vpx_dsp/x86/inv_txfm_ssse3_x86_64.asm |  2 +-
 4 files changed, 77 insertions(+), 79 deletions(-)

diff --git a/test/partial_idct_test.cc b/test/partial_idct_test.cc
index fc2b971a7..09ff4a3dc 100644
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -448,13 +448,13 @@ const PartialInvTxfmParam neon_partial_idct_tests[] = {
 #if CONFIG_VP9_HIGHBITDEPTH
   make_tuple(&vpx_highbd_fdct4x4_c,
              &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 1, 8, 2),
-  make_tuple(&vpx_highbd_fdct4x4_c,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 1, 10, 2),
-  make_tuple(&vpx_highbd_fdct4x4_c,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 1, 12, 2),
+             &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 8, 2),
+  make_tuple(
+      &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
+      &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 10, 2),
+  make_tuple(
+      &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
+      &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 12, 2),
   make_tuple(&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>,
              &highbd_wrapper<vpx_highbd_idct4x4_1_add_neon>, TX_4X4, 1, 8, 2),
   make_tuple(&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>,
@@ -543,13 +543,13 @@ const PartialInvTxfmParam sse2_partial_idct_tests[] = {
       &highbd_wrapper<vpx_highbd_idct8x8_12_add_sse2>, TX_8X8, 12, 12, 2),
   make_tuple(&vpx_highbd_fdct4x4_c,
              &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 1, 8, 2),
-  make_tuple(&vpx_highbd_fdct4x4_c,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 1, 10, 2),
-  make_tuple(&vpx_highbd_fdct4x4_c,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
-             &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 1, 12, 2),
+             &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 8, 2),
+  make_tuple(
+      &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
+      &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 10, 2),
+  make_tuple(
+      &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
+      &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 12, 2),
 #endif  // CONFIG_VP9_HIGHBITDEPTH
   make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
              &wrapper<vpx_idct32x32_1024_add_sse2>, TX_32X32, 1024, 8, 1),
@@ -620,7 +620,7 @@ const PartialInvTxfmParam msa_partial_idct_tests[] = {
   make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
              &wrapper<vpx_idct8x8_64_add_msa>, TX_8X8, 64, 8, 1),
   make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_12_add_c>,
-             &wrapper<vpx_idct8x8_12_add_msa>, TX_8X8, 10, 8, 1),
+             &wrapper<vpx_idct8x8_12_add_msa>, TX_8X8, 12, 8, 1),
   make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_1_add_c>,
              &wrapper<vpx_idct8x8_1_add_msa>, TX_8X8, 1, 8, 1),
   make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
diff --git a/vpx_dsp/arm/highbd_idct4x4_add_neon.c b/vpx_dsp/arm/highbd_idct4x4_add_neon.c
index 1b2b5714f..b9e226a68 100644
--- a/vpx_dsp/arm/highbd_idct4x4_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct4x4_add_neon.c
@@ -14,31 +14,56 @@
 #include "vpx_dsp/arm/idct_neon.h"
 #include "vpx_dsp/inv_txfm.h"
 
+static INLINE void highbd_idct4x4_1_add_kernel1(uint16_t **dest,
+                                                const int stride,
+                                                const int16x8_t res,
+                                                const int16x8_t max) {
+  const uint16x4_t a0 = vld1_u16(*dest);
+  const uint16x4_t a1 = vld1_u16(*dest + stride);
+  const int16x8_t a = vreinterpretq_s16_u16(vcombine_u16(a0, a1));
+  // Note: In some profile tests, res is quite close to +/-32767.
+  // We use saturating addition.
+  const int16x8_t b = vqaddq_s16(res, a);
+  const int16x8_t c = vminq_s16(b, max);
+  const uint16x8_t d = vqshluq_n_s16(c, 0);
+  vst1_u16(*dest, vget_low_u16(d));
+  *dest += stride;
+  vst1_u16(*dest, vget_high_u16(d));
+  *dest += stride;
+}
+
+// res is in reverse row order
+static INLINE void highbd_idct4x4_1_add_kernel2(uint16_t **dest,
+                                                const int stride,
+                                                const int16x8_t res,
+                                                const int16x8_t max) {
+  const uint16x4_t a0 = vld1_u16(*dest);
+  const uint16x4_t a1 = vld1_u16(*dest + stride);
+  const int16x8_t a = vreinterpretq_s16_u16(vcombine_u16(a1, a0));
+  // Note: In some profile tests, res is quite close to +/-32767.
+  // We use saturating addition.
+  const int16x8_t b = vqaddq_s16(res, a);
+  const int16x8_t c = vminq_s16(b, max);
+  const uint16x8_t d = vqshluq_n_s16(c, 0);
+  vst1_u16(*dest, vget_high_u16(d));
+  *dest += stride;
+  vst1_u16(*dest, vget_low_u16(d));
+  *dest += stride;
+}
+
 void vpx_highbd_idct4x4_1_add_neon(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int bd) {
-  int i;
   const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
-  const tran_low_t out0 = dct_const_round_shift(input[0] * cospi_16_64);
-  const tran_low_t out1 = dct_const_round_shift(out0 * cospi_16_64);
+  const tran_low_t out0 =
+      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+  const tran_low_t out1 =
+      HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 4);
   const int16x8_t dc = vdupq_n_s16(a1);
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
-  int16x8_t a;
-  uint16x8_t b;
-  uint16x4_t d0, d1;
-
-  for (i = 0; i < 2; i++) {
-    d0 = vld1_u16(dest);
-    d1 = vld1_u16(dest + stride);
-    a = vreinterpretq_s16_u16(vcombine_u16(d0, d1));
-    a = vaddq_s16(dc, a);
-    a = vminq_s16(a, max);
-    b = vqshluq_n_s16(a, 0);
-    vst1_u16(dest, vget_low_u16(b));
-    dest += stride;
-    vst1_u16(dest, vget_high_u16(b));
-    dest += stride;
-  }
+
+  highbd_idct4x4_1_add_kernel1(&dest, stride, dc, max);
+  highbd_idct4x4_1_add_kernel1(&dest, stride, dc, max);
 }
 
 static INLINE void idct4x4_16_kernel_bd10(const int32x4_t cospis,
@@ -114,10 +139,7 @@ void vpx_highbd_idct4x4_16_add_neon(const tran_low_t *input, uint8_t *dest8,
   int32x4_t c2 = vld1q_s32(input + 8);
   int32x4_t c3 = vld1q_s32(input + 12);
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
-  const uint16_t *dst = dest;
-  int16x8_t a0, a1, d01, d32;
-  int16x4_t d0, d1, d2, d3;
-  uint16x8_t d01_u16, d32_u16;
+  int16x8_t a0, a1;
 
   if (bd == 8) {
     const int16x4_t cospis = vld1_s16(kCospi);
@@ -142,36 +164,10 @@ void vpx_highbd_idct4x4_16_add_neon(const tran_low_t *input, uint8_t *dest8,
       idct4x4_16_kernel_bd12(cospis, &c0, &c1, &c2, &c3);
       idct4x4_16_kernel_bd12(cospis, &c0, &c1, &c2, &c3);
     }
-    // Note: In some profile tests, a0 and a1 are quite close to +/-32767.
-    // We use saturating narrow shift in case they could be even larger.
     a0 = vcombine_s16(vqrshrn_n_s32(c0, 4), vqrshrn_n_s32(c1, 4));
     a1 = vcombine_s16(vqrshrn_n_s32(c3, 4), vqrshrn_n_s32(c2, 4));
   }
-  d0 = vreinterpret_s16_u16(vld1_u16(dst));
-  dst += stride;
-  d1 = vreinterpret_s16_u16(vld1_u16(dst));
-  dst += stride;
-  d2 = vreinterpret_s16_u16(vld1_u16(dst));
-  dst += stride;
-  d3 = vreinterpret_s16_u16(vld1_u16(dst));
-  d01 = vcombine_s16(d0, d1);
-  d32 = vcombine_s16(d3, d2);
-
-  // Note: In some profile tests, a0 and a1 is quite close to +/-32767.
-  // We use saturating addition.
- d01 = vqaddq_s16(a0, d01); - d32 = vqaddq_s16(a1, d32); - d01 = vminq_s16(d01, max); - d32 = vminq_s16(d32, max); - d01_u16 = vqshluq_n_s16(d01, 0); - d32_u16 = vqshluq_n_s16(d32, 0); - - vst1_u16(dest, vget_low_u16(d01_u16)); - dest += stride; - vst1_u16(dest, vget_high_u16(d01_u16)); - dest += stride; - vst1_u16(dest, vget_high_u16(d32_u16)); - dest += stride; - vst1_u16(dest, vget_low_u16(d32_u16)); + highbd_idct4x4_1_add_kernel1(&dest, stride, a0, max); + highbd_idct4x4_1_add_kernel2(&dest, stride, a1, max); } diff --git a/vpx_dsp/arm/idct8x8_add_neon.c b/vpx_dsp/arm/idct8x8_add_neon.c index 7db229cf5..6fae48b8b 100644 --- a/vpx_dsp/arm/idct8x8_add_neon.c +++ b/vpx_dsp/arm/idct8x8_add_neon.c @@ -16,10 +16,11 @@ #include "vpx_dsp/arm/transpose_neon.h" #include "vpx_dsp/txfm_common.h" -static INLINE void IDCT8x8_1D(const int16x4_t cospis0, const int16x4_t cospis1, - int16x8_t *a0, int16x8_t *a1, int16x8_t *a2, - int16x8_t *a3, int16x8_t *a4, int16x8_t *a5, - int16x8_t *a6, int16x8_t *a7) { +static INLINE void idct8x8_1d(const int16x4_t cospis0, const int16x4_t cospis1, + int16x8_t *const a0, int16x8_t *const a1, + int16x8_t *const a2, int16x8_t *const a3, + int16x8_t *const a4, int16x8_t *const a5, + int16x8_t *const a6, int16x8_t *const a7) { const int16x4_t a0l = vget_low_s16(*a0); const int16x4_t a0h = vget_high_s16(*a0); const int16x4_t a1l = vget_low_s16(*a1); @@ -220,17 +221,18 @@ void vpx_idct8x8_64_add_neon(const tran_low_t *input, uint8_t *dest, a7 = load_tran_low_to_s16q(input + 56); transpose_s16_8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); - IDCT8x8_1D(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); + idct8x8_1d(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); transpose_s16_8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); - IDCT8x8_1D(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); + idct8x8_1d(cospis0, cospis1, &a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); add8x8(a0, a1, a2, a3, a4, a5, a6, a7, dest, stride); } -static INLINE void IDCT8x4_1D(const int16x4_t cospis0, const int16x4_t cospisd0, - const int16x4_t cospisd1, int16x8_t *a0, - int16x8_t *a1, int16x8_t *a2, int16x8_t *a3, - int16x8_t *a4, int16x8_t *a5, int16x8_t *a6, - int16x8_t *a7) { +static INLINE void idct8x4_1d(const int16x4_t cospis0, const int16x4_t cospisd0, + const int16x4_t cospisd1, int16x8_t *const a0, + int16x8_t *const a1, int16x8_t *const a2, + int16x8_t *const a3, int16x8_t *const a4, + int16x8_t *const a5, int16x8_t *const a6, + int16x8_t *const a7) { int32x4_t b0, b1, b2, b3; int16x4_t c0, c1, c2, c3; int16x8_t d0, d1, d2, d3, d4, d5, d6, d7, e0, e1, e2, e3; @@ -339,7 +341,7 @@ void vpx_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest, b7 = vsub_s16(b0, b7); transpose_s16_4x8(b8, b9, b10, b11, b4, b5, b6, b7, &a0, &a1, &a2, &a3); - IDCT8x4_1D(cospis0, cospisd0, cospisd1, &a0, &a1, &a2, &a3, &a4, &a5, &a6, + idct8x4_1d(cospis0, cospisd0, cospisd1, &a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); add8x8(a0, a1, a2, a3, a4, a5, a6, a7, dest, stride); } diff --git a/vpx_dsp/x86/inv_txfm_ssse3_x86_64.asm b/vpx_dsp/x86/inv_txfm_ssse3_x86_64.asm index 20baf820f..dee64e3ad 100644 --- a/vpx_dsp/x86/inv_txfm_ssse3_x86_64.asm +++ b/vpx_dsp/x86/inv_txfm_ssse3_x86_64.asm @@ -263,7 +263,7 @@ cglobal idct8x8_64_add, 3, 5, 13, input, output, stride RET -; inverse 8x8 2D-DCT transform with only first 10 coeffs non-zero +; inverse 8x8 2D-DCT transform with only first 12 coeffs non-zero cglobal idct8x8_12_add, 3, 5, 13, input, output, stride mova m8, [pd_8192] mova m11, [pw_16] -- 2.40.0
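Note on the clamp sequence shared by both new kernels: vqaddq_s16 saturates the residual-plus-pixel sum at +/-32767, vminq_s16 applies the bit-depth ceiling, and vqshluq_n_s16(x, 0) (saturating shift-left-by-zero to unsigned) flushes any negative result to zero. A scalar model of one lane is sketched below; the helper names are illustrative, not libvpx API.

#include <stdint.h>

/* Saturate a 32-bit sum to int16_t range, as vqaddq_s16 does per lane. */
static int16_t saturate_s16(int32_t v) {
  if (v > INT16_MAX) return INT16_MAX;
  if (v < INT16_MIN) return INT16_MIN;
  return (int16_t)v;
}

/* One lane of kernel1/kernel2: pixel + residual, clamped to
 * [0, (1 << bd) - 1] for bd = 8, 10 or 12. */
static uint16_t highbd_add_clamp(uint16_t pixel, int16_t res, int bd) {
  const int16_t max = (int16_t)((1 << bd) - 1);              /* 255/1023/4095 */
  int16_t sum = saturate_s16((int32_t)res + (int32_t)pixel); /* vqaddq_s16 */
  if (sum > max) sum = max;                                  /* vminq_s16 */
  if (sum < 0) sum = 0;                          /* vqshluq_n_s16(sum, 0) */
  return (uint16_t)sum;
}

kernel2 performs the same arithmetic and differs only in store order: the 16-coefficient path builds a1 from rows 3 and 2 (low and high halves respectively), hence the "reverse row order" comment and the high-half-first stores.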
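Similarly, the DC-only path (vpx_highbd_idct4x4_1_add_neon) collapses both inverse transform passes to a multiply by cospi_16_64 with DCT rounding, then a final round-shift by 4 before the value is splatted with vdupq_n_s16. The sketch below models that arithmetic, assuming the usual constants from vpx_dsp/txfm_common.h; it omits the HIGHBD_WRAPLOW wrap the patch adds for conformance at 10/12-bit depths.

#include <stdint.h>

#define COSPI_16_64 11585 /* cospi_16_64: cos(pi/4) in Q14 fixed point */
#define DCT_CONST_BITS 14

/* Models dct_const_round_shift(): round to nearest, then shift by 14. */
static int32_t dct_round_shift(int64_t x) {
  return (int32_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* The single residual value added to all 16 pixels of the 4x4 block. */
static int16_t dc_value_4x4(int32_t input0) {
  const int32_t out0 = dct_round_shift((int64_t)input0 * COSPI_16_64);
  const int32_t out1 = dct_round_shift((int64_t)out0 * COSPI_16_64);
  return (int16_t)((out1 + 8) >> 4); /* ROUND_POWER_OF_TWO(out1, 4) */
}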