From 0f3edc66252128fb831458cce71d59371faef3fc Mon Sep 17 00:00:00 2001
From: Linfeng Zhang
Date: Tue, 6 Feb 2018 18:12:12 -0800
Subject: [PATCH] Update iadst NEON functions

Use scalar (lane) multiplies. No impact on clang, but improves the code
generated by gcc.

BUG=webm:1403

Change-Id: I4922e7e033d9e93282c754754100850e232e1529
---
 vp9/common/arm/neon/vp9_iht8x8_add_neon.c | 93 ++++++++++++++-------
 vp9/common/arm/neon/vp9_iht_neon.h        | 23 +++---
 vpx_dsp/arm/mem_neon.h                    |  7 ++
 3 files changed, 81 insertions(+), 42 deletions(-)

diff --git a/vp9/common/arm/neon/vp9_iht8x8_add_neon.c b/vp9/common/arm/neon/vp9_iht8x8_add_neon.c
index c6d2688f9..19163bc87 100644
--- a/vp9/common/arm/neon/vp9_iht8x8_add_neon.c
+++ b/vp9/common/arm/neon/vp9_iht8x8_add_neon.c
@@ -18,37 +18,66 @@
 #include "vpx_dsp/arm/mem_neon.h"
 #include "vpx_dsp/arm/transpose_neon.h"
 
-static INLINE void iadst_half_butterfly_neon(int16x8_t *const x) {
-  const int16x4_t c = vdup_n_s16(cospi_16_64);
+static INLINE void iadst_half_butterfly_neon(int16x8_t *const x,
+                                             const int16x4_t c) {
   const int16x8_t sum = vaddq_s16(x[0], x[1]);
   const int16x8_t sub = vsubq_s16(x[0], x[1]);
   int32x4_t t0[2], t1[2];
 
-  t0[0] = vmull_s16(c, vget_low_s16(sum));
-  t0[1] = vmull_s16(c, vget_high_s16(sum));
-  t1[0] = vmull_s16(c, vget_low_s16(sub));
-  t1[1] = vmull_s16(c, vget_high_s16(sub));
+  t0[0] = vmull_lane_s16(vget_low_s16(sum), c, 0);
+  t0[1] = vmull_lane_s16(vget_high_s16(sum), c, 0);
+  t1[0] = vmull_lane_s16(vget_low_s16(sub), c, 0);
+  t1[1] = vmull_lane_s16(vget_high_s16(sub), c, 0);
   x[0] = dct_const_round_shift_low_8(t0);
   x[1] = dct_const_round_shift_low_8(t1);
 }
 
-static INLINE void iadst_butterfly_neon(const int16x8_t in0,
-                                        const int16x8_t in1, const int c0,
-                                        const int c1, int32x4_t *const s0,
-                                        int32x4_t *const s1) {
-  const int16x4_t cst0 = vdup_n_s16(c0);
-  const int16x4_t cst1 = vdup_n_s16(c1);
-  int32x4_t t0[2], t1[2];
+static INLINE void iadst_butterfly_lane_0_1_neon(const int16x8_t in0,
+                                                 const int16x8_t in1,
+                                                 const int16x4_t c,
+                                                 int32x4_t *const s0,
+                                                 int32x4_t *const s1) {
+  s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 0);
+  s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 0);
+  s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 1);
+  s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 1);
+
+  s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 1);
+  s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 1);
+  s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 0);
+  s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 0);
+}
 
-  t0[0] = vmull_s16(cst0, vget_low_s16(in0));
-  t0[1] = vmull_s16(cst0, vget_high_s16(in0));
-  t1[0] = vmull_s16(cst1, vget_low_s16(in0));
-  t1[1] = vmull_s16(cst1, vget_high_s16(in0));
+static INLINE void iadst_butterfly_lane_2_3_neon(const int16x8_t in0,
+                                                 const int16x8_t in1,
+                                                 const int16x4_t c,
+                                                 int32x4_t *const s0,
+                                                 int32x4_t *const s1) {
+  s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 2);
+  s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 2);
+  s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 3);
+  s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 3);
+
+  s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 3);
+  s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 3);
+  s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 2);
+  s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 2);
+}
 
-  s0[0] = vmlal_s16(t0[0], cst1, vget_low_s16(in1));
-  s0[1] = vmlal_s16(t0[1], cst1, vget_high_s16(in1));
-  s1[0] = vmlsl_s16(t1[0], cst0, vget_low_s16(in1));
-  s1[1] = vmlsl_s16(t1[1], cst0, vget_high_s16(in1));
+static INLINE void iadst_butterfly_lane_3_2_neon(const int16x8_t in0,
+                                                 const int16x8_t in1,
+                                                 const int16x4_t c,
+                                                 int32x4_t *const s0,
+                                                 int32x4_t *const s1) {
+  s0[0] = vmull_lane_s16(vget_low_s16(in0), c, 3);
+  s0[1] = vmull_lane_s16(vget_high_s16(in0), c, 3);
+  s1[0] = vmull_lane_s16(vget_low_s16(in0), c, 2);
+  s1[1] = vmull_lane_s16(vget_high_s16(in0), c, 2);
+
+  s0[0] = vmlal_lane_s16(s0[0], vget_low_s16(in1), c, 2);
+  s0[1] = vmlal_lane_s16(s0[1], vget_high_s16(in1), c, 2);
+  s1[0] = vmlsl_lane_s16(s1[0], vget_low_s16(in1), c, 3);
+  s1[1] = vmlsl_lane_s16(s1[1], vget_high_s16(in1), c, 3);
 }
 
 static INLINE int16x8_t add_dct_const_round_shift_low_8(
@@ -70,6 +99,12 @@ static INLINE int16x8_t sub_dct_const_round_shift_low_8(
 }
 
 static INLINE void iadst8(int16x8_t *const io) {
+  const int16x4_t c0 =
+      create_s16x4_neon(cospi_2_64, cospi_30_64, cospi_10_64, cospi_22_64);
+  const int16x4_t c1 =
+      create_s16x4_neon(cospi_18_64, cospi_14_64, cospi_26_64, cospi_6_64);
+  const int16x4_t c2 =
+      create_s16x4_neon(cospi_16_64, 0, cospi_8_64, cospi_24_64);
   int16x8_t x[8], t[4];
   int32x4_t s0[2], s1[2], s2[2], s3[2], s4[2], s5[2], s6[2], s7[2];
 
@@ -83,10 +118,10 @@ static INLINE void iadst8(int16x8_t *const io) {
   x[7] = io[6];
 
   // stage 1
-  iadst_butterfly_neon(x[0], x[1], cospi_2_64, cospi_30_64, s0, s1);
-  iadst_butterfly_neon(x[2], x[3], cospi_10_64, cospi_22_64, s2, s3);
-  iadst_butterfly_neon(x[4], x[5], cospi_18_64, cospi_14_64, s4, s5);
-  iadst_butterfly_neon(x[6], x[7], cospi_26_64, cospi_6_64, s6, s7);
+  iadst_butterfly_lane_0_1_neon(x[0], x[1], c0, s0, s1);
+  iadst_butterfly_lane_2_3_neon(x[2], x[3], c0, s2, s3);
+  iadst_butterfly_lane_0_1_neon(x[4], x[5], c1, s4, s5);
+  iadst_butterfly_lane_2_3_neon(x[6], x[7], c1, s6, s7);
 
   x[0] = add_dct_const_round_shift_low_8(s0, s4);
   x[1] = add_dct_const_round_shift_low_8(s1, s5);
@@ -102,8 +137,8 @@ static INLINE void iadst8(int16x8_t *const io) {
   t[1] = x[1];
   t[2] = x[2];
   t[3] = x[3];
-  iadst_butterfly_neon(x[4], x[5], cospi_8_64, cospi_24_64, s4, s5);
-  iadst_butterfly_neon(x[7], x[6], cospi_24_64, cospi_8_64, s7, s6);
+  iadst_butterfly_lane_2_3_neon(x[4], x[5], c2, s4, s5);
+  iadst_butterfly_lane_3_2_neon(x[7], x[6], c2, s7, s6);
 
   x[0] = vaddq_s16(t[0], t[2]);
   x[1] = vaddq_s16(t[1], t[3]);
@@ -115,8 +150,8 @@ static INLINE void iadst8(int16x8_t *const io) {
   x[7] = sub_dct_const_round_shift_low_8(s5, s7);
 
   // stage 3
-  iadst_half_butterfly_neon(x + 2);
-  iadst_half_butterfly_neon(x + 6);
+  iadst_half_butterfly_neon(x + 2, c2);
+  iadst_half_butterfly_neon(x + 6, c2);
 
   io[0] = x[0];
   io[1] = vnegq_s16(x[4]);
diff --git a/vp9/common/arm/neon/vp9_iht_neon.h b/vp9/common/arm/neon/vp9_iht_neon.h
index b222377a7..08daa1a4a 100644
--- a/vp9/common/arm/neon/vp9_iht_neon.h
+++ b/vp9/common/arm/neon/vp9_iht_neon.h
@@ -22,26 +22,23 @@
 
 static INLINE void iadst4(int16x8_t *const io) {
   const int32x4_t c3 = vdupq_n_s32(sinpi_3_9);
-  int16x4_t c[5], x[4];
+  int16x4_t x[4];
   int32x4_t s[8], output[4];
-
-  c[1] = vdup_n_s16(sinpi_1_9);
-  c[2] = vdup_n_s16(sinpi_2_9);
-  c[3] = vdup_n_s16(sinpi_3_9);
-  c[4] = vdup_n_s16(sinpi_4_9);
+  const int16x4_t c =
+      create_s16x4_neon(sinpi_1_9, sinpi_2_9, sinpi_3_9, sinpi_4_9);
 
   x[0] = vget_low_s16(io[0]);
   x[1] = vget_low_s16(io[1]);
   x[2] = vget_high_s16(io[0]);
   x[3] = vget_high_s16(io[1]);
 
-  s[0] = vmull_s16(c[1], x[0]);
-  s[1] = vmull_s16(c[2], x[0]);
-  s[2] = vmull_s16(c[3], x[1]);
-  s[3] = vmull_s16(c[4], x[2]);
-  s[4] = vmull_s16(c[1], x[2]);
-  s[5] = vmull_s16(c[2], x[3]);
-  s[6] = vmull_s16(c[4], x[3]);
+  s[0] = vmull_lane_s16(x[0], c, 0);
+  s[1] = vmull_lane_s16(x[0], c, 1);
+  s[2] = vmull_lane_s16(x[1], c, 2);
+  s[3] = vmull_lane_s16(x[2], c, 3);
+  s[4] = vmull_lane_s16(x[2], c, 0);
+  s[5] = vmull_lane_s16(x[3], c, 1);
+  s[6] = vmull_lane_s16(x[3], c, 3);
   s[7] = vaddl_s16(x[0], x[3]);
   s[7] = vsubw_s16(s[7], x[2]);
diff --git a/vpx_dsp/arm/mem_neon.h b/vpx_dsp/arm/mem_neon.h
index 4efad5333..ea0962954 100644
--- a/vpx_dsp/arm/mem_neon.h
+++ b/vpx_dsp/arm/mem_neon.h
@@ -19,6 +19,13 @@
 #include "vpx/vpx_integer.h"
 #include "vpx_dsp/vpx_dsp_common.h"
 
+static INLINE int16x4_t create_s16x4_neon(const int16_t c0, const int16_t c1,
+                                          const int16_t c2, const int16_t c3) {
+  return vcreate_s16((uint16_t)c0 | ((uint16_t)c1 << 16) |
+                     ((int64_t)(uint16_t)c2 << 32) |
+                     ((int64_t)(uint16_t)c3 << 48));
+}
+
 // Helper functions used to load tran_low_t into int16, narrowing if necessary.
 static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
 #if CONFIG_VP9_HIGHBITDEPTH
-- 
2.40.0
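
Note on the technique: the patch replaces per-butterfly vdup_n_s16() broadcasts
with constants packed once into a single int16x4_t and selected by lane, so
each multiply maps to VMULL.S16 / VMLAL.S16 / VMLSL.S16 with a scalar (by-lane)
operand. Because the lane index of vmull_lane_s16() must be a compile-time
constant, the patch needs fixed-lane variants (_lane_0_1, _lane_2_3, _lane_3_2)
rather than one function taking lane numbers at run time. The sketch below is
not part of the patch: butterfly_dup() and butterfly_lane() are hypothetical
names contrasting the two styles, assuming a NEON-capable target (ARMv7-A or
AArch64); both compute s0 = c0 * in0 + c1 * in1 and s1 = c1 * in0 - c0 * in1.

#include <arm_neon.h>

/* Old style: broadcast each constant into a vector, then use full-vector
 * widening multiplies (16-bit lanes widened to 32 bits, low/high halves
 * handled separately). */
static inline void butterfly_dup(const int16x8_t in0, const int16x8_t in1,
                                 const int16_t c0, const int16_t c1,
                                 int32x4_t *const s0, int32x4_t *const s1) {
  const int16x4_t cst0 = vdup_n_s16(c0);
  const int16x4_t cst1 = vdup_n_s16(c1);
  s0[0] = vmlal_s16(vmull_s16(cst0, vget_low_s16(in0)), cst1,
                    vget_low_s16(in1));
  s0[1] = vmlal_s16(vmull_s16(cst0, vget_high_s16(in0)), cst1,
                    vget_high_s16(in1));
  s1[0] = vmlsl_s16(vmull_s16(cst1, vget_low_s16(in0)), cst0,
                    vget_low_s16(in1));
  s1[1] = vmlsl_s16(vmull_s16(cst1, vget_high_s16(in0)), cst0,
                    vget_high_s16(in1));
}

/* New style: the two constants already sit in lanes 0 and 1 of c; the scalar
 * operand is picked by lane index, so no per-call vdup is needed. */
static inline void butterfly_lane(const int16x8_t in0, const int16x8_t in1,
                                  const int16x4_t c, int32x4_t *const s0,
                                  int32x4_t *const s1) {
  s0[0] = vmlal_lane_s16(vmull_lane_s16(vget_low_s16(in0), c, 0),
                         vget_low_s16(in1), c, 1);
  s0[1] = vmlal_lane_s16(vmull_lane_s16(vget_high_s16(in0), c, 0),
                         vget_high_s16(in1), c, 1);
  s1[0] = vmlsl_lane_s16(vmull_lane_s16(vget_low_s16(in0), c, 1),
                         vget_low_s16(in1), c, 0);
  s1[1] = vmlsl_lane_s16(vmull_lane_s16(vget_high_s16(in0), c, 1),
                         vget_high_s16(in1), c, 0);
}

The two functions produce identical results; the only difference is how the
constants reach the multiplier, which is what lets gcc keep a whole stage's
coefficients in one D register.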
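The create_s16x4_neon() helper added to mem_neon.h builds that packed constant
vector by assembling a 64-bit integer for vcreate_s16(); the (uint16_t) casts
keep each constant in its own 16-bit field so a negative value cannot
sign-extend over the lanes above it. A standalone usage sketch follows;
pack_s16x4() and the test values are illustrative stand-ins, not libvpx code,
and the packing is done entirely in uint64_t as a conservative variant of the
same idea.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

static inline int16x4_t pack_s16x4(const int16_t c0, const int16_t c1,
                                   const int16_t c2, const int16_t c3) {
  /* Lane 0 occupies the lowest 16 bits of the 64-bit value. */
  const uint64_t bits = (uint64_t)(uint16_t)c0 |
                        ((uint64_t)(uint16_t)c1 << 16) |
                        ((uint64_t)(uint16_t)c2 << 32) |
                        ((uint64_t)(uint16_t)c3 << 48);
  return vcreate_s16(bits);
}

int main(void) {
  const int16x4_t c = pack_s16x4(100, -200, 300, -400);
  int16_t lanes[4];
  vst1_s16(lanes, c);
  printf("%d %d %d %d\n", lanes[0], lanes[1], lanes[2], lanes[3]);
  /* Prints: 100 -200 300 -400 */
  return 0;
}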