From ab1192c2907185d59f0044230602ea6025a42844 Mon Sep 17 00:00:00 2001
From: Jonathan Wright
Date: Thu, 5 Jan 2023 12:20:03 +0000
Subject: [PATCH] Use lane-referencing intrinsics in Neon convolution kernels

The Neon convolution helper functions take a pointer to a filter and
load the 8 values into a single Neon register. For some reason, filter
values 3 and 4 are then duplicated into their own separate registers.

This patch modifies these helper functions so that they access filter
values 3 and 4 via the lane-referencing versions of the various Neon
multiply instructions. This reduces register pressure and tidies up
the source code quite a bit.

Change-Id: Ia4aeee8b46fe218658fb8577dc07ff04a9324b3e
---
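
Notes:
    For illustration, the essence of the change is sketched below as a
    standalone example (the wrapper names old_mul_tap3 and new_mul_tap3
    are purely expository and do not exist in the library). The old code
    broadcast filter tap 3 into its own register before a full
    vector-by-vector multiply; the new code references the lane directly:

        #include <arm_neon.h>

        /* Before: VDUP tap 3 into a separate register, then a plain
         * vector multiply. Costs one extra register per duplicated tap. */
        static int16x8_t old_mul_tap3(int16x8_t s3, int16x8_t filters) {
          const int16x8_t filter3 =
              vdupq_lane_s16(vget_low_s16(filters), 3);
          return vmulq_s16(s3, filter3);
        }

        /* After: multiply by lane 3 of the low half of the filter
         * vector directly; the broadcast is folded into the multiply. */
        static int16x8_t new_mul_tap3(int16x8_t s3, int16x8_t filters) {
          return vmulq_lane_s16(s3, vget_low_s16(filters), 3);
        }

    Both forms produce identical results for every input, which is why
    the kernels below need no output changes.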
 vpx_dsp/arm/vpx_convolve8_neon.c        | 166 +++++++-----------------
 vpx_dsp/arm/vpx_convolve8_neon.h        |  20 +--
 vpx_dsp/arm/vpx_scaled_convolve8_neon.c |   9 +-
 3 files changed, 59 insertions(+), 136 deletions(-)

diff --git a/vpx_dsp/arm/vpx_convolve8_neon.c b/vpx_dsp/arm/vpx_convolve8_neon.c
index ca5222fa0..28018398a 100644
--- a/vpx_dsp/arm/vpx_convolve8_neon.c
+++ b/vpx_dsp/arm/vpx_convolve8_neon.c
@@ -807,16 +807,13 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
 
   if (h == 4) {
     uint8x8_t d01, d23;
-    int16x4_t filter3, filter4, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0,
-        d1, d2, d3;
+    int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
     int16x8_t tt0, tt1, tt2, tt3;
 
     __builtin_prefetch(src + 0 * src_stride);
     __builtin_prefetch(src + 1 * src_stride);
     __builtin_prefetch(src + 2 * src_stride);
     __builtin_prefetch(src + 3 * src_stride);
-    filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
-    filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
     load_u8_8x4(src, src_stride, &t0, &t1, &t2, &t3);
     transpose_u8_8x4(&t0, &t1, &t2, &t3);
     tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
@@ -848,14 +845,10 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
       s9 = vget_low_s16(tt2);
       s10 = vget_low_s16(tt3);
 
-      d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                       filter4);
-      d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                       filter4);
-      d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                       filter4);
-      d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                       filter4);
+      d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+      d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+      d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+      d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters);
 
       d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
       d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
@@ -882,8 +875,6 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
       w -= 4;
     } while (w != 0);
   } else {
-    const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
-    const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
     int width;
     const uint8_t *s;
     uint8x8_t t4, t5, t6, t7;
@@ -926,14 +917,10 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
        __builtin_prefetch(src + 5 * src_stride);
        __builtin_prefetch(src + 6 * src_stride);
        __builtin_prefetch(src + 7 * src_stride);
-        t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                         filter4);
-        t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                         filter4);
-        t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                         filter4);
-        t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                         filter4);
+        t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+        t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+        t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+        t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
 
        transpose_u8_8x4(&t0, &t1, &t2, &t3);
        vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0), 0);
@@ -1001,22 +988,14 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
          s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
          s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
 
-          t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                           filter4);
-          t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                           filter4);
-          t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                           filter4);
-          t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                           filter4);
-          t4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters, filter3,
-                           filter4);
-          t5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters, filter3,
-                           filter4);
-          t6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters, filter3,
-                           filter4);
-          t7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters,
-                           filter3, filter4);
+          t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+          t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+          t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+          t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
+          t4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters);
+          t5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters);
+          t6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters);
+          t7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters);
 
          transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
          store_u8_8x8(d, dst_stride, t0, t1, t2, t3, t4, t5, t6, t7);
@@ -1060,8 +1039,7 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
 
   if (h == 4) {
     uint8x8_t d01, d23;
-    int16x4_t filter3, filter4, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0,
-        d1, d2, d3;
+    int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
     int16x8_t tt0, tt1, tt2, tt3;
     uint32x4_t d0123 = vdupq_n_u32(0);
 
@@ -1069,8 +1047,6 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
     __builtin_prefetch(src + 1 * src_stride);
     __builtin_prefetch(src + 2 * src_stride);
     __builtin_prefetch(src + 3 * src_stride);
-    filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
-    filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
     load_u8_8x4(src, src_stride, &t0, &t1, &t2, &t3);
     transpose_u8_8x4(&t0, &t1, &t2, &t3);
     tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
@@ -1102,14 +1078,10 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
      s9 = vget_low_s16(tt2);
      s10 = vget_low_s16(tt3);
 
-      d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                       filter4);
-      d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                       filter4);
-      d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                       filter4);
-      d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                       filter4);
+      d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+      d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+      d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+      d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters);
 
      d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
      d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
@@ -1139,8 +1111,6 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
       w -= 4;
     } while (w != 0);
   } else {
-    const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
-    const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
     int width;
     const uint8_t *s;
     uint8x8_t t4, t5, t6, t7;
@@ -1185,14 +1155,10 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
        __builtin_prefetch(src + 5 * src_stride);
        __builtin_prefetch(src + 6 * src_stride);
        __builtin_prefetch(src + 7 * src_stride);
-        t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                         filter4);
-        t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                         filter4);
-        t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                         filter4);
-        t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                         filter4);
+        t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+        t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+        t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+        t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
 
        transpose_u8_8x4(&t0, &t1, &t2, &t3);
 
@@ -1275,22 +1241,14 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
          s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
          s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
 
-          t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                           filter4);
-          t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                           filter4);
-          t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                           filter4);
-          t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                           filter4);
-          t4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters, filter3,
-                           filter4);
-          t5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters, filter3,
-                           filter4);
-          t6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters, filter3,
-                           filter4);
-          t7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters,
-                           filter3, filter4);
+          t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+          t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+          t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+          t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
+          t4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters);
+          t5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters);
+          t6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters);
+          t7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters);
 
          transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
 
@@ -1348,8 +1306,6 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
   src -= 3 * src_stride;
 
   if (w == 4) {
-    const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
-    const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
     uint8x8_t d01, d23;
     int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
 
@@ -1386,14 +1342,10 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
      __builtin_prefetch(src + 1 * src_stride);
      __builtin_prefetch(src + 2 * src_stride);
      __builtin_prefetch(src + 3 * src_stride);
-      d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                       filter4);
-      d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                       filter4);
-      d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                       filter4);
-      d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                       filter4);
+      d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+      d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+      d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+      d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters);
 
      d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
      d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
@@ -1416,8 +1368,6 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
      h -= 4;
    } while (h != 0);
  } else {
-    const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
-    const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
    int height;
    const uint8_t *s;
    uint8_t *d;
@@ -1468,14 +1418,10 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
        __builtin_prefetch(s + 1 * src_stride);
        __builtin_prefetch(s + 2 * src_stride);
        __builtin_prefetch(s + 3 * src_stride);
-        t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                         filter4);
-        t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                         filter4);
-        t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                         filter4);
-        t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                         filter4);
+        t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+        t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+        t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+        t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
 
        vst1_u8(d, t0);
        d += dst_stride;
@@ -1520,8 +1466,6 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
  src -= 3 * src_stride;
 
  if (w == 4) {
-    const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
-    const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
    uint8x8_t d01, d23;
    int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
    uint32x4_t d0123 = vdupq_n_u32(0);
@@ -1559,14 +1503,10 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
      __builtin_prefetch(src + 1 * src_stride);
      __builtin_prefetch(src + 2 * src_stride);
      __builtin_prefetch(src + 3 * src_stride);
-      d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                       filter4);
-      d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                       filter4);
-      d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                       filter4);
-      d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                       filter4);
+      d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+      d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+      d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+      d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters);
 
      d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
      d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
@@ -1597,8 +1537,6 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
      h -= 4;
    } while (h != 0);
  } else {
-    const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
-    const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
    int height;
    const uint8_t *s;
    uint8_t *d;
@@ -1650,14 +1588,10 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
        __builtin_prefetch(s + 1 * src_stride);
        __builtin_prefetch(s + 2 * src_stride);
        __builtin_prefetch(s + 3 * src_stride);
-        t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
-                         filter4);
-        t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
-                         filter4);
-        t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
-                         filter4);
-        t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
-                         filter4);
+        t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+        t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+        t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+        t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
 
        d01 = vcombine_u8(t0, t1);
        d23 = vcombine_u8(t2, t3);
diff --git a/vpx_dsp/arm/vpx_convolve8_neon.h b/vpx_dsp/arm/vpx_convolve8_neon.h
index b112cb249..b8dfce71e 100644
--- a/vpx_dsp/arm/vpx_convolve8_neon.h
+++ b/vpx_dsp/arm/vpx_convolve8_neon.h
@@ -176,9 +176,7 @@ static INLINE int16x4_t convolve8_4(const int16x4_t s0, const int16x4_t s1,
                                     const int16x4_t s2, const int16x4_t s3,
                                     const int16x4_t s4, const int16x4_t s5,
                                     const int16x4_t s6, const int16x4_t s7,
-                                    const int16x8_t filters,
-                                    const int16x4_t filter3,
-                                    const int16x4_t filter4) {
+                                    const int16x8_t filters) {
   const int16x4_t filters_lo = vget_low_s16(filters);
   const int16x4_t filters_hi = vget_high_s16(filters);
   int16x4_t sum;
@@ -189,8 +187,8 @@ static INLINE int16x4_t convolve8_4(const int16x4_t s0, const int16x4_t s1,
   sum = vmla_lane_s16(sum, s5, filters_hi, 1);
   sum = vmla_lane_s16(sum, s6, filters_hi, 2);
   sum = vmla_lane_s16(sum, s7, filters_hi, 3);
-  sum = vqadd_s16(sum, vmul_s16(s3, filter3));
-  sum = vqadd_s16(sum, vmul_s16(s4, filter4));
+  sum = vqadd_s16(sum, vmul_lane_s16(s3, filters_lo, 3));
+  sum = vqadd_s16(sum, vmul_lane_s16(s4, filters_hi, 0));
   return sum;
 }
 
@@ -198,9 +196,7 @@ static INLINE uint8x8_t convolve8_8(const int16x8_t s0, const int16x8_t s1,
                                     const int16x8_t s2, const int16x8_t s3,
                                     const int16x8_t s4, const int16x8_t s5,
                                     const int16x8_t s6, const int16x8_t s7,
-                                    const int16x8_t filters,
-                                    const int16x8_t filter3,
-                                    const int16x8_t filter4) {
+                                    const int16x8_t filters) {
   const int16x4_t filters_lo = vget_low_s16(filters);
   const int16x4_t filters_hi = vget_high_s16(filters);
   int16x8_t sum;
@@ -211,15 +207,13 @@ static INLINE uint8x8_t convolve8_8(const int16x8_t s0, const int16x8_t s1,
   sum = vmlaq_lane_s16(sum, s5, filters_hi, 1);
   sum = vmlaq_lane_s16(sum, s6, filters_hi, 2);
   sum = vmlaq_lane_s16(sum, s7, filters_hi, 3);
-  sum = vqaddq_s16(sum, vmulq_s16(s3, filter3));
-  sum = vqaddq_s16(sum, vmulq_s16(s4, filter4));
+  sum = vqaddq_s16(sum, vmulq_lane_s16(s3, filters_lo, 3));
+  sum = vqaddq_s16(sum, vmulq_lane_s16(s4, filters_hi, 0));
   return vqrshrun_n_s16(sum, 7);
 }
 
 static INLINE uint8x8_t scale_filter_8(const uint8x8_t *const s,
                                        const int16x8_t filters) {
-  const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
-  const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
   int16x8_t ss[8];
 
   ss[0] = vreinterpretq_s16_u16(vmovl_u8(s[0]));
@@ -232,7 +226,7 @@ static INLINE uint8x8_t scale_filter_8(const uint8x8_t *const s,
   ss[7] = vreinterpretq_s16_u16(vmovl_u8(s[7]));
 
   return convolve8_8(ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7],
-                     filters, filter3, filter4);
+                     filters);
 }
 
 #endif  // VPX_VPX_DSP_ARM_VPX_CONVOLVE8_NEON_H_
diff --git a/vpx_dsp/arm/vpx_scaled_convolve8_neon.c b/vpx_dsp/arm/vpx_scaled_convolve8_neon.c
index 8edf8a66e..8491ca7ac 100644
--- a/vpx_dsp/arm/vpx_scaled_convolve8_neon.c
+++ b/vpx_dsp/arm/vpx_scaled_convolve8_neon.c
@@ -38,8 +38,6 @@ static INLINE void scaledconvolve_horiz_w4(
      const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      if (x_q4 & SUBPEL_MASK) {
        const int16x8_t filters = vld1q_s16(x_filters[x_q4 & SUBPEL_MASK]);
-        const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
-        const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
        uint8x8_t s[8], d;
        int16x8_t ss[4];
        int16x4_t t[8], tt;
@@ -61,7 +59,7 @@ static INLINE void scaledconvolve_horiz_w4(
        t[7] = vget_high_s16(ss[3]);
 
        tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7],
-                         filters, filter3, filter4);
+                         filters);
        d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7);
        vst1_lane_u32((uint32_t *)&temp[4 * z], vreinterpret_u32_u8(d), 0);
      } else {
@@ -167,8 +165,6 @@ static INLINE void scaledconvolve_vert_w4(
 
    if (y_q4 & SUBPEL_MASK) {
      const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]);
-      const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
-      const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
      uint8x8_t s[8], d;
      int16x4_t t[8], tt;
 
@@ -183,8 +179,7 @@ static INLINE void scaledconvolve_vert_w4(
      t[6] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[6])));
      t[7] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[7])));
 
-      tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], filters,
-                       filter3, filter4);
+      tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], filters);
      d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7);
      vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d), 0);
    } else {
-- 
2.40.0