  SadMxNParam(16, 8, &vpx_sad16x8_neon),
  SadMxNParam(8, 16, &vpx_sad8x16_neon),
  SadMxNParam(8, 8, &vpx_sad8x8_neon),
+  SadMxNParam(4, 8, &vpx_sad4x8_neon),
  SadMxNParam(4, 4, &vpx_sad4x4_neon),
};
INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));
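With the table entry in place, the existing SADTest fixture exercises the new
kernel automatically; assuming the standard libvpx test binary, a filtered run
would look like:

  ./test_libvpx --gtest_filter=NEON/SADTest.*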
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
+#include "vpx_dsp/arm/mem_neon.h"
unsigned int vpx_sad8x16_neon(unsigned char *src_ptr, int src_stride,
                              unsigned char *ref_ptr, int ref_stride) {
  return vget_lane_u32(d5, 0);
}
-unsigned int vpx_sad4x4_neon(unsigned char *src_ptr, int src_stride,
-                             unsigned char *ref_ptr, int ref_stride) {
-  uint8x8_t d0, d8;
-  uint16x8_t q12;
-  uint32x2_t d1;
-  uint64x1_t d3;
-  int i;
+// TODO(johannkoenig): combine with avg_neon.h version.
+static INLINE uint32_t horizontal_add_16x8(const uint16x8_t vec_16x8) {
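+  // Pairwise widening adds reduce the 8 u16 lanes to 4 u32 and then 2 u64
+  // partial sums; vadd_u32 folds the two halves, and lane 0 holds the total
+  // (the sum of 8 u16 values always fits in 32 bits).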
+  const uint32x4_t a = vpaddlq_u16(vec_16x8);
+  const uint64x2_t b = vpaddlq_u32(a);
+  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+                                vreinterpret_u32_u64(vget_high_u64(b)));
+  return vget_lane_u32(c, 0);
+}
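+// Note: on AArch64, vaddlvq_u16() performs this whole reduction in a single
+// instruction.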
-  d0 = vld1_u8(src_ptr);
-  src_ptr += src_stride;
-  d8 = vld1_u8(ref_ptr);
-  ref_ptr += ref_stride;
-  q12 = vabdl_u8(d0, d8);
+uint32_t vpx_sad4x4_neon(const uint8_t *src_ptr, int src_stride,
+                         const uint8_t *ref_ptr, int ref_stride) {
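+  // A 4x4 block is four 4-byte rows; load_unaligned_u8q() gathers them into
+  // a single 16-byte vector per buffer.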
+  const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
+  const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
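+  // vabdl_u8 starts the widened absolute-difference sums on rows 0-1 (the
+  // low halves); vabal_u8 accumulates rows 2-3 (the high halves).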
+  uint16x8_t abs = vabdl_u8(vget_low_u8(src_u8), vget_low_u8(ref_u8));
+  abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(ref_u8));
+  return horizontal_add_16x8(abs);
+}
-  for (i = 0; i < 3; i++) {
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabal_u8(q12, d0, d8);
+uint32_t vpx_sad4x8_neon(const uint8_t *src_ptr, int src_stride,
+                         const uint8_t *ref_ptr, int ref_stride) {
+  int i;
+  uint16x8_t abs = vdupq_n_u16(0);
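+  // Two passes of four rows each; every pass gathers 4x4 bytes per buffer
+  // with one unaligned load.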
+  for (i = 0; i < 8; i += 4) {
+    const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
+    const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
+    src_ptr += 4 * src_stride;
+    ref_ptr += 4 * ref_stride;
+    abs = vabal_u8(abs, vget_low_u8(src_u8), vget_low_u8(ref_u8));
+    abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(ref_u8));
  }
-  d1 = vpaddl_u16(vget_low_u16(q12));
-  d3 = vpaddl_u32(d1);
-
-  return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
+  return horizontal_add_16x8(abs);
}
unsigned int vpx_sad16x8_neon(unsigned char *src_ptr, int src_stride,
                                vreinterpret_u32_u64(vget_high_u64(b)));
  return vget_lane_u32(c, 0);
}
-static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_16x8) {
-  const uint32x4_t a = vpaddlq_u16(vec_16x8);
-  const uint64x2_t b = vpaddlq_u32(a);
-  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
-                                vreinterpret_u32_u64(vget_high_u64(b)));
-  return vget_lane_u32(c, 0);
-}
unsigned int vpx_sad64x64_neon(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride) {
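For reference, each kernel must match a plain scalar sum of absolute
differences over its block. A minimal scalar sketch (illustrative only, not
part of this patch; the helper name sad_ref is hypothetical):

#include <stdint.h>
#include <stdlib.h>

static uint32_t sad_ref(const uint8_t *src, int src_stride,
                        const uint8_t *ref, int ref_stride,
                        int width, int height) {
  uint32_t sad = 0;
  int r, c;
  for (r = 0; r < height; ++r) {
    for (c = 0; c < width; ++c) sad += abs(src[c] - ref[c]);
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}

For example, sad_ref(src, src_stride, ref, ref_stride, 4, 8) is the value
vpx_sad4x8_neon() must return for the same inputs.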