From: Johann
Date: Thu, 3 Aug 2017 17:22:07 +0000 (-0700)
Subject: quantize: copy ssse3 optimizations to intrinsics
X-Git-Tag: v1.7.0~252^2
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=d52cb5972;p=libvpx

quantize: copy ssse3 optimizations to intrinsics

Fairly minor differences from sse2. pabsw and psignw are the big gains.
Also re-uses some values in eob calculation to avoid an extra pcmp.

Fixes test failures in HBD and OS X builds.

Allows using it in 32bit builds, where it is about 40% faster than sse2.

Substantially faster than the assembly for skip_block. 10-20% faster the
rest of the time.

Change-Id: If783bb3567e561e47667e10133b9c84414a334e2
---

diff --git a/test/vp9_quantize_test.cc b/test/vp9_quantize_test.cc
index 7ec8f585b..c4aae2d9d 100644
--- a/test/vp9_quantize_test.cc
+++ b/test/vp9_quantize_test.cc
@@ -333,15 +333,20 @@ INSTANTIATE_TEST_CASE_P(SSE2, VP9QuantizeTest,
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #endif  // HAVE_SSE2
 
-// TODO(johannkoenig): SSSE3 optimizations do not yet pass these tests.
-#if HAVE_SSSE3 && ARCH_X86_64
+#if HAVE_SSSE3
+INSTANTIATE_TEST_CASE_P(SSSE3, VP9QuantizeTest,
+                        ::testing::Values(make_tuple(&vpx_quantize_b_ssse3,
+                                                     &vpx_quantize_b_c,
+                                                     VPX_BITS_8, 16)));
+
+#if ARCH_X86_64
+// TODO(johannkoenig): SSSE3 optimizations do not yet pass this test.
 INSTANTIATE_TEST_CASE_P(
     DISABLED_SSSE3, VP9QuantizeTest,
-    ::testing::Values(make_tuple(&vpx_quantize_b_ssse3, &vpx_quantize_b_c,
-                                 VPX_BITS_8, 16),
-                      make_tuple(&vpx_quantize_b_32x32_ssse3,
+    ::testing::Values(make_tuple(&vpx_quantize_b_32x32_ssse3,
                                  &vpx_quantize_b_32x32_c, VPX_BITS_8, 32)));
-#endif  // HAVE_SSSE3 && ARCH_X86_64
+#endif  // ARCH_X86_64
+#endif  // HAVE_SSSE3
 
 // TODO(johannkoenig): AVX optimizations do not yet pass the 32x32 test or
 // highbitdepth configurations.
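A note on "pabsw and psignw are the big gains": SSE2 has to synthesize abs()
and copysign() for 16-bit lanes from a shift, an xor and a subtract, while
SSSE3 has a single instruction for each. A minimal sketch for comparison (the
helper names are illustrative only, not code from this patch or from libvpx):

#include <emmintrin.h>   // SSE2
#include <tmmintrin.h>   // SSSE3: pabsw (_mm_abs_epi16), psignw (_mm_sign_epi16)

// SSE2 idiom: three ops to take the absolute value, three more to restore it.
__m128i abs_sse2(__m128i v) {
  const __m128i sign = _mm_srai_epi16(v, 15);  // 0xffff in negative lanes
  return _mm_sub_epi16(_mm_xor_si128(v, sign), sign);
}
__m128i copysign_sse2(__m128i v, __m128i ref) {
  const __m128i sign = _mm_srai_epi16(ref, 15);
  return _mm_sub_epi16(_mm_xor_si128(v, sign), sign);
}

// SSSE3: one instruction each. psignw also zeroes lanes where ref is zero,
// which is harmless in the quantizer because a zero coefficient quantizes
// to zero anyway.
__m128i abs_ssse3(__m128i v) { return _mm_abs_epi16(v); }
__m128i copysign_ssse3(__m128i v, __m128i ref) { return _mm_sign_epi16(v, ref); }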
diff --git a/vpx_dsp/vpx_dsp.mk b/vpx_dsp/vpx_dsp.mk
index ae98eb23d..98aa2173b 100644
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -274,6 +274,7 @@ DSP_SRCS-yes            += quantize.c
 DSP_SRCS-yes            += quantize.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/quantize_sse2.c
+DSP_SRCS-$(HAVE_SSSE3)  += x86/quantize_ssse3.c
 DSP_SRCS-$(HAVE_NEON)   += arm/quantize_neon.c
 
 ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_quantize_intrin_sse2.c
diff --git a/vpx_dsp/vpx_dsp_rtcd_defs.pl b/vpx_dsp/vpx_dsp_rtcd_defs.pl
index 99ef262b1..a78b1cff7 100644
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -671,7 +671,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
 #
 if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") {
   add_proto qw/void vpx_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-  specialize qw/vpx_quantize_b neon sse2/, "$ssse3_x86_64", "$avx_x86_64";
+  specialize qw/vpx_quantize_b neon sse2 ssse3/, "$avx_x86_64";
 
   add_proto qw/void vpx_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
   specialize qw/vpx_quantize_b_32x32/, "$ssse3_x86_64", "$avx_x86_64";
diff --git a/vpx_dsp/x86/quantize_ssse3.c b/vpx_dsp/x86/quantize_ssse3.c
new file mode 100644
index 000000000..014acb4af
--- /dev/null
+++ b/vpx_dsp/x86/quantize_ssse3.c
@@ -0,0 +1,198 @@
+/*
+ *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <tmmintrin.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
+
+void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                          int skip_block, const int16_t *zbin_ptr,
+                          const int16_t *round_ptr, const int16_t *quant_ptr,
+                          const int16_t *quant_shift_ptr,
+                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                          const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  const __m128i zero = _mm_setzero_si128();
+  __m128i coeff0, coeff1;
+  __m128i eob;
+  __m128i zbin;
+  __m128i round, quant, dequant, shift;
+  intptr_t index = 0;
+  (void)scan_ptr;
+
+  if (skip_block) {
+    do {
+      store_tran_low(zero, dqcoeff_ptr + index);
+      store_tran_low(zero, dqcoeff_ptr + index + 8);
+      store_tran_low(zero, qcoeff_ptr + index);
+      store_tran_low(zero, qcoeff_ptr + index + 8);
+      index += 16;
+    } while (index < n_coeffs);
+    *eob_ptr = 0;
+    return;
+  }
+
+  // Setup global values
+  {
+    const __m128i one = _mm_set1_epi16(1);
+    zbin = _mm_load_si128((const __m128i *)zbin_ptr);
+    // x86 has no "greater *or* equal" comparison. Subtract 1 from zbin so
+    // it is a strict "greater" comparison.
+    zbin = _mm_sub_epi16(zbin, one);
+    round = _mm_load_si128((const __m128i *)round_ptr);
+    quant = _mm_load_si128((const __m128i *)quant_ptr);
+    dequant = _mm_load_si128((const __m128i *)dequant_ptr);
+    shift = _mm_load_si128((const __m128i *)quant_shift_ptr);
+  }
+
+  {
+    __m128i qcoeff0, qcoeff1;
+    __m128i qtmp0, qtmp1;
+    __m128i cmp_mask0, cmp_mask1;
+    __m128i zero_coeff0, zero_coeff1;
+    __m128i iscan0, iscan1;
+    __m128i eob1;
+
+    // Do DC and first 15 AC
+    coeff0 = load_tran_low(coeff_ptr + index);
+    coeff1 = load_tran_low(coeff_ptr + index + 8);
+
+    qcoeff0 = _mm_abs_epi16(coeff0);
+    qcoeff1 = _mm_abs_epi16(coeff1);
+
+    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
+    // Overwrite DC component.
+    zbin = _mm_unpackhi_epi64(zbin, zbin);
+    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);
+
+    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+    round = _mm_unpackhi_epi64(round, round);
+    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+
+    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+    quant = _mm_unpackhi_epi64(quant, quant);
+    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+
+    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
+    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
+
+    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
+    shift = _mm_unpackhi_epi64(shift, shift);
+    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
+
+    // Reinsert signs
+    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
+    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);
+
+    // Mask out zbin threshold coeffs
+    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
+    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);
+
+    store_tran_low(qcoeff0, qcoeff_ptr + index);
+    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);
+
+    coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+    dequant = _mm_unpackhi_epi64(dequant, dequant);
+    coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+
+    store_tran_low(coeff0, dqcoeff_ptr + index);
+    store_tran_low(coeff1, dqcoeff_ptr + index + 8);
+
+    // Scan for eob
+    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + index));
+    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + index + 8));
+    // Add one to convert from indices to counts
+    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
+    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
+    eob = _mm_andnot_si128(zero_coeff0, iscan0);
+    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
+    eob = _mm_max_epi16(eob, eob1);
+  }
+  index += 16;
+
+  // AC only loop
+  while (index < n_coeffs) {
+    __m128i qcoeff0, qcoeff1;
+    __m128i qtmp0, qtmp1;
+    __m128i cmp_mask0, cmp_mask1;
+    __m128i zero_coeff0, zero_coeff1;
+    __m128i iscan0, iscan1;
+    __m128i eob0, eob1;
+
+    coeff0 = load_tran_low(coeff_ptr + index);
+    coeff1 = load_tran_low(coeff_ptr + index + 8);
+
+    qcoeff0 = _mm_abs_epi16(coeff0);
+    qcoeff1 = _mm_abs_epi16(coeff1);
+
+    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
+    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);
+
+    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+    qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+
+    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+
+    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
+    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);
+
+    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
+    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);
+
+    // Reinsert signs
+    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
+    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);
+
+    // Mask out zbin threshold coeffs
+    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
+    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);
+
+    store_tran_low(qcoeff0, qcoeff_ptr + index);
+    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);
+
+    coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+    coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+
+    store_tran_low(coeff0, dqcoeff_ptr + index);
+    store_tran_low(coeff1, dqcoeff_ptr + index + 8);
+
+    // Scan for eob
+    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + index));
+    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + index + 8));
+    // Add one to convert from indices to counts
+    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
+    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
+    eob0 = _mm_andnot_si128(zero_coeff0, iscan0);
+    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
+    eob0 = _mm_max_epi16(eob0, eob1);
+    eob = _mm_max_epi16(eob, eob0);
+
+    index += 16;
+  }
+
+  // Accumulate EOB
+  {
+    __m128i eob_shuffled;
+    eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
+    eob = _mm_max_epi16(eob, eob_shuffled);
+    eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
+    eob = _mm_max_epi16(eob, eob_shuffled);
+    eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
+    eob = _mm_max_epi16(eob, eob_shuffled);
+    *eob_ptr = _mm_extract_epi16(eob, 1);
+  }
+}
diff --git a/vpx_dsp/x86/quantize_ssse3_x86_64.asm b/vpx_dsp/x86/quantize_ssse3_x86_64.asm
index ec2cafb94..19798422b 100644
--- a/vpx_dsp/x86/quantize_ssse3_x86_64.asm
+++ b/vpx_dsp/x86/quantize_ssse3_x86_64.asm
@@ -15,7 +15,6 @@ pw_1: times 8 dw 1
 
 SECTION .text
 
-; TODO(yunqingwang)fix quantize_b code for skip=1 case.
 %macro QUANTIZE_FN 2
 cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
                                 shift, qcoeff, dqcoeff, dequant, \
@@ -304,6 +303,9 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
 
 ; skip-block, i.e. just write all zeroes
 .blank:
+DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
+            qcoeff, dqcoeff, dequant, eob, scan, iscan
+
 mov             r0, dqcoeffmp
 movifnidn  ncoeffq, ncoeffmp
 mov             r2, qcoeffmp
@@ -341,5 +343,4 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
 %endmacro
 
 INIT_XMM ssse3
-QUANTIZE_FN b, 7
 QUANTIZE_FN b_32x32, 7
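A note on "re-uses some values in eob calculation to avoid an extra pcmp": the
zbin comparison mask is 0 or -1 per lane, so subtracting it from the inverse
scan values adds 1 exactly in the lanes that passed the zbin check, turning
0-based scan indices into 1-based counts without another pcmpeq/padd pair; the
final eob is then a horizontal max over the surviving counts. A small,
self-contained sketch under those assumptions (hmax_epi16, main and the sample
values are illustrative names and data, not libvpx code):

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

// Horizontal max of eight 16-bit lanes, mirroring the "Accumulate EOB"
// shuffle/max sequence at the end of vpx_quantize_b_ssse3().
static int16_t hmax_epi16(__m128i v) {
  v = _mm_max_epi16(v, _mm_shuffle_epi32(v, 0xe));    // fold upper 64 bits in
  v = _mm_max_epi16(v, _mm_shufflelo_epi16(v, 0xe));  // fold lanes 2,3 into 0,1
  v = _mm_max_epi16(v, _mm_shufflelo_epi16(v, 0x1));  // fold lane 1 into 0
  return (int16_t)_mm_extract_epi16(v, 0);
}

int main(void) {
  const __m128i zero = _mm_setzero_si128();
  // Example data for one group of eight coefficients: inverse scan positions,
  // the zbin comparison mask already produced during quantization (0xffff
  // where the coefficient passed the zbin check), and dequantized values.
  const __m128i iscan = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
  const __m128i cmp_mask = _mm_setr_epi16(-1, -1, 0, -1, 0, 0, 0, 0);
  const __m128i dqcoeff = _mm_setr_epi16(4, -8, 0, 12, 0, 0, 0, 0);

  // Subtracting the mask adds 1 in the passing lanes (0 - (-1) == 1),
  // converting indices to counts without an extra compare.
  const __m128i counts = _mm_sub_epi16(iscan, cmp_mask);
  // Keep counts only where the dequantized coefficient is nonzero.
  const __m128i zero_coeff = _mm_cmpeq_epi16(dqcoeff, zero);
  const __m128i eob = _mm_andnot_si128(zero_coeff, counts);

  printf("eob = %d\n", hmax_epi16(eob));  // prints "eob = 4"
  return 0;
}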