Add sse2 forward and inverse 16x32 and 32x16 transforms
author David Barker <david.barker@argondesign.com>
Thu, 6 Oct 2016 16:25:40 +0000 (17:25 +0100)
committer David Barker <david.barker@argondesign.com>
Thu, 13 Oct 2016 13:01:22 +0000 (14:01 +0100)
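Add a helper, fdct32_8col(), which applies the 32-point forward DCT to
8 columns at a time, and parameterise the IDCT32 macro so its input can
be passed as two 16-register halves. New tests cover the 16x32 and
32x16 transforms.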
Change-Id: I1241257430f1e08ead1ce0f31db8272b50783102

aom_dsp/aom_dsp.mk
aom_dsp/x86/fwd_dct32_8cols_sse2.c [new file with mode: 0644]
aom_dsp/x86/fwd_txfm_sse2.h
aom_dsp/x86/inv_txfm_sse2.c
aom_dsp/x86/inv_txfm_sse2.h
av1/common/av1_rtcd_defs.pl
av1/common/x86/idct_intrin_sse2.c
av1/encoder/x86/dct_intrin_sse2.c
test/av1_fht16x32_test.cc [new file with mode: 0644]
test/av1_fht32x16_test.cc [new file with mode: 0644]
test/test.mk

index 741bbde78b10432c105aea71b9601d58ae47c4be..8a0109eede3b033b04d0d2a755eaaa59984c60d7 100644 (file)
@@ -186,6 +186,7 @@ DSP_SRCS-yes            += fwd_txfm.c
 DSP_SRCS-yes            += fwd_txfm.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_txfm_sse2.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_txfm_sse2.c
+DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_dct32_8cols_sse2.c
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_txfm_impl_sse2.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_dct32x32_impl_sse2.h
 ifeq ($(ARCH_X86_64),yes)
diff --git a/aom_dsp/x86/fwd_dct32_8cols_sse2.c b/aom_dsp/x86/fwd_dct32_8cols_sse2.c
new file mode 100644 (file)
index 0000000..b8ec08d
--- /dev/null
@@ -0,0 +1,862 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <emmintrin.h>  // SSE2
+
+#include "aom_dsp/fwd_txfm.h"
+#include "aom_dsp/txfm_common.h"
+#include "aom_dsp/x86/txfm_common_sse2.h"
+
+// Apply a 32-element forward DCT to 8 columns. This does not do any
+// transposition of its output - the caller is expected to do that.
+// The input buffers are the top and bottom halves of an 8x32 block.
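+//
+// A minimal usage sketch (hypothetical caller, not part of this change):
+//   __m128i top[16], bottom[16];  // 8 columns x 32 rows of int16 input
+//   /* ... load the coefficients ... */
+//   fdct32_8col(top, bottom);     // results are written back in place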
+void fdct32_8col(__m128i *in0, __m128i *in1) {
+  // Constants
+  //    In one case all eight 16-bit lanes hold the same value. In all others
+  //    they hold a pair of values repeated four times, built by constructing
+  //    the 32-bit constant corresponding to that pair.
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+  const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64);
+  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64);
+  const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
+  const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
+  const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64);
+  const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64);
+  const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64);
+  const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64);
+  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
+  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
+  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
+  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
+  const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, cospi_1_64);
+  const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64);
+  const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64, cospi_9_64);
+  const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64);
+  const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64);
+  const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64);
+  const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, cospi_15_64);
+  const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64);
+  const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64);
+  const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64);
+  const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64);
+  const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64);
+  const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64);
+  const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64);
+  const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64);
+  const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
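+  // For example, k__cospi_p16_m16 holds the pair (+cospi_16_64, -cospi_16_64)
+  // repeated four times; _mm_madd_epi16 of an unpacked pair (a, b) with it
+  // produces a * cospi_16_64 - b * cospi_16_64 in each 32-bit lane.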
+
+  __m128i step1[32];
+  __m128i step2[32];
+  __m128i step3[32];
+  __m128i out[32];
+  // Stage 1
+  {
+    const __m128i *ina = in0;
+    const __m128i *inb = in1 + 15;
+    __m128i *step1a = &step1[0];
+    __m128i *step1b = &step1[31];
+    const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+    const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + 1));
+    const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + 2));
+    const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + 3));
+    const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - 3));
+    const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - 2));
+    const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - 1));
+    const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+    step1a[0] = _mm_add_epi16(ina0, inb0);
+    step1a[1] = _mm_add_epi16(ina1, inb1);
+    step1a[2] = _mm_add_epi16(ina2, inb2);
+    step1a[3] = _mm_add_epi16(ina3, inb3);
+    step1b[-3] = _mm_sub_epi16(ina3, inb3);
+    step1b[-2] = _mm_sub_epi16(ina2, inb2);
+    step1b[-1] = _mm_sub_epi16(ina1, inb1);
+    step1b[-0] = _mm_sub_epi16(ina0, inb0);
+  }
+  {
+    const __m128i *ina = in0 + 4;
+    const __m128i *inb = in1 + 11;
+    __m128i *step1a = &step1[4];
+    __m128i *step1b = &step1[27];
+    const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+    const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + 1));
+    const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + 2));
+    const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + 3));
+    const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - 3));
+    const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - 2));
+    const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - 1));
+    const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+    step1a[0] = _mm_add_epi16(ina0, inb0);
+    step1a[1] = _mm_add_epi16(ina1, inb1);
+    step1a[2] = _mm_add_epi16(ina2, inb2);
+    step1a[3] = _mm_add_epi16(ina3, inb3);
+    step1b[-3] = _mm_sub_epi16(ina3, inb3);
+    step1b[-2] = _mm_sub_epi16(ina2, inb2);
+    step1b[-1] = _mm_sub_epi16(ina1, inb1);
+    step1b[-0] = _mm_sub_epi16(ina0, inb0);
+  }
+  {
+    const __m128i *ina = in0 + 8;
+    const __m128i *inb = in1 + 7;
+    __m128i *step1a = &step1[8];
+    __m128i *step1b = &step1[23];
+    const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+    const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + 1));
+    const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + 2));
+    const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + 3));
+    const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - 3));
+    const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - 2));
+    const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - 1));
+    const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+    step1a[0] = _mm_add_epi16(ina0, inb0);
+    step1a[1] = _mm_add_epi16(ina1, inb1);
+    step1a[2] = _mm_add_epi16(ina2, inb2);
+    step1a[3] = _mm_add_epi16(ina3, inb3);
+    step1b[-3] = _mm_sub_epi16(ina3, inb3);
+    step1b[-2] = _mm_sub_epi16(ina2, inb2);
+    step1b[-1] = _mm_sub_epi16(ina1, inb1);
+    step1b[-0] = _mm_sub_epi16(ina0, inb0);
+  }
+  {
+    const __m128i *ina = in0 + 12;
+    const __m128i *inb = in1 + 3;
+    __m128i *step1a = &step1[12];
+    __m128i *step1b = &step1[19];
+    const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
+    const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + 1));
+    const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + 2));
+    const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + 3));
+    const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - 3));
+    const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - 2));
+    const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - 1));
+    const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
+    step1a[0] = _mm_add_epi16(ina0, inb0);
+    step1a[1] = _mm_add_epi16(ina1, inb1);
+    step1a[2] = _mm_add_epi16(ina2, inb2);
+    step1a[3] = _mm_add_epi16(ina3, inb3);
+    step1b[-3] = _mm_sub_epi16(ina3, inb3);
+    step1b[-2] = _mm_sub_epi16(ina2, inb2);
+    step1b[-1] = _mm_sub_epi16(ina1, inb1);
+    step1b[-0] = _mm_sub_epi16(ina0, inb0);
+  }
+  // Stage 2
+  {
+    step2[0] = _mm_add_epi16(step1[0], step1[15]);
+    step2[1] = _mm_add_epi16(step1[1], step1[14]);
+    step2[2] = _mm_add_epi16(step1[2], step1[13]);
+    step2[3] = _mm_add_epi16(step1[3], step1[12]);
+    step2[4] = _mm_add_epi16(step1[4], step1[11]);
+    step2[5] = _mm_add_epi16(step1[5], step1[10]);
+    step2[6] = _mm_add_epi16(step1[6], step1[9]);
+    step2[7] = _mm_add_epi16(step1[7], step1[8]);
+    step2[8] = _mm_sub_epi16(step1[7], step1[8]);
+    step2[9] = _mm_sub_epi16(step1[6], step1[9]);
+    step2[10] = _mm_sub_epi16(step1[5], step1[10]);
+    step2[11] = _mm_sub_epi16(step1[4], step1[11]);
+    step2[12] = _mm_sub_epi16(step1[3], step1[12]);
+    step2[13] = _mm_sub_epi16(step1[2], step1[13]);
+    step2[14] = _mm_sub_epi16(step1[1], step1[14]);
+    step2[15] = _mm_sub_epi16(step1[0], step1[15]);
+  }
+  {
+    const __m128i s2_20_0 = _mm_unpacklo_epi16(step1[27], step1[20]);
+    const __m128i s2_20_1 = _mm_unpackhi_epi16(step1[27], step1[20]);
+    const __m128i s2_21_0 = _mm_unpacklo_epi16(step1[26], step1[21]);
+    const __m128i s2_21_1 = _mm_unpackhi_epi16(step1[26], step1[21]);
+    const __m128i s2_22_0 = _mm_unpacklo_epi16(step1[25], step1[22]);
+    const __m128i s2_22_1 = _mm_unpackhi_epi16(step1[25], step1[22]);
+    const __m128i s2_23_0 = _mm_unpacklo_epi16(step1[24], step1[23]);
+    const __m128i s2_23_1 = _mm_unpackhi_epi16(step1[24], step1[23]);
+    const __m128i s2_20_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_m16);
+    const __m128i s2_20_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_m16);
+    const __m128i s2_21_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_m16);
+    const __m128i s2_21_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_m16);
+    const __m128i s2_22_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_m16);
+    const __m128i s2_22_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_m16);
+    const __m128i s2_23_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_m16);
+    const __m128i s2_23_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_m16);
+    const __m128i s2_24_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_p16);
+    const __m128i s2_24_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_p16);
+    const __m128i s2_25_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_p16);
+    const __m128i s2_25_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_p16);
+    const __m128i s2_26_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_p16);
+    const __m128i s2_26_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_p16);
+    const __m128i s2_27_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_p16);
+    const __m128i s2_27_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_p16);
+    // dct_const_round_shift
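+    // (Scalar equivalent: (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS, i.e.
+    // round-to-nearest, since DCT_CONST_ROUNDING is 1 << (DCT_CONST_BITS - 1).)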
+    const __m128i s2_20_4 = _mm_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_20_5 = _mm_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_21_4 = _mm_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_21_5 = _mm_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_22_4 = _mm_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_22_5 = _mm_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_23_4 = _mm_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_23_5 = _mm_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_24_4 = _mm_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_24_5 = _mm_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_25_4 = _mm_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_25_5 = _mm_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_26_4 = _mm_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_26_5 = _mm_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_27_4 = _mm_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_27_5 = _mm_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_20_6 = _mm_srai_epi32(s2_20_4, DCT_CONST_BITS);
+    const __m128i s2_20_7 = _mm_srai_epi32(s2_20_5, DCT_CONST_BITS);
+    const __m128i s2_21_6 = _mm_srai_epi32(s2_21_4, DCT_CONST_BITS);
+    const __m128i s2_21_7 = _mm_srai_epi32(s2_21_5, DCT_CONST_BITS);
+    const __m128i s2_22_6 = _mm_srai_epi32(s2_22_4, DCT_CONST_BITS);
+    const __m128i s2_22_7 = _mm_srai_epi32(s2_22_5, DCT_CONST_BITS);
+    const __m128i s2_23_6 = _mm_srai_epi32(s2_23_4, DCT_CONST_BITS);
+    const __m128i s2_23_7 = _mm_srai_epi32(s2_23_5, DCT_CONST_BITS);
+    const __m128i s2_24_6 = _mm_srai_epi32(s2_24_4, DCT_CONST_BITS);
+    const __m128i s2_24_7 = _mm_srai_epi32(s2_24_5, DCT_CONST_BITS);
+    const __m128i s2_25_6 = _mm_srai_epi32(s2_25_4, DCT_CONST_BITS);
+    const __m128i s2_25_7 = _mm_srai_epi32(s2_25_5, DCT_CONST_BITS);
+    const __m128i s2_26_6 = _mm_srai_epi32(s2_26_4, DCT_CONST_BITS);
+    const __m128i s2_26_7 = _mm_srai_epi32(s2_26_5, DCT_CONST_BITS);
+    const __m128i s2_27_6 = _mm_srai_epi32(s2_27_4, DCT_CONST_BITS);
+    const __m128i s2_27_7 = _mm_srai_epi32(s2_27_5, DCT_CONST_BITS);
+    // Combine
+    step2[20] = _mm_packs_epi32(s2_20_6, s2_20_7);
+    step2[21] = _mm_packs_epi32(s2_21_6, s2_21_7);
+    step2[22] = _mm_packs_epi32(s2_22_6, s2_22_7);
+    step2[23] = _mm_packs_epi32(s2_23_6, s2_23_7);
+    step2[24] = _mm_packs_epi32(s2_24_6, s2_24_7);
+    step2[25] = _mm_packs_epi32(s2_25_6, s2_25_7);
+    step2[26] = _mm_packs_epi32(s2_26_6, s2_26_7);
+    step2[27] = _mm_packs_epi32(s2_27_6, s2_27_7);
+  }
+  // Stage 3
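+  // The (8 - k) indices below pick the mirrored element of the length-8
+  // butterfly: step3[k] = step2[7 - k] + step2[k] for k < 4,
+  // step2[7 - k] - step2[k] for k >= 4.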
+  {
+    step3[0] = _mm_add_epi16(step2[(8 - 1)], step2[0]);
+    step3[1] = _mm_add_epi16(step2[(8 - 2)], step2[1]);
+    step3[2] = _mm_add_epi16(step2[(8 - 3)], step2[2]);
+    step3[3] = _mm_add_epi16(step2[(8 - 4)], step2[3]);
+    step3[4] = _mm_sub_epi16(step2[(8 - 5)], step2[4]);
+    step3[5] = _mm_sub_epi16(step2[(8 - 6)], step2[5]);
+    step3[6] = _mm_sub_epi16(step2[(8 - 7)], step2[6]);
+    step3[7] = _mm_sub_epi16(step2[(8 - 8)], step2[7]);
+  }
+  {
+    const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
+    const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
+    const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
+    const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
+    const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
+    const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
+    const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
+    const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
+    const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
+    const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
+    const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
+    const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
+    // dct_const_round_shift
+    const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
+    const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
+    const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
+    const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
+    const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
+    const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
+    const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
+    const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
+    // Combine
+    step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7);
+    step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7);
+    step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7);
+    step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7);
+  }
+  {
+    step3[16] = _mm_add_epi16(step2[23], step1[16]);
+    step3[17] = _mm_add_epi16(step2[22], step1[17]);
+    step3[18] = _mm_add_epi16(step2[21], step1[18]);
+    step3[19] = _mm_add_epi16(step2[20], step1[19]);
+    step3[20] = _mm_sub_epi16(step1[19], step2[20]);
+    step3[21] = _mm_sub_epi16(step1[18], step2[21]);
+    step3[22] = _mm_sub_epi16(step1[17], step2[22]);
+    step3[23] = _mm_sub_epi16(step1[16], step2[23]);
+    step3[24] = _mm_sub_epi16(step1[31], step2[24]);
+    step3[25] = _mm_sub_epi16(step1[30], step2[25]);
+    step3[26] = _mm_sub_epi16(step1[29], step2[26]);
+    step3[27] = _mm_sub_epi16(step1[28], step2[27]);
+    step3[28] = _mm_add_epi16(step2[27], step1[28]);
+    step3[29] = _mm_add_epi16(step2[26], step1[29]);
+    step3[30] = _mm_add_epi16(step2[25], step1[30]);
+    step3[31] = _mm_add_epi16(step2[24], step1[31]);
+  }
+
+  // Stage 4
+  {
+    step1[0] = _mm_add_epi16(step3[3], step3[0]);
+    step1[1] = _mm_add_epi16(step3[2], step3[1]);
+    step1[2] = _mm_sub_epi16(step3[1], step3[2]);
+    step1[3] = _mm_sub_epi16(step3[0], step3[3]);
+    step1[8] = _mm_add_epi16(step3[11], step2[8]);
+    step1[9] = _mm_add_epi16(step3[10], step2[9]);
+    step1[10] = _mm_sub_epi16(step2[9], step3[10]);
+    step1[11] = _mm_sub_epi16(step2[8], step3[11]);
+    step1[12] = _mm_sub_epi16(step2[15], step3[12]);
+    step1[13] = _mm_sub_epi16(step2[14], step3[13]);
+    step1[14] = _mm_add_epi16(step3[13], step2[14]);
+    step1[15] = _mm_add_epi16(step3[12], step2[15]);
+  }
+  {
+    const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]);
+    const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]);
+    const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16);
+    const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16);
+    const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16);
+    const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16);
+    // dct_const_round_shift
+    const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS);
+    const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS);
+    const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS);
+    const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS);
+    // Combine
+    step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7);
+    step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7);
+  }
+  {
+    const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]);
+    const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]);
+    const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]);
+    const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]);
+    const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]);
+    const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]);
+    const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]);
+    const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]);
+    const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24);
+    const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24);
+    const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24);
+    const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24);
+    const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08);
+    const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08);
+    const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08);
+    const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08);
+    const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24);
+    const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24);
+    const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24);
+    const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24);
+    const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08);
+    const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08);
+    const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08);
+    const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08);
+    // dct_const_round_shift
+    const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING);
+    const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING);
+    const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS);
+    const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS);
+    const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS);
+    const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS);
+    const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS);
+    const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS);
+    const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS);
+    const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS);
+    const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS);
+    const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS);
+    const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS);
+    const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS);
+    const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS);
+    const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS);
+    const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS);
+    const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS);
+    // Combine
+    step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7);
+    step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7);
+    step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7);
+    step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7);
+    step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7);
+    step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7);
+    step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7);
+    step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7);
+  }
+  // Stage 5
+  {
+    step2[4] = _mm_add_epi16(step1[5], step3[4]);
+    step2[5] = _mm_sub_epi16(step3[4], step1[5]);
+    step2[6] = _mm_sub_epi16(step3[7], step1[6]);
+    step2[7] = _mm_add_epi16(step1[6], step3[7]);
+  }
+  {
+    const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]);
+    const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]);
+    const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]);
+    const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]);
+    const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16);
+    const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16);
+    const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16);
+    const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16);
+    const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08);
+    const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08);
+    const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24);
+    const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24);
+    // dct_const_round_shift
+    const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS);
+    const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS);
+    const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS);
+    const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS);
+    const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS);
+    const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS);
+    const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS);
+    const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS);
+    // Combine
+    out[0] = _mm_packs_epi32(out_00_6, out_00_7);
+    out[16] = _mm_packs_epi32(out_16_6, out_16_7);
+    out[8] = _mm_packs_epi32(out_08_6, out_08_7);
+    out[24] = _mm_packs_epi32(out_24_6, out_24_7);
+  }
+  {
+    const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[9], step1[14]);
+    const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[9], step1[14]);
+    const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]);
+    const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]);
+    const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24);
+    const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24);
+    const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08);
+    const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08);
+    const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24);
+    const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24);
+    const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08);
+    const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08);
+    // dct_const_round_shift
+    const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING);
+    const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING);
+    const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS);
+    const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS);
+    const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS);
+    const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS);
+    const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS);
+    const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS);
+    const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS);
+    const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS);
+    // Combine
+    step2[9] = _mm_packs_epi32(s2_09_6, s2_09_7);
+    step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7);
+    step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7);
+    step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7);
+  }
+  {
+    step2[16] = _mm_add_epi16(step1[19], step3[16]);
+    step2[17] = _mm_add_epi16(step1[18], step3[17]);
+    step2[18] = _mm_sub_epi16(step3[17], step1[18]);
+    step2[19] = _mm_sub_epi16(step3[16], step1[19]);
+    step2[20] = _mm_sub_epi16(step3[23], step1[20]);
+    step2[21] = _mm_sub_epi16(step3[22], step1[21]);
+    step2[22] = _mm_add_epi16(step1[21], step3[22]);
+    step2[23] = _mm_add_epi16(step1[20], step3[23]);
+    step2[24] = _mm_add_epi16(step1[27], step3[24]);
+    step2[25] = _mm_add_epi16(step1[26], step3[25]);
+    step2[26] = _mm_sub_epi16(step3[25], step1[26]);
+    step2[27] = _mm_sub_epi16(step3[24], step1[27]);
+    step2[28] = _mm_sub_epi16(step3[31], step1[28]);
+    step2[29] = _mm_sub_epi16(step3[30], step1[29]);
+    step2[30] = _mm_add_epi16(step1[29], step3[30]);
+    step2[31] = _mm_add_epi16(step1[28], step3[31]);
+  }
+  // Stage 6
+  {
+    const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+    const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+    const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+    const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+    const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]);
+    const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]);
+    const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]);
+    const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]);
+    const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04);
+    const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04);
+    const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20);
+    const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20);
+    const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12);
+    const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12);
+    const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28);
+    const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28);
+    // dct_const_round_shift
+    const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS);
+    const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS);
+    const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS);
+    const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS);
+    const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS);
+    const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS);
+    const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS);
+    const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS);
+    // Combine
+    out[4] = _mm_packs_epi32(out_04_6, out_04_7);
+    out[20] = _mm_packs_epi32(out_20_6, out_20_7);
+    out[12] = _mm_packs_epi32(out_12_6, out_12_7);
+    out[28] = _mm_packs_epi32(out_28_6, out_28_7);
+  }
+  {
+    step3[8] = _mm_add_epi16(step2[9], step1[8]);
+    step3[9] = _mm_sub_epi16(step1[8], step2[9]);
+    step3[10] = _mm_sub_epi16(step1[11], step2[10]);
+    step3[11] = _mm_add_epi16(step2[10], step1[11]);
+    step3[12] = _mm_add_epi16(step2[13], step1[12]);
+    step3[13] = _mm_sub_epi16(step1[12], step2[13]);
+    step3[14] = _mm_sub_epi16(step1[15], step2[14]);
+    step3[15] = _mm_add_epi16(step2[14], step1[15]);
+  }
+  {
+    const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
+    const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
+    const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
+    const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
+    const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
+    const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
+    const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
+    const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
+    const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
+    const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
+    const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
+    const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
+    const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
+    const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
+    const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
+    const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
+    const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
+    const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
+    const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
+    const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
+    const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
+    const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
+    const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
+    const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
+    // dct_const_round_shift
+    const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
+    const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
+    const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
+    const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
+    const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
+    const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
+    const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
+    const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
+    const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
+    const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
+    const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
+    const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
+    const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
+    const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
+    const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
+    const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
+    const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
+    const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
+    // Combine
+    step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
+    step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
+    step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
+    step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
+    // Combine
+    step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
+    step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
+    step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
+    step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
+  }
+  // Stage 7
+  {
+    const __m128i out_02_0 = _mm_unpacklo_epi16(step3[8], step3[15]);
+    const __m128i out_02_1 = _mm_unpackhi_epi16(step3[8], step3[15]);
+    const __m128i out_18_0 = _mm_unpacklo_epi16(step3[9], step3[14]);
+    const __m128i out_18_1 = _mm_unpackhi_epi16(step3[9], step3[14]);
+    const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
+    const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
+    const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
+    const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
+    const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
+    const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
+    const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
+    const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
+    const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
+    const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
+    const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
+    const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
+    const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
+    const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
+    const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
+    const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
+    const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
+    const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
+    const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
+    const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
+    // dct_const_round_shift
+    const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
+    const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
+    const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
+    const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
+    const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
+    const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
+    const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
+    const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
+    const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
+    const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
+    const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
+    const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
+    const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
+    const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
+    const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
+    const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
+    // Combine
+    out[2] = _mm_packs_epi32(out_02_6, out_02_7);
+    out[18] = _mm_packs_epi32(out_18_6, out_18_7);
+    out[10] = _mm_packs_epi32(out_10_6, out_10_7);
+    out[26] = _mm_packs_epi32(out_26_6, out_26_7);
+    out[6] = _mm_packs_epi32(out_06_6, out_06_7);
+    out[22] = _mm_packs_epi32(out_22_6, out_22_7);
+    out[14] = _mm_packs_epi32(out_14_6, out_14_7);
+    out[30] = _mm_packs_epi32(out_30_6, out_30_7);
+  }
+  {
+    step1[16] = _mm_add_epi16(step3[17], step2[16]);
+    step1[17] = _mm_sub_epi16(step2[16], step3[17]);
+    step1[18] = _mm_sub_epi16(step2[19], step3[18]);
+    step1[19] = _mm_add_epi16(step3[18], step2[19]);
+    step1[20] = _mm_add_epi16(step3[21], step2[20]);
+    step1[21] = _mm_sub_epi16(step2[20], step3[21]);
+    step1[22] = _mm_sub_epi16(step2[23], step3[22]);
+    step1[23] = _mm_add_epi16(step3[22], step2[23]);
+    step1[24] = _mm_add_epi16(step3[25], step2[24]);
+    step1[25] = _mm_sub_epi16(step2[24], step3[25]);
+    step1[26] = _mm_sub_epi16(step2[27], step3[26]);
+    step1[27] = _mm_add_epi16(step3[26], step2[27]);
+    step1[28] = _mm_add_epi16(step3[29], step2[28]);
+    step1[29] = _mm_sub_epi16(step2[28], step3[29]);
+    step1[30] = _mm_sub_epi16(step2[31], step3[30]);
+    step1[31] = _mm_add_epi16(step3[30], step2[31]);
+  }
+  // Final stage: output indices are bit-reversed.
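+  // For example, the (step1[16], step1[31]) butterfly below yields out[1]
+  // and out[31]; 1 is the 5-bit bit-reversal of 16.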
+  {
+    const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
+    const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
+    const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
+    const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
+    const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
+    const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
+    const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
+    const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
+    const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
+    const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
+    const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
+    const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
+    const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
+    const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
+    const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
+    const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
+    const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
+    const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
+    const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
+    const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
+    const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
+    const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
+    const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
+    const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
+    // dct_const_round_shift
+    const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
+    const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
+    const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
+    const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
+    const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
+    const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
+    const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
+    const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
+    const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
+    const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
+    const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
+    const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
+    const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
+    const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
+    const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
+    const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
+    // Combine
+    out[1] = _mm_packs_epi32(out_01_6, out_01_7);
+    out[17] = _mm_packs_epi32(out_17_6, out_17_7);
+    out[9] = _mm_packs_epi32(out_09_6, out_09_7);
+    out[25] = _mm_packs_epi32(out_25_6, out_25_7);
+    out[7] = _mm_packs_epi32(out_07_6, out_07_7);
+    out[23] = _mm_packs_epi32(out_23_6, out_23_7);
+    out[15] = _mm_packs_epi32(out_15_6, out_15_7);
+    out[31] = _mm_packs_epi32(out_31_6, out_31_7);
+  }
+  {
+    const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
+    const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
+    const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
+    const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
+    const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
+    const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
+    const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
+    const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
+    const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
+    const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
+    const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
+    const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
+    const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
+    const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
+    const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
+    const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
+    const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
+    const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
+    const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
+    const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
+    const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
+    const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
+    const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
+    const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
+    // dct_const_round_shift
+    const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
+    const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
+    const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
+    const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
+    const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
+    const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
+    const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
+    const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
+    const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
+    const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
+    const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
+    const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
+    const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
+    const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
+    const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
+    const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
+    const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
+    const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
+    // Combine
+    out[5] = _mm_packs_epi32(out_05_6, out_05_7);
+    out[21] = _mm_packs_epi32(out_21_6, out_21_7);
+    out[13] = _mm_packs_epi32(out_13_6, out_13_7);
+    out[29] = _mm_packs_epi32(out_29_6, out_29_7);
+    out[3] = _mm_packs_epi32(out_03_6, out_03_7);
+    out[19] = _mm_packs_epi32(out_19_6, out_19_7);
+    out[11] = _mm_packs_epi32(out_11_6, out_11_7);
+    out[27] = _mm_packs_epi32(out_27_6, out_27_7);
+  }
+
+  // Output results
+  {
+    int j;
+    for (j = 0; j < 16; ++j) {
+      _mm_storeu_si128((__m128i *)(in0 + j), out[j]);
+      _mm_storeu_si128((__m128i *)(in1 + j), out[j + 16]);
+    }
+  }
+}  // NOLINT
index 4b243ba3f2fae72438aec279de0d008194dcec4c..326158452658fbf48994a643187672cdf7f12be7 100644 (file)
@@ -365,6 +365,8 @@ static INLINE void transpose_and_output8x8(
   }
 }
 
+void fdct32_8col(__m128i *in0, __m128i *in1);
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
index 78fd1ef026e45ad3bf07620030426367ef195ce7..61f548a51ee1e4116ef6b3525a55db7ced7b4053 100644 (file)
@@ -2669,28 +2669,28 @@ void aom_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
     stp1_31 = stp2_31;                                                         \
   }
 
-#define IDCT32                                                                 \
+#define IDCT32(in0, in1)                                                       \
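+  /* in[k] maps to (in0)[k] for k < 16 and to (in1)[k - 16] for k >= 16. */   \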
   /* Stage1 */                                                                 \
   {                                                                            \
-    const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], in[31]);                 \
-    const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]);                 \
-    const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]);               \
-    const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]);               \
-                                                                               \
-    const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]);                 \
-    const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]);                 \
-    const __m128i lo_25_7 = _mm_unpacklo_epi16(in[25], in[7]);                 \
-    const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], in[7]);                 \
-                                                                               \
-    const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]);                 \
-    const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]);                 \
-    const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]);               \
-    const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]);               \
-                                                                               \
-    const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]);               \
-    const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]);               \
-    const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]);                 \
-    const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]);                 \
+    const __m128i lo_1_31 = _mm_unpacklo_epi16((in0)[1], (in1)[15]);           \
+    const __m128i hi_1_31 = _mm_unpackhi_epi16((in0)[1], (in1)[15]);           \
+    const __m128i lo_17_15 = _mm_unpacklo_epi16((in1)[1], (in0)[15]);          \
+    const __m128i hi_17_15 = _mm_unpackhi_epi16((in1)[1], (in0)[15]);          \
+                                                                               \
+    const __m128i lo_9_23 = _mm_unpacklo_epi16((in0)[9], (in1)[7]);            \
+    const __m128i hi_9_23 = _mm_unpackhi_epi16((in0)[9], (in1)[7]);            \
+    const __m128i lo_25_7 = _mm_unpacklo_epi16((in1)[9], (in0)[7]);            \
+    const __m128i hi_25_7 = _mm_unpackhi_epi16((in1)[9], (in0)[7]);            \
+                                                                               \
+    const __m128i lo_5_27 = _mm_unpacklo_epi16((in0)[5], (in1)[11]);           \
+    const __m128i hi_5_27 = _mm_unpackhi_epi16((in0)[5], (in1)[11]);           \
+    const __m128i lo_21_11 = _mm_unpacklo_epi16((in1)[5], (in0)[11]);          \
+    const __m128i hi_21_11 = _mm_unpackhi_epi16((in1)[5], (in0)[11]);          \
+                                                                               \
+    const __m128i lo_13_19 = _mm_unpacklo_epi16((in0)[13], (in1)[3]);          \
+    const __m128i hi_13_19 = _mm_unpackhi_epi16((in0)[13], (in1)[3]);          \
+    const __m128i lo_29_3 = _mm_unpacklo_epi16((in1)[13], (in0)[3]);           \
+    const __m128i hi_29_3 = _mm_unpackhi_epi16((in1)[13], (in0)[3]);           \
                                                                                \
     MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0,       \
                            stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, stp1_17,  \
@@ -2707,15 +2707,15 @@ void aom_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
                                                                                \
   /* Stage2 */                                                                 \
   {                                                                            \
-    const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], in[30]);                 \
-    const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]);                 \
-    const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]);               \
-    const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]);               \
+    const __m128i lo_2_30 = _mm_unpacklo_epi16((in0)[2], (in1)[14]);           \
+    const __m128i hi_2_30 = _mm_unpackhi_epi16((in0)[2], (in1)[14]);           \
+    const __m128i lo_18_14 = _mm_unpacklo_epi16((in1)[2], (in0)[14]);          \
+    const __m128i hi_18_14 = _mm_unpackhi_epi16((in1)[2], (in0)[14]);          \
                                                                                \
-    const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]);               \
-    const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]);               \
-    const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]);                 \
-    const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]);                 \
+    const __m128i lo_10_22 = _mm_unpacklo_epi16((in0)[10], (in1)[6]);          \
+    const __m128i hi_10_22 = _mm_unpackhi_epi16((in0)[10], (in1)[6]);          \
+    const __m128i lo_26_6 = _mm_unpacklo_epi16((in1)[10], (in0)[6]);           \
+    const __m128i hi_26_6 = _mm_unpackhi_epi16((in1)[10], (in0)[6]);           \
                                                                                \
     MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0,       \
                            stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9,    \
@@ -2747,10 +2747,10 @@ void aom_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
                                                                                \
   /* Stage3 */                                                                 \
   {                                                                            \
-    const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]);                 \
-    const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]);                 \
-    const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]);               \
-    const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]);               \
+    const __m128i lo_4_28 = _mm_unpacklo_epi16((in0)[4], (in1)[12]);           \
+    const __m128i hi_4_28 = _mm_unpackhi_epi16((in0)[4], (in1)[12]);           \
+    const __m128i lo_20_12 = _mm_unpacklo_epi16((in1)[4], (in0)[12]);          \
+    const __m128i hi_20_12 = _mm_unpackhi_epi16((in1)[4], (in0)[12]);          \
                                                                                \
     const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30);             \
     const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30);             \
@@ -2794,10 +2794,10 @@ void aom_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
                                                                                \
   /* Stage4 */                                                                 \
   {                                                                            \
-    const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]);                 \
-    const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]);                 \
-    const __m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]);                 \
-    const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]);                 \
+    const __m128i lo_0_16 = _mm_unpacklo_epi16((in0)[0], (in1)[0]);            \
+    const __m128i hi_0_16 = _mm_unpackhi_epi16((in0)[0], (in1)[0]);            \
+    const __m128i lo_8_24 = _mm_unpacklo_epi16((in0)[8], (in1)[8]);            \
+    const __m128i hi_8_24 = _mm_unpackhi_epi16((in0)[8], (in1)[8]);            \
                                                                                \
     const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);               \
     const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14);               \
@@ -3338,7 +3338,7 @@ void aom_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
     array_transpose_8x8(in + 16, in + 16);
     array_transpose_8x8(in + 24, in + 24);
 
-    IDCT32
+    IDCT32(in, in + 16)
 
     // 1_D: Store 32 intermediate results for each 8x32 block.
     col[i32 + 0] = _mm_add_epi16(stp1_0, stp1_31);
@@ -3384,7 +3384,7 @@ void aom_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
     array_transpose_8x8(col + j + 64, in + 16);
     array_transpose_8x8(col + j + 96, in + 24);
 
-    IDCT32
+    IDCT32(in, in + 16)
 
     // 2_D: Calculate the results and store them to destination.
     in[0] = _mm_add_epi16(stp1_0, stp1_31);
@@ -3451,6 +3451,107 @@ void aom_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,
   }
 }
 
+// Apply a 32-element IDCT to 8 columns. This does not do any transposition
+// of its input - the caller is expected to have done that.
+// The input buffers are the top and bottom halves of an 8x32 block.
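+// The results are written in place, reusing the IDCT32 macro shared with
+// aom_idct32x32_1024_add_sse2.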
+void idct32_8col(__m128i *in0, __m128i *in1) {
+  const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
+  // idct constants for each stage
+  const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
+  const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
+  const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
+  const __m128i stg1_3 = pair_set_epi16(cospi_17_64, cospi_15_64);
+  const __m128i stg1_4 = pair_set_epi16(cospi_23_64, -cospi_9_64);
+  const __m128i stg1_5 = pair_set_epi16(cospi_9_64, cospi_23_64);
+  const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
+  const __m128i stg1_7 = pair_set_epi16(cospi_25_64, cospi_7_64);
+  const __m128i stg1_8 = pair_set_epi16(cospi_27_64, -cospi_5_64);
+  const __m128i stg1_9 = pair_set_epi16(cospi_5_64, cospi_27_64);
+  const __m128i stg1_10 = pair_set_epi16(cospi_11_64, -cospi_21_64);
+  const __m128i stg1_11 = pair_set_epi16(cospi_21_64, cospi_11_64);
+  const __m128i stg1_12 = pair_set_epi16(cospi_19_64, -cospi_13_64);
+  const __m128i stg1_13 = pair_set_epi16(cospi_13_64, cospi_19_64);
+  const __m128i stg1_14 = pair_set_epi16(cospi_3_64, -cospi_29_64);
+  const __m128i stg1_15 = pair_set_epi16(cospi_29_64, cospi_3_64);
+
+  const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+  const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64);
+  const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+  const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64);
+  const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+  const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64);
+  const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+  const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64);
+
+  const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+  const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
+  const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+  const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);
+  const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64);
+  const __m128i stg3_6 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
+  const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m128i stg3_9 = pair_set_epi16(cospi_12_64, cospi_20_64);
+  const __m128i stg3_10 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
+
+  const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+  const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+  const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
+  const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+  const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+  const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+
+  const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+
+  __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
+      stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
+      stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, stp1_23,
+      stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, stp1_30, stp1_31;
+  __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+      stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
+      stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, stp2_23,
+      stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31;
+  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+  IDCT32(in0, in1)
+
+  // Final stage: butterfly the intermediate results, writing outputs 0-15
+  // back over in0 and outputs 16-31 over in1.
+  in0[0] = _mm_add_epi16(stp1_0, stp1_31);
+  in0[1] = _mm_add_epi16(stp1_1, stp1_30);
+  in0[2] = _mm_add_epi16(stp1_2, stp1_29);
+  in0[3] = _mm_add_epi16(stp1_3, stp1_28);
+  in0[4] = _mm_add_epi16(stp1_4, stp1_27);
+  in0[5] = _mm_add_epi16(stp1_5, stp1_26);
+  in0[6] = _mm_add_epi16(stp1_6, stp1_25);
+  in0[7] = _mm_add_epi16(stp1_7, stp1_24);
+  in0[8] = _mm_add_epi16(stp1_8, stp1_23);
+  in0[9] = _mm_add_epi16(stp1_9, stp1_22);
+  in0[10] = _mm_add_epi16(stp1_10, stp1_21);
+  in0[11] = _mm_add_epi16(stp1_11, stp1_20);
+  in0[12] = _mm_add_epi16(stp1_12, stp1_19);
+  in0[13] = _mm_add_epi16(stp1_13, stp1_18);
+  in0[14] = _mm_add_epi16(stp1_14, stp1_17);
+  in0[15] = _mm_add_epi16(stp1_15, stp1_16);
+  in1[0] = _mm_sub_epi16(stp1_15, stp1_16);
+  in1[1] = _mm_sub_epi16(stp1_14, stp1_17);
+  in1[2] = _mm_sub_epi16(stp1_13, stp1_18);
+  in1[3] = _mm_sub_epi16(stp1_12, stp1_19);
+  in1[4] = _mm_sub_epi16(stp1_11, stp1_20);
+  in1[5] = _mm_sub_epi16(stp1_10, stp1_21);
+  in1[6] = _mm_sub_epi16(stp1_9, stp1_22);
+  in1[7] = _mm_sub_epi16(stp1_8, stp1_23);
+  in1[8] = _mm_sub_epi16(stp1_7, stp1_24);
+  in1[9] = _mm_sub_epi16(stp1_6, stp1_25);
+  in1[10] = _mm_sub_epi16(stp1_5, stp1_26);
+  in1[11] = _mm_sub_epi16(stp1_4, stp1_27);
+  in1[12] = _mm_sub_epi16(stp1_3, stp1_28);
+  in1[13] = _mm_sub_epi16(stp1_2, stp1_29);
+  in1[14] = _mm_sub_epi16(stp1_1, stp1_30);
+  in1[15] = _mm_sub_epi16(stp1_0, stp1_31);
+}
+
 #if CONFIG_AOM_HIGHBITDEPTH
 static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
   __m128i ubounded, retval;
diff --git a/aom_dsp/x86/inv_txfm_sse2.h b/aom_dsp/x86/inv_txfm_sse2.h
index f39b4d6da491e19aa7241e9fa53dadbe8760aae6..c271b28452f017a004f2eb64ff95364ba6459668 100644 (file)
@@ -203,5 +203,6 @@ void idct16_sse2(__m128i *in0, __m128i *in1);
 void iadst4_sse2(__m128i *in);
 void iadst8_sse2(__m128i *in);
 void iadst16_sse2(__m128i *in0, __m128i *in1);
+void idct32_8col(__m128i *in0, __m128i *in1);
 
 #endif  // AOM_DSP_X86_INV_TXFM_SSE2_H_
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 92cbc8c31dea93e92312910a1ad9f7a91c77b631..a32b3b34966e221fb7cb04440f23cef7a5012446 100644 (file)
@@ -103,10 +103,10 @@ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
       specialize qw/av1_iht16x8_128_add sse2/;
 
       add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht16x32_512_add/;
+      specialize qw/av1_iht16x32_512_add sse2/;
 
       add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht32x16_512_add/;
+      specialize qw/av1_iht32x16_512_add sse2/;
     }
 
     add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
@@ -164,10 +164,10 @@ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
       specialize qw/av1_iht16x8_128_add sse2/;
 
       add_proto qw/void av1_iht16x32_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht16x32_512_add/;
+      specialize qw/av1_iht16x32_512_add sse2/;
 
       add_proto qw/void av1_iht32x16_512_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht32x16_512_add/;
+      specialize qw/av1_iht32x16_512_add sse2/;
     }
 
     add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
@@ -404,10 +404,10 @@ if (aom_config("CONFIG_EXT_TX") eq "yes") {
   specialize qw/av1_fht16x8 sse2/;
 
   add_proto qw/void av1_fht16x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/av1_fht16x32/;
+  specialize qw/av1_fht16x32 sse2/;
 
   add_proto qw/void av1_fht32x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/av1_fht32x16/;
+  specialize qw/av1_fht32x16 sse2/;
 
   add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
   specialize qw/av1_fht32x32 avx2/;
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index e90be26960ea182035eaedbd54d416ccab31fc01..27cd7568e500710951cd3e601317ea98dc60d355 100644 (file)
@@ -496,6 +496,12 @@ static void iidtx16_8col(__m128i *in) {
   in[15] = _mm_packs_epi32(u7, y7);
 }
 
+static void iidtx16_sse2(__m128i *in0, __m128i *in1) {
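+  // Transpose the two 8x16 halves first (matching the transposing behavior
+  // of the other 16-point *_sse2 transforms), then apply the 8-column
+  // identity to each half.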
+  array_transpose_16x16(in0, in1);
+  iidtx16_8col(in0);
+  iidtx16_8col(in1);
+}
+
 static void iidtx8_sse2(__m128i *in) {
   in[0] = _mm_slli_epi16(in[0], 1);
   in[1] = _mm_slli_epi16(in[1], 1);
@@ -628,6 +634,11 @@ static INLINE void scale_sqrt2_8x8(__m128i *in) {
                           xx_roundn_epi32_unsigned(v_p7b_d, DCT_CONST_BITS));
 }
 
+static INLINE void scale_sqrt2_8x16(__m128i *in) {
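+  // Scale the whole 8x16 block by sqrt(2) using the existing 8x8 helper.
+  // The sqrt(2) factor keeps the overall gain of the rectangular transforms
+  // in line with the square transforms.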
+  scale_sqrt2_8x8(in);
+  scale_sqrt2_8x8(in + 8);
+}
+
 void av1_iht8x16_128_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride, int tx_type) {
   __m128i in[16];
@@ -1202,4 +1213,322 @@ void av1_iht4x8_32_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
   in[3] = _mm_unpacklo_epi64(in[6], in[7]);
   write_buffer_4x8_round5(dest, in, stride);
 }
+
+// Note: The 16-column 32-element transforms take input in the form of four
+// 8x16 blocks (each stored as a __m128i[16]), which are the four quadrants
+// of the overall 16x32 input buffer.
+static INLINE void idct32_16col(__m128i *tl, __m128i *tr, __m128i *bl,
+                                __m128i *br) {
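+  // Transpose the two 16x16 halves, then run the 32-point IDCT down the
+  // left (tl over bl) and right (tr over br) 8-column groups.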
+  array_transpose_16x16(tl, tr);
+  array_transpose_16x16(bl, br);
+  idct32_8col(tl, bl);
+  idct32_8col(tr, br);
+}
+
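+// Inverse "half-right" transform: the top half of the output is the bottom
+// half of the input scaled by 4, and the bottom half of the output is a
+// sqrt(2)-scaled 16-point IDCT of the top half of the input.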
+static INLINE void ihalfright32_16col(__m128i *tl, __m128i *tr, __m128i *bl,
+                                      __m128i *br) {
+  __m128i tmpl[16], tmpr[16];
+  int i;
+
+  // Copy the top half of the input to temporary storage
+  for (i = 0; i < 16; ++i) {
+    tmpl[i] = tl[i];
+    tmpr[i] = tr[i];
+  }
+
+  // Generate the top half of the output
+  for (i = 0; i < 16; ++i) {
+    tl[i] = _mm_slli_epi16(bl[i], 2);
+    tr[i] = _mm_slli_epi16(br[i], 2);
+  }
+  array_transpose_16x16(tl, tr);
+
+  // Copy the temporary storage back to the bottom half of the input
+  for (i = 0; i < 16; ++i) {
+    bl[i] = tmpl[i];
+    br[i] = tmpr[i];
+  }
+
+  // Generate the bottom half of the output
+  scale_sqrt2_8x16(bl);
+  scale_sqrt2_8x16(br);
+  idct16_sse2(bl, br);  // Includes a transposition
+}
+
+static INLINE void iidtx32_16col(__m128i *tl, __m128i *tr, __m128i *bl,
+                                 __m128i *br) {
+  int i;
+  array_transpose_16x16(tl, tr);
+  array_transpose_16x16(bl, br);
+  for (i = 0; i < 16; ++i) {
+    tl[i] = _mm_slli_epi16(tl[i], 2);
+    tr[i] = _mm_slli_epi16(tr[i], 2);
+    bl[i] = _mm_slli_epi16(bl[i], 2);
+    br[i] = _mm_slli_epi16(br[i], 2);
+  }
+}
+
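+// Round to nearest with a final shift of 6 (add 2^5, then arithmetic shift
+// right), then reconstruct against the prediction in dest and store.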
+static INLINE void write_buffer_16x32_round6(uint8_t *dest, __m128i *intl,
+                                             __m128i *intr, __m128i *inbl,
+                                             __m128i *inbr, int stride) {
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
+  int i;
+
+  for (i = 0; i < 16; ++i) {
+    intl[i] = _mm_adds_epi16(intl[i], final_rounding);
+    intr[i] = _mm_adds_epi16(intr[i], final_rounding);
+    inbl[i] = _mm_adds_epi16(inbl[i], final_rounding);
+    inbr[i] = _mm_adds_epi16(inbr[i], final_rounding);
+    intl[i] = _mm_srai_epi16(intl[i], 6);
+    intr[i] = _mm_srai_epi16(intr[i], 6);
+    inbl[i] = _mm_srai_epi16(inbl[i], 6);
+    inbr[i] = _mm_srai_epi16(inbr[i], 6);
+    RECON_AND_STORE(dest + i * stride + 0, intl[i]);
+    RECON_AND_STORE(dest + i * stride + 8, intr[i]);
+    RECON_AND_STORE(dest + (i + 16) * stride + 0, inbl[i]);
+    RECON_AND_STORE(dest + (i + 16) * stride + 8, inbr[i]);
+  }
+}
+
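+// 16x32 inverse hybrid transform: 16-point row transforms, a sqrt(2)
+// rescale, 32-point column transforms, optional flips for the FLIPADST
+// variants, then the round-6 reconstruction above.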
+void av1_iht16x32_512_add_sse2(const tran_low_t *input, uint8_t *dest,
+                               int stride, int tx_type) {
+  __m128i intl[16], intr[16], inbl[16], inbr[16];
+
+  int i;
+  for (i = 0; i < 16; ++i) {
+    intl[i] = load_input_data(input + i * 16 + 0);
+    intr[i] = load_input_data(input + i * 16 + 8);
+    inbl[i] = load_input_data(input + (i + 16) * 16 + 0);
+    inbr[i] = load_input_data(input + (i + 16) * 16 + 8);
+  }
+
+  // Row transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case FLIPADST_DCT:
+    case H_DCT:
+      idct16_sse2(intl, intr);
+      idct16_sse2(inbl, inbr);
+      break;
+    case DCT_ADST:
+    case ADST_ADST:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case H_ADST:
+    case H_FLIPADST:
+      iadst16_sse2(intl, intr);
+      iadst16_sse2(inbl, inbr);
+      break;
+    case V_FLIPADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX:
+      iidtx16_sse2(intl, intr);
+      iidtx16_sse2(inbl, inbr);
+      break;
+    default: assert(0); break;
+  }
+
+  scale_sqrt2_8x16(intl);
+  scale_sqrt2_8x16(intr);
+  scale_sqrt2_8x16(inbl);
+  scale_sqrt2_8x16(inbr);
+
+  // Column transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case DCT_ADST:
+    case DCT_FLIPADST:
+    case V_DCT: idct32_16col(intl, intr, inbl, inbr); break;
+    case ADST_DCT:
+    case ADST_ADST:
+    case FLIPADST_ADST:
+    case ADST_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case FLIPADST_DCT:
+    case V_ADST:
+    case V_FLIPADST: ihalfright32_16col(intl, intr, inbl, inbr); break;
+    case H_DCT:
+    case H_ADST:
+    case H_FLIPADST:
+    case IDTX: iidtx32_16col(intl, intr, inbl, inbr); break;
+    default: assert(0); break;
+  }
+
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case H_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case H_ADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX: break;
+    case FLIPADST_DCT:
+    case FLIPADST_ADST:
+    case V_FLIPADST: FLIPUD_PTR(dest, stride, 32); break;
+    case DCT_FLIPADST:
+    case ADST_FLIPADST:
+    case H_FLIPADST:
+      for (i = 0; i < 16; ++i) {
+        __m128i tmp = intl[i];
+        intl[i] = mm_reverse_epi16(intr[i]);
+        intr[i] = mm_reverse_epi16(tmp);
+        tmp = inbl[i];
+        inbl[i] = mm_reverse_epi16(inbr[i]);
+        inbr[i] = mm_reverse_epi16(tmp);
+      }
+      break;
+    case FLIPADST_FLIPADST:
+      for (i = 0; i < 16; ++i) {
+        __m128i tmp = intl[i];
+        intl[i] = mm_reverse_epi16(intr[i]);
+        intr[i] = mm_reverse_epi16(tmp);
+        tmp = inbl[i];
+        inbl[i] = mm_reverse_epi16(inbr[i]);
+        inbr[i] = mm_reverse_epi16(tmp);
+      }
+      FLIPUD_PTR(dest, stride, 32);
+      break;
+    default: assert(0); break;
+  }
+  write_buffer_16x32_round6(dest, intl, intr, inbl, inbr, stride);
+}
+
+static INLINE void write_buffer_32x16_round6(uint8_t *dest, __m128i *in0,
+                                             __m128i *in1, __m128i *in2,
+                                             __m128i *in3, int stride) {
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
+  int i;
+
+  for (i = 0; i < 16; ++i) {
+    in0[i] = _mm_adds_epi16(in0[i], final_rounding);
+    in1[i] = _mm_adds_epi16(in1[i], final_rounding);
+    in2[i] = _mm_adds_epi16(in2[i], final_rounding);
+    in3[i] = _mm_adds_epi16(in3[i], final_rounding);
+    in0[i] = _mm_srai_epi16(in0[i], 6);
+    in1[i] = _mm_srai_epi16(in1[i], 6);
+    in2[i] = _mm_srai_epi16(in2[i], 6);
+    in3[i] = _mm_srai_epi16(in3[i], 6);
+    RECON_AND_STORE(dest + i * stride + 0, in0[i]);
+    RECON_AND_STORE(dest + i * stride + 8, in1[i]);
+    RECON_AND_STORE(dest + i * stride + 16, in2[i]);
+    RECON_AND_STORE(dest + i * stride + 24, in3[i]);
+  }
+}
+
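+// 32x16 inverse hybrid transform: the mirror image of the 16x32 case above,
+// with 32-point row transforms and 16-point column transforms.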
+void av1_iht32x16_512_add_sse2(const tran_low_t *input, uint8_t *dest,
+                               int stride, int tx_type) {
+  __m128i in0[16], in1[16], in2[16], in3[16];
+  int i;
+
+  for (i = 0; i < 16; ++i) {
+    in0[i] = load_input_data(input + i * 32 + 0);
+    in1[i] = load_input_data(input + i * 32 + 8);
+    in2[i] = load_input_data(input + i * 32 + 16);
+    in3[i] = load_input_data(input + i * 32 + 24);
+  }
+
+  // Row transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case FLIPADST_DCT:
+    case H_DCT: idct32_16col(in0, in1, in2, in3); break;
+    case DCT_ADST:
+    case ADST_ADST:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case H_ADST:
+    case H_FLIPADST: ihalfright32_16col(in0, in1, in2, in3); break;
+    case V_FLIPADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX: iidtx32_16col(in0, in1, in2, in3); break;
+    default: assert(0); break;
+  }
+
+  scale_sqrt2_8x16(in0);
+  scale_sqrt2_8x16(in1);
+  scale_sqrt2_8x16(in2);
+  scale_sqrt2_8x16(in3);
+
+  // Column transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case DCT_ADST:
+    case DCT_FLIPADST:
+    case V_DCT:
+      idct16_sse2(in0, in1);
+      idct16_sse2(in2, in3);
+      break;
+    case ADST_DCT:
+    case ADST_ADST:
+    case FLIPADST_ADST:
+    case ADST_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case FLIPADST_DCT:
+    case V_ADST:
+    case V_FLIPADST:
+      iadst16_sse2(in0, in1);
+      iadst16_sse2(in2, in3);
+      break;
+    case H_DCT:
+    case H_ADST:
+    case H_FLIPADST:
+    case IDTX:
+      iidtx16_sse2(in0, in1);
+      iidtx16_sse2(in2, in3);
+      break;
+    default: assert(0); break;
+  }
+
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case H_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case H_ADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX: break;
+    case FLIPADST_DCT:
+    case FLIPADST_ADST:
+    case V_FLIPADST: FLIPUD_PTR(dest, stride, 16); break;
+    case DCT_FLIPADST:
+    case ADST_FLIPADST:
+    case H_FLIPADST:
+      for (i = 0; i < 16; ++i) {
+        __m128i tmp1 = in0[i];
+        __m128i tmp2 = in1[i];
+        in0[i] = mm_reverse_epi16(in3[i]);
+        in1[i] = mm_reverse_epi16(in2[i]);
+        in2[i] = mm_reverse_epi16(tmp2);
+        in3[i] = mm_reverse_epi16(tmp1);
+      }
+      break;
+    case FLIPADST_FLIPADST:
+      for (i = 0; i < 16; ++i) {
+        __m128i tmp1 = in0[i];
+        __m128i tmp2 = in1[i];
+        in0[i] = mm_reverse_epi16(in3[i]);
+        in1[i] = mm_reverse_epi16(in2[i]);
+        in2[i] = mm_reverse_epi16(tmp2);
+        in3[i] = mm_reverse_epi16(tmp1);
+      }
+      FLIPUD_PTR(dest, stride, 16);
+      break;
+    default: assert(0); break;
+  }
+  write_buffer_32x16_round6(dest, in0, in1, in2, in3, stride);
+}
 #endif  // CONFIG_EXT_TX
diff --git a/av1/encoder/x86/dct_intrin_sse2.c b/av1/encoder/x86/dct_intrin_sse2.c
index ad61fd3ebf24dcf6b231e94d80987d6b2940a116..22cc342a94a8d4531e315e6fd9e4ea74a64b5b3b 100644 (file)
@@ -2684,6 +2684,13 @@ static INLINE void scale_sqrt2_8x8_signed(__m128i *in) {
                           xx_roundn_epi32(v_p7b_d, DCT_CONST_BITS));
 }
 
+static INLINE void scale_sqrt2_8x16(__m128i *in) {
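+  // Scale the whole 8x16 block by sqrt(2), reusing the 8x4 helper four
+  // times.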
+  scale_sqrt2_8x4(in);
+  scale_sqrt2_8x4(in + 4);
+  scale_sqrt2_8x4(in + 8);
+  scale_sqrt2_8x4(in + 12);
+}
+
 // Load input into the left-hand half of in (ie, into lanes 0..3 of
 // each element of in). The right hand half (lanes 4..7) should be
 // treated as being filled with "don't care" values.
@@ -2803,7 +2810,6 @@ void av1_fht4x8_sse2(const int16_t *input, tran_low_t *output, int stride,
       fadst4_sse2(in);
       fadst4_sse2(in + 4);
       break;
-#if CONFIG_EXT_TX
     case FLIPADST_DCT:
       load_buffer_4x8(input, in, stride, 1, 0);
       fadst8_sse2(in);
@@ -2924,7 +2930,6 @@ void av1_fht4x8_sse2(const int16_t *input, tran_low_t *output, int stride,
       fadst4_sse2(in);
       fadst4_sse2(in + 4);
       break;
-#endif  // CONFIG_EXT_TX
     default: assert(0); break;
   }
   write_buffer_4x8(output, in);
@@ -3018,7 +3023,6 @@ void av1_fht8x4_sse2(const int16_t *input, tran_low_t *output, int stride,
       fadst4_sse2(in + 4);
       fadst8_sse2(in);
       break;
-#if CONFIG_EXT_TX
     case FLIPADST_DCT:
       load_buffer_8x4(input, in, stride, 1, 0);
       fadst4_sse2(in);
@@ -3091,7 +3095,6 @@ void av1_fht8x4_sse2(const int16_t *input, tran_low_t *output, int stride,
       fidtx4_sse2(in + 4);
       fadst8_sse2(in);
       break;
-#endif  // CONFIG_EXT_TX
     default: assert(0); break;
   }
   write_buffer_8x4(output, in);
@@ -3155,7 +3158,6 @@ void av1_fht8x16_sse2(const int16_t *input, tran_low_t *output, int stride,
       fadst8_sse2(t);
       fadst8_sse2(b);
       break;
-#if CONFIG_EXT_TX
     case FLIPADST_DCT:
       load_buffer_8x16(input, in, stride, 1, 0);
       fadst16_8col(in);
@@ -3252,7 +3254,6 @@ void av1_fht8x16_sse2(const int16_t *input, tran_low_t *output, int stride,
       fadst8_sse2(t);
       fadst8_sse2(b);
       break;
-#endif  // CONFIG_EXT_TX
     default: assert(0); break;
   }
   right_shift_8x8(t, 2);
@@ -3313,7 +3314,6 @@ void av1_fht16x8_sse2(const int16_t *input, tran_low_t *output, int stride,
       fadst8_sse2(r);
       fadst16_8col(in);
       break;
-#if CONFIG_EXT_TX
     case FLIPADST_DCT:
       load_buffer_16x8(input, in, stride, 1, 0);
       fadst8_sse2(l);
@@ -3386,7 +3386,6 @@ void av1_fht16x8_sse2(const int16_t *input, tran_low_t *output, int stride,
       fidtx8_sse2(r);
       fadst16_8col(in);
       break;
-#endif  // CONFIG_EXT_TX
     default: assert(0); break;
   }
   array_transpose_8x8(l, l);
@@ -3396,4 +3395,381 @@ void av1_fht16x8_sse2(const int16_t *input, tran_low_t *output, int stride,
   write_buffer_8x8(output, l, 16);
   write_buffer_8x8(output + 8, r, 16);
 }
+
+// Note: The 16-column 32-element transforms expect their input to be
+// split up into a 2x2 grid of 8x16 blocks
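+// (tl/tr/bl/br = the top-left, top-right, bottom-left and bottom-right
+// 8x16 block respectively.)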
+static INLINE void fdct32_16col(__m128i *tl, __m128i *tr, __m128i *bl,
+                                __m128i *br) {
+  fdct32_8col(tl, bl);
+  fdct32_8col(tr, br);
+  array_transpose_16x16(tl, tr);
+  array_transpose_16x16(bl, br);
+}
+
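+// Forward "half-right" transform: the bottom half of the output is the top
+// half of the input scaled by 4, and the top half of the output is a
+// sqrt(2)-scaled 16-point DCT of the bottom half of the input.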
+static INLINE void fhalfright32_16col(__m128i *tl, __m128i *tr, __m128i *bl,
+                                      __m128i *br) {
+  __m128i tmpl[16], tmpr[16];
+  int i;
+
+  // Copy the bottom half of the input to temporary storage
+  for (i = 0; i < 16; ++i) {
+    tmpl[i] = bl[i];
+    tmpr[i] = br[i];
+  }
+
+  // Generate the bottom half of the output
+  for (i = 0; i < 16; ++i) {
+    bl[i] = _mm_slli_epi16(tl[i], 2);
+    br[i] = _mm_slli_epi16(tr[i], 2);
+  }
+  array_transpose_16x16(bl, br);
+
+  // Copy the temporary storage back to the top half of the input
+  for (i = 0; i < 16; ++i) {
+    tl[i] = tmpl[i];
+    tr[i] = tmpr[i];
+  }
+
+  // Generate the top half of the output
+  scale_sqrt2_8x16(tl);
+  scale_sqrt2_8x16(tr);
+  fdct16_sse2(tl, tr);
+}
+
+static INLINE void fidtx32_16col(__m128i *tl, __m128i *tr, __m128i *bl,
+                                 __m128i *br) {
+  int i;
+  for (i = 0; i < 16; ++i) {
+    tl[i] = _mm_slli_epi16(tl[i], 2);
+    tr[i] = _mm_slli_epi16(tr[i], 2);
+    bl[i] = _mm_slli_epi16(bl[i], 2);
+    br[i] = _mm_slli_epi16(br[i], 2);
+  }
+  array_transpose_16x16(tl, tr);
+  array_transpose_16x16(bl, br);
+}
+
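+// Load a 16x32 block as four 8x16 quadrants. A vertical flip reads from the
+// last row with a negated stride, a horizontal flip reverses and swaps the
+// left/right halves, and the sqrt(2) pre-scale is applied on load.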
+static INLINE void load_buffer_16x32(const int16_t *input, __m128i *intl,
+                                     __m128i *intr, __m128i *inbl,
+                                     __m128i *inbr, int stride, int flipud,
+                                     int fliplr) {
+  int i;
+  if (flipud) {
+    input = input + 31 * stride;
+    stride = -stride;
+  }
+
+  for (i = 0; i < 16; ++i) {
+    intl[i + 0] = _mm_load_si128((const __m128i *)(input + i * stride + 0));
+    intr[i + 0] = _mm_load_si128((const __m128i *)(input + i * stride + 8));
+    inbl[i + 0] =
+        _mm_load_si128((const __m128i *)(input + (i + 16) * stride + 0));
+    inbr[i + 0] =
+        _mm_load_si128((const __m128i *)(input + (i + 16) * stride + 8));
+  }
+
+  if (fliplr) {
+    __m128i tmp;
+    for (i = 0; i < 16; ++i) {
+      tmp = intl[i];
+      intl[i] = mm_reverse_epi16(intr[i]);
+      intr[i] = mm_reverse_epi16(tmp);
+      tmp = inbl[i];
+      inbl[i] = mm_reverse_epi16(inbr[i]);
+      inbr[i] = mm_reverse_epi16(tmp);
+    }
+  }
+
+  scale_sqrt2_8x16(intl);
+  scale_sqrt2_8x16(intr);
+  scale_sqrt2_8x16(inbl);
+  scale_sqrt2_8x16(inbr);
+}
+
+static INLINE void right_shift_8x16(__m128i *res, const int bit) {
+  right_shift_8x8(res, bit);
+  right_shift_8x8(res + 8, bit);
+}
+
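+// Shift the results right by 2 and store the 16x32 coefficient block in
+// raster order.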
+static INLINE void write_buffer_16x32(tran_low_t *output, __m128i *restl,
+                                      __m128i *restr, __m128i *resbl,
+                                      __m128i *resbr) {
+  int i;
+  right_shift_8x16(restl, 2);
+  right_shift_8x16(restr, 2);
+  right_shift_8x16(resbl, 2);
+  right_shift_8x16(resbr, 2);
+  for (i = 0; i < 16; ++i) {
+    store_output(&restl[i], output + i * 16 + 0);
+    store_output(&restr[i], output + i * 16 + 8);
+    store_output(&resbl[i], output + (i + 16) * 16 + 0);
+    store_output(&resbr[i], output + (i + 16) * 16 + 8);
+  }
+}
+
+// Note on data layout, for both this and the 32x16 transforms:
+// So that we can reuse the 16-element transforms easily,
+// we want to split the input into 8x16 blocks.
+// For 16x32, this means the input is a 2x2 grid of such blocks.
+// For 32x16, it means the input is a 4x1 grid.
+void av1_fht16x32_sse2(const int16_t *input, tran_low_t *output, int stride,
+                       int tx_type) {
+  __m128i intl[16], intr[16], inbl[16], inbr[16];
+
+  switch (tx_type) {
+    case DCT_DCT:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fdct32_16col(intl, intr, inbl, inbr);
+      fdct16_sse2(intl, intr);
+      fdct16_sse2(inbl, inbr);
+      break;
+    case ADST_DCT:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fhalfright32_16col(intl, intr, inbl, inbr);
+      fdct16_sse2(intl, intr);
+      fdct16_sse2(inbl, inbr);
+      break;
+    case DCT_ADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fdct32_16col(intl, intr, inbl, inbr);
+      fadst16_sse2(intl, intr);
+      fadst16_sse2(inbl, inbr);
+      break;
+    case ADST_ADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fhalfright32_16col(intl, intr, inbl, inbr);
+      fadst16_sse2(intl, intr);
+      fadst16_sse2(inbl, inbr);
+      break;
+    case FLIPADST_DCT:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 1, 0);
+      fhalfright32_16col(intl, intr, inbl, inbr);
+      fdct16_sse2(intl, intr);
+      fdct16_sse2(inbl, inbr);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 1);
+      fdct32_16col(intl, intr, inbl, inbr);
+      fadst16_sse2(intl, intr);
+      fadst16_sse2(inbl, inbr);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 1, 1);
+      fhalfright32_16col(intl, intr, inbl, inbr);
+      fadst16_sse2(intl, intr);
+      fadst16_sse2(inbl, inbr);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 1);
+      fhalfright32_16col(intl, intr, inbl, inbr);
+      fadst16_sse2(intl, intr);
+      fadst16_sse2(inbl, inbr);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 1, 0);
+      fhalfright32_16col(intl, intr, inbl, inbr);
+      fadst16_sse2(intl, intr);
+      fadst16_sse2(inbl, inbr);
+      break;
+    case IDTX:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fidtx32_16col(intl, intr, inbl, inbr);
+      fidtx16_sse2(intl, intr);
+      fidtx16_sse2(inbl, inbr);
+      break;
+    case V_DCT:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fdct32_16col(intl, intr, inbl, inbr);
+      fidtx16_sse2(intl, intr);
+      fidtx16_sse2(inbl, inbr);
+      break;
+    case H_DCT:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fidtx32_16col(intl, intr, inbl, inbr);
+      fdct16_sse2(intl, intr);
+      fdct16_sse2(inbl, inbr);
+      break;
+    case V_ADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fhalfright32_16col(intl, intr, inbl, inbr);
+      fidtx16_sse2(intl, intr);
+      fidtx16_sse2(inbl, inbr);
+      break;
+    case H_ADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 0);
+      fidtx32_16col(intl, intr, inbl, inbr);
+      fadst16_sse2(intl, intr);
+      fadst16_sse2(inbl, inbr);
+      break;
+    case V_FLIPADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 1, 0);
+      fhalfright32_16col(intl, intr, inbl, inbr);
+      fidtx16_sse2(intl, intr);
+      fidtx16_sse2(inbl, inbr);
+      break;
+    case H_FLIPADST:
+      load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 0, 1);
+      fidtx32_16col(intl, intr, inbl, inbr);
+      fadst16_sse2(intl, intr);
+      fadst16_sse2(inbl, inbr);
+      break;
+    default: assert(0); break;
+  }
+  write_buffer_16x32(output, intl, intr, inbl, inbr);
+}
+
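+// Load a 32x16 block as four 8x16 blocks side by side, mirroring
+// load_buffer_16x32 (flips and the sqrt(2) pre-scale included).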
+static INLINE void load_buffer_32x16(const int16_t *input, __m128i *in0,
+                                     __m128i *in1, __m128i *in2, __m128i *in3,
+                                     int stride, int flipud, int fliplr) {
+  int i;
+  if (flipud) {
+    input += 15 * stride;
+    stride = -stride;
+  }
+
+  for (i = 0; i < 16; ++i) {
+    in0[i] = _mm_load_si128((const __m128i *)(input + i * stride + 0));
+    in1[i] = _mm_load_si128((const __m128i *)(input + i * stride + 8));
+    in2[i] = _mm_load_si128((const __m128i *)(input + i * stride + 16));
+    in3[i] = _mm_load_si128((const __m128i *)(input + i * stride + 24));
+  }
+
+  if (fliplr) {
+    for (i = 0; i < 16; ++i) {
+      __m128i tmp1 = in0[i];
+      __m128i tmp2 = in1[i];
+      in0[i] = mm_reverse_epi16(in3[i]);
+      in1[i] = mm_reverse_epi16(in2[i]);
+      in2[i] = mm_reverse_epi16(tmp2);
+      in3[i] = mm_reverse_epi16(tmp1);
+    }
+  }
+
+  scale_sqrt2_8x16(in0);
+  scale_sqrt2_8x16(in1);
+  scale_sqrt2_8x16(in2);
+  scale_sqrt2_8x16(in3);
+}
+
+static INLINE void write_buffer_32x16(tran_low_t *output, __m128i *res0,
+                                      __m128i *res1, __m128i *res2,
+                                      __m128i *res3) {
+  int i;
+  right_shift_8x16(res0, 2);
+  right_shift_8x16(res1, 2);
+  right_shift_8x16(res2, 2);
+  right_shift_8x16(res3, 2);
+  for (i = 0; i < 16; ++i) {
+    store_output(&res0[i], output + i * 32 + 0);
+    store_output(&res1[i], output + i * 32 + 8);
+    store_output(&res2[i], output + i * 32 + 16);
+    store_output(&res3[i], output + i * 32 + 24);
+  }
+}
+
+void av1_fht32x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+                       int tx_type) {
+  __m128i in0[16], in1[16], in2[16], in3[16];
+
+  switch (tx_type) {
+    case DCT_DCT:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fdct16_sse2(in0, in1);
+      fdct16_sse2(in2, in3);
+      fdct32_16col(in0, in1, in2, in3);
+      break;
+    case ADST_DCT:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fadst16_sse2(in0, in1);
+      fadst16_sse2(in2, in3);
+      fdct32_16col(in0, in1, in2, in3);
+      break;
+    case DCT_ADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fdct16_sse2(in0, in1);
+      fdct16_sse2(in2, in3);
+      fhalfright32_16col(in0, in1, in2, in3);
+      break;
+    case ADST_ADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fadst16_sse2(in0, in1);
+      fadst16_sse2(in2, in3);
+      fhalfright32_16col(in0, in1, in2, in3);
+      break;
+    case FLIPADST_DCT:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 1, 0);
+      fadst16_sse2(in0, in1);
+      fadst16_sse2(in2, in3);
+      fdct32_16col(in0, in1, in2, in3);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 1);
+      fdct16_sse2(in0, in1);
+      fdct16_sse2(in2, in3);
+      fhalfright32_16col(in0, in1, in2, in3);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 1, 1);
+      fadst16_sse2(in0, in1);
+      fadst16_sse2(in2, in3);
+      fhalfright32_16col(in0, in1, in2, in3);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 1);
+      fadst16_sse2(in0, in1);
+      fadst16_sse2(in2, in3);
+      fhalfright32_16col(in0, in1, in2, in3);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 1, 0);
+      fadst16_sse2(in0, in1);
+      fadst16_sse2(in2, in3);
+      fhalfright32_16col(in0, in1, in2, in3);
+      break;
+    case IDTX:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fidtx16_sse2(in0, in1);
+      fidtx16_sse2(in2, in3);
+      fidtx32_16col(in0, in1, in2, in3);
+      break;
+    case V_DCT:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fdct16_sse2(in0, in1);
+      fdct16_sse2(in2, in3);
+      fidtx32_16col(in0, in1, in2, in3);
+      break;
+    case H_DCT:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fidtx16_sse2(in0, in1);
+      fidtx16_sse2(in2, in3);
+      fdct32_16col(in0, in1, in2, in3);
+      break;
+    case V_ADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fadst16_sse2(in0, in1);
+      fadst16_sse2(in2, in3);
+      fidtx32_16col(in0, in1, in2, in3);
+      break;
+    case H_ADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 0);
+      fidtx16_sse2(in0, in1);
+      fidtx16_sse2(in2, in3);
+      fhalfright32_16col(in0, in1, in2, in3);
+      break;
+    case V_FLIPADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 1, 0);
+      fadst16_sse2(in0, in1);
+      fadst16_sse2(in2, in3);
+      fidtx32_16col(in0, in1, in2, in3);
+      break;
+    case H_FLIPADST:
+      load_buffer_32x16(input, in0, in1, in2, in3, stride, 0, 1);
+      fidtx16_sse2(in0, in1);
+      fidtx16_sse2(in2, in3);
+      fhalfright32_16col(in0, in1, in2, in3);
+      break;
+    default: assert(0); break;
+  }
+  write_buffer_32x16(output, in0, in1, in2, in3);
+}
 #endif  // CONFIG_EXT_TX
diff --git a/test/av1_fht16x32_test.cc b/test/av1_fht16x32_test.cc
new file mode 100644 (file)
index 0000000..9a55f3d
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./aom_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+
+#include "aom_ports/mem.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+using std::tr1::tuple;
+using libaom_test::FhtFunc;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht16x32Param;
+
+void fht16x32_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht16x32_c(in, out, stride, tx_type);
+}
+
+void iht16x32_ref(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+  av1_iht16x32_512_add_c(in, out, stride, tx_type);
+}
+
+class AV1Trans16x32HT : public libaom_test::TransformTestBase,
+                        public ::testing::TestWithParam<Ht16x32Param> {
+ public:
+  virtual ~AV1Trans16x32HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 16;
+    fwd_txfm_ref = fht16x32_ref;
+    inv_txfm_ref = iht16x32_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans16x32HT, CoeffCheck) { RunCoeffCheck(); }
+TEST_P(AV1Trans16x32HT, InvCoeffCheck) { RunInvCoeffCheck(); }
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht16x32Param kArrayHt16x32Param_sse2[] = {
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 0, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 1, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 2, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 3, AOM_BITS_8,
+             512),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 4, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 5, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 6, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 7, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 8, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 9, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 10, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 11, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 12, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 13, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 14, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 15, AOM_BITS_8,
+             512)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans16x32HT,
+                        ::testing::ValuesIn(kArrayHt16x32Param_sse2));
+#endif  // HAVE_SSE2
+
+}  // namespace
diff --git a/test/av1_fht32x16_test.cc b/test/av1_fht32x16_test.cc
new file mode 100644 (file)
index 0000000..3a7ca9b
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./aom_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+
+#include "aom_ports/mem.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+using std::tr1::tuple;
+using libaom_test::FhtFunc;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht32x16Param;
+
+void fht32x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht32x16_c(in, out, stride, tx_type);
+}
+
+void iht32x16_ref(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+  av1_iht32x16_512_add_c(in, out, stride, tx_type);
+}
+
+class AV1Trans32x16HT : public libaom_test::TransformTestBase,
+                        public ::testing::TestWithParam<Ht32x16Param> {
+ public:
+  virtual ~AV1Trans32x16HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 32;
+    fwd_txfm_ref = fht32x16_ref;
+    inv_txfm_ref = iht32x16_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans32x16HT, CoeffCheck) { RunCoeffCheck(); }
+TEST_P(AV1Trans32x16HT, InvCoeffCheck) { RunInvCoeffCheck(); }
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht32x16Param kArrayHt32x16Param_sse2[] = {
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 0, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 1, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 2, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 3, AOM_BITS_8,
+             512),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 4, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 5, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 6, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 7, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 8, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 9, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 10, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 11, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 12, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 13, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 14, AOM_BITS_8,
+             512),
+  make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 15, AOM_BITS_8,
+             512)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans32x16HT,
+                        ::testing::ValuesIn(kArrayHt32x16Param_sse2));
+#endif  // HAVE_SSE2
+
+}  // namespace
diff --git a/test/test.mk b/test/test.mk
index 162d7c9ef2983433e3c8a6a91b8fb66e45947b81..5a181331164d6cd3f982775044f636393d220554 100644 (file)
@@ -142,6 +142,8 @@ LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht4x8_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x4_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x16_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht16x8_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht16x32_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht32x16_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_iht8x16_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_iht16x8_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += fht32x32_test.cc