granicus.if.org Git - libvpx/commitdiff
16x16 butterfly inverse ADST/DCT hybrid transform
author    Jingning Han <jingning@google.com>
          Sat, 16 Feb 2013 22:08:36 +0000 (14:08 -0800)
committer Jingning Han <jingning@google.com>
          Tue, 19 Feb 2013 17:07:00 +0000 (09:07 -0800)
rebased.

This patch adds the 16x16 butterfly inverse ADST/DCT hybrid
transform. It uses the ADST variant with kernel
    sin(pi*(2k+1)*(2n+1)/(4N)),
which allows a butterfly implementation.
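
For reference, evaluated directly (without the butterfly
factorization) this kernel is just a dense 16x16 matrix multiply.
A minimal floating-point sketch, ignoring normalization and using
a hypothetical helper name, purely for illustration:

    #include <math.h>

    /* Direct evaluation of sin(pi*(2k+1)*(2n+1)/(4N)) for N = 16,
     * up to normalization and the input/output reordering visible in
     * the committed code. Hypothetical reference helper only; the
     * patch implements the same transform as the fixed-point
     * butterfly iadst16_1d() below. */
    static void iadst16_reference(const double *input, double *output) {
      const int N = 16;
      for (int n = 0; n < N; ++n) {
        double sum = 0.0;
        for (int k = 0; k < N; ++k)
          sum += input[k] * sin(M_PI * (2 * k + 1) * (2 * n + 1) / (4.0 * N));
        output[n] = sum;
      }
    }

The butterfly version trades this O(N^2) multiply for a few staged
rotations with fixed-point rounding.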

The coding gain relative to the 16x16 DCT is about 0.1% on both
the derf and std-hd sets. Notably, in the std-hd set many
sequences gain about 0.5% and some about 0.2%, while a few points
lose 1% to 3%, which brings the average to about 0.1%.

Change-Id: Ie80ac84cf403390f6e5d282caa58723739e5ec17

configure
vp9/common/vp9_blockd.h
vp9/common/vp9_idctllm.c
vp9/common/vp9_invtrans.c
vp9/common/vp9_rtcd_defs.sh
vp9/decoder/vp9_dequantize.c

index 5cdb45288720bc4e501621fad8faa9ed432d5cb7..8ea25232077a1d7ae2f0e57259630cbddc3d5665 100755 (executable)
--- a/configure
+++ b/configure
@@ -249,6 +249,7 @@ EXPERIMENT_LIST="
     abovesprefmv
     intht
     intht4x4
+    intht16x16
 "
 CONFIG_LIST="
     external_build
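
Experiments in EXPERIMENT_LIST are compiled out by default; per the
usual libvpx convention, the new one is switched on at build time with

    ./configure --enable-experimental --enable-intht16x16

(flag spelling inferred from the experiment name added above).
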
index 602b2a10cbb67cc4eb9224e9a25b8f7004ad802a..5854818732da97e7da7b3d650f002d0bef2207dc 100644 (file)
@@ -399,7 +399,7 @@ typedef struct macroblockd {
 
 #define ACTIVE_HT8  300
 
-#define ACTIVE_HT16 0
+#define ACTIVE_HT16 300
 
 // convert MB_PREDICTION_MODE to B_PREDICTION_MODE
 static B_PREDICTION_MODE pred_mode_conv(MB_PREDICTION_MODE mode) {
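
Like ACTIVE_HT and ACTIVE_HT8 for the smaller block sizes,
ACTIVE_HT16 is the quantizer threshold below which the hybrid
transform is considered for 16x16 blocks, so raising it from 0 to
300 is what actually enables the new path. A sketch of the gating,
assuming the same shape as the 4x4/8x8 cases (the q_index field
name comes from that pattern, not from this hunk):

    /* Sketch: the hybrid transform is only used at low quantizers. */
    if (xd->q_index < ACTIVE_HT16) {
      /* get_tx_type_16x16() may return ADST_DCT, DCT_ADST or ADST_ADST */
    }
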
index 263b98597e7aa55fd4b04266e2cf63a063c22a7a..4ac18ae4a8d13d0fa24e74f770b5808142ba22df 100644 (file)
@@ -986,6 +986,231 @@ void vp9_short_idct16x16_c(int16_t *input, int16_t *output, int pitch) {
     }
 }
 
+#if CONFIG_INTHT16X16
+void iadst16_1d(int16_t *input, int16_t *output) {
+  int x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+  int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
+
+  x0 = input[15];
+  x1 = input[0];
+  x2 = input[13];
+  x3 = input[2];
+  x4 = input[11];
+  x5 = input[4];
+  x6 = input[9];
+  x7 = input[6];
+  x8 = input[7];
+  x9 = input[8];
+  x10 = input[5];
+  x11 = input[10];
+  x12 = input[3];
+  x13 = input[12];
+  x14 = input[1];
+  x15 = input[14];
+
+  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8
+           | x9 | x10 | x11 | x12 | x13 | x14 | x15)) {
+    output[0] = output[1] = output[2] = output[3] = output[4]
+              = output[5] = output[6] = output[7] = output[8]
+              = output[9] = output[10] = output[11] = output[12]
+              = output[13] = output[14] = output[15] = 0;
+    return;
+  }
+
+  // stage 1
+  s0 = x0 * cospi_1_64  + x1 * cospi_31_64;
+  s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+  s2 = x2 * cospi_5_64  + x3 * cospi_27_64;
+  s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+  s4 = x4 * cospi_9_64  + x5 * cospi_23_64;
+  s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+  s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+  s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+  s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+  s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+  s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+  s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+  s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+  s13 = x12 * cospi_7_64  - x13 * cospi_25_64;
+  s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+  s15 = x14 * cospi_3_64  - x15 * cospi_29_64;
+
+  x0 = dct_const_round_shift(s0 + s8);
+  x1 = dct_const_round_shift(s1 + s9);
+  x2 = dct_const_round_shift(s2 + s10);
+  x3 = dct_const_round_shift(s3 + s11);
+  x4 = dct_const_round_shift(s4 + s12);
+  x5 = dct_const_round_shift(s5 + s13);
+  x6 = dct_const_round_shift(s6 + s14);
+  x7 = dct_const_round_shift(s7 + s15);
+  x8  = dct_const_round_shift(s0 - s8);
+  x9  = dct_const_round_shift(s1 - s9);
+  x10 = dct_const_round_shift(s2 - s10);
+  x11 = dct_const_round_shift(s3 - s11);
+  x12 = dct_const_round_shift(s4 - s12);
+  x13 = dct_const_round_shift(s5 - s13);
+  x14 = dct_const_round_shift(s6 - s14);
+  x15 = dct_const_round_shift(s7 - s15);
+
+  // stage 2
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = x4;
+  s5 = x5;
+  s6 = x6;
+  s7 = x7;
+  s8 =    x8 * cospi_4_64   + x9 * cospi_28_64;
+  s9 =    x8 * cospi_28_64  - x9 * cospi_4_64;
+  s10 =   x10 * cospi_20_64 + x11 * cospi_12_64;
+  s11 =   x10 * cospi_12_64 - x11 * cospi_20_64;
+  s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
+  s13 =   x12 * cospi_4_64  + x13 * cospi_28_64;
+  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
+  s15 =   x14 * cospi_20_64 + x15 * cospi_12_64;
+
+  x0 = s0 + s4;
+  x1 = s1 + s5;
+  x2 = s2 + s6;
+  x3 = s3 + s7;
+  x4 = s0 - s4;
+  x5 = s1 - s5;
+  x6 = s2 - s6;
+  x7 = s3 - s7;
+  x8 = dct_const_round_shift(s8 + s12);
+  x9 = dct_const_round_shift(s9 + s13);
+  x10 = dct_const_round_shift(s10 + s14);
+  x11 = dct_const_round_shift(s11 + s15);
+  x12 = dct_const_round_shift(s8 - s12);
+  x13 = dct_const_round_shift(s9 - s13);
+  x14 = dct_const_round_shift(s10 - s14);
+  x15 = dct_const_round_shift(s11 - s15);
+
+  // stage 3
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = x4 * cospi_8_64  + x5 * cospi_24_64;
+  s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+  s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
+  s7 =   x6 * cospi_8_64  + x7 * cospi_24_64;
+  s8 = x8;
+  s9 = x9;
+  s10 = x10;
+  s11 = x11;
+  s12 = x12 * cospi_8_64  + x13 * cospi_24_64;
+  s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+  s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
+  s15 =   x14 * cospi_8_64  + x15 * cospi_24_64;
+
+  x0 = s0 + s2;
+  x1 = s1 + s3;
+  x2 = s0 - s2;
+  x3 = s1 - s3;
+  x4 = dct_const_round_shift(s4 + s6);
+  x5 = dct_const_round_shift(s5 + s7);
+  x6 = dct_const_round_shift(s4 - s6);
+  x7 = dct_const_round_shift(s5 - s7);
+  x8 = s8 + s10;
+  x9 = s9 + s11;
+  x10 = s8 - s10;
+  x11 = s9 - s11;
+  x12 = dct_const_round_shift(s12 + s14);
+  x13 = dct_const_round_shift(s13 + s15);
+  x14 = dct_const_round_shift(s12 - s14);
+  x15 = dct_const_round_shift(s13 - s15);
+
+  // stage 4
+  s2 = (- cospi_16_64) * (x2 + x3);
+  s3 = cospi_16_64 * (x2 - x3);
+  s6 = cospi_16_64 * (x6 + x7);
+  s7 = cospi_16_64 * (- x6 + x7);
+  s10 = cospi_16_64 * (x10 + x11);
+  s11 = cospi_16_64 * (- x10 + x11);
+  s14 = (- cospi_16_64) * (x14 + x15);
+  s15 = cospi_16_64 * (x14 - x15);
+
+  x2 = dct_const_round_shift(s2);
+  x3 = dct_const_round_shift(s3);
+  x6 = dct_const_round_shift(s6);
+  x7 = dct_const_round_shift(s7);
+  x10 = dct_const_round_shift(s10);
+  x11 = dct_const_round_shift(s11);
+  x14 = dct_const_round_shift(s14);
+  x15 = dct_const_round_shift(s15);
+
+  output[0] = x0;
+  output[1] = - x8;
+  output[2] = x12;
+  output[3] = - x4;
+  output[4] = x6;
+  output[5] = x14;
+  output[6] = x10;
+  output[7] = x2;
+  output[8] = x3;
+  output[9] =  x11;
+  output[10] = x15;
+  output[11] = x7;
+  output[12] = x5;
+  output[13] = - x13;
+  output[14] = x9;
+  output[15] = - x1;
+}
+
+void vp9_short_iht16x16_c(int16_t *input, int16_t *output,
+                          int pitch, TX_TYPE tx_type) {
+  int16_t out[16 * 16];
+  int16_t *outptr = &out[0];
+  const int short_pitch = pitch >> 1;
+  int i, j;
+  int16_t temp_in[16], temp_out[16];
+
+  void (*invr)(int16_t*, int16_t*);
+  void (*invc)(int16_t*, int16_t*);
+
+  switch (tx_type) {
+    case ADST_ADST:
+      invc = &iadst16_1d;
+      invr = &iadst16_1d;
+      break;
+    case ADST_DCT:
+      invc = &iadst16_1d;
+      invr = &idct16_1d;
+      break;
+    case DCT_ADST:
+      invc = &idct16_1d;
+      invr = &iadst16_1d;
+      break;
+    case DCT_DCT:
+      invc = &idct16_1d;
+      invr = &idct16_1d;
+      break;
+    default:
+      assert(0);
+  }
+
+  // inverse transform row vectors
+  for (i = 0; i < 16; ++i) {
+    invr(input, outptr);
+    input += short_pitch;
+    outptr += 16;
+  }
+
+  // inverse transform column vectors
+  for (i = 0; i < 16; ++i) {
+    for (j = 0; j < 16; ++j)
+      temp_in[j] = out[j * 16 + i];
+    invc(temp_in, temp_out);
+    for (j = 0; j < 16; ++j)
+      output[j * 16 + i] = (temp_out[j] + 32) >> 6;
+  }
+}
+#endif
+
+
+
 void vp9_short_idct10_16x16_c(int16_t *input, int16_t *output, int pitch) {
     int16_t out[16 * 16];
     int16_t *outptr = &out[0];
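
A note on the arithmetic in iadst16_1d(): the cospi_*_64 constants
and dct_const_round_shift() follow libvpx's 14-bit fixed-point
convention, defined elsewhere in the idct code. Roughly (a sketch
of that convention, not definitions from this commit):

    #define DCT_CONST_BITS 14
    /* cospi_i_64 is approximately round(cos(i * PI / 64) * (1 << 14)),
     * so each product in the butterfly stages is Q14; the round-shift
     * returns to integer precision with rounding. */
    static int dct_const_round_shift(int input) {
      return (input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
    }

Expressing the sin(pi*(2k+1)*(2n+1)/(4N)) terms through these
cosine constants is what makes the stage-1 rotations possible.
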
index 591cea8a2db79cc87faf8e6194e2cd923efebcd3..25b59cc5d69318c8fdd4d70d7cdcfbaa804c717e 100644 (file)
@@ -116,7 +116,11 @@ void vp9_inverse_transform_mby_16x16(MACROBLOCKD *xd) {
   BLOCKD *bd = &xd->block[0];
   TX_TYPE tx_type = get_tx_type_16x16(xd, bd);
   if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT16X16
+    vp9_short_iht16x16(bd->dqcoeff, bd->diff, 32, tx_type);
+#else
     vp9_ihtllm(bd->dqcoeff, bd->diff, 32, tx_type, 16, bd->eob);
+#endif
   } else {
     vp9_inverse_transform_b_16x16(&xd->block[0].dqcoeff[0],
                                   &xd->block[0].diff[0], 32);
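
Note the pitch argument at this call site: 32 is a byte pitch for
int16_t data, which vp9_short_iht16x16_c halves into an element
stride:

    /* inside vp9_short_iht16x16_c: */
    const int short_pitch = pitch >> 1;  /* 32 bytes -> 16 int16_t per row */
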
index a8fd9a90fa2e5fca1053d910a8748c0c691ad629..71e92546b1928c8b49c83b7c2a7c8c195dfacb06 100644 (file)
@@ -294,6 +294,11 @@ prototype void vp9_short_iht4x4 "int16_t *input, int16_t *output, int pitch, int
 specialize vp9_short_iht4x4
 #endif
 
+#if CONFIG_INTHT16X16
+prototype void vp9_short_iht16x16 "int16_t *input, int16_t *output, int pitch, int tx_type"
+specialize vp9_short_iht16x16
+#endif
+
 prototype void vp9_ihtllm "const int16_t *input, int16_t *output, int pitch, int tx_type, int tx_dim, int16_t eobs"
 specialize vp9_ihtllm
 
index fe1474ce95cc61b5a1acf984bbc032004b0634dc..376147547c0a58d5dc44cb13d25ead763ef45344 100644 (file)
@@ -267,7 +267,11 @@ void vp9_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, int16_t *input,
       input[i] = input[i] * dq[1];
 
     // inverse hybrid transform
+#if CONFIG_INTHT16X16
+    vp9_short_iht16x16(input, output, 32, tx_type);
+#else
     vp9_ihtllm(input, output, 32, tx_type, 16, eobs);
+#endif
 
     // the idct halves ( >> 1) the pitch
     // vp9_short_idct16x16_c(input, output, 32);
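
For completeness: per its name, vp9_ht_dequant_idct_add_16x16_c
follows the inverse transform by adding the reconstructed residual
into the prediction, clipped to 8-bit range. A sketch of that final
step (pred, dest and stride are assumed names, not taken from this
hunk):

    /* Sketch of the add-and-clip step after the inverse transform. */
    for (int r = 0; r < 16; ++r) {
      for (int c = 0; c < 16; ++c) {
        const int v = output[r * 16 + c] + pred[r * stride + c];
        dest[r * stride + c] = v < 0 ? 0 : (v > 255 ? 255 : v);
      }
    }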