From babbd5d170d080ea69c602a7ab8b824a27aaa0a2 Mon Sep 17 00:00:00 2001
From: Jingning Han
Date: Wed, 13 Feb 2013 09:03:21 -0800
Subject: [PATCH] Forward butterfly hybrid transform

This patch includes 4x4, 8x8, and 16x16 forward butterfly ADST/DCT
hybrid transforms. The kernel of the 4x4 ADST is
sin(pi*(2k+1)*(n+1)/(2N+1)). The kernel of the 8x8/16x16 ADST is of the
form sin(pi*(2k+1)*(2n+1)/(4N)).

Change-Id: I8f1ab3843ce32eb287ab766f92e0611e1c5cb4c1
---
 vp9/common/vp9_rtcd_defs.sh   |  15 ++
 vp9/encoder/vp9_dct.c         | 436 ++++++++++++++++++++++++++++++++++
 vp9/encoder/vp9_encodeintra.c |  12 +
 vp9/encoder/vp9_encodemb.c    |  16 ++
 vp9/encoder/vp9_rdopt.c       |  12 +
 vpxenc.c                      |   4 +
 6 files changed, 495 insertions(+)

diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index e4781aa05..5a7484d81 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -568,6 +568,21 @@ fi
 prototype void vp9_fht "const int16_t *input, int pitch, int16_t *output, int tx_type, int tx_dim"
 specialize vp9_fht
 
+#if CONFIG_INTHT4X4
+prototype void vp9_short_fht4x4 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht4x4
+#endif
+
+#if CONFIG_INTHT
+prototype void vp9_short_fht8x8 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht8x8
+#endif
+
+#if CONFIG_INTHT16X16
+prototype void vp9_short_fht16x16 "int16_t *InputData, int16_t *OutputData, int pitch, int tx_type"
+specialize vp9_short_fht16x16
+#endif
+
 prototype void vp9_short_fdct8x8 "int16_t *InputData, int16_t *OutputData, int pitch"
 specialize vp9_short_fdct8x8
 
diff --git a/vp9/encoder/vp9_dct.c b/vp9/encoder/vp9_dct.c
index b9648638d..4a1e78e93 100644
--- a/vp9/encoder/vp9_dct.c
+++ b/vp9/encoder/vp9_dct.c
@@ -816,6 +816,101 @@ void vp9_short_fdct4x4_c(short *input, short *output, int pitch) {
   }
 }
 
+#if CONFIG_INTHT4X4
+static void fadst4_1d(int16_t *input, int16_t *output) {
+  int x0, x1, x2, x3;
+  int s0, s1, s2, s3, s4, s5, s6, s7;
+
+  x0 = input[0];
+  x1 = input[1];
+  x2 = input[2];
+  x3 = input[3];
+
+  if (!(x0 | x1 | x2 | x3)) {
+    output[0] = output[1] = output[2] = output[3] = 0;
+    return;
+  }
+
+  s0 = sinpi_1_9 * x0;
+  s1 = sinpi_4_9 * x0;
+  s2 = sinpi_2_9 * x1;
+  s3 = sinpi_1_9 * x1;
+  s4 = sinpi_3_9 * x2;
+  s5 = sinpi_4_9 * x3;
+  s6 = sinpi_2_9 * x3;
+  s7 = x0 + x1 - x3;
+
+  x0 = s0 + s2 + s5;
+  x1 = sinpi_3_9 * s7;
+  x2 = s1 - s3 + s6;
+  x3 = s4;
+
+  s0 = x0 + x3;
+  s1 = x1;
+  s2 = x2 - x3;
+  s3 = x2 - x0 + x3;
+
+  // 1-D transform scaling factor is sqrt(2).
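+  // Note: the sinpi_*_9 constants used above are 14-bit fixed-point
+  // values (DCT_CONST_BITS == 14), so dct_const_round_shift() rounds
+  // each accumulated sum back down to integer precision.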
+  output[0] = dct_const_round_shift(s0);
+  output[1] = dct_const_round_shift(s1);
+  output[2] = dct_const_round_shift(s2);
+  output[3] = dct_const_round_shift(s3);
+}
+
+void vp9_short_fht4x4_c(int16_t *input, int16_t *output,
+                        int pitch, TX_TYPE tx_type) {
+  int16_t out[4 * 4];
+  int16_t *outptr = &out[0];
+  const int short_pitch = pitch >> 1;
+  int i, j;
+  int16_t temp_in[4], temp_out[4];
+
+  void (*fwdr)(int16_t*, int16_t*);
+  void (*fwdc)(int16_t*, int16_t*);
+
+  switch (tx_type) {
+    case ADST_ADST:
+      fwdc = &fadst4_1d;
+      fwdr = &fadst4_1d;
+      break;
+    case ADST_DCT:
+      fwdc = &fadst4_1d;
+      fwdr = &fdct4_1d;
+      break;
+    case DCT_ADST:
+      fwdc = &fdct4_1d;
+      fwdr = &fadst4_1d;
+      break;
+    case DCT_DCT:
+      fwdc = &fdct4_1d;
+      fwdr = &fdct4_1d;
+      break;
+    default:
+      assert(0);
+  }
+
+
+  // column transform
+  for (i = 0; i < 4; ++i) {
+    for (j = 0; j < 4; ++j)
+      temp_in[j] = input[j * short_pitch + i] << 4;
+    if (i == 0 && temp_in[0])
+      temp_in[0] += 1;
+    fwdc(temp_in, temp_out);
+    for (j = 0; j < 4; ++j)
+      outptr[j * 4 + i] = temp_out[j];
+  }
+
+  // row transform
+  for (i = 0; i < 4; ++i) {
+    for (j = 0; j < 4; ++j)
+      temp_in[j] = out[j + i * 4];
+    fwdr(temp_in, temp_out);
+    for (j = 0; j < 4; ++j)
+      output[j + i * 4] = (temp_out[j] + 1) >> 2;
+  }
+}
+#endif
 
 void vp9_short_fdct8x4_c(short *input, short *output, int pitch) {
@@ -896,6 +991,131 @@ void vp9_short_fdct8x8_c(int16_t *input, int16_t *output, int pitch) {
 }
 #endif
 
+#if CONFIG_INTHT
+static void fadst8_1d(int16_t *input, int16_t *output) {
+  int x0, x1, x2, x3, x4, x5, x6, x7;
+  int s0, s1, s2, s3, s4, s5, s6, s7;
+
+  x0 = input[7];
+  x1 = input[0];
+  x2 = input[5];
+  x3 = input[2];
+  x4 = input[3];
+  x5 = input[4];
+  x6 = input[1];
+  x7 = input[6];
+
+  // stage 1
+  s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+  s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+  s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+  s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+  s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+  s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+  s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+  s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+
+  x0 = dct_const_round_shift(s0 + s4);
+  x1 = dct_const_round_shift(s1 + s5);
+  x2 = dct_const_round_shift(s2 + s6);
+  x3 = dct_const_round_shift(s3 + s7);
+  x4 = dct_const_round_shift(s0 - s4);
+  x5 = dct_const_round_shift(s1 - s5);
+  x6 = dct_const_round_shift(s2 - s6);
+  x7 = dct_const_round_shift(s3 - s7);
+
+  // stage 2
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+  s6 = - cospi_24_64 * x6 + cospi_8_64 * x7;
+  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+
+  x0 = s0 + s2;
+  x1 = s1 + s3;
+  x2 = s0 - s2;
+  x3 = s1 - s3;
+  x4 = dct_const_round_shift(s4 + s6);
+  x5 = dct_const_round_shift(s5 + s7);
+  x6 = dct_const_round_shift(s4 - s6);
+  x7 = dct_const_round_shift(s5 - s7);
+
+  // stage 3
+  s2 = cospi_16_64 * (x2 + x3);
+  s3 = cospi_16_64 * (x2 - x3);
+  s6 = cospi_16_64 * (x6 + x7);
+  s7 = cospi_16_64 * (x6 - x7);
+
+  x2 = dct_const_round_shift(s2);
+  x3 = dct_const_round_shift(s3);
+  x6 = dct_const_round_shift(s6);
+  x7 = dct_const_round_shift(s7);
+
+  output[0] = x0;
+  output[1] = - x4;
+  output[2] = x6;
+  output[3] = - x2;
+  output[4] = x3;
+  output[5] = - x7;
+  output[6] = x5;
+  output[7] = - x1;
+}
+
+void vp9_short_fht8x8_c(int16_t *input, int16_t *output,
+                        int pitch, TX_TYPE tx_type) {
+  int16_t out[64];
+  int16_t *outptr = &out[0];
+  const int short_pitch = pitch >> 1;
+  int i, j;
+  int16_t temp_in[8], temp_out[8];
+
+  void (*fwdr)(int16_t*, int16_t*);
+  void (*fwdc)(int16_t*, int16_t*);
+
+  switch (tx_type) {
+    case ADST_ADST:
+      fwdc = &fadst8_1d;
+      fwdr = &fadst8_1d;
+      break;
+    case ADST_DCT:
+      fwdc = &fadst8_1d;
+      fwdr = &fdct8_1d;
+      break;
+    case DCT_ADST:
+      fwdc = &fdct8_1d;
+      fwdr = &fadst8_1d;
+      break;
+    case DCT_DCT:
+      fwdc = &fdct8_1d;
+      fwdr = &fdct8_1d;
+      break;
+    default:
+      assert(0);
+  }
+
+  // column transform
+  for (i = 0; i < 8; ++i) {
+    for (j = 0; j < 8; ++j)
+      temp_in[j] = input[j * short_pitch + i] << 2;
+    fwdc(temp_in, temp_out);
+    for (j = 0; j < 8; ++j)
+      outptr[j * 8 + i] = temp_out[j];
+  }
+
+  // row transform
+  for (i = 0; i < 8; ++i) {
+    for (j = 0; j < 8; ++j)
+      temp_in[j] = out[j + i * 8];
+    fwdr(temp_in, temp_out);
+    for (j = 0; j < 8; ++j)
+      output[j + i * 8] = temp_out[j] >> 1;
+  }
+}
+#endif
+
 void vp9_short_walsh4x4_x8_c(short *input, short *output, int pitch) {
   int i;
   int a1, b1, c1, d1;
@@ -1507,6 +1727,222 @@ void vp9_short_fdct16x16_c(int16_t *input, int16_t *out, int pitch) {
 #endif
 #endif
 
+#if CONFIG_INTHT16X16
+void fadst16_1d(int16_t *input, int16_t *output) {
+  int x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+  int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
+
+  x0 = input[15];
+  x1 = input[0];
+  x2 = input[13];
+  x3 = input[2];
+  x4 = input[11];
+  x5 = input[4];
+  x6 = input[9];
+  x7 = input[6];
+  x8 = input[7];
+  x9 = input[8];
+  x10 = input[5];
+  x11 = input[10];
+  x12 = input[3];
+  x13 = input[12];
+  x14 = input[1];
+  x15 = input[14];
+
+  // stage 1
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+  s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+  s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+  s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+  s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+  s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+  s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+  s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+  s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+  s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+  s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+  s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+
+  x0 = dct_const_round_shift(s0 + s8);
+  x1 = dct_const_round_shift(s1 + s9);
+  x2 = dct_const_round_shift(s2 + s10);
+  x3 = dct_const_round_shift(s3 + s11);
+  x4 = dct_const_round_shift(s4 + s12);
+  x5 = dct_const_round_shift(s5 + s13);
+  x6 = dct_const_round_shift(s6 + s14);
+  x7 = dct_const_round_shift(s7 + s15);
+  x8 = dct_const_round_shift(s0 - s8);
+  x9 = dct_const_round_shift(s1 - s9);
+  x10 = dct_const_round_shift(s2 - s10);
+  x11 = dct_const_round_shift(s3 - s11);
+  x12 = dct_const_round_shift(s4 - s12);
+  x13 = dct_const_round_shift(s5 - s13);
+  x14 = dct_const_round_shift(s6 - s14);
+  x15 = dct_const_round_shift(s7 - s15);
+
+  // stage 2
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = x4;
+  s5 = x5;
+  s6 = x6;
+  s7 = x7;
+  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+  s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
+  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
+  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+
+  x0 = s0 + s4;
+  x1 = s1 + s5;
+  x2 = s2 + s6;
+  x3 = s3 + s7;
+  x4 = s0 - s4;
+  x5 = s1 - s5;
+  x6 = s2 - s6;
+  x7 = s3 - s7;
+  x8 = dct_const_round_shift(s8 + s12);
+  x9 = dct_const_round_shift(s9 + s13);
+  x10 = dct_const_round_shift(s10 + s14);
+  x11 = dct_const_round_shift(s11 + s15);
+  x12 = dct_const_round_shift(s8 - s12);
+  x13 = dct_const_round_shift(s9 - s13);
+  x14 = dct_const_round_shift(s10 - s14);
+  x15 = dct_const_round_shift(s11 - s15);
+
+  // stage 3
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+  s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+  s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
+  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+  s8 = x8;
+  s9 = x9;
+  s10 = x10;
+  s11 = x11;
+  s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+  s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+  s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
+  s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+
+  x0 = s0 + s2;
+  x1 = s1 + s3;
+  x2 = s0 - s2;
+  x3 = s1 - s3;
+  x4 = dct_const_round_shift(s4 + s6);
+  x5 = dct_const_round_shift(s5 + s7);
+  x6 = dct_const_round_shift(s4 - s6);
+  x7 = dct_const_round_shift(s5 - s7);
+  x8 = s8 + s10;
+  x9 = s9 + s11;
+  x10 = s8 - s10;
+  x11 = s9 - s11;
+  x12 = dct_const_round_shift(s12 + s14);
+  x13 = dct_const_round_shift(s13 + s15);
+  x14 = dct_const_round_shift(s12 - s14);
+  x15 = dct_const_round_shift(s13 - s15);
+
+  // stage 4
+  s2 = (- cospi_16_64) * (x2 + x3);
+  s3 = cospi_16_64 * (x2 - x3);
+  s6 = cospi_16_64 * (x6 + x7);
+  s7 = cospi_16_64 * (- x6 + x7);
+  s10 = cospi_16_64 * (x10 + x11);
+  s11 = cospi_16_64 * (- x10 + x11);
+  s14 = (- cospi_16_64) * (x14 + x15);
+  s15 = cospi_16_64 * (x14 - x15);
+
+  x2 = dct_const_round_shift(s2);
+  x3 = dct_const_round_shift(s3);
+  x6 = dct_const_round_shift(s6);
+  x7 = dct_const_round_shift(s7);
+  x10 = dct_const_round_shift(s10);
+  x11 = dct_const_round_shift(s11);
+  x14 = dct_const_round_shift(s14);
+  x15 = dct_const_round_shift(s15);
+
+  output[0] = x0;
+  output[1] = - x8;
+  output[2] = x12;
+  output[3] = - x4;
+  output[4] = x6;
+  output[5] = x14;
+  output[6] = x10;
+  output[7] = x2;
+  output[8] = x3;
+  output[9] = x11;
+  output[10] = x15;
+  output[11] = x7;
+  output[12] = x5;
+  output[13] = - x13;
+  output[14] = x9;
+  output[15] = - x1;
+}
+
+void vp9_short_fht16x16_c(int16_t *input, int16_t *output,
+                          int pitch, TX_TYPE tx_type) {
+  int16_t out[256];
+  int16_t *outptr = &out[0];
+  const int short_pitch = pitch >> 1;
+  int i, j;
+  int16_t temp_in[16], temp_out[16];
+
+  void (*fwdr)(int16_t*, int16_t*);
+  void (*fwdc)(int16_t*, int16_t*);
+
+  switch (tx_type) {
+    case ADST_ADST:
+      fwdc = &fadst16_1d;
+      fwdr = &fadst16_1d;
+      break;
+    case ADST_DCT:
+      fwdc = &fadst16_1d;
+      fwdr = &fdct16_1d;
+      break;
+    case DCT_ADST:
+      fwdc = &fdct16_1d;
+      fwdr = &fadst16_1d;
+      break;
+    case DCT_DCT:
+      fwdc = &fdct16_1d;
+      fwdr = &fdct16_1d;
+      break;
+    default:
+      assert(0);
+  }
+
+  // column transform
+  for (i = 0; i < 16; ++i) {
+    for (j = 0; j < 16; ++j)
+      temp_in[j] = input[j * short_pitch + i];
+    fwdc(temp_in, temp_out);
+    for (j = 0; j < 16; ++j)
+      outptr[j * 16 + i] = temp_out[j];
+  }
+
+  // row transform
+  for (i = 0; i < 16; ++i) {
+    for (j = 0; j < 16; ++j)
+      temp_in[j] = out[j + i * 16];
+    fwdr(temp_in, temp_out);
+    for (j = 0; j < 16; ++j)
+      output[j + i * 16] = temp_out[j];
+  }
+}
+#endif
+
 #define TEST_INT_32x32_DCT 1
 
 #if !TEST_INT_32x32_DCT
diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c
index a52763080..e40127982 100644
--- a/vp9/encoder/vp9_encodeintra.c
+++ b/vp9/encoder/vp9_encodeintra.c
@@ -54,7 +54,11 @@ void vp9_encode_intra4x4block(MACROBLOCK *x, int ib) {
 
   tx_type = get_tx_type_4x4(&x->e_mbd, b);
   if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT4X4
+    vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type);
+#else
     vp9_fht(be->src_diff, 32, be->coeff, tx_type, 4);
+#endif
     vp9_ht_quantize_b_4x4(be, b, tx_type);
 #if CONFIG_INTHT4X4
     vp9_short_iht4x4(b->dqcoeff, b->diff, 32, tx_type);
@@ -153,8 +157,12 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
     tx_type = get_tx_type_8x8(xd, &xd->block[ib]);
     if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT
+      vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 32, tx_type);
+#else
       vp9_fht(be->src_diff, 32, (x->block + idx)->coeff, tx_type, 8);
+#endif
       x->quantize_b_8x8(x->block + idx, xd->block + idx);
 #if CONFIG_INTHT
@@ -175,7 +183,11 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
       be = &x->block[ib + iblock[i]];
       tx_type = get_tx_type_4x4(xd, b);
       if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT4X4
+        vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type);
+#else
         vp9_fht_c(be->src_diff, 32, be->coeff, tx_type, 4);
+#endif
         vp9_ht_quantize_b_4x4(be, b, tx_type);
 #if CONFIG_INTHT4X4
         vp9_short_iht4x4(b->dqcoeff, b->diff, 32, tx_type);
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index efe95c911..a4a0de6ed 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -174,7 +174,11 @@ void vp9_transform_mby_4x4(MACROBLOCK *x) {
     BLOCK *b = &x->block[i];
     TX_TYPE tx_type = get_tx_type_4x4(xd, &xd->block[i]);
     if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT4X4
+      vp9_short_fht4x4(b->src_diff, b->coeff, 32, tx_type);
+#else
       vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 4);
+#endif
     } else if (!(i & 1) && get_tx_type_4x4(xd, &xd->block[i + 1]) == DCT_DCT) {
       x->fwd_txm8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32);
@@ -209,7 +213,11 @@ void vp9_transform_mby_8x8(MACROBLOCK *x) {
     BLOCK *b = &x->block[i];
     tx_type = get_tx_type_8x8(xd, &xd->block[i]);
     if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT
+      vp9_short_fht8x8(b->src_diff, b->coeff, 32, tx_type);
+#else
       vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 8);
+#endif
     } else {
       x->fwd_txm8x8(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32);
@@ -219,7 +227,11 @@ void vp9_transform_mby_8x8(MACROBLOCK *x) {
     BLOCK *b = &x->block[i];
     tx_type = get_tx_type_8x8(xd, &xd->block[i]);
     if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT
+      vp9_short_fht8x8(b->src_diff, (b + 2)->coeff, 32, tx_type);
+#else
       vp9_fht_c(b->src_diff, 32, (b + 2)->coeff, tx_type, 8);
+#endif
     } else {
       x->fwd_txm8x8(&x->block[i].src_diff[0], &x->block[i + 2].coeff[0], 32);
@@ -247,7 +259,11 @@ void vp9_transform_mby_16x16(MACROBLOCK *x) {
   TX_TYPE tx_type = get_tx_type_16x16(xd, &xd->block[0]);
   vp9_clear_system_state();
   if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT16X16
+    vp9_short_fht16x16(b->src_diff, b->coeff, 32, tx_type);
+#else
     vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 16);
+#endif
   } else {
     x->fwd_txm16x16(&x->block[0].src_diff[0], &x->block[0].coeff[0], 32);
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 14b9a13db..824395e15 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1046,7 +1046,11 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, BLOCK *be,
     b->bmi.as_mode.first = mode;
     tx_type = get_tx_type_4x4(xd, b);
     if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT4X4
+      vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type);
+#else
       vp9_fht(be->src_diff, 32, be->coeff, tx_type, 4);
+#endif
       vp9_ht_quantize_b_4x4(be, b, tx_type);
     } else {
       x->fwd_txm4x4(be->src_diff, be->coeff, 32);
@@ -1346,7 +1350,11 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
     if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
       TX_TYPE tx_type = get_tx_type_8x8(xd, b);
       if (tx_type != DCT_DCT)
+#if CONFIG_INTHT
+        vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 32, tx_type);
+#else
         vp9_fht(be->src_diff, 32, (x->block + idx)->coeff, tx_type, 8);
+#endif
       else
         x->fwd_txm8x8(be->src_diff, (x->block + idx)->coeff, 32);
       x->quantize_b_8x8(x->block + idx, xd->block + idx);
@@ -1379,7 +1387,11 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
         be = &x->block[ib + iblock[i]];
         tx_type = get_tx_type_4x4(xd, b);
         if (tx_type != DCT_DCT) {
+#if CONFIG_INTHT4X4
+          vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type);
+#else
           vp9_fht_c(be->src_diff, 32, be->coeff, tx_type, 4);
+#endif
           vp9_ht_quantize_b_4x4(be, b, tx_type);
         } else if (!(i & 1) && get_tx_type_4x4(xd, b + 1) == DCT_DCT) {
           x->fwd_txm8x4(be->src_diff, be->coeff, 32);
diff --git a/vpxenc.c b/vpxenc.c
index 0973a65b3..2f3b6356d 100644
--- a/vpxenc.c
+++ b/vpxenc.c
@@ -2297,6 +2297,8 @@ static void get_cx_data(struct stream_state *stream,
       if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) {
         stream->frames_out++;
       }
+      if (!global->quiet)
+        fprintf(stderr, " %6luF", (unsigned long)pkt->data.frame.sz);
 
       update_rate_histogram(&stream->rate_hist, cfg, pkt);
       if (stream->config.write_webm) {
@@ -2359,6 +2361,8 @@ static void get_cx_data(struct stream_state *stream,
       stream->psnr_sse_total += pkt->data.psnr.sse[0];
       stream->psnr_samples_total += pkt->data.psnr.samples[0];
       for (i = 0; i < 4; i++) {
+        if (!global->quiet)
+          fprintf(stderr, "%.3f ", pkt->data.psnr.psnr[i]);
         stream->psnr_totals[i] += pkt->data.psnr.psnr[i];
       }
       stream->psnr_count++;
-- 
2.40.0
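
For reference (not part of the patch): the sinpi_*_9 multiplies in
fadst4_1d() are a 14-bit fixed-point factoring of the 4x4 ADST matrix
named in the commit message. The sketch below is a minimal floating-point
model of that kernel; fadst4_ref() is a hypothetical name, and the
(2/3)*sqrt(2) scale is inferred from the constants (e.g. sinpi_1_9 ==
5283 ~= 16384 * (2/3) * sqrt(2) * sin(pi/9)). Build with -lm.

#include <math.h>
#include <stdio.h>

/* Direct-form 4x4 ADST: out[k] = sum_n c * sin(pi*(2k+1)*(n+1)/9) * in[n],
 * where c = 2/sqrt(2N+1) * sqrt(2): the 2/sqrt(2N+1) = 2/3 factor makes
 * the basis orthonormal, and the extra sqrt(2) matches the "1-D transform
 * scaling factor is sqrt(2)" comment in fadst4_1d(). */
static void fadst4_ref(const double *in, double *out) {
  const double pi = 3.14159265358979323846;
  const double c = (2.0 / 3.0) * sqrt(2.0);
  int k, n;
  for (k = 0; k < 4; ++k) {
    out[k] = 0.0;
    for (n = 0; n < 4; ++n)
      out[k] += c * sin(pi * (2 * k + 1) * (n + 1) / 9.0) * in[n];
  }
}

int main(void) {
  /* A unit impulse selects one column of the transform matrix.  The same
   * values, scaled by 2^14, are exactly the sums fed into
   * dct_const_round_shift() inside fadst4_1d() for this input:
   * {5283, 13377, 15212, 9929}. */
  const double in[4] = { 1.0, 0.0, 0.0, 0.0 };
  double out[4];
  int k;
  fadst4_ref(in, out);
  for (k = 0; k < 4; ++k)
    printf("out[%d] = %.5f (x 16384 = %.0f)\n", k, out[k], out[k] * 16384.0);
  return 0;
}

The 8x8 and 16x16 paths share the same two-pass structure; their 1-D ADST
kernel is of the form sin(pi*(2k+1)*(2n+1)/(4N)), realized above with the
cospi_*_64 rotation constants rather than a direct matrix multiply.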