From: Dmitry Kovalev
Date: Fri, 28 Mar 2014 23:46:41 +0000 (-0700)
Subject: Moving encoder quantization parameters into separate struct.
X-Git-Tag: v1.4.0~1931^2
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=7bcfa31750865ec07c36ae2a1e66c3059983a1bb;p=libvpx

Moving encoder quantization parameters into separate struct.

Change-Id: I2a169535489aeda3943fb5a46ab53e7a12abaa36
---

diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 78c7f8ead..8b0f3cf3a 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -2231,7 +2231,7 @@ static void encode_without_recode_loop(VP9_COMP *cpi,
                                        int q) {
   VP9_COMMON *const cm = &cpi->common;
   vp9_clear_system_state();
-  vp9_set_quantizer(cpi, q);
+  vp9_set_quantizer(cm, q);

   // Set up entropy context depending on frame type. The decoder mandates
   // the use of the default context, index 0, for keyframes and inter
@@ -2242,7 +2242,7 @@ static void encode_without_recode_loop(VP9_COMP *cpi,
     setup_key_frame(cpi);
   } else {
     if (!cm->intra_only && !cm->error_resilient_mode && !cpi->use_svc)
-      cpi->common.frame_context_idx = cpi->refresh_alt_ref_frame;
+      cm->frame_context_idx = cpi->refresh_alt_ref_frame;
     setup_inter_frame(cm);
   }

@@ -2288,7 +2288,7 @@ static void encode_with_recode_loop(VP9_COMP *cpi,
   do {
     vp9_clear_system_state();

-    vp9_set_quantizer(cpi, q);
+    vp9_set_quantizer(cm, q);

     if (loop_count == 0) {
       // Set up entropy context depending on frame type. The decoder mandates
@@ -2840,7 +2840,7 @@ static void Pass1Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest,
   (void) frame_flags;

   vp9_rc_get_first_pass_params(cpi);
-  vp9_set_quantizer(cpi, find_fp_qindex());
+  vp9_set_quantizer(&cpi->common, find_fp_qindex());
   vp9_first_pass(cpi);
 }

diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index 6dbe4d474..1722b82e4 100644
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -280,23 +280,7 @@ typedef struct {
 } VP9_CONFIG;

 typedef struct VP9_COMP {
-  DECLARE_ALIGNED(16, int16_t, y_quant[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, y_quant_shift[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, y_zbin[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, y_round[QINDEX_RANGE][8]);
-
-  DECLARE_ALIGNED(16, int16_t, uv_quant[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, uv_quant_shift[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, uv_zbin[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
-
-#if CONFIG_ALPHA
-  DECLARE_ALIGNED(16, int16_t, a_quant[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, a_quant_shift[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, a_zbin[QINDEX_RANGE][8]);
-  DECLARE_ALIGNED(16, int16_t, a_round[QINDEX_RANGE][8]);
-#endif
-
+  QUANTS quants;
   MACROBLOCK mb;
   VP9_COMMON common;
   VP9_CONFIG oxcf;
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index 4ab8995e3..17993d31d 100644
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -153,6 +153,7 @@ static void invert_quant(int16_t *quant, int16_t *shift, int d) {

 void vp9_init_quantizer(VP9_COMP *cpi) {
   VP9_COMMON *const cm = &cpi->common;
+  QUANTS *const quants = &cpi->quants;
   int i, q, quant;

   for (q = 0; q < QINDEX_RANGE; q++) {
@@ -163,48 +164,49 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
       // y
       quant = i == 0 ? vp9_dc_quant(q, cm->y_dc_delta_q)
                      : vp9_ac_quant(q, 0);
-      invert_quant(&cpi->y_quant[q][i], &cpi->y_quant_shift[q][i], quant);
-      cpi->y_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
-      cpi->y_round[q][i] = (qrounding_factor * quant) >> 7;
+      invert_quant(&quants->y_quant[q][i], &quants->y_quant_shift[q][i], quant);
+      quants->y_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
+      quants->y_round[q][i] = (qrounding_factor * quant) >> 7;
       cm->y_dequant[q][i] = quant;

       // uv
       quant = i == 0 ? vp9_dc_quant(q, cm->uv_dc_delta_q)
                      : vp9_ac_quant(q, cm->uv_ac_delta_q);
-      invert_quant(&cpi->uv_quant[q][i], &cpi->uv_quant_shift[q][i], quant);
-      cpi->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
-      cpi->uv_round[q][i] = (qrounding_factor * quant) >> 7;
+      invert_quant(&quants->uv_quant[q][i],
+                   &quants->uv_quant_shift[q][i], quant);
+      quants->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
+      quants->uv_round[q][i] = (qrounding_factor * quant) >> 7;
       cm->uv_dequant[q][i] = quant;

 #if CONFIG_ALPHA
       // alpha
       quant = i == 0 ? vp9_dc_quant(q, cm->a_dc_delta_q)
                      : vp9_ac_quant(q, cm->a_ac_delta_q);
-      invert_quant(&cpi->a_quant[q][i], &cpi->a_quant_shift[q][i], quant);
-      cpi->a_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
-      cpi->a_round[q][i] = (qrounding_factor * quant) >> 7;
+      invert_quant(&quants->a_quant[q][i], &quants->a_quant_shift[q][i], quant);
+      quants->a_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
+      quants->a_round[q][i] = (qrounding_factor * quant) >> 7;
       cm->a_dequant[q][i] = quant;
 #endif
     }

     for (i = 2; i < 8; i++) {
-      cpi->y_quant[q][i] = cpi->y_quant[q][1];
-      cpi->y_quant_shift[q][i] = cpi->y_quant_shift[q][1];
-      cpi->y_zbin[q][i] = cpi->y_zbin[q][1];
-      cpi->y_round[q][i] = cpi->y_round[q][1];
+      quants->y_quant[q][i] = quants->y_quant[q][1];
+      quants->y_quant_shift[q][i] = quants->y_quant_shift[q][1];
+      quants->y_zbin[q][i] = quants->y_zbin[q][1];
+      quants->y_round[q][i] = quants->y_round[q][1];
       cm->y_dequant[q][i] = cm->y_dequant[q][1];

-      cpi->uv_quant[q][i] = cpi->uv_quant[q][1];
-      cpi->uv_quant_shift[q][i] = cpi->uv_quant_shift[q][1];
-      cpi->uv_zbin[q][i] = cpi->uv_zbin[q][1];
-      cpi->uv_round[q][i] = cpi->uv_round[q][1];
+      quants->uv_quant[q][i] = quants->uv_quant[q][1];
+      quants->uv_quant_shift[q][i] = quants->uv_quant_shift[q][1];
+      quants->uv_zbin[q][i] = quants->uv_zbin[q][1];
+      quants->uv_round[q][i] = quants->uv_round[q][1];
       cm->uv_dequant[q][i] = cm->uv_dequant[q][1];

 #if CONFIG_ALPHA
-      cpi->a_quant[q][i] = cpi->a_quant[q][1];
-      cpi->a_quant_shift[q][i] = cpi->a_quant_shift[q][1];
-      cpi->a_zbin[q][i] = cpi->a_zbin[q][1];
-      cpi->a_round[q][i] = cpi->a_round[q][1];
+      quants->a_quant[q][i] = quants->a_quant[q][1];
+      quants->a_quant_shift[q][i] = quants->a_quant_shift[q][1];
+      quants->a_zbin[q][i] = quants->a_zbin[q][1];
+      quants->a_round[q][i] = quants->a_round[q][1];
       cm->a_dequant[q][i] = cm->a_dequant[q][1];
 #endif
     }
@@ -213,7 +215,8 @@ void vp9_init_quantizer(VP9_COMP *cpi) {

 void vp9_init_plane_quantizers(VP9_COMP *cpi, MACROBLOCK *x) {
   const VP9_COMMON *const cm = &cpi->common;
-  MACROBLOCKD *xd = &x->e_mbd;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  QUANTS *const quants = &cpi->quants;
   const int segment_id = xd->mi_8x8[0]->mbmi.segment_id;
   const int qindex = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
   const int rdmult = vp9_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
@@ -221,19 +224,19 @@ void vp9_init_plane_quantizers(VP9_COMP *cpi, MACROBLOCK *x) {
   int i;

   // Y
-  x->plane[0].quant = cpi->y_quant[qindex];
-  x->plane[0].quant_shift = cpi->y_quant_shift[qindex];
-  x->plane[0].zbin = cpi->y_zbin[qindex];
-  x->plane[0].round = cpi->y_round[qindex];
+  x->plane[0].quant = quants->y_quant[qindex];
+  x->plane[0].quant_shift = quants->y_quant_shift[qindex];
+  x->plane[0].zbin = quants->y_zbin[qindex];
+  x->plane[0].round = quants->y_round[qindex];
   x->plane[0].zbin_extra = (int16_t)((cm->y_dequant[qindex][1] * zbin) >> 7);
   xd->plane[0].dequant = cm->y_dequant[qindex];

   // UV
   for (i = 1; i < 3; i++) {
-    x->plane[i].quant = cpi->uv_quant[qindex];
-    x->plane[i].quant_shift = cpi->uv_quant_shift[qindex];
-    x->plane[i].zbin = cpi->uv_zbin[qindex];
-    x->plane[i].round = cpi->uv_round[qindex];
+    x->plane[i].quant = quants->uv_quant[qindex];
+    x->plane[i].quant_shift = quants->uv_quant_shift[qindex];
+    x->plane[i].zbin = quants->uv_zbin[qindex];
+    x->plane[i].round = quants->uv_round[qindex];
     x->plane[i].zbin_extra = (int16_t)((cm->uv_dequant[qindex][1] * zbin) >> 7);
     xd->plane[i].dequant = cm->uv_dequant[qindex];
   }
@@ -273,9 +276,7 @@ void vp9_frame_init_quantizer(VP9_COMP *cpi) {
   vp9_init_plane_quantizers(cpi, &cpi->mb);
 }

-void vp9_set_quantizer(struct VP9_COMP *cpi, int q) {
-  VP9_COMMON *const cm = &cpi->common;
-
+void vp9_set_quantizer(VP9_COMMON *cm, int q) {
   // quantizer has to be reinitialized with vp9_init_quantizer() if any
   // delta_q changes.
   cm->base_qindex = q;
diff --git a/vp9/encoder/vp9_quantize.h b/vp9/encoder/vp9_quantize.h
index f356b125c..7d231dfd3 100644
--- a/vp9/encoder/vp9_quantize.h
+++ b/vp9/encoder/vp9_quantize.h
@@ -17,12 +17,30 @@ extern "C" {
 #endif

+typedef struct {
+  DECLARE_ALIGNED(16, int16_t, y_quant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_quant_shift[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_zbin[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_round[QINDEX_RANGE][8]);
+
+  DECLARE_ALIGNED(16, int16_t, uv_quant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_quant_shift[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_zbin[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
+
+#if CONFIG_ALPHA
+  DECLARE_ALIGNED(16, int16_t, a_quant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, a_quant_shift[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, a_zbin[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, a_round[QINDEX_RANGE][8]);
+#endif
+} QUANTS;
+
 void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
                                 const int16_t *scan, const int16_t *iscan);

 struct VP9_COMP;
-
-void vp9_set_quantizer(struct VP9_COMP *cpi, int q);
+struct VP9Common;

 void vp9_frame_init_quantizer(struct VP9_COMP *cpi);

@@ -32,6 +50,8 @@ void vp9_init_plane_quantizers(struct VP9_COMP *cpi, MACROBLOCK *x);

 void vp9_init_quantizer(struct VP9_COMP *cpi);

+void vp9_set_quantizer(struct VP9Common *cm, int q);
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
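
Usage sketch (hypothetical, not part of the commit): after this patch, vp9_set_quantizer() operates on the shared VP9_COMMON directly, and the per-qindex quantization tables live in the new QUANTS struct embedded in VP9_COMP as cpi->quants instead of as loose arrays on VP9_COMP. The helper below, hypothetical_requantize(), is an illustrative name that does not exist in libvpx; it assumes the libvpx encoder headers and source-tree include paths, and simply strings the refactored entry points together.

#include "vp9/common/vp9_onyxc_int.h"   // VP9_COMMON
#include "vp9/encoder/vp9_onyx_int.h"   // VP9_COMP, which now embeds QUANTS quants
#include "vp9/encoder/vp9_quantize.h"   // QUANTS, vp9_set_quantizer(), vp9_init_quantizer()

// Hypothetical helper (illustration only): move the encoder to a new base
// qindex using the interfaces as they look after this refactoring.
static void hypothetical_requantize(VP9_COMP *cpi, int new_qindex) {
  VP9_COMMON *const cm = &cpi->common;

  // vp9_set_quantizer() now needs only VP9_COMMON state.
  vp9_set_quantizer(cm, new_qindex);

  // The per-qindex lookup tables (y_quant, uv_zbin, ...) now live in
  // cpi->quants; vp9_init_quantizer() fills them for every qindex and only
  // has to be re-run when a delta_q value changes (done here for clarity).
  vp9_init_quantizer(cpi);

  // Point the MACROBLOCK's per-plane quant/zbin/round pointers at the rows
  // of cpi->quants (and cm->*_dequant) selected by the effective qindex.
  vp9_init_plane_quantizers(cpi, &cpi->mb);
}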