From 368fbc955d3d5fa5eb46181d2ca1a7de9e5192ca Mon Sep 17 00:00:00 2001
From: Urvang Joshi
Date: Mon, 17 Oct 2016 16:31:34 -0700
Subject: [PATCH] Fix warnings reported by -Wshadow: Part2b: more from av1
 directory

From code only part of nextgenv2 (and not aomedia)

Change-Id: I21f7478a59d525dff23747efe5238ded16b743d2
---
 av1/av1_cx_iface.c          |  2 +-
 av1/common/entropymode.c    |  2 -
 av1/common/reconintra.c     | 34 +++++++----------
 av1/decoder/decodeframe.c   |  8 ++--
 av1/encoder/bitstream.c     |  9 ++---
 av1/encoder/encodeframe.c   | 18 ++++-----
 av1/encoder/mbgraph.c       |  1 -
 av1/encoder/pickrst.c       |  2 -
 av1/encoder/quantize.c      |  4 +-
 av1/encoder/rdopt.c         | 74 +++++++++++++++++--------------
 av1/encoder/tokenize.c      |  3 +-
 av1/encoder/variance_tree.h | 12 +++---
 12 files changed, 73 insertions(+), 96 deletions(-)

diff --git a/av1/av1_cx_iface.c b/av1/av1_cx_iface.c
index d815ca75d..fae7d042a 100644
--- a/av1/av1_cx_iface.c
+++ b/av1/av1_cx_iface.c
@@ -871,7 +871,6 @@ static int write_superframe_index(aom_codec_alg_priv_t *ctx) {
     index_sz = 2 + (mag + 1) * (ctx->pending_frame_count - 1);
     if (ctx->pending_cx_data_sz + index_sz < ctx->cx_data_sz) {
       uint8_t *x = ctx->pending_cx_data + ctx->pending_cx_data_sz;
-      int i, j;
 #ifdef TEST_SUPPLEMENTAL_SUPERFRAME_DATA
       uint8_t marker_test = 0xc0;
       int mag_test = 2;  // 1 - 4
@@ -890,6 +889,7 @@ static int write_superframe_index(aom_codec_alg_priv_t *ctx) {
       *x++ = marker;
       for (i = 0; i < ctx->pending_frame_count - 1; i++) {
         unsigned int this_sz;
+        int j;

         assert(ctx->pending_frame_sizes[i] > 0);
         this_sz = (unsigned int)ctx->pending_frame_sizes[i] - 1;
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index 157f00f3c..01bcde97d 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -1483,7 +1483,6 @@ void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {

 #if CONFIG_SUPERTX
   for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
-    int j;
     for (j = 1; j < TX_SIZES; ++j) {
       fc->supertx_prob[i][j] = av1_mode_mv_merge_probs(
           pre_fc->supertx_prob[i][j], counts->supertx[i][j]);
@@ -1577,7 +1576,6 @@ void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
   }
   for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
     if (use_intra_ext_tx_for_txsize[s][i]) {
-      int j;
       for (j = 0; j < INTRA_MODES; ++j)
         aom_tree_merge_probs(
             av1_ext_tx_intra_tree[s], pre_fc->intra_ext_tx_prob[s][i][j],
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index d6bd87de8..22479b6ec 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -702,7 +702,7 @@ static void filter_intra_predictors_4tap(uint8_t *dst, ptrdiff_t stride, int bs,
                                          const uint8_t *above,
                                          const uint8_t *left, int mode) {
   int k, r, c;
-  int pred[33][65];
+  int preds[33][65];
   int mean, ipred;
   const TX_SIZE tx_size =
       (bs == 32) ? TX_32X32
@@ -721,20 +721,20 @@ static void filter_intra_predictors_4tap(uint8_t *dst, ptrdiff_t stride, int bs,
   }
   mean = (mean + bs) / (2 * bs);

-  for (r = 0; r < bs; ++r) pred[r + 1][0] = (int)left[r] - mean;
+  for (r = 0; r < bs; ++r) preds[r + 1][0] = (int)left[r] - mean;

-  for (c = 0; c < 2 * bs + 1; ++c) pred[0][c] = (int)above[c - 1] - mean;
+  for (c = 0; c < 2 * bs + 1; ++c) preds[0][c] = (int)above[c - 1] - mean;

   for (r = 1; r < bs + 1; ++r)
     for (c = 1; c < 2 * bs + 1 - r; ++c) {
-      ipred = c0 * pred[r - 1][c] + c1 * pred[r][c - 1] +
-              c2 * pred[r - 1][c - 1] + c3 * pred[r - 1][c + 1];
-      pred[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
+      ipred = c0 * preds[r - 1][c] + c1 * preds[r][c - 1] +
+              c2 * preds[r - 1][c - 1] + c3 * preds[r - 1][c + 1];
+      preds[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
     }

   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
-      ipred = pred[r + 1][c + 1] + mean;
+      ipred = preds[r + 1][c + 1] + mean;
       dst[c] = clip_pixel(ipred);
     }
     dst += stride;
@@ -997,7 +997,7 @@ static void highbd_filter_intra_predictors_4tap(uint16_t *dst, ptrdiff_t stride,
                                                 const uint16_t *left, int mode,
                                                 int bd) {
   int k, r, c;
-  int pred[33][65];
+  int preds[33][65];
   int mean, ipred;
   const TX_SIZE tx_size =
       (bs == 32) ? TX_32X32
@@ -1016,20 +1016,20 @@ static void highbd_filter_intra_predictors_4tap(uint16_t *dst, ptrdiff_t stride,
   }
   mean = (mean + bs) / (2 * bs);

-  for (r = 0; r < bs; ++r) pred[r + 1][0] = (int)left[r] - mean;
+  for (r = 0; r < bs; ++r) preds[r + 1][0] = (int)left[r] - mean;

-  for (c = 0; c < 2 * bs + 1; ++c) pred[0][c] = (int)above[c - 1] - mean;
+  for (c = 0; c < 2 * bs + 1; ++c) preds[0][c] = (int)above[c - 1] - mean;

   for (r = 1; r < bs + 1; ++r)
     for (c = 1; c < 2 * bs + 1 - r; ++c) {
-      ipred = c0 * pred[r - 1][c] + c1 * pred[r][c - 1] +
-              c2 * pred[r - 1][c - 1] + c3 * pred[r - 1][c + 1];
-      pred[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
+      ipred = c0 * preds[r - 1][c] + c1 * preds[r][c - 1] +
+              c2 * preds[r - 1][c - 1] + c3 * preds[r - 1][c + 1];
+      preds[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
     }

   for (r = 0; r < bs; ++r) {
     for (c = 0; c < bs; ++c) {
-      ipred = pred[r + 1][c + 1] + mean;
+      ipred = preds[r + 1][c + 1] + mean;
       dst[c] = clip_pixel_highbd(ipred, bd);
     }
     dst += stride;
@@ -1188,8 +1188,6 @@ static void build_intra_predictors_high(
   }

   if (ext_intra_mode_info->use_ext_intra_mode[plane != 0]) {
-    EXT_INTRA_MODE ext_intra_mode =
-        ext_intra_mode_info->ext_intra_mode[plane != 0];
     need_left = ext_intra_extend_modes[ext_intra_mode] & NEED_LEFT;
     need_above = ext_intra_extend_modes[ext_intra_mode] & NEED_ABOVE;
   }
@@ -1202,7 +1200,6 @@ static void build_intra_predictors_high(
   assert(n_bottomleft_px >= 0);

   if ((!need_above && n_left_px == 0) || (!need_left && n_top_px == 0)) {
-    int i;
     const int val = (n_left_px == 0) ? base + 1 : base - 1;
     for (i = 0; i < bs; ++i) {
       aom_memset16(dst, val, bs);
@@ -1351,8 +1348,6 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
   }

   if (ext_intra_mode_info->use_ext_intra_mode[plane != 0]) {
-    EXT_INTRA_MODE ext_intra_mode =
-        ext_intra_mode_info->ext_intra_mode[plane != 0];
     need_left = ext_intra_extend_modes[ext_intra_mode] & NEED_LEFT;
     need_above = ext_intra_extend_modes[ext_intra_mode] & NEED_ABOVE;
   }
@@ -1373,7 +1368,6 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
   assert(n_bottomleft_px >= 0);

   if ((!need_above && n_left_px == 0) || (!need_left && n_top_px == 0)) {
-    int i;
     const int val = (n_left_px == 0) ? 129 : 127;
     for (i = 0; i < bs; ++i) {
       memset(dst, val, bs);
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 8a2794701..f853866bc 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -1308,8 +1308,8 @@ static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
       const BLOCK_SIZE plane_bsize =
           get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
       const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
-      int bw = num_4x4_blocks_wide_txsize_lookup[max_tx_size];
-      int bh = num_4x4_blocks_high_txsize_lookup[max_tx_size];
+      const int bw_var_tx = num_4x4_blocks_wide_txsize_lookup[max_tx_size];
+      const int bh_var_tx = num_4x4_blocks_high_txsize_lookup[max_tx_size];
       const int step = num_4x4_blocks_txsize_lookup[max_tx_size];
       int block = 0;
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
@@ -1333,8 +1333,8 @@ static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
                                   plane, row, col, tx_size);
       } else {
 #endif
-        for (row = 0; row < num_4x4_h; row += bh) {
-          for (col = 0; col < num_4x4_w; col += bw) {
+        for (row = 0; row < num_4x4_h; row += bh_var_tx) {
+          for (col = 0; col < num_4x4_w; col += bw_var_tx) {
             decode_reconstruct_tx(xd, r, mbmi, plane, plane_bsize, block, row,
                                   col, max_tx_size, &eobtotal);
             block += step;
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 799cfe4ad..c361dbf3f 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -1697,9 +1697,9 @@ static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
 #endif
       const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
       const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
-      int bw = num_4x4_blocks_wide_lookup[txb_size];
       int block = 0;
       const int step = num_4x4_blocks_txsize_lookup[max_tx_size];
+      bw = num_4x4_blocks_wide_lookup[txb_size];
       for (row = 0; row < num_4x4_h; row += bw) {
         for (col = 0; col < num_4x4_w; col += bw) {
           pack_txb_tokens(w, tok, tok_end, xd, mbmi, plane, plane_bsize,
@@ -1711,8 +1711,8 @@ static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
       TX_SIZE tx =
           plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane]) : m->mbmi.tx_size;
       BLOCK_SIZE txb_size = txsize_to_bsize[tx];
-      int bw = num_4x4_blocks_wide_lookup[txb_size];
-      int bh = num_4x4_blocks_high_lookup[txb_size];
+      bw = num_4x4_blocks_wide_lookup[txb_size];
+      bh = num_4x4_blocks_high_lookup[txb_size];

       for (row = 0; row < num_4x4_h; row += bh)
         for (col = 0; col < num_4x4_w; col += bw)
@@ -2295,7 +2295,6 @@ static void update_coef_probs_subframe(
           for (t = 0; t < entropy_nodes_update; ++t) {
             aom_prob newp = new_coef_probs[i][j][k][l][t];
             aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
-            const aom_prob upd = DIFF_UPDATE_PROB;
             int s;
             int u = 0;

@@ -2418,8 +2417,6 @@ static void update_coef_probs(AV1_COMP *cpi, aom_writer *w) {
 #if CONFIG_ENTROPY
     if (cm->do_subframe_update &&
         cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
-      unsigned int eob_counts_copy[PLANE_TYPES][REF_TYPES][COEF_BANDS]
-                                  [COEFF_CONTEXTS];
       av1_coeff_count coef_counts_copy[PLANE_TYPES];
       av1_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
       av1_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index 47c173f12..297c35427 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -2705,8 +2705,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td,
 #if CONFIG_SUPERTX
       int rt_nocoef = 0;
 #endif
-      RD_SEARCH_MACROBLOCK_CONTEXT x_ctx;
-
       if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
         continue;
@@ -3140,12 +3138,12 @@ static void rd_test_partition3(
 #else
   if (sum_rdc.rdcost < best_rdc->rdcost) {
 #endif
-    PICK_MODE_CONTEXT *ctx = &ctxs[0];
-    update_state(cpi, td, ctx, mi_row0, mi_col0, subsize0, 1);
+    PICK_MODE_CONTEXT *ctx_0 = &ctxs[0];
+    update_state(cpi, td, ctx_0, mi_row0, mi_col0, subsize0, 1);
     encode_superblock(cpi, td, tp, DRY_RUN_NORMAL, mi_row0, mi_col0, subsize0,
-                      ctx, NULL);
+                      ctx_0, NULL);

-    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx_0);

 #if CONFIG_SUPERTX
     rd_pick_sb_modes(cpi, tile_data, x, mi_row1, mi_col1, &this_rdc,
@@ -3181,12 +3179,12 @@ static void rd_test_partition3(
 #else
   if (sum_rdc.rdcost < best_rdc->rdcost) {
 #endif
-    PICK_MODE_CONTEXT *ctx = &ctxs[1];
-    update_state(cpi, td, ctx, mi_row1, mi_col1, subsize1, 1);
+    PICK_MODE_CONTEXT *ctx_1 = &ctxs[1];
+    update_state(cpi, td, ctx_1, mi_row1, mi_col1, subsize1, 1);
     encode_superblock(cpi, td, tp, DRY_RUN_NORMAL, mi_row1, mi_col1, subsize1,
-                      ctx, NULL);
+                      ctx_1, NULL);

-    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx_1);

 #if CONFIG_SUPERTX
     rd_pick_sb_modes(cpi, tile_data, x, mi_row2, mi_col2, &this_rdc,
diff --git a/av1/encoder/mbgraph.c b/av1/encoder/mbgraph.c
index c1ccb9598..9bbed2b48 100644
--- a/av1/encoder/mbgraph.c
+++ b/av1/encoder/mbgraph.c
@@ -110,7 +110,6 @@ static int do_16x16_motion_search(AV1_COMP *cpi, const MV *ref_mv, int mb_row,
   // If the current best reference mv is not centered on 0,0 then do a 0,0
   // based search as well.
   if (ref_mv->row != 0 || ref_mv->col != 0) {
-    unsigned int tmp_err;
     MV zero_ref_mv = { 0, 0 };

     tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, mb_row, mb_col);
diff --git a/av1/encoder/pickrst.c b/av1/encoder/pickrst.c
index 28bdcc3e7..62303b7f5 100644
--- a/av1/encoder/pickrst.c
+++ b/av1/encoder/pickrst.c
@@ -351,7 +351,6 @@ static void update_a_sep_sym(double **Mc, double **Hc, double *a, double *b) {
   memset(A, 0, sizeof(A));
   memset(B, 0, sizeof(B));
   for (i = 0; i < RESTORATION_WIN; i++) {
-    int j;
     for (j = 0; j < RESTORATION_WIN; ++j) {
       const int jj = wrap_index(j);
       A[jj] += Mc[i][j] * b[i];
@@ -399,7 +398,6 @@ static void update_b_sep_sym(double **Mc, double **Hc, double *a, double *b) {
   memset(A, 0, sizeof(A));
   memset(B, 0, sizeof(B));
   for (i = 0; i < RESTORATION_WIN; i++) {
-    int j;
     const int ii = wrap_index(i);
     for (j = 0; j < RESTORATION_WIN; j++) A[ii] += Mc[i][j] * a[j];
   }
diff --git a/av1/encoder/quantize.c b/av1/encoder/quantize.c
index f3de6ad28..827e6d853 100644
--- a/av1/encoder/quantize.c
+++ b/av1/encoder/quantize.c
@@ -1115,9 +1115,9 @@ void av1_init_quantizer(AV1_COMP *cpi) {
 #if CONFIG_NEW_QUANT
     for (dq = 0; dq < QUANT_PROFILES; dq++) {
       for (i = 0; i < COEF_BANDS; i++) {
-        const int quant = cpi->y_dequant[q][i != 0];
+        const int y_quant = cpi->y_dequant[q][i != 0];
         const int uvquant = cpi->uv_dequant[q][i != 0];
-        av1_get_dequant_val_nuq(quant, i, cpi->y_dequant_val_nuq[dq][q][i],
+        av1_get_dequant_val_nuq(y_quant, i, cpi->y_dequant_val_nuq[dq][q][i],
                                 quants->y_cuml_bins_nuq[dq][q][i], dq);
         av1_get_dequant_val_nuq(uvquant, i, cpi->uv_dequant_val_nuq[dq][q][i],
                                 quants->uv_cuml_bins_nuq[dq][q][i], dq);
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 719e75a8d..2f9c42aa7 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -1425,7 +1425,7 @@ static int64_t choose_tx_size_fix_type(const AV1_COMP *const cpi, BLOCK_SIZE bs,
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
   if (evaluate_rect_tx) {
     const TX_SIZE rect_tx_size = max_txsize_rect_lookup[bs];
-    const int ext_tx_set = get_ext_tx_set(rect_tx_size, bs, 1);
+    ext_tx_set = get_ext_tx_set(rect_tx_size, bs, 1);
     if (ext_tx_used_inter[ext_tx_set][tx_type]) {
       rd = txfm_yrd(cpi, x, &r, &d, &s, &sse, ref_best_rd, bs, tx_type,
                     rect_tx_size);
@@ -2499,7 +2499,7 @@ static const uint8_t mode_to_angle_bin[INTRA_MODES] = {

 static void angle_estimation(const uint8_t *src, int src_stride, int rows,
                              int cols, uint8_t *directional_mode_skip_mask) {
-  int i, r, c, index, dx, dy, temp, sn, remd, quot;
+  int i, r, c, dx, dy, temp, sn, remd, quot;
   uint64_t hist[DIRECTIONAL_MODES];
   uint64_t hist_sum = 0;

@@ -2507,6 +2507,7 @@ static void angle_estimation(const uint8_t *src, int src_stride, int rows,
   src += src_stride;
   for (r = 1; r < rows; ++r) {
     for (c = 1; c < cols; ++c) {
+      uint8_t index;
       dx = src[c] - src[c - 1];
       dy = src[c] - src[c - src_stride];
       temp = dx * dx + dy * dy;
@@ -2529,7 +2530,7 @@ static void angle_estimation(const uint8_t *src, int src_stride, int rows,
   for (i = 0; i < DIRECTIONAL_MODES; ++i) hist_sum += hist[i];
   for (i = 0; i < INTRA_MODES; ++i) {
     if (i != DC_PRED && i != TM_PRED) {
-      int index = mode_to_angle_bin[i];
+      const uint8_t index = mode_to_angle_bin[i];
       uint64_t score = 2 * hist[index];
       int weight = 2;
       if (index > 0) {
@@ -2550,7 +2551,7 @@ static void angle_estimation(const uint8_t *src, int src_stride, int rows,
 static void highbd_angle_estimation(const uint8_t *src8, int src_stride,
                                     int rows, int cols,
                                     uint8_t *directional_mode_skip_mask) {
-  int i, r, c, index, dx, dy, temp, sn, remd, quot;
+  int i, r, c, dx, dy, temp, sn, remd, quot;
   uint64_t hist[DIRECTIONAL_MODES];
   uint64_t hist_sum = 0;
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -2559,6 +2560,7 @@ static void highbd_angle_estimation(const uint8_t *src8, int src_stride,
   src += src_stride;
   for (r = 1; r < rows; ++r) {
     for (c = 1; c < cols; ++c) {
+      uint8_t index;
       dx = src[c] - src[c - 1];
       dy = src[c] - src[c - src_stride];
       temp = dx * dx + dy * dy;
@@ -2581,7 +2583,7 @@ static void highbd_angle_estimation(const uint8_t *src8, int src_stride,
   for (i = 0; i < DIRECTIONAL_MODES; ++i) hist_sum += hist[i];
   for (i = 0; i < INTRA_MODES; ++i) {
     if (i != DC_PRED && i != TM_PRED) {
-      int index = mode_to_angle_bin[i];
+      const uint8_t index = mode_to_angle_bin[i];
       uint64_t score = 2 * hist[index];
       int weight = 2;
       if (index > 0) {
@@ -3111,7 +3113,6 @@ static void select_tx_block(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
   BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
   int bsl = b_height_log2_lookup[bsize];
   int sub_step = num_4x4_blocks_txsize_lookup[tx_size - 1];
-  int i;
   int this_rate;
   int64_t this_dist;
   int64_t this_bsse;
@@ -3250,7 +3251,7 @@ static int64_t select_tx_size_fix_type(const AV1_COMP *cpi, MACROBLOCK *x,
 #if CONFIG_EXT_TX && CONFIG_RECT_TX
   if (is_rect_tx_allowed(xd, mbmi)) {
     int rate_rect_tx, skippable_rect_tx = 0;
-    int64_t dist_rect_tx, sse_rect_tx, rd, rd_rect_tx;
+    int64_t dist_rect_tx, sse_rect_tx, rd_rect_tx;
     int tx_size_cat = inter_tx_size_cat_lookup[bsize];
     TX_SIZE tx_size = max_txsize_rect_lookup[bsize];
     TX_SIZE var_tx_size = mbmi->tx_size;
@@ -3531,7 +3532,6 @@ static int inter_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
 #endif  // CONFIG_EXT_TX && CONFIG_RECT_TX

   if (is_inter_block(mbmi) && is_cost_valid) {
-    int plane;
     for (plane = 1; plane < MAX_MB_PLANE; ++plane)
       av1_subtract_plane(x, bsize, plane);
   }
@@ -6099,6 +6099,7 @@ static void do_masked_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
   const YV12_BUFFER_CONFIG *scaled_ref_frame =
       av1_get_scaled_ref_frame(cpi, ref);
+  int i;

   MV pred_mv[3];
   pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -6110,7 +6111,6 @@ static void do_masked_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
 #endif

   if (scaled_ref_frame) {
-    int i;
     // Swap out the reference frame for a version that's been scaled to
     // match the resolution of the current frame, allowing the existing
     // motion search code to be used without additional modifications.
@@ -6152,7 +6152,6 @@ static void do_masked_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,

     // prev_mv_sad is not setup for dynamically scaled frames.
     if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
-      int i;
       for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
         if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
           x->pred_mv[ref].row = 0;
@@ -6160,9 +6159,9 @@ static void do_masked_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
           tmp_mv->as_int = INVALID_MV;

           if (scaled_ref_frame) {
-            int i;
-            for (i = 0; i < MAX_MB_PLANE; ++i)
-              xd->plane[i].pre[ref_idx] = backup_yv12[i];
+            int j;
+            for (j = 0; j < MAX_MB_PLANE; ++j)
+              xd->plane[j].pre[ref_idx] = backup_yv12[j];
           }
           return;
         }
@@ -6201,7 +6200,6 @@ static void do_masked_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
   x->pred_mv[ref] = tmp_mv->as_mv;

   if (scaled_ref_frame) {
-    int i;
     for (i = 0; i < MAX_MB_PLANE; i++)
       xd->plane[i].pre[ref_idx] = backup_yv12[i];
   }
@@ -7121,21 +7119,21 @@ static int64_t handle_inter_mode(
 #endif  // CONFIG_MOTION_VAR

   if (is_comp_pred && is_interinter_wedge_used(bsize)) {
-    int rate_sum, rs;
+    int rate_sum, rs2;
     int64_t dist_sum;
     int64_t best_rd_nowedge = INT64_MAX;
     int64_t best_rd_wedge = INT64_MAX;
     int tmp_skip_txfm_sb;
     int64_t tmp_skip_sse_sb;

-    rs = av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
+    rs2 = av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
     mbmi->use_wedge_interinter = 0;
     av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
     av1_subtract_plane(x, bsize, 0);
     rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                              &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
     if (rd != INT64_MAX)
-      rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+      rd = RDCOST(x->rdmult, x->rddiv, rs2 + rate_mv + rate_sum, dist_sum);
     best_rd_nowedge = rd;

     // Disbale wedge search if source variance is small
@@ -7148,8 +7146,8 @@ static int64_t handle_inter_mode(
       int strides[1] = { bw };

       mbmi->use_wedge_interinter = 1;
-      rs = av1_cost_literal(get_interinter_wedge_bits(bsize)) +
-           av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+      rs2 = av1_cost_literal(get_interinter_wedge_bits(bsize)) +
+            av1_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
       av1_build_inter_predictors_for_planes_single_buf(
           xd, bsize, 0, 0, mi_row, mi_col, 0, preds0, strides);
@@ -7158,7 +7156,7 @@ static int64_t handle_inter_mode(

       // Choose the best wedge
       best_rd_wedge = pick_interinter_wedge(cpi, x, bsize, pred0, pred1);
-      best_rd_wedge += RDCOST(x->rdmult, x->rddiv, rs + rate_mv, 0);
+      best_rd_wedge += RDCOST(x->rdmult, x->rddiv, rs2 + rate_mv, 0);

       if (have_newmv_in_inter_mode(this_mode)) {
         int_mv tmp_mv[2];
@@ -7189,7 +7187,8 @@ static int64_t handle_inter_mode(
         av1_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
         model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
                         &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
-        rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
+        rd =
+            RDCOST(x->rdmult, x->rddiv, rs2 + tmp_rate_mv + rate_sum, dist_sum);
         if (rd < best_rd_wedge) {
           best_rd_wedge = rd;
         } else {
@@ -7204,7 +7203,7 @@ static int64_t handle_inter_mode(
             estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                                 &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
         if (rd != INT64_MAX)
-          rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum,
-                      dist_sum);
+          rd = RDCOST(x->rdmult, x->rddiv, rs2 + tmp_rate_mv + rate_sum,
+                      dist_sum);
         best_rd_wedge = rd;
@@ -7229,7 +7228,7 @@ static int64_t handle_inter_mode(
             estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                                 &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
         if (rd != INT64_MAX)
-          rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+          rd = RDCOST(x->rdmult, x->rddiv, rs2 + rate_mv + rate_sum, dist_sum);
         best_rd_wedge = rd;
         if (best_rd_wedge < best_rd_nowedge) {
           mbmi->use_wedge_interinter = 1;
@@ -7477,7 +7476,6 @@ static int64_t handle_inter_mode(
   for (mbmi->motion_mode = SIMPLE_TRANSLATION;
        mbmi->motion_mode < (allow_motvar ? MOTION_MODES : 1);
        mbmi->motion_mode++) {
-    int64_t tmp_rd;
 #if CONFIG_EXT_INTER
     int tmp_rate2 = mbmi->motion_mode != SIMPLE_TRANSLATION ? rate2_bmc_nocoeff
                                                             : rate2_nocoeff;
@@ -8215,8 +8213,8 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
   unsigned int best_pred_sse = UINT_MAX;
   PREDICTION_MODE best_intra_mode = DC_PRED;
   int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
-  int64_t dist_uv[TX_SIZES];
-  int skip_uv[TX_SIZES];
+  int64_t dist_uvs[TX_SIZES];
+  int skip_uvs[TX_SIZES];
   PREDICTION_MODE mode_uv[TX_SIZES];
 #if CONFIG_PALETTE
   PALETTE_MODE_INFO pmi_uv[TX_SIZES];
@@ -8536,7 +8534,6 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
     int rate2 = 0, rate_y = 0, rate_uv = 0;
     int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
     int skippable = 0;
-    int i;
     int this_skip2 = 0;
     int64_t total_sse = INT64_MAX;
 #if CONFIG_REF_MV
@@ -8767,8 +8764,8 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
                                    [pd->subsampling_y];
       if (rate_uv_intra[uv_tx] == INT_MAX) {
         choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
-                             &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
-                             &skip_uv[uv_tx], &mode_uv[uv_tx]);
+                             &rate_uv_tokenonly[uv_tx], &dist_uvs[uv_tx],
+                             &skip_uvs[uv_tx], &mode_uv[uv_tx]);
 #if CONFIG_PALETTE
         if (cm->allow_screen_content_tools) pmi_uv[uv_tx] = *pmi;
 #endif  // CONFIG_PALETTE
@@ -8780,8 +8777,8 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
       }

       rate_uv = rate_uv_tokenonly[uv_tx];
-      distortion_uv = dist_uv[uv_tx];
-      skippable = skippable && skip_uv[uv_tx];
+      distortion_uv = dist_uvs[uv_tx];
+      skippable = skippable && skip_uvs[uv_tx];
       mbmi->uv_mode = mode_uv[uv_tx];
 #if CONFIG_PALETTE
       if (cm->allow_screen_content_tools) {
@@ -8999,7 +8996,6 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
       };
       int dummy_single_skippable[MB_MODE_COUNT]
                                 [TOTAL_REFS_PER_FRAME] = { { 0 } };
-      int dummy_disable_skip = 0;
 #if CONFIG_EXT_INTER
       int_mv dummy_single_newmvs[2][TOTAL_REFS_PER_FRAME] = { { { 0 } },
                                                               { { 0 } } };
@@ -9418,8 +9414,8 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
                                    [xd->plane[1].subsampling_y];
       if (rate_uv_intra[uv_tx] == INT_MAX) {
         choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
-                             &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
-                             &skip_uv[uv_tx], &mode_uv[uv_tx]);
+                             &rate_uv_tokenonly[uv_tx], &dist_uvs[uv_tx],
+                             &skip_uvs[uv_tx], &mode_uv[uv_tx]);
         pmi_uv[uv_tx] = *pmi;
 #if CONFIG_EXT_INTRA
         ext_intra_mode_info_uv[uv_tx] = mbmi->ext_intra_mode_info;
@@ -9441,8 +9437,8 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
             ext_intra_mode_info_uv[uv_tx].ext_intra_mode[1];
       }
 #endif  // CONFIG_EXT_INTRA

-      skippable = skippable && skip_uv[uv_tx];
-      distortion2 = distortion_y + dist_uv[uv_tx];
+      skippable = skippable && skip_uvs[uv_tx];
+      distortion2 = distortion_y + dist_uvs[uv_tx];
       rate2 = rate_y + rate_overhead + rate_uv_intra[uv_tx];
       rate2 += ref_costs_single[INTRA_FRAME];
@@ -9487,8 +9483,8 @@ PALETTE_EXIT:
       !dc_skipped && best_mode_index >= 0 &&
       best_intra_rd < (best_rd + (best_rd >> 3))) {
     pick_ext_intra_interframe(
-        cpi, x, ctx, bsize, rate_uv_intra, rate_uv_tokenonly, dist_uv, skip_uv,
-        mode_uv, ext_intra_mode_info_uv, uv_angle_delta,
+        cpi, x, ctx, bsize, rate_uv_intra, rate_uv_tokenonly, dist_uvs,
+        skip_uvs, mode_uv, ext_intra_mode_info_uv, uv_angle_delta,
 #if CONFIG_PALETTE
         pmi_uv, palette_ctx,
 #endif  // CONFIG_PALETTE
@@ -9526,7 +9522,6 @@ PALETTE_EXIT:
 #endif  // CONFIG_GLOBAL_MOTION
 #if CONFIG_REF_MV
   if (!comp_pred_mode) {
-    int i;
     int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
                       ? AOMMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
                       : INT_MAX;
@@ -10011,7 +10006,6 @@ void av1_rd_pick_inter_mode_sub8x8(const struct AV1_COMP *cpi,
     int rate2 = 0, rate_y = 0, rate_uv = 0;
     int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
     int skippable = 0;
-    int i;
     int this_skip2 = 0;
     int64_t total_sse = INT_MAX;

diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
index 4e16d922a..e95e52b86 100644
--- a/av1/encoder/tokenize.c
+++ b/av1/encoder/tokenize.c
@@ -613,8 +613,7 @@ void tokenize_vartx(ThreadData *td, TOKENEXTRA **t, RUN_TYPE dry_run,
                 : mbmi->inter_tx_size[tx_row][tx_col];

   if (tx_size == plane_tx_size) {
-    const struct macroblockd_plane *const pd = &xd->plane[plane];
-    BLOCK_SIZE plane_bsize = get_plane_block_size(mbmi->sb_type, pd);
+    plane_bsize = get_plane_block_size(mbmi->sb_type, pd);
     if (!dry_run)
       tokenize_b(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
     else if (dry_run == DRY_RUN_NORMAL)
diff --git a/av1/encoder/variance_tree.h b/av1/encoder/variance_tree.h
index 728d7f481..63970846a 100644
--- a/av1/encoder/variance_tree.h
+++ b/av1/encoder/variance_tree.h
@@ -31,12 +31,12 @@ typedef struct {
   int64_t sum_error;
   int log2_count;
   int variance;
-} var;
+} VAR;

 typedef struct {
-  var none;
-  var horz[2];
-  var vert[2];
+  VAR none;
+  VAR horz[2];
+  VAR vert[2];
 } partition_variance;

 typedef struct VAR_TREE {
@@ -59,7 +59,7 @@ void av1_setup_var_tree(struct AV1Common *cm, struct ThreadData *td);
 void av1_free_var_tree(struct ThreadData *td);

 // Set variance values given sum square error, sum error, count.
-static INLINE void fill_variance(int64_t s2, int64_t s, int c, var *v) {
+static INLINE void fill_variance(int64_t s2, int64_t s, int c, VAR *v) {
   v->sum_square_error = s2;
   v->sum_error = s;
   v->log2_count = c;
@@ -69,7 +69,7 @@ static INLINE void fill_variance(int64_t s2, int64_t s, int c, var *v) {
                        v->log2_count);
 }

-static INLINE void sum_2_variances(const var *a, const var *b, var *r) {
+static INLINE void sum_2_variances(const VAR *a, const VAR *b, VAR *r) {
   assert(a->log2_count == b->log2_count);
   fill_variance(a->sum_square_error + b->sum_square_error,
                 a->sum_error + b->sum_error, a->log2_count + 1, r);
-- 
2.50.0
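
Note for readers unfamiliar with the warning class this patch silences: the sketch below is a minimal, hypothetical C example (it is not taken from the av1 sources, and `shadow_demo.c` and `sum_matrix` are made-up names) of the two kinds of fix applied throughout the patch: deleting a redundant inner declaration that shadows an outer variable, and renaming one of the two variables when both are genuinely needed. Compiling the pre-fix version with `gcc -Wshadow -Wall -c shadow_demo.c` (or clang) reproduces the "declaration shadows a previous local" warning.

```c
/* shadow_demo.c -- hypothetical illustration of a -Wshadow fix;
 * not part of the av1 patch above. */
#include <stdio.h>

static int sum_matrix(const int m[2][2]) {
  int i, j;      /* outer declarations, like the function-scope i/j in
                    av1_adapt_inter_frame_probs() */
  int total = 0;
  for (i = 0; i < 2; ++i) {
    /* Before the fix, a redundant `int j;` was declared here, shadowing the
     * outer `j` and triggering -Wshadow.  The patch either deletes such inner
     * declarations (entropymode.c, pickrst.c) or renames the variable when
     * both scopes need one (pred -> preds in reconintra.c, rs -> rs2 in
     * rdopt.c). */
    for (j = 0; j < 2; ++j) total += m[i][j];
  }
  return total;
}

int main(void) {
  const int m[2][2] = { { 1, 2 }, { 3, 4 } };
  printf("sum = %d\n", sum_matrix(m)); /* prints "sum = 10" */
  return 0;
}
```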