/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <math.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/system_state.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_scan.h"
#include "vp9/common/vp9_seg_common.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_aq_variance.h"

#define LAST_FRAME_MODE_MASK \
  ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
#define GOLDEN_FRAME_MODE_MASK \
  ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
#define ALT_REF_MODE_MASK \
  ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME))

#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)

#define MIN_EARLY_TERM_INDEX 3
#define NEW_MV_DISCOUNT_FACTOR 8

typedef struct {
  PREDICTION_MODE mode;
  MV_REFERENCE_FRAME ref_frame[2];
} MODE_DEFINITION;

typedef struct {
  MV_REFERENCE_FRAME ref_frame[2];
} REF_DEFINITION;

struct rdcost_block_args {
  const VP9_COMP *cpi;
  MACROBLOCK *x;
  ENTROPY_CONTEXT t_above[16];
  ENTROPY_CONTEXT t_left[16];
  int this_rate;
  int64_t this_dist;
  int64_t this_sse;
  int64_t this_rd;
  int64_t best_rd;
  int exit_early;
  int use_fast_coef_costing;
  const scan_order *so;
  uint8_t skippable;
};

#define LAST_NEW_MV_INDEX 6

static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
  { NEARESTMV, { LAST_FRAME, NONE } },
  { NEARESTMV, { ALTREF_FRAME, NONE } },
  { NEARESTMV, { GOLDEN_FRAME, NONE } },

  { DC_PRED, { INTRA_FRAME, NONE } },

  { NEWMV, { LAST_FRAME, NONE } },
  { NEWMV, { ALTREF_FRAME, NONE } },
  { NEWMV, { GOLDEN_FRAME, NONE } },

  { NEARMV, { LAST_FRAME, NONE } },
  { NEARMV, { ALTREF_FRAME, NONE } },
  { NEARMV, { GOLDEN_FRAME, NONE } },

  { ZEROMV, { LAST_FRAME, NONE } },
  { ZEROMV, { GOLDEN_FRAME, NONE } },
  { ZEROMV, { ALTREF_FRAME, NONE } },

  { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },

  { TM_PRED, { INTRA_FRAME, NONE } },

  { NEARMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEWMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },

  { ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
  { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },

  { H_PRED, { INTRA_FRAME, NONE } },
  { V_PRED, { INTRA_FRAME, NONE } },
  { D135_PRED, { INTRA_FRAME, NONE } },
  { D207_PRED, { INTRA_FRAME, NONE } },
  { D153_PRED, { INTRA_FRAME, NONE } },
  { D63_PRED, { INTRA_FRAME, NONE } },
  { D117_PRED, { INTRA_FRAME, NONE } },
  { D45_PRED, { INTRA_FRAME, NONE } },
};

static const REF_DEFINITION vp9_ref_order[MAX_REFS] = {
  { { LAST_FRAME, NONE } },           { { GOLDEN_FRAME, NONE } },
  { { ALTREF_FRAME, NONE } },         { { LAST_FRAME, ALTREF_FRAME } },
  { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
};

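// Swap the coefficient buffer pointers between context slots 'm' and 'n' for
// planes [min_plane, max_plane): the macroblock's working buffers take over
// slot 'm', slot 'm' takes slot 'n', and slot 'n' records the buffers the
// macroblock now uses, so a best mode's coefficients are kept without
// copying buffer contents.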
static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n,
                           int min_plane, int max_plane) {
  int i;

  for (i = min_plane; i < max_plane; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &x->e_mbd.plane[i];

    p->coeff = ctx->coeff_pbuf[i][m];
    p->qcoeff = ctx->qcoeff_pbuf[i][m];
    pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
    p->eobs = ctx->eobs_pbuf[i][m];

    ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
    ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
    ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
    ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];

    ctx->coeff_pbuf[i][n] = p->coeff;
    ctx->qcoeff_pbuf[i][n] = p->qcoeff;
    ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
    ctx->eobs_pbuf[i][n] = p->eobs;
  }
}

static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
                            MACROBLOCKD *xd, int *out_rate_sum,
                            int64_t *out_dist_sum, int *skip_txfm_sb,
                            int64_t *skip_sse_sb) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  int i;
  int64_t rate_sum = 0;
  int64_t dist_sum = 0;
  const int ref = xd->mi[0]->ref_frame[0];
  unsigned int sse;
  unsigned int var = 0;
  int64_t total_sse = 0;
  int skip_flag = 1;
  const int shift = 6;
  int64_t dist;
  const int dequant_shift =
#if CONFIG_VP9_HIGHBITDEPTH
      (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
#endif  // CONFIG_VP9_HIGHBITDEPTH
                                                    3;
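  // For 8-bit content the shift is 3, i.e. the dequantizer step is divided by
  // 8 as described above; at higher bit depths the dequantizer carries an
  // extra (bd - 8) bits of scale, hence bd - 5 = 3 + (bd - 8).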
  unsigned int qstep_vec[MAX_MB_PLANE];
  unsigned int nlog2_vec[MAX_MB_PLANE];
  unsigned int sum_sse_vec[MAX_MB_PLANE];
  int any_zero_sum_sse = 0;

  x->pred_sse[ref] = 0;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
    const TX_SIZE max_tx_size = max_txsize_lookup[bs];
    const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size];
    const int64_t dc_thr = p->quant_thred[0] >> shift;
    const int64_t ac_thr = p->quant_thred[1] >> shift;
    unsigned int sum_sse = 0;
    // The low thresholds are used to measure if the prediction errors are
    // low enough so that we can skip the mode search.
    const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2);
    const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2);
    int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
    int bh = 1 << (b_height_log2_lookup[bs] - b_height_log2_lookup[unit_size]);
    int idx, idy;
    int lw = b_width_log2_lookup[unit_size] + 2;
    int lh = b_height_log2_lookup[unit_size] + 2;

    for (idy = 0; idy < bh; ++idy) {
      for (idx = 0; idx < bw; ++idx) {
        uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw);
        uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lw);
        int block_idx = (idy << 1) + idx;
        int low_err_skip = 0;

        var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, dst,
                                        pd->dst.stride, &sse);
        x->bsse[(i << 2) + block_idx] = sse;
        sum_sse += sse;

        x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_NONE;
        if (!x->select_tx_size) {
          // Check if all ac coefficients can be quantized to zero.
          if (var < ac_thr || var == 0) {
            x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_ONLY;

            // Check if dc coefficient can be quantized to zero.
            if (sse - var < dc_thr || sse == var) {
              x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_DC;

              if (!sse || (var < low_ac_thr && sse - var < low_dc_thr))
                low_err_skip = 1;
            }
          }
        }

        if (skip_flag && !low_err_skip) skip_flag = 0;

        if (i == 0) x->pred_sse[ref] += sse;
      }
    }

    total_sse += sum_sse;
    sum_sse_vec[i] = sum_sse;
    any_zero_sum_sse = any_zero_sum_sse || (sum_sse == 0);
    qstep_vec[i] = pd->dequant[1] >> dequant_shift;
    nlog2_vec[i] = num_pels_log2_lookup[bs];
  }

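  // Both model paths below map each plane's prediction SSE and quantizer step
  // to a rate/distortion estimate; the vp9_model_rd_from_var_lapndz* helpers
  // assume the residual coefficients follow a Laplacian distribution.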
  // Fast approximate the modelling function.
  if (cpi->sf.simple_model_rd_from_var) {
    for (i = 0; i < MAX_MB_PLANE; ++i) {
      int64_t rate;
      const int64_t square_error = sum_sse_vec[i];
      int quantizer = qstep_vec[i];

      if (quantizer < 120)
        rate = (square_error * (280 - quantizer)) >> (16 - VP9_PROB_COST_SHIFT);
      else
        rate = 0;
      dist = (square_error * quantizer) >> 8;
      rate_sum += rate;
      dist_sum += dist;
    }
  } else {
    if (any_zero_sum_sse) {
      for (i = 0; i < MAX_MB_PLANE; ++i) {
        int rate;
        vp9_model_rd_from_var_lapndz(sum_sse_vec[i], nlog2_vec[i], qstep_vec[i],
                                     &rate, &dist);
        rate_sum += rate;
        dist_sum += dist;
      }
    } else {
      vp9_model_rd_from_var_lapndz_vec(sum_sse_vec, nlog2_vec, qstep_vec,
                                       &rate_sum, &dist_sum);
    }
  }

  *skip_txfm_sb = skip_flag;
  *skip_sse_sb = total_sse << 4;
  *out_rate_sum = (int)rate_sum;
  *out_dist_sum = dist_sum << 4;
}

#if CONFIG_VP9_HIGHBITDEPTH
int64_t vp9_highbd_block_error_c(const tran_low_t *coeff,
                                 const tran_low_t *dqcoeff, intptr_t block_size,
                                 int64_t *ssz, int bd) {
  int i;
  int64_t error = 0, sqcoeff = 0;
  int shift = 2 * (bd - 8);
  int rounding = shift > 0 ? 1 << (shift - 1) : 0;

  for (i = 0; i < block_size; i++) {
    const int64_t diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
  }
  assert(error >= 0 && sqcoeff >= 0);
  error = (error + rounding) >> shift;
  sqcoeff = (sqcoeff + rounding) >> shift;

  *ssz = sqcoeff;
  return error;
}

static int64_t vp9_highbd_block_error_dispatch(const tran_low_t *coeff,
                                               const tran_low_t *dqcoeff,
                                               intptr_t block_size,
                                               int64_t *ssz, int bd) {
  if (bd == 8) {
    return vp9_block_error(coeff, dqcoeff, block_size, ssz);
  } else {
    return vp9_highbd_block_error(coeff, dqcoeff, block_size, ssz, bd);
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

int64_t vp9_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                          intptr_t block_size, int64_t *ssz) {
  int i;
  int64_t error = 0, sqcoeff = 0;

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += coeff[i] * coeff[i];
  }

  *ssz = sqcoeff;
  return error;
}

int64_t vp9_block_error_fp_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                             int block_size) {
  int i;
  int64_t error = 0;

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
  }

  return error;
}

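// Note that *ssz returns the sum of squared source coefficients, i.e. the
// distortion that would result from zeroing the whole block; callers compare
// it against the coded error when deciding whether skipping is cheaper.
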
/* The trailing '0' is a terminator which is used inside cost_coeffs() to
 * decide whether to include cost of a trailing EOB node or not (i.e. we
 * can skip this if the last coefficient in this transform block, e.g. the
 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
 * were non-zero). */
static const int16_t band_counts[TX_SIZES][8] = {
  { 1, 2, 3, 4, 3, 16 - 13, 0 },
  { 1, 2, 3, 4, 11, 64 - 21, 0 },
  { 1, 2, 3, 4, 11, 256 - 21, 0 },
  { 1, 2, 3, 4, 11, 1024 - 21, 0 },
};
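// Coefficients are costed in "bands" keyed by scan position; each row above
// gives the number of coefficients per band for one transform size, with the
// next-to-last entry covering the remaining tail of the block.
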
static int cost_coeffs(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
                       int pt, const int16_t *scan, const int16_t *nb,
                       int use_fast_coef_costing) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  const struct macroblock_plane *p = &x->plane[plane];
  const PLANE_TYPE type = get_plane_type(plane);
  const int16_t *band_count = &band_counts[tx_size][1];
  const int eob = p->eobs[block];
  const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
      x->token_costs[tx_size][type][is_inter_block(mi)];
  uint8_t token_cache[32 * 32];
  int cost;
#if CONFIG_VP9_HIGHBITDEPTH
  const uint16_t *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
#else
  const uint16_t *cat6_high_cost = vp9_get_high_cost_table(8);
#endif

  // Check for consistency of tx_size with mode info
  assert(type == PLANE_TYPE_Y
             ? mi->tx_size == tx_size
             : get_uv_tx_size(mi, &xd->plane[plane]) == tx_size);

  if (eob == 0) {
    // single eob token
    cost = token_costs[0][0][pt][EOB_TOKEN];
  } else {
    if (use_fast_coef_costing) {
      int band_left = *band_count++;
      int c;

      // dc token
      int v = qcoeff[0];
      int16_t prev_t;
      cost = vp9_get_token_cost(v, &prev_t, cat6_high_cost);
      cost += (*token_costs)[0][pt][prev_t];

      token_cache[0] = vp9_pt_energy_class[prev_t];
      ++token_costs;

      // ac tokens
      for (c = 1; c < eob; c++) {
        const int rc = scan[c];
        int16_t t;

        v = qcoeff[rc];
        cost += vp9_get_token_cost(v, &t, cat6_high_cost);
        cost += (*token_costs)[!prev_t][!prev_t][t];
        prev_t = t;
        if (!--band_left) {
          band_left = *band_count++;
          ++token_costs;
        }
      }

      // eob token
      if (band_left) cost += (*token_costs)[0][!prev_t][EOB_TOKEN];

    } else {  // !use_fast_coef_costing
      int band_left = *band_count++;
      int c;

      // dc token
      int v = qcoeff[0];
      int16_t tok;
      unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
      cost = vp9_get_token_cost(v, &tok, cat6_high_cost);
      cost += (*token_costs)[0][pt][tok];

      token_cache[0] = vp9_pt_energy_class[tok];
      ++token_costs;

      tok_cost_ptr = &((*token_costs)[!tok]);

      // ac tokens
      for (c = 1; c < eob; c++) {
        const int rc = scan[c];

        v = qcoeff[rc];
        cost += vp9_get_token_cost(v, &tok, cat6_high_cost);
        pt = get_coef_context(nb, token_cache, c);
        cost += (*tok_cost_ptr)[pt][tok];
        token_cache[rc] = vp9_pt_energy_class[tok];
        if (!--band_left) {
          band_left = *band_count++;
          ++token_costs;
        }
        tok_cost_ptr = &((*token_costs)[!tok]);
      }

      // eob token
      if (band_left) {
        pt = get_coef_context(nb, token_cache, c);
        cost += (*token_costs)[0][pt][EOB_TOKEN];
      }
    }
  }

  return cost;
}

static INLINE int num_4x4_to_edge(int plane_4x4_dim, int mb_to_edge_dim,
                                  int subsampling_dim, int blk_dim) {
  return plane_4x4_dim + (mb_to_edge_dim >> (5 + subsampling_dim)) - blk_dim;
}
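// mb_to_edge_dim is kept in 1/8-pel units, so >> 3 converts it to pixels and
// a further >> 2 to 4x4-block units (the combined >> 5), adjusted for chroma
// subsampling.
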
// Compute the pixel domain sum square error on all visible 4x4s in the
// transform block.
static unsigned pixel_sse(const VP9_COMP *const cpi, const MACROBLOCKD *xd,
                          const struct macroblockd_plane *const pd,
                          const uint8_t *src, const int src_stride,
                          const uint8_t *dst, const int dst_stride, int blk_row,
                          int blk_col, const BLOCK_SIZE plane_bsize,
                          const BLOCK_SIZE tx_bsize) {
  unsigned int sse = 0;
  const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
  const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
  const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
  const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
  int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
                                            pd->subsampling_x, blk_col);
  int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
                                             pd->subsampling_y, blk_row);
  if (tx_bsize == BLOCK_4X4 ||
      (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
    cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
  } else {
    const vpx_variance_fn_t vf_4x4 = cpi->fn_ptr[BLOCK_4X4].vf;
    int r, c;
    unsigned this_sse = 0;
    int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
    int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);

    // The transform block extends into the unrestricted motion border, so
    // visit only the visible 4x4 sub-blocks and skip those lying wholly
    // within the UMV border.
    for (r = 0; r < max_r; ++r) {
      for (c = 0; c < max_c; ++c) {
        vf_4x4(src + r * src_stride * 4 + c * 4, src_stride,
               dst + r * dst_stride * 4 + c * 4, dst_stride, &this_sse);
        sse += this_sse;
      }
    }
  }
  return sse;
}

// Compute the sum of squares over all visible 4x4s in the transform block.
static int64_t sum_squares_visible(const MACROBLOCKD *xd,
                                   const struct macroblockd_plane *const pd,
                                   const int16_t *diff, const int diff_stride,
                                   int blk_row, int blk_col,
                                   const BLOCK_SIZE plane_bsize,
                                   const BLOCK_SIZE tx_bsize) {
  int64_t sse = 0;
  const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
  const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
  const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
  const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
  int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
                                            pd->subsampling_x, blk_col);
  int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
                                             pd->subsampling_y, blk_row);
  if (tx_bsize == BLOCK_4X4 ||
      (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
    assert(tx_4x4_w == tx_4x4_h);
    sse = (int64_t)vpx_sum_squares_2d_i16(diff, diff_stride, tx_4x4_w << 2);
  } else {
    int r, c;
    int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
    int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);

    // As in pixel_sse(), sum only over the 4x4 sub-blocks that are at least
    // partly visible; the rest lie within the UMV border.
    for (r = 0; r < max_r; ++r) {
      for (c = 0; c < max_c; ++c) {
        sse += (int64_t)vpx_sum_squares_2d_i16(
            diff + r * diff_stride * 4 + c * 4, diff_stride, 4);
      }
    }
  }
  return sse;
}

static void dist_block(const VP9_COMP *cpi, MACROBLOCK *x, int plane,
                       BLOCK_SIZE plane_bsize, int block, int blk_row,
                       int blk_col, TX_SIZE tx_size, int64_t *out_dist,
                       int64_t *out_sse) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const int eob = p->eobs[block];

  if (x->block_tx_domain && eob) {
    const int ss_txfrm_size = tx_size << 1;
    int64_t this_sse;
    const int shift = tx_size == TX_32X32 ? 0 : 2;
    const tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
    const tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
#if CONFIG_VP9_HIGHBITDEPTH
    const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
    *out_dist = vp9_highbd_block_error_dispatch(
                    coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse, bd) >>
                shift;
#else
    *out_dist =
        vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
        shift;
#endif  // CONFIG_VP9_HIGHBITDEPTH
    *out_sse = this_sse >> shift;
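    // Distortion is carried at 16x the pixel-domain SSE in this file (compare
    // the '* 16' in the pixel-domain path below). The 4x4..16x16 transforms
    // scale the residual energy by 64x, hence >> 2; the 32x32 transform has
    // an extra 1/2 coefficient scale, so its error needs no shift.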

    if (x->skip_encode && !is_inter_block(xd->mi[0])) {
      // TODO(jingning): tune the model to better capture the distortion.
      const int64_t p =
          (pd->dequant[1] * pd->dequant[1] * (1 << ss_txfrm_size)) >>
#if CONFIG_VP9_HIGHBITDEPTH
          (shift + 2 + (bd - 8) * 2);
#else
          (shift + 2);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      *out_dist += (p >> 4);
      *out_sse += p;
    }
  } else {
    const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
    const int bs = 4 * num_4x4_blocks_wide_lookup[tx_bsize];
    const int src_stride = p->src.stride;
    const int dst_stride = pd->dst.stride;
    const int src_idx = 4 * (blk_row * src_stride + blk_col);
    const int dst_idx = 4 * (blk_row * dst_stride + blk_col);
    const uint8_t *src = &p->src.buf[src_idx];
    const uint8_t *dst = &pd->dst.buf[dst_idx];
    const tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
    unsigned int tmp;

    tmp = pixel_sse(cpi, xd, pd, src, src_stride, dst, dst_stride, blk_row,
                    blk_col, plane_bsize, tx_bsize);
    *out_sse = (int64_t)tmp * 16;
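
    // If the block has nonzero coefficients, inverse transform into a scratch
    // buffer and measure the SSE against the source; with eob == 0 the
    // prediction is the reconstruction, so the SSE above doubles as the
    // distortion.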
    if (eob) {
#if CONFIG_VP9_HIGHBITDEPTH
      DECLARE_ALIGNED(16, uint16_t, recon16[1024]);
      uint8_t *recon = (uint8_t *)recon16;
#else
      DECLARE_ALIGNED(16, uint8_t, recon[1024]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vpx_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst), dst_stride, recon16,
                                 32, NULL, 0, 0, 0, 0, bs, bs, xd->bd);
        if (xd->lossless) {
          vp9_highbd_iwht4x4_add(dqcoeff, recon16, 32, eob, xd->bd);
        } else {
          switch (tx_size) {
            case TX_4X4:
              vp9_highbd_idct4x4_add(dqcoeff, recon16, 32, eob, xd->bd);
              break;
            case TX_8X8:
              vp9_highbd_idct8x8_add(dqcoeff, recon16, 32, eob, xd->bd);
              break;
            case TX_16X16:
              vp9_highbd_idct16x16_add(dqcoeff, recon16, 32, eob, xd->bd);
              break;
            default:
              assert(tx_size == TX_32X32);
              vp9_highbd_idct32x32_add(dqcoeff, recon16, 32, eob, xd->bd);
              break;
          }
        }
        recon = CONVERT_TO_BYTEPTR(recon16);
      } else {
#endif  // CONFIG_VP9_HIGHBITDEPTH
        vpx_convolve_copy(dst, dst_stride, recon, 32, NULL, 0, 0, 0, 0, bs, bs);
        switch (tx_size) {
          case TX_32X32: vp9_idct32x32_add(dqcoeff, recon, 32, eob); break;
          case TX_16X16: vp9_idct16x16_add(dqcoeff, recon, 32, eob); break;
          case TX_8X8: vp9_idct8x8_add(dqcoeff, recon, 32, eob); break;
          default:
            assert(tx_size == TX_4X4);
            // this is like vp9_short_idct4x4 but has a special case around
            // eob<=1, which is significant (not just an optimization) for
            // the lossless case.
            x->inv_txfm_add(dqcoeff, recon, 32, eob);
            break;
        }
#if CONFIG_VP9_HIGHBITDEPTH
      }
#endif  // CONFIG_VP9_HIGHBITDEPTH

      tmp = pixel_sse(cpi, xd, pd, src, src_stride, recon, 32, blk_row,
                      blk_col, plane_bsize, tx_bsize);
    }

    *out_dist = (int64_t)tmp * 16;
  }
}

static int rate_block(int plane, int block, TX_SIZE tx_size, int coeff_ctx,
                      struct rdcost_block_args *args) {
  return cost_coeffs(args->x, plane, block, tx_size, coeff_ctx, args->so->scan,
                     args->so->neighbors, args->use_fast_coef_costing);
}

static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
                          BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
  struct rdcost_block_args *args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  int64_t rd1, rd2, rd;
  int rate;
  int64_t dist;
  int64_t sse;
  const int coeff_ctx =
      combine_entropy_contexts(args->t_left[blk_row], args->t_above[blk_col]);

  if (args->exit_early) return;

  if (!is_inter_block(mi)) {
    struct encode_b_args intra_arg = { x, x->block_qcoeff_opt, args->t_above,
                                       args->t_left, &mi->skip };
    vp9_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size,
                           &intra_arg);
    if (x->block_tx_domain) {
      dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
                 tx_size, &dist, &sse);
    } else {
      const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
      const struct macroblock_plane *const p = &x->plane[plane];
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      const int src_stride = p->src.stride;
      const int dst_stride = pd->dst.stride;
      const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
      const uint8_t *src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
      const uint8_t *dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
      const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
      unsigned int tmp;

      sse = sum_squares_visible(xd, pd, diff, diff_stride, blk_row, blk_col,
                                plane_bsize, tx_bsize);
#if CONFIG_VP9_HIGHBITDEPTH
      if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && (xd->bd > 8))
        sse = ROUND64_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      sse = sse * 16;
      tmp = pixel_sse(args->cpi, xd, pd, src, src_stride, dst, dst_stride,
                      blk_row, blk_col, plane_bsize, tx_bsize);
      dist = (int64_t)tmp * 16;
    }
  } else if (max_txsize_lookup[plane_bsize] == tx_size) {
    if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
        SKIP_TXFM_NONE) {
      // full forward transform and quantization
      vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
      if (x->block_qcoeff_opt)
        vp9_optimize_b(x, plane, block, tx_size, coeff_ctx);
      dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
                 tx_size, &dist, &sse);
    } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
               SKIP_TXFM_AC_ONLY) {
      // compute DC coefficient
      tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
      tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
      vp9_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
                         tx_size);
      sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
      dist = sse;
      if (x->plane[plane].eobs[block]) {
        const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
        const int64_t resd_sse = coeff[0] - dqcoeff[0];
        int64_t dc_correct = orig_sse - resd_sse * resd_sse;
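        // dc_correct estimates the energy recovered by coding only the DC
        // coefficient: the DC energy before quantization minus the residual
        // DC energy after, rescaled below to the 16x pixel-SSE domain used
        // for 'sse'.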
#if CONFIG_VP9_HIGHBITDEPTH
        dc_correct >>= ((xd->bd - 8) * 2);
#endif
        if (tx_size != TX_32X32) dc_correct >>= 2;

        dist = VPXMAX(0, sse - dc_correct);
      }
    } else {
      // SKIP_TXFM_AC_DC
      // skip forward transform; because this case is handled here, the
      // quantization step does not need to do it.
      x->plane[plane].eobs[block] = 0;
      sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
      dist = sse;
    }
  } else {
    // full forward transform and quantization
    vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
    if (x->block_qcoeff_opt)
      vp9_optimize_b(x, plane, block, tx_size, coeff_ctx);
    dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
               tx_size, &dist, &sse);
  }

  rd = RDCOST(x->rdmult, x->rddiv, 0, dist);
  if (args->this_rd + rd > args->best_rd) {
    args->exit_early = 1;
    return;
  }

  rate = rate_block(plane, block, tx_size, coeff_ctx, args);
  args->t_above[blk_col] = (x->plane[plane].eobs[block] > 0) ? 1 : 0;
  args->t_left[blk_row] = (x->plane[plane].eobs[block] > 0) ? 1 : 0;
  rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist);
  rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);
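
  // rd1 costs coding the coefficients; rd2 costs skipping the block (zero
  // rate, distortion equal to the prediction SSE). Taking the minimum
  // implements a per-block skip decision.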
  // TODO(jingning): temporarily enabled only for luma component
  rd = VPXMIN(rd1, rd2);
  if (plane == 0) {
    x->zcoeff_blk[tx_size][block] =
        !x->plane[plane].eobs[block] || (rd1 > rd2 && !xd->lossless);
    x->sum_y_eobs[tx_size] += x->plane[plane].eobs[block];
  }

  args->this_rate += rate;
  args->this_dist += dist;
  args->this_sse += sse;
  args->this_rd += rd;

  if (args->this_rd > args->best_rd) {
    args->exit_early = 1;
    return;
  }

  args->skippable &= !x->plane[plane].eobs[block];
}

static void txfm_rd_in_plane(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                             int64_t *distortion, int *skippable, int64_t *sse,
                             int64_t ref_best_rd, int plane, BLOCK_SIZE bsize,
                             TX_SIZE tx_size, int use_fast_coef_costing) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  struct rdcost_block_args args;
  vp9_zero(args);
  args.cpi = cpi;
  args.x = x;
  args.best_rd = ref_best_rd;
  args.use_fast_coef_costing = use_fast_coef_costing;
  args.skippable = 1;

  if (plane == 0) xd->mi[0]->tx_size = tx_size;

  vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);

  args.so = get_scan(xd, tx_size, get_plane_type(plane), 0);

  vp9_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
                                         &args);
  if (args.exit_early) {
    *rate = INT_MAX;
    *distortion = INT64_MAX;
    *sse = INT64_MAX;
    *skippable = 0;
  } else {
    *distortion = args.this_dist;
    *rate = args.this_rate;
    *sse = args.this_sse;
    *skippable = args.skippable;
  }
}

static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                   int64_t *distortion, int *skip, int64_t *sse,
                                   int64_t ref_best_rd, BLOCK_SIZE bs) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
  VP9_COMMON *const cm = &cpi->common;
  const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];

  mi->tx_size = VPXMIN(max_tx_size, largest_tx_size);

  txfm_rd_in_plane(cpi, x, rate, distortion, skip, sse, ref_best_rd, 0, bs,
                   mi->tx_size, cpi->sf.use_fast_coef_costing);
}

static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                   int64_t *distortion, int *skip,
                                   int64_t *psse, int64_t ref_best_rd,
                                   BLOCK_SIZE bs) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
  int r[TX_SIZES][2], s[TX_SIZES];
  int64_t d[TX_SIZES], sse[TX_SIZES];
  int64_t rd[TX_SIZES][2] = { { INT64_MAX, INT64_MAX },
                              { INT64_MAX, INT64_MAX },
                              { INT64_MAX, INT64_MAX },
                              { INT64_MAX, INT64_MAX } };
  int n;
  int s0, s1;
  int64_t best_rd = ref_best_rd;
  TX_SIZE best_tx = max_tx_size;
  int start_tx, end_tx;
  const int tx_size_ctx = get_tx_size_context(xd);

  assert(skip_prob > 0);
  s0 = vp9_cost_bit(skip_prob, 0);
  s1 = vp9_cost_bit(skip_prob, 1);
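
  // s0/s1 are the bit costs of signaling skip = 0 / skip = 1 with the current
  // skip probability; one of the two is folded into every RD cost below.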

  if (cm->tx_mode == TX_MODE_SELECT) {
    start_tx = max_tx_size;
    end_tx = VPXMAX(start_tx - cpi->sf.tx_size_search_depth, 0);
    if (bs > BLOCK_32X32) end_tx = VPXMIN(end_tx + 1, start_tx);
  } else {
    TX_SIZE chosen_tx_size =
        VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[cm->tx_mode]);
    start_tx = chosen_tx_size;
    end_tx = chosen_tx_size;
  }

  for (n = start_tx; n >= end_tx; n--) {
    const int r_tx_size = cpi->tx_size_cost[max_tx_size - 1][tx_size_ctx][n];
    txfm_rd_in_plane(cpi, x, &r[n][0], &d[n], &s[n], &sse[n], best_rd, 0, bs, n,
                     cpi->sf.use_fast_coef_costing);
    r[n][1] = r[n][0];
    if (r[n][0] < INT_MAX) {
      r[n][1] += r_tx_size;
    }
    if (d[n] == INT64_MAX || r[n][0] == INT_MAX) {
      rd[n][0] = rd[n][1] = INT64_MAX;
    } else if (s[n]) {
      if (is_inter_block(mi)) {
        rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
        r[n][1] -= r_tx_size;
      } else {
        rd[n][0] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
        rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size, sse[n]);
      }
    } else {
      rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
      rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
    }

    if (is_inter_block(mi) && !xd->lossless && !s[n] && sse[n] != INT64_MAX) {
      rd[n][0] = VPXMIN(rd[n][0], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
      rd[n][1] = VPXMIN(rd[n][1], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
    }

    // Early termination in transform size search.
    if (cpi->sf.tx_size_search_breakout &&
        (rd[n][1] == INT64_MAX ||
         (n < (int)max_tx_size && rd[n][1] > rd[n + 1][1]) || s[n] == 1))
      break;

    if (rd[n][1] < best_rd) {
      best_tx = n;
      best_rd = rd[n][1];
    }
  }
  mi->tx_size = best_tx;

  *distortion = d[mi->tx_size];
  *rate = r[mi->tx_size][cm->tx_mode == TX_MODE_SELECT];
  *skip = s[mi->tx_size];
  *psse = sse[mi->tx_size];
}

static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                            int64_t *distortion, int *skip, int64_t *psse,
                            BLOCK_SIZE bs, int64_t ref_best_rd) {
  MACROBLOCKD *xd = &x->e_mbd;
  int64_t sse;
  int64_t *ret_sse = psse ? psse : &sse;

  assert(bs == xd->mi[0]->sb_type);

  if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
    choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
                           bs);
  } else {
    choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
                           bs);
  }
}

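// Skip an oblique intra mode unless the best mode found so far is one of the
// two directions it interpolates between.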
static int conditional_skipintra(PREDICTION_MODE mode,
                                 PREDICTION_MODE best_intra_mode) {
  if (mode == D117_PRED && best_intra_mode != V_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  if (mode == D63_PRED && best_intra_mode != V_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D207_PRED && best_intra_mode != H_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D153_PRED && best_intra_mode != H_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  return 0;
}

static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int row,
                                     int col, PREDICTION_MODE *best_mode,
                                     const int *bmode_costs, ENTROPY_CONTEXT *a,
                                     ENTROPY_CONTEXT *l, int *bestrate,
                                     int *bestratey, int64_t *bestdistortion,
                                     BLOCK_SIZE bsize, int64_t rd_thresh) {
  PREDICTION_MODE mode;
  MACROBLOCKD *const xd = &x->e_mbd;
  int64_t best_rd = rd_thresh;
  struct macroblock_plane *p = &x->plane[0];
  struct macroblockd_plane *pd = &xd->plane[0];
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4];
  uint8_t *dst_init = &pd->dst.buf[row * 4 * dst_stride + col * 4];
  ENTROPY_CONTEXT ta[2], tempa[2];
  ENTROPY_CONTEXT tl[2], templ[2];
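  // ta/tl snapshot the above/left entropy contexts at entry; tempa/templ are
  // scratch copies so that every candidate mode is costed from the same
  // starting contexts.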
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;
  uint8_t best_dst[8 * 8];
#if CONFIG_VP9_HIGHBITDEPTH
  uint16_t best_dst16[8 * 8];
#endif
  memcpy(ta, a, num_4x4_blocks_wide * sizeof(a[0]));
  memcpy(tl, l, num_4x4_blocks_high * sizeof(l[0]));

  xd->mi[0]->tx_size = TX_4X4;

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
      int64_t this_rd;
      int ratey = 0;
      int64_t distortion = 0;
      int rate = bmode_costs[mode];

      if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;

      // Only do the oblique modes if the best so far is
      // one of the neighboring directional modes
      if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
        if (conditional_skipintra(mode, *best_mode)) continue;
      }

      memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
      memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));

      for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
        for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
          const int block = (row + idy) * 2 + (col + idx);
          const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
          uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
          uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
          int16_t *const src_diff =
              vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
          tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
          xd->mi[0]->bmi[block].as_mode = mode;
          vp9_predict_intra_block(xd, 1, TX_4X4, mode,
                                  x->skip_encode ? src : dst,
                                  x->skip_encode ? src_stride : dst_stride, dst,
                                  dst_stride, col + idx, row + idy, 0);
          vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
                                    dst_stride, xd->bd);
          if (xd->lossless) {
            const scan_order *so = &vp9_default_scan_orders[TX_4X4];
            const int coeff_ctx =
                combine_entropy_contexts(tempa[idx], templ[idy]);
            vp9_highbd_fwht4x4(src_diff, coeff, 8);
            vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
            ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
                                 so->neighbors, cpi->sf.use_fast_coef_costing);
            tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
            if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
              goto next_highbd;
            vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst16,
                                   dst_stride, p->eobs[block], xd->bd);
          } else {
            int64_t unused;
            const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
            const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
            const int coeff_ctx =
                combine_entropy_contexts(tempa[idx], templ[idy]);
            if (tx_type == DCT_DCT)
              vpx_highbd_fdct4x4(src_diff, coeff, 8);
            else
              vp9_highbd_fht4x4(src_diff, coeff, 8, tx_type);
            vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
            ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
                                 so->neighbors, cpi->sf.use_fast_coef_costing);
            distortion += vp9_highbd_block_error_dispatch(
                              coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
                              &unused, xd->bd) >>
                          2;
            tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
            if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
              goto next_highbd;
            vp9_highbd_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block),
                                  dst16, dst_stride, p->eobs[block], xd->bd);
          }
        }
      }

      rate += ratey;
      this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

      if (this_rd < best_rd) {
        *bestrate = rate;
        *bestratey = ratey;
        *bestdistortion = distortion;
        best_rd = this_rd;
        *best_mode = mode;
        memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
        memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
        for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
          memcpy(best_dst16 + idy * 8,
                 CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
                 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
        }
      }
    next_highbd:;
    }

    if (best_rd >= rd_thresh || x->skip_encode) return best_rd;

    for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
      memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
             best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t));
    }

    return best_rd;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    int64_t this_rd;
    int ratey = 0;
    int64_t distortion = 0;
    int rate = bmode_costs[mode];

    if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;

    // Only do the oblique modes if the best so far is
    // one of the neighboring directional modes
    if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
      if (conditional_skipintra(mode, *best_mode)) continue;
    }

    memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
    memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));

    for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
      for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
        const int block = (row + idy) * 2 + (col + idx);
        const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
        uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
        int16_t *const src_diff =
            vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
        tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
        xd->mi[0]->bmi[block].as_mode = mode;
        vp9_predict_intra_block(xd, 1, TX_4X4, mode, x->skip_encode ? src : dst,
                                x->skip_encode ? src_stride : dst_stride, dst,
                                dst_stride, col + idx, row + idy, 0);
        vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);

        if (xd->lossless) {
          const scan_order *so = &vp9_default_scan_orders[TX_4X4];
          const int coeff_ctx =
              combine_entropy_contexts(tempa[idx], templ[idy]);
          vp9_fwht4x4(src_diff, coeff, 8);
          vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
          ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
                               so->neighbors, cpi->sf.use_fast_coef_costing);
          tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
          if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
            goto next;
          vp9_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, dst_stride,
                          p->eobs[block]);
        } else {
          int64_t unused;
          const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
          const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
          const int coeff_ctx =
              combine_entropy_contexts(tempa[idx], templ[idy]);
          vp9_fht4x4(src_diff, coeff, 8, tx_type);
          vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
          ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
                               so->neighbors, cpi->sf.use_fast_coef_costing);
          tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
          distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
                                        16, &unused) >>
                        2;
          if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
            goto next;
          vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), dst,
                         dst_stride, p->eobs[block]);
        }
      }
    }

    rate += ratey;
    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

    if (this_rd < best_rd) {
      *bestrate = rate;
      *bestratey = ratey;
      *bestdistortion = distortion;
      best_rd = this_rd;
      *best_mode = mode;
      memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
      memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
      for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
        memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
               num_4x4_blocks_wide * 4);
    }
  next:;
  }

  if (best_rd >= rd_thresh || x->skip_encode) return best_rd;

  for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
    memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
           num_4x4_blocks_wide * 4);

  return best_rd;
}

static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb,
                                            int *rate, int *rate_y,
                                            int64_t *distortion,
                                            int64_t best_rd) {
  int i, j;
  const MACROBLOCKD *const xd = &mb->e_mbd;
  MODE_INFO *const mic = xd->mi[0];
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = xd->mi[0]->sb_type;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;
  int cost = 0;
  int64_t total_distortion = 0;
  int tot_rate_y = 0;
  int64_t total_rd = 0;
  const int *bmode_costs = cpi->mbmode_cost;

  // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      PREDICTION_MODE best_mode = DC_PRED;
      int r = INT_MAX, ry = INT_MAX;
      int64_t d = INT64_MAX, this_rd = INT64_MAX;
      i = idy * 2 + idx;
      if (cpi->common.frame_type == KEY_FRAME) {
        const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i);
        const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i);

        bmode_costs = cpi->y_mode_costs[A][L];
      }

      this_rd = rd_pick_intra4x4block(
          cpi, mb, idy, idx, &best_mode, bmode_costs,
          xd->plane[0].above_context + idx, xd->plane[0].left_context + idy, &r,
          &ry, &d, bsize, best_rd - total_rd);

      if (this_rd >= best_rd - total_rd) return INT64_MAX;

      total_rd += this_rd;
      cost += r;
      total_distortion += d;
      tot_rate_y += ry;

      mic->bmi[i].as_mode = best_mode;
      for (j = 1; j < num_4x4_blocks_high; ++j)
        mic->bmi[i + j * 2].as_mode = best_mode;
      for (j = 1; j < num_4x4_blocks_wide; ++j)
        mic->bmi[i + j].as_mode = best_mode;

      if (total_rd >= best_rd) return INT64_MAX;
    }
  }

  *rate = cost;
  *rate_y = tot_rate_y;
  *distortion = total_distortion;
  mic->mode = mic->bmi[3].as_mode;

  return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
}

// This function is used only for intra_only frames
static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                      int *rate_tokenonly, int64_t *distortion,
                                      int *skippable, BLOCK_SIZE bsize,
                                      int64_t best_rd) {
  PREDICTION_MODE mode;
  PREDICTION_MODE mode_selected = DC_PRED;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mic = xd->mi[0];
  int this_rate, this_rate_tokenonly, s;
  int64_t this_distortion, this_rd;
  TX_SIZE best_tx = TX_4X4;
  int *bmode_costs;
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
  const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
  bmode_costs = cpi->y_mode_costs[A][L];

  memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
  /* Y Search for intra prediction mode */
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    if (cpi->sf.use_nonrd_pick_mode) {
      // These speed features are turned on in hybrid non-RD and RD mode
      // for key frame coding in the context of real-time setting.
      if (conditional_skipintra(mode, mode_selected)) continue;
      if (*skippable) break;
    }
    mic->mode = mode;

    super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
                    bsize, best_rd);

    if (this_rate_tokenonly == INT_MAX) continue;

    this_rate = this_rate_tokenonly + bmode_costs[mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_rd = this_rd;
      best_tx = mic->tx_size;
      *rate = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;
      *skippable = s;
    }
  }

  mic->mode = mode_selected;
  mic->tx_size = best_tx;

  return best_rd;
}

// Return value 0: early termination triggered, no valid rd cost available;
//              1: rd cost values are valid.
static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                            int64_t *distortion, int *skippable, int64_t *sse,
                            BLOCK_SIZE bsize, int64_t ref_best_rd) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  const TX_SIZE uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
  int plane;
  int pnrate = 0, pnskip = 1;
  int64_t pndist = 0, pnsse = 0;
  int is_cost_valid = 1;

  if (ref_best_rd < 0) is_cost_valid = 0;

  if (is_inter_block(mi) && is_cost_valid) {
    for (plane = 1; plane < MAX_MB_PLANE; ++plane)
      vp9_subtract_plane(x, bsize, plane);
  }

  *rate = 0;
  *distortion = 0;
  *sse = 0;
  *skippable = 1;

  for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
    txfm_rd_in_plane(cpi, x, &pnrate, &pndist, &pnskip, &pnsse, ref_best_rd,
                     plane, bsize, uv_tx_size, cpi->sf.use_fast_coef_costing);
    if (pnrate == INT_MAX) {
      is_cost_valid = 0;
      break;
    }
    *rate += pnrate;
    *distortion += pndist;
    *sse += pnsse;
    *skippable &= pnskip;
  }

  if (!is_cost_valid) {
    // reset cost value
    *rate = INT_MAX;
    *distortion = INT64_MAX;
    *sse = INT64_MAX;
    *skippable = 0;
  }

  return is_cost_valid;
}

static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                       PICK_MODE_CONTEXT *ctx, int *rate,
                                       int *rate_tokenonly, int64_t *distortion,
                                       int *skippable, BLOCK_SIZE bsize,
                                       TX_SIZE max_tx_size) {
  MACROBLOCKD *xd = &x->e_mbd;
  PREDICTION_MODE mode;
  PREDICTION_MODE mode_selected = DC_PRED;
  int64_t best_rd = INT64_MAX, this_rd;
  int this_rate_tokenonly, this_rate, s;
  int64_t this_distortion, this_sse;

  memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue;
#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
        (xd->above_mi == NULL || xd->left_mi == NULL) && need_top_left[mode])
      continue;
#endif  // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH

    xd->mi[0]->uv_mode = mode;

    if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
                          &this_sse, bsize, best_rd))
      continue;
    this_rate =
        this_rate_tokenonly +
        cpi->intra_uv_mode_cost[cpi->common.frame_type][xd->mi[0]->mode][mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_rd = this_rd;
      *rate = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;
      *skippable = s;
      if (!x->select_tx_size) swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
    }
  }

  xd->mi[0]->uv_mode = mode_selected;
  return best_rd;
}

static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                              int *rate_tokenonly, int64_t *distortion,
                              int *skippable, BLOCK_SIZE bsize) {
  const VP9_COMMON *cm = &cpi->common;
  int64_t unused;

  x->e_mbd.mi[0]->uv_mode = DC_PRED;
  memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
  super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused,
                   bsize, INT64_MAX);
  *rate =
      *rate_tokenonly +
      cpi->intra_uv_mode_cost[cm->frame_type][x->e_mbd.mi[0]->mode][DC_PRED];
  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}

static void choose_intra_uv_mode(VP9_COMP *cpi, MACROBLOCK *const x,
                                 PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
                                 TX_SIZE max_tx_size, int *rate_uv,
                                 int *rate_uv_tokenonly, int64_t *dist_uv,
                                 int *skip_uv, PREDICTION_MODE *mode_uv) {
  // Use an estimated rd for uv_intra based on DC_PRED if the
  // appropriate speed flag is set.
  if (cpi->sf.use_uv_intra_rd_estimate) {
    rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
                   bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
  } else {
    // Otherwise, do a proper rd search for each transform size that may be
    // considered in the main rd loop.
    rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv,
                            skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
                            max_tx_size);
  }
  *mode_uv = x->e_mbd.mi[0]->uv_mode;
}

static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
                       int mode_context) {
  assert(is_inter_mode(mode));
  return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
}

static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                int i, PREDICTION_MODE mode, int_mv this_mv[2],
                                int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                                int_mv seg_mvs[MAX_REF_FRAMES],
                                int_mv *best_ref_mv[2], const int *mvjcost,
                                int *mvcost[2]) {
  MODE_INFO *const mi = xd->mi[0];
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  int thismvcost = 0;
  int idx, idy;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mi->sb_type];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mi->sb_type];
  const int is_compound = has_second_ref(mi);

  switch (mode) {
    case NEWMV:
      this_mv[0].as_int = seg_mvs[mi->ref_frame[0]].as_int;
      thismvcost += vp9_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
                                    mvjcost, mvcost, MV_COST_WEIGHT_SUB);
      if (is_compound) {
        this_mv[1].as_int = seg_mvs[mi->ref_frame[1]].as_int;
        thismvcost += vp9_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
                                      mvjcost, mvcost, MV_COST_WEIGHT_SUB);
      }
      break;
    case NEARMV:
    case NEARESTMV:
      this_mv[0].as_int = frame_mv[mode][mi->ref_frame[0]].as_int;
      if (is_compound)
        this_mv[1].as_int = frame_mv[mode][mi->ref_frame[1]].as_int;
      break;
    default:
      assert(mode == ZEROMV);
      this_mv[0].as_int = 0;
      if (is_compound) this_mv[1].as_int = 0;
      break;
  }

  mi->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
  if (is_compound) mi->bmi[i].as_mv[1].as_int = this_mv[1].as_int;

  mi->bmi[i].as_mode = mode;

  for (idy = 0; idy < num_4x4_blocks_high; ++idy)
    for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
      memmove(&mi->bmi[i + idy * 2 + idx], &mi->bmi[i], sizeof(mi->bmi[i]));

  return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mi->ref_frame[0]]) +
         thismvcost;
}

static int64_t encode_inter_mb_segment(VP9_COMP *cpi, MACROBLOCK *x,
                                       int64_t best_yrd, int i, int *labelyrate,
                                       int64_t *distortion, int64_t *sse,
                                       ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
                                       int mi_row, int mi_col) {
  int k;
  MACROBLOCKD *xd = &x->e_mbd;
  struct macroblockd_plane *const pd = &xd->plane[0];
  struct macroblock_plane *const p = &x->plane[0];
  MODE_INFO *const mi = xd->mi[0];
  const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->sb_type, pd);
  const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
  int idx, idy;

  const uint8_t *const src =
      &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
  uint8_t *const dst =
      &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
  int64_t thisdistortion = 0, thissse = 0;
  int thisrate = 0, ref;
  const scan_order *so = &vp9_default_scan_orders[TX_4X4];
  const int is_compound = has_second_ref(mi);
  const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];

  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const int bw = b_width_log2_lookup[BLOCK_8X8];
    const int h = 4 * (i >> bw);
    const int w = 4 * (i & ((1 << bw) - 1));
    const struct scale_factors *sf = &xd->block_refs[ref]->sf;
    int y_stride = pd->pre[ref].stride;
    uint8_t *pre = pd->pre[ref].buf + (h * pd->pre[ref].stride + w);

    if (vp9_is_scaled(sf)) {
      const int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
      const int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));

      y_stride = xd->block_refs[ref]->buf->y_stride;
      pre = xd->block_refs[ref]->buf->y_buffer;
      pre += scaled_buffer_offset(x_start + w, y_start + h, y_stride, sf);
    }
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_highbd_build_inter_predictor(
          CONVERT_TO_SHORTPTR(pre), y_stride, CONVERT_TO_SHORTPTR(dst),
          pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
          &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
          mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2),
          xd->bd);
    } else {
      vp9_build_inter_predictor(
          pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
          &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
          mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
    }
#else
    vp9_build_inter_predictor(
        pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
        &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
        mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    vpx_highbd_subtract_block(
        height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
        8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
  } else {
    vpx_subtract_block(height, width,
                       vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                       8, src, p->src.stride, dst, pd->dst.stride);
  }
#else
  vpx_subtract_block(height, width,
                     vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                     8, src, p->src.stride, dst, pd->dst.stride);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  k = i;
  for (idy = 0; idy < height / 4; ++idy) {
    for (idx = 0; idx < width / 4; ++idx) {
#if CONFIG_VP9_HIGHBITDEPTH
      const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
#endif  // CONFIG_VP9_HIGHBITDEPTH
      int64_t ssz, rd, rd1, rd2;
      tran_low_t *coeff;
      int coeff_ctx;
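      // At most one of idy/idx exceeds 0 for sub-8x8 partitions, so this
      // accumulation visits k = i, then i + 1 (8x4) or i + 2 (4x8).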
      k += (idy * 2 + idx);
      coeff_ctx = combine_entropy_contexts(ta[k & 1], tl[k >> 1]);
      coeff = BLOCK_OFFSET(p->coeff, k);
      x->fwd_txfm4x4(vp9_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
                     coeff, 8);
      vp9_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
#if CONFIG_VP9_HIGHBITDEPTH
      thisdistortion += vp9_highbd_block_error_dispatch(
          coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, bd);
#else
      thisdistortion +=
          vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      thissse += ssz;
      thisrate += cost_coeffs(x, 0, k, TX_4X4, coeff_ctx, so->scan,
                              so->neighbors, cpi->sf.use_fast_coef_costing);
      ta[k & 1] = tl[k >> 1] = (x->plane[0].eobs[k] > 0) ? 1 : 0;
      rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
      rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
      rd = VPXMIN(rd1, rd2);
      if (rd >= best_yrd) return INT64_MAX;
    }
  }

  *distortion = thisdistortion >> 2;
  *labelyrate = thisrate;
  *sse = thissse >> 2;

  return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
}

typedef struct {
  int eobs;
  int brate;
  int byrate;
  int64_t bdist;
  int64_t bsse;
  int64_t brdcost;
  int_mv mvs[2];
  ENTROPY_CONTEXT ta[2];
  ENTROPY_CONTEXT tl[2];
} SEG_RDSTAT;

typedef struct {
  int_mv *ref_mv[2];
  int_mv mvp;

  int64_t segment_rd;
  int r;
  int64_t d;
  int64_t sse;
  int segment_yrate;
  PREDICTION_MODE modes[4];
  SEG_RDSTAT rdstat[4][INTER_MODES];
  int mvthresh;
} BEST_SEG_INFO;

static INLINE int mv_check_bounds(const MvLimits *mv_limits, const MV *mv) {
  return (mv->row >> 3) < mv_limits->row_min ||
         (mv->row >> 3) > mv_limits->row_max ||
         (mv->col >> 3) < mv_limits->col_min ||
         (mv->col >> 3) > mv_limits->col_max;
}

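// Point the plane-0 source and prediction buffers at sub-block 'i' of the
// 8x8 block so the sub-8x8 motion search can operate on it directly;
// mi_buf_restore() undoes the shift.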
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
  MODE_INFO *const mi = x->e_mbd.mi[0];
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &x->e_mbd.plane[0];

  p->src.buf =
      &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
  assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
  pd->pre[0].buf =
      &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
  if (has_second_ref(mi))
    pd->pre[1].buf =
        &pd->pre[1]
             .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
}

static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
                                  struct buf_2d orig_pre[2]) {
  MODE_INFO *mi = x->e_mbd.mi[0];
  x->plane[0].src = orig_src;
  x->e_mbd.plane[0].pre[0] = orig_pre[0];
  if (has_second_ref(mi)) x->e_mbd.plane[0].pre[1] = orig_pre[1];
}

static INLINE int mv_has_subpel(const MV *mv) {
  return (mv->row & 0x0F) || (mv->col & 0x0F);
}

// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
// TODO(aconverse): Find out if this is still productive then clean up or remove
static int check_best_zero_mv(const VP9_COMP *cpi,
                              const uint8_t mode_context[MAX_REF_FRAMES],
                              int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                              int this_mode,
                              const MV_REFERENCE_FRAME ref_frames[2]) {
  if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
      frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
      (ref_frames[1] == NONE ||
       frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
    int rfc = mode_context[ref_frames[0]];
    int c1 = cost_mv_ref(cpi, NEARMV, rfc);
    int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
    int c3 = cost_mv_ref(cpi, ZEROMV, rfc);

    if (this_mode == NEARMV) {
      if (c1 > c3) return 0;
    } else if (this_mode == NEARESTMV) {
      if (c2 > c3) return 0;
    } else {
      assert(this_mode == ZEROMV);
      if (ref_frames[1] == NONE) {
        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
          return 0;
      } else {
        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
             frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
             frame_mv[NEARMV][ref_frames[1]].as_int == 0))
          return 0;
      }
    }
  }
  return 1;
}

1703 static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
1704 int_mv *frame_mv, int mi_row, int mi_col,
1705 int_mv single_newmv[MAX_REF_FRAMES],
1707 const VP9_COMMON *const cm = &cpi->common;
1708 const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
1709 const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
1710 MACROBLOCKD *xd = &x->e_mbd;
1711 MODE_INFO *mi = xd->mi[0];
1712 const int refs[2] = { mi->ref_frame[0],
1713 mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1] };
1716 const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
1717 struct scale_factors sf;
1719 // Do joint motion search in compound mode to get more accurate mv.
1720 struct buf_2d backup_yv12[2][MAX_MB_PLANE];
1721 uint32_t last_besterr[2] = { UINT_MAX, UINT_MAX };
1722 const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
1723 vp9_get_scaled_ref_frame(cpi, mi->ref_frame[0]),
1724 vp9_get_scaled_ref_frame(cpi, mi->ref_frame[1])
1727 // Prediction buffer from second frame.
1728 #if CONFIG_VP9_HIGHBITDEPTH
1729 DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
1730 uint8_t *second_pred;
1732 DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
1733 #endif // CONFIG_VP9_HIGHBITDEPTH
1735 for (ref = 0; ref < 2; ++ref) {
1736 ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
1738 if (scaled_ref_frame[ref]) {
1740 // Swap out the reference frame for a version that's been scaled to
1741 // match the resolution of the current frame, allowing the existing
1742 // motion search code to be used without additional modifications.
1743 for (i = 0; i < MAX_MB_PLANE; i++)
1744 backup_yv12[ref][i] = xd->plane[i].pre[ref];
1745 vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
1749 frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
1752 // Since we have scaled the reference frames to match the size of the current
1753 // frame we must use a unit scaling factor during mode selection.
1754 #if CONFIG_VP9_HIGHBITDEPTH
1755 vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
1756 cm->height, cm->use_highbitdepth);
1758 vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
1760 #endif // CONFIG_VP9_HIGHBITDEPTH
1762 // Allow joint search multiple times iteratively for each reference frame
1763 // and break out of the search loop if it couldn't find a better mv.
1764 for (ite = 0; ite < 4; ite++) {
1765 struct buf_2d ref_yv12[2];
1766 uint32_t bestsme = UINT_MAX;
1767 int sadpb = x->sadperbit16;
1769 int search_range = 3;
1771 const MvLimits tmp_mv_limits = x->mv_limits;
1772 int id = ite % 2; // Even iterations search in the first reference frame,
1773 // odd iterations search in the second. The predictor
1774 // found for the 'other' reference frame is factored in.
1776 // Initialized here because of compiler problem in Visual Studio.
1777 ref_yv12[0] = xd->plane[0].pre[0];
1778 ref_yv12[1] = xd->plane[0].pre[1];
1780 // Get the prediction block from the 'other' reference frame.
1781 #if CONFIG_VP9_HIGHBITDEPTH
1782 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1783 second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
1784 vp9_highbd_build_inter_predictor(
1785 CONVERT_TO_SHORTPTR(ref_yv12[!id].buf), ref_yv12[!id].stride,
1786 second_pred_alloc_16, pw, &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0,
1787 kernel, MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
1789 second_pred = (uint8_t *)second_pred_alloc_16;
1790 vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
1791 second_pred, pw, &frame_mv[refs[!id]].as_mv,
1792 &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
1793 mi_col * MI_SIZE, mi_row * MI_SIZE);
1796 vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
1797 second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
1798 pw, ph, 0, kernel, MV_PRECISION_Q3,
1799 mi_col * MI_SIZE, mi_row * MI_SIZE);
1800 #endif // CONFIG_VP9_HIGHBITDEPTH
1802 // Do compound motion search on the current reference frame.
1803 if (id) xd->plane[0].pre[0] = ref_yv12[id];
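    // Constrain the MV search range around this frame's reference MV. Note
    // that this modifies x->mv_limits; the saved limits are restored below.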
1804 vp9_set_mv_search_range(&x->mv_limits, &ref_mv[id].as_mv);
// Use the mv result from the single-reference mode as the mv predictor.
1807 tmp_mv = frame_mv[refs[id]].as_mv;
1812 // Small-range full-pixel motion search.
1813 bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
1814 &cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
1816 if (bestsme < UINT_MAX)
1817 bestsme = vp9_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
1818 second_pred, &cpi->fn_ptr[bsize], 1);
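    // Restore the MV limits modified by vp9_set_mv_search_range() above.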
1820 x->mv_limits = tmp_mv_limits;
1822 if (bestsme < UINT_MAX) {
1823 uint32_t dis; /* TODO: use dis in distortion calculation later. */
1825 bestsme = cpi->find_fractional_mv_step(
1826 x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
1827 x->errorperbit, &cpi->fn_ptr[bsize], 0,
1828 cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
1829 &dis, &sse, second_pred, pw, ph);
1832 // Restore the pointer to the first (possibly scaled) prediction buffer.
1833 if (id) xd->plane[0].pre[0] = ref_yv12[0];
1835 if (bestsme < last_besterr[id]) {
1836 frame_mv[refs[id]].as_mv = tmp_mv;
1837 last_besterr[id] = bestsme;
1845 for (ref = 0; ref < 2; ++ref) {
1846 if (scaled_ref_frame[ref]) {
1847 // Restore the prediction frame pointers to their unscaled versions.
1849 for (i = 0; i < MAX_MB_PLANE; i++)
1850 xd->plane[i].pre[ref] = backup_yv12[ref][i];
1853 *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
1854 &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
1855 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
1859 static int64_t rd_pick_best_sub8x8_mode(
1860 VP9_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
1861 int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
1862 int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
1863 int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
1864 int filter_idx, int mi_row, int mi_col) {
1866 BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
1867 MACROBLOCKD *xd = &x->e_mbd;
1868 MODE_INFO *mi = xd->mi[0];
1870 int k, br = 0, idx, idy;
1871 int64_t bd = 0, block_sse = 0;
1872 PREDICTION_MODE this_mode;
1873 VP9_COMMON *cm = &cpi->common;
1874 struct macroblock_plane *const p = &x->plane[0];
1875 struct macroblockd_plane *const pd = &xd->plane[0];
1876 const int label_count = 4;
1877 int64_t this_segment_rd = 0;
1878 int label_mv_thresh;
1879 int segmentyrate = 0;
1880 const BLOCK_SIZE bsize = mi->sb_type;
1881 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1882 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1883 ENTROPY_CONTEXT t_above[2], t_left[2];
1884 int subpelmv = 1, have_ref = 0;
1885 SPEED_FEATURES *const sf = &cpi->sf;
1886 const int has_second_rf = has_second_ref(mi);
1887 const int inter_mode_mask = sf->inter_mode_mask[bsize];
1888 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1892 bsi->segment_rd = best_rd;
1893 bsi->ref_mv[0] = best_ref_mv;
1894 bsi->ref_mv[1] = second_best_ref_mv;
1895 bsi->mvp.as_int = best_ref_mv->as_int;
1896 bsi->mvthresh = mvthresh;
1898 for (i = 0; i < 4; i++) bsi->modes[i] = ZEROMV;
1900 memcpy(t_above, pd->above_context, sizeof(t_above));
1901 memcpy(t_left, pd->left_context, sizeof(t_left));
// A scale factor of 64 makes this threshold so large that we very rarely
// check mvs on segments; setting it to 1 makes the mv threshold roughly
// equal to what it is for macroblocks.
1907 label_mv_thresh = 1 * bsi->mvthresh / label_count;
1909 // Segmentation method overheads
1910 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
1911 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
// TODO(jingning,rbultje): Rewrite the rate-distortion optimization
// loop for 4x4/4x8/8x4 block coding; to be replaced with a new rd loop.
1914 int_mv mode_mv[MB_MODE_COUNT][2];
1915 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
1916 PREDICTION_MODE mode_selected = ZEROMV;
1917 int64_t best_rd = INT64_MAX;
1918 const int i = idy * 2 + idx;
1921 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
1922 const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
1923 frame_mv[ZEROMV][frame].as_int = 0;
1924 vp9_append_sub8x8_mvs_for_idx(
1925 cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
1926 &frame_mv[NEARMV][frame], mbmi_ext->mode_context);
1929 // search for the best motion vector on this segment
1930 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
1931 const struct buf_2d orig_src = x->plane[0].src;
1932 struct buf_2d orig_pre[2];
1934 mode_idx = INTER_OFFSET(this_mode);
1935 bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
1936 if (!(inter_mode_mask & (1 << this_mode))) continue;
1938 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
1939 this_mode, mi->ref_frame))
1942 memcpy(orig_pre, pd->pre, sizeof(orig_pre));
1943 memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
1944 sizeof(bsi->rdstat[i][mode_idx].ta));
1945 memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
1946 sizeof(bsi->rdstat[i][mode_idx].tl));
1948 // motion search for newmv (single predictor case only)
1949 if (!has_second_rf && this_mode == NEWMV &&
1950 seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV) {
1951 MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
1953 uint32_t bestsme = UINT_MAX;
1954 int sadpb = x->sadperbit4;
1958 const MvLimits tmp_mv_limits = x->mv_limits;
/* Is the best so far sufficiently good that we can't justify
 * a new motion search? */
1962 if (best_rd < label_mv_thresh) break;
1964 if (cpi->oxcf.mode != BEST) {
1965 // use previous block's result as next block's MV predictor.
1967 bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
1968 if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
1972 max_mv = x->max_mv_context[mi->ref_frame[0]];
1975 VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
1977 if (sf->mv.auto_mv_step_size && cm->show_frame) {
// Take a weighted average of the step_params based on the last frame's
// max mv magnitude and the best ref mvs of the current block for
// the given reference.
1982 (vp9_init_search_range(max_mv) + cpi->mv_step_param) / 2;
1984 step_param = cpi->mv_step_param;
1987 mvp_full.row = bsi->mvp.as_mv.row >> 3;
1988 mvp_full.col = bsi->mvp.as_mv.col >> 3;
1990 if (sf->adaptive_motion_search) {
1991 mvp_full.row = x->pred_mv[mi->ref_frame[0]].row >> 3;
1992 mvp_full.col = x->pred_mv[mi->ref_frame[0]].col >> 3;
1993 step_param = VPXMAX(step_param, 8);
1996 // adjust src pointer for this block
1999 vp9_set_mv_search_range(&x->mv_limits, &bsi->ref_mv[0]->as_mv);
2001 bestsme = vp9_full_pixel_search(
2002 cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method,
2004 sf->mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
2005 &bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);
2007 x->mv_limits = tmp_mv_limits;
2009 if (bestsme < UINT_MAX) {
2010 uint32_t distortion;
2011 cpi->find_fractional_mv_step(
2012 x, new_mv, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
2013 x->errorperbit, &cpi->fn_ptr[bsize], sf->mv.subpel_force_stop,
2014 sf->mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
2015 x->nmvjointcost, x->mvcost, &distortion,
2016 &x->pred_sse[mi->ref_frame[0]], NULL, 0, 0);
2018 // save motion search result for use in compound prediction
2019 seg_mvs[i][mi->ref_frame[0]].as_mv = *new_mv;
2022 x->pred_mv[mi->ref_frame[0]] = *new_mv;
2024 // restore src pointers
2025 mi_buf_restore(x, orig_src, orig_pre);
2028 if (has_second_rf) {
2029 if (seg_mvs[i][mi->ref_frame[1]].as_int == INVALID_MV ||
2030 seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV)
2034 if (has_second_rf && this_mode == NEWMV &&
2035 mi->interp_filter == EIGHTTAP) {
2036 // adjust src pointers
2038 if (sf->comp_inter_joint_search_thresh <= bsize) {
2040 joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
2041 mi_col, seg_mvs[i], &rate_mv);
2042 seg_mvs[i][mi->ref_frame[0]].as_int =
2043 frame_mv[this_mode][mi->ref_frame[0]].as_int;
2044 seg_mvs[i][mi->ref_frame[1]].as_int =
2045 frame_mv[this_mode][mi->ref_frame[1]].as_int;
2047 // restore src pointers
2048 mi_buf_restore(x, orig_src, orig_pre);
2051 bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs(
2052 cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv, seg_mvs[i],
2053 bsi->ref_mv, x->nmvjointcost, x->mvcost);
2055 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
2056 bsi->rdstat[i][mode_idx].mvs[ref].as_int =
2057 mode_mv[this_mode][ref].as_int;
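        // For 8x4 and 4x8 partitions a label spans two 4x4 units, so
        // replicate the MV into each 4x4 position the label covers.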
2058 if (num_4x4_blocks_wide > 1)
2059 bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int =
2060 mode_mv[this_mode][ref].as_int;
2061 if (num_4x4_blocks_high > 1)
2062 bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
2063 mode_mv[this_mode][ref].as_int;
2066 // Trap vectors that reach beyond the UMV borders
2067 if (mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][0].as_mv) ||
2069 mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][1].as_mv)))
2072 if (filter_idx > 0) {
2073 BEST_SEG_INFO *ref_bsi = bsi_buf;
2077 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
2078 subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
2079 have_ref &= mode_mv[this_mode][ref].as_int ==
2080 ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
2083 if (filter_idx > 1 && !subpelmv && !have_ref) {
2084 ref_bsi = bsi_buf + 1;
2086 for (ref = 0; ref < 1 + has_second_rf; ++ref)
2087 have_ref &= mode_mv[this_mode][ref].as_int ==
2088 ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
2091 if (!subpelmv && have_ref &&
2092 ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
2093 memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
2094 sizeof(SEG_RDSTAT));
2095 if (num_4x4_blocks_wide > 1)
2096 bsi->rdstat[i + 1][mode_idx].eobs =
2097 ref_bsi->rdstat[i + 1][mode_idx].eobs;
2098 if (num_4x4_blocks_high > 1)
2099 bsi->rdstat[i + 2][mode_idx].eobs =
2100 ref_bsi->rdstat[i + 2][mode_idx].eobs;
2102 if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
2103 mode_selected = this_mode;
2104 best_rd = bsi->rdstat[i][mode_idx].brdcost;
2110 bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment(
2111 cpi, x, bsi->segment_rd - this_segment_rd, i,
2112 &bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist,
2113 &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta,
2114 bsi->rdstat[i][mode_idx].tl, mi_row, mi_col);
2115 if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
2116 bsi->rdstat[i][mode_idx].brdcost +=
2117 RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0);
2118 bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
2119 bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
2120 if (num_4x4_blocks_wide > 1)
2121 bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1];
2122 if (num_4x4_blocks_high > 1)
2123 bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2];
2126 if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
2127 mode_selected = this_mode;
2128 best_rd = bsi->rdstat[i][mode_idx].brdcost;
} /* for each 4x4 mode */
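      // No mode produced a finite rd cost for this label, so the whole
      // segmentation is unusable: invalidate the remaining labels and abort.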
2132 if (best_rd == INT64_MAX) {
2134 for (iy = i + 1; iy < 4; ++iy)
2135 for (midx = 0; midx < INTER_MODES; ++midx)
2136 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2137 bsi->segment_rd = INT64_MAX;
2141 mode_idx = INTER_OFFSET(mode_selected);
2142 memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
2143 memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
2145 set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
2146 frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
2149 br += bsi->rdstat[i][mode_idx].brate;
2150 bd += bsi->rdstat[i][mode_idx].bdist;
2151 block_sse += bsi->rdstat[i][mode_idx].bsse;
2152 segmentyrate += bsi->rdstat[i][mode_idx].byrate;
2153 this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;
2155 if (this_segment_rd > bsi->segment_rd) {
2157 for (iy = i + 1; iy < 4; ++iy)
2158 for (midx = 0; midx < INTER_MODES; ++midx)
2159 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2160 bsi->segment_rd = INT64_MAX;
2164 } /* for each label */
2168 bsi->segment_yrate = segmentyrate;
2169 bsi->segment_rd = this_segment_rd;
2170 bsi->sse = block_sse;
2172 // update the coding decisions
2173 for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode;
2175 if (bsi->segment_rd > best_rd) return INT64_MAX;
2176 /* set it to the best */
2177 for (i = 0; i < 4; i++) {
2178 mode_idx = INTER_OFFSET(bsi->modes[i]);
2179 mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
2180 if (has_second_ref(mi))
2181 mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
2182 x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
2183 mi->bmi[i].as_mode = bsi->modes[i];
/* used to set mbmi->mv.as_int */
2189 *returntotrate = bsi->r;
2190 *returndistortion = bsi->d;
2191 *returnyrate = bsi->segment_yrate;
2192 *skippable = vp9_is_skippable_in_plane(x, BLOCK_8X8, 0);
2194 mi->mode = bsi->modes[3];
2196 return bsi->segment_rd;
2199 static void estimate_ref_frame_costs(const VP9_COMMON *cm,
2200 const MACROBLOCKD *xd, int segment_id,
2201 unsigned int *ref_costs_single,
2202 unsigned int *ref_costs_comp,
2203 vpx_prob *comp_mode_p) {
2204 int seg_ref_active =
2205 segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
2206 if (seg_ref_active) {
2207 memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
2208 memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
2211 vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
2212 vpx_prob comp_inter_p = 128;
2214 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
2215 comp_inter_p = vp9_get_reference_mode_prob(cm, xd);
2216 *comp_mode_p = comp_inter_p;
2221 ref_costs_single[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
2223 if (cm->reference_mode != COMPOUND_REFERENCE) {
2224 vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
2225 vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);
2226 unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
2228 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2229 base_cost += vp9_cost_bit(comp_inter_p, 0);
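    // Cost the single-reference tree: the first bit separates LAST_FRAME from
    // {GOLDEN, ALTREF}, the second separates GOLDEN_FRAME from ALTREF_FRAME.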
2231 ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
2232 ref_costs_single[ALTREF_FRAME] = base_cost;
2233 ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
2234 ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
2235 ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
2236 ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
2237 ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
2239 ref_costs_single[LAST_FRAME] = 512;
2240 ref_costs_single[GOLDEN_FRAME] = 512;
2241 ref_costs_single[ALTREF_FRAME] = 512;
2243 if (cm->reference_mode != SINGLE_REFERENCE) {
2244 vpx_prob ref_comp_p = vp9_get_pred_prob_comp_ref_p(cm, xd);
2245 unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
2247 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2248 base_cost += vp9_cost_bit(comp_inter_p, 1);
2250 ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
2251 ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1);
2253 ref_costs_comp[LAST_FRAME] = 512;
2254 ref_costs_comp[GOLDEN_FRAME] = 512;
2259 static void store_coding_context(
2260 MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
2261 int64_t comp_pred_diff[REFERENCE_MODES],
2262 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], int skippable) {
2263 MACROBLOCKD *const xd = &x->e_mbd;
2265 // Take a snapshot of the coding context so it can be
2266 // restored if we decide to encode this way
2267 ctx->skip = x->skip;
2268 ctx->skippable = skippable;
2269 ctx->best_mode_index = mode_index;
2270 ctx->mic = *xd->mi[0];
2271 ctx->mbmi_ext = *x->mbmi_ext;
2272 ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
2273 ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
2274 ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
2276 memcpy(ctx->best_filter_diff, best_filter_diff,
2277 sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
2280 static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
2281 MV_REFERENCE_FRAME ref_frame,
2282 BLOCK_SIZE block_size, int mi_row, int mi_col,
2283 int_mv frame_nearest_mv[MAX_REF_FRAMES],
2284 int_mv frame_near_mv[MAX_REF_FRAMES],
2285 struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
2286 const VP9_COMMON *cm = &cpi->common;
2287 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
2288 MACROBLOCKD *const xd = &x->e_mbd;
2289 MODE_INFO *const mi = xd->mi[0];
2290 int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
2291 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
2292 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2294 assert(yv12 != NULL);
2296 // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
2297 // use the UV scaling factors.
2298 vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
2300 // Gets an initial list of candidate vectors from neighbours and orders them
2301 vp9_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
2302 mbmi_ext->mode_context);
2304 // Candidate refinement carried out at encoder and decoder
2305 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
2306 &frame_nearest_mv[ref_frame],
2307 &frame_near_mv[ref_frame]);
2309 // Further refinement that is encode side only to test the top few candidates
2310 // in full and choose the best as the centre point for subsequent searches.
2311 // The current implementation doesn't support scaling.
2312 if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8)
2313 vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
2317 static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
int mi_row, int mi_col, int_mv *tmp_mv,
                                 int *rate_mv) {
2320 MACROBLOCKD *xd = &x->e_mbd;
2321 const VP9_COMMON *cm = &cpi->common;
2322 MODE_INFO *mi = xd->mi[0];
2323 struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
2324 int bestsme = INT_MAX;
2326 int sadpb = x->sadperbit16;
2328 int ref = mi->ref_frame[0];
2329 MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2330 const MvLimits tmp_mv_limits = x->mv_limits;
2333 const YV12_BUFFER_CONFIG *scaled_ref_frame =
2334 vp9_get_scaled_ref_frame(cpi, ref);
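  // Full-pel MV predictor candidates: the two best reference MVs and the MV
  // previously found for this reference (x->pred_mv).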
2337 pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2338 pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
2339 pred_mv[2] = x->pred_mv[ref];
2341 if (scaled_ref_frame) {
2343 // Swap out the reference frame for a version that's been scaled to
2344 // match the resolution of the current frame, allowing the existing
2345 // motion search code to be used without additional modifications.
2346 for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
2348 vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
2351 // Work out the size of the first step in the mv step search.
// 0 here is the maximum-length first step; 1 is the maximum >> 1, etc.
2353 if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
// Take a weighted average of the step_params based on the last frame's
// max mv magnitude and that based on the best ref mvs of the current
// block for the given reference.
2358 (vp9_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
2361 step_param = cpi->mv_step_param;
2364 if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) {
2366 2 * (b_width_log2_lookup[BLOCK_64X64] -
2367 VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
2368 step_param = VPXMAX(step_param, boffset);
2371 if (cpi->sf.adaptive_motion_search) {
2372 int bwl = b_width_log2_lookup[bsize];
2373 int bhl = b_height_log2_lookup[bsize];
2374 int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
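    // tlevel is the prediction SAD normalized by block size; a small value
    // means the predictor is already accurate, so the search can start with
    // a larger step_param (i.e. a reduced search range).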
2376 if (tlevel < 5) step_param += 2;
// pred_mv_sad is not set up for dynamically scaled frames.
2379 if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
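      // If another reference predicts this block much better (8x lower SAD),
      // this reference is a poor choice: skip the NEWMV search for it and
      // signal that by returning INVALID_MV.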
2381 for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
2382 if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
2383 x->pred_mv[ref].row = 0;
2384 x->pred_mv[ref].col = 0;
2385 tmp_mv->as_int = INVALID_MV;
2387 if (scaled_ref_frame) {
2389 for (i = 0; i < MAX_MB_PLANE; ++i)
2390 xd->plane[i].pre[0] = backup_yv12[i];
2398 // Note: MV limits are modified here. Always restore the original values
2399 // after full-pixel motion search.
2400 vp9_set_mv_search_range(&x->mv_limits, &ref_mv);
2402 mvp_full = pred_mv[x->mv_best_ref_index[ref]];
2407 bestsme = vp9_full_pixel_search(
2408 cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
2409 cond_cost_list(cpi, cost_list), &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
2411 if (cpi->sf.enhanced_full_pixel_motion_search) {
2412 if (x->mv_best_ref_index[ref] == 2) {
2413 const int diff_row = ((int)pred_mv[0].row - pred_mv[2].row) >> 3;
2414 const int diff_col = ((int)pred_mv[0].col - pred_mv[2].col) >> 3;
2415 const int diff_sse = diff_row * diff_row + diff_col * diff_col;
// If pred_mv[0] and pred_mv[2] are very different, also search around
// pred_mv[0].
2418 if (diff_sse > 10) {
2421 mvp_full = pred_mv[0];
2424 this_me = vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param,
2425 cpi->sf.mv.search_method, sadpb,
2426 cond_cost_list(cpi, cost_list), &ref_mv,
2427 &this_mv, INT_MAX, 1);
2428 if (this_me < bestsme) {
2429 tmp_mv->as_mv = this_mv;
2436 x->mv_limits = tmp_mv_limits;
2438 if (bestsme < INT_MAX) {
2439 uint32_t dis; /* TODO: use dis in distortion calculation later. */
2440 cpi->find_fractional_mv_step(
2441 x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
2442 &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
2443 cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
2444 x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
2446 *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
2447 x->mvcost, MV_COST_WEIGHT);
2449 x->pred_mv[ref] = tmp_mv->as_mv;
2451 if (scaled_ref_frame) {
2453 for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
2457 static INLINE void restore_dst_buf(MACROBLOCKD *xd,
2458 uint8_t *orig_dst[MAX_MB_PLANE],
2459 int orig_dst_stride[MAX_MB_PLANE]) {
2461 for (i = 0; i < MAX_MB_PLANE; i++) {
2462 xd->plane[i].dst.buf = orig_dst[i];
2463 xd->plane[i].dst.stride = orig_dst_stride[i];
// In some situations we want to discount the apparent cost of a new motion
// vector. Where there is a subtle motion field, and especially where there is
// low spatial complexity, it can be hard to cover the cost of a new motion
// vector in a single block, even if that motion vector reduces distortion.
// However, once established, that vector may be usable through the nearest and
// near mv modes to reduce distortion in subsequent blocks and also improve
// visual quality.
2474 static int discount_newmv_test(const VP9_COMP *cpi, int this_mode,
2476 int_mv (*mode_mv)[MAX_REF_FRAMES],
2478 return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) &&
2479 (this_mv.as_int != 0) &&
2480 ((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
2481 (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
2482 ((mode_mv[NEARMV][ref_frame].as_int == 0) ||
2483 (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
2486 static int64_t handle_inter_mode(
2487 VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
2488 int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
2489 int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
2490 int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
2491 INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
2492 int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
2493 const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
2494 VP9_COMMON *cm = &cpi->common;
2495 MACROBLOCKD *xd = &x->e_mbd;
2496 MODE_INFO *mi = xd->mi[0];
2497 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2498 const int is_comp_pred = has_second_ref(mi);
2499 const int this_mode = mi->mode;
2500 int_mv *frame_mv = mode_mv[this_mode];
2502 int refs[2] = { mi->ref_frame[0],
2503 (mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]) };
2505 #if CONFIG_VP9_HIGHBITDEPTH
2506 DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
2509 DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
2510 #endif // CONFIG_VP9_HIGHBITDEPTH
2511 int pred_exists = 0;
2513 int64_t rd, tmp_rd, best_rd = INT64_MAX;
2514 int best_needs_copy = 0;
2515 uint8_t *orig_dst[MAX_MB_PLANE];
2516 int orig_dst_stride[MAX_MB_PLANE];
2518 INTERP_FILTER best_filter = SWITCHABLE;
2519 uint8_t skip_txfm[MAX_MB_PLANE << 2] = { 0 };
2520 int64_t bsse[MAX_MB_PLANE << 2] = { 0 };
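  // When chessboard filter search is enabled, the prediction filter search is
  // run only on alternating blocks (by position and frame index) so that its
  // cost is spread across the frame.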
2522 int bsl = mi_width_log2_lookup[bsize];
2523 int pred_filter_search =
2524 cpi->sf.cb_pred_filter_search
2525 ? (((mi_row + mi_col) >> bsl) +
2526 get_chessboard_index(cm->current_video_frame)) &
2530 int skip_txfm_sb = 0;
2531 int64_t skip_sse_sb = INT64_MAX;
2532 int64_t distortion_y = 0, distortion_uv = 0;
2534 #if CONFIG_VP9_HIGHBITDEPTH
2535 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2536 tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
2538 tmp_buf = (uint8_t *)tmp_buf16;
2540 #endif // CONFIG_VP9_HIGHBITDEPTH
2542 if (pred_filter_search) {
2543 INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
2544 if (xd->above_mi && is_inter_block(xd->above_mi))
2545 af = xd->above_mi->interp_filter;
2546 if (xd->left_mi && is_inter_block(xd->left_mi))
2547 lf = xd->left_mi->interp_filter;
2549 if ((this_mode != NEWMV) || (af == lf)) best_filter = af;
2553 if (frame_mv[refs[0]].as_int == INVALID_MV ||
2554 frame_mv[refs[1]].as_int == INVALID_MV)
2557 if (cpi->sf.adaptive_mode_search) {
2558 if (single_filter[this_mode][refs[0]] ==
2559 single_filter[this_mode][refs[1]])
2560 best_filter = single_filter[this_mode][refs[0]];
2564 if (this_mode == NEWMV) {
2567 // Initialize mv using single prediction mode result.
2568 frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
2569 frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
2571 if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
2572 joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
2573 single_newmv, &rate_mv);
2575 rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
2576 &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
2577 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2578 rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
2579 &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
2580 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2585 single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
2586 if (tmp_mv.as_int == INVALID_MV) return INT64_MAX;
2588 frame_mv[refs[0]].as_int = xd->mi[0]->bmi[0].as_mv[0].as_int =
2590 single_newmv[refs[0]].as_int = tmp_mv.as_int;
2592 // Estimate the rate implications of a new mv but discount this
2593 // under certain circumstances where we want to help initiate a weak
2594 // motion field, where the distortion gain for a single block may not
2595 // be enough to overcome the cost of a new mv.
2596 if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
2597 *rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
2604 for (i = 0; i < is_comp_pred + 1; ++i) {
2605 cur_mv[i] = frame_mv[refs[i]];
// Clip "next_nearest" so that it does not extend too far out of the image.
2607 if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd);
2609 if (mv_check_bounds(&x->mv_limits, &cur_mv[i].as_mv)) return INT64_MAX;
2610 mi->mv[i].as_int = cur_mv[i].as_int;
// Do the first prediction into the destination buffer. Do the next
// prediction into a temporary buffer. Then keep track of which one
// of these currently holds the best predictor, and use the other
// one for future predictions. In the end, copy from tmp_buf to
// dst if necessary.
2618 for (i = 0; i < MAX_MB_PLANE; i++) {
2619 orig_dst[i] = xd->plane[i].dst.buf;
2620 orig_dst_stride[i] = xd->plane[i].dst.stride;
// We don't include the cost of the second reference here, because there
// are only two options: Last/ARF or Golden/ARF; the second one is always
// known to be ARF.
// Under some circumstances we discount the cost of the new mv mode to
// encourage initiation of a motion field.
2629 if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
2632 VPXMIN(cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]),
2633 cost_mv_ref(cpi, NEARESTMV, mbmi_ext->mode_context[refs[0]]));
2635 *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
2638 if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
2639 mi->mode != NEARESTMV)
// Are all MVs integer pel for Y and UV?
2644 intpel_mv = !mv_has_subpel(&mi->mv[0].as_mv);
2645 if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mi->mv[1].as_mv);
// Search for the best switchable filter by checking the variance of the
// prediction error, irrespective of whether the filter will be used.
2649 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
2651 if (cm->interp_filter != BILINEAR) {
2652 if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
2653 best_filter = EIGHTTAP;
2654 } else if (best_filter == SWITCHABLE) {
2656 int tmp_rate_sum = 0;
2657 int64_t tmp_dist_sum = 0;
2659 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
2662 int tmp_skip_sb = 0;
2663 int64_t tmp_skip_sse = INT64_MAX;
2665 mi->interp_filter = i;
2666 rs = vp9_get_switchable_rate(cpi, xd);
2667 rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
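        // For whole-pel MVs the filtered prediction is identical for every
        // filter, so reuse the rate and distortion modeled for the first one.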
2669 if (i > 0 && intpel_mv) {
2670 rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
2671 filter_cache[i] = rd;
2672 filter_cache[SWITCHABLE_FILTERS] =
2673 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2674 if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
2675 *mask_filter = VPXMAX(*mask_filter, rd);
2678 int64_t dist_sum = 0;
2679 if (i > 0 && cpi->sf.adaptive_interp_filter_search &&
2680 (cpi->sf.interp_filter_search_mask & (1 << i))) {
2682 dist_sum = INT64_MAX;
2686 if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) ||
2687 (cm->interp_filter != SWITCHABLE &&
2688 (cm->interp_filter == mi->interp_filter ||
2689 (i == 0 && intpel_mv)))) {
2690 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2692 for (j = 0; j < MAX_MB_PLANE; j++) {
2693 xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
2694 xd->plane[j].dst.stride = 64;
2697 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2698 model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
2701 rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
2702 filter_cache[i] = rd;
2703 filter_cache[SWITCHABLE_FILTERS] =
2704 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2705 if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
2706 *mask_filter = VPXMAX(*mask_filter, rd);
2708 if (i == 0 && intpel_mv) {
2709 tmp_rate_sum = rate_sum;
2710 tmp_dist_sum = dist_sum;
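        // rd breakout: if even the modeled rd for the first filter is far
        // above the best rd so far, stop evaluating this mode.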
2714 if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2715 if (rd / 2 > ref_best_rd) {
2716 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2720 newbest = i == 0 || rd < best_rd;
2724 best_filter = mi->interp_filter;
2725 if (cm->interp_filter == SWITCHABLE && i && !intpel_mv)
2726 best_needs_copy = !best_needs_copy;
2729 if ((cm->interp_filter == SWITCHABLE && newbest) ||
2730 (cm->interp_filter != SWITCHABLE &&
2731 cm->interp_filter == mi->interp_filter)) {
2735 skip_txfm_sb = tmp_skip_sb;
2736 skip_sse_sb = tmp_skip_sse;
2737 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2738 memcpy(bsse, x->bsse, sizeof(bsse));
2741 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2744 // Set the appropriate filter
2746 cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
2747 rs = cm->interp_filter == SWITCHABLE ? vp9_get_switchable_rate(cpi, xd) : 0;
2750 if (best_needs_copy) {
// Again, temporarily set the buffers to local memory to prevent a memcpy.
2752 for (i = 0; i < MAX_MB_PLANE; i++) {
2753 xd->plane[i].dst.buf = tmp_buf + i * 64 * 64;
2754 xd->plane[i].dst.stride = 64;
2757 rd = tmp_rd + RDCOST(x->rdmult, x->rddiv, rs, 0);
// Handles the special case when a filter that is not in the
// switchable list (e.g. bilinear) is indicated at the frame level, or
// the skip condition holds.
2764 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2765 model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
2767 rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
2768 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2769 memcpy(bsse, x->bsse, sizeof(bsse));
2772 if (!is_comp_pred) single_filter[this_mode][refs[0]] = mi->interp_filter;
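  // If both single-reference predictions were fully skippable, assume the
  // compound prediction is too and mark its transform blocks as skippable.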
2774 if (cpi->sf.adaptive_mode_search)
2776 if (single_skippable[this_mode][refs[0]] &&
2777 single_skippable[this_mode][refs[1]])
2778 memset(skip_txfm, SKIP_TXFM_AC_DC, sizeof(skip_txfm));
2780 if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
// If the current pred_error modeled rd is substantially more than the
// best so far, do not bother doing a full rd.
2783 if (rd / 2 > ref_best_rd) {
2784 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2789 if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
2791 memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
2792 memcpy(x->bsse, bsse, sizeof(bsse));
2794 if (!skip_txfm_sb) {
2795 int skippable_y, skippable_uv;
2796 int64_t sseuv = INT64_MAX;
2797 int64_t rdcosty = INT64_MAX;
2799 // Y cost and distortion
2800 vp9_subtract_plane(x, bsize, 0);
2801 super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
2804 if (*rate_y == INT_MAX) {
2806 *distortion = INT64_MAX;
2807 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2812 *distortion += distortion_y;
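    // Budget for the UV search: the cheaper of the coded Y cost and the
    // skip (SSE-only) alternative.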
2814 rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
2815 rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
2817 if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
2818 &sseuv, bsize, ref_best_rd - rdcosty)) {
2820 *distortion = INT64_MAX;
2821 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2827 *distortion += distortion_uv;
2828 *skippable = skippable_y && skippable_uv;
// The cost of the skip bit needs to be added.
2834 *rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
2836 *distortion = skip_sse_sb;
2839 if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
2841 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2842 return 0; // The rate-distortion cost will be re-calculated by caller.
2845 void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                               int64_t best_rd) {
2848 VP9_COMMON *const cm = &cpi->common;
2849 MACROBLOCKD *const xd = &x->e_mbd;
2850 struct macroblockd_plane *const pd = xd->plane;
2851 int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
2852 int y_skip = 0, uv_skip = 0;
2853 int64_t dist_y = 0, dist_uv = 0;
2854 TX_SIZE max_uv_tx_size;
2857 xd->mi[0]->ref_frame[0] = INTRA_FRAME;
2858 xd->mi[0]->ref_frame[1] = NONE;
2859 // Initialize interp_filter here so we do not have to check for inter block
2860 // modes in get_pred_context_switchable_interp()
2861 xd->mi[0]->interp_filter = SWITCHABLE_FILTERS;
2863 if (bsize >= BLOCK_8X8) {
2864 if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
2865 &y_skip, bsize, best_rd) >= best_rd) {
2866 rd_cost->rate = INT_MAX;
2871 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
2872 &dist_y, best_rd) >= best_rd) {
2873 rd_cost->rate = INT_MAX;
2877 max_uv_tx_size = uv_txsize_lookup[bsize][xd->mi[0]->tx_size]
2878 [pd[1].subsampling_x][pd[1].subsampling_y];
2879 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv,
2880 &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
2882 if (y_skip && uv_skip) {
2883 rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
2884 vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
2885 rd_cost->dist = dist_y + dist_uv;
2888 rate_y + rate_uv + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
2889 rd_cost->dist = dist_y + dist_uv;
2892 ctx->mic = *xd->mi[0];
2893 ctx->mbmi_ext = *x->mbmi_ext;
2894 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
2897 // This function is designed to apply a bias or adjustment to an rd value based
2898 // on the relative variance of the source and reconstruction.
2899 #define VERY_LOW_VAR_THRESH 2
2900 #define LOW_VAR_THRESH 5
2901 #define VAR_MULT 100
2902 static unsigned int max_var_adjust[VP9E_CONTENT_INVALID] = { 16, 16, 100 };
2904 static void rd_variance_adjustment(VP9_COMP *cpi, MACROBLOCK *x,
2905 BLOCK_SIZE bsize, int64_t *this_rd,
2906 MV_REFERENCE_FRAME ref_frame,
2907 unsigned int source_variance) {
2908 MACROBLOCKD *const xd = &x->e_mbd;
2909 unsigned int rec_variance;
2910 unsigned int src_variance;
2911 unsigned int src_rec_min;
2912 unsigned int absvar_diff = 0;
2913 unsigned int var_factor = 0;
2914 unsigned int adj_max;
2915 vp9e_tune_content content_type = cpi->oxcf.content;
2917 if (*this_rd == INT64_MAX) return;
2919 #if CONFIG_VP9_HIGHBITDEPTH
2920 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2921 if (source_variance > 0) {
2922 rec_variance = vp9_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
2924 src_variance = source_variance;
2927 vp9_high_get_sby_variance(cpi, &xd->plane[0].dst, bsize, xd->bd);
2929 vp9_high_get_sby_variance(cpi, &x->plane[0].src, bsize, xd->bd);
2932 if (source_variance > 0) {
2934 vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2935 src_variance = source_variance;
2937 rec_variance = vp9_get_sby_variance(cpi, &xd->plane[0].dst, bsize);
2938 src_variance = vp9_get_sby_variance(cpi, &x->plane[0].src, bsize);
2942 if (source_variance > 0) {
2943 rec_variance = vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2944 src_variance = source_variance;
2946 rec_variance = vp9_get_sby_variance(cpi, &xd->plane[0].dst, bsize);
2947 src_variance = vp9_get_sby_variance(cpi, &x->plane[0].src, bsize);
2949 #endif // CONFIG_VP9_HIGHBITDEPTH
// Take the lower of the source (raw per-pixel value) and recon variance.
// Note that if the source per-pixel variance is 0, the recon value here
// will not be per pixel (see above) and so will likely be much larger.
2954 src_rec_min = VPXMIN(source_variance, rec_variance);
2956 if (src_rec_min > LOW_VAR_THRESH) return;
2958 absvar_diff = (src_variance > rec_variance) ? (src_variance - rec_variance)
2959 : (rec_variance - src_variance);
2961 adj_max = max_var_adjust[content_type];
2964 (unsigned int)((int64_t)VAR_MULT * absvar_diff) / VPXMAX(1, src_variance);
2965 var_factor = VPXMIN(adj_max, var_factor);
2967 *this_rd += (*this_rd * var_factor) / 100;
2969 if (content_type == VP9E_CONTENT_FILM) {
2970 if (src_rec_min <= VERY_LOW_VAR_THRESH) {
2971 if (ref_frame == INTRA_FRAME) *this_rd *= 2;
2972 if (bsize > 6) *this_rd *= 2;
// Do we have an internal image edge (e.g. formatting bars)?
2978 int vp9_internal_image_edge(VP9_COMP *cpi) {
2979 return (cpi->oxcf.pass == 2) &&
2980 ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
2981 (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
2984 // Checks to see if a super block is on a horizontal image edge.
2985 // In most cases this is the "real" edge unless there are formatting
2986 // bars embedded in the stream.
2987 int vp9_active_h_edge(VP9_COMP *cpi, int mi_row, int mi_step) {
int top_edge = 0;
  int bottom_edge = cpi->common.mi_rows;
2990 int is_active_h_edge = 0;
// For two-pass encoding, account for any formatting bars detected.
2993 if (cpi->oxcf.pass == 2) {
2994 TWO_PASS *twopass = &cpi->twopass;
// The inactive region is specified in MBs, not mi units.
// The image edge is in the following MB row.
2998 top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
3000 bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
3001 bottom_edge = VPXMAX(top_edge, bottom_edge);
3004 if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
3005 ((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) {
3006 is_active_h_edge = 1;
3008 return is_active_h_edge;
3011 // Checks to see if a super block is on a vertical image edge.
3012 // In most cases this is the "real" edge unless there are formatting
3013 // bars embedded in the stream.
3014 int vp9_active_v_edge(VP9_COMP *cpi, int mi_col, int mi_step) {
int left_edge = 0;
  int right_edge = cpi->common.mi_cols;
3017 int is_active_v_edge = 0;
// For two-pass encoding, account for any formatting bars detected.
3020 if (cpi->oxcf.pass == 2) {
3021 TWO_PASS *twopass = &cpi->twopass;
// The inactive region is specified in MBs, not mi units.
// The image edge is in the following MB column.
3025 left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
3027 right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
3028 right_edge = VPXMAX(left_edge, right_edge);
3031 if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
3032 ((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) {
3033 is_active_v_edge = 1;
3035 return is_active_v_edge;
3038 // Checks to see if a super block is at the edge of the active image.
3039 // In most cases this is the "real" edge unless there are formatting
3040 // bars embedded in the stream.
3041 int vp9_active_edge_sb(VP9_COMP *cpi, int mi_row, int mi_col) {
3042 return vp9_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
3043 vp9_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
3046 void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data,
3047 MACROBLOCK *x, int mi_row, int mi_col,
3048 RD_COST *rd_cost, BLOCK_SIZE bsize,
3049 PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) {
3050 VP9_COMMON *const cm = &cpi->common;
3051 TileInfo *const tile_info = &tile_data->tile_info;
3052 RD_OPT *const rd_opt = &cpi->rd;
3053 SPEED_FEATURES *const sf = &cpi->sf;
3054 MACROBLOCKD *const xd = &x->e_mbd;
3055 MODE_INFO *const mi = xd->mi[0];
3056 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
3057 const struct segmentation *const seg = &cm->seg;
3058 PREDICTION_MODE this_mode;
3059 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
3060 unsigned char segment_id = mi->segment_id;
3061 int comp_pred, i, k;
3062 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
3063 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
3064 int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
3065 INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES];
3066 int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
3067 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
3069 int64_t best_rd = best_rd_so_far;
3070 int64_t best_pred_diff[REFERENCE_MODES];
3071 int64_t best_pred_rd[REFERENCE_MODES];
3072 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
3073 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3074 MODE_INFO best_mbmode;
3075 int best_mode_skippable = 0;
3076 int midx, best_mode_index = -1;
3077 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3078 vpx_prob comp_mode_p;
3079 int64_t best_intra_rd = INT64_MAX;
3080 unsigned int best_pred_sse = UINT_MAX;
3081 PREDICTION_MODE best_intra_mode = DC_PRED;
3082 int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
3083 int64_t dist_uv[TX_SIZES];
3084 int skip_uv[TX_SIZES];
3085 PREDICTION_MODE mode_uv[TX_SIZES];
3086 const int intra_cost_penalty =
3087 vp9_get_intra_cost_penalty(cpi, bsize, cm->base_qindex, cm->y_dc_delta_q);
3089 uint8_t ref_frame_skip_mask[2] = { 0 };
3090 uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
3091 int mode_skip_start = sf->mode_skip_start + 1;
3092 const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
3093 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
3094 int64_t mode_threshold[MAX_MODES];
3095 int8_t *tile_mode_map = tile_data->mode_map[bsize];
3096 int8_t mode_map[MAX_MODES]; // Maintain mode_map information locally to avoid
// lock mechanism involved with reads from tile_mode_map.
3099 const int mode_search_skip_flags = sf->mode_search_skip_flags;
3100 const int is_rect_partition =
3101 num_4x4_blocks_wide_lookup[bsize] != num_4x4_blocks_high_lookup[bsize];
3102 int64_t mask_filter = 0;
3103 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
3105 vp9_zero(best_mbmode);
3107 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3109 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
3111 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3114 for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
3115 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3116 best_filter_rd[i] = INT64_MAX;
3117 for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX;
3118 for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
3119 for (i = 0; i < MB_MODE_COUNT; ++i) {
3120 for (k = 0; k < MAX_REF_FRAMES; ++k) {
3121 single_inter_filter[i][k] = SWITCHABLE;
3122 single_skippable[i][k] = 0;
3126 rd_cost->rate = INT_MAX;
3128 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3129 x->pred_mv_sad[ref_frame] = INT_MAX;
3130 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
3131 assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
3132 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3133 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
3135 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3136 frame_mv[ZEROMV][ref_frame].as_int = 0;
3139 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3140 if (!(cpi->ref_frame_flags & flag_list[ref_frame])) {
3141 // Skip checking missing references in both single and compound reference
// modes. Note that a mode will be skipped if both reference frames
// are masked out.
3144 ref_frame_skip_mask[0] |= (1 << ref_frame);
3145 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3146 } else if (sf->reference_masking) {
3147 for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
3148 // Skip fixed mv modes for poor references
3149 if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) {
3150 mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
// If the segment-level reference frame feature is enabled, then do
// nothing if the current ref frame is not allowed.
3157 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3158 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3159 ref_frame_skip_mask[0] |= (1 << ref_frame);
3160 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3164 // Disable this drop out case if the ref frame
3165 // segment level feature is enabled for this segment. This is to
3166 // prevent the possibility that we end up unable to pick any mode.
3167 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
3168 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
3169 // unless ARNR filtering is enabled in which case we want
3170 // an unfiltered alternative. We allow near/nearest as well
3171 // because they may result in zero-zero MVs but be cheaper.
3172 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
3173 ref_frame_skip_mask[0] = (1 << LAST_FRAME) | (1 << GOLDEN_FRAME);
3174 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3175 mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
3176 if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0)
3177 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
3178 if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0)
3179 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
3183 if (cpi->rc.is_src_frame_alt_ref) {
3184 if (sf->alt_ref_search_fp) {
3185 mode_skip_mask[ALTREF_FRAME] = 0;
3186 ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME);
3187 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3191 if (sf->alt_ref_search_fp)
3192 if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX)
3193 if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1))
3194 mode_skip_mask[ALTREF_FRAME] |= INTER_ALL;
3196 if (sf->adaptive_mode_search) {
3197 if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
3198 cpi->rc.frames_since_golden >= 3)
3199 if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1))
3200 mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL;
3203 if (bsize > sf->max_intra_bsize) {
3204 ref_frame_skip_mask[0] |= (1 << INTRA_FRAME);
3205 ref_frame_skip_mask[1] |= (1 << INTRA_FRAME);
3208 mode_skip_mask[INTRA_FRAME] |=
3209 ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
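  // The first few modes are always evaluated; later modes must beat an
  // adaptive threshold formed from the per-mode rd threshold scaled by a
  // frequency factor that tracks how often the mode has been picked.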
3211 for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) mode_threshold[i] = 0;
3213 for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
3214 mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
3216 midx = sf->schedule_mode_search ? mode_skip_start : 0;
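  // Sort the tile's mode map by threshold so that cheaper modes are tried
  // first; the first few entries are pinned in place.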
3219 uint8_t end_pos = 0;
3220 for (i = 5; i < midx; ++i) {
3221 if (mode_threshold[tile_mode_map[i - 1]] >
3222 mode_threshold[tile_mode_map[i]]) {
3223 uint8_t tmp = tile_mode_map[i];
3224 tile_mode_map[i] = tile_mode_map[i - 1];
3225 tile_mode_map[i - 1] = tmp;
3232 memcpy(mode_map, tile_mode_map, sizeof(mode_map));
3234 for (midx = 0; midx < MAX_MODES; ++midx) {
3235 int mode_index = mode_map[midx];
3236 int mode_excluded = 0;
3237 int64_t this_rd = INT64_MAX;
3238 int disable_skip = 0;
3239 int compmode_cost = 0;
3240 int rate2 = 0, rate_y = 0, rate_uv = 0;
3241 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
3244 int64_t total_sse = INT64_MAX;
3247 this_mode = vp9_mode_order[mode_index].mode;
3248 ref_frame = vp9_mode_order[mode_index].ref_frame[0];
3249 second_ref_frame = vp9_mode_order[mode_index].ref_frame[1];
3251 vp9_zero(x->sum_y_eobs);
3253 if (is_rect_partition) {
3254 if (ctx->skip_ref_frame_mask & (1 << ref_frame)) continue;
3255 if (second_ref_frame > 0 &&
3256 (ctx->skip_ref_frame_mask & (1 << second_ref_frame)))
3260 // Look at the reference frame of the best mode so far and set the
3261 // skip mask to look at a subset of the remaining modes.
3262 if (midx == mode_skip_start && best_mode_index >= 0) {
3263 switch (best_mbmode.ref_frame[0]) {
3264 case INTRA_FRAME: break;
3266 ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
3267 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3270 ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
3271 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3273 case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break;
3275 case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
3279 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
3280 (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
3283 if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
3285 // Test best rd so far against threshold for trying this mode.
3286 if (best_mode_skippable && sf->schedule_mode_search)
3287 mode_threshold[mode_index] <<= 1;
3289 if (best_rd < mode_threshold[mode_index]) continue;
// This is used only in the motion vector unit test.
3292 if (cpi->oxcf.motion_vector_unit_test && ref_frame == INTRA_FRAME) continue;
3294 if (sf->motion_field_mode_search) {
3295 const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize],
3296 tile_info->mi_col_end - mi_col);
3297 const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize],
3298 tile_info->mi_row_end - mi_row);
3299 const int bsl = mi_width_log2_lookup[bsize];
3300 int cb_partition_search_ctrl =
3301 (((mi_row + mi_col) >> bsl) +
3302 get_chessboard_index(cm->current_video_frame)) &
3305 int const_motion = 1;
3306 int skip_ref_frame = !cb_partition_search_ctrl;
3307 MV_REFERENCE_FRAME rf = NONE;
3309 ref_mv.as_int = INVALID_MV;
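      // Scan the row above and the column to the left: if every neighbor
      // shares one MV and reference frame, the local motion field is
      // constant and the candidate modes/references can be restricted.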
3311 if ((mi_row - 1) >= tile_info->mi_row_start) {
3312 ref_mv = xd->mi[-xd->mi_stride]->mv[0];
3313 rf = xd->mi[-xd->mi_stride]->ref_frame[0];
3314 for (i = 0; i < mi_width; ++i) {
3315 ref_mi = xd->mi[-xd->mi_stride + i];
3316 const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
3317 (ref_frame == ref_mi->ref_frame[0]);
3318 skip_ref_frame &= (rf == ref_mi->ref_frame[0]);
3322 if ((mi_col - 1) >= tile_info->mi_col_start) {
3323 if (ref_mv.as_int == INVALID_MV) ref_mv = xd->mi[-1]->mv[0];
3324 if (rf == NONE) rf = xd->mi[-1]->ref_frame[0];
3325 for (i = 0; i < mi_height; ++i) {
3326 ref_mi = xd->mi[i * xd->mi_stride - 1];
3327 const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
3328 (ref_frame == ref_mi->ref_frame[0]);
3329 skip_ref_frame &= (rf == ref_mi->ref_frame[0]);
3333 if (skip_ref_frame && this_mode != NEARESTMV && this_mode != NEWMV)
3334 if (rf > INTRA_FRAME)
3335 if (ref_frame != rf) continue;
3338 if (this_mode == NEARMV || this_mode == ZEROMV) continue;
3341 comp_pred = second_ref_frame > INTRA_FRAME;
3343 if (!cpi->allow_comp_inter_inter) continue;
3345 // Skip compound inter modes if ARF is not available.
3346 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
3348 // Do not allow compound prediction if the segment level reference frame
3349 // feature is in use as in this case there can only be one reference.
3350 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
3352 if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
3353 best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
3356 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
3358 if (ref_frame != INTRA_FRAME)
3359 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
3362 if (ref_frame == INTRA_FRAME) {
3363 if (sf->adaptive_mode_search)
3364 if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse)
3367 if (this_mode != DC_PRED) {
// Disable intra modes other than DC_PRED for blocks with low variance.
// The threshold for intra skipping is based on source variance.
// TODO(debargha): Specialize the threshold for super block sizes.
3371 const unsigned int skip_intra_var_thresh = 64;
3372 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
3373 x->source_variance < skip_intra_var_thresh)
3375 // Only search the oblique modes if the best so far is
3376 // one of the neighboring directional modes
3377 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
3378 (this_mode >= D45_PRED && this_mode <= TM_PRED)) {
3379 if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME)
3382 if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
3383 if (conditional_skipintra(this_mode, best_intra_mode)) continue;
3387 const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
3388 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode,
3393 mi->mode = this_mode;
3394 mi->uv_mode = DC_PRED;
3395 mi->ref_frame[0] = ref_frame;
3396 mi->ref_frame[1] = second_ref_frame;
3397 // Evaluate all sub-pel filters irrespective of whether we can use
3398 // them for this frame.
3400 cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
3401 mi->mv[0].as_int = mi->mv[1].as_int = 0;
3404 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
3406 // Select prediction reference frames.
3407 for (i = 0; i < MAX_MB_PLANE; i++) {
3408 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
3409 if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
3412 if (ref_frame == INTRA_FRAME) {
3414 struct macroblockd_plane *const pd = &xd->plane[1];
3415 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
3416 super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
3418 if (rate_y == INT_MAX) continue;
3420 uv_tx = uv_txsize_lookup[bsize][mi->tx_size][pd->subsampling_x]
3421 [pd->subsampling_y];
3422 if (rate_uv_intra[uv_tx] == INT_MAX) {
3423 choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
3424 &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
3425 &skip_uv[uv_tx], &mode_uv[uv_tx]);
3428 rate_uv = rate_uv_tokenonly[uv_tx];
3429 distortion_uv = dist_uv[uv_tx];
3430 skippable = skippable && skip_uv[uv_tx];
3431 mi->uv_mode = mode_uv[uv_tx];
3433 rate2 = rate_y + cpi->mbmode_cost[mi->mode] + rate_uv_intra[uv_tx];
3434 if (this_mode != DC_PRED && this_mode != TM_PRED)
3435 rate2 += intra_cost_penalty;
3436 distortion2 = distortion_y + distortion_uv;
3438 this_rd = handle_inter_mode(
3439 cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
3440 &disable_skip, frame_mv, mi_row, mi_col, single_newmv,
3441 single_inter_filter, single_skippable, &total_sse, best_rd,
3442 &mask_filter, filter_cache);
3443 if (this_rd == INT64_MAX) continue;
3445 compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
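// vp9_cost_bit() converts the probability of the compound/single flag into
// an entropy cost in the encoder's fixed-point bit units. The flag is only
// present in the bitstream when the frame header selects
// REFERENCE_MODE_SELECT, so its cost is only charged in that case below.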
3447 if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
3448 }
3450 // Estimate the reference frame signaling cost and add it
3451 // to the rolling cost variable.
3452 if (comp_pred) {
3453 rate2 += ref_costs_comp[ref_frame];
3454 } else {
3455 rate2 += ref_costs_single[ref_frame];
3456 }
3458 if (!disable_skip) {
3459 const vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
3460 const int skip_cost0 = vp9_cost_bit(skip_prob, 0);
3461 const int skip_cost1 = vp9_cost_bit(skip_prob, 1);
3463 if (skippable) {
3464 // Back out the coefficient coding costs.
3465 rate2 -= (rate_y + rate_uv);
3467 // Cost the skip mb case.
3468 rate2 += skip_cost1;
3469 } else if (ref_frame != INTRA_FRAME && !xd->lossless) {
3470 if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
3471 distortion2) <
3472 RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
3473 // Add in the cost of the no skip flag.
3474 rate2 += skip_cost0;
3475 } else {
3476 // FIXME(rbultje) make this work for splitmv also
3477 assert(total_sse >= 0);
3479 rate2 += skip_cost1;
3480 distortion2 = total_sse;
3481 rate2 -= (rate_y + rate_uv);
3482 this_skip2 = 1;
3483 }
3484 } else {
3485 // Add in the cost of the no skip flag.
3486 rate2 += skip_cost0;
3487 }
3489 // Calculate the final RD estimate for this mode.
3490 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
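// The RDCOST macro (vp9_rd.h) folds rate and distortion into a single
// Lagrangian cost, roughly J = lambda * R + D, with x->rdmult scaling the
// rate term and x->rddiv scaling the distortion term. A mode can only
// become the new best if it lowers this combined cost.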
3493 // Apply an adjustment to the rd value based on the similarity of the
3494 // source variance and reconstructed variance.
3495 rd_variance_adjustment(cpi, x, bsize, &this_rd, ref_frame,
3496 x->source_variance);
3498 if (ref_frame == INTRA_FRAME) {
3499 // Keep record of best intra rd
3500 if (this_rd < best_intra_rd) {
3501 best_intra_rd = this_rd;
3502 best_intra_mode = mi->mode;
3503 }
3504 }
3506 if (!disable_skip && ref_frame == INTRA_FRAME) {
3507 for (i = 0; i < REFERENCE_MODES; ++i)
3508 best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
3509 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3510 best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
3511 }
3513 // Did this mode help, i.e. is it the new best mode?
3514 if (this_rd < best_rd || x->skip) {
3515 int max_plane = MAX_MB_PLANE;
3516 if (!mode_excluded) {
3517 // Note index of best mode so far
3518 best_mode_index = mode_index;
3520 if (ref_frame == INTRA_FRAME) {
3521 /* required for left and above block mv */
3522 mi->mv[0].as_int = 0;
3523 max_plane = 1;
3524 // Initialize interp_filter here so we do not have to check for
3525 // inter block modes in get_pred_context_switchable_interp()
3526 mi->interp_filter = SWITCHABLE_FILTERS;
3527 } else {
3528 best_pred_sse = x->pred_sse[ref_frame];
3529 }
3531 rd_cost->rate = rate2;
3532 rd_cost->dist = distortion2;
3533 rd_cost->rdcost = this_rd;
3534 best_rd = this_rd;
3535 best_mbmode = *mi;
3536 best_skip2 = this_skip2;
3537 best_mode_skippable = skippable;
3539 if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
3540 memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mi->tx_size],
3541 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
3542 ctx->sum_y_eobs = x->sum_y_eobs[mi->tx_size];
3544 // TODO(debargha): enhance this test with a better distortion prediction
3545 // based on qp, activity mask and history
3546 if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
3547 (mode_index > MIN_EARLY_TERM_INDEX)) {
3548 int qstep = xd->plane[0].dequant[1];
3549 // TODO(debargha): Enhance this by specializing for each mode_index
3550 int scale = 4;
3551 #if CONFIG_VP9_HIGHBITDEPTH
3552 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
3553 qstep >>= (xd->bd - 8);
3554 }
3555 #endif  // CONFIG_VP9_HIGHBITDEPTH
3556 if (x->source_variance < UINT_MAX) {
3557 const int var_adjust = (x->source_variance < 16);
3558 scale -= var_adjust;
3559 }
3560 if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
3561 early_term = 1;
3562 }
3563 }
3564 }
3565 }
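// Hedged intuition for the early termination above: qstep is one luma AC
// dequantizer step, so qstep * qstep approximates the distortion floor a
// single quantization step introduces. Once an inter mode's distortion is
// within a small multiple (scale) of that floor, later modes are unlikely
// to improve the rd cost, so the mode loop is cut short.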
3567 /* keep record of best compound/single-only prediction */
3568 if (!disable_skip && ref_frame != INTRA_FRAME) {
3569 int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
3571 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
3572 single_rate = rate2 - compmode_cost;
3573 hybrid_rate = rate2;
3574 } else {
3575 single_rate = rate2;
3576 hybrid_rate = rate2 + compmode_cost;
3577 }
3579 single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
3580 hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
3582 if (!comp_pred) {
3583 if (single_rd < best_pred_rd[SINGLE_REFERENCE])
3584 best_pred_rd[SINGLE_REFERENCE] = single_rd;
3585 } else {
3586 if (single_rd < best_pred_rd[COMPOUND_REFERENCE])
3587 best_pred_rd[COMPOUND_REFERENCE] = single_rd;
3588 }
3589 if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
3590 best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
3591 }
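// These records feed the frame-level choice between SINGLE_REFERENCE,
// COMPOUND_REFERENCE and REFERENCE_MODE_SELECT: single_rd and hybrid_rd
// price the same mode without and with the per-block compound flag, so the
// best achievable cost under each reference mode can be compared later.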
3592 /* keep record of best filter type */
3593 if (!mode_excluded && cm->interp_filter != BILINEAR) {
3595 filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
3596 : cm->interp_filter];
3598 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3600 if (ref == INT64_MAX)
3602 else if (filter_cache[i] == INT64_MAX)
3603 // when early termination is triggered, the encoder does not have
3604 // access to the rate-distortion cost. it only knows that the cost
3605 // should be above the maximum valid value. hence it takes the known
3606 // maximum plus an arbitrary constant as the rate-distortion cost.
3607 adj_rd = mask_filter - ref + 10;
3609 adj_rd = filter_cache[i] - ref;
3612 best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
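// One way to read adj_rd: filter_cache[i] - ref is filter i's rd delta
// relative to the filter actually used for this mode; adding this_rd
// rebases that delta onto the current mode's cost so all filters are
// compared against a common baseline.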
3617 if (early_term) break;
3619 if (x->skip && !comp_pred) break;
3620 }
3622 // The inter modes' rate costs are not calculated precisely in some cases.
3623 // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
3624 // ZEROMV. Here, checks are added for those cases, and the mode decisions
3625 // are corrected.
3626 if (best_mbmode.mode == NEWMV) {
3627 const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
3628 best_mbmode.ref_frame[1] };
3629 int comp_pred_mode = refs[1] > INTRA_FRAME;
3631 if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3632 ((comp_pred_mode &&
3633 frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
3634 !comp_pred_mode))
3635 best_mbmode.mode = NEARESTMV;
3636 else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3637 ((comp_pred_mode &&
3638 frame_mv[NEARMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
3639 !comp_pred_mode))
3640 best_mbmode.mode = NEARMV;
3641 else if (best_mbmode.mv[0].as_int == 0 &&
3642 ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) ||
3643 !comp_pred_mode))
3644 best_mbmode.mode = ZEROMV;
3645 }
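// Example of the correction: if the NEWMV search lands exactly on the
// NEARESTMV predictor, both code the same motion vector, but NEARESTMV
// needs no motion vector residual. Relabeling the winner can therefore
// only reduce the true rate, even though the estimates above differ.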
3647 if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
3648 // If adaptive interp filter is enabled, then the current leaf node of 8x8
3649 // data is needed for sub8x8. Hence preserve the context.
3650 #if CONFIG_CONSISTENT_RECODE
3651 if (bsize == BLOCK_8X8) ctx->mic = *xd->mi[0];
3652 #else
3653 if (cpi->row_mt && bsize == BLOCK_8X8) ctx->mic = *xd->mi[0];
3654 #endif
3655 rd_cost->rate = INT_MAX;
3656 rd_cost->rdcost = INT64_MAX;
3657 return;
3658 }
3660 // If we used an estimate for the uv intra rd in the loop above...
3661 if (sf->use_uv_intra_rd_estimate) {
3662 // Do Intra UV best rd mode selection if best mode choice above was intra.
3663 if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
3664 TX_SIZE uv_tx_size;
3665 *mi = best_mbmode;
3666 uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
3667 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
3668 &rate_uv_tokenonly[uv_tx_size],
3669 &dist_uv[uv_tx_size], &skip_uv[uv_tx_size],
3670 bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
3671 uv_tx_size);
3672 }
3673 }
3675 assert((cm->interp_filter == SWITCHABLE) ||
3676 (cm->interp_filter == best_mbmode.interp_filter) ||
3677 !is_inter_block(&best_mbmode));
3679 if (!cpi->rc.is_src_frame_alt_ref)
3680 vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
3681 sf->adaptive_rd_thresh, bsize, best_mode_index);
3683 // macroblock modes
3684 *mi = best_mbmode;
3685 x->skip |= best_skip2;
3687 for (i = 0; i < REFERENCE_MODES; ++i) {
3688 if (best_pred_rd[i] == INT64_MAX)
3689 best_pred_diff[i] = INT_MIN;
3690 else
3691 best_pred_diff[i] = best_rd - best_pred_rd[i];
3692 }
3694 if (!x->skip) {
3695 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3696 if (best_filter_rd[i] == INT64_MAX)
3697 best_filter_diff[i] = 0;
3698 else
3699 best_filter_diff[i] = best_rd - best_filter_rd[i];
3700 }
3701 if (cm->interp_filter == SWITCHABLE)
3702 assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
3703 } else {
3704 vp9_zero(best_filter_diff);
3705 }
3707 // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
3708 // updating code causes PSNR loss. Need to figure out the conflict.
3709 x->skip |= best_mode_skippable;
3711 if (!x->skip && !x->select_tx_size) {
3712 int has_high_freq_coeff = 0;
3713 int plane;
3714 int max_plane = is_inter_block(xd->mi[0]) ? MAX_MB_PLANE : 1;
3715 for (plane = 0; plane < max_plane; ++plane) {
3716 x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
3717 has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
3718 }
3720 for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
3721 x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
3722 has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
3723 }
3725 best_mode_skippable |= !has_high_freq_coeff;
3726 }
3728 assert(best_mode_index >= 0);
3730 store_coding_context(x, ctx, best_mode_index, best_pred_diff,
3731 best_filter_diff, best_mode_skippable);
3732 }
3734 void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, TileDataEnc *tile_data,
3735 MACROBLOCK *x, RD_COST *rd_cost,
3736 BLOCK_SIZE bsize,
3737 PICK_MODE_CONTEXT *ctx,
3738 int64_t best_rd_so_far) {
3739 VP9_COMMON *const cm = &cpi->common;
3740 MACROBLOCKD *const xd = &x->e_mbd;
3741 MODE_INFO *const mi = xd->mi[0];
3742 unsigned char segment_id = mi->segment_id;
3743 const int comp_pred = 0;
3744 int i;
3745 int64_t best_pred_diff[REFERENCE_MODES];
3746 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3747 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3748 vpx_prob comp_mode_p;
3749 INTERP_FILTER best_filter = SWITCHABLE;
3750 int64_t this_rd = INT64_MAX;
3751 int rate2 = 0;
3752 const int64_t distortion2 = 0;
3754 x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3756 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3757 &comp_mode_p);
3759 for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
3760 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX;
3762 rd_cost->rate = INT_MAX;
3764 assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));
3766 mi->mode = ZEROMV;
3767 mi->uv_mode = DC_PRED;
3768 mi->ref_frame[0] = LAST_FRAME;
3769 mi->ref_frame[1] = NONE;
3770 mi->mv[0].as_int = 0;
3771 x->skip = 1;
3773 ctx->sum_y_eobs = 0;
3775 if (cm->interp_filter != BILINEAR) {
3776 best_filter = EIGHTTAP;
3777 if (cm->interp_filter == SWITCHABLE &&
3778 x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
3779 int rs;
3780 int best_rs = INT_MAX;
3781 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
3782 mi->interp_filter = i;
3783 rs = vp9_get_switchable_rate(cpi, xd);
3784 if (rs < best_rs) {
3785 best_rs = rs;
3786 best_filter = mi->interp_filter;
3787 }
3788 }
3789 }
3790 }
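// Since this block is coded as skipped there is no residual to filter, so
// the filters differ only in their own signaling cost; the loop above
// simply picks the switchable filter that is cheapest to code.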
3791 // Set the appropriate filter
3792 if (cm->interp_filter == SWITCHABLE) {
3793 mi->interp_filter = best_filter;
3794 rate2 += vp9_get_switchable_rate(cpi, xd);
3795 } else {
3796 mi->interp_filter = cm->interp_filter;
3797 }
3799 if (cm->reference_mode == REFERENCE_MODE_SELECT)
3800 rate2 += vp9_cost_bit(comp_mode_p, comp_pred);
3802 // Estimate the reference frame signaling cost and add it
3803 // to the rolling cost variable.
3804 rate2 += ref_costs_single[LAST_FRAME];
3805 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
3807 rd_cost->rate = rate2;
3808 rd_cost->dist = distortion2;
3809 rd_cost->rdcost = this_rd;
3811 if (this_rd >= best_rd_so_far) {
3812 rd_cost->rate = INT_MAX;
3813 rd_cost->rdcost = INT64_MAX;
3814 return;
3815 }
3817 assert((cm->interp_filter == SWITCHABLE) ||
3818 (cm->interp_filter == mi->interp_filter));
3820 vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
3821 cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
3823 vp9_zero(best_pred_diff);
3824 vp9_zero(best_filter_diff);
3826 if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
3827 store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
3828 }
3830 void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data,
3831 MACROBLOCK *x, int mi_row, int mi_col,
3832 RD_COST *rd_cost, BLOCK_SIZE bsize,
3833 PICK_MODE_CONTEXT *ctx,
3834 int64_t best_rd_so_far) {
3835 VP9_COMMON *const cm = &cpi->common;
3836 RD_OPT *const rd_opt = &cpi->rd;
3837 SPEED_FEATURES *const sf = &cpi->sf;
3838 MACROBLOCKD *const xd = &x->e_mbd;
3839 MODE_INFO *const mi = xd->mi[0];
3840 const struct segmentation *const seg = &cm->seg;
3841 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
3842 unsigned char segment_id = mi->segment_id;
3843 int comp_pred, i;
3844 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
3845 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
3846 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
3847 VP9_ALT_FLAG };
3848 int64_t best_rd = best_rd_so_far;
3849 int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
3850 int64_t best_pred_diff[REFERENCE_MODES];
3851 int64_t best_pred_rd[REFERENCE_MODES];
3852 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
3853 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3854 MODE_INFO best_mbmode;
3855 int ref_index, best_ref_index = 0;
3856 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3857 vpx_prob comp_mode_p;
3858 INTERP_FILTER tmp_best_filter = SWITCHABLE;
3859 int rate_uv_intra, rate_uv_tokenonly;
3860 int64_t dist_uv;
3861 int skip_uv;
3862 PREDICTION_MODE mode_uv = DC_PRED;
3863 const int intra_cost_penalty =
3864 vp9_get_intra_cost_penalty(cpi, bsize, cm->base_qindex, cm->y_dc_delta_q);
3865 int_mv seg_mvs[4][MAX_REF_FRAMES];
3866 b_mode_info best_bmodes[4];
3867 int best_skip2 = 0;
3868 int ref_frame_skip_mask[2] = { 0 };
3869 int64_t mask_filter = 0;
3870 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
3871 int internal_active_edge =
3872 vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi);
3873 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
3875 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3876 memset(x->zcoeff_blk[TX_4X4], 0, 4);
3877 vp9_zero(best_mbmode);
3879 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
3881 for (i = 0; i < 4; i++) {
3882 int j;
3883 for (j = 0; j < MAX_REF_FRAMES; j++) seg_mvs[i][j].as_int = INVALID_MV;
3884 }
3886 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3887 &comp_mode_p);
3889 for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
3890 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3891 best_filter_rd[i] = INT64_MAX;
3892 rate_uv_intra = INT_MAX;
3894 rd_cost->rate = INT_MAX;
3896 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
3897 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
3898 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3899 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
3900 } else {
3901 ref_frame_skip_mask[0] |= (1 << ref_frame);
3902 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3903 }
3904 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3905 frame_mv[ZEROMV][ref_frame].as_int = 0;
3906 }
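// A reference whose buffer is unavailable is masked out both as a first
// reference and, via SECOND_REF_FRAME_MASK, as a second reference, so no
// single or compound candidate can select it later in the search.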
3908 for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
3909 int mode_excluded = 0;
3910 int64_t this_rd = INT64_MAX;
3911 int disable_skip = 0;
3912 int compmode_cost = 0;
3913 int rate2 = 0, rate_y = 0, rate_uv = 0;
3914 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
3915 int skippable = 0;
3916 int i;
3917 int this_skip2 = 0;
3918 int64_t total_sse = INT_MAX;
3919 int early_term = 0;
3920 struct buf_2d backup_yv12[2][MAX_MB_PLANE];
3922 ref_frame = vp9_ref_order[ref_index].ref_frame[0];
3923 second_ref_frame = vp9_ref_order[ref_index].ref_frame[1];
3925 vp9_zero(x->sum_y_eobs);
3927 #if CONFIG_BETTER_HW_COMPATIBILITY
3928 // Forbid 8X4 and 4X8 partitions if any reference frame is scaled.
3929 if (bsize == BLOCK_8X4 || bsize == BLOCK_4X8) {
3930 int ref_scaled = vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf);
3931 if (second_ref_frame > INTRA_FRAME)
3932 ref_scaled += vp9_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf);
3933 if (ref_scaled) continue;
3934 }
3935 #endif  // CONFIG_BETTER_HW_COMPATIBILITY
3936 // Look at the reference frame of the best mode so far and set the
3937 // skip mask to look at a subset of the remaining modes.
3938 if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
3939 if (ref_index == 3) {
3940 switch (best_mbmode.ref_frame[0]) {
3941 case INTRA_FRAME: break;
3942 case LAST_FRAME:
3943 ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
3944 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3945 break;
3946 case GOLDEN_FRAME:
3947 ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME);
3948 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3949 break;
3950 case ALTREF_FRAME:
3951 ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
3952 break;
3953 case NONE:
3954 case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
3955 }
3956 }
3957 }
3959 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
3960 (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
3961 continue;
3963 // Test best rd so far against threshold for trying this mode.
3964 if (!internal_active_edge &&
3965 rd_less_than_thresh(best_rd,
3966 rd_opt->threshes[segment_id][bsize][ref_index],
3967 &rd_thresh_freq_fact[ref_index]))
3968 continue;
3970 // This is only used in motion vector unit test.
3971 if (cpi->oxcf.motion_vector_unit_test && ref_frame == INTRA_FRAME) continue;
3973 comp_pred = second_ref_frame > INTRA_FRAME;
3974 if (comp_pred) {
3975 if (!cpi->allow_comp_inter_inter) continue;
3976 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
3977 // Do not allow compound prediction if the segment level reference frame
3978 // feature is in use, as in this case there can only be one reference.
3979 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
3981 if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
3982 best_mbmode.ref_frame[0] == INTRA_FRAME)
3983 continue;
3984 }
3986 if (comp_pred)
3987 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
3988 else if (ref_frame != INTRA_FRAME)
3989 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
3991 // If the segment reference frame feature is enabled, then do nothing if
3992 // the current ref frame is not allowed.
3993 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3994 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3995 continue;
3996 // Disable this drop out case if the ref frame
3997 // segment level feature is enabled for this segment. This is to
3998 // prevent the possibility that we end up unable to pick any mode.
3999 } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
4000 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
4001 // unless ARNR filtering is enabled in which case we want
4002 // an unfiltered alternative. We allow near/nearest as well
4003 // because they may result in zero-zero MVs but be cheaper.
4004 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
4005 continue;
4006 }
4008 mi->tx_size = TX_4X4;
4009 mi->uv_mode = DC_PRED;
4010 mi->ref_frame[0] = ref_frame;
4011 mi->ref_frame[1] = second_ref_frame;
4012 // Evaluate all sub-pel filters irrespective of whether we can use
4013 // them for this frame.
4014 mi->interp_filter =
4015 cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
4016 x->skip = 0;
4017 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
4019 // Select prediction reference frames.
4020 for (i = 0; i < MAX_MB_PLANE; i++) {
4021 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
4022 if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
4023 }
4025 if (ref_frame == INTRA_FRAME) {
4026 int rate;
4027 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y,
4028 best_rd) >= best_rd)
4029 continue;
4030 rate2 += rate;
4031 rate2 += intra_cost_penalty;
4032 distortion2 += distortion_y;
4034 if (rate_uv_intra == INT_MAX) {
4035 choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra,
4036 &rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv);
4037 }
4038 rate2 += rate_uv_intra;
4039 rate_uv = rate_uv_tokenonly;
4040 distortion2 += dist_uv;
4041 distortion_uv = dist_uv;
4042 mi->uv_mode = mode_uv;
4043 } else {
4044 int rate;
4046 int64_t this_rd_thresh;
4047 int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
4048 int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
4049 int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
4050 int tmp_best_skippable = 0;
4051 int switchable_filter_index;
4052 int_mv *second_ref =
4053 comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
4054 b_mode_info tmp_best_bmodes[16];
4055 MODE_INFO tmp_best_mbmode;
4056 BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
4057 int pred_exists = 0;
4058 int uv_skippable;
4059 int ref;
4060 YV12_BUFFER_CONFIG *scaled_ref_frame[2] = { NULL, NULL };
4063 for (ref = 0; ref < 2; ++ref) {
4064 scaled_ref_frame[ref] =
4065 mi->ref_frame[ref] > INTRA_FRAME
4066 ? vp9_get_scaled_ref_frame(cpi, mi->ref_frame[ref])
4067 : NULL;
4069 if (scaled_ref_frame[ref]) {
4070 int i;
4071 // Swap out the reference frame for a version that's been scaled to
4072 // match the resolution of the current frame, allowing the existing
4073 // motion search code to be used without additional modifications.
4074 for (i = 0; i < MAX_MB_PLANE; i++)
4075 backup_yv12[ref][i] = xd->plane[i].pre[ref];
4076 vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
4077 NULL);
4078 }
4079 }
4081 this_rd_thresh = (ref_frame == LAST_FRAME)
4082 ? rd_opt->threshes[segment_id][bsize][THR_LAST]
4083 : rd_opt->threshes[segment_id][bsize][THR_ALTR];
4084 this_rd_thresh = (ref_frame == GOLDEN_FRAME)
4085 ? rd_opt->threshes[segment_id][bsize][THR_GOLD]
4086 : this_rd_thresh;
4087 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
4088 filter_cache[i] = INT64_MAX;
4090 if (cm->interp_filter != BILINEAR) {
4091 tmp_best_filter = EIGHTTAP;
4092 if (x->source_variance < sf->disable_filter_search_var_thresh) {
4093 tmp_best_filter = EIGHTTAP;
4094 } else if (sf->adaptive_pred_interp_filter == 1 &&
4095 ctx->pred_interp_filter < SWITCHABLE) {
4096 tmp_best_filter = ctx->pred_interp_filter;
4097 } else if (sf->adaptive_pred_interp_filter == 2) {
4098 tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE
4099 ? ctx->pred_interp_filter
4100 : 0;
4101 } else {
4102 for (switchable_filter_index = 0;
4103 switchable_filter_index < SWITCHABLE_FILTERS;
4104 ++switchable_filter_index) {
4105 int newbest, rs;
4106 int64_t rs_rd;
4107 MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
4108 mi->interp_filter = switchable_filter_index;
4109 tmp_rd = rd_pick_best_sub8x8_mode(
4110 cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
4111 &rate, &rate_y, &distortion, &skippable, &total_sse,
4112 (int)this_rd_thresh, seg_mvs, bsi, switchable_filter_index,
4113 mi_row, mi_col);
4115 if (tmp_rd == INT64_MAX) continue;
4116 rs = vp9_get_switchable_rate(cpi, xd);
4117 rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
4118 filter_cache[switchable_filter_index] = tmp_rd;
4119 filter_cache[SWITCHABLE_FILTERS] =
4120 VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
4121 if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
4123 mask_filter = VPXMAX(mask_filter, tmp_rd);
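// mask_filter tracks the largest rd seen across the tested filters; it is
// later used as the "known maximum" stand-in when a filter_cache entry was
// never filled in (see the INT64_MAX handling further below).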
4125 newbest = (tmp_rd < tmp_best_rd);
4126 if (newbest) {
4127 tmp_best_filter = mi->interp_filter;
4128 tmp_best_rd = tmp_rd;
4129 }
4130 if ((newbest && cm->interp_filter == SWITCHABLE) ||
4131 (mi->interp_filter == cm->interp_filter &&
4132 cm->interp_filter != SWITCHABLE)) {
4133 tmp_best_rdu = tmp_rd;
4134 tmp_best_rate = rate;
4135 tmp_best_ratey = rate_y;
4136 tmp_best_distortion = distortion;
4137 tmp_best_sse = total_sse;
4138 tmp_best_skippable = skippable;
4139 tmp_best_mbmode = *mi;
4140 for (i = 0; i < 4; i++) {
4141 tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
4142 x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
4143 x->sum_y_eobs[TX_4X4] += x->plane[0].eobs[i];
4144 }
4145 pred_exists = 1;
4146 if (switchable_filter_index == 0 && sf->use_rd_breakout &&
4147 best_rd < INT64_MAX) {
4148 if (tmp_best_rdu / 2 > best_rd) {
4149 // Skip searching the other filters if the first is
4150 // already substantially larger than the best so far.
4151 tmp_best_filter = mi->interp_filter;
4152 tmp_best_rdu = INT64_MAX;
4153 break;
4154 }
4155 }
4156 }
4157 }  // switchable_filter_index loop
4158 }
4159 }
4161 if (tmp_best_rdu == INT64_MAX && pred_exists) continue;
4163 mi->interp_filter = (cm->interp_filter == SWITCHABLE ? tmp_best_filter
4164 : cm->interp_filter);
4165 if (!pred_exists) {
4166 // Handles the special case when a filter that is not in the
4167 // switchable list (bilinear, 6-tap) is indicated at the frame level
4168 tmp_rd = rd_pick_best_sub8x8_mode(
4169 cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
4170 &rate, &rate_y, &distortion, &skippable, &total_sse,
4171 (int)this_rd_thresh, seg_mvs, bsi, 0, mi_row, mi_col);
4172 if (tmp_rd == INT64_MAX) continue;
4173 } else {
4174 total_sse = tmp_best_sse;
4175 rate = tmp_best_rate;
4176 rate_y = tmp_best_ratey;
4177 distortion = tmp_best_distortion;
4178 skippable = tmp_best_skippable;
4179 *mi = tmp_best_mbmode;
4180 for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
4181 }
4183 rate2 += rate;
4184 distortion2 += distortion;
4186 if (cm->interp_filter == SWITCHABLE)
4187 rate2 += vp9_get_switchable_rate(cpi, xd);
4189 if (!mode_excluded)
4190 mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
4191 : cm->reference_mode == COMPOUND_REFERENCE;
4193 compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
4195 tmp_best_rdu =
4196 best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
4197 RDCOST(x->rdmult, x->rddiv, 0, total_sse));
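// A reading of tmp_best_rdu here: it is the rd budget left for chroma,
// i.e. best_rd minus the cheaper of coding the current rate/distortion or
// skipping with total_sse. If the UV search below cannot fit within this
// budget, the mode is abandoned.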
4199 if (tmp_best_rdu > 0) {
4200 // If even the 'Y' rd value of split is higher than best so far
4201 // then don't bother looking at UV.
4202 vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
4203 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
4204 if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
4205 &uv_sse, BLOCK_8X8, tmp_best_rdu)) {
4206 for (ref = 0; ref < 2; ++ref) {
4207 if (scaled_ref_frame[ref]) {
4208 int i;
4209 for (i = 0; i < MAX_MB_PLANE; ++i)
4210 xd->plane[i].pre[ref] = backup_yv12[ref][i];
4211 }
4212 }
4213 continue;
4214 }
4216 rate2 += rate_uv;
4217 distortion2 += distortion_uv;
4218 skippable = skippable && uv_skippable;
4219 total_sse += uv_sse;
4220 }
4222 for (ref = 0; ref < 2; ++ref) {
4223 if (scaled_ref_frame[ref]) {
4224 // Restore the prediction frame pointers to their unscaled versions.
4225 int i;
4226 for (i = 0; i < MAX_MB_PLANE; ++i)
4227 xd->plane[i].pre[ref] = backup_yv12[ref][i];
4228 }
4229 }
4230 }
4232 if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
4234 // Estimate the reference frame signaling cost and add it
4235 // to the rolling cost variable.
4236 if (second_ref_frame > INTRA_FRAME) {
4237 rate2 += ref_costs_comp[ref_frame];
4238 } else {
4239 rate2 += ref_costs_single[ref_frame];
4240 }
4242 if (!disable_skip) {
4243 const vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
4244 const int skip_cost0 = vp9_cost_bit(skip_prob, 0);
4245 const int skip_cost1 = vp9_cost_bit(skip_prob, 1);
4247 // Skip is never coded at the segment level for sub8x8 blocks and instead
4248 // always coded in the bitstream at the mode info level.
4249 if (ref_frame != INTRA_FRAME && !xd->lossless) {
4250 if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
4251 distortion2) <
4252 RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
4253 // Add in the cost of the no skip flag.
4254 rate2 += skip_cost0;
4255 } else {
4256 // FIXME(rbultje) make this work for splitmv also
4257 rate2 += skip_cost1;
4258 distortion2 = total_sse;
4259 assert(total_sse >= 0);
4260 rate2 -= (rate_y + rate_uv);
4261 rate_y = 0;
4262 rate_uv = 0;
4263 this_skip2 = 1;
4264 }
4265 } else {
4266 // Add in the cost of the no skip flag.
4267 rate2 += skip_cost0;
4268 }
4270 // Calculate the final RD estimate for this mode.
4271 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
4272 }
4274 if (!disable_skip && ref_frame == INTRA_FRAME) {
4275 for (i = 0; i < REFERENCE_MODES; ++i)
4276 best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
4277 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
4278 best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
4279 }
4281 // Did this mode help, i.e. is it the new best mode?
4282 if (this_rd < best_rd || x->skip) {
4283 if (!mode_excluded) {
4284 int max_plane = MAX_MB_PLANE;
4285 // Note index of best mode so far
4286 best_ref_index = ref_index;
4288 if (ref_frame == INTRA_FRAME) {
4289 /* required for left and above block mv */
4290 mi->mv[0].as_int = 0;
4291 max_plane = 1;
4292 // Initialize interp_filter here so we do not have to check for
4293 // inter block modes in get_pred_context_switchable_interp()
4294 mi->interp_filter = SWITCHABLE_FILTERS;
4295 }
4297 rd_cost->rate = rate2;
4298 rd_cost->dist = distortion2;
4299 rd_cost->rdcost = this_rd;
4300 best_rd = this_rd;
4301 best_yrd =
4302 best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
4303 best_mbmode = *mi;
4304 best_skip2 = this_skip2;
4305 if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
4306 memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
4307 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
4308 ctx->sum_y_eobs = x->sum_y_eobs[TX_4X4];
4310 for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i];
4312 // TODO(debargha): enhance this test with a better distortion prediction
4313 // based on qp, activity mask and history
4314 if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
4315 (ref_index > MIN_EARLY_TERM_INDEX)) {
4316 int qstep = xd->plane[0].dequant[1];
4317 // TODO(debargha): Enhance this by specializing for each mode_index
4318 int scale = 4;
4319 #if CONFIG_VP9_HIGHBITDEPTH
4320 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
4321 qstep >>= (xd->bd - 8);
4322 }
4323 #endif  // CONFIG_VP9_HIGHBITDEPTH
4324 if (x->source_variance < UINT_MAX) {
4325 const int var_adjust = (x->source_variance < 16);
4326 scale -= var_adjust;
4327 }
4328 if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
4329 early_term = 1;
4330 }
4331 }
4332 }
4333 }
4335 /* keep record of best compound/single-only prediction */
4336 if (!disable_skip && ref_frame != INTRA_FRAME) {
4337 int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
4339 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
4340 single_rate = rate2 - compmode_cost;
4341 hybrid_rate = rate2;
4342 } else {
4343 single_rate = rate2;
4344 hybrid_rate = rate2 + compmode_cost;
4345 }
4347 single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
4348 hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
4350 if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
4351 best_pred_rd[SINGLE_REFERENCE] = single_rd;
4352 else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
4353 best_pred_rd[COMPOUND_REFERENCE] = single_rd;
4355 if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
4356 best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
4357 }
4359 /* keep record of best filter type */
4360 if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
4361 cm->interp_filter != BILINEAR) {
4362 int64_t ref =
4363 filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
4364 : cm->interp_filter];
4365 int64_t adj_rd;
4366 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4367 if (ref == INT64_MAX)
4368 adj_rd = 0;
4369 else if (filter_cache[i] == INT64_MAX)
4370 // When early termination is triggered, the encoder does not have
4371 // access to the rate-distortion cost. It only knows that the cost
4372 // should be above the maximum valid value. Hence it takes the known
4373 // maximum plus an arbitrary constant as the rate-distortion cost.
4374 adj_rd = mask_filter - ref + 10;
4375 else
4376 adj_rd = filter_cache[i] - ref;
4378 adj_rd += this_rd;
4379 best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
4380 }
4381 }
4383 if (early_term) break;
4385 if (x->skip && !comp_pred) break;
4386 }
4388 if (best_rd >= best_rd_so_far) {
4389 rd_cost->rate = INT_MAX;
4390 rd_cost->rdcost = INT64_MAX;
4391 return;
4392 }
4394 // If we used an estimate for the uv intra rd in the loop above...
4395 if (sf->use_uv_intra_rd_estimate) {
4396 // Do Intra UV best rd mode selection if best mode choice above was intra.
4397 if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
4398 *mi = best_mbmode;
4399 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, &rate_uv_tokenonly,
4400 &dist_uv, &skip_uv, BLOCK_8X8, TX_4X4);
4401 }
4402 }
4404 if (best_rd == INT64_MAX) {
4405 rd_cost->rate = INT_MAX;
4406 rd_cost->dist = INT64_MAX;
4407 rd_cost->rdcost = INT64_MAX;
4408 return;
4409 }
4411 assert((cm->interp_filter == SWITCHABLE) ||
4412 (cm->interp_filter == best_mbmode.interp_filter) ||
4413 !is_inter_block(&best_mbmode));
4415 vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact, sf->adaptive_rd_thresh,
4416 bsize, best_ref_index);
4418 // macroblock modes
4419 *mi = best_mbmode;
4420 x->skip |= best_skip2;
4421 if (!is_inter_block(&best_mbmode)) {
4422 for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
4423 } else {
4424 for (i = 0; i < 4; ++i)
4425 memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
4427 mi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
4428 mi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
4429 }
4431 for (i = 0; i < REFERENCE_MODES; ++i) {
4432 if (best_pred_rd[i] == INT64_MAX)
4433 best_pred_diff[i] = INT_MIN;
4434 else
4435 best_pred_diff[i] = best_rd - best_pred_rd[i];
4436 }
4438 if (!x->skip) {
4439 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4440 if (best_filter_rd[i] == INT64_MAX)
4441 best_filter_diff[i] = 0;
4442 else
4443 best_filter_diff[i] = best_rd - best_filter_rd[i];
4444 }
4445 if (cm->interp_filter == SWITCHABLE)
4446 assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
4447 } else {
4448 vp9_zero(best_filter_diff);
4449 }
4451 store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff,
4452 0);
4453 }