/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp10_rtcd.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/system_state.h"

#include "vp10/common/common.h"
#include "vp10/common/entropy.h"
#include "vp10/common/entropymode.h"
#include "vp10/common/idct.h"
#include "vp10/common/mvref_common.h"
#include "vp10/common/pred_common.h"
#include "vp10/common/quant_common.h"
#include "vp10/common/reconinter.h"
#include "vp10/common/reconintra.h"
#include "vp10/common/scan.h"
#include "vp10/common/seg_common.h"

#include "vp10/encoder/cost.h"
#include "vp10/encoder/encodemb.h"
#include "vp10/encoder/encodemv.h"
#include "vp10/encoder/encoder.h"
#include "vp10/encoder/mcomp.h"
#include "vp10/encoder/quantize.h"
#include "vp10/encoder/ratectrl.h"
#include "vp10/encoder/rd.h"
#include "vp10/encoder/rdopt.h"
#include "vp10/encoder/aq_variance.h"
#define LAST_FRAME_MODE_MASK    ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \
                                 (1 << INTRA_FRAME))
#define GOLDEN_FRAME_MODE_MASK  ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \
                                 (1 << INTRA_FRAME))
#define ALT_REF_MODE_MASK       ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \
                                 (1 << INTRA_FRAME))

#define SECOND_REF_FRAME_MASK   ((1 << ALTREF_FRAME) | 0x01)

#define MIN_EARLY_TERM_INDEX    3
#define NEW_MV_DISCOUNT_FACTOR  8
typedef struct {
  PREDICTION_MODE mode;
  MV_REFERENCE_FRAME ref_frame[2];
} MODE_DEFINITION;

typedef struct {
  MV_REFERENCE_FRAME ref_frame[2];
} REF_DEFINITION;
struct rdcost_block_args {
  ENTROPY_CONTEXT t_above[16];
  ENTROPY_CONTEXT t_left[16];
  int use_fast_coef_costing;
#define LAST_NEW_MV_INDEX 6
static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
  {NEARESTMV, {LAST_FRAME,   NONE}},
  {NEARESTMV, {ALTREF_FRAME, NONE}},
  {NEARESTMV, {GOLDEN_FRAME, NONE}},

  {DC_PRED,   {INTRA_FRAME,  NONE}},

  {NEWMV,     {LAST_FRAME,   NONE}},
  {NEWMV,     {ALTREF_FRAME, NONE}},
  {NEWMV,     {GOLDEN_FRAME, NONE}},

  {NEARMV,    {LAST_FRAME,   NONE}},
  {NEARMV,    {ALTREF_FRAME, NONE}},
  {NEARMV,    {GOLDEN_FRAME, NONE}},

  {ZEROMV,    {LAST_FRAME,   NONE}},
  {ZEROMV,    {GOLDEN_FRAME, NONE}},
  {ZEROMV,    {ALTREF_FRAME, NONE}},

  {NEARESTMV, {LAST_FRAME,   ALTREF_FRAME}},
  {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}},

  {TM_PRED,   {INTRA_FRAME,  NONE}},

  {NEARMV,    {LAST_FRAME,   ALTREF_FRAME}},
  {NEWMV,     {LAST_FRAME,   ALTREF_FRAME}},
  {NEARMV,    {GOLDEN_FRAME, ALTREF_FRAME}},
  {NEWMV,     {GOLDEN_FRAME, ALTREF_FRAME}},

  {ZEROMV,    {LAST_FRAME,   ALTREF_FRAME}},
  {ZEROMV,    {GOLDEN_FRAME, ALTREF_FRAME}},

  {H_PRED,    {INTRA_FRAME,  NONE}},
  {V_PRED,    {INTRA_FRAME,  NONE}},
  {D135_PRED, {INTRA_FRAME,  NONE}},
  {D207_PRED, {INTRA_FRAME,  NONE}},
  {D153_PRED, {INTRA_FRAME,  NONE}},
  {D63_PRED,  {INTRA_FRAME,  NONE}},
  {D117_PRED, {INTRA_FRAME,  NONE}},
  {D45_PRED,  {INTRA_FRAME,  NONE}},
};
static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
  {{LAST_FRAME,   NONE}},
  {{GOLDEN_FRAME, NONE}},
  {{ALTREF_FRAME, NONE}},
  {{LAST_FRAME,   ALTREF_FRAME}},
  {{GOLDEN_FRAME, ALTREF_FRAME}},
  {{INTRA_FRAME,  NONE}},
};
static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
                           int m, int n, int min_plane, int max_plane) {
  for (i = min_plane; i < max_plane; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &x->e_mbd.plane[i];

    p->coeff = ctx->coeff_pbuf[i][m];
    p->qcoeff = ctx->qcoeff_pbuf[i][m];
    pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
    p->eobs = ctx->eobs_pbuf[i][m];

    ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
    ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
    ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
    ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];

    ctx->coeff_pbuf[i][n] = p->coeff;
    ctx->qcoeff_pbuf[i][n] = p->qcoeff;
    ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
    ctx->eobs_pbuf[i][n] = p->eobs;
static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
                            MACROBLOCK *x, MACROBLOCKD *xd,
                            int *out_rate_sum, int64_t *out_dist_sum,
                            int *skip_txfm_sb, int64_t *skip_sse_sb) {
  // Note that our transform coefficients are 8 times those of an orthogonal
  // transform, so the quantizer step is also 8 times larger. To get the
  // effective quantizer we need to divide by 8 before sending it to the
  // modeling function.
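  // Illustration (assuming 8-bit content, where the divide-by-8 is the
  // dequant_shift computed below): an AC dequant value of 96 corresponds to
  // an effective quantizer of 96 >> 3 == 12 when it reaches the modeling
  // functions further down.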
  int64_t rate_sum = 0;
  int64_t dist_sum = 0;
  const int ref = xd->mi[0]->mbmi.ref_frame[0];
  unsigned int var = 0;
  unsigned int sum_sse = 0;
  int64_t total_sse = 0;
  const int dequant_shift =
#if CONFIG_VP9_HIGHBITDEPTH
      (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
#endif  // CONFIG_VP9_HIGHBITDEPTH

  x->pred_sse[ref] = 0;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
    const TX_SIZE max_tx_size = max_txsize_lookup[bs];
    const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size];
    const int64_t dc_thr = p->quant_thred[0] >> shift;
    const int64_t ac_thr = p->quant_thred[1] >> shift;
    // The low thresholds are used to measure if the prediction errors are
    // low enough so that we can skip the mode search.
    const int64_t low_dc_thr = MIN(50, dc_thr >> 2);
    const int64_t low_ac_thr = MIN(80, ac_thr >> 2);
    int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
    int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
    int lw = b_width_log2_lookup[unit_size] + 2;
    int lh = b_height_log2_lookup[unit_size] + 2;

    for (idy = 0; idy < bh; ++idy) {
      for (idx = 0; idx < bw; ++idx) {
        uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw);
        uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lh);
        int block_idx = (idy << 1) + idx;
        int low_err_skip = 0;

        var = cpi->fn_ptr[unit_size].vf(src, p->src.stride,
                                        dst, pd->dst.stride, &sse);
        x->bsse[(i << 2) + block_idx] = sse;

        x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_NONE;
        if (!x->select_tx_size) {
          // Check if all ac coefficients can be quantized to zero.
          if (var < ac_thr || var == 0) {
            x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_ONLY;

            // Check if dc coefficient can be quantized to zero.
            if (sse - var < dc_thr || sse == var) {
              x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_DC;

              if (!sse || (var < low_ac_thr && sse - var < low_dc_thr))

        if (skip_flag && !low_err_skip)

        x->pred_sse[ref] += sse;
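        // In summary (a reading of the checks above, not new behaviour): each
        // prediction unit starts as SKIP_TXFM_NONE, is promoted to
        // SKIP_TXFM_AC_ONLY when its AC energy (var) falls below ac_thr, and
        // to SKIP_TXFM_AC_DC when the DC energy (sse - var) also falls below
        // dc_thr. The whole-block skip_flag survives only if every unit also
        // meets the much tighter low_ac_thr/low_dc_thr limits checked just
        // above.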
    total_sse += sum_sse;

    // Fast approximation of the modelling function.
    if (cpi->sf.simple_model_rd_from_var) {
      const int64_t square_error = sum_sse;
      int quantizer = (pd->dequant[1] >> dequant_shift);

      rate = (square_error * (280 - quantizer)) >> 8;

      dist = (square_error * quantizer) >> 8;

      vp10_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
                                    pd->dequant[1] >> dequant_shift,

  *skip_txfm_sb = skip_flag;
  *skip_sse_sb = total_sse << 4;
  *out_rate_sum = (int)rate_sum;
  *out_dist_sum = dist_sum << 4;
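  // Worked example for the simple model above (a sketch, assuming 8-bit
  // content so the effective quantizer is dequant >> 3): with sum_sse == 1024
  // and an AC dequant value of 96, quantizer == 12, so
  // rate ~= (1024 * (280 - 12)) >> 8 == 1072 and
  // dist ~= (1024 * 12) >> 8 == 48, which becomes 48 << 4 == 768 after the
  // final distortion scaling above.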
int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                           intptr_t block_size, int64_t *ssz) {
  int64_t error = 0, sqcoeff = 0;

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += coeff[i] * coeff[i];

int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
#if CONFIG_VP9_HIGHBITDEPTH
int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
                                  const tran_low_t *dqcoeff,
                                  int64_t *ssz, int bd) {
  int64_t error = 0, sqcoeff = 0;
  int shift = 2 * (bd - 8);
  int rounding = shift > 0 ? 1 << (shift - 1) : 0;

  for (i = 0; i < block_size; i++) {
    const int64_t diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];

  assert(error >= 0 && sqcoeff >= 0);
  error = (error + rounding) >> shift;
  sqcoeff = (sqcoeff + rounding) >> shift;

#endif  // CONFIG_VP9_HIGHBITDEPTH
/* The trailing '0' is a terminator which is used inside cost_coeffs() to
 * decide whether to include cost of a trailing EOB node or not (i.e. we
 * can skip this if the last coefficient in this transform block, e.g. the
 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
 * were non-zero). */
static const int16_t band_counts[TX_SIZES][8] = {
  { 1, 2, 3, 4,  3,   16 - 13, 0 },
  { 1, 2, 3, 4, 11,   64 - 21, 0 },
  { 1, 2, 3, 4, 11,  256 - 21, 0 },
  { 1, 2, 3, 4, 11, 1024 - 21, 0 },
};
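/* For example, a 4x4 transform block's 16 coefficients are partitioned into
 * bands of 1, 2, 3, 4, 3 and 16 - 13 = 3 coefficients; the larger transforms
 * use bands of 1, 2, 3, 4 and 11 coefficients and put all remaining
 * coefficients in the final band. The trailing 0 terminates the walk in
 * cost_coeffs(). */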
static int cost_coeffs(MACROBLOCK *x,
                       int plane, int block,
                       ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
                       const int16_t *scan, const int16_t *nb,
                       int use_fast_coef_costing) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  const struct macroblock_plane *p = &x->plane[plane];
  const struct macroblockd_plane *pd = &xd->plane[plane];
  const PLANE_TYPE type = pd->plane_type;
  const int16_t *band_count = &band_counts[tx_size][1];
  const int eob = p->eobs[block];
  const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
                   x->token_costs[tx_size][type][is_inter_block(mbmi)];
  uint8_t token_cache[32 * 32];
  int pt = combine_entropy_contexts(*A, *L);
#if CONFIG_VP9_HIGHBITDEPTH
  const int16_t *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
#else
  const int16_t *cat6_high_cost = vp10_get_high_cost_table(8);
#endif

  // Check for consistency of tx_size with mode info
  assert(type == PLANE_TYPE_Y ? mbmi->tx_size == tx_size
                              : get_uv_tx_size(mbmi, pd) == tx_size);

    cost = token_costs[0][0][pt][EOB_TOKEN];

    int band_left = *band_count++;

      vp10_get_token_extra(v, &prev_t, &e);
      cost = (*token_costs)[0][pt][prev_t] +
          vp10_get_cost(prev_t, e, cat6_high_cost);

    token_cache[0] = vp10_pt_energy_class[prev_t];

    for (c = 1; c < eob; c++) {
      const int rc = scan[c];

      vp10_get_token_extra(v, &t, &e);
      if (use_fast_coef_costing) {
        cost += (*token_costs)[!prev_t][!prev_t][t] +
            vp10_get_cost(t, e, cat6_high_cost);

        pt = get_coef_context(nb, token_cache, c);
        cost += (*token_costs)[!prev_t][pt][t] +
            vp10_get_cost(t, e, cat6_high_cost);
        token_cache[rc] = vp10_pt_energy_class[t];

        band_left = *band_count++;

      if (use_fast_coef_costing) {
        cost += (*token_costs)[0][!prev_t][EOB_TOKEN];

        pt = get_coef_context(nb, token_cache, c);
        cost += (*token_costs)[0][pt][EOB_TOKEN];

  // Is the eob the first coefficient?
static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
                       int64_t *out_dist, int64_t *out_sse) {
  const int ss_txfrm_size = tx_size << 1;
  MACROBLOCKD* const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  int shift = tx_size == TX_32X32 ? 0 : 2;
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
#if CONFIG_VP9_HIGHBITDEPTH
  const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
  *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
                                      &this_sse, bd) >> shift;
#else
  *out_dist = vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
                               &this_sse) >> shift;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  *out_sse = this_sse >> shift;

  if (x->skip_encode && !is_inter_block(&xd->mi[0]->mbmi)) {
    // TODO(jingning): tune the model to better capture the distortion.
    int64_t p = (pd->dequant[1] * pd->dequant[1] *
                    (1 << ss_txfrm_size)) >>
#if CONFIG_VP9_HIGHBITDEPTH
                        (shift + 2 + (bd - 8) * 2);
#else
                        (shift + 2);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    *out_dist += (p >> 4);
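    // Rough model (as coded above, not a new rule): when the intra encode is
    // skipped, the unencoded residual is charged as an extra distortion term
    // proportional to dequant[1]^2 times the number of coefficients in the
    // transform block, rescaled to the same pixel-domain units as *out_dist.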
static int rate_block(int plane, int block, BLOCK_SIZE plane_bsize,
                      TX_SIZE tx_size, struct rdcost_block_args* args) {
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x_idx, &y_idx);

  return cost_coeffs(args->x, plane, block, args->t_above + x_idx,
                     args->t_left + y_idx, tx_size,
                     args->so->scan, args->so->neighbors,
                     args->use_fast_coef_costing);
static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize,
                          TX_SIZE tx_size, void *arg) {
  struct rdcost_block_args *args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  int64_t rd1, rd2, rd;

  if (args->exit_early)

  if (!is_inter_block(mbmi)) {
    struct encode_b_args arg = {x, NULL, &mbmi->skip};
    vp10_encode_block_intra(plane, block, plane_bsize, tx_size, &arg);
    dist_block(x, plane, block, tx_size, &dist, &sse);
  } else if (max_txsize_lookup[plane_bsize] == tx_size) {
    if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
      // full forward transform and quantization
      vp10_xform_quant(x, plane, block, plane_bsize, tx_size);
      dist_block(x, plane, block, tx_size, &dist, &sse);
    } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
      // compute DC coefficient
      tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
      tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
      vp10_xform_quant_dc(x, plane, block, plane_bsize, tx_size);
      sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;

      if (x->plane[plane].eobs[block]) {
        const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
        const int64_t resd_sse = coeff[0] - dqcoeff[0];
        int64_t dc_correct = orig_sse - resd_sse * resd_sse;
#if CONFIG_VP9_HIGHBITDEPTH
        dc_correct >>= ((xd->bd - 8) * 2);
#endif
        if (tx_size != TX_32X32)

        dist = MAX(0, sse - dc_correct);
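        // In other words: sse assumed the whole residual stays uncoded;
        // orig_sse == coeff[0]^2 is the DC energy before quantization and
        // (coeff[0] - dqcoeff[0])^2 is what remains after reconstruction, so
        // their difference is the distortion removed by coding just the DC
        // coefficient. Example: coeff[0] == 40 and dqcoeff[0] == 36 gives
        // dc_correct == 1600 - 16 == 1584 (before the per-tx-size rescaling),
        // which is then subtracted from sse with a clamp at zero.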
      // skip forward transform
      x->plane[plane].eobs[block] = 0;
      sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;

    // full forward transform and quantization
    vp10_xform_quant(x, plane, block, plane_bsize, tx_size);
    dist_block(x, plane, block, tx_size, &dist, &sse);

  rd = RDCOST(x->rdmult, x->rddiv, 0, dist);
  if (args->this_rd + rd > args->best_rd) {
    args->exit_early = 1;

  rate = rate_block(plane, block, plane_bsize, tx_size, args);
  rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist);
  rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);

  // TODO(jingning): temporarily enabled only for luma component
  x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] ||
                                  (rd1 > rd2 && !xd->lossless);
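  // Reading of the two costs above: rd1 is the Lagrangian cost of actually
  // coding this transform block (rate plus its reconstruction distortion),
  // while rd2 is the cost of forcing it to all zeros (zero rate, the full sse
  // as distortion). The block is flagged in zcoeff_blk when it already has no
  // nonzero coefficients, or when skipping is cheaper and the coder is not in
  // lossless mode.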
  args->this_rate += rate;
  args->this_dist += dist;
  args->this_sse += sse;

  if (args->this_rd > args->best_rd) {
    args->exit_early = 1;

  args->skippable &= !x->plane[plane].eobs[block];
static void txfm_rd_in_plane(MACROBLOCK *x,
                             int *rate, int64_t *distortion,
                             int *skippable, int64_t *sse,
                             int64_t ref_best_rd, int plane,
                             BLOCK_SIZE bsize, TX_SIZE tx_size,
                             int use_fast_coef_costing) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  struct rdcost_block_args args;

  args.best_rd = ref_best_rd;
  args.use_fast_coef_costing = use_fast_coef_costing;

  xd->mi[0]->mbmi.tx_size = tx_size;

  vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);

  tx_type = get_tx_type(pd->plane_type, xd, 0);
  args.so = get_scan(tx_size, tx_type);

  vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
                                          block_rd_txfm, &args);
  if (args.exit_early) {
    *distortion = INT64_MAX;

    *distortion = args.this_dist;
    *rate = args.this_rate;
    *sse = args.this_sse;
    *skippable = args.skippable;
static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
                                   int *rate, int64_t *distortion,
                                   int *skip, int64_t *sse,
  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
  VP10_COMMON *const cm = &cpi->common;
  const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;

  mbmi->tx_size = MIN(max_tx_size, largest_tx_size);

  txfm_rd_in_plane(x, rate, distortion, skip,
                   sse, ref_best_rd, 0, bs,
                   mbmi->tx_size, cpi->sf.use_fast_coef_costing);
static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
  VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
  int r[TX_SIZES][2], s[TX_SIZES];
  int64_t d[TX_SIZES], sse[TX_SIZES];
  int64_t rd[TX_SIZES][2] = {{INT64_MAX, INT64_MAX},
                             {INT64_MAX, INT64_MAX},
                             {INT64_MAX, INT64_MAX},
                             {INT64_MAX, INT64_MAX}};
  int64_t best_rd = INT64_MAX;
  TX_SIZE best_tx = max_tx_size;
  int start_tx, end_tx;

  const vpx_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
  assert(skip_prob > 0);
  s0 = vp10_cost_bit(skip_prob, 0);
  s1 = vp10_cost_bit(skip_prob, 1);

  if (cm->tx_mode == TX_MODE_SELECT) {
    start_tx = max_tx_size;
    TX_SIZE chosen_tx_size = MIN(max_tx_size,
                                 tx_mode_to_biggest_tx_size[cm->tx_mode]);
    start_tx = chosen_tx_size;
    end_tx = chosen_tx_size;

  for (n = start_tx; n >= end_tx; n--) {
    for (m = 0; m <= n - (n == (int) max_tx_size); m++) {
        r_tx_size += vp10_cost_zero(tx_probs[m]);
        r_tx_size += vp10_cost_one(tx_probs[m]);

    txfm_rd_in_plane(x, &r[n][0], &d[n], &s[n],
                     &sse[n], ref_best_rd, 0, bs, n,
                     cpi->sf.use_fast_coef_costing);
    if (r[n][0] < INT_MAX) {
      r[n][1] += r_tx_size;

    if (d[n] == INT64_MAX || r[n][0] == INT_MAX) {
      rd[n][0] = rd[n][1] = INT64_MAX;
      if (is_inter_block(mbmi)) {
        rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
        rd[n][0] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
        rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size, sse[n]);
      rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
      rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);

    // Early termination in transform size search.
    if (cpi->sf.tx_size_search_breakout &&
        (rd[n][1] == INT64_MAX ||
         (n < (int) max_tx_size && rd[n][1] > rd[n + 1][1]) ||

    if (rd[n][1] < best_rd) {

  mbmi->tx_size = best_tx;

  *distortion = d[mbmi->tx_size];
  *rate = r[mbmi->tx_size][cm->tx_mode == TX_MODE_SELECT];
  *skip = s[mbmi->tx_size];
  *psse = sse[mbmi->tx_size];
static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
                            int64_t *distortion, int *skip,
                            int64_t *psse, BLOCK_SIZE bs,
                            int64_t ref_best_rd) {
  MACROBLOCKD *xd = &x->e_mbd;
  int64_t *ret_sse = psse ? psse : &sse;

  assert(bs == xd->mi[0]->mbmi.sb_type);

  if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
    choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
    choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse,
static int conditional_skipintra(PREDICTION_MODE mode,
                                 PREDICTION_MODE best_intra_mode) {
  if (mode == D117_PRED &&
      best_intra_mode != V_PRED &&
      best_intra_mode != D135_PRED)
  if (mode == D63_PRED &&
      best_intra_mode != V_PRED &&
      best_intra_mode != D45_PRED)
  if (mode == D207_PRED &&
      best_intra_mode != H_PRED &&
      best_intra_mode != D45_PRED)
  if (mode == D153_PRED &&
      best_intra_mode != H_PRED &&
      best_intra_mode != D135_PRED)
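  // Rationale (a reading of the checks above): each oblique direction is only
  // worth searching when the best mode found so far is one of its two
  // flanking directions; e.g. D117 lies between V and D135, so it is skipped
  // unless one of those is currently the best intra mode.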
static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
                                     PREDICTION_MODE *best_mode,
                                     const int *bmode_costs,
                                     ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                                     int *bestrate, int *bestratey,
                                     int64_t *bestdistortion,
                                     BLOCK_SIZE bsize, int64_t rd_thresh) {
  PREDICTION_MODE mode;
  MACROBLOCKD *const xd = &x->e_mbd;
  int64_t best_rd = rd_thresh;
  struct macroblock_plane *p = &x->plane[0];
  struct macroblockd_plane *pd = &xd->plane[0];
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4];
  uint8_t *dst_init = &pd->dst.buf[row * 4 * src_stride + col * 4];
  ENTROPY_CONTEXT ta[2], tempa[2];
  ENTROPY_CONTEXT tl[2], templ[2];
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  uint8_t best_dst[8 * 8];
#if CONFIG_VP9_HIGHBITDEPTH
  uint16_t best_dst16[8 * 8];

  memcpy(ta, a, sizeof(ta));
  memcpy(tl, l, sizeof(tl));
  xd->mi[0]->mbmi.tx_size = TX_4X4;

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
      int64_t distortion = 0;
      int rate = bmode_costs[mode];

      if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))

      // Only do the oblique modes if the best so far is
      // one of the neighboring directional modes
      if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
        if (conditional_skipintra(mode, *best_mode))

      memcpy(tempa, ta, sizeof(ta));
      memcpy(templ, tl, sizeof(tl));

      for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
        for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
          const int block = (row + idy) * 2 + (col + idx);
          const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
          uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
          int16_t *const src_diff = vp10_raster_block_offset_int16(BLOCK_8X8,
          tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
          xd->mi[0]->bmi[block].as_mode = mode;
          vp10_predict_intra_block(xd, 1, TX_4X4, mode,
                                   x->skip_encode ? src : dst,
                                   x->skip_encode ? src_stride : dst_stride,
                                   col + idx, row + idy, 0);
          vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride,
                                    dst, dst_stride, xd->bd);
            TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
            const scan_order *so = get_scan(TX_4X4, tx_type);
            vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT,
                                     vp10_highbd_fwht4x4);
            vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
            ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                                 so->scan, so->neighbors,
                                 cpi->sf.use_fast_coef_costing);
            if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
                                         dst, dst_stride, p->eobs[block],
                                         vp10_highbd_iwht4x4_add);
            TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
            const scan_order *so = get_scan(TX_4X4, tx_type);
            vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, tx_type,
            vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
            ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                                 so->scan, so->neighbors,
                                 cpi->sf.use_fast_coef_costing);
            distortion += vp10_highbd_block_error(
                coeff, BLOCK_OFFSET(pd->dqcoeff, block),
                16, &unused, xd->bd) >> 2;
            if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
                                         dst, dst_stride, p->eobs[block],
                                         vp10_highbd_idct4x4_add);

      this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

      if (this_rd < best_rd) {
        *bestdistortion = distortion;
        memcpy(a, tempa, sizeof(tempa));
        memcpy(l, templ, sizeof(templ));
        for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
          memcpy(best_dst16 + idy * 8,
                 CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
                 num_4x4_blocks_wide * 4 * sizeof(uint16_t));

    if (best_rd >= rd_thresh || x->skip_encode)

    for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
      memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
             best_dst16 + idy * 8,
             num_4x4_blocks_wide * 4 * sizeof(uint16_t));

#endif  // CONFIG_VP9_HIGHBITDEPTH
  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    int64_t distortion = 0;
    int rate = bmode_costs[mode];

    if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))

    // Only do the oblique modes if the best so far is
    // one of the neighboring directional modes
    if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
      if (conditional_skipintra(mode, *best_mode))

    memcpy(tempa, ta, sizeof(ta));
    memcpy(templ, tl, sizeof(tl));

    for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
      for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
        const int block = (row + idy) * 2 + (col + idx);
        const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
        uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
        int16_t *const src_diff =
            vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
        tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
        xd->mi[0]->bmi[block].as_mode = mode;
        vp10_predict_intra_block(xd, 1, TX_4X4, mode,
                                 x->skip_encode ? src : dst,
                                 x->skip_encode ? src_stride : dst_stride,
                                 dst, dst_stride, col + idx, row + idy, 0);
        vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);

          TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
          const scan_order *so = get_scan(TX_4X4, tx_type);
          vp10_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, vp10_fwht4x4);
          vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
          ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                               so->scan, so->neighbors,
                               cpi->sf.use_fast_coef_costing);
          if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
                                dst, dst_stride, p->eobs[block], DCT_DCT,

          TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
          const scan_order *so = get_scan(TX_4X4, tx_type);
          vp10_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, vpx_fdct4x4);
          vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
          ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                               so->scan, so->neighbors,
                               cpi->sf.use_fast_coef_costing);
          distortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
          if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
                                dst, dst_stride, p->eobs[block], tx_type,

    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

    if (this_rd < best_rd) {
      *bestdistortion = distortion;
      memcpy(a, tempa, sizeof(tempa));
      memcpy(l, templ, sizeof(templ));
      for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
        memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
               num_4x4_blocks_wide * 4);

  if (best_rd >= rd_thresh || x->skip_encode)

  for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
    memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
           num_4x4_blocks_wide * 4);
static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
                                            int *rate, int *rate_y,
  const MACROBLOCKD *const xd = &mb->e_mbd;
  MODE_INFO *const mic = xd->mi[0];
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int64_t total_distortion = 0;
  int64_t total_rd = 0;
  ENTROPY_CONTEXT t_above[4], t_left[4];
  const int *bmode_costs = cpi->mbmode_cost;

  memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
  memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));

  // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      PREDICTION_MODE best_mode = DC_PRED;
      int r = INT_MAX, ry = INT_MAX;
      int64_t d = INT64_MAX, this_rd = INT64_MAX;

      if (cpi->common.frame_type == KEY_FRAME) {
        const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
        const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);

        bmode_costs = cpi->y_mode_costs[A][L];

      this_rd = rd_pick_intra4x4block(cpi, mb, idy, idx, &best_mode,
                                      bmode_costs, t_above + idx, t_left + idy,
                                      &r, &ry, &d, bsize, best_rd - total_rd);
      if (this_rd >= best_rd - total_rd)

      total_rd += this_rd;

      total_distortion += d;

      mic->bmi[i].as_mode = best_mode;
      for (j = 1; j < num_4x4_blocks_high; ++j)
        mic->bmi[i + j * 2].as_mode = best_mode;
      for (j = 1; j < num_4x4_blocks_wide; ++j)
        mic->bmi[i + j].as_mode = best_mode;

      if (total_rd >= best_rd)

  *rate_y = tot_rate_y;
  *distortion = total_distortion;
  mic->mbmi.mode = mic->bmi[3].as_mode;

  return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
// This function is used only for intra_only frames
static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x,
                                      int *rate, int *rate_tokenonly,
                                      int64_t *distortion, int *skippable,
  PREDICTION_MODE mode;
  PREDICTION_MODE mode_selected = DC_PRED;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mic = xd->mi[0];
  int this_rate, this_rate_tokenonly, s;
  int64_t this_distortion, this_rd;
  TX_SIZE best_tx = TX_4X4;
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0);
  const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, 0);
  bmode_costs = cpi->y_mode_costs[A][L];

  memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
  /* Y Search for intra prediction mode */
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    mic->mbmi.mode = mode;

    super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
                    &s, NULL, bsize, best_rd);

    if (this_rate_tokenonly == INT_MAX)

    this_rate = this_rate_tokenonly + bmode_costs[mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_tx = mic->mbmi.tx_size;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;

  mic->mbmi.mode = mode_selected;
  mic->mbmi.tx_size = best_tx;
// Return value 0: early termination triggered, no valid rd cost available;
//              1: rd cost values are valid.
static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x,
                            int *rate, int64_t *distortion, int *skippable,
                            int64_t *sse, BLOCK_SIZE bsize,
                            int64_t ref_best_rd) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const TX_SIZE uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
  int pnrate = 0, pnskip = 1;
  int64_t pndist = 0, pnsse = 0;
  int is_cost_valid = 1;

  if (ref_best_rd < 0)

  if (is_inter_block(mbmi) && is_cost_valid) {
    for (plane = 1; plane < MAX_MB_PLANE; ++plane)
      vp10_subtract_plane(x, bsize, plane);

  for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
    txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse,
                     ref_best_rd, plane, bsize, uv_tx_size,
                     cpi->sf.use_fast_coef_costing);
    if (pnrate == INT_MAX) {

    *distortion += pndist;

    *skippable &= pnskip;

  if (!is_cost_valid) {
    *distortion = INT64_MAX;

  return is_cost_valid;
static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
                                       PICK_MODE_CONTEXT *ctx,
                                       int *rate, int *rate_tokenonly,
                                       int64_t *distortion, int *skippable,
                                       BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
  MACROBLOCKD *xd = &x->e_mbd;
  PREDICTION_MODE mode;
  PREDICTION_MODE mode_selected = DC_PRED;
  int64_t best_rd = INT64_MAX, this_rd;
  int this_rate_tokenonly, this_rate, s;
  int64_t this_distortion, this_sse;

  memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))

    xd->mi[0]->mbmi.uv_mode = mode;

    if (!super_block_uvrd(cpi, x, &this_rate_tokenonly,
                          &this_distortion, &s, &this_sse, bsize, best_rd))

    this_rate = this_rate_tokenonly +
        cpi->intra_uv_mode_cost[cpi->common.frame_type][mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;

      if (!x->select_tx_size)
        swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);

  xd->mi[0]->mbmi.uv_mode = mode_selected;
static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x,
                              int *rate, int *rate_tokenonly,
                              int64_t *distortion, int *skippable,
  const VP10_COMMON *cm = &cpi->common;

  x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
  memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
  super_block_uvrd(cpi, x, rate_tokenonly, distortion,
                   skippable, &unused, bsize, INT64_MAX);
  *rate = *rate_tokenonly + cpi->intra_uv_mode_cost[cm->frame_type][DC_PRED];
  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
                                 PICK_MODE_CONTEXT *ctx,
                                 BLOCK_SIZE bsize, TX_SIZE max_tx_size,
                                 int *rate_uv, int *rate_uv_tokenonly,
                                 int64_t *dist_uv, int *skip_uv,
                                 PREDICTION_MODE *mode_uv) {
  // Use an estimated rd for uv_intra based on DC_PRED if the
  // appropriate speed flag is set.
  if (cpi->sf.use_uv_intra_rd_estimate) {
    rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv,
                   skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
  // Else do a proper rd search for each possible transform size that may
  // be considered in the main rd loop.
    rd_pick_intra_sbuv_mode(cpi, x, ctx,
                            rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
                            bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);

  *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
  assert(is_inter_mode(mode));
  return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                PREDICTION_MODE mode, int_mv this_mv[2],
                                int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                                int_mv seg_mvs[MAX_REF_FRAMES],
                                int_mv *best_ref_mv[2], const int *mvjcost,
  MODE_INFO *const mic = xd->mi[0];
  const MB_MODE_INFO *const mbmi = &mic->mbmi;
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
  const int is_compound = has_second_ref(mbmi);

      this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
      thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
        this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
        thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
                                       mvjcost, mvcost, MV_COST_WEIGHT_SUB);

      this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
        this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;

      this_mv[0].as_int = 0;
        this_mv[1].as_int = 0;

  mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
    mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;

  mic->bmi[i].as_mode = mode;

  for (idy = 0; idy < num_4x4_blocks_high; ++idy)
    for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
      memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));

  return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mbmi->ref_frame[0]]) +
static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
                                       int64_t *distortion, int64_t *sse,
                                       ENTROPY_CONTEXT *ta,
                                       ENTROPY_CONTEXT *tl,
                                       int mi_row, int mi_col) {
  MACROBLOCKD *xd = &x->e_mbd;
  struct macroblockd_plane *const pd = &xd->plane[0];
  struct macroblock_plane *const p = &x->plane[0];
  MODE_INFO *const mi = xd->mi[0];
  const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
  const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];

  const uint8_t *const src =
      &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
  uint8_t *const dst = &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i,
  int64_t thisdistortion = 0, thissse = 0;
  int thisrate = 0, ref;
  TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i);
  const scan_order *so = get_scan(TX_4X4, tx_type);
  const int is_compound = has_second_ref(&mi->mbmi);
  const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter];

  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const uint8_t *pre = &pd->pre[ref].buf[vp10_raster_block_offset(BLOCK_8X8, i,
                                           pd->pre[ref].stride)];
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp10_highbd_build_inter_predictor(pre, pd->pre[ref].stride,
                                        dst, pd->dst.stride,
                                        &mi->bmi[i].as_mv[ref].as_mv,
                                        &xd->block_refs[ref]->sf, width, height,
                                        ref, kernel, MV_PRECISION_Q3,
                                        mi_col * MI_SIZE + 4 * (i % 2),
                                        mi_row * MI_SIZE + 4 * (i / 2), xd->bd);
      vp10_build_inter_predictor(pre, pd->pre[ref].stride,
                                 dst, pd->dst.stride,
                                 &mi->bmi[i].as_mv[ref].as_mv,
                                 &xd->block_refs[ref]->sf, width, height, ref,
                                 kernel, MV_PRECISION_Q3,
                                 mi_col * MI_SIZE + 4 * (i % 2),
                                 mi_row * MI_SIZE + 4 * (i / 2));
    vp10_build_inter_predictor(pre, pd->pre[ref].stride,
                               dst, pd->dst.stride,
                               &mi->bmi[i].as_mv[ref].as_mv,
                               &xd->block_refs[ref]->sf, width, height, ref,
                               kernel, MV_PRECISION_Q3,
                               mi_col * MI_SIZE + 4 * (i % 2),
                               mi_row * MI_SIZE + 4 * (i / 2));
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    vpx_highbd_subtract_block(
        height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
        8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
        height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
        8, src, p->src.stride, dst, pd->dst.stride);
  vpx_subtract_block(height, width,
                     vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                     8, src, p->src.stride, dst, pd->dst.stride);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  for (idy = 0; idy < height / 4; ++idy) {
    for (idx = 0; idx < width / 4; ++idx) {
      int64_t ssz, rd, rd1, rd2;

      k += (idy * 2 + idx);
      coeff = BLOCK_OFFSET(p->coeff, k);
      x->fwd_txm4x4(vp10_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
      vp10_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        thisdistortion += vp10_highbd_block_error(coeff,
                                                  BLOCK_OFFSET(pd->dqcoeff, k),
        thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
      thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
#endif  // CONFIG_VP9_HIGHBITDEPTH

      thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4,
                              so->scan, so->neighbors,
                              cpi->sf.use_fast_coef_costing);
      rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
      rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);

  *distortion = thisdistortion >> 2;
  *labelyrate = thisrate;
  *sse = thissse >> 2;

  return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
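  // Note on the >> 2 above (the same convention as dist_block()): the block
  // error is measured on transform coefficients, which carry the extra
  // scaling of the forward 4x4 transform, so the coefficient-domain SSE is
  // divided by 4 to express rate-distortion costs in pixel-domain units.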
  ENTROPY_CONTEXT ta[2];
  ENTROPY_CONTEXT tl[2];

  PREDICTION_MODE modes[4];
  SEG_RDSTAT rdstat[4][INTER_MODES];
static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
  return (mv->row >> 3) < x->mv_row_min ||
         (mv->row >> 3) > x->mv_row_max ||
         (mv->col >> 3) < x->mv_col_min ||
         (mv->col >> 3) > x->mv_col_max;

static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
  MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0]->mbmi;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &x->e_mbd.plane[0];

  p->src.buf = &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i,
  assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
  pd->pre[0].buf = &pd->pre[0].buf[vp10_raster_block_offset(BLOCK_8X8, i,
                                                            pd->pre[0].stride)];
  if (has_second_ref(mbmi))
    pd->pre[1].buf = &pd->pre[1].buf[vp10_raster_block_offset(BLOCK_8X8, i,
                                                              pd->pre[1].stride)];

static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
                                  struct buf_2d orig_pre[2]) {
  MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
  x->plane[0].src = orig_src;
  x->e_mbd.plane[0].pre[0] = orig_pre[0];
  if (has_second_ref(mbmi))
    x->e_mbd.plane[0].pre[1] = orig_pre[1];

static INLINE int mv_has_subpel(const MV *mv) {
  return (mv->row & 0x0F) || (mv->col & 0x0F);
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
// TODO(aconverse): Find out if this is still productive, then clean up or
// remove.
static int check_best_zero_mv(
    const VP10_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
    int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
    const MV_REFERENCE_FRAME ref_frames[2]) {
  if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
      frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
      (ref_frames[1] == NONE ||
       frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
    int rfc = mode_context[ref_frames[0]];
    int c1 = cost_mv_ref(cpi, NEARMV, rfc);
    int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
    int c3 = cost_mv_ref(cpi, ZEROMV, rfc);

    if (this_mode == NEARMV) {
      if (c1 > c3) return 0;
    } else if (this_mode == NEARESTMV) {
      if (c2 > c3) return 0;
      assert(this_mode == ZEROMV);
      if (ref_frames[1] == NONE) {
        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
             frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
             frame_mv[NEARMV][ref_frames[1]].as_int == 0))
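      // Example of the pruning above (single reference case): if NEARESTMV
      // also carries a (0,0) vector and its signalling cost c2 is not higher
      // than ZEROMV's c3, then coding ZEROMV would spend at least as many
      // bits for an identical prediction, so the ZEROMV candidate is dropped
      // (the function reports 0).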
static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
                                int mi_row, int mi_col,
                                int_mv single_newmv[MAX_REF_FRAMES],
  const VP10_COMMON *const cm = &cpi->common;
  const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
  const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  const int refs[2] = {mbmi->ref_frame[0],
                       mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]};
  const InterpKernel *kernel = vp10_filter_kernels[mbmi->interp_filter];
  struct scale_factors sf;

  // Do joint motion search in compound mode to get more accurate mv.
  struct buf_2d backup_yv12[2][MAX_MB_PLANE];
  int last_besterr[2] = {INT_MAX, INT_MAX};
  const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
    vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
    vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
  };

  // Prediction buffer from second frame.
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
  uint8_t *second_pred;
#else
  DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  for (ref = 0; ref < 2; ++ref) {
    ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];

    if (scaled_ref_frame[ref]) {
      // Swap out the reference frame for a version that's been scaled to
      // match the resolution of the current frame, allowing the existing
      // motion search code to be used without additional modifications.
      for (i = 0; i < MAX_MB_PLANE; i++)
        backup_yv12[ref][i] = xd->plane[i].pre[ref];
      vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,

    frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;

  // Since we have scaled the reference frames to match the size of the current
  // frame we must use a unit scaling factor during mode selection.
#if CONFIG_VP9_HIGHBITDEPTH
  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
                                     cm->width, cm->height,
                                     cm->use_highbitdepth);
#else
  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
                                     cm->width, cm->height);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  // Allow joint search multiple times iteratively for each reference frame
  // and break out of the search loop if it couldn't find a better mv.
  for (ite = 0; ite < 4; ite++) {
    struct buf_2d ref_yv12[2];
    int bestsme = INT_MAX;
    int sadpb = x->sadperbit16;
    int search_range = 3;

    int tmp_col_min = x->mv_col_min;
    int tmp_col_max = x->mv_col_max;
    int tmp_row_min = x->mv_row_min;
    int tmp_row_max = x->mv_row_max;
    int id = ite % 2;  // Even iterations search in the first reference frame,
                       // odd iterations search in the second. The predictor
                       // found for the 'other' reference frame is factored in.

    // Initialized here because of a compiler problem in Visual Studio.
    ref_yv12[0] = xd->plane[0].pre[0];
    ref_yv12[1] = xd->plane[0].pre[1];

    // Get the prediction block from the 'other' reference frame.
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
      vp10_highbd_build_inter_predictor(ref_yv12[!id].buf,
                                        ref_yv12[!id].stride,
                                        &frame_mv[refs[!id]].as_mv,
                                        kernel, MV_PRECISION_Q3,
                                        mi_col * MI_SIZE, mi_row * MI_SIZE,
      second_pred = (uint8_t *)second_pred_alloc_16;
      vp10_build_inter_predictor(ref_yv12[!id].buf,
                                 ref_yv12[!id].stride,
                                 &frame_mv[refs[!id]].as_mv,
                                 kernel, MV_PRECISION_Q3,
                                 mi_col * MI_SIZE, mi_row * MI_SIZE);
    vp10_build_inter_predictor(ref_yv12[!id].buf,
                               ref_yv12[!id].stride,
                               &frame_mv[refs[!id]].as_mv,
                               kernel, MV_PRECISION_Q3,
                               mi_col * MI_SIZE, mi_row * MI_SIZE);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    // Do compound motion search on the current reference frame.
    xd->plane[0].pre[0] = ref_yv12[id];
    vp10_set_mv_search_range(x, &ref_mv[id].as_mv);

    // Use the mv result from the single mode as mv predictor.
    tmp_mv = frame_mv[refs[id]].as_mv;

    // Small-range full-pixel motion search.
    bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb,
                                        &cpi->fn_ptr[bsize],
                                        &ref_mv[id].as_mv, second_pred);
    if (bestsme < INT_MAX)
      bestsme = vp10_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
                                       second_pred, &cpi->fn_ptr[bsize], 1);

    x->mv_col_min = tmp_col_min;
    x->mv_col_max = tmp_col_max;
    x->mv_row_min = tmp_row_min;
    x->mv_row_max = tmp_row_max;

    if (bestsme < INT_MAX) {
      int dis;  /* TODO: use dis in distortion calculation later. */
      bestsme = cpi->find_fractional_mv_step(
          cpi->common.allow_high_precision_mv,
          &cpi->fn_ptr[bsize],
          0, cpi->sf.mv.subpel_iters_per_step,
          x->nmvjointcost, x->mvcost,
          &dis, &sse, second_pred,

    // Restore the pointer to the first (possibly scaled) prediction buffer.
    xd->plane[0].pre[0] = ref_yv12[0];

    if (bestsme < last_besterr[id]) {
      frame_mv[refs[id]].as_mv = tmp_mv;
      last_besterr[id] = bestsme;

  for (ref = 0; ref < 2; ++ref) {
    if (scaled_ref_frame[ref]) {
      // Restore the prediction frame pointers to their unscaled versions.
      for (i = 0; i < MAX_MB_PLANE; i++)
        xd->plane[i].pre[ref] = backup_yv12[ref][i];

    *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
                                 &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
                                 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
                                        int_mv *best_ref_mv,
                                        int_mv *second_best_ref_mv,
                                        int64_t best_rd, int *returntotrate,
                                        int64_t *returndistortion,
                                        int *skippable, int64_t *psse,
                                        int_mv seg_mvs[4][MAX_REF_FRAMES],
                                        BEST_SEG_INFO *bsi_buf, int filter_idx,
                                        int mi_row, int mi_col) {
  BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
  MACROBLOCKD *xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  MB_MODE_INFO *mbmi = &mi->mbmi;
  int k, br = 0, idx, idy;
  int64_t bd = 0, block_sse = 0;
  PREDICTION_MODE this_mode;
  VP10_COMMON *cm = &cpi->common;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int label_count = 4;
  int64_t this_segment_rd = 0;
  int label_mv_thresh;
  int segmentyrate = 0;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  ENTROPY_CONTEXT t_above[2], t_left[2];
  int subpelmv = 1, have_ref = 0;
  const int has_second_rf = has_second_ref(mbmi);
  const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;

  bsi->segment_rd = best_rd;
  bsi->ref_mv[0] = best_ref_mv;
  bsi->ref_mv[1] = second_best_ref_mv;
  bsi->mvp.as_int = best_ref_mv->as_int;
  bsi->mvthresh = mvthresh;

  for (i = 0; i < 4; i++)
    bsi->modes[i] = ZEROMV;

  memcpy(t_above, pd->above_context, sizeof(t_above));
  memcpy(t_left, pd->left_context, sizeof(t_left));

  // A value of 64 would make this threshold effectively very large, so that
  // we would very rarely check mvs on segments; setting it to 1 makes the mv
  // threshold roughly equal to what it is for macroblocks.
  label_mv_thresh = 1 * bsi->mvthresh / label_count;
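  // With label_count == 4 this gives each sub-block a quarter of the
  // block-level threshold; the NEWMV search for a label is skipped once its
  // best RD cost so far is already below label_mv_thresh (see the early-out
  // before the motion search below).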
  // Segmentation method overheads
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      // TODO(jingning,rbultje): rewrite the rate-distortion optimization
      // loop for 4x4/4x8/8x4 block coding; to be replaced with a new rd loop.
      int_mv mode_mv[MB_MODE_COUNT][2];
      int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
      PREDICTION_MODE mode_selected = ZEROMV;
      int64_t best_rd = INT64_MAX;
      const int i = idy * 2 + idx;

      for (ref = 0; ref < 1 + has_second_rf; ++ref) {
        const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
        frame_mv[ZEROMV][frame].as_int = 0;
        vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
                                       &frame_mv[NEARESTMV][frame],
                                       &frame_mv[NEARMV][frame],
                                       mbmi_ext->mode_context);

      // search for the best motion vector on this segment
      for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
        const struct buf_2d orig_src = x->plane[0].src;
        struct buf_2d orig_pre[2];

        mode_idx = INTER_OFFSET(this_mode);
        bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
        if (!(inter_mode_mask & (1 << this_mode)))

        if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
                                this_mode, mbmi->ref_frame))

        memcpy(orig_pre, pd->pre, sizeof(orig_pre));
        memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
               sizeof(bsi->rdstat[i][mode_idx].ta));
        memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
               sizeof(bsi->rdstat[i][mode_idx].tl));
        // motion search for newmv (single predictor case only)
        if (!has_second_rf && this_mode == NEWMV &&
            seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV) {
          MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
          int thissme, bestsme = INT_MAX;
          int sadpb = x->sadperbit4;

          /* Is the best so far sufficiently good that we can't justify doing
           * a new motion search? */
          if (best_rd < label_mv_thresh)

          if (cpi->oxcf.mode != BEST) {
            // use previous block's result as next block's MV predictor.
              bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
              bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;

            max_mv = x->max_mv_context[mbmi->ref_frame[0]];
            max_mv = MAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;

          if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
            // Take a weighted average of the step_params based on the last
            // frame's max mv magnitude and the best ref mvs of the current
            // block for the given reference.
            step_param = (vp10_init_search_range(max_mv) +
                          cpi->mv_step_param) / 2;
            step_param = cpi->mv_step_param;

          mvp_full.row = bsi->mvp.as_mv.row >> 3;
          mvp_full.col = bsi->mvp.as_mv.col >> 3;

          if (cpi->sf.adaptive_motion_search) {
            mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3;
            mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3;
            step_param = MAX(step_param, 8);

          // adjust src pointer for this block

          vp10_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);

          bestsme = vp10_full_pixel_search(
              cpi, x, bsize, &mvp_full, step_param, sadpb,
              cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
              &bsi->ref_mv[0]->as_mv, new_mv,

          // Should we do a full search (best quality only)?
          if (cpi->oxcf.mode == BEST) {
            int_mv *const best_mv = &mi->bmi[i].as_mv[0];
            /* Check if mvp_full is within the range. */
            clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
                     x->mv_row_min, x->mv_row_max);
            thissme = cpi->full_search_sad(x, &mvp_full,
                                           sadpb, 16, &cpi->fn_ptr[bsize],
                                           &bsi->ref_mv[0]->as_mv,
            cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] = INT_MAX;
            if (thissme < bestsme) {
              *new_mv = best_mv->as_mv;
              // The full search result is actually worse so reinstate the
              // previous best vector.
              best_mv->as_mv = *new_mv;

          if (bestsme < INT_MAX) {
            cpi->find_fractional_mv_step(
                &bsi->ref_mv[0]->as_mv,
                cm->allow_high_precision_mv,
                x->errorperbit, &cpi->fn_ptr[bsize],
                cpi->sf.mv.subpel_force_stop,
                cpi->sf.mv.subpel_iters_per_step,
                cond_cost_list(cpi, cost_list),
                x->nmvjointcost, x->mvcost,
                &x->pred_sse[mbmi->ref_frame[0]],

          // save motion search result for use in compound prediction
          seg_mvs[i][mbmi->ref_frame[0]].as_mv = *new_mv;

          if (cpi->sf.adaptive_motion_search)
            x->pred_mv[mbmi->ref_frame[0]] = *new_mv;

          // restore src pointers
          mi_buf_restore(x, orig_src, orig_pre);
        if (has_second_rf) {
          if (seg_mvs[i][mbmi->ref_frame[1]].as_int == INVALID_MV ||
              seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV)

        if (has_second_rf && this_mode == NEWMV &&
            mbmi->interp_filter == EIGHTTAP) {
          // adjust src pointers

          if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
            joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
                                mi_row, mi_col, seg_mvs[i],
            seg_mvs[i][mbmi->ref_frame[0]].as_int =
                frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
            seg_mvs[i][mbmi->ref_frame[1]].as_int =
                frame_mv[this_mode][mbmi->ref_frame[1]].as_int;

          // restore src pointers
          mi_buf_restore(x, orig_src, orig_pre);

        bsi->rdstat[i][mode_idx].brate =
            set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode],
                                 frame_mv, seg_mvs[i], bsi->ref_mv,
                                 x->nmvjointcost, x->mvcost);

        for (ref = 0; ref < 1 + has_second_rf; ++ref) {
          bsi->rdstat[i][mode_idx].mvs[ref].as_int =
              mode_mv[this_mode][ref].as_int;
          if (num_4x4_blocks_wide > 1)
            bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int =
                mode_mv[this_mode][ref].as_int;
          if (num_4x4_blocks_high > 1)
            bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
                mode_mv[this_mode][ref].as_int;

        // Trap vectors that reach beyond the UMV borders
        if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) ||
            mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))

        if (filter_idx > 0) {
          BEST_SEG_INFO *ref_bsi = bsi_buf;

          for (ref = 0; ref < 1 + has_second_rf; ++ref) {
            subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
            have_ref &= mode_mv[this_mode][ref].as_int ==
                ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;

          if (filter_idx > 1 && !subpelmv && !have_ref) {
            ref_bsi = bsi_buf + 1;
            for (ref = 0; ref < 1 + has_second_rf; ++ref)
              have_ref &= mode_mv[this_mode][ref].as_int ==
                  ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;

          if (!subpelmv && have_ref &&
              ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
            memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
                   sizeof(SEG_RDSTAT));
            if (num_4x4_blocks_wide > 1)
              bsi->rdstat[i + 1][mode_idx].eobs =
                  ref_bsi->rdstat[i + 1][mode_idx].eobs;
            if (num_4x4_blocks_high > 1)
              bsi->rdstat[i + 2][mode_idx].eobs =
                  ref_bsi->rdstat[i + 2][mode_idx].eobs;

            if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
              mode_selected = this_mode;
              best_rd = bsi->rdstat[i][mode_idx].brdcost;

        bsi->rdstat[i][mode_idx].brdcost =
            encode_inter_mb_segment(cpi, x,
                                    bsi->segment_rd - this_segment_rd, i,
                                    &bsi->rdstat[i][mode_idx].byrate,
                                    &bsi->rdstat[i][mode_idx].bdist,
                                    &bsi->rdstat[i][mode_idx].bsse,
                                    bsi->rdstat[i][mode_idx].ta,
                                    bsi->rdstat[i][mode_idx].tl,
        if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
          bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv,
                                                     bsi->rdstat[i][mode_idx].brate, 0);
          bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
          bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
          if (num_4x4_blocks_wide > 1)
            bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1];
          if (num_4x4_blocks_high > 1)
            bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2];

        if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
          mode_selected = this_mode;
          best_rd = bsi->rdstat[i][mode_idx].brdcost;
      } /* for each 4x4 mode */
2001 if (best_rd == INT64_MAX) {
2003 for (iy = i + 1; iy < 4; ++iy)
2004 for (midx = 0; midx < INTER_MODES; ++midx)
2005 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2006 bsi->segment_rd = INT64_MAX;
2010 mode_idx = INTER_OFFSET(mode_selected);
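// Reload the entropy contexts produced by the winning mode so that the
// remaining sub-blocks are costed against the correct context.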
2011 memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
2012 memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
2014 set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
2015 frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
2018 br += bsi->rdstat[i][mode_idx].brate;
2019 bd += bsi->rdstat[i][mode_idx].bdist;
2020 block_sse += bsi->rdstat[i][mode_idx].bsse;
2021 segmentyrate += bsi->rdstat[i][mode_idx].byrate;
2022 this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;
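// Give up on this partitioning as soon as the running cost exceeds the best
// segmentation rd found so far.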
2024 if (this_segment_rd > bsi->segment_rd) {
2026 for (iy = i + 1; iy < 4; ++iy)
2027 for (midx = 0; midx < INTER_MODES; ++midx)
2028 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2029 bsi->segment_rd = INT64_MAX;
2033 } /* for each label */
2037 bsi->segment_yrate = segmentyrate;
2038 bsi->segment_rd = this_segment_rd;
2039 bsi->sse = block_sse;
2041 // update the coding decisions
2042 for (k = 0; k < 4; ++k)
2043 bsi->modes[k] = mi->bmi[k].as_mode;
2045 if (bsi->segment_rd > best_rd)
2047 /* set it to the best */
2048 for (i = 0; i < 4; i++) {
2049 mode_idx = INTER_OFFSET(bsi->modes[i]);
2050 mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
2051 if (has_second_ref(mbmi))
2052 mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
2053 x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
2054 mi->bmi[i].as_mode = bsi->modes[i];
2058 * used to set mbmi->mv.as_int
2060 *returntotrate = bsi->r;
2061 *returndistortion = bsi->d;
2062 *returnyrate = bsi->segment_yrate;
2063 *skippable = vp10_is_skippable_in_plane(x, BLOCK_8X8, 0);
2065 mbmi->mode = bsi->modes[3];
2067 return bsi->segment_rd;
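// Estimate the bit cost of signalling each possible reference frame choice,
// both single and compound, from the current entropy context. When the
// segment pins the reference frame, no reference bits are coded, so the
// costs are zeroed.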
2070 static void estimate_ref_frame_costs(const VP10_COMMON *cm,
2071 const MACROBLOCKD *xd,
2073 unsigned int *ref_costs_single,
2074 unsigned int *ref_costs_comp,
2075 vpx_prob *comp_mode_p) {
2076 int seg_ref_active = segfeature_active(&cm->seg, segment_id,
2078 if (seg_ref_active) {
2079 memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
2080 memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
2083 vpx_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
2084 vpx_prob comp_inter_p = 128;
2086 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
2087 comp_inter_p = vp10_get_reference_mode_prob(cm, xd);
2088 *comp_mode_p = comp_inter_p;
2093 ref_costs_single[INTRA_FRAME] = vp10_cost_bit(intra_inter_p, 0);
2095 if (cm->reference_mode != COMPOUND_REFERENCE) {
2096 vpx_prob ref_single_p1 = vp10_get_pred_prob_single_ref_p1(cm, xd);
2097 vpx_prob ref_single_p2 = vp10_get_pred_prob_single_ref_p2(cm, xd);
2098 unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
2100 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2101 base_cost += vp10_cost_bit(comp_inter_p, 0);
2103 ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
2104 ref_costs_single[ALTREF_FRAME] = base_cost;
2105 ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
2106 ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
2107 ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
2108 ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
2109 ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
2111 ref_costs_single[LAST_FRAME] = 512;
2112 ref_costs_single[GOLDEN_FRAME] = 512;
2113 ref_costs_single[ALTREF_FRAME] = 512;
2115 if (cm->reference_mode != SINGLE_REFERENCE) {
2116 vpx_prob ref_comp_p = vp10_get_pred_prob_comp_ref_p(cm, xd);
2117 unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
2119 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2120 base_cost += vp10_cost_bit(comp_inter_p, 1);
2122 ref_costs_comp[LAST_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 0);
2123 ref_costs_comp[GOLDEN_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 1);
2125 ref_costs_comp[LAST_FRAME] = 512;
2126 ref_costs_comp[GOLDEN_FRAME] = 512;
2131 static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
2133 int64_t comp_pred_diff[REFERENCE_MODES],
2134 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS],
2136 MACROBLOCKD *const xd = &x->e_mbd;
2138 // Take a snapshot of the coding context so it can be
2139 // restored if we decide to encode this way
2140 ctx->skip = x->skip;
2141 ctx->skippable = skippable;
2142 ctx->best_mode_index = mode_index;
2143 ctx->mic = *xd->mi[0];
2144 ctx->mbmi_ext = *x->mbmi_ext;
2145 ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
2146 ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
2147 ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
2149 memcpy(ctx->best_filter_diff, best_filter_diff,
2150 sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
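// Set up prediction for one reference frame: the (possibly scaled)
// prediction planes, the candidate reference MV list, and, for blocks of
// 8x8 and larger, a refined full-pel starting MV for later motion searches.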
2153 static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
2154 MV_REFERENCE_FRAME ref_frame,
2155 BLOCK_SIZE block_size,
2156 int mi_row, int mi_col,
2157 int_mv frame_nearest_mv[MAX_REF_FRAMES],
2158 int_mv frame_near_mv[MAX_REF_FRAMES],
2159 struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
2160 const VP10_COMMON *cm = &cpi->common;
2161 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
2162 MACROBLOCKD *const xd = &x->e_mbd;
2163 MODE_INFO *const mi = xd->mi[0];
2164 int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
2165 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
2166 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2168 assert(yv12 != NULL);
2170 // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
2171 // use the UV scaling factors.
2172 vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
2174 // Gets an initial list of candidate vectors from neighbours and orders them
2175 vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
2176 NULL, NULL, mbmi_ext->mode_context);
2178 // Candidate refinement carried out at encoder and decoder
2179 vp10_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
2180 &frame_nearest_mv[ref_frame],
2181 &frame_near_mv[ref_frame]);
2183 // Further refinement that is encode side only to test the top few candidates
2184 // in full and choose the best as the centre point for subsequent searches.
2185 // The current implementation doesn't support scaling.
2186 if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
2187 vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
2188 ref_frame, block_size);
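// Motion search against a single reference frame: a full-pel search from the
// best available predictor followed by sub-pel refinement. tmp_mv returns the
// chosen MV and rate_mv its estimated signalling cost.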
2191 static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
2193 int mi_row, int mi_col,
2194 int_mv *tmp_mv, int *rate_mv) {
2195 MACROBLOCKD *xd = &x->e_mbd;
2196 const VP10_COMMON *cm = &cpi->common;
2197 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
2198 struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
2199 int bestsme = INT_MAX;
2201 int sadpb = x->sadperbit16;
2203 int ref = mbmi->ref_frame[0];
2204 MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2206 int tmp_col_min = x->mv_col_min;
2207 int tmp_col_max = x->mv_col_max;
2208 int tmp_row_min = x->mv_row_min;
2209 int tmp_row_max = x->mv_row_max;
2212 const YV12_BUFFER_CONFIG *scaled_ref_frame = vp10_get_scaled_ref_frame(cpi,
2216 pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2217 pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
2218 pred_mv[2] = x->pred_mv[ref];
2220 if (scaled_ref_frame) {
2222 // Swap out the reference frame for a version that's been scaled to
2223 // match the resolution of the current frame, allowing the existing
2224 // motion search code to be used without additional modifications.
2225 for (i = 0; i < MAX_MB_PLANE; i++)
2226 backup_yv12[i] = xd->plane[i].pre[0];
2228 vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
2231 vp10_set_mv_search_range(x, &ref_mv);
2233 // Work out the size of the first step in the mv step search.
2234 // 0 here is maximum length first step. 1 is MAX >> 1 etc.
2235 if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
2236 // Take a weighted average of the step_params based on the last frame's
2237 // max mv magnitude and that based on the best ref mvs of the current
2238 // block for the given reference.
2239 step_param = (vp10_init_search_range(x->max_mv_context[ref]) +
2240 cpi->mv_step_param) / 2;
2242 step_param = cpi->mv_step_param;
2245 if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) {
2246 int boffset = 2 * (b_width_log2_lookup[BLOCK_64X64] -
2247 MIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
2248 step_param = MAX(step_param, boffset);
2251 if (cpi->sf.adaptive_motion_search) {
2252 int bwl = b_width_log2_lookup[bsize];
2253 int bhl = b_height_log2_lookup[bsize];
2254 int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
2259 // prev_mv_sad is not set up for dynamically scaled frames.
2260 if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
2262 for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
2263 if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
2264 x->pred_mv[ref].row = 0;
2265 x->pred_mv[ref].col = 0;
2266 tmp_mv->as_int = INVALID_MV;
2268 if (scaled_ref_frame) {
2270 for (i = 0; i < MAX_MB_PLANE; ++i)
2271 xd->plane[i].pre[0] = backup_yv12[i];
2279 mvp_full = pred_mv[x->mv_best_ref_index[ref]];
2284 bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
2285 cond_cost_list(cpi, cost_list),
2286 &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
2288 x->mv_col_min = tmp_col_min;
2289 x->mv_col_max = tmp_col_max;
2290 x->mv_row_min = tmp_row_min;
2291 x->mv_row_max = tmp_row_max;
2293 if (bestsme < INT_MAX) {
2294 int dis; /* TODO: use dis in distortion calculation later. */
2295 cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
2296 cm->allow_high_precision_mv,
2298 &cpi->fn_ptr[bsize],
2299 cpi->sf.mv.subpel_force_stop,
2300 cpi->sf.mv.subpel_iters_per_step,
2301 cond_cost_list(cpi, cost_list),
2302 x->nmvjointcost, x->mvcost,
2303 &dis, &x->pred_sse[ref], NULL, 0, 0);
2305 *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
2306 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2308 if (cpi->sf.adaptive_motion_search)
2309 x->pred_mv[ref] = tmp_mv->as_mv;
2311 if (scaled_ref_frame) {
2313 for (i = 0; i < MAX_MB_PLANE; i++)
2314 xd->plane[i].pre[0] = backup_yv12[i];
2320 static INLINE void restore_dst_buf(MACROBLOCKD *xd,
2321 uint8_t *orig_dst[MAX_MB_PLANE],
2322 int orig_dst_stride[MAX_MB_PLANE]) {
2324 for (i = 0; i < MAX_MB_PLANE; i++) {
2325 xd->plane[i].dst.buf = orig_dst[i];
2326 xd->plane[i].dst.stride = orig_dst_stride[i];
2330 // In some situations we want to discount the apparent cost of a new motion
2331 // vector. Where there is a subtle motion field and especially where there is
2332 // low spatial complexity then it can be hard to cover the cost of a new motion
2333 // vector in a single block, even if that motion vector reduces distortion.
2334 // However, once established, that vector may be usable through the nearest and
2335 // near mv modes to reduce distortion in subsequent blocks and also improve
// visual quality.
2337 static int discount_newmv_test(const VP10_COMP *cpi,
2340 int_mv (*mode_mv)[MAX_REF_FRAMES],
2342 return (!cpi->rc.is_src_frame_alt_ref &&
2343 (this_mode == NEWMV) &&
2344 (this_mv.as_int != 0) &&
2345 ((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
2346 (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
2347 ((mode_mv[NEARMV][ref_frame].as_int == 0) ||
2348 (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
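// Rate-distortion evaluation of one inter mode. Runs the motion search
// (single or joint for compound prediction), selects the interpolation
// filter, builds the predictor and accumulates rate and distortion for the
// Y and UV planes. When discount_newmv_test() holds, the MV rate is
// discounted to MAX(rate_mv / NEW_MV_DISCOUNT_FACTOR, 1) and the mode cost
// to the cheaper of the NEWMV and NEARESTMV mode costs.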
2351 static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
2353 int *rate2, int64_t *distortion,
2355 int *rate_y, int *rate_uv,
2357 int_mv (*mode_mv)[MAX_REF_FRAMES],
2358 int mi_row, int mi_col,
2359 int_mv single_newmv[MAX_REF_FRAMES],
2360 INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
2361 int (*single_skippable)[MAX_REF_FRAMES],
2363 const int64_t ref_best_rd,
2364 int64_t *mask_filter,
2365 int64_t filter_cache[]) {
2366 VP10_COMMON *cm = &cpi->common;
2367 MACROBLOCKD *xd = &x->e_mbd;
2368 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
2369 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2370 const int is_comp_pred = has_second_ref(mbmi);
2371 const int this_mode = mbmi->mode;
2372 int_mv *frame_mv = mode_mv[this_mode];
2374 int refs[2] = { mbmi->ref_frame[0],
2375 (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
2377 #if CONFIG_VP9_HIGHBITDEPTH
2378 DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
2381 DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
2382 #endif // CONFIG_VP9_HIGHBITDEPTH
2383 int pred_exists = 0;
2385 int64_t rd, tmp_rd, best_rd = INT64_MAX;
2386 int best_needs_copy = 0;
2387 uint8_t *orig_dst[MAX_MB_PLANE];
2388 int orig_dst_stride[MAX_MB_PLANE];
2390 INTERP_FILTER best_filter = SWITCHABLE;
2391 uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0};
2392 int64_t bsse[MAX_MB_PLANE << 2] = {0};
2394 int bsl = mi_width_log2_lookup[bsize];
2395 int pred_filter_search = cpi->sf.cb_pred_filter_search ?
2396 (((mi_row + mi_col) >> bsl) +
2397 get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
2399 int skip_txfm_sb = 0;
2400 int64_t skip_sse_sb = INT64_MAX;
2401 int64_t distortion_y = 0, distortion_uv = 0;
2403 #if CONFIG_VP9_HIGHBITDEPTH
2404 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2405 tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
2407 tmp_buf = (uint8_t *)tmp_buf16;
2409 #endif // CONFIG_VP9_HIGHBITDEPTH
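// Chessboard-pattern prediction filter search: consult the interpolation
// filters used by the above and left neighbours as a hint before the full
// filter search.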
2411 if (pred_filter_search) {
2412 INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
2413 if (xd->up_available)
2414 af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
2415 if (xd->left_available)
2416 lf = xd->mi[-1]->mbmi.interp_filter;
2418 if ((this_mode != NEWMV) || (af == lf))
2423 if (frame_mv[refs[0]].as_int == INVALID_MV ||
2424 frame_mv[refs[1]].as_int == INVALID_MV)
2427 if (cpi->sf.adaptive_mode_search) {
2428 if (single_filter[this_mode][refs[0]] ==
2429 single_filter[this_mode][refs[1]])
2430 best_filter = single_filter[this_mode][refs[0]];
2434 if (this_mode == NEWMV) {
2437 // Initialize mv using single prediction mode result.
2438 frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
2439 frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
2441 if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
2442 joint_motion_search(cpi, x, bsize, frame_mv,
2443 mi_row, mi_col, single_newmv, &rate_mv);
2445 rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
2446 &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
2447 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2448 rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
2449 &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
2450 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2455 single_motion_search(cpi, x, bsize, mi_row, mi_col,
2457 if (tmp_mv.as_int == INVALID_MV)
2460 frame_mv[refs[0]].as_int =
2461 xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
2462 single_newmv[refs[0]].as_int = tmp_mv.as_int;
2464 // Estimate the rate implications of a new mv but discount this
2465 // under certain circumstances where we want to help initiate a weak
2466 // motion field, where the distortion gain for a single block may not
2467 // be enough to overcome the cost of a new mv.
2468 if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
2469 *rate2 += MAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
2476 for (i = 0; i < is_comp_pred + 1; ++i) {
2477 cur_mv[i] = frame_mv[refs[i]];
2479 // Clip "next_nearest" so that it does not extend too far out of the image
2479 if (this_mode != NEWMV)
2480 clamp_mv2(&cur_mv[i].as_mv, xd);
2482 if (mv_check_bounds(x, &cur_mv[i].as_mv))
2484 mbmi->mv[i].as_int = cur_mv[i].as_int;
2487 // Do the first prediction into the destination buffer. Do the next
2488 // prediction into a temporary buffer. Then keep track of which one
2489 // of these currently holds the best predictor, and use the other
2490 // one for future predictions. In the end, copy from tmp_buf to
2491 // dst if necessary.
2492 for (i = 0; i < MAX_MB_PLANE; i++) {
2493 orig_dst[i] = xd->plane[i].dst.buf;
2494 orig_dst_stride[i] = xd->plane[i].dst.stride;
2497 // We don't include the cost of the second reference here, because there
2498 // are only three options: Last/Golden, ARF/Last or Golden/ARF, or in other
2499 // words if you present them in that order, the second one is always known
2500 // if the first is known.
2502 // Under some circumstances we discount the cost of new mv mode to encourage
2503 // initiation of a motion field.
2504 if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
2505 mode_mv, refs[0])) {
2506 *rate2 += MIN(cost_mv_ref(cpi, this_mode,
2507 mbmi_ext->mode_context[refs[0]]),
2508 cost_mv_ref(cpi, NEARESTMV,
2509 mbmi_ext->mode_context[refs[0]]));
2511 *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
2514 if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
2515 mbmi->mode != NEARESTMV)
2519 // Are all MVs integer pel for Y and UV
2520 intpel_mv = !mv_has_subpel(&mbmi->mv[0].as_mv);
2522 intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
2524 // Search for best switchable filter by checking the variance of
2525 // pred error irrespective of whether the filter will be used
2526 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
2527 filter_cache[i] = INT64_MAX;
2529 if (cm->interp_filter != BILINEAR) {
2530 if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
2531 best_filter = EIGHTTAP;
2532 } else if (best_filter == SWITCHABLE) {
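// Model rate and distortion for each switchable filter. With full-pel MVs
// the prediction is identical for every filter, so filters after the first
// reuse the first filter's modelled rate and distortion.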
2534 int tmp_rate_sum = 0;
2535 int64_t tmp_dist_sum = 0;
2537 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
2540 int tmp_skip_sb = 0;
2541 int64_t tmp_skip_sse = INT64_MAX;
2543 mbmi->interp_filter = i;
2544 rs = vp10_get_switchable_rate(cpi, xd);
2545 rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
2547 if (i > 0 && intpel_mv) {
2548 rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
2549 filter_cache[i] = rd;
2550 filter_cache[SWITCHABLE_FILTERS] =
2551 MIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2552 if (cm->interp_filter == SWITCHABLE)
2554 *mask_filter = MAX(*mask_filter, rd);
2557 int64_t dist_sum = 0;
2558 if (i > 0 && cpi->sf.adaptive_interp_filter_search &&
2559 (cpi->sf.interp_filter_search_mask & (1 << i))) {
2561 dist_sum = INT64_MAX;
2565 if ((cm->interp_filter == SWITCHABLE &&
2566 (!i || best_needs_copy)) ||
2567 (cm->interp_filter != SWITCHABLE &&
2568 (cm->interp_filter == mbmi->interp_filter ||
2569 (i == 0 && intpel_mv)))) {
2570 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2572 for (j = 0; j < MAX_MB_PLANE; j++) {
2573 xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
2574 xd->plane[j].dst.stride = 64;
2577 vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2578 model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
2579 &tmp_skip_sb, &tmp_skip_sse);
2581 rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
2582 filter_cache[i] = rd;
2583 filter_cache[SWITCHABLE_FILTERS] =
2584 MIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2585 if (cm->interp_filter == SWITCHABLE)
2587 *mask_filter = MAX(*mask_filter, rd);
2589 if (i == 0 && intpel_mv) {
2590 tmp_rate_sum = rate_sum;
2591 tmp_dist_sum = dist_sum;
2595 if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2596 if (rd / 2 > ref_best_rd) {
2597 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2601 newbest = i == 0 || rd < best_rd;
2605 best_filter = mbmi->interp_filter;
2606 if (cm->interp_filter == SWITCHABLE && i && !intpel_mv)
2607 best_needs_copy = !best_needs_copy;
2610 if ((cm->interp_filter == SWITCHABLE && newbest) ||
2611 (cm->interp_filter != SWITCHABLE &&
2612 cm->interp_filter == mbmi->interp_filter)) {
2616 skip_txfm_sb = tmp_skip_sb;
2617 skip_sse_sb = tmp_skip_sse;
2618 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2619 memcpy(bsse, x->bsse, sizeof(bsse));
2622 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2625 // Set the appropriate filter
2626 mbmi->interp_filter = cm->interp_filter != SWITCHABLE ?
2627 cm->interp_filter : best_filter;
2628 rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
2631 if (best_needs_copy) {
2632 // again temporarily set the buffers to local memory to prevent a memcpy
2633 for (i = 0; i < MAX_MB_PLANE; i++) {
2634 xd->plane[i].dst.buf = tmp_buf + i * 64 * 64;
2635 xd->plane[i].dst.stride = 64;
2638 rd = tmp_rd + RDCOST(x->rdmult, x->rddiv, rs, 0);
2642 // Handles the special case when a filter that is not in the
2643 // switchable list (e.g. bilinear) is indicated at the frame level, or
2644 // the skip condition holds.
2645 vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2646 model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
2647 &skip_txfm_sb, &skip_sse_sb);
2648 rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
2649 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2650 memcpy(bsse, x->bsse, sizeof(bsse));
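// Remember the interpolation filter chosen for this mode and reference so
// that, with adaptive mode search, compound modes can reuse it when both of
// their references agree.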
2654 single_filter[this_mode][refs[0]] = mbmi->interp_filter;
2656 if (cpi->sf.adaptive_mode_search)
2658 if (single_skippable[this_mode][refs[0]] &&
2659 single_skippable[this_mode][refs[1]])
2660 memset(skip_txfm, SKIP_TXFM_AC_DC, sizeof(skip_txfm));
2662 if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2663 // If the current pred_error modeled rd is substantially more than the best
2664 // rd so far, do not bother doing the full rd search.
2665 if (rd / 2 > ref_best_rd) {
2666 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2671 if (cm->interp_filter == SWITCHABLE)
2674 memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
2675 memcpy(x->bsse, bsse, sizeof(bsse));
2677 if (!skip_txfm_sb) {
2678 int skippable_y, skippable_uv;
2679 int64_t sseuv = INT64_MAX;
2680 int64_t rdcosty = INT64_MAX;
2682 // Y cost and distortion
2683 vp10_subtract_plane(x, bsize, 0);
2684 super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
2685 bsize, ref_best_rd);
2687 if (*rate_y == INT_MAX) {
2689 *distortion = INT64_MAX;
2690 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2695 *distortion += distortion_y;
2697 rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
2698 rdcosty = MIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
2700 if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
2701 &sseuv, bsize, ref_best_rd - rdcosty)) {
2703 *distortion = INT64_MAX;
2704 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2710 *distortion += distortion_uv;
2711 *skippable = skippable_y && skippable_uv;
2716 // The cost of the skip bit needs to be added.
2717 *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
2719 *distortion = skip_sse_sb;
2723 single_skippable[this_mode][refs[0]] = *skippable;
2725 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2726 return 0; // The rate-distortion cost will be re-calculated by caller.
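// Intra-only rd mode selection for a block: picks the best luma and chroma
// intra modes and fills rd_cost with the resulting rate, distortion and rd
// cost.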
2729 void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x,
2730 RD_COST *rd_cost, BLOCK_SIZE bsize,
2731 PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
2732 VP10_COMMON *const cm = &cpi->common;
2733 MACROBLOCKD *const xd = &x->e_mbd;
2734 struct macroblockd_plane *const pd = xd->plane;
2735 int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
2736 int y_skip = 0, uv_skip = 0;
2737 int64_t dist_y = 0, dist_uv = 0;
2738 TX_SIZE max_uv_tx_size;
2741 xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
2742 xd->mi[0]->mbmi.ref_frame[1] = NONE;
2744 if (bsize >= BLOCK_8X8) {
2745 if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
2746 &dist_y, &y_skip, bsize,
2747 best_rd) >= best_rd) {
2748 rd_cost->rate = INT_MAX;
2753 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
2754 &dist_y, best_rd) >= best_rd) {
2755 rd_cost->rate = INT_MAX;
2759 max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->mbmi.tx_size, bsize,
2760 pd[1].subsampling_x,
2761 pd[1].subsampling_y);
2762 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly,
2763 &dist_uv, &uv_skip, MAX(BLOCK_8X8, bsize),
2766 if (y_skip && uv_skip) {
2767 rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
2768 vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
2769 rd_cost->dist = dist_y + dist_uv;
2771 rd_cost->rate = rate_y + rate_uv +
2772 vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
2773 rd_cost->dist = dist_y + dist_uv;
2776 ctx->mic = *xd->mi[0];
2777 ctx->mbmi_ext = *x->mbmi_ext;
2778 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
2781 // This function is designed to apply a bias or adjustment to an rd value based
2782 // on the relative variance of the source and reconstruction.
2783 #define LOW_VAR_THRESH 16
2784 #define VLOW_ADJ_MAX 25
2785 #define VHIGH_ADJ_MAX 8
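// Roughly, the adjustment below amounts to:
//   var_error  = 100 - (200 * src_var * recon_var) / (src_var^2 + recon_var^2)
//   var_factor = MIN(|src_var - recon_var|, MIN(ADJ_MAX, var_error))
//   this_rd   += this_rd * var_factor / 100
// where ADJ_MAX is VLOW_ADJ_MAX for intra predictors that are flatter than
// the source and VHIGH_ADJ_MAX for very flat sources with a busier
// reconstruction.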
2786 static void rd_variance_adjustment(VP10_COMP *cpi,
2790 MV_REFERENCE_FRAME ref_frame,
2791 unsigned int source_variance) {
2792 MACROBLOCKD *const xd = &x->e_mbd;
2793 unsigned int recon_variance;
2794 unsigned int absvar_diff = 0;
2795 int64_t var_error = 0;
2796 int64_t var_factor = 0;
2798 if (*this_rd == INT64_MAX)
2801 #if CONFIG_VP9_HIGHBITDEPTH
2802 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2804 vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize, xd->bd);
2807 vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2811 vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2812 #endif // CONFIG_VP9_HIGHBITDEPTH
2814 if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
2815 absvar_diff = (source_variance > recon_variance)
2816 ? (source_variance - recon_variance)
2817 : (recon_variance - source_variance);
2819 var_error = (200 * source_variance * recon_variance) /
2820 ((source_variance * source_variance) +
2821 (recon_variance * recon_variance));
2822 var_error = 100 - var_error;
2825 // Source variance above a threshold and ref frame is intra.
2826 // This case is targeted mainly at discouraging intra modes that give rise
2827 // to a predictor with a low spatial complexity compared to the source.
2828 if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
2829 (source_variance > recon_variance)) {
2830 var_factor = MIN(absvar_diff, MIN(VLOW_ADJ_MAX, var_error));
2831 // A second possible case of interest is where the source variance
2832 // is very low and we wish to discourage false texture or motion trails.
2833 } else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
2834 (recon_variance > source_variance)) {
2835 var_factor = MIN(absvar_diff, MIN(VHIGH_ADJ_MAX, var_error));
2837 *this_rd += (*this_rd * var_factor) / 100;
2841 // Do we have an internal image edge (e.g. formatting bars)?
2842 int vp10_internal_image_edge(VP10_COMP *cpi) {
2843 return (cpi->oxcf.pass == 2) &&
2844 ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
2845 (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
2848 // Checks to see if a super block is on a horizontal image edge.
2849 // In most cases this is the "real" edge unless there are formatting
2850 // bars embedded in the stream.
2851 int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) {
2853 int bottom_edge = cpi->common.mi_rows;
2854 int is_active_h_edge = 0;
2856 // For two pass account for any formatting bars detected.
2857 if (cpi->oxcf.pass == 2) {
2858 TWO_PASS *twopass = &cpi->twopass;
2860 // The inactive region is specified in MBs not mi units.
2861 // The image edge is in the following MB row.
2862 top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
2864 bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
2865 bottom_edge = MAX(top_edge, bottom_edge);
2868 if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
2869 ((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) {
2870 is_active_h_edge = 1;
2872 return is_active_h_edge;
2875 // Checks to see if a super block is on a vertical image edge.
2876 // In most cases this is the "real" edge unless there are formatting
2877 // bars embedded in the stream.
2878 int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
2880 int right_edge = cpi->common.mi_cols;
2881 int is_active_v_edge = 0;
2883 // For two pass account for any formatting bars detected.
2884 if (cpi->oxcf.pass == 2) {
2885 TWO_PASS *twopass = &cpi->twopass;
2887 // The inactive region is specified in MBs not mi units.
2888 // The image edge is in the following MB column.
2889 left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
2891 right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
2892 right_edge = MAX(left_edge, right_edge);
2895 if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
2896 ((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) {
2897 is_active_v_edge = 1;
2899 return is_active_v_edge;
2902 // Checks to see if a super block is at the edge of the active image.
2903 // In most cases this is the "real" edge unless there are formatting
2904 // bars embedded in the stream.
2905 int vp10_active_edge_sb(VP10_COMP *cpi,
2906 int mi_row, int mi_col) {
2907 return vp10_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
2908 vp10_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
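// Main rd mode decision for blocks of 8x8 and larger: walk vp10_mode_order
// (via the per-tile mode_map), skipping entries ruled out by the reference
// and mode skip masks, and keep the best mode found.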
2911 void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
2912 TileDataEnc *tile_data,
2914 int mi_row, int mi_col,
2915 RD_COST *rd_cost, BLOCK_SIZE bsize,
2916 PICK_MODE_CONTEXT *ctx,
2917 int64_t best_rd_so_far) {
2918 VP10_COMMON *const cm = &cpi->common;
2919 TileInfo *const tile_info = &tile_data->tile_info;
2920 RD_OPT *const rd_opt = &cpi->rd;
2921 SPEED_FEATURES *const sf = &cpi->sf;
2922 MACROBLOCKD *const xd = &x->e_mbd;
2923 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
2924 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2925 const struct segmentation *const seg = &cm->seg;
2926 PREDICTION_MODE this_mode;
2927 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
2928 unsigned char segment_id = mbmi->segment_id;
2929 int comp_pred, i, k;
2930 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
2931 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
2932 int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
2933 INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES];
2934 int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
2935 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
2937 int64_t best_rd = best_rd_so_far;
2938 int64_t best_pred_diff[REFERENCE_MODES];
2939 int64_t best_pred_rd[REFERENCE_MODES];
2940 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
2941 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
2942 MB_MODE_INFO best_mbmode;
2943 int best_mode_skippable = 0;
2944 int midx, best_mode_index = -1;
2945 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
2946 vpx_prob comp_mode_p;
2947 int64_t best_intra_rd = INT64_MAX;
2948 unsigned int best_pred_sse = UINT_MAX;
2949 PREDICTION_MODE best_intra_mode = DC_PRED;
2950 int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
2951 int64_t dist_uv[TX_SIZES];
2952 int skip_uv[TX_SIZES];
2953 PREDICTION_MODE mode_uv[TX_SIZES];
2954 const int intra_cost_penalty = vp10_get_intra_cost_penalty(
2955 cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
2957 uint8_t ref_frame_skip_mask[2] = { 0 };
2958 uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
2959 int mode_skip_start = sf->mode_skip_start + 1;
2960 const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
2961 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
2962 int64_t mode_threshold[MAX_MODES];
2963 int *mode_map = tile_data->mode_map[bsize];
2964 const int mode_search_skip_flags = sf->mode_search_skip_flags;
2965 int64_t mask_filter = 0;
2966 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
2968 vp10_zero(best_mbmode);
2970 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
2972 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
2973 filter_cache[i] = INT64_MAX;
2975 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
2978 for (i = 0; i < REFERENCE_MODES; ++i)
2979 best_pred_rd[i] = INT64_MAX;
2980 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
2981 best_filter_rd[i] = INT64_MAX;
2982 for (i = 0; i < TX_SIZES; i++)
2983 rate_uv_intra[i] = INT_MAX;
2984 for (i = 0; i < MAX_REF_FRAMES; ++i)
2985 x->pred_sse[i] = INT_MAX;
2986 for (i = 0; i < MB_MODE_COUNT; ++i) {
2987 for (k = 0; k < MAX_REF_FRAMES; ++k) {
2988 single_inter_filter[i][k] = SWITCHABLE;
2989 single_skippable[i][k] = 0;
2993 rd_cost->rate = INT_MAX;
2995 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
2996 x->pred_mv_sad[ref_frame] = INT_MAX;
2997 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
2998 assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
2999 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3000 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
3002 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3003 frame_mv[ZEROMV][ref_frame].as_int = 0;
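// Work out which reference frames and modes can be skipped outright, based
// on reference availability, segment restrictions and relative prediction
// quality.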
3006 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3007 if (!(cpi->ref_frame_flags & flag_list[ref_frame])) {
3008 // Skip checking missing references in both single and compound reference
3009 // modes. Note that a mode will be skipped iff both reference frames
// are masked out.
3011 ref_frame_skip_mask[0] |= (1 << ref_frame);
3012 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3013 } else if (sf->reference_masking) {
3014 for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
3015 // Skip fixed mv modes for poor references
3016 if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) {
3017 mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
3022 // If the segment-level reference frame feature is enabled, mask off any
3023 // reference frame other than the one allowed for this segment.
3024 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3025 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3026 ref_frame_skip_mask[0] |= (1 << ref_frame);
3027 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3031 // Disable this drop out case if the ref frame
3032 // segment level feature is enabled for this segment. This is to
3033 // prevent the possibility that we end up unable to pick any mode.
3034 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
3035 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
3036 // unless ARNR filtering is enabled in which case we want
3037 // an unfiltered alternative. We allow near/nearest as well
3038 // because they may result in zero-zero MVs but be cheaper.
3039 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
3040 ref_frame_skip_mask[0] = (1 << LAST_FRAME) | (1 << GOLDEN_FRAME);
3041 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3042 mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
3043 if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0)
3044 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
3045 if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0)
3046 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
3050 if (cpi->rc.is_src_frame_alt_ref) {
3051 if (sf->alt_ref_search_fp) {
3052 mode_skip_mask[ALTREF_FRAME] = 0;
3053 ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME);
3054 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3058 if (sf->alt_ref_search_fp)
3059 if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX)
3060 if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1))
3061 mode_skip_mask[ALTREF_FRAME] |= INTER_ALL;
3063 if (sf->adaptive_mode_search) {
3064 if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
3065 cpi->rc.frames_since_golden >= 3)
3066 if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1))
3067 mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL;
3070 if (bsize > sf->max_intra_bsize) {
3071 ref_frame_skip_mask[0] |= (1 << INTRA_FRAME);
3072 ref_frame_skip_mask[1] |= (1 << INTRA_FRAME);
3075 mode_skip_mask[INTRA_FRAME] |=
3076 ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
3078 for (i = 0; i <= LAST_NEW_MV_INDEX; ++i)
3079 mode_threshold[i] = 0;
3080 for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
3081 mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
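// Order the inter portion of the mode map by threshold so that, when
// schedule_mode_search is enabled, cheaper modes are visited first.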
3083 midx = sf->schedule_mode_search ? mode_skip_start : 0;
3085 uint8_t end_pos = 0;
3086 for (i = 5; i < midx; ++i) {
3087 if (mode_threshold[mode_map[i - 1]] > mode_threshold[mode_map[i]]) {
3088 uint8_t tmp = mode_map[i];
3089 mode_map[i] = mode_map[i - 1];
3090 mode_map[i - 1] = tmp;
3097 for (midx = 0; midx < MAX_MODES; ++midx) {
3098 int mode_index = mode_map[midx];
3099 int mode_excluded = 0;
3100 int64_t this_rd = INT64_MAX;
3101 int disable_skip = 0;
3102 int compmode_cost = 0;
3103 int rate2 = 0, rate_y = 0, rate_uv = 0;
3104 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
3107 int64_t total_sse = INT64_MAX;
3110 this_mode = vp10_mode_order[mode_index].mode;
3111 ref_frame = vp10_mode_order[mode_index].ref_frame[0];
3112 second_ref_frame = vp10_mode_order[mode_index].ref_frame[1];
3114 // Look at the reference frame of the best mode so far and set the
3115 // skip mask to look at a subset of the remaining modes.
3116 if (midx == mode_skip_start && best_mode_index >= 0) {
3117 switch (best_mbmode.ref_frame[0]) {
3121 ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
3122 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3125 ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
3126 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3129 ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK;
3132 case MAX_REF_FRAMES:
3133 assert(0 && "Invalid Reference frame");
3138 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
3139 (ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame))))
3142 if (mode_skip_mask[ref_frame] & (1 << this_mode))
3145 // Test best rd so far against threshold for trying this mode.
3146 if (best_mode_skippable && sf->schedule_mode_search)
3147 mode_threshold[mode_index] <<= 1;
3149 if (best_rd < mode_threshold[mode_index])
3152 if (sf->motion_field_mode_search) {
3153 const int mi_width = MIN(num_8x8_blocks_wide_lookup[bsize],
3154 tile_info->mi_col_end - mi_col);
3155 const int mi_height = MIN(num_8x8_blocks_high_lookup[bsize],
3156 tile_info->mi_row_end - mi_row);
3157 const int bsl = mi_width_log2_lookup[bsize];
3158 int cb_partition_search_ctrl = (((mi_row + mi_col) >> bsl)
3159 + get_chessboard_index(cm->current_video_frame)) & 0x1;
3160 MB_MODE_INFO *ref_mbmi;
3161 int const_motion = 1;
3162 int skip_ref_frame = !cb_partition_search_ctrl;
3163 MV_REFERENCE_FRAME rf = NONE;
3165 ref_mv.as_int = INVALID_MV;
3167 if ((mi_row - 1) >= tile_info->mi_row_start) {
3168 ref_mv = xd->mi[-xd->mi_stride]->mbmi.mv[0];
3169 rf = xd->mi[-xd->mi_stride]->mbmi.ref_frame[0];
3170 for (i = 0; i < mi_width; ++i) {
3171 ref_mbmi = &xd->mi[-xd->mi_stride + i]->mbmi;
3172 const_motion &= (ref_mv.as_int == ref_mbmi->mv[0].as_int) &&
3173 (ref_frame == ref_mbmi->ref_frame[0]);
3174 skip_ref_frame &= (rf == ref_mbmi->ref_frame[0]);
3178 if ((mi_col - 1) >= tile_info->mi_col_start) {
3179 if (ref_mv.as_int == INVALID_MV)
3180 ref_mv = xd->mi[-1]->mbmi.mv[0];
3182 rf = xd->mi[-1]->mbmi.ref_frame[0];
3183 for (i = 0; i < mi_height; ++i) {
3184 ref_mbmi = &xd->mi[i * xd->mi_stride - 1]->mbmi;
3185 const_motion &= (ref_mv.as_int == ref_mbmi->mv[0].as_int) &&
3186 (ref_frame == ref_mbmi->ref_frame[0]);
3187 skip_ref_frame &= (rf == ref_mbmi->ref_frame[0]);
3191 if (skip_ref_frame && this_mode != NEARESTMV && this_mode != NEWMV)
3192 if (rf > INTRA_FRAME)
3193 if (ref_frame != rf)
3197 if (this_mode == NEARMV || this_mode == ZEROMV)
3201 comp_pred = second_ref_frame > INTRA_FRAME;
3203 if (!cpi->allow_comp_inter_inter)
3206 // Skip compound inter modes if ARF is not available.
3207 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
3210 // Do not allow compound prediction if the segment level reference frame
3211 // feature is in use as in this case there can only be one reference.
3212 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
3215 if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
3216 best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
3219 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
3221 if (ref_frame != INTRA_FRAME)
3222 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
3225 if (ref_frame == INTRA_FRAME) {
3226 if (sf->adaptive_mode_search)
3227 if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse)
3230 if (this_mode != DC_PRED) {
3231 // Disable intra modes other than DC_PRED for blocks with low variance
3232 // Threshold for intra skipping based on source variance
3233 // TODO(debargha): Specialize the threshold for super block sizes
3234 const unsigned int skip_intra_var_thresh = 64;
3235 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
3236 x->source_variance < skip_intra_var_thresh)
3238 // Only search the oblique modes if the best so far is
3239 // one of the neighboring directional modes
3240 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
3241 (this_mode >= D45_PRED && this_mode <= TM_PRED)) {
3242 if (best_mode_index >= 0 &&
3243 best_mbmode.ref_frame[0] > INTRA_FRAME)
3246 if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
3247 if (conditional_skipintra(this_mode, best_intra_mode))
3252 const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
3253 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
3254 this_mode, ref_frames))
3258 mbmi->mode = this_mode;
3259 mbmi->uv_mode = DC_PRED;
3260 mbmi->ref_frame[0] = ref_frame;
3261 mbmi->ref_frame[1] = second_ref_frame;
3262 // Evaluate all sub-pel filters irrespective of whether we can use
3263 // them for this frame.
3264 mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
3265 : cm->interp_filter;
3266 mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
3269 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
3271 // Select prediction reference frames.
3272 for (i = 0; i < MAX_MB_PLANE; i++) {
3273 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
3275 xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
3278 if (ref_frame == INTRA_FRAME) {
3280 struct macroblockd_plane *const pd = &xd->plane[1];
3281 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
3282 super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
3283 NULL, bsize, best_rd);
3284 if (rate_y == INT_MAX)
3287 uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize, pd->subsampling_x,
3289 if (rate_uv_intra[uv_tx] == INT_MAX) {
3290 choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx,
3291 &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx],
3292 &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]);
3295 rate_uv = rate_uv_tokenonly[uv_tx];
3296 distortion_uv = dist_uv[uv_tx];
3297 skippable = skippable && skip_uv[uv_tx];
3298 mbmi->uv_mode = mode_uv[uv_tx];
3300 rate2 = rate_y + cpi->mbmode_cost[mbmi->mode] + rate_uv_intra[uv_tx];
3301 if (this_mode != DC_PRED && this_mode != TM_PRED)
3302 rate2 += intra_cost_penalty;
3303 distortion2 = distortion_y + distortion_uv;
3305 this_rd = handle_inter_mode(cpi, x, bsize,
3306 &rate2, &distortion2, &skippable,
3308 &disable_skip, frame_mv,
3310 single_newmv, single_inter_filter,
3311 single_skippable, &total_sse, best_rd,
3312 &mask_filter, filter_cache);
3313 if (this_rd == INT64_MAX)
3316 compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
3318 if (cm->reference_mode == REFERENCE_MODE_SELECT)
3319 rate2 += compmode_cost;
3322 // Estimate the reference frame signaling cost and add it
3323 // to the rolling cost variable.
3325 rate2 += ref_costs_comp[ref_frame];
3327 rate2 += ref_costs_single[ref_frame];
3330 if (!disable_skip) {
3332 // Back out the coefficient coding costs
3333 rate2 -= (rate_y + rate_uv);
3335 // Cost the skip mb case
3336 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
3337 } else if (ref_frame != INTRA_FRAME && !xd->lossless) {
3338 if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
3339 RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
3340 // Add in the cost of the no skip flag.
3341 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
3343 // FIXME(rbultje) make this work for splitmv also
3344 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
3345 distortion2 = total_sse;
3346 assert(total_sse >= 0);
3347 rate2 -= (rate_y + rate_uv);
3351 // Add in the cost of the no skip flag.
3352 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
3355 // Calculate the final RD estimate for this mode.
3356 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
3359 // Apply an adjustment to the rd value based on the similarity of the
3360 // source variance and reconstructed variance.
3361 rd_variance_adjustment(cpi, x, bsize, &this_rd,
3362 ref_frame, x->source_variance);
3364 if (ref_frame == INTRA_FRAME) {
3365 // Keep record of best intra rd
3366 if (this_rd < best_intra_rd) {
3367 best_intra_rd = this_rd;
3368 best_intra_mode = mbmi->mode;
3372 if (!disable_skip && ref_frame == INTRA_FRAME) {
3373 for (i = 0; i < REFERENCE_MODES; ++i)
3374 best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
3375 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3376 best_filter_rd[i] = MIN(best_filter_rd[i], this_rd);
3379 // Did this mode help, i.e. is it the new best mode?
3380 if (this_rd < best_rd || x->skip) {
3381 int max_plane = MAX_MB_PLANE;
3382 if (!mode_excluded) {
3383 // Note index of best mode so far
3384 best_mode_index = mode_index;
3386 if (ref_frame == INTRA_FRAME) {
3387 /* required for left and above block mv */
3388 mbmi->mv[0].as_int = 0;
3391 best_pred_sse = x->pred_sse[ref_frame];
3394 rd_cost->rate = rate2;
3395 rd_cost->dist = distortion2;
3396 rd_cost->rdcost = this_rd;
3398 best_mbmode = *mbmi;
3399 best_skip2 = this_skip2;
3400 best_mode_skippable = skippable;
3402 if (!x->select_tx_size)
3403 swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
3404 memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
3405 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
3407 // TODO(debargha): enhance this test with a better distortion prediction
3408 // based on qp, activity mask and history
3409 if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
3410 (mode_index > MIN_EARLY_TERM_INDEX)) {
3411 int qstep = xd->plane[0].dequant[1];
3412 // TODO(debargha): Enhance this by specializing for each mode_index
3414 #if CONFIG_VP9_HIGHBITDEPTH
3415 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
3416 qstep >>= (xd->bd - 8);
3418 #endif // CONFIG_VP9_HIGHBITDEPTH
3419 if (x->source_variance < UINT_MAX) {
3420 const int var_adjust = (x->source_variance < 16);
3421 scale -= var_adjust;
3423 if (ref_frame > INTRA_FRAME &&
3424 distortion2 * scale < qstep * qstep) {
3431 /* keep record of best compound/single-only prediction */
3432 if (!disable_skip && ref_frame != INTRA_FRAME) {
3433 int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
3435 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
3436 single_rate = rate2 - compmode_cost;
3437 hybrid_rate = rate2;
3439 single_rate = rate2;
3440 hybrid_rate = rate2 + compmode_cost;
3443 single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
3444 hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
3447 if (single_rd < best_pred_rd[SINGLE_REFERENCE])
3448 best_pred_rd[SINGLE_REFERENCE] = single_rd;
3450 if (single_rd < best_pred_rd[COMPOUND_REFERENCE])
3451 best_pred_rd[COMPOUND_REFERENCE] = single_rd;
3453 if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
3454 best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
3456 /* keep record of best filter type */
3457 if (!mode_excluded && cm->interp_filter != BILINEAR) {
3458 int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
3459 SWITCHABLE_FILTERS : cm->interp_filter];
3461 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3463 if (ref == INT64_MAX)
3465 else if (filter_cache[i] == INT64_MAX)
3466 // When early termination is triggered, the encoder does not have
3467 // access to the rate-distortion cost. It only knows that the cost
3468 // should be above the maximum valid value. Hence it takes the known
3469 // maximum plus an arbitrary constant as the rate-distortion cost.
3470 adj_rd = mask_filter - ref + 10;
3472 adj_rd = filter_cache[i] - ref;
3475 best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd);
3483 if (x->skip && !comp_pred)
3487 // The inter modes' rate costs are not calculated precisely in some cases.
3488 // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
3489 // ZEROMV. Here, checks are added for those cases, and the mode decisions
// are corrected.
3491 if (best_mbmode.mode == NEWMV) {
3492 const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0],
3493 best_mbmode.ref_frame[1]};
3494 int comp_pred_mode = refs[1] > INTRA_FRAME;
3496 if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3497 ((comp_pred_mode && frame_mv[NEARESTMV][refs[1]].as_int ==
3498 best_mbmode.mv[1].as_int) || !comp_pred_mode))
3499 best_mbmode.mode = NEARESTMV;
3500 else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3501 ((comp_pred_mode && frame_mv[NEARMV][refs[1]].as_int ==
3502 best_mbmode.mv[1].as_int) || !comp_pred_mode))
3503 best_mbmode.mode = NEARMV;
3504 else if (best_mbmode.mv[0].as_int == 0 &&
3505 ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || !comp_pred_mode))
3506 best_mbmode.mode = ZEROMV;
3509 if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
3510 rd_cost->rate = INT_MAX;
3511 rd_cost->rdcost = INT64_MAX;
3515 // If we used an estimate for the uv intra rd in the loop above...
3516 if (sf->use_uv_intra_rd_estimate) {
3517 // Do Intra UV best rd mode selection if best mode choice above was intra.
3518 if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
3520 *mbmi = best_mbmode;
3521 uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
3522 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
3523 &rate_uv_tokenonly[uv_tx_size],
3524 &dist_uv[uv_tx_size],
3525 &skip_uv[uv_tx_size],
3526 bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
3531 assert((cm->interp_filter == SWITCHABLE) ||
3532 (cm->interp_filter == best_mbmode.interp_filter) ||
3533 !is_inter_block(&best_mbmode));
3535 if (!cpi->rc.is_src_frame_alt_ref)
3536 vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
3537 sf->adaptive_rd_thresh, bsize, best_mode_index);
3540 *mbmi = best_mbmode;
3541 x->skip |= best_skip2;
3543 for (i = 0; i < REFERENCE_MODES; ++i) {
3544 if (best_pred_rd[i] == INT64_MAX)
3545 best_pred_diff[i] = INT_MIN;
3547 best_pred_diff[i] = best_rd - best_pred_rd[i];
3551 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3552 if (best_filter_rd[i] == INT64_MAX)
3553 best_filter_diff[i] = 0;
3555 best_filter_diff[i] = best_rd - best_filter_rd[i];
3557 if (cm->interp_filter == SWITCHABLE)
3558 assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
3560 vp10_zero(best_filter_diff);
3563 // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
3564 // updating code causes PSNR loss. Need to figure out the conflict.
3565 x->skip |= best_mode_skippable;
3567 if (!x->skip && !x->select_tx_size) {
3568 int has_high_freq_coeff = 0;
3570 int max_plane = is_inter_block(&xd->mi[0]->mbmi)
3572 for (plane = 0; plane < max_plane; ++plane) {
3573 x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
3574 has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
3577 for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
3578 x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
3579 has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
3582 best_mode_skippable |= !has_high_freq_coeff;
3585 assert(best_mode_index >= 0);
3587 store_coding_context(x, ctx, best_mode_index, best_pred_diff,
3588 best_filter_diff, best_mode_skippable);
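// Simplified mode decision for segments with the SKIP feature active: the
// block is coded as ZEROMV against LAST_FRAME and only the interpolation
// filter choice and the reference signalling cost are evaluated.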
3591 void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi,
3592 TileDataEnc *tile_data,
3596 PICK_MODE_CONTEXT *ctx,
3597 int64_t best_rd_so_far) {
3598 VP10_COMMON *const cm = &cpi->common;
3599 MACROBLOCKD *const xd = &x->e_mbd;
3600 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
3601 unsigned char segment_id = mbmi->segment_id;
3602 const int comp_pred = 0;
3604 int64_t best_pred_diff[REFERENCE_MODES];
3605 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3606 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3607 vpx_prob comp_mode_p;
3608 INTERP_FILTER best_filter = SWITCHABLE;
3609 int64_t this_rd = INT64_MAX;
3611 const int64_t distortion2 = 0;
3613 x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3615 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3618 for (i = 0; i < MAX_REF_FRAMES; ++i)
3619 x->pred_sse[i] = INT_MAX;
3620 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
3621 x->pred_mv_sad[i] = INT_MAX;
3623 rd_cost->rate = INT_MAX;
3625 assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));
3627 mbmi->mode = ZEROMV;
3628 mbmi->uv_mode = DC_PRED;
3629 mbmi->ref_frame[0] = LAST_FRAME;
3630 mbmi->ref_frame[1] = NONE;
3631 mbmi->mv[0].as_int = 0;
3634 if (cm->interp_filter != BILINEAR) {
3635 best_filter = EIGHTTAP;
3636 if (cm->interp_filter == SWITCHABLE &&
3637 x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
3639 int best_rs = INT_MAX;
3640 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
3641 mbmi->interp_filter = i;
3642 rs = vp10_get_switchable_rate(cpi, xd);
3645 best_filter = mbmi->interp_filter;
3650 // Set the appropriate filter
3651 if (cm->interp_filter == SWITCHABLE) {
3652 mbmi->interp_filter = best_filter;
3653 rate2 += vp10_get_switchable_rate(cpi, xd);
3655 mbmi->interp_filter = cm->interp_filter;
3658 if (cm->reference_mode == REFERENCE_MODE_SELECT)
3659 rate2 += vp10_cost_bit(comp_mode_p, comp_pred);
3661 // Estimate the reference frame signaling cost and add it
3662 // to the rolling cost variable.
3663 rate2 += ref_costs_single[LAST_FRAME];
3664 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
3666 rd_cost->rate = rate2;
3667 rd_cost->dist = distortion2;
3668 rd_cost->rdcost = this_rd;
3670 if (this_rd >= best_rd_so_far) {
3671 rd_cost->rate = INT_MAX;
3672 rd_cost->rdcost = INT64_MAX;
3676 assert((cm->interp_filter == SWITCHABLE) ||
3677 (cm->interp_filter == mbmi->interp_filter));
3679 vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
3680 cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
3682 vp10_zero(best_pred_diff);
3683 vp10_zero(best_filter_diff);
3685 if (!x->select_tx_size)
3686 swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
3687 store_coding_context(x, ctx, THR_ZEROMV,
3688 best_pred_diff, best_filter_diff, 0);
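// rd mode decision for sub-8x8 partitions: each reference combination in
// vp10_ref_order is evaluated with per-sub-block motion vectors and modes.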
3691 void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
3692 TileDataEnc *tile_data,
3694 int mi_row, int mi_col,
3697 PICK_MODE_CONTEXT *ctx,
3698 int64_t best_rd_so_far) {
3699 VP10_COMMON *const cm = &cpi->common;
3700 RD_OPT *const rd_opt = &cpi->rd;
3701 SPEED_FEATURES *const sf = &cpi->sf;
3702 MACROBLOCKD *const xd = &x->e_mbd;
3703 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
3704 const struct segmentation *const seg = &cm->seg;
3705 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
3706 unsigned char segment_id = mbmi->segment_id;
3708 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
3709 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
3710 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
3712 int64_t best_rd = best_rd_so_far;
3713 int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
3714 int64_t best_pred_diff[REFERENCE_MODES];
3715 int64_t best_pred_rd[REFERENCE_MODES];
3716 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
3717 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3718 MB_MODE_INFO best_mbmode;
3719 int ref_index, best_ref_index = 0;
3720 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3721 vpx_prob comp_mode_p;
3722 INTERP_FILTER tmp_best_filter = SWITCHABLE;
3723 int rate_uv_intra, rate_uv_tokenonly;
3726 PREDICTION_MODE mode_uv = DC_PRED;
3727 const int intra_cost_penalty = vp10_get_intra_cost_penalty(
3728 cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
3729 int_mv seg_mvs[4][MAX_REF_FRAMES];
3730 b_mode_info best_bmodes[4];
3731 int best_skip2 = 0;
3732 int ref_frame_skip_mask[2] = { 0 };
3733 int64_t mask_filter = 0;
3734 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
3735 int internal_active_edge =
3736 vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
3738 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
3739 memset(x->zcoeff_blk[TX_4X4], 0, 4);
3740 vp10_zero(best_mbmode);
3742 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
3743 filter_cache[i] = INT64_MAX;
3745 for (i = 0; i < 4; i++) {
3746 int j;
3747 for (j = 0; j < MAX_REF_FRAMES; j++)
3748 seg_mvs[i][j].as_int = INVALID_MV;
3749 }
3751 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3752 &comp_mode_p);
3754 for (i = 0; i < REFERENCE_MODES; ++i)
3755 best_pred_rd[i] = INT64_MAX;
3756 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3757 best_filter_rd[i] = INT64_MAX;
3758 rate_uv_intra = INT_MAX;
3760 rd_cost->rate = INT_MAX;
3762 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
3763 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
3764 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3765 frame_mv[NEARESTMV], frame_mv[NEARMV],
3766 yv12_mb);
3767 } else {
3768 ref_frame_skip_mask[0] |= (1 << ref_frame);
3769 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3770 }
3771 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3772 frame_mv[ZEROMV][ref_frame].as_int = 0;
3773 }
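// At this point frame_mv[NEARESTMV]/[NEARMV] hold the candidate vectors that
// setup_buffer_inter found for each available reference, NEWMV entries stay
// INVALID_MV until the sub8x8 motion search fills them, and unavailable
// references are already flagged in ref_frame_skip_mask.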
3775 for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
3776 int mode_excluded = 0;
3777 int64_t this_rd = INT64_MAX;
3778 int disable_skip = 0;
3779 int compmode_cost = 0;
3780 int rate2 = 0, rate_y = 0, rate_uv = 0;
3781 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
3782 int skippable = 0;
3783 int i;
3784 int this_skip2 = 0;
3785 int64_t total_sse = INT_MAX;
3786 int early_term = 0;
3788 ref_frame = vp10_ref_order[ref_index].ref_frame[0];
3789 second_ref_frame = vp10_ref_order[ref_index].ref_frame[1];
3791 // Look at the reference frame of the best mode so far and set the
3792 // skip mask to look at a subset of the remaining modes.
3793 if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
3794 if (ref_index == 3) {
3795 switch (best_mbmode.ref_frame[0]) {
3796 case INTRA_FRAME:
3797 break;
3798 case LAST_FRAME:
3799 ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
3800 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3801 break;
3802 case GOLDEN_FRAME:
3803 ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME);
3804 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3805 break;
3806 case ALTREF_FRAME:
3807 ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
3808 break;
3809 case NONE:
3810 case MAX_REF_FRAMES:
3811 assert(0 && "Invalid Reference frame");
3812 break;
3813 }
3814 }
3815 }
3817 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
3818 (ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame))))
3819 continue;
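// ref_frame_skip_mask[0] masks first references and [1] second references
// (bit 0, via MAX(0, second_ref_frame), stands in for NONE); a candidate is
// skipped only when both of its references are masked.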
3821 // Test best rd so far against threshold for trying this mode.
3822 if (!internal_active_edge &&
3823 rd_less_than_thresh(best_rd,
3824 rd_opt->threshes[segment_id][bsize][ref_index],
3825 tile_data->thresh_freq_fact[bsize][ref_index]))
3826 continue;
3828 comp_pred = second_ref_frame > INTRA_FRAME;
3829 if (comp_pred) {
3830 if (!cpi->allow_comp_inter_inter)
3831 continue;
3832 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
3833 continue;
3834 // Do not allow compound prediction if the segment level reference frame
3835 // feature is in use as in this case there can only be one reference.
3836 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
3837 continue;
3839 if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
3840 best_mbmode.ref_frame[0] == INTRA_FRAME)
3841 continue;
3842 }
3844 // TODO(jingning, jkoleszar): scaling reference frame not supported for
3845 // sub8x8 blocks.
3846 if (ref_frame > INTRA_FRAME &&
3847 vp10_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
3848 continue;
3850 if (second_ref_frame > INTRA_FRAME &&
3851 vp10_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
3852 continue;
3854 if (comp_pred)
3855 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
3856 else if (ref_frame != INTRA_FRAME)
3857 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
3859 // If the segment reference frame feature is enabled,
3860 // then do nothing if the current ref frame is not allowed.
3861 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3862 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3863 continue;
3864 // Disable this drop out case if the ref frame
3865 // segment level feature is enabled for this segment. This is to
3866 // prevent the possibility that we end up unable to pick any mode.
3867 } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
3868 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
3869 // unless ARNR filtering is enabled in which case we want
3870 // an unfiltered alternative. We allow near/nearest as well
3871 // because they may result in zero-zero MVs but be cheaper.
3872 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
3873 continue;
3874 }
3876 mbmi->tx_size = TX_4X4;
3877 mbmi->uv_mode = DC_PRED;
3878 mbmi->ref_frame[0] = ref_frame;
3879 mbmi->ref_frame[1] = second_ref_frame;
3880 // Evaluate all sub-pel filters irrespective of whether we can use
3881 // them for this frame.
3882 mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
3883 : cm->interp_filter;
3885 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
3887 // Select prediction reference frames.
3888 for (i = 0; i < MAX_MB_PLANE; i++) {
3889 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
3890 if (comp_pred)
3891 xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
3892 }
3894 if (ref_frame == INTRA_FRAME) {
3895 int rate;
3896 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y,
3897 &distortion_y, best_rd) >= best_rd)
3898 continue;
3899 rate2 += rate;
3900 rate2 += intra_cost_penalty;
3901 distortion2 += distortion_y;
3903 if (rate_uv_intra == INT_MAX) {
3904 choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4,
3905 &rate_uv_intra,
3906 &rate_uv_tokenonly,
3907 &dist_uv, &skip_uv,
3908 &mode_uv);
3909 }
3910 rate2 += rate_uv_intra;
3911 rate_uv = rate_uv_tokenonly;
3912 distortion2 += dist_uv;
3913 distortion_uv = dist_uv;
3914 mbmi->uv_mode = mode_uv;
3915 } else {
3916 int rate;
3918 int64_t this_rd_thresh;
3919 int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
3920 int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
3921 int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
3922 int tmp_best_skippable = 0;
3923 int switchable_filter_index;
3924 int_mv *second_ref = comp_pred ?
3925 &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
3926 b_mode_info tmp_best_bmodes[16];
3927 MB_MODE_INFO tmp_best_mbmode;
3928 BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
3929 int pred_exists = 0;
3930 int uv_skippable;
3932 this_rd_thresh = (ref_frame == LAST_FRAME) ?
3933 rd_opt->threshes[segment_id][bsize][THR_LAST] :
3934 rd_opt->threshes[segment_id][bsize][THR_ALTR];
3935 this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
3936 rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh;
3937 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
3938 filter_cache[i] = INT64_MAX;
3940 if (cm->interp_filter != BILINEAR) {
3941 tmp_best_filter = EIGHTTAP;
3942 if (x->source_variance < sf->disable_filter_search_var_thresh) {
3943 tmp_best_filter = EIGHTTAP;
3944 } else if (sf->adaptive_pred_interp_filter == 1 &&
3945 ctx->pred_interp_filter < SWITCHABLE) {
3946 tmp_best_filter = ctx->pred_interp_filter;
3947 } else if (sf->adaptive_pred_interp_filter == 2) {
3948 tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ?
3949 ctx->pred_interp_filter : 0;
3950 } else {
3951 for (switchable_filter_index = 0;
3952 switchable_filter_index < SWITCHABLE_FILTERS;
3953 ++switchable_filter_index) {
3954 int newbest, rs;
3955 int64_t rs_rd;
3956 MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
3957 mbmi->interp_filter = switchable_filter_index;
3958 tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
3959 &mbmi_ext->ref_mvs[ref_frame][0],
3960 second_ref, best_yrd, &rate,
3961 &rate_y, &distortion,
3962 &skippable, &total_sse,
3963 (int) this_rd_thresh, seg_mvs,
3964 bsi, switchable_filter_index,
3965 mi_row, mi_col);
3967 if (tmp_rd == INT64_MAX)
3968 continue;
3969 rs = vp10_get_switchable_rate(cpi, xd);
3970 rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
3971 filter_cache[switchable_filter_index] = tmp_rd;
3972 filter_cache[SWITCHABLE_FILTERS] =
3973 MIN(filter_cache[SWITCHABLE_FILTERS],
3974 tmp_rd + rs_rd);
3975 if (cm->interp_filter == SWITCHABLE)
3976 tmp_rd += rs_rd;
3978 mask_filter = MAX(mask_filter, tmp_rd);
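// filter_cache[0..SWITCHABLE_FILTERS-1] holds the per-filter RD of the sub8x8
// search; the extra SWITCHABLE_FILTERS entry tracks the best RD once the cost
// of signaling the filter is included. mask_filter remembers the largest RD
// seen and later stands in for filters that were never evaluated.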
3980 newbest = (tmp_rd < tmp_best_rd);
3981 if (newbest) {
3982 tmp_best_filter = mbmi->interp_filter;
3983 tmp_best_rd = tmp_rd;
3984 }
3985 if ((newbest && cm->interp_filter == SWITCHABLE) ||
3986 (mbmi->interp_filter == cm->interp_filter &&
3987 cm->interp_filter != SWITCHABLE)) {
3988 tmp_best_rdu = tmp_rd;
3989 tmp_best_rate = rate;
3990 tmp_best_ratey = rate_y;
3991 tmp_best_distortion = distortion;
3992 tmp_best_sse = total_sse;
3993 tmp_best_skippable = skippable;
3994 tmp_best_mbmode = *mbmi;
3995 for (i = 0; i < 4; i++) {
3996 tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
3997 x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
3998 }
3999 pred_exists = 1;
4000 if (switchable_filter_index == 0 &&
4001 sf->use_rd_breakout &&
4002 best_rd < INT64_MAX) {
4003 if (tmp_best_rdu / 2 > best_rd) {
4004 // skip searching the other filters if the first is
4005 // already substantially larger than the best so far
4006 tmp_best_filter = mbmi->interp_filter;
4007 tmp_best_rdu = INT64_MAX;
4008 break;
4009 }
4010 }
4011 }
4012 } // switchable_filter_index loop
4013 }
4014 }
4016 if (tmp_best_rdu == INT64_MAX && pred_exists)
4017 continue;
4019 mbmi->interp_filter = (cm->interp_filter == SWITCHABLE ?
4020 tmp_best_filter : cm->interp_filter);
4021 if (!pred_exists) {
4022 // Handles the special case when a filter that is not in the
4023 // switchable list (bilinear, 6-tap) is indicated at the frame level
4024 tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
4025 &x->mbmi_ext->ref_mvs[ref_frame][0],
4026 second_ref, best_yrd, &rate, &rate_y,
4027 &distortion, &skippable, &total_sse,
4028 (int) this_rd_thresh, seg_mvs, bsi, 0,
4029 mi_row, mi_col);
4030 if (tmp_rd == INT64_MAX)
4031 continue;
4032 } else {
4033 total_sse = tmp_best_sse;
4034 rate = tmp_best_rate;
4035 rate_y = tmp_best_ratey;
4036 distortion = tmp_best_distortion;
4037 skippable = tmp_best_skippable;
4038 *mbmi = tmp_best_mbmode;
4039 for (i = 0; i < 4; i++)
4040 xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
4041 }
4043 rate2 += rate;
4044 distortion2 += distortion;
4046 if (cm->interp_filter == SWITCHABLE)
4047 rate2 += vp10_get_switchable_rate(cpi, xd);
4049 if (!mode_excluded)
4050 mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
4051 : cm->reference_mode == COMPOUND_REFERENCE;
4053 compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
4055 tmp_best_rdu = best_rd -
4056 MIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
4057 RDCOST(x->rdmult, x->rddiv, 0, total_sse));
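// tmp_best_rdu is the RD budget left for chroma: whatever remains of best_rd
// after the cheaper of (a) coding the current luma rate/distortion or
// (b) skipping the residual and taking total_sse as the distortion.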
4059 if (tmp_best_rdu > 0) {
4060 // If even the 'Y' rd value of split is higher than best so far
4061 // then don't bother looking at UV
4062 vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
4063 BLOCK_8X8);
4064 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
4065 if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
4066 &uv_sse, BLOCK_8X8, tmp_best_rdu))
4067 continue;
4069 rate2 += rate_uv;
4070 distortion2 += distortion_uv;
4071 skippable = skippable && uv_skippable;
4072 total_sse += uv_sse;
4073 }
4076 if (cm->reference_mode == REFERENCE_MODE_SELECT)
4077 rate2 += compmode_cost;
4079 // Estimate the reference frame signaling cost and add it
4080 // to the rolling cost variable.
4081 if (second_ref_frame > INTRA_FRAME) {
4082 rate2 += ref_costs_comp[ref_frame];
4083 } else {
4084 rate2 += ref_costs_single[ref_frame];
4085 }
4087 if (!disable_skip) {
4088 // Skip is never coded at the segment level for sub8x8 blocks and is
4089 // instead always coded in the bitstream at the mode info level.
4091 if (ref_frame != INTRA_FRAME && !xd->lossless) {
4092 if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
4093 RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
4094 // Add in the cost of the no skip flag.
4095 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
4096 } else {
4097 // FIXME(rbultje) make this work for splitmv also
4098 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
4099 distortion2 = total_sse;
4100 assert(total_sse >= 0);
4101 rate2 -= (rate_y + rate_uv);
4102 rate_y = 0;
4103 rate_uv = 0;
4104 this_skip2 = 1;
4105 }
4106 } else {
4107 // Add in the cost of the no skip flag.
4108 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
4109 }
4111 // Calculate the final RD estimate for this mode.
4112 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
4113 }
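// The skip decision above keeps whichever is cheaper in RD terms: coding the
// residual (rate_y + rate_uv at distortion2) or signaling skip and accepting
// the full prediction error total_sse as the distortion.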
4115 if (!disable_skip && ref_frame == INTRA_FRAME) {
4116 for (i = 0; i < REFERENCE_MODES; ++i)
4117 best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
4118 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
4119 best_filter_rd[i] = MIN(best_filter_rd[i], this_rd);
4120 }
4122 // Did this mode help, i.e. is it the new best mode?
4123 if (this_rd < best_rd || x->skip) {
4124 if (!mode_excluded) {
4125 int max_plane = MAX_MB_PLANE;
4126 // Note index of best mode so far
4127 best_ref_index = ref_index;
4129 if (ref_frame == INTRA_FRAME) {
4130 /* required for left and above block mv */
4131 mbmi->mv[0].as_int = 0;
4132 max_plane = 1;
4133 }
4135 rd_cost->rate = rate2;
4136 rd_cost->dist = distortion2;
4137 rd_cost->rdcost = this_rd;
4138 best_rd = this_rd;
4139 best_yrd = best_rd -
4140 RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
4141 best_mbmode = *mbmi;
4142 best_skip2 = this_skip2;
4143 if (!x->select_tx_size)
4144 swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
4145 memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
4146 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
4148 for (i = 0; i < 4; i++)
4149 best_bmodes[i] = xd->mi[0]->bmi[i];
4151 // TODO(debargha): enhance this test with a better distortion prediction
4152 // based on qp, activity mask and history
4153 if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
4154 (ref_index > MIN_EARLY_TERM_INDEX)) {
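// Early-termination heuristic: once a good inter mode leaves a distortion
// that is small relative to the (scaled) quantizer step, later reference
// frames are unlikely to win, so the reference loop is cut short.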
4155 int qstep = xd->plane[0].dequant[1];
4156 // TODO(debargha): Enhance this by specializing for each mode_index
4157 int scale = 4;
4158 #if CONFIG_VP9_HIGHBITDEPTH
4159 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
4160 qstep >>= (xd->bd - 8);
4161 }
4162 #endif // CONFIG_VP9_HIGHBITDEPTH
4163 if (x->source_variance < UINT_MAX) {
4164 const int var_adjust = (x->source_variance < 16);
4165 scale -= var_adjust;
4166 }
4167 if (ref_frame > INTRA_FRAME &&
4168 distortion2 * scale < qstep * qstep) {
4169 early_term = 1;
4170 }
4171 }
4172 }
4173 }
4175 /* keep record of best compound/single-only prediction */
4176 if (!disable_skip && ref_frame != INTRA_FRAME) {
4177 int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
4179 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
4180 single_rate = rate2 - compmode_cost;
4181 hybrid_rate = rate2;
4182 } else {
4183 single_rate = rate2;
4184 hybrid_rate = rate2 + compmode_cost;
4185 }
4187 single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
4188 hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
4190 if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
4191 best_pred_rd[SINGLE_REFERENCE] = single_rd;
4192 else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
4193 best_pred_rd[COMPOUND_REFERENCE] = single_rd;
4195 if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
4196 best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
4197 }
4199 /* keep record of best filter type */
4200 if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
4201 cm->interp_filter != BILINEAR) {
4202 int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
4203 SWITCHABLE_FILTERS : cm->interp_filter];
4204 int64_t adj_rd;
4205 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4206 if (ref == INT64_MAX)
4207 adj_rd = 0;
4208 else if (filter_cache[i] == INT64_MAX)
4209 // When early termination is triggered, the encoder does not have
4210 // access to the rate-distortion cost. It only knows that the cost
4211 // should be above the maximum valid value. Hence it takes the known
4212 // maximum plus an arbitrary constant as the rate-distortion cost.
4213 adj_rd = mask_filter - ref + 10;
4214 else
4215 adj_rd = filter_cache[i] - ref;
4217 adj_rd += this_rd;
4218 best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd);
4219 }
4220 }
4222 if (early_term)
4223 break;
4225 if (x->skip && !comp_pred)
4226 break;
4227 }
4229 if (best_rd >= best_rd_so_far) {
4230 rd_cost->rate = INT_MAX;
4231 rd_cost->rdcost = INT64_MAX;
4232 return;
4233 }
4235 // If we used an estimate for the uv intra rd in the loop above...
4236 if (sf->use_uv_intra_rd_estimate) {
4237 // Do Intra UV best rd mode selection if best mode choice above was intra.
4238 if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
4239 *mbmi = best_mbmode;
4240 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra,
4241 &rate_uv_tokenonly,
4242 &dist_uv,
4243 &skip_uv,
4244 BLOCK_8X8, TX_4X4);
4245 }
4246 }
4248 if (best_rd == INT64_MAX) {
4249 rd_cost->rate = INT_MAX;
4250 rd_cost->dist = INT64_MAX;
4251 rd_cost->rdcost = INT64_MAX;
4252 return;
4253 }
4255 assert((cm->interp_filter == SWITCHABLE) ||
4256 (cm->interp_filter == best_mbmode.interp_filter) ||
4257 !is_inter_block(&best_mbmode));
4259 vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
4260 sf->adaptive_rd_thresh, bsize, best_ref_index);
4262 // macroblock modes
4263 *mbmi = best_mbmode;
4264 x->skip |= best_skip2;
4265 if (!is_inter_block(&best_mbmode)) {
4266 for (i = 0; i < 4; i++)
4267 xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
4268 } else {
4269 for (i = 0; i < 4; ++i)
4270 memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
4272 mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
4273 mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
4274 }
4276 for (i = 0; i < REFERENCE_MODES; ++i) {
4277 if (best_pred_rd[i] == INT64_MAX)
4278 best_pred_diff[i] = INT_MIN;
4279 else
4280 best_pred_diff[i] = best_rd - best_pred_rd[i];
4281 }
4283 if (!x->skip) {
4284 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4285 if (best_filter_rd[i] == INT64_MAX)
4286 best_filter_diff[i] = 0;
4287 else
4288 best_filter_diff[i] = best_rd - best_filter_rd[i];
4289 }
4290 if (cm->interp_filter == SWITCHABLE)
4291 assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
4292 } else {
4293 vp10_zero(best_filter_diff);
4294 }
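// best_pred_diff/best_filter_diff record how much worse each reference mode
// and filter hypothesis was than the winning mode; they are saved with the
// coding context below.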
4296 store_coding_context(x, ctx, best_ref_index,
4297 best_pred_diff, best_filter_diff, 0);
4298 }