/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp10_rtcd.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/system_state.h"

#include "vp10/common/common.h"
#include "vp10/common/entropy.h"
#include "vp10/common/entropymode.h"
#include "vp10/common/idct.h"
#include "vp10/common/mvref_common.h"
#include "vp10/common/pred_common.h"
#include "vp10/common/quant_common.h"
#include "vp10/common/reconinter.h"
#include "vp10/common/reconintra.h"
#include "vp10/common/scan.h"
#include "vp10/common/seg_common.h"

#include "vp10/encoder/cost.h"
#include "vp10/encoder/encodemb.h"
#include "vp10/encoder/encodemv.h"
#include "vp10/encoder/encoder.h"
#include "vp10/encoder/mcomp.h"
#include "vp10/encoder/palette.h"
#include "vp10/encoder/quantize.h"
#include "vp10/encoder/ratectrl.h"
#include "vp10/encoder/rd.h"
#include "vp10/encoder/rdopt.h"
#include "vp10/encoder/aq_variance.h"
#define LAST_FRAME_MODE_MASK    ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \
                                 (1 << INTRA_FRAME))
#define GOLDEN_FRAME_MODE_MASK  ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \
                                 (1 << INTRA_FRAME))
#define ALT_REF_MODE_MASK       ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \
                                 (1 << INTRA_FRAME))

#define SECOND_REF_FRAME_MASK   ((1 << ALTREF_FRAME) | 0x01)

#define MIN_EARLY_TERM_INDEX    3
#define NEW_MV_DISCOUNT_FACTOR  8
typedef struct {
  PREDICTION_MODE mode;
  MV_REFERENCE_FRAME ref_frame[2];
} MODE_DEFINITION;

typedef struct {
  MV_REFERENCE_FRAME ref_frame[2];
} REF_DEFINITION;

struct rdcost_block_args {
  ENTROPY_CONTEXT t_above[16];
  ENTROPY_CONTEXT t_left[16];
  int use_fast_coef_costing;
#define LAST_NEW_MV_INDEX 6
static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
  {NEARESTMV, {LAST_FRAME,   NONE}},
  {NEARESTMV, {ALTREF_FRAME, NONE}},
  {NEARESTMV, {GOLDEN_FRAME, NONE}},

  {DC_PRED,   {INTRA_FRAME,  NONE}},

  {NEWMV,     {LAST_FRAME,   NONE}},
  {NEWMV,     {ALTREF_FRAME, NONE}},
  {NEWMV,     {GOLDEN_FRAME, NONE}},

  {NEARMV,    {LAST_FRAME,   NONE}},
  {NEARMV,    {ALTREF_FRAME, NONE}},
  {NEARMV,    {GOLDEN_FRAME, NONE}},

  {ZEROMV,    {LAST_FRAME,   NONE}},
  {ZEROMV,    {GOLDEN_FRAME, NONE}},
  {ZEROMV,    {ALTREF_FRAME, NONE}},

  {NEARESTMV, {LAST_FRAME,   ALTREF_FRAME}},
  {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}},

  {TM_PRED,   {INTRA_FRAME,  NONE}},

  {NEARMV,    {LAST_FRAME,   ALTREF_FRAME}},
  {NEWMV,     {LAST_FRAME,   ALTREF_FRAME}},
  {NEARMV,    {GOLDEN_FRAME, ALTREF_FRAME}},
  {NEWMV,     {GOLDEN_FRAME, ALTREF_FRAME}},

  {ZEROMV,    {LAST_FRAME,   ALTREF_FRAME}},
  {ZEROMV,    {GOLDEN_FRAME, ALTREF_FRAME}},

  {H_PRED,    {INTRA_FRAME,  NONE}},
  {V_PRED,    {INTRA_FRAME,  NONE}},
  {D135_PRED, {INTRA_FRAME,  NONE}},
  {D207_PRED, {INTRA_FRAME,  NONE}},
  {D153_PRED, {INTRA_FRAME,  NONE}},
  {D63_PRED,  {INTRA_FRAME,  NONE}},
  {D117_PRED, {INTRA_FRAME,  NONE}},
  {D45_PRED,  {INTRA_FRAME,  NONE}},
};

static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
  {{LAST_FRAME,   NONE}},
  {{GOLDEN_FRAME, NONE}},
  {{ALTREF_FRAME, NONE}},
  {{LAST_FRAME,   ALTREF_FRAME}},
  {{GOLDEN_FRAME, ALTREF_FRAME}},
  {{INTRA_FRAME,  NONE}},
};

static INLINE int write_uniform_cost(int n, int v) {
  int l = get_unsigned_bits(n), m = (1 << l) - n;
  if (l == 0)
    return 0;
  if (v < m)
    return (l - 1) * vp10_cost_bit(128, 0);
  else
    return l * vp10_cost_bit(128, 0);
}
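
// Illustration (added comment, not from the original source): the function
// above prices a near-uniform (truncated binary) code over n symbols.  With
// l = get_unsigned_bits(n) and m = (1 << l) - n, the first m symbols take
// l - 1 bits and the remaining n - m symbols take l bits, each bit costed at
// probability 1/2 via vp10_cost_bit(128, 0).  For example, n = 5 gives l = 3
// and m = 3, so v = 0..2 cost two bits and v = 3..4 cost three bits.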
144 static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
145 int m, int n, int min_plane, int max_plane) {
148 for (i = min_plane; i < max_plane; ++i) {
149 struct macroblock_plane *const p = &x->plane[i];
150 struct macroblockd_plane *const pd = &x->e_mbd.plane[i];
152 p->coeff = ctx->coeff_pbuf[i][m];
153 p->qcoeff = ctx->qcoeff_pbuf[i][m];
154 pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
155 p->eobs = ctx->eobs_pbuf[i][m];
157 ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
158 ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
159 ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
160 ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
162 ctx->coeff_pbuf[i][n] = p->coeff;
163 ctx->qcoeff_pbuf[i][n] = p->qcoeff;
164 ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
165 ctx->eobs_pbuf[i][n] = p->eobs;
static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
                            MACROBLOCK *x, MACROBLOCKD *xd,
                            int *out_rate_sum, int64_t *out_dist_sum,
                            int *skip_txfm_sb, int64_t *skip_sse_sb) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
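  // Worked example (added for clarity, not in the original): with 8-bit
  // content dequant_shift below is 3, so a stored AC dequant value of, say,
  // 48 corresponds to an effective quantizer step of 48 >> 3 = 6 when it is
  // passed to the rate/distortion model.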
177 int64_t rate_sum = 0;
178 int64_t dist_sum = 0;
179 const int ref = xd->mi[0]->mbmi.ref_frame[0];
181 unsigned int var = 0;
182 unsigned int sum_sse = 0;
183 int64_t total_sse = 0;
  const int dequant_shift =
#if CONFIG_VP9_HIGHBITDEPTH
      (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
          xd->bd - 5 :
#endif  // CONFIG_VP9_HIGHBITDEPTH
          3;
195 x->pred_sse[ref] = 0;
197 for (i = 0; i < MAX_MB_PLANE; ++i) {
198 struct macroblock_plane *const p = &x->plane[i];
199 struct macroblockd_plane *const pd = &xd->plane[i];
200 const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
201 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
202 const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size];
203 const int64_t dc_thr = p->quant_thred[0] >> shift;
204 const int64_t ac_thr = p->quant_thred[1] >> shift;
    // The low thresholds are used to measure if the prediction errors are
    // low enough so that we can skip the mode search.
    const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2);
    const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2);
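    // Rough intuition (added comment, not in the original): ac_thr and dc_thr
    // decide whether the AC or DC coefficients of a sub-block are expected to
    // quantize to zero, while the "low" variants (a quarter of the main
    // thresholds, capped at 50/80) gate the much stronger decision to skip the
    // mode search for the block altogether.  E.g. with dc_thr = 400 the low
    // DC threshold is min(50, 400 >> 2) = 50.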
209 int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
210 int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
212 int lw = b_width_log2_lookup[unit_size] + 2;
213 int lh = b_height_log2_lookup[unit_size] + 2;
217 for (idy = 0; idy < bh; ++idy) {
218 for (idx = 0; idx < bw; ++idx) {
        uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw);
        uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lw);
221 int block_idx = (idy << 1) + idx;
222 int low_err_skip = 0;
224 var = cpi->fn_ptr[unit_size].vf(src, p->src.stride,
225 dst, pd->dst.stride, &sse);
226 x->bsse[(i << 2) + block_idx] = sse;
229 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_NONE;
230 if (!x->select_tx_size) {
231 // Check if all ac coefficients can be quantized to zero.
232 if (var < ac_thr || var == 0) {
233 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_ONLY;
235 // Check if dc coefficient can be quantized to zero.
236 if (sse - var < dc_thr || sse == var) {
237 x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_DC;
239 if (!sse || (var < low_ac_thr && sse - var < low_dc_thr))
245 if (skip_flag && !low_err_skip)
249 x->pred_sse[ref] += sse;
253 total_sse += sum_sse;
    // Fast approximation of the modelling function.
    if (cpi->sf.simple_model_rd_from_var) {
      const int64_t square_error = sum_sse;
      int quantizer = (pd->dequant[1] >> dequant_shift);

      if (quantizer < 120)
        rate = (square_error * (280 - quantizer)) >> 8;
      else
        rate = 0;
      dist = (square_error * quantizer) >> 8;
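      // Worked example (illustrative, not from the source): with an effective
      // quantizer of 40 and sum_sse = 1000, the model gives
      // rate ~= (1000 * (280 - 40)) >> 8 = 937 and
      // dist ~= (1000 * 40) >> 8 = 156, both in internal fixed-point units.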
269 vp10_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
270 pd->dequant[1] >> dequant_shift,
277 *skip_txfm_sb = skip_flag;
278 *skip_sse_sb = total_sse << 4;
279 *out_rate_sum = (int)rate_sum;
280 *out_dist_sum = dist_sum << 4;
int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                           intptr_t block_size, int64_t *ssz) {
  int i;
  int64_t error = 0, sqcoeff = 0;

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += coeff[i] * coeff[i];
  }

  *ssz = sqcoeff;
  return error;
}

int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
                              int block_size) {
  int i;
  int64_t error = 0;

  for (i = 0; i < block_size; i++) {
    const int diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
  }

  return error;
}
#if CONFIG_VP9_HIGHBITDEPTH
int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
                                  const tran_low_t *dqcoeff,
                                  intptr_t block_size,
                                  int64_t *ssz, int bd) {
  int i;
  int64_t error = 0, sqcoeff = 0;
  int shift = 2 * (bd - 8);
  int rounding = shift > 0 ? 1 << (shift - 1) : 0;

  for (i = 0; i < block_size; i++) {
    const int64_t diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
  }
  assert(error >= 0 && sqcoeff >= 0);
  error = (error + rounding) >> shift;
  sqcoeff = (sqcoeff + rounding) >> shift;

  *ssz = sqcoeff;
  return error;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
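
// Note (added): for high-bit-depth content the squared errors above are
// computed on residuals that are scaled up by 2^(bd - 8), so the results are
// shifted back down by 2 * (bd - 8) with rounding.  For example, at bd = 10
// the shift is 4, i.e. the 16x larger squared error is divided by 16 so it is
// comparable with the 8-bit path.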
/* The trailing '0' is a terminator which is used inside cost_coeffs() to
 * decide whether to include cost of a trailing EOB node or not (i.e. we
 * can skip this if the last coefficient in this transform block, e.g. the
 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
 * is non-zero). */
static const int16_t band_counts[TX_SIZES][8] = {
  { 1, 2, 3, 4,  3,   16 - 13, 0 },
  { 1, 2, 3, 4, 11,   64 - 21, 0 },
  { 1, 2, 3, 4, 11,  256 - 21, 0 },
  { 1, 2, 3, 4, 11, 1024 - 21, 0 },
};
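
// Added note: coefficients are grouped into "bands" for entropy-context
// selection, and each row above lists how many coefficients fall into each
// band for one transform size (e.g. 4x4: 1 + 2 + 3 + 4 + 3 + 3 = 16,
// 8x8: 1 + 2 + 3 + 4 + 11 + 43 = 64).  cost_coeffs() walks these counts while
// pricing tokens; the trailing 0 is the terminator described above.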
346 static int cost_coeffs(MACROBLOCK *x,
347 int plane, int block,
348 ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
350 const int16_t *scan, const int16_t *nb,
351 int use_fast_coef_costing) {
352 MACROBLOCKD *const xd = &x->e_mbd;
353 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
354 const struct macroblock_plane *p = &x->plane[plane];
355 const struct macroblockd_plane *pd = &xd->plane[plane];
356 const PLANE_TYPE type = pd->plane_type;
357 const int16_t *band_count = &band_counts[tx_size][1];
358 const int eob = p->eobs[block];
359 const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
360 unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
361 x->token_costs[tx_size][type][is_inter_block(mbmi)];
362 uint8_t token_cache[32 * 32];
363 int pt = combine_entropy_contexts(*A, *L);
365 #if CONFIG_VP9_HIGHBITDEPTH
366 const int16_t *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
368 const int16_t *cat6_high_cost = vp10_get_high_cost_table(8);
371 // Check for consistency of tx_size with mode info
372 assert(type == PLANE_TYPE_Y ? mbmi->tx_size == tx_size
373 : get_uv_tx_size(mbmi, pd) == tx_size);
377 cost = token_costs[0][0][pt][EOB_TOKEN];
380 int band_left = *band_count++;
386 vp10_get_token_extra(v, &prev_t, &e);
387 cost = (*token_costs)[0][pt][prev_t] +
388 vp10_get_cost(prev_t, e, cat6_high_cost);
390 token_cache[0] = vp10_pt_energy_class[prev_t];
394 for (c = 1; c < eob; c++) {
395 const int rc = scan[c];
399 vp10_get_token_extra(v, &t, &e);
400 if (use_fast_coef_costing) {
401 cost += (*token_costs)[!prev_t][!prev_t][t] +
402 vp10_get_cost(t, e, cat6_high_cost);
404 pt = get_coef_context(nb, token_cache, c);
405 cost += (*token_costs)[!prev_t][pt][t] +
406 vp10_get_cost(t, e, cat6_high_cost);
407 token_cache[rc] = vp10_pt_energy_class[t];
411 band_left = *band_count++;
418 if (use_fast_coef_costing) {
419 cost += (*token_costs)[0][!prev_t][EOB_TOKEN];
421 pt = get_coef_context(nb, token_cache, c);
422 cost += (*token_costs)[0][pt][EOB_TOKEN];
427 // is eob first coefficient;
433 static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
434 int64_t *out_dist, int64_t *out_sse) {
435 const int ss_txfrm_size = tx_size << 1;
436 MACROBLOCKD* const xd = &x->e_mbd;
437 const struct macroblock_plane *const p = &x->plane[plane];
438 const struct macroblockd_plane *const pd = &xd->plane[plane];
440 int shift = tx_size == TX_32X32 ? 0 : 2;
441 tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
442 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
443 #if CONFIG_VP9_HIGHBITDEPTH
444 const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
445 *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
446 &this_sse, bd) >> shift;
448 *out_dist = vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
450 #endif // CONFIG_VP9_HIGHBITDEPTH
451 *out_sse = this_sse >> shift;
454 static int rate_block(int plane, int block, BLOCK_SIZE plane_bsize,
455 TX_SIZE tx_size, struct rdcost_block_args* args) {
457 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x_idx, &y_idx);
459 return cost_coeffs(args->x, plane, block, args->t_above + x_idx,
460 args->t_left + y_idx, tx_size,
461 args->so->scan, args->so->neighbors,
462 args->use_fast_coef_costing);
465 static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize,
466 TX_SIZE tx_size, void *arg) {
467 struct rdcost_block_args *args = arg;
468 MACROBLOCK *const x = args->x;
469 MACROBLOCKD *const xd = &x->e_mbd;
470 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
471 int64_t rd1, rd2, rd;
476 if (args->exit_early)
479 if (!is_inter_block(mbmi)) {
480 struct encode_b_args arg = {x, NULL, &mbmi->skip};
481 vp10_encode_block_intra(plane, block, plane_bsize, tx_size, &arg);
482 dist_block(x, plane, block, tx_size, &dist, &sse);
483 } else if (max_txsize_lookup[plane_bsize] == tx_size) {
484 if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
486 // full forward transform and quantization
487 vp10_xform_quant(x, plane, block, plane_bsize, tx_size);
488 dist_block(x, plane, block, tx_size, &dist, &sse);
489 } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
491 // compute DC coefficient
492 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
493 tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
494 vp10_xform_quant_dc(x, plane, block, plane_bsize, tx_size);
495 sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
      if (x->plane[plane].eobs[block]) {
        const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
        const int64_t resd_sse = coeff[0] - dqcoeff[0];
        int64_t dc_correct = orig_sse - resd_sse * resd_sse;
#if CONFIG_VP9_HIGHBITDEPTH
        dc_correct >>= ((xd->bd - 8) * 2);
#endif  // CONFIG_VP9_HIGHBITDEPTH
        if (tx_size != TX_32X32)
          dc_correct >>= 2;

        dist = VPXMAX(0, sse - dc_correct);
511 // skip forward transform
512 x->plane[plane].eobs[block] = 0;
513 sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
517 // full forward transform and quantization
518 vp10_xform_quant(x, plane, block, plane_bsize, tx_size);
519 dist_block(x, plane, block, tx_size, &dist, &sse);
522 rd = RDCOST(x->rdmult, x->rddiv, 0, dist);
523 if (args->this_rd + rd > args->best_rd) {
524 args->exit_early = 1;
528 rate = rate_block(plane, block, plane_bsize, tx_size, args);
529 rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist);
530 rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);
  // TODO(jingning): temporarily enabled only for luma component
  rd = VPXMIN(rd1, rd2);
  if (plane == 0)
    x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] ||
                                    (rd1 > rd2 && !xd->lossless);
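  // Added note: zcoeff_blk marks this transform block as "all zero" either
  // when quantization already produced no nonzero coefficients (eob == 0) or
  // when forcing the block to zero (rd2: zero rate, full sse) is cheaper than
  // actually coding the coefficients (rd1).  Lossless mode never takes the
  // forced-zero shortcut.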
538 args->this_rate += rate;
539 args->this_dist += dist;
540 args->this_sse += sse;
543 if (args->this_rd > args->best_rd) {
544 args->exit_early = 1;
548 args->skippable &= !x->plane[plane].eobs[block];
551 static void txfm_rd_in_plane(MACROBLOCK *x,
552 int *rate, int64_t *distortion,
553 int *skippable, int64_t *sse,
554 int64_t ref_best_rd, int plane,
555 BLOCK_SIZE bsize, TX_SIZE tx_size,
556 int use_fast_coef_casting) {
557 MACROBLOCKD *const xd = &x->e_mbd;
558 const struct macroblockd_plane *const pd = &xd->plane[plane];
560 struct rdcost_block_args args;
563 args.best_rd = ref_best_rd;
564 args.use_fast_coef_costing = use_fast_coef_casting;
568 xd->mi[0]->mbmi.tx_size = tx_size;
570 vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
572 tx_type = get_tx_type(pd->plane_type, xd, 0);
573 args.so = get_scan(tx_size, tx_type);
575 vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
576 block_rd_txfm, &args);
577 if (args.exit_early) {
579 *distortion = INT64_MAX;
583 *distortion = args.this_dist;
584 *rate = args.this_rate;
585 *sse = args.this_sse;
586 *skippable = args.skippable;
590 static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
591 int *rate, int64_t *distortion,
592 int *skip, int64_t *sse,
595 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
596 VP10_COMMON *const cm = &cpi->common;
597 const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
598 MACROBLOCKD *const xd = &x->e_mbd;
599 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
601 mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
603 txfm_rd_in_plane(x, rate, distortion, skip,
604 sse, ref_best_rd, 0, bs,
605 mbmi->tx_size, cpi->sf.use_fast_coef_costing);
608 static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
615 const TX_SIZE max_tx_size = max_txsize_lookup[bs];
616 VP10_COMMON *const cm = &cpi->common;
617 MACROBLOCKD *const xd = &x->e_mbd;
618 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
619 vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
620 int r[TX_SIZES][2], s[TX_SIZES];
621 int64_t d[TX_SIZES], sse[TX_SIZES];
622 int64_t rd[TX_SIZES][2] = {{INT64_MAX, INT64_MAX},
623 {INT64_MAX, INT64_MAX},
624 {INT64_MAX, INT64_MAX},
625 {INT64_MAX, INT64_MAX}};
628 int64_t best_rd = INT64_MAX;
629 TX_SIZE best_tx = max_tx_size;
630 int start_tx, end_tx;
632 const vpx_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
633 assert(skip_prob > 0);
634 s0 = vp10_cost_bit(skip_prob, 0);
635 s1 = vp10_cost_bit(skip_prob, 1);
637 if (cm->tx_mode == TX_MODE_SELECT) {
638 start_tx = max_tx_size;
641 TX_SIZE chosen_tx_size = VPXMIN(max_tx_size,
642 tx_mode_to_biggest_tx_size[cm->tx_mode]);
643 start_tx = chosen_tx_size;
644 end_tx = chosen_tx_size;
647 for (n = start_tx; n >= end_tx; n--) {
649 for (m = 0; m <= n - (n == (int) max_tx_size); m++) {
651 r_tx_size += vp10_cost_zero(tx_probs[m]);
653 r_tx_size += vp10_cost_one(tx_probs[m]);
655 txfm_rd_in_plane(x, &r[n][0], &d[n], &s[n],
656 &sse[n], ref_best_rd, 0, bs, n,
657 cpi->sf.use_fast_coef_costing);
659 if (r[n][0] < INT_MAX) {
660 r[n][1] += r_tx_size;
662 if (d[n] == INT64_MAX || r[n][0] == INT_MAX) {
663 rd[n][0] = rd[n][1] = INT64_MAX;
665 if (is_inter_block(mbmi)) {
666 rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
667 r[n][1] -= r_tx_size;
669 rd[n][0] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
670 rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size, sse[n]);
673 rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
674 rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
677 if (is_inter_block(mbmi) && !xd->lossless && !s[n] && sse[n] != INT64_MAX) {
678 rd[n][0] = VPXMIN(rd[n][0], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
679 rd[n][1] = VPXMIN(rd[n][1], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
682 // Early termination in transform size search.
683 if (cpi->sf.tx_size_search_breakout &&
684 (rd[n][1] == INT64_MAX ||
685 (n < (int) max_tx_size && rd[n][1] > rd[n + 1][1]) ||
689 if (rd[n][1] < best_rd) {
694 mbmi->tx_size = best_tx;
696 *distortion = d[mbmi->tx_size];
697 *rate = r[mbmi->tx_size][cm->tx_mode == TX_MODE_SELECT];
698 *skip = s[mbmi->tx_size];
699 *psse = sse[mbmi->tx_size];
702 static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
703 int64_t *distortion, int *skip,
704 int64_t *psse, BLOCK_SIZE bs,
705 int64_t ref_best_rd) {
706 MACROBLOCKD *xd = &x->e_mbd;
708 int64_t *ret_sse = psse ? psse : &sse;
710 assert(bs == xd->mi[0]->mbmi.sb_type);
712 if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
713 choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
716 choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse,
static int conditional_skipintra(PREDICTION_MODE mode,
                                 PREDICTION_MODE best_intra_mode) {
  if (mode == D117_PRED &&
      best_intra_mode != V_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  if (mode == D63_PRED &&
      best_intra_mode != V_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D207_PRED &&
      best_intra_mode != H_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D153_PRED &&
      best_intra_mode != H_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  return 0;
}
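
// Added note: this heuristic skips an oblique intra mode when the best mode
// found so far is not one of its directional neighbours; e.g. D117 (117
// degrees) is only worth evaluating if V or D135 is currently best, since
// those are the adjacent prediction angles.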
742 void rd_pick_palette_intra_sby(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
743 int palette_ctx, int dc_mode_cost,
744 PALETTE_MODE_INFO *palette_mode_info,
745 uint8_t *best_palette_color_map,
746 TX_SIZE *best_tx, PREDICTION_MODE *mode_selected,
748 MACROBLOCKD *const xd = &x->e_mbd;
749 MODE_INFO *const mic = xd->mi[0];
750 int rows = 4 * num_4x4_blocks_high_lookup[bsize];
751 int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
752 int this_rate, this_rate_tokenonly, s;
753 int64_t this_distortion, this_rd;
755 int src_stride = x->plane[0].src.stride;
756 uint8_t *src = x->plane[0].src.buf;
758 #if CONFIG_VP9_HIGHBITDEPTH
759 if (cpi->common.use_highbitdepth)
760 colors = vp10_count_colors_highbd(src, src_stride, rows, cols,
761 cpi->common.bit_depth);
763 #endif // CONFIG_VP9_HIGHBITDEPTH
764 colors = vp10_count_colors(src, src_stride, rows, cols);
765 palette_mode_info->palette_size[0] = 0;
767 if (colors > 1 && colors <= 64 && cpi->common.allow_screen_content_tools) {
770 int color_ctx, color_idx = 0;
771 int color_order[PALETTE_MAX_SIZE];
772 double *data = x->palette_buffer->kmeans_data_buf;
773 uint8_t *indices = x->palette_buffer->kmeans_indices_buf;
774 uint8_t *pre_indices = x->palette_buffer->kmeans_pre_indices_buf;
775 double centroids[PALETTE_MAX_SIZE];
778 PALETTE_MODE_INFO *pmi = &mic->mbmi.palette_mode_info;
779 #if CONFIG_VP9_HIGHBITDEPTH
780 uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
781 if (cpi->common.use_highbitdepth)
784 #endif // CONFIG_VP9_HIGHBITDEPTH
787 #if CONFIG_VP9_HIGHBITDEPTH
788 if (cpi->common.use_highbitdepth) {
789 for (r = 0; r < rows; ++r) {
790 for (c = 0; c < cols; ++c) {
791 val = src16[r * src_stride + c];
792 data[r * cols + c] = val;
800 #endif // CONFIG_VP9_HIGHBITDEPTH
801 for (r = 0; r < rows; ++r) {
802 for (c = 0; c < cols; ++c) {
803 val = src[r * src_stride + c];
804 data[r * cols + c] = val;
811 #if CONFIG_VP9_HIGHBITDEPTH
813 #endif // CONFIG_VP9_HIGHBITDEPTH
815 mic->mbmi.mode = DC_PRED;
817 for (n = colors > PALETTE_MAX_SIZE ? PALETTE_MAX_SIZE : colors;
819 for (i = 0; i < n; ++i)
820 centroids[i] = lb + (2 * i + 1) * (ub - lb) / n / 2;
821 vp10_k_means(data, centroids, indices, pre_indices, rows * cols,
823 vp10_insertion_sort(centroids, n);
829 if (centroids[i] == centroids[i - 1]) {
832 centroids[j] = centroids[j + 1];
841 #if CONFIG_VP9_HIGHBITDEPTH
842 if (cpi->common.use_highbitdepth)
843 for (i = 0; i < k; ++i)
844 mic->mbmi.palette_mode_info.palette_colors[i] =
845 clip_pixel_highbd(round(centroids[i]), cpi->common.bit_depth);
847 #endif // CONFIG_VP9_HIGHBITDEPTH
848 for (i = 0; i < k; ++i)
849 pmi->palette_colors[i] = clip_pixel((int)round(centroids[i]));
850 pmi->palette_size[0] = k;
852 vp10_calc_indices(data, centroids, indices, rows * cols, k, 1);
853 for (r = 0; r < rows; ++r)
854 for (c = 0; c < cols; ++c)
855 xd->plane[0].color_index_map[r * cols + c] = indices[r * cols + c];
857 super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
858 &s, NULL, bsize, *best_rd);
859 if (this_rate_tokenonly == INT_MAX)
862 this_rate = this_rate_tokenonly + dc_mode_cost +
863 cpi->common.bit_depth * k * vp10_cost_bit(128, 0) +
864 cpi->palette_y_size_cost[bsize - BLOCK_8X8][k - 2];
866 vp10_cost_bit(vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8]
868 color_map = xd->plane[0].color_index_map;
869 this_rate += write_uniform_cost(k, xd->plane[0].color_index_map[0]);
870 for (i = 0; i < rows; ++i) {
871 for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
872 color_ctx = vp10_get_palette_color_context(color_map, cols, i, j,
874 for (r = 0; r < k; ++r)
875 if (color_map[i * cols + j] == color_order[r]) {
879 assert(color_idx < k);
881 cpi->palette_y_color_cost[k - 2][color_ctx][color_idx];
884 this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
886 if (this_rd < *best_rd) {
888 *palette_mode_info = mic->mbmi.palette_mode_info;
889 memcpy(best_palette_color_map, xd->plane[0].color_index_map,
890 rows * cols * sizeof(xd->plane[0].color_index_map[0]));
891 *mode_selected = DC_PRED;
892 *best_tx = mic->mbmi.tx_size;
898 static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
900 PREDICTION_MODE *best_mode,
901 const int *bmode_costs,
902 ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
903 int *bestrate, int *bestratey,
904 int64_t *bestdistortion,
905 BLOCK_SIZE bsize, int64_t rd_thresh) {
906 PREDICTION_MODE mode;
907 MACROBLOCKD *const xd = &x->e_mbd;
908 int64_t best_rd = rd_thresh;
909 struct macroblock_plane *p = &x->plane[0];
910 struct macroblockd_plane *pd = &xd->plane[0];
911 const int src_stride = p->src.stride;
912 const int dst_stride = pd->dst.stride;
  const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4];
  uint8_t *dst_init = &pd->dst.buf[row * 4 * dst_stride + col * 4];
915 ENTROPY_CONTEXT ta[2], tempa[2];
916 ENTROPY_CONTEXT tl[2], templ[2];
917 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
918 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
920 uint8_t best_dst[8 * 8];
921 #if CONFIG_VP9_HIGHBITDEPTH
922 uint16_t best_dst16[8 * 8];
925 memcpy(ta, a, sizeof(ta));
926 memcpy(tl, l, sizeof(tl));
927 xd->mi[0]->mbmi.tx_size = TX_4X4;
928 xd->mi[0]->mbmi.palette_mode_info.palette_size[0] = 0;
930 #if CONFIG_VP9_HIGHBITDEPTH
931 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
932 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
935 int64_t distortion = 0;
936 int rate = bmode_costs[mode];
938 if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
941 // Only do the oblique modes if the best so far is
942 // one of the neighboring directional modes
943 if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
944 if (conditional_skipintra(mode, *best_mode))
948 memcpy(tempa, ta, sizeof(ta));
949 memcpy(templ, tl, sizeof(tl));
951 for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
952 for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
953 const int block = (row + idy) * 2 + (col + idx);
954 const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
955 uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
956 int16_t *const src_diff = vp10_raster_block_offset_int16(BLOCK_8X8,
959 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
960 xd->mi[0]->bmi[block].as_mode = mode;
961 vp10_predict_intra_block(xd, 1, TX_4X4, mode, dst, dst_stride,
963 col + idx, row + idy, 0);
964 vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride,
965 dst, dst_stride, xd->bd);
967 TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
968 const scan_order *so = get_scan(TX_4X4, tx_type);
969 vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
970 vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
971 ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
972 so->scan, so->neighbors,
973 cpi->sf.use_fast_coef_costing);
974 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
976 vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
977 dst, dst_stride, p->eobs[block],
981 TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
982 const scan_order *so = get_scan(TX_4X4, tx_type);
983 vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
984 vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
985 ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
986 so->scan, so->neighbors,
987 cpi->sf.use_fast_coef_costing);
988 distortion += vp10_highbd_block_error(
989 coeff, BLOCK_OFFSET(pd->dqcoeff, block),
990 16, &unused, xd->bd) >> 2;
991 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
993 vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
994 dst, dst_stride, p->eobs[block],
1001 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1003 if (this_rd < best_rd) {
1006 *bestdistortion = distortion;
1009 memcpy(a, tempa, sizeof(tempa));
1010 memcpy(l, templ, sizeof(templ));
1011 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
1012 memcpy(best_dst16 + idy * 8,
1013 CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
1014 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
1020 if (best_rd >= rd_thresh)
1023 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
1024 memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
1025 best_dst16 + idy * 8,
1026 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
1031 #endif // CONFIG_VP9_HIGHBITDEPTH
1033 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1036 int64_t distortion = 0;
1037 int rate = bmode_costs[mode];
1039 if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
1042 // Only do the oblique modes if the best so far is
1043 // one of the neighboring directional modes
1044 if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
1045 if (conditional_skipintra(mode, *best_mode))
1049 memcpy(tempa, ta, sizeof(ta));
1050 memcpy(templ, tl, sizeof(tl));
1052 for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
1053 for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
1054 const int block = (row + idy) * 2 + (col + idx);
1055 const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
1056 uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
1057 int16_t *const src_diff =
1058 vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
1059 tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
1060 xd->mi[0]->bmi[block].as_mode = mode;
1061 vp10_predict_intra_block(xd, 1, TX_4X4, mode, dst, dst_stride,
1062 dst, dst_stride, col + idx, row + idy, 0);
1063 vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
1066 TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
1067 const scan_order *so = get_scan(TX_4X4, tx_type);
1068 vp10_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
1069 vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
1070 ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
1071 so->scan, so->neighbors,
1072 cpi->sf.use_fast_coef_costing);
1073 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
1075 vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
1076 dst, dst_stride, p->eobs[block], DCT_DCT, 1);
1079 TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
1080 const scan_order *so = get_scan(TX_4X4, tx_type);
1081 vp10_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
1082 vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
1083 ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
1084 so->scan, so->neighbors,
1085 cpi->sf.use_fast_coef_costing);
1086 distortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
1088 if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
1090 vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
1091 dst, dst_stride, p->eobs[block], tx_type, 0);
1097 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1099 if (this_rd < best_rd) {
1102 *bestdistortion = distortion;
1105 memcpy(a, tempa, sizeof(tempa));
1106 memcpy(l, templ, sizeof(templ));
1107 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
1108 memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
1109 num_4x4_blocks_wide * 4);
1115 if (best_rd >= rd_thresh)
1118 for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
1119 memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
1120 num_4x4_blocks_wide * 4);
1125 static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
1126 int *rate, int *rate_y,
1127 int64_t *distortion,
1130 const MACROBLOCKD *const xd = &mb->e_mbd;
1131 MODE_INFO *const mic = xd->mi[0];
1132 const MODE_INFO *above_mi = xd->above_mi;
1133 const MODE_INFO *left_mi = xd->left_mi;
1134 const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
1135 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1136 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1139 int64_t total_distortion = 0;
1141 int64_t total_rd = 0;
1142 ENTROPY_CONTEXT t_above[4], t_left[4];
1143 const int *bmode_costs = cpi->mbmode_cost;
1145 memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
1146 memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
1148 // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
1149 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
1150 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
1151 PREDICTION_MODE best_mode = DC_PRED;
1152 int r = INT_MAX, ry = INT_MAX;
1153 int64_t d = INT64_MAX, this_rd = INT64_MAX;
1155 if (cpi->common.frame_type == KEY_FRAME) {
1156 const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
1157 const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);
1159 bmode_costs = cpi->y_mode_costs[A][L];
1162 this_rd = rd_pick_intra4x4block(cpi, mb, idy, idx, &best_mode,
1163 bmode_costs, t_above + idx, t_left + idy,
1164 &r, &ry, &d, bsize, best_rd - total_rd);
1165 if (this_rd >= best_rd - total_rd)
1168 total_rd += this_rd;
1170 total_distortion += d;
1173 mic->bmi[i].as_mode = best_mode;
1174 for (j = 1; j < num_4x4_blocks_high; ++j)
1175 mic->bmi[i + j * 2].as_mode = best_mode;
1176 for (j = 1; j < num_4x4_blocks_wide; ++j)
1177 mic->bmi[i + j].as_mode = best_mode;
1179 if (total_rd >= best_rd)
1185 *rate_y = tot_rate_y;
1186 *distortion = total_distortion;
1187 mic->mbmi.mode = mic->bmi[3].as_mode;
1189 return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
1192 // This function is used only for intra_only frames
1193 static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x,
1194 int *rate, int *rate_tokenonly,
1195 int64_t *distortion, int *skippable,
1198 PREDICTION_MODE mode;
1199 PREDICTION_MODE mode_selected = DC_PRED;
1200 MACROBLOCKD *const xd = &x->e_mbd;
1201 MODE_INFO *const mic = xd->mi[0];
1202 int this_rate, this_rate_tokenonly, s;
1203 int64_t this_distortion, this_rd;
1204 TX_SIZE best_tx = TX_4X4;
1206 PALETTE_MODE_INFO palette_mode_info;
1207 uint8_t *best_palette_color_map = cpi->common.allow_screen_content_tools ?
1208 x->palette_buffer->best_palette_color_map : NULL;
1209 int rows = 4 * num_4x4_blocks_high_lookup[bsize];
1210 int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
1211 int palette_ctx = 0;
1212 const MODE_INFO *above_mi = xd->above_mi;
1213 const MODE_INFO *left_mi = xd->left_mi;
1214 const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0);
1215 const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, 0);
1216 bmode_costs = cpi->y_mode_costs[A][L];
1218 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1219 palette_mode_info.palette_size[0] = 0;
1220 mic->mbmi.palette_mode_info.palette_size[0] = 0;
1222 palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
1224 palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
1226 /* Y Search for intra prediction mode */
1227 for (mode = DC_PRED; mode <= TM_PRED; mode++) {
1228 mic->mbmi.mode = mode;
1230 super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
1231 &s, NULL, bsize, best_rd);
1233 if (this_rate_tokenonly == INT_MAX)
1236 this_rate = this_rate_tokenonly + bmode_costs[mode];
1237 if (cpi->common.allow_screen_content_tools && mode == DC_PRED)
1239 vp10_cost_bit(vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8]
1241 this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
1243 if (this_rd < best_rd) {
1244 mode_selected = mode;
1246 best_tx = mic->mbmi.tx_size;
1248 *rate_tokenonly = this_rate_tokenonly;
1249 *distortion = this_distortion;
1254 if (cpi->common.allow_screen_content_tools)
1255 rd_pick_palette_intra_sby(cpi, x, bsize, palette_ctx, bmode_costs[DC_PRED],
1256 &palette_mode_info, best_palette_color_map,
1257 &best_tx, &mode_selected, &best_rd);
1259 mic->mbmi.mode = mode_selected;
1260 mic->mbmi.tx_size = best_tx;
1261 mic->mbmi.palette_mode_info.palette_size[0] =
1262 palette_mode_info.palette_size[0];
1263 if (palette_mode_info.palette_size[0] > 0) {
1264 memcpy(mic->mbmi.palette_mode_info.palette_colors,
1265 palette_mode_info.palette_colors,
1266 PALETTE_MAX_SIZE * sizeof(palette_mode_info.palette_colors[0]));
1267 memcpy(xd->plane[0].color_index_map, best_palette_color_map,
1268 rows * cols * sizeof(best_palette_color_map[0]));
1274 // Return value 0: early termination triggered, no valid rd cost available;
1275 // 1: rd cost values are valid.
1276 static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x,
1277 int *rate, int64_t *distortion, int *skippable,
1278 int64_t *sse, BLOCK_SIZE bsize,
1279 int64_t ref_best_rd) {
1280 MACROBLOCKD *const xd = &x->e_mbd;
1281 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
1282 const TX_SIZE uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
1284 int pnrate = 0, pnskip = 1;
1285 int64_t pndist = 0, pnsse = 0;
1286 int is_cost_valid = 1;
1288 if (ref_best_rd < 0)
1291 if (is_inter_block(mbmi) && is_cost_valid) {
1293 for (plane = 1; plane < MAX_MB_PLANE; ++plane)
1294 vp10_subtract_plane(x, bsize, plane);
1302 for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
1303 txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse,
1304 ref_best_rd, plane, bsize, uv_tx_size,
1305 cpi->sf.use_fast_coef_costing);
1306 if (pnrate == INT_MAX) {
1311 *distortion += pndist;
1313 *skippable &= pnskip;
1316 if (!is_cost_valid) {
1319 *distortion = INT64_MAX;
1324 return is_cost_valid;
1327 static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
1328 PICK_MODE_CONTEXT *ctx,
1329 int *rate, int *rate_tokenonly,
1330 int64_t *distortion, int *skippable,
1331 BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
1332 MACROBLOCKD *xd = &x->e_mbd;
1333 PREDICTION_MODE mode;
1334 PREDICTION_MODE mode_selected = DC_PRED;
1335 int64_t best_rd = INT64_MAX, this_rd;
1336 int this_rate_tokenonly, this_rate, s;
1337 int64_t this_distortion, this_sse;
1339 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1340 xd->mi[0]->mbmi.palette_mode_info.palette_size[1] = 0;
1341 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1342 if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
1345 xd->mi[0]->mbmi.uv_mode = mode;
1347 if (!super_block_uvrd(cpi, x, &this_rate_tokenonly,
1348 &this_distortion, &s, &this_sse, bsize, best_rd))
1350 this_rate = this_rate_tokenonly +
1351 cpi->intra_uv_mode_cost[cpi->common.frame_type][mode];
1352 this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
1354 if (this_rd < best_rd) {
1355 mode_selected = mode;
1358 *rate_tokenonly = this_rate_tokenonly;
1359 *distortion = this_distortion;
1361 if (!x->select_tx_size)
1362 swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
1366 xd->mi[0]->mbmi.uv_mode = mode_selected;
1370 static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x,
1371 int *rate, int *rate_tokenonly,
1372 int64_t *distortion, int *skippable,
1374 const VP10_COMMON *cm = &cpi->common;
1377 x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
1378 memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
1379 super_block_uvrd(cpi, x, rate_tokenonly, distortion,
1380 skippable, &unused, bsize, INT64_MAX);
1381 *rate = *rate_tokenonly + cpi->intra_uv_mode_cost[cm->frame_type][DC_PRED];
1382 return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
1385 static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
1386 PICK_MODE_CONTEXT *ctx,
1387 BLOCK_SIZE bsize, TX_SIZE max_tx_size,
1388 int *rate_uv, int *rate_uv_tokenonly,
1389 int64_t *dist_uv, int *skip_uv,
1390 PREDICTION_MODE *mode_uv) {
1391 // Use an estimated rd for uv_intra based on DC_PRED if the
1392 // appropriate speed flag is set.
1393 if (cpi->sf.use_uv_intra_rd_estimate) {
1394 rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv,
1395 skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
1396 // Else do a proper rd search for each possible transform size that may
1397 // be considered in the main rd loop.
1399 rd_pick_intra_sbuv_mode(cpi, x, ctx,
1400 rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
1401 bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);
1403 *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
1406 static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
1408 assert(is_inter_mode(mode));
1409 return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
1412 static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
1414 PREDICTION_MODE mode, int_mv this_mv[2],
1415 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
1416 int_mv seg_mvs[MAX_REF_FRAMES],
1417 int_mv *best_ref_mv[2], const int *mvjcost,
1419 MODE_INFO *const mic = xd->mi[0];
1420 const MB_MODE_INFO *const mbmi = &mic->mbmi;
1421 const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1424 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
1425 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
1426 const int is_compound = has_second_ref(mbmi);
1430 this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
1431 thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
1432 mvjcost, mvcost, MV_COST_WEIGHT_SUB);
1434 this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
1435 thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
1436 mvjcost, mvcost, MV_COST_WEIGHT_SUB);
1441 this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
1443 this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;
1446 this_mv[0].as_int = 0;
1448 this_mv[1].as_int = 0;
1454 mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
1456 mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
1458 mic->bmi[i].as_mode = mode;
1460 for (idy = 0; idy < num_4x4_blocks_high; ++idy)
1461 for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
1462 memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));
1464 return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mbmi->ref_frame[0]]) +
1468 static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
1473 int64_t *distortion, int64_t *sse,
1474 ENTROPY_CONTEXT *ta,
1475 ENTROPY_CONTEXT *tl,
1477 int mi_row, int mi_col) {
1479 MACROBLOCKD *xd = &x->e_mbd;
1480 struct macroblockd_plane *const pd = &xd->plane[0];
1481 struct macroblock_plane *const p = &x->plane[0];
1482 MODE_INFO *const mi = xd->mi[0];
1483 const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
1484 const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
1485 const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
1487 void (*fwd_txm4x4)(const int16_t *input, tran_low_t *output, int stride);
1489 const uint8_t *const src =
1490 &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
1491 uint8_t *const dst = &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i,
1493 int64_t thisdistortion = 0, thissse = 0;
1495 TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i);
1496 const scan_order *so = get_scan(TX_4X4, tx_type);
1498 vp10_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
1500 #if CONFIG_VP9_HIGHBITDEPTH
1501 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1502 fwd_txm4x4 = xd->lossless ? vp10_highbd_fwht4x4 : vpx_highbd_fdct4x4;
1504 fwd_txm4x4 = xd->lossless ? vp10_fwht4x4 : vpx_fdct4x4;
1507 fwd_txm4x4 = xd->lossless ? vp10_fwht4x4 : vpx_fdct4x4;
1508 #endif // CONFIG_VP9_HIGHBITDEPTH
1510 #if CONFIG_VP9_HIGHBITDEPTH
1511 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1512 vpx_highbd_subtract_block(
1513 height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1514 8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
1517 height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1518 8, src, p->src.stride, dst, pd->dst.stride);
1521 vpx_subtract_block(height, width,
1522 vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
1523 8, src, p->src.stride, dst, pd->dst.stride);
1524 #endif // CONFIG_VP9_HIGHBITDEPTH
1527 for (idy = 0; idy < height / 4; ++idy) {
1528 for (idx = 0; idx < width / 4; ++idx) {
1529 int64_t ssz, rd, rd1, rd2;
1532 k += (idy * 2 + idx);
1533 coeff = BLOCK_OFFSET(p->coeff, k);
1534 fwd_txm4x4(vp10_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
1536 vp10_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
1537 #if CONFIG_VP9_HIGHBITDEPTH
1538 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1539 thisdistortion += vp10_highbd_block_error(coeff,
1540 BLOCK_OFFSET(pd->dqcoeff, k),
1543 thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
1547 thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
1549 #endif // CONFIG_VP9_HIGHBITDEPTH
1551 thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4,
1552 so->scan, so->neighbors,
1553 cpi->sf.use_fast_coef_costing);
1554 rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
1555 rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
1556 rd = VPXMIN(rd1, rd2);
1562 *distortion = thisdistortion >> 2;
1563 *labelyrate = thisrate;
1564 *sse = thissse >> 2;
1566 return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
1577 ENTROPY_CONTEXT ta[2];
1578 ENTROPY_CONTEXT tl[2];
1590 PREDICTION_MODE modes[4];
1591 SEG_RDSTAT rdstat[4][INTER_MODES];
static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
  return (mv->row >> 3) < x->mv_row_min ||
         (mv->row >> 3) > x->mv_row_max ||
         (mv->col >> 3) < x->mv_col_min ||
         (mv->col >> 3) > x->mv_col_max;
}
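
// Added note: motion vectors are stored in 1/8-pel units, so the >> 3 above
// converts them to whole pixels before comparing against the full-pel search
// bounds (mv_row_min/max, mv_col_min/max).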
1602 static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
1603 MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0]->mbmi;
1604 struct macroblock_plane *const p = &x->plane[0];
1605 struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
1607 p->src.buf = &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i,
1609 assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
1610 pd->pre[0].buf = &pd->pre[0].buf[vp10_raster_block_offset(BLOCK_8X8, i,
1611 pd->pre[0].stride)];
1612 if (has_second_ref(mbmi))
1613 pd->pre[1].buf = &pd->pre[1].buf[vp10_raster_block_offset(BLOCK_8X8, i,
1614 pd->pre[1].stride)];
1617 static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
1618 struct buf_2d orig_pre[2]) {
1619 MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
1620 x->plane[0].src = orig_src;
1621 x->e_mbd.plane[0].pre[0] = orig_pre[0];
1622 if (has_second_ref(mbmi))
1623 x->e_mbd.plane[0].pre[1] = orig_pre[1];
1626 static INLINE int mv_has_subpel(const MV *mv) {
1627 return (mv->row & 0x0F) || (mv->col & 0x0F);
1630 // Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way encode zero motion.
1631 // TODO(aconverse): Find out if this is still productive then clean up or remove
1632 static int check_best_zero_mv(
1633 const VP10_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
1634 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
1635 const MV_REFERENCE_FRAME ref_frames[2]) {
1636 if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
1637 frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
1638 (ref_frames[1] == NONE ||
1639 frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
1640 int rfc = mode_context[ref_frames[0]];
1641 int c1 = cost_mv_ref(cpi, NEARMV, rfc);
1642 int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
1643 int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
1645 if (this_mode == NEARMV) {
1646 if (c1 > c3) return 0;
1647 } else if (this_mode == NEARESTMV) {
1648 if (c2 > c3) return 0;
1650 assert(this_mode == ZEROMV);
1651 if (ref_frames[1] == NONE) {
1652 if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
1653 (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
1656 if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
1657 frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
1658 (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
1659 frame_mv[NEARMV][ref_frames[1]].as_int == 0))
1667 static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
1670 int mi_row, int mi_col,
1671 int_mv single_newmv[MAX_REF_FRAMES],
1673 const VP10_COMMON *const cm = &cpi->common;
1674 const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
1675 const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
1676 MACROBLOCKD *xd = &x->e_mbd;
1677 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
1678 const int refs[2] = {mbmi->ref_frame[0],
1679 mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]};
1682 const InterpKernel *kernel = vp10_filter_kernels[mbmi->interp_filter];
1683 struct scale_factors sf;
1685 // Do joint motion search in compound mode to get more accurate mv.
1686 struct buf_2d backup_yv12[2][MAX_MB_PLANE];
1687 int last_besterr[2] = {INT_MAX, INT_MAX};
1688 const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
1689 vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
1690 vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
1693 // Prediction buffer from second frame.
1694 #if CONFIG_VP9_HIGHBITDEPTH
1695 DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
1696 uint8_t *second_pred;
1698 DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
1699 #endif // CONFIG_VP9_HIGHBITDEPTH
1701 for (ref = 0; ref < 2; ++ref) {
1702 ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
1704 if (scaled_ref_frame[ref]) {
1706 // Swap out the reference frame for a version that's been scaled to
1707 // match the resolution of the current frame, allowing the existing
1708 // motion search code to be used without additional modifications.
1709 for (i = 0; i < MAX_MB_PLANE; i++)
1710 backup_yv12[ref][i] = xd->plane[i].pre[ref];
1711 vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
1715 frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
1718 // Since we have scaled the reference frames to match the size of the current
1719 // frame we must use a unit scaling factor during mode selection.
1720 #if CONFIG_VP9_HIGHBITDEPTH
1721 vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
1722 cm->width, cm->height,
1723 cm->use_highbitdepth);
1725 vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
1726 cm->width, cm->height);
1727 #endif // CONFIG_VP9_HIGHBITDEPTH
1729 // Allow joint search multiple times iteratively for each reference frame
1730 // and break out of the search loop if it couldn't find a better mv.
1731 for (ite = 0; ite < 4; ite++) {
1732 struct buf_2d ref_yv12[2];
1733 int bestsme = INT_MAX;
1734 int sadpb = x->sadperbit16;
1736 int search_range = 3;
1738 int tmp_col_min = x->mv_col_min;
1739 int tmp_col_max = x->mv_col_max;
1740 int tmp_row_min = x->mv_row_min;
1741 int tmp_row_max = x->mv_row_max;
1742 int id = ite % 2; // Even iterations search in the first reference frame,
1743 // odd iterations search in the second. The predictor
1744 // found for the 'other' reference frame is factored in.
1746 // Initialized here because of compiler problem in Visual Studio.
1747 ref_yv12[0] = xd->plane[0].pre[0];
1748 ref_yv12[1] = xd->plane[0].pre[1];
1750 // Get the prediction block from the 'other' reference frame.
1751 #if CONFIG_VP9_HIGHBITDEPTH
1752 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1753 second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
1754 vp10_highbd_build_inter_predictor(ref_yv12[!id].buf,
1755 ref_yv12[!id].stride,
1757 &frame_mv[refs[!id]].as_mv,
1759 kernel, MV_PRECISION_Q3,
1760 mi_col * MI_SIZE, mi_row * MI_SIZE,
1763 second_pred = (uint8_t *)second_pred_alloc_16;
1764 vp10_build_inter_predictor(ref_yv12[!id].buf,
1765 ref_yv12[!id].stride,
1767 &frame_mv[refs[!id]].as_mv,
1769 kernel, MV_PRECISION_Q3,
1770 mi_col * MI_SIZE, mi_row * MI_SIZE);
1773 vp10_build_inter_predictor(ref_yv12[!id].buf,
1774 ref_yv12[!id].stride,
1776 &frame_mv[refs[!id]].as_mv,
1778 kernel, MV_PRECISION_Q3,
1779 mi_col * MI_SIZE, mi_row * MI_SIZE);
1780 #endif // CONFIG_VP9_HIGHBITDEPTH
1782 // Do compound motion search on the current reference frame.
1784 xd->plane[0].pre[0] = ref_yv12[id];
1785 vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
1787 // Use the mv result from the single mode as mv predictor.
1788 tmp_mv = frame_mv[refs[id]].as_mv;
1793 // Small-range full-pixel motion search.
1794 bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb,
1796 &cpi->fn_ptr[bsize],
1797 &ref_mv[id].as_mv, second_pred);
1798 if (bestsme < INT_MAX)
1799 bestsme = vp10_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
1800 second_pred, &cpi->fn_ptr[bsize], 1);
1802 x->mv_col_min = tmp_col_min;
1803 x->mv_col_max = tmp_col_max;
1804 x->mv_row_min = tmp_row_min;
1805 x->mv_row_max = tmp_row_max;
1807 if (bestsme < INT_MAX) {
1808 int dis; /* TODO: use dis in distortion calculation later. */
1810 bestsme = cpi->find_fractional_mv_step(
1813 cpi->common.allow_high_precision_mv,
1815 &cpi->fn_ptr[bsize],
1816 0, cpi->sf.mv.subpel_iters_per_step,
1818 x->nmvjointcost, x->mvcost,
1819 &dis, &sse, second_pred,
1823 // Restore the pointer to the first (possibly scaled) prediction buffer.
1825 xd->plane[0].pre[0] = ref_yv12[0];
1827 if (bestsme < last_besterr[id]) {
1828 frame_mv[refs[id]].as_mv = tmp_mv;
1829 last_besterr[id] = bestsme;
1837 for (ref = 0; ref < 2; ++ref) {
1838 if (scaled_ref_frame[ref]) {
1839 // Restore the prediction frame pointers to their unscaled versions.
1841 for (i = 0; i < MAX_MB_PLANE; i++)
1842 xd->plane[i].pre[ref] = backup_yv12[ref][i];
1845 *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
1846 &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
1847 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
1851 static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
1852 int_mv *best_ref_mv,
1853 int_mv *second_best_ref_mv,
1854 int64_t best_rd, int *returntotrate,
1856 int64_t *returndistortion,
1857 int *skippable, int64_t *psse,
1859 int_mv seg_mvs[4][MAX_REF_FRAMES],
1860 BEST_SEG_INFO *bsi_buf, int filter_idx,
1861 int mi_row, int mi_col) {
1863 BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
1864 MACROBLOCKD *xd = &x->e_mbd;
1865 MODE_INFO *mi = xd->mi[0];
1866 MB_MODE_INFO *mbmi = &mi->mbmi;
1868 int k, br = 0, idx, idy;
1869 int64_t bd = 0, block_sse = 0;
1870 PREDICTION_MODE this_mode;
1871 VP10_COMMON *cm = &cpi->common;
1872 struct macroblock_plane *const p = &x->plane[0];
1873 struct macroblockd_plane *const pd = &xd->plane[0];
1874 const int label_count = 4;
1875 int64_t this_segment_rd = 0;
1876 int label_mv_thresh;
1877 int segmentyrate = 0;
1878 const BLOCK_SIZE bsize = mbmi->sb_type;
1879 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1880 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1881 ENTROPY_CONTEXT t_above[2], t_left[2];
1882 int subpelmv = 1, have_ref = 0;
1883 const int has_second_rf = has_second_ref(mbmi);
1884 const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
1885 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1889 bsi->segment_rd = best_rd;
1890 bsi->ref_mv[0] = best_ref_mv;
1891 bsi->ref_mv[1] = second_best_ref_mv;
1892 bsi->mvp.as_int = best_ref_mv->as_int;
1893 bsi->mvthresh = mvthresh;
1895 for (i = 0; i < 4; i++)
1896 bsi->modes[i] = ZEROMV;
1898 memcpy(t_above, pd->above_context, sizeof(t_above));
1899 memcpy(t_left, pd->left_context, sizeof(t_left));
  // A multiplier of 64 makes this threshold so large that motion vectors are
  // very rarely checked on segments; setting it to 1 makes the per-segment MV
  // threshold roughly equal to what it is for whole macroblocks.
  label_mv_thresh = 1 * bsi->mvthresh / label_count;
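  // Example (added): with the multiplier of 1 and four labels per 8x8 block,
  // each label gets a quarter of the block-level threshold, e.g.
  // bsi->mvthresh = 4000 gives label_mv_thresh = 1000.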
1907 // Segmentation method overheads
1908 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
1909 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
1910 // TODO(jingning,rbultje): rewrite the rate-distortion optimization
1911 // loop for 4x4/4x8/8x4 block coding. to be replaced with new rd loop
1912 int_mv mode_mv[MB_MODE_COUNT][2];
1913 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
1914 PREDICTION_MODE mode_selected = ZEROMV;
1915 int64_t best_rd = INT64_MAX;
1916 const int i = idy * 2 + idx;
1919 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
1920 const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
1921 frame_mv[ZEROMV][frame].as_int = 0;
1922 vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
1923 &frame_mv[NEARESTMV][frame],
1924 &frame_mv[NEARMV][frame],
1925 mbmi_ext->mode_context);
1928 // search for the best motion vector on this segment
1929 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
1930 const struct buf_2d orig_src = x->plane[0].src;
1931 struct buf_2d orig_pre[2];
1933 mode_idx = INTER_OFFSET(this_mode);
1934 bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
1935 if (!(inter_mode_mask & (1 << this_mode)))
1938 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
1939 this_mode, mbmi->ref_frame))
1942 memcpy(orig_pre, pd->pre, sizeof(orig_pre));
1943 memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
1944 sizeof(bsi->rdstat[i][mode_idx].ta));
1945 memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
1946 sizeof(bsi->rdstat[i][mode_idx].tl));
1948 // motion search for newmv (single predictor case only)
1949 if (!has_second_rf && this_mode == NEWMV &&
1950 seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV) {
1951 MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
1953 int thissme, bestsme = INT_MAX;
1954 int sadpb = x->sadperbit4;
1959 /* Is the best so far sufficiently good that we can't justify doing
1960 * a new motion search. */
1961 if (best_rd < label_mv_thresh)
1964 if (cpi->oxcf.mode != BEST) {
1965 // use previous block's result as next block's MV predictor.
1967 bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
1969 bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
1973 max_mv = x->max_mv_context[mbmi->ref_frame[0]];
1976 VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
1978 if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
1979 // Take wtd average of the step_params based on the last frame's
1980 // max mv magnitude and the best ref mvs of the current block for
1981 // the given reference.
1982 step_param = (vp10_init_search_range(max_mv) +
1983 cpi->mv_step_param) / 2;
1985 step_param = cpi->mv_step_param;
1988 mvp_full.row = bsi->mvp.as_mv.row >> 3;
1989 mvp_full.col = bsi->mvp.as_mv.col >> 3;
1991 if (cpi->sf.adaptive_motion_search) {
1992 mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3;
1993 mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3;
1994 step_param = VPXMAX(step_param, 8);
1997 // adjust src pointer for this block
2000 vp10_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
2002 bestsme = vp10_full_pixel_search(
2003 cpi, x, bsize, &mvp_full, step_param, sadpb,
2004 cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
2005 &bsi->ref_mv[0]->as_mv, new_mv,
2008 // Should we do a full search (best quality only)
2009 if (cpi->oxcf.mode == BEST) {
2010 int_mv *const best_mv = &mi->bmi[i].as_mv[0];
2011 /* Check if mvp_full is within the range. */
2012 clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
2013 x->mv_row_min, x->mv_row_max);
2014 thissme = cpi->full_search_sad(x, &mvp_full,
2015 sadpb, 16, &cpi->fn_ptr[bsize],
2016 &bsi->ref_mv[0]->as_mv,
2018 cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] = INT_MAX;
2019 if (thissme < bestsme) {
2021 *new_mv = best_mv->as_mv;
2023 // The full search result is actually worse so re-instate the
2024 // previous best vector
2025 best_mv->as_mv = *new_mv;
2029 if (bestsme < INT_MAX) {
2031 cpi->find_fractional_mv_step(
2034 &bsi->ref_mv[0]->as_mv,
2035 cm->allow_high_precision_mv,
2036 x->errorperbit, &cpi->fn_ptr[bsize],
2037 cpi->sf.mv.subpel_force_stop,
2038 cpi->sf.mv.subpel_iters_per_step,
2039 cond_cost_list(cpi, cost_list),
2040 x->nmvjointcost, x->mvcost,
2042 &x->pred_sse[mbmi->ref_frame[0]],
2045 // save motion search result for use in compound prediction
2046 seg_mvs[i][mbmi->ref_frame[0]].as_mv = *new_mv;
2049 if (cpi->sf.adaptive_motion_search)
2050 x->pred_mv[mbmi->ref_frame[0]] = *new_mv;
2052 // restore src pointers
2053 mi_buf_restore(x, orig_src, orig_pre);
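// Compound prediction needs valid single-prediction MVs for both references;
// if either is still INVALID_MV this mode cannot be evaluated here.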
2056 if (has_second_rf) {
2057 if (seg_mvs[i][mbmi->ref_frame[1]].as_int == INVALID_MV ||
2058 seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV)
2062 if (has_second_rf && this_mode == NEWMV &&
2063 mbmi->interp_filter == EIGHTTAP) {
2064 // adjust src pointers
2066 if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
2068 joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
2069 mi_row, mi_col, seg_mvs[i],
2071 seg_mvs[i][mbmi->ref_frame[0]].as_int =
2072 frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
2073 seg_mvs[i][mbmi->ref_frame[1]].as_int =
2074 frame_mv[this_mode][mbmi->ref_frame[1]].as_int;
2076 // restore src pointers
2077 mi_buf_restore(x, orig_src, orig_pre);
2080 bsi->rdstat[i][mode_idx].brate =
2081 set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode],
2082 frame_mv, seg_mvs[i], bsi->ref_mv,
2083 x->nmvjointcost, x->mvcost);
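// For 4x8 and 8x4 partitions each label covers two 4x4 units, so the chosen
// MVs are also recorded for the extra covered block positions below.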
2085 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
2086 bsi->rdstat[i][mode_idx].mvs[ref].as_int =
2087 mode_mv[this_mode][ref].as_int;
2088 if (num_4x4_blocks_wide > 1)
2089 bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int =
2090 mode_mv[this_mode][ref].as_int;
2091 if (num_4x4_blocks_high > 1)
2092 bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
2093 mode_mv[this_mode][ref].as_int;
2096 // Trap vectors that reach beyond the UMV borders
2097 if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) ||
2099 mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
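// When evaluating the second or third filter, reuse the rate/distortion
// stats stored for an earlier filter if the MVs are identical and integer-pel
// (the interpolation filter cannot change the prediction in that case).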
2102 if (filter_idx > 0) {
2103 BEST_SEG_INFO *ref_bsi = bsi_buf;
2107 for (ref = 0; ref < 1 + has_second_rf; ++ref) {
2108 subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
2109 have_ref &= mode_mv[this_mode][ref].as_int ==
2110 ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
2113 if (filter_idx > 1 && !subpelmv && !have_ref) {
2114 ref_bsi = bsi_buf + 1;
2116 for (ref = 0; ref < 1 + has_second_rf; ++ref)
2117 have_ref &= mode_mv[this_mode][ref].as_int ==
2118 ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
2121 if (!subpelmv && have_ref &&
2122 ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
2123 memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
2124 sizeof(SEG_RDSTAT));
2125 if (num_4x4_blocks_wide > 1)
2126 bsi->rdstat[i + 1][mode_idx].eobs =
2127 ref_bsi->rdstat[i + 1][mode_idx].eobs;
2128 if (num_4x4_blocks_high > 1)
2129 bsi->rdstat[i + 2][mode_idx].eobs =
2130 ref_bsi->rdstat[i + 2][mode_idx].eobs;
2132 if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
2133 mode_selected = this_mode;
2134 best_rd = bsi->rdstat[i][mode_idx].brdcost;
2140 bsi->rdstat[i][mode_idx].brdcost =
2141 encode_inter_mb_segment(cpi, x,
2142 bsi->segment_rd - this_segment_rd, i,
2143 &bsi->rdstat[i][mode_idx].byrate,
2144 &bsi->rdstat[i][mode_idx].bdist,
2145 &bsi->rdstat[i][mode_idx].bsse,
2146 bsi->rdstat[i][mode_idx].ta,
2147 bsi->rdstat[i][mode_idx].tl,
2150 if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
2151 bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv,
2152 bsi->rdstat[i][mode_idx].brate, 0);
2153 bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
2154 bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
2155 if (num_4x4_blocks_wide > 1)
2156 bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1];
2157 if (num_4x4_blocks_high > 1)
2158 bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2];
2161 if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
2162 mode_selected = this_mode;
2163 best_rd = bsi->rdstat[i][mode_idx].brdcost;
2165 } /*for each 4x4 mode*/
2167 if (best_rd == INT64_MAX) {
2169 for (iy = i + 1; iy < 4; ++iy)
2170 for (midx = 0; midx < INTER_MODES; ++midx)
2171 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2172 bsi->segment_rd = INT64_MAX;
2176 mode_idx = INTER_OFFSET(mode_selected);
2177 memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
2178 memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
2180 set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
2181 frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
2184 br += bsi->rdstat[i][mode_idx].brate;
2185 bd += bsi->rdstat[i][mode_idx].bdist;
2186 block_sse += bsi->rdstat[i][mode_idx].bsse;
2187 segmentyrate += bsi->rdstat[i][mode_idx].byrate;
2188 this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;
2190 if (this_segment_rd > bsi->segment_rd) {
2192 for (iy = i + 1; iy < 4; ++iy)
2193 for (midx = 0; midx < INTER_MODES; ++midx)
2194 bsi->rdstat[iy][midx].brdcost = INT64_MAX;
2195 bsi->segment_rd = INT64_MAX;
2199 } /* for each label */
2203 bsi->segment_yrate = segmentyrate;
2204 bsi->segment_rd = this_segment_rd;
2205 bsi->sse = block_sse;
2207 // update the coding decisions
2208 for (k = 0; k < 4; ++k)
2209 bsi->modes[k] = mi->bmi[k].as_mode;
2211 if (bsi->segment_rd > best_rd)
2213 /* set it to the best */
2214 for (i = 0; i < 4; i++) {
2215 mode_idx = INTER_OFFSET(bsi->modes[i]);
2216 mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
2217 if (has_second_ref(mbmi))
2218 mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
2219 x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
2220 mi->bmi[i].as_mode = bsi->modes[i];
2224 * used to set mbmi->mv.as_int
2226 *returntotrate = bsi->r;
2227 *returndistortion = bsi->d;
2228 *returnyrate = bsi->segment_yrate;
2229 *skippable = vp10_is_skippable_in_plane(x, BLOCK_8X8, 0);
2231 mbmi->mode = bsi->modes[3];
2233 return bsi->segment_rd;
2236 static void estimate_ref_frame_costs(const VP10_COMMON *cm,
2237 const MACROBLOCKD *xd,
2239 unsigned int *ref_costs_single,
2240 unsigned int *ref_costs_comp,
2241 vpx_prob *comp_mode_p) {
2242 int seg_ref_active = segfeature_active(&cm->seg, segment_id,
2244 if (seg_ref_active) {
2245 memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
2246 memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
2249 vpx_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
2250 vpx_prob comp_inter_p = 128;
2252 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
2253 comp_inter_p = vp10_get_reference_mode_prob(cm, xd);
2254 *comp_mode_p = comp_inter_p;
2259 ref_costs_single[INTRA_FRAME] = vp10_cost_bit(intra_inter_p, 0);
2261 if (cm->reference_mode != COMPOUND_REFERENCE) {
2262 vpx_prob ref_single_p1 = vp10_get_pred_prob_single_ref_p1(cm, xd);
2263 vpx_prob ref_single_p2 = vp10_get_pred_prob_single_ref_p2(cm, xd);
2264 unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
2266 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2267 base_cost += vp10_cost_bit(comp_inter_p, 0);
2269 ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
2270 ref_costs_single[ALTREF_FRAME] = base_cost;
2271 ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
2272 ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
2273 ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
2274 ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
2275 ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
2277 ref_costs_single[LAST_FRAME] = 512;
2278 ref_costs_single[GOLDEN_FRAME] = 512;
2279 ref_costs_single[ALTREF_FRAME] = 512;
2281 if (cm->reference_mode != SINGLE_REFERENCE) {
2282 vpx_prob ref_comp_p = vp10_get_pred_prob_comp_ref_p(cm, xd);
2283 unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
2285 if (cm->reference_mode == REFERENCE_MODE_SELECT)
2286 base_cost += vp10_cost_bit(comp_inter_p, 1);
2288 ref_costs_comp[LAST_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 0);
2289 ref_costs_comp[GOLDEN_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 1);
2291 ref_costs_comp[LAST_FRAME] = 512;
2292 ref_costs_comp[GOLDEN_FRAME] = 512;
2297 static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
2299 int64_t comp_pred_diff[REFERENCE_MODES],
2300 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS],
2302 MACROBLOCKD *const xd = &x->e_mbd;
2304 // Take a snapshot of the coding context so it can be
2305 // restored if we decide to encode this way
2306 ctx->skip = x->skip;
2307 ctx->skippable = skippable;
2308 ctx->best_mode_index = mode_index;
2309 ctx->mic = *xd->mi[0];
2310 ctx->mbmi_ext = *x->mbmi_ext;
2311 ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
2312 ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
2313 ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
2315 memcpy(ctx->best_filter_diff, best_filter_diff,
2316 sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
2319 static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
2320 MV_REFERENCE_FRAME ref_frame,
2321 BLOCK_SIZE block_size,
2322 int mi_row, int mi_col,
2323 int_mv frame_nearest_mv[MAX_REF_FRAMES],
2324 int_mv frame_near_mv[MAX_REF_FRAMES],
2325 struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
2326 const VP10_COMMON *cm = &cpi->common;
2327 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
2328 MACROBLOCKD *const xd = &x->e_mbd;
2329 MODE_INFO *const mi = xd->mi[0];
2330 int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
2331 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
2332 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2334 assert(yv12 != NULL);
2336 // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
2337 // use the UV scaling factors.
2338 vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
2340 // Gets an initial list of candidate vectors from neighbours and orders them
2341 vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
2342 NULL, NULL, mbmi_ext->mode_context);
2344 // Candidate refinement carried out at encoder and decoder
2345 vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
2346 &frame_nearest_mv[ref_frame],
2347 &frame_near_mv[ref_frame]);
2349 // Further refinement that is encode side only to test the top few candidates
2350 // in full and choose the best as the centre point for subsequent searches.
2351 // The current implementation doesn't support scaling.
2352 if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
2353 vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
2354 ref_frame, block_size);
2357 static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
2359 int mi_row, int mi_col,
2360 int_mv *tmp_mv, int *rate_mv) {
2361 MACROBLOCKD *xd = &x->e_mbd;
2362 const VP10_COMMON *cm = &cpi->common;
2363 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
2364 struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
2365 int bestsme = INT_MAX;
2367 int sadpb = x->sadperbit16;
2369 int ref = mbmi->ref_frame[0];
2370 MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2372 int tmp_col_min = x->mv_col_min;
2373 int tmp_col_max = x->mv_col_max;
2374 int tmp_row_min = x->mv_row_min;
2375 int tmp_row_max = x->mv_row_max;
2378 const YV12_BUFFER_CONFIG *scaled_ref_frame = vp10_get_scaled_ref_frame(cpi,
2382 pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
2383 pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
2384 pred_mv[2] = x->pred_mv[ref];
2386 if (scaled_ref_frame) {
2388 // Swap out the reference frame for a version that's been scaled to
2389 // match the resolution of the current frame, allowing the existing
2390 // motion search code to be used without additional modifications.
2391 for (i = 0; i < MAX_MB_PLANE; i++)
2392 backup_yv12[i] = xd->plane[i].pre[0];
2394 vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
2397 vp10_set_mv_search_range(x, &ref_mv);
2399 // Work out the size of the first step in the mv step search.
2400 // 0 here is the maximum length first step; 1 is half the maximum, etc.
2401 if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
2402 // Take wtd average of the step_params based on the last frame's
2403 // max mv magnitude and that based on the best ref mvs of the current
2404 // block for the given reference.
2405 step_param = (vp10_init_search_range(x->max_mv_context[ref]) +
2406 cpi->mv_step_param) / 2;
2408 step_param = cpi->mv_step_param;
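// With adaptive motion search, smaller blocks start from an MV predictor
// that has usually been refined already by larger-block searches, so a
// larger first step (a shallower search) is imposed for them below.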
2411 if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) {
2413 2 * (b_width_log2_lookup[BLOCK_64X64] -
2414 VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
2415 step_param = VPXMAX(step_param, boffset);
2418 if (cpi->sf.adaptive_motion_search) {
2419 int bwl = b_width_log2_lookup[bsize];
2420 int bhl = b_height_log2_lookup[bsize];
2421 int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
2426 // prev_mv_sad is not setup for dynamically scaled frames.
2427 if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
2429 for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
2430 if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
2431 x->pred_mv[ref].row = 0;
2432 x->pred_mv[ref].col = 0;
2433 tmp_mv->as_int = INVALID_MV;
2435 if (scaled_ref_frame) {
2437 for (i = 0; i < MAX_MB_PLANE; ++i)
2438 xd->plane[i].pre[0] = backup_yv12[i];
2446 mvp_full = pred_mv[x->mv_best_ref_index[ref]];
2451 bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
2452 cond_cost_list(cpi, cost_list),
2453 &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
2455 x->mv_col_min = tmp_col_min;
2456 x->mv_col_max = tmp_col_max;
2457 x->mv_row_min = tmp_row_min;
2458 x->mv_row_max = tmp_row_max;
2460 if (bestsme < INT_MAX) {
2461 int dis; /* TODO: use dis in distortion calculation later. */
2462 cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
2463 cm->allow_high_precision_mv,
2465 &cpi->fn_ptr[bsize],
2466 cpi->sf.mv.subpel_force_stop,
2467 cpi->sf.mv.subpel_iters_per_step,
2468 cond_cost_list(cpi, cost_list),
2469 x->nmvjointcost, x->mvcost,
2470 &dis, &x->pred_sse[ref], NULL, 0, 0);
2472 *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
2473 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2475 if (cpi->sf.adaptive_motion_search)
2476 x->pred_mv[ref] = tmp_mv->as_mv;
2478 if (scaled_ref_frame) {
2480 for (i = 0; i < MAX_MB_PLANE; i++)
2481 xd->plane[i].pre[0] = backup_yv12[i];
2487 static INLINE void restore_dst_buf(MACROBLOCKD *xd,
2488 uint8_t *orig_dst[MAX_MB_PLANE],
2489 int orig_dst_stride[MAX_MB_PLANE]) {
2491 for (i = 0; i < MAX_MB_PLANE; i++) {
2492 xd->plane[i].dst.buf = orig_dst[i];
2493 xd->plane[i].dst.stride = orig_dst_stride[i];
2497 // In some situations we want to discount the apparent cost of a new motion
2498 // vector. Where there is a subtle motion field, and especially where there is
2499 // low spatial complexity, it can be hard to cover the cost of a new motion
2500 // vector in a single block, even if that motion vector reduces distortion.
2501 // However, once established that vector may be usable through the nearest and
2502 // near mv modes to reduce distortion in subsequent blocks and also improve
2504 static int discount_newmv_test(const VP10_COMP *cpi,
2507 int_mv (*mode_mv)[MAX_REF_FRAMES],
2509 return (!cpi->rc.is_src_frame_alt_ref &&
2510 (this_mode == NEWMV) &&
2511 (this_mv.as_int != 0) &&
2512 ((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
2513 (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
2514 ((mode_mv[NEARMV][ref_frame].as_int == 0) ||
2515 (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
2518 #define LEFT_TOP_MARGIN ((VP9_ENC_BORDER_IN_PIXELS - VP9_INTERP_EXTEND) << 3)
2519 #define RIGHT_BOTTOM_MARGIN ((VP9_ENC_BORDER_IN_PIXELS -\
2520 VP9_INTERP_EXTEND) << 3)
2522 // TODO(jingning): this mv clamping function should be block size dependent.
2523 static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
2524 clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN,
2525 xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
2526 xd->mb_to_top_edge - LEFT_TOP_MARGIN,
2527 xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
2530 static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
2532 int *rate2, int64_t *distortion,
2534 int *rate_y, int *rate_uv,
2536 int_mv (*mode_mv)[MAX_REF_FRAMES],
2537 int mi_row, int mi_col,
2538 int_mv single_newmv[MAX_REF_FRAMES],
2539 INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
2540 int (*single_skippable)[MAX_REF_FRAMES],
2542 const int64_t ref_best_rd,
2543 int64_t *mask_filter,
2544 int64_t filter_cache[]) {
2545 VP10_COMMON *cm = &cpi->common;
2546 MACROBLOCKD *xd = &x->e_mbd;
2547 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
2548 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2549 const int is_comp_pred = has_second_ref(mbmi);
2550 const int this_mode = mbmi->mode;
2551 int_mv *frame_mv = mode_mv[this_mode];
2553 int refs[2] = { mbmi->ref_frame[0],
2554 (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
2556 #if CONFIG_VP9_HIGHBITDEPTH
2557 DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
2560 DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
2561 #endif // CONFIG_VP9_HIGHBITDEPTH
2562 int pred_exists = 0;
2564 int64_t rd, tmp_rd, best_rd = INT64_MAX;
2565 int best_needs_copy = 0;
2566 uint8_t *orig_dst[MAX_MB_PLANE];
2567 int orig_dst_stride[MAX_MB_PLANE];
2569 INTERP_FILTER best_filter = SWITCHABLE;
2570 uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0};
2571 int64_t bsse[MAX_MB_PLANE << 2] = {0};
2573 int bsl = mi_width_log2_lookup[bsize];
2574 int pred_filter_search = cpi->sf.cb_pred_filter_search ?
2575 (((mi_row + mi_col) >> bsl) +
2576 get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
2578 int skip_txfm_sb = 0;
2579 int64_t skip_sse_sb = INT64_MAX;
2580 int64_t distortion_y = 0, distortion_uv = 0;
2582 #if CONFIG_VP9_HIGHBITDEPTH
2583 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2584 tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
2586 tmp_buf = (uint8_t *)tmp_buf16;
2588 #endif // CONFIG_VP9_HIGHBITDEPTH
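// Chessboard prediction-filter search: on alternating blocks, if this is not
// a NEWMV block or the above and left neighbours agree on a filter, the
// neighbouring filter is adopted instead of searching all of them.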
2590 if (pred_filter_search) {
2591 INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
2592 if (xd->up_available)
2593 af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
2594 if (xd->left_available)
2595 lf = xd->mi[-1]->mbmi.interp_filter;
2597 if ((this_mode != NEWMV) || (af == lf))
2602 if (frame_mv[refs[0]].as_int == INVALID_MV ||
2603 frame_mv[refs[1]].as_int == INVALID_MV)
2606 if (cpi->sf.adaptive_mode_search) {
2607 if (single_filter[this_mode][refs[0]] ==
2608 single_filter[this_mode][refs[1]])
2609 best_filter = single_filter[this_mode][refs[0]];
2613 if (this_mode == NEWMV) {
2616 // Initialize mv using single prediction mode result.
2617 frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
2618 frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
2620 if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
2621 joint_motion_search(cpi, x, bsize, frame_mv,
2622 mi_row, mi_col, single_newmv, &rate_mv);
2624 rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
2625 &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
2626 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2627 rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
2628 &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
2629 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2634 single_motion_search(cpi, x, bsize, mi_row, mi_col,
2636 if (tmp_mv.as_int == INVALID_MV)
2639 frame_mv[refs[0]].as_int =
2640 xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
2641 single_newmv[refs[0]].as_int = tmp_mv.as_int;
2643 // Estimate the rate implications of a new mv but discount this
2644 // under certain circumstances where we want to help initiate a weak
2645 // motion field, where the distortion gain for a single block may not
2646 // be enough to overcome the cost of a new mv.
2647 if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
2648 *rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
2655 for (i = 0; i < is_comp_pred + 1; ++i) {
2656 cur_mv[i] = frame_mv[refs[i]];
2657 // Clip "next_nearest" so that it does not extend too far out of the image
2658 if (this_mode != NEWMV)
2659 clamp_mv2(&cur_mv[i].as_mv, xd);
2661 if (mv_check_bounds(x, &cur_mv[i].as_mv))
2663 mbmi->mv[i].as_int = cur_mv[i].as_int;
2666 // do first prediction into the destination buffer. Do the next
2667 // prediction into a temporary buffer. Then keep track of which one
2668 // of these currently holds the best predictor, and use the other
2669 // one for future predictions. In the end, copy from tmp_buf to
2670 // dst if necessary.
2671 for (i = 0; i < MAX_MB_PLANE; i++) {
2672 orig_dst[i] = xd->plane[i].dst.buf;
2673 orig_dst_stride[i] = xd->plane[i].dst.stride;
2676 // We don't include the cost of the second reference here, because there
2677 // are only three options: Last/Golden, ARF/Last or Golden/ARF, or in other
2678 // words if you present them in that order, the second one is always known
2679 // if the first is known.
2681 // Under some circumstances we discount the cost of new mv mode to encourage
2682 // initiation of a motion field.
2683 if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
2684 mode_mv, refs[0])) {
2685 *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode,
2686 mbmi_ext->mode_context[refs[0]]),
2687 cost_mv_ref(cpi, NEARESTMV,
2688 mbmi_ext->mode_context[refs[0]]));
2690 *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
2693 if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
2694 mbmi->mode != NEARESTMV)
2698 // Are all MVs integer pel for Y and UV
2699 intpel_mv = !mv_has_subpel(&mbmi->mv[0].as_mv);
2701 intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
2703 // Search for best switchable filter by checking the variance of
2704 // pred error irrespective of whether the filter will be used
2705 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
2706 filter_cache[i] = INT64_MAX;
2708 if (cm->interp_filter != BILINEAR) {
2709 if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
2710 best_filter = EIGHTTAP;
2711 } else if (best_filter == SWITCHABLE) {
2713 int tmp_rate_sum = 0;
2714 int64_t tmp_dist_sum = 0;
2716 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
2719 int tmp_skip_sb = 0;
2720 int64_t tmp_skip_sse = INT64_MAX;
2722 mbmi->interp_filter = i;
2723 rs = vp10_get_switchable_rate(cpi, xd);
2724 rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
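// With purely integer-pel MVs the prediction is identical for every
// interpolation filter, so the rate/distortion modeled for the first filter
// can simply be reused for the later ones.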
2726 if (i > 0 && intpel_mv) {
2727 rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
2728 filter_cache[i] = rd;
2729 filter_cache[SWITCHABLE_FILTERS] =
2730 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2731 if (cm->interp_filter == SWITCHABLE)
2733 *mask_filter = VPXMAX(*mask_filter, rd);
2736 int64_t dist_sum = 0;
2737 if (i > 0 && cpi->sf.adaptive_interp_filter_search &&
2738 (cpi->sf.interp_filter_search_mask & (1 << i))) {
2740 dist_sum = INT64_MAX;
2744 if ((cm->interp_filter == SWITCHABLE &&
2745 (!i || best_needs_copy)) ||
2746 (cm->interp_filter != SWITCHABLE &&
2747 (cm->interp_filter == mbmi->interp_filter ||
2748 (i == 0 && intpel_mv)))) {
2749 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2751 for (j = 0; j < MAX_MB_PLANE; j++) {
2752 xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
2753 xd->plane[j].dst.stride = 64;
2756 vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2757 model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
2758 &tmp_skip_sb, &tmp_skip_sse);
2760 rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
2761 filter_cache[i] = rd;
2762 filter_cache[SWITCHABLE_FILTERS] =
2763 VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
2764 if (cm->interp_filter == SWITCHABLE)
2766 *mask_filter = VPXMAX(*mask_filter, rd);
2768 if (i == 0 && intpel_mv) {
2769 tmp_rate_sum = rate_sum;
2770 tmp_dist_sum = dist_sum;
2774 if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2775 if (rd / 2 > ref_best_rd) {
2776 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2780 newbest = i == 0 || rd < best_rd;
2784 best_filter = mbmi->interp_filter;
2785 if (cm->interp_filter == SWITCHABLE && i && !intpel_mv)
2786 best_needs_copy = !best_needs_copy;
2789 if ((cm->interp_filter == SWITCHABLE && newbest) ||
2790 (cm->interp_filter != SWITCHABLE &&
2791 cm->interp_filter == mbmi->interp_filter)) {
2795 skip_txfm_sb = tmp_skip_sb;
2796 skip_sse_sb = tmp_skip_sse;
2797 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2798 memcpy(bsse, x->bsse, sizeof(bsse));
2801 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2804 // Set the appropriate filter
2805 mbmi->interp_filter = cm->interp_filter != SWITCHABLE ?
2806 cm->interp_filter : best_filter;
2807 rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
2810 if (best_needs_copy) {
2811 // again temporarily set the buffers to local memory to prevent a memcpy
2812 for (i = 0; i < MAX_MB_PLANE; i++) {
2813 xd->plane[i].dst.buf = tmp_buf + i * 64 * 64;
2814 xd->plane[i].dst.stride = 64;
2817 rd = tmp_rd + RDCOST(x->rdmult, x->rddiv, rs, 0);
2821 // Handles the special case when a filter that is not in the
2822 // switchable list (e.g. bilinear) is indicated at the frame level, or
2823 // skip condition holds.
2824 vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
2825 model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
2826 &skip_txfm_sb, &skip_sse_sb);
2827 rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
2828 memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
2829 memcpy(bsse, x->bsse, sizeof(bsse));
2833 single_filter[this_mode][refs[0]] = mbmi->interp_filter;
2835 if (cpi->sf.adaptive_mode_search)
2837 if (single_skippable[this_mode][refs[0]] &&
2838 single_skippable[this_mode][refs[1]])
2839 memset(skip_txfm, SKIP_TXFM_AC_DC, sizeof(skip_txfm));
2841 if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
2842 // if current pred_error modeled rd is substantially more than the best
2843 // so far, do not bother doing full rd
2844 if (rd / 2 > ref_best_rd) {
2845 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2850 if (cm->interp_filter == SWITCHABLE)
2853 memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
2854 memcpy(x->bsse, bsse, sizeof(bsse));
2856 if (!skip_txfm_sb) {
2857 int skippable_y, skippable_uv;
2858 int64_t sseuv = INT64_MAX;
2859 int64_t rdcosty = INT64_MAX;
2861 // Y cost and distortion
2862 vp10_subtract_plane(x, bsize, 0);
2863 super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
2864 bsize, ref_best_rd);
2866 if (*rate_y == INT_MAX) {
2868 *distortion = INT64_MAX;
2869 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2874 *distortion += distortion_y;
2876 rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
2877 rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
2879 if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
2880 &sseuv, bsize, ref_best_rd - rdcosty)) {
2882 *distortion = INT64_MAX;
2883 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2889 *distortion += distortion_uv;
2890 *skippable = skippable_y && skippable_uv;
2895 // The cost of skip bit needs to be added.
2896 *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
2898 *distortion = skip_sse_sb;
2902 single_skippable[this_mode][refs[0]] = *skippable;
2904 restore_dst_buf(xd, orig_dst, orig_dst_stride);
2905 return 0; // The rate-distortion cost will be re-calculated by caller.
2908 void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x,
2909 RD_COST *rd_cost, BLOCK_SIZE bsize,
2910 PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
2911 VP10_COMMON *const cm = &cpi->common;
2912 MACROBLOCKD *const xd = &x->e_mbd;
2913 struct macroblockd_plane *const pd = xd->plane;
2914 int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
2915 int y_skip = 0, uv_skip = 0;
2916 int64_t dist_y = 0, dist_uv = 0;
2917 TX_SIZE max_uv_tx_size;
2919 xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
2920 xd->mi[0]->mbmi.ref_frame[1] = NONE;
2922 if (bsize >= BLOCK_8X8) {
2923 if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
2924 &dist_y, &y_skip, bsize,
2925 best_rd) >= best_rd) {
2926 rd_cost->rate = INT_MAX;
2931 if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
2932 &dist_y, best_rd) >= best_rd) {
2933 rd_cost->rate = INT_MAX;
2937 max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->mbmi.tx_size, bsize,
2938 pd[1].subsampling_x,
2939 pd[1].subsampling_y);
2940 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly,
2941 &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize),
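// If both luma and chroma are fully skippable only the skip flag is costed
// (the token-only rates are backed out); otherwise the no-skip flag is added.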
2944 if (y_skip && uv_skip) {
2945 rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
2946 vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
2947 rd_cost->dist = dist_y + dist_uv;
2949 rd_cost->rate = rate_y + rate_uv +
2950 vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
2951 rd_cost->dist = dist_y + dist_uv;
2954 ctx->mic = *xd->mi[0];
2955 ctx->mbmi_ext = *x->mbmi_ext;
2956 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
2959 // This function is designed to apply a bias or adjustment to an rd value based
2960 // on the relative variance of the source and reconstruction.
2961 #define LOW_VAR_THRESH 16
2962 #define VLOW_ADJ_MAX 25
2963 #define VHIGH_ADJ_MAX 8
2964 static void rd_variance_adjustment(VP10_COMP *cpi,
2968 MV_REFERENCE_FRAME ref_frame,
2969 unsigned int source_variance) {
2970 MACROBLOCKD *const xd = &x->e_mbd;
2971 unsigned int recon_variance;
2972 unsigned int absvar_diff = 0;
2973 int64_t var_error = 0;
2974 int64_t var_factor = 0;
2976 if (*this_rd == INT64_MAX)
2979 #if CONFIG_VP9_HIGHBITDEPTH
2980 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2982 vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize, xd->bd);
2985 vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2989 vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
2990 #endif // CONFIG_VP9_HIGHBITDEPTH
2992 if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
2993 absvar_diff = (source_variance > recon_variance)
2994 ? (source_variance - recon_variance)
2995 : (recon_variance - source_variance);
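// var_error is 0 when the source and reconstruction variances match and
// tends towards 100 as they diverge, giving a bounded dissimilarity measure.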
2997 var_error = (200 * source_variance * recon_variance) /
2998 ((source_variance * source_variance) +
2999 (recon_variance * recon_variance));
3000 var_error = 100 - var_error;
3003 // Source variance above a threshold and ref frame is intra.
3004 // This case is targeted mainly at discouraging intra modes that give rise
3005 // to a predictor with a low spatial complexity compared to the source.
3006 if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
3007 (source_variance > recon_variance)) {
3008 var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
3009 // A second possible case of interest is where the source variance
3010 // is very low and we wish to discourage false texture or motion trails.
3011 } else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
3012 (recon_variance > source_variance)) {
3013 var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
3015 *this_rd += (*this_rd * var_factor) / 100;
3019 // Do we have an internal image edge (e.g. formatting bars).
3020 int vp10_internal_image_edge(VP10_COMP *cpi) {
3021 return (cpi->oxcf.pass == 2) &&
3022 ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
3023 (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
3026 // Checks to see if a super block is on a horizontal image edge.
3027 // In most cases this is the "real" edge unless there are formatting
3028 // bars embedded in the stream.
3029 int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) {
3031 int bottom_edge = cpi->common.mi_rows;
3032 int is_active_h_edge = 0;
3034 // For two pass account for any formatting bars detected.
3035 if (cpi->oxcf.pass == 2) {
3036 TWO_PASS *twopass = &cpi->twopass;
3038 // The inactive region is specified in MBs not mi units.
3039 // The image edge is in the following MB row.
3040 top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
3042 bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
3043 bottom_edge = VPXMAX(top_edge, bottom_edge);
3046 if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
3047 ((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) {
3048 is_active_h_edge = 1;
3050 return is_active_h_edge;
3053 // Checks to see if a super block is on a vertical image edge.
3054 // In most cases this is the "real" edge unless there are formatting
3055 // bars embedded in the stream.
3056 int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
3058 int right_edge = cpi->common.mi_cols;
3059 int is_active_v_edge = 0;
3061 // For two pass account for any formatting bars detected.
3062 if (cpi->oxcf.pass == 2) {
3063 TWO_PASS *twopass = &cpi->twopass;
3065 // The inactive region is specified in MBs not mi units.
3066 // The image edge is in the following MB column.
3067 left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
3069 right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
3070 right_edge = VPXMAX(left_edge, right_edge);
3073 if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
3074 ((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) {
3075 is_active_v_edge = 1;
3077 return is_active_v_edge;
3080 // Checks to see if a super block is at the edge of the active image.
3081 // In most cases this is the "real" edge unless there are formatting
3082 // bars embedded in the stream.
3083 int vp10_active_edge_sb(VP10_COMP *cpi,
3084 int mi_row, int mi_col) {
3085 return vp10_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
3086 vp10_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
3089 void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
3090 TileDataEnc *tile_data,
3092 int mi_row, int mi_col,
3093 RD_COST *rd_cost, BLOCK_SIZE bsize,
3094 PICK_MODE_CONTEXT *ctx,
3095 int64_t best_rd_so_far) {
3096 VP10_COMMON *const cm = &cpi->common;
3097 RD_OPT *const rd_opt = &cpi->rd;
3098 SPEED_FEATURES *const sf = &cpi->sf;
3099 MACROBLOCKD *const xd = &x->e_mbd;
3100 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
3101 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
3102 const struct segmentation *const seg = &cm->seg;
3103 PREDICTION_MODE this_mode;
3104 MV_REFERENCE_FRAME ref_frame, second_ref_frame;
3105 unsigned char segment_id = mbmi->segment_id;
3106 int comp_pred, i, k;
3107 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
3108 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
3109 int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
3110 INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES];
3111 int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
3112 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
3114 int64_t best_rd = best_rd_so_far;
3115 int64_t best_pred_diff[REFERENCE_MODES];
3116 int64_t best_pred_rd[REFERENCE_MODES];
3117 int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
3118 int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
3119 MB_MODE_INFO best_mbmode;
3120 int best_mode_skippable = 0;
3121 int midx, best_mode_index = -1;
3122 unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
3123 vpx_prob comp_mode_p;
3124 int64_t best_intra_rd = INT64_MAX;
3125 unsigned int best_pred_sse = UINT_MAX;
3126 PREDICTION_MODE best_intra_mode = DC_PRED;
3127 int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
3128 int64_t dist_uv[TX_SIZES];
3129 int skip_uv[TX_SIZES];
3130 PREDICTION_MODE mode_uv[TX_SIZES];
3131 const int intra_cost_penalty = vp10_get_intra_cost_penalty(
3132 cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
3134 uint8_t ref_frame_skip_mask[2] = { 0 };
3135 uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
3136 int mode_skip_start = sf->mode_skip_start + 1;
3137 const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
3138 const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
3139 int64_t mode_threshold[MAX_MODES];
3140 int *mode_map = tile_data->mode_map[bsize];
3141 const int mode_search_skip_flags = sf->mode_search_skip_flags;
3142 int64_t mask_filter = 0;
3143 int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
3145 vp10_zero(best_mbmode);
3147 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
3148 filter_cache[i] = INT64_MAX;
3150 estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
3153 for (i = 0; i < REFERENCE_MODES; ++i)
3154 best_pred_rd[i] = INT64_MAX;
3155 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3156 best_filter_rd[i] = INT64_MAX;
3157 for (i = 0; i < TX_SIZES; i++)
3158 rate_uv_intra[i] = INT_MAX;
3159 for (i = 0; i < MAX_REF_FRAMES; ++i)
3160 x->pred_sse[i] = INT_MAX;
3161 for (i = 0; i < MB_MODE_COUNT; ++i) {
3162 for (k = 0; k < MAX_REF_FRAMES; ++k) {
3163 single_inter_filter[i][k] = SWITCHABLE;
3164 single_skippable[i][k] = 0;
3168 rd_cost->rate = INT_MAX;
3170 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3171 x->pred_mv_sad[ref_frame] = INT_MAX;
3172 if (cpi->ref_frame_flags & flag_list[ref_frame]) {
3173 assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
3174 setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
3175 frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
3177 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
3178 frame_mv[ZEROMV][ref_frame].as_int = 0;
3181 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
3182 if (!(cpi->ref_frame_flags & flag_list[ref_frame])) {
3183 // Skip checking missing references in both single and compound reference
3184 // modes. Note that a mode will be skipped iff both reference frames
3186 ref_frame_skip_mask[0] |= (1 << ref_frame);
3187 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3189 for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
3190 // Skip fixed mv modes for poor references
3191 if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) {
3192 mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
3197 // If the segment-level reference frame feature is enabled, skip the
3198 // current ref frame when it is not the one allowed for this segment.
3199 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
3200 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
3201 ref_frame_skip_mask[0] |= (1 << ref_frame);
3202 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3206 // Disable this drop out case if the ref frame
3207 // segment level feature is enabled for this segment. This is to
3208 // prevent the possibility that we end up unable to pick any mode.
3209 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
3210 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
3211 // unless ARNR filtering is enabled in which case we want
3212 // an unfiltered alternative. We allow near/nearest as well
3213 // because they may result in zero-zero MVs but be cheaper.
3214 if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
3215 ref_frame_skip_mask[0] = (1 << LAST_FRAME) | (1 << GOLDEN_FRAME);
3216 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3217 mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
3218 if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0)
3219 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
3220 if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0)
3221 mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
3225 if (cpi->rc.is_src_frame_alt_ref) {
3226 if (sf->alt_ref_search_fp) {
3227 mode_skip_mask[ALTREF_FRAME] = 0;
3228 ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME);
3229 ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
3233 if (sf->alt_ref_search_fp)
3234 if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX)
3235 if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1))
3236 mode_skip_mask[ALTREF_FRAME] |= INTER_ALL;
3238 if (sf->adaptive_mode_search) {
3239 if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
3240 cpi->rc.frames_since_golden >= 3)
3241 if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1))
3242 mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL;
3245 if (bsize > sf->max_intra_bsize) {
3246 ref_frame_skip_mask[0] |= (1 << INTRA_FRAME);
3247 ref_frame_skip_mask[1] |= (1 << INTRA_FRAME);
3250 mode_skip_mask[INTRA_FRAME] |=
3251 ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
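// Per-mode rd thresholds: the first few modes are never pruned by this
// threshold, while later modes are pruned against a threshold scaled by
// rd_thresh_freq_fact, which adapts to how often each mode has been selected.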
3253 for (i = 0; i <= LAST_NEW_MV_INDEX; ++i)
3254 mode_threshold[i] = 0;
3255 for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
3256 mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
3258 midx = sf->schedule_mode_search ? mode_skip_start : 0;
3260 uint8_t end_pos = 0;
3261 for (i = 5; i < midx; ++i) {
3262 if (mode_threshold[mode_map[i - 1]] > mode_threshold[mode_map[i]]) {
3263 uint8_t tmp = mode_map[i];
3264 mode_map[i] = mode_map[i - 1];
3265 mode_map[i - 1] = tmp;
3272 mbmi->palette_mode_info.palette_size[0] = 0;
3273 mbmi->palette_mode_info.palette_size[1] = 0;
3274 for (midx = 0; midx < MAX_MODES; ++midx) {
3275 int mode_index = mode_map[midx];
3276 int mode_excluded = 0;
3277 int64_t this_rd = INT64_MAX;
3278 int disable_skip = 0;
3279 int compmode_cost = 0;
3280 int rate2 = 0, rate_y = 0, rate_uv = 0;
3281 int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
3284 int64_t total_sse = INT64_MAX;
3287 this_mode = vp10_mode_order[mode_index].mode;
3288 ref_frame = vp10_mode_order[mode_index].ref_frame[0];
3289 second_ref_frame = vp10_mode_order[mode_index].ref_frame[1];
3291 // Look at the reference frame of the best mode so far and set the
3292 // skip mask to look at a subset of the remaining modes.
3293 if (midx == mode_skip_start && best_mode_index >= 0) {
3294 switch (best_mbmode.ref_frame[0]) {
3298 ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
3299 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3302 ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
3303 ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
3306 ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK;
3309 case MAX_REF_FRAMES:
3310 assert(0 && "Invalid Reference frame");
3315 if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
3316 (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
3319 if (mode_skip_mask[ref_frame] & (1 << this_mode))
3322 // Test best rd so far against threshold for trying this mode.
3323 if (best_mode_skippable && sf->schedule_mode_search)
3324 mode_threshold[mode_index] <<= 1;
3326 if (best_rd < mode_threshold[mode_index])
3329 comp_pred = second_ref_frame > INTRA_FRAME;
3331 if (!cpi->allow_comp_inter_inter)
3334 // Skip compound inter modes if ARF is not available.
3335 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
3338 // Do not allow compound prediction if the segment level reference frame
3339 // feature is in use as in this case there can only be one reference.
3340 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
3343 if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
3344 best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
3347 mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
3349 if (ref_frame != INTRA_FRAME)
3350 mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
3353 if (ref_frame == INTRA_FRAME) {
3354 if (sf->adaptive_mode_search)
3355 if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse)
3358 if (this_mode != DC_PRED) {
3359 // Disable intra modes other than DC_PRED for blocks with low variance
3360 // Threshold for intra skipping based on source variance
3361 // TODO(debargha): Specialize the threshold for super block sizes
3362 const unsigned int skip_intra_var_thresh = 64;
3363 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
3364 x->source_variance < skip_intra_var_thresh)
3366 // Only search the oblique modes if the best so far is
3367 // one of the neighboring directional modes
3368 if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
3369 (this_mode >= D45_PRED && this_mode <= TM_PRED)) {
3370 if (best_mode_index >= 0 &&
3371 best_mbmode.ref_frame[0] > INTRA_FRAME)
3374 if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
3375 if (conditional_skipintra(this_mode, best_intra_mode))
3380 const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
3381 if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
3382 this_mode, ref_frames))
3386 mbmi->mode = this_mode;
3387 mbmi->uv_mode = DC_PRED;
3388 mbmi->ref_frame[0] = ref_frame;
3389 mbmi->ref_frame[1] = second_ref_frame;
3390 // Evaluate all sub-pel filters irrespective of whether we can use
3391 // them for this frame.
3392 mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
3393 : cm->interp_filter;
3394 mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
3397 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
3399 // Select prediction reference frames.
3400 for (i = 0; i < MAX_MB_PLANE; i++) {
3401 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
3403 xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
3406 if (ref_frame == INTRA_FRAME) {
3408 struct macroblockd_plane *const pd = &xd->plane[1];
3409 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
3410 super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
3411 NULL, bsize, best_rd);
3412 if (rate_y == INT_MAX)
3415 uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize, pd->subsampling_x,
3417 if (rate_uv_intra[uv_tx] == INT_MAX) {
3418 choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx,
3419 &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx],
3420 &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]);
3423 rate_uv = rate_uv_tokenonly[uv_tx];
3424 distortion_uv = dist_uv[uv_tx];
3425 skippable = skippable && skip_uv[uv_tx];
3426 mbmi->uv_mode = mode_uv[uv_tx];
3428 rate2 = rate_y + cpi->mbmode_cost[mbmi->mode] + rate_uv_intra[uv_tx];
3429 if (this_mode != DC_PRED && this_mode != TM_PRED)
3430 rate2 += intra_cost_penalty;
3431 distortion2 = distortion_y + distortion_uv;
3433 this_rd = handle_inter_mode(cpi, x, bsize,
3434 &rate2, &distortion2, &skippable,
3436 &disable_skip, frame_mv,
3438 single_newmv, single_inter_filter,
3439 single_skippable, &total_sse, best_rd,
3440 &mask_filter, filter_cache);
3441 if (this_rd == INT64_MAX)
3444 compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
3446 if (cm->reference_mode == REFERENCE_MODE_SELECT)
3447 rate2 += compmode_cost;
3450 // Estimate the reference frame signaling cost and add it
3451 // to the rolling cost variable.
3453 rate2 += ref_costs_comp[ref_frame];
3455 rate2 += ref_costs_single[ref_frame];
3458 if (!disable_skip) {
3460 // Back out the coefficient coding costs
3461 rate2 -= (rate_y + rate_uv);
3463 // Cost the skip mb case
3464 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
3465 } else if (ref_frame != INTRA_FRAME && !xd->lossless) {
3466 if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
3467 RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
3468 // Add in the cost of the no skip flag.
3469 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
3471 // FIXME(rbultje) make this work for splitmv also
3472 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
3473 distortion2 = total_sse;
3474 assert(total_sse >= 0);
3475 rate2 -= (rate_y + rate_uv);
3479 // Add in the cost of the no skip flag.
3480 rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
3483 // Calculate the final RD estimate for this mode.
3484 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
3487 // Apply an adjustment to the rd value based on the similarity of the
3488 // source variance and reconstructed variance.
3489 rd_variance_adjustment(cpi, x, bsize, &this_rd,
3490 ref_frame, x->source_variance);
3492 if (ref_frame == INTRA_FRAME) {
3493 // Keep record of best intra rd
3494 if (this_rd < best_intra_rd) {
3495 best_intra_rd = this_rd;
3496 best_intra_mode = mbmi->mode;
3500 if (!disable_skip && ref_frame == INTRA_FRAME) {
3501 for (i = 0; i < REFERENCE_MODES; ++i)
3502 best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
3503 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
3504 best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
3507 // Did this mode help, i.e. is it the new best mode?
3508 if (this_rd < best_rd || x->skip) {
3509 int max_plane = MAX_MB_PLANE;
3510 if (!mode_excluded) {
3511 // Note index of best mode so far
3512 best_mode_index = mode_index;
3514 if (ref_frame == INTRA_FRAME) {
3515 /* required for left and above block mv */
3516 mbmi->mv[0].as_int = 0;
3519 best_pred_sse = x->pred_sse[ref_frame];
3522 rd_cost->rate = rate2;
3523 rd_cost->dist = distortion2;
3524 rd_cost->rdcost = this_rd;
3526 best_mbmode = *mbmi;
3527 best_skip2 = this_skip2;
3528 best_mode_skippable = skippable;
3530 if (!x->select_tx_size)
3531 swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
3532 memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
3533 sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
3535 // TODO(debargha): enhance this test with a better distortion prediction
3536 // based on qp, activity mask and history
3537 if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
3538 (mode_index > MIN_EARLY_TERM_INDEX)) {
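// Early-termination heuristic: once a few modes have been evaluated, if the
// distortion of this (new best) inter mode is already small relative to the
// square of the dequantizer step size, later modes are unlikely to help.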
3539 int qstep = xd->plane[0].dequant[1];
3540 // TODO(debargha): Enhance this by specializing for each mode_index
3542 #if CONFIG_VP9_HIGHBITDEPTH
3543 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
3544 qstep >>= (xd->bd - 8);
3546 #endif // CONFIG_VP9_HIGHBITDEPTH
3547 if (x->source_variance < UINT_MAX) {
3548 const int var_adjust = (x->source_variance < 16);
3549 scale -= var_adjust;
3551 if (ref_frame > INTRA_FRAME &&
3552 distortion2 * scale < qstep * qstep) {
3559 /* keep record of best compound/single-only prediction */
3560 if (!disable_skip && ref_frame != INTRA_FRAME) {
3561 int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
3563 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
3564 single_rate = rate2 - compmode_cost;
3565 hybrid_rate = rate2;
3567 single_rate = rate2;
3568 hybrid_rate = rate2 + compmode_cost;
3571 single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
3572 hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
3575 if (single_rd < best_pred_rd[SINGLE_REFERENCE])
3576 best_pred_rd[SINGLE_REFERENCE] = single_rd;
3578 if (single_rd < best_pred_rd[COMPOUND_REFERENCE])
3579 best_pred_rd[COMPOUND_REFERENCE] = single_rd;
3581 if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
3582 best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
3584 /* keep record of best filter type */
3585 if (!mode_excluded && cm->interp_filter != BILINEAR) {
3586 int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
3587 SWITCHABLE_FILTERS : cm->interp_filter];
3589 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3591 if (ref == INT64_MAX)
3593 else if (filter_cache[i] == INT64_MAX)
3594 // When early termination is triggered, the encoder does not have
3595 // access to the rate-distortion cost. It only knows that the cost
3596 // should be above the maximum valid value. Hence it takes the known
3597 // maximum plus an arbitrary constant as the rate-distortion cost.
3598 adj_rd = mask_filter - ref + 10;
3600 adj_rd = filter_cache[i] - ref;
3603 best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
3611 if (x->skip && !comp_pred)
3615 // The inter modes' rate costs are not calculated precisely in some cases.
3616 // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
3617 // ZEROMV. Here, checks are added for those cases, and the mode decisions
3619 if (best_mbmode.mode == NEWMV) {
3620 const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0],
3621 best_mbmode.ref_frame[1]};
3622 int comp_pred_mode = refs[1] > INTRA_FRAME;
3624 if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3625 ((comp_pred_mode && frame_mv[NEARESTMV][refs[1]].as_int ==
3626 best_mbmode.mv[1].as_int) || !comp_pred_mode))
3627 best_mbmode.mode = NEARESTMV;
3628 else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
3629 ((comp_pred_mode && frame_mv[NEARMV][refs[1]].as_int ==
3630 best_mbmode.mv[1].as_int) || !comp_pred_mode))
3631 best_mbmode.mode = NEARMV;
3632 else if (best_mbmode.mv[0].as_int == 0 &&
3633 ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || !comp_pred_mode))
3634 best_mbmode.mode = ZEROMV;
3637 if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
3638 rd_cost->rate = INT_MAX;
3639 rd_cost->rdcost = INT64_MAX;
3643 // If we used an estimate for the uv intra rd in the loop above...
3644 if (sf->use_uv_intra_rd_estimate) {
3645 // Do Intra UV best rd mode selection if best mode choice above was intra.
3646 if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
3648 *mbmi = best_mbmode;
3649 uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
3650 rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
3651 &rate_uv_tokenonly[uv_tx_size],
3652 &dist_uv[uv_tx_size],
3653 &skip_uv[uv_tx_size],
3654 bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
3659 assert((cm->interp_filter == SWITCHABLE) ||
3660 (cm->interp_filter == best_mbmode.interp_filter) ||
3661 !is_inter_block(&best_mbmode));
3663 if (!cpi->rc.is_src_frame_alt_ref)
3664 vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
3665 sf->adaptive_rd_thresh, bsize, best_mode_index);
3668 *mbmi = best_mbmode;
3669 x->skip |= best_skip2;
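// Record how much worse the best rd under each prediction restriction
// (single, compound, hybrid) and each filter context was than the overall
// best rd; these differences are saved with the coding context below.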
3671 for (i = 0; i < REFERENCE_MODES; ++i) {
3672 if (best_pred_rd[i] == INT64_MAX)
3673 best_pred_diff[i] = INT_MIN;
3675 best_pred_diff[i] = best_rd - best_pred_rd[i];
3679 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3680 if (best_filter_rd[i] == INT64_MAX)
3681 best_filter_diff[i] = 0;
3683 best_filter_diff[i] = best_rd - best_filter_rd[i];
3685 if (cm->interp_filter == SWITCHABLE)
3686 assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
3688 vp10_zero(best_filter_diff);
3691 // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
3692 // updating code causes PSNR loss. Need to figure out the conflict.
3693 x->skip |= best_mode_skippable;
3695 if (!x->skip && !x->select_tx_size) {
3696 int has_high_freq_coeff = 0;
3698 int max_plane = is_inter_block(&xd->mi[0]->mbmi)
3700 for (plane = 0; plane < max_plane; ++plane) {
3701 x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
3702 has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
3705 for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
3706 x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
3707 has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
3710 best_mode_skippable |= !has_high_freq_coeff;
3713 assert(best_mode_index >= 0);
3715 store_coding_context(x, ctx, best_mode_index, best_pred_diff,
3716 best_filter_diff, best_mode_skippable);
void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi,
                                         TileDataEnc *tile_data,
                                         PICK_MODE_CONTEXT *ctx,
                                         int64_t best_rd_so_far) {
  VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  unsigned char segment_id = mbmi->segment_id;
  const int comp_pred = 0;
  int64_t best_pred_diff[REFERENCE_MODES];
  int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
  unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
  vpx_prob comp_mode_p;
  INTERP_FILTER best_filter = SWITCHABLE;
  int64_t this_rd = INT64_MAX;
  const int64_t distortion2 = 0;
  estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                           &comp_mode_p);
  for (i = 0; i < MAX_REF_FRAMES; ++i)
    x->pred_sse[i] = INT_MAX;
  for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
    x->pred_mv_sad[i] = INT_MAX;

  rd_cost->rate = INT_MAX;

  assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));

  mbmi->palette_mode_info.palette_size[0] = 0;
  mbmi->palette_mode_info.palette_size[1] = 0;
  mbmi->mode = ZEROMV;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->mv[0].as_int = 0;

  if (cm->interp_filter != BILINEAR) {
    best_filter = EIGHTTAP;
    if (cm->interp_filter == SWITCHABLE &&
        x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
      int best_rs = INT_MAX;
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        mbmi->interp_filter = i;
        rs = vp10_get_switchable_rate(cpi, xd);
          best_filter = mbmi->interp_filter;

  // Set the appropriate filter
  if (cm->interp_filter == SWITCHABLE) {
    mbmi->interp_filter = best_filter;
    rate2 += vp10_get_switchable_rate(cpi, xd);
    mbmi->interp_filter = cm->interp_filter;

  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    rate2 += vp10_cost_bit(comp_mode_p, comp_pred);

  // Estimate the reference frame signaling cost and add it
  // to the rolling cost variable.
  rate2 += ref_costs_single[LAST_FRAME];
  this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
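  // RDCOST() folds the estimated rate and distortion into a single figure; in
  // this tree the macro in vp10/encoder/rd.h computes roughly
  // (rate * rdmult) / 256 + (distortion << rddiv). Purely for illustration
  // (numbers made up): a candidate needing 200 rate units with distortion
  // 1000 beats one needing 50 rate units with distortion 4000 only when
  // x->rdmult is small, i.e. at low q where spending rate to cut distortion
  // is the better trade.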
  rd_cost->rate = rate2;
  rd_cost->dist = distortion2;
  rd_cost->rdcost = this_rd;

  if (this_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;

  assert((cm->interp_filter == SWITCHABLE) ||
         (cm->interp_filter == mbmi->interp_filter));

  vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                             cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);

  vp10_zero(best_pred_diff);
  vp10_zero(best_filter_diff);

  if (!x->select_tx_size)
    swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
  store_coding_context(x, ctx, THR_ZEROMV,
                       best_pred_diff, best_filter_diff, 0);

void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
                                    TileDataEnc *tile_data,
                                    int mi_row, int mi_col,
                                    PICK_MODE_CONTEXT *ctx,
                                    int64_t best_rd_so_far) {
  VP10_COMMON *const cm = &cpi->common;
  RD_OPT *const rd_opt = &cpi->rd;
  SPEED_FEATURES *const sf = &cpi->sf;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const struct segmentation *const seg = &cm->seg;
  MV_REFERENCE_FRAME ref_frame, second_ref_frame;
  unsigned char segment_id = mbmi->segment_id;
  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  int64_t best_rd = best_rd_so_far;
  int64_t best_yrd = best_rd_so_far;  // FIXME(rbultje) more precise
  int64_t best_pred_diff[REFERENCE_MODES];
  int64_t best_pred_rd[REFERENCE_MODES];
  int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
  int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
  MB_MODE_INFO best_mbmode;
  int ref_index, best_ref_index = 0;
  unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
  vpx_prob comp_mode_p;
  INTERP_FILTER tmp_best_filter = SWITCHABLE;
  int rate_uv_intra, rate_uv_tokenonly;
  PREDICTION_MODE mode_uv = DC_PRED;
  const int intra_cost_penalty = vp10_get_intra_cost_penalty(
      cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
  int_mv seg_mvs[4][MAX_REF_FRAMES];
  b_mode_info best_bmodes[4];
  int ref_frame_skip_mask[2] = { 0 };
  int64_t mask_filter = 0;
  int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
  int internal_active_edge =
      vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);

  memset(x->zcoeff_blk[TX_4X4], 0, 4);
  vp10_zero(best_mbmode);

  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
    filter_cache[i] = INT64_MAX;

  for (i = 0; i < 4; i++) {
    for (j = 0; j < MAX_REF_FRAMES; j++)
      seg_mvs[i][j].as_int = INVALID_MV;
  estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                           &comp_mode_p);
  for (i = 0; i < REFERENCE_MODES; ++i)
    best_pred_rd[i] = INT64_MAX;
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    best_filter_rd[i] = INT64_MAX;
  rate_uv_intra = INT_MAX;

  rd_cost->rate = INT_MAX;

  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
    if (cpi->ref_frame_flags & flag_list[ref_frame]) {
      setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
                         frame_mv[NEARESTMV], frame_mv[NEARMV],
      ref_frame_skip_mask[0] |= (1 << ref_frame);
      ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
    frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
    frame_mv[ZEROMV][ref_frame].as_int = 0;
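    // References that are not enabled in cpi->ref_frame_flags are flagged in
    // ref_frame_skip_mask[] (and every second reference is masked via
    // SECOND_REF_FRAME_MASK), so the mode loop below never evaluates a
    // combination that needs an unavailable frame. NEWMV entries start as
    // INVALID_MV so an unsearched vector is never mistaken for a real
    // candidate, while ZEROMV is by definition the zero vector.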
  mbmi->palette_mode_info.palette_size[0] = 0;
  mbmi->palette_mode_info.palette_size[1] = 0;

  for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
    int mode_excluded = 0;
    int64_t this_rd = INT64_MAX;
    int disable_skip = 0;
    int compmode_cost = 0;
    int rate2 = 0, rate_y = 0, rate_uv = 0;
    int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
    int64_t total_sse = INT_MAX;

    ref_frame = vp10_ref_order[ref_index].ref_frame[0];
    second_ref_frame = vp10_ref_order[ref_index].ref_frame[1];

    // Look at the reference frame of the best mode so far and set the
    // skip mask to look at a subset of the remaining modes.
    if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
      if (ref_index == 3) {
        switch (best_mbmode.ref_frame[0]) {
          case LAST_FRAME:
            ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
            break;
          case GOLDEN_FRAME:
            ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME);
            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
            break;
          case ALTREF_FRAME:
            ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
            break;
          case MAX_REF_FRAMES:
            assert(0 && "Invalid Reference frame");
            break;
    if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
        (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
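    // The masks are read as two independent bitmaps: word 0 is indexed by the
    // first reference, word 1 by the second (NONE is clamped to bit 0 by
    // VPXMAX). A hypothetical helper, shown only as an illustration and not
    // part of this file, would express the same test as:
    //
    //   static INLINE int ref_pair_skipped(const int mask[2],
    //                                      MV_REFERENCE_FRAME ref0,
    //                                      MV_REFERENCE_FRAME ref1) {
    //     return (mask[0] & (1 << ref0)) &&
    //            (mask[1] & (1 << VPXMAX(0, ref1)));
    //   }
    //
    // Only when both words flag the pair is the candidate dropped.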
    // Test best rd so far against threshold for trying this mode.
    if (!internal_active_edge &&
        rd_less_than_thresh(best_rd,
                            rd_opt->threshes[segment_id][bsize][ref_index],
                            tile_data->thresh_freq_fact[bsize][ref_index]))
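    // rd_less_than_thresh() (see vp10/encoder/rd.h) prunes this reference
    // combination when the best RD cost found so far is already below a
    // per-(segment, block size, ref_index) threshold scaled by an adaptive
    // frequency factor, so combinations that rarely win get skipped earlier
    // over time. Blocks flagged as internal_active_edge bypass the pruning
    // and are always searched in full.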
    comp_pred = second_ref_frame > INTRA_FRAME;
      if (!cpi->allow_comp_inter_inter)
      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
      // Do not allow compound prediction if the segment level reference frame
      // feature is in use as in this case there can only be one reference.
      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
      if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
          best_mbmode.ref_frame[0] == INTRA_FRAME)

    // TODO(jingning, jkoleszar): scaling reference frame not supported for
    if (ref_frame > INTRA_FRAME &&
        vp10_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
    if (second_ref_frame > INTRA_FRAME &&
        vp10_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
      mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
    else if (ref_frame != INTRA_FRAME)
      mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
    // If the segment reference frame feature is enabled,
    // then do nothing if the current ref frame is not allowed.
    if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
    // Disable this drop out case if the ref frame
    // segment level feature is enabled for this segment. This is to
    // prevent the possibility that we end up unable to pick any mode.
    } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
      // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
      // unless ARNR filtering is enabled in which case we want
      // an unfiltered alternative. We allow near/nearest as well
      // because they may result in zero-zero MVs but be cheaper.
      if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))

    mbmi->tx_size = TX_4X4;
    mbmi->uv_mode = DC_PRED;
    mbmi->ref_frame[0] = ref_frame;
    mbmi->ref_frame[1] = second_ref_frame;
    // Evaluate all sub-pel filters irrespective of whether we can use
    // them for this frame.
    mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
                                                          : cm->interp_filter;

    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    // Select prediction reference frames.
    for (i = 0; i < MAX_MB_PLANE; i++) {
      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
        xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];

    if (ref_frame == INTRA_FRAME) {
      if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y,
                                       &distortion_y, best_rd) >= best_rd)

      rate2 += intra_cost_penalty;
      distortion2 += distortion_y;

      if (rate_uv_intra == INT_MAX) {
        choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4,

      rate2 += rate_uv_intra;
      rate_uv = rate_uv_tokenonly;
      distortion2 += dist_uv;
      distortion_uv = dist_uv;
      mbmi->uv_mode = mode_uv;
      int64_t this_rd_thresh;
      int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
      int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
      int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
      int tmp_best_skippable = 0;
      int switchable_filter_index;
      int_mv *second_ref = comp_pred ?
          &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
      b_mode_info tmp_best_bmodes[16];
      MB_MODE_INFO tmp_best_mbmode;
      BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
      int pred_exists = 0;

      this_rd_thresh = (ref_frame == LAST_FRAME) ?
          rd_opt->threshes[segment_id][bsize][THR_LAST] :
          rd_opt->threshes[segment_id][bsize][THR_ALTR];
      this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
          rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh;
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
        filter_cache[i] = INT64_MAX;

      if (cm->interp_filter != BILINEAR) {
        tmp_best_filter = EIGHTTAP;
        if (x->source_variance < sf->disable_filter_search_var_thresh) {
          tmp_best_filter = EIGHTTAP;
        } else if (sf->adaptive_pred_interp_filter == 1 &&
                   ctx->pred_interp_filter < SWITCHABLE) {
          tmp_best_filter = ctx->pred_interp_filter;
        } else if (sf->adaptive_pred_interp_filter == 2) {
          tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ?
              ctx->pred_interp_filter : 0;
          for (switchable_filter_index = 0;
               switchable_filter_index < SWITCHABLE_FILTERS;
               ++switchable_filter_index) {
            MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
            mbmi->interp_filter = switchable_filter_index;
            tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
                                              &mbmi_ext->ref_mvs[ref_frame][0],
                                              second_ref, best_yrd, &rate,
                                              &rate_y, &distortion,
                                              &skippable, &total_sse,
                                              (int) this_rd_thresh, seg_mvs,
                                              bsi, switchable_filter_index,

            if (tmp_rd == INT64_MAX)
            rs = vp10_get_switchable_rate(cpi, xd);
            rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
            filter_cache[switchable_filter_index] = tmp_rd;
            filter_cache[SWITCHABLE_FILTERS] =
                VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
            if (cm->interp_filter == SWITCHABLE)

            mask_filter = VPXMAX(mask_filter, tmp_rd);
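            // filter_cache[i] keeps the best split RD seen with filter i on
            // its own, while filter_cache[SWITCHABLE_FILTERS] also charges
            // the rate of signalling a switchable filter (rs_rd);
            // mask_filter tracks the largest cached value and later serves
            // as a stand-in cost for filters that were never fully evaluated.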
            newbest = (tmp_rd < tmp_best_rd);
              tmp_best_filter = mbmi->interp_filter;
              tmp_best_rd = tmp_rd;

            if ((newbest && cm->interp_filter == SWITCHABLE) ||
                (mbmi->interp_filter == cm->interp_filter &&
                 cm->interp_filter != SWITCHABLE)) {
              tmp_best_rdu = tmp_rd;
              tmp_best_rate = rate;
              tmp_best_ratey = rate_y;
              tmp_best_distortion = distortion;
              tmp_best_sse = total_sse;
              tmp_best_skippable = skippable;
              tmp_best_mbmode = *mbmi;
              for (i = 0; i < 4; i++) {
                tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
                x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];

              if (switchable_filter_index == 0 &&
                  sf->use_rd_breakout &&
                  best_rd < INT64_MAX) {
                if (tmp_best_rdu / 2 > best_rd) {
                  // skip searching the other filters if the first is
                  // already substantially larger than the best so far
                  tmp_best_filter = mbmi->interp_filter;
                  tmp_best_rdu = INT64_MAX;
          }  // switchable_filter_index loop

      if (tmp_best_rdu == INT64_MAX && pred_exists)

      mbmi->interp_filter = (cm->interp_filter == SWITCHABLE ?
                             tmp_best_filter : cm->interp_filter);

        // Handles the special case when a filter that is not in the
        // switchable list (bilinear, 6-tap) is indicated at the frame level
        tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
                                          &x->mbmi_ext->ref_mvs[ref_frame][0],
                                          second_ref, best_yrd, &rate, &rate_y,
                                          &distortion, &skippable, &total_sse,
                                          (int) this_rd_thresh, seg_mvs, bsi, 0,
        if (tmp_rd == INT64_MAX)

        total_sse = tmp_best_sse;
        rate = tmp_best_rate;
        rate_y = tmp_best_ratey;
        distortion = tmp_best_distortion;
        skippable = tmp_best_skippable;
        *mbmi = tmp_best_mbmode;
        for (i = 0; i < 4; i++)
          xd->mi[0]->bmi[i] = tmp_best_bmodes[i];

      distortion2 += distortion;

      if (cm->interp_filter == SWITCHABLE)
        rate2 += vp10_get_switchable_rate(cpi, xd);

        mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
                                  : cm->reference_mode == COMPOUND_REFERENCE;

      compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);

      tmp_best_rdu = best_rd -
                     VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
                            RDCOST(x->rdmult, x->rddiv, 0, total_sse));
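      // What remains of the best RD so far, after paying the cheaper of
      // coding this split mode as accumulated so far (rate2, distortion2) or
      // signalling a skip (zero rate, full SSE), is the budget left for the
      // chroma planes; if it is not positive, evaluating UV cannot make this
      // mode competitive.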
      if (tmp_best_rdu > 0) {
        // If even the 'Y' rd value of split is higher than best so far
        // then don't bother looking at UV.
        vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
        memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
        if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
                              &uv_sse, BLOCK_8X8, tmp_best_rdu))

        distortion2 += distortion_uv;
        skippable = skippable && uv_skippable;
        total_sse += uv_sse;

    if (cm->reference_mode == REFERENCE_MODE_SELECT)
      rate2 += compmode_cost;

    // Estimate the reference frame signaling cost and add it
    // to the rolling cost variable.
    if (second_ref_frame > INTRA_FRAME) {
      rate2 += ref_costs_comp[ref_frame];
      rate2 += ref_costs_single[ref_frame];

    if (!disable_skip) {
      // Skip is never coded at the segment level for sub8x8 blocks and instead
      // always coded in the bitstream at the mode info level.
      if (ref_frame != INTRA_FRAME && !xd->lossless) {
        if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
            RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
          // Add in the cost of the no skip flag.
          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
          // FIXME(rbultje) make this work for splitmv also
          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
          distortion2 = total_sse;
          assert(total_sse >= 0);
          rate2 -= (rate_y + rate_uv);
        // Add in the cost of the no skip flag.
        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
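      // This is the usual "code it or skip it" decision: when the RD cost of
      // transmitting the residual, RDCOST(rate_y + rate_uv, distortion2),
      // exceeds RDCOST(0, total_sse) for transmitting nothing, the block is
      // signalled as skipped, its distortion becomes the full prediction SSE
      // and the token rate is dropped. For intra or lossless blocks the
      // residual is always kept and only the "not skipped" flag cost is added.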
      // Calculate the final RD estimate for this mode.
      this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

    if (!disable_skip && ref_frame == INTRA_FRAME) {
      for (i = 0; i < REFERENCE_MODES; ++i)
        best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
        best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
    // Did this mode help, i.e. is it the new best mode?
    if (this_rd < best_rd || x->skip) {
      if (!mode_excluded) {
        int max_plane = MAX_MB_PLANE;
        // Note index of best mode so far
        best_ref_index = ref_index;

        if (ref_frame == INTRA_FRAME) {
          /* required for left and above block mv */
          mbmi->mv[0].as_int = 0;

        rd_cost->rate = rate2;
        rd_cost->dist = distortion2;
        rd_cost->rdcost = this_rd;

        best_yrd = best_rd -
                   RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
        best_mbmode = *mbmi;
        best_skip2 = this_skip2;
        if (!x->select_tx_size)
          swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
        memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
               sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);

        for (i = 0; i < 4; i++)
          best_bmodes[i] = xd->mi[0]->bmi[i];

        // TODO(debargha): enhance this test with a better distortion prediction
        // based on qp, activity mask and history
        if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
            (ref_index > MIN_EARLY_TERM_INDEX)) {
          int qstep = xd->plane[0].dequant[1];
          // TODO(debargha): Enhance this by specializing for each mode_index
#if CONFIG_VP9_HIGHBITDEPTH
          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
            qstep >>= (xd->bd - 8);
#endif  // CONFIG_VP9_HIGHBITDEPTH
          if (x->source_variance < UINT_MAX) {
            const int var_adjust = (x->source_variance < 16);
            scale -= var_adjust;

          if (ref_frame > INTRA_FRAME &&
              distortion2 * scale < qstep * qstep) {
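          // Early-termination heuristic: once the scaled distortion (scale is
          // reduced slightly for very flat sources) falls below qstep * qstep,
          // the prediction error is already in the range a single quantiser
          // step would leave behind, so trying the remaining reference
          // combinations is unlikely to pay off and the search is cut short.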
    /* keep record of best compound/single-only prediction */
    if (!disable_skip && ref_frame != INTRA_FRAME) {
      int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;

      if (cm->reference_mode == REFERENCE_MODE_SELECT) {
        single_rate = rate2 - compmode_cost;
        hybrid_rate = rate2;
        single_rate = rate2;
        hybrid_rate = rate2 + compmode_cost;

      single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
      hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);

      if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
        best_pred_rd[SINGLE_REFERENCE] = single_rd;
      else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
        best_pred_rd[COMPOUND_REFERENCE] = single_rd;

      if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
        best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
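      // Bookkeeping for the frame-level reference-mode decision: the same RD
      // total is costed once without the compound-mode bit (single_rate) and
      // once with it (hybrid_rate), and the per-policy minima are kept so
      // that later code can compare how single-only, compound-only and
      // per-block (hybrid) signalling would have fared for this frame.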
    /* keep record of best filter type */
    if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
        cm->interp_filter != BILINEAR) {
      int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
                                 SWITCHABLE_FILTERS : cm->interp_filter];

      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
        if (ref == INT64_MAX)
        else if (filter_cache[i] == INT64_MAX)
          // When early termination is triggered, the encoder does not have
          // access to the rate-distortion cost. It only knows that the cost
          // should be above the maximum valid value. Hence it takes the known
          // maximum plus an arbitrary constant as the rate-distortion cost.
          adj_rd = mask_filter - ref + 10;
          adj_rd = filter_cache[i] - ref;

        best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
    if (x->skip && !comp_pred)

  if (best_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;

  // If we used an estimate for the uv intra rd in the loop above...
  if (sf->use_uv_intra_rd_estimate) {
    // Do Intra UV best rd mode selection if best mode choice above was intra.
    if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
      *mbmi = best_mbmode;
      rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra,

  if (best_rd == INT64_MAX) {
    rd_cost->rate = INT_MAX;
    rd_cost->dist = INT64_MAX;
    rd_cost->rdcost = INT64_MAX;

  assert((cm->interp_filter == SWITCHABLE) ||
         (cm->interp_filter == best_mbmode.interp_filter) ||
         !is_inter_block(&best_mbmode));

  vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                             sf->adaptive_rd_thresh, bsize, best_ref_index);

  *mbmi = best_mbmode;
  x->skip |= best_skip2;
  if (!is_inter_block(&best_mbmode)) {
    for (i = 0; i < 4; i++)
      xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
    for (i = 0; i < 4; ++i)
      memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));

    mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
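    // For sub8x8 partitions the per-sub-block vectors live in bmi[]; the
    // block-level mbmi->mv is copied from bmi[3] (the bottom-right sub-block)
    // so that code which only looks at the block-level motion vector, e.g.
    // motion vector prediction for neighbouring blocks, still sees a
    // representative value.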
  for (i = 0; i < REFERENCE_MODES; ++i) {
    if (best_pred_rd[i] == INT64_MAX)
      best_pred_diff[i] = INT_MIN;
    else
      best_pred_diff[i] = best_rd - best_pred_rd[i];
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
    if (best_filter_rd[i] == INT64_MAX)
      best_filter_diff[i] = 0;
    else
      best_filter_diff[i] = best_rd - best_filter_rd[i];
  if (cm->interp_filter == SWITCHABLE)
    assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);

  vp10_zero(best_filter_diff);

  store_coding_context(x, ctx, best_ref_index,
                       best_pred_diff, best_filter_diff, 0);