2 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
16 #include "./vp9_rtcd.h"
17 #include "./vpx_dsp_rtcd.h"
19 #include "vpx/vpx_codec.h"
20 #include "vpx_dsp/vpx_dsp_common.h"
21 #include "vpx_mem/vpx_mem.h"
22 #include "vpx_ports/mem.h"
24 #include "vp9/common/vp9_blockd.h"
25 #include "vp9/common/vp9_common.h"
26 #include "vp9/common/vp9_mvref_common.h"
27 #include "vp9/common/vp9_pred_common.h"
28 #include "vp9/common/vp9_reconinter.h"
29 #include "vp9/common/vp9_reconintra.h"
30 #include "vp9/common/vp9_scan.h"
32 #include "vp9/encoder/vp9_cost.h"
33 #include "vp9/encoder/vp9_encoder.h"
34 #include "vp9/encoder/vp9_pickmode.h"
35 #include "vp9/encoder/vp9_ratectrl.h"
36 #include "vp9/encoder/vp9_rd.h"
45 PRED_BUFFER *best_pred;
46 PREDICTION_MODE best_mode;
48 TX_SIZE best_intra_tx_size;
49 MV_REFERENCE_FRAME best_ref_frame;
50 MV_REFERENCE_FRAME best_second_ref_frame;
51 uint8_t best_mode_skip_txfm;
52 INTERP_FILTER best_pred_filter;
55 static const int pos_shift_16x16[4][4] = {
56 { 9, 10, 13, 14 }, { 11, 12, 15, 16 }, { 17, 18, 21, 22 }, { 19, 20, 23, 24 }
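// The table above maps a 16x16 block's (row, col) position within its 64x64
// superblock to its entry in the variance_low[] array consulted by
// get_force_skip_low_temp_var() below (entries 0-8 cover the larger sizes).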
59 static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, const MACROBLOCK *x,
60 const MACROBLOCKD *xd, const TileInfo *const tile,
61 MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
62 int_mv *mv_ref_list, int_mv *base_mv, int mi_row,
63 int mi_col, int use_base_mv) {
64 const int *ref_sign_bias = cm->ref_frame_sign_bias;
65 int i, refmv_count = 0;
67 const POSITION *const mv_ref_search = mv_ref_blocks[mi->sb_type];
69 int different_ref_found = 0;
70 int context_counter = 0;
73 // Blank the reference vector list
74 memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
76 // The nearest 2 blocks are treated differently:
77 // if the size is < 8x8 we get the mv from the bmi substructure,
78 // and we also need to keep a mode count.
79 for (i = 0; i < 2; ++i) {
80 const POSITION *const mv_ref = &mv_ref_search[i];
81 if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
82 const MODE_INFO *const candidate_mi =
83 xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
84 // Keep counts for entropy encoding.
85 context_counter += mode_2_counter[candidate_mi->mode];
86 different_ref_found = 1;
88 if (candidate_mi->ref_frame[0] == ref_frame)
89 ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, -1),
90 refmv_count, mv_ref_list, Done);
96 // Check the rest of the neighbors in much the same way
97 // as before except we don't need to keep track of sub blocks or mode counts.
99 for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) {
100 const POSITION *const mv_ref = &mv_ref_search[i];
101 if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
102 const MODE_INFO *const candidate_mi =
103 xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
104 different_ref_found = 1;
106 if (candidate_mi->ref_frame[0] == ref_frame)
107 ADD_MV_REF_LIST(candidate_mi->mv[0], refmv_count, mv_ref_list, Done);
111 // Since we couldn't find 2 mvs from the same reference frame
112 // go back through the neighbors and find motion vectors from
113 // different reference frames.
114 if (different_ref_found && !refmv_count) {
115 for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
116 const POSITION *mv_ref = &mv_ref_search[i];
117 if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
118 const MODE_INFO *const candidate_mi =
119 xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
121 // If the candidate is INTRA we don't want to consider its mv.
122 IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias,
123 refmv_count, mv_ref_list, Done);
128 !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
129 ref_frame == LAST_FRAME) {
130 // Get base layer mv.
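// (mi_col >> 1, mi_row >> 1) maps the current position into the base layer's
// mi grid, and the base mv is doubled below, assuming a 2:1 spatial scaling
// factor between the base and the current layer.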
133 ->mvs[(mi_col >> 1) + (mi_row >> 1) * (cm->mi_cols >> 1)];
134 if (candidate->mv[0].as_int != INVALID_MV) {
135 base_mv->as_mv.row = (candidate->mv[0].as_mv.row * 2);
136 base_mv->as_mv.col = (candidate->mv[0].as_mv.col * 2);
137 clamp_mv_ref(&base_mv->as_mv, xd);
139 base_mv->as_int = INVALID_MV;
145 x->mbmi_ext->mode_context[ref_frame] = counter_to_context[context_counter];
148 for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
149 clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
154 static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
155 BLOCK_SIZE bsize, int mi_row, int mi_col,
156 int_mv *tmp_mv, int *rate_mv,
157 int64_t best_rd_sofar, int use_base_mv) {
158 MACROBLOCKD *xd = &x->e_mbd;
159 MODE_INFO *mi = xd->mi[0];
160 struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
161 const int step_param = cpi->sf.mv.fullpel_search_step_param;
162 const int sadpb = x->sadperbit16;
164 const int ref = mi->ref_frame[0];
165 const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
169 const MvLimits tmp_mv_limits = x->mv_limits;
172 int search_subpel = 1;
173 const YV12_BUFFER_CONFIG *scaled_ref_frame =
174 vp9_get_scaled_ref_frame(cpi, ref);
175 if (scaled_ref_frame) {
177 // Swap out the reference frame for a version that's been scaled to
178 // match the resolution of the current frame, allowing the existing
179 // motion search code to be used without additional modifications.
180 for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
181 vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
183 vp9_set_mv_search_range(&x->mv_limits, &ref_mv);
185 // Limit motion vector range for large lighting changes.
186 if (cpi->oxcf.speed > 5 && x->lowvar_highsumdiff) {
187 x->mv_limits.col_min = VPXMAX(x->mv_limits.col_min, -10);
188 x->mv_limits.row_min = VPXMAX(x->mv_limits.row_min, -10);
189 x->mv_limits.col_max = VPXMIN(x->mv_limits.col_max, 10);
190 x->mv_limits.row_max = VPXMIN(x->mv_limits.row_max, 10);
193 assert(x->mv_best_ref_index[ref] <= 2);
194 if (x->mv_best_ref_index[ref] < 2)
195 mvp_full = x->mbmi_ext->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
197 mvp_full = x->pred_mv[ref];
205 center_mv = tmp_mv->as_mv;
207 if (x->sb_use_mv_part) {
208 tmp_mv->as_mv.row = x->sb_mvrow_part >> 3;
209 tmp_mv->as_mv.col = x->sb_mvcol_part >> 3;
211 vp9_full_pixel_search(
212 cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
213 cond_cost_list(cpi, cost_list), &center_mv, &tmp_mv->as_mv, INT_MAX, 0);
216 x->mv_limits = tmp_mv_limits;
218 // Calculate the bit cost of the motion vector.
219 mvp_full.row = tmp_mv->as_mv.row * 8;
220 mvp_full.col = tmp_mv->as_mv.col * 8;
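// The full-pel search result is converted to 1/8-pel units, the precision
// expected by vp9_mv_bit_cost().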
222 *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv, x->nmvjointcost, x->mvcost,
226 cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]][INTER_OFFSET(NEWMV)];
228 !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) > best_rd_sofar);
230 // For SVC on non-reference frame, avoid subpel for (0, 0) motion.
231 if (cpi->use_svc && cpi->svc.non_reference_frame) {
232 if (mvp_full.row == 0 && mvp_full.col == 0) search_subpel = 0;
235 if (rv && search_subpel) {
236 int subpel_force_stop = cpi->sf.mv.subpel_force_stop;
237 if (use_base_mv && cpi->sf.base_mv_aggressive) subpel_force_stop = 2;
238 if (cpi->sf.mv.enable_adaptive_subpel_force_stop) {
239 int mv_thresh = cpi->sf.mv.adapt_subpel_force_stop.mv_thresh;
240 if (abs(tmp_mv->as_mv.row) >= mv_thresh ||
241 abs(tmp_mv->as_mv.col) >= mv_thresh)
242 subpel_force_stop = cpi->sf.mv.adapt_subpel_force_stop.force_stop_above;
244 subpel_force_stop = cpi->sf.mv.adapt_subpel_force_stop.force_stop_below;
246 cpi->find_fractional_mv_step(
247 x, &tmp_mv->as_mv, &ref_mv, cpi->common.allow_high_precision_mv,
248 x->errorperbit, &cpi->fn_ptr[bsize], subpel_force_stop,
249 cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
250 x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
251 *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
252 x->mvcost, MV_COST_WEIGHT);
255 if (scaled_ref_frame) {
257 for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
262 static void block_variance(const uint8_t *src, int src_stride,
263 const uint8_t *ref, int ref_stride, int w, int h,
264 unsigned int *sse, int *sum, int block_size,
265 #if CONFIG_VP9_HIGHBITDEPTH
266 int use_highbitdepth, vpx_bit_depth_t bd,
268 uint32_t *sse8x8, int *sum8x8, uint32_t *var8x8) {
274 for (i = 0; i < h; i += block_size) {
275 for (j = 0; j < w; j += block_size) {
276 #if CONFIG_VP9_HIGHBITDEPTH
277 if (use_highbitdepth) {
280 vpx_highbd_8_get8x8var(src + src_stride * i + j, src_stride,
281 ref + ref_stride * i + j, ref_stride,
282 &sse8x8[k], &sum8x8[k]);
285 vpx_highbd_10_get8x8var(src + src_stride * i + j, src_stride,
286 ref + ref_stride * i + j, ref_stride,
287 &sse8x8[k], &sum8x8[k]);
290 vpx_highbd_12_get8x8var(src + src_stride * i + j, src_stride,
291 ref + ref_stride * i + j, ref_stride,
292 &sse8x8[k], &sum8x8[k]);
296 vpx_get8x8var(src + src_stride * i + j, src_stride,
297 ref + ref_stride * i + j, ref_stride, &sse8x8[k],
301 vpx_get8x8var(src + src_stride * i + j, src_stride,
302 ref + ref_stride * i + j, ref_stride, &sse8x8[k],
307 var8x8[k] = sse8x8[k] - (uint32_t)(((int64_t)sum8x8[k] * sum8x8[k]) >> 6);
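// Per-block variance: sse - sum^2 / 64, where 64 is the pixel count of the
// 8x8 block.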
313 static void calculate_variance(int bw, int bh, TX_SIZE tx_size,
314 unsigned int *sse_i, int *sum_i,
315 unsigned int *var_o, unsigned int *sse_o,
317 const BLOCK_SIZE unit_size = txsize_to_bsize[tx_size];
318 const int nw = 1 << (bw - b_width_log2_lookup[unit_size]);
319 const int nh = 1 << (bh - b_height_log2_lookup[unit_size]);
322 for (i = 0; i < nh; i += 2) {
323 for (j = 0; j < nw; j += 2) {
324 sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] +
325 sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
326 sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
327 sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
328 var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >>
329 (b_width_log2_lookup[unit_size] +
330 b_height_log2_lookup[unit_size] + 6));
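// The shift equals log2 of the pixel count of the aggregated block
// (a 2x2 group of unit_size blocks).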
336 // Adjust the ac_thr according to speed, width, height and normalized sum
337 static int ac_thr_factor(const int speed, const int width, const int height,
338 const int norm_sum) {
339 if (speed >= 8 && norm_sum < 5) {
340 if (width <= 640 && height <= 480)
348 static TX_SIZE calculate_tx_size(VP9_COMP *const cpi, BLOCK_SIZE bsize,
349 MACROBLOCKD *const xd, unsigned int var,
350 unsigned int sse, int64_t ac_thr) {
352 if (cpi->common.tx_mode == TX_MODE_SELECT) {
353 if (sse > (var << 2))
354 tx_size = VPXMIN(max_txsize_lookup[bsize],
355 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
359 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
360 cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id))
362 else if (tx_size > TX_16X16)
365 // For screen-content force 4X4 tx_size over 8X8, for large variance.
366 if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && tx_size == TX_8X8 &&
367 bsize <= BLOCK_16X16 && var > (ac_thr << 6))
370 tx_size = VPXMIN(max_txsize_lookup[bsize],
371 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
377 static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize,
378 MACROBLOCK *x, MACROBLOCKD *xd,
379 int *out_rate_sum, int64_t *out_dist_sum,
380 unsigned int *var_y, unsigned int *sse_y,
381 int mi_row, int mi_col, int *early_term,
382 int *flag_preduv_computed) {
383 // Note our transform coeffs are scaled by 8 relative to an orthogonal transform.
384 // Hence the quantizer step is also scaled by 8; to get the effective quantizer
385 // we need to divide by 8 before sending it to the modeling function.
389 struct macroblock_plane *const p = &x->plane[0];
390 struct macroblockd_plane *const pd = &xd->plane[0];
391 const uint32_t dc_quant = pd->dequant[0];
392 const uint32_t ac_quant = pd->dequant[1];
393 int64_t dc_thr = dc_quant * dc_quant >> 6;
394 int64_t ac_thr = ac_quant * ac_quant >> 6;
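// q * q >> 6 == (q / 8)^2, i.e. the square of the effective quantizer
// described in the note above.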
399 const int bw = b_width_log2_lookup[bsize];
400 const int bh = b_height_log2_lookup[bsize];
401 const int num8x8 = 1 << (bw + bh - 2);
402 unsigned int sse8x8[64] = { 0 };
403 int sum8x8[64] = { 0 };
404 unsigned int var8x8[64] = { 0 };
407 #if CONFIG_VP9_HIGHBITDEPTH
408 const vpx_bit_depth_t bd = cpi->common.bit_depth;
410 // Calculate variance for whole partition, and also save 8x8 blocks' variance
411 // to be used in following transform skipping test.
412 block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
413 4 << bw, 4 << bh, &sse, &sum, 8,
414 #if CONFIG_VP9_HIGHBITDEPTH
415 cpi->common.use_highbitdepth, bd,
417 sse8x8, sum8x8, var8x8);
418 var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4));
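// Whole-partition variance: the shift (bw + bh + 4) is log2 of the pixel
// count of the (4 << bw) x (4 << bh) partition.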
423 #if CONFIG_VP9_TEMPORAL_DENOISING
424 if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
426 ac_thr = vp9_scale_acskip_thresh(ac_thr, cpi->denoiser.denoising_level,
427 (abs(sum) >> (bw + bh)),
428 cpi->svc.temporal_layer_id);
430 ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
431 cpi->common.height, abs(sum) >> (bw + bh));
433 ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
434 cpi->common.height, abs(sum) >> (bw + bh));
437 tx_size = calculate_tx_size(cpi, bsize, xd, var, sse, ac_thr);
438 // The code below for setting the skip flag assumes a transform size of at
439 // least 8x8, so force this lower limit on the transform.
440 if (tx_size < TX_8X8) tx_size = TX_8X8;
441 xd->mi[0]->tx_size = tx_size;
443 if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && x->zero_temp_sad_source &&
444 x->source_variance == 0)
445 dc_thr = dc_thr << 1;
447 // Evaluate if the partition block is a skippable block in Y plane.
449 unsigned int sse16x16[16] = { 0 };
450 int sum16x16[16] = { 0 };
451 unsigned int var16x16[16] = { 0 };
452 const int num16x16 = num8x8 >> 2;
454 unsigned int sse32x32[4] = { 0 };
455 int sum32x32[4] = { 0 };
456 unsigned int var32x32[4] = { 0 };
457 const int num32x32 = num8x8 >> 4;
461 const int num = (tx_size == TX_8X8)
463 : ((tx_size == TX_16X16) ? num16x16 : num32x32);
464 const unsigned int *sse_tx =
465 (tx_size == TX_8X8) ? sse8x8
466 : ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
467 const unsigned int *var_tx =
468 (tx_size == TX_8X8) ? var8x8
469 : ((tx_size == TX_16X16) ? var16x16 : var32x32);
471 // Calculate variance if tx_size > TX_8X8
472 if (tx_size >= TX_16X16)
473 calculate_variance(bw, bh, TX_8X8, sse8x8, sum8x8, var16x16, sse16x16,
475 if (tx_size == TX_32X32)
476 calculate_variance(bw, bh, TX_16X16, sse16x16, sum16x16, var32x32,
480 x->skip_txfm[0] = SKIP_TXFM_NONE;
481 for (k = 0; k < num; k++)
482 // Check if all ac coefficients can be quantized to zero.
483 if (!(var_tx[k] < ac_thr || var == 0)) {
488 for (k = 0; k < num; k++)
489 // Check if dc coefficient can be quantized to zero.
490 if (!(sse_tx[k] - var_tx[k] < dc_thr || sse == var)) {
496 x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;
498 if (dc_test) x->skip_txfm[0] = SKIP_TXFM_AC_DC;
499 } else if (dc_test) {
504 if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
505 int skip_uv[2] = { 0 };
506 unsigned int var_uv[2];
507 unsigned int sse_uv[2];
510 *out_dist_sum = sse << 4;
512 // Transform skipping test in UV planes.
513 for (i = 1; i <= 2; i++) {
514 if (cpi->oxcf.speed < 8 || x->color_sensitivity[i - 1]) {
515 struct macroblock_plane *const p = &x->plane[i];
516 struct macroblockd_plane *const pd = &xd->plane[i];
517 const TX_SIZE uv_tx_size = get_uv_tx_size(xd->mi[0], pd);
518 const BLOCK_SIZE unit_size = txsize_to_bsize[uv_tx_size];
519 const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, pd);
520 const int uv_bw = b_width_log2_lookup[uv_bsize];
521 const int uv_bh = b_height_log2_lookup[uv_bsize];
522 const int sf = (uv_bw - b_width_log2_lookup[unit_size]) +
523 (uv_bh - b_height_log2_lookup[unit_size]);
524 const uint32_t uv_dc_thr = pd->dequant[0] * pd->dequant[0] >> (6 - sf);
525 const uint32_t uv_ac_thr = pd->dequant[1] * pd->dequant[1] >> (6 - sf);
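// sf is log2 of the number of transform blocks in the uv plane block; since
// the variance below is computed over the whole plane block, the per-block
// threshold q^2 >> 6 is scaled up by 2^sf, i.e. >> (6 - sf).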
528 vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, i);
529 flag_preduv_computed[i - 1] = 1;
530 var_uv[j] = cpi->fn_ptr[uv_bsize].vf(
531 p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse_uv[j]);
533 if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) &&
534 (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j]))
543 // If the transforms in the YUV planes are skippable, the mode search checks
544 // fewer inter modes and doesn't check intra modes.
545 if (skip_uv[0] & skip_uv[1]) {
552 #if CONFIG_VP9_HIGHBITDEPTH
553 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
554 dc_quant >> (xd->bd - 5), &rate, &dist);
556 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
557 dc_quant >> 3, &rate, &dist);
558 #endif // CONFIG_VP9_HIGHBITDEPTH
562 *out_rate_sum = rate >> 1;
563 *out_dist_sum = dist << 3;
566 *out_dist_sum = (sse - var) << 4;
569 #if CONFIG_VP9_HIGHBITDEPTH
570 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
571 ac_quant >> (xd->bd - 5), &rate, &dist);
573 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
575 #endif // CONFIG_VP9_HIGHBITDEPTH
577 *out_rate_sum += rate;
578 *out_dist_sum += dist << 4;
581 static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
582 MACROBLOCKD *xd, int *out_rate_sum,
583 int64_t *out_dist_sum, unsigned int *var_y,
584 unsigned int *sse_y) {
585 // Note our transform coeffs are scaled by 8 relative to an orthogonal transform.
586 // Hence the quantizer step is also scaled by 8; to get the effective quantizer
587 // we need to divide by 8 before sending it to the modeling function.
591 struct macroblock_plane *const p = &x->plane[0];
592 struct macroblockd_plane *const pd = &xd->plane[0];
593 const int64_t dc_thr = p->quant_thred[0] >> 6;
594 const int64_t ac_thr = p->quant_thred[1] >> 6;
595 const uint32_t dc_quant = pd->dequant[0];
596 const uint32_t ac_quant = pd->dequant[1];
597 unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
598 pd->dst.buf, pd->dst.stride, &sse);
604 xd->mi[0]->tx_size = calculate_tx_size(cpi, bsize, xd, var, sse, ac_thr);
606 // Evaluate if the partition block is a skippable block in Y plane.
608 const BLOCK_SIZE unit_size = txsize_to_bsize[xd->mi[0]->tx_size];
609 const unsigned int num_blk_log2 =
610 (b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) +
611 (b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]);
612 const unsigned int sse_tx = sse >> num_blk_log2;
613 const unsigned int var_tx = var >> num_blk_log2;
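// Approximate the per-transform-block sse/variance by splitting the
// whole-block values evenly across the 2^num_blk_log2 transform blocks.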
615 x->skip_txfm[0] = SKIP_TXFM_NONE;
616 // Check if all ac coefficients can be quantized to zero.
617 if (var_tx < ac_thr || var == 0) {
618 x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;
619 // Check if dc coefficient can be quantized to zero.
620 if (sse_tx - var_tx < dc_thr || sse == var)
621 x->skip_txfm[0] = SKIP_TXFM_AC_DC;
623 if (sse_tx - var_tx < dc_thr || sse == var) skip_dc = 1;
627 if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
629 *out_dist_sum = sse << 4;
634 #if CONFIG_VP9_HIGHBITDEPTH
635 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
636 dc_quant >> (xd->bd - 5), &rate, &dist);
638 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
639 dc_quant >> 3, &rate, &dist);
640 #endif // CONFIG_VP9_HIGHBITDEPTH
644 *out_rate_sum = rate >> 1;
645 *out_dist_sum = dist << 3;
648 *out_dist_sum = (sse - var) << 4;
651 #if CONFIG_VP9_HIGHBITDEPTH
652 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
653 ac_quant >> (xd->bd - 5), &rate, &dist);
655 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
657 #endif // CONFIG_VP9_HIGHBITDEPTH
659 *out_rate_sum += rate;
660 *out_dist_sum += dist << 4;
663 static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *this_rdc,
664 int *skippable, int64_t *sse, BLOCK_SIZE bsize,
665 TX_SIZE tx_size, int rd_computed) {
666 MACROBLOCKD *xd = &x->e_mbd;
667 const struct macroblockd_plane *pd = &xd->plane[0];
668 struct macroblock_plane *const p = &x->plane[0];
669 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
670 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
671 const int step = 1 << (tx_size << 1);
672 const int block_step = (1 << tx_size);
674 const int max_blocks_wide =
675 num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5);
676 const int max_blocks_high =
677 num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5);
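// mb_to_right_edge / mb_to_bottom_edge are stored in 1/8-pel units, so >> 5
// converts the (negative) overhang into 4x4-block units when the block
// extends past the frame edge.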
679 const int bw = 4 * num_4x4_w;
680 const int bh = 4 * num_4x4_h;
682 #if CONFIG_VP9_HIGHBITDEPTH
683 // TODO(jingning): Implement the high bit-depth Hadamard transforms and
684 // remove this check condition.
685 // TODO(marpan): Use this path (model_rd) for 8bit under certain conditions
686 // for now, as the vp9_quantize_fp below for highbitdepth build is slow.
688 (cpi->oxcf.speed > 5 && cpi->common.frame_type != KEY_FRAME &&
689 bsize < BLOCK_32X32)) {
690 unsigned int var_y, sse_y;
693 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist,
701 if (cpi->sf.use_simple_block_yrd && cpi->common.frame_type != KEY_FRAME &&
702 (bsize < BLOCK_32X32 ||
704 (bsize < BLOCK_32X32 || cpi->svc.temporal_layer_id > 0)))) {
705 unsigned int var_y, sse_y;
708 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist,
717 // The max tx_size passed in is TX_16X16.
718 assert(tx_size != TX_32X32);
720 vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
721 pd->dst.buf, pd->dst.stride);
723 // Keep track of the row and column of the blocks we use so that we know
724 // if we are in the unrestricted motion border.
725 for (r = 0; r < max_blocks_high; r += block_step) {
726 for (c = 0; c < num_4x4_w; c += block_step) {
727 if (c < max_blocks_wide) {
728 const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
729 tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
730 tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
731 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
732 uint16_t *const eob = &p->eobs[block];
733 const int diff_stride = bw;
734 const int16_t *src_diff;
735 src_diff = &p->src_diff[(r * diff_stride + c) << 2];
739 vpx_hadamard_16x16(src_diff, diff_stride, coeff);
740 vp9_quantize_fp(coeff, 256, x->skip_block, p->round_fp, p->quant_fp,
741 qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
745 vpx_hadamard_8x8(src_diff, diff_stride, coeff);
746 vp9_quantize_fp(coeff, 64, x->skip_block, p->round_fp, p->quant_fp,
747 qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
751 assert(tx_size == TX_4X4);
752 x->fwd_txfm4x4(src_diff, coeff, diff_stride);
753 vp9_quantize_fp(coeff, 16, x->skip_block, p->round_fp, p->quant_fp,
754 qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
758 *skippable &= (*eob == 0);
766 if (*sse < INT64_MAX) {
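// (*sse << 6) >> 2 == *sse << 4: keep the skip-path distortion at 16x the
// spatial-domain SSE, the same scale the model_rd_* paths use (e.g. sse << 4).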
767 *sse = (*sse << 6) >> 2;
769 this_rdc->dist = *sse;
776 for (r = 0; r < max_blocks_high; r += block_step) {
777 for (c = 0; c < num_4x4_w; c += block_step) {
778 if (c < max_blocks_wide) {
779 tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
780 tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
781 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
782 uint16_t *const eob = &p->eobs[block];
785 this_rdc->rate += (int)abs(qcoeff[0]);
787 this_rdc->rate += vpx_satd(qcoeff, step << 4);
789 this_rdc->dist += vp9_block_error_fp(coeff, dqcoeff, step << 4) >> 2;
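// Rate is approximated by the sum of absolute quantized coefficient levels
// (vpx_satd over qcoeff) rather than an exact entropy cost; distortion comes
// from the coefficient-domain block error.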
795 // If skippable is set, rate gets clobbered later.
796 this_rdc->rate <<= (2 + VP9_PROB_COST_SHIFT);
797 this_rdc->rate += (eob_cost << VP9_PROB_COST_SHIFT);
800 static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE plane_bsize,
801 MACROBLOCK *x, MACROBLOCKD *xd,
802 RD_COST *this_rdc, unsigned int *var_y,
803 unsigned int *sse_y, int start_plane,
805 // Note our transform coeffs are scaled by 8 relative to an orthogonal transform.
806 // Hence the quantizer step is also scaled by 8; to get the effective quantizer
807 // we need to divide by 8 before sending it to the modeling function.
812 #if CONFIG_VP9_HIGHBITDEPTH
813 uint64_t tot_var = *var_y;
814 uint64_t tot_sse = *sse_y;
816 uint32_t tot_var = *var_y;
817 uint32_t tot_sse = *sse_y;
823 for (i = start_plane; i <= stop_plane; ++i) {
824 struct macroblock_plane *const p = &x->plane[i];
825 struct macroblockd_plane *const pd = &xd->plane[i];
826 const uint32_t dc_quant = pd->dequant[0];
827 const uint32_t ac_quant = pd->dequant[1];
828 const BLOCK_SIZE bs = plane_bsize;
830 if (!x->color_sensitivity[i - 1]) continue;
832 var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
833 pd->dst.stride, &sse);
838 #if CONFIG_VP9_HIGHBITDEPTH
839 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
840 dc_quant >> (xd->bd - 5), &rate, &dist);
842 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
843 dc_quant >> 3, &rate, &dist);
844 #endif // CONFIG_VP9_HIGHBITDEPTH
846 this_rdc->rate += rate >> 1;
847 this_rdc->dist += dist << 3;
849 #if CONFIG_VP9_HIGHBITDEPTH
850 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
851 ac_quant >> (xd->bd - 5), &rate, &dist);
853 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3,
855 #endif // CONFIG_VP9_HIGHBITDEPTH
857 this_rdc->rate += rate;
858 this_rdc->dist += dist << 4;
861 #if CONFIG_VP9_HIGHBITDEPTH
862 *var_y = tot_var > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_var;
863 *sse_y = tot_sse > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_sse;
870 static int get_pred_buffer(PRED_BUFFER *p, int len) {
873 for (i = 0; i < len; i++) {
882 static void free_pred_buffer(PRED_BUFFER *p) {
883 if (p != NULL) p->in_use = 0;
886 static void encode_breakout_test(
887 VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int mi_row, int mi_col,
888 MV_REFERENCE_FRAME ref_frame, PREDICTION_MODE this_mode, unsigned int var_y,
889 unsigned int sse_y, struct buf_2d yv12_mb[][MAX_MB_PLANE], int *rate,
890 int64_t *dist, int *flag_preduv_computed) {
891 MACROBLOCKD *xd = &x->e_mbd;
892 MODE_INFO *const mi = xd->mi[0];
893 const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
894 unsigned int var = var_y, sse = sse_y;
895 // Skipping threshold for ac.
896 unsigned int thresh_ac;
897 // Skipping threshold for dc.
898 unsigned int thresh_dc;
900 if (cpi->use_svc && ref_frame == GOLDEN_FRAME) return;
901 if (mi->mv[0].as_mv.row > 64 || mi->mv[0].as_mv.row < -64 ||
902 mi->mv[0].as_mv.col > 64 || mi->mv[0].as_mv.col < -64)
904 if (x->encode_breakout > 0 && motion_low == 1) {
905 // Set a maximum threshold to avoid a big PSNR loss in the low bit rate
906 // case. Use an extremely low threshold for static frames to limit skipping.
908 const unsigned int max_thresh = 36000;
909 // The encode_breakout input
910 const unsigned int min_thresh =
911 VPXMIN(((unsigned int)x->encode_breakout << 4), max_thresh);
912 #if CONFIG_VP9_HIGHBITDEPTH
913 const int shift = (xd->bd << 1) - 16;
916 // Calculate threshold according to dequant value.
917 thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) >> 3;
918 #if CONFIG_VP9_HIGHBITDEPTH
919 if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
920 thresh_ac = ROUND_POWER_OF_TWO(thresh_ac, shift);
922 #endif // CONFIG_VP9_HIGHBITDEPTH
923 thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);
925 // Adjust ac threshold according to partition size.
927 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
929 thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
930 #if CONFIG_VP9_HIGHBITDEPTH
931 if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
932 thresh_dc = ROUND_POWER_OF_TWO(thresh_dc, shift);
934 #endif // CONFIG_VP9_HIGHBITDEPTH
940 // Y skipping condition checking for ac and dc.
941 if (var <= thresh_ac && (sse - var) <= thresh_dc) {
942 unsigned int sse_u, sse_v;
943 unsigned int var_u, var_v;
944 unsigned int thresh_ac_uv = thresh_ac;
945 unsigned int thresh_dc_uv = thresh_dc;
951 if (!flag_preduv_computed[0] || !flag_preduv_computed[1]) {
952 xd->plane[1].pre[0] = yv12_mb[ref_frame][1];
953 xd->plane[2].pre[0] = yv12_mb[ref_frame][2];
954 vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
957 var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf, x->plane[1].src.stride,
958 xd->plane[1].dst.buf,
959 xd->plane[1].dst.stride, &sse_u);
961 // U skipping condition checking
962 if (((var_u << 2) <= thresh_ac_uv) && (sse_u - var_u <= thresh_dc_uv)) {
963 var_v = cpi->fn_ptr[uv_size].vf(
964 x->plane[2].src.buf, x->plane[2].src.stride, xd->plane[2].dst.buf,
965 xd->plane[2].dst.stride, &sse_v);
967 // V skipping condition checking
968 if (((var_v << 2) <= thresh_ac_uv) && (sse_v - var_v <= thresh_dc_uv)) {
971 // The cost of skip bit needs to be added.
972 *rate = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
973 [INTER_OFFSET(this_mode)];
975 // More on this part of rate
976 // rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
978 // Scaling factor for SSE from spatial domain to frequency
979 // domain is 16. Adjust distortion accordingly.
980 // TODO(yunqingwang): In this function, only y-plane dist is calculated.
982 *dist = (sse << 4); // + ((sse_u + sse_v) << 4);
984 // *disable_skip = 1;
990 struct estimate_block_intra_args {
993 PREDICTION_MODE mode;
998 static void estimate_block_intra(int plane, int block, int row, int col,
999 BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
1001 struct estimate_block_intra_args *const args = arg;
1002 VP9_COMP *const cpi = args->cpi;
1003 MACROBLOCK *const x = args->x;
1004 MACROBLOCKD *const xd = &x->e_mbd;
1005 struct macroblock_plane *const p = &x->plane[0];
1006 struct macroblockd_plane *const pd = &xd->plane[0];
1007 const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
1008 uint8_t *const src_buf_base = p->src.buf;
1009 uint8_t *const dst_buf_base = pd->dst.buf;
1010 const int src_stride = p->src.stride;
1011 const int dst_stride = pd->dst.stride;
1016 p->src.buf = &src_buf_base[4 * (row * src_stride + col)];
1017 pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)];
1018 // Use source buffer as an approximation for the fully reconstructed buffer.
1019 vp9_predict_intra_block(xd, b_width_log2_lookup[plane_bsize], tx_size,
1020 args->mode, x->skip_encode ? p->src.buf : pd->dst.buf,
1021 x->skip_encode ? src_stride : dst_stride, pd->dst.buf,
1022 dst_stride, col, row, plane);
1025 int64_t this_sse = INT64_MAX;
1026 // TODO(jingning): This needs further refactoring.
1027 block_yrd(cpi, x, &this_rdc, &args->skippable, &this_sse, bsize_tx,
1028 VPXMIN(tx_size, TX_16X16), 0);
1030 unsigned int var = 0;
1031 unsigned int sse = 0;
1032 model_rd_for_sb_uv(cpi, plane_bsize, x, xd, &this_rdc, &var, &sse, plane,
1036 p->src.buf = src_buf_base;
1037 pd->dst.buf = dst_buf_base;
1038 args->rdc->rate += this_rdc.rate;
1039 args->rdc->dist += this_rdc.dist;
1042 static const THR_MODES mode_idx[MAX_REF_FRAMES][4] = {
1043 { THR_DC, THR_V_PRED, THR_H_PRED, THR_TM },
1044 { THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV },
1045 { THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG },
1046 { THR_NEARESTA, THR_NEARA, THR_ZEROA, THR_NEWA },
1049 static const PREDICTION_MODE intra_mode_list[] = { DC_PRED, V_PRED, H_PRED,
1052 static int mode_offset(const PREDICTION_MODE mode) {
1053 if (mode >= NEARESTMV) {
1054 return INTER_OFFSET(mode);
1057 case DC_PRED: return 0;
1058 case V_PRED: return 1;
1059 case H_PRED: return 2;
1060 case TM_PRED: return 3;
1066 static INLINE int rd_less_than_thresh_row_mt(int64_t best_rd, int thresh,
1067 const int *const thresh_fact) {
1068 int is_rd_less_than_thresh;
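// thresh_fact acts as a fixed-point multiplier on thresh: the check below is
// best_rd < thresh * (*thresh_fact) / 32 (hence the >> 5).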
1069 is_rd_less_than_thresh =
1070 best_rd < ((int64_t)thresh * (*thresh_fact) >> 5) || thresh == INT_MAX;
1071 return is_rd_less_than_thresh;
1074 static INLINE void update_thresh_freq_fact_row_mt(
1075 VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance,
1076 int thresh_freq_fact_idx, MV_REFERENCE_FRAME ref_frame,
1077 THR_MODES best_mode_idx, PREDICTION_MODE mode) {
1078 THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
1079 int freq_fact_idx = thresh_freq_fact_idx + thr_mode_idx;
1080 int *freq_fact = &tile_data->row_base_thresh_freq_fact[freq_fact_idx];
1081 if (thr_mode_idx == best_mode_idx)
1082 *freq_fact -= (*freq_fact >> 4);
1083 else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV &&
1084 ref_frame == LAST_FRAME && source_variance < 5) {
1085 *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32);
1087 *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC,
1088 cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
1092 static INLINE void update_thresh_freq_fact(
1093 VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance,
1094 BLOCK_SIZE bsize, MV_REFERENCE_FRAME ref_frame, THR_MODES best_mode_idx,
1095 PREDICTION_MODE mode) {
1096 THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
1097 int *freq_fact = &tile_data->thresh_freq_fact[bsize][thr_mode_idx];
1098 if (thr_mode_idx == best_mode_idx)
1099 *freq_fact -= (*freq_fact >> 4);
1100 else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV &&
1101 ref_frame == LAST_FRAME && source_variance < 5) {
1102 *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32);
1104 *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC,
1105 cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
1109 void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
1110 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
1111 MACROBLOCKD *const xd = &x->e_mbd;
1112 MODE_INFO *const mi = xd->mi[0];
1113 RD_COST this_rdc, best_rdc;
1114 PREDICTION_MODE this_mode;
1115 struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
1116 const TX_SIZE intra_tx_size =
1117 VPXMIN(max_txsize_lookup[bsize],
1118 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
1119 MODE_INFO *const mic = xd->mi[0];
1121 const MODE_INFO *above_mi = xd->above_mi;
1122 const MODE_INFO *left_mi = xd->left_mi;
1123 const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
1124 const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
1125 bmode_costs = cpi->y_mode_costs[A][L];
1128 vp9_rd_cost_reset(&best_rdc);
1129 vp9_rd_cost_reset(&this_rdc);
1131 mi->ref_frame[0] = INTRA_FRAME;
1132 // Initialize interp_filter here so we do not have to check for inter block
1133 // modes in get_pred_context_switchable_interp()
1134 mi->interp_filter = SWITCHABLE_FILTERS;
1136 mi->mv[0].as_int = INVALID_MV;
1137 mi->uv_mode = DC_PRED;
1138 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
1140 // Change the limit of this loop to add other intra prediction modes in the loop.
1142 for (this_mode = DC_PRED; this_mode <= H_PRED; ++this_mode) {
1143 this_rdc.dist = this_rdc.rate = 0;
1144 args.mode = this_mode;
1146 args.rdc = &this_rdc;
1147 mi->tx_size = intra_tx_size;
1148 vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
1150 if (args.skippable) {
1151 x->skip_txfm[0] = SKIP_TXFM_AC_DC;
1152 this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
1154 x->skip_txfm[0] = SKIP_TXFM_NONE;
1155 this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
1157 this_rdc.rate += bmode_costs[this_mode];
1158 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
1160 if (this_rdc.rdcost < best_rdc.rdcost) {
1161 best_rdc = this_rdc;
1162 mi->mode = this_mode;
1166 *rd_cost = best_rdc;
1169 static void init_ref_frame_cost(VP9_COMMON *const cm, MACROBLOCKD *const xd,
1170 int ref_frame_cost[MAX_REF_FRAMES]) {
1171 vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
1172 vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
1173 vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);
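// Reference-frame costs follow the single-reference coding tree: one bit for
// intra vs. inter, then ref_single_p1 separates LAST from {GOLDEN, ALTREF},
// and ref_single_p2 separates GOLDEN from ALTREF.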
1175 ref_frame_cost[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
1176 ref_frame_cost[LAST_FRAME] = ref_frame_cost[GOLDEN_FRAME] =
1177 ref_frame_cost[ALTREF_FRAME] = vp9_cost_bit(intra_inter_p, 1);
1179 ref_frame_cost[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
1180 ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
1181 ref_frame_cost[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
1182 ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
1183 ref_frame_cost[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
1187 MV_REFERENCE_FRAME ref_frame;
1188 PREDICTION_MODE pred_mode;
1191 #define RT_INTER_MODES 12
1192 static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
1193 { LAST_FRAME, ZEROMV }, { LAST_FRAME, NEARESTMV },
1194 { GOLDEN_FRAME, ZEROMV }, { LAST_FRAME, NEARMV },
1195 { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEARESTMV },
1196 { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV },
1197 { ALTREF_FRAME, ZEROMV }, { ALTREF_FRAME, NEARESTMV },
1198 { ALTREF_FRAME, NEARMV }, { ALTREF_FRAME, NEWMV }
1201 #define RT_INTER_MODES_SVC 8
1202 static const REF_MODE ref_mode_set_svc[RT_INTER_MODES_SVC] = {
1203 { LAST_FRAME, ZEROMV }, { LAST_FRAME, NEARESTMV },
1204 { LAST_FRAME, NEARMV }, { GOLDEN_FRAME, ZEROMV },
1205 { GOLDEN_FRAME, NEARESTMV }, { GOLDEN_FRAME, NEARMV },
1206 { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEWMV }
1209 static INLINE void find_predictors(
1210 VP9_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
1211 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
1212 int const_motion[MAX_REF_FRAMES], int *ref_frame_skip_mask,
1213 const int flag_list[4], TileDataEnc *tile_data, int mi_row, int mi_col,
1214 struct buf_2d yv12_mb[4][MAX_MB_PLANE], BLOCK_SIZE bsize,
1215 int force_skip_low_temp_var, int comp_pred_allowed) {
1216 VP9_COMMON *const cm = &cpi->common;
1217 MACROBLOCKD *const xd = &x->e_mbd;
1218 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
1219 TileInfo *const tile_info = &tile_data->tile_info;
1220 // TODO(jingning) placeholder for inter-frame non-RD mode decision.
1221 x->pred_mv_sad[ref_frame] = INT_MAX;
1222 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
1223 frame_mv[ZEROMV][ref_frame].as_int = 0;
1224 // This needs various further optimizations; to be continued.
1225 if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
1226 int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
1227 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
1228 vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
1229 if (cm->use_prev_frame_mvs || comp_pred_allowed) {
1230 vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
1231 x->mbmi_ext->mode_context);
1233 const_motion[ref_frame] =
1234 mv_refs_rt(cpi, cm, x, xd, tile_info, xd->mi[0], ref_frame,
1235 candidates, &frame_mv[NEWMV][ref_frame], mi_row, mi_col,
1236 (int)(cpi->svc.use_base_mv && cpi->svc.spatial_layer_id));
1238 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
1239 &frame_mv[NEARESTMV][ref_frame],
1240 &frame_mv[NEARMV][ref_frame]);
1241 // Early exit for golden frame if force_skip_low_temp_var is set.
1242 if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8 &&
1243 !(force_skip_low_temp_var && ref_frame == GOLDEN_FRAME)) {
1244 vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
1248 *ref_frame_skip_mask |= (1 << ref_frame);
1252 static void vp9_NEWMV_diff_bias(const NOISE_ESTIMATE *ne, MACROBLOCKD *xd,
1253 PREDICTION_MODE this_mode, RD_COST *this_rdc,
1254 BLOCK_SIZE bsize, int mv_row, int mv_col,
1255 int is_last_frame, int lowvar_highsumdiff,
1257 // Bias against MVs associated with NEWMV mode that are very different from
1258 // top/left neighbors.
1259 if (this_mode == NEWMV) {
1260 int al_mv_average_row;
1261 int al_mv_average_col;
1262 int left_row, left_col;
1263 int row_diff, col_diff;
1264 int above_mv_valid = 0;
1265 int left_mv_valid = 0;
1270 above_mv_valid = xd->above_mi->mv[0].as_int != INVALID_MV;
1271 above_row = xd->above_mi->mv[0].as_mv.row;
1272 above_col = xd->above_mi->mv[0].as_mv.col;
1275 left_mv_valid = xd->left_mi->mv[0].as_int != INVALID_MV;
1276 left_row = xd->left_mi->mv[0].as_mv.row;
1277 left_col = xd->left_mi->mv[0].as_mv.col;
1279 if (above_mv_valid && left_mv_valid) {
1280 al_mv_average_row = (above_row + left_row + 1) >> 1;
1281 al_mv_average_col = (above_col + left_col + 1) >> 1;
1282 } else if (above_mv_valid) {
1283 al_mv_average_row = above_row;
1284 al_mv_average_col = above_col;
1285 } else if (left_mv_valid) {
1286 al_mv_average_row = left_row;
1287 al_mv_average_col = left_col;
1289 al_mv_average_row = al_mv_average_col = 0;
1291 row_diff = (al_mv_average_row - mv_row);
1292 col_diff = (al_mv_average_col - mv_col);
1293 if (row_diff > 48 || row_diff < -48 || col_diff > 48 || col_diff < -48) {
1294 if (bsize > BLOCK_32X32)
1295 this_rdc->rdcost = this_rdc->rdcost << 1;
1297 this_rdc->rdcost = 3 * this_rdc->rdcost >> 1;
1300 // If noise estimation is enabled, and estimated level is above threshold,
1301 // add a bias to LAST reference with small motion, for large blocks.
1302 if (ne->enabled && ne->level >= kMedium && bsize >= BLOCK_32X32 &&
1303 is_last_frame && mv_row < 8 && mv_row > -8 && mv_col < 8 && mv_col > -8)
1304 this_rdc->rdcost = 7 * (this_rdc->rdcost >> 3);
1305 else if (lowvar_highsumdiff && !is_skin && bsize >= BLOCK_16X16 &&
1306 is_last_frame && mv_row < 16 && mv_row > -16 && mv_col < 16 &&
1308 this_rdc->rdcost = 7 * (this_rdc->rdcost >> 3);
1311 #if CONFIG_VP9_TEMPORAL_DENOISING
1312 static void vp9_pickmode_ctx_den_update(
1313 VP9_PICKMODE_CTX_DEN *ctx_den, int64_t zero_last_cost_orig,
1314 int ref_frame_cost[MAX_REF_FRAMES],
1315 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int reuse_inter_pred,
1316 BEST_PICKMODE *bp) {
1317 ctx_den->zero_last_cost_orig = zero_last_cost_orig;
1318 ctx_den->ref_frame_cost = ref_frame_cost;
1319 ctx_den->frame_mv = frame_mv;
1320 ctx_den->reuse_inter_pred = reuse_inter_pred;
1321 ctx_den->best_tx_size = bp->best_tx_size;
1322 ctx_den->best_mode = bp->best_mode;
1323 ctx_den->best_ref_frame = bp->best_ref_frame;
1324 ctx_den->best_pred_filter = bp->best_pred_filter;
1325 ctx_den->best_mode_skip_txfm = bp->best_mode_skip_txfm;
1328 static void recheck_zeromv_after_denoising(
1329 VP9_COMP *cpi, MODE_INFO *const mi, MACROBLOCK *x, MACROBLOCKD *const xd,
1330 VP9_DENOISER_DECISION decision, VP9_PICKMODE_CTX_DEN *ctx_den,
1331 struct buf_2d yv12_mb[4][MAX_MB_PLANE], RD_COST *best_rdc, BLOCK_SIZE bsize,
1332 int mi_row, int mi_col) {
1333 // If INTRA or GOLDEN reference was selected, re-evaluate ZEROMV on
1334 // denoised result. Only do this under noise conditions, and if rdcost of
1335 // ZEROMV on the original source is not significantly higher than the rdcost of the best mode.
1337 if (cpi->noise_estimate.enabled && cpi->noise_estimate.level > kLow &&
1338 ctx_den->zero_last_cost_orig < (best_rdc->rdcost << 3) &&
1339 ((ctx_den->best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) ||
1340 (ctx_den->best_ref_frame == GOLDEN_FRAME &&
1341 cpi->svc.number_spatial_layers == 1 &&
1342 decision == FILTER_ZEROMV_BLOCK))) {
1343 // Check if we should pick ZEROMV on denoised signal.
1346 uint32_t var_y = UINT_MAX;
1347 uint32_t sse_y = UINT_MAX;
1350 mi->ref_frame[0] = LAST_FRAME;
1351 mi->ref_frame[1] = NONE;
1352 mi->mv[0].as_int = 0;
1353 mi->interp_filter = EIGHTTAP;
1354 if (cpi->sf.default_interp_filter == BILINEAR) mi->interp_filter = BILINEAR;
1355 xd->plane[0].pre[0] = yv12_mb[LAST_FRAME][0];
1356 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
1357 model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
1358 this_rdc.rate = rate + ctx_den->ref_frame_cost[LAST_FRAME] +
1359 cpi->inter_mode_cost[x->mbmi_ext->mode_context[LAST_FRAME]]
1360 [INTER_OFFSET(ZEROMV)];
1361 this_rdc.dist = dist;
1362 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, rate, dist);
1363 // Don't switch to ZEROMV if the rdcost for ZEROMV on denoised source
1364 // is higher than best_ref mode (on original source).
1365 if (this_rdc.rdcost > best_rdc->rdcost) {
1366 this_rdc = *best_rdc;
1367 mi->mode = ctx_den->best_mode;
1368 mi->ref_frame[0] = ctx_den->best_ref_frame;
1369 mi->interp_filter = ctx_den->best_pred_filter;
1370 if (ctx_den->best_ref_frame == INTRA_FRAME) {
1371 mi->mv[0].as_int = INVALID_MV;
1372 mi->interp_filter = SWITCHABLE_FILTERS;
1373 } else if (ctx_den->best_ref_frame == GOLDEN_FRAME) {
1375 ctx_den->frame_mv[ctx_den->best_mode][ctx_den->best_ref_frame]
1377 if (ctx_den->reuse_inter_pred) {
1378 xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0];
1379 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
1382 mi->tx_size = ctx_den->best_tx_size;
1383 x->skip_txfm[0] = ctx_den->best_mode_skip_txfm;
1385 ctx_den->best_ref_frame = LAST_FRAME;
1386 *best_rdc = this_rdc;
1390 #endif // CONFIG_VP9_TEMPORAL_DENOISING
1392 static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low, int mi_row,
1393 int mi_col, BLOCK_SIZE bsize) {
1394 const int i = (mi_row & 0x7) >> 1;
1395 const int j = (mi_col & 0x7) >> 1;
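// i, j give the position of this block within its 64x64 superblock in 16x16
// (2-mi) units, used to index pos_shift_16x16[][] for the 16x16 cases below.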
1396 int force_skip_low_temp_var = 0;
1397 // Set force_skip_low_temp_var based on the block size and block offset.
1398 if (bsize == BLOCK_64X64) {
1399 force_skip_low_temp_var = variance_low[0];
1400 } else if (bsize == BLOCK_64X32) {
1401 if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
1402 force_skip_low_temp_var = variance_low[1];
1403 } else if (!(mi_col & 0x7) && (mi_row & 0x7)) {
1404 force_skip_low_temp_var = variance_low[2];
1406 } else if (bsize == BLOCK_32X64) {
1407 if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
1408 force_skip_low_temp_var = variance_low[3];
1409 } else if ((mi_col & 0x7) && !(mi_row & 0x7)) {
1410 force_skip_low_temp_var = variance_low[4];
1412 } else if (bsize == BLOCK_32X32) {
1413 if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
1414 force_skip_low_temp_var = variance_low[5];
1415 } else if ((mi_col & 0x7) && !(mi_row & 0x7)) {
1416 force_skip_low_temp_var = variance_low[6];
1417 } else if (!(mi_col & 0x7) && (mi_row & 0x7)) {
1418 force_skip_low_temp_var = variance_low[7];
1419 } else if ((mi_col & 0x7) && (mi_row & 0x7)) {
1420 force_skip_low_temp_var = variance_low[8];
1422 } else if (bsize == BLOCK_16X16) {
1423 force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]];
1424 } else if (bsize == BLOCK_32X16) {
1425 // The col shift index for the second 16x16 block.
1426 const int j2 = ((mi_col + 2) & 0x7) >> 1;
1427 // Only if each 16x16 block inside has low temporal variance.
1428 force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] &&
1429 variance_low[pos_shift_16x16[i][j2]];
1430 } else if (bsize == BLOCK_16X32) {
1431 // The row shift index for the second 16x16 block.
1432 const int i2 = ((mi_row + 2) & 0x7) >> 1;
1433 force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] &&
1434 variance_low[pos_shift_16x16[i2][j]];
1436 return force_skip_low_temp_var;
1439 static void search_filter_ref(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *this_rdc,
1440 int mi_row, int mi_col, PRED_BUFFER *tmp,
1441 BLOCK_SIZE bsize, int reuse_inter_pred,
1442 PRED_BUFFER **this_mode_pred, unsigned int *var_y,
1443 unsigned int *sse_y) {
1444 MACROBLOCKD *const xd = &x->e_mbd;
1445 MODE_INFO *const mi = xd->mi[0];
1446 struct macroblockd_plane *const pd = &xd->plane[0];
1447 const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
1449 int pf_rate[3] = { 0 };
1450 int64_t pf_dist[3] = { 0 };
1451 int curr_rate[3] = { 0 };
1452 unsigned int pf_var[3] = { 0 };
1453 unsigned int pf_sse[3] = { 0 };
1454 TX_SIZE pf_tx_size[3] = { 0 };
1455 int64_t best_cost = INT64_MAX;
1456 INTERP_FILTER best_filter = SWITCHABLE, filter;
1457 PRED_BUFFER *current_pred = *this_mode_pred;
1458 uint8_t skip_txfm = SKIP_TXFM_NONE;
1460 for (filter = EIGHTTAP; filter <= EIGHTTAP_SMOOTH; ++filter) {
1462 mi->interp_filter = filter;
1463 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
1464 model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[filter], &pf_dist[filter],
1465 &pf_var[filter], &pf_sse[filter]);
1466 curr_rate[filter] = pf_rate[filter];
1467 pf_rate[filter] += vp9_get_switchable_rate(cpi, xd);
1468 cost = RDCOST(x->rdmult, x->rddiv, pf_rate[filter], pf_dist[filter]);
1469 pf_tx_size[filter] = mi->tx_size;
1470 if (cost < best_cost) {
1471 best_filter = filter;
1473 skip_txfm = x->skip_txfm[0];
1475 if (reuse_inter_pred) {
1476 if (*this_mode_pred != current_pred) {
1477 free_pred_buffer(*this_mode_pred);
1478 *this_mode_pred = current_pred;
1480 current_pred = &tmp[get_pred_buffer(tmp, 3)];
1481 pd->dst.buf = current_pred->data;
1482 pd->dst.stride = bw;
1487 if (reuse_inter_pred && *this_mode_pred != current_pred)
1488 free_pred_buffer(current_pred);
1490 mi->interp_filter = best_filter;
1491 mi->tx_size = pf_tx_size[best_filter];
1492 this_rdc->rate = curr_rate[best_filter];
1493 this_rdc->dist = pf_dist[best_filter];
1494 *var_y = pf_var[best_filter];
1495 *sse_y = pf_sse[best_filter];
1496 x->skip_txfm[0] = skip_txfm;
1497 if (reuse_inter_pred) {
1498 pd->dst.buf = (*this_mode_pred)->data;
1499 pd->dst.stride = (*this_mode_pred)->stride;
1503 static int search_new_mv(VP9_COMP *cpi, MACROBLOCK *x,
1504 int_mv frame_mv[][MAX_REF_FRAMES],
1505 MV_REFERENCE_FRAME ref_frame, int gf_temporal_ref,
1506 BLOCK_SIZE bsize, int mi_row, int mi_col,
1507 int best_pred_sad, int *rate_mv,
1508 unsigned int best_sse_sofar, RD_COST *best_rdc) {
1509 SVC *const svc = &cpi->svc;
1510 MACROBLOCKD *const xd = &x->e_mbd;
1511 MODE_INFO *const mi = xd->mi[0];
1512 SPEED_FEATURES *const sf = &cpi->sf;
1514 if (ref_frame > LAST_FRAME && gf_temporal_ref &&
1515 cpi->oxcf.rc_mode == VPX_CBR) {
1518 int cost_list[5] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX };
1520 if (bsize < BLOCK_16X16) return -1;
1522 tmp_sad = vp9_int_pro_motion_estimation(
1523 cpi, x, bsize, mi_row, mi_col,
1524 &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv);
1526 if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) return -1;
1527 if (tmp_sad + (num_pels_log2_lookup[bsize] << 4) > best_pred_sad) return -1;
1529 frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int;
1530 *rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv,
1531 &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
1532 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
1533 frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
1534 frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;
1536 cpi->find_fractional_mv_step(
1537 x, &frame_mv[NEWMV][ref_frame].as_mv,
1538 &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
1539 cpi->common.allow_high_precision_mv, x->errorperbit,
1540 &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
1541 cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
1542 x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref_frame], NULL, 0, 0);
1543 } else if (svc->use_base_mv && svc->spatial_layer_id) {
1544 if (frame_mv[NEWMV][ref_frame].as_int != INVALID_MV) {
1545 const int pre_stride = xd->plane[0].pre[0].stride;
1546 unsigned int base_mv_sse = UINT_MAX;
1547 int scale = (cpi->rc.avg_frame_low_motion > 60) ? 2 : 4;
1548 const uint8_t *const pre_buf =
1549 xd->plane[0].pre[0].buf +
1550 (frame_mv[NEWMV][ref_frame].as_mv.row >> 3) * pre_stride +
1551 (frame_mv[NEWMV][ref_frame].as_mv.col >> 3);
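// The 1/8-pel base-layer mv is converted to a full-pel offset (>> 3) into the
// reference buffer so that the SSE of the base-mv prediction can be measured
// below.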
1552 cpi->fn_ptr[bsize].vf(x->plane[0].src.buf, x->plane[0].src.stride,
1553 pre_buf, pre_stride, &base_mv_sse);
1555 // Exit NEWMV search if base_mv is (0,0) && bsize < BLOCK_16x16,
1556 // for SVC encoding.
1557 if (cpi->use_svc && svc->use_base_mv && bsize < BLOCK_16X16 &&
1558 frame_mv[NEWMV][ref_frame].as_mv.row == 0 &&
1559 frame_mv[NEWMV][ref_frame].as_mv.col == 0)
1562 // Exit NEWMV search if base_mv_sse is large.
1563 if (sf->base_mv_aggressive && base_mv_sse > (best_sse_sofar << scale))
1565 if (base_mv_sse < (best_sse_sofar << 1)) {
1566 // Base layer mv is good.
1567 // Exit NEWMV search if the base_mv is (0, 0) and sse is low, since
1568 // (0, 0) mode is already tested.
1569 unsigned int base_mv_sse_normalized =
1571 (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
1572 if (sf->base_mv_aggressive && base_mv_sse <= best_sse_sofar &&
1573 base_mv_sse_normalized < 400 &&
1574 frame_mv[NEWMV][ref_frame].as_mv.row == 0 &&
1575 frame_mv[NEWMV][ref_frame].as_mv.col == 0)
1577 if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
1578 &frame_mv[NEWMV][ref_frame], rate_mv,
1579 best_rdc->rdcost, 1)) {
1582 } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
1583 &frame_mv[NEWMV][ref_frame], rate_mv,
1584 best_rdc->rdcost, 0)) {
1587 } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
1588 &frame_mv[NEWMV][ref_frame], rate_mv,
1589 best_rdc->rdcost, 0)) {
1592 } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
1593 &frame_mv[NEWMV][ref_frame], rate_mv,
1594 best_rdc->rdcost, 0)) {
1601 static INLINE void init_best_pickmode(BEST_PICKMODE *bp) {
1602 bp->best_mode = ZEROMV;
1603 bp->best_ref_frame = LAST_FRAME;
1604 bp->best_tx_size = TX_SIZES;
1605 bp->best_intra_tx_size = TX_SIZES;
1606 bp->best_pred_filter = EIGHTTAP;
1607 bp->best_mode_skip_txfm = SKIP_TXFM_NONE;
1608 bp->best_second_ref_frame = NONE;
1609 bp->best_pred = NULL;
1612 void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
1613 int mi_row, int mi_col, RD_COST *rd_cost,
1614 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
1615 VP9_COMMON *const cm = &cpi->common;
1616 SPEED_FEATURES *const sf = &cpi->sf;
1617 SVC *const svc = &cpi->svc;
1618 MACROBLOCKD *const xd = &x->e_mbd;
1619 MODE_INFO *const mi = xd->mi[0];
1620 struct macroblockd_plane *const pd = &xd->plane[0];
1622 BEST_PICKMODE best_pickmode;
1624 MV_REFERENCE_FRAME ref_frame;
1625 MV_REFERENCE_FRAME usable_ref_frame, second_ref_frame;
1626 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
1627 uint8_t mode_checked[MB_MODE_COUNT][MAX_REF_FRAMES];
1628 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
1629 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
1631 RD_COST this_rdc, best_rdc;
1632 // var_y and sse_y are saved to be used in the skip checking below.
1633 unsigned int var_y = UINT_MAX;
1634 unsigned int sse_y = UINT_MAX;
1635 const int intra_cost_penalty =
1636 vp9_get_intra_cost_penalty(cpi, bsize, cm->base_qindex, cm->y_dc_delta_q);
1637 int64_t inter_mode_thresh =
1638 RDCOST(x->rdmult, x->rddiv, intra_cost_penalty, 0);
1639 const int *const rd_threshes = cpi->rd.threshes[mi->segment_id][bsize];
1640 const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
1641 int thresh_freq_fact_idx = (sb_row * BLOCK_SIZES + bsize) * MAX_MODES;
1642 const int *const rd_thresh_freq_fact =
1643 (cpi->sf.adaptive_rd_thresh_row_mt)
1644 ? &(tile_data->row_base_thresh_freq_fact[thresh_freq_fact_idx])
1645 : tile_data->thresh_freq_fact[bsize];
1647 INTERP_FILTER filter_ref;
1648 const int bsl = mi_width_log2_lookup[bsize];
1649 const int pred_filter_search =
1650 cm->interp_filter == SWITCHABLE
1651 ? (((mi_row + mi_col) >> bsl) +
1652 get_chessboard_index(cm->current_video_frame)) &
1655 int const_motion[MAX_REF_FRAMES] = { 0 };
1656 const int bh = num_4x4_blocks_high_lookup[bsize] << 2;
1657 const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
1658 // For speed 6, the result of the interp filter search is reused later in the actual encoding process.
1660 // tmp[3] points to dst buffer, and the other 3 point to allocated buffers.
1662 DECLARE_ALIGNED(16, uint8_t, pred_buf[3 * 64 * 64]);
1663 #if CONFIG_VP9_HIGHBITDEPTH
1664 DECLARE_ALIGNED(16, uint16_t, pred_buf_16[3 * 64 * 64]);
1666 struct buf_2d orig_dst = pd->dst;
1667 PRED_BUFFER *this_mode_pred = NULL;
1668 const int pixels_in_block = bh * bw;
1669 int reuse_inter_pred = cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready;
1670 int ref_frame_skip_mask = 0;
1672 int best_pred_sad = INT_MAX;
1673 int best_early_term = 0;
1674 int ref_frame_cost[MAX_REF_FRAMES];
1675 int svc_force_zero_mode[3] = { 0 };
1676 int perform_intra_pred = 1;
1677 int use_golden_nonzeromv = 1;
1678 int force_skip_low_temp_var = 0;
1679 int skip_ref_find_pred[4] = { 0 };
1680 unsigned int sse_zeromv_normalized = UINT_MAX;
1681 unsigned int best_sse_sofar = UINT_MAX;
1682 int gf_temporal_ref = 0;
1683 #if CONFIG_VP9_TEMPORAL_DENOISING
1684 VP9_PICKMODE_CTX_DEN ctx_den;
1685 int64_t zero_last_cost_orig = INT64_MAX;
1686 int denoise_svc_pickmode = 1;
1688 INTERP_FILTER filter_gf_svc = EIGHTTAP;
1689 MV_REFERENCE_FRAME inter_layer_ref = GOLDEN_FRAME;
1690 const struct segmentation *const seg = &cm->seg;
1692 int num_inter_modes = (cpi->use_svc) ? RT_INTER_MODES_SVC : RT_INTER_MODES;
1693 int flag_svc_subpel = 0;
1697 unsigned int thresh_svc_skip_golden = 500;
1698 int scene_change_detected =
1699 cpi->rc.high_source_sad ||
1700 (cpi->use_svc && cpi->svc.high_source_sad_superframe);
1702 init_best_pickmode(&best_pickmode);
1704 x->source_variance = UINT_MAX;
1705 if (cpi->sf.default_interp_filter == BILINEAR) {
1706 best_pickmode.best_pred_filter = BILINEAR;
1707 filter_gf_svc = BILINEAR;
1709 if (cpi->use_svc && svc->spatial_layer_id > 0) {
1711 LAYER_IDS_TO_IDX(svc->spatial_layer_id - 1, svc->temporal_layer_id,
1712 svc->number_temporal_layers);
1713 LAYER_CONTEXT *const lc = &svc->layer_context[layer];
1714 if (lc->scaling_factor_num == lc->scaling_factor_den) no_scaling = 1;
1716 if (svc->spatial_layer_id > 0 &&
1717 (svc->high_source_sad_superframe || no_scaling))
1718 thresh_svc_skip_golden = 0;
1719 // Lower the skip threshold if lower spatial layer is better quality relative
1720 // to current layer.
1721 else if (svc->spatial_layer_id > 0 && cm->base_qindex > 150 &&
1722 cm->base_qindex > svc->lower_layer_qindex + 15)
1723 thresh_svc_skip_golden = 100;
1724 // Increase skip threshold if lower spatial layer is lower quality relative
1725 // to current layer.
1726 else if (svc->spatial_layer_id > 0 && cm->base_qindex < 140 &&
1727 cm->base_qindex < svc->lower_layer_qindex - 20)
1728 thresh_svc_skip_golden = 1000;
1730 if (!cpi->use_svc ||
1731 (svc->use_gf_temporal_ref_current_layer &&
1732 !svc->layer_context[svc->temporal_layer_id].is_key_frame)) {
1733 gf_temporal_ref = 1;
1734 if (cpi->rc.avg_frame_low_motion > 70)
      thresh_svc_skip_golden = 500;
    else
      thresh_svc_skip_golden = 0;
  }
1740 init_ref_frame_cost(cm, xd, ref_frame_cost);
1741 memset(&mode_checked[0][0], 0, MB_MODE_COUNT * MAX_REF_FRAMES);
1743 if (reuse_inter_pred) {
1745 for (i = 0; i < 3; i++) {
1746 #if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth)
        tmp[i].data = CONVERT_TO_BYTEPTR(&pred_buf_16[pixels_in_block * i]);
      else
        tmp[i].data = &pred_buf[pixels_in_block * i];
#else
      tmp[i].data = &pred_buf[pixels_in_block * i];
#endif  // CONFIG_VP9_HIGHBITDEPTH
      tmp[i].stride = bw;
      tmp[i].in_use = 0;
    }
    tmp[3].data = pd->dst.buf;
    tmp[3].stride = pd->dst.stride;
    tmp[3].in_use = 0;
  }
1762 x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
1765 // Instead of using vp9_get_pred_context_switchable_interp(xd) to assign
1766 // filter_ref, we use a less strict condition on assigning filter_ref.
  // This is to reduce the probability of entering the flow of not assigning
  // filter_ref and then skipping the filter search.
1769 filter_ref = cm->interp_filter;
1770 if (cpi->sf.default_interp_filter != BILINEAR) {
1771 if (xd->above_mi && is_inter_block(xd->above_mi))
1772 filter_ref = xd->above_mi->interp_filter;
1773 else if (xd->left_mi && is_inter_block(xd->left_mi))
      filter_ref = xd->left_mi->interp_filter;
  }

  // Initialize mode decisions.
1778 vp9_rd_cost_reset(&best_rdc);
1779 vp9_rd_cost_reset(rd_cost);
1780 mi->sb_type = bsize;
1781 mi->ref_frame[0] = NONE;
1782 mi->ref_frame[1] = NONE;
  mi->tx_size =
      VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[cm->tx_mode]);
1787 if (sf->short_circuit_flat_blocks || sf->limit_newmv_early_exit) {
1788 #if CONFIG_VP9_HIGHBITDEPTH
1789 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
1790 x->source_variance = vp9_high_get_sby_perpixel_variance(
          cpi, &x->plane[0].src, bsize, xd->bd);
    else
#endif  // CONFIG_VP9_HIGHBITDEPTH
1794 x->source_variance =
1795 vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
1797 if (cpi->oxcf.content == VP9E_CONTENT_SCREEN && mi->segment_id > 0 &&
        x->zero_temp_sad_source && x->source_variance == 0) {
      mi->segment_id = 0;
      vp9_init_plane_quantizers(cpi, x);
    }
  }
1804 #if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0) {
    if (cpi->use_svc) {
      int layer =
          LAYER_IDS_TO_IDX(svc->spatial_layer_id, svc->temporal_layer_id,
                           svc->number_temporal_layers);
      LAYER_CONTEXT *lc = &svc->layer_context[layer];
      denoise_svc_pickmode = denoise_svc(cpi) && !lc->is_key_frame;
    }
    if (cpi->denoiser.denoising_level > kDenLowLow && denoise_svc_pickmode)
      vp9_denoiser_reset_frame_stats(ctx);
  }
#endif  // CONFIG_VP9_TEMPORAL_DENOISING
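  // Pick the highest reference frame that will be considered for this block.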
1818 if (cpi->rc.frames_since_golden == 0 && gf_temporal_ref &&
1819 !cpi->rc.alt_ref_gf_group && !cpi->rc.last_frame_is_src_altref) {
    usable_ref_frame = LAST_FRAME;
  } else {
    usable_ref_frame = GOLDEN_FRAME;
  }
1825 if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) {
1826 if (cpi->rc.alt_ref_gf_group || cpi->rc.is_src_frame_alt_ref)
1827 usable_ref_frame = ALTREF_FRAME;
1829 if (cpi->rc.is_src_frame_alt_ref) {
1830 skip_ref_find_pred[LAST_FRAME] = 1;
      skip_ref_find_pred[GOLDEN_FRAME] = 1;
    }
    if (!cm->show_frame) {
1834 if (cpi->rc.frames_since_key == 1) {
1835 usable_ref_frame = LAST_FRAME;
1836 skip_ref_find_pred[GOLDEN_FRAME] = 1;
        skip_ref_find_pred[ALTREF_FRAME] = 1;
      }
    }
  }
  // For SVC mode, on spatial_layer_id > 0: if the reference has a different
  // scale, constrain the inter mode to only test zero motion.
1844 if (cpi->use_svc && svc->force_zero_mode_spatial_ref &&
1845 svc->spatial_layer_id > 0 && !gf_temporal_ref) {
1846 if (cpi->ref_frame_flags & flag_list[LAST_FRAME]) {
1847 struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
1848 if (vp9_is_scaled(sf)) {
1849 svc_force_zero_mode[LAST_FRAME - 1] = 1;
        inter_layer_ref = LAST_FRAME;
      }
    }
    if (cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) {
1854 struct scale_factors *const sf = &cm->frame_refs[GOLDEN_FRAME - 1].sf;
1855 if (vp9_is_scaled(sf)) {
1856 svc_force_zero_mode[GOLDEN_FRAME - 1] = 1;
        inter_layer_ref = GOLDEN_FRAME;
      }
    }
  }
1862 if (cpi->sf.short_circuit_low_temp_var) {
1863 force_skip_low_temp_var =
1864 get_force_skip_low_temp_var(&x->variance_low[0], mi_row, mi_col, bsize);
1865 // If force_skip_low_temp_var is set, and for short circuit mode = 1 and 3,
1866 // skip golden reference.
1867 if ((cpi->sf.short_circuit_low_temp_var == 1 ||
1868 cpi->sf.short_circuit_low_temp_var == 3) &&
1869 force_skip_low_temp_var) {
      usable_ref_frame = LAST_FRAME;
    }
  }
1874 if (sf->disable_golden_ref && (x->content_state_sb != kVeryHighSad ||
1875 cpi->rc.avg_frame_low_motion < 60))
1876 usable_ref_frame = LAST_FRAME;
1878 if (!((cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) &&
1879 !svc_force_zero_mode[GOLDEN_FRAME - 1] && !force_skip_low_temp_var))
1880 use_golden_nonzeromv = 0;
1882 if (cpi->oxcf.speed >= 8 && !cpi->use_svc &&
1883 ((cpi->rc.frames_since_golden + 1) < x->last_sb_high_content ||
1884 x->last_sb_high_content > 40 || cpi->rc.frames_since_golden > 120))
1885 usable_ref_frame = LAST_FRAME;
1887 // Compound prediction modes: (0,0) on LAST/GOLDEN and ARF.
1888 if (cm->reference_mode == REFERENCE_MODE_SELECT &&
      cpi->sf.use_compound_nonrd_pickmode && usable_ref_frame == ALTREF_FRAME)
    comp_modes = 2;
1892 // If the segment reference frame feature is enabled and it's set to GOLDEN
1893 // reference, then make sure we don't skip checking GOLDEN, this is to
1894 // prevent possibility of not picking any mode.
1895 if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
1896 get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) == GOLDEN_FRAME) {
1897 usable_ref_frame = GOLDEN_FRAME;
1898 skip_ref_find_pred[GOLDEN_FRAME] = 0;
    thresh_svc_skip_golden = 0;
  }
1902 for (ref_frame = LAST_FRAME; ref_frame <= usable_ref_frame; ++ref_frame) {
1903 // Skip find_predictor if the reference frame is not in the
1904 // ref_frame_flags (i.e., not used as a reference for this frame).
1905 skip_ref_find_pred[ref_frame] =
1906 !(cpi->ref_frame_flags & flag_list[ref_frame]);
1907 if (!skip_ref_find_pred[ref_frame]) {
1908 find_predictors(cpi, x, ref_frame, frame_mv, const_motion,
1909 &ref_frame_skip_mask, flag_list, tile_data, mi_row,
                      mi_col, yv12_mb, bsize, force_skip_low_temp_var,
                      comp_modes > 0);
    }
  }
1915 if (cpi->use_svc || cpi->oxcf.speed <= 7 || bsize < BLOCK_32X32)
1916 x->sb_use_mv_part = 0;
1918 // Set the flag_svc_subpel to 1 for SVC if the lower spatial layer used
1919 // an averaging filter for downsampling (phase = 8). If so, we will test
1920 // a nonzero motion mode on the spatial reference.
1921 // The nonzero motion is half pixel shifted to left and top (-4, -4).
1922 if (cpi->use_svc && svc->spatial_layer_id > 0 &&
1923 svc_force_zero_mode[inter_layer_ref - 1] &&
      svc->downsample_filter_phase[svc->spatial_layer_id - 1] == 8 &&
      !gf_temporal_ref) {
    svc_mv_col = -4;
    svc_mv_row = -4;
    flag_svc_subpel = 1;
  }
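  // Main search loop: each iteration evaluates one (prediction mode, reference
  // frame) pair, including the optional (0,0) compound modes appended at the
  // end of the mode list.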
1931 for (idx = 0; idx < num_inter_modes + comp_modes; ++idx) {
1938 int this_early_term = 0;
1939 int rd_computed = 0;
1940 int flag_preduv_computed[2] = { 0 };
1941 int inter_mv_mode = 0;
1942 int skip_this_mv = 0;
1944 int force_mv_inter_layer = 0;
1945 PREDICTION_MODE this_mode;
1946 second_ref_frame = NONE;
1948 if (idx < num_inter_modes) {
1949 this_mode = ref_mode_set[idx].pred_mode;
      ref_frame = ref_mode_set[idx].ref_frame;

      if (cpi->use_svc) {
        this_mode = ref_mode_set_svc[idx].pred_mode;
        ref_frame = ref_mode_set_svc[idx].ref_frame;
      }
    } else {
      // Add (0,0) compound modes.
      this_mode = ZEROMV;
      ref_frame = LAST_FRAME;
      if (idx == num_inter_modes + comp_modes - 1) ref_frame = GOLDEN_FRAME;
      second_ref_frame = ALTREF_FRAME;
      comp_pred = 1;
    }
1965 if (ref_frame > usable_ref_frame) continue;
1966 if (skip_ref_find_pred[ref_frame]) continue;
1968 if (svc->previous_frame_is_intra_only) {
      if (ref_frame != LAST_FRAME || frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
    }
1973 // If the segment reference frame feature is enabled then do nothing if the
1974 // current ref frame is not allowed.
1975 if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
        get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
      continue;
1979 if (flag_svc_subpel && ref_frame == inter_layer_ref) {
1980 force_mv_inter_layer = 1;
1981 // Only test mode if NEARESTMV/NEARMV is (svc_mv_col, svc_mv_row),
1982 // otherwise set NEWMV to (svc_mv_col, svc_mv_row).
1983 if (this_mode == NEWMV) {
1984 frame_mv[this_mode][ref_frame].as_mv.col = svc_mv_col;
1985 frame_mv[this_mode][ref_frame].as_mv.row = svc_mv_row;
1986 } else if (frame_mv[this_mode][ref_frame].as_mv.col != svc_mv_col ||
                 frame_mv[this_mode][ref_frame].as_mv.row != svc_mv_row) {
        continue;
      }
    }

    if (comp_pred) {
1993 if (!cpi->allow_comp_inter_inter) continue;
1994 // Skip compound inter modes if ARF is not available.
1995 if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
1996 // Do not allow compound prediction if the segment level reference frame
1997 // feature is in use as in this case there can only be one reference.
      if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME)) continue;
    }
2001 // For SVC, skip the golden (spatial) reference search if sse of zeromv_last
2002 // is below threshold.
2003 if (cpi->use_svc && ref_frame == GOLDEN_FRAME &&
        sse_zeromv_normalized < thresh_svc_skip_golden)
      continue;
2007 if (!(cpi->ref_frame_flags & flag_list[ref_frame])) continue;
2009 if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
2010 (frame_mv[this_mode][ref_frame].as_int != 0 ||
2011 (cpi->oxcf.content == VP9E_CONTENT_SCREEN && !svc->spatial_layer_id &&
          !x->zero_temp_sad_source))) {
      continue;
    }
2016 if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode))) continue;
2018 if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) {
2019 if (cpi->rc.is_src_frame_alt_ref &&
2020 (ref_frame != ALTREF_FRAME ||
           frame_mv[this_mode][ref_frame].as_int != 0))
        continue;
2024 if (!cm->show_frame && ref_frame == ALTREF_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
2028 if (cpi->rc.alt_ref_gf_group && cm->show_frame &&
2029 cpi->rc.frames_since_golden > (cpi->rc.baseline_gf_interval >> 1) &&
2030 ref_frame == GOLDEN_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
2034 if (cpi->rc.alt_ref_gf_group && cm->show_frame &&
2035 cpi->rc.frames_since_golden > 0 &&
2036 cpi->rc.frames_since_golden < (cpi->rc.baseline_gf_interval >> 1) &&
2037 ref_frame == ALTREF_FRAME &&
          frame_mv[this_mode][ref_frame].as_int != 0)
        continue;
    }
2042 if (const_motion[ref_frame] && this_mode == NEARMV) continue;
2044 // Skip non-zeromv mode search for golden frame if force_skip_low_temp_var
    // is set. If nearestmv for golden frame is 0, zeromv mode will be skipped
    // later.
2047 if (!force_mv_inter_layer && force_skip_low_temp_var &&
2048 ref_frame == GOLDEN_FRAME &&
        frame_mv[this_mode][ref_frame].as_int != 0) {
      continue;
    }
2053 if (x->content_state_sb != kVeryHighSad &&
2054 (cpi->sf.short_circuit_low_temp_var >= 2 ||
2055 (cpi->sf.short_circuit_low_temp_var == 1 && bsize == BLOCK_64X64)) &&
2056 force_skip_low_temp_var && ref_frame == LAST_FRAME &&
        this_mode == NEWMV) {
      continue;
    }
2062 if (!force_mv_inter_layer && svc_force_zero_mode[ref_frame - 1] &&
        frame_mv[this_mode][ref_frame].as_int != 0)
      continue;
2067 // Disable this drop out case if the ref frame segment level feature is
2068 // enabled for this segment. This is to prevent the possibility that we end
2069 // up unable to pick any mode.
2070 if (!segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME)) {
2071 if (sf->reference_masking &&
2072 !(frame_mv[this_mode][ref_frame].as_int == 0 &&
2073 ref_frame == LAST_FRAME)) {
2074 if (usable_ref_frame < ALTREF_FRAME) {
2075 if (!force_skip_low_temp_var && usable_ref_frame > LAST_FRAME) {
2076 i = (ref_frame == LAST_FRAME) ? GOLDEN_FRAME : LAST_FRAME;
2077 if ((cpi->ref_frame_flags & flag_list[i]))
2078 if (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[i] << 1))
                ref_frame_skip_mask |= (1 << ref_frame);
          }
2081 } else if (!cpi->rc.is_src_frame_alt_ref &&
2082 !(frame_mv[this_mode][ref_frame].as_int == 0 &&
2083 ref_frame == ALTREF_FRAME)) {
2084 int ref1 = (ref_frame == GOLDEN_FRAME) ? LAST_FRAME : GOLDEN_FRAME;
2085 int ref2 = (ref_frame == ALTREF_FRAME) ? LAST_FRAME : ALTREF_FRAME;
2086 if (((cpi->ref_frame_flags & flag_list[ref1]) &&
2087 (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[ref1] << 1))) ||
2088 ((cpi->ref_frame_flags & flag_list[ref2]) &&
2089 (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[ref2] << 1))))
            ref_frame_skip_mask |= (1 << ref_frame);
        }
      }
      if (ref_frame_skip_mask & (1 << ref_frame)) continue;
    }
2096 // Select prediction reference frames.
2097 for (i = 0; i < MAX_MB_PLANE; i++) {
2098 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
      if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
    }
2102 mi->ref_frame[0] = ref_frame;
2103 mi->ref_frame[1] = second_ref_frame;
2104 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
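    // Look up the RD threshold for this (reference, mode) pair; it is doubled
    // when the current best mode already skips the transform.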
2106 mode_index = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
2107 mode_rd_thresh = best_pickmode.best_mode_skip_txfm
2108 ? rd_threshes[mode_index] << 1
2109 : rd_threshes[mode_index];
2111 // Increase mode_rd_thresh value for GOLDEN_FRAME for improved encoding
2112 // speed with little/no subjective quality loss.
2113 if (cpi->sf.bias_golden && ref_frame == GOLDEN_FRAME &&
2114 cpi->rc.frames_since_golden > 4)
2115 mode_rd_thresh = mode_rd_thresh << 3;
2117 if ((cpi->sf.adaptive_rd_thresh_row_mt &&
2118 rd_less_than_thresh_row_mt(best_rdc.rdcost, mode_rd_thresh,
2119 &rd_thresh_freq_fact[mode_index])) ||
2120 (!cpi->sf.adaptive_rd_thresh_row_mt &&
2121 rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
2122 &rd_thresh_freq_fact[mode_index])))
2123 if (frame_mv[this_mode][ref_frame].as_int != 0) continue;
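    // For NEWMV (unless the motion vector is forced by the SVC inter-layer
    // logic), run the motion search and drop this mode if the search indicates
    // it is not worth evaluating further.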
2125 if (this_mode == NEWMV && !force_mv_inter_layer) {
2126 if (search_new_mv(cpi, x, frame_mv, ref_frame, gf_temporal_ref, bsize,
                        mi_row, mi_col, best_pred_sad, &rate_mv, best_sse_sofar,
                        &best_rdc))
        continue;
    }
2132 // TODO(jianj): Skipping the testing of (duplicate) non-zero motion vector
2133 // causes some regression, leave it for duplicate zero-mv for now, until
2134 // regression issue is resolved.
2135 for (inter_mv_mode = NEARESTMV; inter_mv_mode <= NEWMV; inter_mv_mode++) {
2136 if (inter_mv_mode == this_mode || comp_pred) continue;
2137 if (mode_checked[inter_mv_mode][ref_frame] &&
2138 frame_mv[this_mode][ref_frame].as_int ==
2139 frame_mv[inter_mv_mode][ref_frame].as_int &&
            frame_mv[inter_mv_mode][ref_frame].as_int == 0) {
          skip_this_mv = 1;
          break;
        }
      }
2146 if (skip_this_mv) continue;
2148 // If use_golden_nonzeromv is false, NEWMV mode is skipped for golden, no
2149 // need to compute best_pred_sad which is only used to skip golden NEWMV.
2150 if (use_golden_nonzeromv && this_mode == NEWMV && ref_frame == LAST_FRAME &&
2151 frame_mv[NEWMV][LAST_FRAME].as_int != INVALID_MV) {
2152 const int pre_stride = xd->plane[0].pre[0].stride;
2153 const uint8_t *const pre_buf =
2154 xd->plane[0].pre[0].buf +
2155 (frame_mv[NEWMV][LAST_FRAME].as_mv.row >> 3) * pre_stride +
2156 (frame_mv[NEWMV][LAST_FRAME].as_mv.col >> 3);
2157 best_pred_sad = cpi->fn_ptr[bsize].sdf(
2158 x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride);
      x->pred_mv_sad[LAST_FRAME] = best_pred_sad;
    }
2162 if (this_mode != NEARESTMV && !comp_pred &&
2163 frame_mv[this_mode][ref_frame].as_int ==
            frame_mv[NEARESTMV][ref_frame].as_int)
      continue;
2167 mi->mode = this_mode;
2168 mi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;
2169 mi->mv[1].as_int = 0;
2171 // Search for the best prediction filter type, when the resulting
2172 // motion vector is at sub-pixel accuracy level for luma component, i.e.,
2173 // the last three bits are all zeros.
2174 if (reuse_inter_pred) {
2175 if (!this_mode_pred) {
        this_mode_pred = &tmp[3];
      } else {
2178 this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
2179 pd->dst.buf = this_mode_pred->data;
        pd->dst.stride = bw;
      }
    }
2184 if ((this_mode == NEWMV || filter_ref == SWITCHABLE) &&
2185 pred_filter_search &&
2186 (ref_frame == LAST_FRAME ||
2187 (ref_frame == GOLDEN_FRAME && !force_mv_inter_layer &&
2188 (cpi->use_svc || cpi->oxcf.rc_mode == VPX_VBR))) &&
        (((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07) != 0)) {
      rd_computed = 1;
      search_filter_ref(cpi, x, &this_rdc, mi_row, mi_col, tmp, bsize,
                        reuse_inter_pred, &this_mode_pred, &var_y, &sse_y);
    } else {
2194 // For low motion content use x->sb_is_skin in addition to VeryHighSad
2195 // for setting large_block.
2196 const int large_block =
2197 (x->content_state_sb == kVeryHighSad ||
2198 (x->sb_is_skin && cpi->rc.avg_frame_low_motion > 70) ||
2199 cpi->oxcf.speed < 7)
2200 ? bsize > BLOCK_32X32
2201 : bsize >= BLOCK_32X32;
2202 mi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP : filter_ref;
2204 if (cpi->use_svc && ref_frame == GOLDEN_FRAME &&
2205 svc_force_zero_mode[ref_frame - 1])
2206 mi->interp_filter = filter_gf_svc;
2208 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
2210 // For large partition blocks, extra testing is done.
2211 if (cpi->oxcf.rc_mode == VPX_CBR && large_block &&
          !cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) &&
          this_mode == NEWMV) {
        model_rd_for_sb_y_large(cpi, bsize, x, xd, &this_rdc.rate,
2215 &this_rdc.dist, &var_y, &sse_y, mi_row, mi_col,
                                &this_early_term, flag_preduv_computed);
      } else {
        rd_computed = 1;
        model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                          &var_y, &sse_y);
      }
2222 // Save normalized sse (between current and last frame) for (0, 0) motion.
2223 if (cpi->use_svc && ref_frame == LAST_FRAME &&
2224 frame_mv[this_mode][ref_frame].as_int == 0) {
2225 sse_zeromv_normalized =
            sse_y >> (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
      }
      if (sse_y < best_sse_sofar) best_sse_sofar = sse_y;
    }
2231 if (!this_early_term) {
2232 this_sse = (int64_t)sse_y;
2233 block_yrd(cpi, x, &this_rdc, &is_skippable, &this_sse, bsize,
2234 VPXMIN(mi->tx_size, TX_16X16), rd_computed);
      x->skip_txfm[0] = is_skippable;
      if (is_skippable) {
        this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
      } else {
        if (RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist) <
            RDCOST(x->rdmult, x->rddiv, 0, this_sse)) {
          this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
        } else {
          this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
          this_rdc.dist = this_sse;
          x->skip_txfm[0] = SKIP_TXFM_AC_DC;
        }
      }
2250 if (cm->interp_filter == SWITCHABLE) {
2251 if ((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07)
          this_rdc.rate += vp9_get_switchable_rate(cpi, xd);
      }
    } else {
      this_rdc.rate += cm->interp_filter == SWITCHABLE
                           ? vp9_get_switchable_rate(cpi, xd)
                           : 0;
      this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
    }
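    // If chroma has been flagged as perceptually significant for this block,
    // also build the UV predictors (if not already done) and add a modeled UV
    // rate/distortion contribution for this mode.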
2261 if (!this_early_term &&
        (x->color_sensitivity[0] || x->color_sensitivity[1])) {
      RD_COST rdc_uv;
      const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, &xd->plane[1]);
      if (x->color_sensitivity[0] && !flag_preduv_computed[0]) {
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 1);
        flag_preduv_computed[0] = 1;
      }
      if (x->color_sensitivity[1] && !flag_preduv_computed[1]) {
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 2);
        flag_preduv_computed[1] = 1;
      }
      model_rd_for_sb_uv(cpi, uv_bsize, x, xd, &rdc_uv, &var_y, &sse_y, 1, 2);
      this_rdc.rate += rdc_uv.rate;
      this_rdc.dist += rdc_uv.dist;
    }
2278 this_rdc.rate += rate_mv;
2279 this_rdc.rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
2280 [INTER_OFFSET(this_mode)];
2281 // TODO(marpan): Add costing for compound mode.
2282 this_rdc.rate += ref_frame_cost[ref_frame];
2283 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
2285 // Bias against NEWMV that is very different from its neighbors, and bias
2286 // to small motion-lastref for noisy input.
2287 if (cpi->oxcf.rc_mode == VPX_CBR && cpi->oxcf.speed >= 5 &&
2288 cpi->oxcf.content != VP9E_CONTENT_SCREEN) {
2289 vp9_NEWMV_diff_bias(&cpi->noise_estimate, xd, this_mode, &this_rdc, bsize,
2290 frame_mv[this_mode][ref_frame].as_mv.row,
2291 frame_mv[this_mode][ref_frame].as_mv.col,
                          ref_frame == LAST_FRAME, x->lowvar_highsumdiff,
                          x->sb_is_skin);
    }

    // Skipping checking: test to see if this block can be reconstructed by
    // prediction only.
2298 if (cpi->allow_encode_breakout && !xd->lossless && !scene_change_detected) {
2299 encode_breakout_test(cpi, x, bsize, mi_row, mi_col, ref_frame, this_mode,
2300 var_y, sse_y, yv12_mb, &this_rdc.rate,
2301 &this_rdc.dist, flag_preduv_computed);
      if (x->skip) {
        this_rdc.rate += rate_mv;
        this_rdc.rdcost =
            RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
      }
    }
2309 #if CONFIG_VP9_TEMPORAL_DENOISING
2310 if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc_pickmode &&
2311 cpi->denoiser.denoising_level > kDenLowLow) {
2312 vp9_denoiser_update_frame_stats(mi, sse_y, this_mode, ctx);
2313 // Keep track of zero_last cost.
2314 if (ref_frame == LAST_FRAME && frame_mv[this_mode][ref_frame].as_int == 0)
        zero_last_cost_orig = this_rdc.rdcost;
    }
#else
    (void)ctx;
#endif  // CONFIG_VP9_TEMPORAL_DENOISING
2321 mode_checked[this_mode][ref_frame] = 1;
2323 if (this_rdc.rdcost < best_rdc.rdcost || x->skip) {
2324 best_rdc = this_rdc;
2325 best_early_term = this_early_term;
2326 best_pickmode.best_mode = this_mode;
2327 best_pickmode.best_pred_filter = mi->interp_filter;
2328 best_pickmode.best_tx_size = mi->tx_size;
2329 best_pickmode.best_ref_frame = ref_frame;
2330 best_pickmode.best_mode_skip_txfm = x->skip_txfm[0];
2331 best_pickmode.best_second_ref_frame = second_ref_frame;
2333 if (reuse_inter_pred) {
2334 free_pred_buffer(best_pickmode.best_pred);
        best_pickmode.best_pred = this_mode_pred;
      }
    } else {
      if (reuse_inter_pred) free_pred_buffer(this_mode_pred);
    }

    if (x->skip) break;
2343 // If early termination flag is 1 and at least 2 modes are checked,
2344 // the mode search is terminated.
    if (best_early_term && idx > 0 && !scene_change_detected) {
      x->skip = 1;
      break;
    }
  }
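  // Commit the best inter mode found by the loop above to the mode info before
  // considering intra prediction.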
2351 mi->mode = best_pickmode.best_mode;
2352 mi->interp_filter = best_pickmode.best_pred_filter;
2353 mi->tx_size = best_pickmode.best_tx_size;
2354 mi->ref_frame[0] = best_pickmode.best_ref_frame;
  mi->mv[0].as_int =
      frame_mv[best_pickmode.best_mode][best_pickmode.best_ref_frame].as_int;
2357 xd->mi[0]->bmi[0].as_mv[0].as_int = mi->mv[0].as_int;
2358 x->skip_txfm[0] = best_pickmode.best_mode_skip_txfm;
2359 mi->ref_frame[1] = best_pickmode.best_second_ref_frame;
  // For spatial enhancement layer: perform intra prediction only if the base
  // layer is chosen as the reference. Always perform intra prediction if
  // LAST is the only reference, or is_key_frame is set, or on the base
  // temporal layer.
2365 if (svc->spatial_layer_id && !gf_temporal_ref) {
2366 perform_intra_pred =
2367 svc->temporal_layer_id == 0 ||
2368 svc->layer_context[svc->temporal_layer_id].is_key_frame ||
2369 !(cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) ||
2370 (!svc->layer_context[svc->temporal_layer_id].is_key_frame &&
2371 svc_force_zero_mode[best_pickmode.best_ref_frame - 1]);
    inter_mode_thresh = (inter_mode_thresh << 1) + inter_mode_thresh;
  }
2374 if ((cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
2375 cpi->rc.is_src_frame_alt_ref) ||
2376 svc->previous_frame_is_intra_only)
2377 perform_intra_pred = 0;
2379 // If the segment reference frame feature is enabled and set then
2380 // skip the intra prediction.
2381 if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
2382 get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) > 0)
2383 perform_intra_pred = 0;
  // Perform intra prediction search, if the best SAD is above a certain
  // threshold.
2387 if (best_rdc.rdcost == INT64_MAX ||
2388 (scene_change_detected && perform_intra_pred) ||
2389 ((!force_skip_low_temp_var || bsize < BLOCK_32X32 ||
2390 x->content_state_sb == kVeryHighSad) &&
2391 perform_intra_pred && !x->skip && best_rdc.rdcost > inter_mode_thresh &&
2392 bsize <= cpi->sf.max_intra_bsize && !x->skip_low_source_sad &&
2393 !x->lowvar_highsumdiff)) {
2394 struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
2396 PRED_BUFFER *const best_pred = best_pickmode.best_pred;
2397 TX_SIZE intra_tx_size =
2398 VPXMIN(max_txsize_lookup[bsize],
2399 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
2400 if (cpi->oxcf.content != VP9E_CONTENT_SCREEN && intra_tx_size > TX_16X16)
2401 intra_tx_size = TX_16X16;
2403 if (reuse_inter_pred && best_pred != NULL) {
2404 if (best_pred->data == orig_dst.buf) {
2405 this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
2406 #if CONFIG_VP9_HIGHBITDEPTH
2407 if (cm->use_highbitdepth)
2408 vpx_highbd_convolve_copy(
2409 CONVERT_TO_SHORTPTR(best_pred->data), best_pred->stride,
2410 CONVERT_TO_SHORTPTR(this_mode_pred->data), this_mode_pred->stride,
              NULL, 0, 0, 0, 0, bw, bh, xd->bd);
        else
2413 vpx_convolve_copy(best_pred->data, best_pred->stride,
2414 this_mode_pred->data, this_mode_pred->stride, NULL,
                            0, 0, 0, 0, bw, bh);
#else
2417 vpx_convolve_copy(best_pred->data, best_pred->stride,
                          this_mode_pred->data, this_mode_pred->stride, NULL, 0,
                          0, 0, 0, bw, bh);
#endif  // CONFIG_VP9_HIGHBITDEPTH
        best_pickmode.best_pred = this_mode_pred;
      }
    }
    pd->dst = orig_dst;
2426 for (i = 0; i < 4; ++i) {
2427 const PREDICTION_MODE this_mode = intra_mode_list[i];
2428 THR_MODES mode_index = mode_idx[INTRA_FRAME][mode_offset(this_mode)];
2429 int mode_rd_thresh = rd_threshes[mode_index];
2430 if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
          this_mode != DC_PRED) {
        continue;
      }

      if (!((1 << this_mode) & cpi->sf.intra_y_mode_bsize_mask[bsize]))
        continue;
2438 if ((cpi->sf.adaptive_rd_thresh_row_mt &&
2439 rd_less_than_thresh_row_mt(best_rdc.rdcost, mode_rd_thresh,
2440 &rd_thresh_freq_fact[mode_index])) ||
2441 (!cpi->sf.adaptive_rd_thresh_row_mt &&
2442 rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
                             &rd_thresh_freq_fact[mode_index])))
        continue;
2446 mi->mode = this_mode;
2447 mi->ref_frame[0] = INTRA_FRAME;
2448 this_rdc.dist = this_rdc.rate = 0;
      args.mode = this_mode;
      args.skippable = 1;
2451 args.rdc = &this_rdc;
2452 mi->tx_size = intra_tx_size;
      vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
                                             &args);
      // Check skip cost here since skippable is not set for uv; this
      // mirrors the behavior used by the inter path.
2457 if (args.skippable) {
2458 x->skip_txfm[0] = SKIP_TXFM_AC_DC;
        this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
      } else {
2461 x->skip_txfm[0] = SKIP_TXFM_NONE;
        this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
      }
2464 // Inter and intra RD will mismatch in scale for non-screen content.
2465 if (cpi->oxcf.content == VP9E_CONTENT_SCREEN) {
2466 if (x->color_sensitivity[0])
2467 vp9_foreach_transformed_block_in_plane(xd, bsize, 1,
2468 estimate_block_intra, &args);
2469 if (x->color_sensitivity[1])
2470 vp9_foreach_transformed_block_in_plane(xd, bsize, 2,
                                                 estimate_block_intra, &args);
      }
2473 this_rdc.rate += cpi->mbmode_cost[this_mode];
2474 this_rdc.rate += ref_frame_cost[INTRA_FRAME];
2475 this_rdc.rate += intra_cost_penalty;
      this_rdc.rdcost =
          RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
2479 if (this_rdc.rdcost < best_rdc.rdcost) {
2480 best_rdc = this_rdc;
2481 best_pickmode.best_mode = this_mode;
2482 best_pickmode.best_intra_tx_size = mi->tx_size;
2483 best_pickmode.best_ref_frame = INTRA_FRAME;
2484 best_pickmode.best_second_ref_frame = NONE;
2485 mi->uv_mode = this_mode;
2486 mi->mv[0].as_int = INVALID_MV;
2487 mi->mv[1].as_int = INVALID_MV;
        best_pickmode.best_mode_skip_txfm = x->skip_txfm[0];
      }
    }
2492 // Reset mb_mode_info to the best inter mode.
2493 if (best_pickmode.best_ref_frame != INTRA_FRAME) {
      mi->tx_size = best_pickmode.best_tx_size;
    } else {
      mi->tx_size = best_pickmode.best_intra_tx_size;
    }
  }
2501 mi->mode = best_pickmode.best_mode;
2502 mi->ref_frame[0] = best_pickmode.best_ref_frame;
2503 mi->ref_frame[1] = best_pickmode.best_second_ref_frame;
2504 x->skip_txfm[0] = best_pickmode.best_mode_skip_txfm;
2506 if (!is_inter_block(mi)) {
    mi->interp_filter = SWITCHABLE_FILTERS;
  }
2510 if (reuse_inter_pred && best_pickmode.best_pred != NULL) {
2511 PRED_BUFFER *const best_pred = best_pickmode.best_pred;
2512 if (best_pred->data != orig_dst.buf && is_inter_mode(mi->mode)) {
2513 #if CONFIG_VP9_HIGHBITDEPTH
2514 if (cm->use_highbitdepth)
2515 vpx_highbd_convolve_copy(
2516 CONVERT_TO_SHORTPTR(best_pred->data), best_pred->stride,
            CONVERT_TO_SHORTPTR(pd->dst.buf), pd->dst.stride, NULL, 0, 0, 0, 0,
            bw, bh, xd->bd);
      else
        vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
                          pd->dst.stride, NULL, 0, 0, 0, 0, bw, bh);
#else
      vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
2524 pd->dst.stride, NULL, 0, 0, 0, 0, bw, bh);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    }
  }
2529 #if CONFIG_VP9_TEMPORAL_DENOISING
2530 if (cpi->oxcf.noise_sensitivity > 0 && cpi->resize_pending == 0 &&
2531 denoise_svc_pickmode && cpi->denoiser.denoising_level > kDenLowLow &&
2532 cpi->denoiser.reset == 0) {
2533 VP9_DENOISER_DECISION decision = COPY_BLOCK;
2534 ctx->sb_skip_denoising = 0;
2535 // TODO(marpan): There is an issue with denoising when the
2536 // superblock partitioning scheme is based on the pickmode.
2537 // Remove this condition when the issue is resolved.
2538 if (x->sb_pickmode_part) ctx->sb_skip_denoising = 1;
2539 vp9_pickmode_ctx_den_update(&ctx_den, zero_last_cost_orig, ref_frame_cost,
2540 frame_mv, reuse_inter_pred, &best_pickmode);
    vp9_denoiser_denoise(cpi, x, mi_row, mi_col, bsize, ctx, &decision,
                         gf_temporal_ref);
2543 recheck_zeromv_after_denoising(cpi, mi, x, xd, decision, &ctx_den, yv12_mb,
2544 &best_rdc, bsize, mi_row, mi_col);
    best_pickmode.best_ref_frame = ctx_den.best_ref_frame;
  }
#endif  // CONFIG_VP9_TEMPORAL_DENOISING
2549 if (best_pickmode.best_ref_frame == ALTREF_FRAME ||
2550 best_pickmode.best_second_ref_frame == ALTREF_FRAME)
2551 x->arf_frame_usage++;
2552 else if (best_pickmode.best_ref_frame != INTRA_FRAME)
2553 x->lastgolden_frame_usage++;
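  // Adaptive RD thresholding: update the per-mode frequency factors so that
  // modes which repeatedly lose are pruned more aggressively in later blocks.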
2555 if (cpi->sf.adaptive_rd_thresh) {
2556 THR_MODES best_mode_idx =
2557 mode_idx[best_pickmode.best_ref_frame][mode_offset(mi->mode)];
2559 if (best_pickmode.best_ref_frame == INTRA_FRAME) {
2560 // Only consider the modes that are included in the intra_mode_list.
2561 int intra_modes = sizeof(intra_mode_list) / sizeof(PREDICTION_MODE);
2564 // TODO(yunqingwang): Check intra mode mask and only update freq_fact
2565 // for those valid modes.
2566 for (i = 0; i < intra_modes; i++) {
2567 if (cpi->sf.adaptive_rd_thresh_row_mt)
2568 update_thresh_freq_fact_row_mt(cpi, tile_data, x->source_variance,
2569 thresh_freq_fact_idx, INTRA_FRAME,
                                         best_mode_idx, intra_mode_list[i]);
        else
2572 update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize,
2573 INTRA_FRAME, best_mode_idx,
                                  intra_mode_list[i]);
      }
    } else {
2577 for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
2578 PREDICTION_MODE this_mode;
2579 if (best_pickmode.best_ref_frame != ref_frame) continue;
2580 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
2581 if (cpi->sf.adaptive_rd_thresh_row_mt)
2582 update_thresh_freq_fact_row_mt(cpi, tile_data, x->source_variance,
2583 thresh_freq_fact_idx, ref_frame,
                                         best_mode_idx, this_mode);
        else
2586 update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize,
                                  ref_frame, best_mode_idx, this_mode);
        }
      }
    }
  }
  *rd_cost = best_rdc;
}
2596 void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
2597 int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize,
2598 PICK_MODE_CONTEXT *ctx) {
2599 VP9_COMMON *const cm = &cpi->common;
2600 SPEED_FEATURES *const sf = &cpi->sf;
2601 MACROBLOCKD *const xd = &x->e_mbd;
2602 MODE_INFO *const mi = xd->mi[0];
2603 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2604 const struct segmentation *const seg = &cm->seg;
2605 MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
2606 MV_REFERENCE_FRAME best_ref_frame = NONE;
2607 unsigned char segment_id = mi->segment_id;
2608 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
2611 int64_t best_rd = INT64_MAX;
2612 b_mode_info bsi[MAX_REF_FRAMES][4];
2613 int ref_frame_skip_mask = 0;
2614 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;
2618 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
2619 ctx->pred_pixel_ready = 0;
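  // Gather MV references and prediction blocks for LAST and GOLDEN; references
  // that are unavailable are masked out and skipped below.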
2621 for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
    int_mv dummy_mv[2];
2624 x->pred_mv_sad[ref_frame] = INT_MAX;
2626 if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
2627 int_mv *const candidates = mbmi_ext->ref_mvs[ref_frame];
2628 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
      vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf,
                           sf);
2631 vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
2632 mbmi_ext->mode_context);
2634 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                            &dummy_mv[0], &dummy_mv[1]);
    } else {
      ref_frame_skip_mask |= (1 << ref_frame);
    }
  }
2641 mi->sb_type = bsize;
2642 mi->tx_size = TX_4X4;
2643 mi->uv_mode = DC_PRED;
2644 mi->ref_frame[0] = LAST_FRAME;
2645 mi->ref_frame[1] = NONE;
  mi->interp_filter =
      cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
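  // For each usable reference frame, run a per-sub-block mode search and keep
  // the reference with the lowest accumulated RD cost.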
2649 for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    int64_t this_rd = 0;
    int plane;
2653 if (ref_frame_skip_mask & (1 << ref_frame)) continue;
2655 #if CONFIG_BETTER_HW_COMPATIBILITY
2656 if ((bsize == BLOCK_8X4 || bsize == BLOCK_4X8) && ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;
#endif
2661 // TODO(jingning, agrange): Scaling reference frame not supported for
2662 // sub8x8 blocks. Is this supported now?
2663 if (ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;
    // If the segment reference frame feature is enabled,
    // then do nothing if the current ref frame is not allowed.
2669 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
      continue;
2673 mi->ref_frame[0] = ref_frame;
2675 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
2677 // Select prediction reference frames.
2678 for (plane = 0; plane < MAX_MB_PLANE; plane++)
2679 xd->plane[plane].pre[0] = yv12_mb[ref_frame][plane];
2681 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
2682 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
2683 int_mv b_mv[MB_MODE_COUNT];
2684 int64_t b_best_rd = INT64_MAX;
2685 const int i = idy * 2 + idx;
        PREDICTION_MODE this_mode;
        RD_COST this_rdc;
2688 unsigned int var_y, sse_y;
2690 struct macroblock_plane *p = &x->plane[0];
2691 struct macroblockd_plane *pd = &xd->plane[0];
2693 const struct buf_2d orig_src = p->src;
2694 const struct buf_2d orig_dst = pd->dst;
2695 struct buf_2d orig_pre[2];
2696 memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
        // Set buffer pointers for sub8x8 motion search.
        p->src.buf =
            &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
        pd->dst.buf =
            &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
        pd->pre[0].buf =
            &pd->pre[0]
                 .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
2707 b_mv[ZEROMV].as_int = 0;
2708 b_mv[NEWMV].as_int = INVALID_MV;
2709 vp9_append_sub8x8_mvs_for_idx(cm, xd, i, 0, mi_row, mi_col,
2710 &b_mv[NEARESTMV], &b_mv[NEARMV],
2711 mbmi_ext->mode_context);
        for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
          int b_rate = 0;
2715 xd->mi[0]->bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;
2717 if (this_mode == NEWMV) {
            const int step_param = cpi->sf.mv.fullpel_search_step_param;
            MV mvp_full;
            MV tmp_mv;
            int cost_list[5];
2722 const MvLimits tmp_mv_limits = x->mv_limits;
2723 uint32_t dummy_dist;
            if (i == 0) {
              mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
              mvp_full.col = b_mv[NEARESTMV].as_mv.col >> 3;
            } else {
              mvp_full.row = xd->mi[0]->bmi[0].as_mv[0].as_mv.row >> 3;
              mvp_full.col = xd->mi[0]->bmi[0].as_mv[0].as_mv.col >> 3;
            }
2733 vp9_set_mv_search_range(&x->mv_limits,
2734 &mbmi_ext->ref_mvs[ref_frame][0].as_mv);
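            // Full-pel motion search around the reference MV; the best match
            // is refined to sub-pel precision by find_fractional_mv_step below.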
2736 vp9_full_pixel_search(
2737 cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method,
2738 x->sadperbit4, cond_cost_list(cpi, cost_list),
2739 &mbmi_ext->ref_mvs[ref_frame][0].as_mv, &tmp_mv, INT_MAX, 0);
2741 x->mv_limits = tmp_mv_limits;
            // Calculate the bit cost of the motion vector.
2744 mvp_full.row = tmp_mv.row * 8;
2745 mvp_full.col = tmp_mv.col * 8;
2747 b_rate += vp9_mv_bit_cost(
2748 &mvp_full, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
2749 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2751 b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
2752 [INTER_OFFSET(NEWMV)];
2753 if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd) continue;
2755 cpi->find_fractional_mv_step(
2756 x, &tmp_mv, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
2757 cpi->common.allow_high_precision_mv, x->errorperbit,
2758 &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
2759 cpi->sf.mv.subpel_search_level, cond_cost_list(cpi, cost_list),
2760 x->nmvjointcost, x->mvcost, &dummy_dist,
2761 &x->pred_sse[ref_frame], NULL, 0, 0);
            xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv;
          } else {
            b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                          [INTER_OFFSET(this_mode)];
          }
2769 #if CONFIG_VP9_HIGHBITDEPTH
2770 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2771 vp9_highbd_build_inter_predictor(
2772 CONVERT_TO_SHORTPTR(pd->pre[0].buf), pd->pre[0].stride,
2773 CONVERT_TO_SHORTPTR(pd->dst.buf), pd->dst.stride,
2774 &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
2775 4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
2776 vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
2777 mi_col * MI_SIZE + 4 * (i & 0x01),
                mi_row * MI_SIZE + 4 * (i >> 1), xd->bd);
          } else {
#endif  // CONFIG_VP9_HIGHBITDEPTH
2781 vp9_build_inter_predictor(
2782 pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride,
2783 &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
2784 4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
2785 vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
2786 mi_col * MI_SIZE + 4 * (i & 0x01),
2787 mi_row * MI_SIZE + 4 * (i >> 1));
#if CONFIG_VP9_HIGHBITDEPTH
          }
#endif  // CONFIG_VP9_HIGHBITDEPTH
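          // Model-based rate/distortion estimate for this sub-block prediction
          // (no transform search is performed here).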
          model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                            &var_y, &sse_y);

          this_rdc.rate += b_rate;
          this_rdc.rdcost =
              RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
2799 if (this_rdc.rdcost < b_best_rd) {
2800 b_best_rd = this_rdc.rdcost;
2801 bsi[ref_frame][i].as_mode = this_mode;
            bsi[ref_frame][i].as_mv[0].as_mv = xd->mi[0]->bmi[i].as_mv[0].as_mv;
          }
        }  // mode search
        // Restore source and prediction buffer pointers.
        p->src = orig_src;
        pd->pre[0] = orig_pre[0];
        pd->dst = orig_dst;

        this_rd += b_best_rd;
2812 xd->mi[0]->bmi[i] = bsi[ref_frame][i];
2813 if (num_4x4_blocks_wide > 1) xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i];
        if (num_4x4_blocks_high > 1) xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i];
      }
2816 } // loop through sub8x8 blocks
2818 if (this_rd < best_rd) {
      best_rd = this_rd;
      best_ref_frame = ref_frame;
    }
2822 } // reference frames
2824 mi->tx_size = TX_4X4;
2825 mi->ref_frame[0] = best_ref_frame;
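  // Copy the winning sub-block modes and MVs into bmi, replicating across the
  // narrow dimension for 8x4 / 4x8 partitions.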
2826 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
2827 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
2828 const int block = idy * 2 + idx;
2829 xd->mi[0]->bmi[block] = bsi[best_ref_frame][block];
2830 if (num_4x4_blocks_wide > 1)
2831 xd->mi[0]->bmi[block + 1] = bsi[best_ref_frame][block];
2832 if (num_4x4_blocks_high > 1)
        xd->mi[0]->bmi[block + 2] = bsi[best_ref_frame][block];
    }
  }
2836 mi->mode = xd->mi[0]->bmi[3].as_mode;
2837 ctx->mic = *(xd->mi[0]);
2838 ctx->mbmi_ext = *x->mbmi_ext;
2839 ctx->skip_txfm[0] = SKIP_TXFM_NONE;
2841 // Dummy assignment for speed -5. No effect in speed -6.
  rd_cost->rdcost = best_rd;
}