2 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
16 #include "./vp9_rtcd.h"
17 #include "./vpx_dsp_rtcd.h"
19 #include "vpx/vpx_codec.h"
20 #include "vpx_dsp/vpx_dsp_common.h"
21 #include "vpx_mem/vpx_mem.h"
22 #include "vpx_ports/mem.h"
24 #include "vp9/common/vp9_blockd.h"
25 #include "vp9/common/vp9_common.h"
26 #include "vp9/common/vp9_mvref_common.h"
27 #include "vp9/common/vp9_pred_common.h"
28 #include "vp9/common/vp9_reconinter.h"
29 #include "vp9/common/vp9_reconintra.h"
30 #include "vp9/common/vp9_scan.h"
32 #include "vp9/encoder/vp9_cost.h"
33 #include "vp9/encoder/vp9_encoder.h"
34 #include "vp9/encoder/vp9_pickmode.h"
35 #include "vp9/encoder/vp9_ratectrl.h"
36 #include "vp9/encoder/vp9_rd.h"
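// Maps the position of a 16x16 block within its 64x64 superblock, given as
// ((mi_row & 0x7) >> 1, (mi_col & 0x7) >> 1), to its index in the
// x->variance_low[] array; used by get_force_skip_low_temp_var() below.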
44 static const int pos_shift_16x16[4][4] = {
45 { 9, 10, 13, 14 }, { 11, 12, 15, 16 }, { 17, 18, 21, 22 }, { 19, 20, 23, 24 }
48 static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, const MACROBLOCK *x,
49 const MACROBLOCKD *xd, const TileInfo *const tile,
50 MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
51 int_mv *mv_ref_list, int_mv *base_mv, int mi_row,
52 int mi_col, int use_base_mv) {
53 const int *ref_sign_bias = cm->ref_frame_sign_bias;
54 int i, refmv_count = 0;
56 const POSITION *const mv_ref_search = mv_ref_blocks[mi->sb_type];
58 int different_ref_found = 0;
59 int context_counter = 0;
62 // Blank the reference vector list
63 memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
// The nearest 2 blocks are treated differently:
// if the size is < 8x8 we get the mv from the bmi substructure,
// and we also need to keep a mode count.
68 for (i = 0; i < 2; ++i) {
69 const POSITION *const mv_ref = &mv_ref_search[i];
70 if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
71 const MODE_INFO *const candidate_mi =
72 xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
73 // Keep counts for entropy encoding.
74 context_counter += mode_2_counter[candidate_mi->mode];
75 different_ref_found = 1;
77 if (candidate_mi->ref_frame[0] == ref_frame)
78 ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, -1),
79 refmv_count, mv_ref_list, Done);
// Check the rest of the neighbors in much the same way
// as before, except we don't need to keep track of sub blocks or
// mode counts.
88 for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) {
89 const POSITION *const mv_ref = &mv_ref_search[i];
90 if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
91 const MODE_INFO *const candidate_mi =
92 xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
93 different_ref_found = 1;
95 if (candidate_mi->ref_frame[0] == ref_frame)
96 ADD_MV_REF_LIST(candidate_mi->mv[0], refmv_count, mv_ref_list, Done);
// Since we couldn't find 2 mvs from the same reference frame,
// go back through the neighbors and find motion vectors from
// different reference frames.
103 if (different_ref_found && !refmv_count) {
104 for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
105 const POSITION *mv_ref = &mv_ref_search[i];
106 if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
107 const MODE_INFO *const candidate_mi =
108 xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
110 // If the candidate is INTRA we don't want to consider its mv.
111 IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias,
112 refmv_count, mv_ref_list, Done);
117 !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
118 ref_frame == LAST_FRAME) {
119 // Get base layer mv.
122 ->mvs[(mi_col >> 1) + (mi_row >> 1) * (cm->mi_cols >> 1)];
123 if (candidate->mv[0].as_int != INVALID_MV) {
124 base_mv->as_mv.row = (candidate->mv[0].as_mv.row * 2);
125 base_mv->as_mv.col = (candidate->mv[0].as_mv.col * 2);
126 clamp_mv_ref(&base_mv->as_mv, xd);
128 base_mv->as_int = INVALID_MV;
134 x->mbmi_ext->mode_context[ref_frame] = counter_to_context[context_counter];
137 for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
138 clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
143 static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
144 BLOCK_SIZE bsize, int mi_row, int mi_col,
145 int_mv *tmp_mv, int *rate_mv,
146 int64_t best_rd_sofar, int use_base_mv) {
147 MACROBLOCKD *xd = &x->e_mbd;
148 MODE_INFO *mi = xd->mi[0];
149 struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
150 const int step_param = cpi->sf.mv.fullpel_search_step_param;
151 const int sadpb = x->sadperbit16;
153 const int ref = mi->ref_frame[0];
154 const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
158 const MvLimits tmp_mv_limits = x->mv_limits;
161 const YV12_BUFFER_CONFIG *scaled_ref_frame =
162 vp9_get_scaled_ref_frame(cpi, ref);
163 if (scaled_ref_frame) {
165 // Swap out the reference frame for a version that's been scaled to
166 // match the resolution of the current frame, allowing the existing
167 // motion search code to be used without additional modifications.
168 for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
169 vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
171 vp9_set_mv_search_range(&x->mv_limits, &ref_mv);
// Limit the motion vector range for a large lighting change.
174 if (cpi->oxcf.speed > 5 && x->lowvar_highsumdiff) {
175 x->mv_limits.col_min = VPXMAX(x->mv_limits.col_min, -10);
176 x->mv_limits.row_min = VPXMAX(x->mv_limits.row_min, -10);
177 x->mv_limits.col_max = VPXMIN(x->mv_limits.col_max, 10);
178 x->mv_limits.row_max = VPXMIN(x->mv_limits.row_max, 10);
181 assert(x->mv_best_ref_index[ref] <= 2);
182 if (x->mv_best_ref_index[ref] < 2)
183 mvp_full = x->mbmi_ext->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
185 mvp_full = x->pred_mv[ref];
193 center_mv = tmp_mv->as_mv;
195 vp9_full_pixel_search(
196 cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
cond_cost_list(cpi, cost_list), &center_mv, &tmp_mv->as_mv, INT_MAX, 0);
199 x->mv_limits = tmp_mv_limits;
// Calculate the bit cost of the motion vector.
202 mvp_full.row = tmp_mv->as_mv.row * 8;
203 mvp_full.col = tmp_mv->as_mv.col * 8;
205 *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv, x->nmvjointcost, x->mvcost,
209 cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]][INTER_OFFSET(NEWMV)];
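// Early-out sketch: if the rate of coding the motion vector plus the NEWMV
// mode, with zero distortion, already gives an RD cost above best_rd_sofar,
// the sub-pixel refinement below is skipped.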
211 !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) > best_rd_sofar);
214 const int subpel_force_stop = cpi->sf.mv.subpel_force_stop;
215 cpi->find_fractional_mv_step(
216 x, &tmp_mv->as_mv, &ref_mv, cpi->common.allow_high_precision_mv,
217 x->errorperbit, &cpi->fn_ptr[bsize], subpel_force_stop,
218 cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
219 x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
220 *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
221 x->mvcost, MV_COST_WEIGHT);
224 if (scaled_ref_frame) {
226 for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
231 static void block_variance(const uint8_t *src, int src_stride,
232 const uint8_t *ref, int ref_stride, int w, int h,
233 unsigned int *sse, int *sum, int block_size,
234 #if CONFIG_VP9_HIGHBITDEPTH
235 int use_highbitdepth, vpx_bit_depth_t bd,
237 uint32_t *sse8x8, int *sum8x8, uint32_t *var8x8) {
243 for (i = 0; i < h; i += block_size) {
244 for (j = 0; j < w; j += block_size) {
245 #if CONFIG_VP9_HIGHBITDEPTH
246 if (use_highbitdepth) {
249 vpx_highbd_8_get8x8var(src + src_stride * i + j, src_stride,
250 ref + ref_stride * i + j, ref_stride,
251 &sse8x8[k], &sum8x8[k]);
254 vpx_highbd_10_get8x8var(src + src_stride * i + j, src_stride,
255 ref + ref_stride * i + j, ref_stride,
256 &sse8x8[k], &sum8x8[k]);
259 vpx_highbd_12_get8x8var(src + src_stride * i + j, src_stride,
260 ref + ref_stride * i + j, ref_stride,
261 &sse8x8[k], &sum8x8[k]);
265 vpx_get8x8var(src + src_stride * i + j, src_stride,
266 ref + ref_stride * i + j, ref_stride, &sse8x8[k],
270 vpx_get8x8var(src + src_stride * i + j, src_stride,
271 ref + ref_stride * i + j, ref_stride, &sse8x8[k],
276 var8x8[k] = sse8x8[k] - (uint32_t)(((int64_t)sum8x8[k] * sum8x8[k]) >> 6);
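// sse8x8[k] is the raw sum of squared differences over the 64 pixels, so
// subtracting (sum * sum) >> 6, i.e. sum^2 / 64, yields the unnormalized
// block variance: var = SSE - sum^2 / N with N = 64.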
282 static void calculate_variance(int bw, int bh, TX_SIZE tx_size,
283 unsigned int *sse_i, int *sum_i,
284 unsigned int *var_o, unsigned int *sse_o,
286 const BLOCK_SIZE unit_size = txsize_to_bsize[tx_size];
287 const int nw = 1 << (bw - b_width_log2_lookup[unit_size]);
288 const int nh = 1 << (bh - b_height_log2_lookup[unit_size]);
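// Each 2x2 group of finer-grained (sse, sum) entries is merged into one
// coarser entry and its variance recomputed, e.g. four 8x8 entries combine
// into a single 16x16 entry when stepping up one transform size.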
291 for (i = 0; i < nh; i += 2) {
292 for (j = 0; j < nw; j += 2) {
293 sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] +
294 sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1];
295 sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] +
296 sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1];
297 var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >>
298 (b_width_log2_lookup[unit_size] +
299 b_height_log2_lookup[unit_size] + 6));
305 // Adjust the ac_thr according to speed, width, height and normalized sum
306 static int ac_thr_factor(const int speed, const int width, const int height,
307 const int norm_sum) {
308 if (speed >= 8 && norm_sum < 5) {
309 if (width <= 640 && height <= 480)
317 static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize,
318 MACROBLOCK *x, MACROBLOCKD *xd,
319 int *out_rate_sum, int64_t *out_dist_sum,
320 unsigned int *var_y, unsigned int *sse_y,
321 int mi_row, int mi_col, int *early_term) {
// Note: our transform coefficients are 8 times those of an orthogonal
// transform. Hence the quantizer step is also 8 times larger. To get the
// effective quantizer we need to divide by 8 before sending it to the
// modeling function.
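// The (rate, distortion) estimates below come from
// vp9_model_rd_from_var_lapndz(), which models the residual with a Laplacian
// coefficient distribution; dc_quant and ac_quant are therefore shifted down
// by 3 (divided by 8), or by (bd - 5) for high bit depth, before being
// passed to that model.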
328 struct macroblock_plane *const p = &x->plane[0];
329 struct macroblockd_plane *const pd = &xd->plane[0];
330 const uint32_t dc_quant = pd->dequant[0];
331 const uint32_t ac_quant = pd->dequant[1];
332 const int64_t dc_thr = dc_quant * dc_quant >> 6;
333 int64_t ac_thr = ac_quant * ac_quant >> 6;
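// Rough example of the thresholds: with an 8-bit AC dequant step of 32,
// ac_thr starts at 32 * 32 >> 6 = 16, so a transform block whose AC variance
// stays below that (after the adjustments further down) is treated as
// quantizing to all-zero AC coefficients in the skip test below.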
338 const int bw = b_width_log2_lookup[bsize];
339 const int bh = b_height_log2_lookup[bsize];
340 const int num8x8 = 1 << (bw + bh - 2);
341 unsigned int sse8x8[64] = { 0 };
342 int sum8x8[64] = { 0 };
343 unsigned int var8x8[64] = { 0 };
346 #if CONFIG_VP9_HIGHBITDEPTH
347 const vpx_bit_depth_t bd = cpi->common.bit_depth;
// Calculate the variance for the whole partition, and also save the 8x8
// blocks' variance to be used in the following transform-skipping test.
351 block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
352 4 << bw, 4 << bh, &sse, &sum, 8,
353 #if CONFIG_VP9_HIGHBITDEPTH
354 cpi->common.use_highbitdepth, bd,
356 sse8x8, sum8x8, var8x8);
357 var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4));
362 #if CONFIG_VP9_TEMPORAL_DENOISING
363 if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
365 ac_thr = vp9_scale_acskip_thresh(ac_thr, cpi->denoiser.denoising_level,
366 (abs(sum) >> (bw + bh)));
368 ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
369 cpi->common.height, abs(sum) >> (bw + bh));
371 ac_thr *= ac_thr_factor(cpi->oxcf.speed, cpi->common.width,
372 cpi->common.height, abs(sum) >> (bw + bh));
375 if (cpi->common.tx_mode == TX_MODE_SELECT) {
376 if (sse > (var << 2))
377 tx_size = VPXMIN(max_txsize_lookup[bsize],
378 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
382 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
383 cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id))
385 else if (tx_size > TX_16X16)
388 tx_size = VPXMIN(max_txsize_lookup[bsize],
389 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
392 assert(tx_size >= TX_8X8);
393 xd->mi[0]->tx_size = tx_size;
395 // Evaluate if the partition block is a skippable block in Y plane.
397 unsigned int sse16x16[16] = { 0 };
398 int sum16x16[16] = { 0 };
399 unsigned int var16x16[16] = { 0 };
400 const int num16x16 = num8x8 >> 2;
402 unsigned int sse32x32[4] = { 0 };
403 int sum32x32[4] = { 0 };
404 unsigned int var32x32[4] = { 0 };
405 const int num32x32 = num8x8 >> 4;
409 const int num = (tx_size == TX_8X8)
411 : ((tx_size == TX_16X16) ? num16x16 : num32x32);
412 const unsigned int *sse_tx =
413 (tx_size == TX_8X8) ? sse8x8
414 : ((tx_size == TX_16X16) ? sse16x16 : sse32x32);
415 const unsigned int *var_tx =
416 (tx_size == TX_8X8) ? var8x8
417 : ((tx_size == TX_16X16) ? var16x16 : var32x32);
419 // Calculate variance if tx_size > TX_8X8
420 if (tx_size >= TX_16X16)
421 calculate_variance(bw, bh, TX_8X8, sse8x8, sum8x8, var16x16, sse16x16,
423 if (tx_size == TX_32X32)
424 calculate_variance(bw, bh, TX_16X16, sse16x16, sum16x16, var32x32,
428 x->skip_txfm[0] = SKIP_TXFM_NONE;
429 for (k = 0; k < num; k++)
430 // Check if all ac coefficients can be quantized to zero.
431 if (!(var_tx[k] < ac_thr || var == 0)) {
436 for (k = 0; k < num; k++)
437 // Check if dc coefficient can be quantized to zero.
438 if (!(sse_tx[k] - var_tx[k] < dc_thr || sse == var)) {
444 x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;
446 if (dc_test) x->skip_txfm[0] = SKIP_TXFM_AC_DC;
447 } else if (dc_test) {
452 if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
453 int skip_uv[2] = { 0 };
454 unsigned int var_uv[2];
455 unsigned int sse_uv[2];
458 *out_dist_sum = sse << 4;
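// Distortion is recorded as 16 * SSE: SSE measured in the spatial domain is
// scaled by 16 to match the frequency-domain distortion convention used by
// the RD code (see the scaling comment in encode_breakout_test()).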
460 // Transform skipping test in UV planes.
461 for (i = 1; i <= 2; i++) {
462 if (cpi->oxcf.speed < 8 || x->color_sensitivity[i - 1]) {
463 struct macroblock_plane *const p = &x->plane[i];
464 struct macroblockd_plane *const pd = &xd->plane[i];
465 const TX_SIZE uv_tx_size = get_uv_tx_size(xd->mi[0], pd);
466 const BLOCK_SIZE unit_size = txsize_to_bsize[uv_tx_size];
467 const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, pd);
468 const int uv_bw = b_width_log2_lookup[uv_bsize];
469 const int uv_bh = b_height_log2_lookup[uv_bsize];
470 const int sf = (uv_bw - b_width_log2_lookup[unit_size]) +
471 (uv_bh - b_height_log2_lookup[unit_size]);
472 const uint32_t uv_dc_thr = pd->dequant[0] * pd->dequant[0] >> (6 - sf);
473 const uint32_t uv_ac_thr = pd->dequant[1] * pd->dequant[1] >> (6 - sf);
476 vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, i);
477 var_uv[j] = cpi->fn_ptr[uv_bsize].vf(
478 p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse_uv[j]);
480 if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) &&
481 (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j]))
// If the transforms in the YUV planes are skippable, the mode search checks
// fewer inter modes and doesn't check intra modes.
492 if (skip_uv[0] & skip_uv[1]) {
499 #if CONFIG_VP9_HIGHBITDEPTH
500 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
501 dc_quant >> (xd->bd - 5), &rate, &dist);
503 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
504 dc_quant >> 3, &rate, &dist);
505 #endif // CONFIG_VP9_HIGHBITDEPTH
509 *out_rate_sum = rate >> 1;
510 *out_dist_sum = dist << 3;
513 *out_dist_sum = (sse - var) << 4;
516 #if CONFIG_VP9_HIGHBITDEPTH
517 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
518 ac_quant >> (xd->bd - 5), &rate, &dist);
520 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
522 #endif // CONFIG_VP9_HIGHBITDEPTH
524 *out_rate_sum += rate;
525 *out_dist_sum += dist << 4;
528 static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
529 MACROBLOCKD *xd, int *out_rate_sum,
530 int64_t *out_dist_sum, unsigned int *var_y,
531 unsigned int *sse_y) {
// Note: our transform coefficients are 8 times those of an orthogonal
// transform. Hence the quantizer step is also 8 times larger. To get the
// effective quantizer we need to divide by 8 before sending it to the
// modeling function.
538 struct macroblock_plane *const p = &x->plane[0];
539 struct macroblockd_plane *const pd = &xd->plane[0];
540 const int64_t dc_thr = p->quant_thred[0] >> 6;
541 const int64_t ac_thr = p->quant_thred[1] >> 6;
542 const uint32_t dc_quant = pd->dequant[0];
543 const uint32_t ac_quant = pd->dequant[1];
544 unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
545 pd->dst.buf, pd->dst.stride, &sse);
551 if (cpi->common.tx_mode == TX_MODE_SELECT) {
552 if (sse > (var << 2))
554 VPXMIN(max_txsize_lookup[bsize],
555 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
557 xd->mi[0]->tx_size = TX_8X8;
559 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
560 cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id))
561 xd->mi[0]->tx_size = TX_8X8;
562 else if (xd->mi[0]->tx_size > TX_16X16)
563 xd->mi[0]->tx_size = TX_16X16;
566 VPXMIN(max_txsize_lookup[bsize],
567 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
570 // Evaluate if the partition block is a skippable block in Y plane.
572 const BLOCK_SIZE unit_size = txsize_to_bsize[xd->mi[0]->tx_size];
573 const unsigned int num_blk_log2 =
574 (b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) +
575 (b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]);
576 const unsigned int sse_tx = sse >> num_blk_log2;
577 const unsigned int var_tx = var >> num_blk_log2;
579 x->skip_txfm[0] = SKIP_TXFM_NONE;
580 // Check if all ac coefficients can be quantized to zero.
581 if (var_tx < ac_thr || var == 0) {
582 x->skip_txfm[0] = SKIP_TXFM_AC_ONLY;
583 // Check if dc coefficient can be quantized to zero.
584 if (sse_tx - var_tx < dc_thr || sse == var)
585 x->skip_txfm[0] = SKIP_TXFM_AC_DC;
587 if (sse_tx - var_tx < dc_thr || sse == var) skip_dc = 1;
591 if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) {
593 *out_dist_sum = sse << 4;
598 #if CONFIG_VP9_HIGHBITDEPTH
599 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
600 dc_quant >> (xd->bd - 5), &rate, &dist);
602 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
603 dc_quant >> 3, &rate, &dist);
604 #endif // CONFIG_VP9_HIGHBITDEPTH
608 *out_rate_sum = rate >> 1;
609 *out_dist_sum = dist << 3;
612 *out_dist_sum = (sse - var) << 4;
615 #if CONFIG_VP9_HIGHBITDEPTH
616 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
617 ac_quant >> (xd->bd - 5), &rate, &dist);
619 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3,
621 #endif // CONFIG_VP9_HIGHBITDEPTH
623 *out_rate_sum += rate;
624 *out_dist_sum += dist << 4;
627 static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *this_rdc,
628 int *skippable, int64_t *sse, BLOCK_SIZE bsize,
629 TX_SIZE tx_size, int rd_computed) {
630 MACROBLOCKD *xd = &x->e_mbd;
631 const struct macroblockd_plane *pd = &xd->plane[0];
632 struct macroblock_plane *const p = &x->plane[0];
633 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
634 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
635 const int step = 1 << (tx_size << 1);
636 const int block_step = (1 << tx_size);
638 const int max_blocks_wide =
639 num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5);
640 const int max_blocks_high =
641 num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> 5);
643 const int bw = 4 * num_4x4_w;
644 const int bh = 4 * num_4x4_h;
646 #if CONFIG_VP9_HIGHBITDEPTH
647 // TODO(jingning): Implement the high bit-depth Hadamard transforms and
648 // remove this check condition.
649 // TODO(marpan): Use this path (model_rd) for 8bit under certain conditions
650 // for now, as the vp9_quantize_fp below for highbitdepth build is slow.
652 (cpi->oxcf.speed > 5 && cpi->common.frame_type != KEY_FRAME &&
653 bsize < BLOCK_32X32)) {
654 unsigned int var_y, sse_y;
657 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist,
665 if (cpi->sf.use_simple_block_yrd && cpi->common.frame_type != KEY_FRAME &&
666 bsize < BLOCK_32X32) {
667 unsigned int var_y, sse_y;
670 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist,
679 // The max tx_size passed in is TX_16X16.
680 assert(tx_size != TX_32X32);
682 vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
683 pd->dst.buf, pd->dst.stride);
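// The residual is transformed with a Hadamard transform (a cheap stand-in for
// the DCT in this non-RD path) and quantized with vp9_quantize_fp(); rate and
// distortion are then approximated from the quantized coefficients with
// vpx_satd() and vp9_block_error_fp() in the second loop below.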
685 // Keep track of the row and column of the blocks we use so that we know
686 // if we are in the unrestricted motion border.
687 for (r = 0; r < max_blocks_high; r += block_step) {
688 for (c = 0; c < num_4x4_w; c += block_step) {
689 if (c < max_blocks_wide) {
690 const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
691 tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
692 tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
693 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
694 uint16_t *const eob = &p->eobs[block];
695 const int diff_stride = bw;
696 const int16_t *src_diff;
697 src_diff = &p->src_diff[(r * diff_stride + c) << 2];
701 vpx_hadamard_16x16(src_diff, diff_stride, coeff);
702 vp9_quantize_fp(coeff, 256, x->skip_block, p->round_fp, p->quant_fp,
703 qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
707 vpx_hadamard_8x8(src_diff, diff_stride, coeff);
708 vp9_quantize_fp(coeff, 64, x->skip_block, p->round_fp, p->quant_fp,
709 qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
713 x->fwd_txm4x4(src_diff, coeff, diff_stride);
714 vp9_quantize_fp(coeff, 16, x->skip_block, p->round_fp, p->quant_fp,
715 qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
718 default: assert(0); break;
720 *skippable &= (*eob == 0);
728 if (*sse < INT64_MAX) {
729 *sse = (*sse << 6) >> 2;
731 this_rdc->dist = *sse;
738 for (r = 0; r < max_blocks_high; r += block_step) {
739 for (c = 0; c < num_4x4_w; c += block_step) {
740 if (c < max_blocks_wide) {
741 tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
742 tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
743 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
744 uint16_t *const eob = &p->eobs[block];
747 this_rdc->rate += (int)abs(qcoeff[0]);
749 this_rdc->rate += vpx_satd(qcoeff, step << 4);
751 this_rdc->dist += vp9_block_error_fp(coeff, dqcoeff, step << 4) >> 2;
757 // If skippable is set, rate gets clobbered later.
758 this_rdc->rate <<= (2 + VP9_PROB_COST_SHIFT);
759 this_rdc->rate += (eob_cost << VP9_PROB_COST_SHIFT);
762 static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE plane_bsize,
763 MACROBLOCK *x, MACROBLOCKD *xd,
764 RD_COST *this_rdc, unsigned int *var_y,
765 unsigned int *sse_y, int start_plane,
// Note: our transform coefficients are 8 times those of an orthogonal
// transform. Hence the quantizer step is also 8 times larger. To get the
// effective quantizer we need to divide by 8 before sending it to the
// modeling function.
774 #if CONFIG_VP9_HIGHBITDEPTH
775 uint64_t tot_var = *var_y;
776 uint64_t tot_sse = *sse_y;
778 uint32_t tot_var = *var_y;
779 uint32_t tot_sse = *sse_y;
785 for (i = start_plane; i <= stop_plane; ++i) {
786 struct macroblock_plane *const p = &x->plane[i];
787 struct macroblockd_plane *const pd = &xd->plane[i];
788 const uint32_t dc_quant = pd->dequant[0];
789 const uint32_t ac_quant = pd->dequant[1];
790 const BLOCK_SIZE bs = plane_bsize;
792 if (!x->color_sensitivity[i - 1]) continue;
794 var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
795 pd->dst.stride, &sse);
800 #if CONFIG_VP9_HIGHBITDEPTH
801 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
802 dc_quant >> (xd->bd - 5), &rate, &dist);
804 vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
805 dc_quant >> 3, &rate, &dist);
806 #endif // CONFIG_VP9_HIGHBITDEPTH
808 this_rdc->rate += rate >> 1;
809 this_rdc->dist += dist << 3;
811 #if CONFIG_VP9_HIGHBITDEPTH
812 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
813 ac_quant >> (xd->bd - 5), &rate, &dist);
815 vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3,
817 #endif // CONFIG_VP9_HIGHBITDEPTH
819 this_rdc->rate += rate;
820 this_rdc->dist += dist << 4;
823 #if CONFIG_VP9_HIGHBITDEPTH
824 *var_y = tot_var > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_var;
825 *sse_y = tot_sse > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_sse;
832 static int get_pred_buffer(PRED_BUFFER *p, int len) {
835 for (i = 0; i < len; i++) {
844 static void free_pred_buffer(PRED_BUFFER *p) {
845 if (p != NULL) p->in_use = 0;
848 static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
849 int mi_row, int mi_col,
850 MV_REFERENCE_FRAME ref_frame,
851 PREDICTION_MODE this_mode, unsigned int var_y,
853 struct buf_2d yv12_mb[][MAX_MB_PLANE],
854 int *rate, int64_t *dist) {
855 MACROBLOCKD *xd = &x->e_mbd;
856 MODE_INFO *const mi = xd->mi[0];
857 const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
858 unsigned int var = var_y, sse = sse_y;
859 // Skipping threshold for ac.
860 unsigned int thresh_ac;
861 // Skipping threshold for dc.
862 unsigned int thresh_dc;
864 if (mi->mv[0].as_mv.row > 64 || mi->mv[0].as_mv.row < -64 ||
865 mi->mv[0].as_mv.col > 64 || mi->mv[0].as_mv.col < -64)
867 if (x->encode_breakout > 0 && motion_low == 1) {
// Set a maximum for the threshold to avoid a big PSNR loss in low-bit-rate
// cases. Use an extremely low threshold for static frames to limit skipping.
871 const unsigned int max_thresh = 36000;
872 // The encode_breakout input
873 const unsigned int min_thresh =
874 VPXMIN(((unsigned int)x->encode_breakout << 4), max_thresh);
875 #if CONFIG_VP9_HIGHBITDEPTH
876 const int shift = (xd->bd << 1) - 16;
879 // Calculate threshold according to dequant value.
880 thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) >> 3;
881 #if CONFIG_VP9_HIGHBITDEPTH
882 if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
883 thresh_ac = ROUND_POWER_OF_TWO(thresh_ac, shift);
885 #endif // CONFIG_VP9_HIGHBITDEPTH
886 thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);
// Adjust the ac threshold according to partition size.
thresh_ac >>=
8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
892 thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
893 #if CONFIG_VP9_HIGHBITDEPTH
894 if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
895 thresh_dc = ROUND_POWER_OF_TWO(thresh_dc, shift);
897 #endif // CONFIG_VP9_HIGHBITDEPTH
903 // Y skipping condition checking for ac and dc.
904 if (var <= thresh_ac && (sse - var) <= thresh_dc) {
905 unsigned int sse_u, sse_v;
906 unsigned int var_u, var_v;
907 unsigned int thresh_ac_uv = thresh_ac;
908 unsigned int thresh_dc_uv = thresh_dc;
914 // Skip UV prediction unless breakout is zero (lossless) to save
915 // computation with low impact on the result
916 if (x->encode_breakout == 0) {
917 xd->plane[1].pre[0] = yv12_mb[ref_frame][1];
918 xd->plane[2].pre[0] = yv12_mb[ref_frame][2];
919 vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
922 var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf, x->plane[1].src.stride,
923 xd->plane[1].dst.buf,
924 xd->plane[1].dst.stride, &sse_u);
926 // U skipping condition checking
927 if (((var_u << 2) <= thresh_ac_uv) && (sse_u - var_u <= thresh_dc_uv)) {
928 var_v = cpi->fn_ptr[uv_size].vf(
929 x->plane[2].src.buf, x->plane[2].src.stride, xd->plane[2].dst.buf,
930 xd->plane[2].dst.stride, &sse_v);
932 // V skipping condition checking
933 if (((var_v << 2) <= thresh_ac_uv) && (sse_v - var_v <= thresh_dc_uv)) {
936 // The cost of skip bit needs to be added.
937 *rate = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
938 [INTER_OFFSET(this_mode)];
// More could be added to this rate term, for example:
// rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
943 // Scaling factor for SSE from spatial domain to frequency
944 // domain is 16. Adjust distortion accordingly.
// TODO(yunqingwang): In this function, only y-plane dist is
// calculated.
947 *dist = (sse << 4); // + ((sse_u + sse_v) << 4);
949 // *disable_skip = 1;
955 struct estimate_block_intra_args {
958 PREDICTION_MODE mode;
963 static void estimate_block_intra(int plane, int block, int row, int col,
964 BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
966 struct estimate_block_intra_args *const args = arg;
967 VP9_COMP *const cpi = args->cpi;
968 MACROBLOCK *const x = args->x;
969 MACROBLOCKD *const xd = &x->e_mbd;
970 struct macroblock_plane *const p = &x->plane[0];
971 struct macroblockd_plane *const pd = &xd->plane[0];
972 const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
973 uint8_t *const src_buf_base = p->src.buf;
974 uint8_t *const dst_buf_base = pd->dst.buf;
975 const int src_stride = p->src.stride;
976 const int dst_stride = pd->dst.stride;
981 p->src.buf = &src_buf_base[4 * (row * src_stride + col)];
982 pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)];
983 // Use source buffer as an approximation for the fully reconstructed buffer.
984 vp9_predict_intra_block(xd, b_width_log2_lookup[plane_bsize], tx_size,
985 args->mode, x->skip_encode ? p->src.buf : pd->dst.buf,
986 x->skip_encode ? src_stride : dst_stride, pd->dst.buf,
987 dst_stride, col, row, plane);
990 int64_t this_sse = INT64_MAX;
991 // TODO(jingning): This needs further refactoring.
992 block_yrd(cpi, x, &this_rdc, &args->skippable, &this_sse, bsize_tx,
993 VPXMIN(tx_size, TX_16X16), 0);
995 unsigned int var = 0;
996 unsigned int sse = 0;
997 model_rd_for_sb_uv(cpi, plane_bsize, x, xd, &this_rdc, &var, &sse, plane,
1001 p->src.buf = src_buf_base;
1002 pd->dst.buf = dst_buf_base;
1003 args->rdc->rate += this_rdc.rate;
1004 args->rdc->dist += this_rdc.dist;
1007 static const THR_MODES mode_idx[MAX_REF_FRAMES][4] = {
1008 { THR_DC, THR_V_PRED, THR_H_PRED, THR_TM },
1009 { THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV },
1010 { THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG },
1011 { THR_NEARESTA, THR_NEARA, THR_ZEROA, THR_NEWA },
1014 static const PREDICTION_MODE intra_mode_list[] = { DC_PRED, V_PRED, H_PRED,
1017 static int mode_offset(const PREDICTION_MODE mode) {
1018 if (mode >= NEARESTMV) {
1019 return INTER_OFFSET(mode);
1022 case DC_PRED: return 0;
1023 case V_PRED: return 1;
1024 case H_PRED: return 2;
1025 case TM_PRED: return 3;
1031 static INLINE int rd_less_than_thresh_row_mt(int64_t best_rd, int thresh,
1032 const int *const thresh_fact) {
1033 int is_rd_less_than_thresh;
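// A mode is pruned when best_rd is already below its threshold scaled by the
// per-mode frequency factor (thresh * thresh_fact >> 5), or when the
// threshold is INT_MAX (the mode is disabled).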
1034 is_rd_less_than_thresh =
1035 best_rd < ((int64_t)thresh * (*thresh_fact) >> 5) || thresh == INT_MAX;
1036 return is_rd_less_than_thresh;
1039 static INLINE void update_thresh_freq_fact_row_mt(
1040 VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance,
1041 int thresh_freq_fact_idx, MV_REFERENCE_FRAME ref_frame,
1042 THR_MODES best_mode_idx, PREDICTION_MODE mode) {
1043 THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
1044 int freq_fact_idx = thresh_freq_fact_idx + thr_mode_idx;
1045 int *freq_fact = &tile_data->row_base_thresh_freq_fact[freq_fact_idx];
1046 if (thr_mode_idx == best_mode_idx)
1047 *freq_fact -= (*freq_fact >> 4);
1048 else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV &&
1049 ref_frame == LAST_FRAME && source_variance < 5) {
1050 *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32);
1052 *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC,
1053 cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
1057 static INLINE void update_thresh_freq_fact(
1058 VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance,
1059 BLOCK_SIZE bsize, MV_REFERENCE_FRAME ref_frame, THR_MODES best_mode_idx,
1060 PREDICTION_MODE mode) {
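// Adaptive pruning: the winning mode's frequency factor decays by 1/16 of its
// value, making that mode cheaper to test on later blocks, while losing modes
// have their factor raised by RD_THRESH_INC (capped at 32 for NEWMV/LAST on
// low-variance blocks, otherwise at adaptive_rd_thresh * RD_THRESH_MAX_FACT).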
1061 THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)];
1062 int *freq_fact = &tile_data->thresh_freq_fact[bsize][thr_mode_idx];
1063 if (thr_mode_idx == best_mode_idx)
1064 *freq_fact -= (*freq_fact >> 4);
1065 else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV &&
1066 ref_frame == LAST_FRAME && source_variance < 5) {
1067 *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32);
1069 *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC,
1070 cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
1074 void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
1075 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
1076 MACROBLOCKD *const xd = &x->e_mbd;
1077 MODE_INFO *const mi = xd->mi[0];
1078 RD_COST this_rdc, best_rdc;
1079 PREDICTION_MODE this_mode;
1080 struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
1081 const TX_SIZE intra_tx_size =
1082 VPXMIN(max_txsize_lookup[bsize],
1083 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
1084 MODE_INFO *const mic = xd->mi[0];
1086 const MODE_INFO *above_mi = xd->above_mi;
1087 const MODE_INFO *left_mi = xd->left_mi;
1088 const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
1089 const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
1090 bmode_costs = cpi->y_mode_costs[A][L];
1093 vp9_rd_cost_reset(&best_rdc);
1094 vp9_rd_cost_reset(&this_rdc);
1096 mi->ref_frame[0] = INTRA_FRAME;
1097 // Initialize interp_filter here so we do not have to check for inter block
1098 // modes in get_pred_context_switchable_interp()
1099 mi->interp_filter = SWITCHABLE_FILTERS;
1101 mi->mv[0].as_int = INVALID_MV;
1102 mi->uv_mode = DC_PRED;
1103 memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
// Change the limit of this loop to add other intra prediction
// modes in the rate-distortion optimization loop.
1107 for (this_mode = DC_PRED; this_mode <= H_PRED; ++this_mode) {
1108 this_rdc.dist = this_rdc.rate = 0;
1109 args.mode = this_mode;
1111 args.rdc = &this_rdc;
1112 mi->tx_size = intra_tx_size;
1113 vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
1115 if (args.skippable) {
1116 x->skip_txfm[0] = SKIP_TXFM_AC_DC;
1117 this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
1119 x->skip_txfm[0] = SKIP_TXFM_NONE;
1120 this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
1122 this_rdc.rate += bmode_costs[this_mode];
1123 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
1125 if (this_rdc.rdcost < best_rdc.rdcost) {
1126 best_rdc = this_rdc;
1127 mi->mode = this_mode;
1131 *rd_cost = best_rdc;
1134 static void init_ref_frame_cost(VP9_COMMON *const cm, MACROBLOCKD *const xd,
1135 int ref_frame_cost[MAX_REF_FRAMES]) {
1136 vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
1137 vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
1138 vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);
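// Reference signalling cost: every reference pays the intra/inter bit; inter
// references then pay for the single-reference tree, where ref_single_p1
// separates LAST from {GOLDEN, ALTREF} and ref_single_p2 separates GOLDEN
// from ALTREF.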
1140 ref_frame_cost[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
1141 ref_frame_cost[LAST_FRAME] = ref_frame_cost[GOLDEN_FRAME] =
1142 ref_frame_cost[ALTREF_FRAME] = vp9_cost_bit(intra_inter_p, 1);
1144 ref_frame_cost[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
1145 ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
1146 ref_frame_cost[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
1147 ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
1148 ref_frame_cost[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
1152 MV_REFERENCE_FRAME ref_frame;
1153 PREDICTION_MODE pred_mode;
1156 #define RT_INTER_MODES 12
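// (reference frame, prediction mode) pairs evaluated by vp9_pick_inter_mode(),
// listed in search order; ref_mode_set_svc below is the reduced ordering used
// for SVC encoding.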
1157 static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
1158 { LAST_FRAME, ZEROMV }, { LAST_FRAME, NEARESTMV },
1159 { GOLDEN_FRAME, ZEROMV }, { LAST_FRAME, NEARMV },
1160 { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEARESTMV },
1161 { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV },
1162 { ALTREF_FRAME, ZEROMV }, { ALTREF_FRAME, NEARESTMV },
1163 { ALTREF_FRAME, NEARMV }, { ALTREF_FRAME, NEWMV }
1165 static const REF_MODE ref_mode_set_svc[RT_INTER_MODES] = {
1166 { LAST_FRAME, ZEROMV }, { GOLDEN_FRAME, ZEROMV },
1167 { LAST_FRAME, NEARESTMV }, { LAST_FRAME, NEARMV },
1168 { GOLDEN_FRAME, NEARESTMV }, { GOLDEN_FRAME, NEARMV },
1169 { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEWMV }
1172 static int set_intra_cost_penalty(const VP9_COMP *const cpi, BLOCK_SIZE bsize) {
1173 const VP9_COMMON *const cm = &cpi->common;
1174 // Reduce the intra cost penalty for small blocks (<=16x16).
1176 (bsize <= BLOCK_16X16) ? ((bsize <= BLOCK_8X8) ? 4 : 2) : 0;
1177 if (cpi->noise_estimate.enabled && cpi->noise_estimate.level == kHigh)
1178 // Don't reduce intra cost penalty if estimated noise level is high.
1180 return vp9_get_intra_cost_penalty(cm->base_qindex, cm->y_dc_delta_q,
1185 static INLINE void find_predictors(
1186 VP9_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
1187 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
1188 int const_motion[MAX_REF_FRAMES], int *ref_frame_skip_mask,
1189 const int flag_list[4], TileDataEnc *tile_data, int mi_row, int mi_col,
1190 struct buf_2d yv12_mb[4][MAX_MB_PLANE], BLOCK_SIZE bsize,
1191 int force_skip_low_temp_var) {
1192 VP9_COMMON *const cm = &cpi->common;
1193 MACROBLOCKD *const xd = &x->e_mbd;
1194 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
1195 TileInfo *const tile_info = &tile_data->tile_info;
1196 // TODO(jingning) placeholder for inter-frame non-RD mode decision.
1197 x->pred_mv_sad[ref_frame] = INT_MAX;
1198 frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
1199 frame_mv[ZEROMV][ref_frame].as_int = 0;
// This needs various further optimizations; to be continued.
1201 if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
1202 int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
1203 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
1204 vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
1205 if (cm->use_prev_frame_mvs) {
1206 vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
1207 x->mbmi_ext->mode_context);
1209 const_motion[ref_frame] =
1210 mv_refs_rt(cpi, cm, x, xd, tile_info, xd->mi[0], ref_frame,
1211 candidates, &frame_mv[NEWMV][ref_frame], mi_row, mi_col,
1212 (int)(cpi->svc.use_base_mv && cpi->svc.spatial_layer_id));
1214 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
1215 &frame_mv[NEARESTMV][ref_frame],
1216 &frame_mv[NEARMV][ref_frame]);
1217 // Early exit for golden frame if force_skip_low_temp_var is set.
1218 if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8 &&
1219 !(force_skip_low_temp_var && ref_frame == GOLDEN_FRAME)) {
1220 vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
1224 *ref_frame_skip_mask |= (1 << ref_frame);
1228 static void vp9_NEWMV_diff_bias(const NOISE_ESTIMATE *ne, MACROBLOCKD *xd,
1229 PREDICTION_MODE this_mode, RD_COST *this_rdc,
1230 BLOCK_SIZE bsize, int mv_row, int mv_col,
1231 int is_last_frame, int lowvar_highsumdiff,
1233 // Bias against MVs associated with NEWMV mode that are very different from
1234 // top/left neighbors.
1235 if (this_mode == NEWMV) {
1236 int al_mv_average_row;
1237 int al_mv_average_col;
1238 int left_row, left_col;
1239 int row_diff, col_diff;
1240 int above_mv_valid = 0;
1241 int left_mv_valid = 0;
1246 above_mv_valid = xd->above_mi->mv[0].as_int != INVALID_MV;
1247 above_row = xd->above_mi->mv[0].as_mv.row;
1248 above_col = xd->above_mi->mv[0].as_mv.col;
1251 left_mv_valid = xd->left_mi->mv[0].as_int != INVALID_MV;
1252 left_row = xd->left_mi->mv[0].as_mv.row;
1253 left_col = xd->left_mi->mv[0].as_mv.col;
1255 if (above_mv_valid && left_mv_valid) {
1256 al_mv_average_row = (above_row + left_row + 1) >> 1;
1257 al_mv_average_col = (above_col + left_col + 1) >> 1;
1258 } else if (above_mv_valid) {
1259 al_mv_average_row = above_row;
1260 al_mv_average_col = above_col;
1261 } else if (left_mv_valid) {
1262 al_mv_average_row = left_row;
1263 al_mv_average_col = left_col;
1265 al_mv_average_row = al_mv_average_col = 0;
1267 row_diff = (al_mv_average_row - mv_row);
1268 col_diff = (al_mv_average_col - mv_col);
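// A NEWMV that deviates from the above/left average by more than 48 (1/8-pel
// units, i.e. 6 pixels) in either component gets its rdcost doubled for
// blocks larger than 32x32, or scaled by 1.5 otherwise.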
1269 if (row_diff > 48 || row_diff < -48 || col_diff > 48 || col_diff < -48) {
1270 if (bsize > BLOCK_32X32)
1271 this_rdc->rdcost = this_rdc->rdcost << 1;
1273 this_rdc->rdcost = 3 * this_rdc->rdcost >> 1;
1276 // If noise estimation is enabled, and estimated level is above threshold,
1277 // add a bias to LAST reference with small motion, for large blocks.
1278 if (ne->enabled && ne->level >= kMedium && bsize >= BLOCK_32X32 &&
1279 is_last_frame && mv_row < 8 && mv_row > -8 && mv_col < 8 && mv_col > -8)
1280 this_rdc->rdcost = 7 * (this_rdc->rdcost >> 3);
1281 else if (lowvar_highsumdiff && !is_skin && bsize >= BLOCK_16X16 &&
1282 is_last_frame && mv_row < 16 && mv_row > -16 && mv_col < 16 &&
1284 this_rdc->rdcost = 7 * (this_rdc->rdcost >> 3);
1287 #if CONFIG_VP9_TEMPORAL_DENOISING
1288 static void vp9_pickmode_ctx_den_update(
1289 VP9_PICKMODE_CTX_DEN *ctx_den, int64_t zero_last_cost_orig,
1290 int ref_frame_cost[MAX_REF_FRAMES],
1291 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int reuse_inter_pred,
1292 TX_SIZE best_tx_size, PREDICTION_MODE best_mode,
1293 MV_REFERENCE_FRAME best_ref_frame, INTERP_FILTER best_pred_filter,
1294 uint8_t best_mode_skip_txfm) {
1295 ctx_den->zero_last_cost_orig = zero_last_cost_orig;
1296 ctx_den->ref_frame_cost = ref_frame_cost;
1297 ctx_den->frame_mv = frame_mv;
1298 ctx_den->reuse_inter_pred = reuse_inter_pred;
1299 ctx_den->best_tx_size = best_tx_size;
1300 ctx_den->best_mode = best_mode;
1301 ctx_den->best_ref_frame = best_ref_frame;
1302 ctx_den->best_pred_filter = best_pred_filter;
1303 ctx_den->best_mode_skip_txfm = best_mode_skip_txfm;
1306 static void recheck_zeromv_after_denoising(
1307 VP9_COMP *cpi, MODE_INFO *const mi, MACROBLOCK *x, MACROBLOCKD *const xd,
1308 VP9_DENOISER_DECISION decision, VP9_PICKMODE_CTX_DEN *ctx_den,
1309 struct buf_2d yv12_mb[4][MAX_MB_PLANE], RD_COST *best_rdc, BLOCK_SIZE bsize,
1310 int mi_row, int mi_col) {
1311 // If INTRA or GOLDEN reference was selected, re-evaluate ZEROMV on
1312 // denoised result. Only do this under noise conditions, and if rdcost of
// ZEROMV on the original source is not significantly higher than the rdcost
// of the best mode.
1315 if (cpi->noise_estimate.enabled && cpi->noise_estimate.level > kLow &&
1316 ctx_den->zero_last_cost_orig < (best_rdc->rdcost << 3) &&
1317 ((ctx_den->best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) ||
1318 (ctx_den->best_ref_frame == GOLDEN_FRAME &&
1319 cpi->svc.number_spatial_layers == 1 &&
1320 decision == FILTER_ZEROMV_BLOCK))) {
1321 // Check if we should pick ZEROMV on denoised signal.
1324 uint32_t var_y = UINT_MAX;
1325 uint32_t sse_y = UINT_MAX;
1328 mi->ref_frame[0] = LAST_FRAME;
1329 mi->ref_frame[1] = NONE;
1330 mi->mv[0].as_int = 0;
1331 mi->interp_filter = EIGHTTAP;
1332 xd->plane[0].pre[0] = yv12_mb[LAST_FRAME][0];
1333 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
1334 model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
1335 this_rdc.rate = rate + ctx_den->ref_frame_cost[LAST_FRAME] +
1336 cpi->inter_mode_cost[x->mbmi_ext->mode_context[LAST_FRAME]]
1337 [INTER_OFFSET(ZEROMV)];
1338 this_rdc.dist = dist;
1339 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, rate, dist);
1340 // Don't switch to ZEROMV if the rdcost for ZEROMV on denoised source
1341 // is higher than best_ref mode (on original source).
1342 if (this_rdc.rdcost > best_rdc->rdcost) {
1343 this_rdc = *best_rdc;
1344 mi->mode = ctx_den->best_mode;
1345 mi->ref_frame[0] = ctx_den->best_ref_frame;
1346 mi->interp_filter = ctx_den->best_pred_filter;
1347 if (ctx_den->best_ref_frame == INTRA_FRAME) {
1348 mi->mv[0].as_int = INVALID_MV;
1349 mi->interp_filter = SWITCHABLE_FILTERS;
1350 } else if (ctx_den->best_ref_frame == GOLDEN_FRAME) {
1352 ctx_den->frame_mv[ctx_den->best_mode][ctx_den->best_ref_frame]
1354 if (ctx_den->reuse_inter_pred) {
1355 xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0];
1356 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
1359 mi->tx_size = ctx_den->best_tx_size;
1360 x->skip_txfm[0] = ctx_den->best_mode_skip_txfm;
1362 ctx_den->best_ref_frame = LAST_FRAME;
1363 *best_rdc = this_rdc;
1367 #endif // CONFIG_VP9_TEMPORAL_DENOISING
1369 static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low, int mi_row,
1370 int mi_col, BLOCK_SIZE bsize) {
1371 const int i = (mi_row & 0x7) >> 1;
1372 const int j = (mi_col & 0x7) >> 1;
1373 int force_skip_low_temp_var = 0;
1374 // Set force_skip_low_temp_var based on the block size and block offset.
1375 if (bsize == BLOCK_64X64) {
1376 force_skip_low_temp_var = variance_low[0];
1377 } else if (bsize == BLOCK_64X32) {
1378 if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
1379 force_skip_low_temp_var = variance_low[1];
1380 } else if (!(mi_col & 0x7) && (mi_row & 0x7)) {
1381 force_skip_low_temp_var = variance_low[2];
1383 } else if (bsize == BLOCK_32X64) {
1384 if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
1385 force_skip_low_temp_var = variance_low[3];
1386 } else if ((mi_col & 0x7) && !(mi_row & 0x7)) {
1387 force_skip_low_temp_var = variance_low[4];
1389 } else if (bsize == BLOCK_32X32) {
1390 if (!(mi_col & 0x7) && !(mi_row & 0x7)) {
1391 force_skip_low_temp_var = variance_low[5];
1392 } else if ((mi_col & 0x7) && !(mi_row & 0x7)) {
1393 force_skip_low_temp_var = variance_low[6];
1394 } else if (!(mi_col & 0x7) && (mi_row & 0x7)) {
1395 force_skip_low_temp_var = variance_low[7];
1396 } else if ((mi_col & 0x7) && (mi_row & 0x7)) {
1397 force_skip_low_temp_var = variance_low[8];
1399 } else if (bsize == BLOCK_16X16) {
1400 force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]];
1401 } else if (bsize == BLOCK_32X16) {
1402 // The col shift index for the second 16x16 block.
1403 const int j2 = ((mi_col + 2) & 0x7) >> 1;
1404 // Only if each 16x16 block inside has low temporal variance.
1405 force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] &&
1406 variance_low[pos_shift_16x16[i][j2]];
1407 } else if (bsize == BLOCK_16X32) {
1408 // The row shift index for the second 16x16 block.
1409 const int i2 = ((mi_row + 2) & 0x7) >> 1;
1410 force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] &&
1411 variance_low[pos_shift_16x16[i2][j]];
1413 return force_skip_low_temp_var;
1416 void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data,
1417 int mi_row, int mi_col, RD_COST *rd_cost,
1418 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
1419 VP9_COMMON *const cm = &cpi->common;
1420 SPEED_FEATURES *const sf = &cpi->sf;
1421 const SVC *const svc = &cpi->svc;
1422 MACROBLOCKD *const xd = &x->e_mbd;
1423 MODE_INFO *const mi = xd->mi[0];
1424 struct macroblockd_plane *const pd = &xd->plane[0];
1425 PREDICTION_MODE best_mode = ZEROMV;
1426 MV_REFERENCE_FRAME ref_frame, best_ref_frame = LAST_FRAME;
1427 MV_REFERENCE_FRAME usable_ref_frame;
1428 TX_SIZE best_tx_size = TX_SIZES;
1429 INTERP_FILTER best_pred_filter = EIGHTTAP;
1430 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
1431 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
1432 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
1434 RD_COST this_rdc, best_rdc;
1435 uint8_t skip_txfm = SKIP_TXFM_NONE, best_mode_skip_txfm = SKIP_TXFM_NONE;
// var_y and sse_y are saved to be used in the skip checking below.
1437 unsigned int var_y = UINT_MAX;
1438 unsigned int sse_y = UINT_MAX;
1439 const int intra_cost_penalty = set_intra_cost_penalty(cpi, bsize);
1440 int64_t inter_mode_thresh =
1441 RDCOST(x->rdmult, x->rddiv, intra_cost_penalty, 0);
1442 const int *const rd_threshes = cpi->rd.threshes[mi->segment_id][bsize];
1443 const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
1444 int thresh_freq_fact_idx = (sb_row * BLOCK_SIZES + bsize) * MAX_MODES;
1445 const int *const rd_thresh_freq_fact =
1446 (cpi->sf.adaptive_rd_thresh_row_mt)
1447 ? &(tile_data->row_base_thresh_freq_fact[thresh_freq_fact_idx])
1448 : tile_data->thresh_freq_fact[bsize];
1450 INTERP_FILTER filter_ref;
1451 const int bsl = mi_width_log2_lookup[bsize];
1452 const int pred_filter_search =
1453 cm->interp_filter == SWITCHABLE
1454 ? (((mi_row + mi_col) >> bsl) +
1455 get_chessboard_index(cm->current_video_frame)) &
1458 int const_motion[MAX_REF_FRAMES] = { 0 };
1459 const int bh = num_4x4_blocks_high_lookup[bsize] << 2;
1460 const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
// For speed 6, the result of the interp filter search is reused later in the
// actual encoding process.
// tmp[3] points to the dst buffer, and the other 3 point to allocated buffers.
1465 DECLARE_ALIGNED(16, uint8_t, pred_buf[3 * 64 * 64]);
1466 #if CONFIG_VP9_HIGHBITDEPTH
1467 DECLARE_ALIGNED(16, uint16_t, pred_buf_16[3 * 64 * 64]);
1469 struct buf_2d orig_dst = pd->dst;
1470 PRED_BUFFER *best_pred = NULL;
1471 PRED_BUFFER *this_mode_pred = NULL;
1472 const int pixels_in_block = bh * bw;
1473 int reuse_inter_pred = cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready;
1474 int ref_frame_skip_mask = 0;
1476 int best_pred_sad = INT_MAX;
1477 int best_early_term = 0;
1478 int ref_frame_cost[MAX_REF_FRAMES];
1479 int svc_force_zero_mode[3] = { 0 };
1480 int perform_intra_pred = 1;
1481 int use_golden_nonzeromv = 1;
1482 int force_skip_low_temp_var = 0;
1483 int skip_ref_find_pred[4] = { 0 };
1484 #if CONFIG_VP9_TEMPORAL_DENOISING
1485 VP9_PICKMODE_CTX_DEN ctx_den;
1486 int64_t zero_last_cost_orig = INT64_MAX;
1487 int denoise_svc_pickmode = 1;
1490 init_ref_frame_cost(cm, xd, ref_frame_cost);
1492 if (reuse_inter_pred) {
1494 for (i = 0; i < 3; i++) {
1495 #if CONFIG_VP9_HIGHBITDEPTH
1496 if (cm->use_highbitdepth)
1497 tmp[i].data = CONVERT_TO_BYTEPTR(&pred_buf_16[pixels_in_block * i]);
1499 tmp[i].data = &pred_buf[pixels_in_block * i];
1501 tmp[i].data = &pred_buf[pixels_in_block * i];
1502 #endif // CONFIG_VP9_HIGHBITDEPTH
1506 tmp[3].data = pd->dst.buf;
1507 tmp[3].stride = pd->dst.stride;
1511 x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
1514 // Instead of using vp9_get_pred_context_switchable_interp(xd) to assign
1515 // filter_ref, we use a less strict condition on assigning filter_ref.
// This is to reduce the probability of entering the flow of not assigning
// filter_ref and then skipping the filter search.
1518 if (xd->above_mi && is_inter_block(xd->above_mi))
1519 filter_ref = xd->above_mi->interp_filter;
1520 else if (xd->left_mi && is_inter_block(xd->left_mi))
1521 filter_ref = xd->left_mi->interp_filter;
1523 filter_ref = cm->interp_filter;
1525 // initialize mode decisions
1526 vp9_rd_cost_reset(&best_rdc);
1527 vp9_rd_cost_reset(rd_cost);
1528 mi->sb_type = bsize;
1529 mi->ref_frame[0] = NONE;
1530 mi->ref_frame[1] = NONE;
1533 VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[cm->tx_mode]);
1535 if (sf->short_circuit_flat_blocks || sf->limit_newmv_early_exit) {
1536 #if CONFIG_VP9_HIGHBITDEPTH
1537 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
1538 x->source_variance = vp9_high_get_sby_perpixel_variance(
1539 cpi, &x->plane[0].src, bsize, xd->bd);
1541 #endif // CONFIG_VP9_HIGHBITDEPTH
1542 x->source_variance =
1543 vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
1546 #if CONFIG_VP9_TEMPORAL_DENOISING
1547 if (cpi->oxcf.noise_sensitivity > 0) {
1549 int layer = LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id,
1550 cpi->svc.temporal_layer_id,
1551 cpi->svc.number_temporal_layers);
1552 LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer];
1553 denoise_svc_pickmode = denoise_svc(cpi) && !lc->is_key_frame;
1555 if (cpi->denoiser.denoising_level > kDenLowLow && denoise_svc_pickmode)
1556 vp9_denoiser_reset_frame_stats(ctx);
1560 if (cpi->rc.frames_since_golden == 0 && !cpi->use_svc) {
1561 usable_ref_frame = LAST_FRAME;
1563 usable_ref_frame = GOLDEN_FRAME;
1566 if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) {
1567 if (cpi->rc.alt_ref_gf_group || cpi->rc.is_src_frame_alt_ref)
1568 usable_ref_frame = ALTREF_FRAME;
1570 if (cpi->rc.is_src_frame_alt_ref) {
1571 skip_ref_find_pred[LAST_FRAME] = 1;
1572 skip_ref_find_pred[GOLDEN_FRAME] = 1;
// For svc mode, on spatial_layer_id > 0: if the reference has a different
// scale, constrain the inter mode to only test zero motion.
1578 if (cpi->use_svc && svc->force_zero_mode_spatial_ref &&
1579 cpi->svc.spatial_layer_id > 0) {
1580 if (cpi->ref_frame_flags & flag_list[LAST_FRAME]) {
1581 struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
1582 if (vp9_is_scaled(sf)) svc_force_zero_mode[LAST_FRAME - 1] = 1;
1584 if (cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) {
1585 struct scale_factors *const sf = &cm->frame_refs[GOLDEN_FRAME - 1].sf;
1586 if (vp9_is_scaled(sf)) svc_force_zero_mode[GOLDEN_FRAME - 1] = 1;
1590 if (cpi->sf.short_circuit_low_temp_var) {
1591 force_skip_low_temp_var =
1592 get_force_skip_low_temp_var(&x->variance_low[0], mi_row, mi_col, bsize);
// If force_skip_low_temp_var is set, then for short-circuit modes 1 and 3,
// skip the golden reference.
1595 if ((cpi->sf.short_circuit_low_temp_var == 1 ||
1596 cpi->sf.short_circuit_low_temp_var == 3) &&
1597 force_skip_low_temp_var) {
1598 usable_ref_frame = LAST_FRAME;
1602 if (!((cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) &&
1603 !svc_force_zero_mode[GOLDEN_FRAME - 1] && !force_skip_low_temp_var))
1604 use_golden_nonzeromv = 0;
1606 if (cpi->oxcf.speed >= 8 && !cpi->use_svc &&
1607 ((cpi->rc.frames_since_golden + 1) < x->last_sb_high_content ||
1608 x->last_sb_high_content > 40))
1609 usable_ref_frame = LAST_FRAME;
1611 for (ref_frame = LAST_FRAME; ref_frame <= usable_ref_frame; ++ref_frame) {
1612 if (!skip_ref_find_pred[ref_frame]) {
1613 find_predictors(cpi, x, ref_frame, frame_mv, const_motion,
1614 &ref_frame_skip_mask, flag_list, tile_data, mi_row,
1615 mi_col, yv12_mb, bsize, force_skip_low_temp_var);
1619 for (idx = 0; idx < RT_INTER_MODES; ++idx) {
1626 int this_early_term = 0;
1627 int rd_computed = 0;
1629 PREDICTION_MODE this_mode = ref_mode_set[idx].pred_mode;
1631 ref_frame = ref_mode_set[idx].ref_frame;
1634 this_mode = ref_mode_set_svc[idx].pred_mode;
1635 ref_frame = ref_mode_set_svc[idx].ref_frame;
1637 if (ref_frame > usable_ref_frame) continue;
1638 if (skip_ref_find_pred[ref_frame]) continue;
1640 if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
1641 this_mode != NEARESTMV) {
1645 if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode))) continue;
1647 if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) {
1648 if (cpi->rc.is_src_frame_alt_ref &&
1649 (ref_frame != ALTREF_FRAME ||
1650 frame_mv[this_mode][ref_frame].as_int != 0))
1653 if (cpi->rc.alt_ref_gf_group &&
1654 cpi->rc.frames_since_golden > (cpi->rc.baseline_gf_interval >> 1) &&
1655 ref_frame == GOLDEN_FRAME &&
1656 frame_mv[this_mode][ref_frame].as_int != 0)
1659 if (cpi->rc.alt_ref_gf_group &&
1660 cpi->rc.frames_since_golden < (cpi->rc.baseline_gf_interval >> 1) &&
1661 ref_frame == ALTREF_FRAME &&
1662 frame_mv[this_mode][ref_frame].as_int != 0)
1666 if (!(cpi->ref_frame_flags & flag_list[ref_frame])) continue;
1668 if (const_motion[ref_frame] && this_mode == NEARMV) continue;
// Skip non-zeromv mode search for golden frame if force_skip_low_temp_var
// is set. If nearestmv for golden frame is 0, zeromv mode will be skipped
// later.
1673 if (force_skip_low_temp_var && ref_frame == GOLDEN_FRAME &&
1674 frame_mv[this_mode][ref_frame].as_int != 0) {
1678 if ((cpi->sf.short_circuit_low_temp_var >= 2 ||
1679 (cpi->sf.short_circuit_low_temp_var == 1 && bsize == BLOCK_64X64)) &&
1680 force_skip_low_temp_var && ref_frame == LAST_FRAME &&
1681 this_mode == NEWMV) {
1686 if (svc_force_zero_mode[ref_frame - 1] &&
1687 frame_mv[this_mode][ref_frame].as_int != 0)
1691 if (sf->reference_masking &&
1692 !(frame_mv[this_mode][ref_frame].as_int == 0 &&
1693 ref_frame == LAST_FRAME)) {
1694 if (usable_ref_frame < ALTREF_FRAME) {
1695 if (!force_skip_low_temp_var && usable_ref_frame > LAST_FRAME) {
1696 i = (ref_frame == LAST_FRAME) ? GOLDEN_FRAME : LAST_FRAME;
1697 if ((cpi->ref_frame_flags & flag_list[i]))
1698 if (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[i] << 1))
1699 ref_frame_skip_mask |= (1 << ref_frame);
1701 } else if (!cpi->rc.is_src_frame_alt_ref &&
1702 !(frame_mv[this_mode][ref_frame].as_int == 0 &&
1703 ref_frame == ALTREF_FRAME)) {
1704 int ref1 = (ref_frame == GOLDEN_FRAME) ? LAST_FRAME : GOLDEN_FRAME;
1705 int ref2 = (ref_frame == ALTREF_FRAME) ? LAST_FRAME : ALTREF_FRAME;
1706 if (((cpi->ref_frame_flags & flag_list[ref1]) &&
1707 (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[ref1] << 1))) ||
1708 ((cpi->ref_frame_flags & flag_list[ref2]) &&
1709 (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[ref2] << 1))))
1710 ref_frame_skip_mask |= (1 << ref_frame);
1713 if (ref_frame_skip_mask & (1 << ref_frame)) continue;
1715 // Select prediction reference frames.
1716 for (i = 0; i < MAX_MB_PLANE; i++)
1717 xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
1719 mi->ref_frame[0] = ref_frame;
1720 set_ref_ptrs(cm, xd, ref_frame, NONE);
1722 mode_index = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
1723 mode_rd_thresh = best_mode_skip_txfm ? rd_threshes[mode_index] << 1
1724 : rd_threshes[mode_index];
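// If the best mode so far skips the transform entirely, double the pruning
// threshold applied to the remaining modes.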
1726 // Increase mode_rd_thresh value for GOLDEN_FRAME for improved encoding
1727 // speed with little/no subjective quality loss.
1728 if (cpi->sf.bias_golden && ref_frame == GOLDEN_FRAME &&
1729 cpi->rc.frames_since_golden > 4)
1730 mode_rd_thresh = mode_rd_thresh << 3;
1732 if ((cpi->sf.adaptive_rd_thresh_row_mt &&
1733 rd_less_than_thresh_row_mt(best_rdc.rdcost, mode_rd_thresh,
1734 &rd_thresh_freq_fact[mode_index])) ||
1735 (!cpi->sf.adaptive_rd_thresh_row_mt &&
1736 rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
1737 &rd_thresh_freq_fact[mode_index])))
1740 if (this_mode == NEWMV) {
1741 if (ref_frame > LAST_FRAME && !cpi->use_svc &&
1742 cpi->oxcf.rc_mode == VPX_CBR) {
1747 if (bsize < BLOCK_16X16) continue;
1749 tmp_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
1751 if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) continue;
1752 if (tmp_sad + (num_pels_log2_lookup[bsize] << 4) > best_pred_sad)
1755 frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int;
1756 rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv,
1757 &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
1758 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
1759 frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
1760 frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;
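// The integral projection MV is costed at 1/8-pel precision, converted
// back to full-pel units, and then refined to sub-pel accuracy by
// find_fractional_mv_step below.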
1762 cpi->find_fractional_mv_step(
1763 x, &frame_mv[NEWMV][ref_frame].as_mv,
1764 &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
1765 cpi->common.allow_high_precision_mv, x->errorperbit,
1766 &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
1767 cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
1768 x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref_frame], NULL, 0,
1770 } else if (svc->use_base_mv && svc->spatial_layer_id) {
1771 if (frame_mv[NEWMV][ref_frame].as_int != INVALID_MV) {
1772 const int pre_stride = xd->plane[0].pre[0].stride;
1773 int base_mv_sad = INT_MAX;
1774 const float base_mv_bias = sf->base_mv_aggressive ? 1.5f : 1.0f;
1775 const uint8_t *const pre_buf =
1776 xd->plane[0].pre[0].buf +
1777 (frame_mv[NEWMV][ref_frame].as_mv.row >> 3) * pre_stride +
1778 (frame_mv[NEWMV][ref_frame].as_mv.col >> 3);
1779 base_mv_sad = cpi->fn_ptr[bsize].sdf(
1780 x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride);
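// Compare the SAD at the base-layer MV against the SAD of the spatial MV
// predictor; base_mv_bias relaxes the test when base_mv_aggressive is set,
// so the base-layer MV is more readily used to seed combined_motion_search.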
1782 if (base_mv_sad < (int)(base_mv_bias * x->pred_mv_sad[ref_frame])) {
1783 // Base layer mv is good.
1784 if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
1785 &frame_mv[NEWMV][ref_frame], &rate_mv,
1786 best_rdc.rdcost, 1)) {
1789 } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
1790 &frame_mv[NEWMV][ref_frame],
1791 &rate_mv, best_rdc.rdcost, 0)) {
1794 } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
1795 &frame_mv[NEWMV][ref_frame],
1796 &rate_mv, best_rdc.rdcost, 0)) {
1799 } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
1800 &frame_mv[NEWMV][ref_frame], &rate_mv,
1801 best_rdc.rdcost, 0)) {
1806 // If use_golden_nonzeromv is false, NEWMV is skipped for golden, so there is
1807 // no need to compute best_pred_sad, which is only used to skip golden NEWMV.
1808 if (use_golden_nonzeromv && this_mode == NEWMV && ref_frame == LAST_FRAME &&
1809 frame_mv[NEWMV][LAST_FRAME].as_int != INVALID_MV) {
1810 const int pre_stride = xd->plane[0].pre[0].stride;
1811 const uint8_t *const pre_buf =
1812 xd->plane[0].pre[0].buf +
1813 (frame_mv[NEWMV][LAST_FRAME].as_mv.row >> 3) * pre_stride +
1814 (frame_mv[NEWMV][LAST_FRAME].as_mv.col >> 3);
1815 best_pred_sad = cpi->fn_ptr[bsize].sdf(
1816 x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride);
1817 x->pred_mv_sad[LAST_FRAME] = best_pred_sad;
1820 if (this_mode != NEARESTMV &&
1821 frame_mv[this_mode][ref_frame].as_int ==
1822 frame_mv[NEARESTMV][ref_frame].as_int)
1825 mi->mode = this_mode;
1826 mi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;
1828 // Search for the best prediction filter type, when the resulting
1829 // motion vector is at sub-pixel accuracy level for luma component, i.e.,
1830 // the last three bits are non-zero.
1831 if (reuse_inter_pred) {
1832 if (!this_mode_pred) {
1833 this_mode_pred = &tmp[3];
1835 this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
1836 pd->dst.buf = this_mode_pred->data;
1837 pd->dst.stride = bw;
1841 if ((this_mode == NEWMV || filter_ref == SWITCHABLE) &&
1842 pred_filter_search &&
1843 (ref_frame == LAST_FRAME ||
1844 (ref_frame == GOLDEN_FRAME &&
1845 (cpi->use_svc || cpi->oxcf.rc_mode == VPX_VBR))) &&
1846 (((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07) != 0)) {
1850 unsigned int pf_var[3];
1851 unsigned int pf_sse[3];
1852 TX_SIZE pf_tx_size[3];
1853 int64_t best_cost = INT64_MAX;
1854 INTERP_FILTER best_filter = SWITCHABLE, filter;
1855 PRED_BUFFER *current_pred = this_mode_pred;
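// Try the EIGHTTAP and EIGHTTAP_SMOOTH filters, build the luma prediction
// for each, and keep the filter with the lowest model-based RD cost
// (including the switchable-filter signalling rate).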
1858 for (filter = EIGHTTAP; filter <= EIGHTTAP_SMOOTH; ++filter) {
1860 mi->interp_filter = filter;
1861 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
1862 model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[filter], &pf_dist[filter],
1863 &pf_var[filter], &pf_sse[filter]);
1864 curr_rate[filter] = pf_rate[filter];
1865 pf_rate[filter] += vp9_get_switchable_rate(cpi, xd);
1866 cost = RDCOST(x->rdmult, x->rddiv, pf_rate[filter], pf_dist[filter]);
1867 pf_tx_size[filter] = mi->tx_size;
1868 if (cost < best_cost) {
1869 best_filter = filter;
1871 skip_txfm = x->skip_txfm[0];
1873 if (reuse_inter_pred) {
1874 if (this_mode_pred != current_pred) {
1875 free_pred_buffer(this_mode_pred);
1876 this_mode_pred = current_pred;
1878 current_pred = &tmp[get_pred_buffer(tmp, 3)];
1879 pd->dst.buf = current_pred->data;
1880 pd->dst.stride = bw;
1885 if (reuse_inter_pred && this_mode_pred != current_pred)
1886 free_pred_buffer(current_pred);
1888 mi->interp_filter = best_filter;
1889 mi->tx_size = pf_tx_size[best_filter];
1890 this_rdc.rate = curr_rate[best_filter];
1891 this_rdc.dist = pf_dist[best_filter];
1892 var_y = pf_var[best_filter];
1893 sse_y = pf_sse[best_filter];
1894 x->skip_txfm[0] = skip_txfm;
1895 if (reuse_inter_pred) {
1896 pd->dst.buf = this_mode_pred->data;
1897 pd->dst.stride = this_mode_pred->stride;
1900 const int large_block = (x->sb_is_skin || cpi->oxcf.speed < 7)
1901 ? bsize > BLOCK_32X32
1902 : bsize >= BLOCK_32X32;
1903 mi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP : filter_ref;
1904 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
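// No filter search: the prediction is built with a fixed filter and the RD
// cost is modelled directly. For large blocks in CBR mode the _large model
// below can also set this_early_term, which lets the mode loop stop early.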
1906 // For large partition blocks, extra testing is done.
1907 if (cpi->oxcf.rc_mode == VPX_CBR && large_block &&
1908 !cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) &&
1910 model_rd_for_sb_y_large(cpi, bsize, x, xd, &this_rdc.rate,
1911 &this_rdc.dist, &var_y, &sse_y, mi_row, mi_col,
1915 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
1920 if (!this_early_term) {
1921 this_sse = (int64_t)sse_y;
1922 block_yrd(cpi, x, &this_rdc, &is_skippable, &this_sse, bsize,
1923 VPXMIN(mi->tx_size, TX_16X16), rd_computed);
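// Skip decision: if the transform search finds the block skippable, only
// the skip flag is costed; otherwise compare coding the residual against
// skipping with the full SSE as distortion, and take whichever is cheaper.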
1925 x->skip_txfm[0] = is_skippable;
1927 this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
1929 if (RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist) <
1930 RDCOST(x->rdmult, x->rddiv, 0, this_sse)) {
1931 this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
1933 this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
1934 this_rdc.dist = this_sse;
1935 x->skip_txfm[0] = SKIP_TXFM_AC_DC;
1939 if (cm->interp_filter == SWITCHABLE) {
1940 if ((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07)
1941 this_rdc.rate += vp9_get_switchable_rate(cpi, xd);
1944 this_rdc.rate += cm->interp_filter == SWITCHABLE
1945 ? vp9_get_switchable_rate(cpi, xd)
1947 this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
1950 if (x->color_sensitivity[0] || x->color_sensitivity[1]) {
1952 const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, &xd->plane[1]);
1953 if (x->color_sensitivity[0])
1954 vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 1);
1955 if (x->color_sensitivity[1])
1956 vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 2);
1957 model_rd_for_sb_uv(cpi, uv_bsize, x, xd, &rdc_uv, &var_y, &sse_y, 1, 2);
1958 this_rdc.rate += rdc_uv.rate;
1959 this_rdc.dist += rdc_uv.dist;
1962 this_rdc.rate += rate_mv;
1963 this_rdc.rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
1964 [INTER_OFFSET(this_mode)];
1965 this_rdc.rate += ref_frame_cost[ref_frame];
1966 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
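// At this point this_rdc.rdcost combines the MV rate, inter-mode and
// reference signalling costs with the modelled distortion via RDCOST
// (roughly rate scaled by x->rdmult plus distortion shifted by x->rddiv;
// see the macro definition in vp9_rd.h for the exact scaling).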
1968 // Bias against NEWMV that is very different from its neighbors, and bias
1969 // toward small-motion LAST-ref candidates for noisy input.
1970 if (cpi->oxcf.rc_mode == VPX_CBR && cpi->oxcf.speed >= 5 &&
1971 cpi->oxcf.content != VP9E_CONTENT_SCREEN) {
1972 vp9_NEWMV_diff_bias(&cpi->noise_estimate, xd, this_mode, &this_rdc, bsize,
1973 frame_mv[this_mode][ref_frame].as_mv.row,
1974 frame_mv[this_mode][ref_frame].as_mv.col,
1975 ref_frame == LAST_FRAME, x->lowvar_highsumdiff,
1979 // Skip check: test whether this block can be reconstructed by prediction only.
1981 if (cpi->allow_encode_breakout) {
1982 encode_breakout_test(cpi, x, bsize, mi_row, mi_col, ref_frame, this_mode,
1983 var_y, sse_y, yv12_mb, &this_rdc.rate,
1986 this_rdc.rate += rate_mv;
1988 RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
1992 #if CONFIG_VP9_TEMPORAL_DENOISING
1993 if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc_pickmode &&
1994 cpi->denoiser.denoising_level > kDenLowLow) {
1995 vp9_denoiser_update_frame_stats(mi, sse_y, this_mode, ctx);
1996 // Keep track of zero_last cost.
1997 if (ref_frame == LAST_FRAME && frame_mv[this_mode][ref_frame].as_int == 0)
1998 zero_last_cost_orig = this_rdc.rdcost;
2004 if (this_rdc.rdcost < best_rdc.rdcost || x->skip) {
2005 best_rdc = this_rdc;
2006 best_mode = this_mode;
2007 best_pred_filter = mi->interp_filter;
2008 best_tx_size = mi->tx_size;
2009 best_ref_frame = ref_frame;
2010 best_mode_skip_txfm = x->skip_txfm[0];
2011 best_early_term = this_early_term;
2013 if (reuse_inter_pred) {
2014 free_pred_buffer(best_pred);
2015 best_pred = this_mode_pred;
2018 if (reuse_inter_pred) free_pred_buffer(this_mode_pred);
2023 // If early termination flag is 1 and at least 2 modes are checked,
2024 // the mode search is terminated.
2025 if (best_early_term && idx > 0) {
2031 mi->mode = best_mode;
2032 mi->interp_filter = best_pred_filter;
2033 mi->tx_size = best_tx_size;
2034 mi->ref_frame[0] = best_ref_frame;
2035 mi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int;
2036 xd->mi[0]->bmi[0].as_mv[0].as_int = mi->mv[0].as_int;
2037 x->skip_txfm[0] = best_mode_skip_txfm;
2039 // For spatial enhancement layer: perform intra prediction only if base
2040 // layer is chosen as the reference. Always perform intra prediction if
2041 // LAST is the only reference or is_key_frame is set.
2042 if (cpi->svc.spatial_layer_id) {
2043 perform_intra_pred =
2044 cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame ||
2045 !(cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) ||
2046 (!cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame &&
2047 svc_force_zero_mode[best_ref_frame - 1]);
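// For enhancement layers the intra-search threshold is tripled below, so
// intra modes are evaluated less often than on the base layer.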
2048 inter_mode_thresh = (inter_mode_thresh << 1) + inter_mode_thresh;
2050 if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
2051 cpi->rc.is_src_frame_alt_ref)
2052 perform_intra_pred = 0;
2053 // Perform intra prediction search if the best SAD is above a certain threshold.
2055 if (best_rdc.rdcost == INT64_MAX ||
2056 ((!force_skip_low_temp_var || bsize < BLOCK_32X32) &&
2057 perform_intra_pred && !x->skip && best_rdc.rdcost > inter_mode_thresh &&
2058 bsize <= cpi->sf.max_intra_bsize && !x->skip_low_source_sad &&
2059 !x->lowvar_highsumdiff)) {
2060 struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 };
2062 TX_SIZE best_intra_tx_size = TX_SIZES;
2063 TX_SIZE intra_tx_size =
2064 VPXMIN(max_txsize_lookup[bsize],
2065 tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
2066 if (cpi->oxcf.content != VP9E_CONTENT_SCREEN && intra_tx_size > TX_16X16)
2067 intra_tx_size = TX_16X16;
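// If the best inter prediction currently lives in the frame's dst buffer,
// copy it into a scratch PRED_BUFFER first so the intra search (which also
// writes into dst) does not clobber it.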
2069 if (reuse_inter_pred && best_pred != NULL) {
2070 if (best_pred->data == orig_dst.buf) {
2071 this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
2072 #if CONFIG_VP9_HIGHBITDEPTH
2073 if (cm->use_highbitdepth)
2074 vpx_highbd_convolve_copy(
2075 CONVERT_TO_SHORTPTR(best_pred->data), best_pred->stride,
2076 CONVERT_TO_SHORTPTR(this_mode_pred->data), this_mode_pred->stride,
2077 NULL, 0, NULL, 0, bw, bh, xd->bd);
2079 vpx_convolve_copy(best_pred->data, best_pred->stride,
2080 this_mode_pred->data, this_mode_pred->stride, NULL,
2081 0, NULL, 0, bw, bh);
2083 vpx_convolve_copy(best_pred->data, best_pred->stride,
2084 this_mode_pred->data, this_mode_pred->stride, NULL, 0,
2086 #endif // CONFIG_VP9_HIGHBITDEPTH
2087 best_pred = this_mode_pred;
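// Evaluate the small set of intra modes in intra_mode_list (DC, V, H and TM
// prediction) at the chosen transform size; if one beats the best inter
// mode in RD cost it becomes the final pick.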
2092 for (i = 0; i < 4; ++i) {
2093 const PREDICTION_MODE this_mode = intra_mode_list[i];
2094 THR_MODES mode_index = mode_idx[INTRA_FRAME][mode_offset(this_mode)];
2095 int mode_rd_thresh = rd_threshes[mode_index];
2096 if (sf->short_circuit_flat_blocks && x->source_variance == 0 &&
2097 this_mode != DC_PRED) {
2101 if (!((1 << this_mode) & cpi->sf.intra_y_mode_bsize_mask[bsize]))
2104 if ((cpi->sf.adaptive_rd_thresh_row_mt &&
2105 rd_less_than_thresh_row_mt(best_rdc.rdcost, mode_rd_thresh,
2106 &rd_thresh_freq_fact[mode_index])) ||
2107 (!cpi->sf.adaptive_rd_thresh_row_mt &&
2108 rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
2109 &rd_thresh_freq_fact[mode_index])))
2112 mi->mode = this_mode;
2113 mi->ref_frame[0] = INTRA_FRAME;
2114 this_rdc.dist = this_rdc.rate = 0;
2115 args.mode = this_mode;
2117 args.rdc = &this_rdc;
2118 mi->tx_size = intra_tx_size;
2119 vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra,
2121 // Check skip cost here since skippable is not set for uv; this
2122 // mirrors the behavior used by inter.
2123 if (args.skippable) {
2124 x->skip_txfm[0] = SKIP_TXFM_AC_DC;
2125 this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1);
2127 x->skip_txfm[0] = SKIP_TXFM_NONE;
2128 this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0);
2130 // Inter and intra RD will mismatch in scale for non-screen content.
2131 if (cpi->oxcf.content == VP9E_CONTENT_SCREEN) {
2132 if (x->color_sensitivity[0])
2133 vp9_foreach_transformed_block_in_plane(xd, bsize, 1,
2134 estimate_block_intra, &args);
2135 if (x->color_sensitivity[1])
2136 vp9_foreach_transformed_block_in_plane(xd, bsize, 2,
2137 estimate_block_intra, &args);
2139 this_rdc.rate += cpi->mbmode_cost[this_mode];
2140 this_rdc.rate += ref_frame_cost[INTRA_FRAME];
2141 this_rdc.rate += intra_cost_penalty;
2143 RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
2145 if (this_rdc.rdcost < best_rdc.rdcost) {
2146 best_rdc = this_rdc;
2147 best_mode = this_mode;
2148 best_intra_tx_size = mi->tx_size;
2149 best_ref_frame = INTRA_FRAME;
2150 mi->uv_mode = this_mode;
2151 mi->mv[0].as_int = INVALID_MV;
2152 best_mode_skip_txfm = x->skip_txfm[0];
2156 // Reset mb_mode_info to the best inter mode.
2157 if (best_ref_frame != INTRA_FRAME) {
2158 mi->tx_size = best_tx_size;
2160 mi->tx_size = best_intra_tx_size;
2165 mi->mode = best_mode;
2166 mi->ref_frame[0] = best_ref_frame;
2167 x->skip_txfm[0] = best_mode_skip_txfm;
2169 if (!is_inter_block(mi)) {
2170 mi->interp_filter = SWITCHABLE_FILTERS;
2173 if (reuse_inter_pred && best_pred != NULL) {
2174 if (best_pred->data != orig_dst.buf && is_inter_mode(mi->mode)) {
2175 #if CONFIG_VP9_HIGHBITDEPTH
2176 if (cm->use_highbitdepth)
2177 vpx_highbd_convolve_copy(
2178 CONVERT_TO_SHORTPTR(best_pred->data), best_pred->stride,
2179 CONVERT_TO_SHORTPTR(pd->dst.buf), pd->dst.stride, NULL, 0, NULL, 0,
2182 vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
2183 pd->dst.stride, NULL, 0, NULL, 0, bw, bh);
2185 vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf,
2186 pd->dst.stride, NULL, 0, NULL, 0, bw, bh);
2187 #endif // CONFIG_VP9_HIGHBITDEPTH
2191 #if CONFIG_VP9_TEMPORAL_DENOISING
2192 if (cpi->oxcf.noise_sensitivity > 0 && cpi->resize_pending == 0 &&
2193 denoise_svc_pickmode && cpi->denoiser.denoising_level > kDenLowLow &&
2194 cpi->denoiser.reset == 0) {
2195 VP9_DENOISER_DECISION decision = COPY_BLOCK;
2196 vp9_pickmode_ctx_den_update(&ctx_den, zero_last_cost_orig, ref_frame_cost,
2197 frame_mv, reuse_inter_pred, best_tx_size,
2198 best_mode, best_ref_frame, best_pred_filter,
2199 best_mode_skip_txfm);
2200 vp9_denoiser_denoise(cpi, x, mi_row, mi_col, bsize, ctx, &decision);
2201 recheck_zeromv_after_denoising(cpi, mi, x, xd, decision, &ctx_den, yv12_mb,
2202 &best_rdc, bsize, mi_row, mi_col);
2203 best_ref_frame = ctx_den.best_ref_frame;
2207 if (cpi->sf.adaptive_rd_thresh) {
2208 THR_MODES best_mode_idx = mode_idx[best_ref_frame][mode_offset(mi->mode)];
2210 if (best_ref_frame == INTRA_FRAME) {
2211 // Only consider the modes that are included in the intra_mode_list.
2212 int intra_modes = sizeof(intra_mode_list) / sizeof(PREDICTION_MODE);
2215 // TODO(yunqingwang): Check intra mode mask and only update freq_fact
2216 // for those valid modes.
2217 for (i = 0; i < intra_modes; i++) {
2218 if (cpi->sf.adaptive_rd_thresh_row_mt)
2219 update_thresh_freq_fact_row_mt(cpi, tile_data, x->source_variance,
2220 thresh_freq_fact_idx, INTRA_FRAME,
2221 best_mode_idx, intra_mode_list[i]);
2223 update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize,
2224 INTRA_FRAME, best_mode_idx,
2225 intra_mode_list[i]);
2228 for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
2229 PREDICTION_MODE this_mode;
2230 if (best_ref_frame != ref_frame) continue;
2231 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
2232 if (cpi->sf.adaptive_rd_thresh_row_mt)
2233 update_thresh_freq_fact_row_mt(cpi, tile_data, x->source_variance,
2234 thresh_freq_fact_idx, ref_frame,
2235 best_mode_idx, this_mode);
2237 update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize,
2238 ref_frame, best_mode_idx, this_mode);
2244 *rd_cost = best_rdc;
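// Non-RD mode selection for sub-8x8 partitions: for each available
// reference frame (LAST and GOLDEN), each 4x4/4x8/8x4 sub-block picks one
// of NEARESTMV, NEARMV, ZEROMV or NEWMV using a model-based RD cost, and
// the reference with the lowest total cost is kept.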
2247 void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row,
2248 int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize,
2249 PICK_MODE_CONTEXT *ctx) {
2250 VP9_COMMON *const cm = &cpi->common;
2251 SPEED_FEATURES *const sf = &cpi->sf;
2252 MACROBLOCKD *const xd = &x->e_mbd;
2253 MODE_INFO *const mi = xd->mi[0];
2254 MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2255 const struct segmentation *const seg = &cm->seg;
2256 MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
2257 MV_REFERENCE_FRAME best_ref_frame = NONE;
2258 unsigned char segment_id = mi->segment_id;
2259 struct buf_2d yv12_mb[4][MAX_MB_PLANE];
2260 static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
2262 int64_t best_rd = INT64_MAX;
2263 b_mode_info bsi[MAX_REF_FRAMES][4];
2264 int ref_frame_skip_mask = 0;
2265 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
2266 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
2269 x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
2270 ctx->pred_pixel_ready = 0;
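// Set up prediction buffers and MV reference candidates for LAST and
// GOLDEN; references that are unavailable are added to the skip mask.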
2272 for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
2273 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
2275 x->pred_mv_sad[ref_frame] = INT_MAX;
2277 if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
2278 int_mv *const candidates = mbmi_ext->ref_mvs[ref_frame];
2279 const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
2280 vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf,
2282 vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col,
2283 mbmi_ext->mode_context);
2285 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
2286 &dummy_mv[0], &dummy_mv[1]);
2288 ref_frame_skip_mask |= (1 << ref_frame);
2292 mi->sb_type = bsize;
2293 mi->tx_size = TX_4X4;
2294 mi->uv_mode = DC_PRED;
2295 mi->ref_frame[0] = LAST_FRAME;
2296 mi->ref_frame[1] = NONE;
2298 cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
2300 for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
2301 int64_t this_rd = 0;
2304 if (ref_frame_skip_mask & (1 << ref_frame)) continue;
2306 #if CONFIG_BETTER_HW_COMPATIBILITY
2307 if ((bsize == BLOCK_8X4 || bsize == BLOCK_4X8) && ref_frame > INTRA_FRAME &&
2308 vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
2312 // TODO(jingning, agrange): Scaling reference frame not supported for
2313 // sub8x8 blocks. Is this supported now?
2314 if (ref_frame > INTRA_FRAME &&
2315 vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
2318 // If the segment reference frame feature is enabled,
2319 // then do nothing if the current ref frame is not allowed.
2320 if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
2321 get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
2324 mi->ref_frame[0] = ref_frame;
2326 set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
2328 // Select prediction reference frames.
2329 for (plane = 0; plane < MAX_MB_PLANE; plane++)
2330 xd->plane[plane].pre[0] = yv12_mb[ref_frame][plane];
2332 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
2333 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
2334 int_mv b_mv[MB_MODE_COUNT];
2335 int64_t b_best_rd = INT64_MAX;
2336 const int i = idy * 2 + idx;
2337 PREDICTION_MODE this_mode;
2339 unsigned int var_y, sse_y;
2341 struct macroblock_plane *p = &x->plane[0];
2342 struct macroblockd_plane *pd = &xd->plane[0];
2344 const struct buf_2d orig_src = p->src;
2345 const struct buf_2d orig_dst = pd->dst;
2346 struct buf_2d orig_pre[2];
2347 memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
2349 // set buffer pointers for sub8x8 motion search.
2351 &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
2353 &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
2356 .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
2358 b_mv[ZEROMV].as_int = 0;
2359 b_mv[NEWMV].as_int = INVALID_MV;
2360 vp9_append_sub8x8_mvs_for_idx(cm, xd, i, 0, mi_row, mi_col,
2361 &b_mv[NEARESTMV], &b_mv[NEARMV],
2362 mbmi_ext->mode_context);
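// For each candidate mode the sub-block MV comes from the MV reference
// list (NEARESTMV/NEARMV), is zero (ZEROMV), or is found by a full-pel
// search followed by sub-pel refinement (NEWMV).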
2364 for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
2366 xd->mi[0]->bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;
2368 if (this_mode == NEWMV) {
2369 const int step_param = cpi->sf.mv.fullpel_search_step_param;
2373 const MvLimits tmp_mv_limits = x->mv_limits;
2374 uint32_t dummy_dist;
2377 mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
2378 mvp_full.col = b_mv[NEARESTMV].as_mv.col >> 3;
2380 mvp_full.row = xd->mi[0]->bmi[0].as_mv[0].as_mv.row >> 3;
2381 mvp_full.col = xd->mi[0]->bmi[0].as_mv[0].as_mv.col >> 3;
2384 vp9_set_mv_search_range(&x->mv_limits,
2385 &mbmi_ext->ref_mvs[ref_frame][0].as_mv);
2387 vp9_full_pixel_search(
2388 cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method,
2389 x->sadperbit4, cond_cost_list(cpi, cost_list),
2390 &mbmi_ext->ref_mvs[ref_frame][0].as_mv, &tmp_mv, INT_MAX, 0);
2392 x->mv_limits = tmp_mv_limits;
2394 // Calculate the bit cost of the motion vector.
2395 mvp_full.row = tmp_mv.row * 8;
2396 mvp_full.col = tmp_mv.col * 8;
2398 b_rate += vp9_mv_bit_cost(
2399 &mvp_full, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
2400 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
2402 b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
2403 [INTER_OFFSET(NEWMV)];
2404 if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd) continue;
2406 cpi->find_fractional_mv_step(
2407 x, &tmp_mv, &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
2408 cpi->common.allow_high_precision_mv, x->errorperbit,
2409 &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
2410 cpi->sf.mv.subpel_iters_per_step,
2411 cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
2412 &dummy_dist, &x->pred_sse[ref_frame], NULL, 0, 0);
2414 xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv;
2416 b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
2417 [INTER_OFFSET(this_mode)];
2420 #if CONFIG_VP9_HIGHBITDEPTH
2421 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
2422 vp9_highbd_build_inter_predictor(
2423 pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride,
2424 &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
2425 4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
2426 vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
2427 mi_col * MI_SIZE + 4 * (i & 0x01),
2428 mi_row * MI_SIZE + 4 * (i >> 1), xd->bd);
2431 vp9_build_inter_predictor(
2432 pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride,
2433 &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf,
2434 4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0,
2435 vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3,
2436 mi_col * MI_SIZE + 4 * (i & 0x01),
2437 mi_row * MI_SIZE + 4 * (i >> 1));
2439 #if CONFIG_VP9_HIGHBITDEPTH
2443 model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
2446 this_rdc.rate += b_rate;
2448 RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
2449 if (this_rdc.rdcost < b_best_rd) {
2450 b_best_rd = this_rdc.rdcost;
2451 bsi[ref_frame][i].as_mode = this_mode;
2452 bsi[ref_frame][i].as_mv[0].as_mv = xd->mi[0]->bmi[i].as_mv[0].as_mv;
2456 // restore source and prediction buffer pointers.
2458 pd->pre[0] = orig_pre[0];
2460 this_rd += b_best_rd;
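// Store the winning sub-block decision; for 8x4 and 4x8 partitions the
// decision is replicated to the other covered 4x4 positions.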
2462 xd->mi[0]->bmi[i] = bsi[ref_frame][i];
2463 if (num_4x4_blocks_wide > 1) xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i];
2464 if (num_4x4_blocks_high > 1) xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i];
2466 } // loop through sub8x8 blocks
2468 if (this_rd < best_rd) {
2470 best_ref_frame = ref_frame;
2472 } // reference frames
2474 mi->tx_size = TX_4X4;
2475 mi->ref_frame[0] = best_ref_frame;
2476 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
2477 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
2478 const int block = idy * 2 + idx;
2479 xd->mi[0]->bmi[block] = bsi[best_ref_frame][block];
2480 if (num_4x4_blocks_wide > 1)
2481 xd->mi[0]->bmi[block + 1] = bsi[best_ref_frame][block];
2482 if (num_4x4_blocks_high > 1)
2483 xd->mi[0]->bmi[block + 2] = bsi[best_ref_frame][block];
2486 mi->mode = xd->mi[0]->bmi[3].as_mode;
2487 ctx->mic = *(xd->mi[0]);
2488 ctx->mbmi_ext = *x->mbmi_ext;
2489 ctx->skip_txfm[0] = SKIP_TXFM_NONE;
2491 // Dummy assignment for speed -5. No effect in speed -6.
2492 rd_cost->rdcost = best_rd;