2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
12 #include "vpx_config.h"
13 #include "./vpx_dsp_rtcd.h"
15 #include "modecosts.h"
16 #include "encodeintra.h"
17 #include "vp8/common/common.h"
18 #include "vp8/common/entropymode.h"
19 #include "pickinter.h"
20 #include "vp8/common/findnearmv.h"
22 #include "vp8/common/reconinter.h"
23 #include "vp8/common/reconintra.h"
24 #include "vp8/common/reconintra4x4.h"
25 #include "vpx_dsp/variance.h"
28 #include "vpx_dsp/vpx_dsp_common.h"
29 #include "vpx_mem/vpx_mem.h"
30 #if CONFIG_TEMPORAL_DENOISING
31 #include "denoising.h"
35 extern unsigned int cnt_pm;
40 extern const int vp8_ref_frame_order[MAX_MODES];
41 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
43 // Fixed point implementation of a skin color classifier. Skin color
44 // is modeled by a Gaussian distribution in the CbCr color space.
45 // See ../../test/skin_color_detector_test.cc where the reference
46 // skin color classifier is defined.
48 // Fixed-point skin color model parameters.
49 static const int skin_mean[5][2] = { { 7463, 9614 },
54 static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 }; // q16
55 static const int skin_threshold[6] = { 1570636, 1400000, 800000,
56 800000, 800000, 800000 }; // q18
58 // Evaluates the Mahalanobis distance measure for the input CbCr values.
// Computes the fixed-point quadratic form d' * inv_cov * d, where d is the
// (cb, cr) pair minus the Q6 skin mean of model |idx|. Smaller results mean
// the chroma pair lies closer to the skin model.
// NOTE(review): the listing is elided here — the final accumulation/return
// of the weighted terms is not visible in this chunk; comments describe only
// the visible lines.
59 static int evaluate_skin_color_difference(int cb, int cr, int idx) {
  // Promote the 8-bit chroma inputs to Q6 to match the skin_mean[][] table.
60 const int cb_q6 = cb << 6;
61 const int cr_q6 = cr << 6;
  // Squared and cross difference terms, in Q12 (Q6 * Q6).
62 const int cb_diff_q12 =
63 (cb_q6 - skin_mean[idx][0]) * (cb_q6 - skin_mean[idx][0]);
64 const int cbcr_diff_q12 =
65 (cb_q6 - skin_mean[idx][0]) * (cr_q6 - skin_mean[idx][1]);
66 const int cr_diff_q12 =
67 (cr_q6 - skin_mean[idx][1]) * (cr_q6 - skin_mean[idx][1]);
  // Round and reduce Q12 -> Q2 before weighting by the Q16 inverse
  // covariance, keeping the products within int range.
68 const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
69 const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
70 const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
  // Weighted quadratic-form terms; the inverse covariance is symmetric, so
  // the cross term appears twice (indices 1 and 2 of skin_inv_cov).
72 skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
73 skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
77 // Checks if the input yCbCr values correspond to skin color.
// Returns nonzero for "skin". Applies a luma gate, then either the single
// simple model (MODEL_MODE == 0) or a multi-model loop with extra rejection
// heuristics based on motion history (|consec_zeromv|) and chroma strength.
// NOTE(review): the listing is elided — the early-return body for the luma
// gate, the model loop header, and several closing braces are not visible.
78 static int is_skin_color(int y, int cb, int cr, int consec_zeromv) {
  // Reject very dark or very bright luma outright.
79 if (y < 40 || y > 220) {
82 if (MODEL_MODE == 0) {
    // Simple mode: single Gaussian model, single threshold.
83 return (evaluate_skin_color_difference(cb, cr, 0) < skin_threshold[0]);
86 // No skin if block has been zero motion for long consecutive time.
87 if (consec_zeromv > 60) return 0;
  // Neutral chroma (grey) is never skin.
89 if (cb == 128 && cr == 128) return 0;
90 // Exit on very strong cb.
91 if (cb > 150 && cr < 110) return 0;
  // Per-model test: accept when the distance is under the model threshold,
  // with tighter acceptance for dark luma or long zero-motion runs.
93 int skin_color_diff = evaluate_skin_color_difference(cb, cr, i);
94 if (skin_color_diff < skin_threshold[i + 1]) {
95 if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2)) {
97 } else if (consec_zeromv > 25 &&
98 skin_color_diff > (skin_threshold[i + 1] >> 1)) {
104 // Exit if difference is much larger than the threshold.
105 if (skin_color_diff > (skin_threshold[i + 1] << 3)) {
// Measures the local gradient at one macroblock corner: the maximum absolute
// difference between the corner sample and its horizontal, vertical, and
// diagonal neighbors. sgnx/sgny select the neighbor direction so the same
// code works for all four corners.
// NOTE(review): the parameter line declaring |sgny| is elided from this
// listing; usage below shows it is the column-direction sign.
114 static int macroblock_corner_grad(unsigned char *signal, int stride,
115 int offsetx, int offsety, int sgnx,
117 int y1 = signal[offsetx * stride + offsety];
118 int y2 = signal[offsetx * stride + offsety + sgny];
119 int y3 = signal[(offsetx + sgnx) * stride + offsety];
120 int y4 = signal[(offsetx + sgnx) * stride + offsety + sgny];
121 return VPXMAX(VPXMAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4));
// Decides whether this macroblock is a candidate for dot-artifact
// suppression (biasing mode selection away from ZEROMV_LAST). A candidate
// shows a strong gradient at a macroblock corner in the last reference but a
// weak one in the source — the signature of an encoded "dot" on a flat area.
// |channel| selects Y vs chroma handling; |target_last|/|last_ref| are the
// source and last-reference planes at this MB, with common |stride|.
// NOTE(review): elided listing — threshold1/threshold2, |num_frames|,
// |shift|, and the return statements are declared/assigned on lines not
// visible here.
124 static int check_dot_artifact_candidate(VP8_COMP *cpi, MACROBLOCK *x,
125 unsigned char *target_last, int stride,
126 unsigned char *last_ref, int mb_row,
127 int mb_col, int channel) {
  // Cap the number of suppressed blocks at 10% of the frame's MBs.
130 unsigned int max_num = (cpi->common.MBs) / 10;
133 int index = mb_row * cpi->common.mb_cols + mb_col;
134 // Threshold for #consecutive (base layer) frames using zero_last mode.
140 if (cpi->oxcf.number_of_layers > 1) {
143 x->zero_last_dot_suppress = 0;
144 // Blocks on base layer frames that have been using ZEROMV_LAST repeatedly
145 // (i.e., at least |x| consecutive frames) are candidates for increasing the
146 // rd adjustment for zero_last mode.
147 // Only allow this for at most |max_num| blocks per frame.
148 // Don't allow this for screen content input.
149 if (cpi->current_layer == 0 &&
150 cpi->consec_zero_last_mvbias[index] > num_frames &&
151 x->mbs_zero_last_dot_suppress < max_num &&
152 !cpi->oxcf.screen_content_mode) {
153 // If this block is checked here, label it so we don't check it again until
154 // ~|x| frames later.
155 x->zero_last_dot_suppress = 1;
156 // Dot artifact is noticeable as strong gradient at corners of macroblock,
157 // for flat areas. As a simple detector for now, we look for a high
158 // corner gradient on last ref, and a smaller gradient on source.
159 // Check 4 corners, return if any satisfy condition.
    // Top-left corner.
161 grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1);
162 grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1);
163 if (grad_last >= threshold1 && grad_source <= threshold2) {
164 x->mbs_zero_last_dot_suppress++;
    // Top-right corner.
168 grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1);
169 grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1);
170 if (grad_last >= threshold1 && grad_source <= threshold2) {
171 x->mbs_zero_last_dot_suppress++;
    // Bottom-left corner.
175 grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1);
176 grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1);
177 if (grad_last >= threshold1 && grad_source <= threshold2) {
178 x->mbs_zero_last_dot_suppress++;
    // Bottom-right corner.
182 grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1);
184 macroblock_corner_grad(target_last, stride, shift, shift, -1, -1);
185 if (grad_last >= threshold1 && grad_source <= threshold2) {
186 x->mbs_zero_last_dot_suppress++;
// Stand-in for the fractional (sub-pel) motion-vector refinement step used
// when sub-pel search is skipped: it only converts the full-pel best MV to
// eighth-pel units (<< 3). The remaining parameters exist to match the
// find_fractional_mv_step function-pointer signature.
// NOTE(review): elided listing — the parameter-silencing statements and the
// return value are on lines not visible here.
194 int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
195 int_mv *bestmv, int_mv *ref_mv,
197 const vp8_variance_fn_ptr_t *vfp,
198 int *mvcost[2], int *distortion,
209 bestmv->as_mv.row <<= 3;
210 bestmv->as_mv.col <<= 3;
// Returns the 16x16 inter-prediction error (variance) for motion vector
// |this_mv| against the source block, writing the SSE to |*sse|. Uses the
// sub-pixel variance function when the MV has a fractional (1/8-pel)
// component, and the integer variance function otherwise.
214 int vp8_get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp,
215 unsigned int *sse, int_mv this_mv) {
216 BLOCK *b = &mb->block[0];
217 BLOCKD *d = &mb->e_mbd.block[0];
  // Source (what) and reference (in_what) pointers for the luma block.
218 unsigned char *what = (*(b->base_src) + b->src);
219 int what_stride = b->src_stride;
220 int pre_stride = mb->e_mbd.pre.y_stride;
221 unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset;
222 int in_what_stride = pre_stride;
  // Fractional part of the MV (low 3 bits = eighth-pel offset).
223 int xoffset = this_mv.as_mv.col & 7;
224 int yoffset = this_mv.as_mv.row & 7;
  // Advance the reference pointer by the integer-pel part of the MV.
226 in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);
228 if (xoffset | yoffset) {
    // Sub-pixel variance for fractional MVs.
229 return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what,
    // Whole-pixel variance otherwise.
232 return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
// Returns the 4x4 sum of squared error between the source sub-block and its
// predictor (stride 16 in the predictor buffer).
// NOTE(review): elided listing — the declaration/assignment of |dptr| is on
// a line not visible here.
236 static int get_prediction_error(BLOCK *be, BLOCKD *b) {
239 sptr = (*(be->base_src) + be->src);
242 return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
// Chooses the best 4x4 intra prediction mode for sub-block |ib| by RD cost
// over modes B_DC_PRED..B_HE_PRED, writes the winner into b->bmi and
// encodes the block. Outputs the winning mode, rate, and distortion through
// the pointer parameters.
// NOTE(review): elided listing — rate/distortion/this_rd declarations, the
// best-mode bookkeeping, and the return statement are on lines not visible.
245 static int pick_intra4x4block(MACROBLOCK *x, int ib,
246 B_PREDICTION_MODE *best_mode,
247 const int *mode_costs,
249 int *bestrate, int *bestdistortion) {
250 BLOCKD *b = &x->e_mbd.block[ib];
251 BLOCK *be = &x->block[ib];
252 int dst_stride = x->e_mbd.dst.y_stride;
253 unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
254 B_PREDICTION_MODE mode;
255 int best_rd = INT_MAX;
  // Neighboring reconstructed pixels used by the intra predictor.
259 unsigned char *Above = dst - dst_stride;
260 unsigned char *yleft = dst - 1;
261 unsigned char top_left = Above[-1];
  // Try each 4x4 intra mode and keep the one with lowest RD cost.
263 for (mode = B_DC_PRED; mode <= B_HE_PRED; ++mode) {
266 rate = mode_costs[mode];
268 vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16,
270 distortion = get_prediction_error(be, b);
271 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
273 if (this_rd < best_rd) {
275 *bestdistortion = distortion;
  // Commit the winning mode and encode so neighbors see reconstruction.
281 b->bmi.as_mode = *best_mode;
282 vp8_encode_intra4x4block(x, ib);
// Picks 4x4 intra modes for all 16 luma sub-blocks of the macroblock
// (B_PRED). Accumulates rate and distortion across sub-blocks, breaking out
// early once distortion exceeds the caller's best-so-far (|*best_dist| on
// entry). On early exit *best_dist is set to INT_MAX to signal failure.
// NOTE(review): elided listing — loop accumulation of rate/distortion, the
// *Rate store, and the return of |error| are on lines not visible here.
286 static int pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *best_dist) {
287 MACROBLOCKD *const xd = &mb->e_mbd;
  // Cost of signalling B_PRED at the MB level.
289 int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
292 const int *bmode_costs;
  // Replicate the above-right pixels needed by sub-blocks on the MB edge.
294 intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
  // Inter frames use a flat (context-free) bmode cost table.
296 bmode_costs = mb->inter_bmode_costs;
298 for (i = 0; i < 16; ++i) {
299 MODE_INFO *const mic = xd->mode_info_context;
300 const int mis = xd->mode_info_stride;
302 B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
303 int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d);
    // Key frames condition the mode cost on the above/left sub-block modes.
305 if (mb->e_mbd.frame_type == KEY_FRAME) {
306 const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
307 const B_PREDICTION_MODE L = left_block_mode(mic, i);
309 bmode_costs = mb->bmode_costs[A][L];
312 pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d);
316 mic->bmi[i].as_mode = best_mode;
318 /* Break out case where we have already exceeded best so far value
321 if (distortion > *best_dist) break;
327 *best_dist = distortion;
328 error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
  // Early-exit path: mark the result invalid for the caller.
330 *best_dist = INT_MAX;
// Chooses the chroma (UV) intra prediction mode for the macroblock by
// computing the summed squared prediction error of DC/V/H/TM predictors
// directly against the 8x8 U and V source blocks, then storing the
// lowest-error mode in mbmi.uv_mode.
// NOTE(review): elided listing — declarations of i/j, Uaverage/Vaverage,
// shift, expected_udc/vdc, u_p/v_p, diff, and several availability-handling
// lines are not visible here.
337 static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
338 MACROBLOCKD *x = &mb->e_mbd;
  // Reconstructed neighbors: row above and column left of the UV blocks.
339 unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
340 unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
  // Source chroma pointers (blocks 16 and 20 are the first U/V sub-blocks).
341 unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
342 unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
343 int uvsrc_stride = mb->block[16].src_stride;
344 unsigned char uleft_col[8];
345 unsigned char vleft_col[8];
346 unsigned char utop_left = uabove_row[-1];
347 unsigned char vtop_left = vabove_row[-1];
  // Accumulated SSE per candidate mode, indexed by MB_PREDICTION_MODE.
355 int pred_error[4] = { 0, 0, 0, 0 }, best_error = INT_MAX;
356 MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
  // Gather the left-neighbor columns for U and V.
358 for (i = 0; i < 8; ++i) {
359 uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
360 vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
  // Compute the expected DC values from whatever neighbors are available.
363 if (!x->up_available && !x->left_available) {
369 if (x->up_available) {
370 for (i = 0; i < 8; ++i) {
371 Uaverage += uabove_row[i];
372 Vaverage += vabove_row[i];
378 if (x->left_available) {
379 for (i = 0; i < 8; ++i) {
380 Uaverage += uleft_col[i];
381 Vaverage += vleft_col[i];
  // Rounded average over the contributing neighbor samples.
387 expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
388 expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
  // Accumulate squared error of each predictor against the source pixels.
391 for (i = 0; i < 8; ++i) {
392 for (j = 0; j < 8; ++j) {
      // TM predictor: left + above - top_left, clamped to [0, 255].
393 int predu = uleft_col[i] + uabove_row[j] - utop_left;
394 int predv = vleft_col[i] + vabove_row[j] - vtop_left;
400 if (predu < 0) predu = 0;
402 if (predu > 255) predu = 255;
404 if (predv < 0) predv = 0;
406 if (predv > 255) predv = 255;
408 diff = u_p - expected_udc;
409 pred_error[DC_PRED] += diff * diff;
410 diff = v_p - expected_vdc;
411 pred_error[DC_PRED] += diff * diff;
413 diff = u_p - uabove_row[j];
414 pred_error[V_PRED] += diff * diff;
415 diff = v_p - vabove_row[j];
416 pred_error[V_PRED] += diff * diff;
418 diff = u_p - uleft_col[i];
419 pred_error[H_PRED] += diff * diff;
420 diff = v_p - vleft_col[i];
421 pred_error[H_PRED] += diff * diff;
424 pred_error[TM_PRED] += diff * diff;
426 pred_error[TM_PRED] += diff * diff;
429 usrc_ptr += uvsrc_stride;
430 vsrc_ptr += uvsrc_stride;
  // Halfway down, switch to the second row of U/V sub-blocks (18 and 22).
433 usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
434 vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
  // Select the mode with the lowest accumulated error.
438 for (i = DC_PRED; i <= TM_PRED; ++i) {
439 if (best_error > pred_error[i]) {
440 best_error = pred_error[i];
441 best_mode = (MB_PREDICTION_MODE)i;
445 mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
// Updates the MV-component statistics (MVcount) used for entropy coding,
// recording the row/col differences between the chosen MV and the best
// reference MV. Only NEWMV contributes, per the comment below.
// NOTE(review): elided listing — the shift amount and the increment
// operators closing each MVcount expression are on lines not visible here.
448 static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
449 MACROBLOCKD *xd = &x->e_mbd;
450 /* Split MV modes currently not supported when RD is not enabled,
451 * therefore, only need to modify MVcount in NEWMV mode. */
452 if (xd->mode_info_context->mbmi.mode == NEWMV) {
453 x->MVcount[0][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.row -
454 best_ref_mv->as_mv.row) >>
456 x->MVcount[1][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.col -
457 best_ref_mv->as_mv.col) >>
462 #if CONFIG_MULTI_RES_ENCODING
// Multi-resolution encoding: reads the co-located macroblock's mode, motion
// vector, reference frame, and dissimilarity score from the lower-resolution
// encoder's stored results, scaling the MV by the down-sampling factor so it
// can seed this (higher-resolution) encoder's search.
// NOTE(review): elided listing — the |mb_col| parameter line and the body of
// the dissim adjustment (line after the mr_encoder_id check) are not visible.
463 static void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd,
464 int *dissim, int *parent_ref_frame,
465 MB_PREDICTION_MODE *parent_mode,
466 int_mv *parent_ref_mv, int mb_row,
468 LOWER_RES_MB_INFO *store_mode_info =
469 ((LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info)->mb_info;
470 unsigned int parent_mb_index;
472 /* Consider different down_sampling_factor. */
474 /* TODO: Removed the loop that supports special down_sampling_factor
475 * such as 2, 4, 8. Will revisit it if needed.
476 * Should also try using a look-up table to see if it helps
478 int parent_mb_row, parent_mb_col;
  // Map this MB's position into the lower-resolution frame's MB grid.
480 parent_mb_row = mb_row * cpi->oxcf.mr_down_sampling_factor.den /
481 cpi->oxcf.mr_down_sampling_factor.num;
482 parent_mb_col = mb_col * cpi->oxcf.mr_down_sampling_factor.den /
483 cpi->oxcf.mr_down_sampling_factor.num;
484 parent_mb_index = parent_mb_row * cpi->mr_low_res_mb_cols + parent_mb_col;
487 /* Read lower-resolution mode & motion result from memory.*/
488 *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame;
489 *parent_mode = store_mode_info[parent_mb_index].mode;
490 *dissim = store_mode_info[parent_mb_index].dissim;
492 /* For highest-resolution encoder, adjust dissim value. Lower its quality
493 * for good performance. */
494 if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1))
497 if (*parent_ref_frame != INTRA_FRAME) {
498 /* Consider different down_sampling_factor.
499 * The result can be rounded to be more precise, but it takes more time.
    // Scale the parent MV up by num/den to this resolution.
501 (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row *
502 cpi->oxcf.mr_down_sampling_factor.num /
503 cpi->oxcf.mr_down_sampling_factor.den;
504 (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col *
505 cpi->oxcf.mr_down_sampling_factor.num /
506 cpi->oxcf.mr_down_sampling_factor.den;
  // Keep the scaled MV inside the usable motion range for this MB.
508 vp8_clamp_mv2(parent_ref_mv, xd);
// Tests whether the luma prediction error is low enough to skip further
// encoding work for this macroblock (encode breakout). The threshold is the
// larger of a quantizer-derived value and the user's encode_breakout
// setting; chroma SSE is then checked before allowing the skip.
// NOTE(review): elided listing — the chroma-SSE computation feeding |sse2|
// and the code that flags the skip are on lines not visible here.
513 static void check_for_encode_breakout(unsigned int sse, MACROBLOCK *x) {
514 MACROBLOCKD *xd = &x->e_mbd;
  // Quantizer-based floor for the breakout threshold (dequant^2 / 16).
516 unsigned int threshold =
517 (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4);
519 if (threshold < x->encode_breakout) threshold = x->encode_breakout;
521 if (sse < threshold) {
522 /* Check u and v to make sure skip is ok */
523 unsigned int sse2 = 0;
527 if (sse2 * 2 < x->encode_breakout) {
// Computes the RD cost of the current inter mode: measures prediction error
// (unless it was already produced by a half-pel NEWMV search), applies the
// ZEROMV/LAST rd adjustment |rd_adj| (a percentage; 100 = no change), and
// runs the encode-breakout check. Returns the resulting RD cost.
// NOTE(review): elided listing — the inactive-block early return, the
// denoise_aggressive assignment target, and the final return are on lines
// not visible here.
535 static int evaluate_inter_mode(unsigned int *sse, int rate2, int *distortion2,
536 VP8_COMP *cpi, MACROBLOCK *x, int rd_adj) {
537 MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
538 int_mv mv = x->e_mbd.mode_info_context->mbmi.mv;
540 int denoise_aggressive = 0;
541 /* Exit early and don't compute the distortion if this macroblock
542 * is marked inactive. */
543 if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
  // Recompute distortion unless the half-pel NEWMV search already did.
550 if ((this_mode != NEWMV) || !(cpi->sf.half_pixel_search) ||
551 cpi->common.full_pixel == 1) {
553 vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], sse, mv);
556 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2);
558 #if CONFIG_TEMPORAL_DENOISING
559 if (cpi->oxcf.noise_sensitivity > 0) {
561 (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) ? 1 : 0;
565 // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame.
566 // TODO: We should also add condition on distance of closest to current.
567 if (!cpi->oxcf.screen_content_mode && this_mode == ZEROMV &&
568 x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
569 (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME))) {
570 // No adjustment if block is considered to be skin area.
571 if (x->is_skin) rd_adj = 100;
  // Scale in 64-bit to avoid overflow before dividing by 100.
573 this_rd = ((int64_t)this_rd) * rd_adj / 100;
576 check_for_encode_breakout(*sse, x);
// Computes the ZEROMV rd adjustment percentage for frames with a large
// static background: counts how many of the left/above/above-left neighbor
// MBs have small (sub-pixel-scale, |mv| < 8) inter motion vectors and biases
// *rd_adjustment accordingly.
// NOTE(review): elided listing — mv_l/mv_a assignments, the neighbor-edge
// guards, and the *rd_adjustment assignments in each branch are on lines not
// visible here.
580 static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x,
581 int *rd_adjustment) {
582 MODE_INFO *mic = x->e_mbd.mode_info_context;
583 int_mv mv_l, mv_a, mv_al;
584 int local_motion_check = 0;
  // Only apply the bias when ZEROMV dominated the last frame (> 40%).
586 if (cpi->lf_zeromv_pct > 40) {
    // Left neighbor.
591 if (mic->mbmi.ref_frame != INTRA_FRAME) {
592 if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) {
593 local_motion_check++;
    // Above-left neighbor (step back one MODE_INFO row).
598 mic -= x->e_mbd.mode_info_stride;
599 mv_al = mic->mbmi.mv;
601 if (mic->mbmi.ref_frame != INTRA_FRAME) {
602 if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) {
603 local_motion_check++;
    // Above neighbor.
611 if (mic->mbmi.ref_frame != INTRA_FRAME) {
612 if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) {
613 local_motion_check++;
    // Edge MBs need fewer low-motion neighbors to qualify.
617 if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) &&
618 local_motion_check > 0) ||
619 local_motion_check > 2) {
621 } else if (local_motion_check > 0) {
627 void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
628 int recon_uvoffset, int *returnrate,
629 int *returndistortion, int *returnintra, int mb_row,
631 BLOCK *b = &x->block[0];
632 BLOCKD *d = &x->e_mbd.block[0];
633 MACROBLOCKD *xd = &x->e_mbd;
634 MB_MODE_INFO best_mbmode;
636 int_mv best_ref_mv_sb[2];
637 int_mv mode_mv_sb[2][MB_MODE_COUNT];
640 MB_PREDICTION_MODE this_mode;
643 int best_rd = INT_MAX;
644 int rd_adjustment = 100;
645 int best_intra_rd = INT_MAX;
650 int bestsme = INT_MAX;
651 int best_mode_index = 0;
652 unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX;
653 #if CONFIG_TEMPORAL_DENOISING
654 unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX;
657 int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
659 #if CONFIG_MULTI_RES_ENCODING
660 int dissim = INT_MAX;
661 int parent_ref_frame = 0;
662 int_mv parent_ref_mv;
663 MB_PREDICTION_MODE parent_mode = 0;
664 int parent_ref_valid = 0;
669 int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
671 /* search range got from mv_pred(). It uses step_param levels. (0-7) */
674 unsigned char *plane[4][3];
675 int ref_frame_map[4];
677 int dot_artifact_candidate = 0;
678 get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
680 // If the current frame is using LAST as a reference, check for
681 // biasing the mode selection for dot artifacts.
682 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
683 unsigned char *target_y = x->src.y_buffer;
684 unsigned char *target_u = x->block[16].src + *x->block[16].base_src;
685 unsigned char *target_v = x->block[20].src + *x->block[20].base_src;
686 int stride = x->src.y_stride;
687 int stride_uv = x->block[16].src_stride;
688 #if CONFIG_TEMPORAL_DENOISING
689 if (cpi->oxcf.noise_sensitivity) {
690 const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
692 cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
693 stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
695 target_u = cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer +
697 target_v = cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer +
699 stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
703 dot_artifact_candidate = check_dot_artifact_candidate(
704 cpi, x, target_y, stride, plane[LAST_FRAME][0], mb_row, mb_col, 0);
705 // If not found in Y channel, check UV channel.
706 if (!dot_artifact_candidate) {
707 dot_artifact_candidate = check_dot_artifact_candidate(
708 cpi, x, target_u, stride_uv, plane[LAST_FRAME][1], mb_row, mb_col, 1);
709 if (!dot_artifact_candidate) {
710 dot_artifact_candidate = check_dot_artifact_candidate(
711 cpi, x, target_v, stride_uv, plane[LAST_FRAME][2], mb_row, mb_col,
717 #if CONFIG_MULTI_RES_ENCODING
718 // |parent_ref_valid| will be set here if potentially we can do mv reuse for
719 // this higher resol (|cpi->oxcf.mr_encoder_id| > 0) frame.
720 // |parent_ref_valid| may be reset depending on |parent_ref_frame| for
721 // the current macroblock below.
722 parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
723 if (parent_ref_valid) {
726 get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame, &parent_mode,
727 &parent_ref_mv, mb_row, mb_col);
729 /* TODO(jkoleszar): The references available (ref_frame_flags) to the
730 * lower res encoder should match those available to this encoder, but
731 * there seems to be a situation where this mismatch can happen in the
732 * case of frame dropping and temporal layers. For example,
733 * GOLD being disallowed in ref_frame_flags, but being returned as
736 * In this event, take the conservative approach of disabling the
737 * lower res info for this MB.
741 // Note availability for mv reuse is only based on last and golden.
742 if (parent_ref_frame == LAST_FRAME)
743 parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME);
744 else if (parent_ref_frame == GOLDEN_FRAME)
745 parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME);
747 // assert(!parent_ref_frame || parent_ref_flag);
749 // If |parent_ref_frame| did not match either last or golden then
750 // shut off mv reuse.
751 if (parent_ref_frame && !parent_ref_flag) parent_ref_valid = 0;
753 // Don't do mv reuse since we want to allow for another mode besides
754 // ZEROMV_LAST to remove dot artifact.
755 if (dot_artifact_candidate) parent_ref_valid = 0;
759 // Check if current macroblock is in skin area.
761 const int y = (x->src.y_buffer[7 * x->src.y_stride + 7] +
762 x->src.y_buffer[7 * x->src.y_stride + 8] +
763 x->src.y_buffer[8 * x->src.y_stride + 7] +
764 x->src.y_buffer[8 * x->src.y_stride + 8]) >>
766 const int cb = (x->src.u_buffer[3 * x->src.uv_stride + 3] +
767 x->src.u_buffer[3 * x->src.uv_stride + 4] +
768 x->src.u_buffer[4 * x->src.uv_stride + 3] +
769 x->src.u_buffer[4 * x->src.uv_stride + 4]) >>
771 const int cr = (x->src.v_buffer[3 * x->src.uv_stride + 3] +
772 x->src.v_buffer[3 * x->src.uv_stride + 4] +
773 x->src.v_buffer[4 * x->src.uv_stride + 3] +
774 x->src.v_buffer[4 * x->src.uv_stride + 4]) >>
777 if (!cpi->oxcf.screen_content_mode) {
778 int block_index = mb_row * cpi->common.mb_cols + mb_col;
779 x->is_skin = is_skin_color(y, cb, cr, cpi->consec_zero_last[block_index]);
782 #if CONFIG_TEMPORAL_DENOISING
783 if (cpi->oxcf.noise_sensitivity) {
784 // Under aggressive denoising mode, should we use skin map to reduce
786 // and ZEROMV bias? Will need to revisit the accuracy of this detection for
787 // very noisy input. For now keep this as is (i.e., don't turn it off).
788 // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive)
793 mode_mv = mode_mv_sb[sign_bias];
794 best_ref_mv.as_int = 0;
795 memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
796 memset(&best_mbmode, 0, sizeof(best_mbmode));
798 /* Setup search priorities */
799 #if CONFIG_MULTI_RES_ENCODING
800 if (parent_ref_valid && parent_ref_frame && dissim < 8) {
801 ref_frame_map[0] = -1;
802 ref_frame_map[1] = parent_ref_frame;
803 ref_frame_map[2] = -1;
804 ref_frame_map[3] = -1;
807 get_reference_search_order(cpi, ref_frame_map);
809 /* Check to see if there is at least 1 valid reference frame that we need
810 * to calculate near_mvs.
812 if (ref_frame_map[1] > 0) {
813 sign_bias = vp8_find_near_mvs_bias(
814 &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb,
815 mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias);
817 mode_mv = mode_mv_sb[sign_bias];
818 best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
821 /* Count of the number of MBs tested so far this frame */
822 x->mbs_tested_so_far++;
824 *returnintra = INT_MAX;
827 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
829 /* If the frame has big static background and current MB is in low
830 * motion area, its mode decision is biased to ZEROMV mode.
831 * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12).
832 * At such speed settings, ZEROMV is already heavily favored.
834 if (cpi->Speed < 12) {
835 calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
838 #if CONFIG_TEMPORAL_DENOISING
839 if (cpi->oxcf.noise_sensitivity) {
840 rd_adjustment = (int)(rd_adjustment *
841 cpi->denoiser.denoise_pars.pickmode_mv_bias / 100);
845 if (dot_artifact_candidate) {
846 // Bias against ZEROMV_LAST mode.
850 /* if we encode a new mv this is important
851 * find the best new motion vector
853 for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
855 int this_rd = INT_MAX;
856 int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
858 if (best_rd <= x->rd_threshes[mode_index]) continue;
860 if (this_ref_frame < 0) continue;
862 x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
864 /* everything but intra */
865 if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
866 x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
867 x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
868 x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
870 if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) {
871 sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
872 mode_mv = mode_mv_sb[sign_bias];
873 best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
876 #if CONFIG_MULTI_RES_ENCODING
877 if (parent_ref_valid) {
878 if (vp8_mode_order[mode_index] == NEARESTMV &&
879 mode_mv[NEARESTMV].as_int == 0)
881 if (vp8_mode_order[mode_index] == NEARMV && mode_mv[NEARMV].as_int == 0)
884 if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV &&
885 best_ref_mv.as_int == 0)
887 else if (vp8_mode_order[mode_index] == NEWMV && dissim == 0 &&
888 best_ref_mv.as_int == parent_ref_mv.as_int)
894 /* Check to see if the testing frequency for this mode is at its max
895 * If so then prevent it from being tested and increase the threshold
897 if (x->mode_test_hit_counts[mode_index] &&
898 (cpi->mode_check_freq[mode_index] > 1)) {
899 if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
900 x->mode_test_hit_counts[mode_index])) {
901 /* Increase the threshold for coding this mode to make it less
902 * likely to be chosen */
903 x->rd_thresh_mult[mode_index] += 4;
905 if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
906 x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
909 x->rd_threshes[mode_index] =
910 (cpi->rd_baseline_thresh[mode_index] >> 7) *
911 x->rd_thresh_mult[mode_index];
916 /* We have now reached the point where we are going to test the current
917 * mode so increment the counter for the number of times it has been
919 x->mode_test_hit_counts[mode_index]++;
924 this_mode = vp8_mode_order[mode_index];
926 x->e_mbd.mode_info_context->mbmi.mode = this_mode;
927 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
929 /* Work out the cost associated with selecting the reference frame */
930 frame_cost = x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
933 /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
934 * unless ARNR filtering is enabled in which case we want
935 * an unfiltered alternative */
936 if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
937 if (this_mode != ZEROMV ||
938 x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
945 /* Pass best so far to pick_intra4x4mby_modes to use as breakout */
946 distortion2 = best_rd_sse;
947 pick_intra4x4mby_modes(x, &rate, &distortion2);
949 if (distortion2 == INT_MAX) {
953 distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
954 x->e_mbd.predictor, 16, &sse);
955 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
957 if (this_rd < best_intra_rd) {
958 best_intra_rd = this_rd;
959 *returnintra = distortion2;
967 /* Split MV modes currently not supported when RD is not enabled. */
974 vp8_build_intra_predictors_mby_s(
975 xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1,
976 xd->dst.y_stride, xd->predictor, 16);
977 distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
978 x->e_mbd.predictor, 16, &sse);
979 rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context
981 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
983 if (this_rd < best_intra_rd) {
984 best_intra_rd = this_rd;
985 *returnintra = distortion2;
994 int sadpb = x->sadperbit16;
997 int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
998 int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
999 int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
1000 int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;
1002 int tmp_col_min = x->mv_col_min;
1003 int tmp_col_max = x->mv_col_max;
1004 int tmp_row_min = x->mv_row_min;
1005 int tmp_row_max = x->mv_row_max;
1007 int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1;
1009 /* Further step/diamond searches as necessary */
1010 step_param = cpi->sf.first_step + speed_adjust;
1012 #if CONFIG_MULTI_RES_ENCODING
1013 /* If lower-res frame is not available for mv reuse (because of
1014 frame dropping or different temporal layer pattern), then higher
1015 resol encoder does motion search without any previous knowledge.
1016 Also, since last frame motion info is not stored, then we can not
1017 use improved_mv_pred. */
1018 if (cpi->oxcf.mr_encoder_id) sf_improved_mv_pred = 0;
1020 // Only use parent MV as predictor if this candidate reference frame
1021 // (|this_ref_frame|) is equal to |parent_ref_frame|.
1022 if (parent_ref_valid && (parent_ref_frame == this_ref_frame)) {
1023 /* Use parent MV as predictor. Adjust search range
1026 mvp.as_int = parent_ref_mv.as_int;
1027 mvp_full.as_mv.col = parent_ref_mv.as_mv.col >> 3;
1028 mvp_full.as_mv.row = parent_ref_mv.as_mv.row >> 3;
1032 else if (dissim <= 128)
1039 if (sf_improved_mv_pred) {
1041 vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
1045 vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
1046 x->e_mbd.mode_info_context->mbmi.ref_frame,
1047 cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
1050 /* adjust search range according to sr from mv prediction */
1051 if (sr > step_param) step_param = sr;
1053 mvp_full.as_mv.col = mvp.as_mv.col >> 3;
1054 mvp_full.as_mv.row = mvp.as_mv.row >> 3;
1056 mvp.as_int = best_ref_mv.as_int;
1057 mvp_full.as_mv.col = best_ref_mv.as_mv.col >> 3;
1058 mvp_full.as_mv.row = best_ref_mv.as_mv.row >> 3;
1062 #if CONFIG_MULTI_RES_ENCODING
1063 if (parent_ref_valid && (parent_ref_frame == this_ref_frame) &&
1065 VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row),
1066 abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= 4) {
1067 d->bmi.mv.as_int = mvp_full.as_int;
1068 mode_mv[NEWMV].as_int = mvp_full.as_int;
1070 cpi->find_fractional_mv_step(
1071 x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
1072 &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
1076 /* Get intersection of UMV window and valid MV window to
1077 * reduce # of checks in diamond search. */
1078 if (x->mv_col_min < col_min) x->mv_col_min = col_min;
1079 if (x->mv_col_max > col_max) x->mv_col_max = col_max;
1080 if (x->mv_row_min < row_min) x->mv_row_min = row_min;
1081 if (x->mv_row_max > row_max) x->mv_row_max = row_max;
1086 : (cpi->sf.max_step_search_steps - 1 - step_param);
1088 if (cpi->sf.search_method == HEX) {
1089 #if CONFIG_MULTI_RES_ENCODING
1090 /* TODO: In higher-res pick_inter_mode, step_param is used to
1091 * modify hex search range. Here, set step_param to 0 not to
1092 * change the behavior in lowest-resolution encoder.
1093 * Will improve it later.
1095 /* Set step_param to 0 to ensure large-range motion search
1096 * when mv reuse if not valid (i.e. |parent_ref_valid| = 0),
1097 * or if this candidate reference frame (|this_ref_frame|) is
1098 * not equal to |parent_ref_frame|.
1100 if (!parent_ref_valid || (parent_ref_frame != this_ref_frame))
1103 bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param,
1104 sadpb, &cpi->fn_ptr[BLOCK_16X16],
1105 x->mvsadcost, x->mvcost, &best_ref_mv);
1106 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1108 bestsme = cpi->diamond_search_sad(
1109 x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00,
1110 &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
1111 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1113 /* Further step/diamond searches as necessary */
1117 while (n < further_steps) {
1123 thissme = cpi->diamond_search_sad(
1124 x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb,
1125 &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
1126 if (thissme < bestsme) {
1128 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1130 d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
1136 x->mv_col_min = tmp_col_min;
1137 x->mv_col_max = tmp_col_max;
1138 x->mv_row_min = tmp_row_min;
1139 x->mv_row_max = tmp_row_max;
1141 if (bestsme < INT_MAX) {
1142 cpi->find_fractional_mv_step(
1143 x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
1144 &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
1148 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1149 // The clamp below is not necessary from the perspective
1150 // of VP8 bitstream, but is added to improve ChromeCast
1151 // mirroring's robustness. Please do not remove.
1152 vp8_clamp_mv2(&mode_mv[this_mode], xd);
1155 vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
1160 if (mode_mv[this_mode].as_int == 0) continue;
1164 /* Trap vectors that reach beyond the UMV borders
1165 * Note that ALL New MV, Nearest MV Near MV and Zero MV code drops
1166 * through to this point because of the lack of break statements
1167 * in the previous two cases.
1169 if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
1170 ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
1171 ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
1172 ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
1176 rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
1177 x->e_mbd.mode_info_context->mbmi.mv.as_int = mode_mv[this_mode].as_int;
1178 this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x,
1185 #if CONFIG_TEMPORAL_DENOISING
1186 if (cpi->oxcf.noise_sensitivity) {
1187 /* Store for later use by denoiser. */
1188 // Dont' denoise with GOLDEN OR ALTREF is they are old reference
1189 // frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in past).
1190 int skip_old_reference = ((this_ref_frame != LAST_FRAME) &&
1191 (cpi->common.current_video_frame -
1192 cpi->current_ref_frames[this_ref_frame] >
1193 MAX_GF_ARF_DENOISE_RANGE))
1196 if (this_mode == ZEROMV && sse < zero_mv_sse && !skip_old_reference) {
1198 x->best_zeromv_reference_frame =
1199 x->e_mbd.mode_info_context->mbmi.ref_frame;
1202 // Store the best NEWMV in x for later use in the denoiser.
1203 if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse &&
1204 !skip_old_reference) {
1206 x->best_sse_inter_mode = NEWMV;
1207 x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
1208 x->need_to_clamp_best_mvs =
1209 x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
1210 x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame;
1215 if (this_rd < best_rd || x->skip) {
1216 /* Note index of best mode */
1217 best_mode_index = mode_index;
1219 *returnrate = rate2;
1220 *returndistortion = distortion2;
1223 memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1224 sizeof(MB_MODE_INFO));
1226 /* Testing this mode gave rise to an improvement in best error
1227 * score. Lower threshold a bit for next time
1229 x->rd_thresh_mult[mode_index] =
1230 (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2))
1231 ? x->rd_thresh_mult[mode_index] - 2
1233 x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
1234 x->rd_thresh_mult[mode_index];
1237 /* If the mode did not help improve the best error case then raise the
1238 * threshold for testing that mode next time around.
1241 x->rd_thresh_mult[mode_index] += 4;
1243 if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
1244 x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
1247 x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
1248 x->rd_thresh_mult[mode_index];
1254 /* Reduce the activation RD thresholds for the best choice mode */
1255 if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
1256 (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
1257 int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
1259 x->rd_thresh_mult[best_mode_index] =
1260 (x->rd_thresh_mult[best_mode_index] >=
1261 (MIN_THRESHMULT + best_adjustment))
1262 ? x->rd_thresh_mult[best_mode_index] - best_adjustment
1264 x->rd_threshes[best_mode_index] =
1265 (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
1266 x->rd_thresh_mult[best_mode_index];
1270 int this_rdbin = (*returndistortion >> 7);
1272 if (this_rdbin >= 1024) {
1276 x->error_bins[this_rdbin]++;
1279 #if CONFIG_TEMPORAL_DENOISING
1280 if (cpi->oxcf.noise_sensitivity) {
1281 int block_index = mb_row * cpi->common.mb_cols + mb_col;
1284 if (x->best_sse_inter_mode == DC_PRED) {
1285 /* No best MV found. */
1286 x->best_sse_inter_mode = best_mbmode.mode;
1287 x->best_sse_mv = best_mbmode.mv;
1288 x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
1289 x->best_reference_frame = best_mbmode.ref_frame;
1290 best_sse = best_rd_sse;
1292 // For non-skin blocks that have selected ZEROMV for this current frame,
1293 // and have been selecting ZEROMV_LAST (on the base layer frame) at
1294 // least |x~20| consecutive past frames in a row, label the block for
1295 // possible increase in denoising strength. We also condition this
1296 // labeling on there being significant denoising in the scene
1297 if (cpi->oxcf.noise_sensitivity == 4) {
1298 if (cpi->denoiser.nmse_source_diff >
1299 70 * cpi->denoiser.threshold_aggressive_mode / 100) {
1303 if (cpi->mse_source_denoised > 1000) is_noisy = 1;
1305 x->increase_denoising = 0;
1306 if (!x->is_skin && x->best_sse_inter_mode == ZEROMV &&
1307 (x->best_reference_frame == LAST_FRAME ||
1308 x->best_reference_frame == cpi->closest_reference_frame) &&
1309 cpi->consec_zero_last[block_index] >= 20 && is_noisy) {
1310 x->increase_denoising = 1;
1312 x->denoise_zeromv = 0;
1313 vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
1314 recon_yoffset, recon_uvoffset, &cpi->common.lf_info,
1315 mb_row, mb_col, block_index,
1316 cpi->consec_zero_last_mvbias[block_index]);
1318 // Reevaluate ZEROMV after denoising: for large noise content
1319 // (i.e., cpi->mse_source_denoised is above threshold), do this for all
1320 // blocks that did not pick ZEROMV as best mode but are using ZEROMV
1321 // for denoising. Otherwise, always re-evaluate for blocks that picked
1322 // INTRA mode as best mode.
1323 // Avoid blocks that have been biased against ZERO_LAST
1324 // (i.e., dot artifact candidate blocks).
1325 reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) ||
1326 (best_mbmode.mode != ZEROMV && x->denoise_zeromv &&
1327 cpi->mse_source_denoised > 2000);
1328 if (!dot_artifact_candidate && reevaluate &&
1329 x->best_zeromv_reference_frame != INTRA_FRAME) {
1331 int this_ref_frame = x->best_zeromv_reference_frame;
1332 rd_adjustment = 100;
1334 x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts);
1337 /* set up the proper prediction buffers for the frame */
1338 x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
1339 x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
1340 x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
1341 x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
1343 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1344 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1345 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1347 evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, rd_adjustment);
1349 if (this_rd < best_rd) {
1350 memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1351 sizeof(MB_MODE_INFO));
1357 if (cpi->is_src_frame_alt_ref &&
1358 (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
1359 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1360 x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
1361 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1362 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1363 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
1364 (cpi->common.mb_no_coeff_skip);
1365 x->e_mbd.mode_info_context->mbmi.partitioning = 0;
1370 /* set to the best mb mode, this copy can be skip if x->skip since it
1371 * already has the right content */
1373 memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
1374 sizeof(MB_MODE_INFO));
1377 if (best_mbmode.mode <= B_PRED) {
1378 /* set mode_info_context->mbmi.uv_mode */
1379 pick_intra_mbuv_mode(x);
1383 cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
1384 best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
1387 update_mvcount(x, &best_ref_mv);
1390 void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) {
1391 int error4x4, error16x16 = INT_MAX;
1392 int rate, best_rate = 0, distortion, best_sse;
1393 MB_PREDICTION_MODE mode, best_mode = DC_PRED;
1396 BLOCK *b = &x->block[0];
1397 MACROBLOCKD *xd = &x->e_mbd;
1399 xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
1401 pick_intra_mbuv_mode(x);
1403 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1404 xd->mode_info_context->mbmi.mode = mode;
1405 vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
1406 xd->dst.y_buffer - 1, xd->dst.y_stride,
1408 distortion = vpx_variance16x16(*(b->base_src), b->src_stride, xd->predictor,
1410 rate = x->mbmode_cost[xd->frame_type][mode];
1411 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1413 if (error16x16 > this_rd) {
1414 error16x16 = this_rd;
1420 xd->mode_info_context->mbmi.mode = best_mode;
1422 error4x4 = pick_intra4x4mby_modes(x, &rate, &best_sse);
1423 if (error4x4 < error16x16) {
1424 xd->mode_info_context->mbmi.mode = B_PRED;