/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
13 #include "vpx_config.h"
14 #include "./vpx_dsp_rtcd.h"
16 #include "modecosts.h"
17 #include "encodeintra.h"
18 #include "vp8/common/common.h"
19 #include "vp8/common/entropymode.h"
20 #include "pickinter.h"
21 #include "vp8/common/findnearmv.h"
23 #include "vp8/common/reconinter.h"
24 #include "vp8/common/reconintra.h"
25 #include "vp8/common/reconintra4x4.h"
26 #include "vpx_dsp/variance.h"
29 #include "vpx_dsp/vpx_dsp_common.h"
30 #include "vpx_mem/vpx_mem.h"
31 #if CONFIG_TEMPORAL_DENOISING
32 #include "denoising.h"
36 extern unsigned int cnt_pm;
41 extern const int vp8_ref_frame_order[MAX_MODES];
42 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
44 // Fixed point implementation of a skin color classifier. Skin color
// is modeled by a Gaussian distribution in the CbCr color space.
46 // See ../../test/skin_color_detector_test.cc where the reference
47 // skin color classifier is defined.
49 // Fixed-point skin color model parameters.
50 static const int skin_mean[5][2] = { { 7463, 9614 },
55 static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 }; // q16
56 static const int skin_threshold[6] = { 1570636, 1400000, 800000,
57 800000, 800000, 800000 }; // q18
// Evaluates the Mahalanobis distance measure for the input CbCr values.
// Computes (x - mean)' * inv_cov * (x - mean) against skin model cluster
// |idx| in fixed point; the caller compares the result to skin_threshold[].
static int evaluate_skin_color_difference(int cb, int cr, int idx) {
  // Promote 8-bit CbCr to q6 to match the q6 skin_mean[] parameters.
  const int cb_q6 = cb << 6;
  const int cr_q6 = cr << 6;
  // Squared and cross differences in q12.
  const int cb_diff_q12 =
      (cb_q6 - skin_mean[idx][0]) * (cb_q6 - skin_mean[idx][0]);
  const int cbcr_diff_q12 =
      (cb_q6 - skin_mean[idx][0]) * (cr_q6 - skin_mean[idx][1]);
  const int cr_diff_q12 =
      (cr_q6 - skin_mean[idx][1]) * (cr_q6 - skin_mean[idx][1]);
  // Round and reduce precision from q12 down to q2.
  const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
  const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
  const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
  // Weighted sum with the q16 inverse covariance skin_inv_cov[].
  skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
      skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
// Checks if the input yCbCr values correspond to skin color.
// Returns nonzero when the block is classified as skin. |consec_zeromv| is
// the number of consecutive zero-motion frames for this block; long static
// blocks are rejected as skin candidates.
static int is_skin_color(int y, int cb, int cr, int consec_zeromv) {
  // Reject very dark / very bright luma outright.
  if (y < 40 || y > 220) {
  // MODEL_MODE == 0: single-cluster classifier using the first threshold.
  if (MODEL_MODE == 0) {
    return (evaluate_skin_color_difference(cb, cr, 0) < skin_threshold[0]);
  // No skin if block has been zero motion for long consecutive time.
  if (consec_zeromv > 60) return 0;
  // Neutral chroma (gray) is not skin.
  if (cb == 128 && cr == 128) return 0;
  // Exit on very strong cb.
  if (cb > 150 && cr < 110) return 0;
  // Multi-cluster mode: test distance against each cluster's threshold.
  int skin_color_diff = evaluate_skin_color_difference(cb, cr, i);
  if (skin_color_diff < skin_threshold[i + 1]) {
    // Tighter acceptance for dark luma and long-static blocks.
    if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2)) {
    } else if (consec_zeromv > 25 &&
               skin_color_diff > (skin_threshold[i + 1] >> 1)) {
  // Exit if difference is much larger than the threshold.
  if (skin_color_diff > (skin_threshold[i + 1] << 3)) {
// Simple corner-gradient measure: maximum absolute difference between the
// sample at (offsetx, offsety) and its three neighbors stepped in the
// sgnx/sgny directions. Used by the dot-artifact detector below.
static int macroblock_corner_grad(unsigned char *signal, int stride,
                                  int offsetx, int offsety, int sgnx,
  int y1 = signal[offsetx * stride + offsety];
  int y2 = signal[offsetx * stride + offsety + sgny];
  int y3 = signal[(offsetx + sgnx) * stride + offsety];
  int y4 = signal[(offsetx + sgnx) * stride + offsety + sgny];
  return VPXMAX(VPXMAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4));
// Checks whether this macroblock is a candidate for the dot-artifact bias:
// strong corner gradient in the last reference but not in the source, on a
// block that has used ZEROMV_LAST for many consecutive base-layer frames.
// |channel| selects Y (0) or a chroma plane. Returns via increments to
// x->mbs_zero_last_dot_suppress on detection at any of the 4 corners.
static int check_dot_artifact_candidate(VP8_COMP *cpi, MACROBLOCK *x,
                                        unsigned char *target_last, int stride,
                                        unsigned char *last_ref, int mb_row,
                                        int mb_col, int channel) {
  // Cap: at most 10% of the frame's macroblocks may be flagged per frame.
  unsigned int max_num = (cpi->common.MBs) / 10;
  int index = mb_row * cpi->common.mb_cols + mb_col;
  // Threshold for #consecutive (base layer) frames using zero_last mode.
  if (cpi->oxcf.number_of_layers > 1) {
  x->zero_last_dot_suppress = 0;
  // Blocks on base layer frames that have been using ZEROMV_LAST repeatedly
  // (i.e., at least |x| consecutive frames) are candidates for increasing the
  // rd adjustment for zero_last mode.
  // Only allow this for at most |max_num| blocks per frame.
  // Don't allow this for screen content input.
  if (cpi->current_layer == 0 &&
      cpi->consec_zero_last_mvbias[index] > num_frames &&
      x->mbs_zero_last_dot_suppress < max_num &&
      !cpi->oxcf.screen_content_mode) {
    // If this block is checked here, label it so we don't check it again until
    // ~|x| frames later.
    x->zero_last_dot_suppress = 1;
    // Dot artifact is noticeable as strong gradient at corners of macroblock,
    // for flat areas. As a simple detector for now, we look for a high
    // corner gradient on last ref, and a smaller gradient on source.
    // Check 4 corners, return if any satisfy condition.
    // Top-left corner.
    grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1);
    grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1);
    if (grad_last >= threshold1 && grad_source <= threshold2) {
      x->mbs_zero_last_dot_suppress++;
    // Top-right corner.
    grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1);
    grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1);
    if (grad_last >= threshold1 && grad_source <= threshold2) {
      x->mbs_zero_last_dot_suppress++;
    // Bottom-left corner.
    grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1);
    grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1);
    if (grad_last >= threshold1 && grad_source <= threshold2) {
      x->mbs_zero_last_dot_suppress++;
    // Bottom-right corner.
    grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1);
        macroblock_corner_grad(target_last, stride, shift, shift, -1, -1);
    if (grad_last >= threshold1 && grad_source <= threshold2) {
      x->mbs_zero_last_dot_suppress++;
// Stub used when sub-pixel (fractional) motion search is skipped: converts
// the full-pel best MV to 1/8-pel units (x8) without refining it further.
// Signature matches the fractional-mv-step function pointer interface.
int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
                                int_mv *bestmv, int_mv *ref_mv,
                                const vp8_variance_fn_ptr_t *vfp,
                                int *mvcost[2], int *distortion,
  bestmv->as_mv.row *= 8;
  bestmv->as_mv.col *= 8;
// Computes the 16x16 inter-prediction error (variance) for |this_mv|
// against the current source block, writing the SSE to |*sse|. MV is in
// 1/8-pel units: the integer part offsets into the reference buffer, and a
// nonzero sub-pel part selects the sub-pixel variance function (svf).
int vp8_get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp,
                               unsigned int *sse, int_mv this_mv) {
  BLOCK *b = &mb->block[0];
  BLOCKD *d = &mb->e_mbd.block[0];
  unsigned char *what = (*(b->base_src) + b->src);
  int what_stride = b->src_stride;
  int pre_stride = mb->e_mbd.pre.y_stride;
  unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset;
  int in_what_stride = pre_stride;
  // Sub-pel offsets in 1/8-pel units (low 3 bits of the MV).
  int xoffset = this_mv.as_mv.col & 7;
  int yoffset = this_mv.as_mv.row & 7;
  // Advance to the integer-pel position in the reference frame.
  in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);
  if (xoffset | yoffset) {
    return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what,
  // Integer-pel MV: plain variance function.
  return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
// Returns the 4x4 sum-of-squared-error between the source block and its
// predictor (predictor stride is 16).
static int get_prediction_error(BLOCK *be, BLOCKD *b) {
  sptr = (*(be->base_src) + be->src);
  return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
// Picks the best 4x4 intra prediction mode for sub-block |ib| by RD cost
// over modes B_DC_PRED..B_HE_PRED, writes the winner's rate/distortion to
// the out-params, stores the mode in the block, and encodes the block so
// its reconstruction is available as context for subsequent sub-blocks.
static int pick_intra4x4block(MACROBLOCK *x, int ib,
                              B_PREDICTION_MODE *best_mode,
                              const int *mode_costs,
                              int *bestrate, int *bestdistortion) {
  BLOCKD *b = &x->e_mbd.block[ib];
  BLOCK *be = &x->block[ib];
  int dst_stride = x->e_mbd.dst.y_stride;
  unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
  B_PREDICTION_MODE mode;
  int best_rd = INT_MAX;
  // Prediction context: row above, column left, and the top-left sample.
  unsigned char *Above = dst - dst_stride;
  unsigned char *yleft = dst - 1;
  unsigned char top_left = Above[-1];
  for (mode = B_DC_PRED; mode <= B_HE_PRED; ++mode) {
    rate = mode_costs[mode];
    vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16,
    distortion = get_prediction_error(be, b);
    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
    if (this_rd < best_rd) {
      *bestdistortion = distortion;
  // Commit the winning mode and encode so neighbors see the reconstruction.
  b->bmi.as_mode = *best_mode;
  vp8_encode_intra4x4block(x, ib);
// Picks 4x4 intra modes for all 16 luma sub-blocks (B_PRED). Accumulates
// rate/distortion over sub-blocks and breaks out early once distortion
// exceeds the caller-supplied best-so-far value in |*best_dist|.
static int pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *best_dist) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  // Base cost of signalling B_PRED at the macroblock level.
  int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
  const int *bmode_costs;
  // Replicate the above-right samples needed by the rightmost sub-blocks.
  intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
  bmode_costs = mb->inter_bmode_costs;
  for (i = 0; i < 16; ++i) {
    MODE_INFO *const mic = xd->mode_info_context;
    const int mis = xd->mode_info_stride;
    B_PREDICTION_MODE best_mode = B_MODE_COUNT;
    // On key frames, b-mode cost is conditioned on above/left b-modes.
    if (mb->e_mbd.frame_type == KEY_FRAME) {
      const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
      const B_PREDICTION_MODE L = left_block_mode(mic, i);
      bmode_costs = mb->bmode_costs[A][L];
    pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d);
    assert(best_mode != B_MODE_COUNT);
    mic->bmi[i].as_mode = best_mode;
    /* Break out case where we have already exceeded best so far value
    if (distortion > *best_dist) break;
  *best_dist = distortion;
  error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
  // Early-out path: distortion already exceeded best so far.
  *best_dist = INT_MAX;
// Picks the intra chroma (8x8 U and V) prediction mode by computing the sum
// of squared prediction errors for DC/V/H/TM against the source and keeping
// the minimum. Writes the winner to mbmi.uv_mode.
static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
  MACROBLOCKD *x = &mb->e_mbd;
  // Rows above and columns left of the U/V destination blocks.
  unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
  unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
  unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
  unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
  int uvsrc_stride = mb->block[16].src_stride;
  unsigned char uleft_col[8];
  unsigned char vleft_col[8];
  unsigned char utop_left = uabove_row[-1];
  unsigned char vtop_left = vabove_row[-1];
  // Accumulated SSE per candidate mode, indexed by DC/V/H/TM_PRED.
  int pred_error[4] = { 0, 0, 0, 0 }, best_error = INT_MAX;
  MB_PREDICTION_MODE best_mode = MB_MODE_COUNT;
  // Gather the left neighbor columns for U and V.
  for (i = 0; i < 8; ++i) {
    uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
    vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
  // DC prediction: average of available above/left samples.
  if (!x->up_available && !x->left_available) {
  if (x->up_available) {
    for (i = 0; i < 8; ++i) {
      Uaverage += uabove_row[i];
      Vaverage += vabove_row[i];
  if (x->left_available) {
    for (i = 0; i < 8; ++i) {
      Uaverage += uleft_col[i];
      Vaverage += vleft_col[i];
  // Rounded average over the samples gathered above.
  expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
  expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
  // Accumulate SSE for all four modes in a single pass over the 8x8 block.
  for (i = 0; i < 8; ++i) {
    for (j = 0; j < 8; ++j) {
      // TM prediction: left + above - top_left, clamped to [0, 255].
      int predu = uleft_col[i] + uabove_row[j] - utop_left;
      int predv = vleft_col[i] + vabove_row[j] - vtop_left;
      if (predu < 0) predu = 0;
      if (predu > 255) predu = 255;
      if (predv < 0) predv = 0;
      if (predv > 255) predv = 255;
      diff = u_p - expected_udc;
      pred_error[DC_PRED] += diff * diff;
      diff = v_p - expected_vdc;
      pred_error[DC_PRED] += diff * diff;
      diff = u_p - uabove_row[j];
      pred_error[V_PRED] += diff * diff;
      diff = v_p - vabove_row[j];
      pred_error[V_PRED] += diff * diff;
      diff = u_p - uleft_col[i];
      pred_error[H_PRED] += diff * diff;
      diff = v_p - vleft_col[i];
      pred_error[H_PRED] += diff * diff;
      pred_error[TM_PRED] += diff * diff;
      pred_error[TM_PRED] += diff * diff;
    usrc_ptr += uvsrc_stride;
    vsrc_ptr += uvsrc_stride;
  // Move to the lower half of the source U/V blocks.
  usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
  vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
  // Choose the mode with minimum accumulated error.
  for (i = DC_PRED; i <= TM_PRED; ++i) {
    if (best_error > pred_error[i]) {
      best_error = pred_error[i];
      best_mode = (MB_PREDICTION_MODE)i;
  assert(best_mode != MB_MODE_COUNT);
  mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
// Updates the MV component histograms (MVcount) used for entropy coding,
// based on the difference between the chosen MV and the best reference MV.
static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
  MACROBLOCKD *xd = &x->e_mbd;
  /* Split MV modes currently not supported when RD is not enabled,
   * therefore, only need to modify MVcount in NEWMV mode. */
  if (xd->mode_info_context->mbmi.mode == NEWMV) {
    x->MVcount[0][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.row -
                             best_ref_mv->as_mv.row) >>
    x->MVcount[1][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.col -
                             best_ref_mv->as_mv.col) >>
465 #if CONFIG_MULTI_RES_ENCODING
// Multi-resolution encoding: fetches the co-located macroblock's mode,
// reference frame, dissimilarity and (scaled) motion vector from the
// lower-resolution encoder's stored results, for reuse at this resolution.
static void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd,
                                      int *dissim, int *parent_ref_frame,
                                      MB_PREDICTION_MODE *parent_mode,
                                      int_mv *parent_ref_mv, int mb_row,
  LOWER_RES_MB_INFO *store_mode_info =
      ((LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info)->mb_info;
  unsigned int parent_mb_index;
  /* Consider different down_sampling_factor. */
  /* TODO: Removed the loop that supports special down_sampling_factor
   * such as 2, 4, 8. Will revisit it if needed.
   * Should also try using a look-up table to see if it helps
  int parent_mb_row, parent_mb_col;
  // Map this MB's position down to the parent (lower) resolution grid.
  parent_mb_row = mb_row * cpi->oxcf.mr_down_sampling_factor.den /
                  cpi->oxcf.mr_down_sampling_factor.num;
  parent_mb_col = mb_col * cpi->oxcf.mr_down_sampling_factor.den /
                  cpi->oxcf.mr_down_sampling_factor.num;
  parent_mb_index = parent_mb_row * cpi->mr_low_res_mb_cols + parent_mb_col;
  /* Read lower-resolution mode & motion result from memory.*/
  *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame;
  *parent_mode = store_mode_info[parent_mb_index].mode;
  *dissim = store_mode_info[parent_mb_index].dissim;
  /* For highest-resolution encoder, adjust dissim value. Lower its quality
   * for good performance. */
  if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1))
  if (*parent_ref_frame != INTRA_FRAME) {
    /* Consider different down_sampling_factor.
     * The result can be rounded to be more precise, but it takes more time.
    // Scale the parent MV up by the down-sampling ratio (num/den).
    (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row *
                                 cpi->oxcf.mr_down_sampling_factor.num /
                                 cpi->oxcf.mr_down_sampling_factor.den;
    (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col *
                                 cpi->oxcf.mr_down_sampling_factor.num /
                                 cpi->oxcf.mr_down_sampling_factor.den;
    // Keep the scaled MV inside the UMV borders for this macroblock.
    vp8_clamp_mv2(parent_ref_mv, xd);
// Tests whether the luma SSE is low enough (relative to a quantizer-derived
// threshold and x->encode_breakout) to skip encoding; chroma is checked too
// before allowing the skip.
static void check_for_encode_breakout(unsigned int sse, MACROBLOCK *x) {
  MACROBLOCKD *xd = &x->e_mbd;
  // Quantizer-based floor for the breakout threshold (dequant^2 / 16).
  unsigned int threshold =
      (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4);
  if (threshold < x->encode_breakout) threshold = x->encode_breakout;
  if (sse < threshold) {
    /* Check u and v to make sure skip is ok */
    unsigned int sse2 = 0;
    if (sse2 * 2 < x->encode_breakout) {
// Computes the RD cost of the current inter mode: measures prediction error
// (unless already available from a half-pel NEWMV search), applies the
// ZEROMV/LAST rd bias |rd_adj| (disabled for skin blocks), and runs the
// encode-breakout check. Returns the resulting rd value via this_rd.
static int evaluate_inter_mode(unsigned int *sse, int rate2, int *distortion2,
                               VP8_COMP *cpi, MACROBLOCK *x, int rd_adj) {
  MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
  int_mv mv = x->e_mbd.mode_info_context->mbmi.mv;
  int denoise_aggressive = 0;
  /* Exit early and don't compute the distortion if this macroblock
   * is marked inactive. */
  if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
  // Distortion must be (re)computed unless half-pel NEWMV search already
  // produced it.
  if ((this_mode != NEWMV) || !(cpi->sf.half_pixel_search) ||
      cpi->common.full_pixel == 1) {
    vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], sse, mv);
  this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2);
#if CONFIG_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0) {
        (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) ? 1 : 0;
  // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame.
  // TODO: We should also add condition on distance of closest to current.
  if (!cpi->oxcf.screen_content_mode && this_mode == ZEROMV &&
      x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
      (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME))) {
    // No adjustment if block is considered to be skin area.
    if (x->is_skin) rd_adj = 100;
    // Scale rd by rd_adj percent (64-bit intermediate avoids overflow).
    this_rd = (int)(((int64_t)this_rd) * rd_adj / 100);
  check_for_encode_breakout(*sse, x);
// Derives the ZEROMV rd-bias percentage for this macroblock. When the frame
// has a large zero-motion background (lf_zeromv_pct > 40), counts how many
// of the left/above-left/above neighbors are inter-coded with near-zero MVs
// and strengthens the ZEROMV bias accordingly.
static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x,
                                           int *rd_adjustment) {
  MODE_INFO *mic = x->e_mbd.mode_info_context;
  int_mv mv_l, mv_a, mv_al;
  int local_motion_check = 0;
  if (cpi->lf_zeromv_pct > 40) {
  // Left neighbor: inter-coded with |MV| < 1 pel (8 in 1/8-pel units)?
  if (mic->mbmi.ref_frame != INTRA_FRAME) {
    if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) {
      local_motion_check++;
  // Above-left neighbor.
  mic -= x->e_mbd.mode_info_stride;
  mv_al = mic->mbmi.mv;
  if (mic->mbmi.ref_frame != INTRA_FRAME) {
    if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) {
      local_motion_check++;
  // Above neighbor.
  if (mic->mbmi.ref_frame != INTRA_FRAME) {
    if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) {
      local_motion_check++;
  // Frame-edge MBs need fewer agreeing neighbors; interior MBs need all 3.
  if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) &&
       local_motion_check > 0) ||
      local_motion_check > 2) {
  } else if (local_motion_check > 0) {
630 void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
631 int recon_uvoffset, int *returnrate,
632 int *returndistortion, int *returnintra, int mb_row,
634 BLOCK *b = &x->block[0];
635 BLOCKD *d = &x->e_mbd.block[0];
636 MACROBLOCKD *xd = &x->e_mbd;
637 MB_MODE_INFO best_mbmode;
639 int_mv best_ref_mv_sb[2];
640 int_mv mode_mv_sb[2][MB_MODE_COUNT];
643 MB_PREDICTION_MODE this_mode;
646 int best_rd = INT_MAX;
647 int rd_adjustment = 100;
648 int best_intra_rd = INT_MAX;
653 int bestsme = INT_MAX;
654 int best_mode_index = 0;
655 unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX;
656 #if CONFIG_TEMPORAL_DENOISING
657 unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX;
660 int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
662 #if CONFIG_MULTI_RES_ENCODING
663 int dissim = INT_MAX;
664 int parent_ref_frame = 0;
665 int_mv parent_ref_mv;
666 MB_PREDICTION_MODE parent_mode = 0;
667 int parent_ref_valid = 0;
672 int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
674 /* search range got from mv_pred(). It uses step_param levels. (0-7) */
677 unsigned char *plane[4][3];
678 int ref_frame_map[4];
680 int dot_artifact_candidate = 0;
681 get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
683 // If the current frame is using LAST as a reference, check for
684 // biasing the mode selection for dot artifacts.
685 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
686 unsigned char *target_y = x->src.y_buffer;
687 unsigned char *target_u = x->block[16].src + *x->block[16].base_src;
688 unsigned char *target_v = x->block[20].src + *x->block[20].base_src;
689 int stride = x->src.y_stride;
690 int stride_uv = x->block[16].src_stride;
691 #if CONFIG_TEMPORAL_DENOISING
692 if (cpi->oxcf.noise_sensitivity) {
693 const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
695 cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
696 stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
698 target_u = cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer +
700 target_v = cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer +
702 stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
706 dot_artifact_candidate = check_dot_artifact_candidate(
707 cpi, x, target_y, stride, plane[LAST_FRAME][0], mb_row, mb_col, 0);
708 // If not found in Y channel, check UV channel.
709 if (!dot_artifact_candidate) {
710 dot_artifact_candidate = check_dot_artifact_candidate(
711 cpi, x, target_u, stride_uv, plane[LAST_FRAME][1], mb_row, mb_col, 1);
712 if (!dot_artifact_candidate) {
713 dot_artifact_candidate = check_dot_artifact_candidate(
714 cpi, x, target_v, stride_uv, plane[LAST_FRAME][2], mb_row, mb_col,
720 #if CONFIG_MULTI_RES_ENCODING
721 // |parent_ref_valid| will be set here if potentially we can do mv resue for
722 // this higher resol (|cpi->oxcf.mr_encoder_id| > 0) frame.
723 // |parent_ref_valid| may be reset depending on |parent_ref_frame| for
724 // the current macroblock below.
725 parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
726 if (parent_ref_valid) {
729 get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame, &parent_mode,
730 &parent_ref_mv, mb_row, mb_col);
732 /* TODO(jkoleszar): The references available (ref_frame_flags) to the
733 * lower res encoder should match those available to this encoder, but
734 * there seems to be a situation where this mismatch can happen in the
735 * case of frame dropping and temporal layers. For example,
736 * GOLD being disallowed in ref_frame_flags, but being returned as
739 * In this event, take the conservative approach of disabling the
740 * lower res info for this MB.
744 // Note availability for mv reuse is only based on last and golden.
745 if (parent_ref_frame == LAST_FRAME)
746 parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME);
747 else if (parent_ref_frame == GOLDEN_FRAME)
748 parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME);
750 // assert(!parent_ref_frame || parent_ref_flag);
752 // If |parent_ref_frame| did not match either last or golden then
753 // shut off mv reuse.
754 if (parent_ref_frame && !parent_ref_flag) parent_ref_valid = 0;
756 // Don't do mv reuse since we want to allow for another mode besides
757 // ZEROMV_LAST to remove dot artifact.
758 if (dot_artifact_candidate) parent_ref_valid = 0;
762 // Check if current macroblock is in skin area.
764 const int y = (x->src.y_buffer[7 * x->src.y_stride + 7] +
765 x->src.y_buffer[7 * x->src.y_stride + 8] +
766 x->src.y_buffer[8 * x->src.y_stride + 7] +
767 x->src.y_buffer[8 * x->src.y_stride + 8]) >>
769 const int cb = (x->src.u_buffer[3 * x->src.uv_stride + 3] +
770 x->src.u_buffer[3 * x->src.uv_stride + 4] +
771 x->src.u_buffer[4 * x->src.uv_stride + 3] +
772 x->src.u_buffer[4 * x->src.uv_stride + 4]) >>
774 const int cr = (x->src.v_buffer[3 * x->src.uv_stride + 3] +
775 x->src.v_buffer[3 * x->src.uv_stride + 4] +
776 x->src.v_buffer[4 * x->src.uv_stride + 3] +
777 x->src.v_buffer[4 * x->src.uv_stride + 4]) >>
780 if (!cpi->oxcf.screen_content_mode) {
781 int block_index = mb_row * cpi->common.mb_cols + mb_col;
782 x->is_skin = is_skin_color(y, cb, cr, cpi->consec_zero_last[block_index]);
785 #if CONFIG_TEMPORAL_DENOISING
786 if (cpi->oxcf.noise_sensitivity) {
787 // Under aggressive denoising mode, should we use skin map to reduce
789 // and ZEROMV bias? Will need to revisit the accuracy of this detection for
790 // very noisy input. For now keep this as is (i.e., don't turn it off).
791 // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive)
796 mode_mv = mode_mv_sb[sign_bias];
797 best_ref_mv.as_int = 0;
798 memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
799 memset(&best_mbmode, 0, sizeof(best_mbmode));
801 /* Setup search priorities */
802 #if CONFIG_MULTI_RES_ENCODING
803 if (parent_ref_valid && parent_ref_frame && dissim < 8) {
804 ref_frame_map[0] = -1;
805 ref_frame_map[1] = parent_ref_frame;
806 ref_frame_map[2] = -1;
807 ref_frame_map[3] = -1;
810 get_reference_search_order(cpi, ref_frame_map);
812 /* Check to see if there is at least 1 valid reference frame that we need
813 * to calculate near_mvs.
815 if (ref_frame_map[1] > 0) {
816 sign_bias = vp8_find_near_mvs_bias(
817 &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb,
818 mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias);
820 mode_mv = mode_mv_sb[sign_bias];
821 best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
824 /* Count of the number of MBs tested so far this frame */
825 x->mbs_tested_so_far++;
827 *returnintra = INT_MAX;
830 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
832 /* If the frame has big static background and current MB is in low
833 * motion area, its mode decision is biased to ZEROMV mode.
834 * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12).
835 * At such speed settings, ZEROMV is already heavily favored.
837 if (cpi->Speed < 12) {
838 calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
841 #if CONFIG_TEMPORAL_DENOISING
842 if (cpi->oxcf.noise_sensitivity) {
843 rd_adjustment = (int)(rd_adjustment *
844 cpi->denoiser.denoise_pars.pickmode_mv_bias / 100);
848 if (dot_artifact_candidate) {
849 // Bias against ZEROMV_LAST mode.
853 /* if we encode a new mv this is important
854 * find the best new motion vector
856 for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
858 int this_rd = INT_MAX;
859 int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
861 if (best_rd <= x->rd_threshes[mode_index]) continue;
863 if (this_ref_frame < 0) continue;
865 x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
867 /* everything but intra */
868 if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
869 x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
870 x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
871 x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
873 if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) {
874 sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
875 mode_mv = mode_mv_sb[sign_bias];
876 best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
879 #if CONFIG_MULTI_RES_ENCODING
880 if (parent_ref_valid) {
881 if (vp8_mode_order[mode_index] == NEARESTMV &&
882 mode_mv[NEARESTMV].as_int == 0)
884 if (vp8_mode_order[mode_index] == NEARMV && mode_mv[NEARMV].as_int == 0)
887 if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV &&
888 best_ref_mv.as_int == 0)
890 else if (vp8_mode_order[mode_index] == NEWMV && dissim == 0 &&
891 best_ref_mv.as_int == parent_ref_mv.as_int)
897 /* Check to see if the testing frequency for this mode is at its max
898 * If so then prevent it from being tested and increase the threshold
900 if (x->mode_test_hit_counts[mode_index] &&
901 (cpi->mode_check_freq[mode_index] > 1)) {
902 if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
903 x->mode_test_hit_counts[mode_index])) {
904 /* Increase the threshold for coding this mode to make it less
905 * likely to be chosen */
906 x->rd_thresh_mult[mode_index] += 4;
908 if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
909 x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
912 x->rd_threshes[mode_index] =
913 (cpi->rd_baseline_thresh[mode_index] >> 7) *
914 x->rd_thresh_mult[mode_index];
919 /* We have now reached the point where we are going to test the current
920 * mode so increment the counter for the number of times it has been
922 x->mode_test_hit_counts[mode_index]++;
927 this_mode = vp8_mode_order[mode_index];
929 x->e_mbd.mode_info_context->mbmi.mode = this_mode;
930 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
932 /* Work out the cost assosciated with selecting the reference frame */
933 frame_cost = x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
936 /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
937 * unless ARNR filtering is enabled in which case we want
938 * an unfiltered alternative */
939 if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
940 if (this_mode != ZEROMV ||
941 x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
948 /* Pass best so far to pick_intra4x4mby_modes to use as breakout */
949 distortion2 = best_rd_sse;
950 pick_intra4x4mby_modes(x, &rate, &distortion2);
952 if (distortion2 == INT_MAX) {
956 distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
957 x->e_mbd.predictor, 16, &sse);
958 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
960 if (this_rd < best_intra_rd) {
961 best_intra_rd = this_rd;
962 *returnintra = distortion2;
970 /* Split MV modes currently not supported when RD is not enabled. */
977 vp8_build_intra_predictors_mby_s(
978 xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1,
979 xd->dst.y_stride, xd->predictor, 16);
980 distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
981 x->e_mbd.predictor, 16, &sse);
982 rate2 += x->mbmode_cost[x->e_mbd.frame_type]
983 [x->e_mbd.mode_info_context->mbmi.mode];
984 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
986 if (this_rd < best_intra_rd) {
987 best_intra_rd = this_rd;
988 *returnintra = distortion2;
997 int sadpb = x->sadperbit16;
1000 int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
1001 int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
1002 int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
1003 int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;
1005 int tmp_col_min = x->mv_col_min;
1006 int tmp_col_max = x->mv_col_max;
1007 int tmp_row_min = x->mv_row_min;
1008 int tmp_row_max = x->mv_row_max;
1010 int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1;
1012 /* Further step/diamond searches as necessary */
1013 step_param = cpi->sf.first_step + speed_adjust;
1015 #if CONFIG_MULTI_RES_ENCODING
1016 /* If lower-res frame is not available for mv reuse (because of
1017 frame dropping or different temporal layer pattern), then higher
1018 resol encoder does motion search without any previous knowledge.
1019 Also, since last frame motion info is not stored, then we can not
1020 use improved_mv_pred. */
1021 if (cpi->oxcf.mr_encoder_id) sf_improved_mv_pred = 0;
1023 // Only use parent MV as predictor if this candidate reference frame
1024 // (|this_ref_frame|) is equal to |parent_ref_frame|.
1025 if (parent_ref_valid && (parent_ref_frame == this_ref_frame)) {
1026 /* Use parent MV as predictor. Adjust search range
1029 mvp.as_int = parent_ref_mv.as_int;
1030 mvp_full.as_mv.col = parent_ref_mv.as_mv.col >> 3;
1031 mvp_full.as_mv.row = parent_ref_mv.as_mv.row >> 3;
1035 else if (dissim <= 128)
1042 if (sf_improved_mv_pred) {
1044 vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
1048 vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
1049 x->e_mbd.mode_info_context->mbmi.ref_frame,
1050 cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
1053 /* adjust search range according to sr from mv prediction */
1054 if (sr > step_param) step_param = sr;
1056 mvp_full.as_mv.col = mvp.as_mv.col >> 3;
1057 mvp_full.as_mv.row = mvp.as_mv.row >> 3;
1059 mvp.as_int = best_ref_mv.as_int;
1060 mvp_full.as_mv.col = best_ref_mv.as_mv.col >> 3;
1061 mvp_full.as_mv.row = best_ref_mv.as_mv.row >> 3;
1065 #if CONFIG_MULTI_RES_ENCODING
1066 if (parent_ref_valid && (parent_ref_frame == this_ref_frame) &&
1068 VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row),
1069 abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= 4) {
1070 d->bmi.mv.as_int = mvp_full.as_int;
1071 mode_mv[NEWMV].as_int = mvp_full.as_int;
1073 cpi->find_fractional_mv_step(
1074 x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
1075 &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
1079 /* Get intersection of UMV window and valid MV window to
1080 * reduce # of checks in diamond search. */
1081 if (x->mv_col_min < col_min) x->mv_col_min = col_min;
1082 if (x->mv_col_max > col_max) x->mv_col_max = col_max;
1083 if (x->mv_row_min < row_min) x->mv_row_min = row_min;
1084 if (x->mv_row_max > row_max) x->mv_row_max = row_max;
1089 : (cpi->sf.max_step_search_steps - 1 - step_param);
1091 if (cpi->sf.search_method == HEX) {
1092 #if CONFIG_MULTI_RES_ENCODING
1093 /* TODO: In higher-res pick_inter_mode, step_param is used to
1094 * modify hex search range. Here, set step_param to 0 not to
1095 * change the behavior in lowest-resolution encoder.
1096 * Will improve it later.
1098 /* Set step_param to 0 to ensure large-range motion search
1099 * when mv reuse if not valid (i.e. |parent_ref_valid| = 0),
1100 * or if this candidate reference frame (|this_ref_frame|) is
1101 * not equal to |parent_ref_frame|.
1103 if (!parent_ref_valid || (parent_ref_frame != this_ref_frame))
1106 bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param,
1107 sadpb, &cpi->fn_ptr[BLOCK_16X16],
1108 x->mvsadcost, x->mvcost, &best_ref_mv);
1109 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1111 bestsme = cpi->diamond_search_sad(
1112 x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00,
1113 &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
1114 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1116 /* Further step/diamond searches as necessary */
1120 while (n < further_steps) {
1126 thissme = cpi->diamond_search_sad(
1127 x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb,
1128 &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
1129 if (thissme < bestsme) {
1131 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1133 d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
1139 x->mv_col_min = tmp_col_min;
1140 x->mv_col_max = tmp_col_max;
1141 x->mv_row_min = tmp_row_min;
1142 x->mv_row_max = tmp_row_max;
1144 if (bestsme < INT_MAX) {
1145 cpi->find_fractional_mv_step(
1146 x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
1147 &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
1151 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1152 // The clamp below is not necessary from the perspective
1153 // of VP8 bitstream, but is added to improve ChromeCast
1154 // mirroring's robustness. Please do not remove.
1155 vp8_clamp_mv2(&mode_mv[this_mode], xd);
1158 vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
1163 if (mode_mv[this_mode].as_int == 0) continue;
1167 /* Trap vectors that reach beyond the UMV borders
1168 * Note that ALL New MV, Nearest MV, Near MV and Zero MV code drops
1169 * through to this point because of the lack of break statements
1170 * in the previous two cases.
1172 if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
1173 ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
1174 ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
1175 ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
1179 rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
1180 x->e_mbd.mode_info_context->mbmi.mv.as_int = mode_mv[this_mode].as_int;
1181 this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x,
1188 #if CONFIG_TEMPORAL_DENOISING
1189 if (cpi->oxcf.noise_sensitivity) {
1190 /* Store for later use by denoiser. */
1191 // Don't denoise with GOLDEN or ALTREF if they are old reference
1192 // frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in past).
1193 int skip_old_reference = ((this_ref_frame != LAST_FRAME) &&
1194 (cpi->common.current_video_frame -
1195 cpi->current_ref_frames[this_ref_frame] >
1196 MAX_GF_ARF_DENOISE_RANGE))
1199 if (this_mode == ZEROMV && sse < zero_mv_sse && !skip_old_reference) {
1201 x->best_zeromv_reference_frame =
1202 x->e_mbd.mode_info_context->mbmi.ref_frame;
1205 // Store the best NEWMV in x for later use in the denoiser.
1206 if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse &&
1207 !skip_old_reference) {
1209 x->best_sse_inter_mode = NEWMV;
1210 x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
1211 x->need_to_clamp_best_mvs =
1212 x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
1213 x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame;
1218 if (this_rd < best_rd || x->skip) {
1219 /* Note index of best mode */
1220 best_mode_index = mode_index;
1222 *returnrate = rate2;
1223 *returndistortion = distortion2;
1226 memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1227 sizeof(MB_MODE_INFO));
1229 /* Testing this mode gave rise to an improvement in best error
1230 * score. Lower threshold a bit for next time
1232 x->rd_thresh_mult[mode_index] =
1233 (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2))
1234 ? x->rd_thresh_mult[mode_index] - 2
1236 x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
1237 x->rd_thresh_mult[mode_index];
1240 /* If the mode did not help improve the best error case then raise the
1241 * threshold for testing that mode next time around.
1244 x->rd_thresh_mult[mode_index] += 4;
1246 if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
1247 x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
1250 x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
1251 x->rd_thresh_mult[mode_index];
1257 /* Reduce the activation RD thresholds for the best choice mode */
1258 if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
1259 (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
1260 int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
1262 x->rd_thresh_mult[best_mode_index] =
1263 (x->rd_thresh_mult[best_mode_index] >=
1264 (MIN_THRESHMULT + best_adjustment))
1265 ? x->rd_thresh_mult[best_mode_index] - best_adjustment
1267 x->rd_threshes[best_mode_index] =
1268 (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
1269 x->rd_thresh_mult[best_mode_index];
1273 int this_rdbin = (*returndistortion >> 7);
1275 if (this_rdbin >= 1024) {
1279 x->error_bins[this_rdbin]++;
1282 #if CONFIG_TEMPORAL_DENOISING
1283 if (cpi->oxcf.noise_sensitivity) {
1284 int block_index = mb_row * cpi->common.mb_cols + mb_col;
1287 if (x->best_sse_inter_mode == DC_PRED) {
1288 /* No best MV found. */
1289 x->best_sse_inter_mode = best_mbmode.mode;
1290 x->best_sse_mv = best_mbmode.mv;
1291 x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
1292 x->best_reference_frame = best_mbmode.ref_frame;
1293 best_sse = best_rd_sse;
1295 // For non-skin blocks that have selected ZEROMV for this current frame,
1296 // and have been selecting ZEROMV_LAST (on the base layer frame) at
1297 // least |x~20| consecutive past frames in a row, label the block for
1298 // possible increase in denoising strength. We also condition this
1299 // labeling on there being significant denoising in the scene.
1300 if (cpi->oxcf.noise_sensitivity == 4) {
1301 if (cpi->denoiser.nmse_source_diff >
1302 70 * cpi->denoiser.threshold_aggressive_mode / 100) {
1306 if (cpi->mse_source_denoised > 1000) is_noisy = 1;
1308 x->increase_denoising = 0;
1309 if (!x->is_skin && x->best_sse_inter_mode == ZEROMV &&
1310 (x->best_reference_frame == LAST_FRAME ||
1311 x->best_reference_frame == cpi->closest_reference_frame) &&
1312 cpi->consec_zero_last[block_index] >= 20 && is_noisy) {
1313 x->increase_denoising = 1;
1315 x->denoise_zeromv = 0;
1316 vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
1317 recon_yoffset, recon_uvoffset, &cpi->common.lf_info,
1318 mb_row, mb_col, block_index,
1319 cpi->consec_zero_last_mvbias[block_index]);
1321 // Reevaluate ZEROMV after denoising: for large noise content
1322 // (i.e., cpi->mse_source_denoised is above threshold), do this for all
1323 // blocks that did not pick ZEROMV as best mode but are using ZEROMV
1324 // for denoising. Otherwise, always re-evaluate for blocks that picked
1325 // INTRA mode as best mode.
1326 // Avoid blocks that have been biased against ZERO_LAST
1327 // (i.e., dot artifact candidate blocks).
1328 reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) ||
1329 (best_mbmode.mode != ZEROMV && x->denoise_zeromv &&
1330 cpi->mse_source_denoised > 2000);
1331 if (!dot_artifact_candidate && reevaluate &&
1332 x->best_zeromv_reference_frame != INTRA_FRAME) {
1334 int this_ref_frame = x->best_zeromv_reference_frame;
1335 rd_adjustment = 100;
1337 x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts);
1340 /* set up the proper prediction buffers for the frame */
1341 x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
1342 x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
1343 x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
1344 x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
1346 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1347 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1348 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1350 evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, rd_adjustment);
1352 if (this_rd < best_rd) {
1353 memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1354 sizeof(MB_MODE_INFO));
1360 if (cpi->is_src_frame_alt_ref &&
1361 (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
1362 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1363 x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
1364 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1365 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1366 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
1367 (cpi->common.mb_no_coeff_skip);
1368 x->e_mbd.mode_info_context->mbmi.partitioning = 0;
1373 /* set to the best mb mode, this copy can be skip if x->skip since it
1374 * already has the right content */
1376 memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
1377 sizeof(MB_MODE_INFO));
1380 if (best_mbmode.mode <= B_PRED) {
1381 /* set mode_info_context->mbmi.uv_mode */
1382 pick_intra_mbuv_mode(x);
1386 cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
1387 best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
1390 update_mvcount(x, &best_ref_mv);
1393 void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) {
1394 int error4x4, error16x16 = INT_MAX;
1395 int rate, best_rate = 0, distortion, best_sse;
1396 MB_PREDICTION_MODE mode, best_mode = DC_PRED;
1399 BLOCK *b = &x->block[0];
1400 MACROBLOCKD *xd = &x->e_mbd;
1402 xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
1404 pick_intra_mbuv_mode(x);
1406 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1407 xd->mode_info_context->mbmi.mode = mode;
1408 vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
1409 xd->dst.y_buffer - 1, xd->dst.y_stride,
1411 distortion = vpx_variance16x16(*(b->base_src), b->src_stride, xd->predictor,
1413 rate = x->mbmode_cost[xd->frame_type][mode];
1414 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1416 if (error16x16 > this_rd) {
1417 error16x16 = this_rd;
1423 xd->mode_info_context->mbmi.mode = best_mode;
1425 error4x4 = pick_intra4x4mby_modes(x, &rate, &best_sse);
1426 if (error4x4 < error16x16) {
1427 xd->mode_info_context->mbmi.mode = B_PRED;