2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
13 #include "vpx_config.h"
14 #include "./vpx_dsp_rtcd.h"
16 #include "modecosts.h"
17 #include "encodeintra.h"
18 #include "vp8/common/common.h"
19 #include "vp8/common/entropymode.h"
20 #include "pickinter.h"
21 #include "vp8/common/findnearmv.h"
23 #include "vp8/common/reconinter.h"
24 #include "vp8/common/reconintra.h"
25 #include "vp8/common/reconintra4x4.h"
26 #include "vpx_dsp/variance.h"
28 #include "vp8/common/skin_detection.h"
30 #include "vpx_dsp/vpx_dsp_common.h"
31 #include "vpx_mem/vpx_mem.h"
32 #if CONFIG_TEMPORAL_DENOISING
33 #include "denoising.h"
37 extern unsigned int cnt_pm;
40 extern const int vp8_ref_frame_order[MAX_MODES];
41 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
// Measures the local gradient at one corner of a macroblock: compares the
// sample at (offsetx, offsety) in |signal| with its horizontal, vertical and
// diagonal neighbors (step direction given by sgnx/sgny) and returns the
// largest absolute difference of the three.
43 static int macroblock_corner_grad(unsigned char *signal, int stride,
44 int offsetx, int offsety, int sgnx,
46 int y1 = signal[offsetx * stride + offsety];
47 int y2 = signal[offsetx * stride + offsety + sgny];
48 int y3 = signal[(offsetx + sgnx) * stride + offsety];
49 int y4 = signal[(offsetx + sgnx) * stride + offsety + sgny];
50 return VPXMAX(VPXMAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4));
// Detects whether this macroblock is a candidate for dot-artifact suppression.
// Dot artifacts show up as a strong gradient at a macroblock corner on the
// last reference while the corresponding source gradient stays small; for
// blocks that have used ZEROMV_LAST for many consecutive frames this function
// checks all four corners (of |target_last| vs |last_ref|, either the Y plane
// or a chroma plane depending on |channel|) and marks the block via
// x->zero_last_dot_suppress / x->mbs_zero_last_dot_suppress.
53 static int check_dot_artifact_candidate(VP8_COMP *cpi, MACROBLOCK *x,
54 unsigned char *target_last, int stride,
55 unsigned char *last_ref, int mb_row,
56 int mb_col, int channel) {
// Cap: allow at most 10% of the frame's macroblocks per frame.
59 unsigned int max_num = (cpi->common.MBs) / 10;
62 int index = mb_row * cpi->common.mb_cols + mb_col;
63 // Threshold for #consecutive (base layer) frames using zero_last mode.
69 if (cpi->oxcf.number_of_layers > 1) {
72 x->zero_last_dot_suppress = 0;
73 // Blocks on base layer frames that have been using ZEROMV_LAST repeatedly
74 // (i.e., for at least |x| consecutive frames) are candidates for increasing
75 // the rd adjustment for zero_last mode.
76 // Only allow this for at most |max_num| blocks per frame.
77 // Don't allow this for screen content input.
78 if (cpi->current_layer == 0 &&
79 cpi->consec_zero_last_mvbias[index] > num_frames &&
80 x->mbs_zero_last_dot_suppress < max_num &&
81 !cpi->oxcf.screen_content_mode) {
82 // If this block is checked here, label it so we don't check it again until
83 // ~|x| frames later.
84 x->zero_last_dot_suppress = 1;
85 // Dot artifact is noticeable as strong gradient at corners of macroblock,
86 // for flat areas. As a simple detector for now, we look for a high
87 // corner gradient on last ref, and a smaller gradient on source.
88 // Check 4 corners, return if any satisfy condition.
// Top-left corner.
90 grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1);
91 grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1);
92 if (grad_last >= threshold1 && grad_source <= threshold2) {
93 x->mbs_zero_last_dot_suppress++;
// Top-right corner.
97 grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1);
98 grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1);
99 if (grad_last >= threshold1 && grad_source <= threshold2) {
100 x->mbs_zero_last_dot_suppress++;
// Bottom-left corner.
104 grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1);
105 grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1);
106 if (grad_last >= threshold1 && grad_source <= threshold2) {
107 x->mbs_zero_last_dot_suppress++;
// Bottom-right corner.
111 grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1);
113 macroblock_corner_grad(target_last, stride, shift, shift, -1, -1);
114 if (grad_last >= threshold1 && grad_source <= threshold2) {
115 x->mbs_zero_last_dot_suppress++;
// Stand-in for a sub-pixel motion search: instead of refining, it simply
// converts the full-pel best MV into 1/8-pel units (row/col scaled by 8).
// Shares the signature of the real fractional-MV-step functions so it can be
// plugged into the same function pointer.
123 int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
124 int_mv *bestmv, int_mv *ref_mv,
126 const vp8_variance_fn_ptr_t *vfp,
127 int *mvcost[2], int *distortion,
138 bestmv->as_mv.row *= 8;
139 bestmv->as_mv.col *= 8;
// Computes the 16x16 inter-prediction error (variance) for the given motion
// vector |this_mv| (in 1/8-pel units) against the source block, writing the
// sum of squared errors to |sse|.
143 int vp8_get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp,
144 unsigned int *sse, int_mv this_mv) {
145 BLOCK *b = &mb->block[0];
146 BLOCKD *d = &mb->e_mbd.block[0];
147 unsigned char *what = (*(b->base_src) + b->src);
148 int what_stride = b->src_stride;
149 int pre_stride = mb->e_mbd.pre.y_stride;
150 unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset;
151 int in_what_stride = pre_stride;
// Low 3 bits of each MV component are the sub-pel (1/8-pel) offset.
152 int xoffset = this_mv.as_mv.col & 7;
153 int yoffset = this_mv.as_mv.row & 7;
// Advance the reference pointer by the integer-pel part of the MV.
155 in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);
// Sub-pixel variance when either component has a fractional part,
// otherwise the plain integer-pel variance function.
157 if (xoffset | yoffset) {
158 return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what,
161 return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
// Returns the 4x4 sum of squared errors between the source block and its
// predictor (predictor stride is 16).
165 static int get_prediction_error(BLOCK *be, BLOCKD *b) {
168 sptr = (*(be->base_src) + be->src);
171 return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
// RD-picks the best intra 4x4 prediction mode for sub-block |ib| from a
// reduced mode set (B_DC_PRED..B_HE_PRED, for speed), writes the winner to
// *best_mode / *bestrate / *bestdistortion, and encodes the block with it.
174 static int pick_intra4x4block(MACROBLOCK *x, int ib,
175 B_PREDICTION_MODE *best_mode,
176 const int *mode_costs,
178 int *bestrate, int *bestdistortion) {
179 BLOCKD *b = &x->e_mbd.block[ib];
180 BLOCK *be = &x->block[ib];
181 int dst_stride = x->e_mbd.dst.y_stride;
182 unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
183 B_PREDICTION_MODE mode;
184 int best_rd = INT_MAX;
// Neighboring reconstructed samples used as prediction context.
188 unsigned char *Above = dst - dst_stride;
189 unsigned char *yleft = dst - 1;
190 unsigned char top_left = Above[-1];
// Only a subset of the 4x4 modes is searched here (DC through HE).
192 for (mode = B_DC_PRED; mode <= B_HE_PRED; ++mode) {
195 rate = mode_costs[mode];
197 vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16,
199 distortion = get_prediction_error(be, b);
200 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
202 if (this_rd < best_rd) {
204 *bestdistortion = distortion;
// Commit the winning mode and encode the sub-block with it.
210 b->bmi.as_mode = *best_mode;
211 vp8_encode_intra4x4block(x, ib);
// Picks 4x4 intra modes for all 16 luma sub-blocks of the macroblock (B_PRED),
// accumulating rate and distortion. *best_dist acts both as an input breakout
// threshold (early exit when exceeded) and as the output distortion.
215 static int pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *best_dist) {
216 MACROBLOCKD *const xd = &mb->e_mbd;
// Start the rate with the cost of signaling B_PRED at the MB level.
218 int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
221 const int *bmode_costs;
// Make the top-right reconstructed samples available for sub-block prediction.
223 intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
// Inter frames use context-free bmode costs ...
225 bmode_costs = mb->inter_bmode_costs;
227 for (i = 0; i < 16; ++i) {
228 MODE_INFO *const mic = xd->mode_info_context;
229 const int mis = xd->mode_info_stride;
231 B_PREDICTION_MODE best_mode = B_MODE_COUNT;
// ... while key frames condition the cost on the above/left sub-block modes.
234 if (mb->e_mbd.frame_type == KEY_FRAME) {
235 const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
236 const B_PREDICTION_MODE L = left_block_mode(mic, i);
238 bmode_costs = mb->bmode_costs[A][L];
241 pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d);
245 assert(best_mode != B_MODE_COUNT);
246 mic->bmi[i].as_mode = best_mode;
248 /* Break out case where we have already exceeded best so far value
251 if (distortion > *best_dist) break;
257 *best_dist = distortion;
258 error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
// Breakout path: signal failure with INT_MAX distortion.
260 *best_dist = INT_MAX;
// Picks the intra chroma (UV) prediction mode for the macroblock by directly
// accumulating the squared prediction error of DC/V/H/TM against the 8x8
// source chroma blocks, then storing the lowest-error mode in mbmi.uv_mode.
267 static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
268 MACROBLOCKD *x = &mb->e_mbd;
// Reconstructed row above and column left of the chroma blocks.
269 unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
270 unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
271 unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
272 unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
273 int uvsrc_stride = mb->block[16].src_stride;
274 unsigned char uleft_col[8];
275 unsigned char vleft_col[8];
276 unsigned char utop_left = uabove_row[-1];
277 unsigned char vtop_left = vabove_row[-1];
// Accumulated SSE per candidate mode, indexed by DC/V/H/TM_PRED.
285 int pred_error[4] = { 0, 0, 0, 0 }, best_error = INT_MAX;
286 MB_PREDICTION_MODE best_mode = MB_MODE_COUNT;
288 for (i = 0; i < 8; ++i) {
289 uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
290 vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
// DC prediction value depends on which neighbors are available.
293 if (!x->up_available && !x->left_available) {
299 if (x->up_available) {
300 for (i = 0; i < 8; ++i) {
301 Uaverage += uabove_row[i];
302 Vaverage += vabove_row[i];
308 if (x->left_available) {
309 for (i = 0; i < 8; ++i) {
310 Uaverage += uleft_col[i];
311 Vaverage += vleft_col[i];
// Rounded average of the available neighbor samples.
317 expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
318 expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
// Walk the 8x8 chroma block, accumulating SSE for each candidate mode.
321 for (i = 0; i < 8; ++i) {
322 for (j = 0; j < 8; ++j) {
// TrueMotion predictor: left + above - top_left, clamped to [0, 255].
323 int predu = uleft_col[i] + uabove_row[j] - utop_left;
324 int predv = vleft_col[i] + vabove_row[j] - vtop_left;
330 if (predu < 0) predu = 0;
332 if (predu > 255) predu = 255;
334 if (predv < 0) predv = 0;
336 if (predv > 255) predv = 255;
338 diff = u_p - expected_udc;
339 pred_error[DC_PRED] += diff * diff;
340 diff = v_p - expected_vdc;
341 pred_error[DC_PRED] += diff * diff;
343 diff = u_p - uabove_row[j];
344 pred_error[V_PRED] += diff * diff;
345 diff = v_p - vabove_row[j];
346 pred_error[V_PRED] += diff * diff;
348 diff = u_p - uleft_col[i];
349 pred_error[H_PRED] += diff * diff;
350 diff = v_p - vleft_col[i];
351 pred_error[H_PRED] += diff * diff;
354 pred_error[TM_PRED] += diff * diff;
356 pred_error[TM_PRED] += diff * diff;
359 usrc_ptr += uvsrc_stride;
360 vsrc_ptr += uvsrc_stride;
// Switch to the lower-half source blocks (blocks 18/22) midway down.
363 usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
364 vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
// Select the mode with the smallest accumulated error.
368 for (i = DC_PRED; i <= TM_PRED; ++i) {
369 if (best_error > pred_error[i]) {
370 best_error = pred_error[i];
371 best_mode = (MB_PREDICTION_MODE)i;
375 assert(best_mode != MB_MODE_COUNT);
376 mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
// Updates the motion-vector probability counts (row in MVcount[0], column in
// MVcount[1]) with the NEWMV residual relative to |best_ref_mv|.
379 static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
380 MACROBLOCKD *xd = &x->e_mbd;
381 /* Split MV modes currently not supported when RD is not enabled,
382 * therefore, only need to modify MVcount in NEWMV mode. */
383 if (xd->mode_info_context->mbmi.mode == NEWMV) {
384 x->MVcount[0][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.row -
385 best_ref_mv->as_mv.row) >>
387 x->MVcount[1][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.col -
388 best_ref_mv->as_mv.col) >>
393 #if CONFIG_MULTI_RES_ENCODING
// Multi-resolution encoding: fetches the co-located macroblock's mode, motion
// vector, reference frame and dissimilarity score from the lower-resolution
// encoder's stored results, scaling position and MV by the down-sampling
// factor so the higher-resolution encoder can reuse them.
394 static void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd,
395 int *dissim, int *parent_ref_frame,
396 MB_PREDICTION_MODE *parent_mode,
397 int_mv *parent_ref_mv, int mb_row,
399 LOWER_RES_MB_INFO *store_mode_info =
400 ((LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info)->mb_info;
401 unsigned int parent_mb_index;
403 /* Consider different down_sampling_factor. */
405 /* TODO: Removed the loop that supports special down_sampling_factor
406 * such as 2, 4, 8. Will revisit it if needed.
407 * Should also try using a look-up table to see if it helps
409 int parent_mb_row, parent_mb_col;
// Map this MB's position to the co-located MB in the lower-res frame.
411 parent_mb_row = mb_row * cpi->oxcf.mr_down_sampling_factor.den /
412 cpi->oxcf.mr_down_sampling_factor.num;
413 parent_mb_col = mb_col * cpi->oxcf.mr_down_sampling_factor.den /
414 cpi->oxcf.mr_down_sampling_factor.num;
415 parent_mb_index = parent_mb_row * cpi->mr_low_res_mb_cols + parent_mb_col;
418 /* Read lower-resolution mode & motion result from memory.*/
419 *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame;
420 *parent_mode = store_mode_info[parent_mb_index].mode;
421 *dissim = store_mode_info[parent_mb_index].dissim;
423 /* For highest-resolution encoder, adjust dissim value. Lower its quality
424 * for good performance. */
425 if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1))
428 if (*parent_ref_frame != INTRA_FRAME) {
429 /* Consider different down_sampling_factor.
430 * The result can be rounded to be more precise, but it takes more time.
432 (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row *
433 cpi->oxcf.mr_down_sampling_factor.num /
434 cpi->oxcf.mr_down_sampling_factor.den;
435 (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col *
436 cpi->oxcf.mr_down_sampling_factor.num /
437 cpi->oxcf.mr_down_sampling_factor.den;
// Keep the scaled MV inside the valid UMV borders for this MB.
439 vp8_clamp_mv2(parent_ref_mv, xd);
// Decides whether residual coding can be skipped for this MB ("encode
// breakout"): if the luma SSE is below a quantizer-derived (or user-set)
// threshold, the chroma error is then checked before allowing the skip.
444 static void check_for_encode_breakout(unsigned int sse, MACROBLOCK *x) {
445 MACROBLOCKD *xd = &x->e_mbd;
// Threshold scales with the square of the AC dequant step (>> 4).
447 unsigned int threshold =
448 (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4);
// Never use a threshold below the user-configured encode_breakout.
450 if (threshold < x->encode_breakout) threshold = x->encode_breakout;
452 if (sse < threshold) {
453 /* Check u and v to make sure skip is ok */
454 unsigned int sse2 = 0;
458 if (sse2 * 2 < x->encode_breakout) {
// Computes the RD cost of the currently selected inter mode: measures the
// prediction error (unless already available from a NEWMV half-pel search),
// applies the ZEROMV/LAST rd bias |rd_adj| where applicable, and runs the
// encode-breakout check. Returns the (possibly adjusted) RD cost.
466 static int evaluate_inter_mode(unsigned int *sse, int rate2, int *distortion2,
467 VP8_COMP *cpi, MACROBLOCK *x, int rd_adj) {
468 MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
469 int_mv mv = x->e_mbd.mode_info_context->mbmi.mv;
471 int denoise_aggressive = 0;
472 /* Exit early and don't compute the distortion if this macroblock
473 * is marked inactive. */
474 if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
// For non-NEWMV modes (or when sub-pel search is off) the prediction error
// has not been computed yet; do it here.
481 if ((this_mode != NEWMV) || !(cpi->sf.half_pixel_search) ||
482 cpi->common.full_pixel == 1) {
484 vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], sse, mv);
487 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2);
489 #if CONFIG_TEMPORAL_DENOISING
490 if (cpi->oxcf.noise_sensitivity > 0) {
492 (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) ? 1 : 0;
496 // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame.
497 // TODO: We should also add condition on distance of closest to current.
498 if (!cpi->oxcf.screen_content_mode && this_mode == ZEROMV &&
499 x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
500 (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME))) {
501 // No adjustment if block is considered to be skin area.
502 if (x->is_skin) rd_adj = 100;
// rd_adj is a percentage; apply it in 64-bit to avoid overflow.
504 this_rd = (int)(((int64_t)this_rd) * rd_adj / 100);
507 check_for_encode_breakout(*sse, x);
511 static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x,
512 int *rd_adjustment) {
513 MODE_INFO *mic = x->e_mbd.mode_info_context;
514 int_mv mv_l, mv_a, mv_al;
515 int local_motion_check = 0;
517 if (cpi->lf_zeromv_pct > 40) {
522 if (mic->mbmi.ref_frame != INTRA_FRAME) {
523 if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) {
524 local_motion_check++;
529 mic -= x->e_mbd.mode_info_stride;
530 mv_al = mic->mbmi.mv;
532 if (mic->mbmi.ref_frame != INTRA_FRAME) {
533 if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) {
534 local_motion_check++;
542 if (mic->mbmi.ref_frame != INTRA_FRAME) {
543 if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) {
544 local_motion_check++;
548 if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) &&
549 local_motion_check > 0) ||
550 local_motion_check > 2) {
552 } else if (local_motion_check > 0) {
558 void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
559 int recon_uvoffset, int *returnrate,
560 int *returndistortion, int *returnintra, int mb_row,
562 BLOCK *b = &x->block[0];
563 BLOCKD *d = &x->e_mbd.block[0];
564 MACROBLOCKD *xd = &x->e_mbd;
565 MB_MODE_INFO best_mbmode;
567 int_mv best_ref_mv_sb[2];
568 int_mv mode_mv_sb[2][MB_MODE_COUNT];
571 MB_PREDICTION_MODE this_mode;
574 int best_rd = INT_MAX;
575 int rd_adjustment = 100;
576 int best_intra_rd = INT_MAX;
581 int bestsme = INT_MAX;
582 int best_mode_index = 0;
583 unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX;
584 #if CONFIG_TEMPORAL_DENOISING
585 unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX;
588 int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
590 #if CONFIG_MULTI_RES_ENCODING
591 int dissim = INT_MAX;
592 int parent_ref_frame = 0;
593 int_mv parent_ref_mv;
594 MB_PREDICTION_MODE parent_mode = 0;
595 int parent_ref_valid = 0;
600 int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
602 /* search range got from mv_pred(). It uses step_param levels. (0-7) */
605 unsigned char *plane[4][3];
606 int ref_frame_map[4];
608 int dot_artifact_candidate = 0;
609 get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
611 // If the current frame is using LAST as a reference, check for
612 // biasing the mode selection for dot artifacts.
613 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
614 unsigned char *target_y = x->src.y_buffer;
615 unsigned char *target_u = x->block[16].src + *x->block[16].base_src;
616 unsigned char *target_v = x->block[20].src + *x->block[20].base_src;
617 int stride = x->src.y_stride;
618 int stride_uv = x->block[16].src_stride;
619 #if CONFIG_TEMPORAL_DENOISING
620 if (cpi->oxcf.noise_sensitivity) {
621 const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
623 cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
624 stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
626 target_u = cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer +
628 target_v = cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer +
630 stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
634 dot_artifact_candidate = check_dot_artifact_candidate(
635 cpi, x, target_y, stride, plane[LAST_FRAME][0], mb_row, mb_col, 0);
636 // If not found in Y channel, check UV channel.
637 if (!dot_artifact_candidate) {
638 dot_artifact_candidate = check_dot_artifact_candidate(
639 cpi, x, target_u, stride_uv, plane[LAST_FRAME][1], mb_row, mb_col, 1);
640 if (!dot_artifact_candidate) {
641 dot_artifact_candidate = check_dot_artifact_candidate(
642 cpi, x, target_v, stride_uv, plane[LAST_FRAME][2], mb_row, mb_col,
648 #if CONFIG_MULTI_RES_ENCODING
649 // |parent_ref_valid| will be set here if potentially we can do mv reuse for
650 // this higher resol (|cpi->oxcf.mr_encoder_id| > 0) frame.
651 // |parent_ref_valid| may be reset depending on |parent_ref_frame| for
652 // the current macroblock below.
653 parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
654 if (parent_ref_valid) {
657 get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame, &parent_mode,
658 &parent_ref_mv, mb_row, mb_col);
660 /* TODO(jkoleszar): The references available (ref_frame_flags) to the
661 * lower res encoder should match those available to this encoder, but
662 * there seems to be a situation where this mismatch can happen in the
663 * case of frame dropping and temporal layers. For example,
664 * GOLD being disallowed in ref_frame_flags, but being returned as
667 * In this event, take the conservative approach of disabling the
668 * lower res info for this MB.
672 // Note availability for mv reuse is only based on last and golden.
673 if (parent_ref_frame == LAST_FRAME)
674 parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME);
675 else if (parent_ref_frame == GOLDEN_FRAME)
676 parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME);
678 // assert(!parent_ref_frame || parent_ref_flag);
680 // If |parent_ref_frame| did not match either last or golden then
681 // shut off mv reuse.
682 if (parent_ref_frame && !parent_ref_flag) parent_ref_valid = 0;
684 // Don't do mv reuse since we want to allow for another mode besides
685 // ZEROMV_LAST to remove dot artifact.
686 if (dot_artifact_candidate) parent_ref_valid = 0;
690 // Check if current macroblock is in skin area.
692 if (!cpi->oxcf.screen_content_mode) {
693 int block_index = mb_row * cpi->common.mb_cols + mb_col;
694 x->is_skin = compute_skin_block(
695 x->src.y_buffer, x->src.u_buffer, x->src.v_buffer, x->src.y_stride,
696 x->src.uv_stride, cpi->consec_zero_last[block_index], 0);
698 #if CONFIG_TEMPORAL_DENOISING
699 if (cpi->oxcf.noise_sensitivity) {
700 // Under aggressive denoising mode, should we use skin map to reduce
702 // and ZEROMV bias? Will need to revisit the accuracy of this detection for
703 // very noisy input. For now keep this as is (i.e., don't turn it off).
704 // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive)
709 mode_mv = mode_mv_sb[sign_bias];
710 best_ref_mv.as_int = 0;
711 memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
712 memset(&best_mbmode, 0, sizeof(best_mbmode));
714 /* Setup search priorities */
715 #if CONFIG_MULTI_RES_ENCODING
716 if (parent_ref_valid && parent_ref_frame && dissim < 8) {
717 ref_frame_map[0] = -1;
718 ref_frame_map[1] = parent_ref_frame;
719 ref_frame_map[2] = -1;
720 ref_frame_map[3] = -1;
723 get_reference_search_order(cpi, ref_frame_map);
725 /* Check to see if there is at least 1 valid reference frame that we need
726 * to calculate near_mvs.
728 if (ref_frame_map[1] > 0) {
729 sign_bias = vp8_find_near_mvs_bias(
730 &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb,
731 mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias);
733 mode_mv = mode_mv_sb[sign_bias];
734 best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
737 /* Count of the number of MBs tested so far this frame */
738 x->mbs_tested_so_far++;
740 *returnintra = INT_MAX;
743 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
745 /* If the frame has big static background and current MB is in low
746 * motion area, its mode decision is biased to ZEROMV mode.
747 * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12).
748 * At such speed settings, ZEROMV is already heavily favored.
750 if (cpi->Speed < 12) {
751 calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
754 #if CONFIG_TEMPORAL_DENOISING
755 if (cpi->oxcf.noise_sensitivity) {
756 rd_adjustment = (int)(rd_adjustment *
757 cpi->denoiser.denoise_pars.pickmode_mv_bias / 100);
761 if (dot_artifact_candidate) {
762 // Bias against ZEROMV_LAST mode.
766 /* if we encode a new mv this is important
767 * find the best new motion vector
769 for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
771 int this_rd = INT_MAX;
772 int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
774 if (best_rd <= x->rd_threshes[mode_index]) continue;
776 if (this_ref_frame < 0) continue;
778 x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
780 /* everything but intra */
781 if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
782 x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
783 x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
784 x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
786 if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) {
787 sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
788 mode_mv = mode_mv_sb[sign_bias];
789 best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
792 #if CONFIG_MULTI_RES_ENCODING
793 if (parent_ref_valid) {
794 if (vp8_mode_order[mode_index] == NEARESTMV &&
795 mode_mv[NEARESTMV].as_int == 0)
797 if (vp8_mode_order[mode_index] == NEARMV && mode_mv[NEARMV].as_int == 0)
800 if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV &&
801 best_ref_mv.as_int == 0)
803 else if (vp8_mode_order[mode_index] == NEWMV && dissim == 0 &&
804 best_ref_mv.as_int == parent_ref_mv.as_int)
810 /* Check to see if the testing frequency for this mode is at its max
811 * If so then prevent it from being tested and increase the threshold
813 if (x->mode_test_hit_counts[mode_index] &&
814 (cpi->mode_check_freq[mode_index] > 1)) {
815 if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
816 x->mode_test_hit_counts[mode_index])) {
817 /* Increase the threshold for coding this mode to make it less
818 * likely to be chosen */
819 x->rd_thresh_mult[mode_index] += 4;
821 if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
822 x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
825 x->rd_threshes[mode_index] =
826 (cpi->rd_baseline_thresh[mode_index] >> 7) *
827 x->rd_thresh_mult[mode_index];
832 /* We have now reached the point where we are going to test the current
833 * mode so increment the counter for the number of times it has been
835 x->mode_test_hit_counts[mode_index]++;
840 this_mode = vp8_mode_order[mode_index];
842 x->e_mbd.mode_info_context->mbmi.mode = this_mode;
843 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
845 /* Work out the cost associated with selecting the reference frame */
846 frame_cost = x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
849 /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
850 * unless ARNR filtering is enabled in which case we want
851 * an unfiltered alternative */
852 if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
853 if (this_mode != ZEROMV ||
854 x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
861 /* Pass best so far to pick_intra4x4mby_modes to use as breakout */
862 distortion2 = best_rd_sse;
863 pick_intra4x4mby_modes(x, &rate, &distortion2);
865 if (distortion2 == INT_MAX) {
869 distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
870 x->e_mbd.predictor, 16, &sse);
871 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
873 if (this_rd < best_intra_rd) {
874 best_intra_rd = this_rd;
875 *returnintra = distortion2;
883 /* Split MV modes currently not supported when RD is not enabled. */
890 vp8_build_intra_predictors_mby_s(
891 xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1,
892 xd->dst.y_stride, xd->predictor, 16);
893 distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride,
894 x->e_mbd.predictor, 16, &sse);
895 rate2 += x->mbmode_cost[x->e_mbd.frame_type]
896 [x->e_mbd.mode_info_context->mbmi.mode];
897 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
899 if (this_rd < best_intra_rd) {
900 best_intra_rd = this_rd;
901 *returnintra = distortion2;
910 int sadpb = x->sadperbit16;
913 int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
914 int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
915 int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
916 int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;
918 int tmp_col_min = x->mv_col_min;
919 int tmp_col_max = x->mv_col_max;
920 int tmp_row_min = x->mv_row_min;
921 int tmp_row_max = x->mv_row_max;
923 int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1;
925 /* Further step/diamond searches as necessary */
926 step_param = cpi->sf.first_step + speed_adjust;
928 #if CONFIG_MULTI_RES_ENCODING
929 /* If lower-res frame is not available for mv reuse (because of
930 frame dropping or different temporal layer pattern), then higher
931 resol encoder does motion search without any previous knowledge.
932 Also, since last frame motion info is not stored, then we can not
933 use improved_mv_pred. */
934 if (cpi->oxcf.mr_encoder_id) sf_improved_mv_pred = 0;
936 // Only use parent MV as predictor if this candidate reference frame
937 // (|this_ref_frame|) is equal to |parent_ref_frame|.
938 if (parent_ref_valid && (parent_ref_frame == this_ref_frame)) {
939 /* Use parent MV as predictor. Adjust search range
942 mvp.as_int = parent_ref_mv.as_int;
943 mvp_full.as_mv.col = parent_ref_mv.as_mv.col >> 3;
944 mvp_full.as_mv.row = parent_ref_mv.as_mv.row >> 3;
948 else if (dissim <= 128)
955 if (sf_improved_mv_pred) {
957 vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
961 vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
962 x->e_mbd.mode_info_context->mbmi.ref_frame,
963 cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
966 /* adjust search range according to sr from mv prediction */
967 if (sr > step_param) step_param = sr;
969 mvp_full.as_mv.col = mvp.as_mv.col >> 3;
970 mvp_full.as_mv.row = mvp.as_mv.row >> 3;
972 mvp.as_int = best_ref_mv.as_int;
973 mvp_full.as_mv.col = best_ref_mv.as_mv.col >> 3;
974 mvp_full.as_mv.row = best_ref_mv.as_mv.row >> 3;
978 #if CONFIG_MULTI_RES_ENCODING
979 if (parent_ref_valid && (parent_ref_frame == this_ref_frame) &&
981 VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row),
982 abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= 4) {
983 d->bmi.mv.as_int = mvp_full.as_int;
984 mode_mv[NEWMV].as_int = mvp_full.as_int;
986 cpi->find_fractional_mv_step(
987 x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
988 &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
992 /* Get intersection of UMV window and valid MV window to
993 * reduce # of checks in diamond search. */
994 if (x->mv_col_min < col_min) x->mv_col_min = col_min;
995 if (x->mv_col_max > col_max) x->mv_col_max = col_max;
996 if (x->mv_row_min < row_min) x->mv_row_min = row_min;
997 if (x->mv_row_max > row_max) x->mv_row_max = row_max;
1002 : (cpi->sf.max_step_search_steps - 1 - step_param);
1004 if (cpi->sf.search_method == HEX) {
1005 #if CONFIG_MULTI_RES_ENCODING
1006 /* TODO: In higher-res pick_inter_mode, step_param is used to
1007 * modify hex search range. Here, set step_param to 0 not to
1008 * change the behavior in lowest-resolution encoder.
1009 * Will improve it later.
1011 /* Set step_param to 0 to ensure large-range motion search
1012 * when mv reuse if not valid (i.e. |parent_ref_valid| = 0),
1013 * or if this candidate reference frame (|this_ref_frame|) is
1014 * not equal to |parent_ref_frame|.
1016 if (!parent_ref_valid || (parent_ref_frame != this_ref_frame))
1019 bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param,
1020 sadpb, &cpi->fn_ptr[BLOCK_16X16],
1021 x->mvsadcost, x->mvcost, &best_ref_mv);
1022 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1024 bestsme = cpi->diamond_search_sad(
1025 x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00,
1026 &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
1027 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1029 /* Further step/diamond searches as necessary */
1033 while (n < further_steps) {
1039 thissme = cpi->diamond_search_sad(
1040 x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb,
1041 &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
1042 if (thissme < bestsme) {
1044 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1046 d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
1052 x->mv_col_min = tmp_col_min;
1053 x->mv_col_max = tmp_col_max;
1054 x->mv_row_min = tmp_row_min;
1055 x->mv_row_max = tmp_row_max;
1057 if (bestsme < INT_MAX) {
1058 cpi->find_fractional_mv_step(
1059 x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
1060 &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
1064 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1065 // The clamp below is not necessary from the perspective
1066 // of VP8 bitstream, but is added to improve ChromeCast
1067 // mirroring's robustness. Please do not remove.
1068 vp8_clamp_mv2(&mode_mv[this_mode], xd);
1071 vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
1076 if (mode_mv[this_mode].as_int == 0) continue;
1080 /* Trap vectors that reach beyond the UMV borders
1081 * Note that ALL New MV, Nearest MV Near MV and Zero MV code drops
1082 * through to this point because of the lack of break statements
1083 * in the previous two cases.
1085 if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
1086 ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
1087 ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
1088 ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
1092 rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
1093 x->e_mbd.mode_info_context->mbmi.mv.as_int = mode_mv[this_mode].as_int;
1094 this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x,
1101 #if CONFIG_TEMPORAL_DENOISING
1102 if (cpi->oxcf.noise_sensitivity) {
1103 /* Store for later use by denoiser. */
1104 // Don't denoise with GOLDEN or ALTREF if they are old reference
1105 // frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in past).
1106 int skip_old_reference = ((this_ref_frame != LAST_FRAME) &&
1107 (cpi->common.current_video_frame -
1108 cpi->current_ref_frames[this_ref_frame] >
1109 MAX_GF_ARF_DENOISE_RANGE))
1112 if (this_mode == ZEROMV && sse < zero_mv_sse && !skip_old_reference) {
1114 x->best_zeromv_reference_frame =
1115 x->e_mbd.mode_info_context->mbmi.ref_frame;
1118 // Store the best NEWMV in x for later use in the denoiser.
1119 if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse &&
1120 !skip_old_reference) {
1122 x->best_sse_inter_mode = NEWMV;
1123 x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
1124 x->need_to_clamp_best_mvs =
1125 x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
1126 x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame;
1131 if (this_rd < best_rd || x->skip) {
1132 /* Note index of best mode */
1133 best_mode_index = mode_index;
1135 *returnrate = rate2;
1136 *returndistortion = distortion2;
1139 memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1140 sizeof(MB_MODE_INFO));
1142 /* Testing this mode gave rise to an improvement in best error
1143 * score. Lower threshold a bit for next time
1145 x->rd_thresh_mult[mode_index] =
1146 (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2))
1147 ? x->rd_thresh_mult[mode_index] - 2
1149 x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
1150 x->rd_thresh_mult[mode_index];
1153 /* If the mode did not help improve the best error case then raise the
1154 * threshold for testing that mode next time around.
1157 x->rd_thresh_mult[mode_index] += 4;
1159 if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
1160 x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
1163 x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
1164 x->rd_thresh_mult[mode_index];
1170 /* Reduce the activation RD thresholds for the best choice mode */
1171 if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
1172 (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
1173 int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
1175 x->rd_thresh_mult[best_mode_index] =
1176 (x->rd_thresh_mult[best_mode_index] >=
1177 (MIN_THRESHMULT + best_adjustment))
1178 ? x->rd_thresh_mult[best_mode_index] - best_adjustment
1180 x->rd_threshes[best_mode_index] =
1181 (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
1182 x->rd_thresh_mult[best_mode_index];
1186 int this_rdbin = (*returndistortion >> 7);
1188 if (this_rdbin >= 1024) {
1192 x->error_bins[this_rdbin]++;
1195 #if CONFIG_TEMPORAL_DENOISING
1196 if (cpi->oxcf.noise_sensitivity) {
1197 int block_index = mb_row * cpi->common.mb_cols + mb_col;
1200 if (x->best_sse_inter_mode == DC_PRED) {
1201 /* No best MV found. */
1202 x->best_sse_inter_mode = best_mbmode.mode;
1203 x->best_sse_mv = best_mbmode.mv;
1204 x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
1205 x->best_reference_frame = best_mbmode.ref_frame;
1206 best_sse = best_rd_sse;
1208 // For non-skin blocks that have selected ZEROMV for this current frame,
1209 // and have been selecting ZEROMV_LAST (on the base layer frame) at
1210 // least ~20 consecutive past frames in a row, label the block for
1211 // possible increase in denoising strength. We also condition this
1212 // labeling on there being significant denoising in the scene
1213 if (cpi->oxcf.noise_sensitivity == 4) {
1214 if (cpi->denoiser.nmse_source_diff >
1215 70 * cpi->denoiser.threshold_aggressive_mode / 100) {
1219 if (cpi->mse_source_denoised > 1000) is_noisy = 1;
1221 x->increase_denoising = 0;
1222 if (!x->is_skin && x->best_sse_inter_mode == ZEROMV &&
1223 (x->best_reference_frame == LAST_FRAME ||
1224 x->best_reference_frame == cpi->closest_reference_frame) &&
1225 cpi->consec_zero_last[block_index] >= 20 && is_noisy) {
1226 x->increase_denoising = 1;
1228 x->denoise_zeromv = 0;
1229 vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
1230 recon_yoffset, recon_uvoffset, &cpi->common.lf_info,
1231 mb_row, mb_col, block_index,
1232 cpi->consec_zero_last_mvbias[block_index]);
1234 // Reevaluate ZEROMV after denoising: for large noise content
1235 // (i.e., cpi->mse_source_denoised is above threshold), do this for all
1236 // blocks that did not pick ZEROMV as best mode but are using ZEROMV
1237 // for denoising. Otherwise, always re-evaluate for blocks that picked
1238 // INTRA mode as best mode.
1239 // Avoid blocks that have been biased against ZERO_LAST
1240 // (i.e., dot artifact candidate blocks).
1241 reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) ||
1242 (best_mbmode.mode != ZEROMV && x->denoise_zeromv &&
1243 cpi->mse_source_denoised > 2000);
1244 if (!dot_artifact_candidate && reevaluate &&
1245 x->best_zeromv_reference_frame != INTRA_FRAME) {
1247 int this_ref_frame = x->best_zeromv_reference_frame;
1248 rd_adjustment = 100;
1250 x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts);
1253 /* set up the proper prediction buffers for the frame */
1254 x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
1255 x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
1256 x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
1257 x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
1259 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1260 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1261 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1263 evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, rd_adjustment);
1265 if (this_rd < best_rd) {
1266 memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1267 sizeof(MB_MODE_INFO));
1273 if (cpi->is_src_frame_alt_ref &&
1274 (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
1275 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1276 x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
1277 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1278 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1279 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
1280 (cpi->common.mb_no_coeff_skip);
1281 x->e_mbd.mode_info_context->mbmi.partitioning = 0;
1286 /* set to the best mb mode, this copy can be skip if x->skip since it
1287 * already has the right content */
1289 memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
1290 sizeof(MB_MODE_INFO));
1293 if (best_mbmode.mode <= B_PRED) {
1294 /* set mode_info_context->mbmi.uv_mode */
1295 pick_intra_mbuv_mode(x);
1299 cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
1300 best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
1303 update_mvcount(x, &best_ref_mv);
1306 void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) {
1307 int error4x4, error16x16 = INT_MAX;
1308 int rate, best_rate = 0, distortion, best_sse;
1309 MB_PREDICTION_MODE mode, best_mode = DC_PRED;
1312 BLOCK *b = &x->block[0];
1313 MACROBLOCKD *xd = &x->e_mbd;
1315 xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
1317 pick_intra_mbuv_mode(x);
1319 for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
1320 xd->mode_info_context->mbmi.mode = mode;
1321 vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
1322 xd->dst.y_buffer - 1, xd->dst.y_stride,
1324 distortion = vpx_variance16x16(*(b->base_src), b->src_stride, xd->predictor,
1326 rate = x->mbmode_cost[xd->frame_type][mode];
1327 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1329 if (error16x16 > this_rd) {
1330 error16x16 = this_rd;
1336 xd->mode_info_context->mbmi.mode = best_mode;
1338 error4x4 = pick_intra4x4mby_modes(x, &rate, &best_sse);
1339 if (error4x4 < error16x16) {
1340 xd->mode_info_context->mbmi.mode = B_PRED;