2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
13 #include "vpx_config.h"
14 #include "./vpx_dsp_rtcd.h"
16 #include "modecosts.h"
17 #include "encodeintra.h"
18 #include "vp8/common/common.h"
19 #include "vp8/common/entropymode.h"
20 #include "pickinter.h"
21 #include "vp8/common/findnearmv.h"
23 #include "vp8/common/reconinter.h"
24 #include "vp8/common/reconintra4x4.h"
25 #include "vpx_dsp/variance.h"
28 #include "vpx_dsp/vpx_dsp_common.h"
29 #include "vpx_mem/vpx_mem.h"
30 #if CONFIG_TEMPORAL_DENOISING
31 #include "denoising.h"
/* Globals defined in other encoder translation units. */
35 extern unsigned int cnt_pm;
/* Mode / reference-frame search orderings shared with the RD mode search. */
38 extern const int vp8_ref_frame_order[MAX_MODES];
39 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
41 // Fixed point implementation of a skin color classifier. Skin color
42 // is model by a Gaussian distribution in the CbCr color space.
43 // See ../../test/skin_color_detector_test.cc where the reference
44 // skin color classifier is defined.
// Fixed-point skin color model parameters.
static const int skin_mean[2] = { 7463, 9614 };                 // q6
static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 };  // q16
static const int skin_threshold = 1570636;                      // q18

/* Evaluates the Mahalanobis distance measure for the input CbCr values.
 * cb, cr: chroma averages in the 8-bit pixel range.
 * The squared chroma differences are q12; rounding-shifting by 10 gives q2,
 * and multiplying by the q16 inverse covariance yields a q18 result that
 * callers compare directly against |skin_threshold| (q18).
 * Restored from garbled source: the opening brace and the return statement
 * were missing.
 */
static int evaluate_skin_color_difference(int cb, int cr)
{
    const int cb_q6 = cb << 6;
    const int cr_q6 = cr << 6;
    const int cb_diff_q12 = (cb_q6 - skin_mean[0]) * (cb_q6 - skin_mean[0]);
    const int cbcr_diff_q12 = (cb_q6 - skin_mean[0]) * (cr_q6 - skin_mean[1]);
    const int cr_diff_q12 = (cr_q6 - skin_mean[1]) * (cr_q6 - skin_mean[1]);
    const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
    const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
    const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
    const int skin_diff = skin_inv_cov[0] * cb_diff_q2 +
                          skin_inv_cov[1] * cbcr_diff_q2 +
                          skin_inv_cov[2] * cbcr_diff_q2 +
                          skin_inv_cov[3] * cr_diff_q2;
    return skin_diff;
}
/* Returns the largest absolute difference between the corner sample at
 * (offsetx, offsety) and its three neighbors one step in the (sgnx, sgny)
 * direction.  |offsetx| selects the row and |offsety| the column.  Used as a
 * simple corner-gradient measure by the dot-artifact check below.
 * Restored from garbled source: the opening brace was missing.
 */
static int macroblock_corner_grad(unsigned char* signal, int stride,
                                  int offsetx, int offsety, int sgnx,
                                  int sgny)
{
    int y1 = signal[offsetx * stride + offsety];
    int y2 = signal[offsetx * stride + offsety + sgny];
    int y3 = signal[(offsetx + sgnx) * stride + offsety];
    int y4 = signal[(offsetx + sgnx) * stride + offsety + sgny];
    return VPXMAX(VPXMAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4));
}
79 static int check_dot_artifact_candidate(VP8_COMP *cpi,
81 unsigned char *target_last,
83 unsigned char* last_ref,
90 unsigned int max_num = (cpi->common.MBs) / 10;
93 int index = mb_row * cpi->common.mb_cols + mb_col;
94 // Threshold for #consecutive (base layer) frames using zero_last mode.
100 if (cpi->oxcf.number_of_layers > 1)
104 x->zero_last_dot_suppress = 0;
105 // Blocks on base layer frames that have been using ZEROMV_LAST repeatedly
106 // (i.e, at least |x| consecutive frames are candidates for increasing the
107 // rd adjustment for zero_last mode.
108 // Only allow this for at most |max_num| blocks per frame.
109 // Don't allow this for screen content input.
110 if (cpi->current_layer == 0 &&
111 cpi->consec_zero_last_mvbias[index] > num_frames &&
112 x->mbs_zero_last_dot_suppress < max_num &&
113 !cpi->oxcf.screen_content_mode)
115 // If this block is checked here, label it so we don't check it again until
116 // ~|x| framaes later.
117 x->zero_last_dot_suppress = 1;
118 // Dot artifact is noticeable as strong gradient at corners of macroblock,
119 // for flat areas. As a simple detector for now, we look for a high
120 // corner gradient on last ref, and a smaller gradient on source.
121 // Check 4 corners, return if any satisfy condition.
123 grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1);
124 grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1);
125 if (grad_last >= threshold1 && grad_source <= threshold2)
127 x->mbs_zero_last_dot_suppress++;
131 grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1);
132 grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1);
133 if (grad_last >= threshold1 && grad_source <= threshold2)
135 x->mbs_zero_last_dot_suppress++;
139 grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1);
140 grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1);
141 if (grad_last >= threshold1 && grad_source <= threshold2)
143 x->mbs_zero_last_dot_suppress++;
147 grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1);
148 grad_source = macroblock_corner_grad(target_last, stride, shift, shift, -1, -1);
149 if (grad_last >= threshold1 && grad_source <= threshold2)
151 x->mbs_zero_last_dot_suppress++;
159 // Checks if the input yCbCr values corresponds to skin color.
160 static int is_skin_color(int y, int cb, int cr)
162 if (y < 40 || y > 220)
166 return (evaluate_skin_color_difference(cb, cr) < skin_threshold);
169 int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
170 int_mv *bestmv, int_mv *ref_mv,
172 const vp8_variance_fn_ptr_t *vfp,
173 int *mvcost[2], int *distortion,
179 (void) error_per_bit;
185 bestmv->as_mv.row <<= 3;
186 bestmv->as_mv.col <<= 3;
191 int vp8_get_inter_mbpred_error(MACROBLOCK *mb,
192 const vp8_variance_fn_ptr_t *vfp,
197 BLOCK *b = &mb->block[0];
198 BLOCKD *d = &mb->e_mbd.block[0];
199 unsigned char *what = (*(b->base_src) + b->src);
200 int what_stride = b->src_stride;
201 int pre_stride = mb->e_mbd.pre.y_stride;
202 unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset ;
203 int in_what_stride = pre_stride;
204 int xoffset = this_mv.as_mv.col & 7;
205 int yoffset = this_mv.as_mv.row & 7;
207 in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3);
209 if (xoffset | yoffset)
211 return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what, what_stride, sse);
215 return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
220 static int get_prediction_error(BLOCK *be, BLOCKD *b)
224 sptr = (*(be->base_src) + be->src);
227 return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
/* Picks the best 4x4 intra prediction mode for one sub-block by RD cost and
 * encodes the sub-block with the winning mode.
 * NOTE(review): this excerpt is incomplete -- the MACROBLOCK/ib parameters,
 * the rate/distortion/this_rd locals, the bestrate output and several braces
 * are missing from view; comments describe only the visible logic.
 */
231 static int pick_intra4x4block(
234 B_PREDICTION_MODE *best_mode,
235 const int *mode_costs,
241 BLOCKD *b = &x->e_mbd.block[ib];
242 BLOCK *be = &x->block[ib];
243 int dst_stride = x->e_mbd.dst.y_stride;
244 unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;
245 B_PREDICTION_MODE mode;
246 int best_rd = INT_MAX;
/* Reconstructed neighbor context: row above, left column, top-left sample. */
250 unsigned char *Above = dst - dst_stride;
251 unsigned char *yleft = dst - 1;
252 unsigned char top_left = Above[-1];
/* Evaluate every 4x4 mode from B_DC_PRED through B_HE_PRED and keep the
 * one with the lowest rate-distortion cost. */
254 for (mode = B_DC_PRED; mode <= B_HE_PRED; mode++)
258 rate = mode_costs[mode];
260 vp8_intra4x4_predict(Above, yleft, dst_stride, mode,
261 b->predictor, 16, top_left);
262 distortion = get_prediction_error(be, b);
263 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
265 if (this_rd < best_rd)
268 *bestdistortion = distortion;
/* Commit the winning mode and encode the sub-block in place. */
274 b->bmi.as_mode = *best_mode;
275 vp8_encode_intra4x4block(x, ib);
/* Picks a 4x4 intra mode for each of the 16 luma sub-blocks, accumulating
 * rate and distortion, with an early-out when the running distortion already
 * exceeds the best so far.
 * NOTE(review): this excerpt is incomplete -- the parameter list (mb,
 * best_rate/best_dist outputs), the distortion/error accumulators, loop
 * braces and the return statement are missing from view.
 */
280 static int pick_intra4x4mby_modes
287 MACROBLOCKD *const xd = &mb->e_mbd;
/* Start with the cost of signalling B_PRED at the macroblock level. */
289 int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
292 const int *bmode_costs;
/* Make the above-right pixels available to sub-blocks on the right edge. */
294 intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
/* Inter frames use context-free bmode costs; key frames switch to
 * neighbor-contexted costs inside the loop below. */
296 bmode_costs = mb->inter_bmode_costs;
298 for (i = 0; i < 16; i++)
300 MODE_INFO *const mic = xd->mode_info_context;
301 const int mis = xd->mode_info_stride;
303 B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
304 int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d);
/* On key frames the sub-block mode cost depends on the above/left
 * neighbor modes. */
306 if (mb->e_mbd.frame_type == KEY_FRAME)
308 const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
309 const B_PREDICTION_MODE L = left_block_mode(mic, i);
311 bmode_costs = mb->bmode_costs[A][L];
315 pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d);
319 mic->bmi[i].as_mode = best_mode;
321 /* Break out case where we have already exceeded best so far value
324 if (distortion > *best_dist)
332 *best_dist = distortion;
333 error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
/* Early-out path: mark the attempt as not viable. */
337 *best_dist = INT_MAX;
/* Picks the chroma (8x8 U and V) intra prediction mode by computing the
 * summed squared prediction error of DC/V/H/TM predictors directly against
 * the source, and stores the winner in the macroblock's mode info.
 * NOTE(review): this excerpt is incomplete -- the i/j/diff/u_p/v_p locals,
 * the Uaverage/Vaverage/shift/expected_udc/expected_vdc declarations, the
 * up_available checks and several braces are missing from view.
 */
344 static void pick_intra_mbuv_mode(MACROBLOCK *mb)
347 MACROBLOCKD *x = &mb->e_mbd;
/* Reconstructed neighbors: rows above and columns left of the U/V blocks. */
348 unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
349 unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
350 unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
351 unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
352 int uvsrc_stride = mb->block[16].src_stride;
353 unsigned char uleft_col[8];
354 unsigned char vleft_col[8];
355 unsigned char utop_left = uabove_row[-1];
356 unsigned char vtop_left = vabove_row[-1];
/* One accumulated SSE per candidate mode (DC/V/H/TM). */
364 int pred_error[4] = {0, 0, 0, 0}, best_error = INT_MAX;
365 MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
/* Gather the left neighbor columns. */
368 for (i = 0; i < 8; i++)
370 uleft_col[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
371 vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
/* Compute the DC predictor: average of available above/left neighbors
 * (128 when neither edge is available -- handling not visible here). */
374 if (!x->up_available && !x->left_available)
386 for (i = 0; i < 8; i++)
388 Uaverage += uabove_row[i];
389 Vaverage += vabove_row[i];
396 if (x->left_available)
398 for (i = 0; i < 8; i++)
400 Uaverage += uleft_col[i];
401 Vaverage += vleft_col[i];
/* Rounded average; |shift| reflects how many neighbor rows/cols summed. */
408 expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
409 expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
/* Accumulate per-mode SSE over the 8x8 block; TM uses left+above-topleft. */
413 for (i = 0; i < 8; i++)
415 for (j = 0; j < 8; j++)
418 int predu = uleft_col[i] + uabove_row[j] - utop_left;
419 int predv = vleft_col[i] + vabove_row[j] - vtop_left;
438 diff = u_p - expected_udc;
439 pred_error[DC_PRED] += diff * diff;
440 diff = v_p - expected_vdc;
441 pred_error[DC_PRED] += diff * diff;
444 diff = u_p - uabove_row[j];
445 pred_error[V_PRED] += diff * diff;
446 diff = v_p - vabove_row[j];
447 pred_error[V_PRED] += diff * diff;
450 diff = u_p - uleft_col[i];
451 pred_error[H_PRED] += diff * diff;
452 diff = v_p - vleft_col[i];
453 pred_error[H_PRED] += diff * diff;
457 pred_error[TM_PRED] += diff * diff;
459 pred_error[TM_PRED] += diff * diff;
464 usrc_ptr += uvsrc_stride;
465 vsrc_ptr += uvsrc_stride;
/* Blocks 18/22 hold the lower half of the U/V source planes. */
469 usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
470 vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
/* Select the mode with the smallest accumulated prediction error. */
478 for (i = DC_PRED; i <= TM_PRED; i++)
480 if (best_error > pred_error[i])
482 best_error = pred_error[i];
483 best_mode = (MB_PREDICTION_MODE)i;
488 mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
492 static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv)
494 MACROBLOCKD *xd = &x->e_mbd;
495 /* Split MV modes currently not supported when RD is nopt enabled,
496 * therefore, only need to modify MVcount in NEWMV mode. */
497 if (xd->mode_info_context->mbmi.mode == NEWMV)
499 x->MVcount[0][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.row -
500 best_ref_mv->as_mv.row) >> 1)]++;
501 x->MVcount[1][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.col -
502 best_ref_mv->as_mv.col) >> 1)]++;
507 #if CONFIG_MULTI_RES_ENCODING
/* Fetches the co-located macroblock's mode/reference/motion result from the
 * lower-resolution encoder so the higher-resolution encoder can reuse it,
 * scaling the motion vector by the down-sampling factor.
 * NOTE(review): this excerpt is incomplete -- the opening brace, the brace
 * structure around the scaling/clamping and the dissim adjustment body are
 * missing from view.
 */
509 void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd, int *dissim,
510 int *parent_ref_frame,
511 MB_PREDICTION_MODE *parent_mode,
512 int_mv *parent_ref_mv, int mb_row, int mb_col)
514 LOWER_RES_MB_INFO* store_mode_info
515 = ((LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info)->mb_info;
516 unsigned int parent_mb_index;
518 /* Consider different down_sampling_factor. */
520 /* TODO: Removed the loop that supports special down_sampling_factor
521 * such as 2, 4, 8. Will revisit it if needed.
522 * Should also try using a look-up table to see if it helps
524 int parent_mb_row, parent_mb_col;
/* Map this MB's position to the co-located MB in the lower-res frame. */
526 parent_mb_row = mb_row*cpi->oxcf.mr_down_sampling_factor.den
527 /cpi->oxcf.mr_down_sampling_factor.num;
528 parent_mb_col = mb_col*cpi->oxcf.mr_down_sampling_factor.den
529 /cpi->oxcf.mr_down_sampling_factor.num;
530 parent_mb_index = parent_mb_row*cpi->mr_low_res_mb_cols + parent_mb_col;
533 /* Read lower-resolution mode & motion result from memory.*/
534 *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame;
535 *parent_mode = store_mode_info[parent_mb_index].mode;
536 *dissim = store_mode_info[parent_mb_index].dissim;
538 /* For highest-resolution encoder, adjust dissim value. Lower its quality
539 * for good performance. */
540 if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1))
/* Only inter parents carry a motion vector worth scaling and reusing. */
543 if(*parent_ref_frame != INTRA_FRAME)
545 /* Consider different down_sampling_factor.
546 * The result can be rounded to be more precise, but it takes more time.
/* Scale the parent mv up by num/den to this resolution. */
548 (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row
549 *cpi->oxcf.mr_down_sampling_factor.num
550 /cpi->oxcf.mr_down_sampling_factor.den;
551 (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col
552 *cpi->oxcf.mr_down_sampling_factor.num
553 /cpi->oxcf.mr_down_sampling_factor.den;
/* Keep the scaled mv inside this macroblock's legal UMV range. */
555 vp8_clamp_mv2(parent_ref_mv, xd);
560 static void check_for_encode_breakout(unsigned int sse, MACROBLOCK* x)
562 MACROBLOCKD *xd = &x->e_mbd;
564 unsigned int threshold = (xd->block[0].dequant[1]
565 * xd->block[0].dequant[1] >>4);
567 if(threshold < x->encode_breakout)
568 threshold = x->encode_breakout;
570 if (sse < threshold )
572 /* Check u and v to make sure skip is ok */
573 unsigned int sse2 = 0;
577 if (sse2 * 2 < x->encode_breakout)
/* Computes the RD cost of the currently selected inter mode, applying the
 * zero-motion rd adjustment (|rd_adj|, a percentage) and checking for the
 * encode breakout.
 * NOTE(review): this excerpt is incomplete -- the this_rd declaration, the
 * early-return for inactive macroblocks, parts of the distortion call,
 * braces and the return statement are missing from view.
 */
584 static int evaluate_inter_mode(unsigned int* sse, int rate2, int* distortion2,
585 VP8_COMP *cpi, MACROBLOCK *x, int rd_adj)
587 MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
588 int_mv mv = x->e_mbd.mode_info_context->mbmi.mv;
590 int denoise_aggressive = 0;
591 /* Exit early and don't compute the distortion if this macroblock
592 * is marked inactive. */
593 if (cpi->active_map_enabled && x->active_ptr[0] == 0)
/* Recompute the prediction error unless sub-pel search already did. */
601 if((this_mode != NEWMV) ||
602 !(cpi->sf.half_pixel_search) || cpi->common.full_pixel==1)
603 *distortion2 = vp8_get_inter_mbpred_error(x,
604 &cpi->fn_ptr[BLOCK_16X16],
607 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2);
609 #if CONFIG_TEMPORAL_DENOISING
610 if (cpi->oxcf.noise_sensitivity > 0) {
612 (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) ? 1 : 0;
616 // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame.
617 // TODO: We should also add condition on distance of closest to current.
618 if(!cpi->oxcf.screen_content_mode &&
619 this_mode == ZEROMV &&
620 x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME &&
621 (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME)))
623 // No adjustment if block is considered to be skin area.
/* Scale rd by the percentage adjustment; widen to 64-bit to avoid
 * overflow in the intermediate product. */
627 this_rd = ((int64_t)this_rd) * rd_adj / 100;
630 check_for_encode_breakout(*sse, x);
/* Biases mode decision toward ZEROMV for macroblocks in low-motion areas of
 * frames with largely static background, by counting near-zero motion
 * vectors among the left/above/above-left neighbors.
 * NOTE(review): this excerpt is incomplete -- the rd-adjustment output
 * parameter, the mv_l/mv_a assignments, the early return and the final
 * adjustment-value assignments are missing from view.
 */
634 static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x,
637 MODE_INFO *mic = x->e_mbd.mode_info_context;
638 int_mv mv_l, mv_a, mv_al;
639 int local_motion_check = 0;
/* Only apply the bias when most of the last frame chose ZEROMV. */
641 if (cpi->lf_zeromv_pct > 40)
/* Left neighbor: count it if its mv is within +/-1 pel (1/8-pel units). */
647 if (mic->mbmi.ref_frame != INTRA_FRAME)
648 if( abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8)
649 local_motion_check++;
/* Above-left neighbor. */
652 mic -= x->e_mbd.mode_info_stride;
653 mv_al = mic->mbmi.mv;
655 if (mic->mbmi.ref_frame != INTRA_FRAME)
656 if( abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8)
657 local_motion_check++;
/* Above neighbor. */
663 if (mic->mbmi.ref_frame != INTRA_FRAME)
664 if( abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8)
665 local_motion_check++;
/* Edge macroblocks need fewer confirming neighbors than interior ones. */
667 if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge)
668 && local_motion_check >0) || local_motion_check >2 )
670 else if (local_motion_check > 0)
675 void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
676 int recon_uvoffset, int *returnrate,
677 int *returndistortion, int *returnintra, int mb_row,
680 BLOCK *b = &x->block[0];
681 BLOCKD *d = &x->e_mbd.block[0];
682 MACROBLOCKD *xd = &x->e_mbd;
683 MB_MODE_INFO best_mbmode;
685 int_mv best_ref_mv_sb[2];
686 int_mv mode_mv_sb[2][MB_MODE_COUNT];
689 MB_PREDICTION_MODE this_mode;
692 int best_rd = INT_MAX;
693 int rd_adjustment = 100;
694 int best_intra_rd = INT_MAX;
699 int bestsme = INT_MAX;
700 int best_mode_index = 0;
701 unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX;
702 #if CONFIG_TEMPORAL_DENOISING
703 unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX;
706 int sf_improved_mv_pred = cpi->sf.improved_mv_pred;
708 #if CONFIG_MULTI_RES_ENCODING
709 int dissim = INT_MAX;
710 int parent_ref_frame = 0;
711 int_mv parent_ref_mv;
712 MB_PREDICTION_MODE parent_mode = 0;
713 int parent_ref_valid = 0;
718 int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
720 /* search range got from mv_pred(). It uses step_param levels. (0-7) */
723 unsigned char *plane[4][3];
724 int ref_frame_map[4];
726 int dot_artifact_candidate = 0;
727 get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
729 // If the current frame is using LAST as a reference, check for
730 // biasing the mode selection for dot artifacts.
731 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
732 unsigned char* target_y = x->src.y_buffer;
733 unsigned char* target_u = x->block[16].src + *x->block[16].base_src;
734 unsigned char* target_v = x->block[20].src + *x->block[20].base_src;
735 int stride = x->src.y_stride;
736 int stride_uv = x->block[16].src_stride;
737 #if CONFIG_TEMPORAL_DENOISING
738 if (cpi->oxcf.noise_sensitivity) {
739 const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
741 cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
742 stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
745 cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer +
748 cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer +
750 stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
754 dot_artifact_candidate =
755 check_dot_artifact_candidate(cpi, x, target_y, stride,
756 plane[LAST_FRAME][0], mb_row, mb_col, 0);
757 // If not found in Y channel, check UV channel.
758 if (!dot_artifact_candidate) {
759 dot_artifact_candidate =
760 check_dot_artifact_candidate(cpi, x, target_u, stride_uv,
761 plane[LAST_FRAME][1], mb_row, mb_col, 1);
762 if (!dot_artifact_candidate) {
763 dot_artifact_candidate =
764 check_dot_artifact_candidate(cpi, x, target_v, stride_uv,
765 plane[LAST_FRAME][2], mb_row, mb_col, 2);
770 #if CONFIG_MULTI_RES_ENCODING
771 // |parent_ref_valid| will be set here if potentially we can do mv resue for
772 // this higher resol (|cpi->oxcf.mr_encoder_id| > 0) frame.
773 // |parent_ref_valid| may be reset depending on |parent_ref_frame| for
774 // the current macroblock below.
775 parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail;
776 if (parent_ref_valid)
780 get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame,
781 &parent_mode, &parent_ref_mv, mb_row, mb_col);
783 /* TODO(jkoleszar): The references available (ref_frame_flags) to the
784 * lower res encoder should match those available to this encoder, but
785 * there seems to be a situation where this mismatch can happen in the
786 * case of frame dropping and temporal layers. For example,
787 * GOLD being disallowed in ref_frame_flags, but being returned as
790 * In this event, take the conservative approach of disabling the
791 * lower res info for this MB.
795 // Note availability for mv reuse is only based on last and golden.
796 if (parent_ref_frame == LAST_FRAME)
797 parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME);
798 else if (parent_ref_frame == GOLDEN_FRAME)
799 parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME);
801 //assert(!parent_ref_frame || parent_ref_flag);
803 // If |parent_ref_frame| did not match either last or golden then
804 // shut off mv reuse.
805 if (parent_ref_frame && !parent_ref_flag)
806 parent_ref_valid = 0;
808 // Don't do mv reuse since we want to allow for another mode besides
809 // ZEROMV_LAST to remove dot artifact.
810 if (dot_artifact_candidate)
811 parent_ref_valid = 0;
815 // Check if current macroblock is in skin area.
817 const int y = (x->src.y_buffer[7 * x->src.y_stride + 7] +
818 x->src.y_buffer[7 * x->src.y_stride + 8] +
819 x->src.y_buffer[8 * x->src.y_stride + 7] +
820 x->src.y_buffer[8 * x->src.y_stride + 8]) >> 2;
821 const int cb = (x->src.u_buffer[3 * x->src.uv_stride + 3] +
822 x->src.u_buffer[3 * x->src.uv_stride + 4] +
823 x->src.u_buffer[4 * x->src.uv_stride + 3] +
824 x->src.u_buffer[4 * x->src.uv_stride + 4]) >> 2;
825 const int cr = (x->src.v_buffer[3 * x->src.uv_stride + 3] +
826 x->src.v_buffer[3 * x->src.uv_stride + 4] +
827 x->src.v_buffer[4 * x->src.uv_stride + 3] +
828 x->src.v_buffer[4 * x->src.uv_stride + 4]) >> 2;
830 if (!cpi->oxcf.screen_content_mode)
831 x->is_skin = is_skin_color(y, cb, cr);
833 #if CONFIG_TEMPORAL_DENOISING
834 if (cpi->oxcf.noise_sensitivity) {
835 // Under aggressive denoising mode, should we use skin map to reduce denoiser
836 // and ZEROMV bias? Will need to revisit the accuracy of this detection for
837 // very noisy input. For now keep this as is (i.e., don't turn it off).
838 // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive)
843 mode_mv = mode_mv_sb[sign_bias];
844 best_ref_mv.as_int = 0;
845 memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
846 memset(&best_mbmode, 0, sizeof(best_mbmode));
848 /* Setup search priorities */
849 #if CONFIG_MULTI_RES_ENCODING
850 if (parent_ref_valid && parent_ref_frame && dissim < 8)
852 ref_frame_map[0] = -1;
853 ref_frame_map[1] = parent_ref_frame;
854 ref_frame_map[2] = -1;
855 ref_frame_map[3] = -1;
858 get_reference_search_order(cpi, ref_frame_map);
860 /* Check to see if there is at least 1 valid reference frame that we need
861 * to calculate near_mvs.
863 if (ref_frame_map[1] > 0)
865 sign_bias = vp8_find_near_mvs_bias(&x->e_mbd,
866 x->e_mbd.mode_info_context,
871 cpi->common.ref_frame_sign_bias);
873 mode_mv = mode_mv_sb[sign_bias];
874 best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
877 /* Count of the number of MBs tested so far this frame */
878 x->mbs_tested_so_far++;
880 *returnintra = INT_MAX;
883 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
885 /* If the frame has big static background and current MB is in low
886 * motion area, its mode decision is biased to ZEROMV mode.
887 * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12).
888 * At such speed settings, ZEROMV is already heavily favored.
890 if (cpi->Speed < 12) {
891 calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment);
894 #if CONFIG_TEMPORAL_DENOISING
895 if (cpi->oxcf.noise_sensitivity) {
896 rd_adjustment = (int)(rd_adjustment *
897 cpi->denoiser.denoise_pars.pickmode_mv_bias / 100);
901 if (dot_artifact_candidate)
903 // Bias against ZEROMV_LAST mode.
908 /* if we encode a new mv this is important
909 * find the best new motion vector
911 for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
914 int this_rd = INT_MAX;
915 int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
917 if (best_rd <= x->rd_threshes[mode_index])
920 if (this_ref_frame < 0)
923 x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
925 /* everything but intra */
926 if (x->e_mbd.mode_info_context->mbmi.ref_frame)
928 x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
929 x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
930 x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
932 if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame])
934 sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
935 mode_mv = mode_mv_sb[sign_bias];
936 best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
939 #if CONFIG_MULTI_RES_ENCODING
940 if (parent_ref_valid)
942 if (vp8_mode_order[mode_index] == NEARESTMV &&
943 mode_mv[NEARESTMV].as_int ==0)
945 if (vp8_mode_order[mode_index] == NEARMV &&
946 mode_mv[NEARMV].as_int ==0)
949 if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV
950 && best_ref_mv.as_int==0)
952 else if(vp8_mode_order[mode_index] == NEWMV && dissim==0
953 && best_ref_mv.as_int==parent_ref_mv.as_int)
959 /* Check to see if the testing frequency for this mode is at its max
960 * If so then prevent it from being tested and increase the threshold
962 if (x->mode_test_hit_counts[mode_index] &&
963 (cpi->mode_check_freq[mode_index] > 1))
965 if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] *
966 x->mode_test_hit_counts[mode_index]))
968 /* Increase the threshold for coding this mode to make it less
969 * likely to be chosen */
970 x->rd_thresh_mult[mode_index] += 4;
972 if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
973 x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
975 x->rd_threshes[mode_index] =
976 (cpi->rd_baseline_thresh[mode_index] >> 7) *
977 x->rd_thresh_mult[mode_index];
982 /* We have now reached the point where we are going to test the current
983 * mode so increment the counter for the number of times it has been
985 x->mode_test_hit_counts[mode_index] ++;
990 this_mode = vp8_mode_order[mode_index];
992 x->e_mbd.mode_info_context->mbmi.mode = this_mode;
993 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
995 /* Work out the cost assosciated with selecting the reference frame */
997 x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
1000 /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
1001 * unless ARNR filtering is enabled in which case we want
1002 * an unfiltered alternative */
1003 if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
1005 if (this_mode != ZEROMV ||
1006 x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
1013 /* Pass best so far to pick_intra4x4mby_modes to use as breakout */
1014 distortion2 = best_rd_sse;
1015 pick_intra4x4mby_modes(x, &rate, &distortion2);
1017 if (distortion2 == INT_MAX)
1024 distortion2 = vpx_variance16x16(
1025 *(b->base_src), b->src_stride,
1026 x->e_mbd.predictor, 16, &sse);
1027 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
1029 if (this_rd < best_intra_rd)
1031 best_intra_rd = this_rd;
1032 *returnintra = distortion2;
1040 /* Split MV modes currently not supported when RD is not enabled. */
1047 vp8_build_intra_predictors_mby_s(xd,
1048 xd->dst.y_buffer - xd->dst.y_stride,
1049 xd->dst.y_buffer - 1,
1053 distortion2 = vpx_variance16x16
1054 (*(b->base_src), b->src_stride,
1055 x->e_mbd.predictor, 16, &sse);
1056 rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
1057 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
1059 if (this_rd < best_intra_rd)
1061 best_intra_rd = this_rd;
1062 *returnintra = distortion2;
1072 int sadpb = x->sadperbit16;
1075 int col_min = ((best_ref_mv.as_mv.col+7)>>3) - MAX_FULL_PEL_VAL;
1076 int row_min = ((best_ref_mv.as_mv.row+7)>>3) - MAX_FULL_PEL_VAL;
1077 int col_max = (best_ref_mv.as_mv.col>>3)
1079 int row_max = (best_ref_mv.as_mv.row>>3)
1082 int tmp_col_min = x->mv_col_min;
1083 int tmp_col_max = x->mv_col_max;
1084 int tmp_row_min = x->mv_row_min;
1085 int tmp_row_max = x->mv_row_max;
1087 int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8)? 3 : 2) : 1;
1089 /* Further step/diamond searches as necessary */
1090 step_param = cpi->sf.first_step + speed_adjust;
1092 #if CONFIG_MULTI_RES_ENCODING
1093 /* If lower-res frame is not available for mv reuse (because of
1094 frame dropping or different temporal layer pattern), then higher
1095 resol encoder does motion search without any previous knowledge.
1096 Also, since last frame motion info is not stored, then we can not
1097 use improved_mv_pred. */
1098 if (cpi->oxcf.mr_encoder_id)
1099 sf_improved_mv_pred = 0;
1101 // Only use parent MV as predictor if this candidate reference frame
1102 // (|this_ref_frame|) is equal to |parent_ref_frame|.
1103 if (parent_ref_valid && (parent_ref_frame == this_ref_frame))
1105 /* Use parent MV as predictor. Adjust search range
1108 mvp.as_int = parent_ref_mv.as_int;
1109 mvp_full.as_mv.col = parent_ref_mv.as_mv.col>>3;
1110 mvp_full.as_mv.row = parent_ref_mv.as_mv.row>>3;
1112 if(dissim <=32) step_param += 3;
1113 else if(dissim <=128) step_param += 2;
1114 else step_param += 1;
1118 if(sf_improved_mv_pred)
1122 vp8_cal_sad(cpi,xd,x, recon_yoffset ,&near_sadidx[0] );
1126 vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context,
1127 &mvp,x->e_mbd.mode_info_context->mbmi.ref_frame,
1128 cpi->common.ref_frame_sign_bias, &sr,
1132 /* adjust search range according to sr from mv prediction */
1136 mvp_full.as_mv.col = mvp.as_mv.col>>3;
1137 mvp_full.as_mv.row = mvp.as_mv.row>>3;
1140 mvp.as_int = best_ref_mv.as_int;
1141 mvp_full.as_mv.col = best_ref_mv.as_mv.col>>3;
1142 mvp_full.as_mv.row = best_ref_mv.as_mv.row>>3;
1146 #if CONFIG_MULTI_RES_ENCODING
1147 if (parent_ref_valid && (parent_ref_frame == this_ref_frame) &&
1149 VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row),
1150 abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <=
1153 d->bmi.mv.as_int = mvp_full.as_int;
1154 mode_mv[NEWMV].as_int = mvp_full.as_int;
1156 cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
1158 &cpi->fn_ptr[BLOCK_16X16],
1164 /* Get intersection of UMV window and valid MV window to
1165 * reduce # of checks in diamond search. */
1166 if (x->mv_col_min < col_min )
1167 x->mv_col_min = col_min;
1168 if (x->mv_col_max > col_max )
1169 x->mv_col_max = col_max;
1170 if (x->mv_row_min < row_min )
1171 x->mv_row_min = row_min;
1172 if (x->mv_row_max > row_max )
1173 x->mv_row_max = row_max;
1175 further_steps = (cpi->Speed >= 8)?
1176 0: (cpi->sf.max_step_search_steps - 1 - step_param);
1178 if (cpi->sf.search_method == HEX)
1180 #if CONFIG_MULTI_RES_ENCODING
1181 /* TODO: In higher-res pick_inter_mode, step_param is used to
1182 * modify hex search range. Here, set step_param to 0 not to
1183 * change the behavior in lowest-resolution encoder.
1184 * Will improve it later.
1186 /* Set step_param to 0 to ensure large-range motion search
1187 * when mv reuse if not valid (i.e. |parent_ref_valid| = 0),
1188 * or if this candidate reference frame (|this_ref_frame|) is
1189 * not equal to |parent_ref_frame|.
1191 if (!parent_ref_valid || (parent_ref_frame != this_ref_frame))
1194 bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv,
1196 &cpi->fn_ptr[BLOCK_16X16],
1197 x->mvsadcost, x->mvcost, &best_ref_mv);
1198 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1202 bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full,
1203 &d->bmi.mv, step_param, sadpb, &num00,
1204 &cpi->fn_ptr[BLOCK_16X16],
1205 x->mvcost, &best_ref_mv);
1206 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1208 /* Further step/diamond searches as necessary */
1212 while (n < further_steps)
1221 cpi->diamond_search_sad(x, b, d, &mvp_full,
1225 &cpi->fn_ptr[BLOCK_16X16],
1226 x->mvcost, &best_ref_mv);
1227 if (thissme < bestsme)
1230 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1234 d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
1240 x->mv_col_min = tmp_col_min;
1241 x->mv_col_max = tmp_col_max;
1242 x->mv_row_min = tmp_row_min;
1243 x->mv_row_max = tmp_row_max;
1245 if (bestsme < INT_MAX)
1246 cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv,
1247 &best_ref_mv, x->errorperbit,
1248 &cpi->fn_ptr[BLOCK_16X16],
1253 mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
1254 // The clamp below is not necessary from the perspective
1255 // of VP8 bitstream, but is added to improve ChromeCast
1256 // mirroring's robustness. Please do not remove.
1257 vp8_clamp_mv2(&mode_mv[this_mode], xd);
1259 rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
1260 cpi->mb.mvcost, 128);
1265 if (mode_mv[this_mode].as_int == 0)
1270 /* Trap vectors that reach beyond the UMV borders
1271 * Note that ALL New MV, Nearest MV Near MV and Zero MV code drops
1272 * through to this point because of the lack of break statements
1273 * in the previous two cases.
1275 if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
1276 ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
1277 ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
1278 ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
1281 rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
1282 x->e_mbd.mode_info_context->mbmi.mv.as_int =
1283 mode_mv[this_mode].as_int;
1284 this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x,
1292 #if CONFIG_TEMPORAL_DENOISING
1293 if (cpi->oxcf.noise_sensitivity)
1295 /* Store for later use by denoiser. */
1296 // Don't denoise with GOLDEN or ALTREF if they are old reference
1297 // frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in past).
1298 int skip_old_reference = ((this_ref_frame != LAST_FRAME) &&
1299 (cpi->common.current_video_frame -
1300 cpi->current_ref_frames[this_ref_frame] >
1301 MAX_GF_ARF_DENOISE_RANGE)) ? 1 : 0;
1302 if (this_mode == ZEROMV && sse < zero_mv_sse &&
1303 !skip_old_reference)
1306 x->best_zeromv_reference_frame =
1307 x->e_mbd.mode_info_context->mbmi.ref_frame;
1310 // Store the best NEWMV in x for later use in the denoiser.
1311 if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV &&
1312 sse < best_sse && !skip_old_reference)
1315 x->best_sse_inter_mode = NEWMV;
1316 x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
1317 x->need_to_clamp_best_mvs =
1318 x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
1319 x->best_reference_frame =
1320 x->e_mbd.mode_info_context->mbmi.ref_frame;
1325 if (this_rd < best_rd || x->skip)
1327 /* Note index of best mode */
1328 best_mode_index = mode_index;
1330 *returnrate = rate2;
1331 *returndistortion = distortion2;
1334 memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1335 sizeof(MB_MODE_INFO));
1337 /* Testing this mode gave rise to an improvement in best error
1338 * score. Lower threshold a bit for next time
1340 x->rd_thresh_mult[mode_index] =
1341 (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
1342 x->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
1343 x->rd_threshes[mode_index] =
1344 (cpi->rd_baseline_thresh[mode_index] >> 7) *
1345 x->rd_thresh_mult[mode_index];
1348 /* If the mode did not help improve the best error case then raise the
1349 * threshold for testing that mode next time around.
1353 x->rd_thresh_mult[mode_index] += 4;
1355 if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
1356 x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
1358 x->rd_threshes[mode_index] =
1359 (cpi->rd_baseline_thresh[mode_index] >> 7) *
1360 x->rd_thresh_mult[mode_index];
1367 /* Reduce the activation RD thresholds for the best choice mode */
1368 if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
1370 int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3);
1372 x->rd_thresh_mult[best_mode_index] =
1373 (x->rd_thresh_mult[best_mode_index]
1374 >= (MIN_THRESHMULT + best_adjustment)) ?
1375 x->rd_thresh_mult[best_mode_index] - best_adjustment :
1377 x->rd_threshes[best_mode_index] =
1378 (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
1379 x->rd_thresh_mult[best_mode_index];
1384 int this_rdbin = (*returndistortion >> 7);
1386 if (this_rdbin >= 1024)
1391 x->error_bins[this_rdbin] ++;
1394 #if CONFIG_TEMPORAL_DENOISING
1395 if (cpi->oxcf.noise_sensitivity)
1397 int block_index = mb_row * cpi->common.mb_cols + mb_col;
1400 if (x->best_sse_inter_mode == DC_PRED)
1402 /* No best MV found. */
1403 x->best_sse_inter_mode = best_mbmode.mode;
1404 x->best_sse_mv = best_mbmode.mv;
1405 x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
1406 x->best_reference_frame = best_mbmode.ref_frame;
1407 best_sse = best_rd_sse;
1409 // For non-skin blocks that have selected ZEROMV for this current frame,
1410 // and have been selecting ZEROMV_LAST (on the base layer frame) at
1411 // least |x~20| consecutive past frames in a row, label the block for
1412 // possible increase in denoising strength. We also condition this
1413 // labeling on there being significant denoising in the scene
1414 if (cpi->oxcf.noise_sensitivity == 4) {
1415 if (cpi->denoiser.nmse_source_diff >
1416 70 * cpi->denoiser.threshold_aggressive_mode / 100)
1419 if (cpi->mse_source_denoised > 1000)
1422 x->increase_denoising = 0;
1424 x->best_sse_inter_mode == ZEROMV &&
1425 (x->best_reference_frame == LAST_FRAME ||
1426 x->best_reference_frame == cpi->closest_reference_frame) &&
1427 cpi->consec_zero_last[block_index] >= 20 &&
1429 x->increase_denoising = 1;
1431 x->denoise_zeromv = 0;
1432 vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
1433 recon_yoffset, recon_uvoffset,
1434 &cpi->common.lf_info, mb_row, mb_col,
1437 // Reevaluate ZEROMV after denoising: for large noise content
1438 // (i.e., cpi->mse_source_denoised is above threshold), do this for all
1439 // blocks that did not pick ZEROMV as best mode but are using ZEROMV
1440 // for denoising. Otherwise, always re-evaluate for blocks that picked
1441 // INTRA mode as best mode.
1442 // Avoid blocks that have been biased against ZERO_LAST
1443 // (i.e., dot artifact candidate blocks).
1444 reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) ||
1445 (best_mbmode.mode != ZEROMV &&
1446 x->denoise_zeromv &&
1447 cpi->mse_source_denoised > 2000);
1448 if (!dot_artifact_candidate &&
1450 x->best_zeromv_reference_frame != INTRA_FRAME)
1453 int this_ref_frame = x->best_zeromv_reference_frame;
1454 rd_adjustment = 100;
1455 rate2 = x->ref_frame_cost[this_ref_frame] +
1456 vp8_cost_mv_ref(ZEROMV, mdcounts);
1459 /* set up the proper prediction buffers for the frame */
1460 x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
1461 x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
1462 x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
1463 x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
1465 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1466 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1467 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1468 this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x,
1471 if (this_rd < best_rd)
1473 memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
1474 sizeof(MB_MODE_INFO));
1481 if (cpi->is_src_frame_alt_ref &&
1482 (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME))
1484 x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
1485 x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
1486 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
1487 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1488 x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
1489 (cpi->common.mb_no_coeff_skip);
1490 x->e_mbd.mode_info_context->mbmi.partitioning = 0;
1495 /* set to the best mb mode, this copy can be skip if x->skip since it
1496 * already has the right content */
1498 memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
1499 sizeof(MB_MODE_INFO));
1501 if (best_mbmode.mode <= B_PRED)
1503 /* set mode_info_context->mbmi.uv_mode */
1504 pick_intra_mbuv_mode(x);
1508 != cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame])
1509 best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
1511 update_mvcount(x, &best_ref_mv);
1514 void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_)
1516 int error4x4, error16x16 = INT_MAX;
1517 int rate, best_rate = 0, distortion, best_sse;
1518 MB_PREDICTION_MODE mode, best_mode = DC_PRED;
1521 BLOCK *b = &x->block[0];
1522 MACROBLOCKD *xd = &x->e_mbd;
1524 xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
1526 pick_intra_mbuv_mode(x);
1528 for (mode = DC_PRED; mode <= TM_PRED; mode ++)
1530 xd->mode_info_context->mbmi.mode = mode;
1531 vp8_build_intra_predictors_mby_s(xd,
1532 xd->dst.y_buffer - xd->dst.y_stride,
1533 xd->dst.y_buffer - 1,
1537 distortion = vpx_variance16x16
1538 (*(b->base_src), b->src_stride, xd->predictor, 16, &sse);
1539 rate = x->mbmode_cost[xd->frame_type][mode];
1540 this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
1542 if (error16x16 > this_rd)
1544 error16x16 = this_rd;
1550 xd->mode_info_context->mbmi.mode = best_mode;
1552 error4x4 = pick_intra4x4mby_modes(x, &rate,
1554 if (error4x4 < error16x16)
1556 xd->mode_info_context->mbmi.mode = B_PRED;