#define IF_RTCD(x) NULL
#endif
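+// XMVCOST picks the MV cost tables for the active precision: the
+// high-precision tables (x->mvcost_hp) when the frame allows 1/8-pel MVs,
+// and x->mvcost otherwise.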
+#if CONFIG_HIGH_PRECISION_MV
+#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
+#else
+#define XMVCOST (x->mvcost)
+#endif
+
extern void vp8_build_block_offsets(MACROBLOCK *x);
extern void vp8_setup_block_ptrs(MACROBLOCK *x);
extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi);
ref_mv_full.as_mv.row = ref_mv->as_mv.row>>3;
tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param,
x->sadperbit16, &num00, &v_fn_ptr,
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
- ref_mv);
+ XMVCOST, ref_mv);
if ( tmp_err < INT_MAX-new_mv_mode_penalty )
tmp_err += new_mv_mode_penalty;
tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv,
step_param + n, x->sadperbit16,
&num00, &v_fn_ptr,
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
- ref_mv);
+ XMVCOST, ref_mv);
if ( tmp_err < INT_MAX-new_mv_mode_penalty )
tmp_err += new_mv_mode_penalty;
static int mv_mode_cts [4] [2];
#endif
-int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
+#if CONFIG_HIGH_PRECISION_MV
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp)
{
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
// over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
// cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
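+ // (ishp==0) evaluates to 1 when high-precision MVs are off, so the component
+ // difference is halved before indexing the cost tables; with ishp set, the
+ // difference indexes the tables at full precision.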
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp==0)] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp==0)]) * Weight) >> 7;
}
-#if CONFIG_HIGH_PRECISION_MV
-int vp8_mv_bit_cost_hp(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
+#else
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
{
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
// over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
// cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row)] + mvcost[1][(mv->as_mv.col - ref->as_mv.col)]) * Weight) >> 7;
+ return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
}
#endif
rr = ref_mv->as_mv.row; rc = ref_mv->as_mv.col;
br = bestmv->as_mv.row << 3; bc = bestmv->as_mv.col << 3;
hstep = 4;
- minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width) - 1));
- maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width) - 1));
- minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width) - 1));
- maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width) - 1));
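+ // With high-precision MVs, clamp the search range around the reference MV
+ // using the mvlong_width_hp limit rather than mvlong_width.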
+ minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
+ maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
+ minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
+ maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
}
else
#endif
extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
+#if CONFIG_HIGH_PRECISION_MV
+#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
+#else
+#define XMVCOST (x->mvcost)
+#endif
+
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
static const int auto_speed_thresh[17] =
switch (m = this_mode)
{
case NEW4X4 :
+#if CONFIG_HIGH_PRECISION_MV
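+ // Pass the precision flag so vp8_mv_bit_cost indexes the cost tables to
+ // match the sub-block MV precision in use.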
+ thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
+ 102, xd->allow_high_precision_mv);
+#else
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
+#endif
break;
case LEFT4X4:
this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
bestsme = cpi->diamond_search_sad(x, c, e, &mvp_full,
&mode_mv[NEW4X4], step_param,
sadpb, &num00, v_fn_ptr,
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
+ XMVCOST,
bsi->ref_mv);
n = num00;
&mvp_full, &temp_mv,
step_param + n, sadpb,
&num00, v_fn_ptr,
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
+ XMVCOST,
bsi->ref_mv);
if (thissme < bestsme)
thissme = cpi->full_search_sad(x, c, e, &mvp_full,
sadpb, 16, v_fn_ptr,
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
- bsi->ref_mv);
+ XMVCOST, bsi->ref_mv);
if (thissme < bestsme)
{
int distortion;
unsigned int sse;
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
- bsi->ref_mv, x->errorperbit, v_fn_ptr,
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
+ bsi->ref_mv, x->errorperbit, v_fn_ptr, XMVCOST,
&distortion, &sse);
}
} /* NEW4X4 */
rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
- bsi->ref_mv, x->mvcost);
+ bsi->ref_mv, XMVCOST);
// Trap vectors that reach beyond the UMV borders
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
- bsi->ref_mv, x->mvcost);
+ bsi->ref_mv, XMVCOST);
br += sbr;
bd += sbd;
bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv,
step_param, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16],
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
- &best_ref_mv);
+ XMVCOST, &best_ref_mv);
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Further step/diamond searches as necessary
thissme = cpi->diamond_search_sad(x, b, d, &mvp_full,
&d->bmi.mv, step_param + n, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16],
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
- &best_ref_mv);
+ XMVCOST, &best_ref_mv);
/* check to see if refining search is needed. */
if (num00 > (further_steps-n))
//thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv, sadpb,
search_range, &cpi->fn_ptr[BLOCK_16X16],
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
- &best_ref_mv);
+ XMVCOST, &best_ref_mv);
if (thissme < bestsme)
{
cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
-#if CONFIG_HIGH_PRECISION_MV
- x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
-#else
- x->mvcost,
-#endif
- &dis, &sse);
+ XMVCOST, &dis, &sse);
}
mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.mv.as_int;
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Add the new motion vector cost to our rolling cost variable
- rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96);
+#if CONFIG_HIGH_PRECISION_MV
+ rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
+ XMVCOST, 96,
+ x->e_mbd.allow_high_precision_mv);
+#else
+ rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
+ XMVCOST, 96);
+#endif
}
case NEARESTMV:
continue;
x->e_mbd.mode_info_context->mbmi.mv.as_int = mc_search_result[ref1].as_int;
x->e_mbd.mode_info_context->mbmi.second_mv.as_int = mc_search_result[ref2].as_int;
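+ // Add the cost of both motion vectors, each measured against its own
+ // reference MV, to the rolling rate total.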
+#if CONFIG_HIGH_PRECISION_MV
rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
- &frame_best_ref_mv[ref1], x->mvcost, 96);
+ &frame_best_ref_mv[ref1],
+ XMVCOST, 96,
+ x->e_mbd.allow_high_precision_mv);
rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
- &frame_best_ref_mv[ref2], x->mvcost, 96);
+ &frame_best_ref_mv[ref2],
+ XMVCOST, 96,
+ x->e_mbd.allow_high_precision_mv);
+#else
+ rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
+ &frame_best_ref_mv[ref1],
+ XMVCOST, 96);
+ rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
+ &frame_best_ref_mv[ref2],
+ XMVCOST, 96);
+#endif
break;
case ZEROMV:
x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;