#define DIST(r,c) vfp->svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns the sub-pixel variance at (r,c); writes the SSE into sse
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
-#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse;}}, v=INT_MAX;)// checks if (r,c) has better score than previous best
+#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
//#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
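/* Illustrative sketch (not part of this patch): inside the iterative
 * sub-pixel refinement loop, CHECK_BETTER is expanded once per neighbour of
 * the current best position, along the lines of
 *
 *     CHECK_BETTER(left,  tr, tc - 1);
 *     CHECK_BETTER(right, tr, tc + 1);
 *     CHECK_BETTER(up,    tr - 1, tc);
 *     CHECK_BETTER(down,  tr + 1, tc);
 *
 * where tr/tc are assumed to hold the current best row/col. Each expansion
 * clips (r,c) to the legal MV range via IFMVCV, scores it as DIST + MVC,
 * and, with this patch, also latches the winning SSE into *sse1.
 */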
-int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion)
+int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse1)
{
unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
unsigned char *z = (*(b->base_src) + b->src);
bestmv->col <<= 3;
// calculate central point error
- besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
+ besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
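/* Note: the central-point score is the whole-pel prediction variance plus
 * the MV rate cost from mv_err_cost(); *distortion reports the variance
 * alone, and with this patch the corresponding SSE is returned through the
 * caller-supplied sse1 instead of a function-local variable.
 */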
#undef CHECK_BETTER
#undef MIN
#undef MAX
-int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion)
+int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse1)
{
int bestmse = INT_MAX;
MV startmv;
startmv = *bestmv;
// calculate central point error
- bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
+ bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
*bestmv = this_mv;
bestmse = left;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.col += 8;
*bestmv = this_mv;
bestmse = right;
*distortion = thismse;
+ *sse1 = sse;
}
// go up then down and check error
*bestmv = this_mv;
bestmse = up;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.row += 8;
*bestmv = this_mv;
bestmse = down;
*distortion = thismse;
+ *sse1 = sse;
}
*bestmv = this_mv;
bestmse = diag;
*distortion = thismse;
+ *sse1 = sse;
}
// }
*bestmv = this_mv;
bestmse = left;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.col += 4;
*bestmv = this_mv;
bestmse = right;
*distortion = thismse;
+ *sse1 = sse;
}
// go up then down and check error
*bestmv = this_mv;
bestmse = up;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.row += 4;
*bestmv = this_mv;
bestmse = down;
*distortion = thismse;
+ *sse1 = sse;
}
*bestmv = this_mv;
bestmse = diag;
*distortion = thismse;
+ *sse1 = sse;
}
return bestmse;
}
-int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion)
+int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse1)
{
int bestmse = INT_MAX;
MV startmv;
startmv = *bestmv;
// calculate central point error
- bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
+ bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
*bestmv = this_mv;
bestmse = left;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.col += 8;
*bestmv = this_mv;
bestmse = right;
*distortion = thismse;
+ *sse1 = sse;
}
// go up then down and check error
*bestmv = this_mv;
bestmse = up;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.row += 8;
*bestmv = this_mv;
bestmse = down;
*distortion = thismse;
+ *sse1 = sse;
}
// Somewhat strangely, not checking all the diagonals for half-pel is slower than checking them.
*bestmv = this_mv;
bestmse = diag;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.col += 8;
*bestmv = this_mv;
bestmse = diag;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.col = (this_mv.col - 8) | 4;
*bestmv = this_mv;
bestmse = diag;
*distortion = thismse;
+ *sse1 = sse;
}
this_mv.col += 8;
*bestmv = this_mv;
bestmse = diag;
*distortion = thismse;
+ *sse1 = sse;
}
#endif
typedef int (fractional_mv_step_fp)
(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv,
- int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion);
+ int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse);
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively;
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step;
extern fractional_mv_step_fp vp8_find_best_half_pixel_step;
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv);
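/* Illustrative sketch (not part of this patch): the encoder dispatches
 * through a fractional_mv_step_fp pointer, so every caller now supplies the
 * extra SSE out-parameter, e.g.
 *
 *     unsigned int sse;
 *     int dist;
 *     cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv,
 *                                  x->errorperbit,
 *                                  &cpi->fn_ptr[BLOCK_16X16],
 *                                  x->mvcost, &dist, &sse);
 *
 * where cpi->find_fractional_mv_step is assumed to point at one of the
 * three implementations declared above.
 */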
-int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion)
+int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, unsigned int *sse)
{
(void) b;
(void) d;
(void) vfp;
(void) mvcost;
(void) distortion;
+ (void) sse;
bestmv->row <<= 3;
bestmv->col <<= 3;
return 0;
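/* vp8_skip_fractional_mv_step only converts the full-pel result into the
 * encoder's 1/8-pel MV units: the << 3 multiplies by 8, so e.g. a full-pel
 * MV of (2, -1) becomes (16, -8). The new sse argument is accepted (and
 * cast to void) solely to keep the fractional_mv_step_fp signature uniform.
 */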
int bestsme;
//int all_rds[MAX_MODES]; // Experimental debug code.
int best_mode_index = 0;
- int sse = INT_MAX;
+ unsigned int sse = INT_MAX;
MV mvp;
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
}
if (bestsme < INT_MAX)
- cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2);
+ cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
x->e_mbd.block[0].bmi.mv.as_int = x->e_mbd.mode_info_context->mbmi.mv.as_int;
if((this_mode != NEWMV) || !(have_subp_search))
- distortion2 = get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], (unsigned int *)(&sse));
+ distortion2 = get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], &sse);
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
if (bestsme < INT_MAX)
{
int distortion;
+ unsigned int sse;
if (!cpi->common.full_pixel)
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
- bsi->ref_mv, x->errorperbit / 2, v_fn_ptr, x->mvcost, &distortion);
+ bsi->ref_mv, x->errorperbit / 2, v_fn_ptr, x->mvcost, &distortion, &sse);
else
vp8_skip_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
- bsi->ref_mv, x->errorperbit, v_fn_ptr, x->mvcost, &distortion);
+ bsi->ref_mv, x->errorperbit, v_fn_ptr, x->mvcost, &distortion, &sse);
}
} /* NEW4X4 */
x->mv_row_max = tmp_row_max;
if (bestsme < INT_MAX)
{
int dis; /* TODO: use dis in distortion calculation later. */
+ unsigned int sse;
- cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit / 4, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &dis);
+ cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit / 4, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &dis, &sse);
}
mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
}
else if (x->encode_breakout)
{
- int sum, sse;
+ int sum;
+ unsigned int sse;
int threshold = (xd->block[0].dequant[1]
* xd->block[0].dequant[1] >>4);
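/* Worked example (illustrative numbers): with xd->block[0].dequant[1] == 40,
 * threshold = (40 * 40) >> 4 == 100; the breakout below fires only when the
 * 16x16 SSE returned by get16x16var drops under that value.
 */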
VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)
(x->src.y_buffer, x->src.y_stride,
- x->e_mbd.predictor, 16, (unsigned int *)(&sse), &sum);
+ x->e_mbd.predictor, 16, &sse, &sum);
if (sse < threshold)
{
distortion_uv = sse2;
disable_skip = 1;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate2,
- distortion2);
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
break;
}
//if (bestsme > error_thresh && bestsme < INT_MAX)
{
int distortion;
+ unsigned int sse;
bestsme = cpi->find_fractional_mv_step(x, b, d,
&d->bmi.mv.as_mv, &best_ref_mv1,
x->errorperbit, &cpi->fn_ptr[BLOCK_16X16],
- mvcost, &distortion);
+ mvcost, &distortion, &sse);
}
#endif