#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
+/* Integer-pel reference MV threshold below which high-precision 1/8-pel MVs are used */
+#define COMPANDED_MVREF_THRESH 8
+
/* Smooth or bias the mv-counts before prob computation */
/* #define SMOOTH_MV_COUNTS */
return c;
}
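+/* Returns 1 if each component of the reference MV is within
+ * COMPANDED_MVREF_THRESH full pels, so the MV residual may be coded at
+ * high-precision 1/8 pel. For example, a reference MV of (60, -20) in
+ * 1/8-pel units is (7, 2) full pels, so high precision is allowed. */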
+int vp8_use_nmv_hp(const MV *ref) {
+ if ((abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
+ (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH)
+ return 1;
+ else
+ return 0;
+}
+
int vp8_get_mv_mag(MV_CLASS_TYPE c, int offset) {
return mv_class_base(c) + offset;
}
} else {
mvcomp->hp[e] += incr;
}
- } else { /* assume the extra bit is 1 */
- if (c == MV_CLASS_0) {
- mvcomp->class0_hp[1] += incr;
- } else {
- mvcomp->hp[1] += incr;
- }
}
}
int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
mvctx->joints[j]++;
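+ /* Only accumulate high-precision (1/8-pel) statistics when the reference
+    MV permits high-precision coding. */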
+ usehp = usehp && vp8_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
increment_nmv_component_count(mv->row, &mvctx->comps[0], 1, usehp);
}
void vp8_entropy_mv_init();
void vp8_init_mv_probs(struct VP8Common *cm);
void vp8_adapt_mv_probs(struct VP8Common *cm);
-#if CONFIG_NEWMVENTROPY
-void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
-#endif
#if CONFIG_NEWMVENTROPY
+void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
+void vp8_lower_mv_precision(MV *mv);
+int vp8_use_nmv_hp(const MV *ref);
#define VP8_NMV_UPDATE_PROB 255
//#define MV_GROUP_UPDATE
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
};
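+/* Truncate the 1/8-pel bit of each component towards zero unless high
+ * precision is in use (and, with CONFIG_NEWMVENTROPY, the MV is small
+ * enough for 1/8-pel coding). For example, a row of +5 (5/8 pel) becomes
+ * +4 and -5 becomes -4. */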
-static void lower_mv_precision(int_mv *mv)
+static void lower_mv_precision(int_mv *mv, int usehp)
{
- if (mv->as_mv.row & 1)
- mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
- if (mv->as_mv.col & 1)
- mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
+#if CONFIG_NEWMVENTROPY
+ if (!usehp || !vp8_use_nmv_hp(&mv->as_mv)) {
+#else
+ if (!usehp) {
+#endif
+ if (mv->as_mv.row & 1)
+ mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
+ if (mv->as_mv.col & 1)
+ mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
+ }
}
-
/* Predict motion vectors using those from already-decoded nearby blocks.
Note that we only consider one 4x4 subblock from each candidate 16x16
macroblock. */
/* Make sure that the 1/8-pel bits of the MVs are zero when high precision
 * is not being used, by truncating the last bit towards 0
*/
- if (!xd->allow_high_precision_mv) {
- lower_mv_precision(best_mv);
- lower_mv_precision(nearest);
- lower_mv_precision(nearby);
- }
+ lower_mv_precision(best_mv, xd->allow_high_precision_mv);
+ lower_mv_precision(nearest, xd->allow_high_precision_mv);
+ lower_mv_precision(nearby, xd->allow_high_precision_mv);
// TODO: move clamp outside findnearmv
vp8_clamp_mv2(nearest, xd);
// Copy back the re-ordered mv list
vpx_memcpy(mvlist, sorted_mvs, sizeof(sorted_mvs));
-
- if (!xd->allow_high_precision_mv)
- lower_mv_precision(best_mv);
+ lower_mv_precision(best_mv, xd->allow_high_precision_mv);
vp8_clamp_mv2(best_mv, xd);
}
static void read_nmv_fp(vp8_reader *r, MV *mv, const MV *ref,
const nmv_context *mvctx, int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
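+ /* Derive usehp from the reference MV exactly as the encoder does, so
+    encoder and decoder stay in sync without extra per-MV signalling. */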
+ usehp = usehp && vp8_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
mv->row = read_nmv_component_fp(r, mv->row, ref->row, &mvctx->comps[0],
usehp);
mv->col = read_nmv_component_fp(r, mv->col, ref->col, &mvctx->comps[1],
usehp);
}
}
static void update_nmv(vp8_reader *bc, vp8_prob *const p,
void vp8_encode_nmv_fp(vp8_writer *w, const MV *mv, const MV *ref,
const nmv_context *mvctx, int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
+ usehp = usehp && vp8_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
encode_nmv_component_fp(w, mv->row, ref->row, &mvctx->comps[0], usehp);
}
int maxc, minc, maxr, minr;
int y_stride;
int offset;
+ int usehp = xd->allow_high_precision_mv;
#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
y_stride = d->pre_stride;
#endif
-
rr = ref_mv->as_mv.row;
rc = ref_mv->as_mv.col;
br = bestmv->as_mv.row << 3;
tc = bc;
}
- if (x->e_mbd.allow_high_precision_mv) {
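+ /* With the new MV entropy coder, 1/8-pel refinement is only worthwhile
+    when the reference MV is close enough for high precision to be coded
+    (see vp8_use_nmv_hp). */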
+#if CONFIG_NEWMVENTROPY
+ if (xd->allow_high_precision_mv) {
+ usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
+ } else {
+ usehp = 0;
+ }
+#endif
+
+ if (usehp) {
hstep >>= 1;
while (--eighthiters) {
CHECK_BETTER(left, tr, tc - hstep);
int thismse;
int y_stride;
MACROBLOCKD *xd = &x->e_mbd;
+ int usehp = xd->allow_high_precision_mv;
#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
*sse1 = sse;
}
- if (!x->e_mbd.allow_high_precision_mv)
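+ /* As above, only attempt 1/8-pel refinement when the reference MV allows
+    high-precision coding under CONFIG_NEWMVENTROPY. */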
+#if CONFIG_NEWMVENTROPY
+ if (x->e_mbd.allow_high_precision_mv) {
+ usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
+ } else {
+ usehp = 0;
+ }
+#endif
+ if (!usehp)
return bestmse;
/* Now do 1/8th pixel */