Coding and costing of the MV reference signal.
Issues in updating the MV reference with COMPANDED_MVREF_THRESH
remain to be resolved. Ideally the MV precision should be determined
by the absolute MV magnitude, not, as now, by the MV reference magnitude.
The update of the MV counts has moved into bitstream.c because otherwise,
if the motion reference is changed at the last minute, the encoder and
decoder fall out of step on the counts used to update the entropy
probabilities.
The code works on a few test clips, but there are no results yet on
benefit vs. signaling cost, and no tuning of the RD loop to test
lower-cost alternatives based on the available reference values.
Patch 3: Added a check to make sure we don't pick a reference
that would give rise to an uncodeable / out-of-range residual.
Patch 6-7: Attempt to rebase. OK to submit, but best to leave the flag off for now.
Patch 9: Removed a print that is no longer needed.
Change-Id: I1938c2ffe41afe6d3cf6ccc0cb2c5d404809a712
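
In outline, the signaling this patch implements (a simplified sketch
distilled from the diff below, not literal code): the encoder picks the
candidate reference MV that minimizes index cost plus MV residual cost,
signals that index, and both sides then code the MV as a residual
against the chosen candidate.

    // Encoder side (bitstream.c):
    best_index = pick_best_mv_ref(x, rf, mi->mv[0], mi->ref_mvs[rf], &best_mv);
    vp8_write_mv_ref_id(bc, xd->mb_mv_ref_id_probs[rf], best_index);
    write_nmv(bc, &mi->mv[0].as_mv, &best_mv, nmvc, xd->allow_high_precision_mv);

    // Decoder side (decodemv.c):
    best_index = vp8_read_mv_ref_id(bc, xd->mb_mv_ref_id_probs[ref_frame]);
    best_mv.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
    read_nmv(bc, &mv->as_mv, &best_mv.as_mv, nmvc);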
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
TX_SIZE txfm_size;
int_mv mv[2]; // for each reference frame used
-#if CONFIG_NEWBESTREFMV
- int_mv ref_mv, second_ref_mv;
+#if CONFIG_NEWBESTREFMV || CONFIG_NEW_MVREF
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REFS];
- int mv_ref_index[MAX_REF_FRAMES];
#endif
SPLITMV_PARTITIONING_TYPE partitioning;
// Probability Tree used to code Segment number
vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS];
+#if CONFIG_NEW_MVREF
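+  // Node probabilities (3 per reference frame) for coding the chosen
+  // MV reference index; see vp8_read_mv_ref_id() / vp8_write_mv_ref_id().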
+ vp8_prob mb_mv_ref_id_probs[MAX_REF_FRAMES][3];
+#endif
// Segment features
signed char segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
#endif
int mb_index; // Index of the MB in the SB (0..3)
-
-#if CONFIG_NEWBESTREFMV
- int_mv ref_mv[MAX_MV_REFS];
-#endif
-
int q_index;
} MACROBLOCKD;
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
+#if CONFIG_NEW_MVREF
+/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
+#define COMPANDED_MVREF_THRESH 1000000
+#else
/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
#define COMPANDED_MVREF_THRESH 8
+#endif
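/* Illustrative sketch (not part of this change): COMPANDED_MVREF_THRESH is
 * assumed to gate high-precision 1/8-pel coding with a check of roughly this
 * shape, so the very large CONFIG_NEW_MVREF value effectively always allows
 * high precision. The helper name below is hypothetical:
 *
 *   static int mv_ref_allows_hp(const MV *ref) {
 *     return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
 *            (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
 *   }
 */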
/* Smooth or bias the mv-counts before prob computation */
/* #define SMOOTH_MV_COUNTS */
int *cntx = cnt;
enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV};
-#if CONFIG_NEWBESTREFMV
- int_mv *ref_mv = xd->ref_mv;
-#endif
-
/* Zero accumulators */
mv[0].as_int = mv[1].as_int = mv[2].as_int = 0;
cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;
-#if CONFIG_NEWBESTREFMV
- ref_mv[0].as_int = ref_mv[1].as_int
- = ref_mv[2].as_int
- = ref_mv[3].as_int
- = 0;
-#endif
/* Process above */
if (above->mbmi.ref_frame != INTRA_FRAME) {
mv->as_int = above->mbmi.mv[0].as_int;
mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame],
refframe, mv, ref_frame_sign_bias);
-#if CONFIG_NEWBESTREFMV
- ref_mv[0].as_int = mv->as_int;
-#endif
++cntx;
}
*cntx += 2;
this_mv.as_int = left->mbmi.mv[0].as_int;
mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame],
refframe, &this_mv, ref_frame_sign_bias);
-#if CONFIG_NEWBESTREFMV
- ref_mv[1].as_int = this_mv.as_int;
-#endif
+
if (this_mv.as_int != mv->as_int) {
++ mv;
mv->as_int = this_mv.as_int;
(lf_here->mbmi.ref_frame == LAST_FRAME && refframe == LAST_FRAME)) {
if (aboveleft->mbmi.mv[0].as_int) {
third = aboveleft;
-#if CONFIG_NEWBESTREFMV
- ref_mv[2].as_int = aboveleft->mbmi.mv[0].as_int;
- mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame],
- refframe, (ref_mv+2), ref_frame_sign_bias);
-#endif
} else if (lf_here->mbmi.mv[0].as_int) {
third = lf_here;
}
-#if CONFIG_NEWBESTREFMV
- if (lf_here->mbmi.mv[0].as_int) {
- ref_mv[3].as_int = lf_here->mbmi.mv[0].as_int;
- mv_bias(ref_frame_sign_bias[lf_here->mbmi.ref_frame],
- refframe, (ref_mv+3), ref_frame_sign_bias);
- }
-#endif
if (third) {
int_mv this_mv;
this_mv.as_int = third->mbmi.mv[0].as_int;
}
}
+    // Make sure all the candidates are properly clamped, etc.
+ for (i = 0; i < 4; ++i) {
+ lower_mv_precision(&sorted_mvs[i], xd->allow_high_precision_mv);
+ vp8_clamp_mv2(&sorted_mvs[i], xd);
+ }
+
// Set the best mv to the first entry in the sorted list
best_mv->as_int = sorted_mvs[0].as_int;
// Copy back the re-ordered mv list
vpx_memcpy(mvlist, sorted_mvs, sizeof(sorted_mvs));
- lower_mv_precision(best_mv, xd->allow_high_precision_mv);
-
- vp8_clamp_mv2(best_mv, xd);
}
#endif // CONFIG_NEWBESTREFMV
}
}
+#if CONFIG_NEW_MVREF
+int vp8_read_mv_ref_id(vp8_reader *r,
+ vp8_prob * ref_id_probs) {
+ int ref_index = 0;
+
+ if (vp8_read(r, ref_id_probs[0])) {
+ ref_index++;
+ if (vp8_read(r, ref_id_probs[1])) {
+ ref_index++;
+ if (vp8_read(r, ref_id_probs[2]))
+ ref_index++;
+ }
+ }
+ return ref_index;
+}
+#endif
+
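/* Note: vp8_read_mv_ref_id() above is the decode side of a truncated
 * unary code over three node probabilities:
 *   index 0 -> 0
 *   index 1 -> 1 0
 *   index 2 -> 1 1 0
 *   index 3 -> 1 1 1
 * The encoder's vp8_write_mv_ref_id() and vp8_cost_mv_ref_id() must
 * mirror this mapping exactly. */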
extern const int vp8_i8x8_block[4];
static void kfread_modes(VP8D_COMP *pbi,
MODE_INFO *m,
cm->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
} while (++i < VP8_YMODES - 1);
}
+
+#if CONFIG_NEW_MVREF
+  // Temporary default probabilities for coding the MV ref id signal.
+ vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
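+  // (These defaults must match the identical initialization on the
+  // encoder side in bitstream.c.)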
+#endif
+
read_nmvprobs(bc, nmvc, xd->allow_high_precision_mv);
}
}
xd->pre.u_buffer = cm->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cm->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
- // Update stats on relative distance of chosen vector to the
- // possible best reference vectors.
- {
- find_mv_refs(xd, mi, prev_mi,
- ref_frame, mbmi->ref_mvs[ref_frame],
- cm->ref_frame_sign_bias );
- }
+ find_mv_refs(xd, mi, prev_mi,
+ ref_frame, mbmi->ref_mvs[ref_frame],
+ cm->ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd,
xd->pre.y_buffer,
mbmi->second_ref_frame,
cm->ref_frame_sign_bias);
- // Update stats on relative distance of chosen vector to the
- // possible best reference vectors.
- {
- MV_REFERENCE_FRAME ref_frame = mbmi->second_ref_frame;
-
- find_mv_refs(xd, mi, prev_mi,
- ref_frame, mbmi->ref_mvs[ref_frame],
- cm->ref_frame_sign_bias );
- }
+ find_mv_refs(xd, mi, prev_mi,
+ mbmi->second_ref_frame,
+ mbmi->ref_mvs[mbmi->second_ref_frame],
+ cm->ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd,
xd->second_pre.y_buffer,
break;
case NEWMV:
+
+#if CONFIG_NEW_MVREF
+ {
+ int best_index;
+ MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame;
+
+      // Decode the index of the choice.
+ best_index =
+ vp8_read_mv_ref_id(bc, xd->mb_mv_ref_id_probs[ref_frame]);
+
+ best_mv.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
+ }
+#endif
+
read_nmv(bc, &mv->as_mv, &best_mv.as_mv, nmvc);
read_nmv_fp(bc, &mv->as_mv, &best_mv.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&mv->as_mv, &best_mv.as_mv, &cm->fc.NMVcount,
xd->allow_high_precision_mv);
+
mv->as_mv.row += best_mv.as_mv.row;
mv->as_mv.col += best_mv.as_mv.col;
mb_to_right_edge,
mb_to_top_edge,
mb_to_bottom_edge);
+
if (mbmi->second_ref_frame) {
+#if CONFIG_NEW_MVREF
+ {
+ int best_index;
+ MV_REFERENCE_FRAME ref_frame = mbmi->second_ref_frame;
+
+        // Decode the index of the choice.
+ best_index =
+ vp8_read_mv_ref_id(bc, xd->mb_mv_ref_id_probs[ref_frame]);
+ best_mv_second.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
+ }
+#endif
+
read_nmv(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc);
read_nmv_fp(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc,
xd->allow_high_precision_mv);
#include "vp8/common/pred_common.h"
#include "vp8/common/entropy.h"
#include "vp8/encoder/encodemv.h"
+#include "vp8/common/entropymv.h"
#if CONFIG_NEWBESTREFMV
#include "vp8/common/mvref_common.h"
return update_bits[delp] * 256;
}
-#if CONFIG_NEW_MVREF
-// Estimate the cost of each coding the vector using each reference candidate
-unsigned int pick_best_mv_ref( MACROBLOCK *x,
- int_mv target_mv,
- int_mv * mv_ref_list,
- int_mv * best_ref ) {
-
- int i;
- int best_index = 0;
- int cost, cost2;
- int index_cost[MAX_MV_REFS];
- MACROBLOCKD *xd = &x->e_mbd;
-
- /*unsigned int distance, distance2;
-
- distance = mv_distance(&target_mv, &mv_ref_list[0]);
-
- for (i = 1; i < MAX_MV_REFS; ++i ) {
- distance2 =
- mv_distance(&target_mv, &mv_ref_list[i]);
- if (distance2 < distance) {
- distance = distance2;
- best_index = i;
- }
- }*/
-
- // For now estimate the cost of selecting a given ref index
- // as index * 1 bits (but here 1 bit is scaled to 256)
- for (i = 0; i < MAX_MV_REFS; ++i ) {
- index_cost[i] = i << 8;
- }
- index_cost[0] = vp8_cost_zero(205);
- index_cost[1] = vp8_cost_zero(40);
- index_cost[2] = vp8_cost_zero(8);
- index_cost[3] = vp8_cost_zero(2);
-
- cost = index_cost[0] +
- vp8_mv_bit_cost(&target_mv,
- &mv_ref_list[0],
- XMVCOST, 96,
- xd->allow_high_precision_mv);
-
-
- //for (i = 1; i < MAX_MV_REFS; ++i ) {
- for (i = 1; i < 4; ++i ) {
- cost2 = index_cost[i] +
- vp8_mv_bit_cost(&target_mv,
- &mv_ref_list[i],
- XMVCOST, 96,
- xd->allow_high_precision_mv);
-
- if (cost2 < cost) {
- cost = cost2;
- best_index = i;
- }
- }
-
- (*best_ref).as_int = mv_ref_list[best_index].as_int;
-
- return best_index;
-}
-#endif
-
static void update_mode(
vp8_writer *const bc,
int n,
}
}
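+// Note: moved into bitstream.c (from rdopt.c) so that the counts feeding
+// the entropy probability updates reflect the reference actually signaled
+// in the packed bitstream; a last-minute change of motion reference would
+// otherwise leave the encoder and decoder out of step.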
+static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
+ MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
+ MV mv;
+
+ if (mbmi->mode == SPLITMV) {
+ int i;
+
+ for (i = 0; i < x->partition_info->count; i++) {
+ if (x->partition_info->bmi[i].mode == NEW4X4) {
+ if (x->e_mbd.allow_high_precision_mv) {
+ mv.row = (x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
+ &cpi->NMVcount, 1);
+ }
+ } else {
+ mv.row = (x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
+ &cpi->NMVcount, 0);
+ }
+ }
+ }
+ }
+ } else if (mbmi->mode == NEWMV) {
+ if (x->e_mbd.allow_high_precision_mv) {
+ mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ if (mbmi->second_ref_frame) {
+ mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ }
+ } else {
+ mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ if (mbmi->second_ref_frame) {
+ mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ }
+ }
+ }
+}
+
static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
}
vp8_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
}
+#if CONFIG_NEW_MVREF
+static int vp8_cost_mv_ref_id(vp8_prob * ref_id_probs, int mv_ref_id) {
+ int cost;
+
+  // Cost of coding the index for the MV reference.
+ switch (mv_ref_id) {
+ case 0:
+ cost = vp8_cost_zero(ref_id_probs[0]);
+ break;
+ case 1:
+ cost = vp8_cost_one(ref_id_probs[0]);
+ cost += vp8_cost_zero(ref_id_probs[1]);
+ break;
+ case 2:
+ cost = vp8_cost_one(ref_id_probs[0]);
+ cost += vp8_cost_one(ref_id_probs[1]);
+ cost += vp8_cost_zero(ref_id_probs[2]);
+ break;
+ case 3:
+ cost = vp8_cost_one(ref_id_probs[0]);
+ cost += vp8_cost_one(ref_id_probs[1]);
+ cost += vp8_cost_one(ref_id_probs[2]);
+ break;
+
+    // Trap: this should never happen.
+ default:
+ assert(0);
+ break;
+ }
+
+ return cost;
+}
+
+static void vp8_write_mv_ref_id(vp8_writer *w,
+ vp8_prob * ref_id_probs,
+ int mv_ref_id) {
+ // Encode the index for the MV reference.
+ switch (mv_ref_id) {
+ case 0:
+ vp8_write(w, 0, ref_id_probs[0]);
+ break;
+ case 1:
+ vp8_write(w, 1, ref_id_probs[0]);
+ vp8_write(w, 0, ref_id_probs[1]);
+ break;
+ case 2:
+ vp8_write(w, 1, ref_id_probs[0]);
+ vp8_write(w, 1, ref_id_probs[1]);
+ vp8_write(w, 0, ref_id_probs[2]);
+ break;
+ case 3:
+ vp8_write(w, 1, ref_id_probs[0]);
+ vp8_write(w, 1, ref_id_probs[1]);
+ vp8_write(w, 1, ref_id_probs[2]);
+ break;
+
+    // Trap: this should never happen.
+ default:
+ assert(0);
+ break;
+ }
+}
+
+// Estimate the cost of coding the vector against each reference candidate.
+static unsigned int pick_best_mv_ref(MACROBLOCK *x,
+ MV_REFERENCE_FRAME ref_frame,
+ int_mv target_mv,
+ int_mv * mv_ref_list,
+ int_mv * best_ref) {
+ int i;
+ int best_index = 0;
+ int cost, cost2;
+ int zero_seen = (mv_ref_list[0].as_int) ? FALSE : TRUE;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int max_mv = MV_MAX;
+
+ cost = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
+ vp8_mv_bit_cost(&target_mv,
+ &mv_ref_list[0],
+ XMVCOST, 96,
+ xd->allow_high_precision_mv);
+
+
+  // For now only consider the first 4 candidates (eventually: i < MAX_MV_REFS).
+ for (i = 1; i < 4; ++i) {
+ // If we see a 0,0 reference vector for a second time we have reached
+ // the end of the list of valid candidate vectors.
+ if (!mv_ref_list[i].as_int)
+ if (zero_seen)
+ break;
+ else
+ zero_seen = TRUE;
+
+    // Check for cases where the reference choice would give rise to an
+    // uncodeable / out-of-range residual for row or col.
+ if ((abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv) ||
+ (abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)) {
+ continue;
+ }
+
+ cost2 = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
+ vp8_mv_bit_cost(&target_mv,
+ &mv_ref_list[i],
+ XMVCOST, 96,
+ xd->allow_high_precision_mv);
+
+ if (cost2 < cost) {
+ cost = cost2;
+ best_index = i;
+ }
+ }
+
+ (*best_ref).as_int = mv_ref_list[best_index].as_int;
+
+ return best_index;
+}
+#endif
+
// This function writes the current macroblock's segment id to the bitstream.
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp8_writer *bc,
{
int_mv n1, n2;
+    // Only used for context at present; soon to be deprecated.
vp8_find_near_mvs(xd, m, prev_m, &n1, &n2, &best_mv, ct,
rf, cpi->common.ref_frame_sign_bias);
#if CONFIG_NEWBESTREFMV
- best_mv.as_int = mi->ref_mv.as_int;
+ best_mv.as_int = mi->ref_mvs[rf][0].as_int;
#endif
+
vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
#ifdef ENTROPY_STATS
(mode == NEWMV || mode == SPLITMV)) {
int_mv n1, n2;
- vp8_find_near_mvs(xd, m,
- prev_m,
+      // Only used for context at present; soon to be deprecated.
+ vp8_find_near_mvs(xd, m, prev_m,
&n1, &n2, &best_second_mv, ct,
mi->second_ref_frame,
cpi->common.ref_frame_sign_bias);
+
#if CONFIG_NEWBESTREFMV
- best_second_mv.as_int = mi->second_ref_mv.as_int;
+ best_second_mv.as_int =
+ mi->ref_mvs[mi->second_ref_frame][0].as_int;
#endif
}
active_section = 5;
#endif
-#if 0 //CONFIG_NEW_MVREF
+#if CONFIG_NEW_MVREF
{
unsigned int best_index;
- /*find_mv_refs(xd, m, prev_m,
- m->mbmi.ref_frame,
- mi->ref_mvs[rf],
- cpi->common.ref_frame_sign_bias );*/
- best_index = pick_best_mv_ref(x, mi->mv[0],
+ // Choose the best mv reference
+ best_index = pick_best_mv_ref(x, rf, mi->mv[0],
mi->ref_mvs[rf], &best_mv);
- cpi->best_ref_index_counts[best_index]++;
+
+ // Encode the index of the choice.
+ vp8_write_mv_ref_id(bc,
+ xd->mb_mv_ref_id_probs[rf], best_index);
+
+ cpi->best_ref_index_counts[rf][best_index]++;
}
#endif
+
write_nmv(bc, &mi->mv[0].as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
if (mi->second_ref_frame) {
-#if 0 //CONFIG_NEW_MVREF
+#if CONFIG_NEW_MVREF
unsigned int best_index;
-
- /*find_mv_refs(xd, m, prev_m,
- m->mbmi.second_ref_frame,
- mi->ref_mvs[mi->second_ref_frame],
- cpi->common.ref_frame_sign_bias );*/
+ MV_REFERENCE_FRAME sec_ref_frame = mi->second_ref_frame;
best_index =
- pick_best_mv_ref(x, mi->mv[1],
- mi->ref_mvs[mi->second_ref_frame],
+ pick_best_mv_ref(x, sec_ref_frame, mi->mv[1],
+ mi->ref_mvs[sec_ref_frame],
&best_second_mv);
- cpi->best_ref_index_counts[best_index]++;
+
+ // Encode the index of the choice.
+ vp8_write_mv_ref_id(bc,
+ xd->mb_mv_ref_id_probs[sec_ref_frame],
+ best_index);
+
+ cpi->best_ref_index_counts[sec_ref_frame][best_index]++;
#endif
write_nmv(bc, &mi->mv[1].as_mv, &best_second_mv,
(const nmv_context*) nmvc,
break;
}
}
+
+ // Update the mvcounts used to tune mv probs but only if this is
+ // the real pack run.
+  if (!cpi->dummy_packing) {
+ update_mvcount(cpi, x, &best_mv, &best_second_mv);
+ }
}
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
update_mbintra_mode_probs(cpi, &header_bc);
+#if CONFIG_NEW_MVREF
+  // Temporary default probabilities for encoding the MV ref id signal.
+ vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
+#endif
+
vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
}
PARTITION_INFO partition_info;
int_mv best_ref_mv;
int_mv second_best_ref_mv;
+#if CONFIG_NEWBESTREFMV || CONFIG_NEW_MVREF
+ int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REFS];
+#endif
int rate;
int distortion;
int64_t intra_error;
// Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
- rd_update_mvcount(cpi, x, &ctx->best_ref_mv, &ctx->second_best_ref_mv);
-
cpi->prediction_error += ctx->distortion;
cpi->intra_error += ctx->intra_error;
FILE *f = fopen("mv_ref_dist.stt", "a");
unsigned int i;
for (i = 0; i < MAX_MV_REFS; ++i) {
- fprintf(f, "%10d", cpi->best_ref_index_counts[i] );
+ fprintf(f, "%10d", cpi->best_ref_index_counts[0][i]);
}
fprintf(f, "\n" );
[VP8_SWITCHABLE_FILTERS];
#if CONFIG_NEW_MVREF
- unsigned int best_ref_index_counts[MAX_MV_REFS];
+ unsigned int best_ref_index_counts[MAX_REF_FRAMES][MAX_MV_REFS];
#endif
} VP8_COMP;
}
}
-void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
- MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
- MV mv;
-
- if (mbmi->mode == SPLITMV) {
- int i;
-
- for (i = 0; i < x->partition_info->count; i++) {
- if (x->partition_info->bmi[i].mode == NEW4X4) {
- if (x->e_mbd.allow_high_precision_mv) {
- mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
- mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
- &cpi->NMVcount, 1);
- }
- } else {
- mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
- mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
- &cpi->NMVcount, 0);
- }
- }
- }
- }
- } else if (mbmi->mode == NEWMV) {
- if (x->e_mbd.allow_high_precision_mv) {
- mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
- if (mbmi->second_ref_frame) {
- mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
- }
- } else {
- mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
- if (mbmi->second_ref_frame) {
- mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
- }
- }
- }
-}
-
static void set_i8x8_block_modes(MACROBLOCK *x, int modes[2][4]) {
int i;
MACROBLOCKD *xd = &x->e_mbd;
int recon_yoffset, int recon_uvoffset,
int_mv frame_nearest_mv[4], int_mv frame_near_mv[4],
int_mv frame_best_ref_mv[4],
-#if CONFIG_NEWBESTREFMV
- int_mv ref_mv[MAX_REF_FRAMES],
-#endif
int frame_mdcounts[4][4],
unsigned char *y_buffer[4], unsigned char *u_buffer[4],
unsigned char *v_buffer[4]) {
v_buffer[frame_type] = yv12->v_buffer + recon_uvoffset;
#if CONFIG_NEWBESTREFMV
- // Update stats on relative distance of chosen vector to the
- // possible best reference vectors.
- {
- find_mv_refs(xd, xd->mode_info_context,
- xd->prev_mode_info_context,
- frame_type,
- mbmi->ref_mvs[frame_type],
- cpi->common.ref_frame_sign_bias );
- }
+ find_mv_refs(xd, xd->mode_info_context,
+ xd->prev_mode_info_context,
+ frame_type,
+ mbmi->ref_mvs[frame_type],
+ cpi->common.ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd, y_buffer[frame_type],
yv12->y_stride,
&frame_best_ref_mv[frame_type],
&frame_nearest_mv[frame_type],
&frame_near_mv[frame_type]);
- ref_mv[frame_type].as_int = frame_best_ref_mv[frame_type].as_int;
#endif
}
#if CONFIG_PRED_FILTER
int best_filter_state;
#endif
-#if CONFIG_NEWBESTREFMV
- int_mv ref_mv[MAX_REF_FRAMES] = {{0}};
-#endif
-
int switchable_filter_index = 0;
MB_PREDICTION_MODE uv_intra_mode;
setup_buffer_inter(cpi, x, cpi->common.lst_fb_idx, LAST_FRAME,
recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
frame_mv[NEARMV], frame_best_ref_mv,
-#if CONFIG_NEWBESTREFMV
- ref_mv,
-#endif
frame_mdcounts, y_buffer, u_buffer, v_buffer);
}
setup_buffer_inter(cpi, x, cpi->common.gld_fb_idx, GOLDEN_FRAME,
recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
frame_mv[NEARMV], frame_best_ref_mv,
-#if CONFIG_NEWBESTREFMV
- ref_mv,
-#endif
frame_mdcounts, y_buffer, u_buffer, v_buffer);
}
setup_buffer_inter(cpi, x, cpi->common.alt_fb_idx, ALTREF_FRAME,
recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
frame_mv[NEARMV], frame_best_ref_mv,
-#if CONFIG_NEWBESTREFMV
- ref_mv,
-#endif
frame_mdcounts, y_buffer, u_buffer, v_buffer);
}
mbmi->ref_frame = vp8_mode_order[mode_index].ref_frame;
mbmi->second_ref_frame = vp8_mode_order[mode_index].second_ref_frame;
is_comp_pred = x->e_mbd.mode_info_context->mbmi.second_ref_frame != 0;
-#if CONFIG_NEWBESTREFMV
- mbmi->ref_mv = ref_mv[mbmi->ref_frame];
- mbmi->second_ref_mv = ref_mv[mbmi->second_ref_frame];
-#endif
#if CONFIG_PRED_FILTER
mbmi->pred_filter_enabled = 0;
#endif
// macroblock modes
vpx_memcpy(mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
-#if CONFIG_NEWBESTREFMV
- mbmi->ref_mv = ref_mv[best_mbmode.ref_frame];
- mbmi->second_ref_mv = ref_mv[best_mbmode.second_ref_frame];
-#endif
if (best_mbmode.mode == B_PRED) {
for (i = 0; i < 16; i++) {
xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
extern void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[]);
extern void vp8_init_me_luts();
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
-extern void rd_update_mvcount(VP8_COMP *cpi,
- MACROBLOCK *x,
- int_mv *best_ref_mv,
- int_mv *second_best_ref_mv);
-
#endif