int8_t angle_delta[2];
#endif // CONFIG_EXT_INTRA
- // TODO(slavarnway): Delete and use bmi[3].as_mv[] instead.
int_mv mv[2];
+ int_mv pred_mv[2];
} MB_MODE_INFO;
typedef struct MODE_INFO {
{ 10, 7, 6 }, // a/l both split
};
+#if CONFIG_REF_MV
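+// Default probabilities for the three binary decisions used to code an
+// inter mode under CONFIG_REF_MV: NEWMV vs. the rest, ZEROMV vs. the rest,
+// and NEARESTMV vs. NEARMV, each indexed by its own context.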
+static const vpx_prob default_newmv_prob[NEWMV_MODE_CONTEXTS] = {
+ 230, 190, 150, 110, 70, 30,
+};
+
+static const vpx_prob default_zeromv_prob[ZEROMV_MODE_CONTEXTS] = {
+ 192, 64,
+};
+
+static const vpx_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
+  180, 230, 128,
+};
+#endif
+
static const vpx_prob default_inter_mode_probs[INTER_MODE_CONTEXTS]
[INTER_MODES - 1] = {
{2, 173, 34}, // 0 = both zero mv
vp10_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
#endif
vp10_copy(fc->skip_probs, default_skip_probs);
+#if CONFIG_REF_MV
+ vp10_copy(fc->newmv_prob, default_newmv_prob);
+ vp10_copy(fc->zeromv_prob, default_zeromv_prob);
+ vp10_copy(fc->refmv_prob, default_refmv_prob);
+#endif
vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
#if CONFIG_EXT_TX
vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
fc->single_ref_prob[i][j] = mode_mv_merge_probs(
pre_fc->single_ref_prob[i][j], counts->single_ref[i][j]);
+#if CONFIG_REF_MV
+ for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
+ fc->newmv_prob[i] = mode_mv_merge_probs(pre_fc->newmv_prob[i],
+ counts->newmv_mode[i]);
+ for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
+ fc->zeromv_prob[i] = mode_mv_merge_probs(pre_fc->zeromv_prob[i],
+ counts->zeromv_mode[i]);
+ for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
+ fc->refmv_prob[i] = mode_mv_merge_probs(pre_fc->refmv_prob[i],
+ counts->refmv_mode[i]);
+#else
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
vpx_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i],
counts->inter_mode[i], fc->inter_mode_probs[i]);
+#endif
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i],
vp10_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
vpx_prob switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS]
[SWITCHABLE_FILTERS - 1];
+
+#if CONFIG_REF_MV
+ vpx_prob newmv_prob[NEWMV_MODE_CONTEXTS];
+ vpx_prob zeromv_prob[ZEROMV_MODE_CONTEXTS];
+ vpx_prob refmv_prob[REFMV_MODE_CONTEXTS];
+#endif
+
vpx_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
vpx_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
vpx_prob comp_inter_prob[COMP_INTER_CONTEXTS];
[COEF_BANDS][COEFF_CONTEXTS];
unsigned int switchable_interp[SWITCHABLE_FILTER_CONTEXTS]
[SWITCHABLE_FILTERS];
+#if CONFIG_REF_MV
+ unsigned int newmv_mode[NEWMV_MODE_CONTEXTS][2];
+ unsigned int zeromv_mode[ZEROMV_MODE_CONTEXTS][2];
+ unsigned int refmv_mode[REFMV_MODE_CONTEXTS][2];
+#endif
+
unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES];
unsigned int intra_inter[INTRA_INTER_CONTEXTS][2];
unsigned int comp_inter[COMP_INTER_CONTEXTS][2];
#define INTER_MODES (1 + NEWMV - NEARESTMV)
#define SKIP_CONTEXTS 3
+
+#if CONFIG_REF_MV
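+// The per-reference-frame mode context is packed into a single byte:
+// bits below ZEROMV_OFFSET hold the NEWMV context, the bits between
+// ZEROMV_OFFSET and REFMV_OFFSET hold the ZEROMV context, and the
+// remaining high bits hold the REFMV (NEARESTMV vs. NEARMV) context.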
+#define NEWMV_MODE_CONTEXTS 6
+#define ZEROMV_MODE_CONTEXTS 2
+#define REFMV_MODE_CONTEXTS 3
+
+#define ZEROMV_OFFSET 3
+#define REFMV_OFFSET 4
+
+#define NEWMV_CTX_MASK ((1 << ZEROMV_OFFSET) - 1)
+#define ZEROMV_CTX_MASK ((1 << (REFMV_OFFSET - ZEROMV_OFFSET)) - 1)
+#endif
+
#define INTER_MODE_CONTEXTS 7
/* Segment Feature Masks */
#include "vp10/common/mvref_common.h"
#if CONFIG_REF_MV
-static void scan_row_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- const int mi_row, const int mi_col, int block,
- const MV_REFERENCE_FRAME ref_frame,
- int row_offset,
- CANDIDATE_MV *ref_mv_stack,
- uint8_t *refmv_count) {
+static uint8_t scan_row_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+ const int mi_row, const int mi_col, int block,
+ const MV_REFERENCE_FRAME ref_frame,
+ int row_offset,
+ CANDIDATE_MV *ref_mv_stack,
+ uint8_t *refmv_count) {
const TileInfo *const tile = &xd->tile;
int i;
+ uint8_t newmv_count = 0;
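+  // Count how many of the stacked candidates were coded with NEWMV; the
+  // caller folds this count into the mode context.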
for (i = 0; i < xd->n8_w && *refmv_count < MAX_REF_MV_STACK_SIZE;) {
POSITION mi_pos;
ref_mv_stack[index].this_mv = this_refmv;
ref_mv_stack[index].weight = weight;
++(*refmv_count);
+
+ if (candidate->mode == NEWMV)
+ ++newmv_count;
}
}
}
++i;
}
}
+
+ return newmv_count;
}
-static void scan_col_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- const int mi_row, const int mi_col, int block,
- const MV_REFERENCE_FRAME ref_frame,
- int col_offset,
- CANDIDATE_MV *ref_mv_stack,
- uint8_t *refmv_count) {
+static uint8_t scan_col_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+ const int mi_row, const int mi_col, int block,
+ const MV_REFERENCE_FRAME ref_frame,
+ int col_offset,
+ CANDIDATE_MV *ref_mv_stack,
+ uint8_t *refmv_count) {
const TileInfo *const tile = &xd->tile;
int i;
+ uint8_t newmv_count = 0;
for (i = 0; i < xd->n8_h && *refmv_count < MAX_REF_MV_STACK_SIZE;) {
POSITION mi_pos;
ref_mv_stack[index].this_mv = this_refmv;
ref_mv_stack[index].weight = weight;
++(*refmv_count);
+
+ if (candidate->mode == NEWMV)
+ ++newmv_count;
}
}
}
-
i += len;
} else {
++i;
}
}
+
+ return newmv_count;
}
-static void scan_blk_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- const int mi_row, const int mi_col, int block,
- const MV_REFERENCE_FRAME ref_frame,
- int row_offset, int col_offset,
- CANDIDATE_MV *ref_mv_stack,
- uint8_t *refmv_count) {
+static uint8_t scan_blk_mbmi(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+ const int mi_row, const int mi_col, int block,
+ const MV_REFERENCE_FRAME ref_frame,
+ int row_offset, int col_offset,
+ CANDIDATE_MV *ref_mv_stack,
+ uint8_t *refmv_count) {
const TileInfo *const tile = &xd->tile;
POSITION mi_pos;
+ uint8_t newmv_count = 0;
mi_pos.row = row_offset;
mi_pos.col = col_offset;
ref_mv_stack[index].this_mv = this_refmv;
ref_mv_stack[index].weight = weight;
++(*refmv_count);
+
+ if (candidate->mode == NEWMV)
+ ++newmv_count;
}
if (candidate_mi->mbmi.sb_type < BLOCK_8X8 && block >= 0) {
}
}
}  // Analyze the motion information of a single 8x8 block.
+ return newmv_count;
}
static int has_top_right(const MACROBLOCKD *xd,
int block, int mi_row, int mi_col,
uint8_t *mode_context) {
int idx, nearest_refmv_count = 0;
+ uint8_t newmv_count = 0;
CANDIDATE_MV tmp_mv;
int len, nr_len;
- const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ?
+ const MV_REF *const prev_frame_mvs_base = cm->use_prev_frame_mvs ?
cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL;
int bs = VPXMAX(xd->n8_w, xd->n8_h);
int has_tr = has_top_right(xd, mi_row, mi_col, bs);
- (void) mode_context;
-
*refmv_count = 0;
// Scan the first above row mode info.
- scan_row_mbmi(cm, xd, mi_row, mi_col, block, ref_frame,
- -1, ref_mv_stack, refmv_count);
+ newmv_count = scan_row_mbmi(cm, xd, mi_row, mi_col, block, ref_frame,
+ -1, ref_mv_stack, refmv_count);
// Scan the first left column mode info.
- scan_col_mbmi(cm, xd, mi_row, mi_col, block, ref_frame,
- -1, ref_mv_stack, refmv_count);
+ newmv_count += scan_col_mbmi(cm, xd, mi_row, mi_col, block, ref_frame,
+ -1, ref_mv_stack, refmv_count);
// Check top-right boundary
if (has_tr)
- scan_blk_mbmi(cm, xd, mi_row, mi_col, block, ref_frame,
- -1, 1, ref_mv_stack, refmv_count);
+ newmv_count += scan_blk_mbmi(cm, xd, mi_row, mi_col, block, ref_frame,
+ -1, 1, ref_mv_stack, refmv_count);
nearest_refmv_count = *refmv_count;
- if (prev_frame_mvs && cm->show_frame && cm->last_show_frame) {
- int ref;
- for (ref = 0; ref < 2; ++ref) {
- if (prev_frame_mvs->ref_frame[ref] == ref_frame) {
- for (idx = 0; idx < nearest_refmv_count; ++idx)
- if (prev_frame_mvs->mv[ref].as_int ==
- ref_mv_stack[idx].this_mv.as_int)
- break;
+ mode_context[ref_frame] = 0;
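+  // Derive the NEWMV and REFMV portions of the mode context from how many
+  // nearest spatial candidates were found and how many of them used NEWMV.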
+ switch (nearest_refmv_count) {
+ case 0:
+ mode_context[ref_frame] = 0;
+ break;
- if (idx == nearest_refmv_count &&
- nearest_refmv_count < MAX_REF_MV_STACK_SIZE) {
- ref_mv_stack[idx].this_mv.as_int = prev_frame_mvs->mv[ref].as_int;
- ref_mv_stack[idx].weight = 1;
- ++(*refmv_count);
- ++nearest_refmv_count;
+ case 1:
+ mode_context[ref_frame] = (newmv_count > 0) ? 1 : 2;
+ mode_context[ref_frame] += (1 << REFMV_OFFSET);
+ break;
+
+ case 2:
+ default:
+ if (newmv_count >= 2)
+ mode_context[ref_frame] = 3;
+ else if (newmv_count == 1)
+ mode_context[ref_frame] = 4;
+ else
+ mode_context[ref_frame] = 5;
+ mode_context[ref_frame] += (2 << REFMV_OFFSET);
+ break;
+ }
+
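+  // Scan the co-located units in the previous frame and merge their motion
+  // vectors into the candidate stack.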
+ if (prev_frame_mvs_base && cm->show_frame && cm->last_show_frame) {
+ int ref;
+ int blk_row, blk_col;
+
+ for (blk_row = 0; blk_row < xd->n8_h; ++blk_row) {
+ for (blk_col = 0; blk_col < xd->n8_w; ++blk_col) {
+ const MV_REF *prev_frame_mvs =
+ prev_frame_mvs_base + blk_row * cm->mi_cols + blk_col;
+
+ POSITION mi_pos;
+ mi_pos.row = blk_row;
+ mi_pos.col = blk_col;
+
+ if (!is_inside(&xd->tile, mi_col, mi_row, cm->mi_rows, &mi_pos))
+ continue;
+
+ for (ref = 0; ref < 2; ++ref) {
+ if (prev_frame_mvs->ref_frame[ref] == ref_frame) {
+ for (idx = 0; idx < *refmv_count; ++idx)
+ if (prev_frame_mvs->mv[ref].as_int ==
+ ref_mv_stack[idx].this_mv.as_int)
+ break;
+
+ if (idx < *refmv_count)
+ ref_mv_stack[idx].weight += 1;
+
+ if (idx == *refmv_count &&
+ *refmv_count < MAX_REF_MV_STACK_SIZE) {
+ ref_mv_stack[idx].this_mv.as_int = prev_frame_mvs->mv[ref].as_int;
+ ref_mv_stack[idx].weight = 1;
+ ++(*refmv_count);
+
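+            // Motion vectors are in 1/8-pel units, so this flags a temporal
+            // candidate with at least one full-pel component in the ZEROMV
+            // portion of the mode context.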
+ if (abs(ref_mv_stack[idx].this_mv.as_mv.row) >= 8 ||
+ abs(ref_mv_stack[idx].this_mv.as_mv.col) >= 8)
+ mode_context[ref_frame] |= (1 << ZEROMV_OFFSET);
+ }
+ }
}
}
}
}
+ if (*refmv_count == nearest_refmv_count)
+ mode_context[ref_frame] |= (1 << ZEROMV_OFFSET);
+
// Analyze the top-left corner block mode info.
// scan_blk_mbmi(cm, xd, mi_row, mi_col, block, ref_frame,
// -1, -1, ref_mv_stack, refmv_count);
find_mv_refs_idx(cm, xd, mi, mi->mbmi.ref_frame[ref], mv_list, block,
mi_row, mi_col, NULL, NULL, NULL);
-
#if CONFIG_REF_MV
scan_blk_mbmi(cm, xd, mi_row, mi_col, block, mi->mbmi.ref_frame[ref],
-1, 0, ref_mv_stack, &ref_mv_count);
for (j = 0; j < SWITCHABLE_FILTERS; j++)
cm->counts.switchable_interp[i][j] += counts->switchable_interp[i][j];
+#if CONFIG_REF_MV
+ for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
+ for (j = 0; j < 2; ++j)
+ cm->counts.newmv_mode[i][j] += counts->newmv_mode[i][j];
+
+ for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
+ for (j = 0; j < 2; ++j)
+ cm->counts.zeromv_mode[i][j] += counts->zeromv_mode[i][j];
+
+ for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
+ for (j = 0; j < 2; ++j)
+ cm->counts.refmv_mode[i][j] += counts->refmv_mode[i][j];
+#endif
+
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
for (j = 0; j < INTER_MODES; j++)
cm->counts.inter_mode[i][j] += counts->inter_mode[i][j];
}
static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
- int i, j;
+ int i;
+#if CONFIG_REF_MV
+ for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
+ vp10_diff_update_prob(r, &fc->newmv_prob[i]);
+ for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
+ vp10_diff_update_prob(r, &fc->zeromv_prob[i]);
+ for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
+ vp10_diff_update_prob(r, &fc->refmv_prob[i]);
+#else
+ int j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
for (j = 0; j < INTER_MODES - 1; ++j)
vp10_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
+#endif
}
static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
}
static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
- vpx_reader *r, int ctx) {
+ vpx_reader *r, uint8_t ctx) {
+#if CONFIG_REF_MV
+ FRAME_COUNTS *counts = xd->counts;
+ uint8_t mode_ctx = ctx & NEWMV_CTX_MASK;
+ vpx_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
+
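+  // The inter mode is read as a cascade of binary decisions: NEWMV first,
+  // then ZEROMV, then NEARESTMV vs. NEARMV, each with its own context.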
+ if (vpx_read(r, mode_prob) == 0) {
+ if (counts)
+ ++counts->newmv_mode[mode_ctx][0];
+ return NEWMV;
+ }
+ if (counts)
+ ++counts->newmv_mode[mode_ctx][1];
+
+ mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+
+  assert(mode_ctx <= 1);
+
+ mode_prob = cm->fc->zeromv_prob[mode_ctx];
+ if (vpx_read(r, mode_prob) == 0) {
+ if (counts)
+ ++counts->zeromv_mode[mode_ctx][0];
+ return ZEROMV;
+ }
+ if (counts)
+ ++counts->zeromv_mode[mode_ctx][1];
+
+ mode_ctx = (ctx >> REFMV_OFFSET);
+ mode_prob = cm->fc->refmv_prob[mode_ctx];
+ if (vpx_read(r, mode_prob) == 0) {
+ if (counts)
+ ++counts->refmv_mode[mode_ctx][0];
+ return NEARESTMV;
+ } else {
+ if (counts)
+ ++counts->refmv_mode[mode_ctx][1];
+ return NEARMV;
+ }
+
+ // Invalid prediction mode.
+ assert(0);
+#else
const int mode = vpx_read_tree(r, vp10_inter_mode_tree,
cm->fc->inter_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
++counts->inter_mode[ctx][mode];
return NEARESTMV + mode;
+#endif
}
static int read_segment_id(vpx_reader *r,
#endif // CONFIG_EXT_INTERP && SWITCHABLE_FILTERS == 4
static const struct vp10_token partition_encodings[PARTITION_TYPES] =
{{0, 1}, {2, 2}, {6, 3}, {7, 3}};
+#if !CONFIG_REF_MV
static const struct vp10_token inter_mode_encodings[INTER_MODES] =
{{2, 2}, {6, 3}, {0, 1}, {7, 3}};
+#endif
static const struct vp10_token palette_size_encodings[] = {
{0, 1}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, {62, 6}, {63, 6},
};
vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}
-static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode,
- const vpx_prob *probs) {
+static void write_inter_mode(VP10_COMMON *cm,
+ vpx_writer *w, PREDICTION_MODE mode,
+ const uint8_t mode_ctx) {
+#if CONFIG_REF_MV
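+  // Mirror the decoder's cascade: signal NEWMV, then ZEROMV, then
+  // NEARESTMV vs. NEARMV, each against its own context-derived probability.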
+ const uint8_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
+ const vpx_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
+ vpx_write(w, mode != NEWMV, newmv_prob);
+
+ if (mode != NEWMV) {
+ const uint8_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+ const vpx_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
+ vpx_write(w, mode != ZEROMV, zeromv_prob);
+
+ if (mode != ZEROMV) {
+ const uint8_t refmv_ctx = (mode_ctx >> REFMV_OFFSET);
+ const vpx_prob refmv_prob = cm->fc->refmv_prob[refmv_ctx];
+ vpx_write(w, mode != NEARESTMV, refmv_prob);
+ }
+ }
+#else
+ const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
assert(is_inter_mode(mode));
- vp10_write_token(w, vp10_inter_mode_tree, probs,
+ vp10_write_token(w, vp10_inter_mode_tree, inter_probs,
&inter_mode_encodings[INTER_OFFSET(mode)]);
+#endif
}
static void encode_unsigned_max(struct vpx_write_bit_buffer *wb,
}
}
+#if CONFIG_REF_MV
+static void update_inter_mode_probs(VP10_COMMON *cm, vpx_writer *w,
+ FRAME_COUNTS *counts) {
+ int i;
+ for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
+ vp10_cond_prob_diff_update(w, &cm->fc->newmv_prob[i],
+ counts->newmv_mode[i]);
+ for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
+ vp10_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
+ counts->zeromv_mode[i]);
+ for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
+ vp10_cond_prob_diff_update(w, &cm->fc->refmv_prob[i],
+ counts->refmv_mode[i]);
+}
+#endif
+
static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
int segment_id, const MODE_INFO *mi, vpx_writer *w) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
#endif // CONFIG_EXT_INTRA
} else {
const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
- const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
write_ref_frames(cm, xd, w);
// If segment skip is not enabled code the mode.
if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
if (bsize >= BLOCK_8X8) {
- write_inter_mode(w, mode, inter_probs);
+ write_inter_mode(cm, w, mode, mode_ctx);
}
}
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int j = idy * 2 + idx;
const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
- write_inter_mode(w, b_mode, inter_probs);
+ write_inter_mode(cm, w, b_mode, mode_ctx);
if (b_mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j],
counts->kf_y_mode[i][j], INTRA_MODES, &header_bc);
} else {
+#if CONFIG_REF_MV
+ update_inter_mode_probs(cm, &header_bc, counts);
+#else
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
counts->inter_mode[i], INTER_MODES, &header_bc);
+#endif
if (cm->interp_filter == SWITCHABLE)
update_switchable_interp_probs(cm, &header_bc, counts);
ctx->dist = rd_cost->dist;
}
+#if CONFIG_REF_MV
+static void update_inter_mode_stats(FRAME_COUNTS *counts,
+ PREDICTION_MODE mode,
+ uint8_t mode_context) {
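+  // Accumulate counts along the same binary decision path used to code the
+  // mode, so that probability adaptation matches the bitstream.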
+ uint8_t mode_ctx = mode_context & NEWMV_CTX_MASK;
+ if (mode == NEWMV) {
+ ++counts->newmv_mode[mode_ctx][0];
+ return;
+ } else {
+ ++counts->newmv_mode[mode_ctx][1];
+ mode_ctx = (mode_context >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+ if (mode == ZEROMV) {
+ ++counts->zeromv_mode[mode_ctx][0];
+ return;
+ } else {
+ ++counts->zeromv_mode[mode_ctx][1];
+ mode_ctx = (mode_context >> REFMV_OFFSET);
+ ++counts->refmv_mode[mode_ctx][mode != NEARESTMV];
+ }
+ }
+}
+#endif
+
static void update_stats(VP10_COMMON *cm, ThreadData *td) {
const MACROBLOCK *x = &td->mb;
const MACROBLOCKD *const xd = &x->e_mbd;
const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
if (bsize >= BLOCK_8X8) {
const PREDICTION_MODE mode = mbmi->mode;
+#if CONFIG_REF_MV
+ update_inter_mode_stats(counts, mode, mode_ctx);
+#else
++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
+#endif
} else {
const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int j = idy * 2 + idx;
const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
+#if CONFIG_REF_MV
+ update_inter_mode_stats(counts, b_mode, mode_ctx);
+#else
++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
+#endif
}
}
}
search_site_config ss_cfg;
int mbmode_cost[INTRA_MODES];
+#if CONFIG_REF_MV
+ int newmv_mode_cost[NEWMV_MODE_CONTEXTS][2];
+ int zeromv_mode_cost[ZEROMV_MODE_CONTEXTS][2];
+ int refmv_mode_cost[REFMV_MODE_CONTEXTS][2];
+#endif
+
unsigned int inter_mode_cost[INTER_MODE_CONTEXTS][INTER_MODES];
int intra_uv_mode_cost[INTRA_MODES][INTRA_MODES];
int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES];
cm->allow_high_precision_mv ? x->nmvcost_hp
: x->nmvcost,
&cm->fc->nmvc, cm->allow_high_precision_mv);
+#if CONFIG_REF_MV
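+  // Convert the binary mode probabilities into bit costs for the RD mode
+  // search (see cost_mv_ref()).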
+ for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i) {
+ cpi->newmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->newmv_prob[i], 0);
+ cpi->newmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->newmv_prob[i], 1);
+ }
+
+ for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i) {
+ cpi->zeromv_mode_cost[i][0] = vp10_cost_bit(cm->fc->zeromv_prob[i], 0);
+ cpi->zeromv_mode_cost[i][1] = vp10_cost_bit(cm->fc->zeromv_prob[i], 1);
+ }
+ for (i = 0; i < REFMV_MODE_CONTEXTS; ++i) {
+ cpi->refmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->refmv_prob[i], 0);
+ cpi->refmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->refmv_prob[i], 1);
+ }
+#else
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
+#endif
}
}
}
static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
- int mode_context) {
+ uint8_t mode_context) {
+#if CONFIG_REF_MV
+ int mode_cost = 0;
+ uint8_t mode_ctx = mode_context & NEWMV_CTX_MASK;
+
+ assert(is_inter_mode(mode));
+
+ if (mode == NEWMV) {
+ mode_cost = cpi->newmv_mode_cost[mode_ctx][0];
+ return mode_cost;
+ } else {
+ mode_cost = cpi->newmv_mode_cost[mode_ctx][1];
+ mode_ctx = (mode_context >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+ if (mode == ZEROMV) {
+ mode_cost += cpi->zeromv_mode_cost[mode_ctx][0];
+ return mode_cost;
+ } else {
+ mode_cost += cpi->zeromv_mode_cost[mode_ctx][1];
+ mode_ctx = (mode_context >> REFMV_OFFSET);
+ mode_cost += cpi->refmv_mode_cost[mode_ctx][mode != NEARESTMV];
+ return mode_cost;
+ }
+ }
+#else
assert(is_inter_mode(mode));
return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
+#endif
}
static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,