// Select the motion-vector cost tables matching the frame's MV precision.
// When high-precision (1/8-pel) MVs are allowed, point the macroblock's
// cost pointers at the high-precision tables; otherwise use the standard
// (1/4-pel) tables. Must be called whenever allow_high_precision_mv changes.
void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) {
  MACROBLOCK *const mb = &cpi->td.mb;
  cpi->common.allow_high_precision_mv = allow_high_precision_mv;

#if CONFIG_REF_MV
  // One cost table per nmv context; the SAD cost pointer is shared across
  // contexts, so it is set once outside the loop.
  if (cpi->common.allow_high_precision_mv) {
    int i;
    mb->mvsadcost = mb->nmvsadcost_hp;
    for (i = 0; i < NMV_CONTEXTS; ++i) {
      mb->mv_cost_stack[i] = mb->nmvcost_hp[i];
    }
  } else {
    int i;
    mb->mvsadcost = mb->nmvsadcost;
    for (i = 0; i < NMV_CONTEXTS; ++i) {
      mb->mv_cost_stack[i] = mb->nmvcost[i];
    }
  }
#else
  if (cpi->common.allow_high_precision_mv) {
    mb->mvcost = mb->nmvcost_hp;
    mb->mvsadcost = mb->nmvsadcost_hp;
  } else {
    // Fix: the original was missing this `else`, so the standard-precision
    // assignments unconditionally overwrote the high-precision ones inside
    // the `if`, and nothing was assigned when HP MVs were disallowed.
    mb->mvcost = mb->nmvcost;
    mb->mvsadcost = mb->nmvsadcost;
  }
#endif
}
static void setup_frame(VP10_COMP *cpi) {
static void dealloc_compressor_data(VP10_COMP *cpi) {
VP10_COMMON *const cm = &cpi->common;
+#if CONFIG_REF_MV
+ int i;
+#endif
vpx_free(cpi->mbmi_ext_base);
cpi->mbmi_ext_base = NULL;
vpx_free(cpi->coding_context.last_frame_seg_map_copy);
cpi->coding_context.last_frame_seg_map_copy = NULL;
+#if CONFIG_REF_MV
+ for (i = 0; i < NMV_CONTEXTS; ++i) {
+ vpx_free(cpi->nmv_costs[i][0]);
+ vpx_free(cpi->nmv_costs[i][1]);
+ vpx_free(cpi->nmv_costs_hp[i][0]);
+ vpx_free(cpi->nmv_costs_hp[i][1]);
+ cpi->nmv_costs[i][0] = NULL;
+ cpi->nmv_costs[i][1] = NULL;
+ cpi->nmv_costs_hp[i][0] = NULL;
+ cpi->nmv_costs_hp[i][1] = NULL;
+ }
+#endif
+
vpx_free(cpi->nmvcosts[0]);
vpx_free(cpi->nmvcosts[1]);
cpi->nmvcosts[0] = NULL;
static void save_coding_context(VP10_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
VP10_COMMON *cm = &cpi->common;
+#if CONFIG_REF_MV
+ int i;
+#endif
// Stores a snapshot of key state variables which can subsequently be
// restored with a call to vp10_restore_coding_context. These functions are
// intended for use in a re-code loop in vp10_compress_frame where the
// quantizer value is adjusted between loop iterations.
+#if CONFIG_REF_MV
+ for (i = 0; i < NMV_CONTEXTS; ++i) {
+ vp10_copy(cc->nmv_vec_cost[i], cpi->td.mb.nmv_vec_cost[i]);
+ memcpy(cc->nmv_costs[i][0], cpi->nmv_costs[i][0],
+ MV_VALS * sizeof(*cpi->nmv_costs[i][0]));
+ memcpy(cc->nmv_costs[i][1], cpi->nmv_costs[i][1],
+ MV_VALS * sizeof(*cpi->nmv_costs[i][1]));
+ memcpy(cc->nmv_costs_hp[i][0], cpi->nmv_costs_hp[i][0],
+ MV_VALS * sizeof(*cpi->nmv_costs_hp[i][0]));
+ memcpy(cc->nmv_costs_hp[i][1], cpi->nmv_costs_hp[i][1],
+ MV_VALS * sizeof(*cpi->nmv_costs_hp[i][1]));
+ }
+#else
vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
+#endif
memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
MV_VALS * sizeof(*cpi->nmvcosts[0]));
static void restore_coding_context(VP10_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
VP10_COMMON *cm = &cpi->common;
+#if CONFIG_REF_MV
+ int i;
+#endif
// Restore key state variables to the snapshot state stored in the
// previous call to vp10_save_coding_context.
+#if CONFIG_REF_MV
+ for (i = 0; i < NMV_CONTEXTS; ++i) {
+ vp10_copy(cpi->td.mb.nmv_vec_cost[i], cc->nmv_vec_cost[i]);
+ memcpy(cpi->nmv_costs[i][0], cc->nmv_costs[i][0],
+ MV_VALS * sizeof(*cc->nmv_costs[i][0]));
+ memcpy(cpi->nmv_costs[i][1], cc->nmv_costs[i][1],
+ MV_VALS * sizeof(*cc->nmv_costs[i][1]));
+ memcpy(cpi->nmv_costs_hp[i][0], cc->nmv_costs_hp[i][0],
+ MV_VALS * sizeof(*cc->nmv_costs_hp[i][0]));
+ memcpy(cpi->nmv_costs_hp[i][1], cc->nmv_costs_hp[i][1],
+ MV_VALS * sizeof(*cc->nmv_costs_hp[i][1]));
+ }
+#else
vp10_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
+#endif
memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
realloc_segmentation_maps(cpi);
+#if CONFIG_REF_MV
+ for (i = 0; i < NMV_CONTEXTS; ++i) {
+ CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][0],
+ vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][0])));
+ CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][1],
+ vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][1])));
+ CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][0],
+ vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][0])));
+ CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][1],
+ vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][1])));
+ }
+#endif
+
CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
CHECK_MEM_ERROR(cm, cpi->nmvcosts[1],
cpi->first_time_stamp_ever = INT64_MAX;
cal_nmvjointsadcost(cpi->td.mb.nmvjointsadcost);
+#if CONFIG_REF_MV
+ for (i = 0; i < NMV_CONTEXTS; ++i) {
+ cpi->td.mb.nmvcost[i][0] = &cpi->nmv_costs[i][0][MV_MAX];
+ cpi->td.mb.nmvcost[i][1] = &cpi->nmv_costs[i][1][MV_MAX];
+ cpi->td.mb.nmvcost_hp[i][0] = &cpi->nmv_costs_hp[i][0][MV_MAX];
+ cpi->td.mb.nmvcost_hp[i][1] = &cpi->nmv_costs_hp[i][1][MV_MAX];
+ }
+#else
cpi->td.mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX];
cpi->td.mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX];
+ cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX];
+ cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX];
+#endif
cpi->td.mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX];
cpi->td.mb.nmvsadcost[1] = &cpi->nmvsadcosts[1][MV_MAX];
cal_nmvsadcosts(cpi->td.mb.nmvsadcost);
- cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX];
- cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX];
cpi->td.mb.nmvsadcost_hp[0] = &cpi->nmvsadcosts_hp[0][MV_MAX];
cpi->td.mb.nmvsadcost_hp[1] = &cpi->nmvsadcosts_hp[1][MV_MAX];
cal_nmvsadcosts_hp(cpi->td.mb.nmvsadcost_hp);
}
}
#if CONFIG_REF_MV
// Install the MV cost tables for the nmv context implied by the given
// reference frame's candidate MV stack on this macroblock.
void vp10_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame) {
  MB_MODE_INFO_EXT *const ext = x->mbmi_ext;
  const int ctx = vp10_nmv_ctx(ext->ref_mv_count[ref_frame],
                               ext->ref_mv_stack[ref_frame]);
  x->mvcost = x->mv_cost_stack[ctx];
  x->nmvjointcost = x->nmv_vec_cost[ctx];
}
#endif
+
void vp10_initialize_rd_consts(VP10_COMP *cpi) {
VP10_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->td.mb;
if (!frame_is_intra_only(cm)) {
#if CONFIG_REF_MV
- int nmv_ctx = 0;
- vp10_build_nmv_cost_table(x->nmvjointcost,
- cm->allow_high_precision_mv ? x->nmvcost_hp
- : x->nmvcost,
- &cm->fc->nmvc[nmv_ctx],
- cm->allow_high_precision_mv);
+ int nmv_ctx;
+ for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
+ vp10_build_nmv_cost_table(x->nmv_vec_cost[nmv_ctx],
+ cm->allow_high_precision_mv ?
+ x->nmvcost_hp[nmv_ctx] : x->nmvcost[nmv_ctx],
+ &cm->fc->nmvc[nmv_ctx],
+ cm->allow_high_precision_mv);
+ }
+ x->mvcost = x->mv_cost_stack[0];
+ x->nmvjointcost = x->nmv_vec_cost[0];
#else
vp10_build_nmv_cost_table(x->nmvjointcost,
cm->allow_high_precision_mv ? x->nmvcost_hp