#if CONFIG_REF_MV
uint8_t ref_mv_count[MAX_REF_FRAMES];
CANDIDATE_MV ref_mv_stack[MAX_REF_FRAMES][MAX_REF_MV_STACK_SIZE];
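+ // Nonzero for the second (right or bottom) block of a rectangular partition.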
+ uint8_t is_sec_rect;
#endif
#if CONFIG_VP9_HIGHBITDEPTH
};
static const vpx_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
- 220, 220, 200, 200, 180, 128,
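+ // The two appended entries (contexts 6 and 7) are the dedicated contexts
+ // assigned when a rectangular-partition neighbor duplicates the NEARESTMV /
+ // NEARMV candidate (see handle_sec_rect_block()).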
+ 220, 220, 200, 200, 180, 128, 1, 250,
};
#endif
#if CONFIG_REF_MV
#define NEWMV_MODE_CONTEXTS 7
#define ZEROMV_MODE_CONTEXTS 2
-#define REFMV_MODE_CONTEXTS 6
+#define REFMV_MODE_CONTEXTS 8
#define ZEROMV_OFFSET 3
#define REFMV_OFFSET 4
#define NEWMV_CTX_MASK ((1 << ZEROMV_OFFSET) - 1)
#define ZEROMV_CTX_MASK ((1 << (REFMV_OFFSET - ZEROMV_OFFSET)) - 1)
+#define REFMV_CTX_MASK ((1 << (8 - REFMV_OFFSET)) - 1)
+
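+// Flag bits stored above the packed NEWMV/ZEROMV/REFMV contexts in the
+// 16-bit mode context.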
+#define ALL_ZERO_FLAG_OFFSET 8
+#define SKIP_NEARESTMV_OFFSET 9
+#define SKIP_NEARMV_OFFSET 10
#endif
#define INTER_MODE_CONTEXTS 7
static int has_top_right(const MACROBLOCKD *xd,
int mi_row, int mi_col, int bs) {
- int is_second_rect = 0;
int has_tr = !((mi_row & bs) & (bs * 2 - 1)) ||
!((mi_col & bs) & (bs * 2 - 1));
if ((mi_row & 0x07) > 0)
has_tr = 0;
- if (xd->n8_w < xd->n8_h) {
- if (mi_col & (xd->n8_h - 1))
- is_second_rect = 1;
-
- if (!is_second_rect)
+ if (xd->n8_w < xd->n8_h)
+ if (!xd->is_sec_rect)
has_tr = 1;
- }
- if (xd->n8_w > xd->n8_h) {
- if (mi_row & (xd->n8_w - 1))
- is_second_rect = 1;
-
- if (is_second_rect)
+ if (xd->n8_w > xd->n8_h)
+ if (xd->is_sec_rect)
has_tr = 0;
- }
+
return has_tr;
}
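+// For the second block of a rectangular partition, check whether the neighbor
+// on the shared edge already supplies one of the top two entries of the
+// reference MV stack. If so, flag the duplicated NEARESTMV / NEARMV candidate
+// and redirect the REFMV context to a dedicated value (6 or 7).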
+static void handle_sec_rect_block(const MB_MODE_INFO * const candidate,
+ uint8_t *refmv_count,
+ CANDIDATE_MV *ref_mv_stack,
+ MV_REFERENCE_FRAME ref_frame,
+ int16_t *mode_context) {
+ int rf, idx;
+
+ for (rf = 0; rf < 2; ++rf) {
+ if (candidate->ref_frame[rf] == ref_frame) {
+ const int list_range = VPXMIN(*refmv_count, MAX_MV_REF_CANDIDATES);
+
+ const int_mv pred_mv = candidate->mv[rf];
+ for (idx = 0; idx < list_range; ++idx)
+ if (pred_mv.as_int == ref_mv_stack[idx].this_mv.as_int)
+ break;
+
+ if (idx < list_range) {
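+ // The neighbor's MV matches stack entry idx: clear the current REFMV
+ // context bits before installing the dedicated context.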
+ mode_context[ref_frame] &= ~(0x0f << REFMV_OFFSET);
+
+ if (idx == 0) {
+ mode_context[ref_frame] |= (1 << SKIP_NEARESTMV_OFFSET);
+ mode_context[ref_frame] |= (6 << REFMV_OFFSET);
+ } else if (idx == 1) {
+ mode_context[ref_frame] |= (1 << SKIP_NEARMV_OFFSET);
+ mode_context[ref_frame] |= (7 << REFMV_OFFSET);
+ }
+ }
+ }
+ }
+}
+
static void setup_ref_mv_list(const VP10_COMMON *cm, const MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref_frame,
uint8_t *refmv_count,
CANDIDATE_MV *ref_mv_stack,
int_mv *mv_ref_list,
int block, int mi_row, int mi_col,
- uint8_t *mode_context) {
+ int16_t *mode_context) {
int idx, nearest_refmv_count = 0;
uint8_t newmv_count = 0;
len = nr_len;
}
+ // TODO(jingning): Clean-up needed.
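+ // Second block of a rectangular partition: the relevant neighbor covers
+ // the shared edge (left for tall blocks, above for wide ones).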
+ if (xd->is_sec_rect) {
+ if (xd->n8_w < xd->n8_h) {
+ const MODE_INFO *const candidate_mi = xd->mi[-1];
+ const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
+ handle_sec_rect_block(candidate, refmv_count, ref_mv_stack,
+ ref_frame, mode_context);
+ }
+
+ if (xd->n8_w > xd->n8_h) {
+ const MODE_INFO *const candidate_mi = xd->mi[-xd->mi_stride];
+ const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
+ handle_sec_rect_block(candidate, refmv_count, ref_mv_stack,
+ ref_frame, mode_context);
+ }
+ }
+
for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *refmv_count); ++idx) {
mv_ref_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
clamp_mv_ref(&mv_ref_list[idx].as_mv,
int_mv *mv_ref_list,
int block, int mi_row, int mi_col,
find_mv_refs_sync sync, void *const data,
- uint8_t *mode_context) {
+ int16_t *mode_context) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
int_mv *mv_ref_list,
int mi_row, int mi_col,
find_mv_refs_sync sync, void *const data,
- uint8_t *mode_context) {
+ int16_t *mode_context) {
+#if CONFIG_REF_MV
+ int idx, all_zero = 1;
+#endif
find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1,
mi_row, mi_col, sync, data, mode_context);
#if CONFIG_REF_MV
setup_ref_mv_list(cm, xd, ref_frame, ref_mv_count, ref_mv_stack,
mv_ref_list, -1, mi_row, mi_col, mode_context);
+
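+ // If every candidate MV is zero, flag it so the mode coder can infer
+ // ZEROMV without spending bits on the zeromv/refmv decisions.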
+ for (idx = 0; idx < MAX_MV_REF_CANDIDATES; ++idx)
+ if (mv_ref_list[idx].as_int != 0)
+ all_zero = 0;
+
+ if (all_zero)
+ mode_context[ref_frame] |= (1 << ALL_ZERO_FLAG_OFFSET);
#endif
}
#endif
int_mv *mv_ref_list, int mi_row, int mi_col,
find_mv_refs_sync sync, void *const data,
- uint8_t *mode_context);
+ int16_t *mode_context);
// check a list of motion vectors by sad score using a number rows of pixels
// above and a number cols of pixels in the left to select the one with best
xd->n8_h = bh;
xd->n8_w = bw;
+#if CONFIG_REF_MV
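+ // Mark the second (right or bottom) block of a rectangular partition.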
+ xd->is_sec_rect = 0;
+ if (xd->n8_w < xd->n8_h)
+ if (mi_col & (xd->n8_h - 1))
+ xd->is_sec_rect = 1;
+
+ if (xd->n8_w > xd->n8_h)
+ if (mi_row & (xd->n8_w - 1))
+ xd->is_sec_rect = 1;
+#endif
}
static INLINE const vpx_prob *get_y_mode_probs(const VP10_COMMON *cm,
}
static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
- vpx_reader *r, uint8_t ctx) {
+ vpx_reader *r, int16_t ctx) {
#if CONFIG_REF_MV
FRAME_COUNTS *counts = xd->counts;
- uint8_t mode_ctx = ctx & NEWMV_CTX_MASK;
+ int16_t mode_ctx = ctx & NEWMV_CTX_MASK;
vpx_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
if (vpx_read(r, mode_prob) == 0) {
if (counts)
++counts->newmv_mode[mode_ctx][1];
- mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
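+ // All reference MV candidates are zero: ZEROMV is implied.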
+ if (ctx & (1 << ALL_ZERO_FLAG_OFFSET))
+ return ZEROMV;
- if (mode_ctx > 1)
- assert(0);
+ mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
mode_prob = cm->fc->zeromv_prob[mode_ctx];
if (vpx_read(r, mode_prob) == 0) {
if (counts)
++counts->zeromv_mode[mode_ctx][1];
- mode_ctx = (ctx >> REFMV_OFFSET);
+ mode_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
mode_prob = cm->fc->refmv_prob[mode_ctx];
+
if (vpx_read(r, mode_prob) == 0) {
if (counts)
++counts->refmv_mode[mode_ctx][0];
+
return NEARESTMV;
} else {
if (counts)
int_mv nearestmv[2], nearmv[2];
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
int ref, is_compound;
- uint8_t inter_mode_ctx[MAX_REF_FRAMES];
+ int16_t inter_mode_ctx[MAX_REF_FRAMES];
+ int16_t mode_ctx = 0;
read_ref_frames(cm, xd, r, mbmi->segment_id, mbmi->ref_frame);
is_compound = has_second_ref(mbmi);
mi_row, mi_col, fpm_sync, (void *)pbi, inter_mode_ctx);
}
+ mode_ctx = inter_mode_ctx[mbmi->ref_frame[0]];
+
+#if CONFIG_REF_MV
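+ // For compound prediction a flag bit survives only if it is set for both
+ // reference frames; sub-8x8 blocks keep just the packed contexts.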
+ if (mbmi->ref_frame[1] > NONE)
+ mode_ctx &= (inter_mode_ctx[mbmi->ref_frame[1]] | 0x00ff);
+ if (bsize < BLOCK_8X8)
+ mode_ctx &= 0x00ff;
+#endif
+
if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
if (bsize < BLOCK_8X8) {
}
} else {
if (bsize >= BLOCK_8X8)
- mbmi->mode = read_inter_mode(cm, xd, r,
- inter_mode_ctx[mbmi->ref_frame[0]]);
+ mbmi->mode = read_inter_mode(cm, xd, r, mode_ctx);
}
if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
int_mv block[2];
const int j = idy * 2 + idx;
- b_mode = read_inter_mode(cm, xd, r, inter_mode_ctx[mbmi->ref_frame[0]]);
+ b_mode = read_inter_mode(cm, xd, r, mode_ctx);
if (b_mode == NEARESTMV || b_mode == NEARMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
static void write_inter_mode(VP10_COMMON *cm,
vpx_writer *w, PREDICTION_MODE mode,
- const uint8_t mode_ctx) {
+ const int16_t mode_ctx) {
#if CONFIG_REF_MV
- const uint8_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
+ const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
const vpx_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
vpx_write(w, mode != NEWMV, newmv_prob);
if (mode != NEWMV) {
- const uint8_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+ const int16_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
const vpx_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
+
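+ // With the all-zero flag set the decoder infers ZEROMV, so nothing more
+ // is coded for this mode.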
+ if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) {
+ assert(mode == ZEROMV);
+ return;
+ }
+
vpx_write(w, mode != ZEROMV, zeromv_prob);
if (mode != ZEROMV) {
- const uint8_t refmv_ctx = (mode_ctx >> REFMV_OFFSET);
+ const int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
const vpx_prob refmv_prob = cm->fc->refmv_prob[refmv_ctx];
vpx_write(w, mode != NEARESTMV, refmv_prob);
}
write_ext_intra_mode_info(cm, mbmi, w);
#endif // CONFIG_EXT_INTRA
} else {
- const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
+ int16_t mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
write_ref_frames(cm, xd, w);
+#if CONFIG_REF_MV
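+ // Combine flag bits across both references and drop them for sub-8x8
+ // blocks, mirroring the decoder.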
+ if (mbmi->ref_frame[1] > NONE)
+ mode_ctx &= (mbmi_ext->mode_context[mbmi->ref_frame[1]] | 0x00ff);
+ if (bsize < BLOCK_8X8)
+ mode_ctx &= 0x00ff;
+#endif
+
// If segment skip is not enabled code the mode.
if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
if (bsize >= BLOCK_8X8) {
typedef struct {
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
- uint8_t mode_context[MAX_REF_FRAMES];
+ int16_t mode_context[MAX_REF_FRAMES];
#if CONFIG_REF_MV
uint8_t ref_mv_count[MAX_REF_FRAMES];
CANDIDATE_MV ref_mv_stack[MAX_REF_FRAMES][MAX_REF_MV_STACK_SIZE];
#if CONFIG_REF_MV
static void update_inter_mode_stats(FRAME_COUNTS *counts,
PREDICTION_MODE mode,
- uint8_t mode_context) {
- uint8_t mode_ctx = mode_context & NEWMV_CTX_MASK;
+ int16_t mode_context) {
+ int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
if (mode == NEWMV) {
++counts->newmv_mode[mode_ctx][0];
return;
} else {
++counts->newmv_mode[mode_ctx][1];
+
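+ // The zeromv/refmv decisions are not coded when the all-zero flag is set.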
+ if (mode_context & (1 << ALL_ZERO_FLAG_OFFSET)) {
+ return;
+ }
+
mode_ctx = (mode_context >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
if (mode == ZEROMV) {
++counts->zeromv_mode[mode_ctx][0];
return;
} else {
++counts->zeromv_mode[mode_ctx][1];
- mode_ctx = (mode_context >> REFMV_OFFSET);
+ mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
++counts->refmv_mode[mode_ctx][mode != NEARESTMV];
}
}
}
if (inter_block &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
+ int16_t mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
if (bsize >= BLOCK_8X8) {
const PREDICTION_MODE mode = mbmi->mode;
#if CONFIG_REF_MV
+ if (mbmi->ref_frame[1] > NONE)
+ mode_ctx &= (mbmi_ext->mode_context[mbmi->ref_frame[1]] | 0x00ff);
update_inter_mode_stats(counts, mode, mode_ctx);
#else
++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
const int j = idy * 2 + idx;
const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
#if CONFIG_REF_MV
+ mode_ctx &= 0x00ff;
update_inter_mode_stats(counts, b_mode, mode_ctx);
#else
++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
}
static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
- uint8_t mode_context) {
+ int16_t mode_context) {
#if CONFIG_REF_MV
int mode_cost = 0;
- uint8_t mode_ctx = mode_context & NEWMV_CTX_MASK;
+ int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
+ int16_t is_all_zero_mv = mode_context & (1 << ALL_ZERO_FLAG_OFFSET);
assert(is_inter_mode(mode));
} else {
mode_cost = cpi->newmv_mode_cost[mode_ctx][1];
mode_ctx = (mode_context >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+
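+ // Only the NEWMV decision costs bits when the all-zero flag is set.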
+ if (is_all_zero_mv)
+ return mode_cost;
+
if (mode == ZEROMV) {
mode_cost += cpi->zeromv_mode_cost[mode_ctx][0];
return mode_cost;
} else {
mode_cost += cpi->zeromv_mode_cost[mode_ctx][1];
- mode_ctx = (mode_context >> REFMV_OFFSET);
+ mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
mode_cost += cpi->refmv_mode_cost[mode_ctx][mode != NEARESTMV];
return mode_cost;
}
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
const int is_compound = has_second_ref(mbmi);
+ int16_t mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
switch (mode) {
case NEWMV:
for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));
- return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mbmi->ref_frame[0]]) +
- thismvcost;
+#if CONFIG_REF_MV
+ mode_ctx &= 0x00ff;
+#endif
+ return cost_mv_ref(cpi, mode, mode_ctx) + thismvcost;
}
static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way encode zero motion.
// TODO(aconverse): Find out if this is still productive then clean up or remove
static int check_best_zero_mv(
- const VP10_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
+ const VP10_COMP *cpi, const int16_t mode_context[MAX_REF_FRAMES],
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
const MV_REFERENCE_FRAME ref_frames[2]) {
if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
(ref_frames[1] == NONE ||
frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
- int rfc = mode_context[ref_frames[0]];
+#if CONFIG_REF_MV
+ int16_t rfc = (ref_frames[1] == NONE) ? mode_context[ref_frames[0]] :
+ mode_context[ref_frames[0]] & (mode_context[ref_frames[1]] | 0x00ff);
+#else
+ int16_t rfc = mode_context[ref_frames[0]];
+#endif
int c1 = cost_mv_ref(cpi, NEARMV, rfc);
int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
int skip_txfm_sb = 0;
int64_t skip_sse_sb = INT64_MAX;
int64_t distortion_y = 0, distortion_uv = 0;
+ int16_t mode_ctx = mbmi_ext->mode_context[refs[0]];
+
+#if CONFIG_REF_MV
+ if (refs[1] > NONE)
+ mode_ctx &= (mbmi_ext->mode_context[refs[1]] | 0x00ff);
+#endif
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
// initiation of a motion field.
if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
mode_mv, refs[0])) {
- *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode,
- mbmi_ext->mode_context[refs[0]]),
- cost_mv_ref(cpi, NEARESTMV,
- mbmi_ext->mode_context[refs[0]]));
+ *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, mode_ctx),
+ cost_mv_ref(cpi, NEARESTMV, mode_ctx));
} else {
- *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
+ *rate2 += cost_mv_ref(cpi, this_mode, mode_ctx);
}
if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
best_mbmode.mode = ZEROMV;
}
+#if CONFIG_REF_MV
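+ // If the chosen mode has zero motion and the all-zero flag is set for its
+ // reference(s), it will be coded as ZEROMV; record that here.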
+ if (best_mbmode.ref_frame[0] > INTRA_FRAME &&
+ best_mbmode.mv[0].as_int == 0 &&
+ (best_mbmode.ref_frame[1] == NONE || best_mbmode.mv[1].as_int == 0)) {
+ int16_t mode_ctx = mbmi_ext->mode_context[best_mbmode.ref_frame[0]];
+ if (best_mbmode.ref_frame[1] > NONE)
+ mode_ctx &= (mbmi_ext->mode_context[best_mbmode.ref_frame[1]] | 0x00ff);
+
+ if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET))
+ best_mbmode.mode = ZEROMV;
+ }
+#endif
+
if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
rd_cost->rate = INT_MAX;
rd_cost->rdcost = INT64_MAX;