alpha
interintra
filterintra
- masked_compound_inter
+ masked_compound
"
CONFIG_LIST="
external_build
static INLINE int is_interintra_allowed(BLOCK_SIZE_TYPE sb_type) {
return ((sb_type >= BLOCK_8X8) && (sb_type < BLOCK_64X64));
}
+
+#if CONFIG_MASKED_COMPOUND
+// Number of bits used to signal an interintra mask index, by block-size
+// class: small (<= 8x8), medium (<= 32x32) and big (> 32x32).
+#define MASK_BITS_SML_INTERINTRA 3
+#define MASK_BITS_MED_INTERINTRA 4
+#define MASK_BITS_BIG_INTERINTRA 5
+// Sentinel mask index meaning "no mask selected".
+#define MASK_NONE_INTERINTRA -1
+// Returns how many bits code the interintra mask index for this block
+// size; 0 means masked interintra is not available (4x4 blocks).
+static INLINE int get_mask_bits_interintra(BLOCK_SIZE_TYPE sb_type) {
+ if (sb_type == BLOCK_4X4)
+ return 0;
+ if (sb_type <= BLOCK_8X8)
+ return MASK_BITS_SML_INTERINTRA;
+ else if (sb_type <= BLOCK_32X32)
+ return MASK_BITS_MED_INTERINTRA;
+ else
+ return MASK_BITS_BIG_INTERINTRA;
+}
+#endif
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
#define MASK_BITS_SML 3
#define MASK_BITS_MED 4
#define MASK_BITS_BIG 5
else
return MASK_BITS_BIG;
}
-#endif // CONFIG_MASKED_COMPOUND_INTER
+#endif // CONFIG_MASKED_COMPOUND
typedef struct {
MB_PREDICTION_MODE mode, uv_mode;
#if CONFIG_INTERINTRA
MB_PREDICTION_MODE interintra_mode, interintra_uv_mode;
+#if CONFIG_MASKED_COMPOUND
+ int interintra_mask_index;
+ int interintra_uv_mask_index;
+ int use_masked_interintra;
+#endif
#endif
#if CONFIG_FILTERINTRA
int filterbit, uv_filterbit;
BLOCK_SIZE_TYPE sb_type;
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
int use_masked_compound;
int mask_index;
#endif
static const vp9_prob default_interintra_prob[BLOCK_SIZE_TYPES] = {
192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192
};
+#if CONFIG_MASKED_COMPOUND
+static const vp9_prob default_masked_interintra_prob[BLOCK_SIZE_TYPES] = {
+// 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180
+ 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192
+};
+#endif
+#endif
+
+#if CONFIG_MASKED_COMPOUND
+static const vp9_prob default_masked_interinter_prob[BLOCK_SIZE_TYPES] = {
+ 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192
+};
#endif
void vp9_init_mbmode_probs(VP9_COMMON *cm) {
vp9_copy(cm->fc.mbskip_probs, default_mbskip_probs);
#if CONFIG_INTERINTRA
vp9_copy(cm->fc.interintra_prob, default_interintra_prob);
+#if CONFIG_MASKED_COMPOUND
+ vp9_copy(cm->fc.masked_interintra_prob, default_masked_interintra_prob);
+#endif
#endif
#if CONFIG_FILTERINTRA
vp9_copy(cm->fc.filterintra_prob, vp9_default_filterintra_prob);
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
- cm->fc.masked_compound_prob = VP9_DEF_MASKED_COMPOUND_PROB;
+#if CONFIG_MASKED_COMPOUND
+ vp9_copy(cm->fc.masked_compound_prob, default_masked_interinter_prob);
#endif
}
fc->interintra_prob[i] = update_ct2(pre_fc->interintra_prob[i],
counts->interintra[i]);
}
+#if CONFIG_MASKED_COMPOUND
+ if (cm->use_masked_interintra) {
+ for (i = 0; i < BLOCK_SIZE_TYPES; ++i) {
+ if (is_interintra_allowed(i) && get_mask_bits_interintra(i))
+ fc->masked_interintra_prob[i] = update_ct2(
+ pre_fc->masked_interintra_prob[i],
+ counts->masked_interintra[i]);
+ }
+ }
+#endif
}
#endif
#if CONFIG_FILTERINTRA
fc->filterintra_prob[i][j] = update_ct2(pre_fc->filterintra_prob[i][j],
counts->filterintra[i][j]);
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (cm->use_masked_compound) {
- fc->masked_compound_prob = update_ct2(pre_fc->masked_compound_prob,
- counts->masked_compound);
+ for (i = 0; i < BLOCK_SIZE_TYPES; ++i) {
+ if (get_mask_bits(i))
+ fc->masked_compound_prob[i] = update_ct2
+ (pre_fc->masked_compound_prob[i],
+ counts->masked_compound[i]);
+ }
}
#endif
}
#if CONFIG_INTERINTRA
#define VP9_UPD_INTERINTRA_PROB 248
#define SEPARATE_INTERINTRA_UV 0
+#if CONFIG_MASKED_COMPOUND
+#define VP9_UPD_MASKED_INTERINTRA_PROB 248
+#endif
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
-#define VP9_DEF_MASKED_COMPOUND_PROB 192
+#if CONFIG_MASKED_COMPOUND
#define VP9_UPD_MASKED_COMPOUND_PROB 248
#endif
#define LOG2_MI_SIZE 3
#define LOG2_MI_BLOCK_SIZE (6 - LOG2_MI_SIZE) // 64 = 2^6
+#define MAX_BLOCK_SIZE (1 << 6) // max block size in pixel
#define MI_SIZE (1 << LOG2_MI_SIZE) // pixels per mi-unit
#define MI_BLOCK_SIZE (1 << LOG2_MI_BLOCK_SIZE) // mi-units per max block
nmv_context nmvc;
#if CONFIG_INTERINTRA
vp9_prob interintra_prob[BLOCK_SIZE_TYPES];
+#if CONFIG_MASKED_COMPOUND
+ vp9_prob masked_interintra_prob[BLOCK_SIZE_TYPES];
+#endif
#endif
#if CONFIG_FILTERINTRA
vp9_prob filterintra_prob[TX_SIZES][VP9_INTRA_MODES];
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
- vp9_prob masked_compound_prob;
+#if CONFIG_MASKED_COMPOUND
+ vp9_prob masked_compound_prob[BLOCK_SIZE_TYPES];
#endif
} FRAME_CONTEXT;
nmv_context_counts mv;
#if CONFIG_INTERINTRA
unsigned int interintra[BLOCK_SIZE_TYPES][2];
+#if CONFIG_MASKED_COMPOUND
+ unsigned int masked_interintra[BLOCK_SIZE_TYPES][2];
+#endif
#endif
#if CONFIG_FILTERINTRA
unsigned int filterintra[TX_SIZES][VP9_INTRA_MODES][2];
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
- unsigned int masked_compound[2];
+#if CONFIG_MASKED_COMPOUND
+ unsigned int masked_compound[BLOCK_SIZE_TYPES][2];
#endif
} FRAME_COUNTS;
#if CONFIG_INTERINTRA
int use_interintra;
+#if CONFIG_MASKED_COMPOUND
+ int use_masked_interintra;
+#endif
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
int use_masked_compound;
#endif
return clamped_mv;
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
#define MASK_WEIGHT_BITS 6
static int get_masked_weight(int m) {
xd->mb_to_bottom_edge);
scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (which_mv && xd->mode_info_context->mbmi.use_masked_compound) {
uint8_t tmp_dst[4096];
vp9_build_inter_predictor(pre, pre_stride,
&res_mv, &xd->scale_factor[which_mv],
4 << pred_w, 4 << pred_h, which_mv,
&xd->subpix, MV_PRECISION_Q4);
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
}
#endif
}
void vp9_setup_scale_factors(VP9_COMMON *cm, int i);
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
void vp9_generate_masked_weight(int mask_index, BLOCK_SIZE_TYPE sb_type,
int h, int w, uint8_t *mask, int stride);
void vp9_generate_hard_mask(int mask_index, BLOCK_SIZE_TYPE sb_type,
}
}
+#if CONFIG_MASKED_COMPOUND
+#define MASK_WEIGHT_BITS_INTERINTRA 6
+
+// Maps the signed distance m from the mask boundary line to a soft
+// blending weight in [0, 1 << MASK_WEIGHT_BITS_INTERINTRA], ramping
+// smoothly over +/- SMOOTHER_LEN_INTERINTRA and saturating outside it.
+static int get_masked_weight_interintra(int m) {
+ #define SMOOTHER_LEN_INTERINTRA 32
+ static const uint8_t smoothfn[2 * SMOOTHER_LEN_INTERINTRA + 1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 2, 2, 3, 4, 5, 6,
+ 8, 9, 12, 14, 17, 21, 24, 28,
+ 32,
+ 36, 40, 43, 47, 50, 52, 55, 56,
+ 58, 59, 60, 61, 62, 62, 63, 63,
+ 63, 63, 63, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ };
+ if (m < -SMOOTHER_LEN_INTERINTRA)
+ return 0;
+ else if (m > SMOOTHER_LEN_INTERINTRA)
+ return (1 << MASK_WEIGHT_BITS_INTERINTRA);
+ else
+ return smoothfn[m + SMOOTHER_LEN_INTERINTRA];
+}
+
+// Hard (binary) mask: 1 on the positive side of the boundary, else 0.
+static int get_hard_mask_interintra(int m) {
+ return m > 0;
+}
+
+// Equation of line: f(x, y) = a[0]*(x - a[2]*w/4) + a[1]*(y - a[3]*h/4) = 0
+// The soft mask is obtained by computing f(x, y) and then calling
+// get_masked_weight(f(x, y)).
+// 8 oriented half-plane masks for small blocks; each row is
+// {a[0], a[1], a[2], a[3]} of the line equation above.
+static const int mask_params_sml_interintra[1 << MASK_BITS_SML_INTERINTRA]
+ [4] = {
+ {-1, 2, 2, 2},
+ { 1, -2, 2, 2},
+ {-2, 1, 2, 2},
+ { 2, -1, 2, 2},
+ { 2, 1, 2, 2},
+ {-2, -1, 2, 2},
+ { 1, 2, 2, 2},
+ {-1, -2, 2, 2},
+};
+
+// Medium-block masks when height > width: the 8 small-block masks plus
+// 8 near-horizontal boundaries offset to h/4 and 3h/4.
+static const int mask_params_med_hgtw_interintra[1 << MASK_BITS_MED_INTERINTRA]
+ [4] = {
+ {-1, 2, 2, 2},
+ { 1, -2, 2, 2},
+ {-2, 1, 2, 2},
+ { 2, -1, 2, 2},
+ { 2, 1, 2, 2},
+ {-2, -1, 2, 2},
+ { 1, 2, 2, 2},
+ {-1, -2, 2, 2},
+
+ {-1, 2, 2, 1},
+ { 1, -2, 2, 1},
+ {-1, 2, 2, 3},
+ { 1, -2, 2, 3},
+ { 1, 2, 2, 1},
+ {-1, -2, 2, 1},
+ { 1, 2, 2, 3},
+ {-1, -2, 2, 3},
+};
+
+// Medium-block masks when height < width: the 8 small-block masks plus
+// 8 near-vertical boundaries offset to w/4 and 3w/4.
+static const int mask_params_med_hltw_interintra[1 << MASK_BITS_MED_INTERINTRA]
+ [4] = {
+ {-1, 2, 2, 2},
+ { 1, -2, 2, 2},
+ {-2, 1, 2, 2},
+ { 2, -1, 2, 2},
+ { 2, 1, 2, 2},
+ {-2, -1, 2, 2},
+ { 1, 2, 2, 2},
+ {-1, -2, 2, 2},
+
+ {-2, 1, 1, 2},
+ { 2, -1, 1, 2},
+ {-2, 1, 3, 2},
+ { 2, -1, 3, 2},
+ { 2, 1, 1, 2},
+ {-2, -1, 1, 2},
+ { 2, 1, 3, 2},
+ {-2, -1, 3, 2},
+};
+
+// Medium-block masks for square blocks: the 8 small-block masks plus
+// 8 axis-aligned horizontal/vertical splits at the quarter positions.
+static const int mask_params_med_heqw_interintra[1 << MASK_BITS_MED_INTERINTRA]
+ [4] = {
+ {-1, 2, 2, 2},
+ { 1, -2, 2, 2},
+ {-2, 1, 2, 2},
+ { 2, -1, 2, 2},
+ { 2, 1, 2, 2},
+ {-2, -1, 2, 2},
+ { 1, 2, 2, 2},
+ {-1, -2, 2, 2},
+
+ { 0, 2, 0, 1},
+ { 0, -2, 0, 1},
+ { 0, 2, 0, 3},
+ { 0, -2, 0, 3},
+ { 2, 0, 1, 0},
+ {-2, 0, 1, 0},
+ { 2, 0, 3, 0},
+ {-2, 0, 3, 0},
+};
+
+// Big-block masks when height > width: all 16 medium hgtw masks plus
+// 8 axis-aligned splits, 32 entries in total.
+static const int mask_params_big_hgtw_interintra[1 << MASK_BITS_BIG_INTERINTRA]
+ [4] = {
+ {-1, 2, 2, 2},
+ { 1, -2, 2, 2},
+ {-2, 1, 2, 2},
+ { 2, -1, 2, 2},
+ { 2, 1, 2, 2},
+ {-2, -1, 2, 2},
+ { 1, 2, 2, 2},
+ {-1, -2, 2, 2},
+
+ {-1, 2, 2, 1},
+ { 1, -2, 2, 1},
+ {-1, 2, 2, 3},
+ { 1, -2, 2, 3},
+ { 1, 2, 2, 1},
+ {-1, -2, 2, 1},
+ { 1, 2, 2, 3},
+ {-1, -2, 2, 3},
+
+ {-2, 1, 1, 2},
+ { 2, -1, 1, 2},
+ {-2, 1, 3, 2},
+ { 2, -1, 3, 2},
+ { 2, 1, 1, 2},
+ {-2, -1, 1, 2},
+ { 2, 1, 3, 2},
+ {-2, -1, 3, 2},
+
+ { 0, 2, 0, 1},
+ { 0, -2, 0, 1},
+ { 0, 2, 0, 2},
+ { 0, -2, 0, 2},
+ { 0, 2, 0, 3},
+ { 0, -2, 0, 3},
+ { 2, 0, 2, 0},
+ {-2, 0, 2, 0},
+};
+
+// Big-block masks when height < width: all 16 medium masks plus
+// 8 axis-aligned splits biased toward vertical boundaries.
+static const int mask_params_big_hltw_interintra[1 << MASK_BITS_BIG_INTERINTRA]
+ [4] = {
+ {-1, 2, 2, 2},
+ { 1, -2, 2, 2},
+ {-2, 1, 2, 2},
+ { 2, -1, 2, 2},
+ { 2, 1, 2, 2},
+ {-2, -1, 2, 2},
+ { 1, 2, 2, 2},
+ {-1, -2, 2, 2},
+
+ {-1, 2, 2, 1},
+ { 1, -2, 2, 1},
+ {-1, 2, 2, 3},
+ { 1, -2, 2, 3},
+ { 1, 2, 2, 1},
+ {-1, -2, 2, 1},
+ { 1, 2, 2, 3},
+ {-1, -2, 2, 3},
+
+ {-2, 1, 1, 2},
+ { 2, -1, 1, 2},
+ {-2, 1, 3, 2},
+ { 2, -1, 3, 2},
+ { 2, 1, 1, 2},
+ {-2, -1, 1, 2},
+ { 2, 1, 3, 2},
+ {-2, -1, 3, 2},
+
+ { 0, 2, 0, 2},
+ { 0, -2, 0, 2},
+ { 2, 0, 1, 0},
+ {-2, 0, 1, 0},
+ { 2, 0, 2, 0},
+ {-2, 0, 2, 0},
+ { 2, 0, 3, 0},
+ {-2, 0, 3, 0},
+};
+
+// Big-block masks for square blocks: all 16 medium masks plus
+// 8 axis-aligned quarter-position splits.
+static const int mask_params_big_heqw_interintra[1 << MASK_BITS_BIG_INTERINTRA]
+ [4] = {
+ {-1, 2, 2, 2},
+ { 1, -2, 2, 2},
+ {-2, 1, 2, 2},
+ { 2, -1, 2, 2},
+ { 2, 1, 2, 2},
+ {-2, -1, 2, 2},
+ { 1, 2, 2, 2},
+ {-1, -2, 2, 2},
+
+ {-1, 2, 2, 1},
+ { 1, -2, 2, 1},
+ {-1, 2, 2, 3},
+ { 1, -2, 2, 3},
+ { 1, 2, 2, 1},
+ {-1, -2, 2, 1},
+ { 1, 2, 2, 3},
+ {-1, -2, 2, 3},
+
+ {-2, 1, 1, 2},
+ { 2, -1, 1, 2},
+ {-2, 1, 3, 2},
+ { 2, -1, 3, 2},
+ { 2, 1, 1, 2},
+ {-2, -1, 1, 2},
+ { 2, 1, 3, 2},
+ {-2, -1, 3, 2},
+
+ { 0, 2, 0, 1},
+ { 0, -2, 0, 1},
+ { 0, 2, 0, 3},
+ { 0, -2, 0, 3},
+ { 2, 0, 1, 0},
+ {-2, 0, 1, 0},
+ { 2, 0, 3, 0},
+ {-2, 0, 3, 0},
+};
+
+// Returns the 4-element line-parameter row {a[0], a[1], a[2], a[3]}
+// describing the mask boundary for the given mask index and block
+// geometry, or NULL when mask_index is MASK_NONE_INTERINTRA or the
+// block size has no mask set. Callers must handle NULL.
+static const int *get_mask_params_interintra(int mask_index,
+ BLOCK_SIZE_TYPE sb_type,
+ int h, int w) {
+ // Initialize to NULL: with asserts compiled out (NDEBUG) the previous
+ // code could fall through the final else and return an uninitialized
+ // pointer (undefined behavior). NULL makes the callers' !a guard work.
+ const int *a = NULL;
+ const int mask_bits = get_mask_bits_interintra(sb_type);
+
+ if (mask_index == MASK_NONE_INTERINTRA)
+ return NULL;
+
+ if (mask_bits == MASK_BITS_SML_INTERINTRA) {
+ a = mask_params_sml_interintra[mask_index];
+ } else if (mask_bits == MASK_BITS_MED_INTERINTRA) {
+ if (h > w)
+ a = mask_params_med_hgtw_interintra[mask_index];
+ else if (h < w)
+ a = mask_params_med_hltw_interintra[mask_index];
+ else
+ a = mask_params_med_heqw_interintra[mask_index];
+ } else if (mask_bits == MASK_BITS_BIG_INTERINTRA) {
+ if (h > w)
+ a = mask_params_big_hgtw_interintra[mask_index];
+ else if (h < w)
+ a = mask_params_big_hltw_interintra[mask_index];
+ else
+ a = mask_params_big_heqw_interintra[mask_index];
+ } else {
+ assert(0);
+ }
+ return a;
+}
+
+// Fills mask[h x w] (row stride `stride`) with soft blending weights in
+// [0, 1 << MASK_WEIGHT_BITS_INTERINTRA] for the given mask index and
+// block size. Leaves mask untouched when no mask parameters exist
+// (mask_index == MASK_NONE_INTERINTRA or unsupported size).
+void vp9_generate_masked_weight_interintra(int mask_index,
+ BLOCK_SIZE_TYPE sb_type,
+ int h, int w,
+ uint8_t *mask, int stride) {
+ int i, j;
+ const int *a = get_mask_params_interintra(mask_index, sb_type, h, w);
+ if (!a) return;
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ // Signed distance of (j, i) from the boundary line
+ // a[0]*(x - a[2]*w/4) + a[1]*(y - a[3]*h/4) = 0.
+ int x = (j - (a[2] * w) / 4);
+ int y = (i - (a[3] * h) / 4);
+ int m = a[0] * x + a[1] * y;
+ mask[i * stride + j] = get_masked_weight_interintra(m);
+ }
+}
+
+// Same geometry as vp9_generate_masked_weight_interintra, but fills
+// mask[h x w] with a binary (0/1) mask instead of soft weights.
+void vp9_generate_hard_mask_interintra(int mask_index, BLOCK_SIZE_TYPE sb_type,
+ int h, int w, uint8_t *mask, int stride) {
+ int i, j;
+ const int *a = get_mask_params_interintra(mask_index, sb_type, h, w);
+ if (!a) return;
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int x = (j - (a[2] * w) / 4);
+ int y = (i - (a[3] * h) / 4);
+ int m = a[0] * x + a[1] * y;
+ mask[i * stride + j] = get_hard_mask_interintra(m);
+ }
+}
+#endif
+
static void combine_interintra(MB_PREDICTION_MODE mode,
+#if CONFIG_MASKED_COMPOUND
+ int use_masked_interintra,
+ int mask_index,
+ BLOCK_SIZE_TYPE bsize,
+#endif
uint8_t *interpred,
int interstride,
uint8_t *intrapred,
size == 16 ? 4 :
size == 8 ? 8 : 16);
int i, j;
+
+#if CONFIG_MASKED_COMPOUND
+ uint8_t mask[4096];
+ if (use_masked_interintra && get_mask_bits_interintra(bsize))
+ vp9_generate_masked_weight_interintra(mask_index, bsize, bh, bw, mask, bw);
+#endif
+
switch (mode) {
case V_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int k = i * interstride + j;
int scale = weights1d[i * size_scale];
+#if CONFIG_MASKED_COMPOUND
+ int m = mask[i * bw + j];
+ if (use_masked_interintra && get_mask_bits_interintra(bsize))
+ interpred[k] = (intrapred[i * intrastride + j] * m +
+ interpred[k] *
+ ((1 << MASK_WEIGHT_BITS_INTERINTRA) - m) +
+ (1 << (MASK_WEIGHT_BITS_INTERINTRA - 1))) >>
+ MASK_WEIGHT_BITS_INTERINTRA;
+ else
+#endif
interpred[k] =
((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round)
for (j = 0; j < bw; ++j) {
int k = i * interstride + j;
int scale = weights1d[j * size_scale];
+#if CONFIG_MASKED_COMPOUND
+ int m = mask[i * bw + j];
+ if (use_masked_interintra && get_mask_bits_interintra(bsize))
+ interpred[k] = (intrapred[i * intrastride + j] * m +
+ interpred[k] *
+ ((1 << MASK_WEIGHT_BITS_INTERINTRA) - m) +
+ (1 << (MASK_WEIGHT_BITS_INTERINTRA - 1))) >>
+ MASK_WEIGHT_BITS_INTERINTRA;
+ else
+#endif
interpred[k] =
((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round)
int k = i * interstride + j;
int scale = (weights1d[i * size_scale] * 3 +
weights1d[j * size_scale]) >> 2;
+#if CONFIG_MASKED_COMPOUND
+ int m = mask[i * bw + j];
+ if (use_masked_interintra && get_mask_bits_interintra(bsize))
+ interpred[k] = (intrapred[i * intrastride + j] * m +
+ interpred[k] *
+ ((1 << MASK_WEIGHT_BITS_INTERINTRA) - m) +
+ (1 << (MASK_WEIGHT_BITS_INTERINTRA - 1))) >>
+ MASK_WEIGHT_BITS_INTERINTRA;
+ else
+#endif
interpred[k] =
((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round)
int k = i * interstride + j;
int scale = (weights1d[j * size_scale] * 3 +
weights1d[i * size_scale]) >> 2;
+#if CONFIG_MASKED_COMPOUND
+ int m = mask[i * bw + j];
+ if (use_masked_interintra && get_mask_bits_interintra(bsize))
+ interpred[k] = (intrapred[i * intrastride + j] * m +
+ interpred[k] *
+ ((1 << MASK_WEIGHT_BITS_INTERINTRA) - m) +
+ (1 << (MASK_WEIGHT_BITS_INTERINTRA - 1))) >>
+ MASK_WEIGHT_BITS_INTERINTRA;
+ else
+#endif
interpred[k] =
((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round)
for (j = 0; j < bw; ++j) {
int k = i * interstride + j;
int scale = weights1d[(i < j ? i : j) * size_scale];
+#if CONFIG_MASKED_COMPOUND
+ int m = mask[i * bw + j];
+ if (use_masked_interintra && get_mask_bits_interintra(bsize))
+ interpred[k] = (intrapred[i * intrastride + j] * m +
+ interpred[k] *
+ ((1 << MASK_WEIGHT_BITS_INTERINTRA) - m) +
+ (1 << (MASK_WEIGHT_BITS_INTERINTRA - 1))) >>
+ MASK_WEIGHT_BITS_INTERINTRA;
+ else
+#endif
interpred[k] =
((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round)
int k = i * interstride + j;
int scale = (weights1d[i * size_scale] +
weights1d[j * size_scale]) >> 1;
+#if CONFIG_MASKED_COMPOUND
+ int m = mask[i * bw + j];
+ if (use_masked_interintra && get_mask_bits_interintra(bsize))
+ interpred[k] = (intrapred[i * intrastride + j] * m +
+ interpred[k] *
+ ((1 << MASK_WEIGHT_BITS_INTERINTRA) - m) +
+ (1 << (MASK_WEIGHT_BITS_INTERINTRA - 1))) >>
+ MASK_WEIGHT_BITS_INTERINTRA;
+ else
+#endif
interpred[k] =
((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round)
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int k = i * interstride + j;
+#if CONFIG_MASKED_COMPOUND
+ int m = mask[i * bw + j];
+ if (use_masked_interintra && get_mask_bits_interintra(bsize))
+ interpred[k] = (intrapred[i * intrastride + j] * m +
+ interpred[k] *
+ ((1 << MASK_WEIGHT_BITS_INTERINTRA) - m) +
+ (1 << (MASK_WEIGHT_BITS_INTERINTRA - 1))) >>
+ MASK_WEIGHT_BITS_INTERINTRA;
+ else
+#endif
interpred[k] = (interpred[k] + intrapred[i * intrastride + j]) >> 1;
}
}
xd->mode_info_context->mbmi.interintra_mode, bw, bh,
xd->up_available, xd->left_available, xd->right_available);
combine_interintra(xd->mode_info_context->mbmi.interintra_mode,
+#if CONFIG_MASKED_COMPOUND
+ xd->mode_info_context->mbmi.use_masked_interintra,
+ xd->mode_info_context->mbmi.interintra_mask_index,
+ bsize,
+#endif
ypred, ystride, intrapredictor, bw, bw, bh);
}
xd->mode_info_context->mbmi.interintra_uv_mode, bw, bh,
xd->up_available, xd->left_available, xd->right_available);
combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode,
+#if CONFIG_MASKED_COMPOUND
+ xd->mode_info_context->mbmi.use_masked_interintra,
+ xd->mode_info_context->mbmi.interintra_uv_mask_index,
+ bsize,
+#endif
upred, uvstride, uintrapredictor, bw, bw, bh);
combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode,
+#if CONFIG_MASKED_COMPOUND
+ xd->mode_info_context->mbmi.use_masked_interintra,
+ xd->mode_info_context->mbmi.interintra_uv_mask_index,
+ bsize,
+#endif
vpred, uvstride, vintrapredictor, bw, bw, bh);
}
int ystride,
int uvstride,
BLOCK_SIZE_TYPE bsize);
+#if CONFIG_MASKED_COMPOUND
+void vp9_generate_masked_weight_interintra(int mask_index,
+ BLOCK_SIZE_TYPE sb_type,
+ int h, int w,
+ uint8_t *mask, int stride);
+#endif
+
#endif
#endif // VP9_COMMON_VP9_RECONINTRA_H_
prototype unsigned int vp9_sad4x4_avg "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, unsigned int max_sad"
specialize vp9_sad4x4_avg $sse_x86inc
-if [ "$CONFIG_MASKED_COMPOUND_INTER" = "yes" ]; then
+if [ "$CONFIG_MASKED_COMPOUND" = "yes" ]; then
prototype int vp9_masked_diamond_search_sad "struct macroblock *x, uint8_t *mask, int mask_stride, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv, int is_second"
specialize vp9_masked_diamond_search_sad
return sad;
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
static INLINE unsigned int masked_sad_mx_n_c(const uint8_t *src_ptr,
int src_stride,
const uint8_t *ref_ptr,
}
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
static void masked_variance(const uint8_t *src_ptr,
int src_stride,
const uint8_t *ref_ptr,
uint8_t inter_mode_ctx;
MV_REFERENCE_FRAME ref0, ref1;
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
mbmi->use_masked_compound = 0;
mbmi->mask_index = MASK_NONE;
#endif
mbmi->ref_frame[1] = (vp9_read(r, cm->fc.interintra_prob[bsize]) ?
INTRA_FRAME : NONE);
cm->counts.interintra[bsize][mbmi->ref_frame[1] == INTRA_FRAME]++;
+#if CONFIG_MASKED_COMPOUND
+ mbmi->use_masked_interintra = 0;
+#endif
if (mbmi->ref_frame[1] == INTRA_FRAME) {
int bsg = MIN(MIN(b_width_log2(bsize), b_height_log2(bsize)), 3);
mbmi->interintra_mode = read_intra_mode(r, cm->fc.y_mode_prob[bsg]);
#else
mbmi->interintra_uv_mode = mbmi->interintra_mode;
#endif
+#if CONFIG_MASKED_COMPOUND
+ if (cm->use_masked_interintra && get_mask_bits_interintra(bsize)) {
+ mbmi->use_masked_interintra = vp9_read(r,
+ cm->fc.masked_interintra_prob[bsize]);
+ cm->counts.masked_interintra[bsize][mbmi->use_masked_interintra]++;
+ if (mbmi->use_masked_interintra) {
+ mbmi->interintra_mask_index = vp9_read_literal(r,
+ get_mask_bits_interintra(bsize));
+ mbmi->interintra_uv_mask_index = mbmi->interintra_mask_index;
+ }
}
+#endif
+ }
}
#endif
assert(!"Invalid inter mode value");
}
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
mbmi->use_masked_compound = 0;
if (pbi->common.use_masked_compound &&
pbi->common.comp_pred_mode != SINGLE_PREDICTION_ONLY &&
get_mask_bits(mi->mbmi.sb_type) &&
mbmi->ref_frame[1] > INTRA_FRAME) {
mbmi->use_masked_compound =
- vp9_read(r, pbi->common.fc.masked_compound_prob);
- pbi->common.counts.masked_compound[mbmi->use_masked_compound]++;
+ vp9_read(r, pbi->common.fc.masked_compound_prob[bsize]);
+ pbi->common.counts.masked_compound[bsize][mbmi->use_masked_compound]++;
if (mbmi->use_masked_compound) {
mbmi->mask_index = vp9_read_literal(r, get_mask_bits(mi->mbmi.sb_type));
}
if (vp9_read(r, VP9_MODE_UPDATE_PROB))
vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]);
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) {
cm->use_masked_compound = vp9_read_bit(r);
if (cm->use_masked_compound) {
- if (vp9_read(r, VP9_UPD_MASKED_COMPOUND_PROB))
- cm->fc.masked_compound_prob = vp9_read_prob(r);
+ for (i = 0; i < BLOCK_SIZE_TYPES; ++i) {
+ if (get_mask_bits(i))
+ if (vp9_read(r, VP9_UPD_MASKED_COMPOUND_PROB))
+ vp9_diff_update_prob(r, &cm->fc.masked_compound_prob[i]);
+ }
}
} else {
cm->use_masked_compound = 0;
if (vp9_read(r, VP9_UPD_INTERINTRA_PROB))
vp9_diff_update_prob(r, &cm->fc.interintra_prob[b]);
}
+#if CONFIG_MASKED_COMPOUND
+ if (cm->use_masked_interintra) {
+ int k;
+ for (k = 0; k < BLOCK_SIZE_TYPES; ++k) {
+ if (is_interintra_allowed(k) && get_mask_bits_interintra(k))
+ if (vp9_read(r, VP9_UPD_MASKED_INTERINTRA_PROB))
+ vp9_diff_update_prob(r, &cm->fc.masked_interintra_prob[k]);
+ }
+ }
+#endif
}
#endif
#if CONFIG_INTERINTRA
cm->use_interintra = vp9_rb_read_bit(rb);
+#if CONFIG_MASKED_COMPOUND
+ if (cm->use_interintra) {
+ cm->use_masked_interintra = vp9_rb_read_bit(rb);
+ } else {
+ cm->use_masked_interintra = 0;
+ }
+#endif
#endif
for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
#include "vp9/common/vp9_treecoder.h"
#include "vp9/decoder/vp9_dboolhuff.h"
-#if CONFIG_INTERINTRA || CONFIG_MASKED_COMPOUND_INTER
-#define vp9_read_prob(r) ((vp9_prob)vp9_read_literal(r, 8))
-#endif
#define vp9_read_and_apply_sign(r, value) (vp9_read_bit(r) ? -(value) : (value))
// Intent of tree data structure is to make decoding trivial.
write_intra_mode(bc, mi->interintra_uv_mode,
pc->fc.uv_mode_prob[mi->interintra_mode]);
#endif
+#if CONFIG_MASKED_COMPOUND
+ if (get_mask_bits_interintra(mi->sb_type) &&
+ pc->use_masked_interintra) {
+ vp9_write(bc, mi->use_masked_interintra,
+ pc->fc.masked_interintra_prob[bsize]);
+ if (mi->use_masked_interintra) {
+ vp9_write_literal(bc, mi->interintra_mask_index,
+ get_mask_bits_interintra(mi->sb_type));
+ }
}
+#endif
+ }
}
#endif
if (bsize < BLOCK_8X8) {
vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
nmvc, allow_hp);
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (cpi->common.use_masked_compound &&
cpi->common.comp_pred_mode != SINGLE_PREDICTION_ONLY &&
is_inter_mode(mode) &&
get_mask_bits(mi->sb_type) &&
mi->ref_frame[1] > INTRA_FRAME) {
- vp9_write(bc, mi->use_masked_compound, pc->fc.masked_compound_prob);
+ vp9_write(bc, mi->use_masked_compound, pc->fc.masked_compound_prob[bsize]);
if (mi->use_masked_compound) {
vp9_write_literal(bc, mi->mask_index, get_mask_bits(mi->sb_type));
}
int b;
cm->use_interintra = 0;
for (b = 0; b < BLOCK_SIZE_TYPES; ++b) {
- if (is_interintra_allowed(b) && cpi->interintra_count[b][1] > 0) {
+ if (is_interintra_allowed(b) && (cpi->interintra_count[b][1] > 0)) {
cm->use_interintra = 1;
break;
}
vp9_wb_write_bit(wb, cm->use_interintra);
if (!cm->use_interintra)
vp9_zero(cpi->interintra_count);
+#if CONFIG_MASKED_COMPOUND
+ if (!cpi->dummy_packing && cm->use_interintra
+ && cm->use_masked_interintra) {
+ int k;
+ cm->use_masked_interintra = 0;
+ for (k = 0; k < BLOCK_SIZE_TYPES; ++k) {
+ if (is_interintra_allowed(k) && get_mask_bits_interintra(k) &&
+ (cpi->masked_interintra_count[k][1] > 0)) {
+ cm->use_masked_interintra = 1;
+ break;
+ }
+ }
+ }
+ if (cm->use_interintra) {
+ vp9_wb_write_bit(wb, cm->use_masked_interintra);
+ if (!cm->use_masked_interintra)
+ vp9_zero(cpi->masked_interintra_count);
+ }
+#endif
#endif
}
}
VP9_UPD_INTERINTRA_PROB,
cpi->interintra_count[b]);
}
+#if CONFIG_MASKED_COMPOUND
+ if (cm->use_masked_interintra) {
+ int k;
+ for (k = 0; k < BLOCK_SIZE_TYPES; ++k) {
+ if (is_interintra_allowed(k) && get_mask_bits_interintra(k))
+ vp9_cond_prob_diff_update(&header_bc,
+ &cm->fc.masked_interintra_prob[k],
+ VP9_UPD_MASKED_INTERINTRA_PROB,
+ cpi->masked_interintra_count[k]);
+ }
+ }
+#endif
}
#endif
VP9_MODE_UPDATE_PROB,
cpi->comp_inter_count[i]);
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (use_compound_pred) {
- if (!cpi->dummy_packing && cm->use_masked_compound)
- cm->use_masked_compound = (cpi->masked_compound_counts[1] > 0);
+ if (!cpi->dummy_packing && cm->use_masked_compound) {
+ cm->use_masked_compound = 0;
+ for (i = 0; i < BLOCK_SIZE_TYPES; i++) {
+ if (get_mask_bits(i) && (cpi->masked_compound_counts[i][1] > 0)) {
+ cm->use_masked_compound = 1;
+ break;
+ }
+ }
+ }
vp9_write_bit(&header_bc, cm->use_masked_compound);
if (cm->use_masked_compound) {
- vp9_cond_prob_update(&header_bc,
- &fc->masked_compound_prob,
- VP9_UPD_MASKED_COMPOUND_PROB,
- cpi->masked_compound_counts);
+ for (i = 0; i < BLOCK_SIZE_TYPES; i++) {
+ if (get_mask_bits(i))
+ vp9_cond_prob_diff_update(&header_bc,
+ &fc->masked_compound_prob[i],
+ VP9_UPD_MASKED_COMPOUND_PROB,
+ cpi->masked_compound_counts[i]);
+ }
} else {
vp9_zero(cpi->masked_compound_counts);
}
#if SEPARATE_INTERINTRA_UV
++cpi->uv_mode_count[mbmi->interintra_mode][mbmi->interintra_uv_mode];
#endif
- } else {
- ++cpi->interintra_count[mbmi->sb_type][0];
- }
+#if CONFIG_MASKED_COMPOUND
+ if (cm->use_masked_interintra &&
+ get_mask_bits_interintra(mbmi->sb_type))
+ ++cpi->masked_interintra_count[mbmi->sb_type]
+ [mbmi->use_masked_interintra];
+#endif
+ } else {
+ ++cpi->interintra_count[mbmi->sb_type][0];
+ }
}
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (cm->use_masked_compound &&
cm->comp_pred_mode != SINGLE_PREDICTION_ONLY &&
is_inter_mode(mbmi->mode) &&
get_mask_bits(mbmi->sb_type) &&
mbmi->ref_frame[1] > INTRA_FRAME) {
- ++cpi->masked_compound_counts[mbmi->use_masked_compound];
+ ++cpi->masked_compound_counts[bsize][mbmi->use_masked_compound];
}
#endif
// Set up destination pointers
setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
- /* Set up limit values for MV components to prevent them from
- * extending beyond the UMV borders assuming 16x16 block size */
- x->mv_row_min = -((mi_row * MI_SIZE)+ VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
- x->mv_col_min = -((mi_col * MI_SIZE)+ VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
- x->mv_row_max = ((cm->mi_rows - mi_row) * MI_SIZE
- + (VP9BORDERINPIXELS - MI_SIZE * mi_height - VP9_INTERP_EXTEND));
- x->mv_col_max = ((cm->mi_cols - mi_col) * MI_SIZE
- + (VP9BORDERINPIXELS - MI_SIZE * mi_width - VP9_INTERP_EXTEND));
+ // Set up limit values for MV components
+ // mv beyond the range do not produce new/different prediction block
+ x->mv_row_min = -((mi_row * MI_SIZE)+ MAX_BLOCK_SIZE - VP9_INTERP_EXTEND);
+ x->mv_col_min = -((mi_col * MI_SIZE)+ MAX_BLOCK_SIZE - VP9_INTERP_EXTEND);
+ x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
+ x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
// Set up distance of MB to edge of frame in 1/8th pel units
assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
#if CONFIG_INTERINTRA
vp9_zero(cpi->interintra_count);
vp9_zero(cpi->interintra_select_count);
+#if CONFIG_MASKED_COMPOUND
+ vp9_zero(cpi->masked_interintra_count);
+ vp9_zero(cpi->masked_interintra_select_count);
+#endif
#endif
#if CONFIG_FILTERINTRA
vp9_zero(cm->counts.filterintra);
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
vp9_zero(cpi->masked_compound_counts);
vp9_zero(cpi->masked_compound_select_counts);
#endif
#undef DIST
#undef IFMVCV
#undef CHECK_BETTER
-#if !CONFIG_MASKED_COMPOUND_INTER
+#if !CONFIG_MASKED_COMPOUND
#undef MIN
#undef MAX
#endif
mvcost, x->errorperbit);
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
#define MVC(r, c) \
(mvcost ? \
}
return bestsme;
}
-#endif // CONFIG_MASKED_COMPOUND_INTER
+#endif // CONFIG_MASKED_COMPOUND
int vp9_diamond_search_sadx4(MACROBLOCK *x,
int_mv *ref_mv, int_mv *best_mv, int search_param,
}
}
- while ((c + 2) < col_max) {
+ while ((c + 2) < col_max && fn_ptr->sdx3f != NULL) {
int i;
fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
int_mv *center_mv, const uint8_t *second_pred,
int w, int h);
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
int vp9_find_best_masked_sub_pixel_step_iteratively(
MACROBLOCK *x, uint8_t *mask, int mask_stride, int_mv *bestmv,
int_mv *ref_mv, int error_per_bit, const vp9_variance_fn_ptr_t *vfp,
int_mv *mvp_full, int step_param, int sadpb, int further_steps,
int do_refine, vp9_variance_fn_ptr_t *fn_ptr, int_mv *ref_mv,
int_mv *dst_mv, int is_second);
-#endif // CONFIG_MASKED_COMPOUND_INTER
+#endif // CONFIG_MASKED_COMPOUND
#endif // VP9_ENCODER_VP9_MCOMP_H_
vp9_sub_pixel_avg_variance4x4, NULL, NULL, NULL,
vp9_sad4x4x3, vp9_sad4x4x8, vp9_sad4x4x4d)
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
#define MBFP(BT, MSDF, MVF, MSVF) \
cpi->fn_ptr[BT].msdf = MSDF; \
cpi->fn_ptr[BT].mvf = MVF; \
cm->use_interintra = (fraction > threshold);
}
}
+
+#if CONFIG_MASKED_COMPOUND
+// Decides whether masked interintra stays enabled for the next frame:
+// keeps it on only if more than `threshold` of the interintra blocks
+// selected the masked variant this frame. No-op when no blocks voted.
+static void select_masked_interintra_mode(VP9_COMP *cpi) {
+ static const double threshold = 1/100.0;
+ VP9_COMMON *cm = &cpi->common;
+ int sum = cpi->masked_interintra_select_count[1] +
+ cpi->masked_interintra_select_count[0];
+ if (sum) {
+ double fraction = (double) cpi->masked_interintra_select_count[1] / sum;
+ cm->use_masked_interintra = (fraction > threshold);
+ }
+}
+#endif
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
static void select_masked_compound_mode(VP9_COMP *cpi) {
static const double threshold = 1/128.0;
VP9_COMMON *cm = &cpi->common;
if (sum) {
double fraction = (double) cpi->masked_compound_select_counts[1] / sum;
cm->use_masked_compound = (fraction > threshold);
- }
+ }
}
#endif
#if CONFIG_INTERINTRA
if (cm->current_video_frame == 0) {
cm->use_interintra = 1;
+#if CONFIG_MASKED_COMPOUND
+ cm->use_masked_interintra = 1;
+#endif
}
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (cm->current_video_frame == 0) {
cm->use_masked_compound = 0;
}
counts->mv = cpi->NMVcount;
#if CONFIG_INTERINTRA
vp9_copy(counts->interintra, cpi->interintra_count);
+#if CONFIG_MASKED_COMPOUND
+ vp9_copy(counts->masked_interintra, cpi->masked_interintra_count);
+#endif
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
vp9_copy(counts->masked_compound, cpi->masked_compound_counts);
#endif
if (!cpi->common.error_resilient_mode &&
}
#if CONFIG_INTERINTRA
- if (cm->frame_type != KEY_FRAME)
+ if (cm->frame_type != KEY_FRAME) {
select_interintra_mode(cpi);
+#if CONFIG_MASKED_COMPOUND
+ if (cpi->common.use_interintra)
+ select_masked_interintra_mode(cpi);
+ else
+ cpi->common.use_masked_interintra = 0;
+#endif
+ }
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (cm->frame_type != KEY_FRAME)
select_masked_compound_mode(cpi);
#endif
#if CONFIG_INTERINTRA
vp9_prob interintra_prob[BLOCK_SIZE_TYPES];
+#if CONFIG_MASKED_COMPOUND
+ vp9_prob masked_interintra_prob[BLOCK_SIZE_TYPES];
+#endif
#endif
#if CONFIG_FILTERINTRA
vp9_prob filterintra_prob[TX_SIZES][VP9_INTRA_MODES];
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
- vp9_prob masked_compound_prob;
+#if CONFIG_MASKED_COMPOUND
+ vp9_prob masked_compound_prob[BLOCK_SIZE_TYPES];
#endif
} CODING_CONTEXT;
#if CONFIG_INTERINTRA
unsigned int interintra_count[BLOCK_SIZE_TYPES][2];
unsigned int interintra_select_count[2];
+#if CONFIG_MASKED_COMPOUND
+ unsigned int masked_interintra_count[BLOCK_SIZE_TYPES][2];
+ unsigned int masked_interintra_select_count[2];
+#endif
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
- unsigned int masked_compound_counts[2];
+#if CONFIG_MASKED_COMPOUND
+ unsigned int masked_compound_counts[BLOCK_SIZE_TYPES][2];
unsigned int masked_compound_select_counts[2];
#endif
vp9_copy(cc->mbskip_probs, cm->fc.mbskip_probs);
#if CONFIG_INTERINTRA
vp9_copy(cc->interintra_prob, cm->fc.interintra_prob);
+#if CONFIG_MASKED_COMPOUND
+ vp9_copy(cc->masked_interintra_prob, cm->fc.masked_interintra_prob);
+#endif
#endif
#if CONFIG_FILTERINTRA
vp9_copy(cc->filterintra_prob, cm->fc.filterintra_prob);
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
- cc->masked_compound_prob = cm->fc.masked_compound_prob;
+#if CONFIG_MASKED_COMPOUND
+ vp9_copy(cc->masked_compound_prob, cm->fc.masked_compound_prob);
#endif
}
vp9_copy(cm->fc.mbskip_probs, cc->mbskip_probs);
#if CONFIG_INTERINTRA
vp9_copy(cm->fc.interintra_prob, cc->interintra_prob);
+#if CONFIG_MASKED_COMPOUND
+ vp9_copy(cm->fc.masked_interintra_prob, cc->masked_interintra_prob);
+#endif
#endif
#if CONFIG_FILTERINTRA
vp9_copy(cm->fc.filterintra_prob, cc->filterintra_prob);
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
- cm->fc.masked_compound_prob = cc->masked_compound_prob;
+#if CONFIG_MASKED_COMPOUND
+ vp9_copy(cm->fc.masked_compound_prob, cc->masked_compound_prob);
#endif
}
vpx_free(second_pred);
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
static void do_masked_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
uint8_t *mask, int mask_stride,
BLOCK_SIZE_TYPE bsize,
#if CONFIG_INTERINTRA
int *compmode_interintra_cost,
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
int *compmode_masked_cost,
#endif
int *rate_y, int64_t *distortion_y,
#if CONFIG_INTERINTRA
const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND || CONFIG_INTERINTRA
int rate_mv_tmp = 0;
#endif
const int num_refs = is_comp_pred ? 2 : 1;
is_interintra_allowed(mbmi->sb_type)) {
extend_for_interintra(xd, bsize);
}
+#if CONFIG_MASKED_COMPOUND
+ mbmi->use_masked_interintra = 0;
+ mbmi->interintra_mask_index = 0;
+ mbmi->interintra_uv_mask_index = 0;
+#endif
#endif
if (this_mode == NEWMV) {
if (frame_mv[refs[0]].as_int == INVALID_MV ||
frame_mv[refs[1]].as_int == INVALID_MV)
return INT64_MAX;
-#if !CONFIG_MASKED_COMPOUND_INTER
+#if !(CONFIG_MASKED_COMPOUND || CONFIG_INTERINTRA)
*rate2 += rate_mv;
#endif
} else {
int_mv tmp_mv;
single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
-#if !CONFIG_MASKED_COMPOUND_INTER
+#if !(CONFIG_MASKED_COMPOUND || CONFIG_INTERINTRA)
*rate2 += rate_mv;
#endif
frame_mv[refs[0]].as_int =
xd->mode_info_context->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
single_newmv[refs[0]].as_int = tmp_mv.as_int;
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND || CONFIG_INTERINTRA
rate_mv_tmp = rate_mv;
#endif
}
int64_t dist_sum = 0;
if ((cm->mcomp_filter_type == SWITCHABLE &&
(!i || best_needs_copy)) ||
+#if CONFIG_INTERINTRA
+ (is_inter_mode(this_mode) && is_comp_interintra_pred &&
+ is_interintra_allowed(mbmi->sb_type)) ||
+#endif
(cm->mcomp_filter_type != SWITCHABLE &&
(cm->mcomp_filter_type == mbmi->interp_filter ||
(!interpolating_intpel_seen && is_intpel_interp)))) {
int64_t best_interintra_rd = INT64_MAX;
int rmode, rate_sum;
int64_t dist_sum;
+#if CONFIG_MASKED_COMPOUND
+#define MASKED_INTERINTRA_REFINE_SEARCH
+ int maskbits, mask_types, mask_index, best_mask_index = 0;
+ int64_t best_interintra_rd_nomask, best_interintra_rd_mask = INT64_MAX;
+ int rmask;
+#ifdef MASKED_INTERINTRA_REFINE_SEARCH
+ int bw = 4 << b_width_log2(bsize), bh = 4 << b_height_log2(bsize);
+ uint8_t mask[4096];
+ int_mv tmp_mv;
+ int tmp_rate_mv;
+ MB_PREDICTION_MODE best_interintra_mode_mask;
+#endif
+#endif
for (interintra_mode = DC_PRED; interintra_mode <= TM_PRED;
++interintra_mode) {
mbmi->interintra_mode = interintra_mode;
mbmi->interintra_mode = best_interintra_mode;
#if !SEPARATE_INTERINTRA_UV
mbmi->interintra_uv_mode = best_interintra_mode;
+#endif
+#if CONFIG_MASKED_COMPOUND
+ maskbits = get_mask_bits_interintra(bsize);
+ rmode = x->mbmode_cost[mbmi->interintra_mode];
+ if (maskbits) {
+ mbmi->use_masked_interintra = 0;
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum);
+ rmask = vp9_cost_bit(cm->fc.masked_interintra_prob[bsize], 0);
+ rd = RDCOST(x->rdmult, x->rddiv,
+ rmode + rate_mv_tmp + rmask + rate_sum, dist_sum);
+ best_interintra_rd_nomask = rd;
+
+ mbmi->use_masked_interintra = 1;
+ rmask = maskbits * 256 +
+ vp9_cost_bit(cm->fc.masked_interintra_prob[bsize], 1);
+ mask_types = (1 << maskbits);
+ for (mask_index = 0; mask_index < mask_types; ++mask_index) {
+ mbmi->interintra_mask_index = mask_index;
+ mbmi->interintra_uv_mask_index = mask_index;
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum);
+ rd = RDCOST(x->rdmult, x->rddiv,
+ rmode + rate_mv_tmp + rmask + rate_sum, dist_sum);
+ if (rd < best_interintra_rd_mask) {
+ best_interintra_rd_mask = rd;
+ best_mask_index = mask_index;
+ }
+ }
+#ifdef MASKED_INTERINTRA_REFINE_SEARCH
+ // Refine motion vector
+ if (this_mode == NEWMV) {
+ mbmi->interintra_mask_index = best_mask_index;
+ mbmi->interintra_uv_mask_index = best_mask_index;
+ vp9_generate_masked_weight_interintra(best_mask_index, bsize,
+ bh, bw, mask, bw);
+ do_masked_motion_search(cpi, x, mask, bw, bsize, refs[0],
+ mi_row, mi_col, &tmp_mv, &tmp_rate_mv, 0);
+ mbmi->mv[0].as_int = tmp_mv.as_int;
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum);
+ rd = RDCOST(x->rdmult, x->rddiv,
+ rmode + tmp_rate_mv + rmask + rate_sum, dist_sum);
+ if (rd < best_interintra_rd_mask) {
+ best_interintra_rd_mask = rd;
+ } else {
+ tmp_mv.as_int = cur_mv[0].as_int;
+ tmp_rate_mv = rate_mv_tmp;
+ }
+ } else {
+ tmp_mv.as_int = cur_mv[0].as_int;
+ tmp_rate_mv = rate_mv_tmp;
+ }
+ // Refine intra prediction
+ best_interintra_mode_mask = best_interintra_mode;
+ mbmi->mv[0].as_int = tmp_mv.as_int;
+ for (interintra_mode = DC_PRED; interintra_mode <= TM_PRED;
+ ++interintra_mode) {
+ mbmi->interintra_mode = interintra_mode;
+ #if !SEPARATE_INTERINTRA_UV
+ mbmi->interintra_uv_mode = interintra_mode;
+ #endif
+ rmode = x->mbmode_cost[mbmi->interintra_mode];
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum);
+ rd = RDCOST(x->rdmult, x->rddiv,
+ rmode + tmp_rate_mv + rmask + rate_sum, dist_sum);
+ if (rd < best_interintra_rd_mask) {
+ best_interintra_rd_mask = rd;
+ best_interintra_mode_mask = interintra_mode;
+ }
+ }
+ mbmi->interintra_mode = best_interintra_mode_mask;
+ #if !SEPARATE_INTERINTRA_UV
+ mbmi->interintra_uv_mode = best_interintra_mode_mask;
+ #endif
+#endif
+ if (best_interintra_rd_mask < best_interintra_rd_nomask) {
+ mbmi->use_masked_interintra = 1;
+ if (cm->use_masked_interintra) {
+ mbmi->interintra_mask_index = best_mask_index;
+ mbmi->interintra_uv_mask_index = best_mask_index;
+#ifdef MASKED_INTERINTRA_REFINE_SEARCH
+ mbmi->mv[0].as_int = tmp_mv.as_int;
+ rate_mv_tmp = tmp_rate_mv;
+#endif
+ } else {
+ mbmi->interintra_mode = best_interintra_mode;
+ #if !SEPARATE_INTERINTRA_UV
+ mbmi->interintra_uv_mode = best_interintra_mode;
+ #endif
+ mbmi->mv[0].as_int = cur_mv[0].as_int;
+ }
+ } else {
+ mbmi->use_masked_interintra = 0;
+#ifdef MASKED_INTERINTRA_REFINE_SEARCH
+ mbmi->interintra_mode = best_interintra_mode;
+ #if !SEPARATE_INTERINTRA_UV
+ mbmi->interintra_uv_mode = best_interintra_mode;
+ #endif
+ mbmi->mv[0].as_int = cur_mv[0].as_int;
+#endif
+ }
+
+ ++cpi->masked_interintra_select_count[mbmi->use_masked_interintra];
+ if (!cm->use_masked_interintra)
+ mbmi->use_masked_interintra = 0;
+ }
#endif
pred_exists = 0;
}
- if (!is_comp_pred) {
+ if (!is_comp_pred && is_interintra_allowed(mbmi->sb_type)) {
*compmode_interintra_cost = vp9_cost_bit(cm->fc.interintra_prob[bsize],
is_comp_interintra_pred);
- if (is_comp_interintra_pred && is_interintra_allowed(mbmi->sb_type)) {
+ if (is_comp_interintra_pred) {
*compmode_interintra_cost += x->mbmode_cost[mbmi->interintra_mode];
#if SEPARATE_INTERINTRA_UV
*compmode_interintra_cost +=
x->intra_uv_mode_cost[xd->frame_type][mbmi->interintra_uv_mode];
#endif
+#if CONFIG_MASKED_COMPOUND
+ if (get_mask_bits_interintra(bsize) && cm->use_masked_interintra) {
+ *compmode_interintra_cost += vp9_cost_bit(
+ cm->fc.masked_interintra_prob[bsize],
+ mbmi->use_masked_interintra);
+ if (mbmi->use_masked_interintra) {
+ *compmode_interintra_cost += get_mask_bits_interintra(bsize) * 256;
+ }
}
+#endif
+ }
}
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if (is_comp_pred && get_mask_bits(bsize)) {
int mask_index, best_mask_index = -1, rs;
int rate_sum;
int64_t best_rd_mask = INT64_MAX;
int mask_types;
mbmi->use_masked_compound = 0;
- rs = vp9_cost_bit(cm->fc.masked_compound_prob, 0);
+ rs = vp9_cost_bit(cm->fc.masked_compound_prob[bsize], 0);
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum);
rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv_tmp + rate_sum, dist_sum);
best_rd_nomask = rd;
mbmi->use_masked_compound = 1;
rs = get_mask_bits(bsize) * 256 +
- vp9_cost_bit(cm->fc.masked_compound_prob, 1);
+ vp9_cost_bit(cm->fc.masked_compound_prob[bsize], 1);
mask_types = (1 << get_mask_bits(bsize));
if (this_mode == NEWMV) {
#define USE_MASKED_NEWMV_FAST_SEARCH
pred_exists = 0;
if (mbmi->use_masked_compound)
*compmode_masked_cost = get_mask_bits(bsize) * 256 +
- vp9_cost_bit(cm->fc.masked_compound_prob, 1);
+ vp9_cost_bit(cm->fc.masked_compound_prob[bsize], 1);
else
- *compmode_masked_cost = vp9_cost_bit(cm->fc.masked_compound_prob, 0);
+ *compmode_masked_cost = vp9_cost_bit(cm->fc.masked_compound_prob[bsize],
+ 0);
}
+#endif // CONFIG_MASKED_COMPOUND
+
+#if CONFIG_INTERINTRA || CONFIG_MASKED_COMPOUND
*rate2 += rate_mv_tmp;
-#endif // CONFIG_MASKED_COMPOUND_INTER
+#endif
if (pred_exists) {
if (best_needs_copy) {
int bhsl = b_height_log2(bsize);
int bhs = (1 << bhsl) / 4; // mode_info step for subsize
int best_skip2 = 0;
-#if CONFIG_INTERINTRA || CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_INTERINTRA || CONFIG_MASKED_COMPOUND
int64_t best_overall_rd = INT64_MAX;
#endif
#if CONFIG_INTERINTRA
#endif
int64_t best_intra16_rd = INT64_MAX;
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
int is_best_masked_compound = 0;
#endif
#if CONFIG_INTERINTRA
int compmode_interintra_cost = 0;
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
int compmode_masked_cost = 0;
#endif
int rate2 = 0, rate_y = 0, rate_uv = 0;
mbmi->interintra_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
mbmi->interintra_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
mbmi->use_masked_compound = 0;
mbmi->mask_index = MASK_NONE;
#endif
#if CONFIG_INTERINTRA
&compmode_interintra_cost,
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
&compmode_masked_cost,
#endif
&rate_y, &distortion_y,
rate2 += compmode_interintra_cost;
}
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
if ((cpi->common.comp_pred_mode == HYBRID_PREDICTION ||
cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY) &&
cpi->common.use_masked_compound)
best_filter_rd[i] = MIN(best_filter_rd[i], this_rd);
}
-#if CONFIG_INTERINTRA || CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_INTERINTRA || CONFIG_MASKED_COMPOUND
if (this_rd < best_overall_rd) {
best_overall_rd = this_rd;
#if CONFIG_INTERINTRA
is_best_interintra = (second_ref_frame == INTRA_FRAME);
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
is_best_masked_compound = (mbmi->mask_index != MASK_NONE);
#endif
}
++cpi->interintra_select_count[is_best_interintra];
#endif
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
// diagnostics
if ((cpi->common.comp_pred_mode == HYBRID_PREDICTION ||
cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY) &&
ref_ptr[3], ref_stride, 0x7fffffff);
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
unsigned int vp9_masked_sad64x64_c(const uint8_t *src_ptr,
int src_stride,
const uint8_t *ref_ptr,
vp9_write(w, 0, upd);
}
}
-
-#if CONFIG_INTERINTRA || CONFIG_MASKED_COMPOUND_INTER
-static int prob_update_savings(const unsigned int *ct,
- const vp9_prob oldp, const vp9_prob newp,
- const vp9_prob upd) {
- const int old_b = cost_branch256(ct, oldp);
- const int new_b = cost_branch256(ct, newp);
- const int update_b = 2048 + vp9_cost_upd256;
- return old_b - new_b - update_b;
-}
-
-void vp9_cond_prob_update(vp9_writer *w, vp9_prob *oldp, vp9_prob upd,
- unsigned int *ct) {
- vp9_prob newp;
- int savings;
- newp = get_binary_prob(ct[0], ct[1]);
- assert(newp >= 1);
- savings = prob_update_savings(ct, *oldp, newp, upd);
- if (savings > 0) {
- vp9_write(w, 1, upd);
- vp9_write_prob(w, newp);
- *oldp = newp;
- } else {
- vp9_write(w, 0, upd);
- }
-}
-#endif
void vp9_cond_prob_diff_update(vp9_writer *w, vp9_prob *oldp,
vp9_prob upd, unsigned int *ct);
-#if CONFIG_INTERINTRA || CONFIG_MASKED_COMPOUND_INTER
-void vp9_cond_prob_update(vp9_writer *w, vp9_prob *oldp, vp9_prob upd,
- unsigned int *ct);
-#endif
-
int vp9_prob_diff_update_savings_search(const unsigned int *ct,
vp9_prob oldp, vp9_prob *bestp,
vp9_prob upd);
unsigned int *sse,
const uint8_t *second_pred);
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
typedef unsigned int(*vp9_masked_sad_fn_t)(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
vp9_sad_multi_fn_t sdx3f;
vp9_sad_multi1_fn_t sdx8f;
vp9_sad_multi_d_fn_t sdx4df;
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
vp9_masked_sad_fn_t msdf;
vp9_masked_variance_fn_t mvf;
vp9_masked_subpixvariance_fn_t msvf;
return vp9_variance4x8(temp3, 4, dst_ptr, dst_pixels_per_line, sse);
}
-#if CONFIG_MASKED_COMPOUND_INTER
+#if CONFIG_MASKED_COMPOUND
unsigned int vp9_masked_variance64x64_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,