return WEDGE_BITS_BIG;
}
+// Returns 1 if the wedge predictor is available for inter-inter compound
+// prediction at this block size (i.e. wedge codewords exist for it).
+static INLINE int is_interinter_wedge_used(BLOCK_SIZE sb_type) {
+  return get_wedge_bits(sb_type) > 0;  // sb_type is used; no (void) cast needed
+}
+
+// Returns 1 if the wedge predictor is available for inter-intra prediction
+// at this block size. Deliberately disabled for now (always 0); the intended
+// check is kept in the trailing comment so it can be re-enabled later.
+// The (void) cast suppresses the unused-parameter warning while disabled.
+static INLINE int is_interintra_wedge_used(BLOCK_SIZE sb_type) {
+  (void) sb_type;
+  return 0;  // get_wedge_bits(sb_type) > 0;
+}
+
static INLINE int is_inter_singleref_mode(PREDICTION_MODE mode) {
return mode >= NEARESTMV && mode <= NEWFROMNEARMV;
}
&& is_interintra_allowed_ref(mbmi->ref_frame);
}
+// Returns 1 if inter-intra prediction is allowed for at least one block size
+// mapping to the given size group, 0 otherwise. Used when probabilities are
+// indexed per size group rather than per block size.
+static INLINE int is_interintra_allowed_bsize_group(const int group) {
+  int i;
+  for (i = 0; i < BLOCK_SIZES; i++) {
+    // Explicit cast: the loop index is an int iterating over BLOCK_SIZE values.
+    if (size_group_lookup[i] == group &&
+        is_interintra_allowed_bsize((BLOCK_SIZE)i))
+      return 1;
+  }
+  return 0;
+}
+
static INLINE int is_interintra_pred(const MB_MODE_INFO *mbmi) {
return (mbmi->ref_frame[1] == INTRA_FRAME) && is_interintra_allowed(mbmi);
}
{25, 29, 50, 192, 192, 128, 180, 180}, // 6 = two intra neighbours
};
-static const vpx_prob default_interintra_prob[BLOCK_SIZES] = {
- 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
-#if CONFIG_EXT_PARTITION
- 192, 192, 192
-#endif // CONFIG_EXT_PARTITION
+static const vpx_prob default_interintra_prob[BLOCK_SIZE_GROUPS] = {
+ 208, 208, 208, 208,
+};
+
+static const vpx_prob
+ default_interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1] = {
+ { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8
+ { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16
+ { 173, 80, 19, 176, 240, 193, 64, 35, 46 }, // block_size < 32x32
+ { 221, 135, 38, 194, 248, 121, 96, 85, 29 } // block_size >= 32x32
};
static const vpx_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
- 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
+ 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208,
#if CONFIG_EXT_PARTITION
- 192, 192, 192
+ 208, 208, 208
#endif // CONFIG_EXT_PARTITION
};
static const vpx_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
- 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
+ 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208,
#if CONFIG_EXT_PARTITION
- 192, 192, 192
+ 208, 208, 208
#endif // CONFIG_EXT_PARTITION
};
#endif // CONFIG_EXT_INTER
-II_D153_PRED, -II_D207_PRED /* 8 = II_D153_NODE */
};
-static const vpx_prob
- default_interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1] = {
- { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8
- { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16
- { 173, 80, 19, 176, 240, 193, 64, 35, 46 }, // block_size < 32x32
- { 221, 135, 38, 194, 248, 121, 96, 85, 29 } // block_size >= 32x32
-};
-
const vpx_tree_index vp10_inter_compound_mode_tree
[TREE_SIZE(INTER_COMPOUND_MODES)] = {
-INTER_COMPOUND_OFFSET(ZERO_ZEROMV), 2,
#if CONFIG_EXT_INTRA
static const vpx_prob
default_intra_filter_probs[INTRA_FILTERS + 1][INTRA_FILTERS - 1] = {
- { 98, 63, 60, },
- { 98, 82, 80, },
- { 94, 65, 103, },
- { 49, 25, 24, },
- { 72, 38, 50, },
+ { 98, 63, 60, },
+ { 98, 82, 80, },
+ { 94, 65, 103, },
+ { 49, 25, 24, },
+ { 72, 38, 50, },
};
static const vpx_prob default_ext_intra_probs[2] = {230, 230};
pre_fc->inter_compound_mode_probs[i],
counts->inter_compound_mode[i],
fc->inter_compound_mode_probs[i]);
- for (i = 0; i < BLOCK_SIZES; ++i) {
- if (is_interintra_allowed_bsize(i))
+ for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
+ if (is_interintra_allowed_bsize_group(i))
fc->interintra_prob[i] = mode_mv_merge_probs(pre_fc->interintra_prob[i],
counts->interintra[i]);
}
counts->interintra_mode[i], fc->interintra_mode_prob[i]);
}
for (i = 0; i < BLOCK_SIZES; ++i) {
- if (is_interintra_allowed_bsize(i) && get_wedge_bits(i))
+ if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
fc->wedge_interintra_prob[i] = mode_mv_merge_probs(
pre_fc->wedge_interintra_prob[i], counts->wedge_interintra[i]);
}
for (i = 0; i < BLOCK_SIZES; ++i) {
- if (get_wedge_bits(i))
+ if (is_interinter_wedge_used(i))
fc->wedge_interinter_prob[i] = mode_mv_merge_probs(
pre_fc->wedge_interinter_prob[i], counts->wedge_interinter[i]);
}
#if CONFIG_EXT_INTER
vpx_prob inter_compound_mode_probs[INTER_MODE_CONTEXTS]
[INTER_COMPOUND_MODES - 1];
- vpx_prob interintra_prob[BLOCK_SIZES];
+ vpx_prob interintra_prob[BLOCK_SIZE_GROUPS];
vpx_prob interintra_mode_prob[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
vpx_prob wedge_interintra_prob[BLOCK_SIZES];
vpx_prob wedge_interinter_prob[BLOCK_SIZES];
unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES];
#if CONFIG_EXT_INTER
unsigned int inter_compound_mode[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES];
- unsigned int interintra[BLOCK_SIZES][2];
+ unsigned int interintra[BLOCK_SIZE_GROUPS][2];
unsigned int interintra_mode[BLOCK_SIZE_GROUPS][INTERINTRA_MODES];
unsigned int wedge_interintra[BLOCK_SIZES][2];
unsigned int wedge_interinter[BLOCK_SIZES][2];
}
void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
+ BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
MAX_MB_PLANE - 1);
#if CONFIG_EXT_INTER
#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
-static const int weights1d[MAX_SB_SIZE] = {
+static const int ii_weights1d[MAX_SB_SIZE] = {
128, 127, 125, 124, 123, 122, 120, 119,
118, 117, 116, 115, 113, 112, 111, 110,
109, 108, 107, 106, 105, 104, 103, 103,
62, 62, 62, 61, 61, 61, 61, 61,
61, 60, 60, 60, 60, 60, 60, 60,
};
-static int size_scales[BLOCK_SIZES] = {
+// Per-block-size subsampling step into ii_weights1d; read-only, so const.
+static const int ii_size_scales[BLOCK_SIZES] = {
  32, 16, 16, 16, 8, 8, 8, 4, 4, 4, 2, 2, 2, 1, 1, 1
};
#else
-static const int weights1d[MAX_SB_SIZE] = {
-  128, 125, 123, 120, 118, 116, 113, 111,
-  109, 107, 105, 103, 102, 100, 98, 97,
-  95, 94, 92, 91, 89, 88, 87, 86,
-  84, 83, 82, 81, 80, 79, 78, 77,
-  76, 75, 75, 74, 73, 72, 72, 71,
-  70, 69, 69, 68, 68, 67, 67, 66,
-  66, 65, 65, 64, 64, 63, 63, 62,
-  62, 62, 61, 61, 61, 60, 60, 60,
+// 1-D inter-intra blending weights for the intra component, sampled at
+// (pixel distance * ii_size_scales[bsize]); monotonically decreasing with
+// distance. NOTE(review): retuned from the old 128..60 ramp — presumably to
+// weight the intra predictor less far from the edge; confirm against the
+// corresponding encoder change.
+static const int ii_weights1d[MAX_SB_SIZE] = {
+  102, 100, 97, 95, 92, 90, 88, 86,
+  84, 82, 80, 78, 76, 74, 73, 71,
+  69, 68, 67, 65, 64, 62, 61, 60,
+  59, 58, 57, 55, 54, 53, 52, 52,
+  51, 50, 49, 48, 47, 47, 46, 45,
+  45, 44, 43, 43, 42, 41, 41, 40,
+  40, 39, 39, 38, 38, 38, 37, 37,
+  36, 36, 36, 35, 35, 35, 34, 34,
};
-static int size_scales[BLOCK_SIZES] = {
+// Per-block-size subsampling step into ii_weights1d; read-only, so const.
+static const int ii_size_scales[BLOCK_SIZES] = {
  16, 8, 8, 8, 4, 4, 4, 2, 2, 2, 1, 1, 1
};
#endif // CONFIG_EXT_PARTITION
static const int scale_round = 127;
const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
- const int size_scale = size_scales[plane_bsize];
+ const int size_scale = ii_size_scales[plane_bsize];
int i, j;
if (use_wedge_interintra) {
case II_V_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = weights1d[i * size_scale];
+ int scale = ii_weights1d[i * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_H_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = weights1d[j * size_scale];
+ int scale = ii_weights1d[j * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_D117_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = (weights1d[i * size_scale] * 3 +
- weights1d[j * size_scale]) >> 2;
+ int scale = (ii_weights1d[i * size_scale] * 3 +
+ ii_weights1d[j * size_scale]) >> 2;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_D153_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = (weights1d[j * size_scale] * 3 +
- weights1d[i * size_scale]) >> 2;
+ int scale = (ii_weights1d[j * size_scale] * 3 +
+ ii_weights1d[i * size_scale]) >> 2;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_D135_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = weights1d[(i < j ? i : j) * size_scale];
+ int scale = ii_weights1d[(i < j ? i : j) * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_D45_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = (weights1d[i * size_scale] +
- weights1d[j * size_scale]) >> 1;
+ int scale = (ii_weights1d[i * size_scale] +
+ ii_weights1d[j * size_scale]) >> 1;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
static const int scale_round = 127;
const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
- const int size_scale = size_scales[plane_bsize];
+ const int size_scale = ii_size_scales[plane_bsize];
int i, j;
uint16_t *comppred = CONVERT_TO_SHORTPTR(comppred8);
uint16_t *intrapred = CONVERT_TO_SHORTPTR(intrapred8);
(void) bd;
- if (use_wedge_interintra && get_wedge_bits(bsize)) {
- const uint8_t *mask = vp10_get_soft_mask(wedge_index, bsize, bh, bw);
- for (i = 0; i < bh; ++i) {
- for (j = 0; j < bw; ++j) {
- int m = mask[i * MASK_MASTER_STRIDE + j];
- comppred[i * compstride + j] =
- (intrapred[i * intrastride + j] * m +
- interpred[i * interstride + j] * ((1 << WEDGE_WEIGHT_BITS) - m) +
- (1 << (WEDGE_WEIGHT_BITS - 1))) >> WEDGE_WEIGHT_BITS;
+ if (use_wedge_interintra) {
+ if (get_wedge_bits(bsize)) {
+ const uint8_t *mask = vp10_get_soft_mask(wedge_index, bsize, bh, bw);
+ for (i = 0; i < bh; ++i) {
+ for (j = 0; j < bw; ++j) {
+ int m = mask[i * MASK_MASTER_STRIDE + j];
+ comppred[i * compstride + j] =
+ (intrapred[i * intrastride + j] * m +
+ interpred[i * interstride + j] * ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >> WEDGE_WEIGHT_BITS;
+ }
}
}
return;
case II_V_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = weights1d[i * size_scale];
+ int scale = ii_weights1d[i * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_H_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = weights1d[j * size_scale];
+ int scale = ii_weights1d[j * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_D117_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = (weights1d[i * size_scale] * 3 +
- weights1d[j * size_scale]) >> 2;
+ int scale = (ii_weights1d[i * size_scale] * 3 +
+ ii_weights1d[j * size_scale]) >> 2;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_D153_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = (weights1d[j * size_scale] * 3 +
- weights1d[i * size_scale]) >> 2;
+ int scale = (ii_weights1d[j * size_scale] * 3 +
+ ii_weights1d[i * size_scale]) >> 2;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_D135_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = weights1d[(i < j ? i : j) * size_scale];
+ int scale = ii_weights1d[(i < j ? i : j) * size_scale];
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
case II_D45_PRED:
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
- int scale = (weights1d[i * size_scale] +
- weights1d[j * size_scale]) >> 1;
+ int scale = (ii_weights1d[i * size_scale] +
+ ii_weights1d[j * size_scale]) >> 1;
comppred[i * compstride + j] =
((scale_max - scale) * interpred[i * interstride + j] +
scale * intrapred[i * intrastride + j] + scale_round)
0, 0, plane);
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- uint16_t *src2_16 = CONVERT_TO_SHORTPTR(src_2);
- uint16_t *dst2_16 = CONVERT_TO_SHORTPTR(dst_2);
- memcpy(src2_16 - ref_stride, dst2_16 - dst_stride,
- sizeof(*src2_16) * (4 << bhl));
+ uint16_t *src_216 = CONVERT_TO_SHORTPTR(src_2);
+ uint16_t *dst_216 = CONVERT_TO_SHORTPTR(dst_2);
+ memcpy(src_216 - ref_stride, dst_216 - dst_stride,
+ sizeof(*src_216) * (4 << bhl));
} else
#endif // CONFIG_VP9_HIGHBITDEPTH
{
BLOCK_SIZE bsize, int plane,
uint8_t *inter_pred, int inter_stride,
uint8_t *intra_pred, int intra_stride);
+void vp10_build_interintra_predictors_sbuv(MACROBLOCKD *xd,
+ uint8_t *upred,
+ uint8_t *vpred,
+ int ustride, int vstride,
+ BLOCK_SIZE bsize);
+void vp10_build_interintra_predictors_sby(MACROBLOCKD *xd,
+ uint8_t *ypred,
+ int ystride,
+ BLOCK_SIZE bsize);
// Encoder only
void vp10_build_inter_predictors_for_planes_single_buf(
cm->counts.inter_compound_mode[i][j] +=
counts->inter_compound_mode[i][j];
- for (i = 0; i < BLOCK_SIZES; i++)
+ for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
for (j = 0; j < 2; j++)
cm->counts.interintra[i][j] += counts->interintra[i][j];
buf_ptr = ((uint8_t *)mc_buf_high) + border_offset;
}
#if CONFIG_EXT_INTER
- if (ref && get_wedge_bits(xd->mi[0]->mbmi.sb_type) &&
+ if (ref && is_interinter_wedge_used(xd->mi[0]->mbmi.sb_type) &&
xd->mi[0]->mbmi.use_wedge_interinter)
vp10_make_masked_inter_predictor(
buf_ptr, b_w, dst, dst_buf_stride,
x0, y0, b_w, b_h, frame_width, frame_height);
buf_ptr = mc_buf + border_offset;
#if CONFIG_EXT_INTER
- if (ref && get_wedge_bits(xd->mi[0]->mbmi.sb_type) &&
+ if (ref && is_interinter_wedge_used(xd->mi[0]->mbmi.sb_type) &&
xd->mi[0]->mbmi.use_wedge_interinter)
vp10_make_masked_inter_predictor(
buf_ptr, b_w, dst, dst_buf_stride,
}
}
#if CONFIG_EXT_INTER
- if (ref && get_wedge_bits(mi->mbmi.sb_type) &&
+ if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
mi->mbmi.use_wedge_interinter) {
vp10_make_masked_inter_predictor(
buf_ptr, buf_stride, dst, dst_buf->stride,
#if CONFIG_EXT_INTER
read_inter_compound_mode_probs(fc, &r);
if (cm->reference_mode != COMPOUND_REFERENCE) {
- for (i = 0; i < BLOCK_SIZES; i++) {
- if (is_interintra_allowed_bsize(i)) {
+ for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
+ if (is_interintra_allowed_bsize_group(i)) {
vp10_diff_update_prob(&r, &fc->interintra_prob[i]);
}
}
vp10_diff_update_prob(&r, &fc->interintra_mode_prob[i][j]);
}
for (i = 0; i < BLOCK_SIZES; i++) {
- if (is_interintra_allowed_bsize(i) && get_wedge_bits(i)) {
+ if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i)) {
vp10_diff_update_prob(&r, &fc->wedge_interintra_prob[i]);
}
}
}
if (cm->reference_mode != SINGLE_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++) {
- if (get_wedge_bits(i)) {
+ if (is_interinter_wedge_used(i)) {
vp10_diff_update_prob(&r, &fc->wedge_interinter_prob[i]);
}
}
!supertx_enabled &&
#endif
is_interintra_allowed(mbmi)) {
- const int interintra = vp10_read(r, cm->fc->interintra_prob[bsize]);
+ const int bsize_group = size_group_lookup[bsize];
+ const int interintra = vpx_read(r, cm->fc->interintra_prob[bsize_group]);
if (xd->counts)
- xd->counts->interintra[bsize][interintra]++;
+ xd->counts->interintra[bsize_group][interintra]++;
assert(mbmi->ref_frame[1] == NONE);
if (interintra) {
const INTERINTRA_MODE interintra_mode =
- read_interintra_mode(cm, xd, r, size_group_lookup[bsize]);
+ read_interintra_mode(cm, xd, r, bsize_group);
mbmi->ref_frame[1] = INTRA_FRAME;
mbmi->interintra_mode = interintra_mode;
mbmi->interintra_uv_mode = interintra_mode;
mbmi->angle_delta[1] = 0;
mbmi->intra_filter = INTRA_FILTER_LINEAR;
#endif // CONFIG_EXT_INTRA
- if (get_wedge_bits(bsize)) {
+ if (is_interintra_wedge_used(bsize)) {
mbmi->use_wedge_interintra =
vp10_read(r, cm->fc->wedge_interintra_prob[bsize]);
if (xd->counts)
#if CONFIG_OBMC
!(is_obmc_allowed(mbmi) && mbmi->obmc) &&
#endif // CONFIG_OBMC
- get_wedge_bits(bsize)) {
+ is_interinter_wedge_used(bsize)) {
mbmi->use_wedge_interinter =
vp10_read(r, cm->fc->wedge_interinter_prob[bsize]);
if (xd->counts)
#endif // CONFIG_SUPERTX
is_interintra_allowed(mbmi)) {
const int interintra = mbmi->ref_frame[1] == INTRA_FRAME;
- vp10_write(w, interintra, cm->fc->interintra_prob[bsize]);
+ const int bsize_group = size_group_lookup[bsize];
+ vpx_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
if (interintra) {
write_interintra_mode(
w, mbmi->interintra_mode,
- cm->fc->interintra_mode_prob[size_group_lookup[bsize]]);
+ cm->fc->interintra_mode_prob[bsize_group]);
assert(mbmi->interintra_mode == mbmi->interintra_uv_mode);
- if (get_wedge_bits(bsize)) {
- vp10_write(w, mbmi->use_wedge_interintra,
+ if (is_interintra_wedge_used(bsize)) {
+ vpx_write(w, mbmi->use_wedge_interintra,
cm->fc->wedge_interintra_prob[bsize]);
if (mbmi->use_wedge_interintra) {
vp10_write_literal(w, mbmi->interintra_wedge_index,
#if CONFIG_OBMC
!(is_obmc_allowed(mbmi) && mbmi->obmc) &&
#endif // CONFIG_OBMC
- get_wedge_bits(bsize)) {
- vp10_write(w, mbmi->use_wedge_interinter,
+ is_interinter_wedge_used(bsize)) {
+ vpx_write(w, mbmi->use_wedge_interinter,
cm->fc->wedge_interinter_prob[bsize]);
if (mbmi->use_wedge_interinter)
vp10_write_literal(w, mbmi->interinter_wedge_index,
update_inter_compound_mode_probs(cm, &header_bc);
if (cm->reference_mode != COMPOUND_REFERENCE) {
- for (i = 0; i < BLOCK_SIZES; i++) {
- if (is_interintra_allowed_bsize(i)) {
+ for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
+ if (is_interintra_allowed_bsize_group(i)) {
vp10_cond_prob_diff_update(&header_bc,
&fc->interintra_prob[i],
cm->counts.interintra[i]);
INTERINTRA_MODES, &header_bc);
}
for (i = 0; i < BLOCK_SIZES; i++) {
- if (is_interintra_allowed_bsize(i) && get_wedge_bits(i))
+ if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
vp10_cond_prob_diff_update(&header_bc,
&fc->wedge_interintra_prob[i],
cm->counts.wedge_interintra[i]);
}
if (cm->reference_mode != SINGLE_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++)
- if (get_wedge_bits(i))
+ if (is_interinter_wedge_used(i))
vp10_cond_prob_diff_update(&header_bc,
&fc->wedge_interinter_prob[i],
cm->counts.wedge_interinter[i]);
!supertx_enabled &&
#endif
is_interintra_allowed(mbmi)) {
+ const int bsize_group = size_group_lookup[bsize];
if (mbmi->ref_frame[1] == INTRA_FRAME) {
- counts->interintra[bsize][1]++;
- counts->interintra_mode[size_group_lookup[bsize]]
- [mbmi->interintra_mode]++;
- if (get_wedge_bits(bsize))
+ counts->interintra[bsize_group][1]++;
+ counts->interintra_mode[bsize_group][mbmi->interintra_mode]++;
+ if (is_interintra_wedge_used(bsize))
counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
} else {
- counts->interintra[bsize][0]++;
+ counts->interintra[bsize_group][0]++;
}
}
if (cm->reference_mode != SINGLE_REFERENCE &&
#if CONFIG_OBMC
!(is_obmc_allowed(mbmi) && mbmi->obmc) &&
#endif // CONFIG_OBMC
- get_wedge_bits(bsize)) {
+ is_interinter_wedge_used(bsize)) {
counts->wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
}
#endif // CONFIG_EXT_INTER
rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
#if CONFIG_EXT_INTER
- if (is_comp_pred && get_wedge_bits(bsize)) {
+ if (is_comp_pred && is_interinter_wedge_used(bsize)) {
int wedge_index, best_wedge_index = WEDGE_NONE, rs;
int rate_sum;
int64_t dist_sum;
mbmi->interintra_mode = (INTERINTRA_MODE)j;
mbmi->interintra_uv_mode = (INTERINTRA_MODE)j;
rmode = interintra_mode_cost[mbmi->interintra_mode];
- vp10_build_interintra_predictors(xd,
- tmp_buf,
- tmp_buf + MAX_SB_SQUARE,
- tmp_buf + 2 * MAX_SB_SQUARE,
- MAX_SB_SIZE,
- MAX_SB_SIZE,
- MAX_SB_SIZE,
- bsize);
+ vp10_build_intra_predictors_for_interintra(
+ xd, bsize, 0, intrapred, MAX_SB_SIZE);
+ vp10_combine_interintra(xd, bsize, 0, tmp_buf, MAX_SB_SIZE,
+ intrapred, MAX_SB_SIZE);
+ vp10_build_intra_predictors_for_interintra(
+ xd, bsize, 1, intrapred + MAX_SB_SQUARE, MAX_SB_SIZE);
+ vp10_build_intra_predictors_for_interintra(
+ xd, bsize, 2, intrapred + 2 * MAX_SB_SQUARE, MAX_SB_SIZE);
+ vp10_combine_interintra(xd, bsize, 1,
+ tmp_buf + MAX_SB_SQUARE, MAX_SB_SIZE,
+ intrapred + MAX_SB_SQUARE, MAX_SB_SIZE);
+ vp10_combine_interintra(xd, bsize, 2,
+ tmp_buf + 2 * MAX_SB_SQUARE, MAX_SB_SIZE,
+ intrapred + 2 * MAX_SB_SQUARE, MAX_SB_SIZE);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
&skip_txfm_sb, &skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rate_mv + rmode + rate_sum, dist_sum);
vp10_build_intra_predictors_for_interintra(
xd, bsize, 2, intrapred + 2 * MAX_SB_SQUARE, MAX_SB_SIZE);
- wedge_bits = get_wedge_bits(bsize);
rmode = interintra_mode_cost[mbmi->interintra_mode];
- if (wedge_bits) {
+ if (is_interintra_wedge_used(bsize)) {
+ wedge_bits = get_wedge_bits(bsize);
vp10_combine_interintra(xd, bsize, 0, tmp_buf, MAX_SB_SIZE,
intrapred, MAX_SB_SIZE);
vp10_combine_interintra(xd, bsize, 1,
pred_exists = 0;
tmp_rd = best_interintra_rd;
*compmode_interintra_cost =
- vp10_cost_bit(cm->fc->interintra_prob[bsize], 1);
+ vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 1);
*compmode_interintra_cost += interintra_mode_cost[mbmi->interintra_mode];
- if (get_wedge_bits(bsize)) {
+ if (is_interintra_wedge_used(bsize)) {
*compmode_interintra_cost += vp10_cost_bit(
cm->fc->wedge_interintra_prob[bsize], mbmi->use_wedge_interintra);
if (mbmi->use_wedge_interintra) {
}
} else if (is_interintra_allowed(mbmi)) {
*compmode_interintra_cost =
- vp10_cost_bit(cm->fc->interintra_prob[bsize], 0);
+ vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
}
#if CONFIG_EXT_INTERP