return master;
}
-static const uint8_t *get_wedge_mask(int wedge_index,
- int neg,
- BLOCK_SIZE bsize) {
- return wedge_params_lookup[bsize].masks[neg][wedge_index];
-}
-
+/* Return a pointer into the master wedge mask selected by
+ * (wedge_index, wedge_sign, sb_type), or NULL if no wedge mask exists
+ * for this block size.  Rows of the returned mask are
+ * MASK_MASTER_STRIDE bytes apart (the master mask stride), NOT the
+ * block width.  The pointer is pre-adjusted by -(offset_x + offset_y *
+ * MASK_MASTER_STRIDE) — presumably so callers can index the mask with
+ * their own (offset_x, offset_y)-based coordinates; confirm against
+ * the blend call sites. */
const uint8_t *vp10_get_soft_mask(int wedge_index,
                                  int wedge_sign,
                                  BLOCK_SIZE sb_type,
                                  int offset_x,
                                  int offset_y) {
-  const int bw = 4 * num_4x4_blocks_wide_lookup[sb_type];
  const uint8_t *mask =
-      get_wedge_mask(wedge_index, wedge_sign, sb_type);
+      get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
  if (mask)
-    mask -= (offset_x + offset_y * bw);
+    mask -= (offset_x + offset_y * MASK_MASTER_STRIDE);
  return mask;
}
vpx_blend_mask6(dst, dst_stride,
src0, src0_stride,
src1, src1_stride,
- mask, 4 * num_4x4_blocks_wide_lookup[sb_type],
+ mask, MASK_MASTER_STRIDE,
h, w, subh, subw);
}
vpx_highbd_blend_mask6(dst_8, dst_stride,
src0_8, src0_stride,
src1_8, src1_stride,
- mask, 4 * num_4x4_blocks_wide_lookup[sb_type],
+ mask, MASK_MASTER_STRIDE,
h, w, subh, subw, bd);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
// pass in subsampling factors directly.
const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
- const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
- sb_type, 0, 0);
+ const uint8_t *mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign,
+ sb_type);
vpx_blend_mask6(dst, dst_stride,
src0, src0_stride,
src1, src1_stride,
// pass in subsampling factors directly.
const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
- const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
- sb_type, 0, 0);
+ const uint8_t *mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign,
+ sb_type);
vpx_highbd_blend_mask6(dst_8, dst_stride,
src0_8, src0_stride,
src1_8, src1_stride,
if (use_wedge_interintra) {
if (is_interintra_wedge_used(bsize)) {
- const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
- bsize, 0, 0);
+ const uint8_t *mask = vp10_get_contiguous_soft_mask(wedge_index,
+ wedge_sign,
+ bsize);
const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
vpx_blend_mask6(comppred, compstride,
if (use_wedge_interintra) {
if (is_interintra_wedge_used(bsize)) {
- const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
- bsize, 0, 0);
+ const uint8_t *mask = vp10_get_contiguous_soft_mask(wedge_index,
+ wedge_sign,
+ bsize);
const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
vpx_highbd_blend_mask6(comppred8, compstride,
BLOCK_SIZE sb_type = mbmi->sb_type;
const uint8_t *mask;
const int mask_stride = 4 * num_4x4_blocks_wide_lookup[bsize];
- mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type, 0, 0);
+ mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
if (which == 0 || which == 2)
do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
if (which == 1 || which == 2) {
// get the negative mask
- mask = vp10_get_soft_mask(wedge_index, !wedge_sign, sb_type, 0, 0);
+ mask = vp10_get_contiguous_soft_mask(wedge_index, !wedge_sign, sb_type);
do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
mi_row, mi_col, &tmp_mv[1], &rate_mv[1],
1, mv_idx[1]);
// Refine motion vector.
if (have_newmv_in_inter_mode(this_mode) && best_wedge_index > -1) {
// get negative of mask
- const uint8_t* mask = vp10_get_soft_mask(
- best_wedge_index, 1, bsize, 0, 0);
+ const uint8_t* mask = vp10_get_contiguous_soft_mask(
+ best_wedge_index, 1, bsize);
mbmi->interintra_wedge_index = best_wedge_index;
mbmi->interintra_wedge_sign = 0;
do_masked_motion_search(cpi, x, mask, bw, bsize,