#if CONFIG_EXT_INTER
INTERINTRA_MODE interintra_mode;
- INTERINTRA_MODE interintra_uv_mode;
// TODO(debargha): Consolidate these flags
int use_wedge_interintra;
int interintra_wedge_index;
#endif // CONFIG_EXT_INTER
};
-static const uint8_t *get_wedge_mask_inplace(const int *a,
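+// Returns the WEDGE_PARMS-entry parameter array for the given wedge index
+// and block size, or NULL if wedge_index is WEDGE_NONE.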
+static const int *get_wedge_params(int wedge_index,
+ BLOCK_SIZE sb_type) {
+  if (wedge_index == WEDGE_NONE)
+    return NULL;
+  return get_wedge_params_lookup[sb_type] + WEDGE_PARMS * wedge_index;
+}
+
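+// Returns a pointer into the master wedge mask for the given wedge
+// index/sign and block size, or NULL if no wedge parameters exist.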
+static const uint8_t *get_wedge_mask_inplace(int wedge_index,
int neg,
- int h, int w) {
+ BLOCK_SIZE sb_type) {
const uint8_t *master;
- const int woff = (a[3] * w) >> 2;
- const int hoff = (a[4] * h) >> 2;
+ const int bh = 4 << b_height_log2_lookup[sb_type];
+ const int bw = 4 << b_width_log2_lookup[sb_type];
+ const int *a = get_wedge_params(wedge_index, sb_type);
+ int woff, hoff;
if (!a) return NULL;
+ woff = (a[3] * bw) >> 2;
+ hoff = (a[4] * bh) >> 2;
master = (a[0] ?
wedge_mask_obl[neg][a[1]][a[2]] :
wedge_mask_str[neg][a[1]]) +
return master;
}
-static const int *get_wedge_params(int wedge_index,
- BLOCK_SIZE sb_type) {
- const int *a = NULL;
- if (wedge_index != WEDGE_NONE) {
- return get_wedge_params_lookup[sb_type] + WEDGE_PARMS * wedge_index;
- }
- return a;
-}
-
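+// Returns the blending mask for the given wedge index/sign and block size.
+// wedge_offset_x/wedge_offset_y shift the mask within the master mask
+// (used by SUPERTX extended prediction); pass (0, 0) for a mask anchored
+// at the block origin.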
const uint8_t *vp10_get_soft_mask(int wedge_index,
int wedge_sign,
BLOCK_SIZE sb_type,
- int h, int w) {
- const int *a = get_wedge_params(wedge_index, sb_type);
- return get_wedge_mask_inplace(a, wedge_sign, h, w);
+ int wedge_offset_x,
+ int wedge_offset_y) {
+ const uint8_t *mask =
+ get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
+ if (mask)
+ mask -= (wedge_offset_x + wedge_offset_y * MASK_MASTER_STRIDE);
+ return mask;
}
-#if CONFIG_SUPERTX
-const uint8_t *get_soft_mask_extend(int wedge_index,
- int wedge_sign,
- int plane,
- BLOCK_SIZE sb_type,
- int wedge_offset_y,
- int wedge_offset_x) {
- int subh = (plane ? 2 : 4) << b_height_log2_lookup[sb_type];
- int subw = (plane ? 2 : 4) << b_width_log2_lookup[sb_type];
- const int *a = get_wedge_params(wedge_index, sb_type);
- if (a) {
- const uint8_t *mask = get_wedge_mask_inplace(a, wedge_sign, subh, subw);
- mask -= (wedge_offset_x + wedge_offset_y * MASK_MASTER_STRIDE);
- return mask;
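+// Blends dst1 and dst2 into dst using a mask with WEDGE_WEIGHT_BITS of
+// precision. subh/subw indicate that the mask is sampled at twice the
+// destination resolution in that direction and is averaged down first
+// (the subsampled-chroma case).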
+static void build_masked_compound(uint8_t *dst, int dst_stride,
+ uint8_t *dst1, int dst1_stride,
+ uint8_t *dst2, int dst2_stride,
+ const uint8_t *mask,
+ int h, int w, int subh, int subw) {
+ int i, j;
+ if (subw == 0 && subh == 0) {
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int m = mask[i * MASK_MASTER_STRIDE + j];
+ dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+ dst2[i * dst2_stride + j] *
+ ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+ WEDGE_WEIGHT_BITS;
+ }
+ } else if (subw == 1 && subh == 1) {
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int m = (mask[(2 * i) * MASK_MASTER_STRIDE + (2 * j)] +
+ mask[(2 * i + 1) * MASK_MASTER_STRIDE + (2 * j)] +
+ mask[(2 * i) * MASK_MASTER_STRIDE + (2 * j + 1)] +
+ mask[(2 * i + 1) * MASK_MASTER_STRIDE + (2 * j + 1)] + 2) >> 2;
+ dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+ dst2[i * dst2_stride + j] *
+ ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+ WEDGE_WEIGHT_BITS;
+ }
+ } else if (subw == 1 && subh == 0) {
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int m = (mask[i * MASK_MASTER_STRIDE + (2 * j)] +
+ mask[i * MASK_MASTER_STRIDE + (2 * j + 1)] + 1) >> 1;
+ dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+ dst2[i * dst2_stride + j] *
+ ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+ WEDGE_WEIGHT_BITS;
+ }
} else {
- return NULL;
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int m = (mask[(2 * i) * MASK_MASTER_STRIDE + j] +
+ mask[(2 * i + 1) * MASK_MASTER_STRIDE + j] + 1) >> 1;
+ dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+ dst2[i * dst2_stride + j] *
+ ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+ WEDGE_WEIGHT_BITS;
+ }
}
}
-static void build_masked_compound_extend(uint8_t *dst, int dst_stride,
- uint8_t *dst2, int dst2_stride,
- int plane,
- int wedge_index,
- int wedge_sign,
- BLOCK_SIZE sb_type,
- int wedge_offset_y, int wedge_offset_x,
- int h, int w) {
+#if CONFIG_VP9_HIGHBITDEPTH
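+// High bit-depth counterpart of build_masked_compound(); operates on
+// CONVERT_TO_SHORTPTR() 16-bit buffers.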
+static void build_masked_compound_highbd(uint8_t *dst_8, int dst_stride,
+ uint8_t *dst1_8, int dst1_stride,
+ uint8_t *dst2_8, int dst2_stride,
+ const uint8_t *mask,
+ int h, int w, int subh, int subw) {
int i, j;
- const uint8_t *mask = get_soft_mask_extend(
- wedge_index, wedge_sign, plane, sb_type, wedge_offset_y, wedge_offset_x);
- for (i = 0; i < h; ++i)
- for (j = 0; j < w; ++j) {
- int m = mask[i * MASK_MASTER_STRIDE + j];
- dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
- dst2[i * dst2_stride + j] *
- ((1 << WEDGE_WEIGHT_BITS) - m) +
- (1 << (WEDGE_WEIGHT_BITS - 1))) >>
- WEDGE_WEIGHT_BITS;
- }
+ uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);
+ uint16_t *dst1 = CONVERT_TO_SHORTPTR(dst1_8);
+ uint16_t *dst2 = CONVERT_TO_SHORTPTR(dst2_8);
+ if (subw == 0 && subh == 0) {
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int m = mask[i * MASK_MASTER_STRIDE + j];
+ dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+ dst2[i * dst2_stride + j] *
+ ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+ WEDGE_WEIGHT_BITS;
+ }
+ } else if (subw == 1 && subh == 1) {
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int m = (mask[(2 * i) * MASK_MASTER_STRIDE + (2 * j)] +
+ mask[(2 * i + 1) * MASK_MASTER_STRIDE + (2 * j)] +
+ mask[(2 * i) * MASK_MASTER_STRIDE + (2 * j + 1)] +
+ mask[(2 * i + 1) * MASK_MASTER_STRIDE + (2 * j + 1)] + 2) >> 2;
+ dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+ dst2[i * dst2_stride + j] *
+ ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+ WEDGE_WEIGHT_BITS;
+ }
+ } else if (subw == 1 && subh == 0) {
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int m = (mask[i * MASK_MASTER_STRIDE + (2 * j)] +
+ mask[i * MASK_MASTER_STRIDE + (2 * j + 1)] + 1) >> 1;
+ dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+ dst2[i * dst2_stride + j] *
+ ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+ WEDGE_WEIGHT_BITS;
+ }
+ } else {
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+ int m = (mask[(2 * i) * MASK_MASTER_STRIDE + j] +
+ mask[(2 * i + 1) * MASK_MASTER_STRIDE + j] + 1) >> 1;
+ dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+ dst2[i * dst2_stride + j] *
+ ((1 << WEDGE_WEIGHT_BITS) - m) +
+ (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+ WEDGE_WEIGHT_BITS;
+ }
+ }
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_SUPERTX
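+// SUPERTX variants: derive subsampling from h/w and apply the wedge mask
+// at an (x, y) offset within the master mask.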
+static void build_masked_compound_wedge_extend(
+ uint8_t *dst, int dst_stride,
+ uint8_t *dst2, int dst2_stride,
+ int wedge_index,
+ int wedge_sign,
+ BLOCK_SIZE sb_type,
+ int wedge_offset_x, int wedge_offset_y,
+ int h, int w) {
+ const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
+ const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
+ const uint8_t *mask = vp10_get_soft_mask(
+ wedge_index, wedge_sign, sb_type, wedge_offset_x, wedge_offset_y);
+ build_masked_compound(dst, dst_stride,
+ dst, dst_stride, dst2, dst2_stride, mask,
+ h, w, subh, subw);
}
#if CONFIG_VP9_HIGHBITDEPTH
-static void build_masked_compound_extend_highbd(
+static void build_masked_compound_wedge_extend_highbd(
uint8_t *dst_8, int dst_stride,
- uint8_t *dst2_8, int dst2_stride, int plane,
- int wedge_index, int wedge_sign, BLOCK_SIZE sb_type,
- int wedge_offset_y, int wedge_offset_x,
+ uint8_t *dst2_8, int dst2_stride,
+ int wedge_index, int wedge_sign,
+ BLOCK_SIZE sb_type,
+ int wedge_offset_x, int wedge_offset_y,
int h, int w) {
- int i, j;
- const uint8_t *mask = get_soft_mask_extend(
- wedge_index, wedge_sign, plane, sb_type, wedge_offset_y, wedge_offset_x);
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);
- uint16_t *dst2 = CONVERT_TO_SHORTPTR(dst2_8);
- for (i = 0; i < h; ++i)
- for (j = 0; j < w; ++j) {
- int m = mask[i * MASK_MASTER_STRIDE + j];
- dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
- dst2[i * dst2_stride + j] *
- ((1 << WEDGE_WEIGHT_BITS) - m) +
- (1 << (WEDGE_WEIGHT_BITS - 1))) >>
- WEDGE_WEIGHT_BITS;
- }
+ const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
+ const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
+ const uint8_t *mask = vp10_get_soft_mask(
+ wedge_index, wedge_sign, sb_type, wedge_offset_x, wedge_offset_y);
+ build_masked_compound_highbd(dst_8, dst_stride,
+ dst_8, dst_stride, dst2_8, dst2_stride, mask,
+ h, w, subh, subw);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
#else // CONFIG_SUPERTX
-static void build_masked_compound(uint8_t *dst, int dst_stride,
- uint8_t *dst2, int dst2_stride,
- int wedge_index, int wedge_sign,
- BLOCK_SIZE sb_type,
- int h, int w) {
- int i, j;
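+// Non-SUPERTX variant: the wedge mask is anchored at the block origin.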
+static void build_masked_compound_wedge(uint8_t *dst, int dst_stride,
+ uint8_t *dst2, int dst2_stride,
+ int wedge_index, int wedge_sign,
+ BLOCK_SIZE sb_type,
+ int h, int w) {
+ // Derive subsampling from h and w passed in. May be refactored to
+ // pass in subsampling factors directly.
+ const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
+ const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
- sb_type, h, w);
- for (i = 0; i < h; ++i)
- for (j = 0; j < w; ++j) {
- int m = mask[i * MASK_MASTER_STRIDE + j];
- dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
- dst2[i * dst2_stride + j] *
- ((1 << WEDGE_WEIGHT_BITS) - m) +
- (1 << (WEDGE_WEIGHT_BITS - 1))) >>
- WEDGE_WEIGHT_BITS;
- }
+ sb_type, 0, 0);
+ build_masked_compound(dst, dst_stride,
+ dst, dst_stride, dst2, dst2_stride, mask,
+ h, w, subh, subw);
}
#if CONFIG_VP9_HIGHBITDEPTH
-static void build_masked_compound_highbd(uint8_t *dst_8, int dst_stride,
- uint8_t *dst2_8, int dst2_stride,
- int wedge_index, int wedge_sign,
- BLOCK_SIZE sb_type,
- int h, int w) {
- int i, j;
+static void build_masked_compound_wedge_highbd(uint8_t *dst_8, int dst_stride,
+ uint8_t *dst2_8, int dst2_stride,
+ int wedge_index, int wedge_sign,
+ BLOCK_SIZE sb_type,
+ int h, int w) {
+ // Derive subsampling from h and w passed in. May be refactored to
+ // pass in subsampling factors directly.
+ const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
+ const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
- sb_type, h, w);
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);
- uint16_t *dst2 = CONVERT_TO_SHORTPTR(dst2_8);
- for (i = 0; i < h; ++i)
- for (j = 0; j < w; ++j) {
- int m = mask[i * MASK_MASTER_STRIDE + j];
- dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
- dst2[i * dst2_stride + j] *
- ((1 << WEDGE_WEIGHT_BITS) - m) +
- (1 << (WEDGE_WEIGHT_BITS - 1))) >>
- WEDGE_WEIGHT_BITS;
- }
+ sb_type, 0, 0);
+ build_masked_compound_highbd(dst_8, dst_stride,
+ dst_8, dst_stride, dst2_8, dst2_stride, mask,
+ h, w, subh, subw);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // CONFIG_SUPERTX
const INTERP_FILTER interp_filter,
int xs, int ys,
#if CONFIG_SUPERTX
- int plane, int wedge_offset_x, int wedge_offset_y,
+ int wedge_offset_x, int wedge_offset_y,
#endif // CONFIG_SUPERTX
const MACROBLOCKD *xd) {
const MODE_INFO *mi = xd->mi[0];
interp_filter, xs, ys, xd);
#if CONFIG_SUPERTX
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- build_masked_compound_extend_highbd(
- dst, dst_stride, tmp_dst, MAX_SB_SIZE, plane,
+ build_masked_compound_wedge_extend_highbd(
+ dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
- mi->mbmi.sb_type,
- wedge_offset_y, wedge_offset_x, h, w);
+        mi->mbmi.sb_type,
+ wedge_offset_x, wedge_offset_y, h, w);
else
- build_masked_compound_extend(
- dst, dst_stride, tmp_dst, MAX_SB_SIZE, plane,
+ build_masked_compound_wedge_extend(
+ dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
- mi->mbmi.sb_type,
- wedge_offset_y, wedge_offset_x, h, w);
+        mi->mbmi.sb_type,
+ wedge_offset_x, wedge_offset_y, h, w);
#else
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- build_masked_compound_highbd(
+ build_masked_compound_wedge_highbd(
dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type, h, w);
else
- build_masked_compound(
+ build_masked_compound_wedge(
dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
subpel_x, subpel_y, sf, w, h, 0,
interp_filter, xs, ys, xd);
#if CONFIG_SUPERTX
- build_masked_compound_extend(
- dst, dst_stride, tmp_dst, MAX_SB_SIZE, plane,
+ build_masked_compound_wedge_extend(
+ dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type,
- wedge_offset_y, wedge_offset_x, h, w);
+ wedge_offset_x, wedge_offset_y, h, w);
#else
- build_masked_compound(
+ build_masked_compound_wedge(
dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
subpel_x, subpel_y, sf, w, h,
interp_filter, xs, ys,
#if CONFIG_SUPERTX
- plane, wedge_offset_x, wedge_offset_y,
+ wedge_offset_x, wedge_offset_y,
#endif // CONFIG_SUPERTX
xd);
else
void vp10_build_masked_inter_predictor_complex(
MACROBLOCKD *xd,
uint8_t *dst, int dst_stride, uint8_t *dst2, int dst2_stride,
- const struct macroblockd_plane *pd, int mi_row, int mi_col,
+ int mi_row, int mi_col,
int mi_row_ori, int mi_col_ori, BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
PARTITION_TYPE partition, int plane) {
int i, j;
+ const struct macroblockd_plane *pd = &xd->plane[plane];
uint8_t mask[MAX_TX_SIZE];
int top_w = 4 << b_width_log2_lookup[top_bsize];
int top_h = 4 << b_height_log2_lookup[top_bsize];
block, bw, bh,
0, 0, bw, bh,
#if CONFIG_EXT_INTER
- wedge_offset_x >> (xd->plane[plane].subsampling_x),
- wedge_offset_y >> (xd->plane[plane].subsampling_y),
+ wedge_offset_x,
+ wedge_offset_y,
#endif // CONFIG_SUPERTX
mi_x, mi_y);
}
#endif // CONFIG_OBMC
y * 2 + x, bw, bh, 4 * x, 4 * y, 4, 4,
#if CONFIG_EXT_INTER
- wedge_offset_x >> (xd->plane[plane].subsampling_x),
- wedge_offset_y >> (xd->plane[plane].subsampling_y),
+ wedge_offset_x,
+ wedge_offset_y,
#endif // CONFIG_EXT_INTER
mi_x, mi_y);
} else {
#endif // CONFIG_OBMC
0, bw, bh, 0, 0, bw, bh,
#if CONFIG_EXT_INTER
- wedge_offset_x >> (xd->plane[plane].subsampling_x),
- wedge_offset_y >> (xd->plane[plane].subsampling_y),
+ wedge_offset_x,
+ wedge_offset_y,
#endif // CONFIG_EXT_INTER
mi_x, mi_y);
}
#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
static const int ii_weights1d[MAX_SB_SIZE] = {
- 128, 127, 125, 124, 123, 122, 120, 119,
- 118, 117, 116, 115, 113, 112, 111, 110,
- 109, 108, 107, 106, 105, 104, 103, 103,
- 102, 101, 100, 99, 98, 97, 97, 96,
- 95, 94, 94, 93, 92, 91, 91, 90,
- 89, 89, 88, 87, 87, 86, 86, 85,
- 84, 84, 83, 83, 82, 82, 81, 81,
- 80, 80, 79, 79, 78, 78, 77, 77,
- 76, 76, 75, 75, 75, 74, 74, 73,
- 73, 73, 72, 72, 72, 71, 71, 70,
- 70, 70, 69, 69, 69, 69, 68, 68,
- 68, 67, 67, 67, 67, 66, 66, 66,
- 66, 65, 65, 65, 65, 64, 64, 64,
- 64, 63, 63, 63, 63, 63, 62, 62,
- 62, 62, 62, 61, 61, 61, 61, 61,
- 61, 60, 60, 60, 60, 60, 60, 60,
+ 102, 100, 97, 95, 92, 90, 88, 86,
+ 84, 82, 80, 78, 76, 74, 73, 71,
+ 69, 68, 67, 65, 64, 62, 61, 60,
+ 59, 58, 57, 55, 54, 53, 52, 52,
+ 51, 50, 49, 48, 47, 47, 46, 45,
+ 45, 44, 43, 43, 42, 41, 41, 40,
+ 40, 39, 39, 38, 38, 38, 37, 37,
+ 36, 36, 36, 35, 35, 35, 34, 34,
+ 34, 33, 33, 33, 33, 32, 32, 32,
+ 32, 32, 31, 31, 31, 31, 31, 30,
+ 30, 30, 30, 30, 30, 30, 29, 29,
+ 29, 29, 29, 29, 29, 29, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27,
};
static int ii_size_scales[BLOCK_SIZES] = {
32, 16, 16, 16, 8, 8, 8, 4, 4, 4, 2, 2, 2, 1, 1, 1
int i, j;
if (use_wedge_interintra) {
- if (is_interinter_wedge_used(bsize)) {
+ if (is_interintra_wedge_used(bsize)) {
const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
- bsize, bh, bw);
- for (i = 0; i < bh; ++i) {
- for (j = 0; j < bw; ++j) {
- int m = mask[i * MASK_MASTER_STRIDE + j];
- comppred[i * compstride + j] =
- (intrapred[i * intrastride + j] * m +
- interpred[i * interstride + j] * ((1 << WEDGE_WEIGHT_BITS) - m) +
- (1 << (WEDGE_WEIGHT_BITS - 1))) >> WEDGE_WEIGHT_BITS;
- }
- }
+ bsize, 0, 0);
+ const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
+ const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
+ build_masked_compound(comppred, compstride,
+ intrapred, intrastride,
+ interpred, interstride, mask,
+ bh, bw, subh, subw);
}
return;
}
(void) bd;
if (use_wedge_interintra) {
- if (is_interinter_wedge_used(bsize)) {
+ if (is_interintra_wedge_used(bsize)) {
const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
- bsize, bh, bw);
- for (i = 0; i < bh; ++i) {
- for (j = 0; j < bw; ++j) {
- int m = mask[i * MASK_MASTER_STRIDE + j];
- comppred[i * compstride + j] =
- (intrapred[i * intrastride + j] * m +
- interpred[i * interstride + j] * ((1 << WEDGE_WEIGHT_BITS) - m) +
- (1 << (WEDGE_WEIGHT_BITS - 1))) >> WEDGE_WEIGHT_BITS;
- }
- }
+ bsize, 0, 0);
+ const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
+ const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
+ build_masked_compound(comppred8, compstride,
+ intrapred8, intrastride,
+ interpred8, interstride, mask,
+ bh, bw, subh, subw);
}
return;
}
#if CONFIG_SUPERTX
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- build_masked_compound_extend_highbd(
- dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE, plane,
+ build_masked_compound_wedge_extend_highbd(
+ dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type,
- wedge_offset_y, wedge_offset_x, h, w);
+ wedge_offset_x, wedge_offset_y, h, w);
} else {
- build_masked_compound_extend(
- dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE, plane,
+ build_masked_compound_wedge_extend(
+ dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type,
- wedge_offset_y, wedge_offset_x, h, w);
+ wedge_offset_x, wedge_offset_y, h, w);
}
#else
- build_masked_compound_extend(dst, dst_buf->stride, tmp_dst,
- MAX_SB_SIZE, plane,
- mi->mbmi.interinter_wedge_index,
- mi->mbmi.interinter_wedge_sign,
- mi->mbmi.sb_type,
- wedge_offset_y, wedge_offset_x, h, w);
+ build_masked_compound_wedge_extend(dst, dst_buf->stride,
+ tmp_dst, MAX_SB_SIZE,
+ mi->mbmi.interinter_wedge_index,
+ mi->mbmi.interinter_wedge_sign,
+ mi->mbmi.sb_type,
+ wedge_offset_x, wedge_offset_y, h, w);
#endif // CONFIG_VP9_HIGHBITDEPTH
#else // CONFIG_SUPERTX
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- build_masked_compound_highbd(dst, dst_buf->stride, tmp_dst,
- MAX_SB_SIZE,
- mi->mbmi.interinter_wedge_index,
- mi->mbmi.interinter_wedge_sign,
- mi->mbmi.sb_type, h, w);
+ build_masked_compound_wedge_highbd(dst, dst_buf->stride, tmp_dst,
+ MAX_SB_SIZE,
+ mi->mbmi.interinter_wedge_index,
+ mi->mbmi.interinter_wedge_sign,
+ mi->mbmi.sb_type, h, w);
else
#endif // CONFIG_VP9_HIGHBITDEPTH
- build_masked_compound(dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE,
- mi->mbmi.interinter_wedge_index,
- mi->mbmi.interinter_wedge_sign,
- mi->mbmi.sb_type, h, w);
+ build_masked_compound_wedge(dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE,
+ mi->mbmi.interinter_wedge_index,
+ mi->mbmi.interinter_wedge_sign,
+ mi->mbmi.sb_type, h, w);
#endif // CONFIG_SUPERTX
} else {
#if CONFIG_VP9_HIGHBITDEPTH
const INTERP_FILTER interp_filter,
int xs, int ys,
#if CONFIG_SUPERTX
- int plane, int wedge_offset_x, int wedge_offset_y,
+ int wedge_offset_x, int wedge_offset_y,
#endif // CONFIG_SUPERTX
const MACROBLOCKD *xd);
#endif // CONFIG_EXT_INTER
void vp10_build_masked_inter_predictor_complex(
MACROBLOCKD *xd,
uint8_t *dst, int dst_stride, uint8_t *dst2, int dst2_stride,
- const struct macroblockd_plane *pd, int mi_row, int mi_col,
- int mi_row_ori, int mi_col_ori, BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
+ int mi_row, int mi_col, int mi_row_ori, int mi_col_ori,
+ BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
PARTITION_TYPE partition, int plane);
#endif // CONFIG_SUPERTX
const uint8_t *vp10_get_soft_mask(int wedge_index,
int wedge_sign,
BLOCK_SIZE sb_type,
- int h, int w);
+ int wedge_offset_x,
+ int wedge_offset_y);
void vp10_build_interintra_predictors(MACROBLOCKD *xd,
uint8_t *ypred,
int subpel_x, int subpel_y,
const INTERP_FILTER interp_filter,
const struct scale_factors *sf,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
- int plane,
+#if CONFIG_EXT_INTER
int wedge_offset_x, int wedge_offset_y,
-#endif // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#endif // CONFIG_EXT_INTER
MACROBLOCKD *xd,
int w, int h, int ref, int xs, int ys) {
DECLARE_ALIGNED(16, uint16_t,
buf_ptr, b_w, dst, dst_buf_stride,
subpel_x, subpel_y, sf, w, h,
interp_filter, xs, ys,
-#if CONFIG_SUPERTX
- plane, wedge_offset_x, wedge_offset_y,
-#endif // CONFIG_SUPERTX
+ wedge_offset_x, wedge_offset_y,
xd);
else
#endif // CONFIG_EXT_INTER
int subpel_x, int subpel_y,
const INTERP_FILTER interp_filter,
const struct scale_factors *sf,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
- int plane,
+#if CONFIG_EXT_INTER
int wedge_offset_x, int wedge_offset_y,
-#endif // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#endif // CONFIG_EXT_INTER
MACROBLOCKD *xd,
int w, int h, int ref, int xs, int ys) {
DECLARE_ALIGNED(16, uint8_t,
buf_ptr, b_w, dst, dst_buf_stride,
subpel_x, subpel_y, sf, w, h,
interp_filter, xs, ys,
-#if CONFIG_SUPERTX
- plane, wedge_offset_x, wedge_offset_y,
-#endif // CONFIG_SUPERTX
+ wedge_offset_x, wedge_offset_y,
xd);
else
#endif // CONFIG_EXT_INTER
#endif // CONFIG_OBMC
int bw, int bh,
int x, int y, int w, int h,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
+#if CONFIG_EXT_INTER
int wedge_offset_x, int wedge_offset_y,
-#endif // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#endif // CONFIG_EXT_INTER
int mi_x, int mi_y,
const INTERP_FILTER interp_filter,
const struct scale_factors *sf,
dst, dst_buf->stride,
subpel_x, subpel_y,
interp_filter, sf,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
- plane, wedge_offset_x, wedge_offset_y,
-#endif // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#if CONFIG_EXT_INTER
+ wedge_offset_x, wedge_offset_y,
+#endif // CONFIG_EXT_INTER
xd, w, h, ref, xs, ys);
#else
extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h,
dst, dst_buf->stride,
subpel_x, subpel_y,
interp_filter, sf,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
- plane, wedge_offset_x, wedge_offset_y,
-#endif // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#if CONFIG_EXT_INTER
+ wedge_offset_x, wedge_offset_y,
+#endif // CONFIG_EXT_INTER
xd, w, h, ref, xs, ys);
#endif // CONFIG_VP9_HIGHBITDEPTH
return;
}
#if CONFIG_EXT_INTER
if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
- mi->mbmi.use_wedge_interinter) {
+ mi->mbmi.use_wedge_interinter)
vp10_make_masked_inter_predictor(
buf_ptr, buf_stride, dst, dst_buf->stride,
subpel_x, subpel_y, sf, w, h,
interp_filter, xs, ys,
-#if CONFIG_SUPERTX
- plane, wedge_offset_x, wedge_offset_y,
-#endif // CONFIG_SUPERTX
+ wedge_offset_x, wedge_offset_y,
xd);
- } else {
+ else
+#endif // CONFIG_EXT_INTER
vp10_make_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride,
subpel_x, subpel_y, sf, w, h, ref,
interp_filter, xs, ys, xd);
- }
-#else
- vp10_make_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride,
- subpel_x, subpel_y, sf, w, h, ref,
- interp_filter, xs, ys, xd);
-#endif // CONFIG_EXT_INTER
}
static void dec_build_inter_predictors_sb_extend(
n4w_x4, n4h_x4,
4 * x, 4 * y, pw, ph,
#if CONFIG_EXT_INTER
- wedge_offset_x >> (pd->subsampling_x),
- wedge_offset_y >> (pd->subsampling_y),
+ wedge_offset_x,
+ wedge_offset_y,
#endif // CONFIG_EXT_INTER
mi_x, mi_y,
interp_filter, sf, pre_buf, dst_buf,
n4w_x4, n4h_x4,
0, 0, n4w_x4, n4h_x4,
#if CONFIG_EXT_INTER
- wedge_offset_x >> (pd->subsampling_x),
- wedge_offset_y >> (pd->subsampling_y),
+ wedge_offset_x,
+ wedge_offset_y,
#endif // CONFIG_EXT_INTER
mi_x, mi_y,
interp_filter, sf, pre_buf, dst_buf,
n4w_x4, n4h_x4,
0, 0, n4w_x4, n4h_x4,
#if CONFIG_EXT_INTER
- wedge_offset_x >> (pd->subsampling_x),
- wedge_offset_y >> (pd->subsampling_y),
+ wedge_offset_x,
+ wedge_offset_y,
#endif // CONFIG_EXT_INTER
mi_x, mi_y,
interp_filter, sf, pre_buf, dst_buf,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[0], dst_stride[0],
dst_buf1[0], dst_stride1[0],
- &xd->plane[0],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
- &xd->plane[i], mi_row, mi_col, mi_row_top, mi_col_top,
+ mi_row, mi_col, mi_row_top, mi_col_top,
bsize, top_bsize, PARTITION_HORZ, i);
}
}
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[0], dst_stride[0],
dst_buf1[0], dst_stride1[0],
- &xd->plane[0],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
- &xd->plane[i], mi_row, mi_col, mi_row_top, mi_col_top,
+ mi_row, mi_col, mi_row_top, mi_col_top,
bsize, top_bsize, PARTITION_VERT, i);
}
}
dst_buf[i], dst_stride[i],
dst_buf1[i],
dst_stride1[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
dst_stride2[i],
dst_buf3[i],
dst_stride3[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
dst_stride[i],
dst_buf2[i],
dst_stride2[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
dst_stride[i],
dst_buf2[i],
dst_stride2[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i], dst_stride1[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
+ dst_buf[i], dst_stride[i],
+ dst_buf1[i], dst_stride1[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_VERT, i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf2[i], dst_stride2[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ dst_buf[i], dst_stride[i],
+ dst_buf2[i], dst_stride2[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_HORZ, i);
}
break;
case PARTITION_VERT_A:
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i], dst_stride1[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ dst_buf[i], dst_stride[i],
+ dst_buf1[i], dst_stride1[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_HORZ, i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf2[i], dst_stride2[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
+ dst_buf[i], dst_stride[i],
+ dst_buf2[i], dst_stride2[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_VERT, i);
}
break;
case PARTITION_HORZ_B:
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
vp10_build_masked_inter_predictor_complex(xd,
- dst_buf1[i], dst_stride1[i],
- dst_buf2[i], dst_stride2[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
+ dst_buf1[i], dst_stride1[i],
+ dst_buf2[i], dst_stride2[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_VERT, i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i], dst_stride1[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ dst_buf[i], dst_stride[i],
+ dst_buf1[i], dst_stride1[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_HORZ, i);
}
break;
case PARTITION_VERT_B:
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
vp10_build_masked_inter_predictor_complex(xd,
- dst_buf1[i], dst_stride1[i],
- dst_buf2[i], dst_stride2[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ dst_buf1[i], dst_stride1[i],
+ dst_buf2[i], dst_stride2[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_HORZ, i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i], dst_stride1[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
+ dst_buf[i], dst_stride[i],
+ dst_buf1[i], dst_stride1[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_VERT, i);
}
break;
#endif // CONFIG_EXT_PARTITION_TYPES
read_interintra_mode(cm, xd, r, bsize_group);
mbmi->ref_frame[1] = INTRA_FRAME;
mbmi->interintra_mode = interintra_mode;
- mbmi->interintra_uv_mode = interintra_mode;
#if CONFIG_EXT_INTRA
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
write_interintra_mode(
w, mbmi->interintra_mode,
cm->fc->interintra_mode_prob[bsize_group]);
- assert(mbmi->interintra_mode == mbmi->interintra_uv_mode);
if (is_interintra_wedge_used(bsize)) {
vp10_write(w, mbmi->use_wedge_interintra,
cm->fc->wedge_interintra_prob[bsize]);
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[0], dst_stride[0],
dst_buf1[0], dst_stride1[0],
- &xd->plane[0],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
- &xd->plane[i], mi_row, mi_col, mi_row_top, mi_col_top,
+ mi_row, mi_col, mi_row_top, mi_col_top,
bsize, top_bsize, PARTITION_HORZ, i);
}
}
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[0], dst_stride[0],
dst_buf1[0], dst_stride1[0],
- &xd->plane[0],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
- &xd->plane[i], mi_row, mi_col, mi_row_top, mi_col_top,
+ mi_row, mi_col, mi_row_top, mi_col_top,
bsize, top_bsize, PARTITION_VERT, i);
}
}
dst_stride[i],
dst_buf1[i],
dst_stride1[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
dst_stride2[i],
dst_buf3[i],
dst_stride3[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[i],
dst_stride[i],
- dst_buf2[i],
- dst_stride2[i],
- &xd->plane[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ dst_buf2[i],
+ dst_stride2[i],
+ mi_row, mi_col,
+ mi_row_top, mi_col_top,
+ bsize, top_bsize,
+ PARTITION_HORZ, i);
}
} else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
vp10_build_masked_inter_predictor_complex(xd,
dst_stride[i],
dst_buf2[i],
dst_stride2[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[i], dst_stride[i],
dst_buf1[i], dst_stride1[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[i], dst_stride[i],
dst_buf2[i], dst_stride2[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[i], dst_stride[i],
dst_buf1[i], dst_stride1[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[i], dst_stride[i],
dst_buf2[i], dst_stride2[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf1[i], dst_stride1[i],
dst_buf2[i], dst_stride2[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[i], dst_stride[i],
dst_buf1[i], dst_stride1[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf1[i], dst_stride1[i],
dst_buf2[i], dst_stride2[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
vp10_build_masked_inter_predictor_complex(xd,
dst_buf[i], dst_stride[i],
dst_buf1[i], dst_stride1[i],
- &xd->plane[i],
mi_row, mi_col,
mi_row_top, mi_col_top,
bsize, top_bsize,
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
BLOCK_SIZE sb_type = mbmi->sb_type;
- int w = (4 << b_width_log2_lookup[sb_type]);
- int h = (4 << b_height_log2_lookup[sb_type]);
const uint8_t *mask;
const int mask_stride = MASK_MASTER_STRIDE;
- mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type, h, w);
+ mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type, 0, 0);
if (which == 0 || which == 2)
do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
if (which == 1 || which == 2) {
// get the negative mask
- mask = vp10_get_soft_mask(wedge_index, !wedge_sign, sb_type, h, w);
+ mask = vp10_get_soft_mask(wedge_index, !wedge_sign, sb_type, 0, 0);
do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
mi_row, mi_col, &tmp_mv[1], &rate_mv[1],
1, mv_idx[1]);
int wedge_types;
int tmp_skip_txfm_sb;
int64_t tmp_skip_sse_sb;
+
rs = vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
best_rd_nowedge = rd;
- mbmi->use_wedge_interinter = 1;
- rs = (1 + get_wedge_bits_lookup[bsize]) * 256 +
- vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
- wedge_types = (1 << get_wedge_bits_lookup[bsize]);
- if (have_newmv_in_inter_mode(this_mode)) {
- int_mv tmp_mv[2];
- int rate_mvs[2], tmp_rate_mv = 0;
- uint8_t pred0[2 * MAX_SB_SQUARE * 3];
- uint8_t pred1[2 * MAX_SB_SQUARE * 3];
- uint8_t *preds0[3] = {pred0,
- pred0 + 2 * MAX_SB_SQUARE,
- pred0 + 4 * MAX_SB_SQUARE};
- uint8_t *preds1[3] = {pred1,
- pred1 + 2 * MAX_SB_SQUARE,
- pred1 + 4 * MAX_SB_SQUARE};
- int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
- vp10_build_inter_predictors_for_planes_single_buf(
- xd, bsize, mi_row, mi_col, 0, preds0, strides);
- vp10_build_inter_predictors_for_planes_single_buf(
- xd, bsize, mi_row, mi_col, 1, preds1, strides);
-
- for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
- mbmi->interinter_wedge_index = wedge_index >> 1;
- mbmi->interinter_wedge_sign = wedge_index & 1;
- vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
- preds0, strides,
- preds1, strides);
- model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
- &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
- rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
- if (rd < best_rd_wedge) {
- best_wedge_index = wedge_index;
- best_rd_wedge = rd;
+ mbmi->use_wedge_interinter = 0;
+
+  // Disable wedge search if source variance is small
+ if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
+
+ mbmi->use_wedge_interinter = 1;
+ rs = (1 + get_wedge_bits_lookup[bsize]) * 256 +
+ vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+ wedge_types = (1 << get_wedge_bits_lookup[bsize]);
+ if (have_newmv_in_inter_mode(this_mode)) {
+ int_mv tmp_mv[2];
+ int rate_mvs[2], tmp_rate_mv = 0;
+ uint8_t pred0[2 * MAX_SB_SQUARE * 3];
+ uint8_t pred1[2 * MAX_SB_SQUARE * 3];
+ uint8_t *preds0[3] = {pred0,
+ pred0 + 2 * MAX_SB_SQUARE,
+ pred0 + 4 * MAX_SB_SQUARE};
+ uint8_t *preds1[3] = {pred1,
+ pred1 + 2 * MAX_SB_SQUARE,
+ pred1 + 4 * MAX_SB_SQUARE};
+ int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
+ vp10_build_inter_predictors_for_planes_single_buf(
+ xd, bsize, mi_row, mi_col, 0, preds0, strides);
+ vp10_build_inter_predictors_for_planes_single_buf(
+ xd, bsize, mi_row, mi_col, 1, preds1, strides);
+
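+      // Search all wedge shapes and signs using the model RD cost.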
+ for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
+ mbmi->interinter_wedge_index = wedge_index >> 1;
+ mbmi->interinter_wedge_sign = wedge_index & 1;
+ vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
+ preds0, strides,
+ preds1, strides);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
+ &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+ rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+ if (rd < best_rd_wedge) {
+ best_wedge_index = wedge_index;
+ best_rd_wedge = rd;
+ }
}
- }
- mbmi->interinter_wedge_index = best_wedge_index >> 1;
- mbmi->interinter_wedge_sign = best_wedge_index & 1;
- if (this_mode == NEW_NEWMV) {
- int mv_idxs[2] = {0, 0};
- do_masked_motion_search_indexed(cpi, x,
- mbmi->interinter_wedge_index,
- mbmi->interinter_wedge_sign,
- bsize, mi_row, mi_col, tmp_mv, rate_mvs,
- mv_idxs, 2);
- tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
- mbmi->mv[0].as_int = tmp_mv[0].as_int;
- mbmi->mv[1].as_int = tmp_mv[1].as_int;
- } else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
- int mv_idxs[2] = {0, 0};
- do_masked_motion_search_indexed(cpi, x,
- mbmi->interinter_wedge_index,
- mbmi->interinter_wedge_sign,
- bsize, mi_row, mi_col, tmp_mv, rate_mvs,
- mv_idxs, 0);
- tmp_rate_mv = rate_mvs[0];
- mbmi->mv[0].as_int = tmp_mv[0].as_int;
- } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
- int mv_idxs[2] = {0, 0};
- do_masked_motion_search_indexed(cpi, x,
- mbmi->interinter_wedge_index,
- mbmi->interinter_wedge_sign,
- bsize, mi_row, mi_col, tmp_mv, rate_mvs,
- mv_idxs, 1);
- tmp_rate_mv = rate_mvs[1];
- mbmi->mv[1].as_int = tmp_mv[1].as_int;
- }
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
- model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
- &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
- rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
- if (rd < best_rd_wedge) {
- best_rd_wedge = rd;
- } else {
- mbmi->mv[0].as_int = cur_mv[0].as_int;
- mbmi->mv[1].as_int = cur_mv[1].as_int;
- tmp_rate_mv = rate_mv;
- }
- if (best_rd_wedge < best_rd_nowedge) {
- mbmi->use_wedge_interinter = 1;
mbmi->interinter_wedge_index = best_wedge_index >> 1;
mbmi->interinter_wedge_sign = best_wedge_index & 1;
- xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
- xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
- *rate2 += tmp_rate_mv - rate_mv;
- rate_mv = tmp_rate_mv;
- } else {
- mbmi->use_wedge_interinter = 0;
- mbmi->mv[0].as_int = cur_mv[0].as_int;
- mbmi->mv[1].as_int = cur_mv[1].as_int;
- }
- } else {
- uint8_t pred0[2 * MAX_SB_SQUARE * 3];
- uint8_t pred1[2 * MAX_SB_SQUARE * 3];
- uint8_t *preds0[3] = {pred0,
- pred0 + 2 * MAX_SB_SQUARE,
- pred0 + 4 * MAX_SB_SQUARE};
- uint8_t *preds1[3] = {pred1,
- pred1 + 2 * MAX_SB_SQUARE,
- pred1 + 4 * MAX_SB_SQUARE};
- int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
- vp10_build_inter_predictors_for_planes_single_buf(
- xd, bsize, mi_row, mi_col, 0, preds0, strides);
- vp10_build_inter_predictors_for_planes_single_buf(
- xd, bsize, mi_row, mi_col, 1, preds1, strides);
- for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
- mbmi->interinter_wedge_index = wedge_index >> 1;
- mbmi->interinter_wedge_sign = wedge_index & 1;
- vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
- preds0, strides,
- preds1, strides);
+ if (this_mode == NEW_NEWMV) {
+ int mv_idxs[2] = {0, 0};
+ do_masked_motion_search_indexed(cpi, x,
+ mbmi->interinter_wedge_index,
+ mbmi->interinter_wedge_sign,
+ bsize, mi_row, mi_col, tmp_mv, rate_mvs,
+ mv_idxs, 2);
+ tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
+ mbmi->mv[0].as_int = tmp_mv[0].as_int;
+ mbmi->mv[1].as_int = tmp_mv[1].as_int;
+ } else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
+ int mv_idxs[2] = {0, 0};
+ do_masked_motion_search_indexed(cpi, x,
+ mbmi->interinter_wedge_index,
+ mbmi->interinter_wedge_sign,
+ bsize, mi_row, mi_col, tmp_mv, rate_mvs,
+ mv_idxs, 0);
+ tmp_rate_mv = rate_mvs[0];
+ mbmi->mv[0].as_int = tmp_mv[0].as_int;
+ } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
+ int mv_idxs[2] = {0, 0};
+ do_masked_motion_search_indexed(cpi, x,
+ mbmi->interinter_wedge_index,
+ mbmi->interinter_wedge_sign,
+ bsize, mi_row, mi_col, tmp_mv, rate_mvs,
+ mv_idxs, 1);
+ tmp_rate_mv = rate_mvs[1];
+ mbmi->mv[1].as_int = tmp_mv[1].as_int;
+ }
+ vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb);
- rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+ rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
if (rd < best_rd_wedge) {
- best_wedge_index = wedge_index;
best_rd_wedge = rd;
+ } else {
+ mbmi->mv[0].as_int = cur_mv[0].as_int;
+ mbmi->mv[1].as_int = cur_mv[1].as_int;
+ tmp_rate_mv = rate_mv;
+ }
+ if (best_rd_wedge < best_rd_nowedge) {
+ mbmi->use_wedge_interinter = 1;
+ mbmi->interinter_wedge_index = best_wedge_index >> 1;
+ mbmi->interinter_wedge_sign = best_wedge_index & 1;
+ xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
+ xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
+ *rate2 += tmp_rate_mv - rate_mv;
+ rate_mv = tmp_rate_mv;
+ } else {
+ mbmi->use_wedge_interinter = 0;
+ mbmi->mv[0].as_int = cur_mv[0].as_int;
+ mbmi->mv[1].as_int = cur_mv[1].as_int;
}
- }
- if (best_rd_wedge < best_rd_nowedge) {
- mbmi->use_wedge_interinter = 1;
- mbmi->interinter_wedge_index = best_wedge_index >> 1;
- mbmi->interinter_wedge_sign = best_wedge_index & 1;
} else {
- mbmi->use_wedge_interinter = 0;
+ uint8_t pred0[2 * MAX_SB_SQUARE * 3];
+ uint8_t pred1[2 * MAX_SB_SQUARE * 3];
+ uint8_t *preds0[3] = {pred0,
+ pred0 + 2 * MAX_SB_SQUARE,
+ pred0 + 4 * MAX_SB_SQUARE};
+ uint8_t *preds1[3] = {pred1,
+ pred1 + 2 * MAX_SB_SQUARE,
+ pred1 + 4 * MAX_SB_SQUARE};
+ int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
+ vp10_build_inter_predictors_for_planes_single_buf(
+ xd, bsize, mi_row, mi_col, 0, preds0, strides);
+ vp10_build_inter_predictors_for_planes_single_buf(
+ xd, bsize, mi_row, mi_col, 1, preds1, strides);
+ for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
+ mbmi->interinter_wedge_index = wedge_index >> 1;
+ mbmi->interinter_wedge_sign = wedge_index & 1;
+ vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
+ preds0, strides,
+ preds1, strides);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
+ &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+ rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+ if (rd < best_rd_wedge) {
+ best_wedge_index = wedge_index;
+ best_rd_wedge = rd;
+ }
+ }
+ if (best_rd_wedge < best_rd_nowedge) {
+ mbmi->use_wedge_interinter = 1;
+ mbmi->interinter_wedge_index = best_wedge_index >> 1;
+ mbmi->interinter_wedge_sign = best_wedge_index & 1;
+ } else {
+ mbmi->use_wedge_interinter = 0;
+ }
}
}
if (ref_best_rd < INT64_MAX &&
pred_exists = 0;
tmp_rd = VPXMIN(best_rd_wedge, best_rd_nowedge);
+
if (mbmi->use_wedge_interinter)
*compmode_wedge_cost = (1 + get_wedge_bits_lookup[bsize]) * 256 +
vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
int64_t best_interintra_rd_nowedge = INT64_MAX;
int64_t best_interintra_rd_wedge = INT64_MAX;
int rwedge;
- int bw = 4 << b_width_log2_lookup[mbmi->sb_type],
- bh = 4 << b_height_log2_lookup[mbmi->sb_type];
int_mv tmp_mv;
int tmp_rate_mv = 0;
DECLARE_ALIGNED(16, uint8_t,
vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
restore_dst_buf(xd, orig_dst, orig_dst_stride);
mbmi->ref_frame[1] = INTRA_FRAME;
+ mbmi->use_wedge_interintra = 0;
for (j = 0; j < INTERINTRA_MODES; ++j) {
mbmi->interintra_mode = (INTERINTRA_MODE)j;
- mbmi->interintra_uv_mode = (INTERINTRA_MODE)j;
rmode = interintra_mode_cost[mbmi->interintra_mode];
vp10_build_intra_predictors_for_interintra(
xd, bsize, 0, intrapred, MAX_SB_SIZE);
}
}
mbmi->interintra_mode = best_interintra_mode;
- mbmi->interintra_uv_mode = best_interintra_mode;
if (ref_best_rd < INT64_MAX &&
best_interintra_rd > 2 * ref_best_rd) {
return INT64_MAX;
rmode + rate_mv + rwedge + rate_sum, dist_sum);
best_interintra_rd_nowedge = rd;
- mbmi->use_wedge_interintra = 1;
- wedge_types = (1 << get_wedge_bits_lookup[bsize]);
- rwedge = get_wedge_bits_lookup[bsize] * 256 +
- vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
- for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
- mbmi->interintra_wedge_index = wedge_index;
- mbmi->interintra_wedge_sign = 0;
- vp10_combine_interintra(xd, bsize, 0,
- tmp_buf, MAX_SB_SIZE,
- intrapred, MAX_SB_SIZE);
- vp10_combine_interintra(xd, bsize, 1,
- tmp_buf + MAX_SB_SQUARE, MAX_SB_SIZE,
- intrapred + MAX_SB_SQUARE, MAX_SB_SIZE);
- vp10_combine_interintra(xd, bsize, 2,
- tmp_buf + 2 * MAX_SB_SQUARE, MAX_SB_SIZE,
- intrapred + 2 * MAX_SB_SQUARE, MAX_SB_SIZE);
- model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
- &skip_txfm_sb, &skip_sse_sb);
- rd = RDCOST(x->rdmult, x->rddiv,
- rmode + rate_mv + rwedge + rate_sum, dist_sum);
- if (rd < best_interintra_rd_wedge) {
- best_interintra_rd_wedge = rd;
- best_wedge_index = wedge_index;
+      // Disable wedge search if source variance is small
+ if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
+
+ mbmi->use_wedge_interintra = 1;
+ wedge_types = (1 << get_wedge_bits_lookup[bsize]);
+ rwedge = get_wedge_bits_lookup[bsize] * 256 +
+ vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
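+      // Search interintra wedge shapes (sign fixed to 0) using model RD.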
+ for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
+ mbmi->interintra_wedge_index = wedge_index;
+ mbmi->interintra_wedge_sign = 0;
+ vp10_combine_interintra(xd, bsize, 0,
+ tmp_buf, MAX_SB_SIZE,
+ intrapred, MAX_SB_SIZE);
+ vp10_combine_interintra(xd, bsize, 1,
+ tmp_buf + MAX_SB_SQUARE, MAX_SB_SIZE,
+ intrapred + MAX_SB_SQUARE, MAX_SB_SIZE);
+ vp10_combine_interintra(xd, bsize, 2,
+ tmp_buf + 2 * MAX_SB_SQUARE, MAX_SB_SIZE,
+ intrapred + 2 * MAX_SB_SQUARE, MAX_SB_SIZE);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
+ &skip_txfm_sb, &skip_sse_sb);
+ rd = RDCOST(x->rdmult, x->rddiv,
+ rmode + rate_mv + rwedge + rate_sum, dist_sum);
+ if (rd < best_interintra_rd_wedge) {
+ best_interintra_rd_wedge = rd;
+ best_wedge_index = wedge_index;
+ }
}
- }
- // Refine motion vector.
- if (have_newmv_in_inter_mode(this_mode)) {
- // get negative of mask
- const uint8_t* mask = vp10_get_soft_mask(
- best_wedge_index, 1, bsize, bh, bw);
- mbmi->interintra_wedge_index = best_wedge_index;
- mbmi->interintra_wedge_sign = 0;
- do_masked_motion_search(cpi, x, mask, MASK_MASTER_STRIDE, bsize,
- mi_row, mi_col, &tmp_mv, &tmp_rate_mv,
- 0, mv_idx);
- mbmi->mv[0].as_int = tmp_mv.as_int;
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
- model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
- &skip_txfm_sb, &skip_sse_sb);
- rd = RDCOST(x->rdmult, x->rddiv,
- rmode + tmp_rate_mv + rwedge + rate_sum, dist_sum);
- if (rd < best_interintra_rd_wedge) {
- best_interintra_rd_wedge = rd;
+ // Refine motion vector.
+ if (have_newmv_in_inter_mode(this_mode)) {
+ // get negative of mask
+ const uint8_t* mask = vp10_get_soft_mask(
+ best_wedge_index, 1, bsize, 0, 0);
+ mbmi->interintra_wedge_index = best_wedge_index;
+ mbmi->interintra_wedge_sign = 0;
+ do_masked_motion_search(cpi, x, mask, MASK_MASTER_STRIDE, bsize,
+ mi_row, mi_col, &tmp_mv, &tmp_rate_mv,
+ 0, mv_idx);
+ mbmi->mv[0].as_int = tmp_mv.as_int;
+ vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
+ &skip_txfm_sb, &skip_sse_sb);
+ rd = RDCOST(x->rdmult, x->rddiv,
+ rmode + tmp_rate_mv + rwedge + rate_sum, dist_sum);
+ if (rd < best_interintra_rd_wedge) {
+ best_interintra_rd_wedge = rd;
+ } else {
+ tmp_mv.as_int = cur_mv[0].as_int;
+ tmp_rate_mv = rate_mv;
+ }
} else {
tmp_mv.as_int = cur_mv[0].as_int;
tmp_rate_mv = rate_mv;
}
- } else {
- tmp_mv.as_int = cur_mv[0].as_int;
- tmp_rate_mv = rate_mv;
- }
- if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
- mbmi->use_wedge_interintra = 1;
- mbmi->interintra_wedge_index = best_wedge_index;
- mbmi->interintra_wedge_sign = 0;
- best_interintra_rd = best_interintra_rd_wedge;
- mbmi->mv[0].as_int = tmp_mv.as_int;
- *rate2 += tmp_rate_mv - rate_mv;
- rate_mv = tmp_rate_mv;
+ if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
+ mbmi->use_wedge_interintra = 1;
+ mbmi->interintra_wedge_index = best_wedge_index;
+ mbmi->interintra_wedge_sign = 0;
+ best_interintra_rd = best_interintra_rd_wedge;
+ mbmi->mv[0].as_int = tmp_mv.as_int;
+ *rate2 += tmp_rate_mv - rate_mv;
+ rate_mv = tmp_rate_mv;
+ } else {
+ mbmi->use_wedge_interintra = 0;
+ best_interintra_rd = best_interintra_rd_nowedge;
+ mbmi->mv[0].as_int = cur_mv[0].as_int;
+ }
} else {
mbmi->use_wedge_interintra = 0;
best_interintra_rd = best_interintra_rd_nowedge;
- mbmi->mv[0].as_int = cur_mv[0].as_int;
}
}
}
} else if (is_interintra_allowed(mbmi)) {
*compmode_interintra_cost =
- vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
+ vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
}
#if CONFIG_EXT_INTERP
#if CONFIG_EXT_INTER
mbmi->interintra_mode = (PREDICTION_MODE)(DC_PRED - 1);
- mbmi->interintra_uv_mode = (PREDICTION_MODE)(DC_PRED - 1);
#endif // CONFIG_EXT_INTER
if (ref_frame == INTRA_FRAME) {
if (best_single_inter_ref != ref_frame)
continue;
mbmi->interintra_mode = best_intra_mode;
- mbmi->interintra_uv_mode = best_intra_mode;
#if CONFIG_EXT_INTRA
// TODO(debargha|geza.lore):
// Should we use ext_intra modes for interintra?
// Use transform domain distortion.
// Note var-tx expt always uses pixel domain distortion.
sf->use_transform_domain_distortion = 1;
+#if CONFIG_EXT_INTER
+ sf->disable_wedge_search_var_thresh = 100;
+#endif // CONFIG_EXT_INTER
}
if (speed >= 2) {
sf->allow_exhaustive_searches = 0;
sf->exhaustive_searches_thresh = INT_MAX;
sf->use_upsampled_references = 0;
+#if CONFIG_EXT_INTER
+ sf->disable_wedge_search_var_thresh = 100;
+#endif // CONFIG_EXT_INTER
// Use transform domain distortion computation
// Note var-tx expt always uses pixel domain distortion.
#else
sf->use_upsampled_references = 1;
#endif
+#if CONFIG_EXT_INTER
+ sf->disable_wedge_search_var_thresh = 0;
+#endif // CONFIG_EXT_INTER
for (i = 0; i < TX_SIZES; i++) {
sf->intra_y_mode_mask[i] = INTRA_ALL;
// Choose a very large value (UINT_MAX) to use 8-tap always
unsigned int disable_filter_search_var_thresh;
+#if CONFIG_EXT_INTER
+ // A source variance threshold below which wedge search is disabled
+ unsigned int disable_wedge_search_var_thresh;
+#endif // CONFIG_EXT_INTER
+
// These bit masks allow you to enable or disable intra modes for each
// transform size separately.
int intra_y_mode_mask[TX_SIZES];