Reimplements the supertx experiment from the playground branch.
Makes it work with other experiments.
Results:
With --enable-supertx
derflr: +0.958%
With --enable-supertx --enable-ext-tx
derflr: +2.25%
With --enable-supertx --enable-ext-tx --enable-filterintra
derflr: +2.73%
Change-Id: I5012418ef2556bf2758146d90c4e2fb8a14610c7
filterintra
ext_tx
tx_skip
+ supertx
"
CONFIG_LIST="
external_build
#define INTER_OFFSET(mode) ((mode) - NEARESTMV)
+#if CONFIG_TX64X64
+#define MAXTXLEN 64
+#else
+#define MAXTXLEN 32
+#endif
+
/* For keyframes, intra block modes are predicted by the (already decoded)
modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */
extern const TX_TYPE intra_mode_to_tx_type_lookup[INTRA_MODES];
+#if CONFIG_SUPERTX
+#if CONFIG_TX64X64
+#define MAX_SUPERTX_BLOCK_SIZE BLOCK_64X64
+#else
+#define MAX_SUPERTX_BLOCK_SIZE BLOCK_32X32
+#endif
+
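+// Map a block size to the largest square transform size that fits inside it.
+// This is the transform a supertx block applies across its whole area.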
+static INLINE TX_SIZE bsize_to_tx_size(BLOCK_SIZE bsize) {
+ static const TX_SIZE bsize_to_tx_size_lookup[BLOCK_SIZES] = {
+ TX_4X4, TX_4X4, TX_4X4,
+ TX_8X8, TX_8X8, TX_8X8,
+ TX_16X16, TX_16X16, TX_16X16,
+ TX_32X32, TX_32X32, TX_32X32,
+#if CONFIG_TX64X64
+ TX_64X64
+#else
+ TX_32X32
+#endif
+ };
+ return bsize_to_tx_size_lookup[bsize];
+}
+
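+// Supertx is signalled implicitly: an mbmi is part of a supertx region when
+// its tx_size exceeds what its own block dimensions would allow.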
+static INLINE int supertx_enabled(const MB_MODE_INFO *mbmi) {
+ return (int)mbmi->tx_size >
+ MIN(b_width_log2_lookup[mbmi->sb_type],
+ b_height_log2_lookup[mbmi->sb_type]);
+}
+#endif // CONFIG_SUPERTX
+
#if CONFIG_EXT_TX
static TX_TYPE ext_tx_to_txtype[EXT_TX_TYPES] = {
DCT_DCT,
static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi,
const struct macroblockd_plane *pd) {
+#if CONFIG_SUPERTX
+ if (!supertx_enabled(mbmi)) {
+ return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type, pd->subsampling_x,
+ pd->subsampling_y);
+ } else {
+ return uvsupertx_size_lookup[mbmi->tx_size][pd->subsampling_x]
+ [pd->subsampling_y];
+ }
+#else
return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type, pd->subsampling_x,
pd->subsampling_y);
+#endif // CONFIG_SUPERTX
}
static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
{0, 8 }, // 64X32 - {0b0000, 0b1000}
{0, 0 }, // 64X64 - {0b0000, 0b0000}
};
+
+#if CONFIG_SUPERTX
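+// UV transform size for a supertx block, indexed by the luma supertx size
+// and the chroma subsampling in each direction.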
+const TX_SIZE uvsupertx_size_lookup[TX_SIZES][2][2] = {
+// ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1
+// ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1
+ {{TX_4X4, TX_4X4}, {TX_4X4, TX_4X4}},
+ {{TX_8X8, TX_4X4}, {TX_4X4, TX_4X4}},
+ {{TX_16X16, TX_8X8}, {TX_8X8, TX_8X8}},
+ {{TX_32X32, TX_16X16}, {TX_16X16, TX_16X16}},
+#if CONFIG_TX64X64
+ {{TX_64X64, TX_32X32}, {TX_32X32, TX_32X32}},
+#endif // CONFIG_TX64X64
+};
+#endif
extern const BLOCK_SIZE txsize_to_bsize[TX_SIZES];
extern const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES];
extern const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2];
+#if CONFIG_SUPERTX
+extern const TX_SIZE uvsupertx_size_lookup[TX_SIZES][2][2];
+#endif
#ifdef __cplusplus
} // extern "C"
};
#endif // CONFIG_EXT_TX
+#if CONFIG_SUPERTX
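+// Default probabilities (of the flag being 0, i.e. no supertx) per transform
+// size. Index 0 (TX_4X4) is effectively unused: the adaptation loops start
+// at 1.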
+static const vp9_prob default_supertx_prob[TX_SIZES] = {
+ 255, 160, 160, 160,
+#if CONFIG_TX64X64
+ 160
+#endif
+};
+
+static const vp9_prob default_supertxsplit_prob[TX_SIZES] = {
+ 255, 200, 200, 200,
+#if CONFIG_TX64X64
+ 200
+#endif
+};
+#endif
+
#if CONFIG_TX64X64
void tx_counts_to_branch_counts_64x64(const unsigned int *tx_count_64x64p,
unsigned int (*ct_64x64p)[2]) {
#if CONFIG_EXT_TX
vp9_copy(fc->ext_tx_prob, default_ext_tx_prob);
#endif
+#if CONFIG_SUPERTX
+ vp9_copy(fc->supertx_prob, default_supertx_prob);
+ vp9_copy(fc->supertxsplit_prob, default_supertxsplit_prob);
+#endif
}
const vp9_tree_index vp9_switchable_interp_tree
for (j = 0; j < INTRA_MODES; ++j)
fc->filterintra_prob[i][j] = adapt_prob(pre_fc->filterintra_prob[i][j],
counts->filterintra[i][j]);
-#endif
+#endif // CONFIG_FILTERINTRA
for (i = 0; i < SKIP_CONTEXTS; ++i)
fc->skip_probs[i] = adapt_prob(pre_fc->skip_probs[i], counts->skip[i]);
adapt_probs(vp9_ext_tx_tree, pre_fc->ext_tx_prob[i], counts->ext_tx[i],
fc->ext_tx_prob[i]);
}
-#endif
+#endif // CONFIG_EXT_TX
+
+#if CONFIG_SUPERTX
+ for (i = 1; i < TX_SIZES; ++i) {
+ fc->supertx_prob[i] = adapt_prob(pre_fc->supertx_prob[i],
+ counts->supertx[i]);
+ }
+ for (i = 1; i < TX_SIZES; ++i) {
+ fc->supertxsplit_prob[i] = adapt_prob(pre_fc->supertxsplit_prob[i],
+ counts->supertxsplit[i]);
+ }
+#endif // CONFIG_SUPERTX
}
static void set_default_lf_deltas(struct loopfilter *lf) {
#if CONFIG_EXT_TX
vp9_prob ext_tx_prob[3][EXT_TX_TYPES - 1];
#endif
+#if CONFIG_SUPERTX
+ vp9_prob supertx_prob[TX_SIZES];
+ vp9_prob supertxsplit_prob[TX_SIZES];
+#endif
} FRAME_CONTEXT;
typedef struct {
#if CONFIG_EXT_TX
unsigned int ext_tx[3][EXT_TX_TYPES];
#endif
+#if CONFIG_SUPERTX
+ unsigned int supertx[TX_SIZES][2];
+ unsigned int supertxsplit[TX_SIZES][2];
+ unsigned int supertx_size[BLOCK_SIZES];
+#endif
} FRAME_COUNTS;
extern const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
// block we are currently looking at. Shift is used to position the
// 1's we produce.
// TODO(JBB) Need another function for different resolution color..
-static void build_masks(const loop_filter_info_n *const lfi_n,
+static void build_masks(const VP9_COMMON *const cm,
const MODE_INFO *mi, const int shift_y,
const int shift_uv,
LOOP_FILTER_MASK *lfm) {
+ const loop_filter_info_n *const lfi_n = &cm->lf_info;
const MB_MODE_INFO *mbmi = &mi->mbmi;
const BLOCK_SIZE block_size = mbmi->sb_type;
const TX_SIZE tx_size_y = mbmi->tx_size;
- const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
+ const TX_SIZE tx_size_uv = get_uv_tx_size_impl(
+ tx_size_y, block_size, cm->subsampling_x, cm->subsampling_y);
const int filter_level = get_filter_level(lfi_n, mbmi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
// This function does the same thing as the one above with the exception that
// it only affects the y masks. It exists because for blocks < 16x16 in size,
// we only update u and v masks on the first block.
-static void build_y_mask(const loop_filter_info_n *const lfi_n,
+static void build_y_mask(const VP9_COMMON *const cm,
const MODE_INFO *mi, const int shift_y,
+#if CONFIG_SUPERTX
+ int supertx_enabled,
+#endif
LOOP_FILTER_MASK *lfm) {
+ const loop_filter_info_n *const lfi_n = &cm->lf_info;
const MB_MODE_INFO *mbmi = &mi->mbmi;
- const BLOCK_SIZE block_size = mbmi->sb_type;
const TX_SIZE tx_size_y = mbmi->tx_size;
+#if CONFIG_SUPERTX
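+ // In a supertx region the filtering unit is the transform: 3 * tx_size
+ // converts a square TX_SIZE to the matching square BLOCK_SIZE.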
+ const BLOCK_SIZE block_size =
+ supertx_enabled ? (BLOCK_SIZE)(3 * tx_size_y) : mbmi->sb_type;
+#else
+ const BLOCK_SIZE block_size = mbmi->sb_type;
+#endif
const int filter_level = get_filter_level(lfi_n, mbmi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
MODE_INFO *mi, const int mode_info_stride,
LOOP_FILTER_MASK *lfm) {
int idx_32, idx_16, idx_8;
- const loop_filter_info_n *const lfi_n = &cm->lf_info;
MODE_INFO *mip = mi;
MODE_INFO *mip2 = mi;
// through the recursive loop structure multiple times.
switch (mip->mbmi.sb_type) {
case BLOCK_64X64:
- build_masks(lfi_n, mip , 0, 0, lfm);
+ build_masks(cm, mip, 0, 0, lfm);
break;
case BLOCK_64X32:
- build_masks(lfi_n, mip, 0, 0, lfm);
+ build_masks(cm, mip, 0, 0, lfm);
+#if CONFIG_SUPERTX && CONFIG_TX64X64
+ if (supertx_enabled(&mip->mbmi))
+ break;
+#endif
mip2 = mip + mode_info_stride * 4;
if (4 >= max_rows)
break;
- build_masks(lfi_n, mip2, 32, 8, lfm);
+ build_masks(cm, mip2, 32, 8, lfm);
break;
case BLOCK_32X64:
- build_masks(lfi_n, mip, 0, 0, lfm);
+ build_masks(cm, mip, 0, 0, lfm);
+#if CONFIG_SUPERTX && CONFIG_TX64X64
+ if (supertx_enabled(&mip->mbmi))
+ break;
+#endif
mip2 = mip + 4;
if (4 >= max_cols)
break;
- build_masks(lfi_n, mip2, 4, 2, lfm);
+ build_masks(cm, mip2, 4, 2, lfm);
break;
default:
+#if CONFIG_SUPERTX && CONFIG_TX64X64
+ if (mip->mbmi.tx_size == TX_64X64) {
+ build_masks(cm, mip, 0, 0, lfm);
+ } else {
+#endif
for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
const int shift_y = shift_32_y[idx_32];
const int shift_uv = shift_32_uv[idx_32];
continue;
switch (mip->mbmi.sb_type) {
case BLOCK_32X32:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
break;
case BLOCK_32X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
+#if CONFIG_SUPERTX
+ if (supertx_enabled(&mip->mbmi))
+ break;
+#endif
if (mi_32_row_offset + 2 >= max_rows)
continue;
mip2 = mip + mode_info_stride * 2;
- build_masks(lfi_n, mip2, shift_y + 16, shift_uv + 4, lfm);
+ build_masks(cm, mip2, shift_y + 16, shift_uv + 4, lfm);
break;
case BLOCK_16X32:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
+#if CONFIG_SUPERTX
+ if (supertx_enabled(&mip->mbmi))
+ break;
+#endif
if (mi_32_col_offset + 2 >= max_cols)
continue;
mip2 = mip + 2;
- build_masks(lfi_n, mip2, shift_y + 2, shift_uv + 1, lfm);
+ build_masks(cm, mip2, shift_y + 2, shift_uv + 1, lfm);
break;
default:
+#if CONFIG_SUPERTX
+ if (mip->mbmi.tx_size == TX_32X32) {
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
+ } else {
+#endif
for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16];
const int shift_uv = shift_32_uv[idx_32] + shift_16_uv[idx_16];
switch (mip->mbmi.sb_type) {
case BLOCK_16X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
break;
case BLOCK_16X8:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
+#if CONFIG_SUPERTX
+ if (supertx_enabled(&mip->mbmi))
+ break;
+#endif
if (mi_16_row_offset + 1 >= max_rows)
continue;
mip2 = mip + mode_info_stride;
- build_y_mask(lfi_n, mip2, shift_y+8, lfm);
+ build_y_mask(cm, mip2, shift_y + 8,
+#if CONFIG_SUPERTX
+ 0,
+#endif
+ lfm);
break;
case BLOCK_8X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
+#if CONFIG_SUPERTX
+ if (supertx_enabled(&mip->mbmi))
+ break;
+#endif
if (mi_16_col_offset +1 >= max_cols)
continue;
mip2 = mip + 1;
- build_y_mask(lfi_n, mip2, shift_y+1, lfm);
+ build_y_mask(cm, mip2, shift_y + 1,
+#if CONFIG_SUPERTX
+ 0,
+#endif
+ lfm);
break;
default: {
+#if CONFIG_SUPERTX
+ if (mip->mbmi.tx_size == TX_16X16) {
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
+ } else {
+#endif
const int shift_y = shift_32_y[idx_32] +
shift_16_y[idx_16] +
shift_8_y[0];
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(cm, mip, shift_y, shift_uv, lfm);
mip += offset[0];
for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
const int shift_y = shift_32_y[idx_32] +
if (mi_8_col_offset >= max_cols ||
mi_8_row_offset >= max_rows)
continue;
- build_y_mask(lfi_n, mip, shift_y, lfm);
+ build_y_mask(cm, mip, shift_y,
+#if CONFIG_SUPERTX
+ supertx_enabled(&mip->mbmi),
+#endif
+ lfm);
+ }
+#if CONFIG_SUPERTX
}
+#endif
break;
}
}
}
+#if CONFIG_SUPERTX
+ }
+#endif
break;
}
}
+#if CONFIG_SUPERTX && CONFIG_TX64X64
+ }
+#endif
break;
}
// The largest loopfilter we have is 16x16 so we use the 16x16 mask
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
}
+
void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
MAX_MB_PLANE - 1);
}
+
void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
MAX_MB_PLANE - 1);
}
+#if CONFIG_SUPERTX
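+// 1-D blending weights (out of 64) used to feather the seam between the two
+// overlapping predictors at an internal edge of a supertx block.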
+static const uint8_t mask_8[8] = {
+ 64, 64, 62, 52, 12, 2, 0, 0
+};
+
+static const uint8_t mask_16[16] = {
+ 63, 62, 60, 58, 55, 50, 43, 36, 28, 21, 14, 9, 6, 4, 2, 1
+};
+
+static const uint8_t mask_32[32] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 63, 61, 57, 52, 45, 36,
+ 28, 19, 12, 7, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+#if CONFIG_TX64X64
+static const uint8_t mask_64[64] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 63, 61, 57, 52, 45, 36,
+ 28, 19, 12, 7, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+#endif
+
+static void generate_1dmask(int length, uint8_t *mask) {
+ switch (length) {
+ case 8:
+ vpx_memcpy(mask, mask_8, length);
+ break;
+ case 16:
+ vpx_memcpy(mask, mask_16, length);
+ break;
+ case 32:
+ vpx_memcpy(mask, mask_32, length);
+ break;
+#if CONFIG_TX64X64
+ case 64:
+ vpx_memcpy(mask, mask_64, length);
+ break;
+#endif
+ default:
+ assert(0);
+ }
+}
+
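+// Blend dst (weight m/64) and dst2 (weight (64 - m)/64) across the internal
+// partition edge, with m taken from the 1-D masks above.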
+void vp9_build_masked_inter_predictor_complex(
+ uint8_t *dst, int dst_stride, uint8_t *dst2, int dst2_stride,
+ const struct macroblockd_plane *pd, int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori, BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
+ PARTITION_TYPE partition) {
+ int i, j;
+ uint8_t mask[MAXTXLEN];
+ int top_w = 4 << b_width_log2_lookup[top_bsize],
+ top_h = 4 << b_height_log2_lookup[top_bsize];
+ int w = 4 << b_width_log2_lookup[bsize], h = 4 << b_height_log2_lookup[bsize];
+ int w_offset = (mi_col - mi_col_ori) << 3,
+ h_offset = (mi_row - mi_row_ori) << 3;
+
+ top_w >>= pd->subsampling_x;
+ top_h >>= pd->subsampling_y;
+ w >>= pd->subsampling_x;
+ h >>= pd->subsampling_y;
+ w_offset >>= pd->subsampling_x;
+ h_offset >>= pd->subsampling_y;
+
+ switch (partition) {
+ case PARTITION_HORZ:
+ generate_1dmask(h, mask + h_offset);
+ vpx_memset(mask, 64, h_offset);
+ vpx_memset(mask + h_offset + h, 0, top_h - h_offset - h);
+ break;
+ case PARTITION_VERT:
+ generate_1dmask(w, mask + w_offset);
+ vpx_memset(mask, 64, w_offset);
+ vpx_memset(mask + w_offset + w, 0, top_w - w_offset - w);
+ break;
+ default:
+ assert(0);
+ }
+ for (i = 0; i < top_h; ++i) {
+ for (j = 0; j < top_w; ++j) {
+ const int m = (partition == PARTITION_HORZ ? mask[i] : mask[j]);
+ if (m == 64)
+ continue;
+ else if (m == 0)
+ dst[i * dst_stride + j] = dst2[i * dst2_stride + j];
+ else
+ dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
+ dst2[i * dst2_stride + j] * (64 - m) +
+ 32) >> 6;
+ }
+ }
+}
+
+void vp9_build_inter_predictors_sby_sub8x8_extend(MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ int mi_row_ori,
+ int mi_col_ori,
+ BLOCK_SIZE top_bsize,
+ PARTITION_TYPE partition) {
+ const int mi_x = mi_col_ori * MI_SIZE;
+ const int mi_y = mi_row_ori * MI_SIZE;
+ uint8_t *orig_dst;
+ int orig_dst_stride;
+ int bw = 4 << b_width_log2_lookup[top_bsize];
+ int bh = 4 << b_height_log2_lookup[top_bsize];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf, MAXTXLEN * MAXTXLEN);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf1, MAXTXLEN * MAXTXLEN);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf2, MAXTXLEN * MAXTXLEN);
+
+ orig_dst = xd->plane[0].dst.buf;
+ orig_dst_stride = xd->plane[0].dst.stride;
+ build_inter_predictors(xd, 0, 0, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+
+ xd->plane[0].dst.buf = tmp_buf;
+ xd->plane[0].dst.stride = MAXTXLEN;
+ switch (partition) {
+ case PARTITION_HORZ:
+ build_inter_predictors(xd, 0, 2, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ break;
+ case PARTITION_VERT:
+ build_inter_predictors(xd, 0, 1, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ break;
+ case PARTITION_SPLIT:
+ build_inter_predictors(xd, 0, 1, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ xd->plane[0].dst.buf = tmp_buf1;
+ xd->plane[0].dst.stride = MAXTXLEN;
+ build_inter_predictors(xd, 0, 2, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ xd->plane[0].dst.buf = tmp_buf2;
+ xd->plane[0].dst.stride = MAXTXLEN;
+ build_inter_predictors(xd, 0, 3, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (partition != PARTITION_SPLIT) {
+ vp9_build_masked_inter_predictor_complex(orig_dst, orig_dst_stride,
+ tmp_buf, MAXTXLEN,
+ &xd->plane[0], mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ BLOCK_8X8, top_bsize,
+ partition);
+ } else {
+ vp9_build_masked_inter_predictor_complex(orig_dst, orig_dst_stride,
+ tmp_buf, MAXTXLEN,
+ &xd->plane[0], mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ BLOCK_8X8, top_bsize,
+ PARTITION_VERT);
+ vp9_build_masked_inter_predictor_complex(tmp_buf1, MAXTXLEN,
+ tmp_buf2, MAXTXLEN,
+ &xd->plane[0], mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ BLOCK_8X8, top_bsize,
+ PARTITION_VERT);
+ vp9_build_masked_inter_predictor_complex(orig_dst, orig_dst_stride,
+ tmp_buf1, MAXTXLEN,
+ &xd->plane[0], mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ BLOCK_8X8, top_bsize,
+ PARTITION_HORZ);
+ }
+ xd->plane[0].dst.buf = orig_dst;
+ xd->plane[0].dst.stride = orig_dst_stride;
+}
+
+void vp9_build_inter_predictors_sbuv_sub8x8_extend(MACROBLOCKD *xd,
+ int mi_row_ori,
+ int mi_col_ori,
+ BLOCK_SIZE top_bsize) {
+ int plane;
+ const int mi_x = mi_col_ori * MI_SIZE;
+ const int mi_y = mi_row_ori * MI_SIZE;
+ for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(top_bsize,
+ &xd->plane[plane]);
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+ const int bw = 4 * num_4x4_w;
+ const int bh = 4 * num_4x4_h;
+
+ build_inter_predictors(xd, plane, 0, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ }
+}
+#endif // CONFIG_SUPERTX
+
// TODO(jingning): This function serves as a placeholder for decoder prediction
// using on demand border extension. It should be moved to /decoder/ directory.
static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
} else {
inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys);
+ subpel_y, sf, w, h, ref, kernel, xs, ys);
}
#else
inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
}
}
+#if CONFIG_SUPERTX
+void vp9_dec_build_inter_predictors_sby_sub8x8_extend(MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ int mi_row_ori,
+ int mi_col_ori,
+ BLOCK_SIZE top_bsize,
+ PARTITION_TYPE partition) {
+ const int mi_x = mi_col_ori * MI_SIZE;
+ const int mi_y = mi_row_ori * MI_SIZE;
+ uint8_t *orig_dst;
+ int orig_dst_stride;
+ int bw = 4 << b_width_log2_lookup[top_bsize];
+ int bh = 4 << b_height_log2_lookup[top_bsize];
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf, MAXTXLEN * MAXTXLEN);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf1, MAXTXLEN * MAXTXLEN);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf2, MAXTXLEN * MAXTXLEN);
+
+ orig_dst = xd->plane[0].dst.buf;
+ orig_dst_stride = xd->plane[0].dst.stride;
+ dec_build_inter_predictors(xd, 0, 0, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+
+ xd->plane[0].dst.buf = tmp_buf;
+ xd->plane[0].dst.stride = MAXTXLEN;
+ switch (partition) {
+ case PARTITION_HORZ:
+ dec_build_inter_predictors(xd, 0, 2, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ break;
+ case PARTITION_VERT:
+ dec_build_inter_predictors(xd, 0, 1, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ break;
+ case PARTITION_SPLIT:
+ dec_build_inter_predictors(xd, 0, 1, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ xd->plane[0].dst.buf = tmp_buf1;
+ xd->plane[0].dst.stride = MAXTXLEN;
+ dec_build_inter_predictors(xd, 0, 2, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ xd->plane[0].dst.buf = tmp_buf2;
+ xd->plane[0].dst.stride = MAXTXLEN;
+ dec_build_inter_predictors(xd, 0, 3, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (partition != PARTITION_SPLIT) {
+ vp9_build_masked_inter_predictor_complex(orig_dst, orig_dst_stride,
+ tmp_buf, MAXTXLEN,
+ &xd->plane[0], mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ BLOCK_8X8, top_bsize,
+ partition);
+ } else {
+ vp9_build_masked_inter_predictor_complex(orig_dst, orig_dst_stride,
+ tmp_buf, MAXTXLEN,
+ &xd->plane[0], mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ BLOCK_8X8, top_bsize,
+ PARTITION_VERT);
+ vp9_build_masked_inter_predictor_complex(tmp_buf1, MAXTXLEN,
+ tmp_buf2, MAXTXLEN,
+ &xd->plane[0], mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ BLOCK_8X8, top_bsize,
+ PARTITION_VERT);
+ vp9_build_masked_inter_predictor_complex(orig_dst, orig_dst_stride,
+ tmp_buf1, MAXTXLEN,
+ &xd->plane[0], mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ BLOCK_8X8, top_bsize,
+ PARTITION_HORZ);
+ }
+ xd->plane[0].dst.buf = orig_dst;
+ xd->plane[0].dst.stride = orig_dst_stride;
+}
+
+void vp9_dec_build_inter_predictors_sbuv_sub8x8_extend(MACROBLOCKD *xd,
+ int mi_row_ori,
+ int mi_col_ori,
+ BLOCK_SIZE top_bsize) {
+ int plane;
+ const int mi_x = mi_col_ori * MI_SIZE;
+ const int mi_y = mi_row_ori * MI_SIZE;
+ for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(top_bsize,
+ &xd->plane[plane]);
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+ const int bw = 4 * num_4x4_w;
+ const int bh = 4 * num_4x4_h;
+
+ dec_build_inter_predictors(xd, plane, 0, bw, bh, 0, 0, bw, bh,
+ mi_x, mi_y);
+ }
+}
+#endif // CONFIG_SUPERTX
+
void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
const struct scale_factors *sf);
+#if CONFIG_SUPERTX
+struct macroblockd_plane;
+void vp9_build_inter_predictors_sby_sub8x8_extend(MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ int mi_row_ori,
+ int mi_col_ori,
+ BLOCK_SIZE top_bsize,
+ PARTITION_TYPE partition);
+void vp9_build_inter_predictors_sbuv_sub8x8_extend(MACROBLOCKD *xd,
+ int mi_row_ori,
+ int mi_col_ori,
+ BLOCK_SIZE top_bsize);
+void vp9_build_masked_inter_predictor_complex(
+ uint8_t *dst, int dst_stride, uint8_t *dst2, int dst2_stride,
+ const struct macroblockd_plane *pd, int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori, BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
+ PARTITION_TYPE partition);
+void vp9_dec_build_inter_predictors_sby_sub8x8_extend(MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ int mi_row_ori,
+ int mi_col_ori,
+ BLOCK_SIZE top_bsize,
+ PARTITION_TYPE p);
+void vp9_dec_build_inter_predictors_sbuv_sub8x8_extend(MACROBLOCKD *xd,
+ int mi_row_ori,
+ int mi_col_ori,
+ BLOCK_SIZE top_bsize);
+#endif // CONFIG_SUPERTX
+
#ifdef __cplusplus
} // extern "C"
#endif
return &xd->mi[0].mbmi;
}
+#if CONFIG_SUPERTX
+static MB_MODE_INFO *set_offsets_extend(VP9_COMMON *const cm,
+ MACROBLOCKD *const xd,
+ const TileInfo *const tile,
+ BLOCK_SIZE top_bsize,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori) {
+ const int bw = num_8x8_blocks_wide_lookup[top_bsize];
+ const int bh = num_8x8_blocks_high_lookup[top_bsize];
+ const int offset = mi_row * cm->mi_stride + mi_col;
+
+ xd->mi = cm->mi + offset;
+ xd->mi[0].src_mi = &xd->mi[0];
+ set_mi_row_col(xd, tile, mi_row_ori, bh, mi_col_ori, bw,
+ cm->mi_rows, cm->mi_cols);
+ return &xd->mi[0].mbmi;
+}
+
+static MB_MODE_INFO *set_mb_offsets(VP9_COMMON *const cm,
+ MACROBLOCKD *const xd,
+ const TileInfo *const tile,
+ BLOCK_SIZE bsize,
+ int mi_row, int mi_col) {
+ const int bw = num_8x8_blocks_wide_lookup[bsize];
+ const int bh = num_8x8_blocks_high_lookup[bsize];
+ const int x_mis = MIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = MIN(bh, cm->mi_rows - mi_row);
+ const int offset = mi_row * cm->mi_stride + mi_col;
+ int x, y;
+
+ xd->mi = cm->mi + offset;
+ xd->mi[0].src_mi = &xd->mi[0];
+ xd->mi[0].mbmi.sb_type = bsize;
+ for (y = 0; y < y_mis; ++y)
+ for (x = !y; x < x_mis; ++x)
+ xd->mi[y * cm->mi_stride + x] = xd->mi[0];
+
+ set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
+ return &xd->mi[0].mbmi;
+}
+
+static void set_offsets_topblock(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ const TileInfo *const tile,
+ BLOCK_SIZE bsize, int mi_row, int mi_col) {
+ const int bw = num_8x8_blocks_wide_lookup[bsize];
+ const int bh = num_8x8_blocks_high_lookup[bsize];
+ const int offset = mi_row * cm->mi_stride + mi_col;
+
+ xd->mi = cm->mi + offset;
+ xd->mi[0].src_mi = &xd->mi[0];
+
+ set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
+
+ vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+}
+
+static void set_param_topblock(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ BLOCK_SIZE bsize, int mi_row, int mi_col,
+#if CONFIG_EXT_TX
+ int txfm,
+#endif
+ int skip) {
+ const int bw = num_8x8_blocks_wide_lookup[bsize];
+ const int bh = num_8x8_blocks_high_lookup[bsize];
+ const int x_mis = MIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = MIN(bh, cm->mi_rows - mi_row);
+ const int offset = mi_row * cm->mi_stride + mi_col;
+ int x, y;
+
+ xd->mi = cm->mi + offset;
+ xd->mi[0].src_mi = &xd->mi[0];
+
+ for (y = 0; y < y_mis; ++y)
+ for (x = 0; x < x_mis; ++x) {
+ xd->mi[y * cm->mi_stride + x].mbmi.skip = skip;
+#if CONFIG_EXT_TX
+ xd->mi[y * cm->mi_stride + x].mbmi.ext_txfrm = txfm;
+#endif
+ }
+}
+
+static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ int idx, int mi_row, int mi_col) {
+ MB_MODE_INFO *const mbmi = &xd->mi[0].mbmi;
+ RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
+ xd->block_refs[idx] = ref_buffer;
+ if (!vp9_is_valid_scale(&ref_buffer->sf))
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid scale factors");
+ vp9_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
+ &ref_buffer->sf);
+ xd->corrupted |= ref_buffer->buf->corrupted;
+}
+
+static void dec_predict_b_extend(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ const TileInfo *const tile,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ BLOCK_SIZE top_bsize) {
+ MB_MODE_INFO *mbmi = set_offsets_extend(cm, xd, tile, top_bsize,
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori);
+ set_ref(cm, xd, 0, mi_row_ori, mi_col_ori);
+ if (has_second_ref(&xd->mi[0].mbmi))
+ set_ref(cm, xd, 1, mi_row_ori, mi_col_ori);
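+ // For square blocks, b_width_log2 equals the matching square TX_SIZE, so
+ // this stamps the full-block supertx transform size into the mbmi.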
+ mbmi->tx_size = b_width_log2_lookup[top_bsize];
+ vp9_dec_build_inter_predictors_sb(xd, mi_row_ori, mi_col_ori, top_bsize);
+}
+
+static void dec_predict_b_sub8x8_extend(VP9_COMMON *const cm,
+ MACROBLOCKD *const xd,
+ const TileInfo *const tile,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ BLOCK_SIZE top_bsize,
+ PARTITION_TYPE partition) {
+ MB_MODE_INFO *mbmi = set_offsets_extend(cm, xd, tile, top_bsize,
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori);
+ set_ref(cm, xd, 0, mi_row_ori, mi_col_ori);
+ if (has_second_ref(&xd->mi[0].mbmi))
+ set_ref(cm, xd, 1, mi_row_ori, mi_col_ori);
+ mbmi->tx_size = b_width_log2_lookup[top_bsize];
+ vp9_dec_build_inter_predictors_sby_sub8x8_extend(xd, mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ top_bsize, partition);
+ vp9_dec_build_inter_predictors_sbuv_sub8x8_extend(xd, mi_row_ori, mi_col_ori,
+ top_bsize);
+}
+
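+// Recursively predict a supertx region: walk the original partition tree,
+// extend each sub-block's prediction over the whole top block, then blend
+// neighboring predictions with the smooth 1-D masks.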
+static void dec_predict_sb_complex(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ const TileInfo *const tile,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
+ uint8_t *dst_buf[3], int dst_stride[3]) {
+ const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE subsize;
+ MB_MODE_INFO *mbmi;
+ int i, offset = mi_row * cm->mi_stride + mi_col;
+
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf1,
+ MAX_MB_PLANE * MAXTXLEN * MAXTXLEN);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf2,
+ MAX_MB_PLANE * MAXTXLEN * MAXTXLEN);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf3,
+ MAX_MB_PLANE * MAXTXLEN * MAXTXLEN);
+ uint8_t *dst_buf1[3] = {
+ tmp_buf1,
+ tmp_buf1 + MAXTXLEN * MAXTXLEN,
+ tmp_buf1 + 2 * MAXTXLEN * MAXTXLEN};
+ uint8_t *dst_buf2[3] = {
+ tmp_buf2,
+ tmp_buf2 + MAXTXLEN * MAXTXLEN,
+ tmp_buf2 + 2 * MAXTXLEN * MAXTXLEN};
+ uint8_t *dst_buf3[3] = {
+ tmp_buf3,
+ tmp_buf3 + MAXTXLEN * MAXTXLEN,
+ tmp_buf3 + 2 * MAXTXLEN * MAXTXLEN};
+ int dst_stride1[3] = {MAXTXLEN, MAXTXLEN, MAXTXLEN};
+ int dst_stride2[3] = {MAXTXLEN, MAXTXLEN, MAXTXLEN};
+ int dst_stride3[3] = {MAXTXLEN, MAXTXLEN, MAXTXLEN};
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ xd->mi = cm->mi + offset;
+ xd->mi[0].src_mi = &xd->mi[0];
+ mbmi = &xd->mi[0].mbmi;
+ partition = partition_lookup[bsl][mbmi->sb_type];
+ subsize = get_subsize(bsize, partition);
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = dst_buf[i];
+ xd->plane[i].dst.stride = dst_stride[i];
+ }
+
+ switch (partition) {
+ case PARTITION_NONE:
+ assert(bsize < top_bsize);
+ dec_predict_b_extend(cm, xd, tile, mi_row, mi_col, mi_row_ori, mi_col_ori,
+ top_bsize);
+ break;
+ case PARTITION_HORZ:
+ if (bsize > BLOCK_8X8) {
+ dec_predict_b_extend(cm, xd, tile, mi_row, mi_col, mi_row_ori,
+ mi_col_ori, top_bsize);
+ } else {
+ dec_predict_b_sub8x8_extend(cm, xd, tile, mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ top_bsize, partition);
+ }
+ if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = tmp_buf1 + i * MAXTXLEN * MAXTXLEN;
+ xd->plane[i].dst.stride = MAXTXLEN;
+ }
+ dec_predict_b_extend(cm, xd, tile, mi_row + hbs, mi_col,
+ mi_row_ori, mi_col_ori, top_bsize);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = dst_buf[i];
+ xd->plane[i].dst.stride = dst_stride[i];
+ vp9_build_masked_inter_predictor_complex(dst_buf[i], dst_stride[i],
+ dst_buf1[i], dst_stride1[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_HORZ);
+ }
+ }
+ break;
+ case PARTITION_VERT:
+ if (bsize > BLOCK_8X8) {
+ dec_predict_b_extend(cm, xd, tile, mi_row, mi_col, mi_row_ori,
+ mi_col_ori, top_bsize);
+ } else {
+ dec_predict_b_sub8x8_extend(cm, xd, tile, mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ top_bsize, partition);
+ }
+ if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = tmp_buf1 + i * MAXTXLEN * MAXTXLEN;
+ xd->plane[i].dst.stride = MAXTXLEN;
+ }
+ dec_predict_b_extend(cm, xd, tile, mi_row, mi_col + hbs, mi_row_ori,
+ mi_col_ori, top_bsize);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = dst_buf[i];
+ xd->plane[i].dst.stride = dst_stride[i];
+ vp9_build_masked_inter_predictor_complex(dst_buf[i], dst_stride[i],
+ dst_buf1[i], dst_stride1[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_VERT);
+ }
+ }
+ break;
+ case PARTITION_SPLIT:
+ if (bsize == BLOCK_8X8) {
+ dec_predict_b_sub8x8_extend(cm, xd, tile, mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ top_bsize, partition);
+ } else {
+ dec_predict_sb_complex(cm, xd, tile, mi_row, mi_col,
+ mi_row_ori, mi_col_ori, subsize, top_bsize,
+ dst_buf, dst_stride);
+ if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols)
+ dec_predict_sb_complex(cm, xd, tile, mi_row, mi_col + hbs,
+ mi_row_ori, mi_col_ori, subsize, top_bsize,
+ dst_buf1, dst_stride1);
+ if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols)
+ dec_predict_sb_complex(cm, xd, tile, mi_row + hbs, mi_col,
+ mi_row_ori, mi_col_ori, subsize, top_bsize,
+ dst_buf2, dst_stride2);
+ if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols)
+ dec_predict_sb_complex(cm, xd, tile, mi_row + hbs, mi_col + hbs,
+ mi_row_ori, mi_col_ori, subsize, top_bsize,
+ dst_buf3, dst_stride3);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
+ vp9_build_masked_inter_predictor_complex(dst_buf[i], dst_stride[i],
+ dst_buf1[i],
+ dst_stride1[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_VERT);
+ if (mi_row + hbs < cm->mi_rows) {
+ vp9_build_masked_inter_predictor_complex(dst_buf2[i],
+ dst_stride2[i],
+ dst_buf3[i],
+ dst_stride3[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_VERT);
+ vp9_build_masked_inter_predictor_complex(dst_buf[i],
+ dst_stride[i],
+ dst_buf2[i],
+ dst_stride2[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_HORZ);
+ }
+ } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
+ vp9_build_masked_inter_predictor_complex(dst_buf[i],
+ dst_stride[i],
+ dst_buf2[i],
+ dst_stride2[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_HORZ);
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+#endif // CONFIG_SUPERTX
+
static void decode_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
const TileInfo *const tile,
+#if CONFIG_SUPERTX
+ int supertx_enabled,
+#endif
int mi_row, int mi_col,
vp9_reader *r, BLOCK_SIZE bsize) {
const int less8x8 = bsize < BLOCK_8X8;
+#if CONFIG_SUPERTX
+ MB_MODE_INFO *mbmi;
+ if (supertx_enabled) {
+ mbmi = set_mb_offsets(cm, xd, tile, bsize, mi_row, mi_col);
+ } else {
+ mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
+ }
+ vp9_read_mode_info(cm, xd, tile, supertx_enabled, mi_row, mi_col, r);
+#else
MB_MODE_INFO *mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);
+#endif
+#if CONFIG_SUPERTX
+ if (!supertx_enabled) {
+#endif
if (less8x8)
bsize = BLOCK_8X8;
mbmi->skip = 1; // skip loopfilter
}
}
+#if CONFIG_SUPERTX
+ }
+#endif
xd->corrupted |= vp9_reader_has_error(r);
}
static void decode_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd,
const TileInfo *const tile,
+#if CONFIG_SUPERTX
+ int read_token, int supertx_enabled,
+#endif
int mi_row, int mi_col,
vp9_reader* r, BLOCK_SIZE bsize) {
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
PARTITION_TYPE partition;
BLOCK_SIZE subsize, uv_subsize;
+#if CONFIG_SUPERTX
+ int skip = 0;
+ TX_SIZE supertx_size = b_width_log2_lookup[bsize];
+#if CONFIG_EXT_TX
+ int txfm = NORM;
+#endif
+#endif
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (subsize >= BLOCK_8X8 && uv_subsize == BLOCK_INVALID)
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Invalid block size.");
+#if CONFIG_SUPERTX
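+ // On inter frames, a splitting node no larger than MAX_SUPERTX_BLOCK_SIZE
+ // that is not already inside a supertx region reads a flag choosing one
+ // shared transform of the node's size over per-block transforms.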
+ if (cm->frame_type != KEY_FRAME &&
+ partition != PARTITION_NONE &&
+ bsize <= MAX_SUPERTX_BLOCK_SIZE &&
+ !supertx_enabled) {
+ if (partition == PARTITION_SPLIT) {
+ supertx_enabled = vp9_read(r, cm->fc.supertxsplit_prob[supertx_size]);
+ cm->counts.supertxsplit[supertx_size][supertx_enabled]++;
+ } else {
+ supertx_enabled = vp9_read(r, cm->fc.supertx_prob[supertx_size]);
+ cm->counts.supertx[supertx_size][supertx_enabled]++;
+ }
+ }
+ if (supertx_enabled && read_token) {
+ int offset = mi_row * cm->mi_stride + mi_col;
+ xd->mi = cm->mi + offset;
+ xd->mi[0].src_mi = &xd->mi[0];
+ set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[bsize],
+ mi_col, num_8x8_blocks_wide_lookup[bsize],
+ cm->mi_rows, cm->mi_cols);
+ set_skip_context(xd, mi_row, mi_col);
+ // Here we assume mbmi->segment_id = 0
+ skip = read_skip(cm, xd, 0, r);
+ if (skip)
+ reset_skip_context(xd, bsize);
+#if CONFIG_EXT_TX
+ if (bsize <= BLOCK_16X16 && !skip) {
+ txfm = vp9_read_tree(r, vp9_ext_tx_tree,
+ cm->fc.ext_tx_prob[supertx_size]);
+ if (!cm->frame_parallel_decoding_mode)
+ ++cm->counts.ext_tx[supertx_size][txfm];
+ }
+#endif
+ }
+#endif // CONFIG_SUPERTX
if (subsize < BLOCK_8X8) {
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(cm, xd, tile,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col, r, subsize);
} else {
switch (partition) {
case PARTITION_NONE:
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(cm, xd, tile,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col, r, subsize);
break;
case PARTITION_HORZ:
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(cm, xd, tile,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col, r, subsize);
if (mi_row + hbs < cm->mi_rows)
- decode_block(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
+ decode_block(cm, xd, tile,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row + hbs, mi_col, r, subsize);
break;
case PARTITION_VERT:
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(cm, xd, tile,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col, r, subsize);
if (mi_col + hbs < cm->mi_cols)
- decode_block(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
+ decode_block(cm, xd, tile,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col + hbs, r, subsize);
break;
case PARTITION_SPLIT:
+#if CONFIG_SUPERTX
+ decode_partition(cm, xd, tile, !supertx_enabled, supertx_enabled,
+ mi_row, mi_col, r, subsize);
+ decode_partition(cm, xd, tile, !supertx_enabled, supertx_enabled,
+ mi_row, mi_col + hbs, r, subsize);
+ decode_partition(cm, xd, tile, !supertx_enabled, supertx_enabled,
+ mi_row + hbs, mi_col, r, subsize);
+ decode_partition(cm, xd, tile, !supertx_enabled, supertx_enabled,
+ mi_row + hbs, mi_col + hbs, r, subsize);
+#else
decode_partition(cm, xd, tile, mi_row, mi_col, r, subsize);
decode_partition(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
decode_partition(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
decode_partition(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize);
+#endif
break;
default:
assert(0 && "Invalid partition type");
}
}
+#if CONFIG_SUPERTX
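+ // The node that owns a supertx region predicts the whole region from the
+ // sub-block modes read above, then applies the single shared transform.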
+ if (supertx_enabled && read_token) {
+ uint8_t *dst_buf[3];
+ int dst_stride[3], i;
+
+ vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ dst_buf[i] = xd->plane[i].dst.buf;
+ dst_stride[i] = xd->plane[i].dst.stride;
+ }
+ dec_predict_sb_complex(cm, xd, tile, mi_row, mi_col, mi_row, mi_col,
+ bsize, bsize, dst_buf, dst_stride);
+
+ if (!skip) {
+ int eobtotal = 0;
+ struct inter_args arg = { cm, xd, r, &eobtotal };
+ set_offsets_topblock(cm, xd, tile, bsize, mi_row, mi_col);
+#if CONFIG_EXT_TX
+ xd->mi[0].mbmi.ext_txfrm = txfm;
+#endif
+ vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
+ if (!(subsize < BLOCK_8X8) && eobtotal == 0)
+ skip = 1;
+ }
+ set_param_topblock(cm, xd, bsize, mi_row, mi_col,
+#if CONFIG_EXT_TX
+ txfm,
+#endif
+ skip);
+ }
+#endif // CONFIG_SUPERTX
+
// update partition context
if (bsize >= BLOCK_8X8 &&
(bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
vp9_zero(tile_data->xd.left_seg_context);
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += MI_BLOCK_SIZE) {
- decode_partition(tile_data->cm, &tile_data->xd, &tile, mi_row, mi_col,
+ decode_partition(tile_data->cm, &tile_data->xd, &tile,
+#if CONFIG_SUPERTX
+ 1, 0,
+#endif
+ mi_row, mi_col,
&tile_data->bit_reader, BLOCK_64X64);
}
pbi->mb.corrupted |= tile_data->xd.corrupted;
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE) {
decode_partition(tile_data->cm, &tile_data->xd, tile,
+#if CONFIG_SUPERTX
+ 1, 0,
+#endif
mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64);
}
}
return segment_id;
}
+#if CONFIG_SUPERTX
+int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd,
+#else
static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd,
+#endif
int segment_id, vp9_reader *r) {
if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
MACROBLOCKD *const xd,
const TileInfo *const tile,
MODE_INFO *const mi,
+#if CONFIG_SUPERTX && CONFIG_EXT_TX
+ int supertx_enabled,
+#endif
int mi_row, int mi_col, vp9_reader *r) {
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
static void read_inter_frame_mode_info(VP9_COMMON *const cm,
MACROBLOCKD *const xd,
const TileInfo *const tile,
+#if CONFIG_SUPERTX
+ int supertx_enabled,
+#endif
int mi_row, int mi_col, vp9_reader *r) {
MODE_INFO *const mi = xd->mi[0].src_mi;
MB_MODE_INFO *const mbmi = &mi->mbmi;
mbmi->mv[0].as_int = 0;
mbmi->mv[1].as_int = 0;
- mbmi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r);
- mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
+#if CONFIG_SUPERTX
+ if (!supertx_enabled) {
+#endif
+ mbmi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r);
+ mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
#if CONFIG_TX_SKIP
- mbmi->tx_skip[0] = vp9_read_bit(r);
- mbmi->tx_skip[1] = vp9_read_bit(r);
+ mbmi->tx_skip[0] = vp9_read_bit(r);
+ mbmi->tx_skip[1] = vp9_read_bit(r);
#endif
- inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r);
- mbmi->tx_size = read_tx_size(cm, xd, cm->tx_mode, mbmi->sb_type,
- !mbmi->skip || !inter_block, r);
+ inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r);
+ mbmi->tx_size = read_tx_size(cm, xd, cm->tx_mode, mbmi->sb_type,
+ !mbmi->skip || !inter_block, r);
#if CONFIG_EXT_TX
- if (inter_block &&
- mbmi->tx_size <= TX_16X16 &&
- cm->base_qindex > 0 &&
- mbmi->sb_type >= BLOCK_8X8 &&
+ if (inter_block &&
+ mbmi->tx_size <= TX_16X16 &&
+ cm->base_qindex > 0 &&
+ mbmi->sb_type >= BLOCK_8X8 &&
+#if CONFIG_SUPERTX
+ !supertx_enabled &&
+#endif
!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) &&
!mbmi->skip) {
- mbmi->ext_txfrm = vp9_read_tree(r, vp9_ext_tx_tree,
- cm->fc.ext_tx_prob[mbmi->tx_size]);
- if (!cm->frame_parallel_decoding_mode)
- ++cm->counts.ext_tx[mbmi->tx_size][mbmi->ext_txfrm];
+ mbmi->ext_txfrm = vp9_read_tree(r, vp9_ext_tx_tree,
+ cm->fc.ext_tx_prob[mbmi->tx_size]);
+ if (!cm->frame_parallel_decoding_mode)
+ ++cm->counts.ext_tx[mbmi->tx_size][mbmi->ext_txfrm];
+ } else {
+ mbmi->ext_txfrm = NORM;
+ }
+#endif // CONFIG_EXT_TX
+#if CONFIG_SUPERTX
} else {
- mbmi->ext_txfrm = NORM;
+ const int ctx = vp9_get_intra_inter_context(xd);
+ mbmi->segment_id = 0;
+ inter_block = 1;
+ if (!cm->frame_parallel_decoding_mode)
+ ++cm->counts.intra_inter[ctx][1];
}
-#endif
+#endif // CONFIG_SUPERTX
- if (inter_block)
- read_inter_block_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);
- else
+ if (inter_block) {
+ read_inter_block_mode_info(cm, xd, tile, mi,
+#if CONFIG_SUPERTX && CONFIG_EXT_TX
+ supertx_enabled,
+#endif
+ mi_row, mi_col, r);
+ } else {
read_intra_block_mode_info(cm, mi,
#if CONFIG_FILTERINTRA
- xd,
+ xd,
#endif
- r);
+ r);
+ }
}
void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
const TileInfo *const tile,
+#if CONFIG_SUPERTX
+ int supertx_enabled,
+#endif
int mi_row, int mi_col, vp9_reader *r) {
if (frame_is_intra_only(cm))
read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r);
else
- read_inter_frame_mode_info(cm, xd, tile, mi_row, mi_col, r);
+ read_inter_frame_mode_info(cm, xd, tile,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col, r);
}
void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
const struct TileInfo *const tile,
+#if CONFIG_SUPERTX
+ int supertx_enabled,
+#endif
int mi_row, int mi_col, vp9_reader *r);
+#if CONFIG_SUPERTX
+int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd,
+ int segment_id, vp9_reader *r);
+#endif
#ifdef __cplusplus
} // extern "C"
#endif
static struct vp9_token ext_tx_encodings[EXT_TX_TYPES];
#endif
+#if CONFIG_SUPERTX
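+// A block was coded with supertx iff it is smaller than bsize but carries
+// the transform size of the whole bsize region.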
+static int vp9_check_supertx(VP9_COMMON *cm, int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ MODE_INFO *mi;
+
+ mi = cm->mi + (mi_row * cm->mi_stride + mi_col);
+
+ return mi[0].mbmi.tx_size == bsize_to_tx_size(bsize) &&
+ mi[0].mbmi.sb_type < bsize;
+}
+#endif // CONFIG_SUPERTX
+
void vp9_entropy_mode_init() {
vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree);
vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree);
}
static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
+#if CONFIG_SUPERTX
+ int supertx_enabled,
+#endif
vp9_writer *w) {
VP9_COMMON *const cm = &cpi->common;
const nmv_context *nmvc = &cm->fc.nmvc;
}
}
+#if CONFIG_SUPERTX
+ if (supertx_enabled)
+ skip = mbmi->skip;
+ else
+ skip = write_skip(cm, xd, segment_id, mi, w);
+#else
skip = write_skip(cm, xd, segment_id, mi, w);
+#endif // CONFIG_SUPERTX
+
#if CONFIG_TX_SKIP
vp9_write_bit(w, mbmi->tx_skip[0]);
vp9_write_bit(w, mbmi->tx_skip[1]);
#endif
+#if CONFIG_SUPERTX
+ if (!supertx_enabled) {
+#endif
if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));
+#if CONFIG_SUPERTX
+ }
+#endif
if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
+#if CONFIG_SUPERTX
+ !supertx_enabled &&
+#endif
!(is_inter &&
(skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w);
}
#if CONFIG_EXT_TX
- if (is_inter &&
- mbmi->tx_size < TX_32X32 &&
- cm->base_qindex > 0 &&
- bsize >= BLOCK_8X8 &&
- !mbmi->skip &&
- !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- vp9_write_token(w, vp9_ext_tx_tree, cm->fc.ext_tx_prob[mbmi->tx_size],
- &ext_tx_encodings[mbmi->ext_txfrm]);
- }
+ if (is_inter &&
+ mbmi->tx_size < TX_32X32 &&
+ cm->base_qindex > 0 &&
+ bsize >= BLOCK_8X8 &&
+#if CONFIG_SUPERTX
+ !supertx_enabled &&
#endif
+ !mbmi->skip &&
+ !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ vp9_write_token(w, vp9_ext_tx_tree, cm->fc.ext_tx_prob[mbmi->tx_size],
+ &ext_tx_encodings[mbmi->ext_txfrm]);
+ }
+#endif // CONFIG_EXT_TX
if (!is_inter) {
if (bsize >= BLOCK_8X8) {
static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
vp9_writer *w, TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end,
+#if CONFIG_SUPERTX
+ int supertx_enabled,
+#endif
int mi_row, int mi_col) {
const VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
if (frame_is_intra_only(cm)) {
write_mb_modes_kf(cm, xd, xd->mi, w);
} else {
- pack_inter_mode_mvs(cpi, m, w);
+ pack_inter_mode_mvs(cpi, m,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ w);
}
+#if CONFIG_SUPERTX
+ if (!supertx_enabled) {
+#endif
assert(*tok < tok_end);
pack_mb_tokens(w, tok, tok_end, cm->bit_depth);
+#if CONFIG_SUPERTX
+ }
+#endif
}
static void write_partition(const VP9_COMMON *const cm,
static void write_modes_sb(VP9_COMP *cpi,
const TileInfo *const tile, vp9_writer *w,
TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
+#if CONFIG_SUPERTX
+ int pack_token, int supertx_enabled,
+#endif
int mi_row, int mi_col, BLOCK_SIZE bsize) {
const VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const int bs = (1 << bsl) / 4;
PARTITION_TYPE partition;
BLOCK_SIZE subsize;
- const MODE_INFO *m = NULL;
+ MODE_INFO *m = NULL;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
partition = partition_lookup[bsl][m->mbmi.sb_type];
write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
subsize = get_subsize(bsize, partition);
+#if CONFIG_SUPERTX
+ xd->mi = m;
+ set_mi_row_col(xd, tile,
+ mi_row, num_8x8_blocks_high_lookup[bsize],
+ mi_col, num_8x8_blocks_wide_lookup[bsize],
+ cm->mi_rows, cm->mi_cols);
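+ // Mirror the decoder's supertx signalling: whether supertx was chosen is
+ // recovered from the stored tx_size matching this node's full size.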
+ if (!supertx_enabled && cm->frame_type != KEY_FRAME &&
+ partition != PARTITION_NONE && bsize <= MAX_SUPERTX_BLOCK_SIZE) {
+ TX_SIZE supertx_size = bsize_to_tx_size(bsize);
+ vp9_prob prob = partition == PARTITION_SPLIT ?
+ cm->fc.supertxsplit_prob[supertx_size] :
+ cm->fc.supertx_prob[supertx_size];
+ supertx_enabled = (xd->mi[0].mbmi.tx_size == supertx_size);
+ vp9_write(w, supertx_enabled, prob);
+ if (supertx_enabled) {
+ vp9_write(w, xd->mi[0].mbmi.skip, vp9_get_skip_prob(cm, xd));
+#if CONFIG_EXT_TX
+ if (supertx_size <= TX_16X16 && !xd->mi[0].mbmi.skip)
+ vp9_write_token(w, vp9_ext_tx_tree, cm->fc.ext_tx_prob[supertx_size],
+ &ext_tx_encodings[xd->mi[0].mbmi.ext_txfrm]);
+#endif
+ }
+ }
+#endif // CONFIG_SUPERTX
if (subsize < BLOCK_8X8) {
- write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col);
} else {
switch (partition) {
case PARTITION_NONE:
- write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col);
break;
case PARTITION_HORZ:
- write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col);
if (mi_row + bs < cm->mi_rows)
- write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
+ write_modes_b(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row + bs, mi_col);
break;
case PARTITION_VERT:
- write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col);
if (mi_col + bs < cm->mi_cols)
- write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
+ write_modes_b(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ supertx_enabled,
+#endif
+ mi_row, mi_col + bs);
break;
case PARTITION_SPLIT:
- write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
- write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
- subsize);
- write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
- subsize);
- write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
- subsize);
+ write_modes_sb(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ !supertx_enabled, supertx_enabled,
+#endif
+ mi_row, mi_col, subsize);
+ write_modes_sb(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ !supertx_enabled, supertx_enabled,
+#endif
+ mi_row, mi_col + bs, subsize);
+ write_modes_sb(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ !supertx_enabled, supertx_enabled,
+#endif
+ mi_row + bs, mi_col, subsize);
+ write_modes_sb(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ !supertx_enabled, supertx_enabled,
+#endif
+ mi_row + bs, mi_col + bs, subsize);
break;
default:
assert(0);
}
}
+#if CONFIG_SUPERTX
+ if (partition != PARTITION_NONE && supertx_enabled && pack_token) {
+ assert(*tok < tok_end);
+ pack_mb_tokens(w, tok, tok_end, cm->bit_depth);
+ }
+#endif
// update partition context
if (bsize >= BLOCK_8X8 &&
vp9_zero(cpi->mb.e_mbd.left_seg_context);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE)
- write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
- BLOCK_64X64);
+ write_modes_sb(cpi, tile, w, tok, tok_end,
+#if CONFIG_SUPERTX
+ 1, 0,
+#endif
+ mi_row, mi_col, BLOCK_64X64);
}
}
#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
+#if CONFIG_SUPERTX
+#include "vp9/encoder/vp9_cost.h"
+#endif
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
int mi_row, int mi_col, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx);
+#if CONFIG_SUPERTX
+static int check_intra_b(PICK_MODE_CONTEXT *ctx);
+
+static int check_intra_sb(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ PC_TREE *pc_tree);
+static void predict_superblock(VP9_COMP *cpi, int mi_row_ori, int mi_col_ori,
+ BLOCK_SIZE bsize);
+static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size,
+ PC_TREE *pc_tree);
+static void predict_sb_complex(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ int output_enabled, BLOCK_SIZE bsize,
+ BLOCK_SIZE top_bsize,
+ uint8_t *dst_buf[3], int dst_stride[3],
+ PC_TREE *pc_tree);
+static void update_state_sb_supertx(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize,
+ int output_enabled, PC_TREE *pc_tree);
+static void rd_supertx_sb(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int *tmp_rate, int64_t *tmp_dist,
+#if CONFIG_EXT_TX
+ EXT_TX_TYPE *best_tx,
+#endif
+ PC_TREE *pc_tree);
+#endif // CONFIG_SUPERTX
+
// Motion vector component magnitude threshold for defining fast motion.
#define FAST_MOTION_MV_THRESH 24
}
}
+#if CONFIG_SUPERTX
+static void set_offsets_supertx(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col, BLOCK_SIZE bsize) {
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ const int mi_height = num_8x8_blocks_high_lookup[bsize];
+
+ set_modeinfo_offsets(cm, xd, mi_row, mi_col);
+
+ // Set up distance of MB to edge of frame in 1/8th pel units.
+ assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
+ set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
+ cm->mi_rows, cm->mi_cols);
+}
+
+static void set_offsets_extend(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ BLOCK_SIZE bsize, BLOCK_SIZE top_bsize) {
+ MACROBLOCK *const x = &cpi->mb;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi;
+ const int mi_width = num_8x8_blocks_wide_lookup[top_bsize];
+ const int mi_height = num_8x8_blocks_high_lookup[top_bsize];
+ const struct segmentation *const seg = &cm->seg;
+
+ set_modeinfo_offsets(cm, xd, mi_row, mi_col);
+
+ mbmi = &xd->mi[0].src_mi->mbmi;
+
+ // Set up limit values for MV components.
+ // MVs beyond this range do not produce a new/different prediction block.
+ x->mv_row_min = -(((mi_row_ori + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
+ x->mv_col_min = -(((mi_col_ori + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
+ x->mv_row_max = (cm->mi_rows - mi_row_ori) * MI_SIZE + VP9_INTERP_EXTEND;
+ x->mv_col_max = (cm->mi_cols - mi_col_ori) * MI_SIZE + VP9_INTERP_EXTEND;
+
+ // Set up distance of MB to edge of frame in 1/8th pel units.
+ assert(!(mi_col_ori & (mi_width - 1)) && !(mi_row_ori & (mi_height - 1)));
+ set_mi_row_col(xd, tile, mi_row_ori, mi_height, mi_col_ori, mi_width,
+ cm->mi_rows, cm->mi_cols);
+ xd->up_available = (mi_row != 0);
+ xd->left_available = (mi_col > tile->mi_col_start);
+
+ // R/D setup.
+ x->rddiv = cpi->rd.RDDIV;
+ x->rdmult = cpi->rd.RDMULT;
+
+ // Setup segment ID.
+ if (seg->enabled) {
+ if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
+ const uint8_t *const map = seg->update_map ? cpi->segmentation_map
+ : cm->last_frame_seg_map;
+ mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
+ }
+ vp9_init_plane_quantizers(cpi, x);
+
+ x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
+ } else {
+ mbmi->segment_id = 0;
+ x->encode_breakout = cpi->encode_breakout;
+ }
+}
+#endif // CONFIG_SUPERTX
+
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
BLOCK_SIZE bsize) {
const int mi_height = num_8x8_blocks_high_lookup[bsize];
int max_plane;
+#if !CONFIG_SUPERTX
assert(mi->mbmi.sb_type == bsize);
+#endif
*mi_addr = *mi;
mi_addr->src_mi = mi_addr;
}
}
+#if CONFIG_SUPERTX
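+// update_state() analogue for a block inside a supertx region; restores the
+// coding context picked during the supertx RD search.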
+static void update_state_supertx(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int output_enabled) {
+ int i, y, x_idx;
+ VP9_COMMON *const cm = &cpi->common;
+ RD_OPT *const rd_opt = &cpi->rd;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *mi = &ctx->mic;
+ MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MODE_INFO *mi_addr = &xd->mi[0];
+ const struct segmentation *const seg = &cm->seg;
+ const int mis = cm->mi_stride;
+ const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ const int mi_height = num_8x8_blocks_high_lookup[bsize];
+
+ *mi_addr = *mi;
+ mi_addr->src_mi = mi_addr;
+ assert(is_inter_block(mbmi));
+
+ // If segmentation in use
+ if (seg->enabled && output_enabled) {
+ // For in frame complexity AQ copy the segment id from the segment map.
+ if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
+ const uint8_t *const map = seg->update_map ? cpi->segmentation_map
+ : cm->last_frame_seg_map;
+ mi_addr->mbmi.segment_id =
+ vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
+ } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
+ // Else for cyclic refresh mode update the segment map, set the segment id
+ // and then update the quantizer.
+ vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].mbmi,
+ mi_row, mi_col, bsize, 1);
+ vp9_init_plane_quantizers(cpi, x);
+ }
+ }
+
+ // Restore the coding context of the MB to what was in place
+ // when the mode was picked for it.
+ for (y = 0; y < mi_height; y++)
+ for (x_idx = 0; x_idx < mi_width; x_idx++)
+ if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
+ && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
+ xd->mi[x_idx + y * mis].src_mi = mi_addr;
+ }
+
+ if (cpi->oxcf.aq_mode)
+ vp9_init_plane_quantizers(cpi, x);
+
+ if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
+ mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
+ mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
+ }
+
+ x->skip = ctx->skip;
+ vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
+ sizeof(uint8_t) * ctx->num_4x4_blk);
+
+ if (!output_enabled)
+ return;
+
+ if (!frame_is_intra_only(cm)) {
+ if (is_inter_block(mbmi)) {
+ vp9_update_mv_count(cm, xd);
+
+ if (cm->interp_filter == SWITCHABLE) {
+ const int ctx = vp9_get_pred_context_switchable_interp(xd);
+ ++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
+ }
+ }
+
+ rd_opt->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
+ rd_opt->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
+ rd_opt->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
+
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
+ rd_opt->filter_diff[i] += ctx->best_filter_diff[i];
+ }
+}
+
+static void update_state_sb_supertx(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize,
+ int output_enabled, PC_TREE *pc_tree) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct macroblock_plane *const p = x->plane;
+ struct macroblockd_plane *const pd = xd->plane;
+ int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
+ PARTITION_TYPE partition = pc_tree->partitioning;
+ BLOCK_SIZE subsize = get_subsize(bsize, partition);
+ int i;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ switch (partition) {
+ case PARTITION_NONE:
+ set_offsets_supertx(cpi, tile, mi_row, mi_col, subsize);
+ update_state_supertx(cpi, &pc_tree->none, mi_row, mi_col,
+ subsize, output_enabled);
+ break;
+ case PARTITION_VERT:
+ set_offsets_supertx(cpi, tile, mi_row, mi_col, subsize);
+ update_state_supertx(cpi, &pc_tree->vertical[0], mi_row, mi_col,
+ subsize, output_enabled);
+ if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
+ set_offsets_supertx(cpi, tile, mi_row, mi_col + hbs, subsize);
+ update_state_supertx(cpi, &pc_tree->vertical[1], mi_row, mi_col + hbs,
+ subsize, output_enabled);
+ }
+ break;
+ case PARTITION_HORZ:
+ set_offsets_supertx(cpi, tile, mi_row, mi_col, subsize);
+ update_state_supertx(cpi, &pc_tree->horizontal[0], mi_row, mi_col,
+ subsize, output_enabled);
+ if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
+ set_offsets_supertx(cpi, tile, mi_row + hbs, mi_col, subsize);
+ update_state_supertx(cpi, &pc_tree->horizontal[1], mi_row + hbs, mi_col,
+ subsize, output_enabled);
+ }
+ break;
+ case PARTITION_SPLIT:
+ if (bsize == BLOCK_8X8) {
+ set_offsets_supertx(cpi, tile, mi_row, mi_col, subsize);
+ update_state_supertx(cpi, pc_tree->leaf_split[0], mi_row, mi_col,
+ subsize, output_enabled);
+ } else {
+ set_offsets_supertx(cpi, tile, mi_row, mi_col, subsize);
+ update_state_sb_supertx(cpi, tile, mi_row, mi_col, subsize,
+ output_enabled, pc_tree->split[0]);
+ set_offsets_supertx(cpi, tile, mi_row, mi_col + hbs, subsize);
+ update_state_sb_supertx(cpi, tile, mi_row, mi_col + hbs, subsize,
+ output_enabled, pc_tree->split[1]);
+ set_offsets_supertx(cpi, tile, mi_row + hbs, mi_col, subsize);
+ update_state_sb_supertx(cpi, tile, mi_row + hbs, mi_col, subsize,
+ output_enabled, pc_tree->split[2]);
+ set_offsets_supertx(cpi, tile, mi_row + hbs, mi_col + hbs, subsize);
+ update_state_sb_supertx(cpi, tile, mi_row + hbs, mi_col + hbs, subsize,
+ output_enabled, pc_tree->split[3]);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
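+ // Route the per-plane coefficient buffers to those owned by the top-level
+ // context (pc_tree->none), so the supertx transform over the whole block
+ // is coded out of a single shared buffer set.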
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ p[i].coeff = pc_tree->none.coeff_pbuf[i][1];
+ p[i].qcoeff = pc_tree->none.qcoeff_pbuf[i][1];
+ pd[i].dqcoeff = pc_tree->none.dqcoeff_pbuf[i][1];
+ p[i].eobs = pc_tree->none.eobs_pbuf[i][1];
+ }
+}
+
+static void update_supertx_param(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
+#if CONFIG_EXT_TX
+ int best_tx,
+#endif
+ TX_SIZE supertx_size) {
+ MACROBLOCK *const x = &cpi->mb;
+
+ ctx->mic.mbmi.tx_size = supertx_size;
+ vpx_memcpy(ctx->zcoeff_blk, x->zcoeff_blk[supertx_size],
+ sizeof(uint8_t) * ctx->num_4x4_blk);
+ ctx->skip = x->skip;
+#if CONFIG_EXT_TX
+ ctx->mic.mbmi.ext_txfrm = best_tx;
+#endif
+}
+
+static void update_supertx_param_sb(VP9_COMP *cpi, int mi_row, int mi_col,
+ BLOCK_SIZE bsize,
+#if CONFIG_EXT_TX
+ int best_tx,
+#endif
+ TX_SIZE supertx_size, PC_TREE *pc_tree) {
+ VP9_COMMON *const cm = &cpi->common;
+ int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
+ PARTITION_TYPE partition = pc_tree->partitioning;
+ BLOCK_SIZE subsize = get_subsize(bsize, partition);
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ switch (partition) {
+ case PARTITION_NONE:
+ update_supertx_param(cpi, &pc_tree->none,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size);
+ break;
+ case PARTITION_VERT:
+ update_supertx_param(cpi, &pc_tree->vertical[0],
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size);
+ if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8)
+ update_supertx_param(cpi, &pc_tree->vertical[1],
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size);
+ break;
+ case PARTITION_HORZ:
+ update_supertx_param(cpi, &pc_tree->horizontal[0],
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size);
+ if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8)
+ update_supertx_param(cpi, &pc_tree->horizontal[1],
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size);
+ break;
+ case PARTITION_SPLIT:
+ if (bsize == BLOCK_8X8) {
+ update_supertx_param(cpi, pc_tree->leaf_split[0],
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size);
+ } else {
+ update_supertx_param_sb(cpi, mi_row, mi_col, subsize,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size, pc_tree->split[0]);
+ update_supertx_param_sb(cpi, mi_row, mi_col + hbs, subsize,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size, pc_tree->split[1]);
+ update_supertx_param_sb(cpi, mi_row + hbs, mi_col, subsize,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size, pc_tree->split[2]);
+ update_supertx_param_sb(cpi, mi_row + hbs, mi_col + hbs, subsize,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size, pc_tree->split[3]);
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+#endif // CONFIG_SUPERTX
+
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, int mi_col, RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+ int *totalrate_nocoef,
+#endif
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd) {
VP9_COMMON *const cm = &cpi->common;
// as a predictor for MBs that follow in the SB
if (frame_is_intra_only(cm)) {
vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
+#if CONFIG_SUPERTX
+ *totalrate_nocoef = 0;
+#endif
} else {
if (bsize >= BLOCK_8X8) {
- if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
+ if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
vp9_rd_pick_inter_mode_sb_seg_skip(cpi, x, rd_cost, bsize,
ctx, best_rd);
- else
- vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col,
- rd_cost, bsize, ctx, best_rd);
+#if CONFIG_SUPERTX
+ *totalrate_nocoef = rd_cost->rate;
+#endif
+ } else {
+ vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col, rd_cost,
+#if CONFIG_SUPERTX
+ totalrate_nocoef,
+#endif
+ bsize, ctx, best_rd);
+ }
} else {
vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, rd_cost,
+#if CONFIG_SUPERTX
+ totalrate_nocoef,
+#endif
bsize, ctx, best_rd);
}
}
vp9_clear_system_state();
rd_cost->rate = (int)round(rd_cost->rate * rdmult_ratio);
rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
+#if CONFIG_SUPERTX
+ *totalrate_nocoef = (int)round(*totalrate_nocoef * rdmult_ratio);
+#endif
}
x->rdmult = orig_rdmult;
if (output_enabled && bsize != BLOCK_4X4)
cm->counts.partition[ctx][partition]++;
+#if CONFIG_SUPERTX
+ if (cm->frame_type != KEY_FRAME &&
+ bsize <= MAX_SUPERTX_BLOCK_SIZE &&
+ partition != PARTITION_NONE) {
+ int supertx_enabled;
+ TX_SIZE supertx_size = bsize_to_tx_size(bsize);
+ supertx_enabled = check_supertx_sb(bsize, supertx_size, pc_tree);
+ if (supertx_enabled) {
+ const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ const int mi_height = num_8x8_blocks_high_lookup[bsize];
+ int x_idx, y_idx, i;
+ uint8_t *dst_buf[3];
+ int dst_stride[3];
+ set_skip_context(xd, mi_row, mi_col);
+ set_modeinfo_offsets(cm, xd, mi_row, mi_col);
+ update_state_sb_supertx(cpi, tile, mi_row, mi_col, bsize,
+ output_enabled, pc_tree);
+
+ vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm),
+ mi_row, mi_col);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ dst_buf[i] = xd->plane[i].dst.buf;
+ dst_stride[i] = xd->plane[i].dst.stride;
+ }
+ predict_sb_complex(cpi, tile, mi_row, mi_col, mi_row, mi_col,
+ output_enabled, bsize, bsize,
+ dst_buf, dst_stride, pc_tree);
+
+ set_offsets(cpi, tile, mi_row, mi_col, bsize);
+ if (!x->skip) {
+ xd->mi[0].mbmi.skip = 1;
+ vp9_encode_sb_supertx(x, bsize);
+ vp9_tokenize_sb_supertx(cpi, tp, !output_enabled, bsize);
+ } else {
+ xd->mi[0].mbmi.skip = 1;
+ if (output_enabled)
+ cm->counts.skip[vp9_get_skip_context(xd)][1]++;
+ reset_skip_context(xd, bsize);
+ }
+ if (output_enabled) {
+ for (y_idx = 0; y_idx < mi_height; y_idx++)
+ for (x_idx = 0; x_idx < mi_width; x_idx++) {
+ if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
+ (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y_idx) {
+ xd->mi[x_idx + y_idx * cm->mi_stride].mbmi.skip =
+ xd->mi[0].mbmi.skip;
+ }
+ }
+ if (partition != PARTITION_SPLIT)
+ cm->counts.supertx[supertx_size][1]++;
+ else
+ cm->counts.supertxsplit[supertx_size][1]++;
+ cm->counts.supertx_size[supertx_size]++;
+#if CONFIG_EXT_TX
+ if (supertx_size < TX_32X32 && !xd->mi[0].mbmi.skip)
+ ++cm->counts.ext_tx[xd->mi[0].mbmi.tx_size][xd->mi[0].mbmi.ext_txfrm];
+#endif
+ (*tp)->token = EOSB_TOKEN;
+ (*tp)++;
+ }
+ if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
+ update_partition_context(xd, mi_row, mi_col, subsize, bsize);
+ return;
+ } else {
+ if (output_enabled) {
+ if (partition != PARTITION_SPLIT)
+ cm->counts.supertx[supertx_size][0]++;
+ else
+ cm->counts.supertxsplit[supertx_size][0]++;
+ }
+ }
+ }
+#endif // CONFIG_SUPERTX
+
switch (partition) {
case PARTITION_NONE:
encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
static void rd_use_partition(VP9_COMP *cpi, const TileInfo *const tile,
MODE_INFO *mi_8x8, TOKENEXTRA **tp,
int mi_row, int mi_col,
- BLOCK_SIZE bsize, int *rate, int64_t *dist,
+ BLOCK_SIZE bsize, int *rate,
+ int64_t *dist,
+#if CONFIG_SUPERTX
+ int *rate_nocoef,
+#endif
int do_recon, PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
BLOCK_SIZE bs_type = mi_8x8[0].src_mi->mbmi.sb_type;
int do_partition_search = 1;
PICK_MODE_CONTEXT *ctx = &pc_tree->none;
+#if CONFIG_SUPERTX
+ int last_part_rate_nocoef = INT_MAX;
+ int none_rate_nocoef = INT_MAX;
+ int chosen_rate_nocoef = INT_MAX;
+#endif
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
mi_row + (mi_step >> 1) < cm->mi_rows &&
mi_col + (mi_step >> 1) < cm->mi_cols) {
pc_tree->partitioning = PARTITION_NONE;
- rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rdc, bsize,
- ctx, INT64_MAX);
+ rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rdc,
+#if CONFIG_SUPERTX
+ &none_rate_nocoef,
+#endif
+ bsize, ctx, INT64_MAX);
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
none_rdc.dist);
+#if CONFIG_SUPERTX
+ none_rate_nocoef += cpi->partition_cost[pl][PARTITION_NONE];
+#endif
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
switch (partition) {
case PARTITION_NONE:
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc,
+#if CONFIG_SUPERTX
+ &last_part_rate_nocoef,
+#endif
bsize, ctx, INT64_MAX);
break;
case PARTITION_HORZ:
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc,
+#if CONFIG_SUPERTX
+ &last_part_rate_nocoef,
+#endif
subsize, &pc_tree->horizontal[0],
INT64_MAX);
if (last_part_rdc.rate != INT_MAX &&
bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
RD_COST tmp_rdc;
+#if CONFIG_SUPERTX
+ int rt_nocoef = 0;
+#endif
PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
vp9_rd_cost_init(&tmp_rdc);
update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
+#if CONFIG_SUPERTX
+ &rt_nocoef,
+#endif
subsize, &pc_tree->horizontal[1], INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp9_rd_cost_reset(&last_part_rdc);
+#if CONFIG_SUPERTX
+ last_part_rate_nocoef = INT_MAX;
+#endif
break;
}
last_part_rdc.rate += tmp_rdc.rate;
last_part_rdc.dist += tmp_rdc.dist;
last_part_rdc.rdcost += tmp_rdc.rdcost;
+#if CONFIG_SUPERTX
+ last_part_rate_nocoef += rt_nocoef;
+#endif
}
break;
case PARTITION_VERT:
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc,
+#if CONFIG_SUPERTX
+ &last_part_rate_nocoef,
+#endif
subsize, &pc_tree->vertical[0], INT64_MAX);
if (last_part_rdc.rate != INT_MAX &&
bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
RD_COST tmp_rdc;
+#if CONFIG_SUPERTX
+ int rt_nocoef = 0;
+#endif
PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
vp9_rd_cost_init(&tmp_rdc);
update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
+#if CONFIG_SUPERTX
+ &rt_nocoef,
+#endif
subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp9_rd_cost_reset(&last_part_rdc);
+#if CONFIG_SUPERTX
+ last_part_rate_nocoef = INT_MAX;
+#endif
break;
}
last_part_rdc.rate += tmp_rdc.rate;
last_part_rdc.dist += tmp_rdc.dist;
last_part_rdc.rdcost += tmp_rdc.rdcost;
+#if CONFIG_SUPERTX
+ last_part_rate_nocoef += rt_nocoef;
+#endif
}
break;
case PARTITION_SPLIT:
if (bsize == BLOCK_8X8) {
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc,
+#if CONFIG_SUPERTX
+ &last_part_rate_nocoef,
+#endif
subsize, pc_tree->leaf_split[0], INT64_MAX);
break;
}
last_part_rdc.rate = 0;
last_part_rdc.dist = 0;
last_part_rdc.rdcost = 0;
+#if CONFIG_SUPERTX
+ last_part_rate_nocoef = 0;
+#endif
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (mi_step >> 1);
int y_idx = (i >> 1) * (mi_step >> 1);
int jj = i >> 1, ii = i & 0x01;
RD_COST tmp_rdc;
+#if CONFIG_SUPERTX
+ int rt_nocoef;
+#endif
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
mi_row + y_idx, mi_col + x_idx, subsize,
&tmp_rdc.rate, &tmp_rdc.dist,
+#if CONFIG_SUPERTX
+ &rt_nocoef,
+#endif
i != 3, pc_tree->split[i]);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp9_rd_cost_reset(&last_part_rdc);
+#if CONFIG_SUPERTX
+ last_part_rate_nocoef = INT_MAX;
+#endif
break;
}
last_part_rdc.rate += tmp_rdc.rate;
last_part_rdc.dist += tmp_rdc.dist;
+#if CONFIG_SUPERTX
+ last_part_rate_nocoef += rt_nocoef;
+#endif
}
break;
default:
last_part_rdc.rate += cpi->partition_cost[pl][partition];
last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
last_part_rdc.rate, last_part_rdc.dist);
+#if CONFIG_SUPERTX
+ last_part_rate_nocoef += cpi->partition_cost[pl][partition];
+#endif
}
if (do_partition_search
BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
chosen_rdc.rate = 0;
chosen_rdc.dist = 0;
+#if CONFIG_SUPERTX
+ chosen_rate_nocoef = 0;
+#endif
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
pc_tree->partitioning = PARTITION_SPLIT;
int x_idx = (i & 1) * (mi_step >> 1);
int y_idx = (i >> 1) * (mi_step >> 1);
RD_COST tmp_rdc;
+#if CONFIG_SUPERTX
+ int rt_nocoef = 0;
+#endif
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
pc_tree->split[i]->partitioning = PARTITION_NONE;
rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
+#if CONFIG_SUPERTX
+ &rt_nocoef,
+#endif
split_subsize, &pc_tree->split[i]->none, INT64_MAX);
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp9_rd_cost_reset(&chosen_rdc);
+#if CONFIG_SUPERTX
+ chosen_rate_nocoef = INT_MAX;
+#endif
break;
}
chosen_rdc.rate += tmp_rdc.rate;
chosen_rdc.dist += tmp_rdc.dist;
+#if CONFIG_SUPERTX
+ chosen_rate_nocoef += rt_nocoef;
+#endif
if (i != 3)
encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0,
pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
split_subsize);
chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
+#if CONFIG_SUPERTX
+ chosen_rate_nocoef += cpi->partition_cost[pl][PARTITION_NONE];
+#endif
}
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (chosen_rdc.rate < INT_MAX) {
chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
chosen_rdc.rate, chosen_rdc.dist);
+#if CONFIG_SUPERTX
+ chosen_rate_nocoef += cpi->partition_cost[pl][PARTITION_SPLIT];
+#endif
}
}
if (bsize >= BLOCK_8X8)
pc_tree->partitioning = partition;
chosen_rdc = last_part_rdc;
+#if CONFIG_SUPERTX
+ chosen_rate_nocoef = last_part_rate_nocoef;
+#endif
}
// If none was better set the partitioning to that.
if (none_rdc.rdcost < chosen_rdc.rdcost) {
if (bsize >= BLOCK_8X8)
pc_tree->partitioning = PARTITION_NONE;
chosen_rdc = none_rdc;
+#if CONFIG_SUPERTX
+ chosen_rate_nocoef = none_rate_nocoef;
+#endif
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
*rate = chosen_rdc.rate;
*dist = chosen_rdc.dist;
+#if CONFIG_SUPERTX
+ *rate_nocoef = chosen_rate_nocoef;
+#endif
}
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+ int *rate_nocoef,
+#endif
int64_t best_rd, PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
int i, pl;
BLOCK_SIZE subsize;
RD_COST this_rdc, sum_rdc, best_rdc;
+#if CONFIG_SUPERTX
+ int this_rate_nocoef, sum_rate_nocoef = 0, best_rate_nocoef = INT_MAX;
+ int tmp_rate;
+ int abort_flag;
+ int64_t tmp_dist, tmp_rd;
+ PARTITION_TYPE best_partition;
+#endif
int do_split = bsize >= BLOCK_8X8;
int do_rect = 1;
// PARTITION_NONE
if (partition_none_allowed) {
- rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rdc, bsize, ctx,
- best_rdc.rdcost);
+ rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rdc,
+#if CONFIG_SUPERTX
+ &this_rate_nocoef,
+#endif
+ bsize, ctx, best_rdc.rdcost);
if (this_rdc.rate != INT_MAX) {
if (bsize >= BLOCK_8X8) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
this_rdc.rate, this_rdc.dist);
+#if CONFIG_SUPERTX
+ this_rate_nocoef += cpi->partition_cost[pl][PARTITION_NONE];
+#endif
}
if (this_rdc.rdcost < best_rdc.rdcost) {
int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
best_rdc = this_rdc;
+#if CONFIG_SUPERTX
+ best_rate_nocoef = this_rate_nocoef;
+ assert(best_rate_nocoef >= 0);
+#endif
if (bsize >= BLOCK_8X8)
pc_tree->partitioning = PARTITION_NONE;
if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
pc_tree->leaf_split[0]->pred_interp_filter =
ctx->mic.mbmi.interp_filter;
- rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize,
- pc_tree->leaf_split[0], best_rdc.rdcost);
- if (sum_rdc.rate == INT_MAX)
+#if CONFIG_SUPERTX
+ rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, &sum_rate_nocoef,
+ subsize, pc_tree->leaf_split[0], INT64_MAX);
+#else
+ rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc,
+ subsize, pc_tree->leaf_split[0], best_rdc.rdcost);
+#endif
+ if (sum_rdc.rate == INT_MAX) {
sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+ sum_rate_nocoef = INT_MAX;
+#endif
+ }
+#if CONFIG_SUPERTX
+ if (cm->frame_type != KEY_FRAME && sum_rdc.rdcost < INT64_MAX) {
+ TX_SIZE supertx_size = bsize_to_tx_size(bsize);
+ best_partition = pc_tree->partitioning;
+ pc_tree->partitioning = PARTITION_SPLIT;
+
+ sum_rdc.rate += vp9_cost_bit(cm->fc.supertxsplit_prob[supertx_size], 0);
+ sum_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+ if (is_inter_mode(pc_tree->leaf_split[0]->mic.mbmi.mode)) {
+#if CONFIG_EXT_TX
+ EXT_TX_TYPE best_tx = NORM;
+#endif
+
+ tmp_rate = sum_rate_nocoef;
+ tmp_dist = 0;
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ rd_supertx_sb(cpi, tile, mi_row, mi_col, bsize, &tmp_rate, &tmp_dist,
+#if CONFIG_EXT_TX
+ &best_tx,
+#endif
+ pc_tree);
+
+ tmp_rate += vp9_cost_bit(cm->fc.supertxsplit_prob[supertx_size], 1);
+ tmp_rd = RDCOST(x->rdmult, x->rddiv, tmp_rate, tmp_dist);
+ if (tmp_rd < sum_rdc.rdcost) {
+ sum_rdc.rdcost = tmp_rd;
+ sum_rdc.rate = tmp_rate;
+ sum_rdc.dist = tmp_dist;
+ update_supertx_param_sb(cpi, mi_row, mi_col, bsize,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size, pc_tree);
+ }
+ }
+ pc_tree->partitioning = best_partition;
+ }
+#endif // CONFIG_SUPERTX
} else {
+#if CONFIG_SUPERTX
+ for (i = 0; i < 4 && sum_rdc.rdcost < INT64_MAX; ++i) {
+#else
for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
+#endif
const int x_idx = (i & 1) * mi_step;
const int y_idx = (i >> 1) * mi_step;
load_pred_mv(x, ctx);
pc_tree->split[i]->index = i;
+#if CONFIG_SUPERTX
+ rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
+ subsize, &this_rdc, &this_rate_nocoef,
+ INT64_MAX - sum_rdc.rdcost, pc_tree->split[i]);
+#else
rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
subsize, &this_rdc,
best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
+#endif
if (this_rdc.rate == INT_MAX) {
sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+ sum_rate_nocoef = INT_MAX;
+#endif
break;
} else {
sum_rdc.rate += this_rdc.rate;
sum_rdc.dist += this_rdc.dist;
sum_rdc.rdcost += this_rdc.rdcost;
+#if CONFIG_SUPERTX
+ sum_rate_nocoef += this_rate_nocoef;
+#endif
+ }
+ }
+#if CONFIG_SUPERTX
+ if (cm->frame_type != KEY_FRAME && sum_rdc.rdcost < INT64_MAX &&
+ i == 4 && bsize <= MAX_SUPERTX_BLOCK_SIZE) {
+ TX_SIZE supertx_size = bsize_to_tx_size(bsize);
+ best_partition = pc_tree->partitioning;
+ pc_tree->partitioning = PARTITION_SPLIT;
+
+ sum_rdc.rate += vp9_cost_bit(cm->fc.supertxsplit_prob[supertx_size], 0);
+ sum_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+ if (!check_intra_sb(cpi, tile, mi_row, mi_col, bsize, pc_tree)) {
+#if CONFIG_EXT_TX
+ EXT_TX_TYPE best_tx = NORM;
+#endif
+
+ tmp_rate = sum_rate_nocoef;
+ tmp_dist = 0;
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ rd_supertx_sb(cpi, tile, mi_row, mi_col, bsize, &tmp_rate, &tmp_dist,
+#if CONFIG_EXT_TX
+ &best_tx,
+#endif
+ pc_tree);
+
+ tmp_rate += vp9_cost_bit(cm->fc.supertxsplit_prob[supertx_size], 1);
+ tmp_rd = RDCOST(x->rdmult, x->rddiv, tmp_rate, tmp_dist);
+ if (tmp_rd < sum_rdc.rdcost) {
+ sum_rdc.rdcost = tmp_rd;
+ sum_rdc.rate = tmp_rate;
+ sum_rdc.dist = tmp_dist;
+ update_supertx_param_sb(cpi, mi_row, mi_col, bsize,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size, pc_tree);
+ }
}
+ pc_tree->partitioning = best_partition;
}
+#endif // CONFIG_SUPERTX
}
if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
sum_rdc.rate, sum_rdc.dist);
+#if CONFIG_SUPERTX
+ sum_rate_nocoef += cpi->partition_cost[pl][PARTITION_SPLIT];
+#endif
if (sum_rdc.rdcost < best_rdc.rdcost) {
best_rdc = sum_rdc;
+#if CONFIG_SUPERTX
+ best_rate_nocoef = sum_rate_nocoef;
+ assert(best_rate_nocoef >= 0);
+#endif
pc_tree->partitioning = PARTITION_SPLIT;
}
} else {
partition_none_allowed)
pc_tree->horizontal[0].pred_interp_filter =
ctx->mic.mbmi.interp_filter;
- rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize,
- &pc_tree->horizontal[0], best_rdc.rdcost);
+ rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc,
+#if CONFIG_SUPERTX
+ &sum_rate_nocoef,
+#endif
+ subsize, &pc_tree->horizontal[0], best_rdc.rdcost);
+#if CONFIG_SUPERTX
+ abort_flag = (sum_rdc.rdcost >= best_rd && bsize > BLOCK_8X8) ||
+ (sum_rdc.rate == INT_MAX && bsize == BLOCK_8X8);
+#endif
if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
bsize > BLOCK_8X8) {
partition_none_allowed)
pc_tree->horizontal[1].pred_interp_filter =
ctx->mic.mbmi.interp_filter;
+#if CONFIG_SUPERTX
+ rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rdc,
+ &this_rate_nocoef,
+ subsize, &pc_tree->horizontal[1],
+ INT64_MAX);
+#else
rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rdc,
subsize, &pc_tree->horizontal[1],
best_rdc.rdcost - sum_rdc.rdcost);
+#endif
if (this_rdc.rate == INT_MAX) {
sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+ sum_rate_nocoef = INT_MAX;
+#endif
} else {
sum_rdc.rate += this_rdc.rate;
sum_rdc.dist += this_rdc.dist;
sum_rdc.rdcost += this_rdc.rdcost;
+#if CONFIG_SUPERTX
+ sum_rate_nocoef += this_rate_nocoef;
+#endif
+ }
+ }
+#if CONFIG_SUPERTX
+ if (cm->frame_type != KEY_FRAME && !abort_flag &&
+ sum_rdc.rdcost < INT64_MAX && bsize <= MAX_SUPERTX_BLOCK_SIZE) {
+ TX_SIZE supertx_size = bsize_to_tx_size(bsize);
+ best_partition = pc_tree->partitioning;
+ pc_tree->partitioning = PARTITION_HORZ;
+
+ sum_rdc.rate += vp9_cost_bit(cm->fc.supertx_prob[supertx_size], 0);
+ sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+ if (!check_intra_sb(cpi, tile, mi_row, mi_col, bsize, pc_tree)) {
+#if CONFIG_EXT_TX
+ EXT_TX_TYPE best_tx = NORM;
+#endif
+
+ tmp_rate = sum_rate_nocoef;
+ tmp_dist = 0;
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ rd_supertx_sb(cpi, tile, mi_row, mi_col, bsize, &tmp_rate, &tmp_dist,
+#if CONFIG_EXT_TX
+ &best_tx,
+#endif
+ pc_tree);
+
+ tmp_rate += vp9_cost_bit(cm->fc.supertx_prob[supertx_size], 1);
+ tmp_rd = RDCOST(x->rdmult, x->rddiv, tmp_rate, tmp_dist);
+ if (tmp_rd < sum_rdc.rdcost) {
+ sum_rdc.rdcost = tmp_rd;
+ sum_rdc.rate = tmp_rate;
+ sum_rdc.dist = tmp_dist;
+ update_supertx_param_sb(cpi, mi_row, mi_col, bsize,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size, pc_tree);
+ }
}
+ pc_tree->partitioning = best_partition;
}
+#endif // CONFIG_SUPERTX
if (sum_rdc.rdcost < best_rdc.rdcost) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+#if CONFIG_SUPERTX
+ sum_rate_nocoef += cpi->partition_cost[pl][PARTITION_HORZ];
+#endif
if (sum_rdc.rdcost < best_rdc.rdcost) {
best_rdc = sum_rdc;
+#if CONFIG_SUPERTX
+ best_rate_nocoef = sum_rate_nocoef;
+ assert(best_rate_nocoef >= 0);
+#endif
pc_tree->partitioning = PARTITION_HORZ;
}
}
partition_none_allowed)
pc_tree->vertical[0].pred_interp_filter =
ctx->mic.mbmi.interp_filter;
- rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize,
- &pc_tree->vertical[0], best_rdc.rdcost);
+ rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc,
+#if CONFIG_SUPERTX
+ &sum_rate_nocoef,
+#endif
+ subsize, &pc_tree->vertical[0], best_rdc.rdcost);
+#if CONFIG_SUPERTX
+ abort_flag = (sum_rdc.rdcost >= best_rd && bsize > BLOCK_8X8) ||
+ (sum_rdc.rate == INT_MAX && bsize == BLOCK_8X8);
+#endif
if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
bsize > BLOCK_8X8) {
update_state(cpi, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
partition_none_allowed)
pc_tree->vertical[1].pred_interp_filter =
ctx->mic.mbmi.interp_filter;
+#if CONFIG_SUPERTX
+ rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rdc,
+ &this_rate_nocoef, subsize, &pc_tree->vertical[1],
+ INT64_MAX - sum_rdc.rdcost);
+#else
rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rdc, subsize,
&pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
+#endif
if (this_rdc.rate == INT_MAX) {
sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+ sum_rate_nocoef = INT_MAX;
+#endif
} else {
sum_rdc.rate += this_rdc.rate;
sum_rdc.dist += this_rdc.dist;
sum_rdc.rdcost += this_rdc.rdcost;
+#if CONFIG_SUPERTX
+ sum_rate_nocoef += this_rate_nocoef;
+#endif
+ }
+ }
+#if CONFIG_SUPERTX
+ if (cm->frame_type != KEY_FRAME && !abort_flag &&
+ sum_rdc.rdcost < INT64_MAX && bsize <= MAX_SUPERTX_BLOCK_SIZE) {
+ TX_SIZE supertx_size = bsize_to_tx_size(bsize);
+ best_partition = pc_tree->partitioning;
+ pc_tree->partitioning = PARTITION_VERT;
+
+ sum_rdc.rate += vp9_cost_bit(cm->fc.supertx_prob[supertx_size], 0);
+ sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+ if (!check_intra_sb(cpi, tile, mi_row, mi_col, bsize, pc_tree)) {
+#if CONFIG_EXT_TX
+ EXT_TX_TYPE best_tx = NORM;
+#endif
+
+ tmp_rate = sum_rate_nocoef;
+ tmp_dist = 0;
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ rd_supertx_sb(cpi, tile, mi_row, mi_col, bsize, &tmp_rate, &tmp_dist,
+#if CONFIG_EXT_TX
+ &best_tx,
+#endif
+ pc_tree);
+
+ tmp_rate += vp9_cost_bit(cm->fc.supertx_prob[supertx_size], 1);
+ tmp_rd = RDCOST(x->rdmult, x->rddiv, tmp_rate, tmp_dist);
+ if (tmp_rd < sum_rdc.rdcost) {
+ sum_rdc.rdcost = tmp_rd;
+ sum_rdc.rate = tmp_rate;
+ sum_rdc.dist = tmp_dist;
+ update_supertx_param_sb(cpi, mi_row, mi_col, bsize,
+#if CONFIG_EXT_TX
+ best_tx,
+#endif
+ supertx_size, pc_tree);
+ }
}
+ pc_tree->partitioning = best_partition;
}
+#endif // CONFIG_SUPERTX
if (sum_rdc.rdcost < best_rdc.rdcost) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
sum_rdc.rate, sum_rdc.dist);
+#if CONFIG_SUPERTX
+ sum_rate_nocoef += cpi->partition_cost[pl][PARTITION_VERT];
+#endif
if (sum_rdc.rdcost < best_rdc.rdcost) {
best_rdc = sum_rdc;
+#if CONFIG_SUPERTX
+ best_rate_nocoef = sum_rate_nocoef;
+ assert(best_rate_nocoef >= 0);
+#endif
pc_tree->partitioning = PARTITION_VERT;
}
}
// checks occur in some sub function and thus are used...
(void) best_rd;
*rd_cost = best_rdc;
-
+#if CONFIG_SUPERTX
+ *rate_nocoef = best_rate_nocoef;
+#endif
if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
pc_tree->index != 3) {
int dummy_rate;
int64_t dummy_dist;
RD_COST dummy_rdc;
+#if CONFIG_SUPERTX
+ int dummy_rate_nocoef;
+#endif
int i;
const int idx_str = cm->mi_stride * mi_row + mi_col;
set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col,
sf->always_this_block_size);
rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, cpi->pc_root);
+ &dummy_rate, &dummy_dist,
+#if CONFIG_SUPERTX
+ &dummy_rate_nocoef,
+#endif
+ 1, cpi->pc_root);
} else if (cpi->partition_search_skippable_frame) {
BLOCK_SIZE bsize;
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, cpi->pc_root);
+ &dummy_rate, &dummy_dist,
+#if CONFIG_SUPERTX
+ &dummy_rate_nocoef,
+#endif
+ 1, cpi->pc_root);
} else if (sf->partition_search_type == VAR_BASED_PARTITION &&
cm->frame_type != KEY_FRAME ) {
choose_partitioning(cpi, tile, mi_row, mi_col);
rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, cpi->pc_root);
+ &dummy_rate, &dummy_dist,
+#if CONFIG_SUPERTX
+ &dummy_rate_nocoef,
+#endif
+ 1, cpi->pc_root);
} else {
// If required set upper and lower partition size limits
if (sf->auto_min_max_partition_size) {
&sf->min_partition_size,
&sf->max_partition_size);
}
- rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rdc, INT64_MAX, cpi->pc_root);
+ rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rdc,
+#if CONFIG_SUPERTX
+ &dummy_rate_nocoef,
+#endif
+ INT64_MAX, cpi->pc_root);
}
}
}
if (count4x4_lp == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
count32x32_lp == 0 && count32x32_32x32p == 0 &&
+#if CONFIG_SUPERTX
+ cm->counts.supertx_size[TX_16X16] == 0 &&
+ cm->counts.supertx_size[TX_32X32] == 0 &&
+ cm->counts.supertx_size[TX_64X64] == 0 &&
+#endif
count64x64_64x64p == 0) {
cm->tx_mode = ALLOW_8X8;
reset_skip_tx_size(cm, TX_8X8);
} else if (count8x8_8x8p == 0 && count8x8_lp == 0 &&
count16x16_16x16p == 0 && count16x16_lp == 0 &&
count32x32_32x32p == 0 && count32x32_lp == 0 &&
+#if CONFIG_SUPERTX
+ cm->counts.supertx_size[TX_8X8] == 0 &&
+ cm->counts.supertx_size[TX_16X16] == 0 &&
+ cm->counts.supertx_size[TX_32X32] == 0 &&
+ cm->counts.supertx_size[TX_64X64] == 0 &&
+#endif
count64x64_64x64p == 0) {
cm->tx_mode = ONLY_4X4;
reset_skip_tx_size(cm, TX_4X4);
count32x32_lp == 0) {
cm->tx_mode = ALLOW_64X64;
} else if (count4x4_lp == 0 && count8x8_lp == 0 && count16x16_lp == 0 &&
+#if CONFIG_SUPERTX
+ cm->counts.supertx_size[TX_64X64] == 0 &&
+#endif
count64x64_64x64p == 0) {
cm->tx_mode = ALLOW_32X32;
reset_skip_tx_size(cm, TX_32X32);
} else if (count4x4_lp == 0 && count8x8_lp == 0 &&
count32x32_lp == 0 && count32x32_32x32p == 0 &&
+#if CONFIG_SUPERTX
+ cm->counts.supertx_size[TX_32X32] == 0 &&
+ cm->counts.supertx_size[TX_64X64] == 0 &&
+#endif
count64x64_64x64p == 0) {
cm->tx_mode = ALLOW_16X16;
reset_skip_tx_size(cm, TX_16X16);
}
if (count4x4_lp == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
+#if CONFIG_SUPERTX
+ cm->counts.supertx_size[TX_16X16] == 0 &&
+ cm->counts.supertx_size[TX_32X32] == 0 &&
+#endif
count32x32_32x32p == 0) {
cm->tx_mode = ALLOW_8X8;
reset_skip_tx_size(cm, TX_8X8);
} else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
count8x8_lp == 0 && count16x16_lp == 0 &&
+#if CONFIG_SUPERTX
+ cm->counts.supertx_size[TX_8X8] == 0 &&
+ cm->counts.supertx_size[TX_16X16] == 0 &&
+ cm->counts.supertx_size[TX_32X32] == 0 &&
+#endif
count32x32_32x32p == 0) {
cm->tx_mode = ONLY_4X4;
reset_skip_tx_size(cm, TX_4X4);
} else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4_lp == 0) {
cm->tx_mode = ALLOW_32X32;
} else if (count32x32_32x32p == 0 && count8x8_lp == 0 &&
+#if CONFIG_SUPERTX
+ cm->counts.supertx_size[TX_32X32] == 0 &&
+#endif
count4x4_lp == 0) {
cm->tx_mode = ALLOW_16X16;
reset_skip_tx_size(cm, TX_16X16);
}
}
-#endif
+#endif // CONFIG_TX64X64
} else {
cm->reference_mode = SINGLE_REFERENCE;
encode_frame_internal(cpi);
#endif
}
}
+
+#if CONFIG_SUPERTX
+static int check_intra_b(PICK_MODE_CONTEXT *ctx) {
+ return !is_inter_mode(ctx->mic.mbmi.mode);
+}
+
+static int check_intra_sb(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ PC_TREE *pc_tree) {
+ VP9_COMMON *const cm = &cpi->common;
+
+ const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE subsize = bsize;
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return 1;
+
+ if (bsize >= BLOCK_8X8)
+ subsize = get_subsize(bsize, pc_tree->partitioning);
+ else
+ subsize = BLOCK_4X4;
+
+ partition = partition_lookup[bsl][subsize];
+
+ switch (partition) {
+ case PARTITION_NONE:
+ return check_intra_b(&pc_tree->none);
+ case PARTITION_VERT:
+ if (check_intra_b(&pc_tree->vertical[0]))
+ return 1;
+ if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
+ if (check_intra_b(&pc_tree->vertical[1]))
+ return 1;
+ }
+ break;
+ case PARTITION_HORZ:
+ if (check_intra_b(&pc_tree->horizontal[0]))
+ return 1;
+ if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
+ if (check_intra_b(&pc_tree->horizontal[1]))
+ return 1;
+ }
+ break;
+ case PARTITION_SPLIT:
+ if (bsize == BLOCK_8X8) {
+ if (check_intra_b(pc_tree->leaf_split[0]))
+ return 1;
+ } else {
+ if (check_intra_sb(cpi, tile, mi_row, mi_col, subsize,
+ pc_tree->split[0]))
+ return 1;
+ if (check_intra_sb(cpi, tile, mi_row, mi_col + hbs, subsize,
+ pc_tree->split[1]))
+ return 1;
+ if (check_intra_sb(cpi, tile, mi_row + hbs, mi_col, subsize,
+ pc_tree->split[2]))
+ return 1;
+ if (check_intra_sb(cpi, tile, mi_row + hbs, mi_col + hbs, subsize,
+ pc_tree->split[3]))
+ return 1;
+ }
+ break;
+ default:
+ assert(0);
+ }
+ return 0;
+}
+
+static int check_supertx_b(TX_SIZE supertx_size, PICK_MODE_CONTEXT *ctx) {
+ return ctx->mic.mbmi.tx_size == supertx_size;
+}
+
+static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size,
+ PC_TREE *pc_tree) {
+ PARTITION_TYPE partition;
+ BLOCK_SIZE subsize;
+
+ partition = pc_tree->partitioning;
+ subsize = get_subsize(bsize, partition);
+ switch (partition) {
+ case PARTITION_NONE:
+ return check_supertx_b(supertx_size, &pc_tree->none);
+ case PARTITION_VERT:
+ return check_supertx_b(supertx_size, &pc_tree->vertical[0]);
+ case PARTITION_HORZ:
+ return check_supertx_b(supertx_size, &pc_tree->horizontal[0]);
+ case PARTITION_SPLIT:
+ if (bsize == BLOCK_8X8)
+ return check_supertx_b(supertx_size, pc_tree->leaf_split[0]);
+ else
+ return check_supertx_sb(subsize, supertx_size, pc_tree->split[0]);
+ default:
+ assert(0);
+ return 0;
+ }
+}
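
As an illustration of the check above (hypothetical values; pc_tree is the
coding-tree context used throughout this file): a 32x32 block coded as
PARTITION_HORZ qualifies for supertx only when its first 32x16 leaf was
searched with the whole-block transform size.

  TX_SIZE supertx_size = bsize_to_tx_size(BLOCK_32X32);      /* TX_32X32 */
  if (check_supertx_sb(BLOCK_32X32, supertx_size, pc_tree)) {
    /* rate a single 32x32 transform across the partition, as the callers
     * in encode_sb() and rd_supertx_sb() do */
  }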
+
+static void predict_superblock(VP9_COMP *cpi,
+ int mi_row_ori, int mi_col_ori,
+ BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *mi_8x8 = xd->mi;
+ MODE_INFO *mi = mi_8x8;
+ MB_MODE_INFO *mbmi = &mi->mbmi;
+ int ref;
+ const int is_compound = has_second_ref(mbmi);
+
+ set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
+
+ cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi,
+ cpi->zbin_mode_boost_enabled);
+ vp9_update_zbin_extra(cpi, x);
+
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
+ YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
+ mbmi->ref_frame[ref]);
+ vp9_setup_pre_planes(xd, ref, cfg, mi_row_ori, mi_col_ori,
+ &xd->block_refs[ref]->sf);
+ }
+ vp9_build_inter_predictors_sb(xd, mi_row_ori, mi_col_ori, bsize);
+}
+
+static void predict_superblock_sub8x8_extend(VP9_COMP *cpi,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ BLOCK_SIZE top_bsize,
+ PARTITION_TYPE partition) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *mi_8x8 = xd->mi;
+ MODE_INFO *mi = mi_8x8;
+ MB_MODE_INFO *mbmi = &mi->mbmi;
+ int ref;
+ const int is_compound = has_second_ref(mbmi);
+
+ set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
+
+ cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi,
+ cpi->zbin_mode_boost_enabled);
+ vp9_update_zbin_extra(cpi, x);
+
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
+ YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
+ mbmi->ref_frame[ref]);
+ vp9_setup_pre_planes(xd, ref, cfg, mi_row_ori, mi_col_ori,
+ &xd->block_refs[ref]->sf);
+ }
+ vp9_build_inter_predictors_sby_sub8x8_extend(xd, mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ top_bsize, partition);
+ vp9_build_inter_predictors_sbuv_sub8x8_extend(xd,
+ mi_row_ori, mi_col_ori,
+ top_bsize);
+}
+
+static void predict_b_sub8x8_extend(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ int output_enabled,
+ BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
+ PARTITION_TYPE partition) {
+ set_offsets_extend(cpi, tile, mi_row, mi_col, mi_row_ori, mi_col_ori,
+ bsize, top_bsize);
+ predict_superblock_sub8x8_extend(cpi, mi_row, mi_col, mi_row_ori, mi_col_ori,
+ top_bsize, partition);
+
+ if (output_enabled)
+ update_stats(&cpi->common, &cpi->mb);
+}
+
+static void predict_b_extend(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ int output_enabled,
+ BLOCK_SIZE bsize, BLOCK_SIZE top_bsize) {
+ set_offsets_extend(cpi, tile, mi_row, mi_col, mi_row_ori, mi_col_ori,
+ bsize, top_bsize);
+ predict_superblock(cpi, mi_row_ori, mi_col_ori, top_bsize);
+
+ if (output_enabled)
+ update_stats(&cpi->common, &cpi->mb);
+}
+
+// This function generates the prediction for multiple blocks, reducing the
+// discontinuity around their boundaries with smoothing masks. The basic
+// smoothing mask is a soft step function along the horz/vert direction. In
+// the more complicated case, where a block is split into 4 subblocks, the
+// basic mask is first applied to the neighboring subblocks (2 pairs) in the
+// horizontal direction, and then to the 2 masked predictions in the vertical
+// direction. If the block is split over more than one level, the masked
+// prediction at every stage is stored in dst_buf[], passed from the higher
+// level.
+static void predict_sb_complex(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col,
+ int mi_row_ori, int mi_col_ori,
+ int output_enabled, BLOCK_SIZE bsize,
+ BLOCK_SIZE top_bsize,
+ uint8_t *dst_buf[3], int dst_stride[3],
+ PC_TREE *pc_tree) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+
+ const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
+ PARTITION_TYPE partition;
+ BLOCK_SIZE subsize;
+
+ int i, ctx;
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf1,
+ MAX_MB_PLANE * MAXTXLEN * MAXTXLEN);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf2,
+ MAX_MB_PLANE * MAXTXLEN * MAXTXLEN);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, tmp_buf3,
+ MAX_MB_PLANE * MAXTXLEN * MAXTXLEN);
+ uint8_t *dst_buf1[3] = {
+ tmp_buf1,
+ tmp_buf1 + MAXTXLEN * MAXTXLEN,
+ tmp_buf1 + 2 * MAXTXLEN * MAXTXLEN};
+ uint8_t *dst_buf2[3] = {
+ tmp_buf2,
+ tmp_buf2 + MAXTXLEN * MAXTXLEN,
+ tmp_buf2 + 2 * MAXTXLEN * MAXTXLEN};
+ uint8_t *dst_buf3[3] = {
+ tmp_buf3,
+ tmp_buf3 + MAXTXLEN * MAXTXLEN,
+ tmp_buf3 + 2 * MAXTXLEN * MAXTXLEN};
+ int dst_stride1[3] = {MAXTXLEN, MAXTXLEN, MAXTXLEN};
+ int dst_stride2[3] = {MAXTXLEN, MAXTXLEN, MAXTXLEN};
+ int dst_stride3[3] = {MAXTXLEN, MAXTXLEN, MAXTXLEN};
+
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
+ return;
+
+ if (bsize >= BLOCK_8X8) {
+ ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
+ subsize = get_subsize(bsize, pc_tree->partitioning);
+ } else {
+ ctx = 0;
+ subsize = BLOCK_4X4;
+ }
+ partition = partition_lookup[bsl][subsize];
+ if (output_enabled && bsize != BLOCK_4X4 && bsize < top_bsize)
+ cm->counts.partition[ctx][partition]++;
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = dst_buf[i];
+ xd->plane[i].dst.stride = dst_stride[i];
+ }
+
+ switch (partition) {
+ case PARTITION_NONE:
+ assert(bsize < top_bsize);
+ predict_b_extend(cpi, tile, mi_row, mi_col, mi_row_ori, mi_col_ori,
+ output_enabled, bsize, top_bsize);
+ break;
+ case PARTITION_HORZ:
+ if (bsize > BLOCK_8X8) {
+ predict_b_extend(cpi, tile, mi_row, mi_col, mi_row_ori, mi_col_ori,
+ output_enabled, subsize, top_bsize);
+ } else {
+ predict_b_sub8x8_extend(cpi, tile, mi_row, mi_col,
+ mi_row_ori, mi_col_ori, output_enabled,
+ bsize, top_bsize, PARTITION_HORZ);
+ }
+ if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = tmp_buf1 + i * MAXTXLEN * MAXTXLEN;
+ xd->plane[i].dst.stride = MAXTXLEN;
+ }
+ predict_b_extend(cpi, tile, mi_row + hbs, mi_col,
+ mi_row_ori, mi_col_ori, output_enabled,
+ subsize, top_bsize);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = dst_buf[i];
+ xd->plane[i].dst.stride = dst_stride[i];
+ vp9_build_masked_inter_predictor_complex(dst_buf[i], dst_stride[i],
+ dst_buf1[i], dst_stride1[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_HORZ);
+ }
+ }
+ break;
+ case PARTITION_VERT:
+ if (bsize > BLOCK_8X8) {
+ predict_b_extend(cpi, tile, mi_row, mi_col, mi_row_ori, mi_col_ori,
+ output_enabled, subsize, top_bsize);
+ } else {
+ predict_b_sub8x8_extend(cpi, tile, mi_row, mi_col,
+ mi_row_ori, mi_col_ori, output_enabled,
+ bsize, top_bsize, PARTITION_VERT);
+ }
+ if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = tmp_buf1 + i * MAXTXLEN * MAXTXLEN;
+ xd->plane[i].dst.stride = MAXTXLEN;
+ }
+ predict_b_extend(cpi, tile, mi_row, mi_col + hbs,
+ mi_row_ori, mi_col_ori, output_enabled,
+ subsize, top_bsize);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].dst.buf = dst_buf[i];
+ xd->plane[i].dst.stride = dst_stride[i];
+ vp9_build_masked_inter_predictor_complex(dst_buf[i], dst_stride[i],
+ dst_buf1[i], dst_stride1[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_VERT);
+ }
+ }
+ break;
+ case PARTITION_SPLIT:
+ if (bsize == BLOCK_8X8) {
+ predict_b_sub8x8_extend(cpi, tile, mi_row, mi_col,
+ mi_row_ori, mi_col_ori, output_enabled,
+ bsize, top_bsize, PARTITION_SPLIT);
+ } else {
+ predict_sb_complex(cpi, tile, mi_row, mi_col,
+ mi_row_ori, mi_col_ori, output_enabled, subsize,
+ top_bsize, dst_buf, dst_stride,
+ pc_tree->split[0]);
+ if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols)
+ predict_sb_complex(cpi, tile, mi_row, mi_col + hbs,
+ mi_row_ori, mi_col_ori, output_enabled, subsize,
+ top_bsize, dst_buf1, dst_stride1,
+ pc_tree->split[1]);
+ if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols)
+ predict_sb_complex(cpi, tile, mi_row + hbs, mi_col,
+ mi_row_ori, mi_col_ori, output_enabled, subsize,
+ top_bsize, dst_buf2, dst_stride2,
+ pc_tree->split[2]);
+ if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols)
+ predict_sb_complex(cpi, tile, mi_row + hbs, mi_col + hbs,
+ mi_row_ori, mi_col_ori, output_enabled, subsize,
+ top_bsize, dst_buf3, dst_stride3,
+ pc_tree->split[3]);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
+ vp9_build_masked_inter_predictor_complex(dst_buf[i],
+ dst_stride[i],
+ dst_buf1[i],
+ dst_stride1[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_VERT);
+ if (mi_row + hbs < cm->mi_rows) {
+ vp9_build_masked_inter_predictor_complex(dst_buf2[i],
+ dst_stride2[i],
+ dst_buf3[i],
+ dst_stride3[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_VERT);
+ vp9_build_masked_inter_predictor_complex(dst_buf[i],
+ dst_stride[i],
+ dst_buf2[i],
+ dst_stride2[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_HORZ);
+ }
+ } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
+ vp9_build_masked_inter_predictor_complex(dst_buf[i],
+ dst_stride[i],
+ dst_buf2[i],
+ dst_stride2[i],
+ &xd->plane[i],
+ mi_row, mi_col,
+ mi_row_ori, mi_col_ori,
+ bsize, top_bsize,
+ PARTITION_HORZ);
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ if (bsize < top_bsize && (partition != PARTITION_SPLIT || bsize == BLOCK_8X8))
+ update_partition_context(xd, mi_row, mi_col, subsize, bsize);
+}
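
The soft-step mask itself lives in vp9_build_masked_inter_predictor_complex(),
which is not part of this hunk. As a standalone sketch of the idea in the
comment above, assuming a simple linear ramp (illustrative only, not the exact
coefficients used there):

  /* Blend one row of two predictors p0 (left half) and p1 (right half) of
   * combined width w. The weight of p1 rises from 0 to 64 across a soft
   * step of half-width ramp centered on the partition boundary. */
  static void blend_row_soft_step(const uint8_t *p0, const uint8_t *p1,
                                  uint8_t *dst, int w, int boundary,
                                  int ramp) {
    int x;
    for (x = 0; x < w; ++x) {
      int m;  /* weight of p1, in [0, 64] */
      if (x <= boundary - ramp)
        m = 0;
      else if (x >= boundary + ramp)
        m = 64;
      else
        m = ((x - (boundary - ramp)) * 64) / (2 * ramp);
      dst[x] = (uint8_t)((p0[x] * (64 - m) + p1[x] * m + 32) >> 6);
    }
  }

In the PARTITION_SPLIT case above, this kind of blend runs first across the
two vertical seams (the PARTITION_VERT calls) and then across the horizontal
seam (the PARTITION_HORZ call), matching the order of the
vp9_build_masked_inter_predictor_complex() invocations.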
+
+static void rd_supertx_sb(VP9_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int *tmp_rate, int64_t *tmp_dist,
+#if CONFIG_EXT_TX
+ EXT_TX_TYPE *best_tx,
+#endif
+ PC_TREE *pc_tree) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ int plane, pnskip, skippable, skippable_uv, rate_uv, this_rate,
+ base_rate = *tmp_rate;
+ int64_t sse, pnsse, sse_uv, this_dist, dist_uv;
+ uint8_t *dst_buf[3];
+ int dst_stride[3];
+ TX_SIZE tx_size;
+#if CONFIG_EXT_TX
+ EXT_TX_TYPE txfm, best_tx_nostx = xd->mi[0].mbmi.ext_txfrm;
+ int tmp_rate_tx = 0, skip_tx = 0;
+ int64_t tmp_dist_tx = 0, rd_tx, bestrd_tx = INT64_MAX;
+ uint8_t tmp_zcoeff_blk = 0;
+#endif
+
+ update_state_sb_supertx(cpi, tile, mi_row, mi_col, bsize, 0, pc_tree);
+ vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm),
+ mi_row, mi_col);
+ for (plane = 0; plane < MAX_MB_PLANE; plane++) {
+ dst_buf[plane] = xd->plane[plane].dst.buf;
+ dst_stride[plane] = xd->plane[plane].dst.stride;
+ }
+ predict_sb_complex(cpi, tile, mi_row, mi_col, mi_row, mi_col,
+ 0, bsize, bsize, dst_buf, dst_stride, pc_tree);
+
+ set_offsets(cpi, tile, mi_row, mi_col, bsize);
+#if CONFIG_EXT_TX
+ *best_tx = NORM;
+#endif
+
+ // chroma
+ skippable_uv = 1;
+ rate_uv = 0;
+ dist_uv = 0;
+ sse_uv = 0;
+ for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
+ tx_size = bsize_to_tx_size(bsize);
+ tx_size = get_uv_tx_size_impl(tx_size, bsize,
+ cm->subsampling_x, cm->subsampling_y);
+ vp9_subtract_plane(x, bsize, plane);
+ txfm_rd_in_plane_supertx(x, &this_rate, &this_dist, &pnskip, &pnsse,
+ INT64_MAX, plane, bsize, tx_size, 0);
+ rate_uv += this_rate;
+ dist_uv += this_dist;
+ sse_uv += pnsse;
+ skippable_uv &= pnskip;
+ }
+
+ // luma
+ tx_size = bsize_to_tx_size(bsize);
+ vp9_subtract_plane(x, bsize, 0);
+#if CONFIG_EXT_TX
+ for (txfm = NORM; txfm < EXT_TX_TYPES; txfm++) {
+ if (tx_size > TX_16X16 && txfm != NORM)
+ continue;
+
+ xd->mi[0].mbmi.ext_txfrm = txfm;
+#endif // CONFIG_EXT_TX
+ txfm_rd_in_plane_supertx(x, &this_rate, &this_dist, &pnskip, &pnsse,
+ INT64_MAX, 0, bsize, tx_size, 0);
+ *tmp_rate = rate_uv + this_rate;
+ *tmp_dist = dist_uv + this_dist;
+ sse = sse_uv + pnsse;
+ skippable = skippable_uv && pnskip;
+
+ if (skippable) {
+ *tmp_rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
+ x->skip = 1;
+ } else {
+#if CONFIG_EXT_TX
+ if (tx_size < TX_32X32)
+ *tmp_rate += cpi->ext_tx_costs[tx_size][txfm];
+#endif // CONFIG_EXT_TX
+ if (RDCOST(x->rdmult, x->rddiv, *tmp_rate, *tmp_dist)
+ < RDCOST(x->rdmult, x->rddiv, 0, sse)) {
+ *tmp_rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
+ x->skip = 0;
+ } else {
+ *tmp_dist = sse;
+ *tmp_rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
+ x->skip = 1;
+ }
+ }
+ *tmp_rate += base_rate;
+#if CONFIG_EXT_TX
+ rd_tx = RDCOST(x->rdmult, x->rddiv, *tmp_rate, *tmp_dist);
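+ // NORM seeds the search; a different transform type must beat the best RD
+ // cost so far by at least 1% to displace it.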
+ if (rd_tx < bestrd_tx * 0.99 || txfm == NORM) {
+ *best_tx = txfm;
+ bestrd_tx = rd_tx;
+ tmp_rate_tx = *tmp_rate;
+ tmp_dist_tx = *tmp_dist;
+ skip_tx = x->skip;
+ tmp_zcoeff_blk = x->zcoeff_blk[tx_size][0];
+ }
+ }
+ x->zcoeff_blk[tx_size][0] = tmp_zcoeff_blk;
+ *tmp_rate = tmp_rate_tx;
+ *tmp_dist = tmp_dist_tx;
+ x->skip = skip_tx;
+ xd->mi[0].mbmi.ext_txfrm = best_tx_nostx;
+#endif // CONFIG_EXT_TX
+}
+#endif // CONFIG_SUPERTX
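
To make the callers' decision concrete, a worked example with illustrative
numbers, assuming vp9's RDCOST(rdmult, rddiv, rate, dist) macro, i.e.
((128 + rate * rdmult) >> 8) + (dist << rddiv), with rdmult = 256 and
rddiv = 7:

  /* per-leaf transforms: rate = 900, dist = 100
   *   RDCOST = ((128 + 900 * 256) >> 8) + (100 << 7) = 900 + 12800 = 13700
   * single supertx:      rate = 700, dist = 110
   *   RDCOST = ((128 + 700 * 256) >> 8) + (110 << 7) = 700 + 14080 = 14780
   * 14780 > 13700, so the split coding is kept here; supertx is chosen only
   * when its tmp_rd comes out smaller. */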
int plane;
mbmi->skip = 1;
-
if (x->skip)
return;
}
}
+#if CONFIG_SUPERTX
+void vp9_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ struct encode_b_args arg = {x, &ctx, &mbmi->skip};
+ int plane;
+
+ mbmi->skip = 1;
+ if (x->skip)
+ return;
+
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const struct macroblockd_plane* const pd = &xd->plane[plane];
+ const BLOCK_SIZE plane_size = get_plane_block_size(bsize, pd);
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
+ vp9_subtract_plane(x, bsize, plane);
+ vp9_get_entropy_contexts(bsize, tx_size, pd,
+ ctx.ta[plane], ctx.tl[plane]);
+ encode_block(plane, 0, plane_size, tx_size, &arg);
+ }
+}
+#endif
+
static void encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, void *arg) {
struct encode_b_args* const args = arg;
#endif
void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+#if CONFIG_SUPERTX
+void vp9_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize);
+#endif
void vp9_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
MACROBLOCKD *xd = &cpi->mb.e_mbd;
struct loopfilter *lf = &cm->lf;
if (xd->lossless) {
lf->filter_level = 0;
} else {
struct vpx_usec_timer timer;
filt_best = filt_mid;
ss_err[filt_mid] = best_err;
+
while (filter_step > 0) {
const int filt_high = MIN(filt_mid + filter_step, max_filter_level);
const int filt_low = MAX(filt_mid - filter_step, min_filter_level);
filt_mid = filt_best;
}
}
-
return filt_best;
}
const int q = vp9_ac_quant(cm->base_qindex, 0, cm->bit_depth);
// These values were determined by linear fitting the result of the
// searched level, filt_guess = q * 0.316206 + 3.87252
-#if CONFIG_VP9_HIGHDEPTH
+#if CONFIG_VP9_HIGHBITDEPTH
int filt_guess;
switch (cm->bit_depth) {
case VPX_BITS_8:
{ 1, 2, 3, 4, 11, 4096 - 21, 0 },
#endif
};
+
static INLINE int cost_coeffs(MACROBLOCK *x,
int plane, int block,
ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
int pt = combine_entropy_contexts(*A, *L);
int c, cost;
// Check for consistency of tx_size with mode info
+#if !CONFIG_SUPERTX
assert(type == PLANE_TYPE_Y ? mbmi->tx_size == tx_size
: get_uv_tx_size(mbmi, pd) == tx_size);
+#endif
if (eob == 0) {
// single eob token
}
}
+#if CONFIG_SUPERTX
+void txfm_rd_in_plane(MACROBLOCK *x,
+#else
static void txfm_rd_in_plane(MACROBLOCK *x,
+#endif
int *rate, int64_t *distortion,
int *skippable, int64_t *sse,
int64_t ref_best_rd, int plane,
}
}
+#if CONFIG_SUPERTX
+void txfm_rd_in_plane_supertx(MACROBLOCK *x,
+ int *rate, int64_t *distortion,
+ int *skippable, int64_t *sse,
+ int64_t ref_best_rd, int plane,
+ BLOCK_SIZE bsize, TX_SIZE tx_size,
+ int use_fast_coef_casting) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ struct rdcost_block_args args;
+ vp9_zero(args);
+ args.x = x;
+ args.best_rd = ref_best_rd;
+ args.use_fast_coef_costing = use_fast_coef_casting;
+
+ if (plane == 0)
+ xd->mi[0].src_mi->mbmi.tx_size = tx_size;
+
+ vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+
+ args.so = get_scan(xd, tx_size, pd->plane_type, 0);
+
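+ // Unlike txfm_rd_in_plane(), which walks every transform block in the
+ // plane, the supertx variant rates a single transform (block index 0)
+ // spanning the whole plane at tx_size.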
+ block_rd_txfm(plane, 0, get_plane_block_size(bsize, pd), tx_size, &args);
+
+ if (args.skip) {
+ *rate = INT_MAX;
+ *distortion = INT64_MAX;
+ *sse = INT64_MAX;
+ *skippable = 0;
+ } else {
+ *distortion = args.this_dist;
+ *rate = args.this_rate;
+ *sse = args.this_sse;
+ *skippable = !x->plane[plane].eobs[0];
+ }
+}
+#endif // CONFIG_SUPERTX
+
static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x,
int *rate, int64_t *distortion,
int *skip, int64_t *sse,
}
}
mbmi->tx_size = cm->tx_mode == TX_MODE_SELECT ?
best_tx : MIN(max_tx_size, max_mode_tx_size);
*distortion = d[mbmi->tx_size];
*rate = r[mbmi->tx_size][cm->tx_mode == TX_MODE_SELECT];
void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
const TileInfo *const tile,
int mi_row, int mi_col,
- RD_COST *rd_cost, BLOCK_SIZE bsize,
+ RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+ int *returnrate_nocoef,
+#endif
+ BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
VP9_COMMON *const cm = &cpi->common;
}
rd_cost->rate = INT_MAX;
+#if CONFIG_SUPERTX
+ *returnrate_nocoef = INT_MAX;
+#endif
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
x->pred_mv_sad[ref_frame] = INT_MAX;
if (skippable) {
// Back out the coefficient coding costs
rate2 -= (rate_y + rate_uv);
-
+ rate_y = 0;
+ rate_uv = 0;
// Cost the skip mb case
rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
} else if (ref_frame != INTRA_FRAME && !xd->lossless) {
assert(total_sse >= 0);
rate2 -= (rate_y + rate_uv);
this_skip2 = 1;
+ rate_y = 0;
+ rate_uv = 0;
}
} else {
// Add in the cost of the no skip flag.
}
rd_cost->rate = rate2;
+#if CONFIG_SUPERTX
+ *returnrate_nocoef = rate2 - rate_y - rate_uv;
+ if (!disable_skip) {
+ *returnrate_nocoef -= vp9_cost_bit(vp9_get_skip_prob(cm, xd),
+ skippable || this_skip2);
+ }
+ *returnrate_nocoef -= vp9_cost_bit(vp9_get_intra_inter_prob(cm, xd),
+ mbmi->ref_frame[0] != INTRA_FRAME);
+#endif
rd_cost->dist = distortion2;
rd_cost->rdcost = this_rd;
best_rd = this_rd;
const TileInfo *const tile,
int mi_row, int mi_col,
RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+ int *returnrate_nocoef,
+#endif
BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
#if CONFIG_EXT_TX
mbmi->ext_txfrm = NORM;
#endif
+#if CONFIG_SUPERTX
+ best_rd_so_far = INT64_MAX;
+ best_rd = best_rd_so_far;
+ best_yrd = best_rd_so_far;
+#endif
x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
vpx_memset(x->zcoeff_blk[TX_4X4], 0, 4);
rate_uv_intra = INT_MAX;
rd_cost->rate = INT_MAX;
+#if CONFIG_SUPERTX
+ *returnrate_nocoef = INT_MAX;
+#endif
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
int i;
int this_skip2 = 0;
int64_t total_sse = INT_MAX;
+ int64_t uv_sse;
int early_term = 0;
ref_frame = vp9_ref_order[ref_index].ref_frame[0];
int64_t this_rd_thresh;
int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
- int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
+ int64_t tmp_best_distortion = INT_MAX, tmp_best_sse;
int tmp_best_skippable = 0;
int switchable_filter_index;
int_mv *second_ref = comp_pred ?
distortion2 = total_sse;
assert(total_sse >= 0);
rate2 -= (rate_y + rate_uv);
+ distortion_uv = uv_sse;
+ this_skip2 = 1;
rate_y = 0;
rate_uv = 0;
- this_skip2 = 1;
}
} else {
// Add in the cost of the no skip flag.
}
rd_cost->rate = rate2;
+#if CONFIG_SUPERTX
+ *returnrate_nocoef = rate2 - rate_y - rate_uv;
+ if (!disable_skip)
+ *returnrate_nocoef -= vp9_cost_bit(vp9_get_skip_prob(cm, xd),
+ this_skip2);
+ *returnrate_nocoef -= vp9_cost_bit(vp9_get_intra_inter_prob(cm, xd),
+ mbmi->ref_frame[0] != INTRA_FRAME);
+ assert(*returnrate_nocoef >= 0);
+#endif
rd_cost->dist = distortion2;
rd_cost->rdcost = this_rd;
best_rd = this_rd;
if (best_rd >= best_rd_so_far) {
rd_cost->rate = INT_MAX;
rd_cost->rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+ *returnrate_nocoef = INT_MAX;
+#endif
return;
}
rd_cost->rate = INT_MAX;
rd_cost->dist = INT64_MAX;
rd_cost->rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+ *returnrate_nocoef = INT_MAX;
+#endif
return;
}
const struct TileInfo *const tile,
int mi_row, int mi_col,
struct RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+ int *returnrate_nocoef,
+#endif
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far);
const struct TileInfo *const tile,
int mi_row, int mi_col,
struct RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+ int *returnrate_nocoef,
+#endif
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far);
+
+#if CONFIG_SUPERTX
+void txfm_rd_in_plane_supertx(MACROBLOCK *x,
+ int *rate, int64_t *distortion,
+ int *skippable, int64_t *sse,
+ int64_t ref_best_rd, int plane,
+ BLOCK_SIZE bsize, TX_SIZE tx_size,
+ int use_fast_coef_casting);
+void txfm_rd_in_plane(MACROBLOCK *x,
+ int *rate, int64_t *distortion,
+ int *skippable, int64_t *sse,
+ int64_t ref_best_rd, int plane,
+ BLOCK_SIZE bsize, TX_SIZE tx_size,
+ int use_fast_coef_casting);
+#endif
#ifdef __cplusplus
} // extern "C"
#endif
*t = t_backup;
}
}
+
+#if CONFIG_SUPERTX
+void vp9_tokenize_sb_supertx(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
+ BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mi[0].mbmi;
+ TOKENEXTRA *t_backup = *t;
+ const int ctx = vp9_get_skip_context(xd);
+ const int skip_inc = !vp9_segfeature_active(&cm->seg, mbmi->segment_id,
+ SEG_LVL_SKIP);
+ struct tokenize_b_args arg = {cpi, xd, t};
+ int plane;
+ if (mbmi->skip) {
+ if (!dry_run)
+ cm->counts.skip[ctx][1] += skip_inc;
+ reset_skip_context(xd, bsize);
+ if (dry_run)
+ *t = t_backup;
+ return;
+ }
+
+ if (!dry_run) {
+ cm->counts.skip[ctx][0] += skip_inc;
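+ // For the square plane sizes used here, b_width_log2_lookup[plane_size]
+ // doubles as the TX_SIZE spanning the whole plane block, so each plane is
+ // tokenized as a single transform.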
+ for (plane = 0; plane < MAX_MB_PLANE; plane++) {
+ const BLOCK_SIZE plane_size =
+ get_plane_block_size(bsize, &xd->plane[plane]);
+ tokenize_b(plane, 0, plane_size, b_width_log2_lookup[plane_size], &arg);
+ }
+ } else {
+ for (plane = 0; plane < MAX_MB_PLANE; plane++) {
+ const BLOCK_SIZE plane_size =
+ get_plane_block_size(bsize, &xd->plane[plane]);
+ set_entropy_context_b(plane, 0, plane_size,
+ b_width_log2_lookup[plane_size], &arg);
+ }
+ *t = t_backup;
+ }
+}
+#endif // CONFIG_SUPERTX
void vp9_tokenize_sb(struct VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
BLOCK_SIZE bsize);
+#if CONFIG_SUPERTX
+void vp9_tokenize_sb_supertx(struct VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
+ BLOCK_SIZE bsize);
+#endif
extern const int16_t *vp9_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to