const int set = get_ext_tx_set(tx_size, bs, is_inter);
return is_inter ? num_ext_tx_set_inter[set] : num_ext_tx_set_intra[set];
}
+
+#if CONFIG_RECT_TX
+static INLINE int is_rect_tx_allowed_bsize(BLOCK_SIZE bsize) {
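+  // 1 for each rectangular block size that has a matching rectangular
+  // transform (TX_4X8 .. TX_32X16); squares, and rectangles of 32X64 and
+  // larger, keep using square transforms.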
+ static const char LUT[BLOCK_SIZES] = {
+ 0, // BLOCK_4X4
+ 1, // BLOCK_4X8
+ 1, // BLOCK_8X4
+ 0, // BLOCK_8X8
+ 1, // BLOCK_8X16
+ 1, // BLOCK_16X8
+ 0, // BLOCK_16X16
+ 1, // BLOCK_16X32
+ 1, // BLOCK_32X16
+ 0, // BLOCK_32X32
+ 0, // BLOCK_32X64
+ 0, // BLOCK_64X32
+ 0, // BLOCK_64X64
+#if CONFIG_EXT_PARTITION
+ 0, // BLOCK_64X128
+ 0, // BLOCK_128X64
+ 0, // BLOCK_128X128
+#endif // CONFIG_EXT_PARTITION
+ };
+
+ return LUT[bsize];
+}
+
+static INLINE int is_rect_tx_allowed(const MB_MODE_INFO *mbmi) {
+ return is_inter_block(mbmi) && is_rect_tx_allowed_bsize(mbmi->sb_type);
+}
+
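+// Rectangular TX_SIZE values are enumerated after the square ones, so any
+// value of at least TX_SIZES denotes a rectangular transform.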
+static INLINE int is_rect_tx(TX_SIZE tx_size) { return tx_size >= TX_SIZES; }
+#endif // CONFIG_RECT_TX
#endif // CONFIG_EXT_TX
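+// Intra blocks are capped at the largest square transform allowed by
+// tx_mode. Inter blocks prefer the block's largest rectangular transform
+// when its enclosing square size stays within that cap.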
+static INLINE TX_SIZE tx_size_from_tx_mode(BLOCK_SIZE bsize, TX_MODE tx_mode,
+ int is_inter) {
+ const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
+ const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+ if (!is_inter) {
+ return VPXMIN(max_tx_size, largest_tx_size);
+ } else {
+ const TX_SIZE max_rect_tx_size = max_txsize_rect_lookup[bsize];
+ if (txsize_sqr_up_map[max_rect_tx_size] <= largest_tx_size) {
+ return max_rect_tx_size;
+ } else {
+ return largest_tx_size;
+ }
+ }
+#else
+ (void)is_inter;
+ return VPXMIN(max_tx_size, largest_tx_size);
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
+}
+
#if CONFIG_EXT_INTRA
#define ALLOW_FILTER_INTRA_MODES 1
#define ANGLE_STEP 3
// 4X8, 8X4, 8X8
TX_4X8, TX_8X4, TX_8X8,
// 8X16, 16X8, 16X16
- TX_8X8, TX_8X8, TX_16X16,
+ TX_8X16, TX_16X8, TX_16X16,
// 16X32, 32X16, 32X32
- TX_16X16, TX_16X16, TX_32X32,
+ TX_16X32, TX_32X16, TX_32X32,
// 32X64, 64X32, 64X64
TX_32X32, TX_32X32, TX_32X32,
#if CONFIG_EXT_PARTITION
#endif // CONFIG_EXT_PARTITION
};
#endif // CONFIG_EXT_TX
+
+// Same as "max_txsize_lookup[bsize] - TX_8X8", invalid for bsize < 8X8
+static const int32_t intra_tx_size_cat_lookup[BLOCK_SIZES] = {
+ // 4X4
+ INT32_MIN,
+ // 4X8, 8X4, 8X8
+ INT32_MIN, INT32_MIN, TX_8X8 - TX_8X8,
+ // 8X16, 16X8, 16X16
+ TX_8X8 - TX_8X8, TX_8X8 - TX_8X8, TX_16X16 - TX_8X8,
+ // 16X32, 32X16, 32X32
+ TX_16X16 - TX_8X8, TX_16X16 - TX_8X8, TX_32X32 - TX_8X8,
+ // 32X64, 64X32, 64X64
+ TX_32X32 - TX_8X8, TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
+#if CONFIG_EXT_PARTITION
+ // 64x128, 128x64, 128x128
+ TX_32X32 - TX_8X8, TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
+#endif // CONFIG_EXT_PARTITION
+};
+
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+// Same as "max_txsize_lookup[bsize] - TX_8X8", except for rectangular
+// block which may use a rectangular transform, in which case it is
+// "(max_txsize_lookup[bsize] + 1) - TX_8X8", invalid for bsize < 8X8
+static const int32_t inter_tx_size_cat_lookup[BLOCK_SIZES] = {
+ // 4X4
+ INT32_MIN,
+ // 4X8, 8X4, 8X8
+ INT32_MIN, INT32_MIN, TX_8X8 - TX_8X8,
+ // 8X16, 16X8, 16X16
+ TX_16X16 - TX_8X8, TX_16X16 - TX_8X8, TX_16X16 - TX_8X8,
+ // 16X32, 32X16, 32X32
+ TX_32X32 - TX_8X8, TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
+ // 32X64, 64X32, 64X64
+ TX_32X32 - TX_8X8, TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
+#if CONFIG_EXT_PARTITION
+ // 64x128, 128x64, 128x128
+ TX_32X32 - TX_8X8, TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
+#endif // CONFIG_EXT_PARTITION
+};
+#else
+#define inter_tx_size_cat_lookup intra_tx_size_cat_lookup
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
+
/* clang-format on */
static const TX_SIZE txsize_horz_map[TX_SIZES_ALL] = {
#else
unsigned int comp_ref[REF_CONTEXTS][COMP_REFS - 1][2];
#endif // CONFIG_EXT_REFS
+  // TODO(any): tx_size_totals is only used by the encoder to decide whether
+  // to use forward updates for the coeff probs, so it does not really belong
+  // in this structure.
unsigned int tx_size_totals[TX_SIZES];
unsigned int tx_size[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES];
#if CONFIG_VAR_TX
(void)tx_type;
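+  // Map rectangular transforms to their enclosing square size, so that
+  // TX_16X32 and TX_32X16 take the same path as TX_32X32.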
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- return tx_size == TX_32X32;
+ return txsize_sqr_up_map[tx_size] == TX_32X32;
}
#else
(void)xd;
#endif
- return tx_size == TX_32X32;
+ return txsize_sqr_up_map[tx_size] == TX_32X32;
}
#if CONFIG_EXT_TX
#endif
static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
- TX_SIZE max_tx_size, vp10_reader *r) {
+ int tx_size_cat, vp10_reader *r) {
FRAME_COUNTS *counts = xd->counts;
const int ctx = get_tx_size_context(xd);
- const int tx_size_cat = max_tx_size - TX_8X8;
int tx_size = vp10_read_tree(r, vp10_tx_size_tree[tx_size_cat],
cm->fc->tx_size_probs[tx_size_cat][ctx]);
if (counts) ++counts->tx_size[tx_size_cat][ctx][tx_size];
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
if (bsize >= BLOCK_8X8) {
- const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
if (tx_mode == TX_MODE_SELECT) {
- return read_selected_tx_size(cm, xd, max_tx_size, r);
+ const TX_SIZE tx_size =
+ read_selected_tx_size(cm, xd, intra_tx_size_cat_lookup[bsize], r);
+ assert(tx_size <= max_txsize_lookup[bsize]);
+ return tx_size;
} else {
- return VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]);
+ return tx_size_from_tx_mode(bsize, cm->tx_mode, 0);
}
} else {
return TX_4X4;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
if (bsize >= BLOCK_8X8) {
- const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
if (allow_select && tx_mode == TX_MODE_SELECT) {
- return read_selected_tx_size(cm, xd, max_tx_size, r);
+ const TX_SIZE coded_tx_size =
+ read_selected_tx_size(cm, xd, inter_tx_size_cat_lookup[bsize], r);
+#if !CONFIG_RECT_TX
+ assert(coded_tx_size <= max_txsize_lookup[bsize]);
+#else
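+      // A coded size one step above the block's largest square size signals
+      // the block's rectangular transform.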
+ if (coded_tx_size > max_txsize_lookup[bsize]) {
+ assert(coded_tx_size == max_txsize_lookup[bsize] + 1);
+ return max_txsize_rect_lookup[bsize];
+ }
+#endif // !CONFIG_RECT_TX
+ return coded_tx_size;
} else {
- TX_SIZE tx_size =
- VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]);
-#if CONFIG_EXT_TX && CONFIG_RECT_TX
- if (txsize_sqr_map[max_txsize_rect_lookup[bsize]] <= tx_size)
- tx_size = max_txsize_rect_lookup[bsize];
-#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
- return tx_size;
+ return tx_size_from_tx_mode(bsize, cm->tx_mode, 1);
}
} else {
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (inter_block) {
if (eset > 0) {
- mbmi->tx_type =
- vp10_read_tree(r, vp10_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][mbmi->tx_size]);
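+        // ext-tx probabilities for a rectangular transform are shared with
+        // the square size of its shorter dimension (txsize_sqr_map).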
+ mbmi->tx_type = vp10_read_tree(
+ r, vp10_ext_tx_inter_tree[eset],
+ cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]]);
if (counts)
- ++counts->inter_ext_tx[eset][mbmi->tx_size][mbmi->tx_type];
+ ++counts->inter_ext_tx[eset][txsize_sqr_map[mbmi->tx_size]]
+ [mbmi->tx_type];
}
} else if (ALLOW_INTRA_EXT_TX) {
if (eset > 0) {
static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
vp10_writer *w) {
- TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
- BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
- const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+ const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ const BLOCK_SIZE bsize = mbmi->sb_type;
+  // For sub8x8 blocks, the tx_size symbol does not need to be sent
if (bsize >= BLOCK_8X8) {
- vp10_write_token(
- w, vp10_tx_size_tree[max_tx_size - TX_8X8],
- cm->fc->tx_size_probs[max_tx_size - TX_8X8][get_tx_size_context(xd)],
- &tx_size_encodings[max_tx_size - TX_8X8][tx_size]);
+ const TX_SIZE tx_size = mbmi->tx_size;
+ const int is_inter = is_inter_block(mbmi);
+ const int tx_size_ctx = get_tx_size_context(xd);
+ const int tx_size_cat = is_inter ? inter_tx_size_cat_lookup[bsize]
+ : intra_tx_size_cat_lookup[bsize];
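+    // A rectangular transform is signaled as its enclosing square size,
+    // i.e. one step above the block's largest square transform.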
+ const TX_SIZE coded_tx_size = txsize_sqr_up_map[tx_size];
+
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+ assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(mbmi)));
+ assert(
+ IMPLIES(is_rect_tx(tx_size), tx_size == max_txsize_rect_lookup[bsize]));
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
+
+ vp10_write_token(w, vp10_tx_size_tree[tx_size_cat],
+ cm->fc->tx_size_probs[tx_size_cat][tx_size_ctx],
+ &tx_size_encodings[tx_size_cat][coded_tx_size]);
}
}
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
int eset = get_ext_tx_set(mbmi->tx_size, bsize, is_inter);
if (is_inter) {
+ assert(ext_tx_used_inter[eset][mbmi->tx_type]);
if (eset > 0)
- vp10_write_token(w, vp10_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][mbmi->tx_size],
- &ext_tx_inter_encodings[eset][mbmi->tx_type]);
+ vp10_write_token(
+ w, vp10_ext_tx_inter_tree[eset],
+ cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]],
+ &ext_tx_inter_encodings[eset][mbmi->tx_type]);
} else if (ALLOW_INTRA_EXT_TX) {
if (eset > 0)
vp10_write_token(
for (i = 0; i < n2; ++i) {
for (j = 0; j < n; ++j) temp_in[j] = out[j + i * n];
ht.rows(temp_in, temp_out);
- for (j = 0; j < n; ++j) output[j + i * n] = (temp_out[j] + 1) >> 1;
+ for (j = 0; j < n; ++j) output[j + i * n] = (temp_out[j] + 1) >> 2;
}
// Note: overall scale factor of transform is 8 times unitary
}
for (i = 0; i < n; ++i) {
for (j = 0; j < n2; ++j) temp_in[j] = out[j + i * n2];
ht.rows(temp_in, temp_out);
- for (j = 0; j < n2; ++j) output[j + i * n2] = (temp_out[j] + 1) >> 1;
+ for (j = 0; j < n2; ++j) output[j + i * n2] = (temp_out[j] + 1) >> 2;
}
// Note: overall scale factor of transform is 8 times unitary
}
if (output_enabled) {
if (cm->tx_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_8X8 &&
!(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
- const int ctx = get_tx_size_context(xd);
- const int tx_size_cat = max_txsize_lookup[bsize] - TX_8X8;
+ const int is_inter = is_inter_block(mbmi);
+ const int tx_size_ctx = get_tx_size_context(xd);
+ const int tx_size_cat = is_inter ? inter_tx_size_cat_lookup[bsize]
+ : intra_tx_size_cat_lookup[bsize];
+ const TX_SIZE coded_tx_size = txsize_sqr_up_map[mbmi->tx_size];
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+ assert(IMPLIES(is_rect_tx(mbmi->tx_size), is_rect_tx_allowed(mbmi)));
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
#if CONFIG_VAR_TX
- if (is_inter_block(mbmi))
+ if (is_inter)
tx_partition_count_update(cm, xd, bsize, mi_row, mi_col, td->counts);
#endif
- ++td->counts->tx_size[tx_size_cat][ctx][txsize_sqr_up_map[mbmi->tx_size]];
+ ++td->counts->tx_size[tx_size_cat][tx_size_ctx][coded_tx_size];
} else {
int x, y;
TX_SIZE tx_size;
// The new intra coding scheme requires no change of transform size
if (is_inter_block(&mi->mbmi)) {
- tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
- max_txsize_lookup[bsize]);
-#if CONFIG_EXT_TX && CONFIG_RECT_TX
- if (txsize_sqr_map[max_txsize_rect_lookup[bsize]] <= tx_size)
- tx_size = max_txsize_rect_lookup[bsize];
-#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
- if (xd->lossless[mbmi->segment_id]) tx_size = TX_4X4;
+ if (xd->lossless[mbmi->segment_id]) {
+ tx_size = TX_4X4;
+ } else {
+ tx_size = tx_size_from_tx_mode(bsize, cm->tx_mode, 1);
+ }
#if CONFIG_EXT_TX && CONFIG_RECT_TX
++td->counts->tx_size_implied[max_txsize_lookup[bsize]]
[txsize_sqr_up_map[mbmi->tx_size]];
int eset = get_ext_tx_set(mbmi->tx_size, bsize, is_inter_block(mbmi));
if (eset > 0) {
if (is_inter_block(mbmi)) {
- ++td->counts->inter_ext_tx[eset][mbmi->tx_size][mbmi->tx_type];
+ ++td->counts->inter_ext_tx[eset][txsize_sqr_map[mbmi->tx_size]]
+ [mbmi->tx_type];
} else {
++td->counts
->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode][mbmi->tx_type];
else if (cpi->refresh_alt_ref_frame)
cm->frame_context_idx = ARF_FRAME;
#else
- if (cpi->refresh_alt_ref_frame)
- cm->frame_context_idx = ARF_FRAME;
+ if (cpi->refresh_alt_ref_frame) cm->frame_context_idx = ARF_FRAME;
#endif
else if (cpi->rc.is_src_frame_alt_ref)
cm->frame_context_idx = OVERLAY_FRAME;
int64_t rd = INT64_MAX;
vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
int s0, s1;
- const TX_SIZE max_tx_size = max_txsize_lookup[bs];
- const int tx_select = cm->tx_mode == TX_MODE_SELECT;
const int is_inter = is_inter_block(mbmi);
+ const int tx_size_ctx = get_tx_size_context(xd);
+ const int tx_size_cat =
+ is_inter ? inter_tx_size_cat_lookup[bs] : intra_tx_size_cat_lookup[bs];
+ const TX_SIZE coded_tx_size = txsize_sqr_up_map[tx_size];
+ const int tx_select = cm->tx_mode == TX_MODE_SELECT;
const int r_tx_size =
- cpi->tx_size_cost[max_tx_size - TX_8X8][get_tx_size_context(xd)][tx_size];
-#if CONFIG_EXT_TX
- int ext_tx_set;
-#endif // CONFIG_EXT_TX
+ cpi->tx_size_cost[tx_size_cat][tx_size_ctx][coded_tx_size];
assert(skip_prob > 0);
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+ assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed_bsize(bs)));
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
s0 = vp10_cost_bit(skip_prob, 0);
s1 = vp10_cost_bit(skip_prob, 1);
cpi->sf.use_fast_coef_costing);
if (*r == INT_MAX) return INT64_MAX;
#if CONFIG_EXT_TX
- ext_tx_set = get_ext_tx_set(tx_size, bs, is_inter);
if (get_ext_tx_types(tx_size, bs, is_inter) > 1 &&
!xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+ const int ext_tx_set = get_ext_tx_set(tx_size, bs, is_inter);
if (is_inter) {
if (ext_tx_set > 0)
- *r +=
- cpi->inter_tx_type_costs[ext_tx_set][mbmi->tx_size][mbmi->tx_type];
+ *r += cpi->inter_tx_type_costs
+ [ext_tx_set][txsize_sqr_map[mbmi->tx_size]][mbmi->tx_type];
} else {
if (ext_tx_set > 0 && ALLOW_INTRA_EXT_TX)
*r += cpi->intra_tx_type_costs[ext_tx_set][mbmi->tx_size][mbmi->mode]
[mbmi->tx_type];
}
}
-
#else
if (tx_size < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
!FIXED_TX_TYPE) {
int start_tx, end_tx;
int64_t best_rd = INT64_MAX, last_rd = INT64_MAX;
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
- TX_SIZE best_tx = max_tx_size;
+ TX_SIZE best_tx_size = max_tx_size;
const int tx_select = cm->tx_mode == TX_MODE_SELECT;
const int is_inter = is_inter_block(mbmi);
#if CONFIG_EXT_TX
+#if CONFIG_RECT_TX
+  int evaluate_rect_tx = 0;
+#endif // CONFIG_RECT_TX
int ext_tx_set;
#endif // CONFIG_EXT_TX
if (tx_select) {
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    evaluate_rect_tx = is_rect_tx_allowed(mbmi);
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
start_tx = max_tx_size;
end_tx = 0;
} else {
const TX_SIZE chosen_tx_size =
- VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[cm->tx_mode]);
+ tx_size_from_tx_mode(bs, cm->tx_mode, is_inter);
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    evaluate_rect_tx = is_rect_tx(chosen_tx_size);
+    assert(IMPLIES(evaluate_rect_tx, is_rect_tx_allowed(mbmi)));
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
start_tx = chosen_tx_size;
end_tx = chosen_tx_size;
}
*psse = INT64_MAX;
mbmi->tx_type = tx_type;
+
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
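+  // Evaluate the block's rectangular transform first; the loop below then
+  // searches the square sizes only.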
+  if (evaluate_rect_tx) {
+ const TX_SIZE rect_tx_size = max_txsize_rect_lookup[bs];
+ const int ext_tx_set = get_ext_tx_set(rect_tx_size, bs, 1);
+ if (ext_tx_used_inter[ext_tx_set][tx_type]) {
+ rd = txfm_yrd(cpi, x, &r, &d, &s, &sse, ref_best_rd, bs, tx_type,
+ rect_tx_size);
+ best_tx_size = rect_tx_size;
+ best_rd = rd;
+ *distortion = d;
+ *rate = r;
+ *skip = s;
+ *psse = sse;
+ }
+ }
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
+
last_rd = INT64_MAX;
for (n = start_tx; n >= end_tx; --n) {
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
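+    // Rectangular sizes were evaluated above; only square sizes are searched
+    // here.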
+ if (is_rect_tx(n)) break;
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
if (FIXED_TX_TYPE && tx_type != get_default_tx_type(0, xd, 0, n)) continue;
if (!is_inter && x->use_default_intra_tx_type &&
tx_type != get_default_tx_type(0, xd, 0, n))
last_rd = rd;
if (rd < best_rd) {
- best_tx = n;
+ best_tx_size = n;
best_rd = rd;
*distortion = d;
*rate = r;
*psse = sse;
}
}
- mbmi->tx_size = best_tx;
+ mbmi->tx_size = best_tx_size;
return best_rd;
}
static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip, int64_t *sse,
int64_t ref_best_rd, BLOCK_SIZE bs) {
- const TX_SIZE max_tx_size = max_txsize_lookup[bs];
VP10_COMMON *const cm = &cpi->common;
- const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
TX_TYPE tx_type, best_tx_type = DCT_DCT;
int ext_tx_set;
#endif // CONFIG_EXT_TX
- mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
+ mbmi->tx_size = tx_size_from_tx_mode(bs, cm->tx_mode, is_inter);
#if CONFIG_EXT_TX
ext_tx_set = get_ext_tx_set(mbmi->tx_size, bs, is_inter);
tx_size == max_txsize_rect_lookup[mi->mbmi.sb_type]));
#else
assert(tx_size == TX_4X4);
-#endif
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
assert(tx_type == DCT_DCT);
vp10_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
xd->lossless[mbmi->segment_id] ? TX_4X4 : max_txsize_rect_lookup[bsize];
#else
mbmi->tx_size = TX_4X4;
-#endif
+#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
vp10_zero(*bsi);
*mbmi = best_mbmode;
#if CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX
mbmi->inter_tx_size[0][0] = mbmi->tx_size;
-#endif
+#endif  // CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX
x->skip |= best_skip2;
if (!is_inter_block(&best_mbmode)) {