Clean up the transform size and transform type selection logic: factor transform-type lookup into get_tx_type(), allow 8x8 transforms with 4x4 chroma for I8X8_PRED, and remove DEC_DEBUG dead code.
Change-Id: If2e9675459482242cf83b4f7de7634505e3f6dac
break;
}
}
+
+// Resolve the transform type to use for block |b| from the macroblock's
+// selected transform size and prediction mode. Defaults to DCT_DCT; a
+// hybrid (ADST-based) type stored in b->bmi.as_mode.tx_type is used only
+// when the matching CONFIG_HYBRIDTRANSFORM* experiment is enabled and the
+// mode/q-index conditions for that transform size are met.
+static TX_TYPE get_tx_type(MACROBLOCKD *xd, BLOCKD *b) {
+  TX_TYPE tx_type = DCT_DCT;
+#if CONFIG_HYBRIDTRANSFORM16X16
+  if (xd->mode_info_context->mbmi.txfm_size == TX_16X16) {
+    // 16x16 hybrid transform: intra modes below I8X8_PRED, low q only.
+    if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
+        xd->q_index < ACTIVE_HT16)
+      tx_type = b->bmi.as_mode.tx_type;
+    return tx_type;
+  }
+#endif
+#if CONFIG_HYBRIDTRANSFORM8X8
+  if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
+    // 8x8 hybrid transform is only used with I8X8_PRED.
+    if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
+      tx_type = b->bmi.as_mode.tx_type;
+    return tx_type;
+  }
+#endif
+#if CONFIG_HYBRIDTRANSFORM
+  if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
+    // 4x4 hybrid transform: B_PRED at low q only.
+    if (xd->mode_info_context->mbmi.mode == B_PRED &&
+        xd->q_index < ACTIVE_HT)
+      tx_type = b->bmi.as_mode.tx_type;
+    return tx_type;
+  }
+#endif
+  // Fall through when txfm_size matches none of the compiled-in cases
+  // (or when the relevant experiments are disabled). Without this return,
+  // control would reach the end of a non-void function, which is
+  // undefined behavior in C.
+  return tx_type;
+}
#endif
extern void vp8_build_block_doffsets(MACROBLOCKD *xd);
int i;
int is_4x4;
is_4x4 = (xd->mode_info_context->mbmi.mode == SPLITMV) ||
- (xd->mode_info_context->mbmi.mode == I8X8_PRED) ||
- (xd->mode_info_context->mbmi.mode == B_PRED);
+ (xd->mode_info_context->mbmi.mode == I8X8_PRED) ||
+ (xd->mode_info_context->mbmi.mode == B_PRED);
if (is_4x4) {
for (i = 0; i < 16; i++) {
#include <stdio.h>
-#ifdef DEC_DEBUG
-int dec_debug = 0;
-#endif
-
#define COEFCOUNT_TESTING
static int merge_index(int v, int n, int modulus) {
}
#endif
}
-#ifdef DEC_DEBUG
- if (dec_debug) {
- int i, j;
- printf("Generating predictors\n");
- for (i = 0; i < 16; i++) {
- for (j = 0; j < 16; j++) printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
- printf("\n");
- }
- }
-#endif
-
}
extern const int vp8_i8x8_block[4];
xd->mode_info_context->mbmi.mode == NEARMV ||
xd->mode_info_context->mbmi.mode == NEARESTMV)
xd->mode_info_context->mbmi.txfm_size = TX_16X16;
- else if (pbi->common.txfm_mode == ALLOW_8X8 &&
- xd->mode_info_context->mbmi.mode != I8X8_PRED &&
- xd->mode_info_context->mbmi.mode != B_PRED)
-#else
- if (pbi->common.txfm_mode == ALLOW_8X8 &&
- xd->mode_info_context->mbmi.mode != I8X8_PRED &&
- xd->mode_info_context->mbmi.mode != B_PRED)
+ else
#endif
+ if (pbi->common.txfm_mode == ALLOW_8X8 &&
+ xd->mode_info_context->mbmi.mode != B_PRED)
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
else
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
xd->mode_info_context->mbmi.mode == NEWMV ||
xd->mode_info_context->mbmi.mode == ZEROMV ||
xd->mode_info_context->mbmi.mode == NEARMV ||
- xd->mode_info_context->mbmi.mode == NEARESTMV) {
+ xd->mode_info_context->mbmi.mode == NEARESTMV)
xd->mode_info_context->mbmi.txfm_size = TX_16X16;
- } else if (pbi->common.txfm_mode == ALLOW_8X8 &&
- xd->mode_info_context->mbmi.mode != I8X8_PRED &&
- xd->mode_info_context->mbmi.mode != B_PRED &&
- xd->mode_info_context->mbmi.mode != SPLITMV) {
-#else
- if (pbi->common.txfm_mode == ALLOW_8X8 &&
- xd->mode_info_context->mbmi.mode != I8X8_PRED &&
- xd->mode_info_context->mbmi.mode != B_PRED &&
- xd->mode_info_context->mbmi.mode != SPLITMV) {
+ else
#endif
+ if (pbi->common.txfm_mode == ALLOW_8X8 &&
+ xd->mode_info_context->mbmi.mode != B_PRED &&
+ xd->mode_info_context->mbmi.mode != SPLITMV)
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
- }
- else {
+ else
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
- }
}
-#if CONFIG_HYBRIDTRANSFORM8X8
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
- xd->mode_info_context->mbmi.txfm_size = TX_8X8;
- }
-#endif
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
#endif
tx_type = xd->mode_info_context->mbmi.txfm_size;
+ mode = xd->mode_info_context->mbmi.mode;
+
+#if CONFIG_HYBRIDTRANSFORM
+ // parse transform types for intra 4x4 mode
+ QIndex = xd->q_index;
+ active_ht = (QIndex < ACTIVE_HT);
+ if (mode == B_PRED) {
+ for (i = 0; i < 16; i++) {
+ BLOCKD *b = &xd->block[i];
+ int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
+ if(active_ht)
+ txfm_map(b, b_mode);
+ } // loop over 4x4 blocks
+ }
+#endif
+
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (mode == I8X8_PRED) {
+ for (i = 0; i < 4; i++) {
+ int ib = vp8_i8x8_block[i];
+ BLOCKD *b = &xd->block[ib];
+ int i8x8mode = b->bmi.as_mode.first;
+ txfm_map(b, pred_mode_conv(i8x8mode));
+ }
+ }
+#endif
+
+#if CONFIG_HYBRIDTRANSFORM16X16
+ active_ht16 = (QIndex < ACTIVE_HT16);
+ if (mode < I8X8_PRED) {
+ BLOCKD *b = &xd->block[0];
+ if(active_ht16)
+ txfm_map(b, pred_mode_conv(mode));
+ }
+#endif
if (xd->mode_info_context->mbmi.mb_skip_coeff) {
vp8_reset_mb_tokens_context(xd);
eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
else
eobtotal = vp8_decode_mb_tokens(pbi, xd);
-#ifdef DEC_DEBUG
- if (dec_debug) {
- printf("\nTokens (%d)\n", eobtotal);
- for (i = 0; i < 400; i++) {
- printf("%3d ", xd->qcoeff[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("\n");
- }
-#endif
}
- mode = xd->mode_info_context->mbmi.mode;
+ //mode = xd->mode_info_context->mbmi.mode;
#if CONFIG_SWITCHABLE_INTERP
if (pbi->common.frame_type != KEY_FRAME)
vp8_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter,
}
}
-#ifdef DEC_DEBUG
- if (dec_debug) {
- int i, j;
- printf("Generating predictors\n");
- for (i = 0; i < 16; i++) {
- for (j = 0; j < 16; j++) printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
- printf("\n");
- }
- }
-#endif
-
// moved to be performed before detokenization
// if (xd->segmentation_enabled)
// mb_init_dequantizer(pbi, xd);
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
- // parse transform types for intra 4x4 mode
- QIndex = xd->q_index;
- active_ht = (QIndex < ACTIVE_HT);
- if (mode == B_PRED) {
- for (i = 0; i < 16; i++) {
- BLOCKD *b = &xd->block[i];
- int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
- if(active_ht)
- txfm_map(b, b_mode);
- } // loop over 4x4 blocks
- }
-#endif
-
-#if CONFIG_HYBRIDTRANSFORM16X16
- active_ht16 = (QIndex < ACTIVE_HT16);
-#endif
-
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
unsigned char *pre = xd->block[ib].predictor;
unsigned char *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst;
int stride = xd->dst.y_stride;
-
- tx_type = TX_4X4;
- xd->mode_info_context->mbmi.txfm_size = TX_4X4;
#endif
b = &xd->block[ib];
(b, i8x8mode, b->predictor);
#if CONFIG_HYBRIDTRANSFORM8X8
- txfm_map(b, pred_mode_conv(i8x8mode));
vp8_ht_dequant_idct_add_8x8_c(b->bmi.as_mode.tx_type,
q, dq, pre, dst, 16, stride);
q += 64;
continue; // only happens for SBs, which are already in dest buffer
#endif
DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b);
-#ifdef DEC_DEBUG
- if (dec_debug) {
- int j;
- printf("DQcoeff Haar\n");
- for (j = 0; j < 16; j++) {
- printf("%d ", b->dqcoeff[j]);
- }
- printf("\n");
- }
-#endif
IDCT_INVOKE(RTCD_VTABLE(idct), ihaar2)(&b->dqcoeff[0], b->diff, 8);
((int *)b->qcoeff)[0] = 0;// 2nd order block are set to 0 after inverse transform
((int *)b->qcoeff)[1] = 0;
#if CONFIG_SUPERBLOCKS
if (!xd->mode_info_context->mbmi.encoded_as_sb) {
#endif
- if (tx_type == TX_8X8
+ if ((tx_type == TX_8X8 &&
+ xd->mode_info_context->mbmi.mode != I8X8_PRED)
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
|| tx_type == TX_16X16
#endif
continue;
}
-#ifdef DEC_DEBUG
- dec_debug = (pc->current_video_frame == 0 && mb_row == 0 && mb_col == 0);
-#endif
-
// Set above context pointer
xd->above_context = pc->above_context + mb_col;
xd->left_context = pc->left_context + (i >> 1);
else
#endif
if (cpi->common.txfm_mode == ALLOW_8X8
- && mbmi->mode != I8X8_PRED
&& mbmi->mode != B_PRED) {
mbmi->txfm_size = TX_8X8;
cpi->t8x8_count++;
} else
#endif
if (cpi->common.txfm_mode == ALLOW_8X8
- && mbmi->mode != I8X8_PRED
&& mbmi->mode != B_PRED
&& mbmi->mode != SPLITMV) {
mbmi->txfm_size = TX_8X8;
/* test code: set transform size based on mode selection */
if (cpi->common.txfm_mode == ALLOW_8X8
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += vp8_rdcost_mby_8x8(x, 0);
- skippable = skippable && mby_is_skippable_8x8(xd);
+ skippable = skippable && mby_is_skippable_8x8(xd, 1);
}
*distortion = (d >> 2);
#if CONFIG_TX16X16
skip = mby_is_skippable_16x16(xd);
#else
- skip = mby_is_skippable_8x8(xd);
+ skip = mby_is_skippable_8x8(xd, 1);
#endif
mode_selected = mode;
#if CONFIG_COMP_INTRA_PRED
#if CONFIG_TX16X16
mb_skippable = mb_is_skippable_16x16(&x->e_mbd);
#else
- mb_skippable = mb_is_skippable_8x8(&x->e_mbd);
+ mb_skippable = mb_is_skippable_8x8(&x->e_mbd, has_y2);
#endif
} else {
#if CONFIG_TX16X16
& mby_is_skippable_16x16(&x->e_mbd);
#else
mb_skippable = uv_intra_skippable_8x8
- & mby_is_skippable_8x8(&x->e_mbd);
+ & mby_is_skippable_8x8(&x->e_mbd, has_y2);
#endif
}
} else {
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_stuff_mb_8x8(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
+void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
+ MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run);
mbuv_is_skippable(xd));
}
-int mby_is_skippable_8x8(MACROBLOCKD *xd) {
+// Returns 1 if all luma (Y) blocks of an 8x8-transform macroblock are
+// skippable. has_y2_block: nonzero when a 2nd-order (Y2) block carries
+// the DC coefficients — then each Y block may still hold just its DC
+// (eob < 2) and the Y2 block (index 24) must itself be empty.
+int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
int skip = 1;
int i = 0;
-  for (i = 0; i < 16; i += 4)
-    skip &= (xd->block[i].eob < 2);
-  skip &= (!xd->block[24].eob);
+  if (has_y2_block) {
+    for (i = 0; i < 16; i += 4)
+      skip &= (xd->block[i].eob < 2);
+    skip &= (!xd->block[24].eob);
+  } else {
+    // No Y2 block: DC lives in each Y block, so every Y block must be
+    // completely empty for the MB to be skippable.
+    for (i = 0; i < 16; i += 4)
+      skip &= (!xd->block[i].eob);
+  }
return skip;
}
return (!xd->block[16].eob) & (!xd->block[20].eob);
}
-int mb_is_skippable_8x8(MACROBLOCKD *xd) {
-  return (mby_is_skippable_8x8(xd) & mbuv_is_skippable_8x8(xd));
+// MB is skippable when both luma (8x8 check) and chroma (8x8 check)
+// are skippable.
+int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
+  return (mby_is_skippable_8x8(xd, has_y2_block) &
+          mbuv_is_skippable_8x8(xd));
+}
+
+// Variant for macroblocks coded with 8x8 luma but 4x4 chroma transforms
+// (the I8X8_PRED hybrid-transform case): chroma uses the 4x4 check.
+int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd, int has_y2_block) {
+  return (mby_is_skippable_8x8(xd, has_y2_block) &
+          mbuv_is_skippable(xd));
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
break;
#endif
case TX_8X8:
- xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd);
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
+ xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8_4x4uv(xd, 0);
+ else
+#endif
+ xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd, has_y2_block);
break;
+
default:
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block);
break;
vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
else
#endif
- if (tx_type == TX_8X8)
- vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
- else
+ if (tx_type == TX_8X8) {
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
+ vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
+ else
+#endif
+ vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
+ } else
vp8_stuff_mb(cpi, xd, t, dry_run);
} else {
vp8_fix_contexts(xd);
if (tx_type == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+ plane_type = PLANE_TYPE_Y_WITH_DC;
+ }
+#endif
for (b = 0; b < 16; b += 4) {
tokenize1st_order_b_8x8(xd,
xd->block + b, t, plane_type, xd->frame_type,
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
- for (b = 16; b < 24; b += 4) {
- tokenize1st_order_b_8x8(xd,
- xd->block + b, t, 2, xd->frame_type,
- A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b],
- cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+ tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
+ } else
+#endif
+ {
+ for (b = 16; b < 24; b += 4) {
+ tokenize1st_order_b_8x8(xd,
+ xd->block + b, t, 2, xd->frame_type,
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b],
+ cpi, dry_run);
+ *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ }
}
} else {
#if CONFIG_HYBRIDTRANSFORM
- if(active_ht) {
+ if (active_ht)
tokenize1st_order_ht(xd, t, plane_type, cpi, dry_run);
- } else {
-
-#if CONFIG_HYBRIDTRANSFORM8X8
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
- ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
- ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
- for (b = 0; b < 16; b += 4) {
- tokenize1st_order_b_8x8(xd,
- xd->block + b, t, PLANE_TYPE_Y_WITH_DC,
- xd->frame_type,
- A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b],
- cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
- }
- tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
- } else {
- tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
- }
-#else
- tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
-#endif
-
- }
-#else
- tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
+ else
#endif
+ tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
}
+
if (dry_run)
*t = t_backup;
}
*t = t_backup;
}
-
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
static __inline
void stuff1st_order_b_16x16(const BLOCKD *const b,
A + vp8_block2above[24],
L + vp8_block2left[24],
cpi, dry_run);
- plane_type = 0;
for (b = 0; b < 16; b++)
stuff1st_order_b(t,
if (dry_run)
*t = t_backup;
}
+
+// Emit end-of-block "stuff" tokens for a macroblock coded with 8x8 luma
+// transforms but 4x4 chroma transforms (the I8X8_PRED case under
+// CONFIG_HYBRIDTRANSFORM8X8). Mirrors vp8_stuff_mb_8x8 for luma while
+// handling chroma at 4x4 granularity.
+void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
+                            MACROBLOCKD *xd,
+                            TOKENEXTRA **t,
+                            int dry_run) {
+  ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
+  ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
+  int plane_type;
+  int b;
+  // Remember the token position so a dry run can be rolled back.
+  TOKENEXTRA *t_backup = *t;
+
+  // 2nd-order (Y2) block first.
+  stuff2nd_order_b_8x8(xd->block + 24, t, 1, xd->frame_type,
+                       A + vp8_block2above_8x8[24],
+                       L + vp8_block2left_8x8[24], cpi, dry_run);
+  plane_type = 3;
+
+  // Luma: four 8x8 blocks; propagate entropy context to the second
+  // 4x4 column/row covered by each 8x8 block.
+  for (b = 0; b < 16; b += 4) {
+    stuff1st_order_b_8x8(xd->block + b, t, plane_type, xd->frame_type,
+                         A + vp8_block2above_8x8[b],
+                         L + vp8_block2left_8x8[b],
+                         cpi, dry_run);
+    *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+    *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+  }
+
+  // Chroma: eight 4x4 blocks, using the 4x4 context tables.
+  for (b = 16; b < 24; b++)
+    stuff1st_order_buv(t,
+                       A + vp8_block2above[b],
+                       L + vp8_block2left[b],
+                       cpi, dry_run);
+
+  if (dry_run)
+    *t = t_backup;
+}
+
void vp8_fix_contexts(MACROBLOCKD *xd) {
/* Clear entropy contexts for Y2 blocks */
if ((xd->mode_info_context->mbmi.mode != B_PRED
extern int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block);
extern int mbuv_is_skippable(MACROBLOCKD *xd);
extern int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block);
-extern int mby_is_skippable_8x8(MACROBLOCKD *xd);
+extern int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
extern int mbuv_is_skippable_8x8(MACROBLOCKD *xd);
-extern int mb_is_skippable_8x8(MACROBLOCKD *xd);
+extern int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
+extern int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd, int has_y2_block);
extern int mb_is_skippable_16x16(MACROBLOCKD *xd);
extern int mby_is_skippable_16x16(MACROBLOCKD *xd);