mi_row, mi_col, xd->scale_factor, xd->scale_factor_uv);
if (!x->skip) {
- vp9_encode_inter16x16(cm, x, mi_row, mi_col);
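+ // Build the inter predictor explicitly, then let the size-agnostic
+ // vp9_encode_sb() handle residual coding.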
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_SIZE_MB16X16);
+ vp9_encode_sb(cm, x, BLOCK_SIZE_MB16X16);
} else {
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_SIZE_MB16X16);
#if CONFIG_COMP_INTERINTRA_PRED
} else
#endif
if (!x->skip) {
- vp9_subtract_sb(x, bsize);
-
- switch (xd->mode_info_context->mbmi.txfm_size) {
- case TX_32X32:
- vp9_transform_sby_32x32(x, bsize);
- vp9_quantize_sby_32x32(x, bsize);
- if (bsize == BLOCK_SIZE_SB64X64) {
- vp9_transform_sbuv_32x32(x, bsize);
- vp9_quantize_sbuv_32x32(x, bsize);
- } else {
- vp9_transform_sbuv_16x16(x, bsize);
- vp9_quantize_sbuv_16x16(x, bsize);
- }
- if (x->optimize) {
- vp9_optimize_sby(cm, x, bsize);
- if (bsize == BLOCK_SIZE_SB64X64)
- vp9_optimize_sbuv(cm, x, bsize);
- else
- vp9_optimize_sbuv(cm, x, bsize);
- }
- vp9_inverse_transform_sby_32x32(xd, bsize);
- if (bsize == BLOCK_SIZE_SB64X64)
- vp9_inverse_transform_sbuv_32x32(xd, bsize);
- else
- vp9_inverse_transform_sbuv_16x16(xd, bsize);
- break;
- case TX_16X16:
- vp9_transform_sby_16x16(x, bsize);
- vp9_quantize_sby_16x16(x, bsize);
- if (bsize >= BLOCK_SIZE_SB32X32) {
- vp9_transform_sbuv_16x16(x, bsize);
- vp9_quantize_sbuv_16x16(x, bsize);
- } else {
- vp9_transform_sbuv_8x8(x, bsize);
- vp9_quantize_sbuv_8x8(x, bsize);
- }
- if (x->optimize) {
- vp9_optimize_sby(cm, x, bsize);
- if (bsize >= BLOCK_SIZE_SB32X32)
- vp9_optimize_sbuv(cm, x, bsize);
- else
- vp9_optimize_sbuv(cm, x, bsize);
- }
- vp9_inverse_transform_sby_16x16(xd, bsize);
- if (bsize >= BLOCK_SIZE_SB32X32)
- vp9_inverse_transform_sbuv_16x16(xd, bsize);
- else
- vp9_inverse_transform_sbuv_8x8(xd, bsize);
- break;
- case TX_8X8:
- vp9_transform_sby_8x8(x, bsize);
- vp9_quantize_sby_8x8(x, bsize);
- if (x->optimize)
- vp9_optimize_sby(cm, x, bsize);
- vp9_inverse_transform_sby_8x8(xd, bsize);
- if (bsize >= BLOCK_SIZE_MB16X16) {
- vp9_transform_sbuv_8x8(x, bsize);
- vp9_quantize_sbuv_8x8(x, bsize);
- if (x->optimize)
- vp9_optimize_sbuv(cm, x, bsize);
- vp9_inverse_transform_sbuv_8x8(xd, bsize);
- } else {
- vp9_transform_sbuv_4x4(x, bsize);
- vp9_quantize_sbuv_4x4(x, bsize);
- if (x->optimize)
- vp9_optimize_sbuv(cm, x, bsize);
- vp9_inverse_transform_sbuv_4x4(xd, bsize);
- }
- break;
- case TX_4X4:
- vp9_transform_sby_4x4(x, bsize);
- vp9_transform_sbuv_4x4(x, bsize);
- vp9_quantize_sby_4x4(x, bsize);
- vp9_quantize_sbuv_4x4(x, bsize);
- if (x->optimize) {
- vp9_optimize_sby(cm, x, bsize);
- vp9_optimize_sbuv(cm, x, bsize);
- }
- vp9_inverse_transform_sby_4x4(xd, bsize);
- vp9_inverse_transform_sbuv_4x4(xd, bsize);
- break;
- default: assert(0);
- }
- vp9_recon_sb_c(xd, bsize);
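+ // One size-agnostic call replaces the per-tx-size transform, quantize,
+ // optimize, inverse-transform and recon sequence above.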
+ vp9_encode_sb(cm, x, bsize);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled, bsize);
} else {
// FIXME(rbultje): not tile-aware (mi - 1)
*a = *l = (final_eob > 0);
}
-struct optimize_ctx {
- ENTROPY_CONTEXT ta[MAX_MB_PLANE][16];
- ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
-};
-
struct optimize_block_args {
VP9_COMMON *cm;
MACROBLOCK *x;
struct optimize_ctx *ctx;
};
-static void optimize_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
- int ss_txfrm_size, void *arg) {
- const struct optimize_block_args* const args = arg;
- MACROBLOCKD* const xd = &args->x->e_mbd;
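+// Trellis-optimize the coefficients of a single transform block. Non-static
+// so callers outside this file can optimize one block at a time.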
+void vp9_optimize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, VP9_COMMON *cm, MACROBLOCK *mb,
+ struct optimize_ctx *ctx) {
+ MACROBLOCKD* const xd = &mb->e_mbd;
int x, y;
// find current entropy context
txfrm_block_to_raster_xy(xd, bsize, plane, block, ss_txfrm_size, &x, &y);
- optimize_b(args->cm, args->x, plane, block, bsize,
- &args->ctx->ta[plane][x], &args->ctx->tl[plane][y],
+ optimize_b(cm, mb, plane, block, bsize,
+ &ctx->ta[plane][x], &ctx->tl[plane][y],
ss_txfrm_size / 2);
}
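+// foreach_transformed_block() callback: unpack the void* argument struct and
+// forward to vp9_optimize_b().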
+static void optimize_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ const struct optimize_block_args* const args = arg;
+ vp9_optimize_b(plane, block, bsize, ss_txfrm_size, args->cm, args->x,
+ args->ctx);
+}
+
void vp9_optimize_init(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
struct optimize_ctx *ctx) {
int p;
foreach_transformed_block_uv(&x->e_mbd, bsize, optimize_block, &arg);
}
-#if !CONFIG_SB8X8
-void vp9_fidct_mb(VP9_COMMON *const cm, MACROBLOCK *x) {
- MACROBLOCKD *const xd = &x->e_mbd;
- const TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
-
- if (tx_size == TX_16X16) {
- vp9_transform_sby_16x16(x, BLOCK_SIZE_MB16X16);
- vp9_transform_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
- vp9_quantize_sby_16x16(x, BLOCK_SIZE_MB16X16);
- vp9_quantize_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
- if (x->optimize) {
- vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
- vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
- }
- vp9_inverse_transform_sby_16x16(xd, BLOCK_SIZE_MB16X16);
- vp9_inverse_transform_sbuv_8x8(xd, BLOCK_SIZE_MB16X16);
- } else if (tx_size == TX_8X8) {
- vp9_transform_sby_8x8(x, BLOCK_SIZE_MB16X16);
- vp9_quantize_sby_8x8(x, BLOCK_SIZE_MB16X16);
- if (x->optimize)
- vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
- vp9_inverse_transform_sby_8x8(xd, BLOCK_SIZE_MB16X16);
- if (xd->mode_info_context->mbmi.mode == SPLITMV) {
- assert(xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4);
- vp9_transform_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
- vp9_quantize_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
- if (x->optimize)
- vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
- vp9_inverse_transform_sbuv_4x4(xd, BLOCK_SIZE_MB16X16);
- } else {
- vp9_transform_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
- vp9_quantize_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
- if (x->optimize)
- vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
- vp9_inverse_transform_sbuv_8x8(xd, BLOCK_SIZE_MB16X16);
- }
- } else {
- vp9_transform_sby_4x4(x, BLOCK_SIZE_MB16X16);
- vp9_transform_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
- vp9_quantize_sby_4x4(x, BLOCK_SIZE_MB16X16);
- vp9_quantize_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
- if (x->optimize) {
- vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
- vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
- }
- vp9_inverse_transform_sby_4x4(xd, BLOCK_SIZE_MB16X16);
- vp9_inverse_transform_sbuv_4x4(xd, BLOCK_SIZE_MB16X16);
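+// Arguments threaded through foreach_transformed_block() into
+// encode_block().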
+struct encode_b_args {
+ VP9_COMMON *cm;
+ MACROBLOCK *x;
+ struct optimize_ctx *ctx;
+};
+
+static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ struct encode_b_args* const args = arg;
+ MACROBLOCK* const x = args->x;
+ MACROBLOCKD* const xd = &x->e_mbd;
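+ // Width in samples of this plane's block, accounting for chroma
+ // subsampling.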
+ const int bw = 4 << (b_width_log2(bsize) - xd->plane[plane].subsampling_x);
+ const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
+ block, ss_txfrm_size);
+ int16_t* const src_diff = raster_block_offset_int16(xd, bsize, plane,
+ raster_block,
+ x->plane[plane].src_diff);
+ int16_t* const diff = raster_block_offset_int16(xd, bsize, plane,
+ raster_block,
+ xd->plane[plane].diff);
+ TX_TYPE tx_type = DCT_DCT;
+
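+ // Forward transform. ss_txfrm_size is twice the TX_SIZE, so halve it to
+ // recover the transform size; the fdct pitch arguments are in bytes,
+ // hence bw * 2.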
+ switch (ss_txfrm_size / 2) {
+ case TX_32X32:
+ vp9_short_fdct32x32(src_diff,
+ BLOCK_OFFSET(x->plane[plane].coeff, block, 16),
+ bw * 2);
+ break;
+ case TX_16X16:
+ tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT;
+ if (tx_type != DCT_DCT) {
+ vp9_short_fht16x16(src_diff,
+ BLOCK_OFFSET(x->plane[plane].coeff, block, 16),
+ bw, tx_type);
+ } else {
+ x->fwd_txm16x16(src_diff,
+ BLOCK_OFFSET(x->plane[plane].coeff, block, 16),
+ bw * 2);
+ }
+ break;
+ case TX_8X8:
+ tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT;
+ if (tx_type != DCT_DCT) {
+ vp9_short_fht8x8(src_diff,
+ BLOCK_OFFSET(x->plane[plane].coeff, block, 16),
+ bw, tx_type);
+ } else {
+ x->fwd_txm8x8(src_diff,
+ BLOCK_OFFSET(x->plane[plane].coeff, block, 16),
+ bw * 2);
+ }
+ break;
+ case TX_4X4:
+ tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
+ if (tx_type != DCT_DCT) {
+ vp9_short_fht4x4(src_diff,
+ BLOCK_OFFSET(x->plane[plane].coeff, block, 16),
+ bw, tx_type);
+ } else {
+ x->fwd_txm4x4(src_diff,
+ BLOCK_OFFSET(x->plane[plane].coeff, block, 16),
+ bw * 2);
+ }
+ break;
+ default:
+ assert(0);
}
-}
-void vp9_encode_inter16x16(VP9_COMMON *const cm, MACROBLOCK *x,
- int mi_row, int mi_col) {
- MACROBLOCKD *const xd = &x->e_mbd;
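+ // Quantize the block's 16 << ss_txfrm_size coefficients, then optionally
+ // trellis-optimize them.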
+ vp9_quantize(x, plane, block, 16 << ss_txfrm_size, tx_type);
+ if (x->optimize)
+ vp9_optimize_b(plane, block, bsize, ss_txfrm_size, args->cm, x, args->ctx);
- vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_SIZE_MB16X16);
- vp9_subtract_sb(x, BLOCK_SIZE_MB16X16);
- vp9_fidct_mb(cm, x);
- vp9_recon_sb(xd, BLOCK_SIZE_MB16X16);
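+ // Inverse-transform the dequantized coefficients into the diff buffer;
+ // reconstruction happens once per block in vp9_encode_sb().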
+ switch (ss_txfrm_size / 2) {
+ case TX_32X32:
+ vp9_short_idct32x32(BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
+ diff, bw * 2);
+ break;
+ case TX_16X16:
+ if (tx_type == DCT_DCT) {
+ vp9_short_idct16x16(BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
+ diff, bw * 2);
+ } else {
+ vp9_short_iht16x16(BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
+ diff, bw, tx_type);
+ }
+ break;
+ case TX_8X8:
+ if (tx_type == DCT_DCT) {
+ vp9_short_idct8x8(BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
+ diff, bw * 2);
+ } else {
+ vp9_short_iht8x8(BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
+ diff, bw, tx_type);
+ }
+ break;
+ case TX_4X4:
+ if (tx_type == DCT_DCT) {
+ // This is like vp9_short_idct4x4, but it special-cases eob <= 1, which
+ // matters for correctness (not just speed) in the lossless case.
+ vp9_inverse_transform_b_4x4(xd, xd->plane[plane].eobs[block],
+ BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16), diff, bw * 2);
+ } else {
+ vp9_short_iht4x4(BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
+ diff, bw, tx_type);
+ }
+ break;
+ }
}
-#endif
-/* this function is used by first pass only */
-void vp9_encode_inter16x16y(MACROBLOCK *x, int mi_row, int mi_col) {
- MACROBLOCKD *xd = &x->e_mbd;
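+// Encode a block of any supported size: subtract the prediction, code each
+// transform block via encode_block(), then reconstruct.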
+void vp9_encode_sb(VP9_COMMON *const cm, MACROBLOCK *x,
+ BLOCK_SIZE_TYPE bsize) {
+ MACROBLOCKD* const xd = &x->e_mbd;
+ struct optimize_ctx ctx;
+ struct encode_b_args arg = {cm, x, &ctx};
- vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_SIZE_MB16X16);
- vp9_subtract_sby(x, BLOCK_SIZE_MB16X16);
+ vp9_subtract_sb(x, bsize);
+ if (x->optimize)
+ vp9_optimize_init(xd, bsize, &ctx);
- vp9_transform_sby_4x4(x, BLOCK_SIZE_MB16X16);
- vp9_quantize_sby_4x4(x, BLOCK_SIZE_MB16X16);
- vp9_inverse_transform_sby_4x4(xd, BLOCK_SIZE_MB16X16);
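+ // Transform, quantize, optionally optimize, and inverse-transform every
+ // block in every plane.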
+ foreach_transformed_block(xd, bsize, encode_block, &arg);
- vp9_recon_sby(xd, BLOCK_SIZE_MB16X16);
+ vp9_recon_sb(xd, bsize);
}