vp8_default_bmode_probs(oci->fc.bmode_prob);
-#if CONFIG_T8X8
oci->txfm_mode = ONLY_4X4;
-#endif
oci->mb_no_coeff_skip = 1;
oci->comp_pred_mode = HYBRID_PREDICTION;
oci->no_lpf = 0;
0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8
};
-#if CONFIG_T8X8
const unsigned char vp8_block2left_8x8[25] =
{
0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
{
0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
};
-#endif
-
MB_PREDICTION_MODE second_mode, second_uv_mode;
#endif
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
-#if CONFIG_T8X8
TX_SIZE txfm_size;
-#endif
int_mv mv, second_mv;
unsigned char partitioning;
unsigned char mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */
},
},
};
-#if CONFIG_T8X8
const vp8_prob vp8_coef_update_probs_8x8 [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
},
};
-#endif
\ No newline at end of file
9, 12, 13, 10,
7, 11, 14, 15,
};
-#if CONFIG_T8X8
DECLARE_ALIGNED(64, cuchar, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5,
5, 3, 6, 3, 5, 4, 6, 6,
6, 5, 5, 6, 6, 6, 6, 6,
35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
};
-#endif
DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) =
{
};
DECLARE_ALIGNED(16, short, vp8_default_zig_zag_mask[16]);
-#if CONFIG_T8X8
DECLARE_ALIGNED(64, short, vp8_default_zig_zag_mask_8x8[64]);//int64_t
-#endif
/* Array indices are identical to previously-existing CONTEXT_NODE indices */
{
vp8_default_zig_zag_mask[vp8_default_zig_zag1d[i]] = 1 << i;
}
-#if CONFIG_T8X8
for (i = 0; i < 64; i++)
{
vp8_default_zig_zag_mask_8x8[vp8_default_zig_zag1d_8x8[i]] = 1 << i;
}
-#endif
}
static void init_bit_tree(vp8_tree_index *p, int n)
void vp8_default_coef_probs(VP8_COMMON *pc)
{
-#if CONFIG_T8X8
int h;
-#endif
vpx_memcpy(pc->fc.coef_probs, default_coef_probs,
sizeof(default_coef_probs));
-#if CONFIG_T8X8
h = 0;
do
{
while (++i < COEF_BANDS);
}
while (++h < BLOCK_TYPES);
-#endif
+
}
void vp8_coef_tree_initialize()
#define COEF_BANDS 8
extern DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]);
-#if CONFIG_T8X8
extern DECLARE_ALIGNED(64, const unsigned char, vp8_coef_bands_8x8[64]);
-#endif
/* Inside dimension is 3-valued measure of nearby complexity, that is,
the extent to which nearby coefficients are nonzero. For the first
extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]);
extern const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_T8X8
extern const vp8_prob vp8_coef_update_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#endif
struct VP8Common;
void vp8_default_coef_probs(struct VP8Common *);
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]);
extern DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]);
extern short vp8_default_zig_zag_mask[16];
-#if CONFIG_T8X8
extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]);
extern short vp8_default_zig_zag_mask_8x8[64];//int64_t
-#endif
void vp8_coef_tree_initialize(void);
#endif
rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
-#if CONFIG_T8X8
rtcd->idct.idct8 = vp8_short_idct8x8_c;
rtcd->idct.idct1_scalar_add_8x8 = vp8_dc_only_idct_add_8x8_c;
rtcd->idct.ihaar2 = vp8_short_ihaar2x2_c;
-#endif
rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
rtcd->recon.avg16x16 = vp8_avg_mem16x16_c;
#include "arm/idct_arm.h"
#endif
-#if CONFIG_T8X8
+
#ifndef vp8_idct_idct8
#define vp8_idct_idct8 vp8_short_idct8x8_c
#endif
#endif
extern prototype_idct_scalar_add(vp8_idct_idct1_scalar_add_8x8);
-#endif
+
#ifndef vp8_idct_idct1
#define vp8_idct_idct1 vp8_short_idct4x4llm_1_c
vp8_second_order_fn_t iwalsh1;
vp8_second_order_fn_t iwalsh16;
-#if CONFIG_T8X8
vp8_idct_fn_t idct8;
vp8_idct_fn_t idct8_1;
vp8_idct_scalar_add_fn_t idct1_scalar_add_8x8;
vp8_idct_fn_t ihaar2;
vp8_idct_fn_t ihaar2_1;
-#endif
} vp8_idct_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
}
}
-#if CONFIG_T8X8
+
void vp8_dc_only_idct_add_8x8_c(short input_dc,
unsigned char *pred_ptr,
unsigned char *dst_ptr,
op[8] = (ip[0] - ip[1] - ip[4] + ip[8])>>1;
}
-#endif
}
}
-#if CONFIG_T8X8
static void recon_dcblock_8x8(MACROBLOCKD *x)
{
BLOCKD *b = &x->block[24]; //for coeff 0, 2, 8, 10
x->block[12].dqcoeff[0] = b->diff[8];
}
-#endif
+
void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch)
{
}
-#if CONFIG_T8X8
+
void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch)//pay attention to use when 8x8
{
// int b,i;
}
}
-#endif
+
extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-#if CONFIG_T8X8
extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
extern void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
extern void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-#endif
#endif
vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
-#if CONFIG_T8X8
void vp8_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
loop_filter_info *lfi)
vp8_mbloop_filter_horizontal_edge_c(
y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
}
-#endif
void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit)
vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
-#if CONFIG_T8X8
void vp8_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
loop_filter_info *lfi)
y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
}
-#endif
-
void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit)
{
const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
const int seg = mode_info_context->mbmi.segment_id;
const int ref_frame = mode_info_context->mbmi.ref_frame;
-#if CONFIG_T8X8
int tx_type = mode_info_context->mbmi.txfm_size;
-#endif
filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
if (filter_level)
if (!skip_lf)
{
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_loop_filter_bv8x8_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
else
-#endif
LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v)
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
if (!skip_lf)
{
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_loop_filter_bh8x8_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
else
-#endif
LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h)
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
}
const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
const int seg = mode_info_context->mbmi.segment_id;
const int ref_frame = mode_info_context->mbmi.ref_frame;
-#if CONFIG_T8X8
int tx_type = mode_info_context->mbmi.txfm_size;
-#endif
filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
if (!skip_lf)
{
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_loop_filter_bv8x8_c
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
else
-#endif
LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v)
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
}
if (!skip_lf)
{
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_loop_filter_bh8x8_c
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
else
-#endif
LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h)
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
}
#endif
vp8_prob sub_mv_ref_prob [VP8_SUBMVREFS-1];
vp8_prob coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_T8X8
vp8_prob coef_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#endif
MV_CONTEXT mvc[2];
MV_CONTEXT pre_mvc[2]; /* not to caculate the mvcost for the frame if mvc doesn't change. */
#if CONFIG_HIGH_PRECISION_MV
NB_PREDICTION_TYPES = 3,
} COMPPREDMODE_TYPE;
-#if CONFIG_T8X8
/* TODO: allows larger transform */
typedef enum
{
ONLY_4X4 = 0,
ALLOW_8X8 = 1
} TXFM_MODE;
-#endif /* CONFIG_T8X8 */
typedef struct VP8_COMMON_RTCD
{
/* profile settings */
int experimental;
int mb_no_coeff_skip;
-#if CONFIG_T8X8
TXFM_MODE txfm_mode;
-#endif
COMPPREDMODE_TYPE comp_pred_mode;
int no_lpf;
int use_bilinear_mc_filter;
MB_PREDICTION_MODE mode;
int i;
-#if CONFIG_T8X8
int tx_type;
if( pbi->common.txfm_mode==ONLY_4X4 )
{
}
tx_type = xd->mode_info_context->mbmi.txfm_size;
-#endif
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
}
else if (!vp8dx_bool_error(xd->current_bc))
{
-
-#if CONFIG_T8X8
for(i = 0; i < 25; i++)
{
xd->block[i].eob = 0;
xd->eobs[i] = 0;
}
if ( tx_type == TX_8X8 )
- {
eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
- }
else
-#endif
eobtotal = vp8_decode_mb_tokens(pbi, xd);
#ifdef DEC_DEBUG
if (dec_debug) {
{
BLOCKD *b = &xd->block[24];
-#if CONFIG_T8X8
+
if( tx_type == TX_8X8 )
{
DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b);
(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd);
-
}
-
else
-#endif
{
DEQUANT_INVOKE(&pbi->dequant, block)(b);
if (xd->eobs[24] > 1)
xd->dst.y_stride, xd->eobs, xd->block[24].diff);
}
}
-#if CONFIG_T8X8
+
if( tx_type == TX_8X8 )
- {
DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block_8x8)//
(xd->qcoeff+16*16, xd->block[16].dequant,
xd->predictor+16*16, xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs+16, xd);//
-
- }
- else
-#endif
- if(xd->mode_info_context->mbmi.mode!=I8X8_PRED)
+ else if(xd->mode_info_context->mbmi.mode!=I8X8_PRED)
DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block)
(xd->qcoeff+16*16, xd->block[16].dequant,
xd->predictor+16*16, xd->dst.u_buffer, xd->dst.v_buffer,
}
/* Read the loop filter level and type */
-#if CONFIG_T8X8
pc->txfm_mode = (TXFM_MODE) vp8_read_bit(bc);
-#endif
pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(bc);
pc->filter_level = vp8_read_literal(bc, 6);
}
}
}
-#if CONFIG_T8X8
+
if(pbi->common.txfm_mode == ALLOW_8X8 && vp8_read_bit(bc))
{
// read coef probability tree
}
}
}
-#endif
+
vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->lst_fb_idx], sizeof(YV12_BUFFER_CONFIG));
vpx_memcpy(&xd->dst, &pc->yv12_fb[pc->new_fb_idx], sizeof(YV12_BUFFER_CONFIG));
extern void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) ;
extern void vp8_short_idct4x4llm_1_c(short *input, short *output, int pitch);
-#if CONFIG_T8X8
extern void vp8_short_idct8x8_c(short *input, short *output, int pitch);
extern void vp8_short_idct8x8_1_c(short *input, short *output, int pitch);
-#endif
#ifdef DEC_DEBUG
extern int dec_debug;
}
}
-#if CONFIG_T8X8
void vp8_dequantize_b_2x2_c(BLOCKD *d)
{
int i;
#endif
}
-#endif
\ No newline at end of file
unsigned char *pre, unsigned char *dst_u, \
unsigned char *dst_v, int stride, char *eobs)
-#if CONFIG_T8X8
#define prototype_dequant_dc_idct_add_y_block_8x8(sym) \
void sym(short *q, short *dq, \
unsigned char *pre, unsigned char *dst, \
unsigned char *dst_v, int stride, char *eobs, \
MACROBLOCKD *xd)
-#endif
-
#if ARCH_X86 || ARCH_X86_64
#include "x86/dequantize_x86.h"
#endif
#endif
extern prototype_dequant_idct_add_uv_block(vp8_dequant_idct_add_uv_block);
-#if CONFIG_T8X8
+
#ifndef vp8_dequant_block_2x2
#define vp8_dequant_block_2x2 vp8_dequantize_b_2x2_c
#endif
#endif
extern prototype_dequant_idct_add_uv_block_8x8(vp8_dequant_idct_add_uv_block_8x8);
-#endif
+
typedef prototype_dequant_block((*vp8_dequant_block_fn_t));
typedef prototype_dequant_idct_add_uv_block((*vp8_dequant_idct_add_uv_block_fn_t));
-#if CONFIG_T8X8
typedef prototype_dequant_dc_idct_add_y_block_8x8((*vp8_dequant_dc_idct_add_y_block_fn_t_8x8));
typedef prototype_dequant_idct_add_y_block_8x8((*vp8_dequant_idct_add_y_block_fn_t_8x8));
typedef prototype_dequant_idct_add_uv_block_8x8((*vp8_dequant_idct_add_uv_block_fn_t_8x8));
-#endif
+
typedef struct
{
vp8_dequant_block_fn_t block;
vp8_dequant_dc_idct_add_y_block_fn_t dc_idct_add_y_block;
vp8_dequant_idct_add_y_block_fn_t idct_add_y_block;
vp8_dequant_idct_add_uv_block_fn_t idct_add_uv_block;
-#if CONFIG_T8X8
vp8_dequant_block_fn_t block_2x2;
vp8_dequant_idct_add_fn_t idct_add_8x8;
vp8_dequant_dc_idct_add_fn_t dc_idct_add_8x8;
vp8_dequant_dc_idct_add_y_block_fn_t_8x8 dc_idct_add_y_block_8x8;
vp8_dequant_idct_add_y_block_fn_t_8x8 idct_add_y_block_8x8;
vp8_dequant_idct_add_uv_block_fn_t_8x8 idct_add_uv_block_8x8;
-#endif
} vp8_dequant_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X,
6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 7 * OCB_X
};
-#if CONFIG_T8X8
DECLARE_ALIGNED(64, static const unsigned char, coef_bands_x_8x8[64]) = {
0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X,
5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X,
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
};
-#endif
+
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
#define ONE_CONTEXT_NODE 2
range = range - split; \
NORMALIZE \
}
-#if CONFIG_T8X8
+
#define DECODE_AND_LOOP_IF_ZERO_8x8_2(probability,branch) \
{ \
split = 1 + ((( probability*(range-1) ) ) >> 8); \
range = range - split; \
NORMALIZE \
}
-#endif
+
#define DECODE_SIGN_WRITE_COEFF_AND_CHECK_EXIT(val) \
DECODE_AND_APPLYSIGN(val) \
Prob = coef_probs + (ENTROPY_NODES*2); \
qcoeff_ptr [ 15 ] = (INT16) v; \
goto BLOCK_FINISHED;
-#if CONFIG_T8X8
+
#define DECODE_SIGN_WRITE_COEFF_AND_CHECK_EXIT_8x8_2(val) \
DECODE_AND_APPLYSIGN(val) \
Prob = coef_probs + (ENTROPY_NODES*2); \
goto DO_WHILE_8x8; }\
qcoeff_ptr [ scan[63] ] = (INT16) v; \
goto BLOCK_FINISHED_8x8;
-#endif
+
#define DECODE_EXTRABIT_AND_ADJUST_VAL(prob, bits_count)\
split = 1 + (((range-1) * prob) >> 8); \
}\
NORMALIZE
-#if CONFIG_T8X8
+
int vp8_decode_mb_tokens_8x8(VP8D_COMP *dx, MACROBLOCKD *x)
{
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
return eobtotal;
}
-#endif
+
int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd)
{
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
void vp8_reset_mb_tokens_context(MACROBLOCKD *x);
int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *);
-#if CONFIG_T8X8
int vp8_decode_mb_tokens_8x8(VP8D_COMP *, MACROBLOCKD *);
-#endif
#endif /* DETOKENIZE_H */
/* Pure C: */
#if CONFIG_RUNTIME_CPU_DETECT
pbi->mb.rtcd = &pbi->common.rtcd;
-
-#if CONFIG_T8X8
-
pbi->dequant.block_2x2 = vp8_dequantize_b_2x2_c;
pbi->dequant.idct_add_8x8 = vp8_dequant_idct_add_8x8_c;
pbi->dequant.dc_idct_add_8x8 = vp8_dequant_dc_idct_add_8x8_c;
pbi->dequant.dc_idct_add_y_block_8x8 = vp8_dequant_dc_idct_add_y_block_8x8_c;
pbi->dequant.idct_add_y_block_8x8 = vp8_dequant_idct_add_y_block_8x8_c;
pbi->dequant.idct_add_uv_block_8x8 = vp8_dequant_idct_add_uv_block_8x8_c;
-
-#endif
pbi->dequant.block = vp8_dequantize_b_c;
pbi->dequant.idct_add = vp8_dequant_idct_add_c;
pbi->dequant.dc_idct_add = vp8_dequant_dc_idct_add_c;
}
}
-#if CONFIG_T8X8
+
void vp8_dequant_dc_idct_add_y_block_8x8_c
(short *q, short *dq, unsigned char *pre,
unsigned char *dst, int stride, char *eobs, short *dc, MACROBLOCKD *xd)
vp8_dequant_idct_add_8x8_c (q, dq, pre, dstv, 8, stride);
}
-#endif
+
typedef struct
{
int const *scan;
-#if CONFIG_T8X8
int const *scan_8x8;
-#endif
UINT8 const *ptr_block2leftabove;
vp8_tree_index const *vp8_coef_tree_ptr;
unsigned char *norm_ptr;
UINT8 *ptr_coef_bands_x;
-#if CONFIG_T8X8
UINT8 *ptr_coef_bands_x_8x8;
-#endif
ENTROPY_CONTEXT_PLANES *A;
ENTROPY_CONTEXT_PLANES *L;
BOOL_DECODER *current_bc;
vp8_prob const *coef_probs[4];
-#if CONFIG_T8X8
vp8_prob const *coef_probs_8x8[4];
-#endif
UINT8 eob[25];
#ifdef ENTROPY_STATS
int intra_mode_stats[10][10][10];
static unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
-#if CONFIG_T8X8
static unsigned int tree_update_hist_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
-#endif
extern unsigned int active_section;
#endif
int vp8_estimate_entropy_savings(VP8_COMP *cpi)
{
int savings = 0;
-#if CONFIG_T8X8
int i=0;
-#endif
VP8_COMMON *const cm = & cpi->common;
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
savings += default_coef_context_savings(cpi);
-#if CONFIG_T8X8
+
/* do not do this if not evena allowed */
if(cpi->common.txfm_mode == ALLOW_8X8)
{
savings += savings8x8 >> 8;
}
-#endif
-
return savings;
}
}
-#if CONFIG_T8X8
/* do not do this if not evena allowed */
if(cpi->common.txfm_mode == ALLOW_8X8)
{
while (++i < BLOCK_TYPES);
}
}
-
-#endif
}
#ifdef PACKET_TESTING
FILE *vpxlogc = 0;
}
}
-#if CONFIG_T8X8
vp8_write_bit(bc, pc->txfm_mode);
-#endif
// Encode the loop filter level and type
vp8_write_bit(bc, pc->filter_type);
fprintf(f, "};\n");
-#if CONFIG_T8X8
fprintf(f, "const vp8_prob tree_update_probs_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n");
for (i = 0; i < BLOCK_TYPES; i++)
fprintf(f, " },\n");
}
-#endif
fclose(f);
}
#endif
int src_stride;
int eob_max_offset;
-#if CONFIG_T8X8
int eob_max_offset_8x8;
-#endif
} BLOCK;
unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-
-#if CONFIG_T8X8
unsigned int token_costs_8x8[BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
int optimize;
int q_index;
void (*short_walsh4x4)(short *input, short *output, int pitch);
void (*quantize_b)(BLOCK *b, BLOCKD *d);
void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
- #if CONFIG_T8X8
void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
void (*short_fhaar2x2)(short *input, short *output, int pitch);
void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
void (*quantize_b_2x2)(BLOCK *b, BLOCKD *d);
-#endif
} MACROBLOCK;
-#if CONFIG_T8X8
+
void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch)
{
int j1, i, j, k;
op1[8]=(ip1[0] - ip1[1] - ip1[4] + ip1[8])>>1;
}
-#endif
+
void vp8_short_fdct4x4_c(short *input, short *output, int pitch)
{
int i;
#include "arm/dct_arm.h"
#endif
-#if CONFIG_T8X8
+
#ifndef vp8_fdct_short8x8
#define vp8_fdct_short8x8 vp8_short_fdct8x8_c
#endif
extern prototype_fdct(vp8_fhaar_short2x2);
-#endif
#ifndef vp8_fdct_short4x4
#define vp8_fdct_short4x4 vp8_short_fdct4x4_c
typedef prototype_fdct(*vp8_fdct_fn_t);
typedef struct
{
-#if CONFIG_T8X8
vp8_fdct_fn_t short8x8;
vp8_fdct_fn_t haar_short2x2;
-#endif
vp8_fdct_fn_t short4x4;
vp8_fdct_fn_t short8x4;
vp8_fdct_fn_t fast4x4;
};
-#if CONFIG_T8X8
const unsigned int vp8_default_coef_counts_8x8[BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
}
}
};
-#endif
+
};
-
-#if CONFIG_T8X8
-
-//INTRA mode transform size
-//When all three criteria are off the default is 4x4
-//#define INTRA_VARIANCE_ENTROPY_CRITERIA
-#define INTRA_WTD_SSE_ENTROPY_CRITERIA
-//#define INTRA_TEST_8X8_ONLY
-//
-//INTER mode transform size
-//When all three criteria are off the default is 4x4
-//#define INTER_VARIANCE_ENTROPY_CRITERIA
-#define INTER_WTD_SSE_ENTROPY_CRITERIA
-//#define INTER_TEST_8X8_ONLY
-
/* Sample variance of the dimension x dimension block of shorts at b1.
 * pitch: elements per row in the source buffer.
 * Computes the two-pass mean/variance directly from the source instead of
 * staging through a fixed short[8][8] buffer — the old copy overflowed the
 * stack for dimension > 8 and was unnecessary for dimension <= 8. */
double variance_Block(short *b1, int pitch, int dimension)
{
    short *b = b1;
    int i, j;
    double mean = 0.0, variance = 0.0;
    const int count = dimension * dimension;

    for (i = 0; i < dimension; i++)
    {
        for (j = 0; j < dimension; j++)
            mean += b[j];
        b += pitch;
    }
    mean /= count;

    b = b1;
    for (i = 0; i < dimension; i++)
    {
        for (j = 0; j < dimension; j++)
        {
            const double d = b[j] - mean;
            variance += d * d;
        }
        b += pitch;
    }
    variance /= count;
    return variance;
}
-
/* Mean of the dimension x dimension block of shorts starting at b.
 * pitch: elements per row in the source buffer.
 * Reads the source directly rather than staging it through a fixed
 * short[8][8] buffer — the old copy overflowed the stack for
 * dimension > 8 and added nothing for dimension <= 8. */
double mean_Block(short *b, int pitch, int dimension)
{
    double sum = 0.0;
    int i, j;

    for (i = 0; i < dimension; i++)
    {
        for (j = 0; j < dimension; j++)
            sum += b[j];
        b += pitch;
    }
    return sum / (dimension * dimension);
}
-
/* Sum of squared sample values over the dimension x dimension block at b.
 * pitch: elements per row in the source buffer. */
int SSE_Block(short *b, int pitch, int dimension)
{
    int row, col;
    int total = 0;

    for (row = 0; row < dimension; row++, b += pitch)
    {
        for (col = 0; col < dimension; col++)
        {
            const int v = b[col];
            total += v * v;
        }
    }
    return total;
}
-
-double Compute_Variance_Entropy(MACROBLOCK *x)
-{
- double variance_8[4] = {0.0, 0.0, 0.0, 0.0}, sum_var = 0.0, all_entropy = 0.0;
- variance_8[0] = variance_Block(x->block[0].src_diff, 16, 8);
- variance_8[1] = variance_Block(x->block[2].src_diff, 16, 8);
- variance_8[2] = variance_Block(x->block[8].src_diff, 16, 8);
- variance_8[3] = variance_Block(x->block[10].src_diff, 16, 8);
- sum_var = variance_8[0] + variance_8[1] + variance_8[2] + variance_8[3];
- if(sum_var)
- {
- int i;
- for(i = 0; i <4; i++)
- {
- if(variance_8[i])
- {
- variance_8[i] /= sum_var;
- all_entropy -= variance_8[i]*log(variance_8[i]);
- }
- }
- }
- return (all_entropy /log(2));
-}
-
-double Compute_Wtd_SSE_SubEntropy(MACROBLOCK *x)
-{
- double variance_8[4] = {0.0, 0.0, 0.0, 0.0};
- double entropy_8[4] = {0.0, 0.0, 0.0, 0.0};
- double sse_1, sse_2, sse_3, sse_4, sse_0;
- int i;
- for (i=0;i<3;i+=2)
- {
- sse_0 = SSE_Block(x->block[i].src_diff, 16, 8);
- if(sse_0)
- {
- sse_1 = SSE_Block(x->block[i].src_diff, 16, 4)/sse_0;
- sse_2 = SSE_Block(x->block[i+1].src_diff, 16, 4)/sse_0;
- sse_3 = SSE_Block(x->block[i+4].src_diff, 16, 4)/sse_0;
- sse_4 = SSE_Block(x->block[i+5].src_diff, 16, 4)/sse_0;
- variance_8[i]= variance_Block(x->block[i].src_diff, 16, 8);
- if(sse_1 && sse_2 && sse_3 && sse_4)
- entropy_8[i]= (-sse_1*log(sse_1)
- -sse_2*log(sse_2)
- -sse_3*log(sse_3)
- -sse_4*log(sse_4))/log(2);
- }
- }
- for (i=8;i<11;i+=2)
- {
- if(sse_0)
- {
- sse_0 = SSE_Block(x->block[i].src_diff, 16, 8);
- sse_1 = SSE_Block(x->block[i].src_diff, 16, 4)/sse_0;
- sse_2 = SSE_Block(x->block[i+1].src_diff, 16, 4)/sse_0;
- sse_3 = SSE_Block(x->block[i+4].src_diff, 16, 4)/sse_0;
- sse_4 = SSE_Block(x->block[i+5].src_diff, 16, 4)/sse_0;
- variance_8[i-7]= variance_Block(x->block[i].src_diff, 16, 8);
- if(sse_1 && sse_2 && sse_3 && sse_4)
- entropy_8[i-7]= (-sse_1*log(sse_1)
- -sse_2*log(sse_2)
- -sse_3*log(sse_3)
- -sse_4*log(sse_4))/log(2);
- }
- }
-
- if(variance_8[0]+variance_8[1]+variance_8[2]+variance_8[3])
- return (entropy_8[0]*variance_8[0]+
- entropy_8[1]*variance_8[1]+
- entropy_8[2]*variance_8[2]+
- entropy_8[3]*variance_8[3])/
- (variance_8[0]+
- variance_8[1]+
- variance_8[2]+
- variance_8[3]);
- else
- return 0;
-}
-
-int vp8_8x8_selection_intra(MACROBLOCK *x)
-{
-#ifdef INTRA_VARIANCE_ENTROPY_CRITERIA
- return (Compute_Variance_Entropy(x) > 1.2);
-#elif defined(INTRA_WTD_SSE_ENTROPY_CRITERIA)
- return (Compute_Wtd_SSE_SubEntropy(x) > 1.2);
-#elif defined(INTRA_TEST_8X8_ONLY)
- return 1;
-#else
- return 0; //when all criteria are off use the default 4x4 only
-#endif
-}
-
-int vp8_8x8_selection_inter(MACROBLOCK *x)
-{
-#ifdef INTER_VARIANCE_ENTROPY_CRITERIA
- return (Compute_Variance_Entropy(x) > 1.5);
-#elif defined(INTER_WTD_SSE_ENTROPY_CRITERIA)
- return (Compute_Wtd_SSE_SubEntropy(x) > 1.5);
-#elif defined(INTER_TEST_8X8_ONLY)
- return 1;
-#else
- return 0; //when all criteria are off use the default 4x4 only
-#endif
-}
-
-#endif
-
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
x->active_ptr = cpi->active_map + map_index + mb_col;
-#if CONFIG_T8X8
/* force 4x4 transform for mode selection */
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
-#endif
if (cm->frame_type == KEY_FRAME)
{
vp8_update_zbin_extra(cpi, x);
}
-#if CONFIG_T8X8
/* test code: set transform size based on mode selection */
if(cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count ++;
}
-#endif
if(x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED)
{
cpi->comp_pred_count[pred_context]++;
}
-#if CONFIG_T8X8
+
/* test code: set transform size based on mode selection */
if( cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count++;
}
-#endif
+
/* switch back to the regular quantizer for the encode */
if (cpi->sf.improved_quant)
{
{
BLOCK *b = &x->block[0];
-#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE) (DC_PRED - 1))
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_intra_mby_8x8(x);
else
-#endif
- vp8_transform_intra_mby(x);
+ vp8_transform_intra_mby(x);
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_quantize_mby_8x8(x);
else
-#endif
vp8_quantize_mby(x);
if (x->optimize)
{
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_optimize_mby_8x8(x, rtcd);
else
-#endif
vp8_optimize_mby(x, rtcd);
}
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
-#endif
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
#ifdef ENC_DEBUG
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
-#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE) (DC_PRED - 1))
{
#endif
ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_transform_mbuv_8x8(x);
else
-#endif
vp8_transform_mbuv(x);
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_quantize_mbuv_8x8(x);
else
-#endif
vp8_quantize_mbuv(x);
#ifdef ENC_DEBUG
#endif
if (x->optimize)
{
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_optimize_mbuv_8x8(x, rtcd);
else
-#endif
vp8_optimize_mbuv(x, rtcd);
}
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
-#endif
- vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
src_diff_ptr[i] = x->coeff[i * 16];
}
}
-#if CONFIG_T8X8
void vp8_build_dcblock_8x8(MACROBLOCK *x)
{
short *src_diff_ptr = &x->src_diff[384];
src_diff_ptr[4] = x->coeff[8 * 16];
src_diff_ptr[8] = x->coeff[12 * 16];
}
-#endif
+
void vp8_transform_mbuv(MACROBLOCK *x)
{
int i;
}
}
-#if CONFIG_T8X8
-
void vp8_transform_mbuv_8x8(MACROBLOCK *x)
{
int i;
}
}
-#endif
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
}
}
-#if CONFIG_T8X8
void optimize_b_8x8(MACROBLOCK *mb, int i, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
const VP8_ENCODER_RTCD *rtcd)
}
}
-#endif
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
-#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#endif
-
vp8_build_inter_predictors_mb(&x->e_mbd);
vp8_subtract_mb(rtcd, x);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_mb_8x8(x);
else
-#endif
transform_mb(x);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_quantize_mb_8x8(x);
else
-#endif
vp8_quantize_mb(x);
if (x->optimize)
{
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
optimize_mb_8x8(x, rtcd);
else
-#endif
optimize_mb(x, rtcd);
}
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
-#endif
vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
{
#ifdef ENC_DEBUG
}
#endif
}
-#endif
RECON_INVOKE(&rtcd->common->recon, recon_mb)
(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
/* this function is used by first pass only */
void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
-#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#endif
BLOCK *b = &x->block[0];
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_mby_8x8(x);
else
-#endif
transform_mby(x);
vp8_quantize_mby(x);
-#if CONFIG_T8X8
+
if( tx_type == TX_8X8 )
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
-#endif
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
RECON_INVOKE(&rtcd->common->recon, recon_mby)
void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
-#if CONFIG_T8X8
void vp8_transform_mb_8x8(MACROBLOCK *mb);
void vp8_transform_mby_8x8(MACROBLOCK *x);
void vp8_transform_mbuv_8x8(MACROBLOCK *x);
void vp8_build_dcblock_8x8(MACROBLOCK *b);
void vp8_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-#endif
void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);
cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
-#if CONFIG_T8X8
cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c;
-#endif
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.quantize.quantb_pair = vp8_regular_quantize_b_pair;
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_c;
cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_c;
-#if CONFIG_T8X8
cpi->rtcd.quantize.quantb_8x8 = vp8_regular_quantize_b_8x8;
cpi->rtcd.quantize.fastquantb_8x8 = vp8_fast_quantize_b_8x8_c;
cpi->rtcd.quantize.quantb_2x2 = vp8_regular_quantize_b_2x2;
cpi->rtcd.quantize.fastquantb_2x2 = vp8_fast_quantize_b_2x2_c;
-#endif
cpi->rtcd.search.full_search = vp8_full_search_sad;
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
if (cpi->sf.improved_dct)
{
-#if CONFIG_T8X8
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
-#endif
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
}
else
{
-#if CONFIG_T8X8
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
-#endif
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
}
cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4);
-#if CONFIG_T8X8
cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
-#endif
if (cpi->sf.improved_quant)
{
quantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
quantb_pair);
-#if CONFIG_T8X8
cpi->mb.quantize_b_8x8 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_8x8);
cpi->mb.quantize_b_2x2 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_2x2);
-#endif
}
else
{
fastquantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
fastquantb_pair);
-#if CONFIG_T8X8
cpi->mb.quantize_b_8x8 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_8x8);
cpi->mb.quantize_b_2x2 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_2x2);
-#endif
}
if (cpi->sf.improved_quant != last_improved_quant)
vp8cx_init_quantizer(cpi);
#if CONFIG_INTERNAL_STATS
vp8_clear_system_state();
-#if CONFIG_T8X8
+
printf("\n8x8-4x4:%d-%d\n", cpi->t8x8_count, cpi->t4x4_count);
-#endif
if (cpi->pass != 1)
{
FILE *f = fopen("opsnr.stt", "a");
//save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_T8X8
unsigned int coef_counts_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
int gfu_boost;
int kf_boost;
int gf_update_recommended;
int skip_true_count;
int skip_false_count;
-#if CONFIG_T8X8
int t4x4_count;
int t8x8_count;
-#endif
#if CONFIG_UVINTRA
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
if (cpi->twopass.section_intra_rating < 20)
Bias = Bias * cpi->twopass.section_intra_rating / 20;
-#if CONFIG_T8X8
// yx, bias less for large block size
if(cpi->common.txfm_mode == ALLOW_8X8)
Bias >>= 1;
-#endif
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
if (cpi->twopass.section_intra_rating < 20)
Bias = Bias * cpi->twopass.section_intra_rating / 20;
-#if CONFIG_T8X8
// yx, bias less for large block size
if(cpi->common.txfm_mode == ALLOW_8X8)
Bias >>= 1;
-#endif
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
#endif
#define EXACT_QUANT
-
-#ifdef EXACT_FASTQUANT
-void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant_fast;
- unsigned char *quant_shift_ptr = b->quant_shift;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
-
- vpx_memset(qcoeff_ptr, 0, 32);
- vpx_memset(dqcoeff_ptr, 0, 32);
-
- eob = -1;
-
- for (i = 0; i < 16; i++)
- {
- rc = vp8_default_zig_zag1d[i];
- z = coeff_ptr[rc];
- zbin = zbin_ptr[rc] ;
-
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- x += round_ptr[rc];
- y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- }
- }
- }
- d->eob = eob + 1;
-}
-
-#else
-
void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
{
int i, rc, eob, nonzeros;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
-#if CONFIG_T8X8
vpx_memset(qcoeff_ptr, 0, 32);
vpx_memset(dqcoeff_ptr, 0, 32);
-#endif
+
eob = -1;
for (i = 0; i < 16; i++)
{
d->eob = eob + 1;
}
-#endif
+
#ifdef EXACT_QUANT
void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
-#endif //EXACT_QUANT
+#endif //EXACT_QUANT
void vp8_quantize_mby_c(MACROBLOCK *x)
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
-#if CONFIG_T8X8
-
-#ifdef EXACT_FASTQUANT
-void vp8_fast_quantize_b_2x2_c(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *quant_shift_ptr = b->quant_shift;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- //double q2nd = 4;
- vpx_memset(qcoeff_ptr, 0, 32);
- vpx_memset(dqcoeff_ptr, 0, 32);
-
- eob = -1;
-
- for (i = 0; i < 4; i++)
- {
- rc = vp8_default_zig_zag1d[i];
- z = coeff_ptr[rc];
- //zbin = zbin_ptr[rc]/q2nd ;
- zbin = zbin_ptr[rc] ;
-
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- //x += (round_ptr[rc]/q2nd);
- x += (round_ptr[rc]);
- //y = ((int)((int)(x * quant_ptr[rc] * q2nd) >> 16) + x)
- // >> quant_shift_ptr[rc]; // quantize (x)
- y = ((int)((int)(x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- }
- }
- }
- d->eob = eob + 1;
-}
-
-void vp8_fast_quantize_b_8x8_c(BLOCK *b, BLOCKD *d)// only ac and dc difference, no difference among ac
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *quant_shift_ptr = b->quant_shift;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- //double q1st = 2;
- vpx_memset(qcoeff_ptr, 0, 64*sizeof(short));
- vpx_memset(dqcoeff_ptr, 0, 64*sizeof(short));
-
- eob = -1;
-
- for (i = 0; i < 64; i++)
- {
- rc = vp8_default_zig_zag1d_8x8[i];
- z = coeff_ptr[rc];
- //zbin = zbin_ptr[rc!=0]/q1st ;
- zbin = zbin_ptr[rc!=0] ;
-
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- //x += round_ptr[rc]/q1st;
- //y = ((int)(((int)((x * quant_ptr[rc!=0] * q1st)) >> 16) + x))
- // >> quant_shift_ptr[rc!=0]; // quantize (x)
- x += round_ptr[rc];
- y = ((int)(((int)((x * quant_ptr[rc!=0])) >> 16) + x))
- >> quant_shift_ptr[rc!=0]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- //dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0] / q1st; // dequantized value
- dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- }
- }
- }
- d->eob = eob + 1;
-}
-
-#else
void vp8_fast_quantize_b_2x2_c(BLOCK *b, BLOCKD *d)
{
d->eob = eob + 1;
}
-#endif //EXACT_FASTQUANT
-#ifdef EXACT_QUANT
+
+
void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
{
int i, rc, eob;
d->eob = eob + 1;
}
-#else
-void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *zbin_boost_ptr = b->zrun_zbin_boost;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- short zbin_oq_value = b->zbin_extra;
- //double q2nd = 4;
- vpx_memset(qcoeff_ptr, 0, 32);
- vpx_memset(dqcoeff_ptr, 0, 32);
-
- eob = -1;
- for (i = 0; i < 4; i++)
- {
- rc = vp8_default_zig_zag1d[i];
- z = coeff_ptr[rc];
- //zbin = (zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value)/q2nd;
- zbin = (zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value);
- zbin_boost_ptr ++;
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- //y = (((x + round_ptr[rc]/q2nd) * quant_ptr[rc]*q2nd)) >> 16; // quantize (x)
- y = (((x + round_ptr[rc]) * quant_ptr[rc])) >> 16; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- //dqcoeff_ptr[rc] = x * dequant_ptr[rc]/q2nd; // dequantized value
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength
- }
- }
- }
-
- d->eob = eob + 1;
-}
-
-void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *zbin_boost_ptr = b->zrun_zbin_boost;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- short zbin_oq_value = b->zbin_extra;
- //double q1st = 2;
- vpx_memset(qcoeff_ptr, 0, 64*sizeof(short));
- vpx_memset(dqcoeff_ptr, 0, 64*sizeof(short));
-
- eob = -1;
- for (i = 0; i < 64; i++)
- {
-
- rc = vp8_default_zig_zag1d_8x8[i];
- z = coeff_ptr[rc];
- //zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value)/q1st;
- zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value);
- zbin_boost_ptr ++;
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- //y = ((x + round_ptr[rc!=0]/q1st) * quant_ptr[rc!=0] * q1st) >> 16;
- y = ((x + round_ptr[rc!=0]) * quant_ptr[rc!=0]) >> 16;
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- //dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]/q1st; // dequantized value
- dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength
- }
- }
- }
- d->eob = eob + 1;
-}
-
-#endif //EXACT_QUANT
void vp8_quantize_mby_8x8(MACROBLOCK *x)
{
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
}
-#endif //CONFIG_T8X8
+
/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
* these two C functions if corresponding optimized routine is not available.
}
}
}
-#else
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
- int i;
- int quant_val;
- int Q;
- int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
- int qrounding_factor = 48;
-
- for (Q = 0; Q < QINDEX_RANGE; Q++)
- {
- int qzbin_factor = vp8_dc_quant(Q,0) < 148 ) ? 84: 80;
-
- // dc values
- quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
- cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
- cpi->Y1zbin[Q][0] = ((qzbin_factors * quant_val) + 64) >> 7;
- cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y1dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
- cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
- cpi->Y2zbin[Q][0] = ((qzbin_factors * quant_val) + 64) >> 7;
- cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y2dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
- cpi->UVquant[Q][0] = (1 << 16) / quant_val;
- cpi->UVzbin[Q][0] = ((qzbin_factors * quant_val) + 64) >> 7;;
- cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.UVdequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- // all the ac values = ;
- for (i = 1; i < 16; i++)
- {
- int rc = vp8_default_zig_zag1d[i];
-
- quant_val = vp8_ac_yquant(Q);
- cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
- cpi->Y1zbin[Q][rc] = ((qzbin_factors * quant_val) + 64) >> 7;
- cpi->Y1round[Q][rc] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y1dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
- cpi->Y2zbin[Q][rc] = ((qzbin_factors * quant_val) + 64) >> 7;
- cpi->Y2round[Q][rc] = (qrounding_factors * quant_val) >> 7;
- cpi->common.Y2dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
- cpi->UVzbin[Q][rc] = ((qzbin_factors * quant_val) + 64) >> 7;
- cpi->UVround[Q][rc] = (qrounding_factors * quant_val) >> 7;
- cpi->common.UVdequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
- }
- }
-}
#endif
{
x->block[i].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#endif
}
else
{
x->block[i].eob_max_offset = 16;
-#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 = 64;
-#endif
}
}
{
x->block[i].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#endif
-
}
else
{
x->block[i].eob_max_offset = 16;
-#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 = 64;
-#endif
-
}
}
{
x->block[24].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#if CONFIG_T8X8
x->block[24].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#endif
}
else
{
x->block[24].eob_max_offset = 16;
-#if CONFIG_T8X8
x->block[24].eob_max_offset_8x8 = 4;
-#endif
}
/* save this macroblock QIndex for vp8_update_zbin_extra() */
#define vp8_quantize_fastquantb vp8_fast_quantize_b_c
#endif
extern prototype_quantize_block(vp8_quantize_fastquantb);
-#if CONFIG_T8X8
+
#ifndef vp8_quantize_quantb_8x8
#define vp8_quantize_quantb_8x8 vp8_regular_quantize_b_8x8
#endif
#define vp8_quantize_fastquantb_2x2 vp8_fast_quantize_b_2x2_c
#endif
extern prototype_quantize_block(vp8_quantize_fastquantb_2x2);
-#endif
+
#ifndef vp8_quantize_fastquantb_pair
#define vp8_quantize_fastquantb_pair vp8_fast_quantize_b_pair_c
prototype_quantize_block(*quantb);
prototype_quantize_block_pair(*quantb_pair);
prototype_quantize_block(*fastquantb);
-#if CONFIG_T8X8
prototype_quantize_block(*quantb_8x8);
prototype_quantize_block(*fastquantb_8x8);
prototype_quantize_block(*quantb_2x2);
prototype_quantize_block(*fastquantb_2x2);
-#endif
prototype_quantize_block_pair(*fastquantb_pair);
} vp8_quantize_rtcd_vtable_t;
#endif
extern void vp8_strict_quantize_b(BLOCK *b,BLOCKD *d);
-#if CONFIG_T8X8
extern void vp8_strict_quantize_b_8x8(BLOCK *b,BLOCKD *d);
extern void vp8_strict_quantize_b_2x2(BLOCK *b,BLOCKD *d);
-#endif
struct VP8_COMP;
extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi);
#endif
-#if CONFIG_T8X8
cpi->common.txfm_mode = ONLY_4X4;
-#endif
//cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ;
}
void vp8_setup_inter_frame(VP8_COMP *cpi)
{
-#if CONFIG_T8X8
+
if(cpi->common.Width * cpi->common.Height > 640*360)
//||cpi->this_frame_target < 7 * cpi->common.MBs)
cpi->common.txfm_mode = ALLOW_8X8;
else
cpi->common.txfm_mode = ONLY_4X4;
-#endif
if(cpi->common.refresh_alt_ref_frame)
{
(const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs
);
-#if CONFIG_T8X8
fill_token_costs(
cpi->mb.token_costs_8x8,
(const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs_8x8
);
-#endif
#if CONFIG_QIMODE
//rough estimate for costing
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex>>4;
*Rate = vp8_rdcost_mby(mb);
}
-#if CONFIG_T8X8
static int cost_coeffs_2x2(MACROBLOCK *mb,
BLOCKD *b, int type,
// rate
*Rate = vp8_rdcost_mby_8x8(mb);
}
-#endif
static void copy_predictor(unsigned char *dst, const unsigned char *predictor)
{
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-#if CONFIG_T8X8
+
static int rd_cost_mbuv_8x8(MACROBLOCK *mb)
{
int b;
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-#endif
+
static int rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel)
int rate2, distortion2;
int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
int uv_intra_tteob = 0;
-#if CONFIG_T8X8
int uv_intra_rate_8x8, uv_intra_distortion_8x8, uv_intra_rate_tokenonly_8x8;
int uv_intra_tteob_8x8=0;
-#endif
int rate_y, UNINITIALIZED_IS_SAFE(rate_uv);
int distortion_uv;
int best_yrd = INT_MAX;
for(i=16; i<24; i++)
uv_intra_tteob += x->e_mbd.block[i].eob;
-#if CONFIG_T8X8
uv_intra_tteob_8x8 = uv_intra_tteob;
-#endif
// Get estimates of reference frame costs for each reference frame
// that depend on the current prediction etc.
// FIXME compound intra prediction
RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
(&x->e_mbd);
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd)) ;
else
-#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb)) ;
rate2 += rate_y;
rate2 += vp8_cost_mv_ref(&cpi->common, this_mode, mdcounts);
// Y cost and distortion
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd));
else
-#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb));
// UV cost and distortion
vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
else
-#endif
rd_inter16x16_uv(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
&x->e_mbd.predictor[320], 16, 8);
/* Y cost and distortion */
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd));
else
-#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb));
distortion2 += distortion;
/* UV cost and distortion */
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
else
-#endif
rd_inter16x16_uv(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
if(has_y2_block)
tteob += x->e_mbd.block[24].eob;
-#if CONFIG_T8X8
if(cpi->common.txfm_mode ==ALLOW_8X8 && has_y2_block)
{
for (i = 0; i < 16; i+=4)
}
}
else
-#endif
{
for (i = 0; i < 16; i++)
tteob += (x->e_mbd.block[i].eob > has_y2_block);
void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate_)
{
-#if CONFIG_T8X8
MACROBLOCKD *xd = &x->e_mbd;
-#endif
int error4x4, error16x16;
int rate4x4, rate16x16 = 0, rateuv;
int dist4x4, dist16x16, distuv;
#ifdef ENTROPY_STATS
_int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_T8X8
_int64 context_counters_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
-#endif
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
-#if CONFIG_T8X8
void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
-#endif
void vp8_fix_contexts(MACROBLOCKD *x);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE*2];
vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
-#if CONFIG_T8X8
static void tokenize2nd_order_b_8x8
(
MACROBLOCKD *xd,
*a = *l = pt;
}
-#endif
static void tokenize2nd_order_b
(
*a = *l = pt;
}
-#if CONFIG_T8X8
+
static void tokenize1st_order_b_8x8
(
MACROBLOCKD *xd,
*a = *l = pt;
}
-#endif
+
static void tokenize1st_order_b
return skip;
}
-#if CONFIG_T8X8
+
static int mb_is_skippable_8x8(MACROBLOCKD *x)
{
int has_y2_block;
return skip;
}
-#endif
+
void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
{
int plane_type;
int has_y2_block;
int b;
-
-#if CONFIG_T8X8
int tx_type = x->mode_info_context->mbmi.txfm_size;
-#endif
// If the MB is going to be skipped because of a segment level flag
// exclude this from the skip count stats used to calculate the
&& x->mode_info_context->mbmi.mode != SPLITMV);
x->mode_info_context->mbmi.mb_skip_coeff =
-#if CONFIG_T8X8
(( tx_type == TX_8X8 ) ?
mb_is_skippable_8x8(x) :
mb_is_skippable(x, has_y2_block));
-#else
- mb_is_skippable(x, has_y2_block);
-#endif
if (x->mode_info_context->mbmi.mb_skip_coeff)
{
if (!cpi->common.mb_no_coeff_skip)
{
-#if CONFIG_T8X8
if ( tx_type == TX_8X8 )
vp8_stuff_mb_8x8(cpi, x, t) ;
else
-#endif
vp8_stuff_mb(cpi, x, t) ;
}
else
plane_type = 3;
if(has_y2_block)
{
-#if CONFIG_T8X8
if ( tx_type == TX_8X8 )
{
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
L + vp8_block2left_8x8[24], cpi);
}
else
-#endif
tokenize2nd_order_b(x, t, cpi);
plane_type = 0;
}
-#if CONFIG_T8X8
+
if ( tx_type == TX_8X8 )
{
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
}
}
else
-#endif
+
tokenize1st_order_b(x, t, plane_type, cpi);
}
void init_context_counters(void)
{
vpx_memset(context_counters, 0, sizeof(context_counters));
-#if CONFIG_T8X8
vpx_memset(context_counters_8x8, 0, sizeof(context_counters_8x8));
-#endif
}
void print_context_counters()
}
while (++type < BLOCK_TYPES);
-#if CONFIG_T8X8
fprintf(f, "int Contexts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];\n\n");
fprintf(f, "const int default_contexts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {");
fprintf(f, "\n }");
}
while (++type < BLOCK_TYPES);
-#endif
fprintf(f, "\n};\n");
fclose(f);
fill_value_tokens();
}
-#if CONFIG_T8X8
+
static __inline void stuff2nd_order_b_8x8
(
const BLOCKD *const b,
*(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
}
}
-#endif
+
static __inline void stuff2nd_order_b
(
void print_context_counters();
extern _int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_T8X8
extern _int64 context_counters_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
-#endif
extern const int *vp8_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
* improve cache locality, since it's needed for costing when the rest of the