int i;
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
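+ /* Shorthand for this macroblock's selected mode info; the fields below are written through it. */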
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int mb_mode = mi->mbmi.mode;
int mb_mode_index = ctx->best_mode_index;
vpx_memcpy(x->partition_info, &ctx->partition_info,
sizeof(PARTITION_INFO));
- xd->mode_info_context->mbmi.mv.as_int =
- x->partition_info->bmi[15].mv.as_int;
- xd->mode_info_context->mbmi.second_mv.as_int =
- x->partition_info->bmi[15].second_mv.as_int;
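+ /* The macroblock-level MV for split modes comes from the last (16th) partition block. */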
+ mbmi->mv.as_int = x->partition_info->bmi[15].mv.as_int;
+ mbmi->second_mv.as_int = x->partition_info->bmi[15].second_mv.as_int;
}
if (cpi->common.frame_type == KEY_FRAME) {
int dx = col_delta[i];
int offset_unextended = dy * cm->mb_cols + dx;
int offset_extended = dy * xd->mode_info_stride + dx;
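+ /* The segmentation/active maps are mb_cols wide; the mode info array has its own stride, hence two offsets. */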
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
// TODO Many of the index items here can be computed more efficiently!
if (xd->segmentation_enabled) {
// Code to set segment id in xd->mbmi.segment_id
if (cpi->segmentation_map[map_index] <= 3)
- xd->mode_info_context->mbmi.segment_id =
- cpi->segmentation_map[map_index];
+ mbmi->segment_id = cpi->segmentation_map[map_index];
else
- xd->mode_info_context->mbmi.segment_id = 0;
+ mbmi->segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
} else
// Set to Segment 0 by default
- xd->mode_info_context->mbmi.segment_id = 0;
+ mbmi->segment_id = 0;
x->active_ptr = cpi->active_map + map_index;
/* force 4x4 transform for mode selection */
- xd->mode_info_context->mbmi.txfm_size = TX_4X4; // TODO IS this right??
+ mbmi->txfm_size = TX_4X4; // TODO IS this right??
cpi->update_context = 0; // TODO Do we need this now??
vp8cx_encode_inter_macroblock(cpi, x, tp,
recon_yoffset, recon_uvoffset, 0);
- seg_id = xd->mode_info_context->mbmi.segment_id;
+ seg_id = mbmi->segment_id;
if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0) {
cpi->seg0_idx++;
}
int dx = col_delta[i];
int offset_extended = dy * xd->mode_info_stride + dx;
int offset_unextended = dy * cm->mb_cols + dx;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
// MB lies outside frame, move on
if (xd->segmentation_enabled) {
// Code to set segment id in xd->mbmi.segment_id
if (cpi->segmentation_map[map_index] <= 3)
- xd->mode_info_context->mbmi.segment_id =
- cpi->segmentation_map[map_index];
+ mbmi->segment_id = cpi->segmentation_map[map_index];
else
- xd->mode_info_context->mbmi.segment_id = 0;
+ mbmi->segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
} else
// Set to Segment 0 by default
- xd->mode_info_context->mbmi.segment_id = 0;
+ mbmi->segment_id = 0;
x->active_ptr = cpi->active_map + map_index;
// Note the encoder may have changed the segment_id
#ifdef MODE_STATS
- y_modes[xd->mode_info_context->mbmi.mode]++;
+ y_modes[mbmi->mode]++;
#endif
} else {
unsigned char *segment_id;
// Note the encoder may have changed the segment_id
#ifdef MODE_STATS
- inter_y_modes[xd->mode_info_context->mbmi.mode]++;
+ inter_y_modes[mbmi->mode]++;
- if (xd->mode_info_context->mbmi.mode == SPLITMV) {
+ if (mbmi->mode == SPLITMV) {
int b;
for (b = 0; b < x->partition_info->count; b++) {
// probabilities. NOTE: At the moment we dont support custom trees
// for the reference frame coding for each segment but this is a
// possible future action.
- segment_id = &xd->mode_info_context->mbmi.segment_id;
+ segment_id = &mbmi->segment_id;
seg_ref_active = segfeature_active(xd, *segment_id, SEG_LVL_REF_FRAME);
if (!seg_ref_active ||
((check_segref(xd, *segment_id, INTRA_FRAME) +
check_segref(xd, *segment_id, GOLDEN_FRAME) +
check_segref(xd, *segment_id, ALTREF_FRAME)) > 1)) {
{
- cpi->count_mb_ref_frame_usage
- [xd->mode_info_context->mbmi.ref_frame]++;
+ cpi->count_mb_ref_frame_usage[mbmi->ref_frame]++;
}
}
// Count of last ref frame 0,0 usage
- if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
- (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
+ if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
cpi->inter_zz_count++;
}
MACROBLOCK *x,
TOKENEXTRA **t,
int output_enabled) {
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
if ((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled) {
adjust_act_zbin(cpi, x);
vp8_update_zbin_extra(cpi, x);
/* test code: set transform size based on mode selection */
#if CONFIG_TX16X16
- if (x->e_mbd.mode_info_context->mbmi.mode <= TM_PRED) {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_16X16;
+ if (mbmi->mode <= TM_PRED) {
+ mbmi->txfm_size = TX_16X16;
cpi->t16x16_count++;
}
else
#endif
if (cpi->common.txfm_mode == ALLOW_8X8
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != B_PRED) {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
+ && mbmi->mode != I8X8_PRED
+ && mbmi->mode != B_PRED) {
+ mbmi->txfm_size = TX_8X8;
cpi->t8x8_count++;
} else {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
+ mbmi->txfm_size = TX_4X4;
cpi->t4x4_count++;
}
- if (x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED) {
+ if (mbmi->mode == I8X8_PRED) {
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
- } else if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
+ } else if (mbmi->mode == B_PRED)
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
else
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
- if (x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED)
+ if (mbmi->mode != I8X8_PRED)
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
if (output_enabled) {
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t, 0);
}
#if CONFIG_NEWBESTREFMV
else
vp8_tokenize_mb(cpi, &x->e_mbd, t, 1);
#endif
int recon_uvoffset, int output_enabled) {
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ unsigned char *segment_id = &mbmi->segment_id;
int seg_ref_active;
unsigned char ref_pred_flag;
x->skip = 0;
#if CONFIG_SWITCHABLE_INTERP
- vp8_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, cm);
+ vp8_setup_interp_filters(xd, mbmi->interp_filter, cm);
#endif
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled) {
- if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
- if (xd->mode_info_context->mbmi.mode == ZEROMV) {
- if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
+ if (mbmi->ref_frame != INTRA_FRAME) {
+ if (mbmi->mode == ZEROMV) {
+ if (mbmi->ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
- } else if (xd->mode_info_context->mbmi.mode == SPLITMV)
+ } else if (mbmi->mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
// SET VARIOUS PREDICTION FLAGS
// Did the chosen reference frame match its predicted value.
- ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
- get_pred_ref(cm, xd)));
+ ref_pred_flag = ((mbmi->ref_frame == get_pred_ref(cm, xd)));
set_pred_flag(xd, PRED_REF, ref_pred_flag);
/* test code: set transform size based on mode selection */
#if CONFIG_TX16X16
- if (x->e_mbd.mode_info_context->mbmi.mode <= TM_PRED ||
- x->e_mbd.mode_info_context->mbmi.mode == NEWMV ||
- x->e_mbd.mode_info_context->mbmi.mode == ZEROMV ||
- x->e_mbd.mode_info_context->mbmi.mode == NEARMV ||
- x->e_mbd.mode_info_context->mbmi.mode == NEARESTMV) {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_16X16;
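+ /* Allow the 16x16 transform only for modes that predict the whole macroblock with a single predictor. */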
+ if (mbmi->mode <= TM_PRED || mbmi->mode == NEWMV || mbmi->mode == ZEROMV ||
+ mbmi->mode == NEARMV || mbmi->mode == NEARESTMV) {
+ mbmi->txfm_size = TX_16X16;
cpi->t16x16_count++;
} else
#endif
if (cpi->common.txfm_mode == ALLOW_8X8
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
+ && mbmi->mode != I8X8_PRED
+ && mbmi->mode != B_PRED
+ && mbmi->mode != SPLITMV) {
+ mbmi->txfm_size = TX_8X8;
cpi->t8x8_count++;
} else {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
+ mbmi->txfm_size = TX_4X4;
cpi->t4x4_count++;
}
- if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
- if (xd->mode_info_context->mbmi.mode == B_PRED) {
+ if (mbmi->ref_frame == INTRA_FRAME) {
+ if (mbmi->mode == B_PRED) {
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
- } else if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+ } else if (mbmi->mode == I8X8_PRED) {
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
} else {
} else {
int ref_fb_idx;
- if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
+ if (mbmi->ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
- else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
+ else if (mbmi->ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
- if (xd->mode_info_context->mbmi.second_ref_frame) {
+ if (mbmi->second_ref_frame) {
int second_ref_fb_idx;
- if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
+ if (mbmi->second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.lst_fb_idx;
- else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
+ else if (mbmi->second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.gld_fb_idx;
else
second_ref_fb_idx = cpi->common.alt_fb_idx;
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
- xd->mode_info_context->mbmi.mb_skip_coeff = 0;
+ mbmi->mb_skip_coeff = 0;
} else {
vp8_build_1st_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
#ifdef ENC_DEBUG
if (enc_debug) {
int i;
- printf("Segment=%d [%d, %d]: %d %d:\n", x->e_mbd.mode_info_context->mbmi.segment_id, mb_col_debug, mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
+ printf("Segment=%d [%d, %d]: %d %d:\n", mbmi->segment_id, mb_col_debug,
+ mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
for (i = 0; i < 400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i % 16 == 15) printf("\n");
(x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
0;
if (cpi->common.mb_no_coeff_skip) {
- xd->mode_info_context->mbmi.mb_skip_coeff = 1;
+ mbmi->mb_skip_coeff = 1;
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp8_fix_contexts(xd);
} else {
vp8_stuff_mb(cpi, xd, t, !output_enabled);
- xd->mode_info_context->mbmi.mb_skip_coeff = 0;
+ mbmi->mb_skip_coeff = 0;
if (output_enabled)
cpi->skip_false_count[mb_skip_context]++;
}
int pt; /* surrounding block/prev coef predictor */
int const *scan, *band;
short *qcoeff_ptr = b->qcoeff;
+ MB_MODE_INFO *mbmi = &mb->e_mbd.mode_info_context->mbmi;
- int segment_id = mb->e_mbd.mode_info_context->mbmi.segment_id;
+ int segment_id = mbmi->segment_id;
switch (tx_type) {
case TX_4X4:
#if CONFIG_HYBRIDTRANSFORM
{
int active_ht = (mb->q_index < ACTIVE_HT) &&
- (mb->e_mbd.mode_info_context->mbmi.mode_rdopt == B_PRED);
+ (mbmi->mode_rdopt == B_PRED);
if((type == PLANE_TYPE_Y_WITH_DC) && active_ht) {
switch (b->bmi.as_mode.tx_type) {
static int vp8_rdcost_mby(MACROBLOCK *mb) {
int cost = 0;
int b;
- MACROBLOCKD *x = &mb->e_mbd;
+ MACROBLOCKD *xd = &mb->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 0; b < 16; b++)
- cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
+ cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_NO_DC,
ta + vp8_block2above[b], tl + vp8_block2left[b],
TX_4X4);
- cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2,
+ cost += cost_coeffs(mb, xd->block + 24, PLANE_TYPE_Y2,
ta + vp8_block2above[24], tl + vp8_block2left[24],
TX_4X4);
int *Distortion,
const VP8_ENCODER_RTCD *rtcd) {
int b;
- MACROBLOCKD *const x = &mb->e_mbd;
+ MACROBLOCKD *const xd = &mb->e_mbd;
BLOCK *const mb_y2 = mb->block + 24;
- BLOCKD *const x_y2 = x->block + 24;
+ BLOCKD *const x_y2 = xd->block + 24;
short *Y2DCPtr = mb_y2->src_diff;
BLOCK *beptr;
int d;
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
mb->src_diff,
*(mb->block[0].base_src),
- mb->e_mbd.predictor,
+ xd->predictor,
mb->block[0].src_stride);
// Fdct and building the 2nd order block
// Quantization
for (b = 0; b < 16; b++) {
- mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
+ mb->quantize_b(&mb->block[b], &xd->block[b]);
}
// DC predication and Quantization of 2nd Order block
static int vp8_rdcost_mby_8x8(MACROBLOCK *mb) {
int cost = 0;
int b;
- MACROBLOCKD *x = &mb->e_mbd;
+ MACROBLOCKD *xd = &mb->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 0; b < 16; b += 4)
- cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
+ cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_NO_DC,
ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
TX_8X8);
- cost += cost_coeffs_2x2(mb, x->block + 24, PLANE_TYPE_Y2,
+ cost += cost_coeffs_2x2(mb, xd->block + 24, PLANE_TYPE_Y2,
ta + vp8_block2above[24], tl + vp8_block2left[24]);
return cost;
}
int *Rate,
int *Distortion,
const VP8_ENCODER_RTCD *rtcd) {
- MACROBLOCKD *const x = &mb->e_mbd;
+ MACROBLOCKD *const xd = &mb->e_mbd;
BLOCK *const mb_y2 = mb->block + 24;
- BLOCKD *const x_y2 = x->block + 24;
+ BLOCKD *const x_y2 = xd->block + 24;
int d;
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
mb->src_diff,
*(mb->block[0].base_src),
- mb->e_mbd.predictor,
+ xd->predictor,
mb->block[0].src_stride);
vp8_transform_mby_8x8(mb);
mb->coeff[64] = 0;
mb->coeff[128] = 0;
mb->coeff[192] = 0;
- mb->e_mbd.dqcoeff[0] = 0;
- mb->e_mbd.dqcoeff[64] = 0;
- mb->e_mbd.dqcoeff[128] = 0;
- mb->e_mbd.dqcoeff[192] = 0;
+ xd->dqcoeff[0] = 0;
+ xd->dqcoeff[64] = 0;
+ xd->dqcoeff[128] = 0;
+ xd->dqcoeff[192] = 0;
d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
d += ENCODEMB_INVOKE(&rtcd->encodemb, berr)(mb_y2->coeff, x_y2->dqcoeff, 16);
#if CONFIG_TX16X16
static int vp8_rdcost_mby_16x16(MACROBLOCK *mb) {
int cost;
- MACROBLOCKD *x = &mb->e_mbd;
+ MACROBLOCKD *xd = &mb->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
- cost = cost_coeffs(mb, x->block, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_16X16);
+ cost = cost_coeffs(mb, xd->block, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_16X16);
return cost;
}
-
static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
const VP8_ENCODER_RTCD *rtcd) {
int d;
int *bmode_costs;
if (update_contexts) {
- ta = (ENTROPY_CONTEXT *)mb->e_mbd.above_context;
- tl = (ENTROPY_CONTEXT *)mb->e_mbd.left_context;
+ ta = (ENTROPY_CONTEXT *)xd->above_context;
+ tl = (ENTROPY_CONTEXT *)xd->left_context;
} else {
- vpx_memcpy(&t_above, mb->e_mbd.above_context,
+ vpx_memcpy(&t_above, xd->above_context,
sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context,
+ vpx_memcpy(&t_left, xd->left_context,
sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
#endif
int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);
- if (mb->e_mbd.frame_type == KEY_FRAME) {
+ if (xd->frame_type == KEY_FRAME) {
const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
const B_PREDICTION_MODE L = left_block_mode(mic, i);
MB_PREDICTION_MODE mode2;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode2_selected);
#endif
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
int rate, ratey;
int distortion;
int64_t best_rd = INT64_MAX;
// Y Search for 16x16 intra prediction mode
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
- x->e_mbd.mode_info_context->mbmi.mode = mode;
+ mbmi->mode = mode;
#if CONFIG_COMP_INTRA_PRED
for (mode2 = DC_PRED - 1; mode2 != TM_PRED + 1; mode2++) {
- x->e_mbd.mode_info_context->mbmi.second_mode = mode2;
+ mbmi->second_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
- RECON_INVOKE(&cpi->common.rtcd.recon, build_comp_intra_predictors_mby)(&x->e_mbd);
+ RECON_INVOKE(&cpi->common.rtcd.recon, build_comp_intra_predictors_mby)
+ (&x->e_mbd);
}
#endif
#endif
// FIXME add compoundmode cost
// FIXME add rate for mode2
- rate = ratey + x->mbmode_cost[x->e_mbd.frame_type]
- [x->e_mbd.mode_info_context->mbmi.mode];
+ rate = ratey + x->mbmode_cost[x->e_mbd.frame_type][mbmi->mode];
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
#endif
}
- x->e_mbd.mode_info_context->mbmi.mode = mode_selected;
+ mbmi->mode = mode_selected;
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->mbmi.second_mode = mode2_selected;
+ mbmi->second_mode = mode2_selected;
#endif
return best_rd;
}
int64_t best_rd = INT64_MAX;
int distortion, rate = 0;
BLOCK *be = x->block + ib;
- BLOCKD *b = x->e_mbd.block + ib;
+ BLOCKD *b = xd->block + ib;
ENTROPY_CONTEXT ta0, ta1, besta0 = 0, besta1 = 0;
ENTROPY_CONTEXT tl0, tl1, bestl0 = 0, bestl1 = 0;
ENTROPY_CONTEXT *ta, *tl;
int *i8x8mode_costs;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
static int rd_cost_mbuv(MACROBLOCK *mb) {
int b;
int cost = 0;
- MACROBLOCKD *x = &mb->e_mbd;
+ MACROBLOCKD *xd = &mb->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 16; b < 24; b++)
- cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
+ cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV,
ta + vp8_block2above[b], tl + vp8_block2left[b],
TX_4X4);
static int rd_cost_mbuv_8x8(MACROBLOCK *mb) {
int b;
int cost = 0;
- MACROBLOCKD *x = &mb->e_mbd;
+ MACROBLOCKD *xd = &mb->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl;
- vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
+ vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 16; b < 24; b += 4)
- cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
+ cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV,
ta + vp8_block2above_8x8[b],
tl + vp8_block2left_8x8[b], TX_8X8);
MB_PREDICTION_MODE mode2;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode2_selected);
#endif
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
int64_t best_rd = INT64_MAX;
int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
int rate_to;
int distortion;
int64_t this_rd;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
+ mbmi->uv_mode = mode;
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->mbmi.second_uv_mode = mode2;
+ mbmi->second_uv_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
rate_to = rd_cost_mbuv(x);
rate = rate_to
- + x->intra_uv_mode_cost[x->e_mbd.frame_type]
- [x->e_mbd.mode_info_context->mbmi.uv_mode];
+ + x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
*rate = r;
*distortion = d;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
+ mbmi->uv_mode = mode_selected;
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->mbmi.second_uv_mode = mode2_selected;
+ mbmi->second_uv_mode = mode2_selected;
#endif
}
int *distortion) {
MB_PREDICTION_MODE mode;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
int64_t best_rd = INT64_MAX;
int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
int rate_to;
int distortion;
int64_t this_rd;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
+ mbmi->uv_mode = mode;
RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
(&x->e_mbd);
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
vp8_quantize_mbuv_8x8(x);
rate_to = rd_cost_mbuv_8x8(x);
- rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type]
- [x->e_mbd.mode_info_context->mbmi.uv_mode];
+ rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
}
*rate = r;
*distortion = d;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
+ mbmi->uv_mode = mode_selected;
}
int vp8_cost_mv_ref(VP8_COMP *cpi,
int_mv *second_best_ref_mv, int *mvcost[2]) {
MACROBLOCKD *const xd = & x->e_mbd;
MODE_INFO *const mic = xd->mode_info_context;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
const int mis = xd->mode_info_stride;
int i, cost = 0, thismvcost = 0;
// is when we are on a new label (jbb May 08, 2007)
switch (m = this_mode) {
case NEW4X4 :
- if (xd->mode_info_context->mbmi.second_ref_frame) {
- this_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.ref_frame - 1].as_int;
- this_second_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.second_ref_frame - 1].as_int;
+ if (mbmi->second_ref_frame) {
+ this_mv->as_int = seg_mvs[mbmi->ref_frame - 1].as_int;
+ this_second_mv->as_int =
+ seg_mvs[mbmi->second_ref_frame - 1].as_int;
}
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
102, xd->allow_high_precision_mv);
- if (xd->mode_info_context->mbmi.second_ref_frame) {
+ if (mbmi->second_ref_frame) {
thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv,
mvcost, 102,
xd->allow_high_precision_mv);
break;
case LEFT4X4:
this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int : left_block_mv(mic, i);
- if (xd->mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
this_second_mv->as_int = col ? d[-1].bmi.as_mv.second.as_int : left_block_second_mv(mic, i);
break;
case ABOVE4X4:
this_mv->as_int = row ? d[-4].bmi.as_mv.first.as_int : above_block_mv(mic, i, mis);
- if (xd->mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
this_second_mv->as_int = row ? d[-4].bmi.as_mv.second.as_int : above_block_second_mv(mic, i, mis);
break;
case ZERO4X4:
this_mv->as_int = 0;
- if (xd->mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
this_second_mv->as_int = 0;
break;
default:
left_mv.as_int = col ? d[-1].bmi.as_mv.first.as_int :
left_block_mv(mic, i);
- if (xd->mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
left_second_mv.as_int = col ? d[-1].bmi.as_mv.second.as_int :
left_block_second_mv(mic, i);
if (left_mv.as_int == this_mv->as_int &&
- (!xd->mode_info_context->mbmi.second_ref_frame ||
+ (!mbmi->second_ref_frame ||
left_second_mv.as_int == this_second_mv->as_int))
m = LEFT4X4;
}
}
d->bmi.as_mv.first.as_int = this_mv->as_int;
- if (xd->mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
d->bmi.as_mv.second.as_int = this_second_mv->as_int;
x->partition_info->bmi[i].mode = m;
x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
- if (xd->mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
}
int which_label, ENTROPY_CONTEXT *ta,
ENTROPY_CONTEXT *tl) {
int b, cost = 0;
- MACROBLOCKD *x = &mb->e_mbd;
+ MACROBLOCKD *xd = &mb->e_mbd;
for (b = 0; b < 16; b++)
if (labels[ b] == which_label)
- cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC,
+ cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_WITH_DC,
ta + vp8_block2above[b],
tl + vp8_block2left[b], TX_4X4);
int const *labels;
int br = 0, bd = 0;
B_PREDICTION_MODE this_mode;
-
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
int label_count;
int64_t this_segment_rd = 0;
tl_s = (ENTROPY_CONTEXT *)&t_left_s;
// motion search for newmv (single predictor case only)
- if (!x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4) {
+ if (!mbmi->second_ref_frame && this_mode == NEW4X4) {
int sseshift, n;
int step_param = 0;
int further_steps;
&distortion, &sse);
// safe motion search result for use in compound prediction
- seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
+ seg_mvs[i][mbmi->ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
}
} /* NEW4X4 */
- else if (x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4) {
+ else if (mbmi->second_ref_frame && this_mode == NEW4X4) {
// motion search not completed? Then skip newmv for this block with comppred
- if (seg_mvs[i][x->e_mbd.mode_info_context->mbmi.second_ref_frame - 1].as_int == INVALID_MV ||
- seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int == INVALID_MV) {
+ if (seg_mvs[i][mbmi->second_ref_frame - 1].as_int == INVALID_MV ||
+ seg_mvs[i][mbmi->ref_frame - 1].as_int == INVALID_MV) {
continue;
}
}
rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
- &second_mode_mv[this_mode], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
+ &second_mode_mv[this_mode], seg_mvs[i],
+ bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
// Trap vectors that reach beyond the UMV borders
- if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
- ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
+ if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
+ ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
+ ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
continue;
}
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame &&
+ if (mbmi->second_ref_frame &&
mv_check_bounds(x, &second_mode_mv[this_mode]))
continue;
BLOCKD *bd = &x->e_mbd.block[i];
bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
bsi->second_mvs[i].as_mv = x->partition_info->bmi[i].second_mv.as_mv;
bsi->modes[i] = x->partition_info->bmi[i].mode;
bsi->eobs[i] = bd->eob;
int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1]) {
int i;
BEST_SEG_INFO bsi;
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
vpx_memset(&bsi, 0, sizeof(bsi));
BLOCKD *bd = &x->e_mbd.block[i];
bd->bmi.as_mv.first.as_int = bsi.mvs[i].as_int;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
bd->bmi.as_mv.second.as_int = bsi.second_mvs[i].as_int;
bd->eob = bsi.eobs[i];
}
*returnyrate = bsi.segment_yrate;
/* save partitions */
- x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num;
+ mbmi->partitioning = bsi.segment_num;
x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
for (i = 0; i < x->partition_info->count; i++) {
x->partition_info->bmi[i].mode = bsi.modes[j];
x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
x->partition_info->bmi[i].second_mv.as_mv = bsi.second_mvs[j].as_mv;
}
/*
- * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
+ * used to set mbmi->mv.as_int
*/
x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
x->partition_info->bmi[15].second_mv.as_int = bsi.second_mvs[15].as_int;
return bsi.segment_rd;
void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
- if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV) {
+
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
+
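+ /* Motion vector statistics are accumulated as offsets from the best reference MV. */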
+ if (mbmi->mode == SPLITMV) {
int i;
for (i = 0; i < x->partition_info->count; i++) {
- best_ref_mv->as_mv.row)]++;
cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.col
- best_ref_mv->as_mv.col)]++;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ if (mbmi->second_ref_frame) {
cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.row
- second_best_ref_mv->as_mv.row)]++;
cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.col
- best_ref_mv->as_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col
- best_ref_mv->as_mv.col) >> 1)]++;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ if (mbmi->second_ref_frame) {
cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.row
- second_best_ref_mv->as_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.col
}
}
}
- } else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV) {
+ } else if (mbmi->mode == NEWMV) {
if (x->e_mbd.allow_high_precision_mv) {
- cpi->MVcount_hp[0][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
+ cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv.as_mv.row
- best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
+ cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv.as_mv.col
- best_ref_mv->as_mv.col)]++;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
- cpi->MVcount_hp[0][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row
+ if (mbmi->second_ref_frame) {
+ cpi->MVcount_hp[0][mv_max_hp + (mbmi->second_mv.as_mv.row
- second_best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col
+ cpi->MVcount_hp[1][mv_max_hp + (mbmi->second_mv.as_mv.col
- second_best_ref_mv->as_mv.col)]++;
}
} else
{
- cpi->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
+ cpi->MVcount[0][mv_max + ((mbmi->mv.as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
+ cpi->MVcount[1][mv_max + ((mbmi->mv.as_mv.col
- best_ref_mv->as_mv.col) >> 1)]++;
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
- cpi->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row
+ if (mbmi->second_ref_frame) {
+ cpi->MVcount[0][mv_max + ((mbmi->second_mv.as_mv.row
- second_best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col
+ cpi->MVcount[1][mv_max + ((mbmi->second_mv.as_mv.col
- second_best_ref_mv->as_mv.col) >> 1)]++;
}
}
MACROBLOCKD *xd = &x->e_mbd;
for (i = 0; i < 4; i++) {
int ib = vp8_i8x8_block[i];
- x->e_mbd.mode_info_context->bmi[ib + 0].as_mode.first = modes[0][i];
- x->e_mbd.mode_info_context->bmi[ib + 1].as_mode.first = modes[0][i];
- x->e_mbd.mode_info_context->bmi[ib + 4].as_mode.first = modes[0][i];
- x->e_mbd.mode_info_context->bmi[ib + 5].as_mode.first = modes[0][i];
+ xd->mode_info_context->bmi[ib + 0].as_mode.first = modes[0][i];
+ xd->mode_info_context->bmi[ib + 1].as_mode.first = modes[0][i];
+ xd->mode_info_context->bmi[ib + 4].as_mode.first = modes[0][i];
+ xd->mode_info_context->bmi[ib + 5].as_mode.first = modes[0][i];
#if CONFIG_COMP_INTRA_PRED
- x->e_mbd.mode_info_context->bmi[ib + 0].as_mode.second = modes[1][i];
- x->e_mbd.mode_info_context->bmi[ib + 1].as_mode.second = modes[1][i];
- x->e_mbd.mode_info_context->bmi[ib + 4].as_mode.second = modes[1][i];
- x->e_mbd.mode_info_context->bmi[ib + 5].as_mode.second = modes[1][i];
+ xd->mode_info_context->bmi[ib + 0].as_mode.second = modes[1][i];
+ xd->mode_info_context->bmi[ib + 1].as_mode.second = modes[1][i];
+ xd->mode_info_context->bmi[ib + 4].as_mode.second = modes[1][i];
+ xd->mode_info_context->bmi[ib + 5].as_mode.second = modes[1][i];
#endif
// printf("%d,%d,%d,%d %d,%d,%d,%d\n",
// modes[0][0], modes[0][1], modes[0][2], modes[0][3],
int_mv best_ref_mv, second_best_ref_mv;
int_mv mode_mv[MB_MODE_COUNT];
MB_PREDICTION_MODE this_mode;
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
int i, best_mode_index = 0;
int mode8x8[2][4];
- unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
+ unsigned char segment_id = mbmi->segment_id;
int mode_index;
int mdcounts[4];
vpx_memset(mode_mv, 0, sizeof(mode_mv));
- x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+ mbmi->ref_frame = INTRA_FRAME;
/* Initialize zbin mode boost for uv costing */
cpi->zbin_mode_boost = 0;
rd_pick_intra_mbuv_mode(cpi, x, &uv_intra_rate,
&uv_intra_rate_tokenonly, &uv_intra_distortion);
- uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
+ uv_intra_mode = mbmi->uv_mode;
uv_intra_skippable = mbuv_is_skippable(&x->e_mbd);
/* rough estimate for now */
rd_pick_intra_mbuv_mode_8x8(cpi, x, &uv_intra_rate_8x8,
&uv_intra_rate_tokenonly_8x8,
&uv_intra_distortion_8x8);
- uv_intra_mode_8x8 = x->e_mbd.mode_info_context->mbmi.uv_mode;
+ uv_intra_mode_8x8 = mbmi->uv_mode;
uv_intra_skippable_8x8 = mbuv_is_skippable_8x8(&x->e_mbd);
}
rate_uv = 0;
this_mode = vp8_mode_order[mode_index].mode;
- xd->mode_info_context->mbmi.mode = this_mode;
- xd->mode_info_context->mbmi.uv_mode = DC_PRED;
- xd->mode_info_context->mbmi.ref_frame =
- vp8_mode_order[mode_index].ref_frame;
- xd->mode_info_context->mbmi.second_ref_frame =
- vp8_mode_order[mode_index].second_ref_frame;
+ mbmi->mode = this_mode;
+ mbmi->uv_mode = DC_PRED;
+ mbmi->ref_frame = vp8_mode_order[mode_index].ref_frame;
+ mbmi->second_ref_frame = vp8_mode_order[mode_index].second_ref_frame;
#if CONFIG_NEWBESTREFMV
- x->e_mbd.mode_info_context->mbmi.ref_mv =
- ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
- x->e_mbd.mode_info_context->mbmi.second_ref_mv =
- ref_mv[x->e_mbd.mode_info_context->mbmi.second_ref_frame];
+ mbmi->ref_mv = ref_mv[mbmi->ref_frame];
+ mbmi->second_ref_mv = ref_mv[mbmi->second_ref_frame];
#endif
#if CONFIG_PRED_FILTER
- xd->mode_info_context->mbmi.pred_filter_enabled = 0;
+ mbmi->pred_filter_enabled = 0;
#endif
#if CONFIG_SWITCHABLE_INTERP
if (cpi->common.mcomp_filter_type == SWITCHABLE &&
this_mode >= NEARESTMV && this_mode <= SPLITMV) {
- xd->mode_info_context->mbmi.interp_filter =
+ mbmi->interp_filter =
vp8_switchable_interp[switchable_filter_index++];
if (switchable_filter_index == VP8_SWITCHABLE_FILTERS)
switchable_filter_index = 0;
//printf("Searching %d (%d)\n", this_mode, switchable_filter_index);
} else {
- xd->mode_info_context->mbmi.interp_filter = cpi->common.mcomp_filter_type;
+ mbmi->interp_filter = cpi->common.mcomp_filter_type;
}
- vp8_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter,
- &cpi->common);
+ vp8_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
#endif
// Test best rd so far against threshold for trying this mode.
// current coding mode under rate-distortion optimization test loop
#if CONFIG_HYBRIDTRANSFORM
- xd->mode_info_context->mbmi.mode_rdopt = this_mode;
+ mbmi->mode_rdopt = this_mode;
#endif
+
#if CONFIG_COMP_INTRA_PRED
- xd->mode_info_context->mbmi.second_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
- xd->mode_info_context->mbmi.second_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
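+ /* DC_PRED - 1 marks the compound (second) intra predictor as unused for this candidate mode. */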
+ mbmi->second_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
+ mbmi->second_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
#endif
// If the segment reference frame feature is enabled....
// then do nothing if the current ref frame is not allowed..
if (segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
- !check_segref(xd, segment_id,
- xd->mode_info_context->mbmi.ref_frame)) {
+ !check_segref(xd, segment_id, mbmi->ref_frame)) {
continue;
}
// If the segment mode feature is enabled....
// an unfiltered alternative
if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
if (this_mode != ZEROMV ||
- x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
+ mbmi->ref_frame != ALTREF_FRAME) {
continue;
}
}
}
/* everything but intra */
- if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
- int ref = x->e_mbd.mode_info_context->mbmi.ref_frame;
+ if (mbmi->ref_frame) {
+ int ref = mbmi->ref_frame;
x->e_mbd.pre.y_buffer = y_buffer[ref];
x->e_mbd.pre.u_buffer = u_buffer[ref];
vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
}
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
- int ref = x->e_mbd.mode_info_context->mbmi.second_ref_frame;
+ if (mbmi->second_ref_frame) {
+ int ref = mbmi->second_ref_frame;
x->e_mbd.second_pre.y_buffer = y_buffer[ref];
x->e_mbd.second_pre.u_buffer = u_buffer[ref];
}
// Intra
- if (!x->e_mbd.mode_info_context->mbmi.ref_frame) {
+ if (!mbmi->ref_frame) {
switch (this_mode) {
default:
case DC_PRED:
case D153_PRED:
case D27_PRED:
case D63_PRED:
- x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+ mbmi->ref_frame = INTRA_FRAME;
// FIXME compound intra prediction
RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
(&x->e_mbd);
IF_RTCD(&cpi->rtcd));
rate2 += rate_y;
distortion2 += distortion;
- rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
+ rate2 += x->mbmode_cost[x->e_mbd.frame_type][mbmi->mode];
rate2 += uv_intra_rate_8x8;
rate_uv = uv_intra_rate_tokenonly_8x8;
distortion2 += uv_intra_distortion_8x8;
IF_RTCD(&cpi->rtcd));
rate2 += rate_y;
distortion2 += distortion;
- rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
+ rate2 += x->mbmode_cost[x->e_mbd.frame_type][mbmi->mode];
if (cpi->common.txfm_mode == ALLOW_8X8) {
rate2 += uv_intra_rate_8x8;
rate_uv = uv_intra_rate_tokenonly_8x8;
// special case it.
else if (this_mode == SPLITMV) {
int64_t tmp_rd, this_rd_thresh;
- int is_comp_pred = x->e_mbd.mode_info_context->mbmi.second_ref_frame != 0;
+ int is_comp_pred = mbmi->second_ref_frame != 0;
int_mv *second_ref = is_comp_pred ? &second_best_ref_mv : NULL;
this_rd_thresh =
- (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ?
+ (mbmi->ref_frame == LAST_FRAME) ?
cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
this_rd_thresh =
- (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ?
+ (mbmi->ref_frame == GOLDEN_FRAME) ?
cpi->rd_threshes[THR_NEWG] : this_rd_thresh;
tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
if (cpi->common.mcomp_filter_type == SWITCHABLE)
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
[get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
- [vp8_switchable_interp_map[
- x->e_mbd.mode_info_context->mbmi.interp_filter]];
+ [vp8_switchable_interp_map[mbmi->interp_filter]];
#endif
// If even the 'Y' rd value of split is higher than best so far
// then dont bother looking at UV
compmode_cost =
vp8_cost_bit(get_pred_prob(cm, xd, PRED_COMP), is_comp_pred);
- x->e_mbd.mode_info_context->mbmi.mode = this_mode;
+ mbmi->mode = this_mode;
}
// Single prediction inter
- else if (!x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ else if (!mbmi->second_ref_frame) {
switch (this_mode) {
case NEWMV: {
int bestsme = INT_MAX;
}
vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
- x->e_mbd.mode_info_context->mbmi.ref_frame,
- cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
+ mbmi->ref_frame, cpi->common.ref_frame_sign_bias,
+ &sr, &near_sadidx[0]);
mvp_full.as_mv.col = mvp.as_mv.col >> 3;
mvp_full.as_mv.row = mvp.as_mv.row >> 3;
&cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &dis, &sse);
}
- mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.as_mv.first.as_int;
+ mc_search_result[mbmi->ref_frame].as_int = d->bmi.as_mv.first.as_int;
mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
#if CONFIG_PRED_FILTER
// Filtered prediction:
- xd->mode_info_context->mbmi.pred_filter_enabled =
+ mbmi->pred_filter_enabled =
vp8_mode_order[mode_index].pred_filter_flag;
rate2 += vp8_cost_bit(cpi->common.prob_pred_filter_off,
- xd->mode_info_context->mbmi.pred_filter_enabled);
+ mbmi->pred_filter_enabled);
#endif
#if CONFIG_SWITCHABLE_INTERP
if (cpi->common.mcomp_filter_type == SWITCHABLE)
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
[get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
- [vp8_switchable_interp_map[
- x->e_mbd.mode_info_context->mbmi.interp_filter]];
+ [vp8_switchable_interp_map[mbmi->interp_filter]];
#endif
vp8_build_1st_inter16x16_predictors_mby(&x->e_mbd,
default:
break;
}
- } else { /* x->e_mbd.mode_info_context->mbmi.second_ref_frame != 0 */
- int ref1 = x->e_mbd.mode_info_context->mbmi.ref_frame;
- int ref2 = x->e_mbd.mode_info_context->mbmi.second_ref_frame;
+ } else { /* mbmi->second_ref_frame != 0 */
+ int ref1 = mbmi->ref_frame;
+ int ref2 = mbmi->second_ref_frame;
mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
switch (this_mode) {
if (mc_search_result[ref1].as_int == INVALID_MV ||
mc_search_result[ref2].as_int == INVALID_MV)
continue;
- x->e_mbd.mode_info_context->mbmi.mv.as_int = mc_search_result[ref1].as_int;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int = mc_search_result[ref2].as_int;
+ mbmi->mv.as_int = mc_search_result[ref1].as_int;
+ mbmi->second_mv.as_int = mc_search_result[ref2].as_int;
rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
&frame_best_ref_mv[ref1],
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
break;
case ZEROMV:
- x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int = 0;
+ mbmi->mv.as_int = 0;
+ mbmi->second_mv.as_int = 0;
break;
case NEARMV:
- if (frame_near_mv[ref1].as_int == 0 || frame_near_mv[ref2].as_int == 0)
+ if (frame_near_mv[ref1].as_int == 0 ||
+ frame_near_mv[ref2].as_int == 0)
continue;
- x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_near_mv[ref1].as_int;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_near_mv[ref2].as_int;
+ mbmi->mv.as_int = frame_near_mv[ref1].as_int;
+ mbmi->second_mv.as_int = frame_near_mv[ref2].as_int;
break;
case NEARESTMV:
- if (frame_nearest_mv[ref1].as_int == 0 || frame_nearest_mv[ref2].as_int == 0)
+ if (frame_nearest_mv[ref1].as_int == 0 ||
+ frame_nearest_mv[ref2].as_int == 0)
continue;
- x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_nearest_mv[ref1].as_int;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_nearest_mv[ref2].as_int;
+ mbmi->mv.as_int = frame_nearest_mv[ref1].as_int;
+ mbmi->second_mv.as_int = frame_nearest_mv[ref2].as_int;
break;
default:
break;
/* Add in the Mv/mode cost */
rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
- vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.mv, xd);
- vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.second_mv, xd);
- if (mv_check_bounds(x, &x->e_mbd.mode_info_context->mbmi.mv))
+ vp8_clamp_mv2(&mbmi->mv, xd);
+ vp8_clamp_mv2(&mbmi->second_mv, xd);
+ if (mv_check_bounds(x, &mbmi->mv))
continue;
- if (mv_check_bounds(x, &x->e_mbd.mode_info_context->mbmi.second_mv))
+ if (mv_check_bounds(x, &mbmi->second_mv))
continue;
/* build first and second prediction */
&rate_y, &distortion, &rate_uv, &distortion_uv);
/* don't bother w/ skip, we would never have come here if skip were enabled */
- x->e_mbd.mode_info_context->mbmi.mode = this_mode;
+ mbmi->mode = this_mode;
}
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
// Estimate the reference frame signaling cost and add it
// to the rolling cost variable.
- rate2 += ref_costs[x->e_mbd.mode_info_context->mbmi.ref_frame];
+ rate2 += ref_costs[mbmi->ref_frame];
if (!disable_skip) {
// Test for the condition where skip block will be activated
else
#endif
if ((cpi->common.txfm_mode == ALLOW_8X8) && has_y2) {
- if (x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME)
+ if (mbmi->ref_frame != INTRA_FRAME)
mb_skippable = mb_is_skippable_8x8(&x->e_mbd);
else
mb_skippable = uv_intra_skippable_8x8
& mby_is_skippable_8x8(&x->e_mbd);
} else {
- if (x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME)
+ if (mbmi->ref_frame != INTRA_FRAME)
mb_skippable = mb_is_skippable(&x->e_mbd, has_y2);
else
mb_skippable = uv_intra_skippable
}
// Keep record of best intra distortion
- if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
+ if ((mbmi->ref_frame == INTRA_FRAME) &&
(this_rd < best_intra_rd)) {
best_intra_rd = this_rd;
*returnintra = distortion2;
}
- if (!disable_skip && x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
+ if (!disable_skip && mbmi->ref_frame == INTRA_FRAME) {
best_comp_rd = MIN(best_comp_rd, this_rd);
best_single_rd = MIN(best_single_rd, this_rd);
best_hybrid_rd = MIN(best_hybrid_rd, this_rd);
// Keep track of the best mode irrespective of prediction filter state
if (this_rd < best_overall_rd) {
best_overall_rd = this_rd;
- best_filter_state = xd->mode_info_context->mbmi.pred_filter_enabled;
+ best_filter_state = mbmi->pred_filter_enabled;
}
// Ignore modes where the prediction filter state doesn't
// match the state signaled at the frame level
if ((cm->pred_filter_mode == 2) ||
(cm->pred_filter_mode ==
- xd->mode_info_context->mbmi.pred_filter_enabled)) {
+ mbmi->pred_filter_enabled)) {
#endif
// Did this mode help.. i.e. is it the new best mode
if (this_rd < best_rd || x->skip) {
if (cpi->common.txfm_mode == ALLOW_8X8
&& this_mode != B_PRED
&& this_mode != I8X8_PRED)
- x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode_8x8;
+ mbmi->uv_mode = uv_intra_mode_8x8;
else
- x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode;
+ mbmi->uv_mode = uv_intra_mode;
/* required for left and above block mv */
- x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
+ mbmi->mv.as_int = 0;
}
- other_cost +=
- ref_costs[x->e_mbd.mode_info_context->mbmi.ref_frame];
+ other_cost += ref_costs[mbmi->ref_frame];
/* Calculate the final y RD estimate for this mode */
best_yrd = RDCOST(x->rdmult, x->rddiv, (rate2 - rate_uv - other_cost),
*returnrate = rate2;
*returndistortion = distortion2;
best_rd = this_rd;
- vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
+ vpx_memcpy(&best_mbmode, mbmi, sizeof(MB_MODE_INFO));
vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
if ((this_mode == B_PRED)
/* keep record of best compound/single-only prediction */
if (!disable_skip &&
- x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
+ mbmi->ref_frame != INTRA_FRAME) {
int64_t single_rd, hybrid_rd;
int single_rate, hybrid_rate;
single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame == INTRA_FRAME &&
+ if (mbmi->second_ref_frame == INTRA_FRAME &&
single_rd < best_single_rd) {
best_single_rd = single_rd;
- } else if (x->e_mbd.mode_info_context->mbmi.second_ref_frame != INTRA_FRAME &&
+ } else if (mbmi->second_ref_frame != INTRA_FRAME &&
single_rd < best_comp_rd) {
best_comp_rd = single_rd;
}
cpi->is_src_frame_alt_ref &&
(cpi->oxcf.arnr_max_frames == 0) &&
(best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
- x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
- x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
- x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
- x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
- x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
+ mbmi->mode = ZEROMV;
+ mbmi->ref_frame = ALTREF_FRAME;
+ mbmi->mv.as_int = 0;
+ mbmi->uv_mode = DC_PRED;
+ mbmi->mb_skip_coeff =
(cpi->common.mb_no_coeff_skip) ? 1 : 0;
- x->e_mbd.mode_info_context->mbmi.partitioning = 0;
+ mbmi->partitioning = 0;
*best_single_rd_diff = *best_comp_rd_diff = *best_hybrid_rd_diff = 0;
store_coding_context(x, xd->mb_index, best_mode_index, &best_partition,
- &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame],
- &frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame]);
+ &frame_best_ref_mv[mbmi->ref_frame],
+ &frame_best_ref_mv[mbmi->second_ref_frame]);
return;
}
// macroblock modes
- vpx_memcpy(&x->e_mbd.mode_info_context->mbmi,
- &best_mbmode, sizeof(MB_MODE_INFO));
+ vpx_memcpy(mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
#if CONFIG_NEWBESTREFMV
- x->e_mbd.mode_info_context->mbmi.ref_mv =
- ref_mv[best_mbmode.ref_frame];
- x->e_mbd.mode_info_context->mbmi.second_ref_mv =
- ref_mv[best_mbmode.second_ref_frame];
+ mbmi->ref_mv = ref_mv[best_mbmode.ref_frame];
+ mbmi->second_ref_mv = ref_mv[best_mbmode.second_ref_frame];
#endif
if (best_mbmode.mode == B_PRED) {
for (i = 0; i < 16; i++) {
if (best_mbmode.mode == SPLITMV) {
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].as_mv.first.as_int = best_bmodes[i].as_mv.first.as_int;
- if (xd->mode_info_context->mbmi.second_ref_frame)
+ if (mbmi->second_ref_frame)
for (i = 0; i < 16; i++)
xd->mode_info_context->bmi[i].as_mv.second.as_int = best_bmodes[i].as_mv.second.as_int;
vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
- x->e_mbd.mode_info_context->mbmi.mv.as_int =
- x->partition_info->bmi[15].mv.as_int;
- x->e_mbd.mode_info_context->mbmi.second_mv.as_int =
- x->partition_info->bmi[15].second_mv.as_int;
+ mbmi->mv.as_int = x->partition_info->bmi[15].mv.as_int;
+ mbmi->second_mv.as_int = x->partition_info->bmi[15].second_mv.as_int;
}
if (best_single_rd == INT64_MAX)
*best_hybrid_rd_diff = best_rd - best_hybrid_rd;
store_coding_context(x, xd->mb_index, best_mode_index, &best_partition,
- &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame],
- &frame_best_ref_mv[xd->mode_info_context->mbmi.second_ref_frame]);
+ &frame_best_ref_mv[mbmi->ref_frame],
+ &frame_best_ref_mv[mbmi->second_ref_frame]);
}
int vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int64_t error4x4, error16x16;
#if CONFIG_COMP_INTRA_PRED
int64_t error4x4d;
int mode16x16;
int mode8x8[2][4];
- xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME;
+ mbmi->ref_frame = INTRA_FRAME;
- rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
- rate = rateuv;
+ rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
+ rate = rateuv;
- // current macroblock under rate-distortion optimization test loop
+ // current macroblock under rate-distortion optimization test loop
#if CONFIG_HYBRIDTRANSFORM
- xd->mode_info_context->mbmi.mode_rdopt = DC_PRED;
+ mbmi->mode_rdopt = DC_PRED;
#endif
error16x16 = rd_pick_intra16x16mby_mode(cpi, x, &rate16x16,
&rate16x16_tokenonly, &dist16x16);
- mode16x16 = xd->mode_info_context->mbmi.mode;
+ mode16x16 = mbmi->mode;
#if CONFIG_HYBRIDTRANSFORM
- xd->mode_info_context->mbmi.mode_rdopt = I8X8_PRED;
+ mbmi->mode_rdopt = I8X8_PRED;
#endif
error8x8 = rd_pick_intra8x8mby_modes(cpi, x, &rate8x8, &rate8x8_tokenonly,
#endif
#if CONFIG_HYBRIDTRANSFORM
- xd->mode_info_context->mbmi.mode_rdopt = B_PRED;
+ mbmi->mode_rdopt = B_PRED;
#endif
error4x4 = rd_pick_intra4x4mby_modes(cpi, x,
#else
rate += rate4x4;
#endif
- xd->mode_info_context->mbmi.mode = B_PRED;
+ mbmi->mode = B_PRED;
} else {
- xd->mode_info_context->mbmi.mode = mode16x16;
+ mbmi->mode = mode16x16;
rate += rate16x16;
}
} else {
#else
rate += rate4x4;
#endif
- xd->mode_info_context->mbmi.mode = B_PRED;
+ mbmi->mode = B_PRED;
} else {
- xd->mode_info_context->mbmi.mode = I8X8_PRED;
+ mbmi->mode = I8X8_PRED;
set_i8x8_block_modes(x, mode8x8);
rate += rate8x8;
}
int recon_yoffset, int recon_uvoffset) {
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int rate;
int distortion;
int64_t intra_error = 0;
- unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
+ unsigned char *segment_id = &mbmi->segment_id;
if (xd->segmentation_enabled)
x->encode_breakout = cpi->segment_encode_breakout[*segment_id];
cpi->rd_single_diff += single;
cpi->rd_comp_diff += compound;
cpi->rd_hybrid_diff += hybrid;
- if (xd->mode_info_context->mbmi.ref_frame) {
+ if (mbmi->ref_frame) {
unsigned char pred_context;
pred_context = get_pred_context(cm, xd, PRED_COMP);
- if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
+ if (mbmi->second_ref_frame == INTRA_FRAME)
cpi->single_pred_count[pred_context]++;
else
cpi->comp_pred_count[pred_context]++;