const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
- const MB_MODE_INFO* mbmi = &xd->mi[0].src_mi->mbmi;
+ const MB_MODE_INFO* mbmi = &xd->mi[0]->mbmi;
// block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
// 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
// transform size varies per plane, look it up in a common way.
} MB_MODE_INFO;
typedef struct MODE_INFO {
- struct MODE_INFO *src_mi;
MB_MODE_INFO mbmi;
b_mode_info bmi[4];
} MODE_INFO;
int mi_stride;
- MODE_INFO *mi;
+ MODE_INFO **mi;
MODE_INFO *left_mi;
MODE_INFO *above_mi;
MB_MODE_INFO *left_mbmi;
static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type,
const MACROBLOCKD *xd) {
- const MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mbmi))
return DCT_DCT;
static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
const MACROBLOCKD *xd, int ib) {
- const MODE_INFO *const mi = xd->mi[0].src_mi;
+ const MODE_INFO *const mi = xd->mi[0];
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(&mi->mbmi))
return DCT_DCT;
fprintf(file, "%c ", prefix);
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(file, "%2d ",
- *((int*) ((char *) (&mi->src_mi->mbmi) +
+ *((int*) ((char *) (&mi->mbmi) +
member_offset)));
mi++;
}
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs, "S ");
for (mi_col = 0; mi_col < cols; mi_col++) {
- fprintf(mvs, "%2d ", mi->src_mi->mbmi.skip);
+ fprintf(mvs, "%2d ", mi->mbmi.skip);
mi++;
}
fprintf(mvs, "\n");
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs, "V ");
for (mi_col = 0; mi_col < cols; mi_col++) {
- fprintf(mvs, "%4d:%4d ", mi->src_mi->mbmi.mv[0].as_mv.row,
- mi->src_mi->mbmi.mv[0].as_mv.col);
+ fprintf(mvs, "%4d:%4d ", mi->mbmi.mv[0].as_mv.row,
+ mi->mbmi.mv[0].as_mv.col);
mi++;
}
fprintf(mvs, "\n");
static INLINE const scan_order *get_scan(const MACROBLOCKD *xd, TX_SIZE tx_size,
PLANE_TYPE type, int block_idx) {
- const MODE_INFO *const mi = xd->mi[0].src_mi;
+ const MODE_INFO *const mi = xd->mi[0];
if (is_inter_block(&mi->mbmi) || type != PLANE_TYPE_Y || xd->lossless) {
return &vp9_default_scan_orders[tx_size];
// by mi_row, mi_col.
// TODO(JBB): This function only works for yv12.
void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
- MODE_INFO *mi, const int mode_info_stride,
+ MODE_INFO **mi, const int mode_info_stride,
LOOP_FILTER_MASK *lfm) {
int idx_32, idx_16, idx_8;
const loop_filter_info_n *const lfi_n = &cm->lf_info;
- MODE_INFO *mip = mi;
- MODE_INFO *mip2 = mi;
+ MODE_INFO **mip = mi;
+ MODE_INFO **mip2 = mi;
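+ // mip and mip2 now walk the MODE_INFO pointer grid; the current block's
+ // mode info is reached through mip[0].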
// These are offsets to the next mi in the 64x64 block. It is what gets
// added to the mi ptr as we go through each loop. It helps us to avoid
cm->mi_cols - mi_col : MI_BLOCK_SIZE);
vp9_zero(*lfm);
- assert(mip != NULL);
+ assert(mip[0] != NULL);
// TODO(jimbankoski): Try moving most of the following code into decode
// loop and storing lfm in the mbmi structure so that we don't have to go
// through the recursive loop structure multiple times.
- switch (mip->mbmi.sb_type) {
+ switch (mip[0]->mbmi.sb_type) {
case BLOCK_64X64:
- build_masks(lfi_n, mip , 0, 0, lfm);
+ build_masks(lfi_n, mip[0], 0, 0, lfm);
break;
case BLOCK_64X32:
- build_masks(lfi_n, mip, 0, 0, lfm);
+ build_masks(lfi_n, mip[0], 0, 0, lfm);
mip2 = mip + mode_info_stride * 4;
if (4 >= max_rows)
break;
- build_masks(lfi_n, mip2, 32, 8, lfm);
+ build_masks(lfi_n, mip2[0], 32, 8, lfm);
break;
case BLOCK_32X64:
- build_masks(lfi_n, mip, 0, 0, lfm);
+ build_masks(lfi_n, mip[0], 0, 0, lfm);
mip2 = mip + 4;
if (4 >= max_cols)
break;
- build_masks(lfi_n, mip2, 4, 2, lfm);
+ build_masks(lfi_n, mip2[0], 4, 2, lfm);
break;
default:
for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
const int mi_32_row_offset = ((idx_32 >> 1) << 2);
if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
continue;
- switch (mip->mbmi.sb_type) {
+ switch (mip[0]->mbmi.sb_type) {
case BLOCK_32X32:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
case BLOCK_32X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
if (mi_32_row_offset + 2 >= max_rows)
continue;
mip2 = mip + mode_info_stride * 2;
- build_masks(lfi_n, mip2, shift_y + 16, shift_uv + 4, lfm);
+ build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm);
break;
case BLOCK_16X32:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
if (mi_32_col_offset + 2 >= max_cols)
continue;
mip2 = mip + 2;
- build_masks(lfi_n, mip2, shift_y + 2, shift_uv + 1, lfm);
+ build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm);
break;
default:
for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
continue;
- switch (mip->mbmi.sb_type) {
+ switch (mip[0]->mbmi.sb_type) {
case BLOCK_16X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
case BLOCK_16X8:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
if (mi_16_row_offset + 1 >= max_rows)
continue;
mip2 = mip + mode_info_stride;
- build_y_mask(lfi_n, mip2, shift_y+8, lfm);
+ build_y_mask(lfi_n, mip2[0], shift_y + 8, lfm);
break;
case BLOCK_8X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
if (mi_16_col_offset +1 >= max_cols)
continue;
mip2 = mip + 1;
- build_y_mask(lfi_n, mip2, shift_y+1, lfm);
+ build_y_mask(lfi_n, mip2[0], shift_y + 1, lfm);
break;
default: {
const int shift_y = shift_32_y[idx_32] +
shift_16_y[idx_16] +
shift_8_y[0];
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
mip += offset[0];
for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
const int shift_y = shift_32_y[idx_32] +
if (mi_8_col_offset >= max_cols ||
mi_8_row_offset >= max_rows)
continue;
- build_y_mask(lfi_n, mip, shift_y, lfm);
+ build_y_mask(lfi_n, mip[0], shift_y, lfm);
}
break;
}
void vp9_filter_block_plane_non420(VP9_COMMON *cm,
struct macroblockd_plane *plane,
- MODE_INFO *mi_8x8,
+ MODE_INFO **mi_8x8,
int mi_row, int mi_col) {
const int ss_x = plane->subsampling_x;
const int ss_y = plane->subsampling_y;
// Determine the vertical edges that need filtering
for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
- const MODE_INFO *mi = mi_8x8[c].src_mi;
+ const MODE_INFO *mi = mi_8x8[c];
const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi);
// left edge of current unit is block/partition edge -> no skip
path = LF_PATH_SLOW;
for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) {
- MODE_INFO *mi = cm->mi + mi_row * cm->mi_stride;
+ MODE_INFO **mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
int plane;
// by mi_row, mi_col.
void vp9_setup_mask(struct VP9Common *const cm,
const int mi_row, const int mi_col,
- MODE_INFO *mi_8x8, const int mode_info_stride,
+ MODE_INFO **mi_8x8, const int mode_info_stride,
LOOP_FILTER_MASK *lfm);
void vp9_filter_block_plane_ss00(struct VP9Common *const cm,
void vp9_filter_block_plane_non420(struct VP9Common *cm,
struct macroblockd_plane *plane,
- MODE_INFO *mi_8x8,
+ MODE_INFO **mi_8x8,
int mi_row, int mi_col);
void vp9_loop_filter_init(struct VP9Common *cm);
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
- xd->mi_stride].src_mi;
+ xd->mi_stride];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
// Keep counts for entropy encoding.
context_counter += mode_2_counter[candidate->mode];
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *
- xd->mi_stride].src_mi->mbmi;
+ xd->mi_stride]->mbmi;
different_ref_found = 1;
if (candidate->ref_frame[0] == ref_frame)
const POSITION *mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row
- * xd->mi_stride].src_mi->mbmi;
+ * xd->mi_stride]->mbmi;
// If the candidate is INTRA we don't want to consider its mv.
IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
int block, int ref, int mi_row, int mi_col,
int_mv *nearest_mv, int_mv *near_mv) {
int_mv mv_list[MAX_MV_REF_CANDIDATES];
- MODE_INFO *const mi = xd->mi[0].src_mi;
+ MODE_INFO *const mi = xd->mi[0];
b_mode_info *bmi = mi->bmi;
int n;
void (*free_mi)(struct VP9Common *cm);
void (*setup_mi)(struct VP9Common *cm);
+ // Grid of pointers to 8x8 MODE_INFO structs. Any 8x8 not in the visible
+ // area will be NULL.
+ MODE_INFO **mi_grid_base;
+ MODE_INFO **mi_grid_visible;
+ MODE_INFO **prev_mi_grid_base;
+ MODE_INFO **prev_mi_grid_visible;
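+ // Grid entries point into cm->mi / cm->prev_mi; the *_visible pointers skip
+ // the dummy border row and column, just as cm->mi does for cm->mip.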
// Whether to use previous frame's motion vectors for prediction.
int use_prev_frame_mvs;
xd->up_available = (mi_row != 0);
xd->left_available = (mi_col > tile->mi_col_start);
if (xd->up_available) {
- xd->above_mi = xd->mi[-xd->mi_stride].src_mi;
+ xd->above_mi = xd->mi[-xd->mi_stride];
xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL;
} else {
xd->above_mi = NULL;
}
if (xd->left_available) {
- xd->left_mi = xd->mi[-1].src_mi;
+ xd->left_mi = xd->mi[-1];
xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL;
} else {
xd->left_mi = NULL;
// left of the entries corresponding to real blocks.
// The prediction flags in these dummy entries are initialized to 0.
int vp9_get_tx_size_context(const MACROBLOCKD *xd) {
- const int max_tx_size = max_txsize_lookup[xd->mi[0].src_mi->mbmi.sb_type];
+ const int max_tx_size = max_txsize_lookup[xd->mi[0]->mbmi.sb_type];
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const int has_above = xd->up_available;
int x, int y, int w, int h,
int mi_x, int mi_y) {
struct macroblockd_plane *const pd = &xd->plane[plane];
- const MODE_INFO *mi = xd->mi[0].src_mi;
+ const MODE_INFO *mi = xd->mi[0];
const int is_compound = has_second_ref(&mi->mbmi);
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
int ref;
const int bw = 4 * num_4x4_w;
const int bh = 4 * num_4x4_h;
- if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
+ if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
int i = 0, x, y;
assert(bsize == BLOCK_8X8);
for (y = 0; y < num_4x4_h; ++y)
for (mi_row = start; mi_row < stop;
mi_row += lf_sync->num_workers * MI_BLOCK_SIZE) {
- MODE_INFO *const mi = cm->mi + mi_row * cm->mi_stride;
+ MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
const int r = mi_row >> MI_BLOCK_SIZE_LOG2;
VP9_COMMON *const cm = args->cm;
MACROBLOCKD *const xd = args->xd;
struct macroblockd_plane *const pd = &xd->plane[plane];
- MODE_INFO *const mi = xd->mi[0].src_mi;
+ MODE_INFO *const mi = xd->mi[0];
const PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block)
: mi->mbmi.uv_mode;
const int16_t *const dequant = (plane == 0) ? args->y_dequant
const int offset = mi_row * cm->mi_stride + mi_col;
int x, y;
- xd->mi = cm->mi + offset;
- xd->mi[0].src_mi = &xd->mi[0]; // Point to self.
- xd->mi[0].mbmi.sb_type = bsize;
-
+ xd->mi = cm->mi_grid_visible + offset;
+ xd->mi[0] = &cm->mi[offset];
+ xd->mi[0]->mbmi.sb_type = bsize;
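+ // Every grid entry covered by this block shares the same MODE_INFO.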
for (y = 0; y < y_mis; ++y)
for (x = !y; x < x_mis; ++x) {
- xd->mi[y * cm->mi_stride + x].src_mi = &xd->mi[0];
+ xd->mi[y * cm->mi_stride + x] = xd->mi[0];
}
set_skip_context(xd, mi_row, mi_col);
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
- return &xd->mi[0].mbmi;
+ return &xd->mi[0]->mbmi;
}
static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
int plane;
const int mi_x = mi_col * MI_SIZE;
const int mi_y = mi_row * MI_SIZE;
- const MODE_INFO *mi = xd->mi[0].src_mi;
+ const MODE_INFO *mi = xd->mi[0];
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
const BLOCK_SIZE sb_type = mi->mbmi.sb_type;
const int is_compound = has_second_ref(&mi->mbmi);
FRAME_COUNTS *counts,
int allow_select, vp9_reader *r) {
TX_MODE tx_mode = cm->tx_mode;
- BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
+ BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8)
return read_selected_tx_size(cm, xd, counts, max_tx_size, r);
static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
int mi_row, int mi_col, vp9_reader *r) {
struct segmentation *const seg = &cm->seg;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
int predicted_segment_id, segment_id;
MACROBLOCKD *const xd,
FRAME_COUNTS *counts,
int mi_row, int mi_col, vp9_reader *r) {
- MODE_INFO *const mi = xd->mi[0].src_mi;
+ MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
- const MODE_INFO *above_mi = xd->mi[-cm->mi_stride].src_mi;
- const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
+ const MODE_INFO *above_mi = xd->mi[-cm->mi_stride];
+ const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1] : NULL;
const BLOCK_SIZE bsize = mbmi->sb_type;
int i;
const TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
- MODE_INFO *const mi = xd->mi[0].src_mi;
+ MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
int inter_block;
const TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
- MODE_INFO *const mi = xd->mi[0].src_mi;
+ MODE_INFO *const mi = xd->mi[0];
const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
const int x_mis = MIN(bw, cm->mi_cols - mi_col);
MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
for (w = 0; w < x_mis; ++w) {
MV_REF *const mv = frame_mv + w;
- mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
- mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
- mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
- mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
+ mv->ref_frame[0] = mi->mbmi.ref_frame[0];
+ mv->ref_frame[1] = mi->mbmi.ref_frame[1];
+ mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
+ mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
}
}
}
static void vp9_dec_setup_mi(VP9_COMMON *cm) {
cm->mi = cm->mip + cm->mi_stride + 1;
vpx_memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
+ vpx_memset(cm->mi_grid_base, 0,
+ cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
if (!cm->mip)
return 1;
cm->mi_alloc_size = mi_size;
+ cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
+ if (!cm->mi_grid_base)
+ return 1;
return 0;
}
static void vp9_dec_free_mi(VP9_COMMON *cm) {
vpx_free(cm->mip);
cm->mip = NULL;
+ vpx_free(cm->mi_grid_base);
+ cm->mi_grid_base = NULL;
}
VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
vp9_reader *r) {
const int max_eob = 16 << (tx_size << 1);
const FRAME_CONTEXT *const fc = cm->fc;
- const int ref = is_inter_block(&xd->mi[0].src_mi->mbmi);
+ const int ref = is_inter_block(&xd->mi[0]->mbmi);
int band, c = 0;
const vp9_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size][type][ref];
static void write_selected_tx_size(const VP9_COMMON *cm,
const MACROBLOCKD *xd, vp9_writer *w) {
- TX_SIZE tx_size = xd->mi[0].src_mi->mbmi.tx_size;
- BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
+ TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
+ BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
&cm->fc->tx_probs);
// This function encodes the reference frame
static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd,
vp9_writer *w) {
- const MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int is_compound = has_second_ref(mbmi);
const int segment_id = mbmi->segment_id;
}
static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
- MODE_INFO *mi_8x8, vp9_writer *w) {
+ MODE_INFO **mi_8x8, vp9_writer *w) {
const struct segmentation *const seg = &cm->seg;
- const MODE_INFO *const mi = mi_8x8;
+ const MODE_INFO *const mi = mi_8x8[0];
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const MB_MODE_INFO *const mbmi = &mi->mbmi;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
MODE_INFO *m;
- xd->mi = cm->mi + (mi_row * cm->mi_stride + mi_col);
- m = xd->mi;
+ xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
+ m = xd->mi[0];
set_mi_row_col(xd, tile,
mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- m = cm->mi[mi_row * cm->mi_stride + mi_col].src_mi;
+ m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];
partition = partition_lookup[bsl][m->mbmi.sb_type];
write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
int sse_diff = ctx->zeromv_sse - ctx->newmv_sse;
MV_REFERENCE_FRAME frame;
MACROBLOCKD *filter_mbd = &mb->e_mbd;
- MB_MODE_INFO *mbmi = &filter_mbd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &filter_mbd->mi[0]->mbmi;
MB_MODE_INFO saved_mbmi;
int i, j;
struct buf_2d saved_dst[MAX_MB_PLANE];
int mi_row,
int mi_col) {
const int idx_str = xd->mi_stride * mi_row + mi_col;
- xd->mi = cm->mi + idx_str;
- xd->mi[0].src_mi = &xd->mi[0];
+ xd->mi = cm->mi_grid_visible + idx_str;
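+ // Point the grid entry at the matching MODE_INFO in cm->mi.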
+ xd->mi[0] = cm->mi + idx_str;
}
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
set_mode_info_offsets(cm, xd, mi_row, mi_col);
- mbmi = &xd->mi[0].src_mi->mbmi;
+ mbmi = &xd->mi[0]->mbmi;
// Set up destination pointers.
vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
for (j = 0; j < block_height; ++j)
for (i = 0; i < block_width; ++i) {
if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
- xd->mi[j * xd->mi_stride + i].src_mi = &xd->mi[0];
+ xd->mi[j * xd->mi_stride + i] = xd->mi[0];
}
}
BLOCK_SIZE bsize) {
if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
- xd->mi[0].src_mi->mbmi.sb_type = bsize;
+ xd->mi[0]->mbmi.sb_type = bsize;
}
}
sp = x->plane[0].src.stride;
if (!is_key_frame) {
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
unsigned int uv_sad;
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
struct macroblock_plane *const p = x->plane;
struct macroblockd_plane *const pd = xd->plane;
MODE_INFO *mi = &ctx->mic;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
- MODE_INFO *mi_addr = &xd->mi[0];
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ MODE_INFO *mi_addr = xd->mi[0];
const struct segmentation *const seg = &cm->seg;
const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
assert(mi->mbmi.sb_type == bsize);
*mi_addr = *mi;
- mi_addr->src_mi = mi_addr;
// If segmentation in use
if (seg->enabled) {
// Else for cyclic refresh mode update the segment map, set the segment id
// and then update the quantizer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
- vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, mi_row,
+ vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row,
mi_col, bsize, ctx->rate, ctx->dist,
x->skip);
}
for (x_idx = 0; x_idx < mi_width; x_idx++)
if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
&& (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
- xd->mi[x_idx + y * mis].src_mi = mi_addr;
+ xd->mi[x_idx + y * mis] = mi_addr;
}
if (cpi->oxcf.aq_mode)
MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
for (w = 0; w < x_mis; ++w) {
MV_REF *const mv = frame_mv + w;
- mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
- mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
- mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
- mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
+ mv->ref_frame[0] = mi->mbmi.ref_frame[0];
+ mv->ref_frame[1] = mi->mbmi.ref_frame[1];
+ mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
+ mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
}
}
}
static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
RD_COST *rd_cost, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
INTERP_FILTER filter_ref;
if (xd->up_available)
- filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
+ filter_ref = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
else if (xd->left_available)
- filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;
+ filter_ref = xd->mi[-1]->mbmi.interp_filter;
else
filter_ref = EIGHTTAP;
mbmi->mv[0].as_int = 0;
mbmi->interp_filter = filter_ref;
- xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = 0;
+ xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
x->skip = 1;
vp9_rd_cost_init(rd_cost);
x->use_lp32x32fdct = 1;
set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
- mbmi = &xd->mi[0].src_mi->mbmi;
+ mbmi = &xd->mi[0]->mbmi;
mbmi->sb_type = bsize;
for (i = 0; i < MAX_MB_PLANE; ++i) {
static void update_stats(VP9_COMMON *cm, ThreadData *td) {
const MACROBLOCK *x = &td->mb;
const MACROBLOCKD *const xd = &x->e_mbd;
- const MODE_INFO *const mi = xd->mi[0].src_mi;
+ const MODE_INFO *const mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
- BLOCK_SIZE bsize, MODE_INFO *mi_8x8) {
+ BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
int bh = bh_in;
int r, c;
for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
int bw = bw_in;
for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
const int index = r * mis + c;
- mi_8x8[index].src_mi = mi + index;
- mi_8x8[index].src_mi->mbmi.sb_type = find_partition_size(bsize,
+ mi_8x8[index] = mi + index;
+ mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
}
}
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
- MODE_INFO *mi_8x8, int mi_row, int mi_col,
+ MODE_INFO **mi_8x8, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
int index = block_row * mis + block_col;
- mi_8x8[index].src_mi = mi_upper_left + index;
- mi_8x8[index].src_mi->mbmi.sb_type = bsize;
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = bsize;
}
}
} else {
static void set_source_var_based_partition(VP9_COMP *cpi,
const TileInfo *const tile,
MACROBLOCK *const x,
- MODE_INFO *mi_8x8,
+ MODE_INFO **mi_8x8,
int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
d16[j] = cpi->source_diff_var + offset + boffset;
index = b_mi_row * mis + b_mi_col;
- mi_8x8[index].src_mi = mi_upper_left + index;
- mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_16X16;
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = BLOCK_16X16;
// TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
// size to further improve quality.
d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
- mi_8x8[index].src_mi = mi_upper_left + index;
- mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_32X32;
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = BLOCK_32X32;
}
}
// Use 64x64 partition
if (is_larger_better) {
- mi_8x8[0].src_mi = mi_upper_left;
- mi_8x8[0].src_mi->mbmi.sb_type = BLOCK_64X64;
+ mi_8x8[0] = mi_upper_left;
+ mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
}
}
} else { // partial in-image SB64
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *const mi = xd->mi[0].src_mi;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MODE_INFO *const mi = xd->mi[0];
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const struct segmentation *const seg = &cm->seg;
const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
const int x_mis = MIN(bw, cm->mi_cols - mi_col);
const int y_mis = MIN(bh, cm->mi_rows - mi_row);
- xd->mi[0] = ctx->mic;
- xd->mi[0].src_mi = &xd->mi[0];
+ *(xd->mi[0]) = ctx->mic;
if (seg->enabled && cpi->oxcf.aq_mode) {
// For in frame complexity AQ or variance AQ, copy segment_id from
MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
for (w = 0; w < x_mis; ++w) {
MV_REF *const mv = frame_mv + w;
- mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
- mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
- mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
- mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
+ mv->ref_frame[0] = mi->mbmi.ref_frame[0];
+ mv->ref_frame[1] = mi->mbmi.ref_frame[1];
+ mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
+ mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
}
}
}
if (bsize >= BLOCK_8X8) {
const int idx_str = xd->mi_stride * mi_row + mi_col;
- MODE_INFO *mi_8x8 = cm->mi[idx_str].src_mi;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
- subsize = mi_8x8[0].src_mi->mbmi.sb_type;
+ subsize = mi_8x8[0]->mbmi.sb_type;
} else {
ctx = 0;
subsize = BLOCK_4X4;
static void rd_use_partition(VP9_COMP *cpi,
ThreadData *td,
TileDataEnc *tile_data,
- MODE_INFO *mi_8x8, TOKENEXTRA **tp,
+ MODE_INFO **mi_8x8, TOKENEXTRA **tp,
int mi_row, int mi_col,
BLOCK_SIZE bsize,
int *rate, int64_t *dist,
RD_COST last_part_rdc, none_rdc, chosen_rdc;
BLOCK_SIZE sub_subsize = BLOCK_4X4;
int splits_below = 0;
- BLOCK_SIZE bs_type = mi_8x8[0].src_mi->mbmi.sb_type;
+ BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
int do_partition_search = 1;
PICK_MODE_CONTEXT *ctx = &pc_tree->none;
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
- MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss].src_mi;
+ MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
- mi_8x8[0].src_mi->mbmi.sb_type = bs_type;
+ mi_8x8[0]->mbmi.sb_type = bs_type;
pc_tree->partitioning = partition;
}
}
// If last_part is better set the partitioning to that.
if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
- mi_8x8[0].src_mi->mbmi.sb_type = bsize;
+ mi_8x8[0]->mbmi.sb_type = bsize;
if (bsize >= BLOCK_8X8)
pc_tree->partitioning = partition;
chosen_rdc = last_part_rdc;
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
-static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO *mi_8x8,
+static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size,
int bs_hist[BLOCK_SIZES]) {
// Check the sb_type for each block that belongs to this region.
for (i = 0; i < sb_height_in_blocks; ++i) {
for (j = 0; j < sb_width_in_blocks; ++j) {
- MODE_INFO *mi = mi_8x8[index+j].src_mi;
+ MODE_INFO *mi = mi_8x8[index+j];
BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
bs_hist[sb_type]++;
*min_block_size = MIN(*min_block_size, sb_type);
BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
VP9_COMMON *const cm = &cpi->common;
- MODE_INFO *mi = xd->mi[0].src_mi;
- const int left_in_image = xd->left_available && mi[-1].src_mi;
- const int above_in_image = xd->up_available && mi[-xd->mi_stride].src_mi;
+ MODE_INFO **mi = xd->mi;
+ const int left_in_image = xd->left_available && mi[-1];
+ const int above_in_image = xd->up_available && mi[-xd->mi_stride];
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
int bh, bw;
// passed in values for min and max as a starting point.
// Find the min and max partition used in previous frame at this location
if (cm->frame_type != KEY_FRAME) {
- MODE_INFO *prev_mi =
- cm->prev_mip + cm->mi_stride + 1 + mi_row * xd->mi_stride + mi_col;
-
+ MODE_INFO **prev_mi =
+ &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
}
// Find the min and max partition sizes used in the left SB64
if (left_in_image) {
- MODE_INFO *left_sb64_mi = mi[-MI_BLOCK_SIZE].src_mi;
+ MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
bs_hist);
}
// Find the min and max partition sizes used in the above SB64.
if (above_in_image) {
- MODE_INFO *above_sb64_mi = mi[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
+ MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
bs_hist);
}
BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
VP9_COMMON *const cm = &cpi->common;
- MODE_INFO *mi_8x8 = xd->mi;
- const int left_in_image = xd->left_available && mi_8x8[-1].src_mi;
- const int above_in_image = xd->up_available &&
- mi_8x8[-xd->mi_stride].src_mi;
+ MODE_INFO **mi_8x8 = xd->mi;
+ const int left_in_image = xd->left_available && mi_8x8[-1];
+ const int above_in_image = xd->up_available && mi_8x8[-xd->mi_stride];
int row8x8_remaining = tile->mi_row_end - mi_row;
int col8x8_remaining = tile->mi_col_end - mi_col;
int bh, bw;
if (search_range_ctrl &&
(left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
int block;
- MODE_INFO *mi;
+ MODE_INFO **mi;
BLOCK_SIZE sb_type;
// Find the min and max partition sizes used in the left SB64.
if (left_in_image) {
MODE_INFO *cur_mi;
- mi = mi_8x8[-1].src_mi;
+ mi = &mi_8x8[-1];
for (block = 0; block < MI_BLOCK_SIZE; ++block) {
- cur_mi = mi[block * xd->mi_stride].src_mi;
+ cur_mi = mi[block * xd->mi_stride];
sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
min_size = MIN(min_size, sb_type);
max_size = MAX(max_size, sb_type);
}
// Find the min and max partition sizes used in the above SB64.
if (above_in_image) {
- mi = mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
+ mi = &mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE];
for (block = 0; block < MI_BLOCK_SIZE; ++block) {
- sb_type = mi[block].src_mi ? mi[block].src_mi->mbmi.sb_type : 0;
+ sb_type = mi[block] ? mi[block]->mbmi.sb_type : 0;
min_size = MIN(min_size, sb_type);
max_size = MAX(max_size, sb_type);
}
MODE_INFO *mi;
const int idx_str = cm->mi_stride * mi_row + mi_col;
- MODE_INFO *prev_mi = (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi;
-
-
+ MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
BLOCK_SIZE bs, min_size, max_size;
min_size = BLOCK_64X64;
if (prev_mi) {
for (idy = 0; idy < mi_height; ++idy) {
for (idx = 0; idx < mi_width; ++idx) {
- mi = prev_mi[idy * cm->mi_stride + idx].src_mi;
+ mi = prev_mi[idy * cm->mi_stride + idx];
bs = mi ? mi->mbmi.sb_type : bsize;
min_size = MIN(min_size, bs);
max_size = MAX(max_size, bs);
if (xd->left_available) {
for (idy = 0; idy < mi_height; ++idy) {
- mi = xd->mi[idy * cm->mi_stride - 1].src_mi;
+ mi = xd->mi[idy * cm->mi_stride - 1];
bs = mi ? mi->mbmi.sb_type : bsize;
min_size = MIN(min_size, bs);
max_size = MAX(max_size, bs);
if (xd->up_available) {
for (idx = 0; idx < mi_width; ++idx) {
- mi = xd->mi[idx - cm->mi_stride].src_mi;
+ mi = xd->mi[idx - cm->mi_stride];
bs = mi ? mi->mbmi.sb_type : bsize;
min_size = MIN(min_size, bs);
max_size = MAX(max_size, bs);
int seg_skip = 0;
const int idx_str = cm->mi_stride * mi_row + mi_col;
- MODE_INFO *mi = cm->mi + idx_str;
+ MODE_INFO **mi = cm->mi_grid_visible + idx_str;
if (sf->adaptive_pred_interp_filter) {
for (i = 0; i < 64; ++i)
static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
int mi_row, mi_col;
const int mis = cm->mi_stride;
- MODE_INFO *mi_ptr = cm->mi;
+ MODE_INFO **mi_ptr = cm->mi_grid_visible;
for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
- if (mi_ptr[mi_col].src_mi->mbmi.tx_size > max_tx_size)
- mi_ptr[mi_col].src_mi->mbmi.tx_size = max_tx_size;
+ if (mi_ptr[mi_col]->mbmi.tx_size > max_tx_size)
+ mi_ptr[mi_col]->mbmi.tx_size = max_tx_size;
}
}
}
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
- mbmi = &xd->mi[0].src_mi->mbmi;
+ mbmi = &xd->mi[0]->mbmi;
mbmi->sb_type = bsize;
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
switch (partition) {
case PARTITION_NONE:
set_mode_info_offsets(cm, xd, mi_row, mi_col);
- *(xd->mi[0].src_mi) = pc_tree->none.mic;
+ *(xd->mi[0]) = pc_tree->none.mic;
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
break;
case PARTITION_VERT:
set_mode_info_offsets(cm, xd, mi_row, mi_col);
- *(xd->mi[0].src_mi) = pc_tree->vertical[0].mic;
+ *(xd->mi[0]) = pc_tree->vertical[0].mic;
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
if (mi_col + hbs < cm->mi_cols) {
set_mode_info_offsets(cm, xd, mi_row, mi_col + hbs);
- *(xd->mi[0].src_mi) = pc_tree->vertical[1].mic;
+ *(xd->mi[0]) = pc_tree->vertical[1].mic;
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
}
break;
case PARTITION_HORZ:
set_mode_info_offsets(cm, xd, mi_row, mi_col);
- *(xd->mi[0].src_mi) = pc_tree->horizontal[0].mic;
+ *(xd->mi[0]) = pc_tree->horizontal[0].mic;
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
if (mi_row + hbs < cm->mi_rows) {
set_mode_info_offsets(cm, xd, mi_row + hbs, mi_col);
- *(xd->mi[0].src_mi) = pc_tree->horizontal[1].mic;
+ *(xd->mi[0]) = pc_tree->horizontal[1].mic;
duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
}
break;
if (partition_none_allowed) {
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
&this_rdc, bsize, ctx);
- ctx->mic.mbmi = xd->mi[0].src_mi->mbmi;
+ ctx->mic.mbmi = xd->mi[0]->mbmi;
ctx->skip_txfm[0] = x->skip_txfm[0];
ctx->skip = x->skip;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
&pc_tree->horizontal[0]);
- pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[0].skip = x->skip;
&this_rdc, subsize,
&pc_tree->horizontal[1]);
- pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[1].skip = x->skip;
pc_tree->vertical[0].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
&pc_tree->vertical[0]);
- pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[0].skip = x->skip;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms,
&this_rdc, subsize,
&pc_tree->vertical[1]);
- pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[1].skip = x->skip;
static void nonrd_select_partition(VP9_COMP *cpi,
ThreadData *td,
TileDataEnc *tile_data,
- MODE_INFO *mi,
+ MODE_INFO **mi,
TOKENEXTRA **tp,
int mi_row, int mi_col,
BLOCK_SIZE bsize, int output_enabled,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4;
+ subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
partition = partition_lookup[bsl][subsize];
if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
pc_tree->none.pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
subsize, &pc_tree->none);
- pc_tree->none.mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
pc_tree->none.skip = x->skip;
break;
pc_tree->vertical[0].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
subsize, &pc_tree->vertical[0]);
- pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[0].skip = x->skip;
if (mi_col + hbs < cm->mi_cols) {
pc_tree->vertical[1].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
&this_rdc, subsize, &pc_tree->vertical[1]);
- pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[1].skip = x->skip;
if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
pc_tree->horizontal[0].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
subsize, &pc_tree->horizontal[0]);
- pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[0].skip = x->skip;
if (mi_row + hbs < cm->mi_rows) {
pc_tree->horizontal[1].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
&this_rdc, subsize, &pc_tree->horizontal[1]);
- pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[1].skip = x->skip;
if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
static void nonrd_use_partition(VP9_COMP *cpi,
ThreadData *td,
TileDataEnc *tile_data,
- MODE_INFO *mi,
+ MODE_INFO **mi,
TOKENEXTRA **tp,
int mi_row, int mi_col,
BLOCK_SIZE bsize, int output_enabled,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4;
+ subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
partition = partition_lookup[bsl][subsize];
if (output_enabled && bsize != BLOCK_4X4) {
pc_tree->none.pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
subsize, &pc_tree->none);
- pc_tree->none.mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
pc_tree->none.skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
pc_tree->vertical[0].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
subsize, &pc_tree->vertical[0]);
- pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[0].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
pc_tree->vertical[1].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
dummy_cost, subsize, &pc_tree->vertical[1]);
- pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[1].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
pc_tree->horizontal[0].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
subsize, &pc_tree->horizontal[0]);
- pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[0].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
pc_tree->horizontal[1].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
dummy_cost, subsize, &pc_tree->horizontal[1]);
- pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
+ pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[1].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
const struct segmentation *const seg = &cm->seg;
RD_COST dummy_rdc;
const int idx_str = cm->mi_stride * mi_row + mi_col;
- MODE_INFO *mi = cm->mi + idx_str;
+ MODE_INFO **mi = cm->mi_grid_visible + idx_str;
PARTITION_SEARCH_TYPE partition_search_type = sf->partition_search_type;
BLOCK_SIZE bsize = BLOCK_64X64;
int seg_skip = 0;
case REFERENCE_PARTITION:
set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
- xd->mi[0].src_mi->mbmi.segment_id) {
+ xd->mi[0]->mbmi.segment_id) {
x->max_partition_size = BLOCK_64X64;
x->min_partition_size = BLOCK_8X8;
nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
MACROBLOCKD *const xd = &x->e_mbd;
RD_COUNTS *const rdc = &cpi->td.rd_counts;
- xd->mi = cm->mi;
- xd->mi[0].src_mi = &xd->mi[0];
+ xd->mi = cm->mi_grid_visible;
+ xd->mi[0] = cm->mi;
vp9_zero(*td->counts);
vp9_zero(rdc->coef_counts);
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *mi_8x8 = xd->mi;
- MODE_INFO *mi = mi_8x8;
+ MODE_INFO **mi_8x8 = xd->mi;
+ MODE_INFO *mi = mi_8x8[0];
MB_MODE_INFO *mbmi = &mi->mbmi;
const int seg_skip = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
SEG_LVL_SKIP);
for (y = 0; y < mi_height; y++)
for (x = 0; x < mi_width; x++)
if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
- mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size;
+ mi_8x8[mis * y + x]->mbmi.tx_size = tx_size;
}
++td->counts->tx.tx_totals[mbmi->tx_size];
++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])];
MACROBLOCKD *const xd = &mb->e_mbd;
struct macroblock_plane *const p = &mb->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
- const int ref = is_inter_block(&xd->mi[0].src_mi->mbmi);
+ const int ref = is_inter_block(&xd->mi[0]->mbmi);
vp9_token_state tokens[1025][2];
unsigned best_index[1025][2];
uint8_t token_cache[1024];
void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct encode_b_args arg = {x, &ctx, &mbmi->skip};
int plane;
struct encode_b_args* const args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct macroblock_plane *const p = &x->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
tran_low_t *coeff = BLOCK_OFFSET(p->coeff, block);
case TX_4X4:
tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
scan_order = &vp9_scan_orders[TX_4X4][tx_type];
- mode = plane == 0 ? get_y_mode(xd->mi[0].src_mi, block) : mbmi->uv_mode;
+ mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode,
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
case TX_4X4:
tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
scan_order = &vp9_scan_orders[TX_4X4][tx_type];
- mode = plane == 0 ? get_y_mode(xd->mi[0].src_mi, block) : mbmi->uv_mode;
+ mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode,
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
const MACROBLOCKD *const xd = &x->e_mbd;
- struct encode_b_args arg = {x, NULL, &xd->mi[0].src_mi->mbmi.skip};
+ struct encode_b_args arg = {x, NULL, &xd->mi[0]->mbmi.skip};
vp9_foreach_transformed_block_in_plane(xd, bsize, plane,
vp9_encode_block_intra, &arg);
void vp9_update_mv_count(ThreadData *td) {
const MACROBLOCKD *xd = &td->mb.e_mbd;
- const MODE_INFO *mi = xd->mi[0].src_mi;
+ const MODE_INFO *mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
if (mbmi->sb_type < BLOCK_8X8) {
// Clear left border column
for (i = 1; i < cm->mi_rows + 1; ++i)
vpx_memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
+
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
+ cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
+
+ vpx_memset(cm->mi_grid_base, 0,
+ cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
static int vp9_enc_alloc_mi(VP9_COMMON *cm, int mi_size) {
if (!cm->prev_mip)
return 1;
cm->mi_alloc_size = mi_size;
+
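+ // Allocate the pointer grids alongside mip/prev_mip; entries are assigned
+ // per block as mode info offsets are set up.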
+ cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
+ if (!cm->mi_grid_base)
+ return 1;
+ cm->prev_mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
+ if (!cm->prev_mi_grid_base)
+ return 1;
+
return 0;
}
cm->mip = NULL;
vpx_free(cm->prev_mip);
cm->prev_mip = NULL;
+ vpx_free(cm->mi_grid_base);
+ cm->mi_grid_base = NULL;
+ vpx_free(cm->prev_mi_grid_base);
+ cm->prev_mi_grid_base = NULL;
}
static void vp9_swap_mi_and_prev_mi(VP9_COMMON *cm) {
// Current mip will be the prev_mip for the next frame.
+ MODE_INFO **temp_base = cm->prev_mi_grid_base;
MODE_INFO *temp = cm->prev_mip;
cm->prev_mip = cm->mip;
cm->mip = temp;
// Update the upper left visible macroblock ptrs.
cm->mi = cm->mip + cm->mi_stride + 1;
cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
+
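+ // Swap the pointer grids in lockstep and recompute the visible pointers.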
+ cm->prev_mi_grid_base = cm->mi_grid_base;
+ cm->mi_grid_base = temp_base;
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
+ cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
}
void vp9_initialize_enc(void) {
static void update_reference_segmentation_map(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
- MODE_INFO *mi_8x8_ptr = cm->mi;
+ MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
uint8_t *cache_ptr = cm->last_frame_seg_map;
int row, col;
for (row = 0; row < cm->mi_rows; row++) {
- MODE_INFO *mi_8x8 = mi_8x8_ptr;
+ MODE_INFO **mi_8x8 = mi_8x8_ptr;
uint8_t *cache = cache_ptr;
for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
- cache[0] = mi_8x8[0].src_mi->mbmi.segment_id;
+ cache[0] = mi_8x8[0]->mbmi.segment_id;
mi_8x8_ptr += cm->mi_stride;
cache_ptr += cm->mi_cols;
}
MV tmp_mv = {0, 0};
MV ref_mv_full = {ref_mv->row >> 3, ref_mv->col >> 3};
int num00, tmp_err, n;
- const BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
+ const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
const int new_mv_mode_penalty = NEW_MV_MODE_PENALTY;
vp9_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
}
- xd->mi = cm->mi;
- xd->mi[0].src_mi = &xd->mi[0];
+ xd->mi = cm->mi_grid_visible;
+ xd->mi[0] = cm->mi;
vp9_frame_init_quantizer(cpi);
xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset;
xd->left_available = (mb_col != 0);
- xd->mi[0].src_mi->mbmi.sb_type = bsize;
- xd->mi[0].src_mi->mbmi.ref_frame[0] = INTRA_FRAME;
+ xd->mi[0]->mbmi.sb_type = bsize;
+ xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
set_mi_row_col(xd, &tile,
mb_row << 1, num_8x8_blocks_high_lookup[bsize],
mb_col << 1, num_8x8_blocks_wide_lookup[bsize],
// Do intra 16x16 prediction.
x->skip_encode = 0;
- xd->mi[0].src_mi->mbmi.mode = DC_PRED;
- xd->mi[0].src_mi->mbmi.tx_size = use_dc_pred ?
+ xd->mi[0]->mbmi.mode = DC_PRED;
+ xd->mi[0]->mbmi.tx_size = use_dc_pred ?
(bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
vp9_encode_intra_block_plane(x, bsize, 0);
this_error = vp9_get_mb_ss(x->plane[0].src_diff);
mv.row *= 8;
mv.col *= 8;
this_error = motion_error;
- xd->mi[0].src_mi->mbmi.mode = NEWMV;
- xd->mi[0].src_mi->mbmi.mv[0].as_mv = mv;
- xd->mi[0].src_mi->mbmi.tx_size = TX_4X4;
- xd->mi[0].src_mi->mbmi.ref_frame[0] = LAST_FRAME;
- xd->mi[0].src_mi->mbmi.ref_frame[1] = NONE;
+ xd->mi[0]->mbmi.mode = NEWMV;
+ xd->mi[0]->mbmi.mv[0].as_mv = mv;
+ xd->mi[0]->mbmi.tx_size = TX_4X4;
+ xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
+ xd->mi[0]->mbmi.ref_frame[1] = NONE;
vp9_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
vp9_encode_sby_pass1(x, bsize);
sum_mvr += mv.row;
&distortion, &sse, NULL, 0, 0);
}
- xd->mi[0].src_mi->mbmi.mode = NEWMV;
- xd->mi[0].src_mi->mbmi.mv[0].as_mv = *dst_mv;
+ xd->mi[0]->mbmi.mode = NEWMV;
+ xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv;
vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
unsigned int err;
- xd->mi[0].src_mi->mbmi.mode = mode;
+ xd->mi[0]->mbmi.mode = mode;
vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode,
x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
xd->plane[0].dst.stride = buf->y_stride;
xd->plane[0].pre[0].stride = buf->y_stride;
xd->plane[1].dst.stride = buf->uv_stride;
- xd->mi[0].src_mi = &mi_local;
+ xd->mi[0] = &mi_local;
mi_local.mbmi.sb_type = BLOCK_16X16;
mi_local.mbmi.ref_frame[0] = LAST_FRAME;
mi_local.mbmi.ref_frame[1] = NONE;
const int src_stride = x->plane[0].src.stride;
const int ref_stride = xd->plane[0].pre[0].stride;
uint8_t const *ref_buf, *src_buf;
- MV *tmp_mv = &xd->mi[0].src_mi->mbmi.mv[0].as_mv;
+ MV *tmp_mv = &xd->mi[0]->mbmi.mv[0].as_mv;
unsigned int best_sad, tmp_sad, this_sad[4];
MV this_mv;
const int norm_factor = 3 + (bw >> 5);
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
- xd->mi_stride].src_mi;
+ xd->mi_stride];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
// Keep counts for entropy encoding.
context_counter += mode_2_counter[candidate->mode];
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *
- xd->mi_stride].src_mi->mbmi;
+ xd->mi_stride]->mbmi;
different_ref_found = 1;
if (candidate->ref_frame[0] == ref_frame)
const POSITION *mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row
- * xd->mi_stride].src_mi->mbmi;
+ * xd->mi_stride]->mbmi;
// If the candidate is INTRA we don't want to consider its mv.
IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
int_mv *tmp_mv, int *rate_mv,
int64_t best_rd_sofar) {
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
const int step_param = cpi->sf.mv.fullpel_search_step_param;
const int sadpb = x->sadperbit16;
if (cpi->sf.partition_search_type == VAR_BASED_PARTITION) {
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
- cyclic_refresh_segment_id_boosted(xd->mi[0].src_mi->mbmi.segment_id))
+ cyclic_refresh_segment_id_boosted(xd->mi[0]->mbmi.segment_id))
tx_size = TX_8X8;
else if (tx_size > TX_16X16)
tx_size = TX_16X16;
}
assert(tx_size >= TX_8X8);
- xd->mi[0].src_mi->mbmi.tx_size = tx_size;
+ xd->mi[0]->mbmi.tx_size = tx_size;
// Evaluate if the partition block is a skippable block in Y plane.
{
for (i = 1; i <= 2; i++) {
struct macroblock_plane *const p = &x->plane[i];
struct macroblockd_plane *const pd = &xd->plane[i];
- const TX_SIZE uv_tx_size = get_uv_tx_size(&xd->mi[0].src_mi->mbmi, pd);
+ const TX_SIZE uv_tx_size = get_uv_tx_size(&xd->mi[0]->mbmi, pd);
const BLOCK_SIZE unit_size = txsize_to_bsize[uv_tx_size];
const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, pd);
const int uv_bw = b_width_log2_lookup[uv_bsize];
if (cpi->common.tx_mode == TX_MODE_SELECT) {
if (sse > (var << 2))
- xd->mi[0].src_mi->mbmi.tx_size =
+ xd->mi[0]->mbmi.tx_size =
MIN(max_txsize_lookup[bsize],
tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
else
- xd->mi[0].src_mi->mbmi.tx_size = TX_8X8;
+ xd->mi[0]->mbmi.tx_size = TX_8X8;
if (cpi->sf.partition_search_type == VAR_BASED_PARTITION) {
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
- cyclic_refresh_segment_id_boosted(xd->mi[0].src_mi->mbmi.segment_id))
- xd->mi[0].src_mi->mbmi.tx_size = TX_8X8;
- else if (xd->mi[0].src_mi->mbmi.tx_size > TX_16X16)
- xd->mi[0].src_mi->mbmi.tx_size = TX_16X16;
+ cyclic_refresh_segment_id_boosted(xd->mi[0]->mbmi.segment_id))
+ xd->mi[0]->mbmi.tx_size = TX_8X8;
+ else if (xd->mi[0]->mbmi.tx_size > TX_16X16)
+ xd->mi[0]->mbmi.tx_size = TX_16X16;
}
} else {
- xd->mi[0].src_mi->mbmi.tx_size =
+ xd->mi[0]->mbmi.tx_size =
MIN(max_txsize_lookup[bsize],
tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
}
// Evaluate if the partition block is a skippable block in Y plane.
{
const BLOCK_SIZE unit_size =
- txsize_to_bsize[xd->mi[0].src_mi->mbmi.tx_size];
+ txsize_to_bsize[xd->mi[0]->mbmi.tx_size];
const unsigned int num_blk_log2 =
(b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) +
(b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]);
struct buf_2d yv12_mb[][MAX_MB_PLANE],
int *rate, int64_t *dist) {
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
unsigned int var = var_y, sse = sse_y;
void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
RD_COST this_rdc, best_rdc;
PREDICTION_MODE this_mode;
struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 };
const TX_SIZE intra_tx_size =
MIN(max_txsize_lookup[bsize],
tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
- MODE_INFO *const mic = xd->mi[0].src_mi;
+ MODE_INFO *const mic = xd->mi[0];
int *bmode_costs;
- const MODE_INFO *above_mi = xd->mi[-xd->mi_stride].src_mi;
- const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
+ const MODE_INFO *above_mi = xd->mi[-xd->mi_stride];
+ const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1] : NULL;
const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
bmode_costs = cpi->y_mode_costs[A][L];
VP9_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
struct macroblockd_plane *const pd = &xd->plane[0];
PREDICTION_MODE best_mode = ZEROMV;
MV_REFERENCE_FRAME ref_frame, best_ref_frame = LAST_FRAME;
x->skip = 0;
if (xd->up_available)
- filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
+ filter_ref = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
else if (xd->left_available)
- filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;
+ filter_ref = xd->mi[-1]->mbmi.interp_filter;
else
filter_ref = cm->interp_filter;
sf, sf);
if (cm->use_prev_frame_mvs)
- vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
+ vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0], ref_frame,
candidates, mi_row, mi_col, NULL, NULL);
else
const_motion[ref_frame] = mv_refs_rt(cm, xd, tile_info,
- xd->mi[0].src_mi,
+ xd->mi[0],
ref_frame, candidates,
mi_row, mi_col);
// For large partition blocks, extra testing is done.
if (bsize > BLOCK_32X32 &&
- !cyclic_refresh_segment_id_boosted(xd->mi[0].src_mi->mbmi.segment_id) &&
+ !cyclic_refresh_segment_id_boosted(xd->mi[0]->mbmi.segment_id) &&
cm->base_qindex) {
model_rd_for_sb_y_large(cpi, bsize, x, xd, &this_rdc.rate,
&this_rdc.dist, &var_y, &sse_y, mi_row, mi_col,
mbmi->tx_size = best_tx_size;
mbmi->ref_frame[0] = best_ref_frame;
mbmi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int;
- xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
+ xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
x->skip_txfm[0] = best_mode_skip_txfm;
// Perform intra prediction search, if the best SAD is above a certain
TileInfo *const tile_info = &tile_data->tile_info;
SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const struct segmentation *const seg = &cm->seg;
MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
MV_REFERENCE_FRAME best_ref_frame = NONE;
&cm->frame_refs[ref_frame - 1].sf;
vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
sf, sf);
- vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
+ vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0], ref_frame,
candidates, mi_row, mi_col, NULL, NULL);
vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
int b_rate = 0;
- xd->mi[0].bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;
+ xd->mi[0]->bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;
if (this_mode == NEWMV) {
const int step_param = cpi->sf.mv.fullpel_search_step_param;
mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
mvp_full.col = b_mv[NEARESTMV].as_mv.col >> 3;
} else {
- mvp_full.row = xd->mi[0].bmi[0].as_mv[0].as_mv.row >> 3;
- mvp_full.col = xd->mi[0].bmi[0].as_mv[0].as_mv.col >> 3;
+ mvp_full.row = xd->mi[0]->bmi[0].as_mv[0].as_mv.row >> 3;
+ mvp_full.col = xd->mi[0]->bmi[0].as_mv[0].as_mv.col >> 3;
}
vp9_set_mv_search_range(x, &mbmi->ref_mvs[0]->as_mv);
&dummy_dist,
&x->pred_sse[ref_frame], NULL, 0, 0);
- xd->mi[0].bmi[i].as_mv[0].as_mv = tmp_mv;
+ xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv;
} else {
b_rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
[INTER_OFFSET(this_mode)];
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
vp9_highbd_build_inter_predictor(pd->pre[0].buf, pd->pre[0].stride,
pd->dst.buf, pd->dst.stride,
- &xd->mi[0].bmi[i].as_mv[0].as_mv,
+ &xd->mi[0]->bmi[i].as_mv[0].as_mv,
&xd->block_refs[0]->sf,
4 * num_4x4_blocks_wide,
4 * num_4x4_blocks_high, 0,
#endif
vp9_build_inter_predictor(pd->pre[0].buf, pd->pre[0].stride,
pd->dst.buf, pd->dst.stride,
- &xd->mi[0].bmi[i].as_mv[0].as_mv,
+ &xd->mi[0]->bmi[i].as_mv[0].as_mv,
&xd->block_refs[0]->sf,
4 * num_4x4_blocks_wide,
4 * num_4x4_blocks_high, 0,
if (this_rdc.rdcost < b_best_rd) {
b_best_rd = this_rdc.rdcost;
bsi[ref_frame][i].as_mode = this_mode;
- bsi[ref_frame][i].as_mv[0].as_mv = xd->mi[0].bmi[i].as_mv[0].as_mv;
+ bsi[ref_frame][i].as_mv[0].as_mv = xd->mi[0]->bmi[i].as_mv[0].as_mv;
}
} // mode search
pd->dst = orig_dst;
this_rd += b_best_rd;
- xd->mi[0].bmi[i] = bsi[ref_frame][i];
+ xd->mi[0]->bmi[i] = bsi[ref_frame][i];
if (num_4x4_blocks_wide > 1)
- xd->mi[0].bmi[i + 1] = xd->mi[0].bmi[i];
+ xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i];
if (num_4x4_blocks_high > 1)
- xd->mi[0].bmi[i + 2] = xd->mi[0].bmi[i];
+ xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i];
}
} // loop through sub8x8 blocks
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
const int block = idy * 2 + idx;
- xd->mi[0].bmi[block] = bsi[best_ref_frame][block];
+ xd->mi[0]->bmi[block] = bsi[best_ref_frame][block];
if (num_4x4_blocks_wide > 1)
- xd->mi[0].bmi[block + 1] = bsi[best_ref_frame][block];
+ xd->mi[0]->bmi[block + 1] = bsi[best_ref_frame][block];
if (num_4x4_blocks_high > 1)
- xd->mi[0].bmi[block + 2] = bsi[best_ref_frame][block];
+ xd->mi[0]->bmi[block + 2] = bsi[best_ref_frame][block];
}
}
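For sub-8x8 partitions, the loops above step by the partition's 4x4 width and height and replicate each chosen b_mode_info into its right/lower twin so that all four bmi[] slots end up filled. A toy sketch of that replication pattern, with stand-in types and hard-coded sizes:
/* Illustrative only: shows the bmi[] replication pattern, not the real structs. */
#include <stdio.h>
typedef struct { int as_mode; } b_info;
static void fill_bmi(b_info bmi[4], const b_info best[4],
                     int num_4x4_blocks_wide, int num_4x4_blocks_high) {
  int idy, idx;
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      const int block = idy * 2 + idx;
      bmi[block] = best[block];
      if (num_4x4_blocks_wide > 1) bmi[block + 1] = best[block];  /* right twin */
      if (num_4x4_blocks_high > 1) bmi[block + 2] = best[block];  /* lower twin */
    }
  }
}
int main(void) {
  const b_info best[4] = { {10}, {11}, {12}, {13} };
  b_info bmi[4] = { {0}, {0}, {0}, {0} };
  int i;
  fill_bmi(bmi, best, 2, 1);                              /* an 8x4-style partition */
  for (i = 0; i < 4; ++i) printf("%d ", bmi[i].as_mode);  /* prints: 10 10 12 12 */
  printf("\n");
  return 0;
}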
- mbmi->mode = xd->mi[0].bmi[3].as_mode;
- ctx->mic = *(xd->mi[0].src_mi);
+ mbmi->mode = xd->mi[0]->bmi[3].as_mode;
+ ctx->mic = *(xd->mi[0]);
ctx->skip_txfm[0] = 0;
ctx->skip = 0;
// Dummy assignment for speed -5. No effect in speed -6.
const VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
QUANTS *const quants = &cpi->quants;
- const int segment_id = xd->mi[0].src_mi->mbmi.segment_id;
+ const int segment_id = xd->mi[0]->mbmi.segment_id;
const int qindex = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
const int rdmult = vp9_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
int i;
uint8_t *ref_y_buffer, int ref_y_stride,
int ref_frame, BLOCK_SIZE block_size) {
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
int i;
int zero_seen = 0;
int best_index = 0;
}
int vp9_get_switchable_rate(const VP9_COMP *cpi, const MACROBLOCKD *const xd) {
- const MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int ctx = vp9_get_pred_context_switchable_interp(xd);
return SWITCHABLE_INTERP_RATE_FACTOR *
cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
int i;
int64_t rate_sum = 0;
int64_t dist_sum = 0;
- const int ref = xd->mi[0].src_mi->mbmi.ref_frame[0];
+ const int ref = xd->mi[0]->mbmi.ref_frame[0];
unsigned int sse;
unsigned int var = 0;
unsigned int sum_sse = 0;
const int16_t *scan, const int16_t *nb,
int use_fast_coef_costing) {
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const struct macroblock_plane *p = &x->plane[plane];
const struct macroblockd_plane *pd = &xd->plane[plane];
const PLANE_TYPE type = pd->plane_type;
#endif // CONFIG_VP9_HIGHBITDEPTH
args->sse = this_sse >> shift;
- if (x->skip_encode && !is_inter_block(&xd->mi[0].src_mi->mbmi)) {
+ if (x->skip_encode && !is_inter_block(&xd->mi[0]->mbmi)) {
// TODO(jingning): tune the model to better capture the distortion.
int64_t p = (pd->dequant[1] * pd->dequant[1] *
(1 << ss_txfrm_size)) >> (shift + 2);
struct rdcost_block_args *args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int64_t rd1, rd2, rd;
if (args->skip)
args.use_fast_coef_costing = use_fast_coef_casting;
if (plane == 0)
- xd->mi[0].src_mi->mbmi.tx_size = tx_size;
+ xd->mi[0]->mbmi.tx_size = tx_size;
vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
VP9_COMMON *const cm = &cpi->common;
const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
mbmi->tx_size = MIN(max_tx_size, largest_tx_size);
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
vp9_prob skip_prob = vp9_get_skip_prob(cm, xd);
int r[TX_SIZES][2], s[TX_SIZES];
int64_t d[TX_SIZES], sse[TX_SIZES];
int64_t sse;
int64_t *ret_sse = psse ? psse : &sse;
- assert(bs == xd->mi[0].src_mi->mbmi.sb_type);
+ assert(bs == xd->mi[0]->mbmi.sb_type);
if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
vpx_memset(txfm_cache, 0, TX_MODES * sizeof(int64_t));
vpx_memcpy(ta, a, sizeof(ta));
vpx_memcpy(tl, l, sizeof(tl));
- xd->mi[0].src_mi->mbmi.tx_size = TX_4X4;
+ xd->mi[0]->mbmi.tx_size = TX_4X4;
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
block,
p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
- xd->mi[0].src_mi->bmi[block].as_mode = mode;
+ xd->mi[0]->bmi[block].as_mode = mode;
vp9_predict_intra_block(xd, block, 1,
TX_4X4, mode,
x->skip_encode ? src : dst,
int16_t *const src_diff =
vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
- xd->mi[0].src_mi->bmi[block].as_mode = mode;
+ xd->mi[0]->bmi[block].as_mode = mode;
vp9_predict_intra_block(xd, block, 1,
TX_4X4, mode,
x->skip_encode ? src : dst,
int64_t best_rd) {
int i, j;
const MACROBLOCKD *const xd = &mb->e_mbd;
- MODE_INFO *const mic = xd->mi[0].src_mi;
+ MODE_INFO *const mic = xd->mi[0];
const MODE_INFO *above_mi = xd->above_mi;
const MODE_INFO *left_mi = xd->left_mi;
- const BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
+ const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
PREDICTION_MODE mode;
PREDICTION_MODE mode_selected = DC_PRED;
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *const mic = xd->mi[0].src_mi;
+ MODE_INFO *const mic = xd->mi[0];
int this_rate, this_rate_tokenonly, s;
int64_t this_distortion, this_rd;
TX_SIZE best_tx = TX_4X4;
int64_t *sse, BLOCK_SIZE bsize,
int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const TX_SIZE uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
int plane;
int pnrate = 0, pnskip = 1;
if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
continue;
- xd->mi[0].src_mi->mbmi.uv_mode = mode;
+ xd->mi[0]->mbmi.uv_mode = mode;
if (!super_block_uvrd(cpi, x, &this_rate_tokenonly,
&this_distortion, &s, &this_sse, bsize, best_rd))
}
}
- xd->mi[0].src_mi->mbmi.uv_mode = mode_selected;
+ xd->mi[0]->mbmi.uv_mode = mode_selected;
return best_rd;
}
const VP9_COMMON *cm = &cpi->common;
int64_t unused;
- x->e_mbd.mi[0].src_mi->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
super_block_uvrd(cpi, x, rate_tokenonly, distortion,
skippable, &unused, bsize, INT64_MAX);
rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);
}
- *mode_uv = x->e_mbd.mi[0].src_mi->mbmi.uv_mode;
+ *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
}
static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
int_mv seg_mvs[MAX_REF_FRAMES],
int_mv *best_ref_mv[2], const int *mvjcost,
int *mvcost[2]) {
- MODE_INFO *const mic = xd->mi[0].src_mi;
+ MODE_INFO *const mic = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mic->mbmi;
int thismvcost = 0;
int idx, idy;
MACROBLOCKD *xd = &x->e_mbd;
struct macroblockd_plane *const pd = &xd->plane[0];
struct macroblock_plane *const p = &x->plane[0];
- MODE_INFO *const mi = xd->mi[0].src_mi;
+ MODE_INFO *const mi = xd->mi[0];
const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
}
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
- MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0]->mbmi;
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
struct buf_2d orig_pre[2]) {
- MB_MODE_INFO *mbmi = &x->e_mbd.mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
x->plane[0].src = orig_src;
x->e_mbd.plane[0].pre[0] = orig_pre[0];
if (has_second_ref(mbmi))
const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const int refs[2] = {mbmi->ref_frame[0],
mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]};
int_mv ref_mv[2];
int i;
BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
MACROBLOCKD *xd = &x->e_mbd;
- MODE_INFO *mi = xd->mi[0].src_mi;
+ MODE_INFO *mi = xd->mi[0];
MB_MODE_INFO *mbmi = &mi->mbmi;
int mode_idx;
int k, br = 0, idx, idy;
ctx->skip = x->skip;
ctx->skippable = skippable;
ctx->best_mode_index = mode_index;
- ctx->mic = *xd->mi[0].src_mi;
+ ctx->mic = *xd->mi[0];
ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
const VP9_COMMON *cm = &cpi->common;
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *const mi = xd->mi[0].src_mi;
+ MODE_INFO *const mi = xd->mi[0];
int_mv *const candidates = mi->mbmi.ref_mvs[ref_frame];
const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
int_mv *tmp_mv, int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
const VP9_COMMON *cm = &cpi->common;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
int bestsme = INT_MAX;
int step_param;
int64_t filter_cache[]) {
VP9_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const int is_comp_pred = has_second_ref(mbmi);
const int this_mode = mbmi->mode;
int_mv *frame_mv = mode_mv[this_mode];
if (pred_filter_search) {
INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
if (xd->up_available)
- af = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
+ af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
if (xd->left_available)
- lf = xd->mi[-1].src_mi->mbmi.interp_filter;
+ lf = xd->mi[-1]->mbmi.interp_filter;
if ((this_mode != NEWMV) || (af == lf))
best_filter = af;
return INT64_MAX;
frame_mv[refs[0]].as_int =
- xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
single_newmv[refs[0]].as_int = tmp_mv.as_int;
// Estimate the rate implications of a new mv but discount this
TX_SIZE max_uv_tx_size;
x->skip_encode = 0;
ctx->skip = 0;
- xd->mi[0].src_mi->mbmi.ref_frame[0] = INTRA_FRAME;
- xd->mi[0].src_mi->mbmi.ref_frame[1] = NONE;
+ xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
+ xd->mi[0]->mbmi.ref_frame[1] = NONE;
if (bsize >= BLOCK_8X8) {
if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
return;
}
}
- max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0].src_mi->mbmi.tx_size, bsize,
+ max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->mbmi.tx_size, bsize,
pd[1].subsampling_x,
pd[1].subsampling_y);
rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly,
}
}
- ctx->mic = *xd->mi[0].src_mi;
+ ctx->mic = *xd->mi[0];
rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
}
RD_OPT *const rd_opt = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const struct segmentation *const seg = &cm->seg;
PREDICTION_MODE this_mode;
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
ref_mv.as_int = INVALID_MV;
if ((mi_row - 1) >= tile_info->mi_row_start) {
- ref_mv = xd->mi[-xd->mi_stride].src_mi->mbmi.mv[0];
- rf = xd->mi[-xd->mi_stride].src_mi->mbmi.ref_frame[0];
+ ref_mv = xd->mi[-xd->mi_stride]->mbmi.mv[0];
+ rf = xd->mi[-xd->mi_stride]->mbmi.ref_frame[0];
for (i = 0; i < mi_width; ++i) {
- ref_mbmi = &xd->mi[-xd->mi_stride + i].src_mi->mbmi;
+ ref_mbmi = &xd->mi[-xd->mi_stride + i]->mbmi;
const_motion &= (ref_mv.as_int == ref_mbmi->mv[0].as_int) &&
(ref_frame == ref_mbmi->ref_frame[0]);
skip_ref_frame &= (rf == ref_mbmi->ref_frame[0]);
if ((mi_col - 1) >= tile_info->mi_col_start) {
if (ref_mv.as_int == INVALID_MV)
- ref_mv = xd->mi[-1].src_mi->mbmi.mv[0];
+ ref_mv = xd->mi[-1]->mbmi.mv[0];
if (rf == NONE)
- rf = xd->mi[-1].src_mi->mbmi.ref_frame[0];
+ rf = xd->mi[-1]->mbmi.ref_frame[0];
for (i = 0; i < mi_height; ++i) {
- ref_mbmi = &xd->mi[i * xd->mi_stride - 1].src_mi->mbmi;
+ ref_mbmi = &xd->mi[i * xd->mi_stride - 1]->mbmi;
const_motion &= (ref_mv.as_int == ref_mbmi->mv[0].as_int) &&
(ref_frame == ref_mbmi->ref_frame[0]);
skip_ref_frame &= (rf == ref_mbmi->ref_frame[0]);
if (!x->skip && !x->select_tx_size) {
int has_high_freq_coeff = 0;
int plane;
- int max_plane = is_inter_block(&xd->mi[0].src_mi->mbmi)
+ int max_plane = is_inter_block(&xd->mi[0]->mbmi)
? MAX_MB_PLANE : 1;
for (plane = 0; plane < max_plane; ++plane) {
x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
int64_t best_rd_so_far) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
unsigned char segment_id = mbmi->segment_id;
const int comp_pred = 0;
int i;
RD_OPT *const rd_opt = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const struct segmentation *const seg = &cm->seg;
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
unsigned char segment_id = mbmi->segment_id;
tmp_best_skippable = skippable;
tmp_best_mbmode = *mbmi;
for (i = 0; i < 4; i++) {
- tmp_best_bmodes[i] = xd->mi[0].src_mi->bmi[i];
+ tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
}
pred_exists = 1;
skippable = tmp_best_skippable;
*mbmi = tmp_best_mbmode;
for (i = 0; i < 4; i++)
- xd->mi[0].src_mi->bmi[i] = tmp_best_bmodes[i];
+ xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
}
rate2 += rate;
sizeof(uint8_t) * ctx->num_4x4_blk);
for (i = 0; i < 4; i++)
- best_bmodes[i] = xd->mi[0].src_mi->bmi[i];
+ best_bmodes[i] = xd->mi[0]->bmi[i];
// TODO(debargha): enhance this test with a better distortion prediction
// based on qp, activity mask and history
x->skip |= best_skip2;
if (!is_inter_block(&best_mbmode)) {
for (i = 0; i < 4; i++)
- xd->mi[0].src_mi->bmi[i].as_mode = best_bmodes[i].as_mode;
+ xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
} else {
for (i = 0; i < 4; ++i)
- vpx_memcpy(&xd->mi[0].src_mi->bmi[i], &best_bmodes[i],
+ vpx_memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i],
sizeof(b_mode_info));
- mbmi->mv[0].as_int = xd->mi[0].src_mi->bmi[3].as_mv[0].as_int;
- mbmi->mv[1].as_int = xd->mi[0].src_mi->bmi[3].as_mv[1].as_int;
+ mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
+ mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
}
for (i = 0; i < REFERENCE_MODES; ++i) {
}
static void count_segs(const VP9_COMMON *cm, MACROBLOCKD *xd,
- const TileInfo *tile, MODE_INFO *mi,
+ const TileInfo *tile, MODE_INFO **mi,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
int *t_unpred_seg_counts,
return;
xd->mi = mi;
- segment_id = xd->mi[0].src_mi->mbmi.segment_id;
+ segment_id = xd->mi[0]->mbmi.segment_id;
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
// Temporal prediction not allowed on key frames
if (cm->frame_type != KEY_FRAME) {
- const BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
+ const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
// Test to see if the segment id matches the predicted value.
const int pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
bsize, mi_row, mi_col);
// Store the prediction status for this mb and update counts
// as appropriate
- xd->mi[0].src_mi->mbmi.seg_id_predicted = pred_flag;
+ xd->mi[0]->mbmi.seg_id_predicted = pred_flag;
temporal_predictor_count[pred_context][pred_flag]++;
// Update the "unpredicted" segment count
}
static void count_segs_sb(const VP9_COMMON *cm, MACROBLOCKD *xd,
- const TileInfo *tile, MODE_INFO *mi,
+ const TileInfo *tile, MODE_INFO **mi,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
int *t_unpred_seg_counts,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- bw = num_8x8_blocks_wide_lookup[mi[0].src_mi->mbmi.sb_type];
- bh = num_8x8_blocks_high_lookup[mi[0].src_mi->mbmi.sb_type];
+ bw = num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type];
+ bh = num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type];
if (bw == bs && bh == bs) {
count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
// predicts this one
for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
TileInfo tile;
- MODE_INFO *mi_ptr;
+ MODE_INFO **mi_ptr;
vp9_tile_init(&tile, cm, 0, tile_col);
- mi_ptr = cm->mi + tile.mi_col_start;
+ mi_ptr = cm->mi_grid_visible + tile.mi_col_start;
for (mi_row = 0; mi_row < cm->mi_rows;
mi_row += 8, mi_ptr += 8 * cm->mi_stride) {
- MODE_INFO *mi = mi_ptr;
+ MODE_INFO **mi = mi_ptr;
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += 8, mi += 8)
count_segs_sb(cm, xd, &tile, mi, no_pred_segcounts,
const int which_mv = 0;
const MV mv = { mv_row, mv_col };
const InterpKernel *const kernel =
- vp9_get_interp_kernel(xd->mi[0].src_mi->mbmi.interp_filter);
+ vp9_get_interp_kernel(xd->mi[0]->mbmi.interp_filter);
enum mv_precision mv_precision_uv;
int uv_stride;
MV best_ref_mv1 = {0, 0};
MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
- MV *ref_mv = &x->e_mbd.mi[0].src_mi->bmi[0].as_mv[0].as_mv;
+ MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv;
// Save input state
struct buf_2d src = x->plane[0].src;
if (frames[frame] == NULL)
continue;
- mbd->mi[0].src_mi->bmi[0].as_mv[0].as_mv.row = 0;
- mbd->mi[0].src_mi->bmi[0].as_mv[0].as_mv.col = 0;
+ mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0;
+ mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0;
if (frame == alt_ref_index) {
filter_weight = 2;
frames[frame]->v_buffer + mb_uv_offset,
frames[frame]->y_stride,
mb_uv_width, mb_uv_height,
- mbd->mi[0].src_mi->bmi[0].as_mv[0].as_mv.row,
- mbd->mi[0].src_mi->bmi[0].as_mv[0].as_mv.col,
+ mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
+ mbd->mi[0]->bmi[0].as_mv[0].as_mv.col,
predictor, scale,
mb_col * 16, mb_row * 16);
}
}
cm->mi = cm->mip + cm->mi_stride + 1;
- xd->mi = cm->mi;
- xd->mi[0].src_mi = &xd->mi[0];
+ xd->mi = cm->mi_grid_visible;
+ xd->mi[0] = cm->mi;
} else {
// ARF is produced at the native frame size and resized when coded.
#if CONFIG_VP9_HIGHBITDEPTH
uint8_t token_cache[32 * 32];
struct macroblock_plane *p = &x->plane[plane];
struct macroblockd_plane *pd = &xd->plane[plane];
- MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
int pt; /* near block/prev token context index */
int c;
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int ctx = vp9_get_skip_context(xd);
const int skip_inc = !vp9_segfeature_active(&cm->seg, mbmi->segment_id,
SEG_LVL_SKIP);
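Taken as a whole, the patch drops the per-entry src_mi indirection in favour of a grid of MODE_INFO pointers (cm->mi_grid_visible, walked through xd->mi) laid over the contiguous MODE_INFO pool. A minimal standalone sketch of that layout with toy names and no border handling, just to show the indexing:
/* Toy pool + pointer grid; names are illustrative and omit VP9's border offset. */
#include <stdio.h>
#include <stdlib.h>
typedef struct { int sb_type; } toy_mb_mode_info;
typedef struct { toy_mb_mode_info mbmi; } toy_mode_info;
int main(void) {
  const int mi_rows = 4, mi_cols = 6, mi_stride = 6;
  toy_mode_info  *pool = calloc((size_t)(mi_rows * mi_cols), sizeof(*pool));
  toy_mode_info **grid = calloc((size_t)(mi_rows * mi_cols), sizeof(*grid));
  toy_mode_info **mi;
  int r, c;
  if (!pool || !grid) return 1;
  /* Every visible grid cell points into the pool; in VP9 many cells of a
   * large block can share one pointer, which is what the grid buys over src_mi. */
  for (r = 0; r < mi_rows; ++r)
    for (c = 0; c < mi_cols; ++c)
      grid[r * mi_stride + c] = &pool[r * mi_stride + c];
  /* "xd->mi"-style access: position the walking pointer, then dereference. */
  mi = &grid[2 * mi_stride + 3];
  mi[0]->mbmi.sb_type = 7;
  printf("%d\n", pool[2 * mi_stride + 3].mbmi.sb_type);   /* prints 7 */
  free(grid);
  free(pool);
  return 0;
}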