int enc_debug = 0;
#endif
-static INLINE uint8_t *get_sb_index(MACROBLOCKD *xd, BLOCK_SIZE subsize) {
+static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) {
switch (subsize) {
case BLOCK_64X64:
case BLOCK_64X32:
case BLOCK_32X64:
case BLOCK_32X32:
- return &xd->sb_index;
+ return &x->sb_index;
case BLOCK_32X16:
case BLOCK_16X32:
case BLOCK_16X16:
- return &xd->mb_index;
+ return &x->mb_index;
case BLOCK_16X8:
case BLOCK_8X16:
case BLOCK_8X8:
- return &xd->b_index;
+ return &x->b_index;
case BLOCK_8X4:
case BLOCK_4X8:
case BLOCK_4X4:
- return &xd->ab_index;
+ return &x->ab_index;
default:
assert(0);
return NULL;
if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
- if (xd->ab_index != 0) {
+ if (x->ab_index != 0) {
*totalrate = 0;
*totaldist = 0;
return;
}
static BLOCK_SIZE *get_sb_partitioning(MACROBLOCK *x, BLOCK_SIZE bsize) {
- MACROBLOCKD *const xd = &x->e_mbd;
switch (bsize) {
case BLOCK_64X64:
return &x->sb64_partitioning;
case BLOCK_32X32:
- return &x->sb_partitioning[xd->sb_index];
+ return &x->sb_partitioning[x->sb_index];
case BLOCK_16X16:
- return &x->mb_partitioning[xd->sb_index][xd->mb_index];
+ return &x->mb_partitioning[x->sb_index][x->mb_index];
case BLOCK_8X8:
- return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
+ return &x->b_partitioning[x->sb_index][x->mb_index][x->b_index];
default:
assert(0);
return NULL;
static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize, int sub_index) {
- VP9_COMMON * const cm = &cpi->common;
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (sub_index != -1)
- *get_sb_index(xd, bsize) = sub_index;
+ *get_sb_index(x, bsize) = sub_index;
if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
- if (xd->ab_index > 0)
+ if (x->ab_index > 0)
return;
}
set_offsets(cpi, tile, mi_row, mi_col, bsize);
static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize) {
- VP9_COMMON * const cm = &cpi->common;
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
BLOCK_SIZE c1 = BLOCK_8X8;
const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
int pl = 0;
for (i = 0; i < 4; i++) {
const int x_idx = i & 1, y_idx = i >> 1;
- *get_sb_index(xd, subsize) = i;
+ *get_sb_index(x, subsize) = i;
encode_sb(cpi, tile, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
output_enabled, subsize);
}
TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *rate, int64_t *dist,
int do_recon) {
- VP9_COMMON * const cm = &cpi->common;
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
const int mis = cm->mode_info_stride;
int bsl = b_width_log2(bsize);
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
- if (xd->ab_index != 0) {
+ if (x->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
bsize, get_block_context(x, bsize), INT64_MAX);
break;
case PARTITION_HORZ:
- *get_sb_index(xd, subsize) = 0;
+ *get_sb_index(x, subsize) = 0;
pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize), INT64_MAX);
if (last_part_rate != INT_MAX &&
int64_t dt = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
- *get_sb_index(xd, subsize) = 1;
+ *get_sb_index(x, subsize) = 1;
pick_sb_modes(cpi, tile, mi_row + (ms >> 1), mi_col, &rt, &dt, subsize,
get_block_context(x, subsize), INT64_MAX);
if (rt == INT_MAX || dt == INT_MAX) {
}
break;
case PARTITION_VERT:
- *get_sb_index(xd, subsize) = 0;
+ *get_sb_index(x, subsize) = 0;
pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize), INT64_MAX);
if (last_part_rate != INT_MAX &&
int64_t dt = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
- *get_sb_index(xd, subsize) = 1;
+ *get_sb_index(x, subsize) = 1;
pick_sb_modes(cpi, tile, mi_row, mi_col + (ms >> 1), &rt, &dt, subsize,
get_block_context(x, subsize), INT64_MAX);
if (rt == INT_MAX || dt == INT_MAX) {
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
- *get_sb_index(xd, subsize) = i;
+ *get_sb_index(x, subsize) = i;
rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
- if ((mi_row + y_idx >= cm->mi_rows)
- || (mi_col + x_idx >= cm->mi_cols))
+ if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
- *get_sb_index(xd, split_subsize) = i;
+ *get_sb_index(x, split_subsize) = i;
*get_sb_partitioning(x, bsize) = split_subsize;
*get_sb_partitioning(x, split_subsize) = split_subsize;
static void compute_fast_motion_search_level(VP9_COMP *cpi, BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
- MACROBLOCKD *const xd = &x->e_mbd;
// Only use 8x8 result for non HD videos.
// int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0;
PICK_MODE_CONTEXT *block_context = NULL;
if (bsize == BLOCK_16X16) {
- block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
+ block_context = x->sb8x8_context[x->sb_index][x->mb_index];
} else if (bsize == BLOCK_32X32) {
- block_context = x->mb_context[xd->sb_index];
+ block_context = x->mb_context[x->sb_index];
} else if (bsize == BLOCK_64X64) {
block_context = x->sb32_context;
}
TOKENEXTRA **tp, int mi_row,
int mi_col, BLOCK_SIZE bsize, int *rate,
int64_t *dist, int do_recon, int64_t best_rd) {
- VP9_COMMON * const cm = &cpi->common;
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
- if (xd->ab_index != 0) {
+ if (x->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
continue;
- *get_sb_index(xd, subsize) = i;
+ *get_sb_index(x, subsize) = i;
if (cpi->sf.adaptive_motion_search)
load_pred_mv(x, get_block_context(x, bsize));
rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, subsize,
// PARTITION_HORZ
if (partition_horz_allowed && do_rect) {
subsize = get_subsize(bsize, PARTITION_HORZ);
- *get_sb_index(xd, subsize) = 0;
+ *get_sb_index(x, subsize) = 0;
if (cpi->sf.adaptive_motion_search)
load_pred_mv(x, get_block_context(x, bsize));
pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
- *get_sb_index(xd, subsize) = 1;
+ *get_sb_index(x, subsize) = 1;
if (cpi->sf.adaptive_motion_search)
load_pred_mv(x, get_block_context(x, bsize));
pick_sb_modes(cpi, tile, mi_row + ms, mi_col, &this_rate,
if (partition_vert_allowed && do_rect) {
subsize = get_subsize(bsize, PARTITION_VERT);
- *get_sb_index(xd, subsize) = 0;
+ *get_sb_index(x, subsize) = 0;
if (cpi->sf.adaptive_motion_search)
load_pred_mv(x, get_block_context(x, bsize));
pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
- *get_sb_index(xd, subsize) = 1;
+ *get_sb_index(x, subsize) = 1;
if (cpi->sf.adaptive_motion_search)
load_pred_mv(x, get_block_context(x, bsize));
pick_sb_modes(cpi, tile, mi_row, mi_col + ms, &this_rate,
static void init_pick_mode_context(VP9_COMP *cpi) {
int i;
- MACROBLOCK *x = &cpi->mb;
- MACROBLOCKD *xd = &x->e_mbd;
- VP9_COMMON *cm = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->mb;
+
for (i = 0; i < BLOCK_SIZES; ++i) {
const int num_4x4_w = num_4x4_blocks_wide_lookup[i];
const int num_4x4_h = num_4x4_blocks_high_lookup[i];
const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h);
if (i < BLOCK_16X16) {
- for (xd->sb_index = 0; xd->sb_index < 4; ++xd->sb_index) {
- for (xd->mb_index = 0; xd->mb_index < 4; ++xd->mb_index) {
- for (xd->b_index = 0; xd->b_index < 16 / num_4x4_blk; ++xd->b_index) {
+ for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) {
+ for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index) {
+ for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index) {
PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
ctx->num_4x4_blk = num_4x4_blk;
CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
}
}
} else if (i < BLOCK_32X32) {
- for (xd->sb_index = 0; xd->sb_index < 4; ++xd->sb_index) {
- for (xd->mb_index = 0; xd->mb_index < 64 / num_4x4_blk;
- ++xd->mb_index) {
+ for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) {
+ for (x->mb_index = 0; x->mb_index < 64 / num_4x4_blk; ++x->mb_index) {
PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
ctx->num_4x4_blk = num_4x4_blk;
CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
}
}
} else if (i < BLOCK_64X64) {
- for (xd->sb_index = 0; xd->sb_index < 256 / num_4x4_blk; ++xd->sb_index) {
+ for (x->sb_index = 0; x->sb_index < 256 / num_4x4_blk; ++x->sb_index) {
PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
ctx->num_4x4_blk = num_4x4_blk;
CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
static void free_pick_mode_context(MACROBLOCK *x) {
int i;
- MACROBLOCKD *xd = &x->e_mbd;
for (i = 0; i < BLOCK_SIZES; ++i) {
const int num_4x4_w = num_4x4_blocks_wide_lookup[i];
const int num_4x4_h = num_4x4_blocks_high_lookup[i];
const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h);
if (i < BLOCK_16X16) {
- for (xd->sb_index = 0; xd->sb_index < 4; ++xd->sb_index) {
- for (xd->mb_index = 0; xd->mb_index < 4; ++xd->mb_index) {
- for (xd->b_index = 0; xd->b_index < 16 / num_4x4_blk; ++xd->b_index) {
+ for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) {
+ for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index) {
+ for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index) {
PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
vpx_free(ctx->zcoeff_blk);
ctx->zcoeff_blk = 0;
}
}
} else if (i < BLOCK_32X32) {
- for (xd->sb_index = 0; xd->sb_index < 4; ++xd->sb_index) {
- for (xd->mb_index = 0; xd->mb_index < 64 / num_4x4_blk;
- ++xd->mb_index) {
+ for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index) {
+ for (x->mb_index = 0; x->mb_index < 64 / num_4x4_blk; ++x->mb_index) {
PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
vpx_free(ctx->zcoeff_blk);
ctx->zcoeff_blk = 0;
}
}
} else if (i < BLOCK_64X64) {
- for (xd->sb_index = 0; xd->sb_index < 256 / num_4x4_blk; ++xd->sb_index) {
+ for (x->sb_index = 0; x->sb_index < 256 / num_4x4_blk; ++x->sb_index) {
PICK_MODE_CONTEXT *ctx = get_block_context(x, i);
vpx_free(ctx->zcoeff_blk);
ctx->zcoeff_blk = 0;
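
For reference, a minimal standalone sketch of the index-selection logic this patch moves onto MACROBLOCK. The BLOCK_SIZE enum and MACROBLOCK struct below are simplified stand-ins (the real definitions live in vp9_enums.h and vp9_block.h and carry many more members), and only the four counters touched by this patch are modeled; the real get_sb_index() uses the explicit switch shown above rather than the enum-order comparisons used here.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; assumed to mirror the vp9 enum ordering from
 * BLOCK_4X4 up to BLOCK_64X64. */
typedef enum {
  BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8,
  BLOCK_8X16, BLOCK_16X8, BLOCK_16X16, BLOCK_16X32,
  BLOCK_32X16, BLOCK_32X32, BLOCK_32X64, BLOCK_64X32, BLOCK_64X64
} BLOCK_SIZE;

typedef struct {
  uint8_t sb_index;  /* which 32x32 block inside the 64x64 superblock */
  uint8_t mb_index;  /* which 16x16 block inside the 32x32 block */
  uint8_t b_index;   /* which 8x8 block inside the 16x16 block */
  uint8_t ab_index;  /* which sub-8x8 block inside the 8x8 block */
} MACROBLOCK;

/* Pick the counter matching the partition level of `subsize`; relies on the
 * enum ordering above and is equivalent to the switch in the patch. */
static uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) {
  if (subsize >= BLOCK_32X32) return &x->sb_index;
  if (subsize >= BLOCK_16X16) return &x->mb_index;
  if (subsize >= BLOCK_8X8) return &x->b_index;
  return &x->ab_index;
}

int main(void) {
  MACROBLOCK x = {0, 0, 0, 0};
  /* Walk one branch of the recursive partition search: third 32x32 block,
   * then its second 16x16 block, then that block's first 8x8 block. */
  *get_sb_index(&x, BLOCK_32X32) = 2;
  *get_sb_index(&x, BLOCK_16X16) = 1;
  *get_sb_index(&x, BLOCK_8X8) = 0;
  printf("sb=%d mb=%d b=%d ab=%d\n", x.sb_index, x.mb_index, x.b_index,
         x.ab_index);
  return 0;
}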