// Note how often each mode is chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
if (is_inter_block(mbmi)
- && (mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
+ && (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV)) {
int_mv best_mv, best_second_mv;
const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0];
const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
}
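// vp9_update_nmv_count accumulates motion-vector statistics used to adapt
// the NMV entropy model; only blocks that actually code a motion vector
// (NEWMV, or sub-8x8 blocks whose sub-modes may each carry one) contribute.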
- if (bsize > BLOCK_SIZE_SB8X8 && mbmi->mode == NEWMV) {
+ if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) {
int i, j;
for (j = 0; j < mi_height; ++j)
for (i = 0; i < mi_width; ++i)
x->rd_search = 1;
- if (bsize < BLOCK_SIZE_SB8X8) {
+ if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
if (xd->ab_index != 0)
if (sub_index != -1)
*(get_sb_index(xd, bsize)) = sub_index;
- if (bsize < BLOCK_SIZE_SB8X8) {
+ if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
if (xd->ab_index > 0)
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
- BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
+ BLOCK_SIZE_TYPE c1 = BLOCK_8X8;
const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
int UNINITIALIZED_IS_SAFE(pl);
PARTITION_TYPE partition;
return;
c1 = BLOCK_4X4;
- if (bsize >= BLOCK_SIZE_SB8X8) {
+ if (bsize >= BLOCK_8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
c1 = *(get_sb_partitioning(x, bsize));
switch (partition) {
case PARTITION_NONE:
- if (output_enabled && bsize >= BLOCK_SIZE_SB8X8)
+ if (output_enabled && bsize >= BLOCK_8X8)
cpi->partition_count[pl][PARTITION_NONE]++;
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
break;
break;
}
- if (partition != PARTITION_SPLIT || bsize == BLOCK_SIZE_SB8X8) {
+ if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
update_partition_context(xd, c1, bsize);
}
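// update_partition_context records the partition chosen for this block in
// the above/left context rows, so that later blocks can condition their
// partition-type probabilities on what their neighbors did.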
int pixels_wide = 64, pixels_high = 64;
vp9_zero(vt);
- set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+ set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
if (xd->mb_to_right_edge < 0)
pixels_wide += (xd->mb_to_right_edge >> 3);
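// mb_to_right_edge is kept in 1/8-pixel units, so >> 3 converts it to
// pixels; it is negative when the 64x64 block overhangs the frame edge,
// which shrinks the region sampled by the variance computation below.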
setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
&xd->scale_factor[1]);
xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
- xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_64X64;
vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
&nearest_mv, &near_mv);
xd->mode_info_context->mbmi.mv[0] = nearest_mv;
- vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+ vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
d = xd->plane[0].dst.buf;
dp = xd->plane[0].dst.stride;
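// Variance-based partitioning builds a quick LAST_FRAME prediction of the
// whole 64x64 block; d/dp point at that prediction so the variance tree vt
// can be filled from the prediction error rather than the raw source.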
}
subsize = get_subsize(bsize, partition);
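// get_subsize maps a parent size and partition type to the child size:
// e.g. BLOCK_64X64 yields BLOCK_64X32 (HORZ), BLOCK_32X64 (VERT) or
// BLOCK_32X32 (SPLIT); PARTITION_NONE returns the parent size unchanged.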
- if (bsize < BLOCK_SIZE_SB8X8) {
+ if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
if (xd->ab_index != 0) {
if (cpi->sf.adjust_partitioning_from_last_frame) {
// Check if any of the sub-blocks are further split.
- if (partition == PARTITION_SPLIT && subsize > BLOCK_SIZE_SB8X8) {
+ if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
splits_below = 1;
for (i = 0; i < 4; i++) {
pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize), INT64_MAX);
if (last_part_rate != INT_MAX &&
- bsize >= BLOCK_SIZE_SB8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
+ bsize >= BLOCK_8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
int rt = 0;
int64_t dt = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize), INT64_MAX);
if (last_part_rate != INT_MAX &&
- bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
+ bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
int rt = 0;
int64_t dt = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
last_part_rate += x->partition_cost[pl][partition];
if (cpi->sf.adjust_partitioning_from_last_frame
- && partition != PARTITION_SPLIT && bsize > BLOCK_SIZE_SB8X8
+ && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
&& (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
&& (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
BLOCK_SIZE_TYPE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
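// RDCOST folds rate and distortion into one Lagrangian cost; at the time
// of this change it is (approximately)
//   RDCOST(RM, DM, R, D) = ((128 + (R) * (RM)) >> 8) + ((D) << (DM))
// so the comparisons below pick whichever candidate minimizes that cost.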
if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
< RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
m->mbmi.sb_type = bsize;
- if (bsize >= BLOCK_SIZE_SB8X8)
+ if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = subsize;
chosen_rate = last_part_rate;
chosen_dist = last_part_dist;
// If none was better, set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
> RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
- if (bsize >= BLOCK_SIZE_SB8X8)
+ if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
chosen_rate = none_rate;
chosen_dist = none_dist;
// We must have chosen a partitioning and encoding, or we'll fail later on.
// No other opportunities for success.
- if ( bsize == BLOCK_SIZE_SB64X64)
+ if (bsize == BLOCK_64X64)
assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);
if (do_recon)
- encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+ encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
*rate = chosen_rate;
*dist = chosen_dist;
(void) *tp_orig;
- if (bsize < BLOCK_SIZE_SB8X8) {
+ if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
if (xd->ab_index != 0) {
// PARTITION_SPLIT
if (!cpi->sf.auto_min_max_partition_size ||
bsize >= cpi->sf.min_partition_size) {
- if (bsize > BLOCK_SIZE_SB8X8) {
+ if (bsize > BLOCK_8X8) {
int r4 = 0;
int64_t d4 = 0, sum_rd = 0;
subsize = get_subsize(bsize, PARTITION_SPLIT);
block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
} else if (bsize == BLOCK_32X32) {
block_context = x->mb_context[xd->sb_index];
- } else if (bsize == BLOCK_SIZE_SB64X64) {
+ } else if (bsize == BLOCK_64X64) {
block_context = x->sb32_context;
}
int64_t d;
pick_sb_modes(cpi, mi_row, mi_col, &r, &d, bsize,
get_block_context(x, bsize), best_rd);
- if (r != INT_MAX && bsize >= BLOCK_SIZE_SB8X8) {
+ if (r != INT_MAX && bsize >= BLOCK_8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_NONE];
}
if (r != INT_MAX &&
- (bsize == BLOCK_SIZE_SB8X8 ||
+ (bsize == BLOCK_8X8 ||
RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, srate, sdist))) {
best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r, d));
srate = r;
sdist = d;
larger_is_better = 1;
- if (bsize >= BLOCK_SIZE_SB8X8)
+ if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
}
}
- if (bsize == BLOCK_SIZE_SB8X8) {
+ if (bsize == BLOCK_8X8) {
int r4 = 0;
int64_t d4 = 0, sum_rd = 0;
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (!cpi->sf.use_square_partition_only &&
(!cpi->sf.less_rectangular_check || !larger_is_better)) {
// PARTITION_HORZ
- if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
+ if (bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
int r2, r = 0;
int64_t d2, d = 0, h_rd;
subsize = get_subsize(bsize, PARTITION_HORZ);
}
// PARTITION_VERT
- if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
+ if (bsize >= BLOCK_8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
int r2;
int64_t d2, v_rd;
subsize = get_subsize(bsize, PARTITION_VERT);
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (srate < INT_MAX && sdist < INT_MAX && do_recon)
- encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+ encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
- if (bsize == BLOCK_SIZE_SB64X64) {
+ if (bsize == BLOCK_64X64) {
assert(tp_orig < *tp);
assert(srate < INT_MAX);
assert(sdist < INT_MAX);
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
- int bsl = b_width_log2(BLOCK_SIZE_SB64X64), bs = 1 << bsl;
+ int bsl = b_width_log2(BLOCK_64X64), bs = 1 << bsl;
int ms = bs / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
int r;
int64_t d;
- save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
+ save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
// Default is no mask (all reference frames allowed).
cpi->ref_frame_mask = 0;
if ((mi_row + (ms >> 1) < cm->mi_rows) &&
(mi_col + (ms >> 1) < cm->mi_cols)) {
cpi->set_ref_frame_mask = 1;
- pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_SIZE_SB64X64,
- get_block_context(x, BLOCK_SIZE_SB64X64), INT64_MAX);
+ pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_64X64,
+ get_block_context(x, BLOCK_64X64), INT64_MAX);
set_partition_seg_context(cm, xd, mi_row, mi_col);
- pl = partition_plane_context(xd, BLOCK_SIZE_SB64X64);
+ pl = partition_plane_context(xd, BLOCK_64X64);
r += x->partition_cost[pl][PARTITION_NONE];
- *(get_sb_partitioning(x, BLOCK_SIZE_SB64X64)) = BLOCK_SIZE_SB64X64;
+ *(get_sb_partitioning(x, BLOCK_64X64)) = BLOCK_64X64;
cpi->set_ref_frame_mask = 0;
}
- restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
+ restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
}
static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
MODE_INFO *p = cm->prev_mi + idx_str;
if (cpi->sf.use_one_partition_size_always) {
- set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+ set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
set_partitioning(cpi, m, cpi->sf.always_this_block_size);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else if (cpi->sf.partition_by_variance) {
choose_partitioning(cpi, cm->mi, mi_row, mi_col);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else {
if ((cpi->common.current_video_frame
&cpi->sf.min_partition_size,
&cpi->sf.max_partition_size);
}
- rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
} else {
copy_partitioning(cpi, m, p);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
}
}
&cpi->sf.max_partition_size);
}
- rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+ rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
}
}
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
mi = mi_ptr;
- for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8) {
- reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col,
- BLOCK_SIZE_SB64X64);
- }
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8)
+ reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64);
}
}
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
++cpi->y_uv_mode_count[m][uvm];
- if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+ if (xd->mode_info_context->mbmi.sb_type >= BLOCK_8X8) {
const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
const int bsl = MIN(bwl, bhl);
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
- } else if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ } else if (mbmi->sb_type < BLOCK_8X8) {
cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
} else {
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
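// zbin_mode_boost widens the quantizer's zero bin as a per-mode heuristic:
// zero-mv and split-mv blocks typically get larger boosts, since their
// residuals are more likely noise that is cheaper to zero out than to code.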
if (mbmi->ref_frame[0] == INTRA_FRAME) {
- vp9_encode_intra_block_y(
- cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
- vp9_encode_intra_block_uv(
- cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+ vp9_encode_intra_block_y(cm, x, MAX(bsize, BLOCK_8X8));
+ vp9_encode_intra_block_uv(cm, x, MAX(bsize, BLOCK_8X8));
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
&xd->scale_factor[1]);
- vp9_build_inter_predictors_sb(
- xd, mi_row, mi_col,
- bsize < BLOCK_SIZE_SB8X8 ? BLOCK_SIZE_SB8X8 : bsize);
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
}
- if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) {
- vp9_tokenize_sb(cpi, t, !output_enabled,
- (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+ if (mbmi->ref_frame[0] == INTRA_FRAME) {
+ vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
} else if (!x->skip) {
- vp9_encode_sb(cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
- vp9_tokenize_sb(cpi, t, !output_enabled,
- (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+ vp9_encode_sb(cm, x, MAX(bsize, BLOCK_8X8));
+ vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
} else {
int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.mb_skip_coeff : 0;
mb_skip_context += (mi - mis)->mbmi.mb_skip_coeff;
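// The skip context is the sum of the neighbors' skip flags (0..2):
// (mi - 1) is the mode info to the left, (mi - mis) the one above.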
mbmi->mb_skip_coeff = 1;
if (output_enabled)
cm->counts.mbskip[mb_skip_context][1]++;
- vp9_reset_sb_tokens_context(
- xd, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+ vp9_reset_sb_tokens_context(xd, MAX(bsize, BLOCK_8X8));
}
// copy skip flag on all mb_mode_info contexts in this SB
if (output_enabled) {
if (cm->tx_mode == TX_MODE_SELECT &&
- mbmi->sb_type >= BLOCK_SIZE_SB8X8 &&
+ mbmi->sb_type >= BLOCK_8X8 &&
!(is_inter_block(mbmi) &&
(mbmi->mb_skip_coeff ||
vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)))) {
struct macroblockd_plane *pd = &xd->plane[0];
const int src_stride = p->src.stride;
const int dst_stride = pd->dst.stride;
- uint8_t *src_init = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, ib,
+ uint8_t *src_init = raster_block_offset_uint8(xd, BLOCK_8X8, 0, ib,
p->src.buf, src_stride);
- uint8_t *dst_init = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, ib,
+ uint8_t *dst_init = raster_block_offset_uint8(xd, BLOCK_8X8, 0, ib,
pd->dst.buf, dst_stride);
int16_t *src_diff, *coeff;
block = ib + idy * 2 + idx;
xd->mode_info_context->bmi[block].as_mode = mode;
- src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, block,
+ src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, block,
p->src_diff);
coeff = BLOCK_OFFSET(x->plane[0].coeff, block, 16);
vp9_predict_intra_block(xd, block, 1,
int this_rate_tokenonly, this_rate, s;
int64_t this_distortion;
- MB_PREDICTION_MODE last_mode = bsize <= BLOCK_SIZE_SB8X8 ?
+ MB_PREDICTION_MODE last_mode = bsize <= BLOCK_8X8 ?
TM_PRED : cpi->sf.last_chroma_intra_mode;
for (mode = DC_PRED; mode <= last_mode; mode++) {
// appropriate speed flag is set.
if (cpi->sf.use_uv_intra_rd_estimate) {
rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
- (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
- bsize);
+ bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
// Else do a proper rd search for each possible transform size that may
// be considered in the main rd loop.
} else {
rd_pick_intra_sbuv_mode(cpi, x,
rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
- (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8
- : bsize);
+ bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
}
*mode_uv = x->e_mbd.mode_info_context->mbmi.uv_mode;
}
const int height = plane_block_height(bsize, pd);
int idx, idy;
const int src_stride = x->plane[0].src.stride;
- uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
x->plane[0].src.buf,
src_stride);
- int16_t* src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, i,
+ int16_t* src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, i,
x->plane[0].src_diff);
int16_t* coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, i);
- uint8_t* const pre = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ uint8_t* const pre = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
pd->pre[0].buf,
pd->pre[0].stride);
- uint8_t* const dst = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ uint8_t* const dst = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
pd->dst.buf,
pd->dst.stride);
int64_t thisdistortion = 0, thissse = 0;
if (mi->mbmi.ref_frame[1] > 0) {
uint8_t* const second_pre =
- raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+ raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
pd->pre[1].buf, pd->pre[1].stride);
vp9_build_inter_predictor(second_pre, pd->pre[1].stride,
dst, pd->dst.stride,
int64_t ssz, rd, rd1, rd2;
k += (idy * 2 + idx);
- src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, k,
+ src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, k,
x->plane[0].src_diff);
coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, k);
x->fwd_txm4x4(src_diff, coeff, 16);
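// fwd_txm4x4 forward-transforms the 4x4 residual into 16 coefficients;
// the final argument is the pitch of src_diff in bytes (here 16, i.e. a
// stride of 8 int16 samples).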
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
x->plane[0].src.buf =
- raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
x->plane[0].src.buf,
x->plane[0].src.stride);
assert(((intptr_t)x->e_mbd.plane[0].pre[0].buf & 0x7) == 0);
x->e_mbd.plane[0].pre[0].buf =
- raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
x->e_mbd.plane[0].pre[0].buf,
x->e_mbd.plane[0].pre[0].stride);
if (mbmi->ref_frame[1])
x->e_mbd.plane[0].pre[1].buf =
- raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
x->e_mbd.plane[0].pre[1].buf,
x->e_mbd.plane[0].pre[1].stride);
}
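// mi_buf_shift (with a matching restore on the way out) repoints the
// plane-0 source and reference buffers at sub-block i of the 8x8, so the
// per-sub-block motion search can run without copying pixel data.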
*returntotrate = bsi->r;
*returndistortion = bsi->d;
*returnyrate = bsi->segment_yrate;
- *skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_SIZE_SB8X8);
+ *skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_8X8);
*psse = bsi->sse;
mbmi->mode = bsi->modes[3];
x->skip_encode = 0;
ctx->skip = 0;
xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
- if (bsize >= BLOCK_SIZE_SB8X8) {
+ if (bsize >= BLOCK_8X8) {
if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
&dist_y, &y_skip, bsize, tx_cache,
best_rd) >= best_rd) {
return;
}
rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
- &dist_uv, &uv_skip, BLOCK_SIZE_SB8X8);
+ &dist_uv, &uv_skip, BLOCK_8X8);
}
if (y_skip && uv_skip) {
if (x->fast_ms > 2 && ref_frame != x->subblock_ref)
continue;
- if (cpi->sf.use_avoid_tested_higherror && bsize >= BLOCK_SIZE_SB8X8) {
+ if (cpi->sf.use_avoid_tested_higherror && bsize >= BLOCK_8X8) {
if (!(ref_frame_mask & (1 << ref_frame))) {
continue;
}
mbmi->interp_filter = cm->mcomp_filter_type;
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
- if (bsize >= BLOCK_SIZE_SB8X8 &&
+ if (bsize >= BLOCK_8X8 &&
(this_mode == I4X4_PRED || this_mode == SPLITMV))
continue;
- if (bsize < BLOCK_SIZE_SB8X8 &&
+ if (bsize < BLOCK_8X8 &&
!(this_mode == I4X4_PRED || this_mode == SPLITMV))
continue;
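// Sub-8x8 blocks are coded exclusively with the per-4x4 intra and
// split-mv modes, and larger blocks never use them, so both mismatched
// directions are pruned from the mode loop here.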
// If even the 'Y' rd value of split is higher than best so far
// then don't bother looking at UV
vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
- BLOCK_SIZE_SB8X8);
- vp9_subtract_sbuv(x, BLOCK_SIZE_SB8X8);
+ BLOCK_8X8);
+ vp9_subtract_sbuv(x, BLOCK_8X8);
super_block_uvrd_for_txfm(cm, x, &rate_uv, &distortion_uv,
- &uv_skippable, &uv_sse,
- BLOCK_SIZE_SB8X8, TX_4X4);
+ &uv_skippable, &uv_sse, BLOCK_8X8, TX_4X4);
rate2 += rate_uv;
distortion2 += distortion_uv;
skippable = skippable && uv_skippable;
const int mb_skip_allowed = !vp9_segfeature_active(seg, segment_id,
SEG_LVL_SKIP);
- if (skippable && bsize >= BLOCK_SIZE_SB8X8) {
+ if (skippable && bsize >= BLOCK_8X8) {
// Back out the coefficient coding costs
rate2 -= (rate_y + rate_uv);
// for best yrd calculation
&rate_uv_tokenonly[uv_tx_size],
&dist_uv[uv_tx_size],
&skip_uv[uv_tx_size],
- (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8
- : bsize);
+ bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
}
}
}
}
- if (best_rd == INT64_MAX && bsize < BLOCK_SIZE_SB8X8) {
+ if (best_rd == INT64_MAX && bsize < BLOCK_8X8) {
*returnrate = INT_MAX;
*returndistortion = INT_MAX;
return best_rd;
*mbmi = best_mbmode;
x->skip |= best_skip2;
if (best_mbmode.ref_frame[0] == INTRA_FRAME &&
- best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
+ best_mbmode.sb_type < BLOCK_8X8) {
for (i = 0; i < 4; i++)
xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
}
if (best_mbmode.ref_frame[0] != INTRA_FRAME &&
- best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
+ best_mbmode.sb_type < BLOCK_8X8) {
for (i = 0; i < 4; i++)
xd->mode_info_context->bmi[i].as_mv[0].as_int =
best_bmodes[i].as_mv[0].as_int;