static int is_compound_reference_allowed(const VP10_COMMON *cm) {
int i;
- if (frame_is_intra_only(cm))
- return 0;
+ if (frame_is_intra_only(cm)) return 0;
for (i = 1; i < INTER_REFS_PER_FRAME; ++i)
- if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
- return 1;
+ if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) return 1;
return 0;
}
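
Aside: the check above reduces to "compound prediction is allowed only when some
reference's sign bias differs from LAST_FRAME's". A minimal standalone sketch with
illustrative values; the real sign-bias table and INTER_REFS_PER_FRAME live in the
decoder, so the numbers below are assumptions:

#include <stdio.h>

/* Illustrative stand-ins; the real values come from VP10_COMMON. */
enum { LAST_FRAME = 1, INTER_REFS_PER_FRAME = 3 };

int main(void) {
  /* LAST and GOLDEN point backward (bias 0), ALTREF forward (bias 1). */
  int sign_bias[4] = { 0, 0, 0, 1 };
  int allowed = 0, i;
  for (i = 1; i < INTER_REFS_PER_FRAME; ++i)
    if (sign_bias[i + 1] != sign_bias[LAST_FRAME]) allowed = 1;
  printf("compound allowed: %d\n", allowed); /* prints 1: ALTREF differs */
  return 0;
}
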
cm->comp_bwd_ref[1] = ALTREF_FRAME;
#else
if (cm->ref_frame_sign_bias[LAST_FRAME] ==
- cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
+ cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
cm->comp_fixed_ref = ALTREF_FRAME;
cm->comp_var_ref[0] = LAST_FRAME;
cm->comp_var_ref[1] = GOLDEN_FRAME;
} else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
- cm->ref_frame_sign_bias[ALTREF_FRAME]) {
+ cm->ref_frame_sign_bias[ALTREF_FRAME]) {
cm->comp_fixed_ref = GOLDEN_FRAME;
cm->comp_var_ref[0] = LAST_FRAME;
cm->comp_var_ref[1] = ALTREF_FRAME;
}
#endif // CONFIG_EXT_INTER
-static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
- struct vpx_read_bit_buffer *rb) {
+static REFERENCE_MODE read_frame_reference_mode(
+ const VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
if (is_compound_reference_allowed(cm)) {
- return vpx_rb_read_bit(rb) ? REFERENCE_MODE_SELECT
- : (vpx_rb_read_bit(rb) ? COMPOUND_REFERENCE
- : SINGLE_REFERENCE);
+ return vpx_rb_read_bit(rb)
+ ? REFERENCE_MODE_SELECT
+ : (vpx_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
} else {
return SINGLE_REFERENCE;
}
static void update_mv_probs(vpx_prob *p, int n, vp10_reader *r) {
int i;
- for (i = 0; i < n; ++i)
- vp10_diff_update_prob(r, &p[i]);
+ for (i = 0; i < n; ++i) vp10_diff_update_prob(r, &p[i]);
}
static void read_mv_probs(nmv_context *ctx, int allow_hp, vp10_reader *r) {
}
}
-static void inverse_transform_block(MACROBLOCKD* xd, int plane,
+static void inverse_transform_block(MACROBLOCKD *xd, int plane,
const TX_TYPE tx_type,
- const TX_SIZE tx_size,
- uint8_t *dst, int stride,
- int eob) {
+ const TX_SIZE tx_size, uint8_t *dst,
+ int stride, int eob) {
struct macroblockd_plane *const pd = &xd->plane[plane];
if (eob > 0) {
tran_low_t *const dqcoeff = pd->dqcoeff;
} else {
if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
memset(dqcoeff, 0, 4 * 4 * num_4x4_blocks_wide_txsize_lookup[tx_size] *
- sizeof(dqcoeff[0]));
+ sizeof(dqcoeff[0]));
#if CONFIG_EXT_TX
else
memset(dqcoeff, 0, get_tx2d_size(tx_size) * sizeof(dqcoeff[0]));
vp10_reader *r,
#endif // CONFIG_ANS
MB_MODE_INFO *const mbmi,
- int plane,
- int row, int col,
+ int plane, int row, int col,
TX_SIZE tx_size) {
struct macroblockd_plane *const pd = &xd->plane[plane];
PREDICTION_MODE mode = (plane == 0) ? mbmi->mode : mbmi->uv_mode;
dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
if (mbmi->sb_type < BLOCK_8X8)
- if (plane == 0)
- mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
+ if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
- vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode,
- dst, pd->dst.stride, dst, pd->dst.stride,
- col, row, plane);
+ vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
+ pd->dst.stride, dst, pd->dst.stride, col, row,
+ plane);
if (!mbmi->skip) {
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
const scan_order *sc = get_scan(tx_size, tx_type, 0);
- const int eob = vp10_decode_block_tokens(xd,
- plane, sc, col, row, tx_size,
+ const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
tx_type, r, mbmi->segment_id);
- inverse_transform_block(xd, plane, tx_type, tx_size,
- dst, pd->dst.stride, eob);
+ inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
+ eob);
}
}
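
Aside: in inverse_transform_block above, when tx_type is DCT_DCT and eob <= 10 the
nonzero coefficients sit in the first few scan positions, so only the top four rows
of the dequant buffer are cleared (4 rows x 4 samples x blocks-wide); otherwise the
whole 2D transform block is cleared. A sketch of the two sizes, with the TX_8X8
lookup values assumed:

#include <stdio.h>

int main(void) {
  /* Assumed for TX_8X8: 2 four-sample units wide, 64 coefficients total
   * (stand-ins for num_4x4_blocks_wide_txsize_lookup / get_tx2d_size). */
  int blocks_wide = 2, tx2d_size = 64;
  int partial = 4 * 4 * blocks_wide; /* top four rows: 32 coefficients */
  printf("partial clear: %d, full clear: %d\n", partial, tx2d_size);
  return 0;
}
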
#if CONFIG_VAR_TX
static void decode_reconstruct_tx(MACROBLOCKD *const xd, vp10_reader *r,
- MB_MODE_INFO *const mbmi,
- int plane, BLOCK_SIZE plane_bsize,
- int block, int blk_row, int blk_col,
- TX_SIZE tx_size, int *eob_total) {
+ MB_MODE_INFO *const mbmi, int plane,
+ BLOCK_SIZE plane_bsize, int block,
+ int blk_row, int blk_col, TX_SIZE tx_size,
+ int *eob_total) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
const int tx_row = blk_row >> (1 - pd->subsampling_y);
const int tx_col = blk_col >> (1 - pd->subsampling_x);
- const TX_SIZE plane_tx_size = plane ?
- get_uv_tx_size_impl(mbmi->inter_tx_size[tx_row][tx_col], bsize, 0, 0) :
- mbmi->inter_tx_size[tx_row][tx_col];
+ const TX_SIZE plane_tx_size =
+ plane ? get_uv_tx_size_impl(mbmi->inter_tx_size[tx_row][tx_col], bsize, 0,
+ 0)
+ : mbmi->inter_tx_size[tx_row][tx_col];
int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
if (xd->mb_to_right_edge < 0)
max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
- if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide)
- return;
+ if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
if (tx_size == plane_tx_size
#if CONFIG_EXT_TX && CONFIG_RECT_TX
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, plane_tx_size);
const scan_order *sc = get_scan(plane_tx_size, tx_type, 1);
- const int eob = vp10_decode_block_tokens(xd, plane, sc,
- blk_col, blk_row, plane_tx_size,
- tx_type, r, mbmi->segment_id);
- inverse_transform_block(xd, plane, tx_type, plane_tx_size,
+ const int eob =
+ vp10_decode_block_tokens(xd, plane, sc, blk_col, blk_row, plane_tx_size,
+ tx_type, r, mbmi->segment_id);
+ inverse_transform_block(
+ xd, plane, tx_type, plane_tx_size,
&pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col],
pd->dst.stride, eob);
*eob_total += eob;
const int offsetc = blk_col + ((i & 0x01) << bsl);
int step = num_4x4_blocks_txsize_lookup[tx_size - 1];
- if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide)
- continue;
+ if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
decode_reconstruct_tx(xd, r, mbmi, plane, plane_bsize, block + i * step,
offsetr, offsetc, tx_size - 1, eob_total);
#else
vp10_reader *r,
#endif
- int segment_id, int plane,
- int row, int col, TX_SIZE tx_size) {
+ int segment_id, int plane, int row, int col,
+ TX_SIZE tx_size) {
struct macroblockd_plane *const pd = &xd->plane[plane];
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
int block_idx = (row << 1) + col;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
const scan_order *sc = get_scan(tx_size, tx_type, 1);
- const int eob = vp10_decode_block_tokens(xd,
- plane, sc, col, row,
- tx_size, tx_type, r,
- segment_id);
+ const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+ tx_type, r, segment_id);
inverse_transform_block(xd, plane, tx_type, tx_size,
&pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
}
#endif // !CONFIG_VAR_TX || CONFIG_SUPER_TX
-static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi,
- int n4_wl, int n4_hl) {
+static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi, int n4_wl,
+ int n4_hl) {
// get minimum log2 num4x4s dimension
const int x = VPXMIN(n4_wl, n4_hl);
- return VPXMIN(mbmi->tx_size, x);
+ return VPXMIN(mbmi->tx_size, x);
}
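
Aside: dec_get_uv_tx_size clamps the chroma transform to the block's smaller log2
4x4 dimension. A worked example with assumed inputs (TX_32X32 = 3, as in the
codebase's TX_SIZE enum):

#include <stdio.h>
#define VPXMIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
  /* Assumed: a 32x16 block with mbmi->tx_size = TX_32X32 (3), n4_wl=3, n4_hl=2. */
  int tx_size = 3, n4_wl = 3, n4_hl = 2;
  int x = VPXMIN(n4_wl, n4_hl); /* minimum log2 num-4x4s dimension = 2 */
  printf("uv tx_size = %d\n", VPXMIN(tx_size, x)); /* 2, i.e. TX_16X16 */
  return 0;
}
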
static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
- int bw, int bh, int x_mis, int y_mis,
- int bwl, int bhl) {
+ int bw, int bh, int x_mis, int y_mis, int bwl,
+ int bhl) {
const int offset = mi_row * cm->mi_stride + mi_col;
int x, y;
const TileInfo *const tile = &xd->tile;
static MB_MODE_INFO *set_offsets_extend(VP10_COMMON *const cm,
MACROBLOCKD *const xd,
const TileInfo *const tile,
- BLOCK_SIZE bsize_pred,
- int mi_row_pred, int mi_col_pred,
- int mi_row_ori, int mi_col_ori) {
+ BLOCK_SIZE bsize_pred, int mi_row_pred,
+ int mi_col_pred, int mi_row_ori,
+ int mi_col_ori) {
// Used in supertx
// (mi_row_ori, mi_col_ori): location for mv
// (mi_row_pred, mi_col_pred, bsize_pred): region to predict
const int bhl = b_height_log2_lookup[bsize_pred];
xd->mi = cm->mi_grid_visible + offset;
xd->mi[0] = cm->mi + offset;
- set_mi_row_col(xd, tile, mi_row_pred, bh, mi_col_pred, bw,
- cm->mi_rows, cm->mi_cols);
+ set_mi_row_col(xd, tile, mi_row_pred, bh, mi_col_pred, bw, cm->mi_rows,
+ cm->mi_cols);
- xd->up_available = (mi_row_ori > tile->mi_row_start);
- xd->left_available = (mi_col_ori > tile->mi_col_start);
+ xd->up_available = (mi_row_ori > tile->mi_row_start);
+ xd->left_available = (mi_col_ori > tile->mi_col_start);
set_plane_n4(xd, bw, bh, bwl, bhl);
}
static MB_MODE_INFO *set_mb_offsets(VP10_COMMON *const cm,
- MACROBLOCKD *const xd,
- BLOCK_SIZE bsize,
- int mi_row, int mi_col,
- int bw, int bh,
+ MACROBLOCKD *const xd, BLOCK_SIZE bsize,
+ int mi_row, int mi_col, int bw, int bh,
int x_mis, int y_mis) {
const int offset = mi_row * cm->mi_stride + mi_col;
const TileInfo *const tile = &xd->tile;
xd->mi[0] = cm->mi + offset;
xd->mi[0]->mbmi.sb_type = bsize;
for (y = 0; y < y_mis; ++y)
- for (x = !y; x < x_mis; ++x)
- xd->mi[y * cm->mi_stride + x] = xd->mi[0];
+ for (x = !y; x < x_mis; ++x) xd->mi[y * cm->mi_stride + x] = xd->mi[0];
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
return &xd->mi[0]->mbmi;
}
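
Aside: the double loop in set_mb_offsets aliases every mi-grid entry covered by the
block to the top-left entry; x starts at !y because xd->mi[0] was already assigned.
A toy grid showing the aliasing, with stride and extents chosen for illustration:

#include <stdio.h>

int main(void) {
  /* A 4x4 toy mi grid (stride 4); the block covers x_mis=2 by y_mis=2 units. */
  int storage[16] = { 0 };
  int *grid[16];
  int x, y, mi_stride = 4, x_mis = 2, y_mis = 2;
  for (x = 0; x < 16; ++x) grid[x] = &storage[x];
  /* Same loop shape as set_mb_offsets: x starts at !y so [0][0] is kept. */
  for (y = 0; y < y_mis; ++y)
    for (x = !y; x < x_mis; ++x) grid[y * mi_stride + x] = grid[0];
  printf("aliased: %d %d %d\n", grid[1] == grid[0], grid[4] == grid[0],
         grid[5] == grid[0]); /* all 1 */
  return 0;
}
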
static void set_offsets_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- const TileInfo *const tile,
- BLOCK_SIZE bsize, int mi_row, int mi_col) {
+ const TileInfo *const tile, BLOCK_SIZE bsize,
+ int mi_row, int mi_col) {
const int bw = num_8x8_blocks_wide_lookup[bsize];
const int bh = num_8x8_blocks_high_lookup[bsize];
const int offset = mi_row * cm->mi_stride + mi_col;
vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
}
-static void set_param_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void set_param_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int txfm, int skip) {
const int bw = num_8x8_blocks_wide_lookup[bsize];
#if CONFIG_VAR_TX
xd->above_txfm_context = cm->above_txfm_context + mi_col;
xd->left_txfm_context =
- xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
+ xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
set_txfm_ctx(xd->left_txfm_context, xd->mi[0]->mbmi.tx_size, bh);
set_txfm_ctx(xd->above_txfm_context, xd->mi[0]->mbmi.tx_size, bw);
#endif
}
-static void set_ref(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- int idx, int mi_row, int mi_col) {
+static void set_ref(VP10_COMMON *const cm, MACROBLOCKD *const xd, int idx,
+ int mi_row, int mi_col) {
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
xd->block_refs[idx] = ref_buffer;
}
static void dec_predict_b_extend(
- VP10Decoder *const pbi, MACROBLOCKD *const xd,
- const TileInfo *const tile, int block,
- int mi_row_ori, int mi_col_ori,
- int mi_row_pred, int mi_col_pred,
- int mi_row_top, int mi_col_top,
- uint8_t * dst_buf[3], int dst_stride[3],
- BLOCK_SIZE bsize_top,
- BLOCK_SIZE bsize_pred,
- int b_sub8x8, int bextend) {
+ VP10Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile,
+ int block, int mi_row_ori, int mi_col_ori, int mi_row_pred, int mi_col_pred,
+ int mi_row_top, int mi_col_top, uint8_t *dst_buf[3], int dst_stride[3],
+ BLOCK_SIZE bsize_top, BLOCK_SIZE bsize_pred, int b_sub8x8, int bextend) {
// Used in supertx
// (mi_row_ori, mi_col_ori): location for mv
// (mi_row_pred, mi_col_pred, bsize_pred): region to predict
if (mi_row_pred < mi_row_top || mi_col_pred < mi_col_top ||
mi_row_pred >= mi_row_top + mi_height_top ||
- mi_col_pred >= mi_col_top + mi_width_top ||
- mi_row_pred >= cm->mi_rows || mi_col_pred >= cm->mi_cols)
+ mi_col_pred >= mi_col_top + mi_width_top || mi_row_pred >= cm->mi_rows ||
+ mi_col_pred >= cm->mi_cols)
return;
- mbmi = set_offsets_extend(cm, xd, tile, bsize_pred,
- mi_row_pred, mi_col_pred,
+ mbmi = set_offsets_extend(cm, xd, tile, bsize_pred, mi_row_pred, mi_col_pred,
mi_row_ori, mi_col_ori);
set_ref(cm, xd, 0, mi_row_pred, mi_col_pred);
if (has_second_ref(&xd->mi[0]->mbmi))
(c >> xd->plane[2].subsampling_x);
if (!b_sub8x8)
- vp10_build_inter_predictors_sb_extend(
- xd,
+ vp10_build_inter_predictors_sb_extend(xd,
#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
+ mi_row_ori, mi_col_ori,
#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred, bsize_pred);
+ mi_row_pred, mi_col_pred, bsize_pred);
else
- vp10_build_inter_predictors_sb_sub8x8_extend(
- xd,
+ vp10_build_inter_predictors_sb_sub8x8_extend(xd,
#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
+ mi_row_ori, mi_col_ori,
#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred, bsize_pred, block);
+ mi_row_pred, mi_col_pred,
+ bsize_pred, block);
}
static void dec_extend_dir(VP10Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile, int block,
- BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
- int mi_row, int mi_col,
- int mi_row_top, int mi_col_top,
- uint8_t * dst_buf[3], int dst_stride[3], int dir) {
+ BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, int mi_row,
+ int mi_col, int mi_row_top, int mi_col_top,
+ uint8_t *dst_buf[3], int dst_stride[3], int dir) {
// dir: 0-lower, 1-upper, 2-left, 3-right
// 4-lowerleft, 5-upperleft, 6-lowerright, 7-upperright
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
int unit, mi_row_pred, mi_col_pred;
if (dir == 0 || dir == 1) {
- extend_bsize = (mi_width == 1 || bsize < BLOCK_8X8 || xss < yss) ?
- BLOCK_8X8 : BLOCK_16X8;
+ extend_bsize = (mi_width == 1 || bsize < BLOCK_8X8 || xss < yss)
+ ? BLOCK_8X8
+ : BLOCK_16X8;
unit = num_8x8_blocks_wide_lookup[extend_bsize];
mi_row_pred = mi_row + ((dir == 0) ? mi_height : -1);
mi_col_pred = mi_col;
- dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col,
- mi_row_pred, mi_col_pred,
- mi_row_top, mi_col_top,
- dst_buf, dst_stride,
- top_bsize, extend_bsize, b_sub8x8, 1);
+ dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col, mi_row_pred,
+ mi_col_pred, mi_row_top, mi_col_top, dst_buf,
+ dst_stride, top_bsize, extend_bsize, b_sub8x8, 1);
if (mi_width > unit) {
int i;
assert(!b_sub8x8);
- for (i = 0; i < mi_width/unit - 1; i++) {
+ for (i = 0; i < mi_width / unit - 1; i++) {
mi_col_pred += unit;
- dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col,
- mi_row_pred, mi_col_pred,
- mi_row_top, mi_col_top,
- dst_buf, dst_stride,
- top_bsize, extend_bsize, b_sub8x8, 1);
+ dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col, mi_row_pred,
+ mi_col_pred, mi_row_top, mi_col_top, dst_buf,
+ dst_stride, top_bsize, extend_bsize, b_sub8x8, 1);
}
}
} else if (dir == 2 || dir == 3) {
- extend_bsize = (mi_height == 1 || bsize < BLOCK_8X8 || yss < xss) ?
- BLOCK_8X8 : BLOCK_8X16;
+ extend_bsize = (mi_height == 1 || bsize < BLOCK_8X8 || yss < xss)
+ ? BLOCK_8X8
+ : BLOCK_8X16;
unit = num_8x8_blocks_high_lookup[extend_bsize];
mi_row_pred = mi_row;
mi_col_pred = mi_col + ((dir == 3) ? mi_width : -1);
- dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col,
- mi_row_pred, mi_col_pred,
- mi_row_top, mi_col_top,
- dst_buf, dst_stride,
- top_bsize, extend_bsize, b_sub8x8, 1);
+ dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col, mi_row_pred,
+ mi_col_pred, mi_row_top, mi_col_top, dst_buf,
+ dst_stride, top_bsize, extend_bsize, b_sub8x8, 1);
if (mi_height > unit) {
int i;
- for (i = 0; i < mi_height/unit - 1; i++) {
+ for (i = 0; i < mi_height / unit - 1; i++) {
mi_row_pred += unit;
- dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col,
- mi_row_pred, mi_col_pred,
- mi_row_top, mi_col_top,
- dst_buf, dst_stride,
- top_bsize, extend_bsize, b_sub8x8, 1);
+ dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col, mi_row_pred,
+ mi_col_pred, mi_row_top, mi_col_top, dst_buf,
+ dst_stride, top_bsize, extend_bsize, b_sub8x8, 1);
}
}
} else {
extend_bsize = BLOCK_8X8;
mi_row_pred = mi_row + ((dir == 4 || dir == 6) ? mi_height : -1);
mi_col_pred = mi_col + ((dir == 6 || dir == 7) ? mi_width : -1);
- dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col,
- mi_row_pred, mi_col_pred,
- mi_row_top, mi_col_top,
- dst_buf, dst_stride,
- top_bsize, extend_bsize, b_sub8x8, 1);
+ dec_predict_b_extend(pbi, xd, tile, block, mi_row, mi_col, mi_row_pred,
+ mi_col_pred, mi_row_top, mi_col_top, dst_buf,
+ dst_stride, top_bsize, extend_bsize, b_sub8x8, 1);
}
}
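
Aside: dec_extend_dir's dir parameter selects which neighboring strip or corner to
predict into. A sketch isolating the row/column offset arithmetic used above (not a
decoder API, just the same expressions made standalone):

#include <stdio.h>

/* dir: 0-lower, 1-upper, 2-left, 3-right,
 * 4-lowerleft, 5-upperleft, 6-lowerright, 7-upperright */
static void dir_offsets(int dir, int mi_height, int mi_width,
                        int *row_off, int *col_off) {
  if (dir == 0 || dir == 1) {        /* lower / upper strip */
    *row_off = (dir == 0) ? mi_height : -1;
    *col_off = 0;
  } else if (dir == 2 || dir == 3) { /* left / right strip */
    *row_off = 0;
    *col_off = (dir == 3) ? mi_width : -1;
  } else {                           /* the four corners */
    *row_off = (dir == 4 || dir == 6) ? mi_height : -1;
    *col_off = (dir == 6 || dir == 7) ? mi_width : -1;
  }
}

int main(void) {
  int r, c, dir;
  for (dir = 0; dir < 8; ++dir) {
    dir_offsets(dir, 2, 2, &r, &c);
    printf("dir %d -> (%+d, %+d)\n", dir, r, c);
  }
  return 0;
}
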
static void dec_extend_all(VP10Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile, int block,
- BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
- int mi_row, int mi_col,
- int mi_row_top, int mi_col_top,
- uint8_t * dst_buf[3], int dst_stride[3]) {
+ BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, int mi_row,
+ int mi_col, int mi_row_top, int mi_col_top,
+ uint8_t *dst_buf[3], int dst_stride[3]) {
dec_extend_dir(pbi, xd, tile, block, bsize, top_bsize, mi_row, mi_col,
mi_row_top, mi_col_top, dst_buf, dst_stride, 0);
dec_extend_dir(pbi, xd, tile, block, bsize, top_bsize, mi_row, mi_col,
static void dec_predict_sb_complex(VP10Decoder *const pbi,
MACROBLOCKD *const xd,
- const TileInfo *const tile,
- int mi_row, int mi_col,
- int mi_row_top, int mi_col_top,
+ const TileInfo *const tile, int mi_row,
+ int mi_col, int mi_row_top, int mi_col_top,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
uint8_t *dst_buf[3], int dst_stride[3]) {
const VP10_COMMON *const cm = &pbi->common;
const int mi_offset = mi_row * cm->mi_stride + mi_col;
uint8_t *dst_buf1[3], *dst_buf2[3], *dst_buf3[3];
- DECLARE_ALIGNED(16, uint8_t,
- tmp_buf1[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
- DECLARE_ALIGNED(16, uint8_t,
- tmp_buf2[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
- DECLARE_ALIGNED(16, uint8_t,
- tmp_buf3[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
- int dst_stride1[3] = {MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE};
- int dst_stride2[3] = {MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE};
- int dst_stride3[3] = {MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE};
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf3[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
+ int dst_stride1[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
+ int dst_stride2[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
+ int dst_stride3[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
}
#endif
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
xd->mi = cm->mi_grid_visible + mi_offset;
xd->mi[0] = cm->mi + mi_offset;
// weighted average to smooth the boundary
xd->plane[0].dst.buf = dst_buf[0];
xd->plane[0].dst.stride = dst_stride[0];
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[0], dst_stride[0],
- dst_buf1[0], dst_stride1[0],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, 0);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
+ mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
+ 0);
} else {
// First half
dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
if (mi_row + hbs < cm->mi_rows) {
// Second half
dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col,
- mi_row + hbs, mi_col,
- mi_row_top, mi_col_top,
- dst_buf1, dst_stride1,
- top_bsize, subsize, 0, 0);
+ mi_row + hbs, mi_col, mi_row_top, mi_col_top,
+ dst_buf1, dst_stride1, top_bsize, subsize, 0, 0);
if (bsize < top_bsize)
- dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize,
- mi_row + hbs, mi_col,
- mi_row_top, mi_col_top,
- dst_buf1, dst_stride1);
+ dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row + hbs,
+ mi_col, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1);
else
- dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize,
- mi_row + hbs, mi_col,
- mi_row_top, mi_col_top,
- dst_buf1, dst_stride1, 1);
+ dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row + hbs,
+ mi_col, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1, 1);
// weighted average to smooth the boundary
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
- mi_row, mi_col, mi_row_top, mi_col_top,
- bsize, top_bsize, PARTITION_HORZ, i);
+ mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+ PARTITION_HORZ, i);
}
}
}
// Smooth
xd->plane[0].dst.buf = dst_buf[0];
xd->plane[0].dst.stride = dst_stride[0];
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[0], dst_stride[0],
- dst_buf1[0], dst_stride1[0],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, 0);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
+ mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
+ 0);
} else {
// First half
dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
// Second half
if (mi_col + hbs < cm->mi_cols) {
- dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs,
- mi_row, mi_col + hbs, mi_row_top, mi_col_top,
- dst_buf1, dst_stride1, top_bsize, subsize, 0, 0);
+ dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1, top_bsize, subsize, 0, 0);
if (bsize < top_bsize)
- dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize,
- mi_row, mi_col + hbs, mi_row_top, mi_col_top,
- dst_buf1, dst_stride1);
+ dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1);
else
- dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize,
- mi_row, mi_col + hbs, mi_row_top, mi_col_top,
- dst_buf1, dst_stride1, 2);
+ dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1, 2);
// Smooth
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.stride = dst_stride[i];
vp10_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
- mi_row, mi_col, mi_row_top, mi_col_top,
- bsize, top_bsize, PARTITION_VERT, i);
+ mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+ PARTITION_VERT, i);
}
}
}
mi_row_top, mi_col_top, dst_buf3, dst_stride3);
}
} else {
- dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col,
- mi_row_top, mi_col_top, subsize, top_bsize,
- dst_buf, dst_stride);
+ dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col, mi_row_top,
+ mi_col_top, subsize, top_bsize, dst_buf,
+ dst_stride);
if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols)
dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col + hbs,
mi_row_top, mi_col_top, subsize, top_bsize,
mi_row_top, mi_col_top, subsize, top_bsize,
dst_buf3, dst_stride3);
}
- for (i = 0; i < MAX_MB_PLANE; i++) {
- if (bsize == BLOCK_8X8 && i != 0)
- continue; // Skip <4x4 chroma smoothing
- if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i],
- dst_stride1[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
- if (mi_row + hbs < cm->mi_rows) {
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf2[i],
- dst_stride2[i],
- dst_buf3[i],
- dst_stride3[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i],
- dst_stride[i],
- dst_buf2[i],
- dst_stride2[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
- }
- } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i],
- dst_stride[i],
- dst_buf2[i],
- dst_stride2[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ if (bsize == BLOCK_8X8 && i != 0)
+ continue; // Skip <4x4 chroma smoothing
+ if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
+ mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+ PARTITION_VERT, i);
+ if (mi_row + hbs < cm->mi_rows) {
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i],
+ mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+ PARTITION_VERT, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
+ mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+ PARTITION_HORZ, i);
}
+ } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
+ mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+ PARTITION_HORZ, i);
}
+ }
break;
#if CONFIG_EXT_PARTITION_TYPES
case PARTITION_HORZ_A:
mi_row_top, mi_col_top, dst_buf, dst_stride);
dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row,
- mi_col + hbs, mi_row_top, mi_col_top,
- dst_buf1, dst_stride1, top_bsize, bsize2, 0, 0);
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1, top_bsize, bsize2, 0, 0);
dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs,
mi_row_top, mi_col_top, dst_buf1, dst_stride1);
- dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col,
- mi_row + hbs, mi_col, mi_row_top, mi_col_top,
- dst_buf2, dst_stride2, top_bsize, subsize, 0, 0);
+ dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
+ mi_col, mi_row_top, mi_col_top, dst_buf2,
+ dst_stride2, top_bsize, subsize, 0, 0);
if (bsize < top_bsize)
- dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize,
- mi_row + hbs, mi_col, mi_row_top, mi_col_top,
- dst_buf2, dst_stride2);
+ dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row + hbs,
+ mi_col, mi_row_top, mi_col_top, dst_buf2, dst_stride2);
else
- dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize,
- mi_row + hbs, mi_col, mi_row_top, mi_col_top,
- dst_buf2, dst_stride2, 1);
+ dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row + hbs,
+ mi_col, mi_row_top, mi_col_top, dst_buf2, dst_stride2,
+ 1);
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i], dst_stride1[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
+ mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
+ i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf2[i], dst_stride2[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
+ mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
+ i);
}
break;
case PARTITION_VERT_A:
dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col,
mi_row_top, mi_col_top, dst_buf, dst_stride);
- dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col,
- mi_row + hbs, mi_col, mi_row_top, mi_col_top,
- dst_buf1, dst_stride1, top_bsize, bsize2, 0, 0);
+ dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
+ mi_col, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1, top_bsize, bsize2, 0, 0);
dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col,
mi_row_top, mi_col_top, dst_buf1, dst_stride1);
- dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs,
- mi_row, mi_col + hbs, mi_row_top, mi_col_top,
- dst_buf2, dst_stride2,
- top_bsize, subsize, 0, 0);
+ dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
+ dst_stride2, top_bsize, subsize, 0, 0);
if (bsize < top_bsize)
- dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize,
- mi_row, mi_col + hbs, mi_row_top, mi_col_top,
- dst_buf2, dst_stride2);
+ dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
+ dst_stride2);
else
- dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize,
- mi_row, mi_col + hbs, mi_row_top, mi_col_top,
- dst_buf2, dst_stride2, 2);
+ dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
+ dst_stride2, 2);
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i], dst_stride1[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
+ mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
+ i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf2[i], dst_stride2[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
+ mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
+ i);
}
break;
case PARTITION_HORZ_B:
mi_row_top, mi_col_top, dst_buf, dst_stride, 0);
dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
- mi_col, mi_row_top, mi_col_top,
- dst_buf1, dst_stride1, top_bsize, bsize2, 0, 0);
+ mi_col, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1, top_bsize, bsize2, 0, 0);
dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col,
mi_row_top, mi_col_top, dst_buf1, dst_stride1);
dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col + hbs,
mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top,
dst_buf2, dst_stride2, top_bsize, bsize2, 0, 0);
- dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize,
- mi_row + hbs, mi_col + hbs,
- mi_row_top, mi_col_top, dst_buf2, dst_stride2);
+ dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
+ dst_stride2);
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf1[i], dst_stride1[i],
- dst_buf2[i], dst_stride2[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
+ mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+ PARTITION_VERT, i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i], dst_stride1[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
+ mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
+ i);
}
break;
case PARTITION_VERT_B:
dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col,
mi_row_top, mi_col_top, dst_buf, dst_stride, 3);
- dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs,
- mi_row, mi_col + hbs, mi_row_top, mi_col_top,
- dst_buf1, dst_stride1, top_bsize, bsize2, 0, 0);
+ dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
+ dst_stride1, top_bsize, bsize2, 0, 0);
dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs,
mi_row_top, mi_col_top, dst_buf1, dst_stride1);
dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col + hbs,
mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top,
dst_buf2, dst_stride2, top_bsize, bsize2, 0, 0);
- dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize,
- mi_row + hbs, mi_col + hbs,
- mi_row_top, mi_col_top, dst_buf2, dst_stride2);
+ dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs,
+ mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
+ dst_stride2);
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf1[i], dst_stride1[i],
- dst_buf2[i], dst_stride2[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_HORZ, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
+ mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+ PARTITION_HORZ, i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(xd,
- dst_buf[i], dst_stride[i],
- dst_buf1[i], dst_stride1[i],
- mi_row, mi_col,
- mi_row_top, mi_col_top,
- bsize, top_bsize,
- PARTITION_VERT, i);
+ vp10_build_masked_inter_predictor_complex(
+ xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
+ mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
+ i);
}
break;
#endif // CONFIG_EXT_PARTITION_TYPES
- default:
- assert(0);
+ default: assert(0);
}
}
seg_id_supertx = 0;
} else {
// Find the minimum segment_id
- for (r = 0 ; r < mih ; r++)
- for (c = 0 ; c < miw ; c++)
- seg_id_supertx = VPXMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id,
- seg_id_supertx);
+ for (r = 0; r < mih; r++)
+ for (c = 0; c < miw; c++)
+ seg_id_supertx =
+ VPXMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS);
}
// Assign the segment_id back to segment_id_supertx
- for (r = 0 ; r < mih ; r++)
- for (c = 0 ; c < miw ; c++)
+ for (r = 0; r < mih; r++)
+ for (c = 0; c < miw; c++)
mip[r * cm->mi_stride + c]->mbmi.segment_id_supertx = seg_id_supertx;
}
#endif // CONFIG_SUPERTX
#if CONFIG_SUPERTX
int supertx_enabled,
#endif // CONFIG_SUPERTX
- int mi_row, int mi_col,
- vp10_reader *r,
+ int mi_row, int mi_col, vp10_reader *r,
#if CONFIG_EXT_PARTITION_TYPES
PARTITION_TYPE partition,
#endif // CONFIG_EXT_PARTITION_TYPES
- BLOCK_SIZE bsize,
- int bwl, int bhl) {
+ BLOCK_SIZE bsize, int bwl, int bhl) {
VP10_COMMON *const cm = &pbi->common;
const int less8x8 = bsize < BLOCK_8X8;
const int bw = 1 << (bwl - 1);
#if CONFIG_SUPERTX
MB_MODE_INFO *mbmi;
if (supertx_enabled) {
- mbmi = set_mb_offsets(cm, xd, bsize, mi_row, mi_col,
- bw, bh, x_mis, y_mis);
+ mbmi = set_mb_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
} else {
- mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col,
- bw, bh, x_mis, y_mis, bwl, bhl);
+ mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis, bwl,
+ bhl);
}
#if CONFIG_EXT_PARTITION_TYPES
xd->mi[0]->mbmi.partition = partition;
#endif
- vp10_read_mode_info(pbi, xd, supertx_enabled,
- mi_row, mi_col, r, x_mis, y_mis);
+ vp10_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis,
+ y_mis);
#else
- MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col,
- bw, bh, x_mis, y_mis, bwl, bhl);
+ MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
+ y_mis, bwl, bhl);
#if CONFIG_EXT_PARTITION_TYPES
xd->mi[0]->mbmi.partition = partition;
#endif
const BLOCK_SIZE uv_subsize =
ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
if (uv_subsize == BLOCK_INVALID)
- vpx_internal_error(xd->error_info,
- VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
+ vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid block size.");
}
#if CONFIG_SUPERTX
const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size =
plane ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl)
- : mbmi->tx_size;
+ : mbmi->tx_size;
const int num_4x4_w = pd->n4_w;
const int num_4x4_h = pd->n4_h;
const int stepr = num_4x4_blocks_high_txsize_lookup[tx_size];
const int stepc = num_4x4_blocks_wide_txsize_lookup[tx_size];
int row, col;
- const int max_blocks_wide = num_4x4_w +
- (xd->mb_to_right_edge >= 0 ?
- 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
- const int max_blocks_high = num_4x4_h +
- (xd->mb_to_bottom_edge >= 0 ?
- 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h + (xd->mb_to_bottom_edge >= 0
+ ? 0
+ : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
for (row = 0; row < max_blocks_high; row += stepr)
for (col = 0; col < max_blocks_wide; col += stepc)
- predict_and_reconstruct_intra_block(xd,
- r,
- mbmi, plane,
- row, col, tx_size);
+ predict_and_reconstruct_intra_block(xd, r, mbmi, plane, row, col,
+ tx_size);
}
} else {
// Prediction
#if CONFIG_OBMC
if (mbmi->motion_variation == OBMC_CAUSAL) {
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED(16, uint8_t,
- tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
- DECLARE_ALIGNED(16, uint8_t,
- tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
#else
- DECLARE_ALIGNED(16, uint8_t,
- tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
- DECLARE_ALIGNED(16, uint8_t,
- tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
#endif // CONFIG_VP9_HIGHBITDEPTH
uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
- int dst_width1[MAX_MB_PLANE] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
- int dst_width2[MAX_MB_PLANE] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
- int dst_height1[MAX_MB_PLANE] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
- int dst_height2[MAX_MB_PLANE] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
- int dst_stride1[MAX_MB_PLANE] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
- int dst_stride2[MAX_MB_PLANE] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
+ int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+ int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+ int dst_height1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+ int dst_height2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+ int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+ int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
assert(mbmi->sb_type >= BLOCK_8X8);
#if CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_VP9_HIGHBITDEPTH
}
#endif // CONFIG_VP9_HIGHBITDEPTH
- vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col,
- dst_buf1, dst_width1,
- dst_height1, dst_stride1);
- vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col,
- dst_buf2, dst_width2,
- dst_height2, dst_stride2);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm),
- mi_row, mi_col);
- vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col,
- dst_buf1, dst_stride1,
- dst_buf2, dst_stride2);
+ vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_width1, dst_height1,
+ dst_stride1);
+ vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+ dst_width2, dst_height2, dst_stride2);
+ vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
+ mi_col);
+ vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_stride1, dst_buf2, dst_stride2);
}
#endif // CONFIG_OBMC
int row, col;
#if CONFIG_VAR_TX
// TODO(jingning): This can be simplified for decoder performance.
- const BLOCK_SIZE plane_bsize = get_plane_block_size(
- VPXMAX(bsize, BLOCK_8X8), pd);
+ const BLOCK_SIZE plane_bsize =
+ get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), pd);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
- const TX_SIZE max_tx_size = plane ?
- max_txsize_lookup[plane_bsize] : max_txsize_rect_lookup[plane_bsize];
+ const TX_SIZE max_tx_size = plane ? max_txsize_lookup[plane_bsize]
+ : max_txsize_rect_lookup[plane_bsize];
#else
const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
for (row = 0; row < num_4x4_h; row += bh) {
for (col = 0; col < num_4x4_w; col += bw) {
- decode_reconstruct_tx(xd, r, mbmi, plane, plane_bsize,
- block, row, col, max_tx_size, &eobtotal);
+ decode_reconstruct_tx(xd, r, mbmi, plane, plane_bsize, block, row,
+ col, max_tx_size, &eobtotal);
block += step;
}
}
#else
const TX_SIZE tx_size =
plane ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl)
- : mbmi->tx_size;
+ : mbmi->tx_size;
const int stepr = num_4x4_blocks_high_txsize_lookup[tx_size];
const int stepc = num_4x4_blocks_wide_txsize_lookup[tx_size];
- const int max_blocks_wide = num_4x4_w +
- (xd->mb_to_right_edge >= 0 ?
- 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
- const int max_blocks_high = num_4x4_h +
- (xd->mb_to_bottom_edge >= 0 ?
- 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h +
+ (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >>
+ (5 + pd->subsampling_y));
for (row = 0; row < max_blocks_high; row += stepr)
for (col = 0; col < max_blocks_wide; col += stepc)
- eobtotal += reconstruct_inter_block(xd,
- r,
- mbmi->segment_id,
- plane, row, col,
- tx_size);
+ eobtotal += reconstruct_inter_block(xd, r, mbmi->segment_id, plane,
+ row, col, tx_size);
#endif
}
xd->corrupted |= vp10_reader_has_error(r);
}
-static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd,
- int mi_row, int mi_col,
- int bsl) {
+static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd, int mi_row,
+ int mi_col, int bsl) {
const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
const PARTITION_CONTEXT *left_ctx =
- xd->left_seg_context + (mi_row & MAX_MIB_MASK);
- int above = (*above_ctx >> bsl) & 1 , left = (*left_ctx >> bsl) & 1;
+ xd->left_seg_context + (mi_row & MAX_MIB_MASK);
+ int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;
-// assert(bsl >= 0);
+ // assert(bsl >= 0);
return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
}
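
Aside: the partition context is one bit from each neighbor at the current
block-size level plus a per-level offset. A standalone version of the same formula,
with PARTITION_PLOFFSET assumed to be 4 as elsewhere in the codebase:

#include <stdio.h>
#define PARTITION_PLOFFSET 4 /* assumed, matches the codebase constant */

static int partition_ctx(int above_bits, int left_bits, int bsl) {
  int above = (above_bits >> bsl) & 1;
  int left = (left_bits >> bsl) & 1;
  return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
}

int main(void) {
  /* e.g. bsl = 2, both neighbors already split at this size */
  printf("ctx = %d\n", partition_ctx(0x4, 0x4, 2)); /* (1*2+1) + 2*4 = 11 */
  return 0;
}
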
#if !CONFIG_EXT_PARTITION_TYPES
-static INLINE void dec_update_partition_context(MACROBLOCKD *xd,
- int mi_row, int mi_col,
- BLOCK_SIZE subsize,
+static INLINE void dec_update_partition_context(MACROBLOCKD *xd, int mi_row,
+ int mi_col, BLOCK_SIZE subsize,
int bw) {
PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col;
PARTITION_CONTEXT *const left_ctx =
- xd->left_seg_context + (mi_row & MAX_MIB_MASK);
+ xd->left_seg_context + (mi_row & MAX_MIB_MASK);
  // update the partition context at the end nodes. set partition bits
// of block sizes larger than the current one to be one, and partition
else
p = PARTITION_SPLIT;
- if (counts)
- ++counts->partition[ctx][p];
+ if (counts) ++counts->partition[ctx][p];
return p;
}
#if CONFIG_SUPERTX
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd,
- int segment_id, vp10_reader *r) {
+static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+ vp10_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
const int ctx = vp10_get_skip_context(xd);
const int skip = vp10_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->skip[ctx][skip];
+ if (counts) ++counts->skip[ctx][skip];
return skip;
}
}
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
- int mi_row, int mi_col,
- vp10_reader* r,
+ int mi_row, int mi_col, vp10_reader *r,
BLOCK_SIZE bsize, int n4x4_l2) {
VP10_COMMON *const cm = &pbi->common;
const int n8x8_l2 = n4x4_l2 - 1;
int txfm = DCT_DCT;
#endif // CONFIG_SUPERTX
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
partition = read_partition(cm, xd, mi_row, mi_col, r, has_rows, has_cols,
#if CONFIG_EXT_PARTITION_TYPES
n8x8_l2);
subsize = subsize_lookup[partition][bsize]; // get_subsize(bsize, partition);
#if CONFIG_SUPERTX
- if (!frame_is_intra_only(cm) &&
- partition != PARTITION_NONE &&
- bsize <= MAX_SUPERTX_BLOCK_SIZE &&
- !supertx_enabled &&
- !xd->lossless[0]) {
- const int supertx_context =
- partition_supertx_context_lookup[partition];
- supertx_enabled = vp10_read(
- r, cm->fc->supertx_prob[supertx_context][supertx_size]);
+ if (!frame_is_intra_only(cm) && partition != PARTITION_NONE &&
+ bsize <= MAX_SUPERTX_BLOCK_SIZE && !supertx_enabled && !xd->lossless[0]) {
+ const int supertx_context = partition_supertx_context_lookup[partition];
+ supertx_enabled =
+ vp10_read(r, cm->fc->supertx_prob[supertx_context][supertx_size]);
if (xd->counts)
xd->counts->supertx[supertx_context][supertx_size][supertx_enabled]++;
#if CONFIG_VAR_TX
- if (supertx_enabled)
- xd->supertx_size = supertx_size;
+ if (supertx_enabled) xd->supertx_size = supertx_size;
#endif
}
#endif // CONFIG_SUPERTX
#endif // CONFIG_SUPERTX
mi_row, mi_col, r,
#if CONFIG_EXT_PARTITION_TYPES
- partition,
+ partition,
#endif // CONFIG_EXT_PARTITION_TYPES
subsize, n4x4_l2, n4x4_l2);
break;
#endif // CONFIG_SUPERTX
mi_row, mi_col, r,
#if CONFIG_EXT_PARTITION_TYPES
- partition,
+ partition,
#endif // CONFIG_EXT_PARTITION_TYPES
subsize, n4x4_l2, n8x8_l2);
if (has_rows)
#endif // CONFIG_SUPERTX
mi_row + hbs, mi_col, r,
#if CONFIG_EXT_PARTITION_TYPES
- partition,
+ partition,
#endif // CONFIG_EXT_PARTITION_TYPES
subsize, n4x4_l2, n8x8_l2);
break;
#endif // CONFIG_SUPERTX
mi_row, mi_col, r,
#if CONFIG_EXT_PARTITION_TYPES
- partition,
+ partition,
#endif // CONFIG_EXT_PARTITION_TYPES
subsize, n8x8_l2, n4x4_l2);
if (has_cols)
#endif // CONFIG_SUPERTX
mi_row, mi_col + hbs, r,
#if CONFIG_EXT_PARTITION_TYPES
- partition,
+ partition,
#endif // CONFIG_EXT_PARTITION_TYPES
subsize, n8x8_l2, n4x4_l2);
break;
#if CONFIG_SUPERTX
supertx_enabled,
#endif // CONFIG_SUPERTX
- mi_row, mi_col, r,
- subsize, n8x8_l2);
+ mi_row, mi_col, r, subsize, n8x8_l2);
decode_partition(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif // CONFIG_SUPERTX
- mi_row, mi_col + hbs, r,
- subsize, n8x8_l2);
+ mi_row, mi_col + hbs, r, subsize, n8x8_l2);
decode_partition(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif // CONFIG_SUPERTX
- mi_row + hbs, mi_col, r,
- subsize, n8x8_l2);
+ mi_row + hbs, mi_col, r, subsize, n8x8_l2);
decode_partition(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif // CONFIG_SUPERTX
- mi_row + hbs, mi_col + hbs, r,
- subsize, n8x8_l2);
+ mi_row + hbs, mi_col + hbs, r, subsize, n8x8_l2);
break;
#if CONFIG_EXT_PARTITION_TYPES
case PARTITION_HORZ_A:
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row, mi_col, r,
- partition, bsize2, n8x8_l2, n8x8_l2);
+ mi_row, mi_col, r, partition, bsize2, n8x8_l2, n8x8_l2);
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row, mi_col + hbs, r,
- partition, bsize2, n8x8_l2, n8x8_l2);
+ mi_row, mi_col + hbs, r, partition, bsize2, n8x8_l2,
+ n8x8_l2);
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row + hbs, mi_col, r,
- partition, subsize, n4x4_l2, n8x8_l2);
+ mi_row + hbs, mi_col, r, partition, subsize, n4x4_l2,
+ n8x8_l2);
break;
case PARTITION_HORZ_B:
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row, mi_col, r,
- partition, subsize, n4x4_l2, n8x8_l2);
+ mi_row, mi_col, r, partition, subsize, n4x4_l2, n8x8_l2);
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row + hbs, mi_col, r,
- partition, bsize2, n8x8_l2, n8x8_l2);
+ mi_row + hbs, mi_col, r, partition, bsize2, n8x8_l2,
+ n8x8_l2);
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row + hbs, mi_col + hbs, r,
- partition, bsize2, n8x8_l2, n8x8_l2);
+ mi_row + hbs, mi_col + hbs, r, partition, bsize2, n8x8_l2,
+ n8x8_l2);
break;
case PARTITION_VERT_A:
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row, mi_col, r,
- partition, bsize2, n8x8_l2, n8x8_l2);
+ mi_row, mi_col, r, partition, bsize2, n8x8_l2, n8x8_l2);
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row + hbs, mi_col, r,
- partition, bsize2, n8x8_l2, n8x8_l2);
+ mi_row + hbs, mi_col, r, partition, bsize2, n8x8_l2,
+ n8x8_l2);
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row, mi_col + hbs, r,
- partition, subsize, n8x8_l2, n4x4_l2);
+ mi_row, mi_col + hbs, r, partition, subsize, n8x8_l2,
+ n4x4_l2);
break;
case PARTITION_VERT_B:
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row, mi_col, r,
- partition, subsize, n8x8_l2, n4x4_l2);
+ mi_row, mi_col, r, partition, subsize, n8x8_l2, n4x4_l2);
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row, mi_col + hbs, r,
- partition, bsize2, n8x8_l2, n8x8_l2);
+ mi_row, mi_col + hbs, r, partition, bsize2, n8x8_l2,
+ n8x8_l2);
decode_block(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif
- mi_row + hbs, mi_col + hbs, r,
- partition, bsize2, n8x8_l2, n8x8_l2);
+ mi_row + hbs, mi_col + hbs, r, partition, bsize2, n8x8_l2,
+ n8x8_l2);
break;
#endif
- default:
- assert(0 && "Invalid partition type");
+ default: assert(0 && "Invalid partition type");
}
}
xd->mi = cm->mi_grid_visible + offset;
xd->mi[0] = cm->mi + offset;
- set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[bsize],
- mi_col, num_8x8_blocks_wide_lookup[bsize],
- cm->mi_rows, cm->mi_cols);
+ set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[bsize], mi_col,
+ num_8x8_blocks_wide_lookup[bsize], cm->mi_rows, cm->mi_cols);
set_skip_context(xd, mi_row, mi_col);
skip = read_skip(cm, xd, xd->mi[0]->mbmi.segment_id_supertx, r);
if (skip) {
int eset = get_ext_tx_set(supertx_size, bsize, 1);
if (eset > 0) {
txfm = vp10_read_tree(r, vp10_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][supertx_size]);
- if (xd->counts)
- ++xd->counts->inter_ext_tx[eset][supertx_size][txfm];
+ cm->fc->inter_ext_tx_prob[eset][supertx_size]);
+ if (xd->counts) ++xd->counts->inter_ext_tx[eset][supertx_size][txfm];
}
}
#else
if (supertx_size < TX_32X32) {
txfm = vp10_read_tree(r, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[supertx_size]);
- if (xd->counts)
- ++xd->counts->inter_ext_tx[supertx_size][txfm];
+ cm->fc->inter_ext_tx_prob[supertx_size]);
+ if (xd->counts) ++xd->counts->inter_ext_tx[supertx_size][txfm];
}
#endif // CONFIG_EXT_TX
}
-
vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
for (i = 0; i < MAX_MB_PLANE; i++) {
dst_buf[i] = xd->plane[i].dst.buf;
dst_stride[i] = xd->plane[i].dst.stride;
}
- dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col, mi_row, mi_col,
- bsize, bsize, dst_buf, dst_stride);
+ dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col, mi_row, mi_col, bsize,
+ bsize, dst_buf, dst_stride);
if (!skip) {
int eobtotal = 0;
const int num_4x4_h = pd->n4_h;
int row, col;
const TX_SIZE tx_size =
- i ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl)
- : mbmi->tx_size;
+ i ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl) : mbmi->tx_size;
const int stepr = num_4x4_blocks_high_txsize_lookup[tx_size];
const int stepc = num_4x4_blocks_wide_txsize_lookup[tx_size];
- const int max_blocks_wide = num_4x4_w +
- (xd->mb_to_right_edge >= 0 ?
- 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
- const int max_blocks_high = num_4x4_h +
- (xd->mb_to_bottom_edge >= 0 ?
- 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+ const int max_blocks_wide =
+ num_4x4_w + (xd->mb_to_right_edge >= 0
+ ? 0
+ : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int max_blocks_high =
+ num_4x4_h +
+ (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >>
+ (5 + pd->subsampling_y));
for (row = 0; row < max_blocks_high; row += stepr)
for (col = 0; col < max_blocks_wide; col += stepc)
- eobtotal += reconstruct_inter_block(xd,
- r,
- mbmi->segment_id_supertx,
- i, row, col,
- tx_size);
+ eobtotal += reconstruct_inter_block(xd, r, mbmi->segment_id_supertx,
+ i, row, col, tx_size);
}
- if (!(subsize < BLOCK_8X8) && eobtotal == 0)
- skip = 1;
+ if (!(subsize < BLOCK_8X8) && eobtotal == 0) skip = 1;
}
set_param_topblock(cm, xd, bsize, mi_row, mi_col, txfm, skip);
}
if (bsize >= BLOCK_8X8) {
switch (partition) {
case PARTITION_SPLIT:
- if (bsize > BLOCK_8X8)
- break;
+ if (bsize > BLOCK_8X8) break;
case PARTITION_NONE:
case PARTITION_HORZ:
case PARTITION_VERT:
update_partition_context(xd, mi_row, mi_col, subsize, subsize);
update_partition_context(xd, mi_row, mi_col + hbs, bsize2, subsize);
break;
- default:
- assert(0 && "Invalid partition type");
+ default: assert(0 && "Invalid partition type");
}
}
#else
}
#if !CONFIG_ANS
-static void setup_bool_decoder(const uint8_t *data,
- const uint8_t *data_end,
+static void setup_bool_decoder(const uint8_t *data, const uint8_t *data_end,
const size_t read_size,
struct vpx_internal_error_info *error_info,
- vp10_reader *r,
- vpx_decrypt_cb decrypt_cb,
+ vp10_reader *r, vpx_decrypt_cb decrypt_cb,
void *decrypt_state) {
// Validate the calculated partition length. If the buffer
// described by the partition can't be fully read, then restrict
"Failed to allocate bool decoder %d", 1);
}
#else
-static void setup_token_decoder(const uint8_t *data,
- const uint8_t *data_end,
+static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
const size_t read_size,
struct vpx_internal_error_info *error_info,
struct AnsDecoder *const ans,
vpx_decrypt_cb decrypt_cb,
void *decrypt_state) {
- (void) decrypt_cb;
- (void) decrypt_state;
+ (void)decrypt_cb;
+ (void)decrypt_state;
// Validate the calculated partition length. If the buffer
// described by the partition can't be fully read, then restrict
// it to the portion that can be (for EC mode) or throw an error.
static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
vp10_reader *r) {
- const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
- TX_SIZE tx_size;
- for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
- read_coef_probs_common(fc->coef_probs[tx_size], r);
+ const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
+ TX_SIZE tx_size;
+ for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
+ read_coef_probs_common(fc->coef_probs[tx_size], r);
#if CONFIG_ANS
- vp10_coef_pareto_cdfs(fc);
+ vp10_coef_pareto_cdfs(fc);
#endif // CONFIG_ANS
}
seg->update_data = 0;
seg->enabled = vpx_rb_read_bit(rb);
- if (!seg->enabled)
- return;
+ if (!seg->enabled) return;
// Segmentation map update
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
}
#if CONFIG_LOOP_RESTORATION
-static void setup_restoration(VP10_COMMON *cm,
- struct vpx_read_bit_buffer *rb) {
+static void setup_restoration(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
RestorationInfo *rst = &cm->rst_info;
if (vpx_rb_read_bit(rb)) {
if (vpx_rb_read_bit(rb)) {
} else {
rst->restoration_type = RESTORE_WIENER;
rst->vfilter[0] = vpx_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
- WIENER_FILT_TAP0_MINV;
+ WIENER_FILT_TAP0_MINV;
rst->vfilter[1] = vpx_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
- WIENER_FILT_TAP1_MINV;
+ WIENER_FILT_TAP1_MINV;
rst->vfilter[2] = vpx_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
- WIENER_FILT_TAP2_MINV;
+ WIENER_FILT_TAP2_MINV;
rst->hfilter[0] = vpx_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
- WIENER_FILT_TAP0_MINV;
+ WIENER_FILT_TAP0_MINV;
rst->hfilter[1] = vpx_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
- WIENER_FILT_TAP1_MINV;
+ WIENER_FILT_TAP1_MINV;
rst->hfilter[2] = vpx_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
- WIENER_FILT_TAP2_MINV;
+ WIENER_FILT_TAP2_MINV;
}
} else {
rst->restoration_type = RESTORE_NONE;
}
#endif // CONFIG_LOOP_RESTORATION
-static void setup_loopfilter(VP10_COMMON *cm,
- struct vpx_read_bit_buffer *rb) {
+static void setup_loopfilter(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
struct loopfilter *lf = &cm->lf;
lf->filter_level = vpx_rb_read_literal(rb, 6);
lf->sharpness_level = vpx_rb_read_literal(rb, 3);
}
static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) {
- return vpx_rb_read_bit(rb) ?
- vpx_rb_read_inv_signed_literal(rb, 6) : 0;
+ return vpx_rb_read_bit(rb) ? vpx_rb_read_inv_signed_literal(rb, 6) : 0;
}
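
Aside: read_delta_q is a one-bit presence gate followed by a 6-bit signed value. A
toy bit reader demonstrating the layout; sign-magnitude coding is assumed here for
illustration, and the reader below is not the vpx_rb_* API:

#include <stdio.h>

/* Toy MSB-first bit reader: just enough to show the layout. */
struct bits { const unsigned char *buf; int pos; };

static int get_bit(struct bits *b) {
  int bit = (b->buf[b->pos >> 3] >> (7 - (b->pos & 7))) & 1;
  b->pos++;
  return bit;
}

static int get_literal(struct bits *b, int n) {
  int v = 0;
  while (n--) v = (v << 1) | get_bit(b);
  return v;
}

/* Mirrors the shape of read_delta_q: presence bit, then a 6-bit magnitude
 * and a sign bit (sign-magnitude assumed for this sketch). */
static int toy_read_delta_q(struct bits *b) {
  int mag;
  if (!get_bit(b)) return 0;
  mag = get_literal(b, 6);
  return get_bit(b) ? -mag : mag;
}

int main(void) {
  /* 1 (present), 000101 (=5), 1 (negative) -> delta_q = -5 */
  const unsigned char data[2] = { 0x8B, 0x00 };
  struct bits b = { data, 0 };
  printf("delta_q = %d\n", toy_read_delta_q(&b));
  return 0;
}
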
static void setup_quantization(VP10_COMMON *const cm,
}
static void setup_segmentation_dequant(VP10_COMMON *const cm) {
- // Build y/uv dequant values based on segmentation.
+// Build y/uv dequant values based on segmentation.
#if CONFIG_NEW_QUANT
int b;
int dq;
int i;
for (i = 0; i < MAX_SEGMENTS; ++i) {
const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
- cm->y_dequant[i][0] = vp10_dc_quant(qindex, cm->y_dc_delta_q,
- cm->bit_depth);
+ cm->y_dequant[i][0] =
+ vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
- cm->uv_dequant[i][0] = vp10_dc_quant(qindex, cm->uv_dc_delta_q,
- cm->bit_depth);
- cm->uv_dequant[i][1] = vp10_ac_quant(qindex, cm->uv_ac_delta_q,
- cm->bit_depth);
+ cm->uv_dequant[i][0] =
+ vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ cm->uv_dequant[i][1] =
+ vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
#if CONFIG_NEW_QUANT
- for (dq = 0; dq < QUANT_PROFILES; dq ++) {
+ for (dq = 0; dq < QUANT_PROFILES; dq++) {
for (b = 0; b < COEF_BANDS; ++b) {
- vp10_get_dequant_val_nuq(
- cm->y_dequant[i][b != 0], qindex, b,
- cm->y_dequant_nuq[i][dq][b], NULL, dq);
- vp10_get_dequant_val_nuq(
- cm->uv_dequant[i][b != 0], qindex, b,
- cm->uv_dequant_nuq[i][dq][b], NULL, dq);
+ vp10_get_dequant_val_nuq(cm->y_dequant[i][b != 0], qindex, b,
+ cm->y_dequant_nuq[i][dq][b], NULL, dq);
+ vp10_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], qindex, b,
+ cm->uv_dequant_nuq[i][dq][b], NULL, dq);
}
}
#endif // CONFIG_NEW_QUANT
const int qindex = cm->base_qindex;
// When segmentation is disabled, only the first value is used. The
// remaining are don't cares.
- cm->y_dequant[0][0] = vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[0][0] =
+ vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
- cm->uv_dequant[0][0] = vp10_dc_quant(qindex, cm->uv_dc_delta_q,
- cm->bit_depth);
- cm->uv_dequant[0][1] = vp10_ac_quant(qindex, cm->uv_ac_delta_q,
- cm->bit_depth);
+ cm->uv_dequant[0][0] =
+ vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ cm->uv_dequant[0][1] =
+ vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
#if CONFIG_NEW_QUANT
- for (dq = 0; dq < QUANT_PROFILES; dq ++) {
+ for (dq = 0; dq < QUANT_PROFILES; dq++) {
for (b = 0; b < COEF_BANDS; ++b) {
- vp10_get_dequant_val_nuq(
- cm->y_dequant[0][b != 0], qindex, b,
- cm->y_dequant_nuq[0][dq][b], NULL, dq);
- vp10_get_dequant_val_nuq(
- cm->uv_dequant[0][b != 0], qindex, b,
- cm->uv_dequant_nuq[0][dq][b], NULL, dq);
+ vp10_get_dequant_val_nuq(cm->y_dequant[0][b != 0], qindex, b,
+ cm->y_dequant_nuq[0][dq][b], NULL, dq);
+ vp10_get_dequant_val_nuq(cm->uv_dequant[0][b != 0], qindex, b,
+ cm->uv_dequant_nuq[0][dq][b], NULL, dq);
}
}
#endif // CONFIG_NEW_QUANT
}
static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
- return vpx_rb_read_bit(rb) ?
- SWITCHABLE : vpx_rb_read_literal(rb, 2 + CONFIG_EXT_INTERP);
+ return vpx_rb_read_bit(rb) ? SWITCHABLE
+ : vpx_rb_read_literal(rb, 2 + CONFIG_EXT_INTERP);
}
-static void setup_render_size(VP10_COMMON *cm,
- struct vpx_read_bit_buffer *rb) {
+static void setup_render_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
cm->render_width = cm->width;
cm->render_height = cm->height;
if (vpx_rb_read_bit(rb))
const int new_mi_rows =
ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
const int new_mi_cols =
- ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
+ ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
// Allocations in vp10_alloc_context_buffers() depend on individual
// dimensions as well as the overall size.
lock_buffer_pool(pool);
if (vpx_realloc_frame_buffer(
- get_frame_new_buffer(cm), cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
+ get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_DEC_BORDER_IN_PIXELS,
- cm->byte_alignment,
+ VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
- pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
+ pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
// has valid dimensions.
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
- has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width,
- ref_frame->buf->y_crop_height,
- width, height);
+ has_valid_ref_frame |=
+ valid_ref_frame_size(ref_frame->buf->y_crop_width,
+ ref_frame->buf->y_crop_height, width, height);
}
if (!has_valid_ref_frame)
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Referenced frame has invalid size");
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
- if (!valid_ref_frame_img_fmt(
- ref_frame->buf->bit_depth,
- ref_frame->buf->subsampling_x,
- ref_frame->buf->subsampling_y,
- cm->bit_depth,
- cm->subsampling_x,
- cm->subsampling_y))
+ if (!valid_ref_frame_img_fmt(ref_frame->buf->bit_depth,
+ ref_frame->buf->subsampling_x,
+ ref_frame->buf->subsampling_y, cm->bit_depth,
+ cm->subsampling_x, cm->subsampling_y))
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Referenced frame has incompatible color format");
}
lock_buffer_pool(pool);
if (vpx_realloc_frame_buffer(
- get_frame_new_buffer(cm), cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
+ get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_DEC_BORDER_IN_PIXELS,
- cm->byte_alignment,
+ VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
- pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
+ pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
static void read_tile_info(VP10Decoder *const pbi,
- struct vpx_read_bit_buffer *const rb) {
+ struct vpx_read_bit_buffer *const rb) {
VP10_COMMON *const cm = &pbi->common;
#if CONFIG_EXT_TILE
- // Read the tile width/height
+// Read the tile width/height
#if CONFIG_EXT_PARTITION
if (cm->sb_size == BLOCK_128X128) {
- cm->tile_width = vpx_rb_read_literal(rb, 5) + 1;
+ cm->tile_width = vpx_rb_read_literal(rb, 5) + 1;
cm->tile_height = vpx_rb_read_literal(rb, 5) + 1;
} else
#endif // CONFIG_EXT_PARTITION
{
- cm->tile_width = vpx_rb_read_literal(rb, 6) + 1;
+ cm->tile_width = vpx_rb_read_literal(rb, 6) + 1;
cm->tile_height = vpx_rb_read_literal(rb, 6) + 1;
}
- cm->tile_width <<= cm->mib_size_log2;
+ cm->tile_width <<= cm->mib_size_log2;
cm->tile_height <<= cm->mib_size_log2;
- cm->tile_width = VPXMIN(cm->tile_width, cm->mi_cols);
+ cm->tile_width = VPXMIN(cm->tile_width, cm->mi_cols);
cm->tile_height = VPXMIN(cm->tile_height, cm->mi_rows);
// Get the number of tiles
cm->tile_cols = 1;
- while (cm->tile_cols * cm->tile_width < cm->mi_cols)
- ++cm->tile_cols;
+ while (cm->tile_cols * cm->tile_width < cm->mi_cols) ++cm->tile_cols;
cm->tile_rows = 1;
- while (cm->tile_rows * cm->tile_height < cm->mi_rows)
- ++cm->tile_rows;
+ while (cm->tile_rows * cm->tile_height < cm->mi_rows) ++cm->tile_rows;
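// Worked example (not part of the patch): with cm->mi_cols = 100 and
// cm->tile_width = 64, the first loop stops at cm->tile_cols = 2, since
// 1 * 64 < 100 but 2 * 64 >= 100, i.e. the smallest tile count that covers
// the frame width.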
if (cm->tile_cols * cm->tile_rows > 1) {
// Read the number of bytes used to store tile size
- pbi->tile_col_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
+ pbi->tile_col_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
pbi->tile_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
}
#else
// columns
max_ones = max_log2_tile_cols - min_log2_tile_cols;
cm->log2_tile_cols = min_log2_tile_cols;
- while (max_ones-- && vpx_rb_read_bit(rb))
- cm->log2_tile_cols++;
+ while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++;
if (cm->log2_tile_cols > 6)
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
// rows
cm->log2_tile_rows = vpx_rb_read_bit(rb);
- if (cm->log2_tile_rows)
- cm->log2_tile_rows += vpx_rb_read_bit(rb);
+ if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb);
cm->tile_cols = 1 << cm->log2_tile_cols;
cm->tile_rows = 1 << cm->log2_tile_rows;
cm->tile_height >>= cm->log2_tile_rows;
// round to integer multiples of superblock size
- cm->tile_width = ALIGN_POWER_OF_TWO(cm->tile_width, MAX_MIB_SIZE_LOG2);
+ cm->tile_width = ALIGN_POWER_OF_TWO(cm->tile_width, MAX_MIB_SIZE_LOG2);
cm->tile_height = ALIGN_POWER_OF_TWO(cm->tile_height, MAX_MIB_SIZE_LOG2);
// tile size magnitude
static int mem_get_varsize(const uint8_t *src, const int sz) {
switch (sz) {
- case 1:
- return src[0];
- case 2:
- return mem_get_le16(src);
- case 3:
- return mem_get_le24(src);
- case 4:
- return mem_get_le32(src);
- default:
- assert("Invalid size" && 0);
- return -1;
+ case 1: return src[0];
+ case 2: return mem_get_le16(src);
+ case 3: return mem_get_le24(src);
+ case 4: return mem_get_le32(src);
+ default: assert("Invalid size" && 0); return -1;
}
}
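// Usage sketch (not part of the patch): tile sizes are stored little-endian
// in pbi->tile_size_bytes bytes, so callers read them roughly as
//   size = mem_get_varsize(*data, tile_size_bytes);
//   *data += tile_size_bytes;
// mirroring the pattern in get_tile_buffer() below.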
// based on 'is_last'.
static void get_tile_buffer(const uint8_t *const data_end,
struct vpx_internal_error_info *error_info,
- const uint8_t **data,
- vpx_decrypt_cb decrypt_cb, void *decrypt_state,
+ const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
int tile_size_bytes, int col, int row) {
size_t size;
- size_t copy_size = 0;
+ size_t copy_size = 0;
const uint8_t *copy_data = NULL;
if (!read_is_valid(*data, tile_size_bytes, data_end))
}
static void get_tile_buffers(
- VP10Decoder *pbi,
- const uint8_t *data, const uint8_t *data_end,
+ VP10Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
VP10_COMMON *const cm = &pbi->common;
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
const int have_tiles = tile_cols * tile_rows > 1;
- if (!have_tiles) {
+ if (!have_tiles) {
const uint32_t tile_size = data_end - data;
tile_buffers[0][0].data = data;
tile_buffers[0][0].size = tile_size;
for (c = tile_cols_start; c < tile_cols_end; ++c) {
const int is_last = c == tile_cols - 1;
- if (c > 0)
- data = tile_col_data_end[c - 1];
+ if (c > 0) data = tile_col_data_end[c - 1];
- if (!is_last)
- data += tile_col_size_bytes;
+ if (!is_last) data += tile_col_size_bytes;
// Get the whole of the last column, otherwise stop at the required tile.
for (r = 0; r < (is_last ? tile_rows : tile_rows_end); ++r) {
tile_buffers[r][c].col = c;
- get_tile_buffer(tile_col_data_end[c],
- &pbi->common.error, &data,
- pbi->decrypt_cb, pbi->decrypt_state,
- tile_buffers, tile_size_bytes, c, r);
+ get_tile_buffer(tile_col_data_end[c], &pbi->common.error, &data,
+ pbi->decrypt_cb, pbi->decrypt_state, tile_buffers,
+ tile_size_bytes, c, r);
}
}
for (r = 0; r < tile_rows; ++r) {
tile_buffers[r][c].col = c;
- get_tile_buffer(tile_col_data_end[c],
- &pbi->common.error, &data,
- pbi->decrypt_cb, pbi->decrypt_state,
- tile_buffers, tile_size_bytes, c, r);
+ get_tile_buffer(tile_col_data_end[c], &pbi->common.error, &data,
+ pbi->decrypt_cb, pbi->decrypt_state, tile_buffers,
+ tile_size_bytes, c, r);
}
}
}
static void get_tile_buffer(const uint8_t *const data_end,
const int tile_size_bytes, int is_last,
struct vpx_internal_error_info *error_info,
- const uint8_t **data,
- vpx_decrypt_cb decrypt_cb, void *decrypt_state,
- TileBufferDec *const buf) {
+ const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state, TileBufferDec *const buf) {
size_t size;
if (!is_last) {
}
static void get_tile_buffers(
- VP10Decoder *pbi,
- const uint8_t *data, const uint8_t *data_end,
+ VP10Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
VP10_COMMON *const cm = &pbi->common;
int r, c;
const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
TileBufferDec *const buf = &tile_buffers[r][c];
buf->col = c;
- get_tile_buffer(data_end, pbi->tile_size_bytes,
- is_last, &cm->error, &data,
- pbi->decrypt_cb, pbi->decrypt_state, buf);
+ get_tile_buffer(data_end, pbi->tile_size_bytes, is_last, &cm->error,
+ &data, pbi->decrypt_cb, pbi->decrypt_state, buf);
}
}
}
#endif // CONFIG_EXT_TILE
-static const uint8_t *decode_tiles(VP10Decoder *pbi,
- const uint8_t *data,
+static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
VP10_COMMON *const cm = &pbi->common;
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
}
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
// Be sure to sync as we might be resuming after a failed frame decode.
winterface->sync(&pbi->lf_worker);
vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
- pbi->mb.plane);
+ pbi->mb.plane);
}
assert(tile_rows <= MAX_TILE_ROWS);
if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
vpx_free(pbi->tile_data);
- CHECK_MEM_ERROR(
- cm,
- pbi->tile_data,
- vpx_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
+ CHECK_MEM_ERROR(cm, pbi->tile_data,
+ vpx_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
pbi->allocated_tiles = n_tiles;
}
td->xd = pbi->mb;
td->xd.corrupted = 0;
td->xd.counts =
- cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD ?
- &cm->counts : NULL;
+ cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
+ ? &cm->counts
+ : NULL;
vp10_zero(td->dqcoeff);
vp10_tile_init(&td->xd.tile, td->cm, tile_row, tile_col);
#if !CONFIG_ANS
setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
- &td->bit_reader, pbi->decrypt_cb,
- pbi->decrypt_state);
+ &td->bit_reader, pbi->decrypt_cb, pbi->decrypt_state);
#else
setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
- &td->bit_reader, pbi->decrypt_cb,
- pbi->decrypt_state);
+ &td->bit_reader, pbi->decrypt_cb, pbi->decrypt_state);
#endif
vp10_init_macroblockd(cm, &td->xd, td->dqcoeff);
td->xd.plane[0].color_index_map = td->color_index_map[0];
#if CONFIG_SUPERTX
0,
#endif // CONFIG_SUPERTX
- mi_row, mi_col, &td->bit_reader,
- cm->sb_size, b_width_log2_lookup[cm->sb_size]);
+ mi_row, mi_col, &td->bit_reader, cm->sb_size,
+ b_width_log2_lookup[cm->sb_size]);
}
pbi->mb.corrupted |= td->xd.corrupted;
if (pbi->mb.corrupted)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
- "Failed to decode tile data");
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Failed to decode tile data");
#if CONFIG_ENTROPY
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- if ((mi_row + MI_SIZE) % (MI_SIZE *
- VPXMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) == 0 &&
+ if ((mi_row + MI_SIZE) %
+ (MI_SIZE *
+ VPXMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
+ 0 &&
mi_row + MI_SIZE < cm->mi_rows &&
cm->coef_probs_update_idx < COEF_PROBS_BUFS - 1) {
vp10_partial_adapt_probs(cm, mi_row, mi_col);
#if !CONFIG_VAR_TX
// Loopfilter one tile row.
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
const int lf_start = VPXMAX(0, tile_info.mi_row_start - cm->mib_size);
const int lf_end = tile_info.mi_row_end - cm->mib_size;
// Delay the loopfilter if the first tile row is only
// a single superblock high.
- if (lf_end <= 0)
- continue;
+ if (lf_end <= 0) continue;
// Decoding has completed. Finish up the loop filter in this thread.
- if (tile_info.mi_row_end >= cm->mi_rows)
- continue;
+ if (tile_info.mi_row_end >= cm->mi_rows) continue;
winterface->sync(&pbi->lf_worker);
lf_data->start = lf_start;
#else
// Loopfilter remaining rows in the frame.
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
winterface->sync(&pbi->lf_worker);
lf_data->start = lf_data->stop;
lf_data->stop = cm->mi_rows;
#if CONFIG_SUPERTX
0,
#endif
- mi_row, mi_col, &tile_data->bit_reader,
- cm->sb_size, b_width_log2_lookup[cm->sb_size]);
+ mi_row, mi_col, &tile_data->bit_reader, cm->sb_size,
+ b_width_log2_lookup[cm->sb_size]);
}
}
return !tile_data->xd.corrupted;
// sorts in descending order
static int compare_tile_buffers(const void *a, const void *b) {
- const TileBufferDec *const buf1 = (const TileBufferDec*)a;
- const TileBufferDec *const buf2 = (const TileBufferDec*)b;
+ const TileBufferDec *const buf1 = (const TileBufferDec *)a;
+ const TileBufferDec *const buf2 = (const TileBufferDec *)b;
return (int)(buf2->size - buf1->size);
}
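// Note (not part of the patch): sorting each tile row largest-first lets
// decode_tiles_mt() hand the biggest tiles to workers earliest, shortening
// the tail where the main thread waits on the last worker to finish.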
-static const uint8_t *decode_tiles_mt(VP10Decoder *pbi,
- const uint8_t *data,
+static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
VP10_COMMON *const cm = &pbi->common;
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
#if CONFIG_ANS
// TODO(any): This might just work now. Needs to be tested.
abort(); // FIXME: Tile parsing broken
-#endif // CONFIG_ANS
+#endif // CONFIG_ANS
// TODO(jzern): See if we can remove the restriction of passing in max
// threads to the decoder.
// Ensure tile data offsets will be properly aligned. This may fail on
// platforms without DECLARE_ALIGNED().
assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
- CHECK_MEM_ERROR(cm, pbi->tile_worker_data,
- vpx_memalign(32, num_threads *
- sizeof(*pbi->tile_worker_data)));
+ CHECK_MEM_ERROR(
+ cm, pbi->tile_worker_data,
+ vpx_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
for (i = 0; i < num_threads; ++i) {
// Initialize thread frame counts.
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
for (i = 0; i < num_workers; ++i) {
- TileWorkerData *const twd = (TileWorkerData*)pbi->tile_workers[i].data1;
+ TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
vp10_zero(twd->counts);
}
}
// Load tile data into tile_buffers
get_tile_buffers(pbi, data, data_end, tile_buffers);
- for (tile_row = tile_rows_start ; tile_row < tile_rows_end ; ++tile_row) {
+ for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
// Sort the buffers in this tile row based on size in descending order.
qsort(&tile_buffers[tile_row][tile_cols_start],
tile_cols_end - tile_cols_start, sizeof(tile_buffers[0][0]),
// where the main thread is waiting for a worker to complete.
{
int group_start;
- for (group_start = tile_cols_start ; group_start < tile_cols_end ;
+ for (group_start = tile_cols_start; group_start < tile_cols_end;
group_start += num_workers) {
const int group_end = VPXMIN(group_start + num_workers, tile_cols);
const TileBufferDec largest = tile_buffers[tile_row][group_start];
}
}
- for (tile_col = tile_cols_start ; tile_col < tile_cols_end ; ) {
+ for (tile_col = tile_cols_start; tile_col < tile_cols_end;) {
// Launch workers for individual columns
for (i = 0; i < num_workers && tile_col < tile_cols_end;
++i, ++tile_col) {
TileBufferDec *const buf = &tile_buffers[tile_row][tile_col];
VPxWorker *const worker = &pbi->tile_workers[i];
- TileWorkerData *const twd = (TileWorkerData*)worker->data1;
- TileInfo *const tile_info = (TileInfo*)worker->data2;
+ TileWorkerData *const twd = (TileWorkerData *)worker->data1;
+ TileInfo *const tile_info = (TileInfo *)worker->data2;
twd->pbi = pbi;
twd->xd = pbi->mb;
twd->xd.corrupted = 0;
twd->xd.counts =
- cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD ?
- &twd->counts : NULL;
+ cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
+ ? &twd->counts
+ : NULL;
vp10_zero(twd->dqcoeff);
vp10_tile_init(tile_info, cm, tile_row, buf->col);
vp10_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
#if !CONFIG_ANS
setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
- &twd->bit_reader,
- pbi->decrypt_cb, pbi->decrypt_state);
+ &twd->bit_reader, pbi->decrypt_cb,
+ pbi->decrypt_state);
#else
setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
&twd->bit_reader, pbi->decrypt_cb,
// Accumulate thread frame counts.
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
for (i = 0; i < num_workers; ++i) {
- TileWorkerData *const twd = (TileWorkerData*)pbi->tile_workers[i].data1;
+ TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
vp10_accumulate_frame_counts(cm, &twd->counts);
}
}
assert(final_worker != -1);
{
TileWorkerData *const twd =
- (TileWorkerData*)pbi->tile_workers[final_worker].data1;
+ (TileWorkerData *)pbi->tile_workers[final_worker].data1;
return vpx_reader_find_end(&twd->bit_reader);
}
#endif // CONFIG_ANS
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}
-static void read_bitdepth_colorspace_sampling(
- VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void read_bitdepth_colorspace_sampling(VP10_COMMON *cm,
+ struct vpx_read_bit_buffer *rb) {
if (cm->profile >= PROFILE_2) {
cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
#if CONFIG_VP9_HIGHBITDEPTH
#endif // CONFIG_EXT_REFS
if (vpx_rb_read_literal(rb, 2) != VPX_FRAME_MARKER)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
- "Invalid frame marker");
+ vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ "Invalid frame marker");
cm->profile = vp10_read_profile(rb);
#if CONFIG_VP9_HIGHBITDEPTH
return 0;
}
- cm->frame_type = (FRAME_TYPE) vpx_rb_read_bit(rb);
+ cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb);
cm->show_frame = vpx_rb_read_bit(rb);
cm->error_resilient_mode = vpx_rb_read_bit(rb);
cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);
if (cm->error_resilient_mode) {
- cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
+ cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
} else {
if (cm->intra_only) {
- cm->reset_frame_context =
- vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_ALL
- : RESET_FRAME_CONTEXT_CURRENT;
- } else {
- cm->reset_frame_context =
- vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_CURRENT
- : RESET_FRAME_CONTEXT_NONE;
- if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
- cm->reset_frame_context =
- vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_ALL
+ cm->reset_frame_context = vpx_rb_read_bit(rb)
+ ? RESET_FRAME_CONTEXT_ALL
: RESET_FRAME_CONTEXT_CURRENT;
+ } else {
+ cm->reset_frame_context = vpx_rb_read_bit(rb)
+ ? RESET_FRAME_CONTEXT_CURRENT
+ : RESET_FRAME_CONTEXT_NONE;
+ if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
+ cm->reset_frame_context = vpx_rb_read_bit(rb)
+ ? RESET_FRAME_CONTEXT_ALL
+ : RESET_FRAME_CONTEXT_CURRENT;
}
}
memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
pbi->need_resync = 0;
}
- } else if (pbi->need_resync != 1) { /* Skip if need resync */
+ } else if (pbi->need_resync != 1) { /* Skip if need resync */
pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
#if CONFIG_EXT_REFS
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_buf = &cm->frame_refs[i];
#if CONFIG_VP9_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(&ref_buf->sf,
- ref_buf->buf->y_crop_width,
- ref_buf->buf->y_crop_height,
- cm->width, cm->height,
- cm->use_highbitdepth);
+ vp10_setup_scale_factors_for_frame(
+ &ref_buf->sf, ref_buf->buf->y_crop_width,
+ ref_buf->buf->y_crop_height, cm->width, cm->height,
+ cm->use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(&ref_buf->sf,
- ref_buf->buf->y_crop_width,
- ref_buf->buf->y_crop_height,
- cm->width, cm->height);
+ vp10_setup_scale_factors_for_frame(
+ &ref_buf->sf, ref_buf->buf->y_crop_width,
+ ref_buf->buf->y_crop_height, cm->width, cm->height);
#endif
}
}
#endif
get_frame_new_buffer(cm)->color_space = cm->color_space;
get_frame_new_buffer(cm)->color_range = cm->color_range;
- get_frame_new_buffer(cm)->render_width = cm->render_width;
+ get_frame_new_buffer(cm)->render_width = cm->render_width;
get_frame_new_buffer(cm)->render_height = cm->render_height;
if (pbi->need_resync) {
}
if (!cm->error_resilient_mode) {
- cm->refresh_frame_context =
- vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD
- : REFRESH_FRAME_CONTEXT_BACKWARD;
+ cm->refresh_frame_context = vpx_rb_read_bit(rb)
+ ? REFRESH_FRAME_CONTEXT_FORWARD
+ : REFRESH_FRAME_CONTEXT_BACKWARD;
} else {
cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
}
vp10_default_coef_probs(cm);
if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
- for (i = 0; i < FRAME_CONTEXTS; ++i)
- cm->frame_contexts[i] = *cm->fc;
+ for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
} else if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT) {
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}
{
int i;
for (i = 0; i < MAX_SEGMENTS; ++i) {
- const int qindex = cm->seg.enabled ?
- vp10_get_qindex(&cm->seg, i, cm->base_qindex) : cm->base_qindex;
- xd->lossless[i] = qindex == 0 &&
- cm->y_dc_delta_q == 0 &&
- cm->uv_dc_delta_q == 0 &&
- cm->uv_ac_delta_q == 0;
+ const int qindex = cm->seg.enabled
+ ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+ : cm->base_qindex;
+ xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
}
}
setup_segmentation_dequant(cm);
- cm->tx_mode = (!cm->seg.enabled && xd->lossless[0]) ? ONLY_4X4
- : read_tx_mode(rb);
+ cm->tx_mode =
+ (!cm->seg.enabled && xd->lossless[0]) ? ONLY_4X4 : read_tx_mode(rb);
cm->reference_mode = read_frame_reference_mode(cm, rb);
read_tile_info(pbi, rb);
#if CONFIG_GLOBAL_MOTION
static void read_global_motion_params(Global_Motion_Params *params,
- vpx_prob *probs,
- vp10_reader *r) {
- GLOBAL_MOTION_TYPE gmtype = vp10_read_tree(r, vp10_global_motion_types_tree,
- probs);
+ vpx_prob *probs, vp10_reader *r) {
+ GLOBAL_MOTION_TYPE gmtype =
+ vp10_read_tree(r, vp10_global_motion_types_tree, probs);
params->gmtype = gmtype;
params->motion_params.wmtype = gm_to_trans_type(gmtype);
switch (gmtype) {
- case GLOBAL_ZERO:
- break;
+ case GLOBAL_ZERO: break;
case GLOBAL_AFFINE:
params->motion_params.wmmat[4] =
(vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
- GM_ALPHA_DECODE_FACTOR) + (1 << WARPEDMODEL_PREC_BITS);
+ GM_ALPHA_DECODE_FACTOR) +
+ (1 << WARPEDMODEL_PREC_BITS);
params->motion_params.wmmat[5] =
vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR;
- // fallthrough intended
+ // fallthrough intended
case GLOBAL_ROTZOOM:
params->motion_params.wmmat[2] =
(vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
- GM_ALPHA_DECODE_FACTOR) + (1 << WARPEDMODEL_PREC_BITS);
+ GM_ALPHA_DECODE_FACTOR) +
+ (1 << WARPEDMODEL_PREC_BITS);
params->motion_params.wmmat[3] =
vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR;
- // fallthrough intended
+ // fallthrough intended
case GLOBAL_TRANSLATION:
params->motion_params.wmmat[0] =
vp10_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
vp10_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
GM_TRANS_DECODE_FACTOR;
break;
- default:
- assert(0);
+ default: assert(0);
}
}
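// Note (not part of the patch): the switch above is a deliberate cascade:
// GLOBAL_AFFINE reads wmmat[4..5] and falls through to GLOBAL_ROTZOOM
// (wmmat[2..3]), which falls through to GLOBAL_TRANSLATION (wmmat[0..1]),
// so each model reads exactly the parameters it adds over the simpler one.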
int frame;
memset(cm->global_motion, 0, sizeof(cm->global_motion));
for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
- read_global_motion_params(
- &cm->global_motion[frame], cm->fc->global_motion_types_prob, r);
+ read_global_motion_params(&cm->global_motion[frame],
+ cm->fc->global_motion_types_prob, r);
}
}
#endif // CONFIG_GLOBAL_MOTION
vp10_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
#if CONFIG_EXT_PARTITION_TYPES
- for (i = 0; i < PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[0][i]);
- for (j = 1; j < PARTITION_CONTEXTS; ++j)
- for (i = 0; i < EXT_PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+ for (i = 0; i < PARTITION_TYPES - 1; ++i)
+ vp10_diff_update_prob(&r, &fc->partition_prob[0][i]);
+ for (j = 1; j < PARTITION_CONTEXTS; ++j)
+ for (i = 0; i < EXT_PARTITION_TYPES - 1; ++i)
+ vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
#else
for (j = 0; j < PARTITION_CONTEXTS; ++j)
for (i = 0; i < PARTITION_TYPES - 1; ++i)
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
- if (cm->interp_filter == SWITCHABLE)
- read_switchable_interp_probs(fc, &r);
+ if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]);
#endif
read_ext_tx_probs(fc, &r);
#if CONFIG_SUPERTX
- if (!xd->lossless[0])
- read_supertx_probs(fc, &r);
+ if (!xd->lossless[0]) read_supertx_probs(fc, &r);
#endif
#if CONFIG_GLOBAL_MOTION
read_global_motion(cm, &r);
sizeof(cm->counts.uv_mode)));
assert(!memcmp(cm->counts.partition, zero_counts.partition,
sizeof(cm->counts.partition)));
- assert(!memcmp(cm->counts.coef, zero_counts.coef,
- sizeof(cm->counts.coef)));
+ assert(!memcmp(cm->counts.coef, zero_counts.coef, sizeof(cm->counts.coef)));
assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
sizeof(cm->counts.eob_branch)));
assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
sizeof(cm->counts.tx_size)));
assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
#if CONFIG_REF_MV
- assert(!memcmp(&cm->counts.mv[0], &zero_counts.mv[0],
- sizeof(cm->counts.mv[0])));
- assert(!memcmp(&cm->counts.mv[1], &zero_counts.mv[1],
- sizeof(cm->counts.mv[0])));
+ assert(
+ !memcmp(&cm->counts.mv[0], &zero_counts.mv[0], sizeof(cm->counts.mv[0])));
+ assert(
+ !memcmp(&cm->counts.mv[1], &zero_counts.mv[1], sizeof(cm->counts.mv[0])));
#else
assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
#endif
#endif // NDEBUG
static struct vpx_read_bit_buffer *init_read_bit_buffer(
- VP10Decoder *pbi,
- struct vpx_read_bit_buffer *rb,
- const uint8_t *data,
- const uint8_t *data_end,
- uint8_t clear_data[MAX_VPX_HEADER_SIZE]) {
+ VP10Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data,
+ const uint8_t *data_end, uint8_t clear_data[MAX_VPX_HEADER_SIZE]) {
rb->bit_offset = 0;
rb->error_handler = error_handler;
rb->error_handler_data = &pbi->common;
vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2;
}
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb,
- int *width, int *height) {
+void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
+ int *height) {
*width = vpx_rb_read_literal(rb, 16) + 1;
*height = vpx_rb_read_literal(rb, 16) + 1;
}
BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb) {
int profile = vpx_rb_read_bit(rb);
profile |= vpx_rb_read_bit(rb) << 1;
- if (profile > 2)
- profile += vpx_rb_read_bit(rb);
- return (BITSTREAM_PROFILE) profile;
+ if (profile > 2) profile += vpx_rb_read_bit(rb);
+ return (BITSTREAM_PROFILE)profile;
}
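// Note (not part of the patch): the profile is coded as two raw bits, low
// bit first; the value 3 consumes one extra bit, which is how the bitstream
// reserves room for additional profiles.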
-void vp10_decode_frame(VP10Decoder *pbi,
- const uint8_t *data, const uint8_t *data_end,
- const uint8_t **p_data_end) {
+void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end) {
VP10_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
struct vpx_read_bit_buffer rb;
int context_updated = 0;
uint8_t clear_data[MAX_VPX_HEADER_SIZE];
- const size_t first_partition_size = read_uncompressed_header(pbi,
- init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
+ const size_t first_partition_size = read_uncompressed_header(
+ pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
xd->cur_buf = new_fb;
#if CONFIG_GLOBAL_MOTION
#endif // CONFIG_GLOBAL_MOTION
if (!first_partition_size) {
- // showing a frame directly
+// showing a frame directly
#if CONFIG_EXT_REFS
if (cm->show_existing_frame)
*p_data_end = data + vpx_rb_bytes_read(&rb);
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt header length");
- cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
- cm->width == cm->last_width &&
- cm->height == cm->last_height &&
- !cm->last_intra_only &&
- cm->last_show_frame &&
- (cm->last_frame_type != KEY_FRAME);
+ cm->use_prev_frame_mvs =
+ !cm->error_resilient_mode && cm->width == cm->last_width &&
+ cm->height == cm->last_height && !cm->last_intra_only &&
+ cm->last_show_frame && (cm->last_frame_type != KEY_FRAME);
#if CONFIG_EXT_REFS
// NOTE(zoeliu): cm->prev_frame can neither be a frame with
// show_existing_frame=1, nor a frame that is not used as
if (pbi->max_threads > 1
#if CONFIG_EXT_TILE
&& pbi->dec_tile_col < 0 // Decoding all columns
-#endif // CONFIG_EXT_TILE
+#endif // CONFIG_EXT_TILE
&& cm->tile_cols > 1) {
// Multi-threaded tile decoder
*p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
// If multiple threads are used to decode tiles, then we use those
// threads to do parallel loopfiltering.
vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
- cm->lf.filter_level, 0, 0, pbi->tile_workers,
- pbi->num_tile_workers, &pbi->lf_row_sync);
+ cm->lf.filter_level, 0, 0, pbi->tile_workers,
+ pbi->num_tile_workers, &pbi->lf_row_sync);
}
} else {
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data is corrupted.");
-
}
} else {
*p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
}
#if CONFIG_LOOP_RESTORATION
if (cm->rst_info.restoration_type != RESTORE_NONE) {
- vp10_loop_restoration_init(&cm->rst_internal,
- &cm->rst_info,
+ vp10_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
cm->frame_type == KEY_FRAME);
vp10_loop_restoration_rows(new_fb, cm, 0, cm->mi_rows, 0);
}
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_DECODER_DECODEFRAME_H_
#define VP10_DECODER_DECODEFRAME_H_
struct vpx_read_bit_buffer;
int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb);
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb,
- int *width, int *height);
+void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
+ int *height);
BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb);
-void vp10_decode_frame(struct VP10Decoder *pbi,
- const uint8_t *data, const uint8_t *data_end,
- const uint8_t **p_data_end);
+void vp10_decode_frame(struct VP10Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end);
#ifdef __cplusplus
} // extern "C"
static INLINE int read_uniform(vp10_reader *r, int n) {
int l = get_unsigned_bits(n);
int m = (1 << l) - n;
- int v = vp10_read_literal(r, l-1);
+ int v = vp10_read_literal(r, l - 1);
assert(l != 0);
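// Note (not part of the patch): this is the usual truncated-binary scheme:
// with m = (1 << l) - n, the m smallest values use l - 1 bits and the rest
// use l bits (the elided tail presumably reads the extra bit when v >= m).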
const PREDICTION_MODE y_mode =
read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->y_mode[size_group][y_mode];
+ if (counts) ++counts->y_mode[size_group][y_mode];
return y_mode;
}
static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd,
vp10_reader *r,
PREDICTION_MODE y_mode) {
- const PREDICTION_MODE uv_mode = read_intra_mode(r,
- cm->fc->uv_mode_prob[y_mode]);
+ const PREDICTION_MODE uv_mode =
+ read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->uv_mode[y_mode][uv_mode];
+ if (counts) ++counts->uv_mode[y_mode][uv_mode];
return uv_mode;
}
#if CONFIG_EXT_INTER
static INTERINTRA_MODE read_interintra_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
vp10_reader *r, int size_group) {
- const INTERINTRA_MODE ii_mode =
- (INTERINTRA_MODE)vp10_read_tree(r, vp10_interintra_mode_tree,
- cm->fc->interintra_mode_prob[size_group]);
+ const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)vp10_read_tree(
+ r, vp10_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->interintra_mode[size_group][ii_mode];
+ if (counts) ++counts->interintra_mode[size_group][ii_mode];
return ii_mode;
}
#endif // CONFIG_EXT_INTER
vpx_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
if (vp10_read(r, mode_prob) == 0) {
- if (counts)
- ++counts->newmv_mode[mode_ctx][0];
+ if (counts) ++counts->newmv_mode[mode_ctx][0];
#if CONFIG_EXT_INTER
if (has_second_ref(mbmi)) {
#endif // CONFIG_EXT_INTER
- return NEWMV;
+ return NEWMV;
#if CONFIG_EXT_INTER
} else {
mode_prob = cm->fc->new2mv_prob;
if (vp10_read(r, mode_prob) == 0) {
- if (counts)
- ++counts->new2mv_mode[0];
+ if (counts) ++counts->new2mv_mode[0];
return NEWMV;
} else {
- if (counts)
- ++counts->new2mv_mode[1];
+ if (counts) ++counts->new2mv_mode[1];
return NEWFROMNEARMV;
}
}
#endif // CONFIG_EXT_INTER
}
- if (counts)
- ++counts->newmv_mode[mode_ctx][1];
+ if (counts) ++counts->newmv_mode[mode_ctx][1];
- if (ctx & (1 << ALL_ZERO_FLAG_OFFSET))
- return ZEROMV;
+ if (ctx & (1 << ALL_ZERO_FLAG_OFFSET)) return ZEROMV;
mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
mode_prob = cm->fc->zeromv_prob[mode_ctx];
if (vp10_read(r, mode_prob) == 0) {
- if (counts)
- ++counts->zeromv_mode[mode_ctx][0];
+ if (counts) ++counts->zeromv_mode[mode_ctx][0];
return ZEROMV;
}
- if (counts)
- ++counts->zeromv_mode[mode_ctx][1];
+ if (counts) ++counts->zeromv_mode[mode_ctx][1];
mode_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
- if (ctx & (1 << SKIP_NEARESTMV_OFFSET))
- mode_ctx = 6;
- if (ctx & (1 << SKIP_NEARMV_OFFSET))
- mode_ctx = 7;
- if (ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET))
- mode_ctx = 8;
+ if (ctx & (1 << SKIP_NEARESTMV_OFFSET)) mode_ctx = 6;
+ if (ctx & (1 << SKIP_NEARMV_OFFSET)) mode_ctx = 7;
+ if (ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) mode_ctx = 8;
mode_prob = cm->fc->refmv_prob[mode_ctx];
if (vp10_read(r, mode_prob) == 0) {
- if (counts)
- ++counts->refmv_mode[mode_ctx][0];
+ if (counts) ++counts->refmv_mode[mode_ctx][0];
return NEARESTMV;
} else {
- if (counts)
- ++counts->refmv_mode[mode_ctx][1];
+ if (counts) ++counts->refmv_mode[mode_ctx][1];
return NEARMV;
}
// Invalid prediction mode.
assert(0);
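// Note (not part of the patch): the reads above form a binary cascade:
// one context-coded bit for NEWMV vs. the rest, one for ZEROMV, then one
// choosing NEARESTMV vs. NEARMV, so the common modes cost a single bit each.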
#else
- const int mode = vp10_read_tree(r, vp10_inter_mode_tree,
- cm->fc->inter_mode_probs[ctx]);
+ const int mode =
+ vp10_read_tree(r, vp10_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->inter_mode[ctx][mode];
+ if (counts) ++counts->inter_mode[ctx][mode];
return NEARESTMV + mode;
#endif
}
#if CONFIG_REF_MV
-static void read_drl_idx(const VP10_COMMON *cm,
- MACROBLOCKD *xd,
- MB_MODE_INFO *mbmi,
- vp10_reader *r) {
+static void read_drl_idx(const VP10_COMMON *cm, MACROBLOCKD *xd,
+ MB_MODE_INFO *mbmi, vp10_reader *r) {
uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
mbmi->ref_mv_idx = 0;
vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
if (!vp10_read(r, drl_prob)) {
mbmi->ref_mv_idx = idx;
- if (xd->counts)
- ++xd->counts->drl_mode[drl_ctx][0];
+ if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
return;
}
mbmi->ref_mv_idx = idx + 1;
- if (xd->counts)
- ++xd->counts->drl_mode[drl_ctx][1];
+ if (xd->counts) ++xd->counts->drl_mode[drl_ctx][1];
}
}
}
vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
if (!vp10_read(r, drl_prob)) {
mbmi->ref_mv_idx = idx - 1;
- if (xd->counts)
- ++xd->counts->drl_mode[drl_ctx][0];
+ if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
return;
}
mbmi->ref_mv_idx = idx;
- if (xd->counts)
- ++xd->counts->drl_mode[drl_ctx][1];
+ if (xd->counts) ++xd->counts->drl_mode[drl_ctx][1];
}
}
}
#if CONFIG_EXT_INTER
static PREDICTION_MODE read_inter_compound_mode(VP10_COMMON *cm,
- MACROBLOCKD *xd,
- vp10_reader *r, int16_t ctx) {
+ MACROBLOCKD *xd, vp10_reader *r,
+ int16_t ctx) {
const int mode = vp10_read_tree(r, vp10_inter_compound_mode_tree,
- cm->fc->inter_compound_mode_probs[ctx]);
+ cm->fc->inter_compound_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->inter_compound_mode[ctx][mode];
+ if (counts) ++counts->inter_compound_mode[ctx][mode];
assert(is_inter_compound_mode(NEAREST_NEARESTMV + mode));
return NEAREST_NEARESTMV + mode;
#endif // CONFIG_EXT_INTER
static int read_segment_id(vp10_reader *r,
- const struct segmentation_probs *segp) {
+ const struct segmentation_probs *segp) {
return vp10_read_tree(r, vp10_segment_tree, segp->tree_probs);
}
int max_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
int max_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
int ctx = txfm_partition_context(xd->above_txfm_context + tx_col,
- xd->left_txfm_context + tx_row,
- tx_size);
- TX_SIZE (*const inter_tx_size)[MAX_MIB_SIZE] =
- (TX_SIZE (*)[MAX_MIB_SIZE])&mbmi->inter_tx_size[tx_row][tx_col];
+ xd->left_txfm_context + tx_row, tx_size);
+  TX_SIZE (*const inter_tx_size)[MAX_MIB_SIZE] =
+      (TX_SIZE(*)[MAX_MIB_SIZE])&mbmi->inter_tx_size[tx_row][tx_col];
- if (xd->mb_to_bottom_edge < 0)
- max_blocks_high += xd->mb_to_bottom_edge >> 5;
- if (xd->mb_to_right_edge < 0)
- max_blocks_wide += xd->mb_to_right_edge >> 5;
+ if (xd->mb_to_bottom_edge < 0) max_blocks_high += xd->mb_to_bottom_edge >> 5;
+ if (xd->mb_to_right_edge < 0) max_blocks_wide += xd->mb_to_right_edge >> 5;
- if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide)
- return;
+ if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
is_split = vp10_read(r, cm->fc->txfm_partition_prob[ctx]);
int bsl = b_width_log2_lookup[bsize];
int i;
- if (counts)
- ++counts->txfm_partition[ctx][1];
+ if (counts) ++counts->txfm_partition[ctx][1];
if (tx_size == TX_8X8) {
inter_tx_size[0][0] = TX_4X4;
for (i = 0; i < 4; ++i) {
int offsetr = blk_row + ((i >> 1) << bsl);
int offsetc = blk_col + ((i & 0x01) << bsl);
- read_tx_size_vartx(cm, xd, mbmi, counts,
- tx_size - 1, offsetr, offsetc, r);
+ read_tx_size_vartx(cm, xd, mbmi, counts, tx_size - 1, offsetr, offsetc,
+ r);
}
} else {
int idx, idy;
for (idx = 0; idx < num_4x4_blocks_wide_txsize_lookup[tx_size] / 2; ++idx)
inter_tx_size[idy][idx] = tx_size;
mbmi->tx_size = tx_size;
- if (counts)
- ++counts->txfm_partition[ctx][0];
+ if (counts) ++counts->txfm_partition[ctx][0];
txfm_partition_update(xd->above_txfm_context + tx_col,
xd->left_txfm_context + tx_row, tx_size);
}
const int ctx = get_tx_size_context(xd);
const int tx_size_cat = max_tx_size - TX_8X8;
int tx_size = vp10_read_tree(r, vp10_tx_size_tree[tx_size_cat],
- cm->fc->tx_size_probs[tx_size_cat][ctx]);
- if (counts)
- ++counts->tx_size[tx_size_cat][ctx][tx_size];
+ cm->fc->tx_size_probs[tx_size_cat][ctx]);
+ if (counts) ++counts->tx_size[tx_size_cat][ctx][tx_size];
return (TX_SIZE)tx_size;
}
vp10_reader *r) {
TX_MODE tx_mode = cm->tx_mode;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
- if (xd->lossless[xd->mi[0]->mbmi.segment_id])
- return TX_4X4;
+ if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
if (bsize >= BLOCK_8X8) {
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
if (tx_mode == TX_MODE_SELECT) {
int allow_select, vp10_reader *r) {
TX_MODE tx_mode = cm->tx_mode;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
- if (xd->lossless[xd->mi[0]->mbmi.segment_id])
- return TX_4X4;
+ if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
if (bsize >= BLOCK_8X8) {
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
if (allow_select && tx_mode == TX_MODE_SELECT) {
return segment_id;
}
-static void set_segment_id(VP10_COMMON *cm, int mi_offset,
- int x_mis, int y_mis, int segment_id) {
+static void set_segment_id(VP10_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+ int segment_id) {
int x, y;
assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
struct segmentation_probs *const segp = &cm->fc->seg;
int segment_id;
- if (!seg->enabled)
- return 0; // Default for disabled segmentation
+ if (!seg->enabled) return 0; // Default for disabled segmentation
assert(seg->update_map && !seg->temporal_update);
segment_id = read_segment_id(r, segp);
- if (counts)
- ++counts->seg.tree_total[segment_id];
+ if (counts) ++counts->seg.tree_total[segment_id];
set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
return segment_id;
}
static void copy_segment_id(const VP10_COMMON *cm,
- const uint8_t *last_segment_ids,
- uint8_t *current_segment_ids,
- int mi_offset, int x_mis, int y_mis) {
+ const uint8_t *last_segment_ids,
+ uint8_t *current_segment_ids, int mi_offset,
+ int x_mis, int y_mis) {
int x, y;
for (y = 0; y < y_mis; y++)
for (x = 0; x < x_mis; x++)
- current_segment_ids[mi_offset + y * cm->mi_cols + x] = last_segment_ids ?
- last_segment_ids[mi_offset + y * cm->mi_cols + x] : 0;
+ current_segment_ids[mi_offset + y * cm->mi_cols + x] =
+ last_segment_ids ? last_segment_ids[mi_offset + y * cm->mi_cols + x]
+ : 0;
}
static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw);
const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh);
- if (!seg->enabled)
- return 0; // Default for disabled segmentation
+ if (!seg->enabled) return 0; // Default for disabled segmentation
- predicted_segment_id = cm->last_frame_seg_map ?
- dec_get_segment_id(cm, cm->last_frame_seg_map, mi_offset, x_mis, y_mis) :
- 0;
+ predicted_segment_id = cm->last_frame_seg_map
+ ? dec_get_segment_id(cm, cm->last_frame_seg_map,
+ mi_offset, x_mis, y_mis)
+ : 0;
if (!seg->update_map) {
copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
const int ctx = vp10_get_pred_context_seg_id(xd);
const vpx_prob pred_prob = segp->pred_probs[ctx];
mbmi->seg_id_predicted = vp10_read(r, pred_prob);
- if (counts)
- ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
+ if (counts) ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
if (mbmi->seg_id_predicted) {
segment_id = predicted_segment_id;
} else {
segment_id = read_segment_id(r, segp);
- if (counts)
- ++counts->seg.tree_mispred[segment_id];
+ if (counts) ++counts->seg.tree_mispred[segment_id];
}
} else {
segment_id = read_segment_id(r, segp);
- if (counts)
- ++counts->seg.tree_total[segment_id];
+ if (counts) ++counts->seg.tree_total[segment_id];
}
set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
return segment_id;
}
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd,
- int segment_id, vp10_reader *r) {
+static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+ vp10_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
const int ctx = vp10_get_skip_context(xd);
const int skip = vp10_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->skip[ctx][skip];
+ if (counts) ++counts->skip[ctx][skip];
return skip;
}
}
-static void read_palette_mode_info(VP10_COMMON *const cm,
- MACROBLOCKD *const xd,
+static void read_palette_mode_info(VP10_COMMON *const cm, MACROBLOCKD *const xd,
vp10_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
- const MODE_INFO *const left_mi = xd->left_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const BLOCK_SIZE bsize = mbmi->sb_type;
int i, n, palette_ctx = 0;
PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
if (left_mi)
palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
- if (vp10_read(r, vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8]
- [palette_ctx])) {
+ if (vp10_read(
+ r,
+ vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx])) {
pmi->palette_size[0] =
- vp10_read_tree(r, vp10_palette_size_tree,
- vp10_default_palette_y_size_prob[bsize - BLOCK_8X8]) + 2;
+ vp10_read_tree(r, vp10_palette_size_tree,
+ vp10_default_palette_y_size_prob[bsize - BLOCK_8X8]) +
+ 2;
n = pmi->palette_size[0];
for (i = 0; i < n; ++i)
pmi->palette_colors[i] = vp10_read_literal(r, cm->bit_depth);
}
if (mbmi->uv_mode == DC_PRED) {
- if (vp10_read(r,
- vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
+ if (vp10_read(
+ r, vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
pmi->palette_size[1] =
vp10_read_tree(r, vp10_palette_size_tree,
- vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8])
- + 2;
+ vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8]) +
+ 2;
n = pmi->palette_size[1];
for (i = 0; i < n; ++i) {
pmi->palette_colors[PALETTE_MAX_SIZE + i] =
#if !ALLOW_FILTER_INTRA_MODES
return;
#endif
- if (mbmi->mode == DC_PRED &&
- mbmi->palette_mode_info.palette_size[0] == 0) {
+ if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] =
vp10_read(r, cm->fc->ext_intra_probs[0]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
const int ctx = vp10_get_pred_context_intra_interp(xd);
int p_angle;
- if (bsize < BLOCK_8X8)
- return;
+ if (bsize < BLOCK_8X8) return;
if (mbmi->mode != DC_PRED && mbmi->mode != TM_PRED) {
mbmi->angle_delta[0] =
FRAME_COUNTS *counts = xd->counts;
mbmi->intra_filter = vp10_read_tree(r, vp10_intra_filter_tree,
cm->fc->intra_filter_probs[ctx]);
- if (counts)
- ++counts->intra_filter[ctx][mbmi->intra_filter];
+ if (counts) ++counts->intra_filter[ctx][mbmi->intra_filter];
} else {
mbmi->intra_filter = INTRA_FILTER_LINEAR;
}
#endif // CONFIG_EXT_INTRA
static void read_intra_frame_mode_info(VP10_COMMON *const cm,
- MACROBLOCKD *const xd,
- int mi_row, int mi_col, vp10_reader *r) {
+ MACROBLOCKD *const xd, int mi_row,
+ int mi_col, vp10_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *above_mi = xd->above_mi;
- const MODE_INFO *left_mi = xd->left_mi;
+ const MODE_INFO *left_mi = xd->left_mi;
const BLOCK_SIZE bsize = mbmi->sb_type;
int i;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 2));
break;
default:
- mbmi->mode = read_intra_mode(r,
- get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
+ mbmi->mode =
+ read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
}
mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode);
if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools)
read_palette_mode_info(cm, xd, r);
#if CONFIG_EXT_INTRA
- mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
- mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
- if (bsize >= BLOCK_8X8)
- read_ext_intra_mode_info(cm, xd, r);
+ mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+ mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+ if (bsize >= BLOCK_8X8) read_ext_intra_mode_info(cm, xd, r);
#endif // CONFIG_EXT_INTRA
if (!FIXED_TX_TYPE) {
r, vp10_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
if (counts)
- ++counts->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode]
- [mbmi->tx_type];
+ ++counts
+ ->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode][mbmi->tx_type];
}
} else {
mbmi->tx_type = DCT_DCT;
}
#else
- if (mbmi->tx_size < TX_32X32 &&
- cm->base_qindex > 0 && !mbmi->skip &&
+ if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
FRAME_COUNTS *counts = xd->counts;
TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_tree,
- cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
+ mbmi->tx_type =
+ vp10_read_tree(r, vp10_ext_tx_tree,
+ cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
} else {
}
}
-static int read_mv_component(vp10_reader *r,
- const nmv_component *mvcomp, int usehp) {
+static int read_mv_component(vp10_reader *r, const nmv_component *mvcomp,
+ int usehp) {
int mag, d, fr, hp;
const int sign = vp10_read(r, mvcomp->sign);
const int mv_class = vp10_read_tree(r, vp10_mv_class_tree, mvcomp->classes);
const int n = mv_class + CLASS0_BITS - 1; // number of bits
d = 0;
- for (i = 0; i < n; ++i)
- d |= vp10_read(r, mvcomp->bits[i]) << i;
+ for (i = 0; i < n; ++i) d |= vp10_read(r, mvcomp->bits[i]) << i;
mag = CLASS0_SIZE << (mv_class + 2);
}
// Fractional part
- fr = vp10_read_tree(r, vp10_mv_fp_tree, class0 ? mvcomp->class0_fp[d]
- : mvcomp->fp);
+ fr = vp10_read_tree(r, vp10_mv_fp_tree,
+ class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
// High precision part (if hp is not used, it defaults to 1)
- hp = usehp ? vp10_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
- : 1;
+ hp = usehp ? vp10_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
// Result
mag += ((d << 3) | (fr << 1) | hp) + 1;
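// Note (not part of the patch): the composed magnitude is in 1/8-pel units:
// d contributes whole pels (d << 3), fr quarter-pel steps (fr << 1), hp the
// final eighth-pel bit, and the +1 reflects that a zero component is
// signaled by the joint type rather than coded here.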
#if CONFIG_REF_MV
int is_compound,
#endif
- const nmv_context *ctx,
- nmv_context_counts *counts, int allow_hp) {
+ const nmv_context *ctx, nmv_context_counts *counts,
+ int allow_hp) {
MV_JOINT_TYPE joint_type;
const int use_hp = allow_hp && vp10_use_mv_hp(ref);
- MV diff = {0, 0};
+ MV diff = { 0, 0 };
#if CONFIG_REF_MV && !CONFIG_EXT_INTER
if (is_compound) {
if (is_zero_rmv) {
joint_type = MV_JOINT_ZERO;
} else {
- joint_type = (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree,
- ctx->joints);
+ joint_type =
+ (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
}
} else {
- joint_type = (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree,
- ctx->joints);
+ joint_type =
+ (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
}
#else
- joint_type = (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree,
- ctx->joints);
+ joint_type =
+ (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
#endif
#if CONFIG_REF_MV && CONFIG_EXT_INTER
const REFERENCE_MODE mode =
(REFERENCE_MODE)vp10_read(r, cm->fc->comp_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->comp_inter[ctx][mode];
+ if (counts) ++counts->comp_inter[ctx][mode];
return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE
} else {
return cm->reference_mode;
// Read the reference frame
static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- vp10_reader *r,
- int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
+ vp10_reader *r, int segment_id,
+ MV_REFERENCE_FRAME ref_frame[2]) {
FRAME_CONTEXT *const fc = cm->fc;
FRAME_COUNTS *counts = xd->counts;
const int ctx = vp10_get_pred_context_comp_ref_p(cm, xd);
const int bit = vp10_read(r, fc->comp_ref_prob[ctx][0]);
- if (counts)
- ++counts->comp_ref[ctx][0][bit];
+ if (counts) ++counts->comp_ref[ctx][0][bit];
#if CONFIG_EXT_REFS
// Decode forward references.
if (!bit) {
const int ctx1 = vp10_get_pred_context_comp_ref_p1(cm, xd);
const int bit1 = vp10_read(r, fc->comp_ref_prob[ctx1][1]);
- if (counts)
- ++counts->comp_ref[ctx1][1][bit1];
+ if (counts) ++counts->comp_ref[ctx1][1][bit1];
ref_frame[!idx] = cm->comp_fwd_ref[bit1 ? 0 : 1];
} else {
const int ctx2 = vp10_get_pred_context_comp_ref_p2(cm, xd);
const int bit2 = vp10_read(r, fc->comp_ref_prob[ctx2][2]);
- if (counts)
- ++counts->comp_ref[ctx2][2][bit2];
+ if (counts) ++counts->comp_ref[ctx2][2][bit2];
ref_frame[!idx] = cm->comp_fwd_ref[bit2 ? 3 : 2];
}
{
const int ctx_bwd = vp10_get_pred_context_comp_bwdref_p(cm, xd);
const int bit_bwd = vp10_read(r, fc->comp_bwdref_prob[ctx_bwd][0]);
- if (counts)
- ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
+ if (counts) ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
ref_frame[idx] = cm->comp_bwd_ref[bit_bwd];
}
#else
#if CONFIG_EXT_REFS
const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
const int bit0 = vp10_read(r, fc->single_ref_prob[ctx0][0]);
- if (counts)
- ++counts->single_ref[ctx0][0][bit0];
+ if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
const int bit1 = vp10_read(r, fc->single_ref_prob[ctx1][1]);
- if (counts)
- ++counts->single_ref[ctx1][1][bit1];
+ if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : BWDREF_FRAME;
} else {
const int ctx2 = vp10_get_pred_context_single_ref_p3(xd);
const int bit2 = vp10_read(r, fc->single_ref_prob[ctx2][2]);
- if (counts)
- ++counts->single_ref[ctx2][2][bit2];
+ if (counts) ++counts->single_ref[ctx2][2][bit2];
if (bit2) {
const int ctx4 = vp10_get_pred_context_single_ref_p5(xd);
const int bit4 = vp10_read(r, fc->single_ref_prob[ctx4][4]);
- if (counts)
- ++counts->single_ref[ctx4][4][bit4];
+ if (counts) ++counts->single_ref[ctx4][4][bit4];
ref_frame[0] = bit4 ? GOLDEN_FRAME : LAST3_FRAME;
} else {
const int ctx3 = vp10_get_pred_context_single_ref_p4(xd);
const int bit3 = vp10_read(r, fc->single_ref_prob[ctx3][3]);
- if (counts)
- ++counts->single_ref[ctx3][3][bit3];
+ if (counts) ++counts->single_ref[ctx3][3][bit3];
ref_frame[0] = bit3 ? LAST2_FRAME : LAST_FRAME;
}
}
#else
const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
const int bit0 = vp10_read(r, fc->single_ref_prob[ctx0][0]);
- if (counts)
- ++counts->single_ref[ctx0][0][bit0];
+ if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
const int bit1 = vp10_read(r, fc->single_ref_prob[ctx1][1]);
- if (counts)
- ++counts->single_ref[ctx1][1][bit1];
+ if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
} else {
ref_frame[0] = LAST_FRAME;
}
}
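With CONFIG_EXT_REFS the single-reference read above is a fixed binary tree over six references. A minimal sketch of the same decision tree, with a hypothetical read_bit() callback standing in for the context-coded vp10_read() calls (contexts and count updates omitted):

typedef enum { SK_LAST, SK_LAST2, SK_LAST3, SK_GOLDEN, SK_BWDREF, SK_ALTREF } RefSketch;

static RefSketch single_ref_tree_sketch(int (*read_bit)(void *), void *ctx) {
  if (read_bit(ctx))                               /* bit0: backward group */
    return read_bit(ctx) ? SK_ALTREF : SK_BWDREF;  /* bit1 */
  if (read_bit(ctx))                               /* bit2: {LAST3, GOLDEN} */
    return read_bit(ctx) ? SK_GOLDEN : SK_LAST3;   /* bit4 */
  return read_bit(ctx) ? SK_LAST2 : SK_LAST;       /* bit3 */
}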
-
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
-static MOTION_VARIATION read_motvar_block(
- VP10_COMMON *const cm, MACROBLOCKD *const xd, vp10_reader *r) {
+static MOTION_VARIATION read_motvar_block(VP10_COMMON *const cm,
+ MACROBLOCKD *const xd,
+ vp10_reader *r) {
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
FRAME_COUNTS *counts = xd->counts;
MOTION_VARIATION motvar;
if (is_motvar_allowed(&xd->mi[0]->mbmi)) {
- motvar = (MOTION_VARIATION)
- vp10_read_tree(r, vp10_motvar_tree, cm->fc->motvar_prob[bsize]);
- if (counts)
- ++counts->motvar[bsize][motvar];
+ motvar = (MOTION_VARIATION)vp10_read_tree(r, vp10_motvar_tree,
+ cm->fc->motvar_prob[bsize]);
+ if (counts) ++counts->motvar[bsize][motvar];
return motvar;
} else {
return SIMPLE_TRANSLATION;
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
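read_motvar_block() above and read_interp_filter() below both decode a multi-symbol token with vp10_read_tree(). The walk mirrors the standard vpx_reader tree helper, assuming the usual vpx_tree_index convention (non-positive entries are leaves storing the negated token, positive entries index the next node pair):

static int read_tree_sketch(vp10_reader *r, const vpx_tree_index *tree,
                            const vpx_prob *probs) {
  vpx_tree_index i = 0;
  /* Each internal node consumes one bit coded with probs[i >> 1]. */
  while ((i = tree[i + vp10_read(r, probs[i >> 1])]) > 0) continue;
  return -i; /* leaves hold the negated token value */
}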
-static INLINE INTERP_FILTER read_interp_filter(
- VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static INLINE INTERP_FILTER read_interp_filter(VP10_COMMON *const cm,
+ MACROBLOCKD *const xd,
#if CONFIG_DUAL_FILTER
- int dir,
+ int dir,
#endif
- vp10_reader *r) {
+ vp10_reader *r) {
#if CONFIG_EXT_INTERP
if (!vp10_is_interp_needed(xd)) return EIGHTTAP_REGULAR;
#endif
const int ctx = vp10_get_pred_context_switchable_interp(xd);
#endif
FRAME_COUNTS *counts = xd->counts;
- const INTERP_FILTER type =
- (INTERP_FILTER)vp10_read_tree(r, vp10_switchable_interp_tree,
- cm->fc->switchable_interp_prob[ctx]);
- if (counts)
- ++counts->switchable_interp[ctx][type];
+ const INTERP_FILTER type = (INTERP_FILTER)vp10_read_tree(
+ r, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
+ if (counts) ++counts->switchable_interp[ctx][type];
return type;
}
}
mbmi->mode = mi->bmi[3].as_mode;
break;
case BLOCK_4X8:
- mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd,
- r, 0);
+ mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, r, 0);
mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode =
read_intra_mode_y(cm, xd, r, 0);
break;
case BLOCK_8X4:
- mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd,
- r, 0);
+ mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, r, 0);
mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode =
read_intra_mode_y(cm, xd, r, 0);
break;
#if CONFIG_EXT_INTRA
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
- if (bsize >= BLOCK_8X8)
- read_ext_intra_mode_info(cm, xd, r);
+ if (bsize >= BLOCK_8X8) read_ext_intra_mode_info(cm, xd, r);
#endif // CONFIG_EXT_INTRA
}
static INLINE int is_mv_valid(const MV *mv) {
- return mv->row > MV_LOW && mv->row < MV_UPP &&
- mv->col > MV_LOW && mv->col < MV_UPP;
+ return mv->row > MV_LOW && mv->row < MV_UPP && mv->col > MV_LOW &&
+ mv->col < MV_UPP;
}
static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_REF_MV
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
BLOCK_SIZE bsize = mbmi->sb_type;
- int_mv *pred_mv = (bsize >= BLOCK_8X8) ?
- mbmi->pred_mv : xd->mi[0]->bmi[block].pred_mv_s8;
+ int_mv *pred_mv =
+ (bsize >= BLOCK_8X8) ? mbmi->pred_mv : xd->mi[0]->bmi[block].pred_mv_s8;
#endif
switch (mode) {
}
case NEARESTMV: {
mv[0].as_int = nearest_mv[0].as_int;
- if (is_compound)
- mv[1].as_int = nearest_mv[1].as_int;
+ if (is_compound) mv[1].as_int = nearest_mv[1].as_int;
#if CONFIG_REF_MV
pred_mv[0].as_int = nearest_mv[0].as_int;
- if (is_compound)
- pred_mv[1].as_int = nearest_mv[1].as_int;
+ if (is_compound) pred_mv[1].as_int = nearest_mv[1].as_int;
#endif
break;
}
case NEARMV: {
mv[0].as_int = near_mv[0].as_int;
- if (is_compound)
- mv[1].as_int = near_mv[1].as_int;
+ if (is_compound) mv[1].as_int = near_mv[1].as_int;
#if CONFIG_REF_MV
pred_mv[0].as_int = near_mv[0].as_int;
- if (is_compound)
- pred_mv[1].as_int = near_mv[1].as_int;
+ if (is_compound) pred_mv[1].as_int = near_mv[1].as_int;
#endif
break;
}
case ZEROMV: {
mv[0].as_int = 0;
- if (is_compound)
- mv[1].as_int = 0;
+ if (is_compound) mv[1].as_int = 0;
#if CONFIG_REF_MV
pred_mv[0].as_int = 0;
- if (is_compound)
- pred_mv[1].as_int = 0;
+ if (is_compound) pred_mv[1].as_int = 0;
#endif
break;
}
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, is_compound,
- &cm->fc->nmvc[nmv_ctx], mv_counts,
- allow_hp);
+ &cm->fc->nmvc[nmv_ctx], mv_counts, allow_hp);
#else
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts,
allow_hp);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
- &cm->fc->nmvc[nmv_ctx], mv_counts,
- allow_hp);
+ &cm->fc->nmvc[nmv_ctx], mv_counts, allow_hp);
#else
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, &cm->fc->nmvc, mv_counts,
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = nearest_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, is_compound,
- &cm->fc->nmvc[nmv_ctx], mv_counts,
- allow_hp);
+ &cm->fc->nmvc[nmv_ctx], mv_counts, allow_hp);
#else
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
mv[0].as_int = nearest_mv[0].as_int;
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = near_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, is_compound,
- &cm->fc->nmvc[nmv_ctx], mv_counts,
- allow_hp);
+ &cm->fc->nmvc[nmv_ctx], mv_counts, allow_hp);
#else
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
mv[0].as_int = near_mv[0].as_int;
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
- &cm->fc->nmvc[nmv_ctx], mv_counts,
- allow_hp);
+ &cm->fc->nmvc[nmv_ctx], mv_counts, allow_hp);
#else
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, &cm->fc->nmvc, mv_counts,
break;
}
#endif // CONFIG_EXT_INTER
- default: {
- return 0;
- }
+ default: { return 0; }
}
return ret;
}
const int ctx = vp10_get_intra_inter_context(xd);
const int is_inter = vp10_read(r, cm->fc->intra_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
- if (counts)
- ++counts->intra_inter[ctx][is_inter];
+ if (counts) ++counts->intra_inter[ctx][is_inter];
return is_inter;
}
}
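The intra/inter read above is the pattern this file uses everywhere: decode one boolean with a per-context probability, then bump the matching counter only when xd->counts is non-NULL (counts are not accumulated, e.g., during frame-parallel decoding). A sketch of that pattern as a helper; the wrapper is illustrative only, not part of the API:

static INLINE int read_counted_bit(vp10_reader *r, vpx_prob p,
                                   unsigned int *count2 /* 2 counters, may be NULL */) {
  const int bit = vp10_read(r, p);
  if (count2) ++count2[bit];
  return bit;
}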
static void fpm_sync(void *const data, int mi_row) {
VP10Decoder *const pbi = (VP10Decoder *)data;
vp10_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
- mi_row << pbi->common.mib_size_log2);
+ mi_row << pbi->common.mib_size_log2);
}
static void read_inter_block_mode_info(VP10Decoder *const pbi,
if ((!vp10_is_valid_scale(&ref_buf->sf)))
vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
"Reference frame has invalid dimensions");
- vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col,
- &ref_buf->sf);
+ vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
}
for (ref_frame = LAST_FRAME; ref_frame < MODE_CTX_REF_FRAMES; ++ref_frame) {
vp10_find_mv_refs(cm, xd, mi, ref_frame,
#if CONFIG_REF_MV
- &xd->ref_mv_count[ref_frame],
- xd->ref_mv_stack[ref_frame],
+ &xd->ref_mv_count[ref_frame], xd->ref_mv_stack[ref_frame],
#if CONFIG_EXT_INTER
compound_inter_mode_ctx,
#endif // CONFIG_EXT_INTER
#endif
- ref_mvs[ref_frame],
- mi_row, mi_col, fpm_sync, (void *)pbi, inter_mode_ctx);
+ ref_mvs[ref_frame], mi_row, mi_col, fpm_sync, (void *)pbi,
+ inter_mode_ctx);
}
#if CONFIG_REF_MV
mode_ctx = compound_inter_mode_ctx[mbmi->ref_frame[0]];
else
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(inter_mode_ctx,
- mbmi->ref_frame, bsize, -1);
+ mode_ctx =
+ vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1);
mbmi->ref_mv_idx = 0;
#else
mode_ctx = inter_mode_ctx[mbmi->ref_frame[0]];
if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
if (bsize < BLOCK_8X8) {
- vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
- "Invalid usage of segement feature on small blocks");
- return;
+ vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+                         "Invalid usage of segment feature on small blocks");
+ return;
}
} else {
if (bsize >= BLOCK_8X8) {
mbmi->mode = read_inter_compound_mode(cm, xd, r, mode_ctx);
else
#endif // CONFIG_EXT_INTER
- mbmi->mode = read_inter_mode(cm, xd,
+ mbmi->mode = read_inter_mode(cm, xd,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
- mbmi,
+ mbmi,
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
- r, mode_ctx);
+ r, mode_ctx);
#if CONFIG_REF_MV
if (mbmi->mode == NEARMV || mbmi->mode == NEWMV)
read_drl_idx(cm, xd, mbmi, r);
#if CONFIG_EXT_INTER
if (xd->ref_mv_count[ref_frame_type] > 1) {
- if (mbmi->mode == NEAR_NEWMV ||
- mbmi->mode == NEAR_NEARESTMV ||
+ if (mbmi->mode == NEAR_NEWMV || mbmi->mode == NEAR_NEARESTMV ||
mbmi->mode == NEAR_NEARMV) {
nearmv[0] = xd->ref_mv_stack[ref_frame_type][1].this_mv;
lower_mv_precision(&nearmv[0].as_mv, allow_hp);
}
- if (mbmi->mode == NEW_NEARMV ||
- mbmi->mode == NEAREST_NEARMV ||
+ if (mbmi->mode == NEW_NEARMV || mbmi->mode == NEAREST_NEARMV ||
mbmi->mode == NEAR_NEARMV) {
nearmv[1] = xd->ref_mv_stack[ref_frame_type][1].comp_mv;
lower_mv_precision(&nearmv[1].as_mv, allow_hp);
#if CONFIG_EXT_INTER
if (!is_compound)
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
- bsize, j);
+ mode_ctx = vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
+ bsize, j);
#endif
#if CONFIG_EXT_INTER
if (is_compound)
b_mode = read_inter_compound_mode(cm, xd, r, mode_ctx);
else
#endif // CONFIG_EXT_INTER
- b_mode = read_inter_mode(cm, xd,
+ b_mode = read_inter_mode(cm, xd,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
- mbmi,
+ mbmi,
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
- r, mode_ctx);
+ r, mode_ctx);
#if CONFIG_EXT_INTER
mv_idx = (b_mode == NEWFROMNEARMV) ? 1 : 0;
#if CONFIG_EXT_INTER
{
int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
- vp10_update_mv_context(xd, mi, mbmi->ref_frame[ref],
- mv_ref_list, j, mi_row, mi_col, NULL);
+ vp10_update_mv_context(xd, mi, mbmi->ref_frame[ref], mv_ref_list, j,
+ mi_row, mi_col, NULL);
#endif // CONFIG_EXT_INTER
- vp10_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
+ vp10_append_sub8x8_mvs_for_idx(
+ cm, xd, j, ref, mi_row, mi_col,
#if CONFIG_REF_MV
- ref_mv_stack[ref],
- &ref_mv_count[ref],
+ ref_mv_stack[ref], &ref_mv_count[ref],
#endif
#if CONFIG_EXT_INTER
- mv_ref_list,
+ mv_ref_list,
#endif // CONFIG_EXT_INTER
- &nearest_sub8x8[ref],
- &near_sub8x8[ref]);
+ &nearest_sub8x8[ref], &near_sub8x8[ref]);
#if CONFIG_EXT_INTER
if (have_newmv_in_inter_mode(b_mode)) {
mv_ref_list[0].as_int = nearest_sub8x8[ref].as_int;
mv_ref_list[1].as_int = near_sub8x8[ref].as_int;
- vp10_find_best_ref_mvs(allow_hp, mv_ref_list,
- &ref_mv[0][ref], &ref_mv[1][ref]);
+ vp10_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
+ &ref_mv[1][ref]);
}
}
#endif // CONFIG_EXT_INTER
#else
ref_mv_s8,
#endif // CONFIG_EXT_INTER
- nearest_sub8x8, near_sub8x8,
- is_compound, allow_hp, r)) {
+ nearest_sub8x8, near_sub8x8, is_compound, allow_hp, r)) {
xd->corrupted |= 1;
break;
};
mi->bmi[j].as_mv[0].as_int = block[0].as_int;
- if (is_compound)
- mi->bmi[j].as_mv[1].as_int = block[1].as_int;
+ if (is_compound) mi->bmi[j].as_mv[1].as_int = block[1].as_int;
- if (num_4x4_h == 2)
- mi->bmi[j + 2] = mi->bmi[j];
- if (num_4x4_w == 2)
- mi->bmi[j + 1] = mi->bmi[j];
+ if (num_4x4_h == 2) mi->bmi[j + 2] = mi->bmi[j];
+ if (num_4x4_w == 2) mi->bmi[j + 1] = mi->bmi[j];
}
}
#if CONFIG_REF_MV
uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
if (xd->ref_mv_count[ref_frame_type] > 1) {
- ref_mv[ref] = (ref == 0) ?
- xd->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx].this_mv :
- xd->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx].comp_mv;
+ ref_mv[ref] =
+ (ref == 0)
+ ? xd->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx].this_mv
+ : xd->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx].comp_mv;
clamp_mv_ref(&ref_mv[ref].as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
}
#endif
nearestmv[ref] = ref_mv[ref];
}
- xd->corrupted |= !assign_mv(cm, xd, mbmi->mode,
+ xd->corrupted |=
+ !assign_mv(cm, xd, mbmi->mode,
#if CONFIG_REF_MV
- 0,
+ 0,
#endif
- mbmi->mv,
+ mbmi->mv,
#if CONFIG_EXT_INTER
- mbmi->mode == NEWFROMNEARMV ?
- nearmv : nearestmv,
+ mbmi->mode == NEWFROMNEARMV ? nearmv : nearestmv,
#else
- ref_mv,
+ ref_mv,
#endif // CONFIG_EXT_INTER
- nearestmv, nearmv, is_compound, allow_hp, r);
+ nearestmv, nearmv, is_compound, allow_hp, r);
}
#if CONFIG_EXT_INTER
is_interintra_allowed(mbmi)) {
const int bsize_group = size_group_lookup[bsize];
const int interintra = vp10_read(r, cm->fc->interintra_prob[bsize_group]);
- if (xd->counts)
- xd->counts->interintra[bsize_group][interintra]++;
+ if (xd->counts) xd->counts->interintra[bsize_group][interintra]++;
assert(mbmi->ref_frame[1] == NONE);
if (interintra) {
const INTERINTRA_MODE interintra_mode =
#if CONFIG_EXT_INTER
if (mbmi->ref_frame[1] != INTRA_FRAME)
#endif // CONFIG_EXT_INTER
- mbmi->motion_variation = read_motvar_block(cm, xd, r);
+ mbmi->motion_variation = read_motvar_block(cm, xd, r);
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
#if CONFIG_EXT_INTER
#if CONFIG_DUAL_FILTER
for (ref = 0; ref < 2; ++ref) {
- mbmi->interp_filter[ref] = (cm->interp_filter == SWITCHABLE) ?
- EIGHTTAP_REGULAR : cm->interp_filter;
+ mbmi->interp_filter[ref] = (cm->interp_filter == SWITCHABLE)
+ ? EIGHTTAP_REGULAR
+ : cm->interp_filter;
if (has_subpel_mv_component(xd->mi[0], xd, ref) ||
(mbmi->ref_frame[1] > INTRA_FRAME &&
#if CONFIG_VAR_TX
xd->above_txfm_context = cm->above_txfm_context + mi_col;
xd->left_txfm_context =
- xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
- if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
- !mbmi->skip && inter_block) {
+ xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
+ if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && !mbmi->skip &&
+ inter_block) {
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
const int bs = num_4x4_blocks_wide_lookup[txb_size];
- const int width = num_4x4_blocks_wide_lookup[bsize];
+ const int width = num_4x4_blocks_wide_lookup[bsize];
const int height = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
for (idy = 0; idy < height; idy += bs)
for (idx = 0; idx < width; idx += bs)
- read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size,
- idy, idx, r);
+ read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size, idy, idx,
+ r);
if (xd->counts) {
const int ctx = get_tx_size_context(xd);
++xd->counts->tx_size[max_tx_size - TX_8X8][ctx][mbmi->tx_size];
mbmi->tx_size = read_tx_size_intra(cm, xd, r);
if (inter_block) {
- const int width = num_4x4_blocks_wide_lookup[bsize];
+ const int width = num_4x4_blocks_wide_lookup[bsize];
const int height = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
for (idy = 0; idy < height; ++idy)
set_txfm_ctx(xd->above_txfm_context, mbmi->tx_size, xd->n8_w);
}
#else
- if (inter_block)
- mbmi->tx_size = read_tx_size_inter(cm, xd, !mbmi->skip, r);
- else
- mbmi->tx_size = read_tx_size_intra(cm, xd, r);
+ if (inter_block)
+ mbmi->tx_size = read_tx_size_inter(cm, xd, !mbmi->skip, r);
+ else
+ mbmi->tx_size = read_tx_size_intra(cm, xd, r);
#endif // CONFIG_VAR_TX
#if CONFIG_SUPERTX
}
#if CONFIG_VAR_TX
else if (inter_block) {
- const int width = num_4x4_blocks_wide_lookup[bsize];
+ const int width = num_4x4_blocks_wide_lookup[bsize];
const int height = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
xd->mi[0]->mbmi.tx_size = xd->supertx_size;
for (idy = 0; idy < height; ++idy)
for (idx = 0; idx < width; ++idx)
- xd->mi[0]->mbmi.inter_tx_size[idy >> 1][idx >> 1] =
- xd->supertx_size;
+ xd->mi[0]->mbmi.inter_tx_size[idy >> 1][idx >> 1] = xd->supertx_size;
}
#endif // CONFIG_VAR_TX
#endif // CONFIG_SUPERTX
!supertx_enabled &&
#endif // CONFIG_SUPERTX
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- int eset = get_ext_tx_set(mbmi->tx_size, mbmi->sb_type,
- inter_block);
+ int eset = get_ext_tx_set(mbmi->tx_size, mbmi->sb_type, inter_block);
FRAME_COUNTS *counts = xd->counts;
if (inter_block) {
if (eset > 0) {
mbmi->tx_type =
vp10_read_tree(r, vp10_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][mbmi->tx_size]);
+ cm->fc->inter_ext_tx_prob[eset][mbmi->tx_size]);
if (counts)
++counts->inter_ext_tx[eset][mbmi->tx_size][mbmi->tx_type];
}
} else if (ALLOW_INTRA_EXT_TX) {
if (eset > 0) {
- mbmi->tx_type = vp10_read_tree(r, vp10_ext_tx_intra_tree[eset],
- cm->fc->intra_ext_tx_prob[eset]
- [mbmi->tx_size][mbmi->mode]);
+ mbmi->tx_type = vp10_read_tree(
+ r, vp10_ext_tx_intra_tree[eset],
+ cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
if (counts)
- ++counts->intra_ext_tx[eset][mbmi->tx_size]
- [mbmi->mode][mbmi->tx_type];
+ ++counts->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode]
+ [mbmi->tx_type];
}
}
} else {
mbmi->tx_type = DCT_DCT;
}
#else
- if (mbmi->tx_size < TX_32X32 &&
- cm->base_qindex > 0 && !mbmi->skip &&
+ if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
#if CONFIG_SUPERTX
!supertx_enabled &&
#endif // CONFIG_SUPERTX
FRAME_COUNTS *counts = xd->counts;
if (inter_block) {
mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
- if (counts)
- ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
+ r, vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
+ if (counts) ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
} else {
const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
mbmi->tx_type = vp10_read_tree(
#if CONFIG_SUPERTX
int supertx_enabled,
#endif // CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r,
- int x_mis, int y_mis) {
+ int mi_row, int mi_col, vp10_reader *r, int x_mis,
+ int y_mis) {
VP10_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
- MV_REF* frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+ MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
if (frame_is_intra_only(cm)) {
int supertx_enabled,
#endif
- int mi_row, int mi_col, vp10_reader *r,
- int x_mis, int y_mis);
+ int mi_row, int mi_col, vp10_reader *r, int x_mis,
+ int y_mis);
#ifdef __cplusplus
} // extern "C"
static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) {
cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
- if (!cm->mip)
- return 1;
+ if (!cm->mip) return 1;
cm->mi_alloc_size = mi_size;
- cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
- if (!cm->mi_grid_base)
- return 1;
+ cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ if (!cm->mi_grid_base) return 1;
return 0;
}
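A hypothetical call site for the allocator above; mi_size stands for the value computed elsewhere from the frame dimensions, and a nonzero return is treated as out-of-memory:

if (vp10_dec_alloc_mi(cm, mi_size))
  vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                     "Failed to allocate decoder mode-info arrays");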
VP10Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
VP10_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
- if (!cm)
- return NULL;
+ if (!cm) return NULL;
vp10_zero(*pbi);
cm->error.setjmp = 1;
- CHECK_MEM_ERROR(cm, cm->fc,
- (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
- CHECK_MEM_ERROR(cm, cm->frame_contexts,
- (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS,
- sizeof(*cm->frame_contexts)));
+ CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(
+ cm, cm->frame_contexts,
+ (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
pbi->need_resync = 1;
once(initialize_dec);
void vp10_decoder_remove(VP10Decoder *pbi) {
int i;
- if (!pbi)
- return;
+ if (!pbi) return;
vpx_get_worker_interface()->end(&pbi->lf_worker);
vpx_free(pbi->lf_worker.data1);
static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
- return a->y_height == b->y_height && a->y_width == b->y_width &&
- a->uv_height == b->uv_height && a->uv_width == b->uv_width;
+ return a->y_height == b->y_height && a->y_width == b->y_width &&
+ a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}
vpx_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
+ VPX_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
VP10_COMMON *cm = &pbi->common;
/* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
else
vpx_yv12_copy_frame(cfg, sd);
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
- "Invalid reference frame");
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
}
return cm->error.error_code;
}
-
vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
+ VPX_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
int idx;
YV12_BUFFER_CONFIG *ref_buf = NULL;
idx = cm->ref_frame_map[2];
#endif // CONFIG_EXT_REFS
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
- "Invalid reference frame");
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
return cm->error.error_code;
}
// Release the reference frame held in the reference map for the decoding
// of the next frame.
- if (mask & 1)
- decrease_ref_count(old_idx, frame_bufs, pool);
+ if (mask & 1) decrease_ref_count(old_idx, frame_bufs, pool);
cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
++ref_index;
}
}
}
-int vp10_receive_compressed_data(VP10Decoder *pbi,
- size_t size, const uint8_t **psource) {
+int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
+ const uint8_t **psource) {
VP10_COMMON *volatile const cm = &pbi->common;
BufferPool *volatile const pool = cm->buffer_pool;
RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
// Check if the previous frame was a frame without any references to it.
// Release frame buffer if not decoding in frame parallel mode.
- if (!cm->frame_parallel_decode && cm->new_fb_idx >= 0
- && frame_bufs[cm->new_fb_idx].ref_count == 0)
+ if (!cm->frame_parallel_decode && cm->new_fb_idx >= 0 &&
+ frame_bufs[cm->new_fb_idx].ref_count == 0)
pool->release_fb_cb(pool->cb_priv,
&frame_bufs[cm->new_fb_idx].raw_frame_buffer);
// Find a free frame buffer. Return an error if none can be found.
cm->new_fb_idx = get_free_fb(cm);
- if (cm->new_fb_idx == INVALID_IDX)
- return VPX_CODEC_MEM_ERROR;
+ if (cm->new_fb_idx == INVALID_IDX) return VPX_CODEC_MEM_ERROR;
// Assign a MV array to the frame buffer.
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
// The current thread releases its hold on the reference frame.
decrease_ref_count(old_idx, frame_bufs, pool);
- // Release the reference frame holding in the reference map for the
- // decoding of the next frame.
- if (mask & 1)
- decrease_ref_count(old_idx, frame_bufs, pool);
+      // Release the reference frame held in the reference map for the
+      // decoding of the next frame.
+ if (mask & 1) decrease_ref_count(old_idx, frame_bufs, pool);
++ref_index;
}
int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
VP10_COMMON *const cm = &pbi->common;
int ret = -1;
- if (pbi->ready_for_new_data == 1)
- return ret;
+ if (pbi->ready_for_new_data == 1) return ret;
pbi->ready_for_new_data = 1;
/* no raw frame to show */
- if (!cm->show_frame)
- return ret;
+ if (!cm->show_frame) return ret;
pbi->ready_for_new_data = 1;
*sd = *cm->frame_to_show;
return ret;
}
-int vp10_get_frame_to_show(VP10Decoder *pbi,
- YV12_BUFFER_CONFIG *frame) {
+int vp10_get_frame_to_show(VP10Decoder *pbi, YV12_BUFFER_CONFIG *frame) {
VP10_COMMON *const cm = &pbi->common;
- if (!cm->show_frame || !cm->frame_to_show)
- return -1;
+ if (!cm->show_frame || !cm->frame_to_show) return -1;
*frame = *cm->frame_to_show;
return 0;
}
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
- size_t data_sz,
- uint32_t sizes[8], int *count,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state) {
+vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
+ uint32_t sizes[8], int *count,
+ vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state) {
// A chunk ending with a byte matching 0xc0 is an invalid chunk unless
// it is a super frame index. If the last byte of real video compression
// data is 0xc0 the encoder must add a 0 byte. If we have the marker but
// This chunk is marked as having a superframe index but doesn't have
// enough data for it, thus it's an invalid superframe index.
- if (data_sz < index_sz)
- return VPX_CODEC_CORRUPT_FRAME;
+ if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;
{
- const uint8_t marker2 = read_marker(decrypt_cb, decrypt_state,
- data + data_sz - index_sz);
+ const uint8_t marker2 =
+ read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);
// This chunk is marked as having a superframe index but doesn't have
// the matching marker byte at the front of the index therefore it's an
// invalid chunk.
- if (marker != marker2)
- return VPX_CODEC_CORRUPT_FRAME;
+ if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
}
{
for (i = 0; i < frames - 1; ++i) {
uint32_t this_sz = 0;
- for (j = 0; j < mag; ++j)
- this_sz |= (*x++) << (j * 8);
+ for (j = 0; j < mag; ++j) this_sz |= (*x++) << (j * 8);
this_sz += 1;
sizes[i] = this_sz;
frame_sz_sum += this_sz;
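The loop above rebuilds each frame size from mag little-endian bytes and adds one, matching the convention that superframe sizes are stored minus one. A self-contained restatement with a worked example: with mag == 2, the bytes {0x0f, 0x01} accumulate to 0x0f | (0x01 << 8) = 271, so the decoded size is 272.

static uint32_t decode_stored_size_sketch(const uint8_t *x, int mag) {
  uint32_t sz = 0;
  int j;
  for (j = 0; j < mag; ++j) sz |= (uint32_t)x[j] << (j * 8);
  return sz + 1; /* sizes are stored minus one */
}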
size_t size;
const uint8_t *raw_data_end; // The end of the raw tile buffer in the
// bit stream.
- int col; // only used with multi-threaded decoding
+ int col; // only used with multi-threaded decoding
} TileBufferDec;
typedef struct VP10Decoder {
// TODO(hkuang): Combine this with cur_buf in macroblockd as they are
// the same.
- RefCntBuffer *cur_buf; // Current decoding frame buffer.
+ RefCntBuffer *cur_buf; // Current decoding frame buffer.
- VPxWorker *frame_worker_owner; // frame_worker that owns this pbi.
+ VPxWorker *frame_worker_owner; // frame_worker that owns this pbi.
VPxWorker lf_worker;
VPxWorker *tile_workers;
TileWorkerData *tile_worker_data;
int max_threads;
int inv_tile_order;
- int need_resync; // wait for key/intra-only frame.
+ int need_resync; // wait for key/intra-only frame.
int hold_ref_buf; // hold the reference buffer.
int tile_size_bytes;
#endif // CONFIG_EXT_TILE
} VP10Decoder;
-int vp10_receive_compressed_data(struct VP10Decoder *pbi,
- size_t size, const uint8_t **dest);
+int vp10_receive_compressed_data(struct VP10Decoder *pbi, size_t size,
+ const uint8_t **dest);
int vp10_get_raw_frame(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd);
int vp10_get_frame_to_show(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *frame);
vpx_codec_err_t vp10_copy_reference_dec(struct VP10Decoder *pbi,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd);
+ VPX_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd);
+ VPX_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
- void *decrypt_state,
- const uint8_t *data) {
+ void *decrypt_state, const uint8_t *data) {
if (decrypt_cb) {
uint8_t marker;
decrypt_cb(decrypt_state, data, &marker, 1);
// This function is exposed for use in tests, as well as the inlined function
// "read_marker".
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
- size_t data_sz,
- uint32_t sizes[8], int *count,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state);
+vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
+ uint32_t sizes[8], int *count,
+ vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state);
struct VP10Decoder *vp10_decoder_create(BufferPool *const pool);
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
if (ref_frame->idx == INVALID_IDX) continue;
- if (frame_buf == &cm->buffer_pool->frame_bufs[ref_frame->idx])
- break;
+ if (frame_buf == &cm->buffer_pool->frame_bufs[ref_frame->idx]) break;
}
return (i < INTER_REFS_PER_FRAME);
}
#include "vp10/decoder/detokenize.h"
-#define EOB_CONTEXT_NODE 0
-#define ZERO_CONTEXT_NODE 1
-#define ONE_CONTEXT_NODE 2
-#define LOW_VAL_CONTEXT_NODE 0
-#define TWO_CONTEXT_NODE 1
-#define THREE_CONTEXT_NODE 2
-#define HIGH_LOW_CONTEXT_NODE 3
-#define CAT_ONE_CONTEXT_NODE 4
-#define CAT_THREEFOUR_CONTEXT_NODE 5
-#define CAT_THREE_CONTEXT_NODE 6
-#define CAT_FIVE_CONTEXT_NODE 7
-
-#define INCREMENT_COUNT(token) \
- do { \
- if (counts) \
- ++coef_counts[band][ctx][token]; \
+#define EOB_CONTEXT_NODE 0
+#define ZERO_CONTEXT_NODE 1
+#define ONE_CONTEXT_NODE 2
+#define LOW_VAL_CONTEXT_NODE 0
+#define TWO_CONTEXT_NODE 1
+#define THREE_CONTEXT_NODE 2
+#define HIGH_LOW_CONTEXT_NODE 3
+#define CAT_ONE_CONTEXT_NODE 4
+#define CAT_THREEFOUR_CONTEXT_NODE 5
+#define CAT_THREE_CONTEXT_NODE 6
+#define CAT_FIVE_CONTEXT_NODE 7
+
+#define INCREMENT_COUNT(token) \
+ do { \
+ if (counts) ++coef_counts[band][ctx][token]; \
} while (0)
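INCREMENT_COUNT is wrapped in do { ... } while (0) so that it expands to exactly one statement. Without the wrapper, a following else would bind to the macro's hidden if; a self-contained illustration of that failure mode (INC_UNWRAPPED is hypothetical):

#define INC_UNWRAPPED(p) if ((p) != NULL) ++*(p)

static void dangling_else_sketch(int cond, int *p) {
  if (cond)
    INC_UNWRAPPED(p);
  else        /* binds to the macro's hidden 'if', not to 'if (cond)' */
    --*p;     /* so this runs when cond is true and p is NULL */
}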
#if !CONFIG_ANS
static INLINE int read_coeff(const vpx_prob *probs, int n, vp10_reader *r) {
int i, val = 0;
- for (i = 0; i < n; ++i)
- val = (val << 1) | vp10_read(r, probs[i]);
+ for (i = 0; i < n; ++i) val = (val << 1) | vp10_read(r, probs[i]);
return val;
}
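read_coeff() folds n per-bit probabilities into one magnitude, MSB first; the CATEGORYx tokens below use it for their extra bits. For example, assuming the usual cat3 probability table from the entropy code, a category-3 token reads three bits b2 b1 b0 and yields an 8-value range:

/* val = CAT3_MIN_VAL + (b2 << 2 | b1 << 1 | b0) */
const int cat3_val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, r);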
-static int decode_coefs(const MACROBLOCKD *xd,
- PLANE_TYPE type,
+static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, TX_TYPE tx_type,
const int16_t *dq,
#if CONFIG_NEW_QUANT
const int ref = is_inter_block(&xd->mi[0]->mbmi);
int band, c = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
- const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
const vpx_prob *prob;
- unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
- unsigned int (*eob_branch_count)[COEFF_CONTEXTS];
+ unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
+ unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
uint8_t token_cache[MAX_TX_SQUARE];
const uint8_t *band_translate = get_band_translate(tx_size);
int dq_shift;
int val = -1;
band = *band_translate++;
prob = coef_probs[band][ctx];
- if (counts)
- ++eob_branch_count[band][ctx];
+ if (counts) ++eob_branch_count[band][ctx];
if (!vp10_read(r, prob[EOB_CONTEXT_NODE])) {
INCREMENT_COUNT(EOB_MODEL_TOKEN);
break;
dqv = dq[1];
token_cache[scan[c]] = 0;
++c;
- if (c >= max_eob)
- return c; // zero tokens at the end (no eob token)
+ if (c >= max_eob) return c; // zero tokens at the end (no eob token)
ctx = get_coef_context(nb, token_cache, c);
band = *band_translate++;
prob = coef_probs[band][ctx];
} else {
INCREMENT_COUNT(TWO_TOKEN);
token = vp10_read_tree(r, vp10_coef_con_tree,
- vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
+ vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
switch (token) {
case TWO_TOKEN:
case THREE_TOKEN:
- case FOUR_TOKEN:
- val = token;
- break;
+ case FOUR_TOKEN: val = token; break;
case CATEGORY1_TOKEN:
val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, r);
break;
case VPX_BITS_12:
val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, r);
break;
- default:
- assert(0);
- return -1;
+ default: assert(0); return -1;
}
#else
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VP9_HIGHBITDEPTH
- dqcoeff[scan[c]] = highbd_check_range((vp10_read_bit(r) ? -v : v),
- xd->bd);
+ dqcoeff[scan[c]] = highbd_check_range((vp10_read_bit(r) ? -v : v), xd->bd);
#else
dqcoeff[scan[c]] = check_range(vp10_read_bit(r) ? -v : v);
#endif // CONFIG_VP9_HIGHBITDEPTH
static INLINE int read_coeff(const vpx_prob *const probs, int n,
struct AnsDecoder *const ans) {
int i, val = 0;
- for (i = 0; i < n; ++i)
- val = (val << 1) | uabs_read(ans, probs[i]);
+ for (i = 0; i < n; ++i) val = (val << 1) | uabs_read(ans, probs[i]);
return val;
}
-static int decode_coefs_ans(const MACROBLOCKD *const xd,
- PLANE_TYPE type,
+static int decode_coefs_ans(const MACROBLOCKD *const xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size,
- TX_TYPE tx_type,
- const int16_t *dq,
+ TX_TYPE tx_type, const int16_t *dq,
#if CONFIG_NEW_QUANT
dequant_val_type_nuq *dq_val,
#endif // CONFIG_NEW_QUANT
int band, c = 0;
int skip_eob = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
- const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
const rans_dec_lut(*coef_cdfs)[COEFF_CONTEXTS] =
fc->coef_cdfs[tx_size_ctx][type][ref];
const vpx_prob *prob;
const rans_dec_lut *cdf;
- unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
- unsigned int (*eob_branch_count)[COEFF_CONTEXTS];
+ unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
+ unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
uint8_t token_cache[MAX_TX_SQUARE];
const uint8_t *band_translate = get_band_translate(tx_size);
int dq_shift;
band = *band_translate++;
prob = coef_probs[band][ctx];
if (!skip_eob) {
- if (counts)
- ++eob_branch_count[band][ctx];
+ if (counts) ++eob_branch_count[band][ctx];
if (!uabs_read(ans, prob[EOB_CONTEXT_NODE])) {
INCREMENT_COUNT(EOB_MODEL_TOKEN);
break;
case ONE_TOKEN:
case TWO_TOKEN:
case THREE_TOKEN:
- case FOUR_TOKEN:
- val = token;
- break;
+ case FOUR_TOKEN: val = token; break;
case CATEGORY1_TOKEN:
val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, ans);
break;
case VPX_BITS_12:
val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, ans);
break;
- default:
- assert(0);
- return -1;
+ default: assert(0); return -1;
}
#else
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
} break;
}
#if CONFIG_NEW_QUANT
- v = vp10_dequant_abscoeff_nuq(val, dqv, dqv_val);
- v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
+ v = vp10_dequant_abscoeff_nuq(val, dqv, dqv_val);
+ v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
#else
- v = (val * dqv) >> dq_shift;
+ v = (val * dqv) >> dq_shift;
#endif // CONFIG_NEW_QUANT
#if CONFIG_COEFFICIENT_RANGE_CHECKING
}
#endif // !CONFIG_ANS
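Both coefficient decoders above reconstruct a magnitude as v = (val * dqv) >> dq_shift, where dq_shift compensates for the extra headroom of larger transforms. A worked example, assuming dq_shift == 1 for a 32x32-style transform: val = 5 with dqv = 48 dequantizes to (5 * 48) >> 1 = 120.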
-// TODO(slavarnway): Decode version of vp10_set_context. Modify vp10_set_context
+// TODO(slavarnway): Decode version of vp10_set_context. Modify
+// vp10_set_context
// after testing is complete, then delete this version.
-static
-void dec_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
- TX_SIZE tx_size, int has_eob,
- int aoff, int loff) {
+static void dec_set_contexts(const MACROBLOCKD *xd,
+ struct macroblockd_plane *pd, TX_SIZE tx_size,
+ int has_eob, int aoff, int loff) {
ENTROPY_CONTEXT *const a = pd->above_context + aoff;
ENTROPY_CONTEXT *const l = pd->left_context + loff;
const int tx_w_in_blocks = num_4x4_blocks_wide_txsize_lookup[tx_size];
// above
if (has_eob && xd->mb_to_right_edge < 0) {
int i;
- const int blocks_wide = pd->n4_w +
- (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ const int blocks_wide =
+ pd->n4_w + (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
int above_contexts = tx_w_in_blocks;
if (above_contexts + aoff > blocks_wide)
above_contexts = blocks_wide - aoff;
- for (i = 0; i < above_contexts; ++i)
- a[i] = has_eob;
- for (i = above_contexts; i < tx_w_in_blocks; ++i)
- a[i] = 0;
+ for (i = 0; i < above_contexts; ++i) a[i] = has_eob;
+ for (i = above_contexts; i < tx_w_in_blocks; ++i) a[i] = 0;
} else {
memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_w_in_blocks);
}
// left
if (has_eob && xd->mb_to_bottom_edge < 0) {
int i;
- const int blocks_high = pd->n4_h +
- (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+ const int blocks_high =
+ pd->n4_h + (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
int left_contexts = tx_h_in_blocks;
- if (left_contexts + loff > blocks_high)
- left_contexts = blocks_high - loff;
+ if (left_contexts + loff > blocks_high) left_contexts = blocks_high - loff;
- for (i = 0; i < left_contexts; ++i)
- l[i] = has_eob;
- for (i = left_contexts; i < tx_h_in_blocks; ++i)
- l[i] = 0;
+ for (i = 0; i < left_contexts; ++i) l[i] = has_eob;
+ for (i = left_contexts; i < tx_h_in_blocks; ++i) l[i] = 0;
} else {
memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_h_in_blocks);
}
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
const int rows = (4 * num_4x4_blocks_high_lookup[bsize]) >>
- (xd->plane[plane != 0].subsampling_y);
+ (xd->plane[plane != 0].subsampling_y);
const int cols = (4 * num_4x4_blocks_wide_lookup[bsize]) >>
- (xd->plane[plane != 0].subsampling_x);
+ (xd->plane[plane != 0].subsampling_x);
int color_idx, color_ctx, color_order[PALETTE_MAX_SIZE];
int n = mbmi->palette_mode_info.palette_size[plane != 0];
int i, j;
uint8_t *color_map = xd->plane[plane != 0].color_index_map;
- const vpx_prob (* const prob)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
- plane ? vp10_default_palette_uv_color_prob :
- vp10_default_palette_y_color_prob;
+ const vpx_prob (*const prob)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
+ plane ? vp10_default_palette_uv_color_prob
+ : vp10_default_palette_y_color_prob;
for (i = 0; i < rows; ++i) {
for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
- color_ctx = vp10_get_palette_color_context(color_map, cols, i, j, n,
- color_order);
+ color_ctx =
+ vp10_get_palette_color_context(color_map, cols, i, j, n, color_order);
color_idx = vp10_read_tree(r, vp10_palette_color_tree[n - 2],
- prob[n - 2][color_ctx]);
+ prob[n - 2][color_ctx]);
assert(color_idx >= 0 && color_idx < n);
color_map[i * cols + j] = color_order[color_idx];
}
}
}
-int vp10_decode_block_tokens(MACROBLOCKD *const xd,
- int plane, const scan_order *sc,
- int x, int y,
- TX_SIZE tx_size,
- TX_TYPE tx_type,
+int vp10_decode_block_tokens(MACROBLOCKD *const xd, int plane,
+ const scan_order *sc, int x, int y,
+ TX_SIZE tx_size, TX_TYPE tx_type,
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
int seg_id) {
struct macroblockd_plane *const pd = &xd->plane[plane];
const int16_t *const dequant = pd->seg_dequant[seg_id];
- const int ctx = get_entropy_context(tx_size, pd->above_context + x,
- pd->left_context + y);
+ const int ctx =
+ get_entropy_context(tx_size, pd->above_context + x, pd->left_context + y);
#if CONFIG_NEW_QUANT
int dq = get_dq_profile_from_ctx(ctx);
#endif // CONFIG_NEW_QUANT
#if !CONFIG_ANS
- const int eob = decode_coefs(xd, pd->plane_type,
- pd->dqcoeff, tx_size, tx_type,
- dequant,
+ const int eob =
+ decode_coefs(xd, pd->plane_type, pd->dqcoeff, tx_size, tx_type, dequant,
#if CONFIG_NEW_QUANT
- pd->seg_dequant_nuq[seg_id][dq],
+ pd->seg_dequant_nuq[seg_id][dq],
#endif // CONFIG_NEW_QUANT
- ctx, sc->scan, sc->neighbors, r);
+ ctx, sc->scan, sc->neighbors, r);
#else
- const int eob = decode_coefs_ans(xd, pd->plane_type,
- pd->dqcoeff, tx_size, tx_type,
- dequant,
+ const int eob = decode_coefs_ans(xd, pd->plane_type, pd->dqcoeff, tx_size,
+ tx_type, dequant,
#if CONFIG_NEW_QUANT
pd->seg_dequant_nuq[seg_id][dq],
#endif // CONFIG_NEW_QUANT
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_DECODER_DETOKENIZE_H_
#define VP10_DECODER_DETOKENIZE_H_
void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
vp10_reader *r);
-int vp10_decode_block_tokens(MACROBLOCKD *const xd,
- int plane, const scan_order *sc,
- int x, int y,
- TX_SIZE tx_size,
- TX_TYPE tx_type,
+int vp10_decode_block_tokens(MACROBLOCKD *const xd, int plane,
+ const scan_order *sc, int x, int y,
+ TX_SIZE tx_size, TX_TYPE tx_type,
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
#include "vp10/decoder/dsubexp.h"
static int inv_recenter_nonneg(int v, int m) {
- if (v > 2 * m)
- return v;
+ if (v > 2 * m) return v;
return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
}
const int l = 8;
const int m = (1 << l) - 190;
const int v = vp10_read_literal(r, l - 1);
- return v < m ? v : (v << 1) - m + vp10_read_bit(r);
+ return v < m ? v : (v << 1) - m + vp10_read_bit(r);
}
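decode_uniform() above is a quasi-uniform code over 190 symbols: with l = 8 and m = 256 - 190 = 66, values 0..65 cost 7 bits, while v >= m reads one extra bit and decodes to (v << 1) - m + bit, covering 66..189 in 8 bits. Worked examples: v = 40 decodes to 40; v = 70 with a follow-up bit of 1 decodes to 2 * 70 - 66 + 1 = 75. The 190-symbol alphabet is exactly what decode_term_subexp() below needs: its three escape ranges cover deltas 0..63, and 64 + 190 = 254 matches the MAX_PROB - 1 entries of inv_map_table (assuming the usual MAX_PROB of 255).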
static int inv_remap_prob(int v, int m) {
static uint8_t inv_map_table[MAX_PROB - 1] = {
- 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189,
- 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60,
- 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76,
- 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92,
- 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189,
+ 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 125,
126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 138, 139, 140, 141,
142, 143, 144, 145, 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157,
}
static int decode_term_subexp(vp10_reader *r) {
- if (!vp10_read_bit(r))
- return vp10_read_literal(r, 4);
- if (!vp10_read_bit(r))
- return vp10_read_literal(r, 4) + 16;
- if (!vp10_read_bit(r))
- return vp10_read_literal(r, 5) + 32;
+ if (!vp10_read_bit(r)) return vp10_read_literal(r, 4);
+ if (!vp10_read_bit(r)) return vp10_read_literal(r, 4) + 16;
+ if (!vp10_read_bit(r)) return vp10_read_literal(r, 5) + 32;
return decode_uniform(r) + 64;
}
-void vp10_diff_update_prob(vp10_reader *r, vpx_prob* p) {
+void vp10_diff_update_prob(vp10_reader *r, vpx_prob *p) {
if (vp10_read(r, DIFF_UPDATE_PROB)) {
const int delp = decode_term_subexp(r);
*p = (vpx_prob)inv_remap_prob(delp, *p);
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_DECODER_DSUBEXP_H_
#define VP10_DECODER_DSUBEXP_H_
extern "C" {
#endif
-void vp10_diff_update_prob(vp10_reader *r, vpx_prob* p);
+void vp10_diff_update_prob(vp10_reader *r, vpx_prob *p);
#ifdef __cplusplus
} // extern "C"
// TODO(hkuang): Remove worker parameter as it is only used in debug code.
void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
- int row) {
+ int row) {
#if CONFIG_MULTITHREAD
- if (!ref_buf)
- return;
+ if (!ref_buf) return;
#ifndef BUILDING_WITH_TSAN
// The following line of code will trigger a harmless tsan error, but it is the key
}
void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
- VPxWorker *const src_worker) {
+ VPxWorker *const src_worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
vp10_frameworker_lock_stats(src_worker);
while (!src_worker_data->frame_context_ready) {
pthread_cond_wait(&src_worker_data->stats_cond,
- &src_worker_data->stats_mutex);
+ &src_worker_data->stats_mutex);
}
- dst_cm->last_frame_seg_map = src_cm->seg.enabled ?
- src_cm->current_frame_seg_map : src_cm->last_frame_seg_map;
+ dst_cm->last_frame_seg_map = src_cm->seg.enabled
+ ? src_cm->current_frame_seg_map
+ : src_cm->last_frame_seg_map;
dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
vp10_frameworker_unlock_stats(src_worker);
dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
#endif
#if CONFIG_EXT_REFS
- // TODO(zoeliu): To handle parallel decoding
+// TODO(zoeliu): To handle parallel decoding
#endif // CONFIG_EXT_REFS
- dst_cm->prev_frame = src_cm->show_existing_frame ?
- src_cm->prev_frame : src_cm->cur_frame;
- dst_cm->last_width = !src_cm->show_existing_frame ?
- src_cm->width : src_cm->last_width;
- dst_cm->last_height = !src_cm->show_existing_frame ?
- src_cm->height : src_cm->last_height;
+ dst_cm->prev_frame =
+ src_cm->show_existing_frame ? src_cm->prev_frame : src_cm->cur_frame;
+ dst_cm->last_width =
+ !src_cm->show_existing_frame ? src_cm->width : src_cm->last_width;
+ dst_cm->last_height =
+ !src_cm->show_existing_frame ? src_cm->height : src_cm->last_height;
dst_cm->subsampling_x = src_cm->subsampling_x;
dst_cm->subsampling_y = src_cm->subsampling_y;
dst_cm->frame_type = src_cm->frame_type;
- dst_cm->last_show_frame = !src_cm->show_existing_frame ?
- src_cm->show_frame : src_cm->last_show_frame;
+ dst_cm->last_show_frame = !src_cm->show_existing_frame
+ ? src_cm->show_frame
+ : src_cm->last_show_frame;
for (i = 0; i < REF_FRAMES; ++i)
dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i];
memcpy(dst_cm->frame_contexts, src_cm->frame_contexts,
FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0]));
#else
- (void) dst_worker;
- (void) src_worker;
+ (void)dst_worker;
+ (void)src_worker;
#endif // CONFIG_MULTITHREAD
}
// start decoding the next frame, so we need to check whether the worker is
// still decoding ref_buf.
void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
- int row);
+ int row);
// FrameWorker broadcasts its decoding progress so other workers that are
// waiting on it can resume decoding.
// Copy necessary decoding context from src worker to dst worker.
void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
- VPxWorker *const src_worker);
+ VPxWorker *const src_worker);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
#endif // VP10_DECODER_DTHREAD_H_
#include "vp10/vp10_iface_common.h"
struct vp10_extracfg {
- int cpu_used; // available cpu percentage in 1/16
- unsigned int enable_auto_alt_ref;
+ int cpu_used; // available cpu percentage in 1/16
+ unsigned int enable_auto_alt_ref;
#if CONFIG_EXT_REFS
- unsigned int enable_auto_bwd_ref;
+ unsigned int enable_auto_bwd_ref;
#endif // CONFIG_EXT_REFS
- unsigned int noise_sensitivity;
- unsigned int sharpness;
- unsigned int static_thresh;
- unsigned int tile_columns;
- unsigned int tile_rows;
- unsigned int arnr_max_frames;
- unsigned int arnr_strength;
- unsigned int min_gf_interval;
- unsigned int max_gf_interval;
- vpx_tune_metric tuning;
- unsigned int cq_level; // constrained quality level
- unsigned int rc_max_intra_bitrate_pct;
- unsigned int rc_max_inter_bitrate_pct;
- unsigned int gf_cbr_boost_pct;
- unsigned int lossless;
- unsigned int frame_parallel_decoding_mode;
- AQ_MODE aq_mode;
- unsigned int frame_periodic_boost;
- vpx_bit_depth_t bit_depth;
- vpx_tune_content content;
- vpx_color_space_t color_space;
- int color_range;
- int render_width;
- int render_height;
- vpx_superblock_size_t superblock_size;
+ unsigned int noise_sensitivity;
+ unsigned int sharpness;
+ unsigned int static_thresh;
+ unsigned int tile_columns;
+ unsigned int tile_rows;
+ unsigned int arnr_max_frames;
+ unsigned int arnr_strength;
+ unsigned int min_gf_interval;
+ unsigned int max_gf_interval;
+ vpx_tune_metric tuning;
+ unsigned int cq_level; // constrained quality level
+ unsigned int rc_max_intra_bitrate_pct;
+ unsigned int rc_max_inter_bitrate_pct;
+ unsigned int gf_cbr_boost_pct;
+ unsigned int lossless;
+ unsigned int frame_parallel_decoding_mode;
+ AQ_MODE aq_mode;
+ unsigned int frame_periodic_boost;
+ vpx_bit_depth_t bit_depth;
+ vpx_tune_content content;
+ vpx_color_space_t color_space;
+ int color_range;
+ int render_width;
+ int render_height;
+ vpx_superblock_size_t superblock_size;
};
static struct vp10_extracfg default_extra_cfg = {
- 0, // cpu_used
- 1, // enable_auto_alt_ref
+ 0, // cpu_used
+ 1, // enable_auto_alt_ref
#if CONFIG_EXT_REFS
- 0, // enable_auto_bwd_ref
+ 0, // enable_auto_bwd_ref
#endif // CONFIG_EXT_REFS
- 0, // noise_sensitivity
- 0, // sharpness
- 0, // static_thresh
+ 0, // noise_sensitivity
+ 0, // sharpness
+ 0, // static_thresh
#if CONFIG_EXT_TILE
- UINT_MAX, // tile_columns
- UINT_MAX, // tile_rows
+ UINT_MAX, // tile_columns
+ UINT_MAX, // tile_rows
#else
- 0, // tile_columns
- 0, // tile_rows
-#endif // CONFIG_EXT_TILE
- 7, // arnr_max_frames
- 5, // arnr_strength
- 0, // min_gf_interval; 0 -> default decision
- 0, // max_gf_interval; 0 -> default decision
- VPX_TUNE_PSNR, // tuning
- 10, // cq_level
- 0, // rc_max_intra_bitrate_pct
- 0, // rc_max_inter_bitrate_pct
- 0, // gf_cbr_boost_pct
- 0, // lossless
- 1, // frame_parallel_decoding_mode
- NO_AQ, // aq_mode
- 0, // frame_periodic_delta_q
- VPX_BITS_8, // Bit depth
- VPX_CONTENT_DEFAULT, // content
- VPX_CS_UNKNOWN, // color space
- 0, // color range
- 0, // render width
- 0, // render height
- VPX_SUPERBLOCK_SIZE_DYNAMIC // superblock_size
+ 0, // tile_columns
+ 0, // tile_rows
+#endif // CONFIG_EXT_TILE
+ 7, // arnr_max_frames
+ 5, // arnr_strength
+ 0, // min_gf_interval; 0 -> default decision
+ 0, // max_gf_interval; 0 -> default decision
+ VPX_TUNE_PSNR, // tuning
+ 10, // cq_level
+ 0, // rc_max_intra_bitrate_pct
+ 0, // rc_max_inter_bitrate_pct
+ 0, // gf_cbr_boost_pct
+ 0, // lossless
+ 1, // frame_parallel_decoding_mode
+ NO_AQ, // aq_mode
+  0,                           // frame_periodic_boost
+ VPX_BITS_8, // Bit depth
+ VPX_CONTENT_DEFAULT, // content
+ VPX_CS_UNKNOWN, // color space
+ 0, // color range
+ 0, // render width
+ 0, // render height
+ VPX_SUPERBLOCK_SIZE_DYNAMIC // superblock_size
};
struct vpx_codec_alg_priv {
- vpx_codec_priv_t base;
- vpx_codec_enc_cfg_t cfg;
- struct vp10_extracfg extra_cfg;
- VP10EncoderConfig oxcf;
- VP10_COMP *cpi;
- unsigned char *cx_data;
- size_t cx_data_sz;
- unsigned char *pending_cx_data;
- size_t pending_cx_data_sz;
- int pending_frame_count;
- size_t pending_frame_sizes[8];
- vpx_image_t preview_img;
- vpx_enc_frame_flags_t next_frame_flags;
- vp8_postproc_cfg_t preview_ppcfg;
+ vpx_codec_priv_t base;
+ vpx_codec_enc_cfg_t cfg;
+ struct vp10_extracfg extra_cfg;
+ VP10EncoderConfig oxcf;
+ VP10_COMP *cpi;
+ unsigned char *cx_data;
+ size_t cx_data_sz;
+ unsigned char *pending_cx_data;
+ size_t pending_cx_data_sz;
+ int pending_frame_count;
+ size_t pending_frame_sizes[8];
+ vpx_image_t preview_img;
+ vpx_enc_frame_flags_t next_frame_flags;
+ vp8_postproc_cfg_t preview_ppcfg;
vpx_codec_pkt_list_decl(256) pkt_list;
- unsigned int fixed_kf_cntr;
+ unsigned int fixed_kf_cntr;
// BufferPool that holds all reference frames.
- BufferPool *buffer_pool;
+ BufferPool *buffer_pool;
};
-static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
- const struct vpx_internal_error_info *error) {
+static vpx_codec_err_t update_error_state(
+ vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
const vpx_codec_err_t res = error->error_code;
if (res != VPX_CODEC_OK)
return res;
}
-
#undef ERROR
-#define ERROR(str) do {\
- ctx->base.err_detail = str;\
- return VPX_CODEC_INVALID_PARAM;\
+#define ERROR(str) \
+ do { \
+ ctx->base.err_detail = str; \
+ return VPX_CODEC_INVALID_PARAM; \
} while (0)
-#define RANGE_CHECK(p, memb, lo, hi) do {\
+#define RANGE_CHECK(p, memb, lo, hi) \
+ do { \
if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \
- ERROR(#memb " out of range ["#lo".."#hi"]");\
+ ERROR(#memb " out of range [" #lo ".." #hi "]"); \
} while (0)
-#define RANGE_CHECK_HI(p, memb, hi) do {\
- if (!((p)->memb <= (hi))) \
- ERROR(#memb " out of range [.."#hi"]");\
+#define RANGE_CHECK_HI(p, memb, hi) \
+ do { \
+ if (!((p)->memb <= (hi))) ERROR(#memb " out of range [.." #hi "]"); \
} while (0)
-#define RANGE_CHECK_LO(p, memb, lo) do {\
- if (!((p)->memb >= (lo))) \
- ERROR(#memb " out of range ["#lo"..]");\
+#define RANGE_CHECK_LO(p, memb, lo) \
+ do { \
+ if (!((p)->memb >= (lo))) ERROR(#memb " out of range [" #lo "..]"); \
} while (0)
-#define RANGE_CHECK_BOOL(p, memb) do {\
- if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean");\
+#define RANGE_CHECK_BOOL(p, memb) \
+ do { \
+ if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean"); \
} while (0)
static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
const vpx_codec_enc_cfg_t *cfg,
const struct vp10_extracfg *extra_cfg) {
- RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available
- RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available
- RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
- RANGE_CHECK(cfg, g_timebase.num, 1, cfg->g_timebase.den);
- RANGE_CHECK_HI(cfg, g_profile, 3);
-
- RANGE_CHECK_HI(cfg, rc_max_quantizer, 63);
- RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer);
+ RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available
+ RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available
+ RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
+ RANGE_CHECK(cfg, g_timebase.num, 1, cfg->g_timebase.den);
+ RANGE_CHECK_HI(cfg, g_profile, 3);
+
+ RANGE_CHECK_HI(cfg, rc_max_quantizer, 63);
+ RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer);
RANGE_CHECK_BOOL(extra_cfg, lossless);
- RANGE_CHECK(extra_cfg, aq_mode, 0, AQ_MODE_COUNT - 1);
+ RANGE_CHECK(extra_cfg, aq_mode, 0, AQ_MODE_COUNT - 1);
RANGE_CHECK(extra_cfg, frame_periodic_boost, 0, 1);
- RANGE_CHECK_HI(cfg, g_threads, 64);
- RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
- RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
- RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100);
- RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100);
+ RANGE_CHECK_HI(cfg, g_threads, 64);
+ RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS);
+ RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q);
+ RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100);
+ RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100);
RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100);
- RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
- RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
- RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
- RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
+ RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO);
+ RANGE_CHECK_BOOL(cfg, rc_resize_allowed);
+ RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100);
+ RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100);
RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100);
- RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
+ RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS);
RANGE_CHECK(extra_cfg, min_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
RANGE_CHECK(extra_cfg, max_gf_interval, 0, (MAX_LAG_BUFFERS - 1));
if (extra_cfg->max_gf_interval > 0) {
}
if (extra_cfg->min_gf_interval > 0 && extra_cfg->max_gf_interval > 0) {
RANGE_CHECK(extra_cfg, max_gf_interval, extra_cfg->min_gf_interval,
- (MAX_LAG_BUFFERS - 1));
+ (MAX_LAG_BUFFERS - 1));
}
if (cfg->rc_resize_allowed == 1) {
// VP9 does not support a lower bound on the keyframe interval in
// automatic keyframe placement mode.
- if (cfg->kf_mode != VPX_KF_DISABLED &&
- cfg->kf_min_dist != cfg->kf_max_dist &&
+ if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
cfg->kf_min_dist > 0)
- ERROR("kf_min_dist not supported in auto mode, use 0 "
- "or kf_max_dist instead.");
+ ERROR(
+ "kf_min_dist not supported in auto mode, use 0 "
+ "or kf_max_dist instead.");
RANGE_CHECK(extra_cfg, enable_auto_alt_ref, 0, 2);
#if CONFIG_EXT_REFS
#endif // CONFIG_EXT_REFS
RANGE_CHECK(extra_cfg, cpu_used, -8, 8);
RANGE_CHECK_HI(extra_cfg, noise_sensitivity, 6);
- RANGE_CHECK(extra_cfg, superblock_size,
- VPX_SUPERBLOCK_SIZE_64X64, VPX_SUPERBLOCK_SIZE_DYNAMIC);
+ RANGE_CHECK(extra_cfg, superblock_size, VPX_SUPERBLOCK_SIZE_64X64,
+ VPX_SUPERBLOCK_SIZE_DYNAMIC);
#if CONFIG_EXT_TILE
- // TODO(any): Waring. If CONFIG_EXT_TILE is true, tile_columns really
- // means tile_width, and tile_rows really means tile_hight. The interface
- // should be sanitized.
+// TODO(any): Warning. If CONFIG_EXT_TILE is true, tile_columns really
+// means tile_width, and tile_rows really means tile_height. The interface
+// should be sanitized.
#if CONFIG_EXT_PARTITION
if (extra_cfg->superblock_size != VPX_SUPERBLOCK_SIZE_64X64) {
if (extra_cfg->tile_columns != UINT_MAX)
RANGE_CHECK(extra_cfg, cq_level, 0, 63);
RANGE_CHECK(cfg, g_bit_depth, VPX_BITS_8, VPX_BITS_12);
RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
- RANGE_CHECK(extra_cfg, content,
- VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
+ RANGE_CHECK(extra_cfg, content, VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
- // TODO(yaowu): remove this when ssim tuning is implemented for vp9
+ // TODO(yaowu): remove this when ssim tuning is implemented for vp10
if (extra_cfg->tuning == VPX_TUNE_SSIM)
- ERROR("Option --tune=ssim is not currently supported in VP9.");
+ ERROR("Option --tune=ssim is not currently supported in VP10.");
if (cfg->g_pass == VPX_RC_LAST_PASS) {
const size_t packet_sz = sizeof(FIRSTPASS_STATS);
cfg->g_bit_depth > VPX_BITS_8) {
ERROR("Codec high bit-depth not supported in profile < 2");
}
- if (cfg->g_profile <= (unsigned int)PROFILE_1 &&
- cfg->g_input_bit_depth > 8) {
+ if (cfg->g_profile <= (unsigned int)PROFILE_1 && cfg->g_input_bit_depth > 8) {
ERROR("Source high bit-depth not supported in profile < 2");
}
if (cfg->g_profile > (unsigned int)PROFILE_1 &&
switch (img->fmt) {
case VPX_IMG_FMT_YV12:
case VPX_IMG_FMT_I420:
- case VPX_IMG_FMT_I42016:
- break;
+ case VPX_IMG_FMT_I42016: break;
case VPX_IMG_FMT_I422:
case VPX_IMG_FMT_I444:
case VPX_IMG_FMT_I440:
if (ctx->cfg.g_profile != (unsigned int)PROFILE_1) {
- ERROR("Invalid image format. I422, I444, I440 images are "
- "not supported in profile.");
+ ERROR(
+ "Invalid image format. I422, I444, I440 images are "
+ "not supported in profile.");
}
break;
case VPX_IMG_FMT_I42216:
case VPX_IMG_FMT_I44016:
if (ctx->cfg.g_profile != (unsigned int)PROFILE_1 &&
ctx->cfg.g_profile != (unsigned int)PROFILE_3) {
- ERROR("Invalid image format. 16-bit I422, I444, I440 images are "
- "not supported in profile.");
+ ERROR(
+ "Invalid image format. 16-bit I422, I444, I440 images are "
+ "not supported in profile.");
}
break;
default:
- ERROR("Invalid image format. Only YV12, I420, I422, I444 images are "
- "supported.");
+ ERROR(
+ "Invalid image format. Only YV12, I420, I422, I444 images are "
+ "supported.");
break;
}
}
static vpx_codec_err_t set_encoder_config(
- VP10EncoderConfig *oxcf,
- const vpx_codec_enc_cfg_t *cfg,
- const struct vp10_extracfg *extra_cfg) {
+ VP10EncoderConfig *oxcf, const vpx_codec_enc_cfg_t *cfg,
+ const struct vp10_extracfg *extra_cfg) {
const int is_vbr = cfg->rc_end_usage == VPX_VBR;
oxcf->profile = cfg->g_profile;
oxcf->max_threads = (int)cfg->g_threads;
- oxcf->width = cfg->g_w;
- oxcf->height = cfg->g_h;
+ oxcf->width = cfg->g_w;
+ oxcf->height = cfg->g_h;
oxcf->bit_depth = cfg->g_bit_depth;
oxcf->input_bit_depth = cfg->g_input_bit_depth;
// guess a frame rate if out of whack, use 30
oxcf->init_framerate = (double)cfg->g_timebase.den / cfg->g_timebase.num;
- if (oxcf->init_framerate > 180)
- oxcf->init_framerate = 30;
+ if (oxcf->init_framerate > 180) oxcf->init_framerate = 30;
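For example, a stream whose timebase is a 90 kHz clock ({1, 90000}) would yield an initial guess of 90000 fps here; anything above 180 is treated as bogus and replaced with 30 until actual frame timestamps refine the rate.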
oxcf->mode = GOOD;
switch (cfg->g_pass) {
- case VPX_RC_ONE_PASS:
- oxcf->pass = 0;
- break;
- case VPX_RC_FIRST_PASS:
- oxcf->pass = 1;
- break;
- case VPX_RC_LAST_PASS:
- oxcf->pass = 2;
- break;
+ case VPX_RC_ONE_PASS: oxcf->pass = 0; break;
+ case VPX_RC_FIRST_PASS: oxcf->pass = 1; break;
+ case VPX_RC_LAST_PASS: oxcf->pass = 2; break;
}
- oxcf->lag_in_frames = cfg->g_pass == VPX_RC_FIRST_PASS ? 0
- : cfg->g_lag_in_frames;
+ oxcf->lag_in_frames =
+ cfg->g_pass == VPX_RC_FIRST_PASS ? 0 : cfg->g_lag_in_frames;
oxcf->rc_mode = cfg->rc_end_usage;
// Convert target bandwidth from Kbit/s to Bit/s
extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_min_quantizer);
oxcf->worst_allowed_q =
extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_max_quantizer);
- oxcf->cq_level = vp10_quantizer_to_qindex(extra_cfg->cq_level);
+ oxcf->cq_level = vp10_quantizer_to_qindex(extra_cfg->cq_level);
oxcf->fixed_q = -1;
- oxcf->under_shoot_pct = cfg->rc_undershoot_pct;
- oxcf->over_shoot_pct = cfg->rc_overshoot_pct;
+ oxcf->under_shoot_pct = cfg->rc_undershoot_pct;
+ oxcf->over_shoot_pct = cfg->rc_overshoot_pct;
- oxcf->scaled_frame_width = cfg->rc_scaled_width;
+ oxcf->scaled_frame_width = cfg->rc_scaled_width;
oxcf->scaled_frame_height = cfg->rc_scaled_height;
if (cfg->rc_resize_allowed == 1) {
oxcf->resize_mode =
- (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0) ?
- RESIZE_DYNAMIC : RESIZE_FIXED;
+ (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0)
+ ? RESIZE_DYNAMIC
+ : RESIZE_FIXED;
} else {
oxcf->resize_mode = RESIZE_NONE;
}
- oxcf->maximum_buffer_size_ms = is_vbr ? 240000 : cfg->rc_buf_sz;
+ oxcf->maximum_buffer_size_ms = is_vbr ? 240000 : cfg->rc_buf_sz;
oxcf->starting_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_initial_sz;
- oxcf->optimal_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_optimal_sz;
+ oxcf->optimal_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_optimal_sz;
- oxcf->drop_frames_water_mark = cfg->rc_dropframe_thresh;
+ oxcf->drop_frames_water_mark = cfg->rc_dropframe_thresh;
- oxcf->two_pass_vbrbias = cfg->rc_2pass_vbr_bias_pct;
- oxcf->two_pass_vbrmin_section = cfg->rc_2pass_vbr_minsection_pct;
- oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct;
+ oxcf->two_pass_vbrbias = cfg->rc_2pass_vbr_bias_pct;
+ oxcf->two_pass_vbrmin_section = cfg->rc_2pass_vbr_minsection_pct;
+ oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct;
- oxcf->auto_key = cfg->kf_mode == VPX_KF_AUTO &&
- cfg->kf_min_dist != cfg->kf_max_dist;
+ oxcf->auto_key =
+ cfg->kf_mode == VPX_KF_AUTO && cfg->kf_min_dist != cfg->kf_max_dist;
- oxcf->key_freq = cfg->kf_max_dist;
+ oxcf->key_freq = cfg->kf_max_dist;
- oxcf->speed = abs(extra_cfg->cpu_used);
- oxcf->encode_breakout = extra_cfg->static_thresh;
- oxcf->enable_auto_arf = extra_cfg->enable_auto_alt_ref;
+ oxcf->speed = abs(extra_cfg->cpu_used);
+ oxcf->encode_breakout = extra_cfg->static_thresh;
+ oxcf->enable_auto_arf = extra_cfg->enable_auto_alt_ref;
#if CONFIG_EXT_REFS
- oxcf->enable_auto_brf = extra_cfg->enable_auto_bwd_ref;
+ oxcf->enable_auto_brf = extra_cfg->enable_auto_bwd_ref;
#endif // CONFIG_EXT_REFS
- oxcf->noise_sensitivity = extra_cfg->noise_sensitivity;
- oxcf->sharpness = extra_cfg->sharpness;
+ oxcf->noise_sensitivity = extra_cfg->noise_sensitivity;
+ oxcf->sharpness = extra_cfg->sharpness;
- oxcf->two_pass_stats_in = cfg->rc_twopass_stats_in;
+ oxcf->two_pass_stats_in = cfg->rc_twopass_stats_in;
#if CONFIG_FP_MB_STATS
- oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in;
+ oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in;
#endif
oxcf->color_space = extra_cfg->color_space;
oxcf->color_range = extra_cfg->color_range;
- oxcf->render_width = extra_cfg->render_width;
+ oxcf->render_width = extra_cfg->render_width;
oxcf->render_height = extra_cfg->render_height;
oxcf->arnr_max_frames = extra_cfg->arnr_max_frames;
- oxcf->arnr_strength = extra_cfg->arnr_strength;
+ oxcf->arnr_strength = extra_cfg->arnr_strength;
oxcf->min_gf_interval = extra_cfg->min_gf_interval;
oxcf->max_gf_interval = extra_cfg->max_gf_interval;
{
#if CONFIG_EXT_PARTITION
const unsigned int max =
- extra_cfg->superblock_size == VPX_SUPERBLOCK_SIZE_64X64 ? 64 : 32;
+ extra_cfg->superblock_size == VPX_SUPERBLOCK_SIZE_64X64 ? 64 : 32;
#else
const unsigned int max = 64;
#endif // CONFIG_EXT_PARTITION
oxcf->tile_columns = VPXMIN(extra_cfg->tile_columns, max);
- oxcf->tile_rows = VPXMIN(extra_cfg->tile_rows, max);
+ oxcf->tile_rows = VPXMIN(extra_cfg->tile_rows, max);
}
#else
oxcf->tile_columns = extra_cfg->tile_columns;
- oxcf->tile_rows = extra_cfg->tile_rows;
+ oxcf->tile_rows = extra_cfg->tile_rows;
#endif // CONFIG_EXT_TILE
- oxcf->error_resilient_mode = cfg->g_error_resilient;
+ oxcf->error_resilient_mode = cfg->g_error_resilient;
oxcf->frame_parallel_decoding_mode = extra_cfg->frame_parallel_decoding_mode;
oxcf->aq_mode = extra_cfg->aq_mode;
- oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
+ oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
/*
printf("Current VP9 Settings: \n");
}
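For orientation, the cfg and extra_cfg values mapped above all originate from the public encoder API. A minimal usage sketch, assuming this build exposes the encoder iface as vpx_codec_vp10_cx() and declares it in vpx/vp8cx.h:

  #include "vpx/vp8cx.h"
  #include "vpx/vpx_encoder.h"

  static vpx_codec_err_t open_vp10_encoder(vpx_codec_ctx_t *codec) {
    vpx_codec_enc_cfg_t cfg;
    vpx_codec_err_t res =
        vpx_codec_enc_config_default(vpx_codec_vp10_cx(), &cfg, 0);
    if (res != VPX_CODEC_OK) return res;
    cfg.g_w = 1280;
    cfg.g_h = 720;
    cfg.g_timebase.num = 1; /* 30 fps timebase */
    cfg.g_timebase.den = 30;
    cfg.rc_end_usage = VPX_VBR;   /* selects the is_vbr buffer defaults above */
    cfg.rc_target_bitrate = 1000; /* kbit/s; converted to bit/s internally */
    return vpx_codec_enc_init(codec, vpx_codec_vp10_cx(), &cfg, 0);
  }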
static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
- const vpx_codec_enc_cfg_t *cfg) {
+ const vpx_codec_enc_cfg_t *cfg) {
vpx_codec_err_t res;
int force_key = 0;
vp10_change_config(ctx->cpi, &ctx->oxcf);
}
- if (force_key)
- ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
+ if (force_key) ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
return res;
}
static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const arg = va_arg(args, int *);
- if (arg == NULL)
- return VPX_CODEC_INVALID_PARAM;
+ if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
*arg = vp10_get_quantizer(ctx->cpi);
return VPX_CODEC_OK;
}
static vpx_codec_err_t ctrl_get_quantizer64(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const arg = va_arg(args, int *);
- if (arg == NULL)
- return VPX_CODEC_INVALID_PARAM;
+ if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
*arg = vp10_qindex_to_quantizer(vp10_get_quantizer(ctx->cpi));
return VPX_CODEC_OK;
}
return update_extra_cfg(ctx, &extra_cfg);
}
-static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(
- vpx_codec_alg_priv_t *ctx, va_list args) {
+static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
struct vp10_extracfg extra_cfg = ctx->extra_cfg;
- extra_cfg.gf_cbr_boost_pct =
- CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
+ extra_cfg.gf_cbr_boost_pct = CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
return update_extra_cfg(ctx, &extra_cfg);
}
if (ctx->priv == NULL) {
vpx_codec_alg_priv_t *const priv = vpx_calloc(1, sizeof(*priv));
- if (priv == NULL)
- return VPX_CODEC_MEM_ERROR;
+ if (priv == NULL) return VPX_CODEC_MEM_ERROR;
ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
ctx->priv->enc.total_encoders = 1;
- priv->buffer_pool =
- (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
- if (priv->buffer_pool == NULL)
- return VPX_CODEC_MEM_ERROR;
+ priv->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
+ if (priv->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
#if CONFIG_MULTITHREAD
if (pthread_mutex_init(&priv->buffer_pool->pool_mutex, NULL)) {
// Convert duration parameter from stream timebase to microseconds.
const uint64_t duration_us = (uint64_t)duration * 1000000 *
- (uint64_t)cfg->g_timebase.num /(uint64_t)cfg->g_timebase.den;
+ (uint64_t)cfg->g_timebase.num /
+ (uint64_t)cfg->g_timebase.den;
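With the default timebase of {1, 30}, for instance, a duration of one tick converts to 1 * 1000000 * 1 / 30 = 33333 microseconds, i.e. one frame interval at 30 fps.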
// If the deadline is more than the duration this frame is to be shown,
// use good quality mode. Otherwise use realtime mode.
new_mode = BEST;
}
break;
- case VPX_RC_FIRST_PASS:
- break;
- case VPX_RC_LAST_PASS:
- new_mode = deadline > 0 ? GOOD : BEST;
- break;
+ case VPX_RC_FIRST_PASS: break;
+ case VPX_RC_LAST_PASS: new_mode = deadline > 0 ? GOOD : BEST; break;
}
if (ctx->oxcf.mode != new_mode) {
// Add the number of frames to the marker byte
marker |= ctx->pending_frame_count - 1;
for (i = 0; i < ctx->pending_frame_count - 1; i++) {
- const size_t frame_sz = (unsigned int) ctx->pending_frame_sizes[i] - 1;
+ const size_t frame_sz = (unsigned int)ctx->pending_frame_sizes[i] - 1;
max_frame_sz = frame_sz > max_frame_sz ? frame_sz : max_frame_sz;
}
// Choose the magnitude
for (mag = 0, mask = 0xff; mag < 4; mag++) {
- if (max_frame_sz <= mask)
- break;
+ if (max_frame_sz <= mask) break;
mask <<= 8;
mask |= 0xff;
}
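The marker and mag computed here feed the superframe trailer appended after the packed frames. A minimal sketch of that layout, assuming the conventional VP9-style marker encoding (the real writer is write_superframe_index(), and each size is stored minus one, as in the loop above):

  /* Sketch only: VP9-style superframe index layout assumed. */
  static size_t sketch_superframe_index(uint8_t *dst, const size_t *sizes,
                                        int frames, int mag /* 0..3 */) {
    const uint8_t marker = (uint8_t)(0xc0 | (mag << 3) | (frames - 1));
    uint8_t *x = dst;
    int i, b;
    *x++ = marker; /* leading marker byte */
    for (i = 0; i < frames; ++i) {
      size_t sz = sizes[i] - 1; /* sizes stored minus one */
      for (b = 0; b <= mag; ++b) { /* mag + 1 little-endian bytes per size */
        *x++ = (uint8_t)(sz & 0xff);
        sz >>= 8;
      }
    }
    *x++ = marker; /* trailing marker byte */
    return (size_t)(x - dst);
  }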
unsigned int lib_flags) {
vpx_codec_frame_flags_t flags = lib_flags << 16;
- if (lib_flags & FRAMEFLAGS_KEY)
- flags |= VPX_FRAME_IS_KEY;
+ if (lib_flags & FRAMEFLAGS_KEY) flags |= VPX_FRAME_IS_KEY;
- if (cpi->droppable)
- flags |= VPX_FRAME_IS_DROPPABLE;
+ if (cpi->droppable) flags |= VPX_FRAME_IS_DROPPABLE;
return flags;
}
-static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
+static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx,
const vpx_image_t *img,
vpx_codec_pts_t pts,
unsigned long duration,
data_sz = ctx->cfg.g_w * ctx->cfg.g_h * get_image_bps(img) / 8 *
(cpi->multi_arf_allowed ? 8 : 2);
#endif // CONFIG_EXT_REFS
- if (data_sz < 4096)
- data_sz = 4096;
+ if (data_sz < 4096) data_sz = 4096;
if (ctx->cx_data == NULL || ctx->cx_data_sz < data_sz) {
ctx->cx_data_sz = data_sz;
free(ctx->cx_data);
- ctx->cx_data = (unsigned char*)malloc(ctx->cx_data_sz);
+ ctx->cx_data = (unsigned char *)malloc(ctx->cx_data_sz);
if (ctx->cx_data == NULL) {
return VPX_CODEC_MEM_ERROR;
}
// Handle Flags
if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) ||
- ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
+ ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) {
ctx->base.err_detail = "Conflicting flags.";
return VPX_CODEC_INVALID_PARAM;
}
unsigned char *cx_data;
// Set up internal flags
- if (ctx->base.init_flags & VPX_CODEC_USE_PSNR)
- cpi->b_calculate_psnr = 1;
+ if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;
if (img != NULL) {
res = image2yuvconfig(img, &sd);
// Store the original flags into the frame buffer. Will extract the
// key frame flag when we actually encode this frame.
- if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags,
- &sd, dst_time_stamp, dst_end_time_stamp)) {
+ if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
+ dst_time_stamp, dst_end_time_stamp)) {
res = update_error_state(ctx, &cpi->common.error);
}
ctx->next_frame_flags = 0;
}
while (cx_data_sz >= ctx->cx_data_sz / 2 &&
- -1 != vp10_get_compressed_data(cpi, &lib_flags, &size,
- cx_data, &dst_time_stamp,
- &dst_end_time_stamp, !img)) {
+ -1 != vp10_get_compressed_data(cpi, &lib_flags, &size, cx_data,
+ &dst_time_stamp, &dst_end_time_stamp,
+ !img)) {
if (size) {
vpx_codec_cx_pkt_t pkt;
// Pack invisible frames with the next visible frame
if (!cpi->common.show_frame) {
- if (ctx->pending_cx_data == 0)
- ctx->pending_cx_data = cx_data;
+ if (ctx->pending_cx_data == 0) ctx->pending_cx_data = cx_data;
ctx->pending_cx_data_sz += size;
ctx->pending_frame_sizes[ctx->pending_frame_count++] = size;
cx_data += size;
// Add the frame packet to the list of returned packets.
pkt.kind = VPX_CODEC_CX_FRAME_PKT;
pkt.data.frame.pts = ticks_to_timebase_units(timebase, dst_time_stamp);
- pkt.data.frame.duration =
- (unsigned long)ticks_to_timebase_units(timebase,
- dst_end_time_stamp - dst_time_stamp);
+ pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units(
+ timebase, dst_end_time_stamp - dst_time_stamp);
pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags);
if (ctx->pending_cx_data) {
ctx->pending_cx_data_sz += size;
size += write_superframe_index(ctx);
pkt.data.frame.buf = ctx->pending_cx_data;
- pkt.data.frame.sz = ctx->pending_cx_data_sz;
+ pkt.data.frame.sz = ctx->pending_cx_data_sz;
ctx->pending_cx_data = NULL;
ctx->pending_cx_data_sz = 0;
ctx->pending_frame_count = 0;
} else {
pkt.data.frame.buf = cx_data;
- pkt.data.frame.sz = size;
+ pkt.data.frame.sz = size;
}
pkt.data.frame.partition_id = -1;
YV12_BUFFER_CONFIG sd;
image2yuvconfig(&frame->img, &sd);
- vp10_set_reference_enc(ctx->cpi, ref_frame_to_vp10_reframe(frame->frame_type),
- &sd);
+ vp10_set_reference_enc(ctx->cpi,
+ ref_frame_to_vp10_reframe(frame->frame_type), &sd);
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
image2yuvconfig(&frame->img, &sd);
vp10_copy_reference_enc(ctx->cpi,
- ref_frame_to_vp10_reframe(frame->frame_type), &sd);
+ ref_frame_to_vp10_reframe(frame->frame_type), &sd);
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
return VPX_CODEC_INCAPABLE;
}
-
static vpx_image_t *encoder_get_preview(vpx_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
return VPX_CODEC_INVALID_PARAM;
}
-
static vpx_codec_err_t ctrl_set_active_map(vpx_codec_alg_priv_t *ctx,
va_list args) {
vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
if (map) {
- if (!vp10_set_active_map(ctx->cpi, map->active_map,
- (int)map->rows, (int)map->cols))
+ if (!vp10_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
+ (int)map->cols))
return VPX_CODEC_OK;
else
return VPX_CODEC_INVALID_PARAM;
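Callers drive this handler through vpx_codec_control() with an application-owned map. A short sketch, assuming rows/cols are given in macroblock units:

  #include <string.h>
  #include "vpx/vp8cx.h"
  #include "vpx/vpx_encoder.h"

  static vpx_codec_err_t enable_all_blocks(vpx_codec_ctx_t *codec,
                                           unsigned int rows,
                                           unsigned int cols,
                                           unsigned char *buf /* rows*cols */) {
    vpx_active_map_t map;
    map.rows = rows;
    map.cols = cols;
    map.active_map = buf;
    memset(buf, 1, rows * cols); /* nonzero marks a block active, 0 inactive */
    return vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map);
  }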
vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
if (map) {
- if (!vp10_get_active_map(ctx->cpi, map->active_map,
- (int)map->rows, (int)map->cols))
+ if (!vp10_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
+ (int)map->cols))
return VPX_CODEC_OK;
else
return VPX_CODEC_INVALID_PARAM;
vpx_scaling_mode_t *const mode = va_arg(args, vpx_scaling_mode_t *);
if (mode) {
- const int res = vp10_set_internal_size(ctx->cpi,
- (VPX_SCALING)mode->h_scaling_mode,
- (VPX_SCALING)mode->v_scaling_mode);
+ const int res =
+ vp10_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
+ (VPX_SCALING)mode->v_scaling_mode);
return (res == 0) ? VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM;
} else {
return VPX_CODEC_INVALID_PARAM;
va_list args) {
struct vp10_extracfg extra_cfg = ctx->extra_cfg;
int *const render_size = va_arg(args, int *);
- extra_cfg.render_width = render_size[0];
+ extra_cfg.render_width = render_size[0];
extra_cfg.render_height = render_size[1];
return update_extra_cfg(ctx, &extra_cfg);
}
static vpx_codec_err_t ctrl_set_superblock_size(vpx_codec_alg_priv_t *ctx,
- va_list args) {
+ va_list args) {
struct vp10_extracfg extra_cfg = ctx->extra_cfg;
extra_cfg.superblock_size = CAST(VP10E_SET_SUPERBLOCK_SIZE, args);
return update_extra_cfg(ctx, &extra_cfg);
}
static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
- {VP8_COPY_REFERENCE, ctrl_copy_reference},
- {VP8E_USE_REFERENCE, ctrl_use_reference},
+ { VP8_COPY_REFERENCE, ctrl_copy_reference },
+ { VP8E_USE_REFERENCE, ctrl_use_reference },
// Setters
- {VP8_SET_REFERENCE, ctrl_set_reference},
- {VP8_SET_POSTPROC, ctrl_set_previewpp},
- {VP8E_SET_ROI_MAP, ctrl_set_roi_map},
- {VP8E_SET_ACTIVEMAP, ctrl_set_active_map},
- {VP8E_SET_SCALEMODE, ctrl_set_scale_mode},
- {VP8E_SET_CPUUSED, ctrl_set_cpuused},
- {VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref},
+ { VP8_SET_REFERENCE, ctrl_set_reference },
+ { VP8_SET_POSTPROC, ctrl_set_previewpp },
+ { VP8E_SET_ROI_MAP, ctrl_set_roi_map },
+ { VP8E_SET_ACTIVEMAP, ctrl_set_active_map },
+ { VP8E_SET_SCALEMODE, ctrl_set_scale_mode },
+ { VP8E_SET_CPUUSED, ctrl_set_cpuused },
+ { VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref },
#if CONFIG_EXT_REFS
- {VP8E_SET_ENABLEAUTOBWDREF, ctrl_set_enable_auto_bwd_ref},
+ { VP8E_SET_ENABLEAUTOBWDREF, ctrl_set_enable_auto_bwd_ref },
#endif // CONFIG_EXT_REFS
- {VP8E_SET_SHARPNESS, ctrl_set_sharpness},
- {VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh},
- {VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns},
- {VP9E_SET_TILE_ROWS, ctrl_set_tile_rows},
- {VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames},
- {VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength},
- {VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type},
- {VP8E_SET_TUNING, ctrl_set_tuning},
- {VP8E_SET_CQ_LEVEL, ctrl_set_cq_level},
- {VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct},
- {VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct},
- {VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct},
- {VP9E_SET_LOSSLESS, ctrl_set_lossless},
- {VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode},
- {VP9E_SET_AQ_MODE, ctrl_set_aq_mode},
- {VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost},
- {VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content},
- {VP9E_SET_COLOR_SPACE, ctrl_set_color_space},
- {VP9E_SET_COLOR_RANGE, ctrl_set_color_range},
- {VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity},
- {VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval},
- {VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval},
- {VP9E_SET_RENDER_SIZE, ctrl_set_render_size},
- {VP10E_SET_SUPERBLOCK_SIZE, ctrl_set_superblock_size},
+ { VP8E_SET_SHARPNESS, ctrl_set_sharpness },
+ { VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh },
+ { VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns },
+ { VP9E_SET_TILE_ROWS, ctrl_set_tile_rows },
+ { VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames },
+ { VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength },
+ { VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type },
+ { VP8E_SET_TUNING, ctrl_set_tuning },
+ { VP8E_SET_CQ_LEVEL, ctrl_set_cq_level },
+ { VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct },
+ { VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct },
+ { VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct },
+ { VP9E_SET_LOSSLESS, ctrl_set_lossless },
+ { VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode },
+ { VP9E_SET_AQ_MODE, ctrl_set_aq_mode },
+ { VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost },
+ { VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content },
+ { VP9E_SET_COLOR_SPACE, ctrl_set_color_space },
+ { VP9E_SET_COLOR_RANGE, ctrl_set_color_range },
+ { VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity },
+ { VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval },
+ { VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval },
+ { VP9E_SET_RENDER_SIZE, ctrl_set_render_size },
+ { VP10E_SET_SUPERBLOCK_SIZE, ctrl_set_superblock_size },
// Getters
- {VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer},
- {VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64},
- {VP9_GET_REFERENCE, ctrl_get_reference},
- {VP9E_GET_ACTIVEMAP, ctrl_get_active_map},
- {VP10_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image},
+ { VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer },
+ { VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64 },
+ { VP9_GET_REFERENCE, ctrl_get_reference },
+ { VP9E_GET_ACTIVEMAP, ctrl_get_active_map },
+ { VP10_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
- { -1, NULL},
+ { -1, NULL },
};
static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
- {
- 0,
- { // NOLINT
- 0, // g_usage
- 8, // g_threads
- 0, // g_profile
-
- 320, // g_width
- 240, // g_height
- VPX_BITS_8, // g_bit_depth
- 8, // g_input_bit_depth
-
- {1, 30}, // g_timebase
-
- 0, // g_error_resilient
-
- VPX_RC_ONE_PASS, // g_pass
-
- 25, // g_lag_in_frames
-
- 0, // rc_dropframe_thresh
- 0, // rc_resize_allowed
- 0, // rc_scaled_width
- 0, // rc_scaled_height
- 60, // rc_resize_down_thresold
- 30, // rc_resize_up_thresold
-
- VPX_VBR, // rc_end_usage
- {NULL, 0}, // rc_twopass_stats_in
- {NULL, 0}, // rc_firstpass_mb_stats_in
- 256, // rc_target_bandwidth
- 0, // rc_min_quantizer
- 63, // rc_max_quantizer
- 25, // rc_undershoot_pct
- 25, // rc_overshoot_pct
-
- 6000, // rc_max_buffer_size
- 4000, // rc_buffer_initial_size
- 5000, // rc_buffer_optimal_size
-
- 50, // rc_two_pass_vbrbias
- 0, // rc_two_pass_vbrmin_section
- 2000, // rc_two_pass_vbrmax_section
-
- // keyframing settings (kf)
- VPX_KF_AUTO, // g_kfmode
- 0, // kf_min_dist
- 9999, // kf_max_dist
- }
- },
+ { 0,
+ {
+ // NOLINT
+ 0, // g_usage
+ 8, // g_threads
+ 0, // g_profile
+
+ 320, // g_width
+ 240, // g_height
+ VPX_BITS_8, // g_bit_depth
+ 8, // g_input_bit_depth
+
+ { 1, 30 }, // g_timebase
+
+ 0, // g_error_resilient
+
+ VPX_RC_ONE_PASS, // g_pass
+
+ 25, // g_lag_in_frames
+
+ 0, // rc_dropframe_thresh
+ 0, // rc_resize_allowed
+ 0, // rc_scaled_width
+ 0, // rc_scaled_height
+ 60, // rc_resize_down_threshold
+ 30, // rc_resize_up_threshold
+
+ VPX_VBR, // rc_end_usage
+ { NULL, 0 }, // rc_twopass_stats_in
+ { NULL, 0 }, // rc_firstpass_mb_stats_in
+ 256, // rc_target_bandwidth
+ 0, // rc_min_quantizer
+ 63, // rc_max_quantizer
+ 25, // rc_undershoot_pct
+ 25, // rc_overshoot_pct
+
+ 6000, // rc_max_buffer_size
+ 4000, // rc_buffer_initial_size
+ 5000, // rc_buffer_optimal_size
+
+ 50, // rc_two_pass_vbrbias
+ 0, // rc_two_pass_vbrmin_section
+ 2000, // rc_two_pass_vbrmax_section
+
+ // keyframing settings (kf)
+ VPX_KF_AUTO, // g_kfmode
+ 0, // kf_min_dist
+ 9999, // kf_max_dist
+ } },
};
#ifndef VERSION_STRING
#if CONFIG_VP9_HIGHBITDEPTH
VPX_CODEC_CAP_HIGHBITDEPTH |
#endif
- VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // vpx_codec_caps_t
- encoder_init, // vpx_codec_init_fn_t
- encoder_destroy, // vpx_codec_destroy_fn_t
- encoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
- { // NOLINT
- NULL, // vpx_codec_peek_si_fn_t
- NULL, // vpx_codec_get_si_fn_t
- NULL, // vpx_codec_decode_fn_t
- NULL, // vpx_codec_frame_get_fn_t
- NULL // vpx_codec_set_fb_fn_t
+ VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // vpx_codec_caps_t
+ encoder_init, // vpx_codec_init_fn_t
+ encoder_destroy, // vpx_codec_destroy_fn_t
+ encoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
+ {
+ // NOLINT
+ NULL, // vpx_codec_peek_si_fn_t
+ NULL, // vpx_codec_get_si_fn_t
+ NULL, // vpx_codec_decode_fn_t
+ NULL, // vpx_codec_frame_get_fn_t
+ NULL // vpx_codec_set_fb_fn_t
},
- { // NOLINT
- 1, // 1 cfg map
- encoder_usage_cfg_map, // vpx_codec_enc_cfg_map_t
- encoder_encode, // vpx_codec_encode_fn_t
- encoder_get_cxdata, // vpx_codec_get_cx_data_fn_t
- encoder_set_config, // vpx_codec_enc_config_set_fn_t
- NULL, // vpx_codec_get_global_headers_fn_t
- encoder_get_preview, // vpx_codec_get_preview_frame_fn_t
- NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
+ {
+ // NOLINT
+ 1, // 1 cfg map
+ encoder_usage_cfg_map, // vpx_codec_enc_cfg_map_t
+ encoder_encode, // vpx_codec_encode_fn_t
+ encoder_get_cxdata, // vpx_codec_get_cx_data_fn_t
+ encoder_set_config, // vpx_codec_enc_config_set_fn_t
+ NULL, // vpx_codec_get_global_headers_fn_t
+ encoder_get_preview, // vpx_codec_get_preview_frame_fn_t
+ NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
}
};
// This limit is due to framebuffer numbers.
// TODO(hkuang): Remove this limit after implementing ondemand framebuffers.
-#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames.
+#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames.
typedef struct cache_frame {
int fb_idx;
} cache_frame;
struct vpx_codec_alg_priv {
- vpx_codec_priv_t base;
- vpx_codec_dec_cfg_t cfg;
- vp10_stream_info_t si;
- int postproc_cfg_set;
- vp8_postproc_cfg_t postproc_cfg;
- vpx_decrypt_cb decrypt_cb;
- void *decrypt_state;
- vpx_image_t img;
- int img_avail;
- int flushed;
- int invert_tile_order;
- int last_show_frame; // Index of last output frame.
- int byte_alignment;
- int skip_loop_filter;
- int decode_tile_row;
- int decode_tile_col;
+ vpx_codec_priv_t base;
+ vpx_codec_dec_cfg_t cfg;
+ vp10_stream_info_t si;
+ int postproc_cfg_set;
+ vp8_postproc_cfg_t postproc_cfg;
+ vpx_decrypt_cb decrypt_cb;
+ void *decrypt_state;
+ vpx_image_t img;
+ int img_avail;
+ int flushed;
+ int invert_tile_order;
+ int last_show_frame; // Index of last output frame.
+ int byte_alignment;
+ int skip_loop_filter;
+ int decode_tile_row;
+ int decode_tile_col;
// Frame parallel related.
- int frame_parallel_decode; // frame-based threading.
- VPxWorker *frame_workers;
- int num_frame_workers;
- int next_submit_worker_id;
- int last_submit_worker_id;
- int next_output_worker_id;
- int available_threads;
- cache_frame frame_cache[FRAME_CACHE_SIZE];
- int frame_cache_write;
- int frame_cache_read;
- int num_cache_frames;
- int need_resync; // wait for key/intra-only frame
+ int frame_parallel_decode; // frame-based threading.
+ VPxWorker *frame_workers;
+ int num_frame_workers;
+ int next_submit_worker_id;
+ int last_submit_worker_id;
+ int next_output_worker_id;
+ int available_threads;
+ cache_frame frame_cache[FRAME_CACHE_SIZE];
+ int frame_cache_write;
+ int frame_cache_read;
+ int num_cache_frames;
+ int need_resync; // wait for key/intra-only frame
// BufferPool that holds all reference frames. Shared by all the FrameWorkers.
- BufferPool *buffer_pool;
+ BufferPool *buffer_pool;
// External frame buffer info to save for VP10 common.
void *ext_priv; // Private data associated with the external frame buffers.
if (!ctx->priv) {
vpx_codec_alg_priv_t *const priv =
(vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv));
- if (priv == NULL)
- return VPX_CODEC_MEM_ERROR;
+ if (priv == NULL) return VPX_CODEC_MEM_ERROR;
ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
// Only do frame parallel decode when threads > 1.
priv->frame_parallel_decode =
(ctx->config.dec && (ctx->config.dec->threads > 1) &&
- (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 1 : 0;
+ (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING))
+ ? 1
+ : 0;
if (ctx->config.dec) {
priv->cfg = *ctx->config.dec;
ctx->config.dec = &priv->cfg;
return VPX_CODEC_OK;
}
-static int parse_bitdepth_colorspace_sampling(
- BITSTREAM_PROFILE profile, struct vpx_read_bit_buffer *rb) {
+static int parse_bitdepth_colorspace_sampling(BITSTREAM_PROFILE profile,
+ struct vpx_read_bit_buffer *rb) {
vpx_color_space_t color_space;
- if (profile >= PROFILE_2)
- rb->bit_offset += 1; // Bit-depth 10 or 12.
+ if (profile >= PROFILE_2) rb->bit_offset += 1; // Bit-depth 10 or 12.
color_space = (vpx_color_space_t)vpx_rb_read_literal(rb, 3);
if (color_space != VPX_CS_SRGB) {
rb->bit_offset += 1; // [16,235] (including xvycc) vs [0,255] range.
return 1;
}
-static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
- unsigned int data_sz,
- vpx_codec_stream_info_t *si,
- int *is_intra_only,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state) {
+static vpx_codec_err_t decoder_peek_si_internal(
+ const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si,
+ int *is_intra_only, vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
int intra_only_flag = 0;
uint8_t clear_buffer[9];
- if (data + data_sz <= data)
- return VPX_CODEC_INVALID_PARAM;
+ if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM;
si->is_kf = 0;
si->w = si->h = 0;
const int frame_marker = vpx_rb_read_literal(&rb, 2);
const BITSTREAM_PROFILE profile = vp10_read_profile(&rb);
- if (frame_marker != VPX_FRAME_MARKER)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (frame_marker != VPX_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM;
- if (profile >= MAX_PROFILES)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (profile >= MAX_PROFILES) return VPX_CODEC_UNSUP_BITSTREAM;
if ((profile >= 2 && data_sz <= 1) || data_sz < 1)
return VPX_CODEC_UNSUP_BITSTREAM;
- if (vpx_rb_read_bit(&rb)) { // show an existing frame
+ if (vpx_rb_read_bit(&rb)) { // show an existing frame
vpx_rb_read_literal(&rb, 3); // Frame buffer to show.
return VPX_CODEC_OK;
}
- if (data_sz <= 8)
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (data_sz <= 8) return VPX_CODEC_UNSUP_BITSTREAM;
si->is_kf = !vpx_rb_read_bit(&rb);
show_frame = vpx_rb_read_bit(&rb);
error_resilient = vpx_rb_read_bit(&rb);
if (si->is_kf) {
- if (!vp10_read_sync_code(&rb))
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
rb.bit_offset += error_resilient ? 0 : 2; // reset_frame_context
if (intra_only_flag) {
- if (!vp10_read_sync_code(&rb))
- return VPX_CODEC_UNSUP_BITSTREAM;
+ if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
if (profile > PROFILE_0) {
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
}
}
}
- if (is_intra_only != NULL)
- *is_intra_only = intra_only_flag;
+ if (is_intra_only != NULL) *is_intra_only = intra_only_flag;
return VPX_CODEC_OK;
}
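Applications normally reach this parser through the public vpx_codec_peek_stream_info() entry point. A minimal usage sketch, assuming this build exposes the decoder iface as vpx_codec_vp10_dx() and declares it in vpx/vp8dx.h:

  #include "vpx/vp8dx.h"
  #include "vpx/vpx_decoder.h"

  static int peek_coded_size(const uint8_t *buf, size_t len, unsigned int *w,
                             unsigned int *h) {
    vpx_codec_stream_info_t si;
    si.sz = sizeof(si); /* callers must set sz before the query */
    if (vpx_codec_peek_stream_info(vpx_codec_vp10_dx(), buf,
                                   (unsigned int)len, &si) != VPX_CODEC_OK)
      return 0;
    *w = si.w; /* coded size; si.is_kf reports whether this is a keyframe */
    *h = si.h;
    return 1;
  }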
static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx,
vpx_codec_stream_info_t *si) {
const size_t sz = (si->sz >= sizeof(vp10_stream_info_t))
- ? sizeof(vp10_stream_info_t)
- : sizeof(vpx_codec_stream_info_t);
+ ? sizeof(vp10_stream_info_t)
+ : sizeof(vpx_codec_stream_info_t);
memcpy(si, &ctx->si, sz);
si->sz = (unsigned int)sz;
ctx->base.err_detail = error;
}
-static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
- const struct vpx_internal_error_info *error) {
+static vpx_codec_err_t update_error_state(
+ vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
if (error->error_code)
set_error_detail(ctx, error->has_detail ? error->detail : NULL);
const uint8_t *data = frame_worker_data->data;
(void)arg2;
- frame_worker_data->result =
- vp10_receive_compressed_data(frame_worker_data->pbi,
- frame_worker_data->data_size,
- &data);
+ frame_worker_data->result = vp10_receive_compressed_data(
+ frame_worker_data->pbi, frame_worker_data->data_size, &data);
frame_worker_data->data_end = data;
if (frame_worker_data->pbi->common.frame_parallel_decode) {
ctx->num_cache_frames = 0;
ctx->need_resync = 1;
ctx->num_frame_workers =
- (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1;
+ (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads : 1;
if (ctx->num_frame_workers > MAX_DECODE_THREADS)
ctx->num_frame_workers = MAX_DECODE_THREADS;
ctx->available_threads = ctx->num_frame_workers;
ctx->flushed = 0;
ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
- if (ctx->buffer_pool == NULL)
- return VPX_CODEC_MEM_ERROR;
+ if (ctx->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
#if CONFIG_MULTITHREAD
- if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
- set_error_detail(ctx, "Failed to allocate buffer pool mutex");
- return VPX_CODEC_MEM_ERROR;
- }
+ if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
+ set_error_detail(ctx, "Failed to allocate buffer pool mutex");
+ return VPX_CODEC_MEM_ERROR;
+ }
#endif
- ctx->frame_workers = (VPxWorker *)
- vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers));
+ ctx->frame_workers = (VPxWorker *)vpx_malloc(ctx->num_frame_workers *
+ sizeof(*ctx->frame_workers));
if (ctx->frame_workers == NULL) {
set_error_detail(ctx, "Failed to allocate frame_workers");
return VPX_CODEC_MEM_ERROR;
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
- if (!ctx->postproc_cfg_set &&
- (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
+ if (!ctx->postproc_cfg_set && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
set_default_ppflags(&ctx->postproc_cfg);
init_buffer_callbacks(ctx);
const vpx_codec_err_t res =
decoder_peek_si_internal(*data, data_sz, &ctx->si, &is_intra_only,
ctx->decrypt_cb, ctx->decrypt_state);
- if (res != VPX_CODEC_OK)
- return res;
+ if (res != VPX_CODEC_OK) return res;
- if (!ctx->si.is_kf && !is_intra_only)
- return VPX_CODEC_ERROR;
+ if (!ctx->si.is_kf && !is_intra_only) return VPX_CODEC_ERROR;
}
if (!ctx->frame_parallel_decode) {
frame_worker_data->user_priv);
ctx->frame_cache[ctx->frame_cache_write].img.fb_priv =
frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
- ctx->frame_cache_write =
- (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
+ ctx->frame_cache_write = (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
++ctx->num_cache_frames;
}
}
const uint8_t *data, unsigned int data_sz,
void *user_priv, long deadline) {
const uint8_t *data_start = data;
- const uint8_t * const data_end = data + data_sz;
+ const uint8_t *const data_end = data + data_sz;
vpx_codec_err_t res;
uint32_t frame_sizes[8];
int frame_count;
// Initialize the decoder workers on the first frame.
if (ctx->frame_workers == NULL) {
const vpx_codec_err_t res = init_decoder(ctx);
- if (res != VPX_CODEC_OK)
- return res;
+ if (res != VPX_CODEC_OK) return res;
}
res = vp10_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
- ctx->decrypt_cb, ctx->decrypt_state);
- if (res != VPX_CODEC_OK)
- return res;
+ ctx->decrypt_cb, ctx->decrypt_state);
+ if (res != VPX_CODEC_OK) return res;
if (ctx->frame_parallel_decode) {
// Decode in frame parallel mode. When decoding in this mode, the frame
for (i = 0; i < frame_count; ++i) {
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
- if (data_start < data
- || frame_size > (uint32_t) (data_end - data_start)) {
+ if (data_start < data ||
+ frame_size > (uint32_t)(data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
}
}
- res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
- deadline);
- if (res != VPX_CODEC_OK)
- return res;
+ res =
+ decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
+ if (res != VPX_CODEC_OK) return res;
data_start += frame_size;
}
} else {
}
res = decode_one(ctx, &data, data_sz, user_priv, deadline);
- if (res != VPX_CODEC_OK)
- return res;
+ if (res != VPX_CODEC_OK) return res;
}
} else {
// Decode in serial mode.
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
vpx_codec_err_t res;
- if (data_start < data
- || frame_size > (uint32_t) (data_end - data_start)) {
+ if (data_start < data ||
+ frame_size > (uint32_t)(data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
- res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
- deadline);
- if (res != VPX_CODEC_OK)
- return res;
+ res =
+ decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
+ if (res != VPX_CODEC_OK) return res;
data_start += frame_size;
}
} else {
while (data_start < data_end) {
- const uint32_t frame_size = (uint32_t) (data_end - data_start);
- const vpx_codec_err_t res = decode_one(ctx, &data_start, frame_size,
- user_priv, deadline);
- if (res != VPX_CODEC_OK)
- return res;
+ const uint32_t frame_size = (uint32_t)(data_end - data_start);
+ const vpx_codec_err_t res =
+ decode_one(ctx, &data_start, frame_size, user_priv, deadline);
+ if (res != VPX_CODEC_OK) return res;
// Account for suboptimal termination by the encoder.
while (data_start < data_end) {
- const uint8_t marker = read_marker(ctx->decrypt_cb,
- ctx->decrypt_state, data_start);
- if (marker)
- break;
+ const uint8_t marker =
+ read_marker(ctx->decrypt_cb, ctx->decrypt_state, data_start);
+ if (marker) break;
++data_start;
}
}
// Output the frames in the cache first.
if (ctx->num_cache_frames > 0) {
release_last_output_frame(ctx);
- ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
- if (ctx->need_resync)
- return NULL;
+ ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
+ if (ctx->need_resync) return NULL;
img = &ctx->frame_cache[ctx->frame_cache_read].img;
ctx->frame_cache_read = (ctx->frame_cache_read + 1) % FRAME_CACHE_SIZE;
--ctx->num_cache_frames;
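Both indices wrap modulo FRAME_CACHE_SIZE, so the cache is a fixed six-slot ring buffer: the decode path advances frame_cache_write (shown earlier), this output path advances frame_cache_read, and num_cache_frames tracks the occupancy between them.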
do {
YV12_BUFFER_CONFIG sd;
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
- VPxWorker *const worker =
- &ctx->frame_workers[ctx->next_output_worker_id];
+ VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
ctx->next_output_worker_id =
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
release_last_output_frame(ctx);
ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
- if (ctx->need_resync)
- return NULL;
+ if (ctx->need_resync) return NULL;
yuvconfig2image(&ctx->img, &sd, frame_worker_data->user_priv);
-
#if CONFIG_EXT_TILE
if (frame_worker_data->pbi->dec_tile_row >= 0) {
- const int tile_row = VPXMIN(frame_worker_data->pbi->dec_tile_row,
- cm->tile_rows - 1);
+ const int tile_row =
+ VPXMIN(frame_worker_data->pbi->dec_tile_row, cm->tile_rows - 1);
const int mi_row = tile_row * cm->tile_height;
const int ssy = ctx->img.y_chroma_shift;
int plane;
ctx->img.planes[0] += mi_row * MI_SIZE * ctx->img.stride[0];
for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
- ctx->img.planes[plane] += mi_row * (MI_SIZE >> ssy) *
- ctx->img.stride[plane];
+ ctx->img.planes[plane] +=
+ mi_row * (MI_SIZE >> ssy) * ctx->img.stride[plane];
}
- ctx->img.d_h = VPXMIN(cm->tile_height, cm->mi_rows - mi_row) *
- MI_SIZE;
+ ctx->img.d_h =
+ VPXMIN(cm->tile_height, cm->mi_rows - mi_row) * MI_SIZE;
}
if (frame_worker_data->pbi->dec_tile_col >= 0) {
- const int tile_col = VPXMIN(frame_worker_data->pbi->dec_tile_col,
- cm->tile_cols - 1);
+ const int tile_col =
+ VPXMIN(frame_worker_data->pbi->dec_tile_col, cm->tile_cols - 1);
const int mi_col = tile_col * cm->tile_width;
const int ssx = ctx->img.x_chroma_shift;
int plane;
for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
ctx->img.planes[plane] += mi_col * (MI_SIZE >> ssx);
}
- ctx->img.d_w = VPXMIN(cm->tile_width, cm->mi_cols - mi_col) *
- MI_SIZE;
+ ctx->img.d_w =
+ VPXMIN(cm->tile_width, cm->mi_cols - mi_col) * MI_SIZE;
}
#endif // CONFIG_EXT_TILE
frame_worker_data->received_frame = 0;
++ctx->available_threads;
ctx->need_resync = 1;
- if (ctx->flushed != 1)
- return NULL;
+ if (ctx->flushed != 1) return NULL;
}
} while (ctx->next_output_worker_id != ctx->next_submit_worker_id);
}
}
static vpx_codec_err_t decoder_set_fb_fn(
- vpx_codec_alg_priv_t *ctx,
- vpx_get_frame_buffer_cb_fn_t cb_get,
+ vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
if (cb_get == NULL || cb_release == NULL) {
return VPX_CODEC_INVALID_PARAM;
}
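External buffers reach this handler via vpx_codec_set_frame_buffer_functions(). A hedged sketch of a malloc-backed callback pair (no per-buffer private data):

  #include <stdlib.h>
  #include <string.h>
  #include "vpx/vpx_decoder.h"

  static int get_fb(void *priv, size_t min_size,
                    vpx_codec_frame_buffer_t *fb) {
    (void)priv;
    fb->data = (uint8_t *)malloc(min_size);
    if (fb->data == NULL) return -1; /* negative return signals failure */
    memset(fb->data, 0, min_size);
    fb->size = min_size;
    fb->priv = NULL;
    return 0;
  }

  static int release_fb(void *priv, vpx_codec_frame_buffer_t *fb) {
    (void)priv;
    free(fb->data);
    fb->data = NULL;
    return 0;
  }

  /* After decoder init:
   * vpx_codec_set_frame_buffer_functions(&codec, get_fb, release_fb, NULL); */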
if (data) {
- vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data;
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
return vp10_copy_reference_dec(frame_worker_data->pbi,
- (VPX_REFFRAME)frame->frame_type, &sd);
+ (VPX_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
if (data) {
- YV12_BUFFER_CONFIG* fb;
+ YV12_BUFFER_CONFIG *fb;
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
ctx->byte_alignment = byte_alignment;
if (ctx->frame_workers) {
VPxWorker *const worker = ctx->frame_workers;
- FrameWorkerData *const frame_worker_data =
- (FrameWorkerData *)worker->data1;
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
frame_worker_data->pbi->common.byte_alignment = byte_alignment;
}
return VPX_CODEC_OK;
}
static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
- {VP8_COPY_REFERENCE, ctrl_copy_reference},
+ { VP8_COPY_REFERENCE, ctrl_copy_reference },
// Setters
- {VP8_SET_REFERENCE, ctrl_set_reference},
- {VP8_SET_POSTPROC, ctrl_set_postproc},
- {VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options},
- {VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options},
- {VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options},
- {VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options},
- {VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order},
- {VPXD_SET_DECRYPTOR, ctrl_set_decryptor},
- {VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment},
- {VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter},
- {VP10_SET_DECODE_TILE_ROW, ctrl_set_decode_tile_row},
- {VP10_SET_DECODE_TILE_COL, ctrl_set_decode_tile_col},
+ { VP8_SET_REFERENCE, ctrl_set_reference },
+ { VP8_SET_POSTPROC, ctrl_set_postproc },
+ { VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options },
+ { VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options },
+ { VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options },
+ { VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options },
+ { VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order },
+ { VPXD_SET_DECRYPTOR, ctrl_set_decryptor },
+ { VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment },
+ { VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
+ { VP10_SET_DECODE_TILE_ROW, ctrl_set_decode_tile_row },
+ { VP10_SET_DECODE_TILE_COL, ctrl_set_decode_tile_col },
// Getters
- {VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates},
- {VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted},
- {VP9_GET_REFERENCE, ctrl_get_reference},
- {VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size},
- {VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth},
- {VP9D_GET_FRAME_SIZE, ctrl_get_frame_size},
- {VP10_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image},
-
- { -1, NULL},
+ { VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates },
+ { VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted },
+ { VP9_GET_REFERENCE, ctrl_get_reference },
+ { VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size },
+ { VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth },
+ { VP9D_GET_FRAME_SIZE, ctrl_get_frame_size },
+ { VP10_GET_NEW_FRAME_IMAGE, ctrl_get_new_frame_image },
+
+ { -1, NULL },
};
#ifndef VERSION_STRING
VPX_CODEC_INTERNAL_ABI_VERSION,
VPX_CODEC_CAP_DECODER |
VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER, // vpx_codec_caps_t
- decoder_init, // vpx_codec_init_fn_t
- decoder_destroy, // vpx_codec_destroy_fn_t
- decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
- { // NOLINT
- decoder_peek_si, // vpx_codec_peek_si_fn_t
- decoder_get_si, // vpx_codec_get_si_fn_t
- decoder_decode, // vpx_codec_decode_fn_t
- decoder_get_frame, // vpx_codec_frame_get_fn_t
- decoder_set_fb_fn, // vpx_codec_set_fb_fn_t
+ decoder_init, // vpx_codec_init_fn_t
+ decoder_destroy, // vpx_codec_destroy_fn_t
+ decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
+ {
+ // NOLINT
+ decoder_peek_si, // vpx_codec_peek_si_fn_t
+ decoder_get_si, // vpx_codec_get_si_fn_t
+ decoder_decode, // vpx_codec_decode_fn_t
+ decoder_get_frame, // vpx_codec_frame_get_fn_t
+ decoder_set_fb_fn, // vpx_codec_set_fb_fn_t
},
- { // NOLINT
- 0,
- NULL, // vpx_codec_enc_cfg_map_t
- NULL, // vpx_codec_encode_fn_t
- NULL, // vpx_codec_get_cx_data_fn_t
- NULL, // vpx_codec_enc_config_set_fn_t
- NULL, // vpx_codec_get_global_headers_fn_t
- NULL, // vpx_codec_get_preview_frame_fn_t
- NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
+ {
+ // NOLINT
+ 0,
+ NULL, // vpx_codec_enc_cfg_map_t
+ NULL, // vpx_codec_encode_fn_t
+ NULL, // vpx_codec_get_cx_data_fn_t
+ NULL, // vpx_codec_enc_config_set_fn_t
+ NULL, // vpx_codec_get_global_headers_fn_t
+ NULL, // vpx_codec_get_preview_frame_fn_t
+ NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
}
};
#include "vpx_ports/mem.h"
-static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
+static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
void *user_priv) {
/** vpx_img_wrap() doesn't allow specifying independent strides for
* the Y, U, and V planes, nor other alignment adjustments that
// of the image.
img->fmt = (vpx_img_fmt_t)(img->fmt | VPX_IMG_FMT_HIGHBITDEPTH);
img->bit_depth = yv12->bit_depth;
- img->planes[VPX_PLANE_Y] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->y_buffer);
- img->planes[VPX_PLANE_U] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->u_buffer);
- img->planes[VPX_PLANE_V] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->v_buffer);
+ img->planes[VPX_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer);
+ img->planes[VPX_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer);
+ img->planes[VPX_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer);
img->planes[VPX_PLANE_ALPHA] = NULL;
img->stride[VPX_PLANE_Y] = 2 * yv12->y_stride;
img->stride[VPX_PLANE_U] = 2 * yv12->uv_stride;
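Because high-bit-depth planes hold 16-bit samples, the byte strides surfaced to the application are double the sample strides kept in YV12_BUFFER_CONFIG: a y_stride of 1280 samples, for example, is reported as img->stride[VPX_PLANE_Y] = 2560 bytes, with CONVERT_TO_SHORTPTR recovering the underlying uint16_t plane pointers.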
yv12->u_buffer = img->planes[VPX_PLANE_U];
yv12->v_buffer = img->planes[VPX_PLANE_V];
- yv12->y_crop_width = img->d_w;
+ yv12->y_crop_width = img->d_w;
yv12->y_crop_height = img->d_h;
- yv12->render_width = img->r_w;
+ yv12->render_width = img->r_w;
yv12->render_height = img->r_h;
- yv12->y_width = img->d_w;
+ yv12->y_width = img->d_w;
yv12->y_height = img->d_h;
- yv12->uv_width = img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2
- : yv12->y_width;
- yv12->uv_height = img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2
- : yv12->y_height;
+ yv12->uv_width =
+ img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width;
+ yv12->uv_height =
+ img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height;
yv12->uv_crop_width = yv12->uv_width;
yv12->uv_crop_height = yv12->uv_height;
} else {
yv12->flags = 0;
}
- yv12->border = (yv12->y_stride - img->w) / 2;
+ yv12->border = (yv12->y_stride - img->w) / 2;
#else
- yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
+ yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
#endif // CONFIG_VP9_HIGHBITDEPTH
yv12->subsampling_x = img->x_chroma_shift;
yv12->subsampling_y = img->y_chroma_shift;
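With 4:2:0 content (both chroma shifts equal to 1), the (1 + n) / 2 expressions above round odd luma dimensions up: a 1920x1081 image yields uv_width = (1 + 1920) / 2 = 960 and uv_height = (1 + 1081) / 2 = 541.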
static VPX_REFFRAME ref_frame_to_vp10_reframe(vpx_ref_frame_type_t frame) {
switch (frame) {
- case VP8_LAST_FRAME:
- return VPX_LAST_FLAG;
- case VP8_GOLD_FRAME:
- return VPX_GOLD_FLAG;
- case VP8_ALTR_FRAME:
- return VPX_ALT_FLAG;
+ case VP8_LAST_FRAME: return VPX_LAST_FLAG;
+ case VP8_GOLD_FRAME: return VPX_GOLD_FLAG;
+ case VP8_ALTR_FRAME: return VPX_ALT_FLAG;
}
assert(0 && "Invalid Reference Frame");
return VPX_LAST_FLAG;