static unsigned long vp8_priv_sz(const vpx_codec_dec_cfg_t *si, vpx_codec_flags_t);
-static const mem_req_t vp8_mem_req_segs[] =
-{
- {VP8_SEG_ALG_PRIV, 0, 8, VPX_CODEC_MEM_ZERO, vp8_priv_sz},
- {VP8_SEG_MAX, 0, 0, 0, NULL}
-};
-
struct vpx_codec_alg_priv
{
vpx_codec_priv_t base;
- vpx_codec_mmap_t mmaps[NELEMENTS(vp8_mem_req_segs)-1];
vpx_codec_dec_cfg_t cfg;
vp8_stream_info_t si;
- int defer_alloc;
int decoder_init;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
return sizeof(vpx_codec_alg_priv_t);
}
-static void vp8_init_ctx(vpx_codec_ctx_t *ctx, const vpx_codec_mmap_t *mmap)
+static void vp8_init_ctx(vpx_codec_ctx_t *ctx)
{
- int i;
-
- ctx->priv = mmap->base;
+ ctx->priv =
+ (vpx_codec_priv_t *)vpx_memalign(8, sizeof(vpx_codec_alg_priv_t));
+ vpx_memset(ctx->priv, 0, sizeof(vpx_codec_alg_priv_t));
ctx->priv->sz = sizeof(*ctx->priv);
ctx->priv->iface = ctx->iface;
- ctx->priv->alg_priv = mmap->base;
-
- for (i = 0; i < NELEMENTS(ctx->priv->alg_priv->mmaps); i++)
- ctx->priv->alg_priv->mmaps[i].id = vp8_mem_req_segs[i].id;
-
- ctx->priv->alg_priv->mmaps[0] = *mmap;
+ ctx->priv->alg_priv = (vpx_codec_alg_priv_t *)ctx->priv;
ctx->priv->alg_priv->si.sz = sizeof(ctx->priv->alg_priv->si);
ctx->priv->alg_priv->decrypt_cb = NULL;
ctx->priv->alg_priv->decrypt_state = NULL;
}
}
-static void vp8_finalize_mmaps(vpx_codec_alg_priv_t *ctx)
-{
- /* nothing to clean up */
-}
-
static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx,
vpx_codec_priv_enc_mr_cfg_t *data)
{
*/
if (!ctx->priv)
{
- vpx_codec_mmap_t mmap;
-
- mmap.id = vp8_mem_req_segs[0].id;
- mmap.sz = sizeof(vpx_codec_alg_priv_t);
- mmap.align = vp8_mem_req_segs[0].align;
- mmap.flags = vp8_mem_req_segs[0].flags;
-
- res = vpx_mmap_alloc(&mmap);
- if (res != VPX_CODEC_OK) return res;
-
- vp8_init_ctx(ctx, &mmap);
+ vp8_init_ctx(ctx);
/* initialize number of fragments to zero */
ctx->priv->alg_priv->fragments.count = 0;
(ctx->priv->alg_priv->base.init_flags &
VPX_CODEC_USE_INPUT_FRAGMENTS);
- ctx->priv->alg_priv->defer_alloc = 1;
/*post processing level initialized to do nothing */
}
/* Tear down a decoder instance: release the decoder's frame buffers, then
 * free the private state block that vp8_init_ctx allocated.  With the mmap
 * segment table gone, a single vpx_free() is all the cleanup required. */
static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx)
{
    vp8_remove_decoder_instances(&ctx->yv12_frame_buffers);

    vpx_free(ctx);
    return VPX_CODEC_OK;
}
if ((ctx->si.h != h) || (ctx->si.w != w))
resolution_change = 1;
- /* Perform deferred allocations, if required */
- if (!res && ctx->defer_alloc)
- {
- int i;
-
- for (i = 1; !res && i < NELEMENTS(ctx->mmaps); i++)
- {
- vpx_codec_dec_cfg_t cfg;
-
- cfg.w = ctx->si.w;
- cfg.h = ctx->si.h;
- ctx->mmaps[i].id = vp8_mem_req_segs[i].id;
- ctx->mmaps[i].sz = vp8_mem_req_segs[i].sz;
- ctx->mmaps[i].align = vp8_mem_req_segs[i].align;
- ctx->mmaps[i].flags = vp8_mem_req_segs[i].flags;
-
- if (!ctx->mmaps[i].sz)
- ctx->mmaps[i].sz = vp8_mem_req_segs[i].calc_sz(&cfg,
- ctx->base.init_flags);
-
- res = vpx_mmap_alloc(&ctx->mmaps[i]);
- }
-
- if (!res)
- vp8_finalize_mmaps(ctx);
-
- ctx->defer_alloc = 0;
- }
-
/* Initialize the decoder instance on the first frame*/
if (!res && !ctx->decoder_init)
{
- res = vpx_validate_mmaps(&ctx->si, ctx->mmaps,
- vp8_mem_req_segs, NELEMENTS(vp8_mem_req_segs),
- ctx->base.init_flags);
-
- if (!res)
- {
- VP8D_CONFIG oxcf;
-
- oxcf.Width = ctx->si.w;
- oxcf.Height = ctx->si.h;
- oxcf.Version = 9;
- oxcf.postprocess = 0;
- oxcf.max_threads = ctx->cfg.threads;
- oxcf.error_concealment =
- (ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
-
- /* If postprocessing was enabled by the application and a
- * configuration has not been provided, default it.
- */
- if (!ctx->postproc_cfg_set
- && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
- {
- ctx->postproc_cfg.post_proc_flag =
- VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE;
- ctx->postproc_cfg.deblocking_level = 4;
- ctx->postproc_cfg.noise_level = 0;
- }
-
- res = vp8_create_decoder_instances(&ctx->yv12_frame_buffers, &oxcf);
- ctx->yv12_frame_buffers.pbi[0]->decrypt_cb = ctx->decrypt_cb;
- ctx->yv12_frame_buffers.pbi[0]->decrypt_state = ctx->decrypt_state;
- }
-
- ctx->decoder_init = 1;
+ VP8D_CONFIG oxcf;
+
+ oxcf.Width = ctx->si.w;
+ oxcf.Height = ctx->si.h;
+ oxcf.Version = 9;
+ oxcf.postprocess = 0;
+ oxcf.max_threads = ctx->cfg.threads;
+ oxcf.error_concealment =
+ (ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
+
+ /* If postprocessing was enabled by the application and a
+ * configuration has not been provided, default it.
+ */
+ if (!ctx->postproc_cfg_set
+ && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) {
+ ctx->postproc_cfg.post_proc_flag =
+ VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE;
+ ctx->postproc_cfg.deblocking_level = 4;
+ ctx->postproc_cfg.noise_level = 0;
+ }
+
+ res = vp8_create_decoder_instances(&ctx->yv12_frame_buffers, &oxcf);
+ ctx->yv12_frame_buffers.pbi[0]->decrypt_cb = ctx->decrypt_cb;
+ ctx->yv12_frame_buffers.pbi[0]->decrypt_state = ctx->decrypt_state;
+
+ ctx->decoder_init = 1;
}
if (!res)
return img;
}
-
-static
-vpx_codec_err_t vp8_xma_get_mmap(const vpx_codec_ctx_t *ctx,
- vpx_codec_mmap_t *mmap,
- vpx_codec_iter_t *iter)
-{
- vpx_codec_err_t res;
- const mem_req_t *seg_iter = *iter;
-
- /* Get address of next segment request */
- do
- {
- if (!seg_iter)
- seg_iter = vp8_mem_req_segs;
- else if (seg_iter->id != VP8_SEG_MAX)
- seg_iter++;
-
- *iter = (vpx_codec_iter_t)seg_iter;
-
- if (seg_iter->id != VP8_SEG_MAX)
- {
- mmap->id = seg_iter->id;
- mmap->sz = seg_iter->sz;
- mmap->align = seg_iter->align;
- mmap->flags = seg_iter->flags;
-
- if (!seg_iter->sz)
- mmap->sz = seg_iter->calc_sz(ctx->config.dec, ctx->init_flags);
-
- res = VPX_CODEC_OK;
- }
- else
- res = VPX_CODEC_LIST_END;
- }
- while (!mmap->sz && res != VPX_CODEC_LIST_END);
-
- return res;
-}
-
-static vpx_codec_err_t vp8_xma_set_mmap(vpx_codec_ctx_t *ctx,
- const vpx_codec_mmap_t *mmap)
-{
- vpx_codec_err_t res = VPX_CODEC_MEM_ERROR;
- int i, done;
-
- if (!ctx->priv)
- {
- if (mmap->id == VP8_SEG_ALG_PRIV)
- {
- if (!ctx->priv)
- {
- vp8_init_ctx(ctx, mmap);
- res = VPX_CODEC_OK;
- }
- }
- }
-
- done = 1;
-
- if (!res && ctx->priv->alg_priv)
- {
- for (i = 0; i < NELEMENTS(ctx->priv->alg_priv->mmaps); i++)
- {
- if (ctx->priv->alg_priv->mmaps[i].id == mmap->id)
- if (!ctx->priv->alg_priv->mmaps[i].base)
- {
- ctx->priv->alg_priv->mmaps[i] = *mmap;
- res = VPX_CODEC_OK;
- }
-
- done &= (ctx->priv->alg_priv->mmaps[i].base != NULL);
- }
- }
-
- if (done && !res)
- {
- vp8_finalize_mmaps(ctx->priv->alg_priv);
- res = ctx->iface->init(ctx, NULL);
- }
-
- return res;
-}
-
static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
YV12_BUFFER_CONFIG *yv12)
{
vp8_init, /* vpx_codec_init_fn_t init; */
vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
vp8_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
- vp8_xma_get_mmap, /* vpx_codec_get_mmap_fn_t get_mmap; */
- vp8_xma_set_mmap, /* vpx_codec_set_mmap_fn_t set_mmap; */
+ NOT_IMPLEMENTED, /* vpx_codec_get_mmap_fn_t get_mmap; */
+ NOT_IMPLEMENTED, /* vpx_codec_set_mmap_fn_t set_mmap; */
{
vp8_peek_si, /* vpx_codec_peek_si_fn_t peek_si; */
vp8_get_si, /* vpx_codec_get_si_fn_t get_si; */
}
}
-static unsigned int zz_motion_search(const MACROBLOCK *x) {
- const MACROBLOCKD *const xd = &x->e_mbd;
- const uint8_t *const src = x->plane[0].src.buf;
- const int src_stride = x->plane[0].src.stride;
- const uint8_t *const ref = xd->plane[0].pre[0].buf;
- const int ref_stride = xd->plane[0].pre[0].stride;
+static unsigned int get_prediction_error(BLOCK_SIZE bsize,
+ const struct buf_2d *src,
+ const struct buf_2d *ref) {
unsigned int sse;
- vp9_variance_fn_t fn = get_block_variance_fn(xd->mi[0]->mbmi.sb_type);
- fn(src, src_stride, ref, ref_stride, &sse);
+ const vp9_variance_fn_t fn = get_block_variance_fn(bsize);
+ fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
return sse;
}
int_mv mv, tmp_mv;
xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
- motion_error = zz_motion_search(x);
+ motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &xd->plane[0].pre[0]);
// Assume 0,0 motion with no mv overhead.
mv.as_int = tmp_mv.as_int = 0;
int gf_motion_error;
xd->plane[0].pre[0].buf = gld_yv12->y_buffer + recon_yoffset;
- gf_motion_error = zz_motion_search(x);
+ gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &xd->plane[0].pre[0]);
first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv.as_mv,
&gf_motion_error);
const VP9_COMMON *const cm = &cpi->common;
const VP9_CONFIG *const oxcf = &cpi->oxcf;
RATE_CONTROL *const rc = &cpi->rc;
+ const int qindex = cm->base_qindex;
// Update rate control heuristics
rc->projected_frame_size = (int)(bytes_used << 3);
// Keep a record of last Q and ambient average Q.
if (cm->frame_type == KEY_FRAME) {
- rc->last_q[KEY_FRAME] = cm->base_qindex;
- rc->avg_frame_qindex[KEY_FRAME] = ROUND_POWER_OF_TWO(
- 3 * rc->avg_frame_qindex[KEY_FRAME] + cm->base_qindex, 2);
+ rc->last_q[KEY_FRAME] = qindex;
+ rc->avg_frame_qindex[KEY_FRAME] =
+ ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[KEY_FRAME] + qindex, 2);
} else if (!rc->is_src_frame_alt_ref &&
- (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) &&
- !(cpi->use_svc && oxcf->end_usage == USAGE_STREAM_FROM_SERVER)) {
- rc->last_q[2] = cm->base_qindex;
- rc->avg_frame_qindex[2] = ROUND_POWER_OF_TWO(
- 3 * rc->avg_frame_qindex[2] + cm->base_qindex, 2);
+ (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) &&
+ !(cpi->use_svc && oxcf->end_usage == USAGE_STREAM_FROM_SERVER)) {
+ rc->last_q[2] = qindex;
+ rc->avg_frame_qindex[2] =
+ ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[2] + qindex, 2);
} else {
- rc->last_q[INTER_FRAME] = cm->base_qindex;
- rc->avg_frame_qindex[INTER_FRAME] = ROUND_POWER_OF_TWO(
- 3 * rc->avg_frame_qindex[INTER_FRAME] + cm->base_qindex, 2);
+ rc->last_q[INTER_FRAME] = qindex;
+ rc->avg_frame_qindex[INTER_FRAME] =
+ ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
rc->ni_frames++;
- rc->tot_q += vp9_convert_qindex_to_q(cm->base_qindex);
- rc->avg_q = rc->tot_q / (double)rc->ni_frames;
-
+ rc->tot_q += vp9_convert_qindex_to_q(qindex);
+ rc->avg_q = rc->tot_q / rc->ni_frames;
// Calculate the average Q for normal inter frames (not key or GFU frames).
- rc->ni_tot_qi += cm->base_qindex;
+ rc->ni_tot_qi += qindex;
rc->ni_av_qi = rc->ni_tot_qi / rc->ni_frames;
}
// If all mbs in this group are skipped only update if the Q value is
// better than that already stored.
// This is used to help set quality in forced key frames to reduce popping
- if ((cm->base_qindex < rc->last_boosted_qindex) ||
+ if ((qindex < rc->last_boosted_qindex) ||
((cpi->static_mb_pct < 100) &&
((cm->frame_type == KEY_FRAME) || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) {
- rc->last_boosted_qindex = cm->base_qindex;
+ rc->last_boosted_qindex = qindex;
}
update_buffer_level(cpi, rc->projected_frame_size);