#include "vp8/common/g_common.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
+#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
#include "vp8/common/onyxc_int.h"
rtcd->idct.idct1_scalar_add_8x8 = vp8_dc_only_idct_add_8x8_c;
rtcd->idct.ihaar2 = vp8_short_ihaar2x2_c;
rtcd->idct.idct16x16 = vp8_short_idct16x16_c;
+ rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
+ rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
+ rtcd->recon.avg16x16 = vp8_avg_mem16x16_c;
+ rtcd->recon.avg8x8 = vp8_avg_mem8x8_c;
+ rtcd->recon.copy8x4 = vp8_copy_mem8x4_c;
+ rtcd->recon.recon = vp8_recon_b_c;
+ rtcd->recon.recon_uv = vp8_recon_uv_b_c;
+ rtcd->recon.recon2 = vp8_recon2b_c;
+ rtcd->recon.recon4 = vp8_recon4b_c;
+ rtcd->recon.recon_mb = vp8_recon_mb_c;
+ rtcd->recon.recon_mby = vp8_recon_mby_c;
+#if CONFIG_SUPERBLOCKS
+ rtcd->recon.build_intra_predictors_sby_s =
+ vp8_build_intra_predictors_sby_s;
+ rtcd->recon.build_intra_predictors_sbuv_s =
+ vp8_build_intra_predictors_sbuv_s;
+#endif
+ rtcd->recon.build_intra_predictors_mby =
+ vp8_build_intra_predictors_mby;
+#if CONFIG_COMP_INTRA_PRED
+ rtcd->recon.build_comp_intra_predictors_mby =
+ vp8_build_comp_intra_predictors_mby;
+#endif
+ rtcd->recon.build_intra_predictors_mby_s =
+ vp8_build_intra_predictors_mby_s;
+ rtcd->recon.build_intra_predictors_mbuv =
+ vp8_build_intra_predictors_mbuv;
+ rtcd->recon.build_intra_predictors_mbuv_s =
+ vp8_build_intra_predictors_mbuv_s;
+#if CONFIG_COMP_INTRA_PRED
+ rtcd->recon.build_comp_intra_predictors_mbuv =
+ vp8_build_comp_intra_predictors_mbuv;
+#endif
+ rtcd->recon.intra4x4_predict =
+ vp8_intra4x4_predict;
+#if CONFIG_COMP_INTRA_PRED
+ rtcd->recon.comp_intra4x4_predict =
+ vp8_comp_intra4x4_predict;
+#endif
+ rtcd->recon.intra8x8_predict =
+ vp8_intra8x8_predict;
+#if CONFIG_COMP_INTRA_PRED
+ rtcd->recon.comp_intra8x8_predict =
+ vp8_comp_intra8x8_predict;
+#endif
+ rtcd->recon.intra_uv4x4_predict =
+ vp8_intra_uv4x4_predict;
+#if CONFIG_COMP_INTRA_PRED
+ rtcd->recon.comp_intra_uv4x4_predict =
+ vp8_comp_intra_uv4x4_predict;
+#endif
rtcd->subpix.eighttap16x16 = vp8_eighttap_predict16x16_c;
rtcd->subpix.eighttap8x8 = vp8_eighttap_predict8x8_c;
#include "entropy.h"
#include "entropymode.h"
#include "idct.h"
+#include "recon.h"
#if CONFIG_POSTPROC
#include "postproc.h"
#endif
typedef struct VP8_COMMON_RTCD {
#if CONFIG_RUNTIME_CPU_DETECT
vp8_idct_rtcd_vtable_t idct;
+ vp8_recon_rtcd_vtable_t recon;
vp8_subpix_rtcd_vtable_t subpix;
vp8_loopfilter_rtcd_vtable_t loopfilter;
#if CONFIG_POSTPROC
#include "vpx_ports/config.h"
-#include "vpx_rtcd.h"
+#include "recon.h"
#include "blockd.h"
void vp8_recon_b_c
}
#if CONFIG_SUPERBLOCKS
-void vp8_recon_mby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
+void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *dst) {
int x, y;
BLOCKD *b = &xd->block[0];
int stride = b->dst_stride;
}
}
-void vp8_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
+void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
int x, y, i;
uint8_t *dst = udst;
}
#endif
-void vp8_recon_mby_c(MACROBLOCKD *xd) {
+/* Reconstruct the 16x16 luma (Y) plane of one macroblock: add each block's
+ * inverse-transform residual (diff) to its prediction and write the clamped
+ * result into the destination frame buffer.  Dispatch goes through the recon
+ * vtable (RECON_INVOKE) so platform-optimized variants can be selected. */
+void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
+#if ARCH_ARM
+  /* Manually unrolled for ARM builds; each recon4 call handles one 16x4
+   * row of pixels (four 4x4 blocks). */
+  BLOCKD *b = &xd->block[0];
+  RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+
+  /*b = &xd->block[4];*/
+  b += 4;
+  RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+
+  /*b = &xd->block[8];*/
+  b += 4;
+  RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+
+  /*b = &xd->block[12];*/
+  b += 4;
+  RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+#else
  int i;
+  /* 16 4x4 luma blocks, reconstructed four at a time (one 16x4 row). */
  for (i = 0; i < 16; i += 4) {
    BLOCKD *b = &xd->block[i];
-    vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+    RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
  }
+#endif
}
-void vp8_recon_mb_c(MACROBLOCKD *xd) {
+/* Reconstruct a whole macroblock: the 16x16 luma plane (blocks 0-15, four
+ * at a time via recon4) plus both 8x8 chroma planes (blocks 16-23, two at
+ * a time via recon2).  Residuals are added to predictions and the clamped
+ * result is stored through each block's base_dst/dst pointer. */
+void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
+#if ARCH_ARM
+  /* Manually unrolled for ARM builds. */
+  BLOCKD *b = &xd->block[0];
+
+  RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+  b += 4;
+  RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+  b += 4;
+  RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+  b += 4;
+  RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+  b += 4;
+
+  /*b = &xd->block[16];*/
+
+  /* Chroma: blocks 16-23, stepping by 2 (each recon2 covers an 8x4 row). */
+  RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+  b++;
+  b++;
+  RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+  b++;
+  b++;
+  RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+  b++;
+  b++;
+  RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+#else
  int i;
+  /* Luma: blocks 0-15, four at a time. */
  for (i = 0; i < 16; i += 4) {
    BLOCKD *b = &xd->block[i];
-    vp8_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+    RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
  }
+  /* Chroma: blocks 16-23, two at a time. */
  for (i = 16; i < 24; i += 2) {
    BLOCKD *b = &xd->block[i];
-    vp8_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+    RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
  }
+#endif
}
--- /dev/null
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef __INC_RECON_H
+#define __INC_RECON_H
+
+#include "blockd.h"
+
+/* Signature templates for the reconstruction entry points declared below.
+ * Declaring externs and function-pointer typedefs through the same macro
+ * guarantees the two stay in sync. */
+#define prototype_copy_block(sym) \
+  void sym(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch)
+
+#define prototype_recon_block(sym) \
+  void sym(unsigned char *pred, short *diff, unsigned char *dst, int pitch)
+
+#define prototype_recon_macroblock(sym) \
+  void sym(const struct vp8_recon_rtcd_vtable *rtcd, MACROBLOCKD *xd)
+
+#define prototype_build_intra_predictors(sym) \
+  void sym(MACROBLOCKD *xd)
+
+#define prototype_intra4x4_predict(sym) \
+  void sym(BLOCKD *x, int b_mode, unsigned char *predictor)
+
+#if CONFIG_COMP_INTRA_PRED
+#define prototype_comp_intra4x4_predict(sym) \
+  void sym(BLOCKD *x, int b_mode, int mode2, unsigned char *predictor)
+#endif
+
+struct vp8_recon_rtcd_vtable;
+
+/* Platform headers may #undef and remap the vp8_recon_* default
+ * bindings defined below (used by the non-RTCD RECON_INVOKE path). */
+#if ARCH_X86 || ARCH_X86_64
+#include "x86/recon_x86.h"
+#endif
+
+#if ARCH_ARM
+#include "arm/recon_arm.h"
+#endif
+
+/* Default (C reference) bindings for the copy / average / reconstruct
+ * entry points.  When CONFIG_RUNTIME_CPU_DETECT is off, RECON_INVOKE
+ * expands directly to these vp8_recon_* names, so every vtable member
+ * must have a default here (platform headers above may remap them). */
+
+/* copy16x16 default: required because x86/recon_x86.h #undefs and remaps
+ * vp8_recon_copy16x16 for SSE2; without this default the non-RTCD build
+ * on other targets would reference an undefined symbol. */
+#ifndef vp8_recon_copy16x16
+#define vp8_recon_copy16x16 vp8_copy_mem16x16_c
+#endif
+extern prototype_copy_block(vp8_recon_copy16x16);
+
+#ifndef vp8_recon_copy8x8
+#define vp8_recon_copy8x8 vp8_copy_mem8x8_c
+#endif
+extern prototype_copy_block(vp8_recon_copy8x8);
+
+#ifndef vp8_recon_avg16x16
+#define vp8_recon_avg16x16 vp8_avg_mem16x16_c
+#endif
+extern prototype_copy_block(vp8_recon_avg16x16);
+
+#ifndef vp8_recon_avg8x8
+#define vp8_recon_avg8x8 vp8_avg_mem8x8_c
+#endif
+extern prototype_copy_block(vp8_recon_avg8x8);
+
+#ifndef vp8_recon_copy8x4
+#define vp8_recon_copy8x4 vp8_copy_mem8x4_c
+#endif
+extern prototype_copy_block(vp8_recon_copy8x4);
+
+#ifndef vp8_recon_recon
+#define vp8_recon_recon vp8_recon_b_c
+#endif
+extern prototype_recon_block(vp8_recon_recon);
+
+#ifndef vp8_recon_recon_uv
+#define vp8_recon_recon_uv vp8_recon_uv_b_c
+#endif
+extern prototype_recon_block(vp8_recon_recon_uv);
+
+#ifndef vp8_recon_recon2
+#define vp8_recon_recon2 vp8_recon2b_c
+#endif
+extern prototype_recon_block(vp8_recon_recon2);
+
+#ifndef vp8_recon_recon4
+#define vp8_recon_recon4 vp8_recon4b_c
+#endif
+extern prototype_recon_block(vp8_recon_recon4);
+
+#ifndef vp8_recon_recon_mb
+#define vp8_recon_recon_mb vp8_recon_mb_c
+#endif
+extern prototype_recon_macroblock(vp8_recon_recon_mb);
+
+#ifndef vp8_recon_recon_mby
+#define vp8_recon_recon_mby vp8_recon_mby_c
+#endif
+extern prototype_recon_macroblock(vp8_recon_recon_mby);
+
+/* Default bindings for the intra prediction builders.  The *_s variants
+ * write predictions straight into the frame buffer rather than the
+ * per-macroblock predictor buffer. */
+#ifndef vp8_recon_build_intra_predictors_sby_s
+#define vp8_recon_build_intra_predictors_sby_s vp8_build_intra_predictors_sby_s
+#endif
+extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sby_s);
+
+#ifndef vp8_recon_build_intra_predictors_mby
+#define vp8_recon_build_intra_predictors_mby vp8_build_intra_predictors_mby
+#endif
+extern prototype_build_intra_predictors\
+(vp8_recon_build_intra_predictors_mby);
+
+#if CONFIG_COMP_INTRA_PRED
+#ifndef vp8_recon_build_comp_intra_predictors_mby
+#define vp8_recon_build_comp_intra_predictors_mby vp8_build_comp_intra_predictors_mby
+#endif
+extern prototype_build_intra_predictors\
+(vp8_recon_build_comp_intra_predictors_mby);
+#endif
+
+/* NOTE(review): the intra8x8 builders below have no matching member in
+ * vp8_recon_rtcd_vtable, so they are reachable only through the non-RTCD
+ * macro expansion — confirm this is intentional. */
+#ifndef vp8_recon_build_intra8x8_predictors_mby
+#define vp8_recon_build_intra8x8_predictors_mby vp8_build_intra8x8_predictors_mby
+#endif
+extern prototype_build_intra_predictors\
+(vp8_recon_build_intra8x8_predictors_mby);
+
+#ifndef vp8_recon_build_intra_predictors_mby_s
+#define vp8_recon_build_intra_predictors_mby_s vp8_build_intra_predictors_mby_s
+#endif
+extern prototype_build_intra_predictors\
+(vp8_recon_build_intra_predictors_mby_s);
+
+#ifndef vp8_recon_build_intra_predictors_sbuv_s
+#define vp8_recon_build_intra_predictors_sbuv_s vp8_build_intra_predictors_sbuv_s
+#endif
+extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sbuv_s);
+
+#ifndef vp8_recon_build_intra_predictors_mbuv
+#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv
+#endif
+extern prototype_build_intra_predictors\
+(vp8_recon_build_intra_predictors_mbuv);
+
+#ifndef vp8_recon_build_intra8x8_predictors_mbuv
+#define vp8_recon_build_intra8x8_predictors_mbuv vp8_build_intra8x8_predictors_mbuv
+#endif
+extern prototype_build_intra_predictors\
+(vp8_recon_build_intra8x8_predictors_mbuv);
+
+#ifndef vp8_recon_build_intra_predictors_mbuv_s
+#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s
+#endif
+extern prototype_build_intra_predictors\
+(vp8_recon_build_intra_predictors_mbuv_s);
+
+#if CONFIG_COMP_INTRA_PRED
+#ifndef vp8_recon_build_comp_intra_predictors_mbuv
+#define vp8_recon_build_comp_intra_predictors_mbuv vp8_build_comp_intra_predictors_mbuv
+#endif
+extern prototype_build_intra_predictors\
+(vp8_recon_build_comp_intra_predictors_mbuv);
+#endif
+
+#ifndef vp8_recon_intra4x4_predict
+#define vp8_recon_intra4x4_predict vp8_intra4x4_predict
+#endif
+extern prototype_intra4x4_predict\
+(vp8_recon_intra4x4_predict);
+
+#if CONFIG_COMP_INTRA_PRED
+#ifndef vp8_recon_comp_intra4x4_predict
+#define vp8_recon_comp_intra4x4_predict vp8_comp_intra4x4_predict
+#endif
+extern prototype_comp_intra4x4_predict\
+(vp8_recon_comp_intra4x4_predict);
+#endif
+
+/* The 8x8 and uv4x4 predictors reuse the 4x4 prototype macros because
+ * their C signatures are identical (BLOCKD *, mode, predictor). */
+#ifndef vp8_recon_intra8x8_predict
+#define vp8_recon_intra8x8_predict vp8_intra8x8_predict
+#endif
+extern prototype_intra4x4_predict\
+(vp8_recon_intra8x8_predict);
+
+#if CONFIG_COMP_INTRA_PRED
+#ifndef vp8_recon_comp_intra8x8_predict
+#define vp8_recon_comp_intra8x8_predict vp8_comp_intra8x8_predict
+#endif
+extern prototype_comp_intra4x4_predict\
+(vp8_recon_comp_intra8x8_predict);
+#endif
+
+#ifndef vp8_recon_intra_uv4x4_predict
+#define vp8_recon_intra_uv4x4_predict vp8_intra_uv4x4_predict
+#endif
+extern prototype_intra4x4_predict\
+(vp8_recon_intra_uv4x4_predict);
+
+#if CONFIG_COMP_INTRA_PRED
+#ifndef vp8_recon_comp_intra_uv4x4_predict
+#define vp8_recon_comp_intra_uv4x4_predict vp8_comp_intra_uv4x4_predict
+#endif
+extern prototype_comp_intra4x4_predict\
+(vp8_recon_comp_intra_uv4x4_predict);
+#endif
+
+/* Function-pointer types matching the prototype macros above. */
+typedef prototype_copy_block((*vp8_copy_block_fn_t));
+typedef prototype_recon_block((*vp8_recon_fn_t));
+typedef prototype_recon_macroblock((*vp8_recon_mb_fn_t));
+typedef prototype_build_intra_predictors((*vp8_build_intra_pred_fn_t));
+typedef prototype_intra4x4_predict((*vp8_intra4x4_pred_fn_t));
+#if CONFIG_COMP_INTRA_PRED
+typedef prototype_comp_intra4x4_predict((*vp8_comp_intra4x4_pred_fn_t));
+#endif
+/* Runtime-CPU-detection dispatch table for the reconstruction module.
+ * Populated with C defaults at init and overwritten with platform
+ * implementations (MMX/SSE2/NEON/...) when available. */
+typedef struct vp8_recon_rtcd_vtable {
+  vp8_copy_block_fn_t copy16x16;
+  vp8_copy_block_fn_t copy8x8;
+  vp8_copy_block_fn_t avg16x16;
+  vp8_copy_block_fn_t avg8x8;
+  vp8_copy_block_fn_t copy8x4;
+  vp8_recon_fn_t recon;
+  vp8_recon_fn_t recon_uv;
+  vp8_recon_fn_t recon2;
+  vp8_recon_fn_t recon4;
+  vp8_recon_mb_fn_t recon_mb;
+  vp8_recon_mb_fn_t recon_mby;
+#if CONFIG_SUPERBLOCKS
+  vp8_build_intra_pred_fn_t build_intra_predictors_sby_s;
+#endif
+  vp8_build_intra_pred_fn_t build_intra_predictors_mby_s;
+  vp8_build_intra_pred_fn_t build_intra_predictors_mby;
+#if CONFIG_COMP_INTRA_PRED
+  vp8_build_intra_pred_fn_t build_comp_intra_predictors_mby;
+#endif
+#if CONFIG_SUPERBLOCKS
+  vp8_build_intra_pred_fn_t build_intra_predictors_sbuv_s;
+#endif
+  vp8_build_intra_pred_fn_t build_intra_predictors_mbuv_s;
+  vp8_build_intra_pred_fn_t build_intra_predictors_mbuv;
+#if CONFIG_COMP_INTRA_PRED
+  vp8_build_intra_pred_fn_t build_comp_intra_predictors_mbuv;
+#endif
+  vp8_intra4x4_pred_fn_t intra4x4_predict;
+#if CONFIG_COMP_INTRA_PRED
+  vp8_comp_intra4x4_pred_fn_t comp_intra4x4_predict;
+#endif
+  vp8_intra4x4_pred_fn_t intra8x8_predict;
+#if CONFIG_COMP_INTRA_PRED
+  vp8_comp_intra4x4_pred_fn_t comp_intra8x8_predict;
+#endif
+  vp8_intra4x4_pred_fn_t intra_uv4x4_predict;
+#if CONFIG_COMP_INTRA_PRED
+  vp8_comp_intra4x4_pred_fn_t comp_intra_uv4x4_predict;
+#endif
+} vp8_recon_rtcd_vtable_t;
+
+/* With RTCD enabled, dispatch through the vtable; otherwise expand
+ * directly to the vp8_recon_* name bound by the #define blocks above. */
+#if CONFIG_RUNTIME_CPU_DETECT
+#define RECON_INVOKE(ctx,fn) (ctx)->fn
+#else
+#define RECON_INVOKE(ctx,fn) vp8_recon_##fn
+#endif
+
+void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
+                          MACROBLOCKD *xd);
+
+#if CONFIG_SUPERBLOCKS
+extern void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd,
+                              MACROBLOCKD *xd, uint8_t *dst);
+extern void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd,
+                               MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst);
+#endif
+
+#endif
#include "vpx_ports/config.h"
#include "vpx/vpx_integer.h"
+#include "recon.h"
#include "subpixel.h"
#include "blockd.h"
#include "reconinter.h"
xd->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
- vp8_copy_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
+ RECON_INVOKE(&xd->rtcd->recon, copy8x8)
+ (ptr, d->pre_stride, pred_ptr, pitch);
}
}
xd->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
- vp8_avg_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
+ RECON_INVOKE(&xd->rtcd->recon, avg8x8)
+ (ptr, d->pre_stride, pred_ptr, pitch);
}
}
xd->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
(mv.as_mv.row & 7) << 1, pred_ptr, pitch);
} else {
- vp8_copy_mem8x4(ptr, d->pre_stride, pred_ptr, pitch);
+ RECON_INVOKE(&xd->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
}
}
xd->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15,
_o16x16mv.as_mv.row & 15, dst_v, dst_uvstride);
} else {
- vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
- vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
+ RECON_INVOKE(&xd->rtcd->recon, copy8x8)
+ (uptr, pre_stride, dst_u, dst_uvstride);
+ RECON_INVOKE(&xd->rtcd->recon, copy8x8)
+ (vptr, pre_stride, dst_v, dst_uvstride);
}
}
} else {
// TODO Needs to AVERAGE with the dst_y
// For now, do not apply the prediction filter in these cases!
- vp8_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
+ RECON_INVOKE(&xd->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y,
+ dst_ystride);
}
} else
#endif // CONFIG_PRED_FILTER
xd->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
(mv_row & 7) << 1, dst_y, dst_ystride);
} else {
- vp8_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
+ RECON_INVOKE(&xd->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y,
+ dst_ystride);
}
}
}
} else {
// TODO Needs to AVERAGE with the dst_[u|v]
// For now, do not apply the prediction filter here!
- vp8_avg_mem8x8(pSrc, pre_stride, pDst, dst_uvstride);
+ RECON_INVOKE(&xd->rtcd->recon, avg8x8)(pSrc, pre_stride, pDst,
+ dst_uvstride);
}
// V
xd->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15,
omv_row & 15, dst_v, dst_uvstride);
} else {
- vp8_avg_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
- vp8_avg_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
+ RECON_INVOKE(&xd->rtcd->recon, avg8x8)(uptr, pre_stride, dst_u, dst_uvstride);
+ RECON_INVOKE(&xd->rtcd->recon, avg8x8)(vptr, pre_stride, dst_v, dst_uvstride);
}
}
#ifndef __INC_RECONINTER_H
#define __INC_RECONINTER_H
+#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
+#endif
extern void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
unsigned char *dst_y,
#include <stdio.h>
#include "vpx_ports/config.h"
-#include "vpx_rtcd.h"
+#include "recon.h"
#include "reconintra.h"
#include "vpx_mem/vpx_mem.h"
}
}
-void vp8_recon_intra_mbuv(MACROBLOCKD *xd) {
+/* Reconstruct the chroma (U and V) planes of an intra-coded macroblock.
+ * Blocks 16-23 are the eight 4x4 chroma blocks; they are reconstructed
+ * two at a time (one 8x4 row) through the recon2 vtable entry. */
+void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
+                          MACROBLOCKD *xd) {
  int i;
  for (i = 16; i < 24; i += 2) {
    BLOCKD *b = &xd->block[i];
-    vp8_recon2b(b->predictor, b->diff,*(b->base_dst) + b->dst, b->dst_stride);
+    RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff,
+                               *(b->base_dst) + b->dst, b->dst_stride);
  }
}
#include "vpx_ports/config.h"
+#include "recon.h"
#include "vpx_mem/vpx_mem.h"
#include "reconintra.h"
-#include "vpx_rtcd.h"
-void vp8_intra4x4_predict_c(BLOCKD *x, int b_mode,
- unsigned char *predictor) {
+void vp8_intra4x4_predict(BLOCKD *x,
+ int b_mode,
+ unsigned char *predictor) {
int i, r, c;
unsigned char *Above = *(x->base_dst) + x->dst - x->dst_stride;
}
#if CONFIG_COMP_INTRA_PRED
-void vp8_comp_intra4x4_predict_c(BLOCKD *x,
+void vp8_comp_intra4x4_predict(BLOCKD *x,
int b_mode, int b_mode2,
unsigned char *out_predictor) {
unsigned char predictor[2][4 * 16];
common_forward_decls() {
cat <<EOF
-#include "vp8/common/blockd.h"
-
-struct loop_filter_info;
-
-/* Encoder forward decls */
-struct variance_vtable;
-union int_mv;
-struct yv12_buffer_config;
+struct blockd;
EOF
}
forward_decls common_forward_decls
+
+
prototype void vp8_filter_block2d_4x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x8_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
-
-
-prototype void vp8_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
-specialize vp8_copy_mem8x8 mmx media neon dspr2
-vp8_copy_mem8x8_media=vp8_copy_mem8x8_v6
-vp8_copy_mem8x8_dspr2=vp8_copy_mem8x8_dspr2
-prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
-prototype void vp8_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
-prototype void vp8_avg_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
-specialize vp8_avg_mem16x16
-prototype void vp8_avg_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
-specialize vp8_avg_mem8x8
-prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
-specialize vp8_copy_mem8x4 mmx media neon dspr2
-vp8_copy_mem8x4_media=vp8_copy_mem8x4_v6
-vp8_copy_mem8x4_dspr2=vp8_copy_mem8x4_dspr2
-prototype void vp8_recon_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
-specialize vp8_recon_b
-prototype void vp8_recon_uv_b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
-specialize vp8_recon_uv_b
-prototype void vp8_recon2b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
-specialize vp8_recon2b
-prototype void vp8_recon4b "unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride"
-specialize vp8_recon4b
-prototype void vp8_recon_mb "MACROBLOCKD *x"
-specialize vp8_recon_mb
-prototype void vp8_recon_mby "MACROBLOCKD *x"
-specialize vp8_recon_mby
-prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
-prototype void vp8_build_intra_predictors_sby_s "MACROBLOCKD *x"
-specialize vp8_build_intra_predictors_sby_s;
-prototype void vp8_build_intra_predictors_sbuv_s "MACROBLOCKD *x"
-specialize vp8_build_intra_predictors_sbuv_s;
-prototype void vp8_build_intra_predictors_mby "MACROBLOCKD *x"
-specialize vp8_build_intra_predictors_mby;
-prototype void vp8_build_comp_intra_predictors_mby "MACROBLOCKD *x"
-specialize vp8_build_comp_intra_predictors_mby;
-prototype void vp8_build_intra_predictors_mby_s "MACROBLOCKD *x"
-specialize vp8_build_intra_predictors_mby_s;
-prototype void vp8_build_intra_predictors_mbuv "MACROBLOCKD *x"
-specialize vp8_build_intra_predictors_mbuv;
-prototype void vp8_build_intra_predictors_mbuv_s "MACROBLOCKD *x"
-specialize vp8_build_intra_predictors_mbuv_s;
-prototype void vp8_build_comp_intra_predictors_mbuv "MACROBLOCKD *x"
-specialize vp8_build_comp_intra_predictors_mbuv;
-prototype void vp8_intra4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
-specialize vp8_intra4x4_predict;
-prototype void vp8_comp_intra4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
-specialize vp8_comp_intra4x4_predict;
-prototype void vp8_intra8x8_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
-specialize vp8_intra8x8_predict;
-prototype void vp8_comp_intra8x8_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
-specialize vp8_comp_intra8x8_predict;
-prototype void vp8_intra_uv4x4_predict "BLOCKD *x, int b_mode, unsigned char *predictor"
-specialize vp8_intra_uv4x4_predict;
-prototype void vp8_comp_intra_uv4x4_predict "BLOCKD *x, int b_mode, int second_mode, unsigned char *predictor"
-specialize vp8_comp_intra_uv4x4_predict;
\ No newline at end of file
*/
#include "vpx_ports/config.h"
+#include "vp8/common/recon.h"
+#include "recon_x86.h"
#include "vpx_mem/vpx_mem.h"
-#include "vp8/common/blockd.h"
#define build_intra_predictors_mbuv_prototype(sym) \
void sym(unsigned char *dst, int dst_stride, \
--- /dev/null
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef RECON_X86_H
+#define RECON_X86_H
+
+/* Note:
+ *
+ * This platform is commonly built for runtime CPU detection. If you modify
+ * any of the function mappings present in this file, be sure to also update
+ * them in the function pointer initialization code
+ */
+
+/* x86 implementations of the recon entry points; each #if block declares
+ * the optimized functions and, for static (non-RTCD) builds, remaps the
+ * vp8_recon_* defaults from recon.h onto them. */
+#if HAVE_MMX
+extern prototype_recon_block(vp8_recon_b_mmx);
+extern prototype_copy_block(vp8_copy_mem8x8_mmx);
+extern prototype_copy_block(vp8_copy_mem8x4_mmx);
+extern prototype_copy_block(vp8_copy_mem16x16_mmx);
+
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef vp8_recon_recon
+#define vp8_recon_recon vp8_recon_b_mmx
+
+#undef vp8_recon_copy8x8
+#define vp8_recon_copy8x8 vp8_copy_mem8x8_mmx
+
+/* NOTE(review): vp8_copy_mem16x16_mmx is declared above but not mapped
+ * here (SSE2 maps copy16x16 instead) — confirm this is intentional. */
+#undef vp8_recon_copy8x4
+#define vp8_recon_copy8x4 vp8_copy_mem8x4_mmx
+
+#endif
+#endif
+
+#if HAVE_SSE2
+extern prototype_recon_block(vp8_recon2b_sse2);
+extern prototype_recon_block(vp8_recon4b_sse2);
+extern prototype_copy_block(vp8_copy_mem16x16_sse2);
+extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_sse2);
+extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_s_sse2);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef vp8_recon_recon2
+#define vp8_recon_recon2 vp8_recon2b_sse2
+
+#undef vp8_recon_recon4
+#define vp8_recon_recon4 vp8_recon4b_sse2
+
+#undef vp8_recon_copy16x16
+#define vp8_recon_copy16x16 vp8_copy_mem16x16_sse2
+
+#undef vp8_recon_build_intra_predictors_mbuv
+#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv_sse2
+
+#undef vp8_recon_build_intra_predictors_mbuv_s
+#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_sse2
+
+#endif
+#endif
+
+#if HAVE_SSSE3
+extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_ssse3);
+extern prototype_build_intra_predictors(vp8_build_intra_predictors_mbuv_s_ssse3);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef vp8_recon_build_intra_predictors_mbuv
+#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv_ssse3
+
+#undef vp8_recon_build_intra_predictors_mbuv_s
+#define vp8_recon_build_intra_predictors_mbuv_s vp8_build_intra_predictors_mbuv_s_ssse3
+
+#endif
+#endif
+#endif
#include "vp8/common/g_common.h"
#include "vp8/common/subpixel.h"
#include "vp8/common/loopfilter.h"
+#include "vp8/common/recon.h"
#include "vp8/common/idct.h"
#include "vp8/common/pragmas.h"
#include "vp8/common/onyxc_int.h"
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_mmx;
// rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_mmx;
+ rtcd->recon.recon = vp8_recon_b_mmx;
+ rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
+ rtcd->recon.copy8x4 = vp8_copy_mem8x4_mmx;
+
/* Disabled due to unsupported enhanced interpolation/high_prec mv
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_mmx;
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_mmx;
#if HAVE_SSE2
if (flags & HAS_SSE2) {
+ rtcd->recon.recon2 = vp8_recon2b_sse2;
+ rtcd->recon.recon4 = vp8_recon4b_sse2;
+ /* these are disabled because of unsupported diagonal pred modes
+ rtcd->recon.build_intra_predictors_mbuv =
+ vp8_build_intra_predictors_mbuv_sse2;
+ rtcd->recon.build_intra_predictors_mbuv_s =
+ vp8_build_intra_predictors_mbuv_s_sse2;
+ */
// rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_sse2;
#include "vp8/common/header.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/reconintra4x4.h"
+#include "vp8/common/recon.h"
#include "vp8/common/reconinter.h"
#include "dequantize.h"
#include "detokenize.h"
#include "vp8/common/seg_common.h"
#include "vp8/common/entropy.h"
-#include "vpx_rtcd.h"
#include <assert.h>
#include <stdio.h>
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
- vp8_build_intra_predictors_sbuv_s(xd);
- vp8_build_intra_predictors_sby_s(xd);
+ RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sbuv_s)(xd);
+ RECON_INVOKE(&pbi->common.rtcd.recon,
+ build_intra_predictors_sby_s)(xd);
} else {
#endif
- vp8_build_intra_predictors_mbuv_s(xd);
- vp8_build_intra_predictors_mby_s(xd);
+ RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv_s)(xd);
+ RECON_INVOKE(&pbi->common.rtcd.recon,
+ build_intra_predictors_mby_s)(xd);
#if CONFIG_SUPERBLOCKS
}
#endif
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
- vp8_build_intra_predictors_sby_s(xd);
- vp8_build_intra_predictors_sbuv_s(xd);
+ RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sby_s)(xd);
+ RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sbuv_s)(xd);
} else
#endif
if (mode != I8X8_PRED) {
- vp8_build_intra_predictors_mbuv(xd);
+ RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv)(xd);
if (mode != B_PRED) {
- vp8_build_intra_predictors_mby(xd);
+ RECON_INVOKE(&pbi->common.rtcd.recon,
+ build_intra_predictors_mby)(xd);
}
#if 0
// Intra-modes requiring recon data from top-right
b = &xd->block[ib];
i8x8mode = b->bmi.as_mode.first;
- vp8_intra8x8_predict(b, i8x8mode, b->predictor);
+ RECON_INVOKE(RTCD_VTABLE(recon), intra8x8_predict)(b, i8x8mode,
+ b->predictor);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
#if CONFIG_HYBRIDTRANSFORM8X8
}
b = &xd->block[16 + i];
- vp8_intra_uv4x4_predict(b, i8x8mode, b->predictor);
+ RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)(b, i8x8mode,
+ b->predictor);
DEQUANT_INVOKE(&pbi->dequant, idct_add)(b->qcoeff, b->dequant,
b->predictor,
*(b->base_dst) + b->dst, 8,
b->dst_stride);
b = &xd->block[20 + i];
- vp8_intra_uv4x4_predict(b, i8x8mode, b->predictor);
+ RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)(b, i8x8mode,
+ b->predictor);
DEQUANT_INVOKE(&pbi->dequant, idct_add)(b->qcoeff, b->dequant,
b->predictor,
*(b->base_dst) + b->dst, 8,
if (b_mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
- vp8_intra4x4_predict(b, b_mode, b->predictor);
+ RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
+ (b, b_mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- vp8_comp_intra4x4_predict(b, b_mode, b_mode2, b->predictor);
+ RECON_INVOKE(RTCD_VTABLE(recon), comp_intra4x4_predict)
+ (b, b_mode, b_mode2, b->predictor);
}
#endif
#include "vpx_ports/config.h"
+#include "vp8/common/recon.h"
#include "vp8/common/reconintra.h"
#include "vpx_mem/vpx_mem.h"
#include "onyxd_int.h"
extern prototype_fdct(vp8_fdct_short8x8);
#ifndef vp8_fhaar_short2x2
-#define vp8_fdct_haar_short2x2 vp8_fhaar_short2x2
#define vp8_fhaar_short2x2 vp8_short_fhaar2x2_c
#endif
extern prototype_fdct(vp8_fhaar_short2x2);
#include "vp8/common/findnearmv.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/seg_common.h"
-#include "vpx_rtcd.h"
#include <stdio.h>
#include <math.h>
#include <limits.h>
vp8_update_zbin_extra(cpi, x);
}
- vp8_build_intra_predictors_sby_s(&x->e_mbd);
- vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sby_s)(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sbuv_s)(&x->e_mbd);
assert(x->e_mbd.mode_info_context->mbmi.txfm_size == TX_8X8);
for (n = 0; n < 4; n++)
}
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_recon_mby_s_c(&x->e_mbd, dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
- vp8_recon_mbuv_s_c(&x->e_mbd,
+ vp8_recon_mby_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
+ dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
+ vp8_recon_mbuv_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
set_pred_flag(xd, PRED_REF, ref_pred_flag);
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
- vp8_build_intra_predictors_sby_s(&x->e_mbd);
- vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sby_s)(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_sbuv_s)(&x->e_mbd);
} else {
int ref_fb_idx;
}
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_recon_mby_s_c( &x->e_mbd,
+ vp8_recon_mby_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
- vp8_recon_mbuv_s_c(&x->e_mbd,
+ vp8_recon_mbuv_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
#include "vp8/common/reconintra4x4.h"
#include "encodemb.h"
#include "vp8/common/invtrans.h"
+#include "vp8/common/recon.h"
#include "dct.h"
#include "vp8/common/g_common.h"
#include "encodeintra.h"
-#include "vpx_rtcd.h"
#if CONFIG_RUNTIME_CPU_DETECT
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
- vp8_intra4x4_predict(b, b->bmi.as_mode.first, b->predictor);
+ RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
+ (b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- vp8_comp_intra4x4_predict(b, b->bmi.as_mode.first, b->bmi.as_mode.second,
- b->predictor);
+ RECON_INVOKE(&rtcd->common->recon, comp_intra4x4_predict)
+ (b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
}
#endif
vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
#endif
- vp8_recon_b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
#endif
- vp8_build_intra_predictors_mby(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
else
- vp8_build_comp_intra_predictors_mby(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(&x->e_mbd);
#endif
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
else
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_recon_mby(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, recon_mby)
+ (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- vp8_build_intra_predictors_mbuv(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
- vp8_build_comp_intra_predictors_mbuv(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(&x->e_mbd);
}
#endif
else
vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_recon_intra_mbuv(&x->e_mbd);
+ vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- vp8_intra8x8_predict(b, b->bmi.as_mode.first, b->predictor);
+ RECON_INVOKE(&rtcd->common->recon, intra8x8_predict)
+ (b, b->bmi.as_mode.first, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- vp8_comp_intra8x8_predict(b, b->bmi.as_mode.first, b->bmi.as_mode.second,
- b->predictor);
+ RECON_INVOKE(&rtcd->common->recon, comp_intra8x8_predict)
+ (b, b->bmi.as_mode.first, b->bmi.as_mode.second, b->predictor);
}
#endif
#if CONFIG_COMP_INTRA_PRED
if (second == -1) {
#endif
- vp8_intra_uv4x4_predict(b, mode, b->predictor);
+ RECON_INVOKE(&rtcd->common->recon, intra_uv4x4_predict)
+ (b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- vp8_comp_intra_uv4x4_predict(b, mode, second, b->predictor);
+ RECON_INVOKE(&rtcd->common->recon, comp_intra_uv4x4_predict)
+ (b, mode, second, b->predictor);
}
#endif
vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 16);
- vp8_recon_uv_b_c(b->predictor,b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
+ b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#include "quantize.h"
#include "tokenize.h"
#include "vp8/common/invtrans.h"
+#include "vp8/common/recon.h"
#include "vp8/common/reconintra.h"
#include "dct.h"
#include "vpx_mem/vpx_mem.h"
#endif
}
- vp8_recon_mb(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, recon_mb)
+ (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
#ifdef ENC_DEBUG
if (enc_debug) {
int i, j, k;
else
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_recon_mby(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, recon_mby)
+ (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
- cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
- cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
- cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
- cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
#if CONFIG_INTERNAL_STATS
cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
#endif
#endif
+ cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
#if ARCH_X86 || ARCH_X86_64
vp8_arch_arm_encoder_init(cpi);
#endif
+ cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
+ cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
+ cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
+ cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
+ cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
}
unsigned int err;
xd->mode_info_context->mbmi.mode = mode;
- vp8_build_intra_predictors_mby(xd);
+ RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mby)(xd);
// VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
(xd->predictor, 16,
{
double frame_psnr2, frame_ssim2 = 0;
double weight = 0;
-#if CONFIG_POSTPROC
+
vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
-#endif
vp8_clear_system_state();
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
- vp8_intra4x4_predict(b, mode, b->predictor);
+ RECON_INVOKE(&cpi->rtcd.common->recon, intra4x4_predict)
+ (b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
- vp8_comp_intra4x4_predict(b, mode, mode2, b->predictor);
+ RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra4x4_predict)
+ (b, mode, mode2, b->predictor);
rate += bmode_costs[mode2];
}
#endif
IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
#endif
- vp8_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
+ RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
return best_rd;
}
/* Y Search for 32x32 intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
x->e_mbd.mode_info_context->mbmi.mode = mode;
- vp8_build_intra_predictors_sby_s(&x->e_mbd);
+ RECON_INVOKE(&cpi->common.rtcd.recon,
+ build_intra_predictors_sby_s)(&x->e_mbd);
super_block_yrd_8x8(x, &this_rate_tokenonly,
&this_distortion, IF_RTCD(&cpi->rtcd), &s);
mbmi->second_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- vp8_build_intra_predictors_mby(&x->e_mbd);
+ RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
+ (&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
- vp8_build_comp_intra_predictors_mby(&x->e_mbd);
+ RECON_INVOKE(&cpi->common.rtcd.recon, build_comp_intra_predictors_mby)
+ (&x->e_mbd);
}
#endif
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- vp8_intra8x8_predict(b, mode, b->predictor);
+ RECON_INVOKE(&cpi->rtcd.common->recon, intra8x8_predict)
+ (b, mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
} else {
continue; // i.e. disable for now
- vp8_comp_intra8x8_predict(b, mode, mode2, b->predictor);
+ RECON_INVOKE(&cpi->rtcd.common->recon, comp_intra8x8_predict)
+ (b, mode, mode2, b->predictor);
}
#endif
mbmi->second_uv_mode = mode2;
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- vp8_build_intra_predictors_mbuv(&x->e_mbd);
+ RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
+ (&x->e_mbd);
#if CONFIG_COMP_INTRA_PRED
} else {
continue;
- vp8_build_comp_intra_predictors_mbuv(&x->e_mbd);
+ RECON_INVOKE(&cpi->rtcd.common->recon, build_comp_intra_predictors_mbuv)
+ (&x->e_mbd);
}
#endif
int64_t this_rd;
mbmi->uv_mode = mode;
- vp8_build_intra_predictors_mbuv(&x->e_mbd);
+ RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
+ (&x->e_mbd);
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
x->src.uv_stride);
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
- vp8_build_intra_predictors_sbuv_s(&x->e_mbd);
+ RECON_INVOKE(&cpi->rtcd.common->recon,
+ build_intra_predictors_sbuv_s)(&x->e_mbd);
super_block_uvrd_8x8(x, &this_rate_tokenonly,
&this_distortion, IF_RTCD(&cpi->rtcd), &s);
case D63_PRED:
mbmi->ref_frame = INTRA_FRAME;
// FIXME compound intra prediction
- vp8_build_intra_predictors_mby(&x->e_mbd);
+ RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
+ (&x->e_mbd);
macro_block_yrd(cpi, x, &rate_y, &distortion, &skippable, txfm_cache);
#if CONFIG_HYBRIDTRANSFORM16X16
rd_txtype = x->e_mbd.block[0].bmi.as_mode.tx_type;
(omv_col & 15), (omv_row & 15), &pred[320], 8);
}
else {
- vp8_copy_mem8x8(uptr, stride, &pred[256], 8);
- vp8_copy_mem8x8(vptr, stride, &pred[320], 8);
+ RECON_INVOKE(&xd->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
+ RECON_INVOKE(&xd->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
}
}
void vp8_temporal_filter_apply_c
VP8_CX_SRCS-yes += encoder/tokenize.c
VP8_CX_SRCS-yes += encoder/treewriter.c
VP8_CX_SRCS-yes += encoder/variance_c.c
-ifeq ($(CONFIG_POSTPROC),yes)
VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h
VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
-endif
VP8_CX_SRCS-yes += encoder/temporal_filter.c
VP8_CX_SRCS-yes += encoder/temporal_filter.h
VP8_CX_SRCS-yes += encoder/mbgraph.c