#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
rtcd->idct.idct16x16 = vp8_short_idct16x16_c;
#endif
- rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
rtcd->recon.avg16x16 = vp8_avg_mem16x16_c;
rtcd->recon.avg8x8 = vp8_avg_mem8x8_c;
#include "vpx_config.h"
#include "vpx/internal/vpx_codec_internal.h"
+#include "vpx_rtcd.h"
#include "loopfilter.h"
#include "entropymv.h"
#include "entropy.h"
#include "arm/recon_arm.h"
#endif
-#ifndef vp8_recon_copy16x16
-#define vp8_recon_copy16x16 vp8_copy_mem16x16_c
-#endif
-extern prototype_copy_block(vp8_recon_copy16x16);
-
#ifndef vp8_recon_copy8x8
#define vp8_recon_copy8x8 vp8_copy_mem8x8_c
#endif
(ymv.as_mv.row & 7) << 1,
dst_y, dst_ystride);
} else {
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)
- (ptr, pre_stride, dst_y, dst_ystride);
+ vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
}
}
}
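/* For reference, a minimal sketch of the plain-C behaviour the direct
 * vp8_copy_mem16x16() call falls back to when no SIMD variant is selected:
 * copy 16 rows of 16 bytes. This helper is illustrative only; the tree's
 * vp8_copy_mem16x16_c may differ in detail. */
#include <string.h>

static void copy_mem16x16_sketch(unsigned char *src, int src_pitch,
                                 unsigned char *dst, int dst_pitch) {
  int r;
  for (r = 0; r < 16; r++) {
    memcpy(dst, src, 16);   /* one 16-byte row */
    src += src_pitch;
    dst += dst_pitch;
  }
}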
forward_decls common_forward_decls
+
+
prototype void vp8_filter_block2d_4x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x4_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
prototype void vp8_filter_block2d_8x8_8 "const unsigned char *src_ptr, const unsigned int src_stride, const short *HFilter_aligned16, const short *VFilter_aligned16, unsigned char *dst_ptr, unsigned int dst_stride"
specialize vp8_filter_block2d_8x8_8 sse4_1 sse2
specialize vp8_filter_block2d_16x16_8 sse4_1 sse2
fi
+
+
+#
+# RECON
+#
+prototype void vp8_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
+specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
+vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
+vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
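/* Illustration (not part of this change): roughly what the rtcd generator is
 * expected to emit for the prototype/specialize lines above. RTCD_EXTERN, the
 * CONFIG_RUNTIME_CPU_DETECT guard, and the sse2 default are assumptions about
 * the generator's output, kept here only to show how callers resolve
 * vp8_copy_mem16x16 without the old rtcd->recon.copy16x16 pointer. */
#ifndef RTCD_EXTERN
#define RTCD_EXTERN extern
#endif
void vp8_copy_mem16x16_c(unsigned char *src, int src_pitch,
                         unsigned char *dst, int dst_pitch);
void vp8_copy_mem16x16_sse2(unsigned char *src, int src_pitch,
                            unsigned char *dst, int dst_pitch);
#if CONFIG_RUNTIME_CPU_DETECT
/* runtime dispatch: a function pointer filled in when vpx_rtcd() runs */
RTCD_EXTERN void (*vp8_copy_mem16x16)(unsigned char *src, int src_pitch,
                                      unsigned char *dst, int dst_pitch);
#else
/* static dispatch: the best variant known at configure time */
#define vp8_copy_mem16x16 vp8_copy_mem16x16_sse2
#endif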
#undef vp8_recon_copy8x4
#define vp8_recon_copy8x4 vp8_copy_mem8x4_mmx
-#undef vp8_recon_copy16x16
-#define vp8_recon_copy16x16 vp8_copy_mem16x16_mmx
-
#endif
#endif
rtcd->recon.recon = vp8_recon_b_mmx;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_mmx;
rtcd->recon.copy8x4 = vp8_copy_mem8x4_mmx;
- rtcd->recon.copy16x16 = vp8_copy_mem16x16_mmx;
/* Disabled due to unsupported enhanced interpolation/high_prec mv
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_mmx;
if (flags & HAS_SSE2) {
rtcd->recon.recon2 = vp8_recon2b_sse2;
rtcd->recon.recon4 = vp8_recon4b_sse2;
- rtcd->recon.copy16x16 = vp8_copy_mem16x16_sse2;
/* these are disabled because of unsupported diagonal pred modes
rtcd->recon.build_intra_predictors_mbuv =
recon_yoffset += 16;
#endif
// Copy current mb to a buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
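  // (x->thismb is a packed 16x16 work buffer with stride 16, so later encode
  // stages can read the macroblock contiguously.)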
// measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
// Copy current MB to a work buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if 0 // FIXME
/* Copy current MB to a work buffer */
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
// Copy current MB to a work buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
- x->src.y_stride,
- x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
xd->left_available = (mb_col != 0);
// Copy current mb to a buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+ vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
// do intra 16x16 prediction
this_error = vp8_encode_intra(cpi, x, use_dc_pred);
xd->subpixel_predict16x16(yptr, stride,
(mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16);
} else {
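    // Whole-pel motion vector: no sub-pel interpolation is needed, so the
    // 16x16 block is copied directly.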
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
+ vp8_copy_mem16x16(yptr, stride, &pred[0], 16);
}
// U & V