RTCD: add variance functions
author    John Koleszar <jkoleszar@google.com>
          Fri, 13 Jan 2012 00:55:44 +0000 (16:55 -0800)
committer John Koleszar <jkoleszar@google.com>
          Mon, 30 Jan 2012 20:08:30 +0000 (12:08 -0800)
This commit continues the process of converting to the new RTCD
system.

Change-Id: Ie5c1aa480637e98dc3918fb562ff45c37a66c538

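The new RTCD (run-time CPU detection) system generates a single dispatch header from the prototype/specialize entries in rtcd_defs.sh, replacing the hand-maintained cpi->rtcd.variance vtable that this commit deletes. As a rough illustration only — the exact generated names and layout are assumptions, not part of this commit — each prototype line expands to declarations for every variant plus one dispatch symbol:

    /* Hypothetical excerpt of the generated header (names assumed). */
    unsigned int vp8_variance16x16_c(const unsigned char *src_ptr, int source_stride,
                                     const unsigned char *ref_ptr, int ref_stride,
                                     unsigned int *sse);
    unsigned int vp8_variance16x16_mmx(const unsigned char *src_ptr, int source_stride,
                                       const unsigned char *ref_ptr, int ref_stride,
                                       unsigned int *sse);
    unsigned int vp8_variance16x16_wmt(const unsigned char *src_ptr, int source_stride,
                                       const unsigned char *ref_ptr, int ref_stride,
                                       unsigned int *sse);
    /* With CONFIG_RUNTIME_CPU_DETECT this is a function pointer filled in once
     * at startup; otherwise it collapses to the best compile-time variant. */
    RTCD_EXTERN unsigned int (*vp8_variance16x16)(const unsigned char *src_ptr,
                                                  int source_stride,
                                                  const unsigned char *ref_ptr,
                                                  int ref_stride,
                                                  unsigned int *sse);

Callers then invoke vp8_variance16x16() directly, which is why the diffs below remove every VARIANCE_INVOKE(&cpi->rtcd.variance, ...) call site along with the vp8_variance_rtcd_vtable_t plumbing.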
20 files changed:
vp8/common/postproc.c
vp8/common/rtcd_defs.sh
vp8/encoder/arm/arm_csystemdependent.c
vp8/encoder/arm/variance_arm.c
vp8/encoder/arm/variance_arm.h [deleted file]
vp8/encoder/encodeframe.c
vp8/encoder/encodeintra.c
vp8/encoder/firstpass.c
vp8/encoder/generic/csystemdependent.c
vp8/encoder/onyx_if.c
vp8/encoder/onyx_int.h
vp8/encoder/pickinter.c
vp8/encoder/picklpf.c
vp8/encoder/rdopt.c
vp8/encoder/ssim.c
vp8/encoder/variance.h
vp8/encoder/x86/variance_x86.h [deleted file]
vp8/encoder/x86/x86_csystemdependent.c
vp8/vp8cx.mk
vp8/vp8cx_arm.mk

diff --git a/vp8/common/postproc.c b/vp8/common/postproc.c
index 8e2f683ae308540862535d589b241dc1fd50c224..0dc14b8bdea04ae85d06e24202293cc2009a81b6 100644 (file)
@@ -728,18 +728,18 @@ static void multiframe_quality_enhance_block
     unsigned int act, sse, sad, thr;
     if (blksize == 16)
     {
-        act = (vp8_variance_var16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
-        sad = (vp8_variance_sad16x16(y, y_stride, yd, yd_stride, 0)+128)>>8;
+        act = (vp8_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
+        sad = (vp8_sad16x16(y, y_stride, yd, yd_stride, 0)+128)>>8;
     }
     else if (blksize == 8)
     {
-        act = (vp8_variance_var8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
-        sad = (vp8_variance_sad8x8(y, y_stride, yd, yd_stride, 0)+32)>>6;
+        act = (vp8_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
+        sad = (vp8_sad8x8(y, y_stride, yd, yd_stride, 0)+32)>>6;
     }
     else
     {
-        act = (vp8_variance_var4x4(yd, yd_stride, VP8_ZEROS, 0, &sse)+8)>>4;
-        sad = (vp8_variance_sad4x4(y, y_stride, yd, yd_stride, 0)+8)>>4;
+        act = (vp8_variance4x4(yd, yd_stride, VP8_ZEROS, 0, &sse)+8)>>4;
+        sad = (vp8_sad4x4(y, y_stride, yd, yd_stride, 0)+8)>>4;
     }
     /* thr = qdiff/8 + log2(act) + log4(qprev) */
     thr = (qdiff>>3);
diff --git a/vp8/common/rtcd_defs.sh b/vp8/common/rtcd_defs.sh
index 62030f7c3be629831aaf001a07ddba3762da62f7..60af88801c080a826eb5de5a865003ff7d96de28 100644 (file)
@@ -195,3 +195,202 @@ vp8_bilinear_predict8x4_media=vp8_bilinear_predict8x4_armv6
 prototype void vp8_bilinear_predict4x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
 specialize vp8_bilinear_predict4x4 mmx media neon
 vp8_bilinear_predict4x4_media=vp8_bilinear_predict4x4_armv6
+
+#
+# Encoder functions below this point.
+#
+if [ "$CONFIG_VP8_ENCODER" = "yes" ]; then
+
+#
+# Whole-pixel Variance
+#
+prototype unsigned int vp8_variance4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_variance4x4 mmx sse2
+vp8_variance4x4_sse2=vp8_variance4x4_wmt
+
+prototype unsigned int vp8_variance8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_variance8x8 mmx sse2 media neon
+vp8_variance8x8_sse2=vp8_variance8x8_wmt
+vp8_variance8x8_media=vp8_variance8x8_armv6
+
+prototype unsigned int vp8_variance8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_variance8x16 mmx sse2 neon
+vp8_variance8x16_sse2=vp8_variance8x16_wmt
+
+prototype unsigned int vp8_variance16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_variance16x8 mmx sse2 neon
+vp8_variance16x8_sse2=vp8_variance16x8_wmt
+
+prototype unsigned int vp8_variance16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_variance16x16 mmx sse2 media neon
+vp8_variance16x16_sse2=vp8_variance16x16_wmt
+vp8_variance16x16_media=vp8_variance16x16_armv6
+
+#
+# Sub-pixel Variance
+#
+prototype unsigned int vp8_sub_pixel_variance4x4 "const unsigned char  *src_ptr, int  source_stride, int  xoffset, int  yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance4x4 mmx sse2
+vp8_sub_pixel_variance4x4_sse2=vp8_sub_pixel_variance4x4_wmt
+
+prototype unsigned int vp8_sub_pixel_variance8x8 "const unsigned char  *src_ptr, int  source_stride, int  xoffset, int  yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance8x8 mmx sse2 media neon
+vp8_sub_pixel_variance8x8_sse2=vp8_sub_pixel_variance8x8_wmt
+vp8_sub_pixel_variance8x8_media=vp8_sub_pixel_variance8x8_armv6
+
+prototype unsigned int vp8_sub_pixel_variance8x16 "const unsigned char  *src_ptr, int  source_stride, int  xoffset, int  yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance8x16 mmx sse2
+vp8_sub_pixel_variance8x16_sse2=vp8_sub_pixel_variance8x16_wmt
+
+prototype unsigned int vp8_sub_pixel_variance16x8 "const unsigned char  *src_ptr, int  source_stride, int  xoffset, int  yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance16x8 mmx sse2 ssse3
+vp8_sub_pixel_variance16x8_sse2=vp8_sub_pixel_variance16x8_wmt
+
+prototype unsigned int vp8_sub_pixel_variance16x16 "const unsigned char  *src_ptr, int  source_stride, int  xoffset, int  yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance16x16 mmx sse2 ssse3 media neon
+vp8_sub_pixel_variance16x16_sse2=vp8_sub_pixel_variance16x16_wmt
+vp8_sub_pixel_variance16x16_media=vp8_sub_pixel_variance16x16_armv6
+
+prototype unsigned int vp8_variance_halfpixvar16x16_h "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_variance_halfpixvar16x16_h mmx sse2 media neon
+vp8_variance_halfpixvar16x16_h_sse2=vp8_variance_halfpixvar16x16_h_wmt
+vp8_variance_halfpixvar16x16_h_media=vp8_variance_halfpixvar16x16_h_armv6
+
+prototype unsigned int vp8_variance_halfpixvar16x16_v "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_variance_halfpixvar16x16_v mmx sse2 media neon
+vp8_variance_halfpixvar16x16_v_sse2=vp8_variance_halfpixvar16x16_v_wmt
+vp8_variance_halfpixvar16x16_v_media=vp8_variance_halfpixvar16x16_v_armv6
+
+prototype unsigned int vp8_variance_halfpixvar16x16_hv "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_variance_halfpixvar16x16_hv mmx sse2 media neon
+vp8_variance_halfpixvar16x16_hv_sse2=vp8_variance_halfpixvar16x16_hv_wmt
+vp8_variance_halfpixvar16x16_hv_media=vp8_variance_halfpixvar16x16_hv_armv6
+
+#
+# Sum of squares (vector)
+#
+prototype unsigned int vp8_get_mb_ss "const short *"
+specialize vp8_get_mb_ss mmx sse2
+
+#
+# SSE (Sum Squared Error)
+#
+prototype unsigned int vp8_sub_pixel_mse16x16 "const unsigned char  *src_ptr, int  source_stride, int  xoffset, int  yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_mse16x16 mmx sse2
+vp8_sub_pixel_mse16x16_sse2=vp8_sub_pixel_mse16x16_wmt
+
+prototype unsigned int vp8_mse16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sse"
+specialize vp8_mse16x16 mmx sse2 media neon
+vp8_mse16x16_sse2=vp8_mse16x16_wmt
+vp8_mse16x16_media=vp8_mse16x16_armv6
+
+prototype unsigned int vp8_get4x4sse_cs "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride"
+specialize vp8_get4x4sse_cs mmx neon
+
+#
+# Single block SAD
+#
+prototype unsigned int vp8_sad4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad4x4 mmx sse2 neon
+vp8_sad4x4_sse2=vp8_sad4x4_wmt
+
+prototype unsigned int vp8_sad8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad8x8 mmx sse2 neon
+vp8_sad8x8_sse2=vp8_sad8x8_wmt
+
+prototype unsigned int vp8_sad8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad8x16 mmx sse2 neon
+vp8_sad8x16_sse2=vp8_sad8x16_wmt
+
+prototype unsigned int vp8_sad16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad16x8 mmx sse2 neon
+vp8_sad16x8_sse2=vp8_sad16x8_wmt
+
+prototype unsigned int vp8_sad16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad16x16 mmx sse2 sse3 media neon
+vp8_sad16x16_sse2=vp8_sad16x16_wmt
+vp8_sad16x16_media=vp8_sad16x16_armv6
+
+#
+# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
+#
+prototype void vp8_sad4x4x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad4x4x3 sse3
+
+prototype void vp8_sad8x8x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad8x8x3 sse3
+
+prototype void vp8_sad8x16x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad8x16x3 sse3
+
+prototype void vp8_sad16x8x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad16x8x3 sse3 ssse3
+
+prototype void vp8_sad16x16x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad16x16x3 sse3 ssse3
+
+# Note the only difference in the following prototypes is that they return into
+# an array of short
+prototype void vp8_sad4x4x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned short *sad_array"
+specialize vp8_sad4x4x8 sse4_1
+vp8_sad4x4x8_sse4_1=vp8_sad4x4x8_sse4
+
+prototype void vp8_sad8x8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned short *sad_array"
+specialize vp8_sad8x8x8 sse4_1
+vp8_sad8x8x8_sse4_1=vp8_sad8x8x8_sse4
+
+prototype void vp8_sad8x16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned short *sad_array"
+specialize vp8_sad8x16x8 sse4_1
+vp8_sad8x16x8_sse4_1=vp8_sad8x16x8_sse4
+
+prototype void vp8_sad16x8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned short *sad_array"
+specialize vp8_sad16x8x8 sse4_1
+vp8_sad16x8x8_sse4_1=vp8_sad16x8x8_sse4
+
+prototype void vp8_sad16x16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, unsigned short *sad_array"
+specialize vp8_sad16x16x8 sse4_1
+vp8_sad16x16x8_sse4_1=vp8_sad16x16x8_sse4
+
+#
+# Multi-block SAD, comparing a reference to N independent blocks
+#
+prototype void vp8_sad4x4x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad4x4x4d sse3
+
+prototype void vp8_sad8x8x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad8x8x4d sse3
+
+prototype void vp8_sad8x16x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad8x16x4d sse3
+
+prototype void vp8_sad16x8x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad16x8x4d sse3
+
+prototype void vp8_sad16x16x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int  ref_stride, unsigned int *sad_array"
+specialize vp8_sad16x16x4d sse3
+
+#
+# Block copy
+#
+case $arch in
+    x86*)
+    prototype void vp8_copy32xn "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int n"
+    specialize vp8_copy32xn sse2 sse3
+    ;;
+esac
+
+#
+# Structured Similarity (SSIM)
+#
+if [ "$CONFIG_INTERNAL_STATS" = "yes" ]; then
+    [ $arch = "x86_64" ] && sse2_on_x86_64=sse2
+
+    prototype void vp8_ssim_parms_8x8 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+    specialize vp8_ssim_parms_8x8 $sse2_on_x86_64
+
+    prototype void vp8_ssim_parms_16x16 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+    specialize vp8_ssim_parms_16x16 $sse2_on_x86_64
+fi
+
+# End of encoder only functions
+fi
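In the block above, each specialize line lists the optimized variants the generator should consider, and assignments such as vp8_variance16x16_sse2=vp8_variance16x16_wmt map the expected <name>_<isa> symbol onto the legacy name actually exported by the assembly. When the build disables runtime detection, the dispatch can reduce to a plain macro; a minimal sketch, assuming an SSE2-enabled x86 target (exact output format assumed, not verified against this tree):

    /* Hypothetical static dispatch when CONFIG_RUNTIME_CPU_DETECT is off. */
    #define vp8_variance16x16 vp8_variance16x16_wmt  /* _sse2 mapped to _wmt */

The surrounding shell conditionals keep these encoder-only symbols out of decoder-only builds, emit vp8_copy32xn only for x86 targets, and specialize the SSIM parameter kernels only where SSE2 can be presumed (x86_64).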
diff --git a/vp8/encoder/arm/arm_csystemdependent.c b/vp8/encoder/arm/arm_csystemdependent.c
index c6f46f424f49d4210460a93e770f3befd44a3166..60c3da948d3e2191b735648974a35b1fed7d2fbb 100644 (file)
@@ -32,32 +32,6 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
 #if HAVE_MEDIA
     if (flags & HAS_MEDIA)
     {
-        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_armv6;
-        /*cpi->rtcd.variance.sad16x8               = vp8_sad16x8_c;
-        cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c;
-        cpi->rtcd.variance.sad8x8                = vp8_sad8x8_c;
-        cpi->rtcd.variance.sad4x4                = vp8_sad4x4_c;*/
-
-        /*cpi->rtcd.variance.var4x4                = vp8_variance4x4_c;*/
-        cpi->rtcd.variance.var8x8                = vp8_variance8x8_armv6;
-        /*cpi->rtcd.variance.var8x16               = vp8_variance8x16_c;
-        cpi->rtcd.variance.var16x8               = vp8_variance16x8_c;*/
-        cpi->rtcd.variance.var16x16              = vp8_variance16x16_armv6;
-
-        /*cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c;*/
-        cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_armv6;
-        /*cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c;
-        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;*/
-        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_armv6;
-        cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_armv6;
-        cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_armv6;
-        cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_armv6;
-
-        cpi->rtcd.variance.mse16x16              = vp8_mse16x16_armv6;
-        /*cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;*/
-
-        /*cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c;*/
-
         cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_armv6;
         cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_armv6;
         cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_armv6;
@@ -79,32 +53,6 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
 #if HAVE_NEON
     if (flags & HAS_NEON)
     {
-        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_neon;
-        cpi->rtcd.variance.sad16x8               = vp8_sad16x8_neon;
-        cpi->rtcd.variance.sad8x16               = vp8_sad8x16_neon;
-        cpi->rtcd.variance.sad8x8                = vp8_sad8x8_neon;
-        cpi->rtcd.variance.sad4x4                = vp8_sad4x4_neon;
-
-        /*cpi->rtcd.variance.var4x4                = vp8_variance4x4_c;*/
-        cpi->rtcd.variance.var8x8                = vp8_variance8x8_neon;
-        cpi->rtcd.variance.var8x16               = vp8_variance8x16_neon;
-        cpi->rtcd.variance.var16x8               = vp8_variance16x8_neon;
-        cpi->rtcd.variance.var16x16              = vp8_variance16x16_neon;
-
-        /*cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c;*/
-        cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_neon;
-        /*cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c;
-        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;*/
-        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_neon;
-        cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_neon;
-        cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_neon;
-        cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_neon;
-
-        cpi->rtcd.variance.mse16x16              = vp8_mse16x16_neon;
-        /*cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;*/
-
-        cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_neon;
-
         cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_neon;
         cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_neon;
         cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_neon;
diff --git a/vp8/encoder/arm/variance_arm.c b/vp8/encoder/arm/variance_arm.c
index 5faa1048ea018bcb94892c294102e799867637c1..052a2578ad963f757cb197d799221e06f793b38d 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include "vpx_config.h"
+#include "vpx_rtcd.h"
 #include "vp8/encoder/variance.h"
 #include "vp8/common/filter.h"
 
diff --git a/vp8/encoder/arm/variance_arm.h b/vp8/encoder/arm/variance_arm.h
deleted file mode 100644 (file)
index 99eb26b..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef VARIANCE_ARM_H
-#define VARIANCE_ARM_H
-
-#if HAVE_MEDIA
-
-extern prototype_sad(vp8_sad16x16_armv6);
-extern prototype_variance(vp8_variance16x16_armv6);
-extern prototype_variance(vp8_variance8x8_armv6);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_armv6);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_armv6);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_armv6);
-extern prototype_variance(vp8_mse16x16_armv6);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-
-#undef  vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_armv6
-
-#undef  vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_armv6
-
-#undef  vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_armv6
-
-#undef  vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_armv6
-
-#undef  vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_armv6
-
-#undef  vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_armv6
-
-#undef  vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_armv6
-
-#undef  vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_armv6
-
-#undef  vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_armv6
-
-#endif /* !CONFIG_RUNTIME_CPU_DETECT */
-
-#endif /* HAVE_MEDIA */
-
-
-#if HAVE_NEON
-extern prototype_sad(vp8_sad4x4_neon);
-extern prototype_sad(vp8_sad8x8_neon);
-extern prototype_sad(vp8_sad8x16_neon);
-extern prototype_sad(vp8_sad16x8_neon);
-extern prototype_sad(vp8_sad16x16_neon);
-
-//extern prototype_variance(vp8_variance4x4_c);
-extern prototype_variance(vp8_variance8x8_neon);
-extern prototype_variance(vp8_variance8x16_neon);
-extern prototype_variance(vp8_variance16x8_neon);
-extern prototype_variance(vp8_variance16x16_neon);
-
-//extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_c);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_neon);
-//extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_c);
-//extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_c);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon_func);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_neon);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_neon);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_neon);
-
-//extern prototype_getmbss(vp8_get_mb_ss_c);
-extern prototype_variance(vp8_mse16x16_neon);
-extern prototype_get16x16prederror(vp8_get4x4sse_cs_neon);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef  vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_neon
-
-#undef  vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_neon
-
-#undef  vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_neon
-
-#undef  vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_neon
-
-#undef  vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_neon
-
-//#undef  vp8_variance_var4x4
-//#define vp8_variance_var4x4 vp8_variance4x4_c
-
-#undef  vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_neon
-
-#undef  vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_neon
-
-#undef  vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_neon
-
-#undef  vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_neon
-
-//#undef  vp8_variance_subpixvar4x4
-//#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
-
-#undef  vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_neon
-
-//#undef  vp8_variance_subpixvar8x16
-//#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
-
-//#undef  vp8_variance_subpixvar16x8
-//#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
-
-#undef  vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_neon
-
-#undef  vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_neon
-
-#undef  vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_neon
-
-#undef  vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_neon
-
-//#undef  vp8_variance_getmbss
-//#define vp8_variance_getmbss vp8_get_mb_ss_c
-
-#undef  vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_neon
-
-#undef  vp8_variance_get4x4sse_cs
-#define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_neon
-#endif  /* !CONFIG_RUNTIME_CPU_DETECT */
-
-#endif  /* HAVE_NEON */
-
-#endif
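The deleted header above was the static-dispatch half of the old scheme; the same information now lives in the vp8_variance16x16_media=vp8_variance16x16_armv6 style lines of rtcd_defs.sh. Under runtime detection, the generated setup code would select among the variants in priority order, roughly as in the following sketch (the flag checks mirror the arm_csystemdependent.c code deleted above; the setup function name and CPU probe are assumptions):

    /* Hypothetical generated runtime selection for an ARM build. */
    static void setup_rtcd_internal(void)
    {
        int flags = arm_cpu_caps();  /* assumed probe, cf. vpx_ports/arm.h */
        vp8_variance16x16 = vp8_variance16x16_c;
        if (flags & HAS_MEDIA) vp8_variance16x16 = vp8_variance16x16_armv6;
        if (flags & HAS_NEON)  vp8_variance16x16 = vp8_variance16x16_neon;
    }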
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 9eed6ccaf6cb969b287d6a94dc97b5a8a333c10a..41da9abf5ccb2e163071f65834192f85012c6a85 100644 (file)
@@ -96,7 +96,7 @@ static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
      *  lambda using a non-linear combination (e.g., the smallest, or second
      *  smallest, etc.).
      */
-    act =     VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
+    act =  vp8_variance16x16(x->src.y_buffer,
                     x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
     act = act<<4;
 
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 14c93eb46cabe0a5a9662bdc7813de66a72f4a7f..4ae4e46df7207266aa2355153dd3cbafa7e33d1e 100644 (file)
@@ -53,7 +53,7 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
         }
     }
 
-    intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
+    intra_pred_var = vp8_get_mb_ss(x->src_diff);
 
     return intra_pred_var;
 }
index 03d639765ae1543c44f1d60c47d0fa0046ea66aa..09f184c6b5314c8ff5b30983c1e994a1bae25b7e 100644 (file)
@@ -409,7 +409,7 @@ static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG
 
     ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre );
 
-    VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
+    vp8_mse16x16 ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
 }
 
 static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
@@ -433,7 +433,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
     int new_mv_mode_penalty = 256;
 
     // override the default variance function to use MSE
-    v_fn_ptr.vf    = VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16);
+    v_fn_ptr.vf    = vp8_mse16x16;
 
     // Set up pointers for this macro block recon buffer
     xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index 89e8dc2bcfff6129dfcc8d2aee97e1f0d3b31511..59324d0df549695f3463b90a9cc95fe9c7079698 100644 (file)
@@ -25,53 +25,6 @@ extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
 void vp8_cmachine_specific_config(VP8_COMP *cpi)
 {
 #if CONFIG_RUNTIME_CPU_DETECT
-    cpi->rtcd.variance.sad16x16              = vp8_sad16x16_c;
-    cpi->rtcd.variance.sad16x8               = vp8_sad16x8_c;
-    cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c;
-    cpi->rtcd.variance.sad8x8                = vp8_sad8x8_c;
-    cpi->rtcd.variance.sad4x4                = vp8_sad4x4_c;
-
-    cpi->rtcd.variance.sad16x16x3            = vp8_sad16x16x3_c;
-    cpi->rtcd.variance.sad16x8x3             = vp8_sad16x8x3_c;
-    cpi->rtcd.variance.sad8x16x3             = vp8_sad8x16x3_c;
-    cpi->rtcd.variance.sad8x8x3              = vp8_sad8x8x3_c;
-    cpi->rtcd.variance.sad4x4x3              = vp8_sad4x4x3_c;
-
-    cpi->rtcd.variance.sad16x16x8            = vp8_sad16x16x8_c;
-    cpi->rtcd.variance.sad16x8x8             = vp8_sad16x8x8_c;
-    cpi->rtcd.variance.sad8x16x8             = vp8_sad8x16x8_c;
-    cpi->rtcd.variance.sad8x8x8              = vp8_sad8x8x8_c;
-    cpi->rtcd.variance.sad4x4x8              = vp8_sad4x4x8_c;
-
-    cpi->rtcd.variance.sad16x16x4d           = vp8_sad16x16x4d_c;
-    cpi->rtcd.variance.sad16x8x4d            = vp8_sad16x8x4d_c;
-    cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_c;
-    cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_c;
-    cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_c;
-#if ARCH_X86 || ARCH_X86_64
-    cpi->rtcd.variance.copy32xn              = vp8_copy32xn_c;
-#endif
-    cpi->rtcd.variance.var4x4                = vp8_variance4x4_c;
-    cpi->rtcd.variance.var8x8                = vp8_variance8x8_c;
-    cpi->rtcd.variance.var8x16               = vp8_variance8x16_c;
-    cpi->rtcd.variance.var16x8               = vp8_variance16x8_c;
-    cpi->rtcd.variance.var16x16              = vp8_variance16x16_c;
-
-    cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c;
-    cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_c;
-    cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c;
-    cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;
-    cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_c;
-    cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_c;
-    cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_c;
-    cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_c;
-    cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_c;
-
-    cpi->rtcd.variance.mse16x16              = vp8_mse16x16_c;
-    cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;
-
-    cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c;
-
     cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_c;
     cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_c;
     cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_c;
@@ -95,10 +48,6 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
 #if !(CONFIG_REALTIME_ONLY)
     cpi->rtcd.temporal.apply                 = vp8_temporal_filter_apply_c;
 #endif
-#if CONFIG_INTERNAL_STATS
-    cpi->rtcd.variance.ssimpf_8x8            = vp8_ssim_parms_8x8_c;
-    cpi->rtcd.variance.ssimpf_16x16          = vp8_ssim_parms_16x16_c;
-#endif
 #endif
 
     // Pure C:
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 65ae453753b3d7e9ef589a0109fa04d55f8e2e50..416d4daa1c3dae2e0458d9791f8e60344a4f3cff 100644 (file)
@@ -69,7 +69,7 @@ extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_
 
 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
 
-int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
 
 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
 
@@ -85,8 +85,7 @@ extern double vp8_calc_ssim
     YV12_BUFFER_CONFIG *source,
     YV12_BUFFER_CONFIG *dest,
     int lumamask,
-    double *weight,
-    const vp8_variance_rtcd_vtable_t *rtcd
+    double *weight
 );
 
 
@@ -96,8 +95,7 @@ extern double vp8_calc_ssimg
     YV12_BUFFER_CONFIG *dest,
     double *ssim_y,
     double *ssim_u,
-    double *ssim_v,
-    const vp8_variance_rtcd_vtable_t *rtcd
+    double *ssim_v
 );
 
 
@@ -1947,62 +1945,62 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
     vp8cx_create_encoder_threads(cpi);
 #endif
 
-    cpi->fn_ptr[BLOCK_16X16].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
-    cpi->fn_ptr[BLOCK_16X16].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
-    cpi->fn_ptr[BLOCK_16X16].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
-    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h  = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
-    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v  = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
-    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
-    cpi->fn_ptr[BLOCK_16X16].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
-    cpi->fn_ptr[BLOCK_16X16].sdx8f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8);
-    cpi->fn_ptr[BLOCK_16X16].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);
-
-    cpi->fn_ptr[BLOCK_16X8].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
-    cpi->fn_ptr[BLOCK_16X8].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
-    cpi->fn_ptr[BLOCK_16X8].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);
+    cpi->fn_ptr[BLOCK_16X16].sdf            = vp8_sad16x16;
+    cpi->fn_ptr[BLOCK_16X16].vf             = vp8_variance16x16;
+    cpi->fn_ptr[BLOCK_16X16].svf            = vp8_sub_pixel_variance16x16;
+    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h  = vp8_variance_halfpixvar16x16_h;
+    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v  = vp8_variance_halfpixvar16x16_v;
+    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = vp8_variance_halfpixvar16x16_hv;
+    cpi->fn_ptr[BLOCK_16X16].sdx3f          = vp8_sad16x16x3;
+    cpi->fn_ptr[BLOCK_16X16].sdx8f          = vp8_sad16x16x8;
+    cpi->fn_ptr[BLOCK_16X16].sdx4df         = vp8_sad16x16x4d;
+
+    cpi->fn_ptr[BLOCK_16X8].sdf            = vp8_sad16x8;
+    cpi->fn_ptr[BLOCK_16X8].vf             = vp8_variance16x8;
+    cpi->fn_ptr[BLOCK_16X8].svf            = vp8_sub_pixel_variance16x8;
     cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h  = NULL;
     cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v  = NULL;
     cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
-    cpi->fn_ptr[BLOCK_16X8].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3);
-    cpi->fn_ptr[BLOCK_16X8].sdx8f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x8);
-    cpi->fn_ptr[BLOCK_16X8].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d);
+    cpi->fn_ptr[BLOCK_16X8].sdx3f          = vp8_sad16x8x3;
+    cpi->fn_ptr[BLOCK_16X8].sdx8f          = vp8_sad16x8x8;
+    cpi->fn_ptr[BLOCK_16X8].sdx4df         = vp8_sad16x8x4d;
 
-    cpi->fn_ptr[BLOCK_8X16].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16);
-    cpi->fn_ptr[BLOCK_8X16].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16);
-    cpi->fn_ptr[BLOCK_8X16].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16);
+    cpi->fn_ptr[BLOCK_8X16].sdf            = vp8_sad8x16;
+    cpi->fn_ptr[BLOCK_8X16].vf             = vp8_variance8x16;
+    cpi->fn_ptr[BLOCK_8X16].svf            = vp8_sub_pixel_variance8x16;
     cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h  = NULL;
     cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v  = NULL;
     cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
-    cpi->fn_ptr[BLOCK_8X16].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3);
-    cpi->fn_ptr[BLOCK_8X16].sdx8f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x8);
-    cpi->fn_ptr[BLOCK_8X16].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d);
+    cpi->fn_ptr[BLOCK_8X16].sdx3f          = vp8_sad8x16x3;
+    cpi->fn_ptr[BLOCK_8X16].sdx8f          = vp8_sad8x16x8;
+    cpi->fn_ptr[BLOCK_8X16].sdx4df         = vp8_sad8x16x4d;
 
-    cpi->fn_ptr[BLOCK_8X8].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8);
-    cpi->fn_ptr[BLOCK_8X8].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8);
-    cpi->fn_ptr[BLOCK_8X8].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8);
+    cpi->fn_ptr[BLOCK_8X8].sdf            = vp8_sad8x8;
+    cpi->fn_ptr[BLOCK_8X8].vf             = vp8_variance8x8;
+    cpi->fn_ptr[BLOCK_8X8].svf            = vp8_sub_pixel_variance8x8;
     cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h  = NULL;
     cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v  = NULL;
     cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
-    cpi->fn_ptr[BLOCK_8X8].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3);
-    cpi->fn_ptr[BLOCK_8X8].sdx8f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x8);
-    cpi->fn_ptr[BLOCK_8X8].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d);
+    cpi->fn_ptr[BLOCK_8X8].sdx3f          = vp8_sad8x8x3;
+    cpi->fn_ptr[BLOCK_8X8].sdx8f          = vp8_sad8x8x8;
+    cpi->fn_ptr[BLOCK_8X8].sdx4df         = vp8_sad8x8x4d;
 
-    cpi->fn_ptr[BLOCK_4X4].sdf            = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4);
-    cpi->fn_ptr[BLOCK_4X4].vf             = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4);
-    cpi->fn_ptr[BLOCK_4X4].svf            = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4);
+    cpi->fn_ptr[BLOCK_4X4].sdf            = vp8_sad4x4;
+    cpi->fn_ptr[BLOCK_4X4].vf             = vp8_variance4x4;
+    cpi->fn_ptr[BLOCK_4X4].svf            = vp8_sub_pixel_variance4x4;
     cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h  = NULL;
     cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v  = NULL;
     cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
-    cpi->fn_ptr[BLOCK_4X4].sdx3f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3);
-    cpi->fn_ptr[BLOCK_4X4].sdx8f          = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8);
-    cpi->fn_ptr[BLOCK_4X4].sdx4df         = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);
+    cpi->fn_ptr[BLOCK_4X4].sdx3f          = vp8_sad4x4x3;
+    cpi->fn_ptr[BLOCK_4X4].sdx8f          = vp8_sad4x4x8;
+    cpi->fn_ptr[BLOCK_4X4].sdx4df         = vp8_sad4x4x4d;
 
 #if ARCH_X86 || ARCH_X86_64
-    cpi->fn_ptr[BLOCK_16X16].copymem        = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
-    cpi->fn_ptr[BLOCK_16X8].copymem        = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
-    cpi->fn_ptr[BLOCK_8X16].copymem        = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
-    cpi->fn_ptr[BLOCK_8X8].copymem        = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
-    cpi->fn_ptr[BLOCK_4X4].copymem        = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+    cpi->fn_ptr[BLOCK_16X16].copymem      = vp8_copy32xn;
+    cpi->fn_ptr[BLOCK_16X8].copymem       = vp8_copy32xn;
+    cpi->fn_ptr[BLOCK_8X16].copymem       = vp8_copy32xn;
+    cpi->fn_ptr[BLOCK_8X8].copymem        = vp8_copy32xn;
+    cpi->fn_ptr[BLOCK_4X4].copymem        = vp8_copy32xn;
 #endif
 
     cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
@@ -2334,8 +2332,7 @@ void vp8_remove_compressor(VP8_COMP **ptr)
 
 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
                                  unsigned char *recon, int recon_stride,
-                                 unsigned int cols, unsigned int rows,
-                                 vp8_variance_rtcd_vtable_t *rtcd)
+                                 unsigned int cols, unsigned int rows)
 {
     unsigned int row, col;
     uint64_t total_sse = 0;
@@ -2347,7 +2344,7 @@ static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
         {
             unsigned int sse;
 
-            VARIANCE_INVOKE(rtcd, mse16x16)(orig + col, orig_stride,
+            vp8_mse16x16(orig + col, orig_stride,
                                             recon + col, recon_stride,
                                             &sse);
             total_sse += sse;
@@ -2408,8 +2405,7 @@ static void generate_psnr_packet(VP8_COMP *cpi)
     pkt.kind = VPX_CODEC_PSNR_PKT;
     sse = calc_plane_error(orig->y_buffer, orig->y_stride,
                            recon->y_buffer, recon->y_stride,
-                           width, height,
-                           IF_RTCD(&cpi->rtcd.variance));
+                           width, height);
     pkt.data.psnr.sse[0] = sse;
     pkt.data.psnr.sse[1] = sse;
     pkt.data.psnr.samples[0] = width * height;
@@ -2420,8 +2416,7 @@ static void generate_psnr_packet(VP8_COMP *cpi)
 
     sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
                            recon->u_buffer, recon->uv_stride,
-                           width, height,
-                           IF_RTCD(&cpi->rtcd.variance));
+                           width, height);
     pkt.data.psnr.sse[0] += sse;
     pkt.data.psnr.sse[2] = sse;
     pkt.data.psnr.samples[0] += width * height;
@@ -2429,8 +2424,7 @@ static void generate_psnr_packet(VP8_COMP *cpi)
 
     sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
                            recon->v_buffer, recon->uv_stride,
-                           width, height,
-                           IF_RTCD(&cpi->rtcd.variance));
+                           width, height);
     pkt.data.psnr.sse[0] += sse;
     pkt.data.psnr.sse[3] = sse;
     pkt.data.psnr.samples[0] += width * height;
@@ -3819,8 +3813,7 @@ static void encode_frame_to_data_rate
         {
             int last_q = Q;
             int kf_err = vp8_calc_ss_err(cpi->Source,
-                                         &cm->yv12_fb[cm->new_fb_idx],
-                                         IF_RTCD(&cpi->rtcd.variance));
+                                         &cm->yv12_fb[cm->new_fb_idx]);
 
             // The key frame is not good enough
             if ( kf_err > ((cpi->ambient_err * 7) >> 3) )
@@ -4016,8 +4009,7 @@ static void encode_frame_to_data_rate
     if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
     {
         cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
-                                           &cm->yv12_fb[cm->new_fb_idx],
-                                           IF_RTCD(&cpi->rtcd.variance));
+                                           &cm->yv12_fb[cm->new_fb_idx]);
     }
 
     /* This frame's MVs are saved and will be used in next frame's MV predictor.
@@ -4961,16 +4953,13 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
                 int64_t sq_error, sq_error2;
 
                 ye = calc_plane_error(orig->y_buffer, orig->y_stride,
-                  recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
-                  IF_RTCD(&cpi->rtcd.variance));
+                  recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height);
 
                 ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
-                  recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
-                  IF_RTCD(&cpi->rtcd.variance));
+                  recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height);
 
                 ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
-                  recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
-                  IF_RTCD(&cpi->rtcd.variance));
+                  recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height);
 
                 sq_error = ye + ue + ve;
 
@@ -4985,20 +4974,17 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
                     double frame_psnr2, frame_ssim2 = 0;
                     double weight = 0;
 
-                    vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc));
+                    vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0);
                     vp8_clear_system_state();
 
                     ye = calc_plane_error(orig->y_buffer, orig->y_stride,
-                      pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height,
-                      IF_RTCD(&cpi->rtcd.variance));
+                      pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height);
 
                     ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
-                      pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
-                      IF_RTCD(&cpi->rtcd.variance));
+                      pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height);
 
                     ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
-                      pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
-                      IF_RTCD(&cpi->rtcd.variance));
+                      pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height);
 
                     sq_error2 = ye + ue + ve;
 
@@ -5011,8 +4997,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
                     cpi->totalp  += frame_psnr2;
 
                     frame_ssim2 = vp8_calc_ssim(cpi->Source,
-                      &cm->post_proc_buffer, 1, &weight,
-                      IF_RTCD(&cpi->rtcd.variance));
+                      &cm->post_proc_buffer, 1, &weight);
 
                     cpi->summed_quality += frame_ssim2 * weight;
                     cpi->summed_weights += weight;
@@ -5042,7 +5027,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l
             {
                 double y, u, v, frame_all;
                 frame_all =  vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
-                    &y, &u, &v, IF_RTCD(&cpi->rtcd.variance));
+                    &y, &u, &v);
 
                 if (cpi->oxcf.number_of_layers > 1)
                 {
@@ -5222,14 +5207,13 @@ int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode, VPX_SCALING ver
 
 
 
-int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd)
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest)
 {
     int i, j;
     int Total = 0;
 
     unsigned char *src = source->y_buffer;
     unsigned char *dst = dest->y_buffer;
-    (void)rtcd;
 
     // Loop through the Y plane raw and reconstruction data summing (square differences)
     for (i = 0; i < source->y_height; i += 16)
@@ -5237,7 +5221,7 @@ int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const
         for (j = 0; j < source->y_width; j += 16)
         {
             unsigned int sse;
-            Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
+            Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
         }
 
         src += 16 * source->y_stride;
index eeadf62a6377420ee4365b4a445c0c680ca8dea2..8a0c3df062dac408c7a57c44585ee1fabcb400eb 100644 (file)
@@ -224,7 +224,6 @@ typedef struct
 
 typedef struct VP8_ENCODER_RTCD
 {
-    vp8_variance_rtcd_vtable_t  variance;
     vp8_fdct_rtcd_vtable_t      fdct;
     vp8_encodemb_rtcd_vtable_t  encodemb;
     vp8_quantize_rtcd_vtable_t  quantize;
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index e8a5d9522fc1c80a4b2d933fef012926efbdf924..4f69a5a899e86d1d0c0b0fa309f765007e092aa6 100644 (file)
@@ -31,7 +31,7 @@
 #define IF_RTCD(x)  NULL
 #endif
 
-extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);
+extern int VP8_UVSSE(MACROBLOCK *x);
 
 #ifdef SPEEDSTATS
 extern unsigned int cnt_pm;
@@ -40,7 +40,6 @@ extern unsigned int cnt_pm;
 extern const int vp8_ref_frame_order[MAX_MODES];
 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
 
-extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride);
 extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
 
 
@@ -120,14 +119,14 @@ unsigned int vp8_get4x4sse_cs_c
     return distortion;
 }
 
-static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
+static int get_prediction_error(BLOCK *be, BLOCKD *b)
 {
     unsigned char *sptr;
     unsigned char *dptr;
     sptr = (*(be->base_src) + be->src);
     dptr = b->predictor;
 
-    return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16);
+    return vp8_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
 
 }
 
@@ -157,7 +156,7 @@ static int pick_intra4x4block(
         vp8_intra4x4_predict
                      (*(b->base_dst) + b->dst, b->dst_stride,
                       mode, b->predictor, 16);
-        distortion = get_prediction_error(be, b, &rtcd->variance);
+        distortion = get_prediction_error(be, b);
         this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
 
         if (this_rd < best_rd)
@@ -674,8 +673,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
             else
             {
                 rate2 += rate;
-                distortion2 = VARIANCE_INVOKE
-                                (&cpi->rtcd.variance, var16x16)(
+                distortion2 = vp8_variance16x16(
                                     *(b->base_src), b->src_stride,
                                     x->e_mbd.predictor, 16, &sse);
                 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
@@ -700,7 +698,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
         case TM_PRED:
             vp8_build_intra_predictors_mby
                 (&x->e_mbd);
-            distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+            distortion2 = vp8_variance16x16
                                           (*(b->base_src), b->src_stride,
                                           x->e_mbd.predictor, 16, &sse);
             rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
@@ -937,7 +935,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
                 // Check u and v to make sure skip is ok
                 int sse2 = 0;
 
-                sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+                sse2 = VP8_UVSSE(x);
 
                 if (sse2 * 2 < x->encode_breakout)
                     x->skip = 1;
@@ -1071,7 +1069,7 @@ void vp8_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate_)
         x->e_mbd.mode_info_context->mbmi.mode = mode;
         vp8_build_intra_predictors_mby
             (&x->e_mbd);
-        distortion = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+        distortion = vp8_variance16x16
             (*(b->base_src), b->src_stride, x->e_mbd.predictor, 16, &sse);
         rate = x->mbmode_cost[x->e_mbd.frame_type][mode];
         this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index 2449ae5402347cd5ad4cc48f0b7487c873de48d5..09e45bea646ba4f97f5d3fa83ae0699c18acd72f 100644 (file)
@@ -21,7 +21,7 @@
 #include "vpx_ports/arm.h"
 #endif
 
-extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
+extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
 
 #if CONFIG_RUNTIME_CPU_DETECT
 #define IF_RTCD(x) (x)
@@ -64,8 +64,7 @@ void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
 }
 
 static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
-                                YV12_BUFFER_CONFIG *dest,
-                                const vp8_variance_rtcd_vtable_t *rtcd)
+                                YV12_BUFFER_CONFIG *dest)
 {
     int i, j;
     int Total = 0;
@@ -93,7 +92,7 @@ static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
         for (j = 0; j < source->y_width; j += 16)
         {
             unsigned int sse;
-            Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride,
+            Total += vp8_mse16x16(src + j, source->y_stride,
                                                      dst + j, dest->y_stride,
                                                      &sse);
         }
@@ -183,8 +182,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
     vp8_yv12_copy_partial_frame_ptr(saved_frame, cm->frame_to_show);
     vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
 
-    best_err = calc_partial_ssl_err(sd, cm->frame_to_show,
-                                    IF_RTCD(&cpi->rtcd.variance));
+    best_err = calc_partial_ssl_err(sd, cm->frame_to_show);
 
     filt_val -= 1 + (filt_val > 10);
 
@@ -196,8 +194,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
         vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
 
         // Get the err for filtered frame
-        filt_err = calc_partial_ssl_err(sd, cm->frame_to_show,
-                                        IF_RTCD(&cpi->rtcd.variance));
+        filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
 
         // Update the best case record or exit loop.
         if (filt_err < best_err)
@@ -228,8 +225,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
             vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
 
             // Get the err for filtered frame
-            filt_err = calc_partial_ssl_err(sd, cm->frame_to_show,
-                                            IF_RTCD(&cpi->rtcd.variance));
+            filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
 
             // Update the best case record or exit loop.
             if (filt_err < best_err)
@@ -322,8 +318,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
     vp8cx_set_alt_lf_level(cpi, filt_mid);
     vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
 
-    best_err = vp8_calc_ss_err(sd, cm->frame_to_show,
-                               IF_RTCD(&cpi->rtcd.variance));
+    best_err = vp8_calc_ss_err(sd, cm->frame_to_show);
 
     ss_err[filt_mid] = best_err;
 
@@ -349,8 +344,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
                 vp8cx_set_alt_lf_level(cpi, filt_low);
                 vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
 
-                filt_err = vp8_calc_ss_err(sd, cm->frame_to_show,
-                                           IF_RTCD(&cpi->rtcd.variance));
+                filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
                 ss_err[filt_low] = filt_err;
             }
             else
@@ -376,8 +370,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
                 vp8cx_set_alt_lf_level(cpi, filt_high);
                 vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
 
-                filt_err = vp8_calc_ss_err(sd, cm->frame_to_show,
-                                           IF_RTCD(&cpi->rtcd.variance));
+                filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
                 ss_err[filt_high] = filt_err;
             }
             else
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 2784455350f7801e13a5b63f887a20ba7c8109b1..015409d48c668cfea91275d05731f1226152a961 100644 (file)
@@ -453,7 +453,7 @@ int vp8_mbuverror_c(MACROBLOCK *mb)
     return error;
 }
 
-int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
+int VP8_UVSSE(MACROBLOCK *x)
 {
     unsigned char *uptr, *vptr;
     unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
@@ -486,17 +486,17 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
 
     if ((mv_row | mv_col) & 7)
     {
-        VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
+        vp8_sub_pixel_variance8x8(uptr, pre_stride,
             mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
-        VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
+        vp8_sub_pixel_variance8x8(vptr, pre_stride,
             mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
         sse2 += sse1;
     }
     else
     {
-        VARIANCE_INVOKE(rtcd, var8x8)(uptr, pre_stride,
+        vp8_variance8x8(uptr, pre_stride,
             upred_ptr, uv_stride, &sse2);
-        VARIANCE_INVOKE(rtcd, var8x8)(vptr, pre_stride,
+        vp8_variance8x8(vptr, pre_stride,
             vpred_ptr, uv_stride, &sse1);
         sse2 += sse1;
     }
@@ -2137,7 +2137,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
                 if(threshold < x->encode_breakout)
                     threshold = x->encode_breakout;
 
-                var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+                var = vp8_variance16x16
                         (*(b->base_src), b->src_stride,
                         x->e_mbd.predictor, 16, &sse);
 
@@ -2150,7 +2150,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
                         (sse /2 > var && sse-var < 64))
                     {
                         // Check u and v to make sure skip is ok
-                        int sse2=  VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+                        int sse2=  VP8_UVSSE(x);
                         if (sse2 * 2 < threshold)
                         {
                             x->skip = 1;
diff --git a/vp8/encoder/ssim.c b/vp8/encoder/ssim.c
index d0f8e490a4512c28799a4b31dad9df0cd58845da..e75160836dc94a9d84231ff2db5b35aed226acc8 100644 (file)
@@ -94,25 +94,22 @@ static double similarity
     return ssim_n * 1.0 / ssim_d;
 }
 
-static double ssim_16x16(unsigned char *s,int sp, unsigned char *r,int rp,
-            const vp8_variance_rtcd_vtable_t *rtcd)
+static double ssim_16x16(unsigned char *s,int sp, unsigned char *r,int rp)
 {
     unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
-    SSIMPF_INVOKE(rtcd,16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+    vp8_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
     return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
 }
-static double ssim_8x8(unsigned char *s,int sp, unsigned char *r,int rp,
-                const vp8_variance_rtcd_vtable_t *rtcd)
+static double ssim_8x8(unsigned char *s,int sp, unsigned char *r,int rp)
 {
     unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
-    SSIMPF_INVOKE(rtcd,8x8)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+    vp8_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
     return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
 }
 
 // TODO: (jbb) tried to scale this function such that we may be able to use it
 // for distortion metric in mode selection code ( provided we do a reconstruction)
-long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
-           const vp8_variance_rtcd_vtable_t *rtcd)
+long dssim(unsigned char *s,int sp, unsigned char *r,int rp)
 {
     unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
     int64_t ssim3;
@@ -125,7 +122,7 @@ long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
     c1 = cc1*16;
     c2 = cc2*16;
 
-    SSIMPF_INVOKE(rtcd,16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+    vp8_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
     ssim_n1 = (2*sum_s*sum_r+ c1);
 
     ssim_n2 =((int64_t) 2*256*sum_sxr-(int64_t) 2*sum_s*sum_r+c2);
@@ -154,8 +151,7 @@ double vp8_ssim2
     int stride_img1,
     int stride_img2,
     int width,
-    int height,
-    const vp8_variance_rtcd_vtable_t *rtcd
+    int height
 )
 {
     int i,j;
@@ -167,7 +163,7 @@ double vp8_ssim2
     {
         for(j=0; j < width-8; j+=4 )
         {
-            double v = ssim_8x8(img1+j, stride_img1, img2+j, stride_img2, rtcd);
+            double v = ssim_8x8(img1+j, stride_img1, img2+j, stride_img2);
             ssim_total += v;
             samples++;
         }
@@ -180,8 +176,7 @@ double vp8_calc_ssim
     YV12_BUFFER_CONFIG *source,
     YV12_BUFFER_CONFIG *dest,
     int lumamask,
-    double *weight,
-    const vp8_variance_rtcd_vtable_t *rtcd
+    double *weight
 )
 {
     double a, b, c;
@@ -189,15 +184,15 @@ double vp8_calc_ssim
 
     a = vp8_ssim2(source->y_buffer, dest->y_buffer,
                  source->y_stride, dest->y_stride, source->y_width,
-                 source->y_height, rtcd);
+                 source->y_height);
 
     b = vp8_ssim2(source->u_buffer, dest->u_buffer,
                  source->uv_stride, dest->uv_stride, source->uv_width,
-                 source->uv_height, rtcd);
+                 source->uv_height);
 
     c = vp8_ssim2(source->v_buffer, dest->v_buffer,
                  source->uv_stride, dest->uv_stride, source->uv_width,
-                 source->uv_height, rtcd);
+                 source->uv_height);
 
     ssimv = a * .8 + .1 * (b + c);
 
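
The weighting is unchanged: luma contributes 0.8 and each chroma plane 0.1 to the per-frame score. An illustrative wrapper showing the slimmed-down signature (the wrapper itself is hypothetical):

/* Hypothetical helper: with the rtcd vtable parameter dropped, per-frame
 * SSIM needs only the two buffers, the luma mask flag, and an
 * out-parameter for the weight. */
static double frame_ssim_sketch(YV12_BUFFER_CONFIG *source,
                                YV12_BUFFER_CONFIG *dest)
{
    double weight;
    return vp8_calc_ssim(source, dest, 1 /* lumamask */, &weight);
}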
@@ -212,8 +207,7 @@ double vp8_calc_ssimg
     YV12_BUFFER_CONFIG *dest,
     double *ssim_y,
     double *ssim_u,
-    double *ssim_v,
-    const vp8_variance_rtcd_vtable_t *rtcd
+    double *ssim_v
 )
 {
     double ssim_all = 0;
@@ -221,15 +215,15 @@ double vp8_calc_ssimg
 
     a = vp8_ssim2(source->y_buffer, dest->y_buffer,
                  source->y_stride, dest->y_stride, source->y_width,
-                 source->y_height, rtcd);
+                 source->y_height);
 
     b = vp8_ssim2(source->u_buffer, dest->u_buffer,
                  source->uv_stride, dest->uv_stride, source->uv_width,
-                 source->uv_height, rtcd);
+                 source->uv_height);
 
     c = vp8_ssim2(source->v_buffer, dest->v_buffer,
                  source->uv_stride, dest->uv_stride, source->uv_width,
-                 source->uv_height, rtcd);
+                 source->uv_height);
     *ssim_y = a;
     *ssim_u = b;
     *ssim_v = c;
diff --git a/vp8/encoder/variance.h b/vp8/encoder/variance.h
index d9bf66975081a103426eed7825524065b4a377c0..e216a22222599c45cbb4ff666159683e68be37ab 100644 (file)
 #ifndef VARIANCE_H
 #define VARIANCE_H
 
-#define prototype_sad(sym)\
-    unsigned int (sym)\
-    (\
-     const unsigned char *src_ptr, \
-     int source_stride, \
-     const unsigned char *ref_ptr, \
-     int  ref_stride, \
-     int max_sad\
-    )
-
-#define prototype_sad_multi_same_address(sym)\
-    void (sym)\
-    (\
-     const unsigned char *src_ptr, \
-     int source_stride, \
-     const unsigned char *ref_ptr, \
-     int  ref_stride, \
-     unsigned int *sad_array\
-    )
-
-#define prototype_sad_multi_same_address_1(sym)\
-    void (sym)\
-    (\
-     const unsigned char *src_ptr, \
-     int source_stride, \
-     const unsigned char *ref_ptr, \
-     int  ref_stride, \
-     unsigned short *sad_array\
-    )
+typedef unsigned int(*vp8_sad_fn_t)
+    (
+    const unsigned char *src_ptr,
+    int source_stride,
+    const unsigned char *ref_ptr,
+    int ref_stride,
+    int max_sad
+    );
 
-#define prototype_sad_multi_dif_address(sym)\
-    void (sym)\
-    (\
-     const unsigned char *src_ptr, \
-     int source_stride, \
-     unsigned char *ref_ptr[4], \
-     int  ref_stride, \
-     unsigned int *sad_array\
-    )
+typedef void (*vp8_copy32xn_fn_t)(
+    const unsigned char *src_ptr,
+    int source_stride,
+    const unsigned char *ref_ptr,
+    int ref_stride,
+    int n);
+
+typedef void (*vp8_sad_multi_fn_t)(
+    const unsigned char *src_ptr,
+    int source_stride,
+    const unsigned char *ref_ptr,
+    int  ref_stride,
+    unsigned int *sad_array);
+
+typedef void (*vp8_sad_multi1_fn_t)
+    (
+     const unsigned char *src_ptr,
+     int source_stride,
+     const unsigned char *ref_ptr,
+     int  ref_stride,
+     unsigned short *sad_array
+    );
 
-#define prototype_variance(sym) \
-    unsigned int (sym) \
-    (\
-     const unsigned char *src_ptr, \
-     int source_stride, \
-     const unsigned char *ref_ptr, \
-     int  ref_stride, \
-     unsigned int *sse\
-    )
+typedef void (*vp8_sad_multi_d_fn_t)
+    (
+     const unsigned char *src_ptr,
+     int source_stride,
+     unsigned char *ref_ptr[4],
+     int  ref_stride,
+     unsigned int *sad_array
+    );
 
-#define prototype_variance2(sym) \
-    unsigned int (sym) \
-    (\
-     const unsigned char *src_ptr, \
-     int source_stride, \
-     const unsigned char *ref_ptr, \
-     int  ref_stride, \
-     unsigned int *sse,\
-     int *sum\
-    )
+typedef unsigned int (*vp8_variance_fn_t)
+    (
+     const unsigned char *src_ptr,
+     int source_stride,
+     const unsigned char *ref_ptr,
+     int  ref_stride,
+     unsigned int *sse
+    );
 
-#define prototype_subpixvariance(sym) \
-    unsigned int (sym) \
-    ( \
-      const unsigned char  *src_ptr, \
-      int  source_stride, \
-      int  xoffset, \
-      int  yoffset, \
-      const unsigned char *ref_ptr, \
-      int Refstride, \
-      unsigned int *sse \
+typedef unsigned int (*vp8_subpixvariance_fn_t)
+    (
+      const unsigned char  *src_ptr,
+      int  source_stride,
+      int  xoffset,
+      int  yoffset,
+      const unsigned char *ref_ptr,
+      int Refstride,
+      unsigned int *sse
     );
 
-#define prototype_ssimpf(sym) \
-    void (sym) \
-      ( \
-        unsigned char *s, \
-        int sp, \
-        unsigned char *r, \
-        int rp, \
-        unsigned long *sum_s, \
-        unsigned long *sum_r, \
-        unsigned long *sum_sq_s, \
-        unsigned long *sum_sq_r, \
-        unsigned long *sum_sxr \
+typedef void (*vp8_ssimpf_fn_t)
+      (
+        unsigned char *s,
+        int sp,
+        unsigned char *r,
+        int rp,
+        unsigned long *sum_s,
+        unsigned long *sum_r,
+        unsigned long *sum_sq_s,
+        unsigned long *sum_sq_r,
+        unsigned long *sum_sxr
       );
 
-#define prototype_getmbss(sym) unsigned int (sym)(const short *)
-
-#define prototype_get16x16prederror(sym)\
-    unsigned int (sym)\
-    (\
-     const unsigned char *src_ptr, \
-     int source_stride, \
-     const unsigned char *ref_ptr, \
-     int  ref_stride \
-    )
-
-#if ARCH_X86 || ARCH_X86_64
-#include "x86/variance_x86.h"
-#endif
-
-#if ARCH_ARM
-#include "arm/variance_arm.h"
-#endif
-
-#ifndef vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_c
-#endif
-extern prototype_sad(vp8_variance_sad4x4);
-
-#ifndef vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_c
-#endif
-extern prototype_sad(vp8_variance_sad8x8);
-
-#ifndef vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_c
-#endif
-extern prototype_sad(vp8_variance_sad8x16);
-
-#ifndef vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_c
-#endif
-extern prototype_sad(vp8_variance_sad16x8);
-
-#ifndef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_c
-#endif
-extern prototype_sad(vp8_variance_sad16x16);
-
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_sad16x16x3
-#define vp8_variance_sad16x16x3 vp8_sad16x16x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad16x16x3);
-
-#ifndef vp8_variance_sad16x8x3
-#define vp8_variance_sad16x8x3 vp8_sad16x8x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad16x8x3);
-
-#ifndef vp8_variance_sad8x8x3
-#define vp8_variance_sad8x8x3 vp8_sad8x8x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad8x8x3);
-
-#ifndef vp8_variance_sad8x16x3
-#define vp8_variance_sad8x16x3 vp8_sad8x16x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad8x16x3);
-
-#ifndef vp8_variance_sad4x4x3
-#define vp8_variance_sad4x4x3 vp8_sad4x4x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad4x4x3);
-
-#ifndef vp8_variance_sad16x16x8
-#define vp8_variance_sad16x16x8 vp8_sad16x16x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad16x16x8);
-
-#ifndef vp8_variance_sad16x8x8
-#define vp8_variance_sad16x8x8 vp8_sad16x8x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad16x8x8);
-
-#ifndef vp8_variance_sad8x8x8
-#define vp8_variance_sad8x8x8 vp8_sad8x8x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad8x8x8);
-
-#ifndef vp8_variance_sad8x16x8
-#define vp8_variance_sad8x16x8 vp8_sad8x16x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad8x16x8);
-
-#ifndef vp8_variance_sad4x4x8
-#define vp8_variance_sad4x4x8 vp8_sad4x4x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad4x4x8);
-
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_sad16x16x4d
-#define vp8_variance_sad16x16x4d vp8_sad16x16x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad16x16x4d);
-
-#ifndef vp8_variance_sad16x8x4d
-#define vp8_variance_sad16x8x4d vp8_sad16x8x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad16x8x4d);
-
-#ifndef vp8_variance_sad8x8x4d
-#define vp8_variance_sad8x8x4d vp8_sad8x8x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad8x8x4d);
-
-#ifndef vp8_variance_sad8x16x4d
-#define vp8_variance_sad8x16x4d vp8_sad8x16x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad8x16x4d);
-
-#ifndef vp8_variance_sad4x4x4d
-#define vp8_variance_sad4x4x4d vp8_sad4x4x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad4x4x4d);
-
-#if ARCH_X86 || ARCH_X86_64
-#ifndef vp8_variance_copy32xn
-#define vp8_variance_copy32xn vp8_copy32xn_c
-#endif
-extern prototype_sad(vp8_variance_copy32xn);
-#endif
-
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+typedef unsigned int (*vp8_getmbss_fn_t)(const short *);
 
-#ifndef vp8_variance_var4x4
-#define vp8_variance_var4x4 vp8_variance4x4_c
-#endif
-extern prototype_variance(vp8_variance_var4x4);
-
-#ifndef vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_c
-#endif
-extern prototype_variance(vp8_variance_var8x8);
-
-#ifndef vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_c
-#endif
-extern prototype_variance(vp8_variance_var8x16);
-
-#ifndef vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_c
-#endif
-extern prototype_variance(vp8_variance_var16x8);
-
-#ifndef vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_c
-#endif
-extern prototype_variance(vp8_variance_var16x16);
-
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_subpixvar4x4
-#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar4x4);
-
-#ifndef vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar8x8);
-
-#ifndef vp8_variance_subpixvar8x16
-#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar8x16);
-
-#ifndef vp8_variance_subpixvar16x8
-#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar16x8);
-
-#ifndef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar16x16);
-
-#ifndef vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar16x16_h);
-
-#ifndef vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar16x16_v);
-
-#ifndef vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv);
-
-#ifndef vp8_variance_subpixmse16x16
-#define vp8_variance_subpixmse16x16 vp8_sub_pixel_mse16x16_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixmse16x16);
-
-//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_getmbss
-#define vp8_variance_getmbss vp8_get_mb_ss_c
-#endif
-extern prototype_getmbss(vp8_variance_getmbss);
-
-#ifndef vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_c
-#endif
-extern prototype_variance(vp8_variance_mse16x16);
-
-#ifndef vp8_variance_get4x4sse_cs
-#define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_c
-#endif
-extern prototype_get16x16prederror(vp8_variance_get4x4sse_cs);
-
-#ifndef vp8_ssimpf_8x8
-#define vp8_ssimpf_8x8 vp8_ssim_parms_8x8_c
-#endif
-extern prototype_ssimpf(vp8_ssimpf_8x8)
-
-#ifndef vp8_ssimpf_16x16
-#define vp8_ssimpf_16x16 vp8_ssim_parms_16x16_c
-#endif
-extern prototype_ssimpf(vp8_ssimpf_16x16)
-
-typedef prototype_sad(*vp8_sad_fn_t);
-typedef prototype_sad_multi_same_address(*vp8_sad_multi_fn_t);
-typedef prototype_sad_multi_same_address_1(*vp8_sad_multi1_fn_t);
-typedef prototype_sad_multi_dif_address(*vp8_sad_multi_d_fn_t);
-typedef prototype_variance(*vp8_variance_fn_t);
-typedef prototype_variance2(*vp8_variance2_fn_t);
-typedef prototype_subpixvariance(*vp8_subpixvariance_fn_t);
-typedef prototype_getmbss(*vp8_getmbss_fn_t);
-typedef prototype_ssimpf(*vp8_ssimpf_fn_t);
-typedef prototype_get16x16prederror(*vp8_get16x16prederror_fn_t);
-
-typedef struct
-{
-    vp8_sad_fn_t             sad4x4;
-    vp8_sad_fn_t             sad8x8;
-    vp8_sad_fn_t             sad8x16;
-    vp8_sad_fn_t             sad16x8;
-    vp8_sad_fn_t             sad16x16;
-
-    vp8_variance_fn_t        var4x4;
-    vp8_variance_fn_t        var8x8;
-    vp8_variance_fn_t        var8x16;
-    vp8_variance_fn_t        var16x8;
-    vp8_variance_fn_t        var16x16;
-
-    vp8_subpixvariance_fn_t  subpixvar4x4;
-    vp8_subpixvariance_fn_t  subpixvar8x8;
-    vp8_subpixvariance_fn_t  subpixvar8x16;
-    vp8_subpixvariance_fn_t  subpixvar16x8;
-    vp8_subpixvariance_fn_t  subpixvar16x16;
-    vp8_variance_fn_t        halfpixvar16x16_h;
-    vp8_variance_fn_t        halfpixvar16x16_v;
-    vp8_variance_fn_t        halfpixvar16x16_hv;
-    vp8_subpixvariance_fn_t  subpixmse16x16;
-
-    vp8_getmbss_fn_t         getmbss;
-    vp8_variance_fn_t        mse16x16;
-
-    vp8_get16x16prederror_fn_t get4x4sse_cs;
-
-    vp8_sad_multi_fn_t       sad16x16x3;
-    vp8_sad_multi_fn_t       sad16x8x3;
-    vp8_sad_multi_fn_t       sad8x16x3;
-    vp8_sad_multi_fn_t       sad8x8x3;
-    vp8_sad_multi_fn_t       sad4x4x3;
-
-    vp8_sad_multi1_fn_t      sad16x16x8;
-    vp8_sad_multi1_fn_t      sad16x8x8;
-    vp8_sad_multi1_fn_t      sad8x16x8;
-    vp8_sad_multi1_fn_t      sad8x8x8;
-    vp8_sad_multi1_fn_t      sad4x4x8;
-
-    vp8_sad_multi_d_fn_t     sad16x16x4d;
-    vp8_sad_multi_d_fn_t     sad16x8x4d;
-    vp8_sad_multi_d_fn_t     sad8x16x4d;
-    vp8_sad_multi_d_fn_t     sad8x8x4d;
-    vp8_sad_multi_d_fn_t     sad4x4x4d;
-
-#if ARCH_X86 || ARCH_X86_64
-    vp8_sad_fn_t             copy32xn;
-#endif
-
-#if CONFIG_INTERNAL_STATS
-    vp8_ssimpf_fn_t          ssimpf_8x8;
-    vp8_ssimpf_fn_t          ssimpf_16x16;
-#endif
-
-} vp8_variance_rtcd_vtable_t;
+typedef unsigned int (*vp8_get16x16prederror_fn_t)
+    (
+     const unsigned char *src_ptr,
+     int source_stride,
+     const unsigned char *ref_ptr,
+     int  ref_stride
+    );
 
 typedef struct
 {
@@ -411,16 +108,8 @@ typedef struct
     vp8_sad_multi1_fn_t     sdx8f;
     vp8_sad_multi_d_fn_t    sdx4df;
 #if ARCH_X86 || ARCH_X86_64
-    vp8_sad_fn_t            copymem;
+    vp8_copy32xn_fn_t       copymem;
 #endif
 } vp8_variance_fn_ptr_t;
 
-#if CONFIG_RUNTIME_CPU_DETECT
-#define VARIANCE_INVOKE(ctx,fn) (ctx)->fn
-#define SSIMPF_INVOKE(ctx,fn) (ctx)->ssimpf_##fn
-#else
-#define VARIANCE_INVOKE(ctx,fn) vp8_variance_##fn
-#define SSIMPF_INVOKE(ctx,fn) vp8_ssimpf_##fn
-#endif
-
 #endif
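
These typedefs still back the vp8_variance_fn_ptr_t table that motion search indexes by block size. A hedged sketch of how one 16x16 entry might be wired up after this change; sdx8f, sdx4df and copymem are the fields visible above, while sdf, vf, svf and sdx3f are assumed names for the single-call SAD, variance, sub-pixel variance and three-candidate SAD slots:

/* Sketch only: fills one motion-search entry with the direct RTCD
 * symbols that replace the old vtable lookups. */
static void setup_16x16_fns_sketch(vp8_variance_fn_ptr_t *fn)
{
    fn->sdf    = vp8_sad16x16;                /* vp8_sad_fn_t            */
    fn->vf     = vp8_variance16x16;           /* vp8_variance_fn_t       */
    fn->svf    = vp8_sub_pixel_variance16x16; /* vp8_subpixvariance_fn_t */
    fn->sdx3f  = vp8_sad16x16x3;              /* vp8_sad_multi_fn_t      */
    fn->sdx8f  = vp8_sad16x16x8;              /* vp8_sad_multi1_fn_t     */
    fn->sdx4df = vp8_sad16x16x4d;             /* vp8_sad_multi_d_fn_t    */
#if ARCH_X86 || ARCH_X86_64
    fn->copymem = vp8_copy32xn;               /* vp8_copy32xn_fn_t       */
#endif
}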
diff --git a/vp8/encoder/x86/variance_x86.h b/vp8/encoder/x86/variance_x86.h
deleted file mode 100644 (file)
index 4b41b54..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef VARIANCE_X86_H
-#define VARIANCE_X86_H
-
-
-/* Note:
- *
- * This platform is commonly built for runtime CPU detection. If you modify
- * any of the function mappings present in this file, be sure to also update
- * them in the function pointer initialization code
- */
-#if HAVE_MMX
-extern prototype_sad(vp8_sad4x4_mmx);
-extern prototype_sad(vp8_sad8x8_mmx);
-extern prototype_sad(vp8_sad8x16_mmx);
-extern prototype_sad(vp8_sad16x8_mmx);
-extern prototype_sad(vp8_sad16x16_mmx);
-extern prototype_variance(vp8_variance4x4_mmx);
-extern prototype_variance(vp8_variance8x8_mmx);
-extern prototype_variance(vp8_variance8x16_mmx);
-extern prototype_variance(vp8_variance16x8_mmx);
-extern prototype_variance(vp8_variance16x16_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_mmx);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_mmx);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_mmx);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_mse16x16_mmx);
-extern prototype_getmbss(vp8_get_mb_ss_mmx);
-extern prototype_variance(vp8_mse16x16_mmx);
-extern prototype_variance2(vp8_get8x8var_mmx);
-extern prototype_get16x16prederror(vp8_get4x4sse_cs_mmx);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef  vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_mmx
-
-#undef  vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_mmx
-
-#undef  vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_mmx
-
-#undef  vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_mmx
-
-#undef  vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_mmx
-
-#undef  vp8_variance_var4x4
-#define vp8_variance_var4x4 vp8_variance4x4_mmx
-
-#undef  vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_mmx
-
-#undef  vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_mmx
-
-#undef  vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_mmx
-
-#undef  vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_mmx
-
-#undef  vp8_variance_subpixvar4x4
-#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_mmx
-
-#undef  vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_mmx
-
-#undef  vp8_variance_subpixvar8x16
-#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_mmx
-
-#undef  vp8_variance_subpixvar16x8
-#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_mmx
-
-#undef  vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_mmx
-
-#undef  vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_mmx
-
-#undef  vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_mmx
-
-#undef  vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_mmx
-
-#undef  vp8_variance_subpixmse16x16
-#define vp8_variance_subpixmse16x16 vp8_sub_pixel_mse16x16_mmx
-
-#undef  vp8_variance_getmbss
-#define vp8_variance_getmbss vp8_get_mb_ss_mmx
-
-#undef  vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_mmx
-
-#undef  vp8_variance_get4x4sse_cs
-#define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_mmx
-
-#endif
-#endif
-
-
-#if HAVE_SSE2
-extern prototype_sad(vp8_sad4x4_wmt);
-extern prototype_sad(vp8_sad8x8_wmt);
-extern prototype_sad(vp8_sad8x16_wmt);
-extern prototype_sad(vp8_sad16x8_wmt);
-extern prototype_sad(vp8_sad16x16_wmt);
-extern prototype_sad(vp8_copy32xn_sse2);
-extern prototype_variance(vp8_variance4x4_wmt);
-extern prototype_variance(vp8_variance8x8_wmt);
-extern prototype_variance(vp8_variance8x16_wmt);
-extern prototype_variance(vp8_variance16x8_wmt);
-extern prototype_variance(vp8_variance16x16_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_wmt);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_wmt);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_wmt);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_mse16x16_wmt);
-extern prototype_getmbss(vp8_get_mb_ss_sse2);
-extern prototype_variance(vp8_mse16x16_wmt);
-extern prototype_variance2(vp8_get8x8var_sse2);
-extern prototype_variance2(vp8_get16x16var_sse2);
-extern prototype_ssimpf(vp8_ssim_parms_8x8_sse2)
-extern prototype_ssimpf(vp8_ssim_parms_16x16_sse2)
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef  vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_wmt
-
-#undef  vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_wmt
-
-#undef  vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_wmt
-
-#undef  vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_wmt
-
-#undef  vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_wmt
-
-#undef  vp8_variance_copy32xn
-#define vp8_variance_copy32xn vp8_copy32xn_sse2
-
-#undef  vp8_variance_var4x4
-#define vp8_variance_var4x4 vp8_variance4x4_wmt
-
-#undef  vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_wmt
-
-#undef  vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_wmt
-
-#undef  vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_wmt
-
-#undef  vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_wmt
-
-#undef  vp8_variance_subpixvar4x4
-#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_wmt
-
-#undef  vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_wmt
-
-#undef  vp8_variance_subpixvar8x16
-#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_wmt
-
-#undef  vp8_variance_subpixvar16x8
-#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_wmt
-
-#undef  vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_wmt
-
-#undef  vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_wmt
-
-#undef  vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_wmt
-
-#undef  vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_wmt
-
-#undef  vp8_variance_subpixmse16x16
-#define vp8_variance_subpixmse16x16 vp8_sub_pixel_mse16x16_wmt
-
-#undef  vp8_variance_getmbss
-#define vp8_variance_getmbss vp8_get_mb_ss_sse2
-
-#undef  vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_wmt
-
-#if ARCH_X86_64
-#undef  vp8_ssimpf_8x8
-#define vp8_ssimpf_8x8 vp8_ssim_parms_8x8_sse2
-
-#undef  vp8_ssimpf_16x16
-#define vp8_ssimpf_16x16 vp8_ssim_parms_16x16_sse2
-#endif
-
-#endif
-#endif
-
-
-#if HAVE_SSE3
-extern prototype_sad(vp8_sad16x16_sse3);
-extern prototype_sad(vp8_sad16x8_sse3);
-extern prototype_sad_multi_same_address(vp8_sad16x16x3_sse3);
-extern prototype_sad_multi_same_address(vp8_sad16x8x3_sse3);
-extern prototype_sad_multi_same_address(vp8_sad8x16x3_sse3);
-extern prototype_sad_multi_same_address(vp8_sad8x8x3_sse3);
-extern prototype_sad_multi_same_address(vp8_sad4x4x3_sse3);
-
-extern prototype_sad_multi_dif_address(vp8_sad16x16x4d_sse3);
-extern prototype_sad_multi_dif_address(vp8_sad16x8x4d_sse3);
-extern prototype_sad_multi_dif_address(vp8_sad8x16x4d_sse3);
-extern prototype_sad_multi_dif_address(vp8_sad8x8x4d_sse3);
-extern prototype_sad_multi_dif_address(vp8_sad4x4x4d_sse3);
-extern prototype_sad(vp8_copy32xn_sse3);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-
-#undef  vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_sse3
-
-#undef  vp8_variance_sad16x16x3
-#define vp8_variance_sad16x16x3 vp8_sad16x16x3_sse3
-
-#undef  vp8_variance_sad16x8x3
-#define vp8_variance_sad16x8x3 vp8_sad16x8x3_sse3
-
-#undef  vp8_variance_sad8x16x3
-#define vp8_variance_sad8x16x3 vp8_sad8x16x3_sse3
-
-#undef  vp8_variance_sad8x8x3
-#define vp8_variance_sad8x8x3 vp8_sad8x8x3_sse3
-
-#undef  vp8_variance_sad4x4x3
-#define vp8_variance_sad4x4x3 vp8_sad4x4x3_sse3
-
-#undef  vp8_variance_sad16x16x4d
-#define vp8_variance_sad16x16x4d vp8_sad16x16x4d_sse3
-
-#undef  vp8_variance_sad16x8x4d
-#define vp8_variance_sad16x8x4d vp8_sad16x8x4d_sse3
-
-#undef  vp8_variance_sad8x16x4d
-#define vp8_variance_sad8x16x4d vp8_sad8x16x4d_sse3
-
-#undef  vp8_variance_sad8x8x4d
-#define vp8_variance_sad8x8x4d vp8_sad8x8x4d_sse3
-
-#undef  vp8_variance_sad4x4x4d
-#define vp8_variance_sad4x4x4d vp8_sad4x4x4d_sse3
-
-#undef  vp8_variance_copy32xn
-#define vp8_variance_copy32xn vp8_copy32xn_sse3
-
-#endif
-#endif
-
-
-#if HAVE_SSSE3
-extern prototype_sad_multi_same_address(vp8_sad16x16x3_ssse3);
-extern prototype_sad_multi_same_address(vp8_sad16x8x3_ssse3);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_ssse3);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_ssse3);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef  vp8_variance_sad16x16x3
-#define vp8_variance_sad16x16x3 vp8_sad16x16x3_ssse3
-
-#undef  vp8_variance_sad16x8x3
-#define vp8_variance_sad16x8x3 vp8_sad16x8x3_ssse3
-
-#undef  vp8_variance_subpixvar16x8
-#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_ssse3
-
-#undef  vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_ssse3
-
-#endif
-#endif
-
-
-#if HAVE_SSE4_1
-extern prototype_sad_multi_same_address_1(vp8_sad16x16x8_sse4);
-extern prototype_sad_multi_same_address_1(vp8_sad16x8x8_sse4);
-extern prototype_sad_multi_same_address_1(vp8_sad8x16x8_sse4);
-extern prototype_sad_multi_same_address_1(vp8_sad8x8x8_sse4);
-extern prototype_sad_multi_same_address_1(vp8_sad4x4x8_sse4);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef  vp8_variance_sad16x16x8
-#define vp8_variance_sad16x16x8 vp8_sad16x16x8_sse4
-
-#undef  vp8_variance_sad16x8x8
-#define vp8_variance_sad16x8x8 vp8_sad16x8x8_sse4
-
-#undef  vp8_variance_sad8x16x8
-#define vp8_variance_sad8x16x8 vp8_sad8x16x8_sse4
-
-#undef  vp8_variance_sad8x8x8
-#define vp8_variance_sad8x8x8 vp8_sad8x8x8_sse4
-
-#undef  vp8_variance_sad4x4x8
-#define vp8_variance_sad4x4x8 vp8_sad4x4x8_sse4
-
-#endif
-#endif
-
-#endif
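
Deleting this header removes the old compile-time remapping layer entirely. Under the new scheme a generated header takes over; a hedged sketch of the shape such a header might take for one symbol (layout assumed, this is not the generated file itself):

/* Assumed shape of the generated dispatch for one symbol: a plain
 * #define when the ISA is fixed at build time, or a global function
 * pointer patched once at startup when runtime detection is enabled. */
unsigned int vp8_variance16x16_c(const unsigned char *src_ptr,
                                 int source_stride,
                                 const unsigned char *ref_ptr,
                                 int ref_stride, unsigned int *sse);
unsigned int vp8_variance16x16_wmt(const unsigned char *src_ptr,
                                   int source_stride,
                                   const unsigned char *ref_ptr,
                                   int ref_stride, unsigned int *sse);

#if CONFIG_RUNTIME_CPU_DETECT
extern unsigned int (*vp8_variance16x16)(const unsigned char *src_ptr,
                                         int source_stride,
                                         const unsigned char *ref_ptr,
                                         int ref_stride, unsigned int *sse);
#else
#define vp8_variance16x16 vp8_variance16x16_wmt
#endif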
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index 7f5208461809be295b3bd46f8ba2f3f190a7a60e..8fb79469626a58c8f9303af9b2f98311b16a441b 100644 (file)
@@ -127,33 +127,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
 #if HAVE_MMX
     if (flags & HAS_MMX)
     {
-        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_mmx;
-        cpi->rtcd.variance.sad16x8               = vp8_sad16x8_mmx;
-        cpi->rtcd.variance.sad8x16               = vp8_sad8x16_mmx;
-        cpi->rtcd.variance.sad8x8                = vp8_sad8x8_mmx;
-        cpi->rtcd.variance.sad4x4                = vp8_sad4x4_mmx;
-
-        cpi->rtcd.variance.var4x4                = vp8_variance4x4_mmx;
-        cpi->rtcd.variance.var8x8                = vp8_variance8x8_mmx;
-        cpi->rtcd.variance.var8x16               = vp8_variance8x16_mmx;
-        cpi->rtcd.variance.var16x8               = vp8_variance16x8_mmx;
-        cpi->rtcd.variance.var16x16              = vp8_variance16x16_mmx;
-
-        cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_mmx;
-        cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_mmx;
-        cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_mmx;
-        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_mmx;
-        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_mmx;
-        cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_mmx;
-        cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_mmx;
-        cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_mmx;
-        cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_mmx;
-
-        cpi->rtcd.variance.mse16x16              = vp8_mse16x16_mmx;
-        cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_mmx;
-
-        cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_mmx;
-
         cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_mmx;
         cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_mmx;
         cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_mmx;
@@ -175,34 +148,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
 #if HAVE_SSE2
     if (flags & HAS_SSE2)
     {
-        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_wmt;
-        cpi->rtcd.variance.sad16x8               = vp8_sad16x8_wmt;
-        cpi->rtcd.variance.sad8x16               = vp8_sad8x16_wmt;
-        cpi->rtcd.variance.sad8x8                = vp8_sad8x8_wmt;
-        cpi->rtcd.variance.sad4x4                = vp8_sad4x4_wmt;
-        cpi->rtcd.variance.copy32xn              = vp8_copy32xn_sse2;
-
-        cpi->rtcd.variance.var4x4                = vp8_variance4x4_wmt;
-        cpi->rtcd.variance.var8x8                = vp8_variance8x8_wmt;
-        cpi->rtcd.variance.var8x16               = vp8_variance8x16_wmt;
-        cpi->rtcd.variance.var16x8               = vp8_variance16x8_wmt;
-        cpi->rtcd.variance.var16x16              = vp8_variance16x16_wmt;
-
-        cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_wmt;
-        cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_wmt;
-        cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_wmt;
-        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_wmt;
-        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_wmt;
-        cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_wmt;
-        cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_wmt;
-        cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_wmt;
-        cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_wmt;
-
-        cpi->rtcd.variance.mse16x16              = vp8_mse16x16_wmt;
-        cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_sse2;
-
-        /* cpi->rtcd.variance.get4x4sse_cs  not implemented for wmt */;
-
         cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_sse2;
         cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_sse2;
         cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_sse2;
@@ -224,31 +169,13 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
         cpi->rtcd.temporal.apply                 = vp8_temporal_filter_apply_sse2;
 #endif
 
-#if CONFIG_INTERNAL_STATS
-#if ARCH_X86_64
-        cpi->rtcd.variance.ssimpf_8x8            = vp8_ssim_parms_8x8_sse2;
-        cpi->rtcd.variance.ssimpf_16x16          = vp8_ssim_parms_16x16_sse2;
-#endif
-#endif
     }
 #endif
 
 #if HAVE_SSE3
     if (flags & HAS_SSE3)
     {
-        cpi->rtcd.variance.sad16x16              = vp8_sad16x16_sse3;
-        cpi->rtcd.variance.sad16x16x3            = vp8_sad16x16x3_sse3;
-        cpi->rtcd.variance.sad16x8x3             = vp8_sad16x8x3_sse3;
-        cpi->rtcd.variance.sad8x16x3             = vp8_sad8x16x3_sse3;
-        cpi->rtcd.variance.sad8x8x3              = vp8_sad8x8x3_sse3;
-        cpi->rtcd.variance.sad4x4x3              = vp8_sad4x4x3_sse3;
         cpi->rtcd.search.full_search             = vp8_full_search_sadx3;
-        cpi->rtcd.variance.sad16x16x4d           = vp8_sad16x16x4d_sse3;
-        cpi->rtcd.variance.sad16x8x4d            = vp8_sad16x8x4d_sse3;
-        cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_sse3;
-        cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_sse3;
-        cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_sse3;
-        cpi->rtcd.variance.copy32xn              = vp8_copy32xn_sse3;
         cpi->rtcd.search.diamond_search          = vp8_diamond_search_sadx4;
         cpi->rtcd.search.refining_search         = vp8_refining_search_sadx4;
     }
@@ -257,12 +184,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
 #if HAVE_SSSE3
     if (flags & HAS_SSSE3)
     {
-        cpi->rtcd.variance.sad16x16x3            = vp8_sad16x16x3_ssse3;
-        cpi->rtcd.variance.sad16x8x3             = vp8_sad16x8x3_ssse3;
-
-        cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_ssse3;
-        cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_ssse3;
-
         cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_ssse3;
     }
 #endif
@@ -272,11 +193,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
 #if HAVE_SSE4_1
     if (flags & HAS_SSE4_1)
     {
-        cpi->rtcd.variance.sad16x16x8            = vp8_sad16x16x8_sse4;
-        cpi->rtcd.variance.sad16x8x8             = vp8_sad16x8x8_sse4;
-        cpi->rtcd.variance.sad8x16x8             = vp8_sad8x16x8_sse4;
-        cpi->rtcd.variance.sad8x8x8              = vp8_sad8x8x8_sse4;
-        cpi->rtcd.variance.sad4x4x8              = vp8_sad4x4x8_sse4;
         cpi->rtcd.search.full_search             = vp8_full_search_sadx8;
 
         cpi->rtcd.quantize.quantb                = vp8_regular_quantize_b_sse4;
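
The per-flag assignments removed above migrate into generated setup code. A hedged sketch of the runtime side (the function name is hypothetical; x86_simd_caps() and the HAS_* flags are the existing helpers already used in this file, and the assignable vp8_variance16x16 assumes the function-pointer form sketched earlier):

#include "vpx_ports/x86.h" /* x86_simd_caps(), HAS_SSE2, ... */

/* Hypothetical once-at-startup setup replacing the per-cpi assignments:
 * start from the C reference and upgrade when the CPU allows it. */
static void variance_rtcd_sketch(void)
{
    int flags = x86_simd_caps();

    vp8_variance16x16 = vp8_variance16x16_c;
    if (flags & HAS_SSE2)
        vp8_variance16x16 = vp8_variance16x16_wmt;
}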
diff --git a/vp8/vp8cx.mk b/vp8/vp8cx.mk
index 2d99981f5348ea9d286a64772781d8ab8f374c4d..f56328960fb41f952e0760228fb0838f0d8b936a 100644 (file)
@@ -97,7 +97,6 @@ endif
 VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/encodemb_x86.h
 VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/dct_x86.h
 VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/mcomp_x86.h
-VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/variance_x86.h
 VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/quantize_x86.h
 VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/temporal_filter_x86.h
 VP8_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/x86_csystemdependent.c
diff --git a/vp8/vp8cx_arm.mk b/vp8/vp8cx_arm.mk
index 7429e16d7556acac27495e974e20358d439add7a..6808422394cbe7cbd9cbc7b5fbe737f1cdcec7b1 100644 (file)
@@ -21,7 +21,6 @@ VP8_CX_SRCS-$(ARCH_ARM)  += encoder/arm/encodemb_arm.h
 VP8_CX_SRCS-$(ARCH_ARM)  += encoder/arm/quantize_arm.c
 VP8_CX_SRCS-$(ARCH_ARM)  += encoder/arm/quantize_arm.h
 VP8_CX_SRCS-$(ARCH_ARM)  += encoder/arm/variance_arm.c
-VP8_CX_SRCS-$(ARCH_ARM)  += encoder/arm/variance_arm.h
 
 #File list for edsp
 # encoder