granicus.if.org Git - libvpx/commitdiff
remove redundant functions
authorYaowu Xu <yaowu@google.com>
Mon, 6 Jun 2011 23:42:58 +0000 (16:42 -0700)
committerYaowu Xu <yaowu@google.com>
Mon, 6 Jun 2011 23:44:05 +0000 (16:44 -0700)
The encoder defined about 4 sets of similar functions to calculate sum,
variance or sse or a combination of them. This commit removed one set
of these functions, get8x8var and get16x16var, where calls to the latter
function are replaced with var16x16 by using the fact that on a 16x16 MB:
    variance == sse - sum*sum/256

Change-Id: I803eabd1fb3ab177780a40338cbd596dffaed267

vp8/encoder/arm/arm_csystemdependent.c
vp8/encoder/arm/variance_arm.h
vp8/encoder/encodeframe.c
vp8/encoder/generic/csystemdependent.c
vp8/encoder/ppc/csystemdependent.c
vp8/encoder/rdopt.c
vp8/encoder/variance.h
vp8/encoder/variance_c.c
vp8/encoder/x86/variance_mmx.c
vp8/encoder/x86/variance_x86.h
vp8/encoder/x86/x86_csystemdependent.c

index db079d5edeb4de0278759e72e3ceeae9593ad5b5..75e3a53d2b924c73f1363f9d6608e7c69a5decfe 100644 (file)
@@ -54,8 +54,6 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
         /*cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;*/
 
         /*cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_c;
-        cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c;
-        cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;;
         cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c;*/
 
         /*cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_c;
@@ -104,8 +102,6 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
         /*cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;*/
 
         cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_neon;
-        /*cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c;
-        cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;*/
         cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_neon;
 
         cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_neon;
index ad0d37193455243d1d876ff2b965d5dcfeb62eb6..cbfc753b335637f587941219ce7f3ca0c68d1251 100644 (file)
@@ -84,8 +84,6 @@ extern prototype_variance(vp8_variance_halfpixvar16x16_hv_neon);
 //extern prototype_getmbss(vp8_get_mb_ss_c);
 extern prototype_variance(vp8_mse16x16_neon);
 extern prototype_get16x16prederror(vp8_get16x16pred_error_neon);
-//extern prototype_variance2(vp8_get8x8var_c);
-//extern prototype_variance2(vp8_get16x16var_c);
 extern prototype_get16x16prederror(vp8_get4x4sse_cs_neon);
 
 #if !CONFIG_RUNTIME_CPU_DETECT
@@ -152,12 +150,6 @@ extern prototype_get16x16prederror(vp8_get4x4sse_cs_neon);
 #undef  vp8_variance_get16x16prederror
 #define vp8_variance_get16x16prederror vp8_get16x16pred_error_neon
 
-//#undef  vp8_variance_get8x8var
-//#define vp8_variance_get8x8var vp8_get8x8var_c
-
-//#undef  vp8_variance_get16x16var
-//#define vp8_variance_get16x16var vp8_get16x16var_c
-
 #undef  vp8_variance_get4x4sse_cs
 #define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_neon
 #endif
index ecbf0265f396192ba859393266ffd9056ec9fdf9..dac18aca195070a553dd72fb01f9060dc3aa6405 100644 (file)
@@ -84,8 +84,6 @@ static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
 {
     unsigned int act;
     unsigned int sse;
-    int sum;
-
     /* TODO: This could also be done over smaller areas (8x8), but that would
      *  require extensive changes elsewhere, as lambda is assumed to be fixed
      *  over an entire MB in most of the code.
@@ -93,14 +91,9 @@ static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
      *  lambda using a non-linear combination (e.g., the smallest, or second
      *  smallest, etc.).
      */
-    VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)(x->src.y_buffer,
-                    x->src.y_stride, VP8_VAR_OFFS, 0, &sse, &sum);
-
-    /* This requires a full 32 bits of precision. */
-    act = (sse<<8) - sum*sum;
-
-    /* Drop 4 to give us some headroom to work with. */
-    act = (act + 8) >> 4;
+    act =     VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
+                    x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
+    act = act<<4;
 
     /* If the region is flat, lower the activity some more. */
     if (act < 8<<12)
index 9af3f183a8a793a2dbd400ceed61359eb59667e2..37885dadf9201b9d1afc78b0c50d2375abcfcd51 100644 (file)
@@ -68,8 +68,6 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
     cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;
 
     cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_c;
-    cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c;
-    cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;;
     cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c;
 
     cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_c;
index 8dfd2a5439238fc326c28e26d6e2e707012bc925..0dd097f84eb6d2cd39e76fe07497635f9b942998 100644 (file)
@@ -49,8 +49,6 @@ void (*vp8_subtract_mbuv)(short *diff, unsigned char *usrc, unsigned char *vsrc,
 void (*vp8_fast_quantize_b)(BLOCK *b, BLOCKD *d);
 
 unsigned int (*vp8_get16x16pred_error)(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride);
-unsigned int (*vp8_get8x8var)(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride, unsigned int *SSE, int *Sum);
-unsigned int (*vp8_get16x16var)(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride, unsigned int *SSE, int *Sum);
 unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride);
 
 // c imports
@@ -89,8 +87,6 @@ extern sub_pixel_variance_function sub_pixel_variance16x16_c;
 
 extern unsigned int vp8_get_mb_ss_c(short *);
 extern unsigned int vp8_get16x16pred_error_c(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride);
-extern unsigned int vp8_get8x8var_c(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride, unsigned int *SSE, int *Sum);
-extern unsigned int vp8_get16x16var_c(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride, unsigned int *SSE, int *Sum);
 extern unsigned int vp8_get4x4sse_cs_c(unsigned char *src_ptr, int  source_stride, unsigned char *ref_ptr, int  recon_stride);
 
 // ppc
@@ -150,8 +146,6 @@ void vp8_cmachine_specific_config(void)
 
     vp8_get_mb_ss                 = vp8_get_mb_ss_c;
     vp8_get16x16pred_error       = vp8_get16x16pred_error_c;
-    vp8_get8x8var               = vp8_get8x8var_ppc;
-    vp8_get16x16var             = vp8_get16x16var_ppc;
     vp8_get4x4sse_cs            = vp8_get4x4sse_cs_c;
 
     vp8_sad16x16                = vp8_sad16x16_ppc;
index d4d6cd7c7559f677bc2da8801809bb9ff26931f1..95dbca00266f08045d52baebcbec347b8dac085d 100644 (file)
@@ -2182,29 +2182,28 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
             }
             else if (x->encode_breakout)
             {
-                int sum;
                 unsigned int sse;
+                unsigned int var;
                 int threshold = (xd->block[0].dequant[1]
                             * xd->block[0].dequant[1] >>4);
 
                 if(threshold < x->encode_breakout)
                     threshold = x->encode_breakout;
 
-                VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)
-                    (x->src.y_buffer, x->src.y_stride,
-                     x->e_mbd.predictor, 16, &sse, &sum);
+                var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+                        (x->src.y_buffer, x->src.y_stride,
+                        x->e_mbd.predictor, 16, &sse);
 
                 if (sse < threshold)
                 {
-                    // Check u and v to make sure skip is ok
-                    int sse2 = 0;
+                     unsigned int q2dc = xd->block[24].dequant[0];
                     /* If theres is no codeable 2nd order dc
                        or a very small uniform pixel change change */
-                    if (abs(sum) < (xd->block[24].dequant[0]<<2)||
-                        ((sum * sum>>8) > sse && abs(sum) <128))
+                    if ((sse - var < q2dc * q2dc >>4) ||
+                        (sse /2 > var && sse-var < 64))
                     {
-                        sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
-
+                        // Check u and v to make sure skip is ok
+                        int sse2=  VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
                         if (sse2 * 2 < threshold)
                         {
                             x->skip = 1;
index 0d7d977d7c4b804d9f03a7c27460a0191c69de6b..d52aa1b1d74c5ea352ab5abbb34cc4fffe4a8e8c 100644 (file)
@@ -313,16 +313,6 @@ extern prototype_variance(vp8_variance_mse16x16);
 #endif
 extern prototype_get16x16prederror(vp8_variance_get16x16prederror);
 
-#ifndef vp8_variance_get8x8var
-#define vp8_variance_get8x8var vp8_get8x8var_c
-#endif
-extern prototype_variance2(vp8_variance_get8x8var);
-
-#ifndef vp8_variance_get16x16var
-#define vp8_variance_get16x16var vp8_get16x16var_c
-#endif
-extern prototype_variance2(vp8_variance_get16x16var);
-
 #ifndef vp8_variance_get4x4sse_cs
 #define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_c
 #endif
@@ -377,8 +367,6 @@ typedef struct
     vp8_variance_fn_t        mse16x16;
 
     vp8_get16x16prederror_fn_t get16x16prederror;
-    vp8_variance2_fn_t       get8x8var;
-    vp8_variance2_fn_t       get16x16var;
     vp8_get16x16prederror_fn_t get4x4sse_cs;
 
     vp8_sad_multi_fn_t       sad16x16x3;
index ede07c8db3bf877aa6a8cc8b9359b70c371d9647..c7b9c22093646e35b11fd461bea30041b8ba5916 100644 (file)
@@ -61,40 +61,6 @@ static void variance(
     }
 }
 
-unsigned int
-vp8_get8x8var_c
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *SSE,
-    int *Sum
-)
-{
-
-    variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, SSE, Sum);
-    return (*SSE - (((*Sum) * (*Sum)) >> 6));
-}
-
-unsigned int
-vp8_get16x16var_c
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *SSE,
-    int *Sum
-)
-{
-
-    variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, SSE, Sum);
-    return (*SSE - (((*Sum) * (*Sum)) >> 8));
-
-}
-
-
 
 unsigned int vp8_variance16x16_c(
     const unsigned char *src_ptr,
index 4a89868c2c0442cc7b7fea7d5417184b3d6c041b..1b05571f13712d399067833ee7e0bbe9524a9ef1 100644 (file)
@@ -84,36 +84,6 @@ extern unsigned int vp8_get16x16pred_error_mmx
     int ref_stride
 );
 
-unsigned int vp8_get16x16var_mmx(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *SSE,
-    int *SUM
-)
-{
-    unsigned int sse0, sse1, sse2, sse3, var;
-    int sum0, sum1, sum2, sum3, avg;
-
-
-    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
-    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2) ;
-    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
-
-    var = sse0 + sse1 + sse2 + sse3;
-    avg = sum0 + sum1 + sum2 + sum3;
-
-    *SSE = var;
-    *SUM = avg;
-    return (var - ((avg * avg) >> 8));
-
-}
-
-
-
-
 
 unsigned int vp8_variance4x4_mmx(
     const unsigned char *src_ptr,
index 77e05e1e83208fd56dfd884e7877f8683e095e1e..4a640d7aa5604fae6422632e6a878619ffb60e10 100644 (file)
@@ -43,7 +43,6 @@ extern prototype_getmbss(vp8_get_mb_ss_mmx);
 extern prototype_variance(vp8_mse16x16_mmx);
 extern prototype_get16x16prederror(vp8_get16x16pred_error_mmx);
 extern prototype_variance2(vp8_get8x8var_mmx);
-extern prototype_variance2(vp8_get16x16var_mmx);
 extern prototype_get16x16prederror(vp8_get4x4sse_cs_mmx);
 
 #if !CONFIG_RUNTIME_CPU_DETECT
@@ -113,12 +112,6 @@ extern prototype_get16x16prederror(vp8_get4x4sse_cs_mmx);
 #undef  vp8_variance_get16x16prederror
 #define vp8_variance_get16x16prederror vp8_get16x16pred_error_mmx
 
-#undef  vp8_variance_get8x8var
-#define vp8_variance_get8x8var vp8_get8x8var_mmx
-
-#undef  vp8_variance_get16x16var
-#define vp8_variance_get16x16var vp8_get16x16var_mmx
-
 #undef  vp8_variance_get4x4sse_cs
 #define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_mmx
 
@@ -219,12 +212,6 @@ extern prototype_variance2(vp8_get16x16var_sse2);
 #undef  vp8_variance_get16x16prederror
 #define vp8_variance_get16x16prederror vp8_get16x16pred_error_sse2
 
-#undef  vp8_variance_get8x8var
-#define vp8_variance_get8x8var vp8_get8x8var_sse2
-
-#undef  vp8_variance_get16x16var
-#define vp8_variance_get16x16var vp8_get16x16var_sse2
-
 #endif
 #endif
 
index 378b140665ac06dadf7eaa4c7b831c58907326b0..f33c74a1c885002e496a5c3b42bdc15f594ec0e8 100644 (file)
@@ -176,8 +176,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
         cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_mmx;
 
         cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_mmx;
-        cpi->rtcd.variance.get8x8var             = vp8_get8x8var_mmx;
-        cpi->rtcd.variance.get16x16var           = vp8_get16x16var_mmx;
         cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_mmx;
 
         cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_mmx;
@@ -227,9 +225,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
         cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_sse2;
 
         cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_sse2;
-        cpi->rtcd.variance.get8x8var             = vp8_get8x8var_sse2;
-        cpi->rtcd.variance.get16x16var           = vp8_get16x16var_sse2;
-
 
         /* cpi->rtcd.variance.get4x4sse_cs  not implemented for wmt */;