Make type conversions explicit
author     Yaowu Xu <yaowu@google.com>
           Fri, 6 May 2016 21:34:26 +0000 (14:34 -0700)
committer  Yaowu Xu <yaowu@google.com>
           Sat, 7 May 2016 20:33:40 +0000 (20:33 +0000)
This eliminates MSVC compiler warnings.

Change-Id: Id6ace2586ed7c6248366905b133448fe8ecbd53d

vp10/encoder/bitstream.c
vpx_dsp/variance.c
vpx_dsp/x86/highbd_variance_sse4.c
vpx_dsp/x86/masked_variance_intrin_ssse3.c
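
For context on the warnings being silenced: MSVC flags implicit narrowing of 64-bit values (size_t, int64_t, uint64_t) to 32-bit integers as "possible loss of data", typically warning C4267 for size_t and C4244 for other integer conversions. A minimal, self-contained sketch of the pattern fixed in vp10/encoder/bitstream.c; the helper below is hypothetical and only stands in for vpx_wb_bytes_written():

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for vpx_wb_bytes_written(): bit-writer sizes are size_t. */
    static size_t bytes_written(size_t bit_offset) { return (bit_offset + 7) / 8; }

    static uint32_t uncompressed_header_bytes(size_t bit_offset) {
      size_t total = bytes_written(bit_offset);
      /* "return total;" draws a possible-loss-of-data warning (size_t -> uint32_t). */
      return (uint32_t)total;  /* explicit cast records that the narrowing is intended */
    }

    int main(void) {
      printf("%u\n", (unsigned)uncompressed_header_bytes(123));
      return 0;
    }

On the supported targets the cast does not change the value (header sizes fit easily in 32 bits); it only makes the conversion explicit so the build stays warning-clean.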

vp10/encoder/bitstream.c
index b79b94ad82e5c94ed9957726692f7f2134c2e71a..119e99a879dc1a722e93524730cab2c63323f96a 100644 (file)
@@ -2899,7 +2899,7 @@ static uint32_t write_tiles(VP10_COMP *const cpi,
     }
   }
 #endif  // CONFIG_EXT_TILE
-  return total_size;
+  return (uint32_t)total_size;
 }
 
 static void write_render_size(const VP10_COMMON *cm,
@@ -3448,7 +3448,7 @@ void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dst, size_t *size) {
   // Size of compressed header
   vpx_wb_write_literal(&wb, 0, 16);
 
-  uncompressed_header_size = vpx_wb_bytes_written(&wb);
+  uncompressed_header_size = (uint32_t)vpx_wb_bytes_written(&wb);
   data += uncompressed_header_size;
 
   vpx_clear_system_state();
vpx_dsp/variance.c
index e6be1dd7352115326730a5298d0affd580d2ce15..cc99d256b14efb3926e01277de844f095f0798cd 100644 (file)
@@ -719,8 +719,8 @@ void masked_variance(const uint8_t *a, int  a_stride,
     m += m_stride;
   }
   sum64 = (sum64 >= 0) ? sum64  : -sum64;
-  *sum = ROUND_POWER_OF_TWO(sum64, 6);
-  *sse = ROUND_POWER_OF_TWO(sse64, 12);
+  *sum = (int)ROUND_POWER_OF_TWO(sum64, 6);
+  *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 12);
 }
 
 #define MASK_VAR(W, H) \
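
The casts in masked_variance() above are needed because ROUND_POWER_OF_TWO() keeps the width of its operand: with 64-bit sum64/sse64 the rounded results are still 64-bit, so storing them through the 32-bit *sum and *sse pointers narrows implicitly. A sketch of that shape, using an approximation of the libvpx macro (the exact macro text is an assumption here):

    #include <stdint.h>
    #include <stdio.h>

    /* Approximation of ROUND_POWER_OF_TWO(): add half of 2^n, then shift right by n.
     * With a 64-bit value the whole expression stays 64-bit. */
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

    static void store_rounded(int64_t sum64, uint64_t sse64, int *sum, uint32_t *sse) {
      /* Without the casts MSVC flags both stores as possible loss of data. */
      *sum = (int)ROUND_POWER_OF_TWO(sum64, 6);
      *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 12);
    }

    int main(void) {
      int sum;
      uint32_t sse;
      store_rounded(640, 40960, &sum, &sse);
      printf("sum=%d sse=%u\n", sum, (unsigned)sse);
      return 0;
    }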
vpx_dsp/x86/highbd_variance_sse4.c
index 5c1dfe4dc4130887beb3136e10715dcf80da7c96..54fc609fbe92374f76301013f34f572c92aa808b 100644 (file)
@@ -76,7 +76,7 @@ uint32_t vpx_highbd_8_variance4x4_sse4_1(const uint8_t *a,
   variance4x4_64_sse4_1(a, a_stride, b, b_stride, &local_sse, &sum);
   *sse = (uint32_t)local_sse;
 
-  return *sse - ((sum * sum) >> 4);
+  return *sse - (uint32_t)((sum * sum) >> 4);
 }
 
 uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a,
@@ -91,7 +91,7 @@ uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a,
   *sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 4);
   sum = ROUND_POWER_OF_TWO(sum, 2);
 
-  return *sse - ((sum * sum) >> 4);
+  return *sse - (uint32_t)((sum * sum) >> 4);
 }
 
 uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a,
@@ -106,7 +106,7 @@ uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a,
   *sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 8);
   sum = ROUND_POWER_OF_TWO(sum, 4);
 
-  return *sse - ((sum * sum) >> 4);
+  return *sse - (uint32_t)((sum * sum) >> 4);
 }
 
 // Sub-pixel
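
The cast on the squared-sum term in the three 4x4 kernels above follows from the usual variance identity, variance = SSE - sum^2 / N: with sum held in a 64-bit accumulator, (sum * sum) >> 4 is a 64-bit expression, so subtracting it from the 32-bit *sse and returning uint32_t would otherwise narrow implicitly. A hedged sketch of that shape, assuming sum is an int64_t as in these kernels (the function name is illustrative):

    #include <stdint.h>

    /* Variance of a 4x4 block (N = 16): var = sse - sum*sum/16.  The squared term
     * is 64-bit, so it is narrowed explicitly before the unsigned 32-bit subtraction. */
    uint32_t variance4x4_from_sums(uint64_t local_sse, int64_t sum, uint32_t *sse) {
      *sse = (uint32_t)local_sse;
      return *sse - (uint32_t)((sum * sum) >> 4);
    }

For these 4x4 blocks the squared-sum term fits well within 32 bits (the 10- and 12-bit paths round sum down first), so the cast only documents the narrowing rather than changing the result.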
vpx_dsp/x86/masked_variance_intrin_ssse3.c
index ca4f6fcff90ad2f62f3bbbcb59a83fb0b12b756d..47e2c32d87805ea76e96458bd9ac09cffed6f22e 100644 (file)
@@ -54,9 +54,9 @@ static INLINE int64_t hsum_epi32_si64(__m128i v_d) {
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 
-static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
-                                       unsigned int* sse,
-                                       const int w, const int h) {
+static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
+                                            uint32_t* sse,
+                                            const int w, const int h) {
   int64_t sum64;
   uint64_t sse64;
 
@@ -71,9 +71,9 @@ static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
   sse64 = ROUND_POWER_OF_TWO(sse64, 12);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute the variance
-  return  *sse - ((sum64 * sum64) / (w * h));
+  return  *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
 /*****************************************************************************
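
Besides the casts, calc_masked_variance() and the high-bit-depth variants below change their return type from int to uint32_t and take a uint32_t* out-parameter: the variance is non-negative by construction (it is at most *sse), so the exact-width unsigned type states that directly instead of funnelling the unsigned result through a signed int. A minimal sketch of the resulting shape, with the SIMD reduction elided and names simplified:

    #include <stdint.h>

    /* Final scalar step shared by the masked-variance helpers (SIMD reduction omitted):
     * store the rounded SSE, then return var = sse - sum^2/(w*h); for genuine SSE/sum
     * pairs the result is non-negative (sum^2/(w*h) <= SSE). */
    uint32_t masked_variance_final(int64_t sum64, uint64_t sse64,
                                   uint32_t *sse, int w, int h) {
      *sse = (uint32_t)sse64;
      return *sse - (uint32_t)((sum64 * sum64) / (w * h));
    }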
@@ -497,9 +497,9 @@ static INLINE unsigned int highbd_masked_variancewxh_ssse3(
             &sum64, &sse64);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute and return variance
-  return *sse - ((sum64 * sum64) / (w * h));
+  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
 static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
@@ -523,9 +523,9 @@ static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
   sse64 = ROUND_POWER_OF_TWO(sse64, 4);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute and return variance
-  return *sse - ((sum64 * sum64) / (w * h));
+  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
 static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
@@ -548,9 +548,9 @@ static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
   sse64 = ROUND_POWER_OF_TWO(sse64, 8);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute and return variance
-  return *sse - ((sum64 * sum64) / (w * h));
+  return *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
 
 #define HIGHBD_MASKED_VARWXH(W, H)                                             \
@@ -1460,10 +1460,11 @@ static void highbd_sum_and_sse(const __m128i v_a_w, const __m128i v_b_w,
   *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
 }
 
-static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
-                                                 __m128i v_sse_q,
-                                                 unsigned int* sse,
-                                                 const int w, const int h) {
+static INLINE uint32_t highbd_10_calc_masked_variance(__m128i v_sum_d,
+                                                      __m128i v_sse_q,
+                                                      uint32_t* sse,
+                                                      const int w,
+                                                      const int h) {
   int64_t sum64;
   uint64_t sse64;
 
@@ -1482,14 +1483,15 @@ static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
   sse64 = ROUND_POWER_OF_TWO(sse64, 4);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute the variance
-  return  *sse - ((sum64 * sum64) / (w * h));
+  return  *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }
-static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
-                                                 __m128i v_sse_q,
-                                                 unsigned int* sse,
-                                                 const int w, const int h) {
+static INLINE uint32_t highbd_12_calc_masked_variance(__m128i v_sum_d,
+                                                      __m128i v_sse_q,
+                                                      uint32_t* sse,
+                                                      const int w,
+                                                      const int h) {
   int64_t sum64;
   uint64_t sse64;
 
@@ -1508,9 +1510,9 @@ static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
   sse64 = ROUND_POWER_OF_TWO(sse64, 8);
 
   // Store the SSE
-  *sse = (unsigned int)sse64;
+  *sse = (uint32_t)sse64;
   // Compute the variance
-  return  *sse - ((sum64 * sum64) / (w * h));
+  return  *sse - (uint32_t)((sum64 * sum64) / (w * h));
 }