variance4x4_64_sse4_1(a, a_stride, b, b_stride, &local_sse, &sum);
*sse = (uint32_t)local_sse;
- return *sse - ((sum * sum) >> 4);
+ return *sse - (uint32_t)((sum * sum) >> 4);
}
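// The 4x4 block above has 16 pixels, so the variance is
// SSE - (sum * sum) / 16; the ">> 4" performs that division, and the added
// uint32_t cast keeps the whole subtraction in the 32-bit return type.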
uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a,
*sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 4);
sum = ROUND_POWER_OF_TWO(sum, 2);
- return *sse - ((sum * sum) >> 4);
+ return *sse - (uint32_t)((sum * sum) >> 4);
}
uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a,
*sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 8);
sum = ROUND_POWER_OF_TWO(sum, 4);
- return *sse - ((sum * sum) >> 4);
+ return *sse - (uint32_t)((sum * sum) >> 4);
}
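// The 10-bit and 12-bit variants above first rescale their accumulators back
// to 8-bit precision: ROUND_POWER_OF_TWO drops 4/2 bits of SSE/sum for 10-bit
// input and 8/4 bits for 12-bit input (SSE grows with the square of the pixel
// values, the sum only linearly), then apply the same formula.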
// Sub-pixel
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
- unsigned int* sse,
- const int w, const int h) {
+static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
+ uint32_t* sse,
+ const int w, const int h) {
int64_t sum64;
uint64_t sse64;
sse64 = ROUND_POWER_OF_TWO(sse64, 12);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute the variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
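// calc_masked_variance reduces the vector accumulators v_sum_d and v_sse_q to
// 64-bit scalars, rounds the SSE, and returns SSE - (sum * sum) / (w * h).
// That difference is never negative, so uint32_t (rather than int) matches
// the quantity actually being computed.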
/*****************************************************************************
&sum64, &sse64);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute and return variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
sse64 = ROUND_POWER_OF_TWO(sse64, 4);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute and return variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
sse64 = ROUND_POWER_OF_TWO(sse64, 8);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute and return variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
#define HIGHBD_MASKED_VARWXH(W, H) \
*v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
}
-static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
- __m128i v_sse_q,
- unsigned int* sse,
- const int w, const int h) {
+static INLINE uint32_t highbd_10_calc_masked_variance(__m128i v_sum_d,
+ __m128i v_sse_q,
+ uint32_t* sse,
+ const int w,
+ const int h) {
int64_t sum64;
uint64_t sse64;
sse64 = ROUND_POWER_OF_TWO(sse64, 4);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute the variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
-static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
- __m128i v_sse_q,
- unsigned int* sse,
- const int w, const int h) {
+static INLINE uint32_t highbd_12_calc_masked_variance(__m128i v_sum_d,
+ __m128i v_sse_q,
+ uint32_t* sse,
+ const int w,
+ const int h) {
int64_t sum64;
uint64_t sse64;
sse64 = ROUND_POWER_OF_TWO(sse64, 8);
// Store the SSE
- *sse = (unsigned int)sse64;
+ *sse = (uint32_t)sse64;
// Compute the variance
- return *sse - ((sum64 * sum64) / (w * h));
+ return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
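// Illustrative plain-C form of the closing computation shared by the helpers
// above (the function name is hypothetical, not part of the patch):
//
//   static uint32_t variance_from_sums(uint64_t sse64, int64_t sum64,
//                                      const int w, const int h) {
//     const uint32_t sse32 = (uint32_t)sse64;
//     return sse32 - (uint32_t)((sum64 * sum64) / (w * h));
//   }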