static inline void bs_flush( bs_t *s )
{
M32( s->p ) = endian_fix32( s->cur_bits << (s->i_left&31) );
- s->p += WORD_SIZE - s->i_left / 8;
+ s->p += WORD_SIZE - (s->i_left >> 3);
s->i_left = WORD_SIZE*8;
}
/* The inverse of bs_flush: prepare the bitstream to be written to again. */
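Note: a right shift only matches a division by a power of two when the operand is known to be non-negative. For negative signed values, C's / truncates toward zero while an arithmetic right shift rounds toward negative infinity (and right-shifting a negative signed value is implementation-defined in C, though arithmetic on practically every target). Here s->i_left counts bits remaining in the current word, so it is always in [0, WORD_SIZE*8] and the substitution is exact. A minimal standalone sketch of the difference (not part of the patch):

    #include <stdio.h>
    int main(void)
    {
        printf( "%d %d\n",  9 / 8,  9 >> 3 );   /* 1 1: identical */
        printf( "%d %d\n", -9 / 8, -9 >> 3 );   /* -1 -2: they diverge */
        return 0;
    }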
int z = 0;
float ssim = 0.0;
int (*sum0)[4] = buf;
- int (*sum1)[4] = sum0 + width/4+3;
+ int (*sum1)[4] = sum0 + (width >> 2) + 3;
width >>= 2;
height >>= 2;
for( int y = 1; y < height; y++ )
{
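The same caveat holds here: width is a non-negative pixel count. Each sum0 entry holds the sums for one 4x4 block, so a buffer row is (width >> 2) block entries plus 3 extra entries, and sum1 is simply the row below sum0. A layout sketch with illustrative values (names and numbers are hypothetical):

    /* One buffer row of 4x4-block sums for a 64-pixel-wide plane. */
    int width      = 64;                 /* plane width in pixels */
    int row_stride = (width >> 2) + 3;   /* 16 block sums + 3 extra entries */
    /* sum1 = sum0 + row_stride advances exactly one row of block sums. */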
ALIGNED_16(unsigned char result[64]);
uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
- int int_dst_stride = dst_stride/4;
+ int int_dst_stride = dst_stride >> 2;
vec_st(r0, 0, result);
vec_st(r1, 16, result);
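Here the shift converts a byte stride into a stride in uint32_t elements for the 32-bit stores that follow; this assumes dst_stride is a non-negative multiple of 4, which holds for the aligned strides this AltiVec path operates on. A standalone sketch of the idiom (the function name is illustrative):

    #include <stdint.h>
    /* Step an output pointer by one row, given a stride in bytes. */
    static void store_two_rows( uint32_t *dst, const uint32_t *src, int stride_bytes )
    {
        int stride_int = stride_bytes >> 2;   /* bytes -> uint32_t elements */
        dst[0]          = src[0];
        dst[stride_int] = src[1];             /* first element of the next row */
    }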
static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
int src_stride, int dst_stride, int width, int height )
{
- int w = width/16;
+ int w = width >> 4;
int end = (width & 15);
vec_u8_t src0v, src1v, src2v;
vec_u8_t lv, hv, src1p1v;
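width >> 4 is the number of full 16-byte AltiVec vectors per row and width & 15 is the scalar tail; together they decompose the row exactly, since width == (width >> 4)*16 + (width & 15) for any non-negative width. A small sketch (names are illustrative):

    /* Split a row into full 16-byte vector chunks plus a scalar remainder. */
    static void split_row( int width, int *vecs, int *tail )
    {
        *vecs = width >> 4;   /* full 16-byte vectors */
        *tail = width & 15;   /* leftover pixels, handled scalar */
    }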
satdv = vec_splat( satdv, 1 );
vec_ste( satdv, 0, &i_satd );
- return i_satd / 2;
+ return i_satd >> 1;
}
/***********************************************************************
satdv = vec_splat( satdv, 1 );
vec_ste( satdv, 0, &i_satd );
- return i_satd / 2;
+ return i_satd >> 1;
}
/***********************************************************************
satdv = vec_splat( satdv, 1 );
vec_ste( satdv, 0, &i_satd );
- return i_satd / 2;
+ return i_satd >> 1;
}
/***********************************************************************
satdv = vec_splat( satdv, 3 );
vec_ste( satdv, 0, &i_satd );
- return i_satd / 2;
+ return i_satd >> 1;
}
/***********************************************************************
satdv = vec_splat( satdv, 3 );
vec_ste( satdv, 0, &i_satd );
- return i_satd / 2;
+ return i_satd >> 1;
}
/***********************************************************************
satdv = vec_splat( satdv, 3 );
vec_ste( satdv, 0, &i_satd );
- return i_satd / 2;
+ return i_satd >> 1;
}
/***********************************************************************
satdv = vec_splat( satdv, 3 );
vec_ste( satdv, 0, &i_satd );
- return i_satd / 2;
+ return i_satd >> 1;
}
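All seven SATD kernels end identically: the vector lanes are reduced into i_satd, which is a sum of absolute transform coefficients and hence never negative, so i_satd >> 1 is exactly i_satd / 2. A scalar sketch of the same pattern (a hypothetical reduction, not the AltiVec code):

    #include <stdlib.h>
    /* Reduce transform coefficients to a SATD-style score. */
    static int satd_reduce( const int *coef, int n )
    {
        unsigned sum = 0;             /* non-negative by construction */
        for( int i = 0; i < n; i++ )
            sum += abs( coef[i] );    /* sum of absolute coefficients */
        return sum >> 1;              /* identical to sum / 2 for unsigned */
    }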
for( int i = 0; i < 4; i++ )
{
x264_me_t *l0m = &a->l0.me8x8[i];
- const int x8 = i%2;
- const int y8 = i/2;
+ int x8 = i&1;
+ int y8 = i>>1;
m.i_pixel = PIXEL_8x8;
for( int i = 0; i < 4; i++ )
{
x264_me_t *m = &a->l0.me8x8[i];
- const int x8 = i%2;
- const int y8 = i/2;
+ int x8 = i&1;
+ int y8 = i>>1;
m->i_pixel = PIXEL_8x8;
m->i_ref_cost = i_ref_cost;
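i & 1 and i >> 1 decompose the raster-scan partition index into its column and row, replacing i % 2 and i / 2; the same substitution recurs in the bi-prediction loops further down. The mapping, spelled out as a standalone sketch:

    #include <stdio.h>
    int main(void)
    {
        /* 8x8 partitions of a macroblock in raster order: i -> (x8, y8) */
        for( int i = 0; i < 4; i++ )
            printf( "i=%d -> (%d,%d)\n", i, i & 1, i >> 1 );
        /* prints: (0,0) (1,0) (0,1) (1,1) */
        return 0;
    }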
static inline void x264_mb_cache_mv_p8x8( x264_t *h, x264_mb_analysis_t *a, int i )
{
- const int x = 2*(i%2);
- const int y = 2*(i/2);
+ int x = 2*(i&1);
+ int y = i&2;
switch( h->mb.i_sub_partition[i] )
{
static void x264_mb_load_mv_direct8x8( x264_t *h, int idx )
{
- const int x = 2*(idx&1);
- const int y = 2*(idx>>1);
+ int x = 2*(idx&1);
+ int y = idx&2;
x264_macroblock_cache_ref( h, x, y, 2, 2, 0, h->mb.cache.direct_ref[0][idx] );
x264_macroblock_cache_ref( h, x, y, 2, 2, 1, h->mb.cache.direct_ref[1][idx] );
x264_macroblock_cache_mv_ptr( h, x, y, 2, 2, 0, h->mb.cache.direct_mv[0][idx] );
static inline void x264_mb_cache_mv_b8x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
- int x = (i%2)*2;
- int y = (i/2)*2;
+ int x = 2*(i&1);
+ int y = i&2;
if( h->mb.i_sub_partition[i] == D_DIRECT_8x8 )
{
x264_mb_load_mv_direct8x8( h, i );
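The y coordinate uses a less obvious identity: for i in [0,3], 2*(i>>1) == (i&2), because bit 1 of i already has the place value 2, so the multiply disappears entirely. A quick standalone check:

    #include <assert.h>
    int main(void)
    {
        for( int i = 0; i < 4; i++ )
            assert( 2*(i >> 1) == (i & 2) );   /* bit 1 of i is worth exactly 2 */
        return 0;
    }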
for( int i = 0; i < 4; i++ )
{
- int x8 = i%2;
- int y8 = i/2;
+ int x8 = i&1;
+ int y8 = i>>1;
int i_part_cost;
int i_part_cost_bi;
int stride[2] = {8,8};
for( int i = 0; i < 4; i++ )
{
- const int x8 = i%2;
- const int y8 = i/2;
+ int x8 = i&1;
+ int y8 = i>>1;
int i_part_cost;
int i_part_cost_bi = 0;
int stride[2] = {8,8};
else
{
for( int i = 0; i < 16; i++ )
- if( h->mb.i_cbp_luma & ( 1 << ( i / 4 ) ) )
+ if( h->mb.i_cbp_luma & ( 1 << ( i >> 2 ) ) )
block_residual_write_cabac_cbf( h, cb, DCT_LUMA_4x4, i, h->dct.luma4x4[i], b_intra );
}
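i >> 2 maps a 4x4 luma block index (0..15) to the 8x8 block containing it (0..3), since the blocks are stored so that indices 4k..4k+3 belong to 8x8 block k; the corresponding i_cbp_luma bit says whether that 8x8 block has any coefficients. A sketch of the predicate (the function name is illustrative):

    /* Does the cbp say 4x4 block i lies in a coded 8x8 block? */
    static int block8x8_coded( int i_cbp_luma, int i )
    {
        return i_cbp_luma & ( 1 << ( i >> 2 ) );
    }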
continue;
bsad -= ycost;
xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
- cost_fpel_mvx+min_x, xs, width, bsad*17/16 );
+ cost_fpel_mvx+min_x, xs, width, bsad * 17 >> 4 );
for( i = 0; i < xn-2; i += 3 )
{
pixel *ref = p_fref_w+min_x+my*stride;
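bsad * 17 >> 4 relies on C precedence: * binds tighter than >>, so it parses as (bsad * 17) >> 4, i.e. bsad scaled by 17/16, the same slack factor as before; the two forms agree exactly because bsad is a non-negative SAD. A standalone check:

    #include <assert.h>
    int main(void)
    {
        int bsad = 1000;
        assert( (bsad * 17 >> 4) == ((bsad * 17) >> 4) );   /* precedence */
        assert( (bsad * 17 >> 4) ==  bsad * 17 / 16 );      /* exact for bsad >= 0 */
        return 0;
    }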
+ p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
if( b_chroma_me && cost < bcost ) \
{ \
- h->mc.mc_chroma( pix, 8, m->p_fref[4], m->i_stride[1], mx, my + mvy_offset, bw/2, bh/2 ); \
+ h->mc.mc_chroma( pix, 8, m->p_fref[4], m->i_stride[1], mx, my + mvy_offset, bw>>1, bh>>1 ); \
if( m->weight[1].weightfn ) \
m->weight[1].weightfn[x264_pixel_size[i_pixel].w>>3]( pix, 8, pix, 8, \
&m->weight[1], x264_pixel_size[i_pixel].h>>1 ); \
cost += h->pixf.mbcmp[i_pixel+3]( m->p_fenc[1], FENC_STRIDE, pix, 8 ); \
if( cost < bcost ) \
{ \
- h->mc.mc_chroma( pix, 8, m->p_fref[5], m->i_stride[1], mx, my + mvy_offset, bw/2, bh/2 ); \
+ h->mc.mc_chroma( pix, 8, m->p_fref[5], m->i_stride[1], mx, my + mvy_offset, bw>>1, bh>>1 ); \
if( m->weight[2].weightfn ) \
m->weight[2].weightfn[x264_pixel_size[i_pixel].w>>3]( pix, 8, pix, 8, \
&m->weight[2], x264_pixel_size[i_pixel].h>>1 ); \
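bw >> 1 and bh >> 1 are the chroma block dimensions: with 4:2:0 subsampling each chroma plane has half the luma resolution in both directions, and every H.264 luma partition size is even, so the shifts lose nothing. A sketch (names are illustrative):

    /* Luma -> chroma block dimensions under 4:2:0 subsampling;
       assumes even, non-negative luma dimensions. */
    static void luma_to_chroma_420( int bw, int bh, int *cw, int *ch )
    {
        *cw = bw >> 1;   /* half width  */
        *ch = bh >> 1;   /* half height */
    }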