vec_vsx_st( dcvsum8, 0, dest ); \
}
-static void idct8_dc_altivec( uint8_t *dst, int16_t dc1, int16_t dc2 )
+static void idct8_dc_altivec( uint8_t *dst, vec_s16_t dcv )
{
- dc1 = (dc1 + 32) >> 6;
- dc2 = (dc2 + 32) >> 6;
- vec_s16_t dcv = { dc1, dc1, dc1, dc1, dc2, dc2, dc2, dc2 };
-
LOAD_ZERO;
ALTIVEC_STORE8_DC_SUM_CLIP( &dst[0*FDEC_STRIDE], dcv );
ALTIVEC_STORE8_DC_SUM_CLIP( &dst[1*FDEC_STRIDE], dcv );
ALTIVEC_STORE8_DC_SUM_CLIP( &dst[2*FDEC_STRIDE], dcv );
ALTIVEC_STORE8_DC_SUM_CLIP( &dst[3*FDEC_STRIDE], dcv );
}

void x264_add8x8_idct_dc_altivec( uint8_t *p_dst, int16_t dct[4] )
{
- idct8_dc_altivec( &p_dst[0], dct[0], dct[1] );
- idct8_dc_altivec( &p_dst[4*FDEC_STRIDE+0], dct[2], dct[3] );
+ vec_s16_t dcv;
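+ /* Build the rounding constant 32 as 8 << 2: vec_splat_s16 only accepts a 5-bit immediate (-16..15) */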
+ vec_s16_t v32 = vec_sl( vec_splat_s16( 8 ), vec_splat_u16( 2 ) );
+ vec_u16_t v6 = vec_splat_u16( 6 );
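+ /* 16-byte load; only elements 0..3 (the four DC coefficients) are consumed below */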
+ vec_s16_t dctv = vec_vsx_ld( 0, dct );
+
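+ /* Round and scale all four DCs at once: dc = (dc + 32) >> 6 */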
+ dctv = vec_sra( vec_add( dctv, v32 ), v6 );
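+ /* Splat dct[0] and dct[1], then merge as 32-bit lanes to form { d0 d0 d0 d0 d1 d1 d1 d1 } */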
+ dcv = (vec_s16_t)vec_mergeh( (vec_s32_t)vec_splat( dctv, 0 ), (vec_s32_t)vec_splat( dctv, 1 ) );
+ dcv = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv, (vec_s32_t)dcv );
+ idct8_dc_altivec( &p_dst[0], dcv );
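+ /* Same expansion for dct[2]/dct[3], applied to the bottom 8x4 half */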
+ dcv = (vec_s16_t)vec_mergeh( (vec_s32_t)vec_splat( dctv, 2 ), (vec_s32_t)vec_splat( dctv, 3 ) );
+ dcv = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv, (vec_s32_t)dcv );
+ idct8_dc_altivec( &p_dst[4*FDEC_STRIDE+0], dcv );
}
#define IDCT_1D_ALTIVEC(s0, s1, s2, s3, d0, d1, d2, d3) \