{
LOAD_ZERO;
vector bool short mskA;
- vec_u32_t i_qbitsv;
+ vec_u32_t i_qbitsv = vec_splats( (uint32_t)16 );
vec_u16_t coefvA;
vec_u32_t multEvenvA, multOddvA;
vec_u16_t mfvA;
vec_s16_t temp1v, temp2v, tmpv;
- vec_u32_u qbits_u;
- qbits_u.s[0]=16;
- i_qbitsv = vec_splat(qbits_u.v, 0);
-
QUANT_16_U( 0, 16 );
return vec_any_ne(nz, zero_s16v);
}
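
The hunk above drops the union-based splat in favour of vec_splats(), which broadcasts a scalar into a vector directly. A minimal sketch of the two patterns, not the x264 source itself: it assumes a PowerPC compiler with AltiVec enabled (-maltivec) and <altivec.h>, and redefines vec_u32_t locally to mirror the typedef from x264's ppccommon.h.

#include <altivec.h>
#include <stdint.h>

typedef vector unsigned int vec_u32_t;

/* Old pattern: write the scalar through a union, then broadcast element 0.
 * Only element 0 of the union is initialized; the other lanes are irrelevant
 * because vec_splat() copies lane 0 into every lane. */
static vec_u32_t splat_u32_union( uint32_t x )
{
    union { uint32_t s[4]; vec_u32_t v; } u;
    u.s[0] = x;
    return vec_splat( u.v, 0 );
}

/* New pattern: vec_splats() broadcasts the scalar in one intrinsic and lets
 * the compiler choose the instruction sequence. */
static vec_u32_t splat_u32_direct( uint32_t x )
{
    return vec_splats( x );
}
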
vec_u32_t multEvenvA, multOddvA;
vec_s16_t one = vec_splat_s16(1);
vec_s16_t nz = zero_s16v;
+ static const vec_s16_t mask2 = CV(-1, -1, -1, -1, 0, 0, 0, 0);
vec_s16_t temp1v, temp2v;
i_qbitsv = vec_splats( (uint32_t) 16 );
biasv = vec_splats( (uint16_t)bias );
- static const vec_s16_t mask2 = CV(-1, -1, -1, -1, 0, 0, 0, 0);
QUANT_4_U_DC(0);
return vec_any_ne(vec_and(nz, mask2), zero_s16v);
}
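
The second hunk only moves the mask2 constant up with the other locals; it is still used to restrict the final nonzero test to the four 16-bit lanes that hold valid DC coefficients. A small sketch of that test, with a plain vector initializer standing in for x264's CV() constant helper and an illustrative function name:

#include <altivec.h>

typedef vector signed short vec_s16_t;

/* Returns nonzero if any of the first four 16-bit lanes of nz is nonzero;
 * the upper four lanes are cleared by the mask before the comparison. */
static int dc_has_nonzero( vec_s16_t nz )
{
    const vec_s16_t zero  = vec_splats( (short)0 );
    const vec_s16_t mask2 = { -1, -1, -1, -1, 0, 0, 0, 0 };
    return vec_any_ne( vec_and( nz, mask2 ), zero );
}
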
vec_s16_t temp1v, temp2v, tmpv;
- vec_u32_u qbits_u;
- qbits_u.s[0]=16;
- i_qbitsv = vec_splat(qbits_u.v, 0);
+ i_qbitsv = vec_splats( (uint32_t)16 );
for( int i = 0; i < 4; i++ )
QUANT_16_U( i*2*16, i*2*16+16 );
\
multEvenvA = vec_mule(dctv, mfv); \
multOddvA = vec_mulo(dctv, mfv); \
- dctv = (vec_s16_t) vec_packs(vec_mergeh(multEvenvA, multOddvA), \
- vec_mergel(multEvenvA, multOddvA)); \
+ dctv = (vec_s16_t) vec_packs( multEvenvA, multOddvA ); \
+ tmpv = xxpermdi( dctv, dctv, 2 ); \
+ dctv = vec_mergeh( dctv, tmpv ); \
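
In this macro, vec_mule/vec_mulo return the 32-bit products of the even- and odd-indexed coefficients, so the products have to be re-interleaved into the original element order before packing back to 16 bits. The old sequence interleaved with vec_mergeh/vec_mergel and then packed; the new one packs first and restores the order with a single doubleword half-swap (x264's xxpermdi() wrapper) followed by vec_mergeh. A scalar model of that lane bookkeeping, ignoring the saturation vec_packs performs; all names are illustrative, not the SIMD code itself:

#include <stdint.h>
#include <stdio.h>

int main( void )
{
    int16_t dct[8] = {  1,  2,  3,  4,  5,  6,  7,  8 };
    int16_t mf[8]  = { 10, 10, 20, 20, 30, 30, 40, 40 };
    int32_t even[4], odd[4];                 /* vec_mule / vec_mulo results  */
    int16_t old_way[8], new_way[8], packed[8], swapped[8];

    for( int i = 0; i < 4; i++ )
    {
        even[i] = dct[2*i]   * mf[2*i];
        odd[i]  = dct[2*i+1] * mf[2*i+1];
    }

    /* Old: mergeh/mergel interleave the 32-bit products, then pack,
     * giving products in the original order 0..7. */
    for( int i = 0; i < 4; i++ )
    {
        old_way[2*i]   = (int16_t)even[i];
        old_way[2*i+1] = (int16_t)odd[i];
    }

    /* New: pack first (even half, then odd half), swap the two halves,
     * and merge the original with the swapped copy. */
    for( int i = 0; i < 4; i++ )
    {
        packed[i]   = (int16_t)even[i];
        packed[4+i] = (int16_t)odd[i];
    }
    for( int i = 0; i < 8; i++ )
        swapped[i] = packed[(i + 4) & 7];     /* xxpermdi( dctv, dctv, 2 )   */
    for( int i = 0; i < 4; i++ )
    {
        new_way[2*i]   = packed[i];           /* vec_mergeh( dctv, tmpv )    */
        new_way[2*i+1] = swapped[i];
    }

    for( int i = 0; i < 8; i++ )
        printf( "%d: old=%d new=%d\n", i, old_way[i], new_way[i] );
    return 0;
}
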