CHECKED_MALLOCZERO( frame->lowres_mvs[j][i], 2*h->mb.i_mb_count*sizeof(int16_t) );
CHECKED_MALLOC( frame->lowres_mv_costs[j][i], h->mb.i_mb_count*sizeof(int) );
}
- CHECKED_MALLOC( frame->i_propagate_cost, (i_mb_count+3) * sizeof(uint16_t) );
+ CHECKED_MALLOC( frame->i_propagate_cost, (i_mb_count+7) * sizeof(uint16_t) );
for( int j = 0; j <= h->param.i_bframe+1; j++ )
for( int i = 0; i <= h->param.i_bframe+1; i++ )
CHECKED_MALLOC( frame->lowres_costs[j][i], (i_mb_count+3) * sizeof(uint16_t) );
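The propagate-cost buffer gets four more elements of padding because the AVX routine added below consumes 8 uint16_t costs per iteration instead of 4, so its last 16-byte load can read up to 7 elements past len rather than 3. A minimal sketch of that bound, with load_8_uint16 as a purely hypothetical placeholder:

    /* Sketch only: an 8-wide loop over n uint16_t values reads elements
     * [0, 8*ceil(n/8)), i.e. up to 7 past n-1, so any buffer it touches is
     * over-allocated by 7 elements to keep the tail load in bounds. */
    for( int i = 0; i < n; i += 8 )
        load_8_uint16( &buf[i] );   /* hypothetical helper; last call may read up to buf[n+6] */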
pd_16: times 4 dd 16
pd_0f: times 4 dd 0xffff
-pf_inv256: times 4 dd 0.00390625
+pf_inv256: times 8 dd 0.00390625
pad10: times 8 dw 10*PIXEL_MAX
pad20: times 8 dw 20*PIXEL_MAX
; uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len )
;-----------------------------------------------------------------------------
cglobal mbtree_propagate_cost_sse2, 7,7,7
- shl r6d, 1
+ add r6d, r6d
lea r0, [r0+r6*2]
add r1, r6
add r2, r6
jl .loop
REP_RET
+; Zero-extend 8 packed uint16_t lanes in xmm%1 to int32 and convert them to
+; 8 packed floats in ymm%1 (assumes xmm7 is zero; clobbers xmm4).
+%macro INT16_TO_FLOAT 1
+ vpunpckhwd xmm4, xmm%1, xmm7
+ vpunpcklwd xmm%1, xmm7
+ vinsertf128 ymm%1, ymm%1, xmm4, 1
+ vcvtdq2ps ymm%1, ymm%1
+%endmacro
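For readers who prefer intrinsics, a hedged C equivalent of what INT16_TO_FLOAT does (illustrative only; the patch itself uses the yasm macro above):

    #include <immintrin.h>

    /* Illustrative equivalent of INT16_TO_FLOAT, not x264 code: zero-extend
     * 8 uint16_t lanes of v to 32 bits and convert them to 8 floats.
     * zero must hold all-zero, like xmm7 in the assembly. */
    static inline __m256 int16_to_float( __m128i v, __m128i zero )
    {
        __m128i lo = _mm_unpacklo_epi16( v, zero );   /* vpunpcklwd */
        __m128i hi = _mm_unpackhi_epi16( v, zero );   /* vpunpckhwd */
        __m256i w  = _mm256_insertf128_si256( _mm256_castsi128_si256( lo ), hi, 1 ); /* vinsertf128 */
        return _mm256_cvtepi32_ps( w );               /* vcvtdq2ps */
    }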
+
+; FIXME: align loads/stores to 16 bytes
+cglobal mbtree_propagate_cost_avx, 7,7,8
+ add r6d, r6d ; r6 = len*2, the byte length of the uint16_t arrays
+ lea r0, [r0+r6*2] ; dst is int*, so it advances at twice that rate
+ add r1, r6
+ add r2, r6
+ add r3, r6
+ add r4, r6
+ neg r6 ; loop counter runs from -2*len up to 0
+ vmovdqa xmm5, [pw_3fff] ; mask for the packed inter costs
+ vbroadcastss ymm6, [r5] ; broadcast *fps_factor
+ vmulps ymm6, ymm6, [pf_inv256] ; fps_factor/256
+ vpxor xmm7, xmm7 ; zero, used by INT16_TO_FLOAT
+.loop:
+ vmovdqu xmm0, [r2+r6] ; intra
+ vmovdqu xmm1, [r4+r6] ; invq
+ vmovdqu xmm2, [r1+r6] ; prop
+ vpand xmm3, xmm5, [r3+r6] ; inter
+ INT16_TO_FLOAT 0
+ INT16_TO_FLOAT 1
+ INT16_TO_FLOAT 2
+ INT16_TO_FLOAT 3
+ vmulps ymm1, ymm1, ymm0 ; intra*invq
+ vsubps ymm4, ymm0, ymm3 ; intra - inter
+ vmulps ymm1, ymm1, ymm6 ; intra*invq*fps_factor>>8
+ vaddps ymm1, ymm1, ymm2 ; prop + (intra*invq*fps_factor>>8)
+ vrcpps ymm3, ymm0 ; 1 / intra 1st approximation
+ vmulps ymm2, ymm0, ymm3 ; intra * (1/intra 1st approx)
+ vmulps ymm2, ymm2, ymm3 ; intra * (1/intra 1st approx)^2
+ vmulps ymm1, ymm1, ymm4 ; (prop + (intra*invq*fps_factor>>8)) * (intra - inter)
+ vaddps ymm3, ymm3, ymm3 ; 2 * (1/intra 1st approx)
+ vsubps ymm3, ymm3, ymm2 ; 2nd approximation for 1/intra
+ vmulps ymm1, ymm1, ymm3 ; / intra
+ vcvtps2dq ymm1, ymm1 ; round to integer
+ vmovdqu [r0+r6*2], ymm1 ; store 8 int results
+ add r6, 16 ; 8 costs per iteration
+ jl .loop
+ vzeroupper ; avoid AVX<->SSE transition penalties
+ RET
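Putting the steps together, each iteration computes roughly the following per-macroblock expression, reconstructed here as a scalar C sketch from the comments above (illustrative only, not the C reference used by x264; the vector code rounds via vcvtps2dq instead of adding 0.5):

    #include <stdint.h>

    /* Scalar sketch of the loop above (illustrative only). */
    static void propagate_cost_scalar( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                       uint16_t *inter_costs, uint16_t *inv_qscales,
                                       float *fps_factor, int len )
    {
        float fps = *fps_factor / 256.f;
        for( int i = 0; i < len; i++ )
        {
            float intra  = intra_costs[i];
            float inter  = inter_costs[i] & 0x3fff;   /* same mask as pw_3fff */
            float amount = propagate_in[i] + intra * inv_qscales[i] * fps;
            dst[i] = (int)( amount * (intra - inter) / intra + 0.5f );
        }
    }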
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, int stride );
void x264_mbtree_propagate_cost_sse2( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
+void x264_mbtree_propagate_cost_avx( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+ uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, int i_dst,\
if( !(cpu&X264_CPU_STACK_MOD4) )
pf->mc_chroma = x264_mc_chroma_avx;
#endif // HIGH_BIT_DEPTH
+
+ /* Functions from here on require AVX. */
+ if( !(cpu&X264_CPU_AVX) )
+ return;
+ pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;
}
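Callers only reach the new routine through the mc function table, so nothing outside this dispatcher changes; a hedged sketch of such a call (argument names are illustrative, not the actual lookahead call site):

    /* Hypothetical call through the dispatch table; all variable names are illustrative. */
    h->mc.mbtree_propagate_cost( dst, propagate_in, intra_costs,
                                 inter_costs, inv_qscales, &fps_factor, len );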