RET
%endmacro ; LOAD_DEINTERLEAVE_CHROMA
+%macro LOAD_DEINTERLEAVE_CHROMA_FDEC_AVX512 0 ; emits: void load_deinterleave_chroma_fdec( pixel *dst, pixel *src, intptr_t i_src, int height )
+cglobal load_deinterleave_chroma_fdec, 4,5 ; r0=dst, r1=src, r2=src stride, r3d=height; r4 = scratch (5th gpr)
+ vbroadcasti32x8 m0, [deinterleave_shuf32a] ; pshufb control: separates interleaved U/V bytes within each 256-bit half
+ mov r4d, 0x3333ff00 ; two 16-bit k-masks packed in one dword (split into k1/k2 below)
+ kmovd k1, r4d ; low 16 bits (0xff00) = dword merge mask: selects the upper 256-bit half of a zmm
+ lea r4, [r2*3] ; r4 = 3*stride, for addressing the 4th row of each group
+ kshiftrd k2, k1, 16 ; k2 = 0x3333 = dword store mask: low 8 bytes of each 128-bit lane
+.loop: ; processes 4 source rows per iteration (see sub r3d, 4)
+ vbroadcasti128 ym1, [r1] ; row 0 replicated into both 128-bit lanes of ym1
+ vbroadcasti32x4 m1 {k1}, [r1+r2] ; row 1 merged (k1) into the upper 256-bit half -> m1 = {row0,row0,row1,row1}
+ vbroadcasti128 ym2, [r1+r2*2] ; same packing for rows 2 and 3 in m2
+ vbroadcasti32x4 m2 {k1}, [r1+r4] ; r4 = 3*stride -> row 3 into upper half
+ lea r1, [r1+r2*4] ; advance src by the 4 rows just loaded
+ pshufb m1, m0 ; deinterleave U/V bytes of rows 0-1 within each lane
+ pshufb m2, m0 ; deinterleave U/V bytes of rows 2-3
+ vmovdqa32 [r0] {k2}, m1 ; masked store: only the deinterleaved halves are written; NOTE(review): presumably the 0x3333 gaps match the fdec U/V plane layout -- confirm against FDEC_STRIDE
+ vmovdqa32 [r0+mmsize] {k2}, m2 ; rows 2-3 one zmm (mmsize=64) further into dst
+ add r0, 2*mmsize ; dst advances 128 bytes per 4-row group
+ sub r3d, 4 ; height is consumed 4 rows at a time (assumes height % 4 == 0 -- TODO confirm with callers)
+ jg .loop
+ RET
+%endmacro ; LOAD_DEINTERLEAVE_CHROMA_FDEC_AVX512
+
%macro LOAD_DEINTERLEAVE_CHROMA_FENC_AVX2 0
cglobal load_deinterleave_chroma_fenc, 4,5
vbroadcasti128 m0, [deinterleave_shuf]
LOAD_DEINTERLEAVE_CHROMA_FENC_AVX2
PLANE_DEINTERLEAVE_RGB
INIT_ZMM avx512
+LOAD_DEINTERLEAVE_CHROMA_FDEC_AVX512
LOAD_DEINTERLEAVE_CHROMA_FENC_AVX2
%endif
void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
#define x264_load_deinterleave_chroma_fdec_avx2 x264_template(load_deinterleave_chroma_fdec_avx2)
void x264_load_deinterleave_chroma_fdec_avx2( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
+#define x264_load_deinterleave_chroma_fdec_avx512 x264_template(load_deinterleave_chroma_fdec_avx512)
+void x264_load_deinterleave_chroma_fdec_avx512( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
#define x264_memcpy_aligned_sse x264_template(memcpy_aligned_sse)
void *x264_memcpy_aligned_sse ( void *dst, const void *src, size_t n );
#define x264_memcpy_aligned_avx x264_template(memcpy_aligned_avx)
pf->avg[PIXEL_8x16] = x264_pixel_avg_8x16_avx512;
pf->avg[PIXEL_8x8] = x264_pixel_avg_8x8_avx512;
pf->avg[PIXEL_8x4] = x264_pixel_avg_8x4_avx512;
+ pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx512;
pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx512;
}
#endif // HIGH_BIT_DEPTH