Remove some unused, broken, and useless functions

Unused frame_sort (and its frame_sort_dts/frame_sort_pts wrapper macros).
Unused x86-64 dequant_4x4dc_mmx2 and predict_8x8_vr_mmx2: x86-64 guarantees SSE2, so the SSE2 versions are always used there.
Unused and broken high_depth integral_init*h_sse4, optimize_chroma_*, dequant_flat_*, sub8x8_dct_dc_*, zigzag_sub_*: these assume 8-bit pixels or 16-bit coefficients, so they are now compiled only for 8-bit builds.
Useless high_depth dequant_sse4, dequant_dc_sse4.
Also make x264_macroblock_cache_load static (it is only reached through its progressive/interlaced wrappers) and drop the unused x264_mc_copy_w8_aligned_sse2 prototype.
return frame;
}
-void x264_frame_sort( x264_frame_t **list, int b_dts )
-{
- int b_ok;
- do {
- b_ok = 1;
- for( int i = 0; list[i+1]; i++ )
- {
- int dtype = list[i]->i_type - list[i+1]->i_type;
- int dtime = list[i]->i_frame - list[i+1]->i_frame;
- int swap = b_dts ? dtype > 0 || ( dtype == 0 && dtime > 0 )
- : dtime > 0;
- if( swap )
- {
- XCHG( x264_frame_t*, list[i], list[i+1] );
- b_ok = 0;
- }
- }
- } while( !b_ok );
-}
-
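For reference, the removed helper bubble-sorted a NULL-terminated frame list in place, by (i_type, i_frame) for decode order or by i_frame alone for presentation order. A hypothetical caller of the removed macros (illustration only, names invented):

    /* Sort three frames into presentation order (ascending i_frame).
     * The list must be NULL-terminated, per the list[i+1] loop condition. */
    static void example_sort_pts( x264_frame_t *a, x264_frame_t *b, x264_frame_t *c )
    {
        x264_frame_t *list[4] = { b, a, c, NULL };
        x264_frame_sort_pts( list );   /* expanded to x264_frame_sort( list, 0 ) */
    }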
void x264_weight_scale_plane( x264_t *h, pixel *dst, int i_dst_stride, pixel *src, int i_src_stride,
int i_width, int i_height, x264_weight_t *w )
{
void x264_weight_scale_plane( x264_t *h, pixel *dst, int i_dst_stride, pixel *src, int i_src_stride,
int i_width, int i_height, x264_weight_t *w );
x264_frame_t *x264_frame_pop_unused( x264_t *h, int b_fdec );
-void x264_frame_sort( x264_frame_t **list, int b_dts );
void x264_frame_delete_list( x264_frame_t **list );
int x264_sync_frame_list_init( x264_sync_frame_list_t *slist, int nelem );
void x264_sync_frame_list_push( x264_sync_frame_list_t *slist, x264_frame_t *frame );
x264_frame_t *x264_sync_frame_list_pop( x264_sync_frame_list_t *slist );
-#define x264_frame_sort_dts(list) x264_frame_sort(list, 1)
-#define x264_frame_sort_pts(list) x264_frame_sort(list, 0)
-
#endif
# define LBOT 0
#endif
-void ALWAYS_INLINE x264_macroblock_cache_load( x264_t *h, int mb_x, int mb_y, int b_mbaff )
+static void ALWAYS_INLINE x264_macroblock_cache_load( x264_t *h, int mb_x, int mb_y, int b_mbaff )
{
x264_macroblock_cache_load_neighbours( h, mb_x, mb_y, b_mbaff );
void x264_macroblock_slice_init( x264_t *h );
void x264_macroblock_thread_init( x264_t *h );
-void x264_macroblock_cache_load( x264_t *h, int mb_x, int mb_y, int b_interlaced );
void x264_macroblock_cache_load_progressive( x264_t *h, int mb_x, int mb_y );
void x264_macroblock_cache_load_interlaced( x264_t *h, int mb_x, int mb_y );
void x264_macroblock_deblock_strength( x264_t *h );
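With the external declaration gone, callers use the two specialized entry points declared above. A sketch of how those wrappers plausibly forward to the now-static function, so the compiler can fold the b_mbaff branches away per variant:

    void x264_macroblock_cache_load_progressive( x264_t *h, int mb_x, int mb_y )
    {
        x264_macroblock_cache_load( h, mb_x, mb_y, 0 );   /* b_mbaff = 0 */
    }
    void x264_macroblock_cache_load_interlaced( x264_t *h, int mb_x, int mb_y )
    {
        x264_macroblock_cache_load( h, mb_x, mb_y, 1 );   /* b_mbaff = 1 */
    }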
psubw mm0, mm1 ; d02-d13 s02-s13 d02+d13 s02+s13
%endmacro
+%ifndef HIGH_BIT_DEPTH
INIT_MMX
cglobal sub8x8_dct_dc_mmx2, 3,3
DCTDC_2ROW_MMX m0, m4, 0
DCT2x2 mm0, mm7
movq [r0], mm0
RET
+%endif ; !HIGH_BIT_DEPTH
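sub8x8_dct_dc sums each 4x4 quadrant of the 8x8 pixel difference and applies a 2x2 Hadamard to the four DCs; the psubw/paddw butterflies above compute the same sums and differences pairwise. A C model of the operation (a sketch; strides are parameters here, whereas x264 uses its fixed FENC/FDEC strides):

    #include <stdint.h>
    static void sub8x8_dct_dc_ref( int16_t dct[4], const uint8_t *pix1, int i1,
                                   const uint8_t *pix2, int i2 )
    {
        int d[4] = { 0 };   /* DC of each 4x4 quadrant: TL, TR, BL, BR */
        for( int y = 0; y < 8; y++ )
            for( int x = 0; x < 8; x++ )
                d[(y>>2)*2 + (x>>2)] += pix1[y*i1+x] - pix2[y*i2+x];
        /* 2x2 Hadamard over the four DCs */
        dct[0] = (d[0]+d[1]) + (d[2]+d[3]);
        dct[1] = (d[0]+d[1]) - (d[2]+d[3]);
        dct[2] = (d[0]-d[1]) + (d[2]-d[3]);
        dct[3] = (d[0]-d[1]) - (d[2]-d[3]);
    }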
;-----------------------------------------------------------------------------
; void zigzag_scan_8x8_frame( int16_t level[64], int16_t dct[8][8] )
RET
%endmacro
+%ifndef HIGH_BIT_DEPTH
INIT_XMM ssse3
ZIGZAG_SUB_4x4 , frame
ZIGZAG_SUB_4x4 ac, frame
ZIGZAG_SUB_4x4 , field
ZIGZAG_SUB_4x4 ac, field
+%endif ; !HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void zigzag_interleave_8x8_cavlc( int16_t *dst, int16_t *src, uint8_t *nnz )
+%ifndef HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------
; void integral_init4h( uint16_t *sum, uint8_t *pix, int stride )
;-----------------------------------------------------------------------------
INTEGRAL_INIT8H
INIT_XMM avx
INTEGRAL_INIT8H
+%endif ; !HIGH_BIT_DEPTH
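The uint8_t prototype above is why the sse4 version could never have worked at high bit depth. The C reference builds one row of a 4-wide integral image: each output is a sliding 4-pixel horizontal sum plus the entry one row up (a sketch matching that prototype):

    #include <stdint.h>
    static void integral_init4h_ref( uint16_t *sum, const uint8_t *pix, int stride )
    {
        int v = pix[0] + pix[1] + pix[2] + pix[3];   /* initial 4-pixel window */
        for( int x = 0; x < stride-4; x++ )
        {
            sum[x] = v + sum[x-stride];   /* add the row above (must be initialized) */
            v += pix[x+4] - pix[x];       /* slide the window right by one */
        }
    }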
%macro INTEGRAL_INIT_8V 0
;-----------------------------------------------------------------------------
void x264_mc_copy_w4_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w8_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w8_sse2( pixel *, int, pixel *, int, int );
-void x264_mc_copy_w8_aligned_sse2( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_sse2( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_aligned_sse2( pixel *, int, pixel *, int, int );
PREDICT_8x8_VR w, dq, 2
INIT_XMM avx
PREDICT_8x8_VR w, dq, 2
-%else
+%elifndef ARCH_X86_64
INIT_MMX mmx2
PREDICT_8x8_VR b, q , 8
%endif
psrld m3, 1
DEQUANT_LOOP DEQUANT32_R, %1*%1/4, %3
+%ifndef HIGH_BIT_DEPTH
%if notcpuflag(avx)
cglobal dequant_%1x%1_flat16, 0,3
movifnidn t2d, r2m
%endif
RET
%endif ; !AVX
+%endif ; !HIGH_BIT_DEPTH
%endmacro ; DEQUANT
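The DEQUANT macro implements the usual H.264 dequant: multiply by the table for qp%6, then shift by qp/6-4, rounding when the shift is negative. The flat16 variant depends on coefficients fitting in 16 bits, which no longer holds at high bit depth (dctcoef is 32-bit there), hence the new guard. A C sketch of the reference operation:

    #include <stdint.h>
    static void dequant_4x4_ref( int32_t dct[16], int dequant_mf[6][16], int i_qp )
    {
        const int i_mf    = i_qp % 6;
        const int i_qbits = i_qp / 6 - 4;
        if( i_qbits >= 0 )
            for( int i = 0; i < 16; i++ )
                dct[i] = (dct[i] * dequant_mf[i_mf][i]) << i_qbits;
        else
        {
            const int f = 1 << (-i_qbits - 1);   /* rounding offset */
            for( int i = 0; i < 16; i++ )
                dct[i] = (dct[i] * dequant_mf[i_mf][i] + f) >> (-i_qbits);
        }
    }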
%ifdef HIGH_BIT_DEPTH
INIT_XMM sse2
DEQUANT 4, 4, 1
DEQUANT 8, 6, 1
-INIT_XMM sse4
-DEQUANT 4, 4, 1
-DEQUANT 8, 6, 1
%else
%ifndef ARCH_X86_64
INIT_MMX mmx
%ifdef HIGH_BIT_DEPTH
INIT_XMM sse2
DEQUANT_DC d, pmaddwd
-INIT_XMM sse4
-DEQUANT_DC d, pmaddwd
-INIT_XMM avx
-DEQUANT_DC d, pmaddwd
%else
+%ifndef ARCH_X86_64
INIT_MMX mmx2
DEQUANT_DC w, pmullw
+%endif
INIT_XMM sse2
DEQUANT_DC w, pmullw
INIT_XMM avx
REP_RET
%endmacro
+%ifndef HIGH_BIT_DEPTH
INIT_XMM sse2
OPTIMIZE_CHROMA_DC
INIT_XMM ssse3
OPTIMIZE_CHROMA_DC
INIT_XMM avx
OPTIMIZE_CHROMA_DC
+%endif ; !HIGH_BIT_DEPTH
%ifdef HIGH_BIT_DEPTH
;-----------------------------------------------------------------------------