Also make x264_weighted_reference_duplicate() static.
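Making the function static gives it internal linkage, which is why its extern prototypes can simply be dropped from the headers further down in this patch. For readers unfamiliar with the pattern, a minimal, self-contained sketch in plain C (illustrative names only, not x264 code):

    /* helper.c -- internal linkage keeps the symbol out of the global
     * symbol table, so no header declaration is needed and the compiler
     * can warn if the function ever becomes unused. */
    #include <stdio.h>

    static int duplicate_count( int n )  /* visible only in this translation unit */
    {
        return n * 2;
    }

    int main( void )
    {
        printf( "%d\n", duplicate_count( 21 ) );  /* prints 42 */
        return 0;
    }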
int qpel_idx = ((mvy&3)<<2) + (mvx&3);
intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
- if ( (mvy&3) == 3 ) // explict if() to force conditional add
+ if( (mvy&3) == 3 ) // explicit if() to force conditional add
src1 += i_src_stride;
if( qpel_idx & 5 ) /* qpel interpolation needed */
int qpel_idx = ((mvy&3)<<2) + (mvx&3);
intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
- if ( (mvy&3) == 3 ) // explict if() to force conditional add
+ if( (mvy&3) == 3 ) // explicit if() to force conditional add
src1 += i_src_stride;
if( qpel_idx & 5 ) /* qpel interpolation needed */
void x264_predict_4x4_init_aarch64( int cpu, x264_predict_t pf[12] )
{
#if !HIGH_BIT_DEPTH
- if (cpu&X264_CPU_ARMV8)
+ if( cpu&X264_CPU_ARMV8 )
{
pf[I_PRED_4x4_H] = x264_predict_4x4_h_aarch64;
pf[I_PRED_4x4_V] = x264_predict_4x4_v_aarch64;
}
- if (cpu&X264_CPU_NEON)
+ if( cpu&X264_CPU_NEON )
{
pf[I_PRED_4x4_DC] = x264_predict_4x4_dc_neon;
pf[I_PRED_4x4_DC_TOP] = x264_predict_4x4_dc_top_neon;
void x264_predict_8x8c_init_aarch64( int cpu, x264_predict_t pf[7] )
{
#if !HIGH_BIT_DEPTH
- if (cpu&X264_CPU_ARMV8) {
+ if( cpu&X264_CPU_ARMV8 )
+ {
pf[I_PRED_CHROMA_V] = x264_predict_8x8c_v_aarch64;
}
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
pf[I_PRED_CHROMA_DC] = x264_predict_8x8c_dc_neon;
void x264_predict_8x16c_init_aarch64( int cpu, x264_predict_t pf[7] )
{
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
void x264_predict_8x8_init_aarch64( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_filter )
{
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
void x264_predict_16x16_init_aarch64( int cpu, x264_predict_t pf[7] )
{
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
int qpel_idx = ((mvy&3)<<2) + (mvx&3);
intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
- if ( (mvy&3) == 3 ) // explict if() to force conditional add
+ if( (mvy&3) == 3 ) // explicit if() to force conditional add
src1 += i_src_stride;
if( qpel_idx & 5 ) /* qpel interpolation needed */
int qpel_idx = ((mvy&3)<<2) + (mvx&3);
intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
- if ( (mvy&3) == 3 ) // explict if() to force conditional add
+ if( (mvy&3) == 3 ) // explicit if() to force conditional add
src1 += i_src_stride;
if( qpel_idx & 5 ) /* qpel interpolation needed */
void x264_predict_4x4_init_arm( int cpu, x264_predict_t pf[12] )
{
- if (!(cpu&X264_CPU_ARMV6))
+ if( !(cpu&X264_CPU_ARMV6) )
return;
#if !HIGH_BIT_DEPTH
pf[I_PRED_4x4_DC] = x264_predict_4x4_dc_armv6;
pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_armv6;
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
pf[I_PRED_4x4_DC_TOP] = x264_predict_4x4_dc_top_neon;
void x264_predict_8x8c_init_arm( int cpu, x264_predict_t pf[7] )
{
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
void x264_predict_8x16c_init_arm( int cpu, x264_predict_t pf[7] )
{
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
void x264_predict_8x8_init_arm( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_filter )
{
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
void x264_predict_16x16_init_arm( int cpu, x264_predict_t pf[7] )
{
- if (!(cpu&X264_CPU_NEON))
+ if( !(cpu&X264_CPU_NEON) )
return;
#if !HIGH_BIT_DEPTH
void x264_cabac_block_residual_8x8_rd_internal_sse2_lzcnt ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_8x8_rd_internal_ssse3 ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_8x8_rd_internal_ssse3_lzcnt( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
-void x264_cabac_block_residual_internal_sse2 ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
-void x264_cabac_block_residual_internal_sse2_lzcnt ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
+void x264_cabac_block_residual_internal_sse2 ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
+void x264_cabac_block_residual_internal_sse2_lzcnt( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
void x264_cabac_block_residual_internal_avx2_bmi2 ( dctcoef *l, int b_interlaced, intptr_t ctx_block_cat, x264_cabac_t *cb );
uint8_t *x264_nal_escape_neon( uint8_t *dst, uint8_t *src, uint8_t *end );
typedef struct
{
- uint8_t *(*nal_escape) ( uint8_t *dst, uint8_t *src, uint8_t *end );
+ uint8_t *(*nal_escape)( uint8_t *dst, uint8_t *src, uint8_t *end );
void (*cabac_block_residual_internal)( dctcoef *l, int b_interlaced,
intptr_t ctx_block_cat, x264_cabac_t *cb );
void (*cabac_block_residual_rd_internal)( dctcoef *l, int b_interlaced,
void x264_cabac_context_init( x264_t *h, x264_cabac_t *cb, int i_slice_type, int i_qp, int i_model );
void x264_cabac_encode_init_core( x264_cabac_t *cb );
-void x264_cabac_encode_init ( x264_cabac_t *cb, uint8_t *p_data, uint8_t *p_end );
+void x264_cabac_encode_init( x264_cabac_t *cb, uint8_t *p_data, uint8_t *p_end );
void x264_cabac_encode_decision_c( x264_cabac_t *cb, int i_ctx, int b );
void x264_cabac_encode_decision_asm( x264_cabac_t *cb, int i_ctx, int b );
void x264_cabac_encode_bypass_c( x264_cabac_t *cb, int b );
#define OPT(STR) else if( !strcmp( name, STR ) )
#define OPT2(STR0, STR1) else if( !strcmp( name, STR0 ) || !strcmp( name, STR1 ) )
- if(0);
+ if( 0 );
OPT("asm")
{
p->cpu = isdigit(value[0]) ? atoi(value) :
#define X264_MAX3(a,b,c) X264_MAX((a),X264_MAX((b),(c)))
#define X264_MIN4(a,b,c,d) X264_MIN((a),X264_MIN3((b),(c),(d)))
#define X264_MAX4(a,b,c,d) X264_MAX((a),X264_MAX3((b),(c),(d)))
-#define XCHG(type,a,b) do{ type t = a; a = b; b = t; } while(0)
+#define XCHG(type,a,b) do { type t = a; a = b; b = t; } while( 0 )
#define IS_DISPOSABLE(type) ( type == X264_TYPE_B )
#define FIX8(f) ((int)(f*(1<<8)+.5))
#define ALIGN(x,a) (((x)+((a)-1))&~((a)-1))
var = (void*)prealloc_size;\
preallocs[prealloc_idx++] = (uint8_t**)&var;\
prealloc_size += ALIGN(size, NATIVE_ALIGN);\
-} while(0)
+} while( 0 )
#define PREALLOC_END( ptr )\
do {\
CHECKED_MALLOC( ptr, prealloc_size );\
while( prealloc_idx-- )\
*preallocs[prealloc_idx] += (intptr_t)ptr;\
-} while(0)
+} while( 0 )
#define ARRAY_SIZE(array) (sizeof(array)/sizeof(array[0]))
// pix1 stride = FENC_STRIDE
// pix2 stride = FDEC_STRIDE
// p_dst stride = FDEC_STRIDE
- void (*sub4x4_dct) ( dctcoef dct[16], pixel *pix1, pixel *pix2 );
- void (*add4x4_idct) ( pixel *p_dst, dctcoef dct[16] );
+ void (*sub4x4_dct) ( dctcoef dct[16], pixel *pix1, pixel *pix2 );
+ void (*add4x4_idct)( pixel *p_dst, dctcoef dct[16] );
- void (*sub8x8_dct) ( dctcoef dct[4][16], pixel *pix1, pixel *pix2 );
- void (*sub8x8_dct_dc)( dctcoef dct[4], pixel *pix1, pixel *pix2 );
- void (*add8x8_idct) ( pixel *p_dst, dctcoef dct[4][16] );
- void (*add8x8_idct_dc) ( pixel *p_dst, dctcoef dct[4] );
+ void (*sub8x8_dct) ( dctcoef dct[4][16], pixel *pix1, pixel *pix2 );
+ void (*sub8x8_dct_dc) ( dctcoef dct[4], pixel *pix1, pixel *pix2 );
+ void (*add8x8_idct) ( pixel *p_dst, dctcoef dct[4][16] );
+ void (*add8x8_idct_dc)( pixel *p_dst, dctcoef dct[4] );
void (*sub8x16_dct_dc)( dctcoef dct[8], pixel *pix1, pixel *pix2 );
- void (*sub16x16_dct) ( dctcoef dct[16][16], pixel *pix1, pixel *pix2 );
- void (*add16x16_idct)( pixel *p_dst, dctcoef dct[16][16] );
- void (*add16x16_idct_dc) ( pixel *p_dst, dctcoef dct[16] );
+ void (*sub16x16_dct) ( dctcoef dct[16][16], pixel *pix1, pixel *pix2 );
+ void (*add16x16_idct) ( pixel *p_dst, dctcoef dct[16][16] );
+ void (*add16x16_idct_dc)( pixel *p_dst, dctcoef dct[16] );
- void (*sub8x8_dct8) ( dctcoef dct[64], pixel *pix1, pixel *pix2 );
- void (*add8x8_idct8) ( pixel *p_dst, dctcoef dct[64] );
+ void (*sub8x8_dct8) ( dctcoef dct[64], pixel *pix1, pixel *pix2 );
+ void (*add8x8_idct8)( pixel *p_dst, dctcoef dct[64] );
void (*sub16x16_dct8) ( dctcoef dct[4][64], pixel *pix1, pixel *pix2 );
void (*add16x16_idct8)( pixel *p_dst, dctcoef dct[4][64] );
stride2uv, bs[dir][edge], chroma_qp, a, b, 1,\
h->loopf.deblock_chroma##intra[dir] );\
}\
- } while(0)
+ } while( 0 )
if( h->mb.i_neighbour & MB_LEFT )
{
FDEC_STRIDE, bs[dir][edge], qpc, a, b, 0,\
h->loopf.deblock_luma[dir] );\
}\
- } while(0)
+ } while( 0 )
if( !transform_8x8 ) FILTER( 0, 1 );
FILTER( 0, 2 );
return 0;
}
-#define get_plane_ptr(...) do{ if( get_plane_ptr(__VA_ARGS__) < 0 ) return -1; }while(0)
+#define get_plane_ptr(...) do { if( get_plane_ptr(__VA_ARGS__) < 0 ) return -1; } while( 0 )
int x264_frame_copy_picture( x264_t *h, x264_frame_t *dst, x264_picture_t *src )
{
x264_deblock_intra_t deblock_chroma_intra_mbaff;
x264_deblock_intra_t deblock_chroma_420_intra_mbaff;
x264_deblock_intra_t deblock_chroma_422_intra_mbaff;
- void (*deblock_strength) ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
- int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4], int mvy_limit,
- int bframe );
+ void (*deblock_strength)( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
+ int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4], int mvy_limit,
+ int bframe );
} x264_deblock_function_t;
void x264_frame_delete( x264_frame_t *frame );
M16( h->mb.cache.mvd[l][x264_scan8[0]-1+0*8] ) = 0;
M16( h->mb.cache.mvd[l][x264_scan8[0]-1+1*8] ) = 0;
}
- if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >=0) )
+ if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >= 0) )
{
CP16( h->mb.cache.mvd[l][x264_scan8[8 ] - 1], mvd[left[LBOT]][left_index_table->intra[2]] );
CP16( h->mb.cache.mvd[l][x264_scan8[10] - 1], mvd[left[LBOT]][left_index_table->intra[3]] );
{{0,0},{0,0}} /* B_SKIP */
};
-#define IS_SUB4x4(type) ( (type ==D_L0_4x4)||(type ==D_L1_4x4)||(type ==D_BI_4x4))
-#define IS_SUB4x8(type) ( (type ==D_L0_4x8)||(type ==D_L1_4x8)||(type ==D_BI_4x8))
-#define IS_SUB8x4(type) ( (type ==D_L0_8x4)||(type ==D_L1_8x4)||(type ==D_BI_8x4))
-#define IS_SUB8x8(type) ( (type ==D_L0_8x8)||(type ==D_L1_8x8)||(type ==D_BI_8x8)||(type ==D_DIRECT_8x8))
+#define IS_SUB4x4(type) ( (type == D_L0_4x4)||(type == D_L1_4x4)||(type == D_BI_4x4) )
+#define IS_SUB4x8(type) ( (type == D_L0_4x8)||(type == D_L1_4x8)||(type == D_BI_4x8) )
+#define IS_SUB8x4(type) ( (type == D_L0_8x4)||(type == D_L1_8x4)||(type == D_BI_8x4) )
+#define IS_SUB8x8(type) ( (type == D_L0_8x8)||(type == D_L1_8x8)||(type == D_BI_8x8)||(type == D_DIRECT_8x8) )
enum mb_partition_e
{
/* sub partition type for P_8x8 and B_8x8 */
{\
MC_CLIP_ADD((s)[0], (x)[0]);\
MC_CLIP_ADD((s)[1], (x)[1]);\
-} while(0)
+} while( 0 )
#define PROPAGATE_LIST(cpu)\
void x264_mbtree_propagate_list_internal_##cpu( int16_t (*mvs)[2], int16_t *propagate_amount,\
v16u8 diff;
v8u16 sad = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LW4( p_src, i_src_stride, u_src0, u_src1, u_src2, u_src3 );
p_src += ( 4 * i_src_stride );
v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3;
v8u16 sad = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
v16u8 src0, src1, ref0, ref1;
v8u16 sad = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB2( p_src, i_src_stride, src0, src1 );
p_src += ( 2 * i_src_stride );
v8u16 sad1 = { 0 };
v8u16 sad2 = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LW4( p_src, i_src_stride, src0, src1, src2, src3 );
INSERT_W4_UB( src0, src1, src2, src3, src );
v8u16 sad1 = { 0 };
v8u16 sad2 = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
v8u16 sad1 = { 0 };
v8u16 sad2 = { 0 };
- for ( i_ht_cnt = ( i_height >> 1 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 1 ); i_ht_cnt--; )
{
src = LD_UB( p_src );
p_src += i_src_stride;
p_ref2 = p_aref[2];
p_ref3 = p_aref[3];
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LW4( p_src, i_src_stride, src0, src1, src2, src3 );
INSERT_W4_UB( src0, src1, src2, src3, src );
p_ref2 = p_aref[2];
p_ref3 = p_aref[3];
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
p_ref2 = p_aref[2];
p_ref3 = p_aref[3];
- for ( i_ht_cnt = ( i_height >> 1 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 1 ); i_ht_cnt--; )
{
src = LD_UB( p_src );
p_src += i_src_stride;
v8u16 add, pix_r, pix_l;
v4u32 sqr = { 0 };
- for ( u_cnt = i_height; u_cnt--; )
+ for( u_cnt = i_height; u_cnt--; )
{
pix = LD_SB( p_pix );
p_pix += i_stride;
v8u16 add, pix_r;
v4u32 sqr = { 0 };
- for ( u_cnt = i_height; u_cnt--; )
+ for( u_cnt = i_height; u_cnt--; )
{
pix = LD_SB( p_pix );
p_pix += i_stride;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
v16u8 ref = { 0 };
v4i32 var = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LW4( p_src, i_src_stride, u_src0, u_src1, u_src2, u_src3 );
p_src += ( 4 * i_src_stride );
v16u8 ref0, ref1, ref2, ref3;
v4i32 var = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
LD_UB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += ( 4 * i_src_stride );
v16u8 src, ref;
v4i32 var = { 0 };
- for ( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
+ for( i_ht_cnt = ( i_height >> 2 ); i_ht_cnt--; )
{
src = LD_UB( p_src );
p_src += i_src_stride;
v8i16 diff0, diff1, diff2, diff3;
v8i16 temp0, temp1, temp2, temp3;
- for ( cnt = i_height >> 2; cnt--; )
+ for( cnt = i_height >> 2; cnt--; )
{
LD_SB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += 4 * i_src_stride;
v8i16 diff0, diff1, diff2, diff3, diff4, diff5, diff6, diff7;
v8i16 temp0, temp1, temp2, temp3;
- for ( cnt = i_height >> 2; cnt--; )
+ for( cnt = i_height >> 2; cnt--; )
{
LD_SB4( p_src, i_src_stride, src0, src1, src2, src3 );
p_src += 4 * i_src_stride;
uint8_t u_inp0, u_inp1, u_inp2, u_inp3;
v16u8 src0, src1, src2, src3;
- for ( u_row = 4; u_row--; )
+ for( u_row = 4; u_row--; )
{
u_inp0 = p_src[0];
p_src += i_src_stride;
v8u16 sum_above;
v4u32 sum;
- if ( is_left && is_above )
+ if( is_left && is_above )
{
src_above = LD_UB( p_src_top );
sum = __msa_hadd_u_w( sum_above, sum_above );
u_addition = __msa_copy_u_w( ( v4i32 ) sum, 0 );
- for ( u_row = 0; u_row < 4; u_row++ )
+ for( u_row = 0; u_row < 4; u_row++ )
{
u_addition += p_src_left[u_row * i_src_stride_left];
}
u_addition = ( u_addition + 4 ) >> 3;
store = ( v16u8 ) __msa_fill_b( u_addition );
}
- else if ( is_left )
+ else if( is_left )
{
- for ( u_row = 0; u_row < 4; u_row++ )
+ for( u_row = 0; u_row < 4; u_row++ )
{
u_addition += p_src_left[u_row * i_src_stride_left];
}
u_addition = ( u_addition + 2 ) >> 2;
store = ( v16u8 ) __msa_fill_b( u_addition );
}
- else if ( is_above )
+ else if( is_above )
{
src_above = LD_UB( p_src_top );
v4u32 sum_top;
v2u64 sum;
- if ( is_left && is_above )
+ if( is_left && is_above )
{
src_above = LD_UB( p_src_top );
sum = __msa_hadd_u_d( sum_top, sum_top );
u_addition = __msa_copy_u_w( ( v4i32 ) sum, 0 );
- for ( u_row = 0; u_row < 16; u_row++ )
+ for( u_row = 0; u_row < 16; u_row++ )
{
u_addition += p_src_left[u_row * i_src_stride_left];
}
u_addition = ( u_addition + 16 ) >> 5;
store = ( v16u8 ) __msa_fill_b( u_addition );
}
- else if ( is_left )
+ else if( is_left )
{
- for ( u_row = 0; u_row < 16; u_row++ )
+ for( u_row = 0; u_row < 16; u_row++ )
{
u_addition += p_src_left[u_row * i_src_stride_left];
}
u_addition = ( u_addition + 8 ) >> 4;
store = ( v16u8 ) __msa_fill_b( u_addition );
}
- else if ( is_above )
+ else if( is_above )
{
src_above = LD_UB( p_src_top );
vec5 = vec8 * int_multiplier;
vec3 = vec8 * 4;
- for ( u_lpcnt = 4; u_lpcnt--; )
+ for( u_lpcnt = 4; u_lpcnt--; )
{
vec0 = vec5;
vec0 += vec4;
vec6 = vec8 * 4;
vec7 = vec8 * int_multiplier;
- for ( u_lpcnt = 16; u_lpcnt--; )
+ for( u_lpcnt = 16; u_lpcnt--; )
{
vec0 = vec7;
vec0 += vec4;
u_src0 = __msa_copy_u_w( ( v4i32 ) sum, 0 );
u_src1 = __msa_copy_u_w( ( v4i32 ) sum, 1 );
- for ( u_lp_cnt = 0; u_lp_cnt < 4; u_lp_cnt++ )
+ for( u_lp_cnt = 0; u_lp_cnt < 4; u_lp_cnt++ )
{
u_src0 += p_src[u_lp_cnt * i_stride - 1];
u_src2 += p_src[( 4 + u_lp_cnt ) * i_stride - 1];
u_out2 = u_src2 * 0x01010101;
u_out3 = u_src3 * 0x01010101;
- for ( u_lp_cnt = 4; u_lp_cnt--; )
+ for( u_lp_cnt = 4; u_lp_cnt--; )
{
SW( u_out0, p_src );
SW( u_out1, ( p_src + 4 ) );
LD_SW2( pi_dequant_mf[i_mf], 4, dequant_m_f0, dequant_m_f1 );
LD_SW2( pi_dequant_mf[i_mf] + 8, 4, dequant_m_f2, dequant_m_f3 );
- if ( q_bits >= 0 )
+ if( q_bits >= 0 )
{
v8i16 dequant_mf_h0, dequant_mf_h1, q_bits_vec;
LD_SW2( pi_dequant_mf[i_mf] + 48, 4, dequant_m_f12, dequant_m_f13 );
LD_SW2( pi_dequant_mf[i_mf] + 56, 4, dequant_m_f14, dequant_m_f15 );
- if ( q_bits >= 0 )
+ if( q_bits >= 0 )
{
v8i16 q_bits_vec;
v8i16 dequant_mf_h0, dequant_mf_h1, dequant_mf_h2, dequant_mf_h3;
LD_SH2( p_dct, 8, dct0, dct1 );
- if ( q_bits >= 0 )
+ if( q_bits >= 0 )
{
i_dmf <<= q_bits;
rewind( fp );
CHECKED_MALLOC( binary, size );
- if ( fread( binary, 1, size, fp ) != size )
+ if( fread( binary, 1, size, fp ) != size )
goto fail;
const uint8_t *ptr = (const uint8_t*)binary;
#define INTRA_MBS 2
#define COPY2_IF_LT( x, y, a, b )\
- if((y)<(x))\
+ if( (y) < (x) )\
{\
(x) = (y);\
(a) = (b);\
// SSD assumes all args aligned
// other cmp functions assume first arg aligned
-typedef int (*x264_pixel_cmp_t) ( pixel *, intptr_t, pixel *, intptr_t );
-typedef void (*x264_pixel_cmp_x3_t) ( pixel *, pixel *, pixel *, pixel *, intptr_t, int[3] );
-typedef void (*x264_pixel_cmp_x4_t) ( pixel *, pixel *, pixel *, pixel *, pixel *, intptr_t, int[4] );
+typedef int (*x264_pixel_cmp_t)( pixel *, intptr_t, pixel *, intptr_t );
+typedef void (*x264_pixel_cmp_x3_t)( pixel *, pixel *, pixel *, pixel *, intptr_t, int[3] );
+typedef void (*x264_pixel_cmp_x4_t)( pixel *, pixel *, pixel *, pixel *, pixel *, intptr_t, int[4] );
enum
{
finaltc0 = vec_and((vec_u8_t)tc0vec, mask); /* tc = tc0 */ \
\
p1mask = diff_lt_altivec(p2, p0, betavec); \
- p1mask = vec_and(p1mask, mask); /* if( |p2 - p0| < beta) */ \
+ p1mask = vec_and(p1mask, mask); /* if( |p2 - p0| < beta ) */ \
tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec); \
finaltc0 = vec_sub(finaltc0, p1mask); /* tc++ */ \
newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \
/*end if*/ \
\
q1mask = diff_lt_altivec(q2, q0, betavec); \
- q1mask = vec_and(q1mask, mask); /* if ( |q2 - q0| < beta ) */\
+ q1mask = vec_and(q1mask, mask); /* if( |q2 - q0| < beta ) */ \
tc0masked = vec_and(q1mask, (vec_u8_t)tc0vec); \
finaltc0 = vec_sub(finaltc0, q1mask); /* tc++ */ \
newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \
* SSD routines
**********************************************************************/
-static int pixel_ssd_16x16_altivec ( uint8_t *pix1, intptr_t i_stride_pix1,
- uint8_t *pix2, intptr_t i_stride_pix2 )
+static int pixel_ssd_16x16_altivec( uint8_t *pix1, intptr_t i_stride_pix1,
+ uint8_t *pix2, intptr_t i_stride_pix2 )
{
ALIGNED_16( int sum );
return sum;
}
-static int pixel_ssd_8x8_altivec ( uint8_t *pix1, intptr_t i_stride_pix1,
- uint8_t *pix2, intptr_t i_stride_pix2 )
+static int pixel_ssd_8x8_altivec( uint8_t *pix1, intptr_t i_stride_pix1,
+ uint8_t *pix2, intptr_t i_stride_pix2 )
{
ALIGNED_16( int sum );
#ifndef X264_PPC_PREDICT_H
#define X264_PPC_PREDICT_H
-void x264_predict_16x16_init_altivec ( x264_predict_t pf[7] );
+void x264_predict_16x16_init_altivec( x264_predict_t pf[7] );
void x264_predict_8x8c_init_altivec( x264_predict_t pf[7] );
#endif /* X264_PPC_PREDICT_H */
{
int dc0 = 0, dc1 = 0;
- for(int x = 0; x < 4; x++ )
+ for( int x = 0; x < 4; x++ )
{
dc0 += src[x - FDEC_STRIDE];
dc1 += src[x + 4 - FDEC_STRIDE];
typedef void (*x264_predict_t)( pixel *src );
typedef void (*x264_predict8x8_t)( pixel *src, pixel edge[36] );
-typedef void (*x264_predict_8x8_filter_t) ( pixel *src, pixel edge[36], int i_neighbor, int i_filters );
+typedef void (*x264_predict_8x8_filter_t)( pixel *src, pixel edge[36], int i_neighbor, int i_filters );
enum intra_chroma_pred_e
{
:"m"(x)\
);\
s = temp;\
-} while(0)
+} while( 0 )
#undef MC_CLIP_ADD2
#define MC_CLIP_ADD2(s,x)\
:"+m"(M32(s))\
:"m"(M32(x))\
);\
-} while(0)
+} while( 0 )
#endif
PROPAGATE_LIST(ssse3)
#ifndef X264_I386_PREDICT_H
#define X264_I386_PREDICT_H
-void x264_predict_16x16_init_mmx ( int cpu, x264_predict_t pf[7] );
-void x264_predict_8x16c_init_mmx ( int cpu, x264_predict_t pf[7] );
-void x264_predict_8x8c_init_mmx ( int cpu, x264_predict_t pf[7] );
-void x264_predict_4x4_init_mmx ( int cpu, x264_predict_t pf[12] );
-void x264_predict_8x8_init_mmx ( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_8x8_filter );
+void x264_predict_16x16_init_mmx( int cpu, x264_predict_t pf[7] );
+void x264_predict_8x16c_init_mmx( int cpu, x264_predict_t pf[7] );
+void x264_predict_8x8c_init_mmx ( int cpu, x264_predict_t pf[7] );
+void x264_predict_4x4_init_mmx ( int cpu, x264_predict_t pf[12] );
+void x264_predict_8x8_init_mmx ( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_8x8_filter );
void x264_predict_16x16_v_mmx2( pixel *src );
void x264_predict_16x16_v_sse ( pixel *src );
void x264_slicetype_analyse( x264_t *h, int intra_minigop );
-int x264_weighted_reference_duplicate( x264_t *h, int i_ref, const x264_weight_t *w );
-
int x264_lookahead_init( x264_t *h, int i_slicetype_length );
int x264_lookahead_is_empty( x264_t *h );
void x264_lookahead_put_frame( x264_t *h, x264_frame_t *frame );
{\
uint16_t mvd = x264_cabac_mvd(h,cb,i_list,idx,width);\
x264_macroblock_cache_mvd( h, block_idx_x[idx], block_idx_y[idx], width, height, i_list, mvd );\
-} while(0)
+} while( 0 )
static inline void x264_cabac_8x8_mvd( x264_t *h, x264_cabac_t *cb, int i )
{
}\
else\
x264_cabac_encode_decision( cb, ctxidxinc, 0 );\
-} while(0)
+} while( 0 )
#define x264_cabac_block_residual_dc_cbf( h, cb, ctx_block_cat, i_idx, l, b_intra )\
x264_cabac_block_residual_cbf_internal( h, cb, ctx_block_cat, i_idx, l, b_intra, 1, )
}
/* return -1 on failure, else return the index of the new reference frame */
-int x264_weighted_reference_duplicate( x264_t *h, int i_ref, const x264_weight_t *w )
+static int x264_weighted_reference_duplicate( x264_t *h, int i_ref, const x264_weight_t *w )
{
int i = h->i_ref[0];
int j = 1;
{\
M16( &h->mb.cache.non_zero_count[x264_scan8[p*16+idx*4]+0] ) = (nz) * 0x0101;\
M16( &h->mb.cache.non_zero_count[x264_scan8[p*16+idx*4]+8] ) = (nz) * 0x0101;\
-} while(0)
+} while( 0 )
#define CLEAR_16x16_NNZ( p ) \
do\
M32( &h->mb.cache.non_zero_count[x264_scan8[16*p] + 1*8] ) = 0;\
M32( &h->mb.cache.non_zero_count[x264_scan8[16*p] + 2*8] ) = 0;\
M32( &h->mb.cache.non_zero_count[x264_scan8[16*p] + 3*8] ) = 0;\
-} while(0)
+} while( 0 )
/* A special for loop that iterates branchlessly over each set
* bit in a 4-bit input. */
&p_fref_w[(my)*stride+(mx)], stride )\
+ BITS_MVD(mx,my);\
COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my );\
-} while(0)
+} while( 0 )
#define COST_MV_HPEL( mx, my, cost )\
do\
pixel *src = h->mc.get_ref( pix, &stride2, m->p_fref, stride, mx, my, bw, bh, &m->weight[0] );\
cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, src, stride2 )\
+ p_cost_mvx[ mx ] + p_cost_mvy[ my ];\
-} while(0)
+} while( 0 )
#define COST_MV_X3_DIR( m0x, m0y, m1x, m1y, m2x, m2y, costs )\
{\
#undef SADS
#undef ADD_MVCOST
#undef MIN_MV
- if(dir)
+ if( dir )
{
bmx = omx + i*(dir>>4);
bmy = omy + i*((dir<<28)>>28);
extern uint16_t *x264_cost_mv_fpel[QP_MAX+1][4];
#define COPY1_IF_LT(x,y)\
-if((y)<(x))\
- (x)=(y);
+if( (y) < (x) )\
+ (x) = (y);
#define COPY2_IF_LT(x,y,a,b)\
-if((y)<(x))\
+if( (y) < (x) )\
{\
- (x)=(y);\
- (a)=(b);\
+ (x) = (y);\
+ (a) = (b);\
}
#define COPY3_IF_LT(x,y,a,b,c,d)\
-if((y)<(x))\
+if( (y) < (x) )\
{\
- (x)=(y);\
- (a)=(b);\
- (c)=(d);\
+ (x) = (y);\
+ (a) = (b);\
+ (c) = (d);\
}
#define COPY4_IF_LT(x,y,a,b,c,d,e,f)\
-if((y)<(x))\
+if( (y) < (x) )\
{\
- (x)=(y);\
- (a)=(b);\
- (c)=(d);\
- (e)=(f);\
+ (x) = (y);\
+ (a) = (b);\
+ (c) = (d);\
+ (e) = (f);\
}
#define COPY2_IF_GT(x,y,a,b)\
-if((y)>(x))\
+if( (y) > (x) )\
{\
- (x)=(y);\
- (a)=(b);\
+ (x) = (y);\
+ (a) = (b);\
}
#endif
&rce->weight[2][0], &rce->weight[2][1] );
if( count == 3 )
rce->i_weight_denom[1] = -1;
- else if ( count != 8 )
+ else if( count != 8 )
rce->i_weight_denom[0] = rce->i_weight_denom[1] = -1;
}
t0 = 0;
/* fix overflows */
adj_min = 1;
- while(adj_min && find_underflow( h, fills, &t0, &t1, 1 ))
+ while( adj_min && find_underflow( h, fills, &t0, &t1, 1 ) )
{
adj_min = fix_underflow( h, t0, t1, adjustment, qscale_min, qscale_max );
t0 = t1;
void x264_ratecontrol_set_estimated_size( x264_t *, int bits );
int x264_ratecontrol_get_estimated_size( x264_t const *);
int x264_rc_analyse_slice( x264_t *h );
-int x264_weighted_reference_duplicate( x264_t *h, int i_ref, const x264_weight_t *w );
void x264_threads_distribute_ratecontrol( x264_t *h );
void x264_threads_merge_ratecontrol( x264_t *h );
void x264_hrd_fullness( x264_t *h );
return 0;
}
- if(0) // accessible only by goto, not fallthrough
+ if( 0 ) // accessible only by goto, not fallthrough
{
// node_ctx 1..7 (ctx0 ruled out because we never try both level0 and level2+ on the same coef)
TRELLIS_LOOP(1);
bs_write1( &q, h->param.i_frame_packing == 5 && !(h->fenc->i_frame&1) ); // current_frame_is_frame0_flag
bs_write1( &q, 0 ); // frame0_self_contained_flag
bs_write1( &q, 0 ); // frame1_self_contained_flag
- if ( quincunx_sampling_flag == 0 && h->param.i_frame_packing != 5 )
+ if( quincunx_sampling_flag == 0 && h->param.i_frame_packing != 5 )
{
bs_write( &q, 4, 0 ); // frame0_grid_position_x
bs_write( &q, 4, 0 ); // frame0_grid_position_y
if( p_param->vui.i_sar_width && p_param->vui.i_sar_height
&& p_param->vui.i_sar_width != p_param->vui.i_sar_height )
{
- if ( p_param->vui.i_sar_width > p_param->vui.i_sar_height ) {
+ if( p_param->vui.i_sar_width > p_param->vui.i_sar_height )
+ {
dw = dw * p_param->vui.i_sar_width / p_param->vui.i_sar_height;
- } else {
+ }
+ else
+ {
dh = dh * p_param->vui.i_sar_height / p_param->vui.i_sar_width;
}
}
{
char buf[50];
#define H0 printf
-#define H1 if(longhelp>=1) printf
-#define H2 if(longhelp==2) printf
+#define H1 if( longhelp >= 1 ) printf
+#define H2 if( longhelp == 2 ) printf
H0( "x264 core:%d%s\n"
"Syntax: x264 [options] -o outfile infile\n"
"\n"
* NAL unit. This helps distinguish between nalu_process calls from different sources,
* e.g. if doing multiple encodes in one process.
*/
- void (*nalu_process) ( x264_t *h, x264_nal_t *nal, void *opaque );
+ void (*nalu_process)( x264_t *h, x264_nal_t *nal, void *opaque );
} x264_param_t;
void x264_nal_encode( x264_t *h, uint8_t *dst, x264_nal_t *nal );
int x264_encoder_encode( x264_t *, x264_nal_t **pp_nal, int *pi_nal, x264_picture_t *pic_in, x264_picture_t *pic_out );
/* x264_encoder_close:
* close an encoder handler */
-void x264_encoder_close ( x264_t * );
+void x264_encoder_close( x264_t * );
/* x264_encoder_delayed_frames:
* return the number of currently delayed (buffered) frames
* this should be used at the end of the stream, to know when you have all the encoded frames. */