x264_macroblock_cache_ref( h, 0, 0, 4, 2, 0, analysis.l0.me16x8[0].i_ref );
x264_macroblock_cache_ref( h, 0, 2, 4, 2, 0, analysis.l0.me16x8[1].i_ref );
x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[0], analysis.i_lambda2, 0, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[1], analysis.i_lambda2, 2, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[1], analysis.i_lambda2, 8, 0 );
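+ /* refinement indices are now in 4x4-block units: the two 16x8 halves start at blocks 0 and 8 */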
}
else if( i_partition == D_8x16 )
{
x264_macroblock_cache_ref( h, 0, 0, 2, 4, 0, analysis.l0.me8x16[0].i_ref );
x264_macroblock_cache_ref( h, 2, 0, 2, 4, 0, analysis.l0.me8x16[1].i_ref );
x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[0], analysis.i_lambda2, 0, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[1], analysis.i_lambda2, 1, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[1], analysis.i_lambda2, 4, 0 );
}
else if( i_partition == D_8x8 )
{
int i8x8;
x264_analyse_update_cache( h, &analysis );
for( i8x8 = 0; i8x8 < 4; i8x8++ )
- if( h->mb.i_sub_partition[i8x8] == D_L0_8x8 )
- x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i8x8], analysis.i_lambda2, i8x8, 0 );
+ {
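+ /* each 8x8 starts at 4x4 block i8x8*4; its 8x4 halves are offset by 0/2, its 4x8 halves by 0/1 */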
+ if( h->mb.i_sub_partition[i8x8] == D_L0_8x8 )
+ {
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i8x8], analysis.i_lambda2, i8x8*4, 0 );
+ }
+ else if( h->mb.i_sub_partition[i8x8] == D_L0_8x4 )
+ {
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][1], analysis.i_lambda2, i8x8*4+2, 0 );
+ }
+ else if( h->mb.i_sub_partition[i8x8] == D_L0_4x8 )
+ {
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
+ }
+ else if( h->mb.i_sub_partition[i8x8] == D_L0_4x4 )
+ {
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][2], analysis.i_lambda2, i8x8*4+2, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][3], analysis.i_lambda2, i8x8*4+3, 0 );
+ }
+ }
}
}
}
{
h->mb.i_sub_partition[i*2] = h->mb.i_sub_partition[i*2+1] = analysis.i_mb_partition16x8[i];
if( analysis.i_mb_partition16x8[i] == D_L0_8x8 )
- x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[i], analysis.i_lambda2, i*2, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[i], analysis.i_lambda2, i*8, 0 );
else if( analysis.i_mb_partition16x8[i] == D_L1_8x8 )
- x264_me_refine_qpel_rd( h, &analysis.l1.me16x8[i], analysis.i_lambda2, i*2, 1 );
+ x264_me_refine_qpel_rd( h, &analysis.l1.me16x8[i], analysis.i_lambda2, i*8, 1 );
else if( analysis.i_mb_partition16x8[i] == D_BI_8x8 )
x264_me_refine_bidir_rd( h, &analysis.l0.me16x8[i], &analysis.l1.me16x8[i], i_biweight, i*2, analysis.i_lambda2 );
}
{
h->mb.i_sub_partition[i] = h->mb.i_sub_partition[i+2] = analysis.i_mb_partition8x16[i];
if( analysis.i_mb_partition8x16[i] == D_L0_8x8 )
- x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[i], analysis.i_lambda2, i, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[i], analysis.i_lambda2, i*4, 0 );
else if( analysis.i_mb_partition8x16[i] == D_L1_8x8 )
- x264_me_refine_qpel_rd( h, &analysis.l1.me8x16[i], analysis.i_lambda2, i, 1 );
+ x264_me_refine_qpel_rd( h, &analysis.l1.me8x16[i], analysis.i_lambda2, i*4, 1 );
else if( analysis.i_mb_partition8x16[i] == D_BI_8x8 )
x264_me_refine_bidir_rd( h, &analysis.l0.me8x16[i], &analysis.l1.me8x16[i], i_biweight, i, analysis.i_lambda2 );
}
for( i = 0; i < 4; i++ )
{
if( h->mb.i_sub_partition[i] == D_L0_8x8 )
- x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i], analysis.i_lambda2, i, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i], analysis.i_lambda2, i*4, 0 );
else if( h->mb.i_sub_partition[i] == D_L1_8x8 )
- x264_me_refine_qpel_rd( h, &analysis.l1.me8x8[i], analysis.i_lambda2, i, 1 );
+ x264_me_refine_qpel_rd( h, &analysis.l1.me8x8[i], analysis.i_lambda2, i*4, 1 );
else if( h->mb.i_sub_partition[i] == D_BI_8x8 )
x264_me_refine_bidir_rd( h, &analysis.l0.me8x8[i], &analysis.l1.me8x8[i], i_biweight, i, analysis.i_lambda2 );
}
}
}
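+/* write one subpartition's mvd and luma residual into a throwaway CABAC state so the caller can read off its bit cost */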
+static void x264_subpartition_size_cabac( x264_t *h, x264_cabac_t *cb, int i4, int i_pixel )
+{
+ int b_8x4 = i_pixel == PIXEL_8x4;
+ block_residual_write_cabac( h, cb, DCT_LUMA_4x4, i4, h->dct.luma4x4[i4], 16 );
+ if( i_pixel == PIXEL_4x4 )
+ x264_cabac_mb_mvd( h, cb, 0, i4, 1, 1 );
+ else
+ {
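+ /* the pair's second 4x4 is i4+1 for 8x4 (horizontal) or i4+2 for 4x8 (vertical) */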
+ x264_cabac_mb_mvd( h, cb, 0, i4, 1+b_8x4, 2-b_8x4 );
+ block_residual_write_cabac( h, cb, DCT_LUMA_4x4, i4+2-b_8x4, h->dct.luma4x4[i4+2-b_8x4], 16 );
+ }
+}
+
static void x264_partition_i8x8_size_cabac( x264_t *h, x264_cabac_t *cb, int i8, int i_mode )
{
const int i_pred = x264_mb_predict_intra4x4_mode( h, 4*i8 );
return s.i_bits_encoded;
}
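+/* CAVLC counterpart: counts mvd and luma residual bits for one subpartition, refreshing the cached nnz counts that coeff-token coding consults */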
+static int x264_subpartition_size_cavlc( x264_t *h, int i4, int i_pixel )
+{
+ bs_t s;
+ int b_8x4 = i_pixel == PIXEL_8x4;
+ s.i_bits_encoded = 0;
+ cavlc_mb_mvd( h, &s, 0, i4, 1+b_8x4 );
+ h->mb.cache.non_zero_count[x264_scan8[i4]] = array_non_zero_count( h->dct.luma4x4[i4] );
+ block_residual_write_cavlc( h, &s, i4, h->dct.luma4x4[i4], 16 );
+ if( i_pixel != PIXEL_4x4 )
+ {
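+ /* step to the pair's second 4x4: +1 for 8x4, +2 for 4x8 */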
+ i4 += 2-b_8x4;
+ h->mb.cache.non_zero_count[x264_scan8[i4]] = array_non_zero_count( h->dct.luma4x4[i4] );
+ block_residual_write_cavlc( h, &s, i4, h->dct.luma4x4[i4], 16 );
+ }
+
+ return s.i_bits_encoded;
+}
+
static int cavlc_intra4x4_pred_size( x264_t *h, int i4, int i_mode )
{
if( x264_mb_predict_intra4x4_mode( h, i4 ) == x264_mb_pred_mode4x4_fix( i_mode ) )
h->mb.i_cbp_luma |= nnz8x8 << i8;
h->mb.i_cbp_chroma = 0x02;
}
+
+/*****************************************************************************
+ * RD only, luma only
+ *****************************************************************************/
+void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
+{
+ int i_qp = h->mb.i_qp;
+ uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
+ uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
+ const int i_ref = h->mb.cache.ref[0][x264_scan8[i4]];
+ const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][0], h->mb.mv_min[0], h->mb.mv_max[0] );
+ const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][1], h->mb.mv_min[1], h->mb.mv_max[1] );
+
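+ /* the block offset within the MB is scaled to quarter-pel (4 pels per 4x4 block * 4 qpel per pel) to match the mv units */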
+ h->mc.mc_luma( p_fdec, FDEC_STRIDE, h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0], mvx + 4*4*block_idx_x[i4], mvy + 4*4*block_idx_y[i4], 4, 4 );
+
+ if( h->mb.b_lossless )
+ h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
+ else
+ {
+ DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );
+ h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
+ x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
+ h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
+ if( array_non_zero( dct4x4 ) )
+ {
+ h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
+ h->dctf.add4x4_idct( p_fdec, dct4x4 );
+ }
+ }
+}
void x264_macroblock_write_cavlc ( x264_t *h, bs_t *s );
void x264_macroblock_encode_p8x8( x264_t *h, int i8 );
+void x264_macroblock_encode_p4x4( x264_t *h, int i4 );
void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp );
void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp );
void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp );
#undef COST_MV_SATD
#define COST_MV_SATD( mx, my, dst, avoid_mvp ) \
{ \
- if( !avoid_pmv || !(mx == pmx && my == pmy) ) \
+ if( !avoid_mvp || !(mx == pmx && my == pmy) ) \
{ \
int stride = 16; \
uint8_t *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw*4, bh*4 ); \
{ \
uint64_t cost; \
*(uint32_t*)cache_mv = *(uint32_t*)cache_mv2 = pack16to32_mask(mx,my); \
- cost = x264_rd_cost_part( h, i_lambda2, i8, m->i_pixel ); \
+ cost = x264_rd_cost_part( h, i_lambda2, i4, m->i_pixel ); \
COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, dir, do_dir?mdir:dir ); \
} \
}
-void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i8, int i_list )
+void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i4, int i_list )
{
// don't have to fill the whole mv cache rectangle
- static const int pixel_mv_offs[] = { 0, 4, 4*8, 0 };
- int16_t *cache_mv = h->mb.cache.mv[i_list][x264_scan8[i8*4]];
+ static const int pixel_mv_offs[] = { 0, 4, 4*8, 0, 2, 2*8, 0 };
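+ /* offsets are in int16_t units into the scan8 mv cache (one cache row = 8 mvs = 16 int16_t), indexed by i_pixel */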
+ int16_t *cache_mv = h->mb.cache.mv[i_list][x264_scan8[i4]];
int16_t *cache_mv2 = cache_mv + pixel_mv_offs[m->i_pixel];
const int16_t *p_cost_mvx, *p_cost_mvy;
const int bw = x264_pixel_size[m->i_pixel].w>>2;
int dir = -2;
int satds[8];
- if( m->i_pixel != PIXEL_16x16 && i8 != 0 )
- x264_mb_predict_mv( h, i_list, i8*4, bw, m->mvp );
+ if( m->i_pixel != PIXEL_16x16 && i4 != 0 )
+ x264_mb_predict_mv( h, i_list, i4, bw, m->mvp );
pmx = m->mvp[0];
pmy = m->mvp[1];
p_cost_mvx = m->p_cost_mv - pmx;
m->cost = bcost;
m->mv[0] = bmx;
m->mv[1] = bmy;
- x264_macroblock_cache_mv ( h, 2*(i8&1), i8&2, bw, bh, i_list, pack16to32_mask(bmx, bmy) );
- x264_macroblock_cache_mvd( h, 2*(i8&1), i8&2, bw, bh, i_list, pack16to32_mask(bmx - m->mvp[0], bmy - m->mvp[1]) );
+ x264_macroblock_cache_mv ( h, block_idx_x[i4], block_idx_y[i4], bw, bh, i_list, pack16to32_mask(bmx, bmy) );
+ x264_macroblock_cache_mvd( h, block_idx_x[i4], block_idx_y[i4], bw, bh, i_list, pack16to32_mask(bmx - m->mvp[0], bmy - m->mvp[1]) );
}
-
{ x264_me_search_ref( h, m, mvc, i_mvc, NULL ); }
void x264_me_refine_qpel( x264_t *h, x264_me_t *m );
-void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i8, int i_list );
+void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i4, int i_list );
void x264_me_refine_bidir_rd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2 );
void x264_me_refine_bidir_satd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight );
uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i8, int i_pixel );
return i_ssd + i_bits;
}
-/* subpartition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
-uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i8, int i_pixel )
+/* partition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
+static uint64_t x264_rd_cost_subpart( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
uint64_t i_ssd, i_bits;
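+ /* reconstruct every 4x4 the subpartition covers before measuring its distortion */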
+ x264_macroblock_encode_p4x4( h, i4 );
+ if( i_pixel == PIXEL_8x4 )
+ x264_macroblock_encode_p4x4( h, i4+1 );
+ if( i_pixel == PIXEL_4x8 )
+ x264_macroblock_encode_p4x4( h, i4+2 );
+
+ i_ssd = ssd_plane( h, i_pixel, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+
+ if( h->param.b_cabac )
+ {
+ x264_cabac_t cabac_tmp;
+ COPY_CABAC;
+ x264_subpartition_size_cabac( h, &cabac_tmp, i4, i_pixel );
+ i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
+ }
+ else
+ {
+ i_bits = x264_subpartition_size_cavlc( h, i4, i_pixel ) * i_lambda2;
+ }
+
+ return (i_ssd<<8) + i_bits;
+}
+
+uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i4, int i_pixel )
+{
+ uint64_t i_ssd, i_bits;
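+ /* callers now pass the first 4x4 block index; recover the 8x8 index for the 8x8-and-larger cases below */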
+ int i8 = i4 >> 2;
+
if( i_pixel == PIXEL_16x16 )
{
int type_bak = h->mb.i_type;
return i_cost;
}
+ if( i_pixel > PIXEL_8x8 )
+ return x264_rd_cost_subpart( h, i_lambda2, i4, i_pixel );
+
x264_macroblock_encode_p8x8( h, i8 );
if( i_pixel == PIXEL_16x8 )
x264_macroblock_encode_p8x8( h, i8+1 );