cb->state[i_ctx] = x264_cabac_transition[b][i_state];
cb->f8_bits_encoded += x264_cabac_entropy[ b ? 127 - i_state : i_state ];
}
+
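+/* Same cost model as x264_cabac_size_decision(), but on a bare context state:
+ * return the cost of coding decision b, in 1/256 bit units, and update *state.
+ * The _noup variant below returns the cost without touching the state. */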
+int x264_cabac_size_decision2( uint8_t *state, int b )
+{
+ int i_state = *state;
+ *state = x264_cabac_transition[b][i_state];
+ return x264_cabac_entropy[ b ? 127 - i_state : i_state ];
+}
+
+int x264_cabac_size_decision_noup( uint8_t *state, int b )
+{
+ return x264_cabac_entropy[ b ? 127 - *state : *state ];
+}
void x264_cabac_encode_flush( x264_cabac_t *cb );
/* don't write the bitstream, just calculate cost: */
void x264_cabac_size_decision( x264_cabac_t *cb, int i_ctx, int b );
+int x264_cabac_size_decision2( uint8_t *state, int b );
+int x264_cabac_size_decision_noup( uint8_t *state, int b );
static inline int x264_cabac_pos( x264_cabac_t *cb )
{
int dequant8_mf[2][6][8][8];
int quant4_mf[4][6][4][4];
int quant8_mf[2][6][8][8];
+ int unquant4_mf[4][52][16];
+ int unquant8_mf[2][52][64];
/* Slice header */
x264_slice_header_t sh;
int i_me_method;
int i_subpel_refine;
int b_chroma_me;
+ int b_trellis;
/* Allowed qpel MV range to stay within the picture + emulated edge pixels */
int mv_min[2];
int mv_max[2];
#ifndef _DCT_H
#define _DCT_H 1
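+/* the weight tables below are stored with FIX8(), i.e. in Q8 fixed point
+ * (value * 256, rounded) */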
+/* the inverse of the scaling factors introduced by 8x8 fdct */
+#define W(i) (i==0 ? FIX8(1.0000) :\
+ i==1 ? FIX8(0.8859) :\
+ i==2 ? FIX8(1.6000) :\
+ i==3 ? FIX8(0.9415) :\
+ i==4 ? FIX8(1.2651) :\
+ i==5 ? FIX8(1.1910) :0)
+static const int x264_dct8_weight_tab[64] = {
+ W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+ W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+
+ W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+ W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1)
+};
+#undef W
+
+/* squared inverses of the scaling factors introduced by 4x4 fdct,
+ * in zigzag scan order (used as distortion weights by the trellis quantizer) */
+#define W(i) (i==0 ? FIX8(3.125) :\
+ i==1 ? FIX8(1.25) :\
+ i==2 ? FIX8(0.5) :0)
+static const int x264_dct4_weight2_zigzag[16] = {
+ W(0), W(1), W(1), W(0), W(2), W(0), W(1), W(1),
+ W(1), W(1), W(2), W(0), W(2), W(1), W(1), W(2)
+};
+#undef W
+
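+/* inverse square of the 8x8 fdct scaling factors, in zigzag scan order */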
+#define W(i) (i==0 ? FIX8(1.00000) :\
+ i==1 ? FIX8(0.78487) :\
+ i==2 ? FIX8(2.56132) :\
+ i==3 ? FIX8(0.88637) :\
+ i==4 ? FIX8(1.60040) :\
+ i==5 ? FIX8(1.41850) :0)
+static const int x264_dct8_weight2_zigzag[64] = {
+ W(0), W(3), W(3), W(4), W(1), W(4), W(3), W(5),
+ W(5), W(3), W(0), W(1), W(2), W(1), W(0), W(3),
+ W(3), W(5), W(5), W(3), W(3), W(4), W(1), W(4),
+ W(1), W(4), W(1), W(4), W(3), W(5), W(5), W(3),
+ W(3), W(5), W(5), W(3), W(1), W(2), W(1), W(0),
+ W(1), W(2), W(1), W(5), W(5), W(3), W(3), W(5),
+ W(5), W(1), W(4), W(1), W(4), W(1), W(3), W(5),
+ W(5), W(3), W(1), W(2), W(1), W(5), W(5), W(1)
+};
+#undef W
+
typedef struct
{
void (*sub4x4_dct) ( int16_t dct[4][4], uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 );
h->quant8_mf[i_list][q][0][i] = def_quant8[q][i] * 16 / h->pps->scaling_list[4+i_list][i];
}
}
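+ /* unquant tables for the trellis quantizer: the direct inverse of quant,
+ * carrying 8 extra bits of precision. dequant_mf is not suitable here
+ * because it also folds in the idct normalization (see the comment in rdo.c). */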
+ for( q = 0; q < 52; q++ )
+ {
+ for( i_list = 0; i_list < 4; i_list++ )
+ for( i = 0; i < 16; i++ )
+ h->unquant4_mf[i_list][q][i] = (1ULL << (q/6 + 15 + 8)) / h->quant4_mf[i_list][q%6][0][i];
+ for( i_list = 0; i_list < 2; i_list++ )
+ for( i = 0; i < 64; i++ )
+ h->unquant8_mf[i_list][q][i] = (1ULL << (q/6 + 16 + 8)) / h->quant8_mf[i_list][q%6][0][i];
+ }
}
int x264_cqm_parse_jmlist( x264_t *h, const char *buf, const char *name,
h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
h->mb.b_chroma_me = h->param.analyse.b_chroma_me && h->sh.i_type == SLICE_TYPE_P
&& h->mb.i_subpel_refine >= 5;
-
+ h->mb.b_trellis = h->param.analyse.i_trellis > 1;
h->mb.b_transform_8x8 = 0;
/* I: Intra part */
if( !analysis.b_mbrd )
x264_mb_analyse_transform( h );
+
+ h->mb.b_trellis = h->param.analyse.i_trellis;
}
/*-------------------- Update MB from the analysis ----------------------*/
}
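+/* hoisted out of block_residual_write_cabac() so the trellis quantizer can
+ * share the same context offsets */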
+static const int significant_coeff_flag_offset[6] = { 105, 120, 134, 149, 152, 402 };
+static const int last_coeff_flag_offset[6] = { 166, 181, 195, 210, 213, 417 };
+static const int coeff_abs_level_m1_offset[6] = { 227, 237, 247, 257, 266, 426 };
+static const int significant_coeff_flag_offset_8x8[63] = {
+ 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
+ 4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7,
+ 7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11,
+ 12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12
+};
+static const int last_coeff_flag_offset_8x8[63] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
+};
+
static void block_residual_write_cabac( x264_t *h, x264_cabac_t *cb, int i_ctxBlockCat, int i_idx, int *l, int i_count )
{
- static const int significant_coeff_flag_offset[6] = { 105, 120, 134, 149, 152, 402 };
- static const int last_coeff_flag_offset[6] = { 166, 181, 195, 210, 213, 417 };
- static const int coeff_abs_level_m1_offset[6] = { 227, 237, 247, 257, 266, 426 };
- static const int significant_coeff_flag_offset_8x8[63] = {
- 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
- 4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7,
- 7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11,
- 12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12
- };
- static const int last_coeff_flag_offset_8x8[63] = {
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
- 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
- };
-
const int i_ctx_sig = significant_coeff_flag_offset[i_ctxBlockCat];
const int i_ctx_last = last_coeff_flag_offset[i_ctxBlockCat];
const int i_ctx_level = coeff_abs_level_m1_offset[i_ctxBlockCat];
}
h->param.analyse.i_chroma_qp_offset = x264_clip3(h->param.analyse.i_chroma_qp_offset, -12, 12);
h->param.analyse.i_mv_range = x264_clip3(h->param.analyse.i_mv_range, 32, 2048);
+ if( !h->param.b_cabac )
+ h->param.analyse.i_trellis = 0;
if( h->param.rc.f_qblur < 0 )
h->param.rc.f_qblur = 0;
h->fdec = h->frames.reference[0];
- /* init mb cache */
x264_macroblock_cache_init( h );
+ x264_rdo_init( );
/* init CPU functions */
x264_predict_16x16_init( h->param.cpu, h->predict_16x16 );
h->param.analyse.i_me_method = param->analyse.i_me_method;
h->param.analyse.i_me_range = param->analyse.i_me_range;
h->param.analyse.i_subpel_refine = param->analyse.i_subpel_refine;
+ h->param.analyse.i_trellis = param->analyse.i_trellis;
h->param.analyse.intra = param->analyse.intra;
h->param.analyse.inter = param->analyse.inter;
if( h->sps->b_direct8x8_inference && h->param.i_bframe
}
h->dctf.sub4x4_dct( dct4x4, p_src, i_stride, p_dst, i_stride );
- quant_4x4( h, dct4x4, h->quant4_mf[CQM_4IY], i_qscale, 1 );
+
+ if( h->mb.b_trellis )
+ x264_quant_4x4_trellis( h, dct4x4, CQM_4IY, i_qscale, DCT_LUMA_4x4, 1 );
+ else
+ quant_4x4( h, dct4x4, h->quant4_mf[CQM_4IY], i_qscale, 1 );
+
scan_zigzag_4x4full( h->dct.block[idx].luma4x4, dct4x4 );
x264_mb_dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qscale );
int16_t dct8x8[8][8];
h->dctf.sub8x8_dct8( dct8x8, p_src, i_stride, p_dst, i_stride );
- quant_8x8( h, dct8x8, h->quant8_mf[CQM_8IY], i_qscale, 1 );
+
+ if( h->mb.b_trellis )
+ x264_quant_8x8_trellis( h, dct8x8, CQM_8IY, i_qscale, 1 );
+ else
+ quant_8x8( h, dct8x8, h->quant8_mf[CQM_8IY], i_qscale, 1 );
+
scan_zigzag_8x8full( h->dct.luma8x8[idx], dct8x8 );
x264_mb_dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qscale );
h->dctf.add8x8_idct8( p_dst, i_stride, dct8x8 );
dct4x4[0][block_idx_y[i]][block_idx_x[i]] = dct4x4[1+i][0][0];
/* quant/scan/dequant */
- quant_4x4( h, dct4x4[1+i], h->quant4_mf[CQM_4IY], i_qscale, 1 );
+ if( h->mb.b_trellis )
+ x264_quant_4x4_trellis( h, dct4x4[1+i], CQM_4IY, i_qscale, DCT_LUMA_AC, 1 );
+ else
+ quant_4x4( h, dct4x4[1+i], h->quant4_mf[CQM_4IY], i_qscale, 1 );
+
scan_zigzag_4x4( h->dct.block[i].residual_ac, dct4x4[1+i] );
x264_mb_dequant_4x4( dct4x4[1+i], h->dequant4_mf[CQM_4IY], i_qscale );
}
/* copy dc coeff */
dct2x2[block_idx_y[i]][block_idx_x[i]] = dct4x4[i][0][0];
+ /* no trellis; it doesn't seem to help chroma noticeably */
quant_4x4( h, dct4x4[i], h->quant4_mf[CQM_4IC + b_inter], i_qscale, !b_inter );
scan_zigzag_4x4( h->dct.block[16+i+ch*4].residual_ac, dct4x4[i] );
x264_mb_dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qscale );
for( idx = 0; idx < 4; idx++ )
{
- int i_decimate_8x8;
- quant_8x8( h, dct8x8[idx], h->quant8_mf[CQM_8PY], i_qp, 0 );
+ if( h->mb.b_trellis )
+ x264_quant_8x8_trellis( h, dct8x8[idx], CQM_8PY, i_qp, 0 );
+ else
+ quant_8x8( h, dct8x8[idx], h->quant8_mf[CQM_8PY], i_qp, 0 );
scan_zigzag_8x8full( h->dct.luma8x8[idx], dct8x8[idx] );
x264_mb_dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
- i_decimate_8x8 = x264_mb_decimate_score( h->dct.luma8x8[idx], 64 );
- i_decimate_mb += i_decimate_8x8;
- if( i_decimate_8x8 < 4 )
+ if( !h->mb.b_trellis )
{
- memset( h->dct.luma8x8[idx], 0, sizeof( h->dct.luma8x8[idx] ) );
- memset( dct8x8[idx], 0, sizeof( dct8x8[idx] ) );
+ int i_decimate_8x8 = x264_mb_decimate_score( h->dct.luma8x8[idx], 64 );
+ i_decimate_mb += i_decimate_8x8;
+ if( i_decimate_8x8 < 4 )
+ {
+ memset( h->dct.luma8x8[idx], 0, sizeof( h->dct.luma8x8[idx] ) );
+ memset( dct8x8[idx], 0, sizeof( dct8x8[idx] ) );
+ }
}
}
- if( i_decimate_mb < 6 )
+ if( i_decimate_mb < 6 && !h->mb.b_trellis )
memset( h->dct.luma8x8, 0, sizeof( h->dct.luma8x8 ) );
else
h->dctf.add16x16_idct8( h->mb.pic.p_fdec[0], h->mb.pic.i_stride[0], dct8x8 );
{
idx = i8x8 * 4 + i4x4;
- quant_4x4( h, dct4x4[idx], h->quant4_mf[CQM_4PY], i_qp, 0 );
+ if( h->mb.b_trellis )
+ x264_quant_4x4_trellis( h, dct4x4[idx], CQM_4PY, i_qp, DCT_LUMA_4x4, 0 );
+ else
+ quant_4x4( h, dct4x4[idx], h->quant4_mf[CQM_4PY], i_qp, 0 );
+
scan_zigzag_4x4full( h->dct.block[idx].luma4x4, dct4x4[idx] );
x264_mb_dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
#include "common/macroblock.h"
+void x264_rdo_init( void );
+
int x264_macroblock_probe_skip( x264_t *h, int b_bidir );
static inline int x264_macroblock_probe_pskip( x264_t *h )
void x264_cabac_mb_skip( x264_t *h, int b_skip );
+void x264_quant_4x4_trellis( x264_t *h, int16_t dct[4][4], int i_quant_cat,
+ int i_qp, int i_ctxBlockCat, int b_intra );
+void x264_quant_8x8_trellis( x264_t *h, int16_t dct[8][8], int i_quant_cat,
+ int i_qp, int b_intra );
+
static inline int array_non_zero( int *v, int i_count )
{
int i;
return i_ssd + i_bits;
}
+
+
+/****************************************************************************
+ * Trellis RD quantization
+ ****************************************************************************/
+
+#define TRELLIS_SCORE_MAX (1ULL<<50)
+#define CABAC_SIZE_BITS 8
+#define SSD_WEIGHT_BITS 5
+#define LAMBDA_BITS 4
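+/* bit costs are kept with CABAC_SIZE_BITS fractional bits; i_lambda2 is
+ * pre-shifted down by LAMBDA_BITS, so only the remaining
+ * (CABAC_SIZE_BITS - LAMBDA_BITS) shift is applied when a rate term is added
+ * to a node score. TRELLIS_SCORE_MAX marks nodes that haven't been reached. */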
+
+/* precalculate the cost of coding abs_level_m1 */
+static int cabac_prefix_transition[15][128];
+static int cabac_prefix_size[15][128];
+void x264_rdo_init( void )
+{
+ int i_prefix;
+ int i_ctx;
+ for( i_prefix = 0; i_prefix < 15; i_prefix++ )
+ {
+ for( i_ctx = 0; i_ctx < 128; i_ctx++ )
+ {
+ int f8_bits = 0;
+ uint8_t ctx = i_ctx;
+ int i;
+
+ for( i = 1; i < i_prefix; i++ )
+ f8_bits += x264_cabac_size_decision2( &ctx, 1 );
+ if( i_prefix > 0 && i_prefix < 14 )
+ f8_bits += x264_cabac_size_decision2( &ctx, 0 );
+ f8_bits += 1 << CABAC_SIZE_BITS; //sign
+
+ cabac_prefix_size[i_prefix][i_ctx] = f8_bits;
+ cabac_prefix_transition[i_prefix][i_ctx] = ctx;
+ }
+ }
+}
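+/* quant_trellis_cabac() then prices an entire unary prefix in one step:
+ * add cabac_prefix_size[i_prefix][ctx] and jump the context to
+ * cabac_prefix_transition[i_prefix][ctx]. */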
+
+// node ctx: 0..3: abslevel1 (with abslevelgt1 == 0).
+// 4..7: abslevelgt1 + 3 (and abslevel1 doesn't matter).
+/* map node ctx => cabac ctx for level=1 */
+static const int coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 };
+/* map node ctx => cabac ctx for level>1 */
+static const int coeff_abs_levelgt1_ctx[8] = { 5, 5, 5, 5, 6, 7, 8, 9 };
+static const int coeff_abs_level_transition[2][8] = {
+/* update node.ctx after coding a level=1 */
+ { 1, 2, 3, 3, 4, 5, 6, 7 },
+/* update node.ctx after coding a level>1 */
+ { 4, 4, 4, 4, 5, 6, 7, 7 }
+};
+
+static const int lambda2_tab[6] = { 1024, 1290, 1625, 2048, 2580, 3251 };
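+/* lambda2_tab[i] ~= 1024 * 2^(i/3); combined with the << (2*i_qbits) in the
+ * callers below, this makes i_lambda2 proportional to 2^(qp/3) */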
+
+typedef struct {
+ uint64_t score;
+ int level_idx; // index into level_tree[]
+ uint8_t cabac_state[10]; //just the contexts relevant to coding abs_level_m1
+} trellis_node_t;
+
+// TODO:
+// support chroma and i16x16 DC
+// save cabac state between blocks?
+// use trellis' RD score instead of x264_mb_decimate_score?
+// code 8x8 sig/last flags forwards with deadzone and save the contexts at
+// each position?
+// change weights when using CQMs?
+
+// possible optimizations:
+// make scores fit in 32bit
+// save quantized coefs during rd, to avoid a duplicate trellis in the final encode
+// if trellissing all MBRD modes, finish SSD calculation so we can skip all of
+// the normal dequant/idct/ssd/cabac
+
+// the unquant_mf here is not the same as dequant_mf:
+// in normal operation (dct->quant->dequant->idct) the dct and idct are not
+// normalized. quant/dequant absorb those scaling factors.
+// in this function, we just do (quant->unquant) and want the output to be
+// comparable to the input. so unquant is the direct inverse of quant,
+// and uses the dct scaling factors, not the idct ones.
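+//
+// e.g. with a flat CQM at qp%6==0, the DC position has quant_mf = 13107 and
+// unquant_mf = (1<<(qp/6+23))/13107, so
+// q = (c*13107 + f) >> (15 + qp/6)
+// (q*unquant_mf + 128) >> 8 ~= c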
+
+static void quant_trellis_cabac( x264_t *h, int16_t *dct,
+ const int *quant_mf, const int *unquant_mf,
+ const int *coef_weight, const int *zigzag,
+ int i_ctxBlockCat, int i_qbits, int i_lambda2, int b_ac, int i_coefs )
+{
+ int abs_coefs[64], signs[64];
+ trellis_node_t nodes[2][8];
+ trellis_node_t *nodes_cur = nodes[0];
+ trellis_node_t *nodes_prev = nodes[1];
+ trellis_node_t *bnode;
+ uint8_t cabac_state_sig[64];
+ uint8_t cabac_state_last[64];
+ const int f = 1 << (i_qbits-1); // no deadzone
+ int i_last_nnz = -1;
+ int i, j;
+
+ // (# of coefs) * (# of ctx) * (# of levels tried) = 1024
+ // we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
+ // but it takes more time to remove dead states than you gain in reduced memory.
+ struct {
+ uint16_t abs_level;
+ uint16_t next;
+ } level_tree[64*8*2];
+ int i_levels_used = 1;
+
+ /* init coefs */
+ for( i = b_ac; i < i_coefs; i++ )
+ {
+ int coef = dct[zigzag[i]];
+ abs_coefs[i] = abs(coef);
+ signs[i] = coef < 0 ? -1 : 1;
+ if( f <= abs_coefs[i] * quant_mf[zigzag[i]] )
+ i_last_nnz = i;
+ }
+
+ if( i_last_nnz == -1 )
+ {
+ memset( dct, 0, i_coefs * sizeof(*dct) );
+ return;
+ }
+
+ /* init trellis */
+ for( i = 1; i < 8; i++ )
+ nodes_cur[i].score = TRELLIS_SCORE_MAX;
+ nodes_cur[0].score = 0;
+ nodes_cur[0].level_idx = 0;
+ level_tree[0].abs_level = 0;
+ level_tree[0].next = 0;
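+ /* node 0 = nothing coded yet; level_tree[0] points to itself, so any
+ * coefficients remaining when a path's chain ends are output as 0 */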
+
+ // coefs are processed in reverse order, because that's how the abs value is coded.
+ // last_coef and significant_coef flags are normally coded in forward order, but
+ // we have to reverse them to match the levels.
+ // in 4x4 blocks, last_coef and significant_coef use a separate context for each
+ // position, so the order doesn't matter, and we don't even have to update their contexts.
+ // in 8x8 blocks, some positions share contexts, so we'll just have to hope that
+ // cabac isn't too sensitive.
+
+ if( i_coefs == 64 )
+ {
+ const uint8_t *ctx_sig = &h->cabac.state[ significant_coeff_flag_offset[i_ctxBlockCat] ];
+ const uint8_t *ctx_last = &h->cabac.state[ last_coeff_flag_offset[i_ctxBlockCat] ];
+ for( i = 0; i < 63; i++ )
+ {
+ cabac_state_sig[i] = ctx_sig[ significant_coeff_flag_offset_8x8[i] ];
+ cabac_state_last[i] = ctx_last[ last_coeff_flag_offset_8x8[i] ];
+ }
+ }
+ else
+ {
+ memcpy( cabac_state_sig, &h->cabac.state[ significant_coeff_flag_offset[i_ctxBlockCat] ], 15 );
+ memcpy( cabac_state_last, &h->cabac.state[ last_coeff_flag_offset[i_ctxBlockCat] ], 15 );
+ }
+ memcpy( nodes_cur[0].cabac_state, &h->cabac.state[ coeff_abs_level_m1_offset[i_ctxBlockCat] ], 10 );
+
+ for( i = i_last_nnz; i >= b_ac; i-- )
+ {
+ int i_coef = abs_coefs[i];
+ int q = ( f + i_coef * quant_mf[zigzag[i]] ) >> i_qbits;
+ int abs_level;
+ int cost_sig[2], cost_last[2];
+ trellis_node_t n;
+
+ // skip 0s: this doesn't affect the output, but saves some unnecessary computation.
+ if( q == 0 )
+ {
+ // no need to calculate ssd of 0s: it's the same in all nodes.
+ // no need to modify level_tree for ctx=0: it starts with an infinite loop of 0s.
+ const int cost_sig0 = x264_cabac_size_decision_noup( &cabac_state_sig[i], 0 )
+ * i_lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
+ for( j = 1; j < 8; j++ )
+ {
+ if( nodes_cur[j].score != TRELLIS_SCORE_MAX )
+ {
+#define SET_LEVEL(n,l) \
+ level_tree[i_levels_used].abs_level = l; \
+ level_tree[i_levels_used].next = n.level_idx; \
+ n.level_idx = i_levels_used; \
+ i_levels_used++;
+
+ SET_LEVEL( nodes_cur[j], 0 );
+ nodes_cur[j].score += cost_sig0;
+ }
+ }
+ continue;
+ }
+
+ XCHG( trellis_node_t*, nodes_cur, nodes_prev );
+
+ for( j = 0; j < 8; j++ )
+ nodes_cur[j].score = TRELLIS_SCORE_MAX;
+
+ if( i < i_coefs-1 )
+ {
+ cost_sig[0] = x264_cabac_size_decision_noup( &cabac_state_sig[i], 0 );
+ cost_sig[1] = x264_cabac_size_decision_noup( &cabac_state_sig[i], 1 );
+ cost_last[0] = x264_cabac_size_decision_noup( &cabac_state_last[i], 0 );
+ cost_last[1] = x264_cabac_size_decision_noup( &cabac_state_last[i], 1 );
+ }
+ else
+ {
+ cost_sig[0] = cost_sig[1] = 0;
+ cost_last[0] = cost_last[1] = 0;
+ }
+
+ // there are a few cases where increasing the coeff magnitude helps,
+ // but it's only around .003 dB, and skipping them ~doubles the speed of trellis.
+ // could also try q-2: that sometimes helps, but also sometimes decimates blocks
+ // that are better left coded, especially at QP > 40.
+ for( abs_level = q; abs_level >= q-1; abs_level-- )
+ {
+ int u = (unquant_mf[zigzag[i]] * abs_level + 128) >> 8;
+ int64_t d = i_coef - u;
+ uint64_t ssd = d*d * coef_weight[i];
+
+ for( j = 0; j < 8; j++ )
+ {
+ int node_ctx = j;
+ if( nodes_prev[j].score == TRELLIS_SCORE_MAX )
+ continue;
+ n = nodes_prev[j];
+
+ /* code the proposed level, and count how much entropy it would take */
+ if( abs_level || node_ctx )
+ {
+ uint64_t f8_bits = cost_sig[ abs_level != 0 ];
+ if( abs_level )
+ {
+ const int i_prefix = X264_MIN( abs_level - 1, 14 );
+ f8_bits += cost_last[ node_ctx == 0 ];
+ f8_bits += x264_cabac_size_decision2( &n.cabac_state[coeff_abs_level1_ctx[node_ctx]], i_prefix > 0 );
+ if( i_prefix > 0 )
+ {
+ uint8_t *ctx = &n.cabac_state[coeff_abs_levelgt1_ctx[node_ctx]];
+ f8_bits += cabac_prefix_size[i_prefix][*ctx];
+ *ctx = cabac_prefix_transition[i_prefix][*ctx];
+ if( abs_level >= 15 )
+ f8_bits += bs_size_ue( abs_level - 15 ) << CABAC_SIZE_BITS;
+ node_ctx = coeff_abs_level_transition[1][node_ctx];
+ }
+ else
+ {
+ f8_bits += 1 << CABAC_SIZE_BITS;
+ node_ctx = coeff_abs_level_transition[0][node_ctx];
+ }
+ }
+ n.score += f8_bits * i_lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
+ }
+
+ n.score += ssd;
+
+ /* save the node if it's better than any existing node with the same cabac ctx */
+ if( n.score < nodes_cur[node_ctx].score )
+ {
+ SET_LEVEL( n, abs_level );
+ nodes_cur[node_ctx] = n;
+ }
+ }
+ }
+ }
+
+ /* output levels from the best path through the trellis */
+ bnode = &nodes_cur[0];
+ for( j = 1; j < 8; j++ )
+ if( nodes_cur[j].score < bnode->score )
+ bnode = &nodes_cur[j];
+
+ j = bnode->level_idx;
+ for( i = b_ac; i < i_coefs; i++ )
+ {
+ dct[zigzag[i]] = level_tree[j].abs_level * signs[i];
+ j = level_tree[j].next;
+ }
+}
+
+
+void x264_quant_4x4_trellis( x264_t *h, int16_t dct[4][4], int i_quant_cat,
+ int i_qp, int i_ctxBlockCat, int b_intra )
+{
+ const int i_qbits = i_qp / 6;
+ const int i_mf = i_qp % 6;
+ const int b_ac = (i_ctxBlockCat == DCT_LUMA_AC);
+ /* should the lambdas be different? I'm just matching the behaviour of deadzone quant. */
+ const int i_lambda_mult = b_intra ? 65 : 85;
+ const int i_lambda2 = ((lambda2_tab[i_mf] * i_lambda_mult*i_lambda_mult / 10000)
+ << (2*i_qbits)) >> LAMBDA_BITS;
+
+ quant_trellis_cabac( h, (int16_t*)dct,
+ (int*)h->quant4_mf[i_quant_cat][i_mf], h->unquant4_mf[i_quant_cat][i_qp],
+ x264_dct4_weight2_zigzag, x264_zigzag_scan4,
+ i_ctxBlockCat, 15+i_qbits, i_lambda2, b_ac, 16 );
+}
+
+
+void x264_quant_8x8_trellis( x264_t *h, int16_t dct[8][8], int i_quant_cat,
+ int i_qp, int b_intra )
+{
+ const int i_qbits = i_qp / 6;
+ const int i_mf = i_qp % 6;
+ const int i_lambda_mult = b_intra ? 65 : 85;
+ const int i_lambda2 = ((lambda2_tab[i_mf] * i_lambda_mult*i_lambda_mult / 10000)
+ << (2*i_qbits)) >> LAMBDA_BITS;
+
+ quant_trellis_cabac( h, (int16_t*)dct,
+ (int*)h->quant8_mf[i_quant_cat][i_mf], h->unquant8_mf[i_quant_cat][i_qp],
+ x264_dct8_weight2_zigzag, x264_zigzag_scan8,
+ DCT_LUMA_8x8, 16+i_qbits, i_lambda2, 0, 64 );
+}
+
" --mixed-refs Decide references on a per partition basis\n"
" --no-chroma-me Ignore chroma in motion estimation\n"
" -8, --8x8dct Adaptive spatial transform size\n"
+ " -t, --trellis <integer> Trellis RD quantization. Requires CABAC. [%d]\n"
+ " - 0: disabled\n"
+ " - 1: enabled only on the final encode of a MB\n"
+ " - 2: enabled on all mode decisions\n"
"\n"
" --cqm <string> Preset quant matrices [\"flat\"]\n"
" - jvt, flat\n"
strtable_lookup( x264_motion_est_names, defaults->analyse.i_me_method ),
defaults->analyse.i_me_range,
defaults->analyse.i_subpel_refine,
+ defaults->analyse.i_trellis,
strtable_lookup( overscan_str, defaults->vui.i_overscan ),
strtable_lookup( vidformat_str, defaults->vui.i_vidformat ),
strtable_lookup( fullrange_str, defaults->vui.b_fullrange ),
{ "mixed-refs", no_argument, NULL, OPT_MIXED_REFS },
{ "no-chroma-me", no_argument, NULL, OPT_NO_CHROMA_ME },
{ "8x8dct", no_argument, NULL, '8' },
+ { "trellis", required_argument, NULL, 't' },
{ "level", required_argument, NULL, OPT_LEVEL },
{ "ratetol", required_argument, NULL, OPT_RATETOL },
{ "vbv-maxrate", required_argument, NULL, OPT_VBVMAXRATE },
int c;
- c = getopt_long( argc, argv, "hi:I:b:r:cxB:q:f:o:A:m:p:vw8",
+ c = getopt_long( argc, argv, "hi:I:b:r:cxB:q:f:o:A:m:p:t:vw8",
long_options, &long_options_index);
if( c == -1 )
case '8':
param->analyse.b_transform_8x8 = 1;
break;
+ case 't':
+ param->analyse.i_trellis = atoi(optarg);
+ break;
case OPT_LEVEL:
param->i_level_idc = atoi(optarg);
break;
#include <stdarg.h>
-#define X264_BUILD 38
+#define X264_BUILD 39
/* x264_t:
* opaque handler for decoder and encoder */
int i_subpel_refine; /* subpixel motion estimation quality */
int b_chroma_me; /* chroma ME for subpel and mode decision in P-frames */
int b_mixed_references; /* allow each mb partition in P-frames to have its own reference number */
+ int i_trellis; /* trellis RD quantization */
int b_psnr; /* Do we compute PSNR stats (save a few % of cpu) */
} analyse;