}
}
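+/* fixed-point precision (1<<8) used when scaling temporal MV predictors */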
+#define FIXED_SCALE 256
+
/* This just improves encoder performance, it's not part of the spec */
void x264_mb_predict_mv_ref16x16( x264_t *h, int i_list, int i_ref, int mvc[8][2], int *i_mvc )
{
    if( h->fref0[0]->i_ref[0] > 0 && !h->sh.b_mbaff )
    {
        x264_frame_t *l0 = h->fref0[0];
-        int ref_col_cur, ref_col_prev = -1;
-        int scale = 0;
#define SET_TMVP(dx, dy) { \
            int i_b4 = h->mb.i_b4_xy + dx*4 + dy*4*h->mb.i_b4_stride; \
            int i_b8 = h->mb.i_b8_xy + dx*2 + dy*2*h->mb.i_b8_stride; \
-            ref_col_cur = l0->ref[0][i_b8]; \
-            if( ref_col_cur >= 0 ) \
+            int ref_col = l0->ref[0][i_b8]; \
+            if( ref_col >= 0 ) \
            { \
-                /* TODO: calc once per frame and tablize? */\
-                if( ref_col_cur != ref_col_prev ) \
-                    scale = 256 * (h->fenc->i_poc - h->fref0[i_ref]->i_poc) \
-                            / (l0->i_poc - l0->ref_poc[0][ref_col_cur]); \
-                mvc[i][0] = l0->mv[0][i_b4][0] * scale / 256; \
-                mvc[i][1] = l0->mv[0][i_b4][1] * scale / 256; \
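+                /* temporal distance ratio ~FIXED_SCALE*(poc_cur-poc_ref)/(poc_col-poc_colref); the divide is precomputed per frame in inv_ref_poc */ \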
+                int scale = (h->fdec->i_poc - h->fdec->ref_poc[0][i_ref]) * l0->inv_ref_poc[ref_col];\
+                mvc[i][0] = l0->mv[0][i_b4][0] * scale / FIXED_SCALE; \
+                mvc[i][1] = l0->mv[0][i_b4][1] * scale / FIXED_SCALE; \
                i++; \
-                ref_col_prev = ref_col_cur; \
            } \
        }
    *i_mvc = i;
}
+/* Set up a lookup table for delta pocs to reduce an IDIV to an IMUL */
+static void setup_inverse_delta_pocs( x264_t *h )
+{
+    int i;
+    for( i = 0; i < h->i_ref0; i++ )
+    {
+        int delta = h->fdec->i_poc - h->fref0[i]->i_poc;
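+        /* FIXED_SCALE/delta, rounded to nearest */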
+        h->fdec->inv_ref_poc[i] = (FIXED_SCALE + delta/2) / delta;
+    }
+}
+
static inline void x264_mb_mc_0xywh( x264_t *h, int x, int y, int width, int height )
{
    const int i8 = x264_scan8[0]+x+8*y;
}
    if( h->sh.i_type == SLICE_TYPE_P )
        memset( h->mb.cache.skip, 0, X264_SCAN8_SIZE * sizeof( int8_t ) );
+
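+    /* build the POC-delta reciprocal table read by x264_mb_predict_mv_ref16x16 */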
+    setup_inverse_delta_pocs( h );
}
void x264_prefetch_fenc( x264_t *h, x264_frame_t *fenc, int i_mb_x, int i_mb_y )