/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <limits.h>

#include "vpx_mem/vpx_mem.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_segmentation.h"
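
// Runs one motion search for a 16x16 macroblock: a coarse full-pel hex
// search around ref_mv followed by sub-pel refinement, then returns the
// SAD of the resulting inter prediction.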
static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
                                              int_mv *ref_mv,
                                              int_mv *dst_mv,
                                              int mb_row,
                                              int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
  unsigned int best_err;

  const int tmp_col_min = x->mv_col_min;
  const int tmp_col_max = x->mv_col_max;
  const int tmp_row_min = x->mv_row_min;
  const int tmp_row_max = x->mv_row_max;
  int_mv ref_full;

  // Further step/diamond searches as necessary
  int step_param = cpi->sf.reduce_first_step_size +
      (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2);
  step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2));

  vp9_clamp_mv_min_max(x, ref_mv);
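
  // MVs are stored in 1/8-pel units; shift down to full-pel for the
  // integer hex search below.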
  ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
  ref_full.as_mv.row = ref_mv->as_mv.row >> 3;

  /*cpi->sf.search_method == HEX*/
  best_err = vp9_hex_search(x, &ref_full, dst_mv, step_param, x->errorperbit,
                            &v_fn_ptr, NULL, NULL, NULL, NULL, ref_mv);

  // Try sub-pixel MC
  // if (bestsme > error_thresh && bestsme < INT_MAX)
  {
    int distortion;
    unsigned int sse;
    best_err = cpi->find_fractional_mv_step(
        x, dst_mv, ref_mv,
        x->errorperbit, &v_fn_ptr,
        NULL, NULL,
        &distortion, &sse);
  }

  vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
  vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
  best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                          xd->plane[0].dst.buf, xd->plane[0].dst.stride,
                          INT_MAX);

  /* restore UMV window */
  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;

  return best_err;
}
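
// Motion search over one reference: try the previous best MV as the
// starting point and, if that MV is non-zero, a (0,0)-anchored search
// as well, keeping whichever result has the lower error.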
static int do_16x16_motion_search(VP9_COMP *cpi,
                                  int_mv *ref_mv, int_mv *dst_mv,
                                  int buf_mb_y_offset, int mb_y_offset,
                                  int mb_row, int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err, tmp_err;
  int_mv tmp_mv;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV prediction
  err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
                     INT_MAX);
  dst_mv->as_int = 0;

  // Test last reference frame using the previous best mv as the
  // starting point (best reference) for the search
  tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv, mb_row, mb_col);
  if (tmp_err < err) {
    err = tmp_err;
    dst_mv->as_int = tmp_mv.as_int;
  }

  // If the current best reference mv is not centred on 0,0 then do a
  // 0,0 based search as well.
  if (ref_mv->as_int) {
    unsigned int tmp_err;
    int_mv zero_ref_mv, tmp_mv;

    zero_ref_mv.as_int = 0;
    tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv,
                                        mb_row, mb_col);
    if (tmp_err < err) {
      dst_mv->as_int = tmp_mv.as_int;
      err = tmp_err;
    }
  }

  return err;
}
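
// No search at all: just the SAD of the (0,0) MV against the current
// pre[0] reference, used for the alt-ref comparison.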
static int do_16x16_zerozero_search(VP9_COMP *cpi,
                                    int_mv *dst_mv,
                                    int buf_mb_y_offset, int mb_y_offset) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV prediction
  err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
                     INT_MAX);

  dst_mv->as_int = 0;

  return err;
}
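
// Tries every 16x16 intra prediction mode and returns the lowest SAD;
// the winning mode is stored through pbest_mode.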
static int find_best_16x16_intra(VP9_COMP *cpi,
                                 int mb_y_offset,
                                 MB_PREDICTION_MODE *pbest_mode) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_PREDICTION_MODE best_mode = -1, mode;
  unsigned int best_err = INT_MAX;

  // calculate SAD for each intra prediction mode;
  // we're intentionally not doing 4x4, we just want a rough estimate
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    unsigned int err;
    const int bwl = b_width_log2(BLOCK_SIZE_MB16X16), bw = 4 << bwl;
    const int bhl = b_height_log2(BLOCK_SIZE_MB16X16), bh = 4 << bhl;

    xd->mode_info_context->mbmi.mode = mode;
    vp9_build_intra_predictors(x->plane[0].src.buf, x->plane[0].src.stride,
                               xd->plane[0].dst.buf, xd->plane[0].dst.stride,
                               xd->mode_info_context->mbmi.mode,
                               bw, bh,
                               xd->up_available, xd->left_available,
                               xd->right_available);
    err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                       xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err);

    // find best
    if (err < best_err) {
      best_err = err;
      best_mode = mode;
    }
  }

  if (pbest_mode)
    *pbest_mode = best_mode;

  return best_err;
}
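
// Gathers the per-macroblock stats used by separate_arf_mbs(): best
// intra error/mode, golden-frame motion error/MV, and the alt-ref
// zero-MV error.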
static void update_mbgraph_mb_stats(VP9_COMP *cpi,
                                    MBGRAPH_MB_STATS *stats,
                                    YV12_BUFFER_CONFIG *buf,
                                    int mb_y_offset,
                                    YV12_BUFFER_CONFIG *golden_ref,
                                    int_mv *prev_golden_ref_mv,
                                    int gld_y_offset,
                                    YV12_BUFFER_CONFIG *alt_ref,
                                    int_mv *prev_alt_ref_mv,
                                    int arf_y_offset,
                                    int mb_row, int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int intra_error;
  VP9_COMMON *cm = &cpi->common;

  // FIXME in practice we're completely ignoring chroma here
  x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
  x->plane[0].src.stride = buf->y_stride;

  xd->plane[0].dst.buf = cm->yv12_fb[cm->new_fb_idx].y_buffer + mb_y_offset;
  xd->plane[0].dst.stride = cm->yv12_fb[cm->new_fb_idx].y_stride;

  // do intra 16x16 prediction
  intra_error = find_best_16x16_intra(cpi, mb_y_offset,
                                      &stats->ref[INTRA_FRAME].m.mode);
  if (intra_error <= 0)
    intra_error = 1;
  stats->ref[INTRA_FRAME].err = intra_error;

  // Golden frame MV search, if it exists and is different than last frame
  if (golden_ref) {
    int g_motion_error;
    xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
    xd->plane[0].pre[0].stride = golden_ref->y_stride;
    g_motion_error = do_16x16_motion_search(cpi,
                                            prev_golden_ref_mv,
                                            &stats->ref[GOLDEN_FRAME].m.mv,
                                            mb_y_offset, gld_y_offset,
                                            mb_row, mb_col);
    stats->ref[GOLDEN_FRAME].err = g_motion_error;
  } else {
    stats->ref[GOLDEN_FRAME].err = INT_MAX;
    stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
  }

  // Alt-ref frame MV search, if it exists and is different than
  // last/golden frame
  if (alt_ref) {
    int a_motion_error;
    xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
    xd->plane[0].pre[0].stride = alt_ref->y_stride;
    a_motion_error = do_16x16_zerozero_search(cpi,
                                              &stats->ref[ALTREF_FRAME].m.mv,
                                              mb_y_offset, arf_y_offset);

    stats->ref[ALTREF_FRAME].err = a_motion_error;
  } else {
    stats->ref[ALTREF_FRAME].err = INT_MAX;
    stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
  }
}
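
// Iterates over every macroblock in the frame, feeding each MB's
// golden/alt-ref MVs forward as predictors for its right and lower
// neighbours, and records the stats for the whole frame.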
static void update_mbgraph_frame_stats(VP9_COMP *cpi,
                                       MBGRAPH_FRAME_STATS *stats,
                                       YV12_BUFFER_CONFIG *buf,
                                       YV12_BUFFER_CONFIG *golden_ref,
                                       YV12_BUFFER_CONFIG *alt_ref) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;

  int mb_col, mb_row, offset = 0;
  int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
  int_mv arf_top_mv, gld_top_mv;
  MODE_INFO mi_local;

  // Make sure the mi context starts in a consistent state.
  vpx_memset(&mi_local, 0, sizeof(mi_local));

  // Set up limit values for motion vectors to prevent them extending
  // outside the UMV borders.
  arf_top_mv.as_int = 0;
  gld_top_mv.as_int = 0;
  x->mv_row_min = -(VP9BORDERINPIXELS - 8 - VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mb_rows - 1) * 8 + VP9BORDERINPIXELS
                  - 8 - VP9_INTERP_EXTEND;
  xd->up_available = 0;
  xd->plane[0].dst.stride = buf->y_stride;
  xd->plane[0].pre[0].stride = buf->y_stride;
  xd->plane[1].dst.stride = buf->uv_stride;
  xd->mode_info_context = &mi_local;
  mi_local.mbmi.sb_type = BLOCK_SIZE_MB16X16;
  mi_local.mbmi.ref_frame[0] = LAST_FRAME;
  mi_local.mbmi.ref_frame[1] = NONE;

  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    int_mv arf_left_mv, gld_left_mv;
    int mb_y_in_offset = mb_y_offset;
    int arf_y_in_offset = arf_y_offset;
    int gld_y_in_offset = gld_y_offset;

    // Set up limit values for motion vectors to prevent them extending
    // outside the UMV borders.
    arf_left_mv.as_int = arf_top_mv.as_int;
    gld_left_mv.as_int = gld_top_mv.as_int;
    x->mv_col_min = -(VP9BORDERINPIXELS - 8 - VP9_INTERP_EXTEND);
    x->mv_col_max = (cm->mb_cols - 1) * 8 + VP9BORDERINPIXELS
                    - 8 - VP9_INTERP_EXTEND;
    xd->left_available = 0;

    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];

      update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
                              golden_ref, &gld_left_mv, gld_y_in_offset,
                              alt_ref, &arf_left_mv, arf_y_in_offset,
                              mb_row, mb_col);
      arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int;
      gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int;
      if (mb_col == 0) {
        arf_top_mv.as_int = arf_left_mv.as_int;
        gld_top_mv.as_int = gld_left_mv.as_int;
      }
      xd->left_available = 1;
      mb_y_in_offset += 16;
      gld_y_in_offset += 16;
      arf_y_in_offset += 16;
      x->mv_col_min -= 16;
      x->mv_col_max -= 16;
    }
    xd->up_available = 1;
    mb_y_offset += buf->y_stride * 16;
    gld_y_offset += golden_ref->y_stride * 16;
    if (alt_ref)
      arf_y_offset += alt_ref->y_stride * 16;
    x->mv_row_min -= 16;
    x->mv_row_max -= 16;
    offset += cm->mb_cols;
  }
}

// void separate_arf_mbs_byzz
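// Splits macroblocks into two segments based on the mbgraph stats:
// MBs whose alt-ref zero-MV error stayed low across the whole group
// go in segment 1 (static), the rest in segment 0.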
static void separate_arf_mbs(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_col, mb_row, offset, i;
  int ncnt[4];
  int n_frames = cpi->mbgraph_n_frames;

  int *arf_not_zz;

  CHECK_MEM_ERROR(arf_not_zz,
                  vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz),
                             1));

  // We are not interested in results beyond the alt ref itself.
  if (n_frames > cpi->frames_till_gf_update_due)
    n_frames = cpi->frames_till_gf_update_due;

  // defer cost to reference frames
  for (i = n_frames - 1; i >= 0; i--) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];

    for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
         offset += cm->mb_cols, mb_row++) {
      for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
        MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];

        int altref_err = mb_stats->ref[ALTREF_FRAME].err;
        int intra_err = mb_stats->ref[INTRA_FRAME].err;
        int golden_err = mb_stats->ref[GOLDEN_FRAME].err;

        // Test for altref vs intra and gf and that its mv was 0,0.
        if (altref_err > 1000 ||
            altref_err > intra_err ||
            altref_err > golden_err) {
          arf_not_zz[offset + mb_col]++;
        }
      }
    }
  }

  vpx_memset(ncnt, 0, sizeof(ncnt));
  for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
       offset += cm->mb_cols, mb_row++) {
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
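      // Each 16x16 MB covers a 2x2 block of 8x8 mi units in the
      // segmentation map: offset * 4 and 2 * mb_col index the top-left
      // unit (assuming cm->mi_cols == 2 * cm->mb_cols).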
      // If any of the blocks in the sequence failed then the MB
      // goes in segment 0.
      if (arf_not_zz[offset + mb_col]) {
        ncnt[0]++;
        cpi->segmentation_map[offset * 4 + 2 * mb_col] = 0;
        cpi->segmentation_map[offset * 4 + 2 * mb_col + 1] = 0;
        cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols] = 0;
        cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols + 1] = 0;
      } else {
        ncnt[1]++;
        cpi->segmentation_map[offset * 4 + 2 * mb_col] = 1;
        cpi->segmentation_map[offset * 4 + 2 * mb_col + 1] = 1;
        cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols] = 1;
        cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols + 1] = 1;
      }
    }
  }

  // Only bother with segmentation if over 10% of the MBs in static segment
  // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
  if (1) {
    // Note % of blocks that are marked as static
    if (cm->MBs)
      cpi->static_mb_pct = (ncnt[1] * 100) / cm->MBs;

    // This error case should not be reachable as this function should
    // never be called with the common data structure uninitialized.
    else
      cpi->static_mb_pct = 0;

    cpi->seg0_cnt = ncnt[0];
    vp9_enable_segmentation((VP9_PTR)cpi);
  } else {
    cpi->static_mb_pct = 0;
    vp9_disable_segmentation((VP9_PTR)cpi);
  }

  // Free locally allocated storage
  vpx_free(arf_not_zz);
}
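
// Entry point: runs the mbgraph motion/intra analysis over the
// lookahead frames up to the alt-ref, then derives the static-region
// segmentation from the accumulated stats.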
void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
  YV12_BUFFER_CONFIG *golden_ref =
      &cm->yv12_fb[cm->ref_frame_map[cpi->gld_fb_idx]];

  // we need to look ahead beyond where the ARF transitions into
  // being a GF - so exit if we don't look ahead beyond that
  if (n_frames <= cpi->frames_till_gf_update_due)
    return;
  if (n_frames > (int)cpi->common.frames_till_alt_ref_frame)
    n_frames = cpi->common.frames_till_alt_ref_frame;
  if (n_frames > MAX_LAG_BUFFERS)
    n_frames = MAX_LAG_BUFFERS;

  cpi->mbgraph_n_frames = n_frames;
  for (i = 0; i < n_frames; i++) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
    vpx_memset(frame_stats->mb_stats, 0,
               cm->mb_rows * cm->mb_cols *
               sizeof(*cpi->mbgraph_stats[i].mb_stats));
  }

  // do motion search to find contribution of each reference to data
  // later on in this GF group
  // FIXME really, the GF/last MC search should be done forward, and
  // the ARF MC search backwards, to get optimal results for MV caching
  for (i = 0; i < n_frames; i++) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
    struct lookahead_entry *q_cur = vp9_lookahead_peek(cpi->lookahead, i);

    assert(q_cur != NULL);

    update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
                               golden_ref, cpi->Source);
  }

  vp9_clear_system_state();  // __asm emms;

  separate_arf_mbs(cpi);
}