/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_extend.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_onyx_int.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
#define DBG_PRNT_SEGMAP 0

static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize);

static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp9_activity_masking().
 */
#define ACTIVITY_AVG_MIN (64)
/* Motion vector component magnitude threshold for defining fast motion. */
#define FAST_MOTION_MV_THRESH (24)
/* This is used as a reference when computing the source variance for the
 * purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 * which will be faster.
 */
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};
static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi, MACROBLOCK *x,
                                              BLOCK_SIZE bs) {
  unsigned int var, sse;
  var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
                           x->plane[0].src.stride,
                           VP9_VAR_OFFS, 0, &sse);
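  // The return below rounds to nearest when normalizing to a per-pixel
  // value: e.g. for BLOCK_16X16, num_pels_log2_lookup[bs] is 8 (256 pixels),
  // so a block variance of 1000 yields (1000 + 128) >> 8 = 4.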
  return (var + (1 << (num_pels_log2_lookup[bs] - 1))) >>
         num_pels_log2_lookup[bs];
}
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(MACROBLOCK *x) {
  unsigned int act;
  unsigned int sse;
  /* TODO: This could also be done over smaller areas (8x8), but that would
   * require extensive changes elsewhere, as lambda is assumed to be fixed
   * over an entire MB in most of the code.
   * Another option is to compute four 8x8 variances, and pick a single
   * lambda using a non-linear combination (e.g., the smallest, or second
   * smallest, etc.).
   */
  act = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                          VP9_VAR_OFFS, 0, &sse);
  act <<= 4;

  /* If the region is flat, lower the activity some more. */
  if (act < 8 << 12)
    act = act < 5 << 12 ? act : 5 << 12;

  return act;
}
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(MACROBLOCK *x, int use_dc_pred) {
  return vp9_encode_intra(x, use_dc_pred);
}

DECLARE_ALIGNED(16, static const uint8_t, vp9_64x64_zeros[64*64]) = {0};
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) {
  unsigned int mb_activity;

  if (ALT_ACT_MEASURE) {
    int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
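    // use_dc_pred is true when exactly one of mb_row/mb_col is zero, i.e.
    // for macroblocks on the top row or left column of the frame (but not
    // the top-left corner), where only one prediction edge is available.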
    // Or use an alternative.
    mb_activity = alt_activity_measure(x, use_dc_pred);
  } else {
    // Original activity measure from Tim T's code.
    mb_activity = tt_activity_measure(x);
  }

  if (mb_activity < ACTIVITY_AVG_MIN)
    mb_activity = ACTIVITY_AVG_MIN;

  return mb_activity;
}
// Calculate an "average" mb activity value for the frame
#define ACT_MEDIAN 0
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
  // Find median: Simple n^2 algorithm for experimentation
  {
    unsigned int median;
    unsigned int i, j;
    unsigned int *sortlist;
    unsigned int tmp;

    // Create a list to sort to
    CHECK_MEM_ERROR(&cpi->common, sortlist, vpx_calloc(sizeof(unsigned int),
                    cpi->common.MBs));

    // Copy map to sort list
    vpx_memcpy(sortlist, cpi->mb_activity_map,
               sizeof(unsigned int) * cpi->common.MBs);

    // Ripple each value down to its correct position
    for (i = 1; i < cpi->common.MBs; i++) {
      for (j = i; j > 0; j--) {
        if (sortlist[j] < sortlist[j - 1]) {
          // Swap values
          tmp = sortlist[j - 1];
          sortlist[j - 1] = sortlist[j];
          sortlist[j] = tmp;
        } else {
          break;
        }
      }
    }

    // Even number MBs so estimate median as mean of two either side.
    median = (1 + sortlist[cpi->common.MBs >> 1] +
              sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;
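    // Note: for an even count the two middle values sit at indices
    // MBs/2 - 1 and MBs/2; the pair used here is one slot higher, an
    // off-by-one that is tolerable for this experimental estimate.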
    cpi->activity_avg = median;

    vpx_free(sortlist);
  }
#else
  // Simple mean for now
  cpi->activity_avg = (unsigned int) (activity_sum / cpi->common.MBs);
#endif  // ACT_MEDIAN

  if (cpi->activity_avg < ACTIVITY_AVG_MIN)
    cpi->activity_avg = ACTIVITY_AVG_MIN;

  // Experimental code: return fixed value normalized for several clips
  if (ALT_ACT_MEASURE)
    cpi->activity_avg = 100000;
}
#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0
#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_row, mb_col;

  int64_t act;
  int64_t a;
  int64_t b;

#if OUTPUT_NORM_ACT_STATS
  FILE *f = fopen("norm_act.stt", "a");
  fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif

  // Reset pointers to start of activity map
  x->mb_activity_ptr = cpi->mb_activity_map;

  // Calculate normalized mb activity number.
  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      // Read activity from the map
      act = *(x->mb_activity_ptr);

      // Calculate a normalized activity number
      a = act + 4 * cpi->activity_avg;
      b = 4 * act + cpi->activity_avg;

      if (b >= a)
        *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
      else
        *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
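      // The index is a signed, rounded version of (b/a - 1): it is 0 when
      // act equals activity_avg (a == b), positive for busier-than-average
      // MBs and negative for flatter ones, saturating at +/-3 since b/a
      // lies in (1/4, 4).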
#if OUTPUT_NORM_ACT_STATS
      fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
      // Increment activity map pointers
      x->mb_activity_ptr++;
    }

#if OUTPUT_NORM_ACT_STATS
    fprintf(f, "\n");
#endif
  }

#if OUTPUT_NORM_ACT_STATS
  fclose(f);
#endif
}
#endif  // USE_ACT_INDEX
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map(VP9_COMP *cpi) {
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  VP9_COMMON * const cm = &cpi->common;

#if ALT_ACT_MEASURE
  YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
  int recon_yoffset;
  int recon_y_stride = new_yv12->y_stride;
#endif

  int mb_row, mb_col;
  unsigned int mb_activity;
  int64_t activity_sum = 0;

  x->mb_activity_ptr = cpi->mb_activity_map;

  // for each macroblock row in image
  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
#if ALT_ACT_MEASURE
    // reset above block coeffs
    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);
#endif

    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
#if ALT_ACT_MEASURE
      xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
      xd->left_available = (mb_col != 0);
      recon_yoffset += 16;
#endif

      // measure activity
      mb_activity = mb_activity_measure(x, mb_row, mb_col);

      // Keep frame sum
      activity_sum += mb_activity;

      // Store MB level activity details.
      *x->mb_activity_ptr = mb_activity;

      // Increment activity map pointer
      x->mb_activity_ptr++;

      // adjust to the next column of source macroblocks
      x->plane[0].src.buf += 16;
    }

    // adjust to the next row of mbs
    x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
  }

  // Calculate an "average" MB activity
  calc_av_activity(cpi, activity_sum);

#if USE_ACT_INDEX
  // Calculate an activity index number of each mb
  calc_activity_index(cpi, x);
#endif
}
// Macroblock activity masking
void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  // Apply the masking to the RD multiplier.
  a = act + (2 * cpi->activity_avg);
  b = (2 * act) + cpi->activity_avg;
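  // b/a = (2*act + avg) / (act + 2*avg) lies in (1/2, 2), so the masking
  // below scales rdmult by at most a factor of two either way: up for MBs
  // busier than the frame average, down for flatter-than-average MBs.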
  x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#endif

  // Activity based Zbin adjustment
  adjust_act_zbin(cpi, x);
}
static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
                         BLOCK_SIZE bsize, int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;

  int mb_mode_index = ctx->best_mode_index;
  const int mis = cm->mode_info_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];

  assert(mi->mbmi.mode < MB_MODE_COUNT);
  assert(mb_mode_index < MAX_MODES);
  assert(mi->mbmi.ref_frame[0] < MAX_REF_FRAMES);
  assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES);
  assert(mi->mbmi.sb_type == bsize);

  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
          && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y)
        xd->mode_info_context[x_idx + y * mis] = *mi;
  // FIXME(rbultje) I'm pretty sure this should go to the end of this block
  // (i.e. after the output_enabled)
  if (bsize < BLOCK_32X32) {
    if (bsize < BLOCK_16X16)
      ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
    ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
  }

  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
    *x->partition_info = ctx->partition_info;
    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }

  x->skip = ctx->skip;

  if (!output_enabled)
    return;

  if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    for (i = 0; i < TX_MODES; i++)
      cpi->rd_tx_select_diff[i] += ctx->tx_rd_diff[i];
  }

  if (cm->frame_type == KEY_FRAME) {
    // Restore the coding modes to that held in the coding context
    // if (mb_mode == I4X4_PRED)
    //   for (i = 0; i < 16; i++) {
    //     xd->block[i].bmi.as_mode =
    //         xd->mode_info_context->bmi[i].as_mode;
    //     assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
    //   }
#if CONFIG_INTERNAL_STATS
    static const int kf_mode_index[] = {
      THR_DC /*DC_PRED*/,
      THR_V_PRED /*V_PRED*/,
      THR_H_PRED /*H_PRED*/,
      THR_D45_PRED /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED /*D63_PRED*/,
      THR_TM /*TM_PRED*/,
      THR_B_PRED /*I4X4_PRED*/,
    };
    cpi->mode_chosen_counts[kf_mode_index[mi->mbmi.mode]]++;
#endif
  } else {
    // Note how often each mode chosen as best
    cpi->mode_chosen_counts[mb_mode_index]++;
    if (is_inter_block(mbmi)
        && (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV)) {
      int_mv best_mv, best_second_mv;
      const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0];
      const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
      best_mv.as_int = ctx->best_ref_mv.as_int;
      best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
      if (mbmi->mode == NEWMV) {
        best_mv.as_int = mbmi->ref_mvs[rf1][0].as_int;
        best_second_mv.as_int = mbmi->ref_mvs[rf2][0].as_int;
      }
      mbmi->best_mv.as_int = best_mv.as_int;
      mbmi->best_second_mv.as_int = best_second_mv.as_int;
      vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
    }
    if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) {
      int i, j;
      for (j = 0; j < mi_height; ++j)
        for (i = 0; i < mi_width; ++i)
          if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i
              && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j)
            xd->mode_info_context[mis * j + i].mbmi = *mbmi;
    }
    if (cm->mcomp_filter_type == SWITCHABLE && is_inter_mode(mbmi->mode)) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      ++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
    }

    cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
    cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
    cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;

    for (i = 0; i <= SWITCHABLE_FILTERS; i++)
      cpi->rd_filter_diff[i] += ctx->best_filter_diff[i];
  }
}
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mb_row, int mb_col) {
  uint8_t *buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
                         src->alpha_buffer};
  int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
                    src->alpha_stride};
  int i;

  for (i = 0; i < MAX_MB_PLANE; i++) {
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mb_row, mb_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
  }
}
static void set_offsets(VP9_COMP *cpi, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int dst_fb_idx = cm->new_fb_idx;
  const int idx_str = xd->mode_info_stride * mi_row + mi_col;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mb_row = mi_row >> 1;
  const int mb_col = mi_col >> 1;
  const int idx_map = mb_row * cm->mb_cols + mb_col;
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(cm, xd, mi_row, mi_col);
  set_partition_seg_context(cm, xd, mi_row, mi_col);

  // Activity map pointer
  x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
  x->active_ptr = cpi->active_map + idx_map;

  /* pointers to mode info contexts */
  x->partition_info = x->pi + idx_str;
  xd->mode_info_context = cm->mi + idx_str;
  mbmi = &xd->mode_info_context->mbmi;
  // Special case: if prev_mi is NULL, the previous mode info context
  // cannot be used.
  xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + idx_str : NULL;

  // Set up destination pointers
  setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);

  // Set up limit values for MV components
  // mv beyond the range do not produce new/different prediction block
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
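  // These limits are in full-pel units (MI_SIZE is 8 pixels): the extremes
  // let the block move entirely outside the visible frame, plus
  // VP9_INTERP_EXTEND rows/cols of slack for the subpel filter support.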
  // Set up distance of MB to edge of frame in 1/8th pel units
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(cm, xd, mi_row, mi_height, mi_col, mi_width);

  /* set up source buffers */
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  /* R/D setup */
  x->rddiv = cpi->RDDIV;
  x->rdmult = cpi->RDMULT;
  /* segment ID */
  if (seg->enabled) {
    uint8_t *map = seg->update_map ? cpi->segmentation_map
                                   : cm->last_frame_seg_map;
    mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);

    vp9_mb_init_quantizer(cpi, x);

    if (seg->enabled && cpi->seg0_cnt > 0
        && !vp9_segfeature_active(seg, 0, SEG_LVL_REF_FRAME)
        && vp9_segfeature_active(seg, 1, SEG_LVL_REF_FRAME)) {
      cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
    } else {
      const int y = mb_row & ~3;
      const int x = mb_col & ~3;
      const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
      const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
      const int tile_progress = cm->cur_tile_mi_col_start * cm->mb_rows >> 1;
      const int mb_cols = (cm->cur_tile_mi_col_end - cm->cur_tile_mi_col_start)
                          >> 1;

      cpi->seg0_progress = ((y * mb_cols + x * 4 + p32 + p16 + tile_progress)
                            << 16) / cm->MBs;
    }

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->oxcf.encode_breakout;
  }
}
static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
                          int *totalrate, int64_t *totaldist,
                          BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                          int64_t best_rd) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  // Use the lower precision, but faster, 32x32 fdct for mode selection.
  x->use_lp32x32fdct = 1;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (xd->ab_index != 0) {
      *totalrate = 0;
      *totaldist = 0;
      return;
    }
  }

  set_offsets(cpi, mi_row, mi_col, bsize);
  xd->mode_info_context->mbmi.sb_type = bsize;

  // Set to zero to make sure we do not use the previous encoded frame stats
  xd->mode_info_context->mbmi.skip_coeff = 0;

  x->source_variance = get_sby_perpixel_variance(cpi, x, bsize);

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
    vp9_activity_masking(cpi, x);

  // Find best coding mode & reconstruct the MB so it is available
  // as a predictor for MBs that follow in the SB
  if (cm->frame_type == KEY_FRAME)
    vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx,
                              best_rd);
  else
    vp9_rd_pick_inter_mode_sb(cpi, x, mi_row, mi_col, totalrate, totaldist,
                              bsize, ctx, best_rd);
}
static void update_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mode_info_context;
  MB_MODE_INFO *const mbmi = &mi->mbmi;

  if (cm->frame_type != KEY_FRAME) {
    const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
                                                     SEG_LVL_REF_FRAME);

    if (!seg_ref_active)
      cpi->intra_inter_count[vp9_get_pred_context_intra_inter(xd)]
                            [is_inter_block(mbmi)]++;

    // If the segment reference feature is enabled we have only a single
    // reference frame allowed for the segment so exclude it from
    // the reference frame counts used to work out probabilities.
    if (is_inter_block(mbmi) && !seg_ref_active) {
      if (cm->comp_pred_mode == HYBRID_PREDICTION)
        cpi->comp_inter_count[vp9_get_pred_context_comp_inter_inter(cm, xd)]
                             [has_second_ref(mbmi)]++;

      if (has_second_ref(mbmi)) {
        cpi->comp_ref_count[vp9_get_pred_context_comp_ref_p(cm, xd)]
                           [mbmi->ref_frame[0] == GOLDEN_FRAME]++;
      } else {
        cpi->single_ref_count[vp9_get_pred_context_single_ref_p1(xd)][0]
                             [mbmi->ref_frame[0] != LAST_FRAME]++;
        if (mbmi->ref_frame[0] != LAST_FRAME)
          cpi->single_ref_count[vp9_get_pred_context_single_ref_p2(xd)][1]
                               [mbmi->ref_frame[0] != GOLDEN_FRAME]++;
      }
    }

    // Count of last ref frame 0,0 usage
    if (mbmi->mode == ZEROMV && mbmi->ref_frame[0] == LAST_FRAME)
      cpi->inter_zz_count++;
  }
}
// TODO(jingning): the variables used here are a little complicated. Needs
// further refactoring on organizing the temporary buffers, when recursive
// partition down to 4x4 block size is enabled.
static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  switch (bsize) {
    case BLOCK_64X64:
      return &x->sb64_context;
    case BLOCK_64X32:
      return &x->sb64x32_context[xd->sb_index];
    case BLOCK_32X64:
      return &x->sb32x64_context[xd->sb_index];
    case BLOCK_32X32:
      return &x->sb32_context[xd->sb_index];
    case BLOCK_32X16:
      return &x->sb32x16_context[xd->sb_index][xd->mb_index];
    case BLOCK_16X32:
      return &x->sb16x32_context[xd->sb_index][xd->mb_index];
    case BLOCK_16X16:
      return &x->mb_context[xd->sb_index][xd->mb_index];
    case BLOCK_16X8:
      return &x->sb16x8_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_8X16:
      return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_8X8:
      return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_8X4:
      return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_4X8:
      return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
    case BLOCK_4X4:
      return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
    default:
      assert(0);
      return NULL;
  }
}
static BLOCK_SIZE *get_sb_partitioning(MACROBLOCK *x, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  switch (bsize) {
    case BLOCK_64X64:
      return &x->sb64_partitioning;
    case BLOCK_32X32:
      return &x->sb_partitioning[xd->sb_index];
    case BLOCK_16X16:
      return &x->mb_partitioning[xd->sb_index][xd->mb_index];
    case BLOCK_8X8:
      return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
    default:
      assert(0);
      return NULL;
  }
}
static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
                            ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                            ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                            PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                            BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  for (p = 0; p < MAX_MB_PLANE; p++) {
    vpx_memcpy(
        cm->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
        a + num_4x4_blocks_wide * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
            xd->plane[p].subsampling_x);
    vpx_memcpy(
        cm->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        l + num_4x4_blocks_high * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
            xd->plane[p].subsampling_y);
  }
  vpx_memcpy(cm->above_seg_context + mi_col, sa,
             sizeof(PARTITION_CONTEXT) * mi_width);
  vpx_memcpy(cm->left_seg_context + (mi_row & MI_MASK), sl,
             sizeof(PARTITION_CONTEXT) * mi_height);
}
static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
                         ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                         ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                         PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                         BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];

  // buffer the above/left context information of the block in search.
  for (p = 0; p < MAX_MB_PLANE; ++p) {
    vpx_memcpy(
        a + num_4x4_blocks_wide * p,
        cm->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
            xd->plane[p].subsampling_x);
    vpx_memcpy(
        l + num_4x4_blocks_high * p,
        cm->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
            xd->plane[p].subsampling_y);
  }
  vpx_memcpy(sa, cm->above_seg_context + mi_col,
             sizeof(PARTITION_CONTEXT) * mi_width);
  vpx_memcpy(sl, cm->left_seg_context + (mi_row & MI_MASK),
             sizeof(PARTITION_CONTEXT) * mi_height);
}
static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
                     int output_enabled, BLOCK_SIZE bsize, int sub_index) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD * const xd = &x->e_mbd;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (sub_index != -1)
    *get_sb_index(xd, bsize) = sub_index;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (xd->ab_index > 0)
      return;
  }
  set_offsets(cpi, mi_row, mi_col, bsize);
  update_state(cpi, get_block_context(x, bsize), bsize, output_enabled);
  encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);

  if (output_enabled) {
    update_stats(cpi);

    (*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}
static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD * const xd = &x->e_mbd;
  BLOCK_SIZE c1 = BLOCK_8X8;
  const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
  int UNINITIALIZED_IS_SAFE(pl);
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  int i;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize >= BLOCK_8X8) {
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, bsize);
    c1 = *(get_sb_partitioning(x, bsize));
  }
  partition = partition_lookup[bsl][c1];

  switch (partition) {
    case PARTITION_NONE:
      if (output_enabled && bsize >= BLOCK_8X8)
        cpi->partition_count[pl][PARTITION_NONE]++;
      encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
      break;
    case PARTITION_VERT:
      if (output_enabled)
        cpi->partition_count[pl][PARTITION_VERT]++;
      encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
      encode_b(cpi, tp, mi_row, mi_col + bs, output_enabled, c1, 1);
      break;
    case PARTITION_HORZ:
      if (output_enabled)
        cpi->partition_count[pl][PARTITION_HORZ]++;
      encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, 0);
      encode_b(cpi, tp, mi_row + bs, mi_col, output_enabled, c1, 1);
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      if (output_enabled)
        cpi->partition_count[pl][PARTITION_SPLIT]++;

      for (i = 0; i < 4; i++) {
        const int x_idx = i & 1, y_idx = i >> 1;

        *get_sb_index(xd, subsize) = i;
        encode_sb(cpi, tp, mi_row + y_idx * bs, mi_col + x_idx * bs,
                  output_enabled, subsize);
      }
      break;
    default:
      assert(0);
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) {
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    update_partition_context(xd, c1, bsize);
  }
}
static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mode_info_stride;
  int block_row, block_col;
  for (block_row = 0; block_row < 8; ++block_row) {
    for (block_col = 0; block_col < 8; ++block_col) {
      m[block_row * mis + block_col].mbmi.sb_type = bsize;
    }
  }
}
static void copy_partitioning(VP9_COMP *cpi, MODE_INFO *m, MODE_INFO *p) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mode_info_stride;
  int block_row, block_col;
  for (block_row = 0; block_row < 8; ++block_row) {
    for (block_col = 0; block_col < 8; ++block_col) {
      m[block_row * mis + block_col].mbmi.sb_type =
          p[block_row * mis + block_col].mbmi.sb_type;
    }
  }
}
static void set_block_size(VP9_COMMON * const cm, MODE_INFO *mi,
                           BLOCK_SIZE bsize, int mis, int mi_row,
                           int mi_col) {
  int r, c;
  const int bs = MAX(num_8x8_blocks_wide_lookup[bsize],
                     num_8x8_blocks_high_lookup[bsize]);
  MODE_INFO *const mi2 = &mi[mi_row * mis + mi_col];
  for (r = 0; r < bs; r++)
    for (c = 0; c < bs; c++)
      if (mi_row + r < cm->mi_rows && mi_col + c < cm->mi_cols)
        mi2[r * mis + c].mbmi.sb_type = bsize;
}
typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

#define VT(TYPE, BLOCKSIZE) \
  typedef struct { \
    partition_variance vt; \
    BLOCKSIZE split[4]; \
  } TYPE;

VT(v8x8, var)
VT(v16x16, v8x8)
VT(v32x32, v16x16)
VT(v64x64, v32x32)

typedef struct {
  partition_variance *vt;
  var *split[4];
} vt_node;
static void tree_to_node(void *data, BLOCK_SIZE bsize, vt_node *node) {
  int i;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *) data;
      node->vt = &vt->vt;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].vt.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *) data;
      node->vt = &vt->vt;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].vt.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *) data;
      node->vt = &vt->vt;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].vt.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *) data;
      node->vt = &vt->vt;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];
      break;
    }
    default: {
      node->vt = 0;
      for (i = 0; i < 4; i++)
        node->split[i] = 0;
      assert(0);
    }
  }
}
// Set variance values given sum square error, sum error, count.
static void fill_variance(var *v, int64_t s2, int64_t s, int c) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->count = c;
  v->variance = (int)(256
      * (v->sum_square_error - v->sum_error * v->sum_error / v->count)
      / v->count);
}
// Combine 2 variance structures by summing the sum_error, sum_square_error,
// and counts and then calculating the new variance.
void sum_2_variances(var *r, var *a, var *b) {
  fill_variance(r, a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->count + b->count);
}
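// Merging works because the variance depends only on the raw moments:
//   variance = 256 * (sum_sq - sum * sum / n) / n
// so summing sum, sum_sq and n over two child blocks gives exactly the
// variance of their union; no per-pixel data needs to be revisited.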
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  vt_node node;
  tree_to_node(data, bsize, &node);
  sum_2_variances(&node.vt->horz[0], node.split[0], node.split[1]);
  sum_2_variances(&node.vt->horz[1], node.split[2], node.split[3]);
  sum_2_variances(&node.vt->vert[0], node.split[0], node.split[2]);
  sum_2_variances(&node.vt->vert[1], node.split[1], node.split[3]);
  sum_2_variances(&node.vt->none, &node.vt->vert[0], &node.vt->vert[1]);
}
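// The four children are ordered raster style (0 = top-left, 1 = top-right,
// 2 = bottom-left, 3 = bottom-right), so horz[0]/horz[1] hold the top and
// bottom halves, vert[0]/vert[1] the left and right halves, and none the
// whole block.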
#if PERFORM_RANDOM_PARTITIONING
static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
                               BLOCK_SIZE block_size, int mi_row,
                               int mi_col, int mi_size) {
  VP9_COMMON * const cm = &cpi->common;
  vt_node vt;
  const int mis = cm->mode_info_stride;
  int64_t threshold = 4 * cpi->common.base_qindex * cpi->common.base_qindex;

  tree_to_node(data, block_size, &vt);

  // split none is available only if we have more than half a block size
  // in width and height inside the visible image
  if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows &&
      vt.vt->none.variance < threshold && (rand() & 3) < 1) {
    set_block_size(cm, m, block_size, mis, mi_row, mi_col);
    return 1;
  }

  // vertical split is available on all but the bottom border
  if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
      && (rand() & 3) < 1) {
    set_block_size(cm, m, get_subsize(block_size, PARTITION_VERT), mis, mi_row,
                   mi_col);
    return 1;
  }

  // horizontal split is available on all but the right border
  if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
      && (rand() & 3) < 1) {
    set_block_size(cm, m, get_subsize(block_size, PARTITION_HORZ), mis, mi_row,
                   mi_col);
    return 1;
  }

  return 0;
}
#else  // !PERFORM_RANDOM_PARTITIONING
static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
                               BLOCK_SIZE bsize, int mi_row,
                               int mi_col, int mi_size) {
  VP9_COMMON * const cm = &cpi->common;
  vt_node vt;
  const int mis = cm->mode_info_stride;
  int64_t threshold = 50 * cpi->common.base_qindex;

  tree_to_node(data, bsize, &vt);

  // split none is available only if we have more than half a block size
  // in width and height inside the visible image
  if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows
      && vt.vt->none.variance < threshold) {
    set_block_size(cm, m, bsize, mis, mi_row, mi_col);
    return 1;
  }

  // vertical split is available on all but the bottom border
  if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
      && vt.vt->vert[1].variance < threshold) {
    set_block_size(cm, m, get_subsize(bsize, PARTITION_VERT), mis, mi_row,
                   mi_col);
    return 1;
  }

  // horizontal split is available on all but the right border
  if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
      && vt.vt->horz[1].variance < threshold) {
    set_block_size(cm, m, get_subsize(bsize, PARTITION_HORZ), mis, mi_row,
                   mi_col);
    return 1;
  }

  return 0;
}
#endif  // PERFORM_RANDOM_PARTITIONING
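// In both variants the accept-this-size threshold scales with the
// quantizer (4 * base_qindex^2 in the random variant, 50 * base_qindex
// otherwise): at coarser quantization, larger low-variance blocks are
// acceptable, so fewer splits are kept.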
static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
                                int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK *x = &cpi->mb;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  const int mis = cm->mode_info_stride;
  // TODO(JBB): More experimentation or testing of this threshold;
  int64_t threshold = 4;
  int i, j, k;
  v64x64 vt;
  unsigned char *s;
  int sp;
  const unsigned char * d;
  int dp;
  int pixels_wide = 64, pixels_high = 64;

  vpx_memset(&vt, 0, sizeof(vt));

  set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);

  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  // TODO(JBB): Clearly the higher the quantizer the fewer partitions we want
  // but this needs more experimentation.
  threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;

  d = vp9_64x64_zeros;
  dp = 64;
  if (cm->frame_type != KEY_FRAME) {
    int_mv nearest_mv, near_mv;
    const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, LAST_FRAME)];
    YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx];
    YV12_BUFFER_CONFIG *second_ref_fb = NULL;

    setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
                     &xd->scale_factor[0]);
    setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
                     &xd->scale_factor[1]);

    xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
    xd->mode_info_context->mbmi.sb_type = BLOCK_64X64;
    vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
                          &nearest_mv, &near_mv);

    xd->mode_info_context->mbmi.mv[0] = nearest_mv;
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;
  }
  // Fill in the entire tree of 8x8 variances for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      v16x16 *vst = &vt.split[i].split[j];
      for (k = 0; k < 4; k++) {
        int x_idx = x16_idx + ((k & 1) << 3);
        int y_idx = y16_idx + ((k >> 1) << 3);
        unsigned int sse = 0;
        int sum = 0;
        if (x_idx < pixels_wide && y_idx < pixels_high)
          vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp,
                              d + y_idx * dp + x_idx, dp, &sse, &sum);
        fill_variance(&vst->split[k].vt.none, sse, sum, 64);
      }
    }
  }
  // Fill the rest of the variance tree by summing the split partition
  // values.
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
      fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
  }
  fill_variance_tree(&vt, BLOCK_64X64);
  // Now go through the entire structure, splitting every block size until
  // we get to one that's got a variance lower than our threshold, or we
  // reach the smallest block size (8x8).
  if (!set_vt_partitioning(cpi, &vt, m, BLOCK_64X64, mi_row, mi_col,
                           4)) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx), 2)) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
                                   BLOCK_16X16,
                                   (mi_row + y32_idx + y16_idx),
                                   (mi_col + x32_idx + x16_idx), 1)) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              set_block_size(cm, m, BLOCK_8X8, mis,
                             (mi_row + y32_idx + y16_idx + y8_idx),
                             (mi_col + x32_idx + x16_idx + x8_idx));
            }
          }
        }
      }
    }
  }
}
static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
                             int mi_row, int mi_col, BLOCK_SIZE bsize,
                             int *rate, int64_t *dist, int do_recon) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  const int mis = cm->mode_info_stride;
  int bsl = b_width_log2(bsize);
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int ms = num_4x4_blocks_wide / 2;
  int mh = num_4x4_blocks_high / 2;
  int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE subsize;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int last_part_rate = INT_MAX;
  int64_t last_part_dist = INT_MAX;
  int split_rate = INT_MAX;
  int64_t split_dist = INT_MAX;
  int none_rate = INT_MAX;
  int64_t none_dist = INT_MAX;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT_MAX;
  BLOCK_SIZE sub_subsize = BLOCK_4X4;
  int splits_below = 0;
  BLOCK_SIZE bs_type = m->mbmi.sb_type;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = partition_lookup[bsl][bs_type];

  subsize = get_subsize(bsize, partition);

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (xd->ab_index != 0) {
      *rate = 0;
      *dist = 0;
      return;
    }
  } else {
    *(get_sb_partitioning(x, bsize)) = subsize;
  }

  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  x->fast_ms = 0;
  x->pred_mv.as_int = 0;
  x->subblock_ref = 0;
  if (cpi->sf.adjust_partitioning_from_last_frame) {
    // Check if any of the sub blocks are further split.
    if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
      sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
      splits_below = 1;
      for (i = 0; i < 4; i++) {
        int jj = i >> 1, ii = i & 0x01;
        if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) {
          splits_below = 0;
        }
      }
    }

    // If the partition is not none, also try none unless each of the 4
    // splits is split even further.
    if (partition != PARTITION_NONE && !splits_below &&
        mi_row + (ms >> 1) < cm->mi_rows &&
        mi_col + (ms >> 1) < cm->mi_cols) {
      *(get_sb_partitioning(x, bsize)) = bsize;
      pick_sb_modes(cpi, mi_row, mi_col, &none_rate, &none_dist, bsize,
                    get_block_context(x, bsize), INT64_MAX);

      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      none_rate += x->partition_cost[pl][PARTITION_NONE];

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      m->mbmi.sb_type = bs_type;
      *(get_sb_partitioning(x, bsize)) = subsize;
    }
  }
  switch (partition) {
    case PARTITION_NONE:
      pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    bsize, get_block_context(x, bsize), INT64_MAX);
      break;
    case PARTITION_HORZ:
      *get_sb_index(xd, subsize) = 0;
      pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    subsize, get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
        int rt = 0;
        int64_t dt = 0;
        update_state(cpi, get_block_context(x, subsize), subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
        *get_sb_index(xd, subsize) = 1;
        pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, &rt, &dt, subsize,
                      get_block_context(x, subsize), INT64_MAX);
        if (rt == INT_MAX || dt == INT_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT_MAX;
          break;
        }

        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_VERT:
      *get_sb_index(xd, subsize) = 0;
      pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    subsize, get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
        int rt = 0;
        int64_t dt = 0;
        update_state(cpi, get_block_context(x, subsize), subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
        *get_sb_index(xd, subsize) = 1;
        pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), &rt, &dt, subsize,
                      get_block_context(x, subsize), INT64_MAX);
        if (rt == INT_MAX || dt == INT_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT_MAX;
          break;
        }

        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_SPLIT:
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
        int x_idx = (i & 1) * (ms >> 1);
        int y_idx = (i >> 1) * (ms >> 1);
        int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;

        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;

        *get_sb_index(xd, subsize) = i;

        rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
                         mi_col + x_idx, subsize, &rt, &dt, i != 3);
        if (rt == INT_MAX || dt == INT_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    default:
      assert(0);
  }

  set_partition_seg_context(cm, xd, mi_row, mi_col);
  pl = partition_plane_context(xd, bsize);
  if (last_part_rate < INT_MAX)
    last_part_rate += x->partition_cost[pl][partition];
  if (cpi->sf.adjust_partitioning_from_last_frame
      && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
      && (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
      && (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
    BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
    split_rate = 0;
    split_dist = 0;
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

    for (i = 0; i < 4; i++) {
      int x_idx = (i & 1) * (num_4x4_blocks_wide >> 2);
      int y_idx = (i >> 1) * (num_4x4_blocks_wide >> 2);
      int rt = 0;
      int64_t dt = 0;
      ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
      PARTITION_CONTEXT sl[8], sa[8];

      if ((mi_row + y_idx >= cm->mi_rows)
          || (mi_col + x_idx >= cm->mi_cols))
        continue;

      *get_sb_index(xd, split_subsize) = i;
      *get_sb_partitioning(x, bsize) = split_subsize;
      *get_sb_partitioning(x, split_subsize) = split_subsize;

      save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

      pick_sb_modes(cpi, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
                    split_subsize, get_block_context(x, split_subsize),
                    INT64_MAX);

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

      if (rt == INT_MAX || dt == INT_MAX) {
        split_rate = INT_MAX;
        split_dist = INT_MAX;
        break;
      }

      if (i != 3)
        encode_sb(cpi, tp, mi_row + y_idx, mi_col + x_idx, 0,
                  split_subsize);

      split_rate += rt;
      split_dist += dt;
      set_partition_seg_context(cm, xd, mi_row + y_idx, mi_col + x_idx);
      pl = partition_plane_context(xd, bsize);
      split_rate += x->partition_cost[pl][PARTITION_NONE];
    }

    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, bsize);
    if (split_rate < INT_MAX) {
      split_rate += x->partition_cost[pl][PARTITION_SPLIT];

      chosen_rate = split_rate;
      chosen_dist = split_dist;
    }
  }
  // If last_part is better set the partitioning to that...
  if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
      < RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
    m->mbmi.sb_type = bsize;
    if (bsize >= BLOCK_8X8)
      *(get_sb_partitioning(x, bsize)) = subsize;
    chosen_rate = last_part_rate;
    chosen_dist = last_part_dist;
  }
  // If none was better set the partitioning to that...
  if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
      > RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
    if (bsize >= BLOCK_8X8)
      *(get_sb_partitioning(x, bsize)) = bsize;
    chosen_rate = none_rate;
    chosen_dist = none_dist;
  }

  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  // We must have chosen a partitioning and encoding or we'll fail later on.
  // No other opportunities for success.
  if (bsize == BLOCK_64X64)
    assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);

  if (do_recon)
    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);

  *rate = chosen_rate;
  *dist = chosen_dist;
}
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
  BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8,
  BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16
};

static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
  BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
  BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, BLOCK_64X64,
  BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64
};
// Look at all the mode_info entries for blocks that are part of this
// partition and find the min and max values for sb_type.
// At the moment this is designed to work on a 64x64 SB but could be
// adjusted to use a size parameter.
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO * mi,
                                        BLOCK_SIZE *min_block_size,
                                        BLOCK_SIZE *max_block_size ) {
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  int sb_width_in_blocks = MI_BLOCK_SIZE;
  int sb_height_in_blocks = MI_BLOCK_SIZE;
  int i, j;
  int index = 0;

  // Check the sb_type for each block that belongs to this region.
  for (i = 0; i < sb_height_in_blocks; ++i) {
    for (j = 0; j < sb_width_in_blocks; ++j) {
      *min_block_size = MIN(*min_block_size, mi[index + j].mbmi.sb_type);
      *max_block_size = MAX(*max_block_size, mi[index + j].mbmi.sb_type);
    }
    index += xd->mode_info_stride;
  }
}
// Look at neighboring blocks and set a min and max partition size based on
// what they chose.
static void rd_auto_partition_range(VP9_COMP *cpi,
                                    BLOCK_SIZE *min_block_size,
                                    BLOCK_SIZE *max_block_size) {
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO *mi = xd->mode_info_context;
  MODE_INFO *above_sb64_mi;
  MODE_INFO *left_sb64_mi;
  const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
  const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
  const int left_in_image = xd->left_available && left_mbmi->in_image;
  const int above_in_image = xd->up_available && above_mbmi->in_image;

  if (cpi->sf.auto_min_max_partition_count <= 0) {
    cpi->sf.auto_min_max_partition_count =
        cpi->sf.auto_min_max_partition_interval;
    *min_block_size = BLOCK_4X4;
    *max_block_size = BLOCK_64X64;
  } else {
    --cpi->sf.auto_min_max_partition_count;

    // Set default values if no left or above neighbour
    if (!left_in_image && !above_in_image) {
      *min_block_size = BLOCK_4X4;
      *max_block_size = BLOCK_64X64;
    } else {
      // Default "min to max" and "max to min"
      *min_block_size = BLOCK_64X64;
      *max_block_size = BLOCK_4X4;

      // Find the min and max partition sizes used in the left SB64
      if (left_in_image) {
        left_sb64_mi = &mi[-MI_BLOCK_SIZE];
        get_sb_partition_size_range(cpi, left_sb64_mi,
                                    min_block_size, max_block_size);
      }

      // Find the min and max partition sizes used in the above SB64 taking
      // the values found for left as a starting point.
      if (above_in_image) {
        above_sb64_mi = &mi[-xd->mode_info_stride * MI_BLOCK_SIZE];
        get_sb_partition_size_range(cpi, above_sb64_mi,
                                    min_block_size, max_block_size);
      }

      // Give a bit of leeway on either side of the observed min and max
      *min_block_size = min_partition_size[*min_block_size];
      *max_block_size = max_partition_size[*max_block_size];
    }
  }
}
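// The tables bump the range out by one size class in each direction: e.g.
// if every neighbouring block used BLOCK_16X16, the search is still allowed
// to go down to BLOCK_8X8 (min_partition_size[BLOCK_16X16]) and up to
// BLOCK_32X32 (max_partition_size[BLOCK_16X16]).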
static void compute_fast_motion_search_level(VP9_COMP *cpi, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  // Only use 8x8 result for non HD videos.
  // int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0;
  int use_8x8 = 1;

  if (cm->frame_type && !cpi->is_src_frame_alt_ref &&
      ((use_8x8 && bsize == BLOCK_16X16) ||
       bsize == BLOCK_32X32 || bsize == BLOCK_64X64)) {
    int ref0 = 0, ref1 = 0, ref2 = 0, ref3 = 0;
    PICK_MODE_CONTEXT *block_context = NULL;

    if (bsize == BLOCK_16X16) {
      block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
    } else if (bsize == BLOCK_32X32) {
      block_context = x->mb_context[xd->sb_index];
    } else if (bsize == BLOCK_64X64) {
      block_context = x->sb32_context;
    }

    if (block_context) {
      ref0 = block_context[0].mic.mbmi.ref_frame[0];
      ref1 = block_context[1].mic.mbmi.ref_frame[0];
      ref2 = block_context[2].mic.mbmi.ref_frame[0];
      ref3 = block_context[3].mic.mbmi.ref_frame[0];
    }

    // Currently, only consider 4 inter reference frames.
    if (ref0 && ref1 && ref2 && ref3) {
      int d01, d23, d02, d13;

      // Motion vectors for the four subblocks.
      int16_t mvr0 = block_context[0].mic.mbmi.mv[0].as_mv.row;
      int16_t mvc0 = block_context[0].mic.mbmi.mv[0].as_mv.col;
      int16_t mvr1 = block_context[1].mic.mbmi.mv[0].as_mv.row;
      int16_t mvc1 = block_context[1].mic.mbmi.mv[0].as_mv.col;
      int16_t mvr2 = block_context[2].mic.mbmi.mv[0].as_mv.row;
      int16_t mvc2 = block_context[2].mic.mbmi.mv[0].as_mv.col;
      int16_t mvr3 = block_context[3].mic.mbmi.mv[0].as_mv.row;
      int16_t mvc3 = block_context[3].mic.mbmi.mv[0].as_mv.col;

      // Adjust sign if ref is alt_ref.
      if (cm->ref_frame_sign_bias[ref0]) {
        mvr0 *= -1;
        mvc0 *= -1;
      }

      if (cm->ref_frame_sign_bias[ref1]) {
        mvr1 *= -1;
        mvc1 *= -1;
      }

      if (cm->ref_frame_sign_bias[ref2]) {
        mvr2 *= -1;
        mvc2 *= -1;
      }

      if (cm->ref_frame_sign_bias[ref3]) {
        mvr3 *= -1;
        mvc3 *= -1;
      }

      // Calculate mv distances.
      d01 = MAX(abs(mvr0 - mvr1), abs(mvc0 - mvc1));
      d23 = MAX(abs(mvr2 - mvr3), abs(mvc2 - mvc3));
      d02 = MAX(abs(mvr0 - mvr2), abs(mvc0 - mvc2));
      d13 = MAX(abs(mvr1 - mvr3), abs(mvc1 - mvc3));
      if (d01 < FAST_MOTION_MV_THRESH && d23 < FAST_MOTION_MV_THRESH &&
          d02 < FAST_MOTION_MV_THRESH && d13 < FAST_MOTION_MV_THRESH) {
        // Set fast motion search level.
        x->fast_ms = 1;

        // Calculate prediction MV.
        x->pred_mv.as_mv.row = (mvr0 + mvr1 + mvr2 + mvr3) >> 2;
        x->pred_mv.as_mv.col = (mvc0 + mvc1 + mvc2 + mvc3) >> 2;
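        // pred_mv is the average of the four subblock vectors; since they
        // lie within FAST_MOTION_MV_THRESH of each other, it is a good seed
        // for the motion search at this larger block size.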
        if (ref0 == ref1 && ref1 == ref2 && ref2 == ref3 &&
            d01 < 2 && d23 < 2 && d02 < 2 && d13 < 2) {
          // Set fast motion search level.
          x->fast_ms = 2;

          if (!d01 && !d23 && !d02 && !d13) {
            x->fast_ms = 3;
            x->subblock_ref = ref0;
          }
        }
      }
    }
  }
}
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, int *rate,
                              int64_t *dist, int do_recon, int64_t best_rd) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD * const xd = &x->e_mbd;
  const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  TOKENEXTRA *tp_orig = *tp;
  int i, pl;
  BLOCK_SIZE subsize;
  int this_rate, sum_rate = 0, best_rate = INT_MAX;
  int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
  int64_t sum_rd = 0;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + ms >= cm->mi_rows);
  const int force_vert_split = (mi_col + ms >= cm->mi_cols);

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed = !force_vert_split && bsize >= BLOCK_8X8;
  int partition_vert_allowed = !force_horz_split && bsize >= BLOCK_8X8;

  int partition_split_done = 0;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (xd->ab_index != 0) {
      *rate = 0;
      *dist = 0;
      return;
    }
  }

  assert(mi_height_log2(bsize) == mi_width_log2(bsize));
  // Determine partition types in search according to the speed features.
  // The threshold set here has to be of square block size.
  if (cpi->sf.auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= cpi->sf.max_partition_size &&
                               bsize >= cpi->sf.min_partition_size);
    partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize > cpi->sf.min_partition_size) ||
                               force_horz_split);
    partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize > cpi->sf.min_partition_size) ||
                               force_vert_split);
    do_split &= bsize > cpi->sf.min_partition_size;
  }
  if (cpi->sf.use_square_partition_only) {
    partition_horz_allowed &= force_horz_split;
    partition_vert_allowed &= force_vert_split;
  }
  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  if (cpi->sf.disable_split_var_thresh && partition_none_allowed) {
    unsigned int source_variancey;
    vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
    source_variancey = get_sby_perpixel_variance(cpi, x, bsize);
    if (source_variancey < cpi->sf.disable_split_var_thresh) {
      do_split = 0;
      if (source_variancey < cpi->sf.disable_split_var_thresh / 2)
        do_rect = 0;
    }
  }
  // PARTITION_NONE
  if (partition_none_allowed) {
    pick_sb_modes(cpi, mi_row, mi_col, &this_rate, &this_dist, bsize,
                  get_block_context(x, bsize), best_rd);
    if (this_rate != INT_MAX) {
      if (bsize >= BLOCK_8X8) {
        set_partition_seg_context(cm, xd, mi_row, mi_col);
        pl = partition_plane_context(xd, bsize);
        this_rate += x->partition_cost[pl][PARTITION_NONE];
      }
      sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
      if (sum_rd < best_rd) {
        best_rate = this_rate;
        best_dist = this_dist;
        best_rd = sum_rd;
        if (bsize >= BLOCK_8X8)
          *(get_sb_partitioning(x, bsize)) = bsize;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }
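  // RDCOST folds rate and distortion into a single Lagrangian cost using
  // rdmult/rddiv, so each partition type below is kept only if its cost
  // beats the best (best_rd) seen so far at this block size.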
  // PARTITION_SPLIT
  // TODO(jingning): use the motion vectors given by the above search as
  // the starting point of motion search in the following partition type check.
  if (do_split) {
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
      const int x_idx = (i & 1) * ms;
      const int y_idx = (i >> 1) * ms;

      if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
        continue;

      *get_sb_index(xd, subsize) = i;

      rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize,
                        &this_rate, &this_dist, i != 3, best_rd - sum_rd);

      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd && i == 4) {
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      sum_rate += x->partition_cost[pl][PARTITION_SPLIT];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rate = sum_rate;
        best_dist = sum_dist;
        best_rd = sum_rd;
        *(get_sb_partitioning(x, bsize)) = subsize;
      }
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
    partition_split_done = 1;
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  x->fast_ms = 0;
  x->pred_mv.as_int = 0;
  x->subblock_ref = 0;

  if (partition_split_done &&
      cpi->sf.using_small_partition_info) {
    compute_fast_motion_search_level(cpi, bsize);
  }
  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    *get_sb_index(xd, subsize) = 0;
    pick_sb_modes(cpi, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                  get_block_context(x, subsize), best_rd);
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);

    if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) {
      update_state(cpi, get_block_context(x, subsize), subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);

      *get_sb_index(xd, subsize) = 1;
      pick_sb_modes(cpi, mi_row + ms, mi_col, &this_rate,
                    &this_dist, subsize, get_block_context(x, subsize),
                    best_rd - sum_rd);
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      sum_rate += x->partition_cost[pl][PARTITION_HORZ];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rd = sum_rd;
        best_rate = sum_rate;
        best_dist = sum_dist;
        *(get_sb_partitioning(x, bsize)) = subsize;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);

    *get_sb_index(xd, subsize) = 0;
    pick_sb_modes(cpi, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                  get_block_context(x, subsize), best_rd);
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
    if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) {
      update_state(cpi, get_block_context(x, subsize), subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);

      *get_sb_index(xd, subsize) = 1;
      pick_sb_modes(cpi, mi_row, mi_col + ms, &this_rate,
                    &this_dist, subsize, get_block_context(x, subsize),
                    best_rd - sum_rd);
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      set_partition_seg_context(cm, xd, mi_row, mi_col);
      pl = partition_plane_context(xd, bsize);
      sum_rate += x->partition_cost[pl][PARTITION_VERT];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rate = sum_rate;
        best_dist = sum_dist;
        best_rd = sum_rd;
        *(get_sb_partitioning(x, bsize)) = subsize;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  *rate = best_rate;
  *dist = best_dist;

  if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon)
    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rate < INT_MAX);
    assert(best_dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}
// Examines the 64x64 block and chooses a best reference frame
static void rd_pick_reference_frame(VP9_COMP *cpi, int mi_row, int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD * const xd = &x->e_mbd;
  int bsl = b_width_log2(BLOCK_64X64), bs = 1 << bsl;
  int ms = bs / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int pl;
  int r;
  int64_t d;

  save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);

  // Default is no mask (all reference frames allowed).
  cpi->ref_frame_mask = 0;

  // Do RD search for 64x64.
  if ((mi_row + (ms >> 1) < cm->mi_rows) &&
      (mi_col + (ms >> 1) < cm->mi_cols)) {
    cpi->set_ref_frame_mask = 1;
    pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_64X64,
                  get_block_context(x, BLOCK_64X64), INT64_MAX);
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, BLOCK_64X64);
    r += x->partition_cost[pl][PARTITION_NONE];

    *(get_sb_partitioning(x, BLOCK_64X64)) = BLOCK_64X64;
    cpi->set_ref_frame_mask = 0;
  }

  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
}
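/* When cpi->sf.reference_masking is enabled, this full-size RD probe runs
 * once per 64x64 block with set_ref_frame_mask active so that clearly
 * inferior reference frames can be excluded from the per-partition mode
 * search that follows. Entropy context is saved and restored around the
 * probe so it leaves no side effects.
 */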
static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
                          int *totalrate) {
  VP9_COMMON * const cm = &cpi->common;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
  vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));

  // Code each SB in the row
  for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    int dummy_rate;
    int64_t dummy_dist;

    // Initialize a mask of modes that we will not consider;
    // cpi->unused_mode_skip_mask = 0x0000000AAE17F800 (test no golden)
    if (cpi->common.frame_type == KEY_FRAME)
      cpi->unused_mode_skip_mask = 0;
    else
      cpi->unused_mode_skip_mask = 0xFFFFFFFFFFFFFE00;

    if (cpi->sf.reference_masking)
      rd_pick_reference_frame(cpi, mi_row, mi_col);

    if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
        cpi->sf.use_one_partition_size_always) {
      const int idx_str = cm->mode_info_stride * mi_row + mi_col;
      MODE_INFO *m = cm->mi + idx_str;
      MODE_INFO *p = cm->prev_mi + idx_str;

      cpi->mb.source_variance = UINT_MAX;
      if (cpi->sf.use_one_partition_size_always) {
        set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
        set_partitioning(cpi, m, cpi->sf.always_this_block_size);
        rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1);
      } else if (cpi->sf.partition_by_variance) {
        choose_partitioning(cpi, cm->mi, mi_row, mi_col);
        rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1);
      } else {
        // use_lastframe_partitioning: reuse the previous frame's partition
        // unless it is time to redo the full search.
        if ((cpi->common.current_video_frame
            % cpi->sf.last_partitioning_redo_frequency) == 0
            || cm->prev_mi == 0
            || cpi->common.show_frame == 0
            || cpi->common.frame_type == KEY_FRAME
            || cpi->is_src_frame_alt_ref) {
          // If required set upper and lower partition size limits
          if (cpi->sf.auto_min_max_partition_size) {
            set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
            rd_auto_partition_range(cpi,
                                    &cpi->sf.min_partition_size,
                                    &cpi->sf.max_partition_size);
          }
          rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
                            &dummy_rate, &dummy_dist, 1, INT64_MAX);
        } else {
          copy_partitioning(cpi, m, p);
          rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
                           &dummy_rate, &dummy_dist, 1);
        }
      }
    } else {
      // If required set upper and lower partition size limits
      if (cpi->sf.auto_min_max_partition_size) {
        set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
        rd_auto_partition_range(cpi, &cpi->sf.min_partition_size,
                                &cpi->sf.max_partition_size);
      }

      rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
                        &dummy_rate, &dummy_dist, 1, INT64_MAX);
    }
  }
}
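/* Per-SB partition strategy summary: a fixed partition size
 * (use_one_partition_size_always), a variance-driven partition
 * (partition_by_variance), or reuse of the previous frame's partition with
 * a periodic full redo (use_lastframe_partitioning) can each stand in for
 * the full rd_pick_partition search, trading compression for speed.
 */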
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);

  x->act_zbin_adj = 0;

  xd->mode_info_stride = cm->mode_info_stride;

  // reset intra mode contexts
  if (cm->frame_type == KEY_FRAME)
    vp9_init_mbmode_probs(cm);

  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);

  // TODO(jkoleszar): are these initializations required?
  setup_pre_planes(xd, 0, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]],
                   0, 0, NULL);
  setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);

  setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);

  xd->mode_info_context->mbmi.mode = DC_PRED;
  xd->mode_info_context->mbmi.uv_mode = DC_PRED;

  vp9_zero(cpi->y_mode_count);
  vp9_zero(cpi->y_uv_mode_count);
  vp9_zero(cm->counts.inter_mode);
  vp9_zero(cpi->partition_count);
  vp9_zero(cpi->intra_inter_count);
  vp9_zero(cpi->comp_inter_count);
  vp9_zero(cpi->single_ref_count);
  vp9_zero(cpi->comp_ref_count);
  vp9_zero(cm->counts.tx);
  vp9_zero(cm->counts.mbskip);

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(cm->above_context[0], 0,
             sizeof(ENTROPY_CONTEXT) * 2 * MAX_MB_PLANE * aligned_mi_cols);
  vpx_memset(cm->above_seg_context, 0,
             sizeof(PARTITION_CONTEXT) * aligned_mi_cols);
}
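/* The above-context memset relies on the per-plane above_context arrays
 * being carved out of one contiguous allocation: with MAX_MB_PLANE == 3 and
 * two ENTROPY_CONTEXT entries per plane per mi column (the factor
 * 2 * MAX_MB_PLANE), a single memset clears Y, U and V in one call.
 */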
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
  if (lossless) {
    // printf("Switching to lossless\n");
    cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
    cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
    cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
    cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
    cpi->mb.optimize = 0;
    cpi->common.lf.filter_level = 0;
    cpi->zbin_mode_boost_enabled = 0;
    cpi->common.tx_mode = ONLY_4X4;
  } else {
    // printf("Not lossless\n");
    cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
    cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
    cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
    cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
  }
}
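/* Lossless coding swaps the DCT for a Walsh-Hadamard transform, which is
 * exactly invertible in integer arithmetic, and pins the transform size to
 * 4x4. Coefficient optimization, loop filtering and zbin boosting are
 * disabled, since any smoothing or quantization tweak would break the
 * bit-exact reconstruction that lossless mode promises.
 */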
static void switch_tx_mode(VP9_COMP *cpi) {
  if (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
      cpi->common.tx_mode >= ALLOW_32X32)
    cpi->common.tx_mode = ALLOW_32X32;
}
static void encode_frame_internal(VP9_COMP *cpi) {
  int mi_row;
  MACROBLOCK * const x = &cpi->mb;
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCKD * const xd = &x->e_mbd;
  int totalrate;

//  fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
//           cpi->common.current_video_frame, cpi->common.show_frame,
//           cm->frame_type);

#if DBG_PRNT_SEGMAP
  {
    FILE *statsfile = fopen("segmap2.stt", "a");
    fprintf(statsfile, "\n");
    fclose(statsfile);
  }
#endif

  totalrate = 0;

  // Reset frame count of inter 0,0 motion vector usage.
  cpi->inter_zz_count = 0;

  vp9_zero(cm->counts.switchable_interp);
  vp9_zero(cpi->txfm_stepdown_count);

  xd->mode_info_context = cm->mi;
  xd->prev_mode_info_context = cm->prev_mi;

  vp9_zero(cpi->NMVcount);
  vp9_zero(cpi->coef_counts);
  vp9_zero(cm->counts.eob_branch);

  cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
                           && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
  switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
  vp9_initialize_me_consts(cpi, cm->base_qindex);
  switch_tx_mode(cpi);

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Initialize encode frame context.
    init_encode_frame_mb_context(cpi);

    // Build a frame level activity map
    build_activity_map(cpi);
  }

  // Re-initialize encode frame context.
  init_encode_frame_mb_context(cpi);

  vp9_zero(cpi->rd_comp_pred_diff);
  vp9_zero(cpi->rd_filter_diff);
  vp9_zero(cpi->rd_tx_select_diff);
  vp9_zero(cpi->rd_tx_select_threshes);
  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

    {
      // Take tiles into account and give start/end MB
      int tile_col, tile_row;
      TOKENEXTRA *tp = cpi->tok;
      const int tile_cols = 1 << cm->log2_tile_cols;
      const int tile_rows = 1 << cm->log2_tile_rows;

      for (tile_row = 0; tile_row < tile_rows; tile_row++) {
        vp9_get_tile_row_offsets(cm, tile_row);

        for (tile_col = 0; tile_col < tile_cols; tile_col++) {
          TOKENEXTRA *tp_old = tp;

          // For each row of SBs in the frame
          vp9_get_tile_col_offsets(cm, tile_col);
          for (mi_row = cm->cur_tile_mi_row_start;
               mi_row < cm->cur_tile_mi_row_end; mi_row += 8)
            encode_sb_row(cpi, mi_row, &tp, &totalrate);

          cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
          assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols));
        }
      }
    }

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }
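  /* Tiles are visited left-to-right, top-to-bottom; tokens are written
   * sequentially into cpi->tok, and tok_count records how many tokens each
   * tile produced so the bitstream packer can later locate each tile's
   * tokens. E.g. with log2_tile_cols == 2 the frame is split into
   * 1 << 2 == 4 tile columns.
   */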
  if (cpi->sf.skip_encode_sb) {
    int j;
    unsigned int intra_count = 0, inter_count = 0;
    for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
      intra_count += cpi->intra_inter_count[j][0];
      inter_count += cpi->intra_inter_count[j][1];
    }
    cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count);
    cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME);
    cpi->sf.skip_encode_frame &= cm->show_frame;
  } else {
    cpi->sf.skip_encode_frame = 0;
  }
  // 256 rate units to the bit,
  // projected_frame_size in units of BYTES
  cpi->projected_frame_size = totalrate >> 8;
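  /* Example: with totalrate == 2560000 rate units, totalrate >> 8 divides
   * by 256 and yields a projected size of 10000.
   */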
#if 0
  // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
        + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
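/* The double negation collapses each flag to 0 or 1, so the sum counts the
 * enabled references. E.g. ref_flags == VP9_LAST_FLAG | VP9_ALT_FLAG gives
 * 1 + 1 + 0 == 2, so dual-reference prediction is considered available.
 */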
static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
  int x, y;

  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++) {
      if (!mi[y * mis + x].mbmi.skip_coeff)
        return 0;
    }
  }

  return 1;
}

static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
                          TX_SIZE tx_size) {
  int x, y;

  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++)
      mi[y * mis + x].mbmi.tx_size = tx_size;
  }
}
static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi, int mis,
                                   TX_SIZE max_tx_size, int bw, int bh,
                                   int mi_row, int mi_col, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MB_MODE_INFO *const mbmi = &mi->mbmi;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (mbmi->tx_size > max_tx_size) {
    MACROBLOCK * const x = &cpi->mb;
    MACROBLOCKD * const xd = &x->e_mbd;
    const int ymbs = MIN(bh, cm->mi_rows - mi_row);
    const int xmbs = MIN(bw, cm->mi_cols - mi_col);

    xd->mode_info_context = mi;
    assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
           get_skip_flag(mi, mis, ymbs, xmbs));
    set_txfm_flag(mi, mis, ymbs, xmbs, max_tx_size);
  }
}
static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
                                    TX_SIZE max_tx_size, int mi_row,
                                    int mi_col, BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mode_info_stride;
  int bw, bh;
  const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
  bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];

  if (bw == bs && bh == bs) {
    reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, bs, mi_row,
                           mi_col, bsize);
  } else if (bw == bs && bh < bs) {
    reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, hbs, mi_row, mi_col,
                           bsize);
    reset_skip_txfm_size_b(cpi, mi + hbs * mis, mis, max_tx_size, bs, hbs,
                           mi_row + hbs, mi_col, bsize);
  } else if (bw < bs && bh == bs) {
    reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, hbs, bs, mi_row, mi_col,
                           bsize);
    reset_skip_txfm_size_b(cpi, mi + hbs, mis, max_tx_size, hbs, bs, mi_row,
                           mi_col + hbs, bsize);
  } else {
    const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
    int n;

    assert(bw < bs && bh < bs);

    for (n = 0; n < 4; n++) {
      const int mi_dc = hbs * (n & 1);
      const int mi_dr = hbs * (n >> 1);

      reset_skip_txfm_size_sb(cpi, &mi[mi_dr * mis + mi_dc], max_tx_size,
                              mi_row + mi_dr, mi_col + mi_dc, subsize);
    }
  }
}
static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
  VP9_COMMON * const cm = &cpi->common;
  int mi_row, mi_col;
  const int mis = cm->mode_info_stride;
  MODE_INFO *mi, *mi_ptr = cm->mi;

  for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
    mi = mi_ptr;
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8)
      reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64);
  }
}
// Returns 0 for key frames, 3 for ARF overlay frames, 1 for golden/altref
// updates and 2 for ordinary inter frames.
static int get_frame_type(VP9_COMP *cpi) {
  if (cpi->common.frame_type == KEY_FRAME)
    return 0;
  else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return 3;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return 1;
  else
    return 2;
}
static void select_tx_mode(VP9_COMP *cpi) {
  if (cpi->oxcf.lossless) {
    cpi->common.tx_mode = ONLY_4X4;
  } else if (cpi->common.current_video_frame == 0) {
    cpi->common.tx_mode = TX_MODE_SELECT;
  } else {
    if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
      cpi->common.tx_mode = ALLOW_32X32;
    } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
      int frame_type = get_frame_type(cpi);
      cpi->common.tx_mode =
          cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32]
          > cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
          ALLOW_32X32 : TX_MODE_SELECT;
    } else {
      unsigned int total = 0;
      int i;

      for (i = 0; i < TX_SIZES; ++i)
        total += cpi->txfm_stepdown_count[i];
      if (total) {
        double fraction = (double)cpi->txfm_stepdown_count[0] / total;
        cpi->common.tx_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
        // printf("fraction = %f\n", fraction);
      }  // else keep unchanged
    }
  }
}
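/* Illustration of the step-down heuristic: with txfm_stepdown_count ==
 * {950, 30, 20, 0}, total == 1000 and fraction == 0.95 > 0.90, so the frame
 * is coded with ALLOW_32X32 rather than paying per-block TX_MODE_SELECT
 * signaling.
 */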
void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON * const cm = &cpi->common;

  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
  if ((cm->ref_frame_sign_bias[ALTREF_FRAME]
      == cm->ref_frame_sign_bias[GOLDEN_FRAME])
      || (cm->ref_frame_sign_bias[ALTREF_FRAME]
          == cm->ref_frame_sign_bias[LAST_FRAME])) {
    cm->allow_comp_inter_inter = 0;
  } else {
    cm->allow_comp_inter_inter = 1;
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  }
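  /* A reference's sign bias in effect records which temporal direction it
   * predicts from. Compound inter-inter prediction is only enabled when the
   * ALT ref points the opposite way from LAST and GOLDEN, giving the RD
   * loop one fixed reference plus two interchangeable ones.
   */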
  if (cpi->sf.RD) {
    int i, pred_type;
    INTERPOLATIONFILTERTYPE filter_type;
    /*
     * This code does a single RD pass over the whole frame assuming
     * either compound, single or hybrid prediction as per whatever has
     * worked best for that type of frame in the past.
     * It also predicts whether another coding mode would have worked
     * better than this coding mode. If that is the case, it remembers
     * that for subsequent frames.
     * It does the same analysis for transform size selection also.
     */
    int frame_type = get_frame_type(cpi);

    /* prediction (compound, single or hybrid) mode selection */
    if (frame_type == 3 || !cm->allow_comp_inter_inter)
      pred_type = SINGLE_PREDICTION_ONLY;
    else if (cpi->rd_prediction_type_threshes[frame_type][1]
             > cpi->rd_prediction_type_threshes[frame_type][0]
             && cpi->rd_prediction_type_threshes[frame_type][1]
             > cpi->rd_prediction_type_threshes[frame_type][2]
             && check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
      pred_type = COMP_PREDICTION_ONLY;
    else if (cpi->rd_prediction_type_threshes[frame_type][0]
             > cpi->rd_prediction_type_threshes[frame_type][2])
      pred_type = SINGLE_PREDICTION_ONLY;
    else
      pred_type = HYBRID_PREDICTION;

    /* filter type selection */
    // FIXME(rbultje) for some odd reason, we often select smooth_filter
    // as default filter for ARF overlay frames. This is a REALLY BAD
    // IDEA so we explicitly disable it here.
    if (frame_type != 3 &&
        cpi->rd_filter_threshes[frame_type][1] >
        cpi->rd_filter_threshes[frame_type][0] &&
        cpi->rd_filter_threshes[frame_type][1] >
        cpi->rd_filter_threshes[frame_type][2] &&
        cpi->rd_filter_threshes[frame_type][1] >
        cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
      filter_type = EIGHTTAP_SMOOTH;
    } else if (cpi->rd_filter_threshes[frame_type][2] >
               cpi->rd_filter_threshes[frame_type][0] &&
               cpi->rd_filter_threshes[frame_type][2] >
               cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
      filter_type = EIGHTTAP_SHARP;
    } else if (cpi->rd_filter_threshes[frame_type][0] >
               cpi->rd_filter_threshes[frame_type][SWITCHABLE_FILTERS]) {
      filter_type = EIGHTTAP;
    } else {
      filter_type = SWITCHABLE;
    }

    cpi->mb.e_mbd.lossless = 0;
    if (cpi->oxcf.lossless) {
      cpi->mb.e_mbd.lossless = 1;
    }

    /* transform size selection (4x4, 8x8, 16x16 or select-per-mb) */
    select_tx_mode(cpi);
    cpi->common.comp_pred_mode = pred_type;
    cpi->common.mcomp_filter_type = filter_type;
    encode_frame_internal(cpi);

    for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
      const int diff = (int) (cpi->rd_comp_pred_diff[i] / cpi->common.MBs);
      cpi->rd_prediction_type_threshes[frame_type][i] += diff;
      cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
    }

    for (i = 0; i <= SWITCHABLE_FILTERS; i++) {
      const int64_t diff = cpi->rd_filter_diff[i] / cpi->common.MBs;
      cpi->rd_filter_threshes[frame_type][i] =
          (cpi->rd_filter_threshes[frame_type][i] + diff) / 2;
    }

    for (i = 0; i < TX_MODES; ++i) {
      int64_t pd = cpi->rd_tx_select_diff[i];
      int diff;
      if (i == TX_MODE_SELECT)
        pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv,
                     2048 * (TX_SIZES - 1), 0);
      diff = (int) (pd / cpi->common.MBs);
      cpi->rd_tx_select_threshes[frame_type][i] += diff;
      cpi->rd_tx_select_threshes[frame_type][i] /= 2;
    }
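    /* Each threshold is updated as a running average: the per-MB RD
     * difference observed this frame is added and the result halved, so
     * recent frames dominate while older history decays geometrically.
     */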
    if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
        single_count_zero += cpi->comp_inter_count[i][0];
        comp_count_zero += cpi->comp_inter_count[i][1];
      }

      if (comp_count_zero == 0) {
        cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
        vp9_zero(cpi->comp_inter_count);
      } else if (single_count_zero == 0) {
        cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
        vp9_zero(cpi->comp_inter_count);
      }
    }
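    /* If, after encoding, no block actually chose compound prediction (or
     * none chose single), the frame-level mode is collapsed to the one that
     * was used, which avoids per-block signaling of the unused option.
     */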
    if (cpi->common.tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        count4x4 += cm->counts.tx.p32x32[i][TX_4X4];
        count4x4 += cm->counts.tx.p16x16[i][TX_4X4];
        count4x4 += cm->counts.tx.p8x8[i][TX_4X4];

        count8x8_lp += cm->counts.tx.p32x32[i][TX_8X8];
        count8x8_lp += cm->counts.tx.p16x16[i][TX_8X8];
        count8x8_8x8p += cm->counts.tx.p8x8[i][TX_8X8];

        count16x16_16x16p += cm->counts.tx.p16x16[i][TX_16X16];
        count16x16_lp += cm->counts.tx.p32x32[i][TX_16X16];
        count32x32 += cm->counts.tx.p32x32[i][TX_32X32];
      }

      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0
          && count32x32 == 0) {
        cpi->common.tx_mode = ALLOW_8X8;
        reset_skip_txfm_size(cpi, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0
                 && count8x8_lp == 0 && count16x16_lp == 0
                 && count32x32 == 0) {
        cpi->common.tx_mode = ONLY_4X4;
        reset_skip_txfm_size(cpi, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cpi->common.tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cpi->common.tx_mode = ALLOW_16X16;
        reset_skip_txfm_size(cpi, TX_16X16);
      }
    }
  } else {
    encode_frame_internal(cpi);
  }
}
static void sum_intra_stats(VP9_COMP *cpi, const MODE_INFO *mi) {
  const MB_PREDICTION_MODE y_mode = mi->mbmi.mode;
  const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  ++cpi->y_uv_mode_count[y_mode][uv_mode];

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_blocks_high)
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide)
        ++cpi->y_mode_count[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++cpi->y_mode_count[size_group_lookup[bsize]][y_mode];
  }
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->act_zbin_adj = *(x->mb_activity_ptr);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  // Apply the masking to the RD multiplier.
  a = act + 4 * cpi->activity_avg;
  b = 4 * act + cpi->activity_avg;

  if (act > cpi->activity_avg)
    x->act_zbin_adj = (int) (((int64_t) b + (a >> 1)) / a) - 1;
  else
    x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b);
#endif
}
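/* Worked example of the masking ratio: with act == 2 * activity_avg,
 * a == 6 * activity_avg and b == 9 * activity_avg, so act_zbin_adj ==
 * (9 + 3) / 6 - 1 == 1, a mild zbin widening for busy blocks. The a >> 1
 * (resp. b >> 1) term implements round-to-nearest division.
 */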
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCK * const x = &cpi->mb;
  MACROBLOCKD * const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mode_info_context;
  MB_MODE_INFO *mbmi = &mi->mbmi;
  unsigned int segment_id = mbmi->segment_id;
  const int mis = cm->mode_info_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
                    xd->q_index < QIDX_SKIP_THRESH);
  if (cm->frame_type == KEY_FRAME) {
    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
      adjust_act_zbin(cpi, x);
      vp9_update_zbin_extra(cpi, x);
    }
  } else {
    vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
      // Adjust the zbin based on this MB rate.
      adjust_act_zbin(cpi, x);
    }

    // Experimental code. Special case for gf and arf zeromv modes.
    // Increase zbin size to suppress noise
    cpi->zbin_mode_boost = 0;
    if (cpi->zbin_mode_boost_enabled) {
      if (is_inter_block(mbmi)) {
        if (mbmi->mode == ZEROMV) {
          if (mbmi->ref_frame[0] != LAST_FRAME)
            cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          else
            cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
        } else if (mbmi->sb_type < BLOCK_8X8) {
          cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
        } else {
          cpi->zbin_mode_boost = MV_ZBIN_BOOST;
        }
      } else {
        cpi->zbin_mode_boost = INTRA_ZBIN_BOOST;
      }
    }

    vp9_update_zbin_extra(cpi, x);
  }
  if (!is_inter_block(mbmi)) {
    vp9_encode_intra_block_y(x, MAX(bsize, BLOCK_8X8));
    vp9_encode_intra_block_uv(x, MAX(bsize, BLOCK_8X8));
    if (output_enabled)
      sum_intra_stats(cpi, mi);
  } else {
    int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame[0])];
    YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx];
    YV12_BUFFER_CONFIG *second_ref_fb = NULL;
    if (mbmi->ref_frame[1] > 0) {
      idx = cm->ref_frame_map[get_ref_frame_idx(cpi, mbmi->ref_frame[1])];
      second_ref_fb = &cm->yv12_fb[idx];
    }

    assert(cm->frame_type != KEY_FRAME);

    setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
                     &xd->scale_factor[0]);
    setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
                     &xd->scale_factor[1]);

    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
  }
  if (!is_inter_block(mbmi)) {
    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  } else if (!x->skip) {
    vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  } else {
    // x->skip: no residual is coded, so only the skip contexts are updated.
    int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.skip_coeff : 0;
    mb_skip_context += (mi - mis)->mbmi.skip_coeff;

    mbmi->skip_coeff = 1;
    if (output_enabled)
      cm->counts.mbskip[mb_skip_context][1]++;
    reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
  }

  // copy skip flag on all mb_mode_info contexts in this SB
  // if this was a skip at this txfm size
  vp9_set_pred_flag_mbskip(cm, bsize, mi_row, mi_col, mi->mbmi.skip_coeff);
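  /* mb_skip_context is the sum of the left and above neighbors' skip flags
   * (0, 1 or 2); it selects which mbskip probability the bitstream uses, so
   * the counts gathered here must index the same context the decoder will
   * derive.
   */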
  if (output_enabled) {
    if (cm->tx_mode == TX_MODE_SELECT &&
        mbmi->sb_type >= BLOCK_8X8 &&
        !(is_inter_block(mbmi) &&
          (mbmi->skip_coeff ||
           vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) {
      const uint8_t context = vp9_get_pred_context_tx_size(xd);
      update_tx_counts(bsize, context, mbmi->tx_size, &cm->counts.tx);
    } else {
      int x, y;
      TX_SIZE sz = (cm->tx_mode == TX_MODE_SELECT) ? TX_32X32 : cm->tx_mode;
      // The new intra coding scheme requires no change of transform size
      if (is_inter_block(&mi->mbmi)) {
        if (sz == TX_32X32 && bsize < BLOCK_32X32)
          sz = TX_16X16;
        if (sz == TX_16X16 && bsize < BLOCK_16X16)
          sz = TX_8X8;
        if (sz == TX_8X8 && bsize < BLOCK_8X8)
          sz = TX_4X4;
      } else if (bsize >= BLOCK_8X8) {
        sz = mbmi->tx_size;
      } else {
        sz = TX_4X4;
      }

      // Propagate the implied transform size to every mi unit in the block
      // so subsequent context derivation stays consistent.
      for (y = 0; y < mi_height; y++)
        for (x = 0; x < mi_width; x++)
          if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
            mi[mis * y + x].mbmi.tx_size = sz;
    }
  }
}