/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"

#define GF_ZEROMV_ZBIN_BOOST 0
#define LF_ZEROMV_ZBIN_BOOST 0
#define MV_ZBIN_BOOST        0
#define SPLIT_MV_ZBIN_BOOST  0
#define INTRA_ZBIN_BOOST     0

static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) {

static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize);

static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);

// activity_avg must be positive, or flat regions could get a zero weight
// (infinite lambda), which confounds analysis.
// This also avoids the need for divide by zero checks in
// vp9_activity_masking().
#define ACTIVITY_AVG_MIN 64

// Motion vector component magnitude threshold for defining fast motion.
#define FAST_MOTION_MV_THRESH 24

// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128

static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi,
  unsigned int var, sse;
  var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
                           VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
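
// For example, at BLOCK_64X64 num_pels_log2_lookup[bs] is 12 (64 * 64 = 4096
// pixels), so a raw block variance of 40960 maps to a per-pixel value of
// ROUND_POWER_OF_TWO(40960, 12) = (40960 + 2048) >> 12 = 10.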
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
  int offset = (mi_row * MI_SIZE) * yv12->y_stride + (mi_col * MI_SIZE);
  unsigned int var, sse;
  var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
                           x->plane[0].src.stride,
                           yv12->y_buffer + offset,
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);

static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi,
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb,

static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb,

// Lighter version of set_offsets that only sets the mode info
static INLINE void set_modeinfo_offsets(VP9_COMMON *const cm,
                                        MACROBLOCKD *const xd,
  const int idx_str = xd->mode_info_stride * mi_row + mi_col;
  xd->mi_8x8 = cm->mi_grid_visible + idx_str;
  xd->mi_8x8[0] = cm->mi + idx_str;

static int is_block_in_mb_map(VP9_COMP *cpi, int mi_row, int mi_col,
  VP9_COMMON *const cm = &cpi->common;
  const int mb_rows = cm->mb_rows;
  const int mb_cols = cm->mb_cols;
  const int mb_row = mi_row >> 1;
  const int mb_col = mi_col >> 1;
  const int mb_width = num_8x8_blocks_wide_lookup[bsize] >> 1;
  const int mb_height = num_8x8_blocks_high_lookup[bsize] >> 1;

  if (bsize <= BLOCK_16X16) {
    return cpi->active_map[mb_row * mb_cols + mb_col];

  for (r = 0; r < mb_height; ++r) {
    for (c = 0; c < mb_width; ++c) {
      int row = mb_row + r;
      int col = mb_col + c;
      if (row >= mb_rows || col >= mb_cols)
      if (cpi->active_map[row * mb_cols + col])

static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        int mi_row, int mi_col, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const int mb_row = mi_row >> 1;
  const int mb_col = mi_col >> 1;
  const int idx_map = mb_row * cm->mb_cols + mb_col;
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col);

  // Activity map pointer
  x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];

  if (cpi->active_map_enabled && !x->e_mbd.lossless) {
    x->in_active_map = is_block_in_mb_map(cpi, mi_row, mi_col, bsize);
    x->in_active_map = 1;

  set_modeinfo_offsets(cm, xd, mi_row, mi_col);

  mbmi = &xd->mi_8x8[0]->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond this range do not produce a new/different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
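  // Illustrative example (assuming MI_SIZE == 8 and VP9_INTERP_EXTEND == 4,
  // their values elsewhere in the codebase): a 16x16 block (mi_width ==
  // mi_height == 2) at mi_row == 4 gets mv_row_min == -((4 + 2) * 8 + 4)
  // == -52; any larger negative row motion would move the prediction block,
  // plus the interpolation filter extension, entirely above the frame.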
  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  x->rddiv = cpi->RDDIV;
  x->rdmult = cpi->RDMULT;

  if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
    const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                               : cm->last_frame_seg_map;
    mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);

  vp9_init_plane_quantizers(cpi, x);

  if (seg->enabled && cpi->seg0_cnt > 0 &&
      !vp9_segfeature_active(seg, 0, SEG_LVL_REF_FRAME) &&
      vp9_segfeature_active(seg, 1, SEG_LVL_REF_FRAME)) {
    cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
    const int y = mb_row & ~3;
    const int x = mb_col & ~3;
    const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
    const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
    const int tile_progress = tile->mi_col_start * cm->mb_rows >> 1;
    const int mb_cols = (tile->mi_col_end - tile->mi_col_start) >> 1;

    cpi->seg0_progress = ((y * mb_cols + x * 4 + p32 + p16 + tile_progress)

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;

static void duplicate_mode_info_in_sb(VP9_COMMON *const cm,
                                      MACROBLOCKD *const xd,
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  const int mis = xd->mode_info_stride;

  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) {
      if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
        xd->mi_8x8[j * mis + i] = xd->mi_8x8[0];

static void set_block_size(VP9_COMP *const cpi,
                           const TileInfo *const tile,
                           int mi_row, int mi_col,
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    MACROBLOCKD *const xd = &cpi->mb.e_mbd;
    set_modeinfo_offsets(&cpi->common, xd, mi_row, mi_col);
    xd->mi_8x8[0]->mbmi.sb_type = bsize;
    duplicate_mode_info_in_sb(&cpi->common, xd, mi_row, mi_col, bsize);

  int64_t sum_square_error;
} partition_variance;

  partition_variance part_variances;

  partition_variance part_variances;

  partition_variance part_variances;

  partition_variance part_variances;

  partition_variance *part_variances;

static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
      v64x64 *vt = (v64x64 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      v32x32 *vt = (v32x32 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      v16x16 *vt = (v16x16 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      v8x8 *vt = (v8x8 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];

// Set variance values given sum square error, sum error, count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->variance = (int)(256 *
                      (v->sum_square_error - v->sum_error * v->sum_error /
                       v->count) / v->count);
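
  // In other words, variance = 256 * (E[e^2] - E[e]^2) over the c samples;
  // the factor of 256 keeps precision in integer arithmetic. For example,
  // s2 = 1000, s = 0, c = 64 gives variance = 256 * 1000 / 64 = 4000.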
void sum_2_variances(const var *a, const var *b, var *r) {
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->count + b->count, r);

static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
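  // Note: split[0..3] are the four quadrants in raster order, so horz[0] and
  // horz[1] are the top and bottom halves, vert[0] and vert[1] the left and
  // right halves, and "none" is the whole block, summed from the two
  // vertical halves.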
static int set_vt_partitioning(VP9_COMP *cpi,
                               const TileInfo *const tile,
  VP9_COMMON *const cm = &cpi->common;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  // TODO(debargha): Choose this more intelligently.
  const int64_t threshold_multiplier = 25;
  int64_t threshold = threshold_multiplier * cpi->common.base_qindex;
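  // E.g. at base_qindex == 100 the threshold is 25 * 100 == 2500; since
  // fill_variance() scales by 256, this corresponds to a per-sample variance
  // of roughly 2500 / 256, i.e. about 10.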
  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);
  // The 'none' partition is available only if more than half the block, in
  // both width and height, lies inside the visible image.
  if (mi_col + block_width / 2 < cm->mi_cols &&
      mi_row + block_height / 2 < cm->mi_rows &&
      vt.part_variances->none.variance < threshold) {
    set_block_size(cpi, tile, mi_row, mi_col, bsize);

  // Vertical split is available on all but the bottom border.
  if (mi_row + block_height / 2 < cm->mi_rows &&
      vt.part_variances->vert[0].variance < threshold &&
      vt.part_variances->vert[1].variance < threshold) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
    set_block_size(cpi, tile, mi_row, mi_col, subsize);
    set_block_size(cpi, tile, mi_row, mi_col + block_width / 2, subsize);

  // Horizontal split is available on all but the right border.
  if (mi_col + block_width / 2 < cm->mi_cols &&
      vt.part_variances->horz[0].variance < threshold &&
      vt.part_variances->horz[1].variance < threshold) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
    set_block_size(cpi, tile, mi_row, mi_col, subsize);
    set_block_size(cpi, tile, mi_row + block_height / 2, mi_col, subsize);

// TODO(debargha): Fix this function and make it work as expected.
static void choose_partitioning(VP9_COMP *cpi,
                                const TileInfo *const tile,
                                int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *x = &cpi->mb;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  int pixels_wide = 64, pixels_high = 64;
  int_mv nearest_mv, near_mv;
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
  const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;

  set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  if (cm->frame_type != KEY_FRAME) {
    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf);

    xd->mi_8x8[0]->mbmi.ref_frame[0] = LAST_FRAME;
    xd->mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
    vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv,
                          xd->mi_8x8[0]->mbmi.ref_mvs[LAST_FRAME],
                          &nearest_mv, &near_mv);

    xd->mi_8x8[0]->mbmi.mv[0] = nearest_mv;
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;

  // Fill in the entire tree of 8x8 variances for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      v16x16 *vst = &vt.split[i].split[j];
      for (k = 0; k < 4; k++) {
        int x_idx = x16_idx + ((k & 1) << 3);
        int y_idx = y16_idx + ((k >> 1) << 3);
        unsigned int sse = 0;

        if (x_idx < pixels_wide && y_idx < pixels_high)
          vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp,
                              d + y_idx * dp + x_idx, dp, &sse, &sum);
        fill_variance(sse, sum, 64, &vst->split[k].part_variances.none);

  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
      fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
  fill_variance_tree(&vt, BLOCK_64X64);

  // Now go through the entire structure, splitting every block size until
  // we get to one that's got a variance lower than our threshold, or we
  if (!set_vt_partitioning(cpi, &vt, tile, BLOCK_64X64,
                           mi_row, mi_col, 8)) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      if (!set_vt_partitioning(cpi, &vt.split[i], tile, BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx), 4)) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          // NOTE: This is a temporary hack to disable 8x8 partitions,
          // since it works really badly, possibly due to a bug.
#define DISABLE_8X8_VAR_BASED_PARTITION
#ifdef DISABLE_8X8_VAR_BASED_PARTITION
          if (mi_row + y32_idx + y16_idx + 1 < cm->mi_rows &&
              mi_col + x32_idx + x16_idx + 1 < cm->mi_cols) {
            set_block_size(cpi, tile,
                           (mi_row + y32_idx + y16_idx),
                           (mi_col + x32_idx + x16_idx),
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              set_block_size(cpi, tile,
                             (mi_row + y32_idx + y16_idx + y8_idx),
                             (mi_col + x32_idx + x16_idx + x8_idx),
          if (!set_vt_partitioning(cpi, &vt.split[i].split[j], tile,
                                   (mi_row + y32_idx + y16_idx),
                                   (mi_col + x32_idx + x16_idx), 2)) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              set_block_size(cpi, tile,
                             (mi_row + y32_idx + y16_idx + y8_idx),
                             (mi_col + x32_idx + x16_idx + x8_idx),

// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(MACROBLOCK *x) {
  // TODO: This could also be done over smaller areas (8x8), but that would
  // require extensive changes elsewhere, as lambda is assumed to be fixed
  // over an entire MB in most of the code.
  // Another option is to compute four 8x8 variances, and pick a single
  // lambda using a non-linear combination (e.g., the smallest, or second
  const unsigned int act = vp9_variance16x16(x->plane[0].src.buf,
                                             x->plane[0].src.stride,
                                             VP9_VAR_OFFS, 0, &sse) << 4;
  // If the region is flat, lower the activity some more.
  return act < (8 << 12) ? MIN(act, 5 << 12) : act;
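  // Note: the << 4 above scales the 16x16 block variance so that act equals
  // the per-pixel variance << 12; the thresholds 8 << 12 and 5 << 12 thus
  // correspond to per-pixel variances of 8 and 5 respectively.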
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(MACROBLOCK *x, int use_dc_pred) {
  return vp9_encode_intra(x, use_dc_pred);
// Measure the activity of the current macroblock.
// What we measure here is TBD, so it is abstracted into this function.
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) {
  unsigned int mb_activity;

  if (ALT_ACT_MEASURE) {
    const int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
    // Or use an alternative.
    mb_activity = alt_activity_measure(x, use_dc_pred);
    // Original activity measure from Tim T's code.
    mb_activity = tt_activity_measure(x);

  return MAX(mb_activity, ACTIVITY_AVG_MIN);

// Calculate an "average" mb activity value for the frame
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
  // Find median: Simple n^2 algorithm for experimentation
  unsigned int *sortlist;

  // Create a list to sort to
  CHECK_MEM_ERROR(&cpi->common, sortlist, vpx_calloc(sizeof(unsigned int),

  // Copy map to sort list
  vpx_memcpy(sortlist, cpi->mb_activity_map,
             sizeof(unsigned int) * cpi->common.MBs);

  // Ripple each value down to its correct position
  for (i = 1; i < cpi->common.MBs; i++) {
    for (j = i; j > 0; j--) {
      if (sortlist[j] < sortlist[j - 1]) {
        tmp = sortlist[j - 1];
        sortlist[j - 1] = sortlist[j];
    // Even number of MBs, so estimate the median as the mean of the two
    // values either side.
    median = (1 + sortlist[cpi->common.MBs >> 1] +
              sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;

    cpi->activity_avg = median;

  // Simple mean for now
  cpi->activity_avg = (unsigned int) (activity_sum / cpi->common.MBs);

  if (cpi->activity_avg < ACTIVITY_AVG_MIN)
    cpi->activity_avg = ACTIVITY_AVG_MIN;

  // Experimental code: return fixed value normalized for several clips
  cpi->activity_avg = 100000;

#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0

// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
  VP9_COMMON *const cm = &cpi->common;

#if OUTPUT_NORM_ACT_STATS
  FILE *f = fopen("norm_act.stt", "a");
  fprintf(f, "\n%12d\n", cpi->activity_avg);

  // Reset pointers to start of activity map
  x->mb_activity_ptr = cpi->mb_activity_map;

  // Calculate normalized mb activity number.
  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      // Read activity from the map
      act = *(x->mb_activity_ptr);

      // Calculate a normalized activity number
      a = act + 4 * cpi->activity_avg;
      b = 4 * act + cpi->activity_avg;

        *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
        *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
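      // Worked example: when act == activity_avg, a == b, so
      // (b + (a >> 1)) / a rounds to 1 and the index is 0 (an average MB);
      // when act == 4 * activity_avg, (b + (a >> 1)) / a == 21 / 8 -> 2,
      // giving index +1, and act == activity_avg / 4 symmetrically gives -1.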
#if OUTPUT_NORM_ACT_STATS
      fprintf(f, " %6d", *(x->mb_activity_ptr));

      // Increment activity map pointers
      x->mb_activity_ptr++;

#if OUTPUT_NORM_ACT_STATS

#if OUTPUT_NORM_ACT_STATS

#endif  // USE_ACT_INDEX
// Loop through all MBs: note the activity of each, compute the average
// activity for the frame, and calculate a normalized activity for each MB.
static void build_activity_map(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;

  YV12_BUFFER_CONFIG *new_yv12 = get_frame_new_buffer(cm);
  int recon_y_stride = new_yv12->y_stride;

  unsigned int mb_activity;
  int64_t activity_sum = 0;

  x->mb_activity_ptr = cpi->mb_activity_map;

  // for each macroblock row in image
  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    // reset above block coeffs
    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);

    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
      xd->left_available = (mb_col != 0);

      mb_activity = mb_activity_measure(x, mb_row, mb_col);

      activity_sum += mb_activity;

      // Store MB level activity details.
      *x->mb_activity_ptr = mb_activity;

      // Increment activity map pointer
      x->mb_activity_ptr++;

      // adjust to the next column of source macroblocks
      x->plane[0].src.buf += 16;

    // adjust to the next row of mbs
    x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;

  // Calculate an "average" MB activity
  calc_av_activity(cpi, activity_sum);
  // Calculate an activity index number for each MB.
  calc_activity_index(cpi, x);
// Macroblock activity masking
static void activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
  x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);

  const int64_t act = *(x->mb_activity_ptr);

  // Apply the masking to the RD multiplier.
  const int64_t a = act + (2 * cpi->activity_avg);
  const int64_t b = (2 * act) + cpi->activity_avg;

  x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
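
  // The masking scales rdmult by b / a = (2 * act + avg) / (act + 2 * avg):
  // 1 for an average MB, approaching 2 for very busy MBs and 1/2 for flat
  // ones, so flat areas get a smaller lambda and therefore more bits.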
  // Activity based Zbin adjustment
  adjust_act_zbin(cpi, x);

// Select a segment for the current SB64
static void select_in_frame_q_segment(VP9_COMP *cpi,
                                      int mi_row, int mi_col,
                                      int output_enabled, int projected_rate) {
  VP9_COMMON *const cm = &cpi->common;

  const int mi_offset = mi_row * cm->mi_cols + mi_col;
  const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
  const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64];
  const int xmis = MIN(cm->mi_cols - mi_col, bw);
  const int ymis = MIN(cm->mi_rows - mi_row, bh);
  int complexity_metric = 64;

  unsigned char segment;

  if (!output_enabled) {
    // Rate depends on the fraction of the SB64 that is in frame, i.e.
    // (xmis * ymis) / (bw * bh), and is converted to bits * 256 units.
    const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) /
    if (projected_rate < (target_rate / 4)) {

    if (target_rate > 0) {
          clamp((int)((projected_rate * 64) / target_rate), 16, 255);
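      // E.g. a block coming in exactly at its target rate maps to the
      // neutral value 64 and one at twice the target maps to 128; the clamp
      // keeps the metric within [16, 255].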
  // Fill in the entries in the segment map corresponding to this SB64.
  for (y = 0; y < ymis; y++) {
    for (x = 0; x < xmis; x++) {
      cpi->segmentation_map[mi_offset + y * cm->mi_cols + x] = segment;
      cpi->complexity_map[mi_offset + y * cm->mi_cols + x] =
          (unsigned char)complexity_metric;

static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  MODE_INFO *mi_addr = xd->mi_8x8[0];
  const struct segmentation *const seg = &cm->seg;

  const int mis = cm->mode_info_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];

  assert(mi->mbmi.sb_type == bsize);
  // For in-frame adaptive Q, check for resetting the segment_id and updating
  // the cyclic refresh map.
  if ((cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) && seg->enabled &&
    vp9_cyclic_refresh_update_segment(cpi, &xd->mi_8x8[0]->mbmi,
                                      mi_row, mi_col, bsize, 1);
    vp9_init_plane_quantizers(cpi, x);

  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][1];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];

  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it.
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
          && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi_8x8[x_idx + y * mis] = mi_addr;

  if (cpi->oxcf.aq_mode)
    vp9_init_plane_quantizers(cpi, x);

  // FIXME(rbultje) I'm pretty sure this should go to the end of this block
  // (i.e. after the output_enabled)
  if (bsize < BLOCK_32X32) {
    if (bsize < BLOCK_16X16)
      ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
    ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];

  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;

  vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
             sizeof(uint8_t) * ctx->num_4x4_blk);

  if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    for (i = 0; i < TX_MODES; i++)
      cpi->rd_tx_select_diff[i] += ctx->tx_rd_diff[i];

#if CONFIG_INTERNAL_STATS
  if (frame_is_intra_only(cm)) {
    static const int kf_mode_index[] = {
      THR_V_PRED /*V_PRED*/,
      THR_H_PRED /*H_PRED*/,
      THR_D45_PRED /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED /*D63_PRED*/,

    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
    // Note how often each mode is chosen as best.
    ++cpi->mode_chosen_counts[ctx->best_mode_index];

  if (!frame_is_intra_only(cm)) {
    if (is_inter_block(mbmi)) {
      vp9_update_mv_count(cm, xd);

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = vp9_get_pred_context_switchable_interp(xd);
        ++cm->counts.switchable_interp[ctx][mbmi->interp_filter];

    cpi->rd_comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    cpi->rd_comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    cpi->rd_comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      cpi->rd_filter_diff[i] += ctx->best_filter_diff[i];

void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
  const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,

  // Set current frame pointer.
  x->e_mbd.cur_buf = src;

  for (i = 0; i < MAX_MB_PLANE; i++)
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);

static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
                             int mi_row, int mi_col,
                             int *totalrate, int64_t *totaldist,
                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  double rdmult_ratio;

  vp9_clear_system_state();
  rdmult_ratio = 1.0;  // avoid uninitialized warnings

  // Use the lower precision, but faster, 32x32 fdct for mode selection.
  x->use_lp32x32fdct = 1;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (x->ab_index != 0) {

  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  mbmi = &xd->mi_8x8[0]->mbmi;
  mbmi->sb_type = bsize;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][0];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
    p[i].eobs = ctx->eobs_pbuf[i][0];

  // Set to zero to make sure we do not use the previous encoded frame stats
  x->source_variance = get_sby_perpixel_variance(cpi, x, bsize);

  if (aq_mode == VARIANCE_AQ) {
    const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
                                            : vp9_block_energy(cpi, x, bsize);
    if (cm->frame_type == KEY_FRAME ||
        cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
      mbmi->segment_id = vp9_vaq_segment_id(energy);
      const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                    : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);

    rdmult_ratio = vp9_vaq_rdmult_ratio(energy);
    vp9_init_plane_quantizers(cpi, x);

  // Save rdmult before it might be changed, so it can be restored later.
  orig_rdmult = x->rdmult;
  if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
    activity_masking(cpi, x);

  if (aq_mode == VARIANCE_AQ) {
    vp9_clear_system_state();
    x->rdmult = (int)round(x->rdmult * rdmult_ratio);
  } else if (aq_mode == COMPLEXITY_AQ) {
    const int mi_offset = mi_row * cm->mi_cols + mi_col;
    unsigned char complexity = cpi->complexity_map[mi_offset];
    const int is_edge = (mi_row <= 1) || (mi_row >= (cm->mi_rows - 2)) ||
                        (mi_col <= 1) || (mi_col >= (cm->mi_cols - 2));
    if (!is_edge && (complexity > 128))
      x->rdmult += ((x->rdmult * (complexity - 128)) / 256);
  } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                  : cm->last_frame_seg_map;
    // If segment 1, use rdmult for that segment.
    if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);

  // Find best coding mode & reconstruct the MB so it is available
  // as a predictor for MBs that follow in the SB
  if (frame_is_intra_only(cm)) {
    vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx,
    if (bsize >= BLOCK_8X8)
      vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col,
                                totalrate, totaldist, bsize, ctx, best_rd);
      vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, totalrate,
                                    totaldist, bsize, ctx, best_rd);

  if (aq_mode == VARIANCE_AQ) {
    x->rdmult = orig_rdmult;
    if (*totalrate != INT_MAX) {
      vp9_clear_system_state();
      *totalrate = (int)round(*totalrate * rdmult_ratio);
  } else if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) ||
             (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)) {
    x->rdmult = orig_rdmult;

static void update_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const MODE_INFO *const mi = xd->mi_8x8[0];
  const MB_MODE_INFO *const mbmi = &mi->mbmi;

  if (!frame_is_intra_only(cm)) {
    const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
    if (!seg_ref_active) {
      FRAME_COUNTS *const counts = &cm->counts;
      const int inter_block = is_inter_block(mbmi);

      counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;

      // If the segment reference feature is enabled we have only a single
      // reference frame allowed for the segment so exclude it from
      // the reference frame counts used to work out probabilities.
        const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];

        if (cm->reference_mode == REFERENCE_MODE_SELECT)
          counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
                            [has_second_ref(mbmi)]++;

        if (has_second_ref(mbmi)) {
          counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
                          [ref0 == GOLDEN_FRAME]++;
          counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
                            [ref0 != LAST_FRAME]++;
          if (ref0 != LAST_FRAME)
            counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
                              [ref0 != GOLDEN_FRAME]++;

static BLOCK_SIZE *get_sb_partitioning(MACROBLOCK *x, BLOCK_SIZE bsize) {
      return &x->sb64_partitioning;
      return &x->sb_partitioning[x->sb_index];
      return &x->mb_partitioning[x->sb_index][x->mb_index];
      return &x->b_partitioning[x->sb_index][x->mb_index][x->b_index];

static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
                            ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                            ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                            PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  for (p = 0; p < MAX_MB_PLANE; p++) {
        xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
        a + num_4x4_blocks_wide * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
            xd->plane[p].subsampling_x);
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        l + num_4x4_blocks_high * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
            xd->plane[p].subsampling_y);
  vpx_memcpy(xd->above_seg_context + mi_col, sa,
             sizeof(*xd->above_seg_context) * mi_width);
  vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
             sizeof(xd->left_seg_context[0]) * mi_height);

static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
                         ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                         ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                         PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];

  // buffer the above/left context information of the block in search.
  for (p = 0; p < MAX_MB_PLANE; ++p) {
        a + num_4x4_blocks_wide * p,
        xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
            xd->plane[p].subsampling_x);
        l + num_4x4_blocks_high * p,
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
            xd->plane[p].subsampling_y);
  vpx_memcpy(sa, xd->above_seg_context + mi_col,
             sizeof(*xd->above_seg_context) * mi_width);
  vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
             sizeof(xd->left_seg_context[0]) * mi_height);

static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
                     TOKENEXTRA **tp, int mi_row, int mi_col,
                     int output_enabled, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (x->ab_index > 0)

  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  update_state(cpi, get_block_context(x, bsize), mi_row, mi_col, bsize,
  encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);

  if (output_enabled) {
    (*tp)->token = EOSB_TOKEN;

static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)

  if (bsize >= BLOCK_8X8) {
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = *get_sb_partitioning(x, bsize);
    subsize = BLOCK_4X4;

  partition = partition_lookup[bsl][subsize];

  switch (partition) {
    case PARTITION_NONE:
      if (output_enabled && bsize >= BLOCK_8X8)
        cm->counts.partition[ctx][PARTITION_NONE]++;
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
    case PARTITION_VERT:
        cm->counts.partition[ctx][PARTITION_VERT]++;
      *get_sb_index(x, subsize) = 0;
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      if (mi_col + hbs < cm->mi_cols) {
        *get_sb_index(x, subsize) = 1;
        encode_b(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize);
    case PARTITION_HORZ:
        cm->counts.partition[ctx][PARTITION_HORZ]++;
      *get_sb_index(x, subsize) = 0;
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      if (mi_row + hbs < cm->mi_rows) {
        *get_sb_index(x, subsize) = 1;
        encode_b(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize);
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
        cm->counts.partition[ctx][PARTITION_SPLIT]++;

      *get_sb_index(x, subsize) = 0;
      encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      *get_sb_index(x, subsize) = 1;
      encode_sb(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize);
      *get_sb_index(x, subsize) = 2;
      encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize);
      *get_sb_index(x, subsize) = 3;
      encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
      assert(0 && "Invalid partition type.");
  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);

// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not, then return the largest allowed partition size.
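// Note: square sizes sit three apart in the BLOCK_SIZE enum (BLOCK_8X8 == 3,
// BLOCK_16X16 == 6, BLOCK_32X32 == 9, BLOCK_64X64 == 12), so the
// "bsize -= 3" step below walks down through the square sizes only.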
static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
                                      int rows_left, int cols_left,
  if (rows_left <= 0 || cols_left <= 0) {
    return MIN(bsize, BLOCK_8X8);
    for (; bsize > 0; bsize -= 3) {
      *bh = num_8x8_blocks_high_lookup[bsize];
      *bw = num_8x8_blocks_wide_lookup[bsize];
      if ((*bh <= rows_left) && (*bw <= cols_left)) {
// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed, in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                                   MODE_INFO **mi_8x8, int mi_row, int mi_col,
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mode_info_stride;
  int row8x8_remaining = tile->mi_row_end - mi_row;
  int col8x8_remaining = tile->mi_col_end - mi_col;
  int block_row, block_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
  int bh = num_8x8_blocks_high_lookup[bsize];
  int bw = num_8x8_blocks_wide_lookup[bsize];

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // Apply the requested partition size to the SB64 if it is all "in image"
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        int index = block_row * mis + block_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = bsize;

    // Else this is a partial SB64.
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        int index = block_row * mis + block_col;
        // Find a partition size that fits
        bsize = find_partition_size(bsize,
                                    (row8x8_remaining - block_row),
                                    (col8x8_remaining - block_col), &bh, &bw);
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = bsize;

static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8,
                              MODE_INFO **prev_mi_8x8) {
  const int mis = cm->mode_info_stride;
  int block_row, block_col;

  for (block_row = 0; block_row < 8; ++block_row) {
    for (block_col = 0; block_col < 8; ++block_col) {
      MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col];
      const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
        const ptrdiff_t offset = prev_mi - cm->prev_mi;
        mi_8x8[block_row * mis + block_col] = cm->mi + offset;
        mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;

static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8) {
  const int mis = cm->mode_info_stride;
  int block_row, block_col;

  for (block_row = 0; block_row < 8; ++block_row) {
    for (block_col = 0; block_col < 8; ++block_col) {
      const MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col];
        if (abs(prev_mi->mbmi.mv[0].as_mv.row) >= 8 ||
            abs(prev_mi->mbmi.mv[0].as_mv.col) >= 8)

static void update_state_rt(VP9_COMP *cpi, const PICK_MODE_CONTEXT *ctx,
                            int mi_row, int mi_col, int bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  const struct segmentation *const seg = &cm->seg;

  // TODO(jingning) We might need PICK_MODE_CONTEXT to buffer coding modes
  // associated with variable block sizes. Otherwise, remove this ctx
  // from argument list.
  *(xd->mi_8x8[0]) = ctx->mic;
  // Check for resetting segment_id and updating the cyclic refresh map.
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && seg->enabled) {
    vp9_cyclic_refresh_update_segment(cpi, &xd->mi_8x8[0]->mbmi,
                                      mi_row, mi_col, bsize, 1);
    vp9_init_plane_quantizers(cpi, x);

  if (is_inter_block(mbmi)) {
    vp9_update_mv_count(cm, xd);

    if (cm->interp_filter == SWITCHABLE) {
      const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
      ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter];

static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile,
                        TOKENEXTRA **tp, int mi_row, int mi_col,
                        int output_enabled, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (x->ab_index > 0)

  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  update_state_rt(cpi, get_block_context(x, bsize), mi_row, mi_col, bsize);

  encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);

    (*tp)->token = EOSB_TOKEN;

static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile,
                         TOKENEXTRA **tp, int mi_row, int mi_col,
                         int output_enabled, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)

  if (bsize >= BLOCK_8X8) {
    MACROBLOCKD *const xd = &cpi->mb.e_mbd;
    const int idx_str = xd->mode_info_stride * mi_row + mi_col;
    MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = mi_8x8[0]->mbmi.sb_type;
    subsize = BLOCK_4X4;

  partition = partition_lookup[bsl][subsize];

  switch (partition) {
    case PARTITION_NONE:
      if (output_enabled && bsize >= BLOCK_8X8)
        cm->counts.partition[ctx][PARTITION_NONE]++;
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
    case PARTITION_VERT:
        cm->counts.partition[ctx][PARTITION_VERT]++;
      *get_sb_index(x, subsize) = 0;
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      if (mi_col + hbs < cm->mi_cols) {
        *get_sb_index(x, subsize) = 1;
        encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
    case PARTITION_HORZ:
        cm->counts.partition[ctx][PARTITION_HORZ]++;
      *get_sb_index(x, subsize) = 0;
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      if (mi_row + hbs < cm->mi_rows) {
        *get_sb_index(x, subsize) = 1;
        encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
        cm->counts.partition[ctx][PARTITION_SPLIT]++;

      *get_sb_index(x, subsize) = 0;
      encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      *get_sb_index(x, subsize) = 1;
      encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
      *get_sb_index(x, subsize) = 2;
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
      *get_sb_index(x, subsize) = 3;
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
      assert(0 && "Invalid partition type.");
  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);

static void rd_use_partition(VP9_COMP *cpi,
                             const TileInfo *const tile,
                             TOKENEXTRA **tp, int mi_row, int mi_col,
                             BLOCK_SIZE bsize, int *rate, int64_t *dist,
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mis = cm->mode_info_stride;
  const int bsl = b_width_log2(bsize);
  const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
  const int bss = (1 << bsl) / 4;
  PARTITION_TYPE partition = PARTITION_NONE;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int last_part_rate = INT_MAX;
  int64_t last_part_dist = INT64_MAX;
  int64_t last_part_rd = INT64_MAX;
  int none_rate = INT_MAX;
  int64_t none_dist = INT64_MAX;
  int64_t none_rd = INT64_MAX;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT64_MAX;
  int64_t chosen_rd = INT64_MAX;
  BLOCK_SIZE sub_subsize = BLOCK_4X4;
  int splits_below = 0;
  BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
  int do_partition_search = 1;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)

  assert(num_4x4_blocks_wide_lookup[bsize] ==
         num_4x4_blocks_high_lookup[bsize]);

  partition = partition_lookup[bsl][bs_type];
  subsize = get_subsize(bsize, partition);

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (x->ab_index != 0) {

  *(get_sb_partitioning(x, bsize)) = subsize;

  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  if (bsize == BLOCK_16X16) {
    x->mb_energy = vp9_block_energy(cpi, x, bsize);

  if (!x->in_active_map) {
    do_partition_search = 0;
    if (mi_row + (mi_step >> 1) < cm->mi_rows &&
        mi_col + (mi_step >> 1) < cm->mi_cols) {
      *(get_sb_partitioning(x, bsize)) = bsize;
      bs_type = mi_8x8[0]->mbmi.sb_type = bsize;

      partition = PARTITION_NONE;

  if (do_partition_search &&
      cpi->sf.partition_search_type == SEARCH_PARTITION &&
      cpi->sf.adjust_partitioning_from_last_frame) {
    // Check if any of the sub blocks are further split.
    if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
      sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
      for (i = 0; i < 4; i++) {
        int jj = i >> 1, ii = i & 0x01;
        MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
        if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
    // If the partition is not none, try none unless each of the 4 splits is split
    if (partition != PARTITION_NONE && !splits_below &&
        mi_row + (mi_step >> 1) < cm->mi_rows &&
        mi_col + (mi_step >> 1) < cm->mi_cols) {
      *(get_sb_partitioning(x, bsize)) = bsize;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize,
                       get_block_context(x, bsize), INT64_MAX);

      pl = partition_plane_context(xd, mi_row, mi_col, bsize);

      if (none_rate < INT_MAX) {
        none_rate += x->partition_cost[pl][PARTITION_NONE];
        none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist);

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      mi_8x8[0]->mbmi.sb_type = bs_type;
      *(get_sb_partitioning(x, bsize)) = subsize;

  switch (partition) {
    case PARTITION_NONE:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, bsize,
                       get_block_context(x, bsize), INT64_MAX);
    case PARTITION_HORZ:
      *get_sb_index(x, subsize) = 0;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize,
                       get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
        update_state(cpi, get_block_context(x, subsize), mi_row, mi_col,
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
        *get_sb_index(x, subsize) = 1;
        rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &rt, &dt,
                         subsize, get_block_context(x, subsize), INT64_MAX);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;

        last_part_rate += rt;
        last_part_dist += dt;
    case PARTITION_VERT:
      *get_sb_index(x, subsize) = 0;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize,
                       get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
        update_state(cpi, get_block_context(x, subsize), mi_row, mi_col,
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
        *get_sb_index(x, subsize) = 1;
        rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &rt, &dt,
                         subsize, get_block_context(x, subsize), INT64_MAX);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;

        last_part_rate += rt;
        last_part_dist += dt;
    case PARTITION_SPLIT:
      for (i = 0; i < 4; i++) {
        int x_idx = (i & 1) * (mi_step >> 1);
        int y_idx = (i >> 1) * (mi_step >> 1);
        int jj = i >> 1, ii = i & 0x01;

        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))

        *get_sb_index(x, subsize) = i;

        rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
                         mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;

        last_part_rate += rt;
        last_part_dist += dt;

  pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  if (last_part_rate < INT_MAX) {
    last_part_rate += x->partition_cost[pl][partition];
    last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist);

  if (do_partition_search
      && cpi->sf.adjust_partitioning_from_last_frame
      && cpi->sf.partition_search_type == SEARCH_PARTITION
      && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
      && (mi_row + mi_step < cm->mi_rows ||
          mi_row + (mi_step >> 1) == cm->mi_rows)
      && (mi_col + mi_step < cm->mi_cols ||
          mi_col + (mi_step >> 1) == cm->mi_cols)) {
    BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);

    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

    for (i = 0; i < 4; i++) {
      int x_idx = (i & 1) * (mi_step >> 1);
      int y_idx = (i >> 1) * (mi_step >> 1);

      ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
      PARTITION_CONTEXT sl[8], sa[8];

      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))

      *get_sb_index(x, split_subsize) = i;
      *get_sb_partitioning(x, bsize) = split_subsize;
      *get_sb_partitioning(x, split_subsize) = split_subsize;

      save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

      rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
                       split_subsize, get_block_context(x, split_subsize),

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

      if (rt == INT_MAX || dt == INT64_MAX) {
        chosen_rate = INT_MAX;
        chosen_dist = INT64_MAX;

        encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0,

      pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
      chosen_rate += x->partition_cost[pl][PARTITION_NONE];

    pl = partition_plane_context(xd, mi_row, mi_col, bsize);
    if (chosen_rate < INT_MAX) {
      chosen_rate += x->partition_cost[pl][PARTITION_SPLIT];
      chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist);
  // If last_part is better, set the partitioning to that...
  if (last_part_rd < chosen_rd) {
    mi_8x8[0]->mbmi.sb_type = bsize;
    if (bsize >= BLOCK_8X8)
      *(get_sb_partitioning(x, bsize)) = subsize;
    chosen_rate = last_part_rate;
    chosen_dist = last_part_dist;
    chosen_rd = last_part_rd;
  // If none was better, set the partitioning to that...
  if (none_rd < chosen_rd) {
    if (bsize >= BLOCK_8X8)
      *(get_sb_partitioning(x, bsize)) = bsize;
    chosen_rate = none_rate;
    chosen_dist = none_dist;

  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  // We must have chosen a partitioning and encoding or we'll fail later on.
  // No other opportunities for success.
  if (bsize == BLOCK_64X64)
    assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX);

    int output_enabled = (bsize == BLOCK_64X64);
    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
      select_in_frame_q_segment(cpi, mi_row, mi_col,
                                output_enabled, chosen_rate);

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              chosen_rate, chosen_dist);

    encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize);

  *rate = chosen_rate;
  *dist = chosen_dist;
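
// The tables below relax an observed partition size into a search range;
// e.g. an observed BLOCK_16X16 maps to a minimum of BLOCK_8X8 and a maximum
// of BLOCK_32X32, bracketing the sizes seen in neighboring blocks.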
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
  BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,

static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
  BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
  BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
  BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
  BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,

// Look at all the mode_info entries for blocks that are part of this
// partition and find the min and max values for sb_type.
// At the moment this is designed to work on a 64x64 SB but could be
// adjusted to use a size parameter.
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO **mi_8x8,
                                        BLOCK_SIZE *min_block_size,
                                        BLOCK_SIZE *max_block_size) {
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  int sb_width_in_blocks = MI_BLOCK_SIZE;
  int sb_height_in_blocks = MI_BLOCK_SIZE;

  // Check the sb_type for each block that belongs to this region.
  for (i = 0; i < sb_height_in_blocks; ++i) {
    for (j = 0; j < sb_width_in_blocks; ++j) {
      MODE_INFO *mi = mi_8x8[index + j];
      BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
      *min_block_size = MIN(*min_block_size, sb_type);
      *max_block_size = MAX(*max_block_size, sb_type);
    index += xd->mode_info_stride;
// Next square block size less than or equal to the current block size.
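// For example, next_square_size[BLOCK_16X32] == BLOCK_16X16 while
// next_square_size[BLOCK_32X32] == BLOCK_32X32: square sizes map to
// themselves and rectangular sizes round down to their shorter dimension.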
1939 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
1940 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1941 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
1942 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
1943 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
1947 // Look at neighboring blocks and set a min and max partition size based on
1948 // what they chose.
1949 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
1950 int mi_row, int mi_col,
1951 BLOCK_SIZE *min_block_size,
1952 BLOCK_SIZE *max_block_size) {
1953 VP9_COMMON *const cm = &cpi->common;
1954 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
1955 MODE_INFO **mi_8x8 = xd->mi_8x8;
1956 const int left_in_image = xd->left_available && mi_8x8[-1];
1957 const int above_in_image = xd->up_available &&
1958 mi_8x8[-xd->mode_info_stride];
1959 MODE_INFO **above_sb64_mi_8x8;
1960 MODE_INFO **left_sb64_mi_8x8;
1962 int row8x8_remaining = tile->mi_row_end - mi_row;
1963 int col8x8_remaining = tile->mi_col_end - mi_col;
1965 BLOCK_SIZE min_size = BLOCK_4X4;
1966 BLOCK_SIZE max_size = BLOCK_64X64;
1967 // Trap case where we do not have a prediction.
1968 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
1969 // Default "min to max" and "max to min"
1970 min_size = BLOCK_64X64;
1971 max_size = BLOCK_4X4;
1973 // NOTE: each call to get_sb_partition_size_range() uses the previously
1974 // passed-in values for min and max as a starting point.
1975 // Find the min and max partition used in previous frame at this location
1976 if (cm->frame_type != KEY_FRAME) {
1977 MODE_INFO **const prev_mi =
1978 &cm->prev_mi_grid_visible[mi_row * xd->mode_info_stride + mi_col];
1979 get_sb_partition_size_range(cpi, prev_mi, &min_size, &max_size);
1981 // Find the min and max partition sizes used in the left SB64
1982 if (left_in_image) {
1983 left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE];
1984 get_sb_partition_size_range(cpi, left_sb64_mi_8x8,
1985 &min_size, &max_size);
1987 // Find the min and max partition sizes used in the above SB64.
1988 if (above_in_image) {
1989 above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE];
1990 get_sb_partition_size_range(cpi, above_sb64_mi_8x8,
1991 &min_size, &max_size);
1993 // Adjust the observed min and max partition sizes.
1994 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
1995 min_size = min_partition_size[min_size];
1996 max_size = max_partition_size[max_size];
2000 // Check border cases where max and min from neighbors may not be legal.
2001 max_size = find_partition_size(max_size,
2002 row8x8_remaining, col8x8_remaining,
2004 min_size = MIN(min_size, max_size);
2006 // When use_square_partition_only is true, make sure at least one square
2007 // partition is allowed by selecting the next smaller square size as
2009 if (cpi->sf.use_square_partition_only &&
2010 next_square_size[max_size] < min_size) {
2011 min_size = next_square_size[max_size];
2013 *min_block_size = min_size;
2014 *max_block_size = max_size;
2017 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2018 vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
2021 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2022 vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
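// These two helpers snapshot and restore the predicted motion vectors around
// each candidate partitioning (typically guarded by
// sf.adaptive_motion_search), so every sub-block search starts from the seed
// established at the parent block rather than from whatever the previous
// candidate left behind.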
2025 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
2026 // unlikely to be selected depending on previous rate-distortion optimization
2027 // results, for encoding speed-up.
2028 static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
2029 TOKENEXTRA **tp, int mi_row,
2030 int mi_col, BLOCK_SIZE bsize, int *rate,
2031 int64_t *dist, int do_recon, int64_t best_rd) {
2032 VP9_COMMON *const cm = &cpi->common;
2033 MACROBLOCK *const x = &cpi->mb;
2034 MACROBLOCKD *const xd = &x->e_mbd;
2035 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
2036 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2037 PARTITION_CONTEXT sl[8], sa[8];
2038 TOKENEXTRA *tp_orig = *tp;
2039 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize);
2042 int this_rate, sum_rate = 0, best_rate = INT_MAX;
2043 int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
2045 int do_split = bsize >= BLOCK_8X8;
2047 // Override skipping rectangular partition operations for edge blocks
2048 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
2049 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
2050 const int xss = x->e_mbd.plane[1].subsampling_x;
2051 const int yss = x->e_mbd.plane[1].subsampling_y;
2053 int partition_none_allowed = !force_horz_split && !force_vert_split;
2054 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2056 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2060 if (bsize < BLOCK_8X8) {
2061 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
2062 // there is nothing to be done.
2063 if (x->ab_index != 0) {
2069 assert(num_8x8_blocks_wide_lookup[bsize] ==
2070 num_8x8_blocks_high_lookup[bsize]);
2072 if (bsize == BLOCK_16X16) {
2073 set_offsets(cpi, tile, mi_row, mi_col, bsize);
2074 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2077 // Determine partition types in search according to the speed features.
2078 // The thresholds set here must be square block sizes.
2079 if (cpi->sf.auto_min_max_partition_size) {
2080 partition_none_allowed &= (bsize <= cpi->sf.max_partition_size &&
2081 bsize >= cpi->sf.min_partition_size);
2082 partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size &&
2083 bsize > cpi->sf.min_partition_size) ||
2085 partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size &&
2086 bsize > cpi->sf.min_partition_size) ||
2088 do_split &= bsize > cpi->sf.min_partition_size;
2090 if (cpi->sf.use_square_partition_only) {
2091 partition_horz_allowed &= force_horz_split;
2092 partition_vert_allowed &= force_vert_split;
2095 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2097 if (cpi->sf.disable_split_var_thresh && partition_none_allowed) {
2098 unsigned int source_variancey;
2099 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
2100 source_variancey = get_sby_perpixel_variance(cpi, x, bsize);
2101 if (source_variancey < cpi->sf.disable_split_var_thresh) {
2103 if (source_variancey < cpi->sf.disable_split_var_thresh / 2)
2108 if (!x->in_active_map && (partition_horz_allowed || partition_vert_allowed))
2111 if (partition_none_allowed) {
2112 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize,
2114 if (this_rate != INT_MAX) {
2115 if (bsize >= BLOCK_8X8) {
2116 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2117 this_rate += x->partition_cost[pl][PARTITION_NONE];
2119 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
2120 if (sum_rd < best_rd) {
2121 int64_t stop_thresh = 4096;
2122 int64_t stop_thresh_rd;
2124 best_rate = this_rate;
2125 best_dist = this_dist;
2127 if (bsize >= BLOCK_8X8)
2128 *(get_sb_partitioning(x, bsize)) = bsize;
2130 // Adjust threshold according to partition size.
2131 stop_thresh >>= 8 - (b_width_log2_lookup[bsize] +
2132 b_height_log2_lookup[bsize]);
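// With b_*_log2_lookup giving dimensions in log2 units of 4-pel blocks,
// the shift is 0 for BLOCK_64X64 (threshold stays 4096) and 6 for
// BLOCK_8X8 (threshold becomes 4096 >> 6 == 64).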
2134 stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh);
2135 // If obtained distortion is very small, choose current partition
2136 // and stop splitting.
2137 if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) {
2143 if (!x->in_active_map) {
2147 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2150 // Store the estimated motion vector.
2151 if (cpi->sf.adaptive_motion_search)
2152 store_pred_mv(x, ctx);
2156 // TODO(jingning): use the motion vectors given by the above search as
2157 // the starting point of motion search in the following partition type check.
2159 subsize = get_subsize(bsize, PARTITION_SPLIT);
2160 for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
2161 const int x_idx = (i & 1) * mi_step;
2162 const int y_idx = (i >> 1) * mi_step;
2164 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
2167 *get_sb_index(x, subsize) = i;
2168 if (cpi->sf.adaptive_motion_search)
2169 load_pred_mv(x, ctx);
2170 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2171 partition_none_allowed)
2172 get_block_context(x, subsize)->pred_interp_filter =
2173 ctx->mic.mbmi.interp_filter;
2174 rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, subsize,
2175 &this_rate, &this_dist, i != 3, best_rd - sum_rd);
2177 if (this_rate == INT_MAX) {
2180 sum_rate += this_rate;
2181 sum_dist += this_dist;
2182 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2185 if (sum_rd < best_rd && i == 4) {
2186 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2187 sum_rate += x->partition_cost[pl][PARTITION_SPLIT];
2188 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2189 if (sum_rd < best_rd) {
2190 best_rate = sum_rate;
2191 best_dist = sum_dist;
2193 *(get_sb_partitioning(x, bsize)) = subsize;
2196 // Skip the rectangular partition test when a larger block size
2197 // gives a better RD cost.
2198 if (cpi->sf.less_rectangular_check)
2199 do_rect &= !partition_none_allowed;
2201 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2205 if (partition_horz_allowed && do_rect) {
2206 subsize = get_subsize(bsize, PARTITION_HORZ);
2207 *get_sb_index(x, subsize) = 0;
2208 if (cpi->sf.adaptive_motion_search)
2209 load_pred_mv(x, ctx);
2210 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2211 partition_none_allowed)
2212 get_block_context(x, subsize)->pred_interp_filter =
2213 ctx->mic.mbmi.interp_filter;
2214 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
2215 get_block_context(x, subsize), best_rd);
2216 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2218 if (sum_rd < best_rd && mi_row + mi_step < cm->mi_rows) {
2219 update_state(cpi, get_block_context(x, subsize), mi_row, mi_col,
2221 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
2223 *get_sb_index(x, subsize) = 1;
2224 if (cpi->sf.adaptive_motion_search)
2225 load_pred_mv(x, ctx);
2226 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2227 partition_none_allowed)
2228 get_block_context(x, subsize)->pred_interp_filter =
2229 ctx->mic.mbmi.interp_filter;
2230 rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rate,
2231 &this_dist, subsize, get_block_context(x, subsize),
2233 if (this_rate == INT_MAX) {
2236 sum_rate += this_rate;
2237 sum_dist += this_dist;
2238 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2241 if (sum_rd < best_rd) {
2242 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2243 sum_rate += x->partition_cost[pl][PARTITION_HORZ];
2244 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2245 if (sum_rd < best_rd) {
2247 best_rate = sum_rate;
2248 best_dist = sum_dist;
2249 *(get_sb_partitioning(x, bsize)) = subsize;
2252 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2256 if (partition_vert_allowed && do_rect) {
2257 subsize = get_subsize(bsize, PARTITION_VERT);
2259 *get_sb_index(x, subsize) = 0;
2260 if (cpi->sf.adaptive_motion_search)
2261 load_pred_mv(x, ctx);
2262 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2263 partition_none_allowed)
2264 get_block_context(x, subsize)->pred_interp_filter =
2265 ctx->mic.mbmi.interp_filter;
2266 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
2267 get_block_context(x, subsize), best_rd);
2268 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2269 if (sum_rd < best_rd && mi_col + mi_step < cm->mi_cols) {
2270 update_state(cpi, get_block_context(x, subsize), mi_row, mi_col,
2272 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
2274 *get_sb_index(x, subsize) = 1;
2275 if (cpi->sf.adaptive_motion_search)
2276 load_pred_mv(x, ctx);
2277 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2278 partition_none_allowed)
2279 get_block_context(x, subsize)->pred_interp_filter =
2280 ctx->mic.mbmi.interp_filter;
2281 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rate,
2282 &this_dist, subsize, get_block_context(x, subsize),
2284 if (this_rate == INT_MAX) {
2287 sum_rate += this_rate;
2288 sum_dist += this_dist;
2289 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2292 if (sum_rd < best_rd) {
2293 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2294 sum_rate += x->partition_cost[pl][PARTITION_VERT];
2295 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2296 if (sum_rd < best_rd) {
2297 best_rate = sum_rate;
2298 best_dist = sum_dist;
2300 *(get_sb_partitioning(x, bsize)) = subsize;
2303 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
2306 // TODO(jbb): This code was added to avoid a static analysis warning
2307 // about best_rd not being used after this point. It should be refactored
2308 // so that the duplicate checks occur in a sub-function and are thus
2309 // used...
2314 if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) {
2315 int output_enabled = (bsize == BLOCK_64X64);
2317 // Check the projected output rate for this SB against its target
2318 // and if necessary apply a Q delta using segmentation to get
2319 // closer to the target.
2320 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
2321 select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, best_rate);
2324 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
2325 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
2326 best_rate, best_dist);
2328 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize);
2330 if (bsize == BLOCK_64X64) {
2331 assert(tp_orig < *tp);
2332 assert(best_rate < INT_MAX);
2333 assert(best_dist < INT64_MAX);
2335 assert(tp_orig == *tp);
2339 static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
2340 int mi_row, TOKENEXTRA **tp) {
2341 VP9_COMMON *const cm = &cpi->common;
2342 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
2345 // Initialize the left context for the new SB row
2346 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
2347 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
2349 // Code each SB in the row
2350 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
2351 mi_col += MI_BLOCK_SIZE) {
2356 MACROBLOCK *x = &cpi->mb;
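// Before searching this SB, clear any interpolation-filter hint cached in
// the sub-8x8 block contexts: adaptive_pred_interp_filter should only reuse
// a filter actually chosen within the current 64x64 block, so stale hints
// must not leak across SBs.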
2358 if (cpi->sf.adaptive_pred_interp_filter) {
2359 for (i = BLOCK_4X4; i < BLOCK_8X8; ++i) {
2360 const int num_4x4_w = num_4x4_blocks_wide_lookup[i];
2361 const int num_4x4_h = num_4x4_blocks_high_lookup[i];
2362 const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h);
2363 for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index)
2364 for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index)
2365 for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index)
2366 get_block_context(x, i)->pred_interp_filter = SWITCHABLE;
2370 vp9_zero(cpi->mb.pred_mv);
2372 if ((cpi->sf.partition_search_type == SEARCH_PARTITION &&
2373 cpi->sf.use_lastframe_partitioning) ||
2374 cpi->sf.partition_search_type == FIXED_PARTITION ||
2375 cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION) {
2376 const int idx_str = cm->mode_info_stride * mi_row + mi_col;
2377 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
2378 MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
2379 cpi->mb.source_variance = UINT_MAX;
2380 if (cpi->sf.partition_search_type == FIXED_PARTITION) {
2381 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
2382 set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col,
2383 cpi->sf.always_this_block_size);
2384 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
2385 &dummy_rate, &dummy_dist, 1);
2386 } else if (cpi->sf.partition_search_type == VAR_BASED_FIXED_PARTITION) {
2388 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
2389 bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
2390 set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize);
2391 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
2392 &dummy_rate, &dummy_dist, 1);
2393 } else if (cpi->sf.partition_search_type == VAR_BASED_PARTITION) {
2394 choose_partitioning(cpi, tile, mi_row, mi_col);
2395 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
2396 &dummy_rate, &dummy_dist, 1);
2398 if ((cm->current_video_frame
2399 % cpi->sf.last_partitioning_redo_frequency) == 0
2401 || cm->show_frame == 0
2402 || cm->frame_type == KEY_FRAME
2403 || cpi->rc.is_src_frame_alt_ref
2404 || ((cpi->sf.use_lastframe_partitioning ==
2405 LAST_FRAME_PARTITION_LOW_MOTION) &&
2406 sb_has_motion(cm, prev_mi_8x8))) {
2407 // If required set upper and lower partition size limits
2408 if (cpi->sf.auto_min_max_partition_size) {
2409 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
2410 rd_auto_partition_range(cpi, tile, mi_row, mi_col,
2411 &cpi->sf.min_partition_size,
2412 &cpi->sf.max_partition_size);
2414 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
2415 &dummy_rate, &dummy_dist, 1, INT64_MAX);
2417 copy_partitioning(cm, mi_8x8, prev_mi_8x8);
2418 rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
2419 &dummy_rate, &dummy_dist, 1);
2423 // If required set upper and lower partition size limits
2424 if (cpi->sf.auto_min_max_partition_size) {
2425 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
2426 rd_auto_partition_range(cpi, tile, mi_row, mi_col,
2427 &cpi->sf.min_partition_size,
2428 &cpi->sf.max_partition_size);
2430 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
2431 &dummy_rate, &dummy_dist, 1, INT64_MAX);
2436 static void init_encode_frame_mb_context(VP9_COMP *cpi) {
2437 MACROBLOCK *const x = &cpi->mb;
2438 VP9_COMMON *const cm = &cpi->common;
2439 MACROBLOCKD *const xd = &x->e_mbd;
2440 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
2442 x->act_zbin_adj = 0;
2445 xd->mode_info_stride = cm->mode_info_stride;
2447 // Copy data over into macroblock data structures.
2448 vp9_setup_src_planes(x, cpi->Source, 0, 0);
2450 // TODO(jkoleszar): are these initializations required?
2451 vp9_setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0,
2453 vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0);
2455 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
2457 xd->mi_8x8[0]->mbmi.mode = DC_PRED;
2458 xd->mi_8x8[0]->mbmi.uv_mode = DC_PRED;
2460 vp9_zero(cm->counts.y_mode);
2461 vp9_zero(cm->counts.uv_mode);
2462 vp9_zero(cm->counts.inter_mode);
2463 vp9_zero(cm->counts.partition);
2464 vp9_zero(cm->counts.intra_inter);
2465 vp9_zero(cm->counts.comp_inter);
2466 vp9_zero(cm->counts.single_ref);
2467 vp9_zero(cm->counts.comp_ref);
2468 vp9_zero(cm->counts.tx);
2469 vp9_zero(cm->counts.skip);
2471 // Note: this memset assumes above_context[0], [1] and [2]
2472 // are allocated as part of the same buffer.
2473 vpx_memset(xd->above_context[0], 0,
2474 sizeof(*xd->above_context[0]) *
2475 2 * aligned_mi_cols * MAX_MB_PLANE);
2476 vpx_memset(xd->above_seg_context, 0,
2477 sizeof(*xd->above_seg_context) * aligned_mi_cols);
2480 static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
2482 // printf("Switching to lossless\n");
2483 cpi->mb.fwd_txm4x4 = vp9_fwht4x4;
2484 cpi->mb.e_mbd.itxm_add = vp9_iwht4x4_add;
2485 cpi->mb.optimize = 0;
2486 cpi->common.lf.filter_level = 0;
2487 cpi->zbin_mode_boost_enabled = 0;
2488 cpi->common.tx_mode = ONLY_4X4;
2490 // printf("Not lossless\n");
2491 cpi->mb.fwd_txm4x4 = vp9_fdct4x4;
2492 cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add;
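// In lossless mode the 4x4 Walsh-Hadamard pair (vp9_fwht4x4 /
// vp9_iwht4x4_add) replaces the DCT so the transform round-trips exactly;
// trellis optimization and loop filtering are disabled above because
// reconstruction must stay bit-exact.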
2496 static int check_dual_ref_flags(VP9_COMP *cpi) {
2497 const int ref_flags = cpi->ref_frame_flags;
2499 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
2502 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
2503 + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
2507 static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
2510 for (y = 0; y < ymbs; y++) {
2511 for (x = 0; x < xmbs; x++) {
2512 if (!mi_8x8[y * mis + x]->mbmi.skip)
2520 static void reset_skip_txfm_size(VP9_COMMON *cm, TX_SIZE txfm_max) {
2522 const int mis = cm->mode_info_stride;
2523 MODE_INFO **mi_ptr = cm->mi_grid_visible;
2525 for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
2526 for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
2527 if (mi_ptr[mi_col]->mbmi.tx_size > txfm_max)
2528 mi_ptr[mi_col]->mbmi.tx_size = txfm_max;
2533 static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
2534 if (frame_is_intra_only(&cpi->common))
2536 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
2537 return ALTREF_FRAME;
2538 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
2541 return GOLDEN_FRAME;
2544 static TX_MODE select_tx_mode(const VP9_COMP *cpi) {
2545 if (cpi->oxcf.lossless) {
2547 } else if (cpi->common.current_video_frame == 0) {
2548 return TX_MODE_SELECT;
2550 if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
2552 } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
2553 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
2554 return cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >
2555 cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
2556 ALLOW_32X32 : TX_MODE_SELECT;
2558 unsigned int total = 0;
2560 for (i = 0; i < TX_SIZES; ++i)
2561 total += cpi->tx_stepdown_count[i];
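// tx_stepdown_count[0] appears to count blocks that took no transform-size
// stepdown; if 90%+ of blocks kept the largest usable size, per-block
// tx_size signalling is unlikely to pay for itself, so prefer ALLOW_32X32
// over TX_MODE_SELECT.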
2564 const double fraction = (double)cpi->tx_stepdown_count[0] / total;
2565 return fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
2567 return cpi->common.tx_mode;
2573 // Start RTC Exploration
2574 typedef enum {
2575 BOTH_ZERO = 0,
2576 ZERO_PLUS_PREDICTED = 1,
2577 BOTH_PREDICTED = 2,
2578 NEW_PLUS_NON_INTRA = 3,
2579 BOTH_NEW = 4,
2580 INTRA_PLUS_NON_INTRA = 5,
2581 BOTH_INTRA = 6,
2582 INVALID_CASE = 9
2583 } motion_vector_context;
2585 static void set_mode_info(MB_MODE_INFO *mbmi, BLOCK_SIZE bsize,
2586 MB_PREDICTION_MODE mode) {
2588 mbmi->uv_mode = mode;
2589 mbmi->mv[0].as_int = 0;
2590 mbmi->mv[1].as_int = 0;
2591 mbmi->ref_frame[0] = INTRA_FRAME;
2592 mbmi->ref_frame[1] = NONE;
2593 mbmi->tx_size = max_txsize_lookup[bsize];
2595 mbmi->sb_type = bsize;
2596 mbmi->segment_id = 0;
2599 static void nonrd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
2600 int mi_row, int mi_col,
2601 int *rate, int64_t *dist,
2603 VP9_COMMON *const cm = &cpi->common;
2604 MACROBLOCK *const x = &cpi->mb;
2605 MACROBLOCKD *const xd = &x->e_mbd;
2606 set_offsets(cpi, tile, mi_row, mi_col, bsize);
2607 xd->mi_8x8[0]->mbmi.sb_type = bsize;
2609 if (!frame_is_intra_only(cm)) {
2610 vp9_pick_inter_mode(cpi, x, tile, mi_row, mi_col,
2613 MB_PREDICTION_MODE intramode = DC_PRED;
2614 set_mode_info(&xd->mi_8x8[0]->mbmi, bsize, intramode);
2616 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2619 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
2620 int mi_row, int mi_col,
2621 BLOCK_SIZE bsize, BLOCK_SIZE subsize) {
2622 MACROBLOCKD *xd = &x->e_mbd;
2623 int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
2624 PARTITION_TYPE partition = partition_lookup[bsl][subsize];
2626 assert(bsize >= BLOCK_8X8);
2628 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
2631 switch (partition) {
2632 case PARTITION_NONE:
2633 set_modeinfo_offsets(cm, xd, mi_row, mi_col);
2634 *(xd->mi_8x8[0]) = (get_block_context(x, subsize))->mic;
2635 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2637 case PARTITION_VERT:
2638 *get_sb_index(x, subsize) = 0;
2639 set_modeinfo_offsets(cm, xd, mi_row, mi_col);
2640 *(xd->mi_8x8[0]) = (get_block_context(x, subsize))->mic;
2641 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2643 if (mi_col + hbs < cm->mi_cols) {
2644 *get_sb_index(x, subsize) = 1;
2645 set_modeinfo_offsets(cm, xd, mi_row, mi_col + hbs);
2646 *(xd->mi_8x8[0]) = (get_block_context(x, subsize))->mic;
2647 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, bsize);
2650 case PARTITION_HORZ:
2651 *get_sb_index(x, subsize) = 0;
2652 set_modeinfo_offsets(cm, xd, mi_row, mi_col);
2653 *(xd->mi_8x8[0]) = (get_block_context(x, subsize))->mic;
2654 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2655 if (mi_row + hbs < cm->mi_rows) {
2656 *get_sb_index(x, subsize) = 1;
2657 set_modeinfo_offsets(cm, xd, mi_row + hbs, mi_col);
2658 *(xd->mi_8x8[0]) = (get_block_context(x, subsize))->mic;
2659 duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, bsize);
2662 case PARTITION_SPLIT:
2663 *get_sb_index(x, subsize) = 0;
2664 fill_mode_info_sb(cm, x, mi_row, mi_col, subsize,
2665 *(get_sb_partitioning(x, subsize)));
2666 *get_sb_index(x, subsize) = 1;
2667 fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
2668 *(get_sb_partitioning(x, subsize)));
2669 *get_sb_index(x, subsize) = 2;
2670 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
2671 *(get_sb_partitioning(x, subsize)));
2672 *get_sb_index(x, subsize) = 3;
2673 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
2674 *(get_sb_partitioning(x, subsize)));
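// The four recursive calls cover the split in raster order
// (0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right),
// matching the (i & 1, i >> 1) sub-block indexing used by the partition
// search.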
2681 static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
2682 TOKENEXTRA **tp, int mi_row,
2683 int mi_col, BLOCK_SIZE bsize, int *rate,
2684 int64_t *dist, int do_recon, int64_t best_rd) {
2685 VP9_COMMON *const cm = &cpi->common;
2686 MACROBLOCK *const x = &cpi->mb;
2687 MACROBLOCKD *const xd = &x->e_mbd;
2688 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
2689 TOKENEXTRA *tp_orig = *tp;
2690 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize);
2693 int this_rate, sum_rate = 0, best_rate = INT_MAX;
2694 int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
2696 int do_split = bsize >= BLOCK_8X8;
2698 // Override skipping rectangular partition operations for edge blocks
2699 const int force_horz_split = (mi_row + ms >= cm->mi_rows);
2700 const int force_vert_split = (mi_col + ms >= cm->mi_cols);
2701 const int xss = x->e_mbd.plane[1].subsampling_x;
2702 const int yss = x->e_mbd.plane[1].subsampling_y;
2704 int partition_none_allowed = !force_horz_split && !force_vert_split;
2705 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2707 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2711 if (bsize < BLOCK_8X8) {
2712 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
2713 // there is nothing to be done.
2714 if (x->ab_index != 0) {
2721 assert(num_8x8_blocks_wide_lookup[bsize] ==
2722 num_8x8_blocks_high_lookup[bsize]);
2724 // Determine partition types in search according to the speed features.
2725 // The thresholds set here must be square block sizes.
2726 if (cpi->sf.auto_min_max_partition_size) {
2727 partition_none_allowed &= (bsize <= cpi->sf.max_partition_size &&
2728 bsize >= cpi->sf.min_partition_size);
2729 partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size &&
2730 bsize > cpi->sf.min_partition_size) ||
2732 partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size &&
2733 bsize > cpi->sf.min_partition_size) ||
2735 do_split &= bsize > cpi->sf.min_partition_size;
2737 if (cpi->sf.use_square_partition_only) {
2738 partition_horz_allowed &= force_horz_split;
2739 partition_vert_allowed &= force_vert_split;
2742 if (!x->in_active_map && (partition_horz_allowed || partition_vert_allowed))
2746 if (partition_none_allowed) {
2747 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
2748 &this_rate, &this_dist, bsize);
2749 ctx->mic.mbmi = xd->mi_8x8[0]->mbmi;
2751 if (this_rate != INT_MAX) {
2752 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2753 this_rate += x->partition_cost[pl][PARTITION_NONE];
2754 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
2755 if (sum_rd < best_rd) {
2756 int64_t stop_thresh = 4096;
2757 int64_t stop_thresh_rd;
2759 best_rate = this_rate;
2760 best_dist = this_dist;
2762 if (bsize >= BLOCK_8X8)
2763 *(get_sb_partitioning(x, bsize)) = bsize;
2765 // Adjust threshold according to partition size.
2766 stop_thresh >>= 8 - (b_width_log2_lookup[bsize] +
2767 b_height_log2_lookup[bsize]);
2769 stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh);
2770 // If obtained distortion is very small, choose current partition
2771 // and stop splitting.
2772 if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) {
2778 if (!x->in_active_map) {
2784 // Store the estimated motion vector.
2785 store_pred_mv(x, ctx);
2790 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2791 sum_rate += x->partition_cost[pl][PARTITION_SPLIT];
2792 subsize = get_subsize(bsize, PARTITION_SPLIT);
2793 for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
2794 const int x_idx = (i & 1) * ms;
2795 const int y_idx = (i >> 1) * ms;
2797 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
2800 *get_sb_index(x, subsize) = i;
2801 load_pred_mv(x, ctx);
2803 nonrd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
2804 subsize, &this_rate, &this_dist, 0,
2807 if (this_rate == INT_MAX) {
2810 sum_rate += this_rate;
2811 sum_dist += this_dist;
2812 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2816 if (sum_rd < best_rd) {
2817 best_rate = sum_rate;
2818 best_dist = sum_dist;
2820 *(get_sb_partitioning(x, bsize)) = subsize;
2822 // Skip the rectangular partition test when a larger block size
2823 // gives a better RD cost.
2824 if (cpi->sf.less_rectangular_check)
2825 do_rect &= !partition_none_allowed;
2830 if (partition_horz_allowed && do_rect) {
2831 subsize = get_subsize(bsize, PARTITION_HORZ);
2832 *get_sb_index(x, subsize) = 0;
2833 if (cpi->sf.adaptive_motion_search)
2834 load_pred_mv(x, ctx);
2836 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
2837 &this_rate, &this_dist, subsize);
2839 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
2841 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2843 if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) {
2844 *get_sb_index(x, subsize) = 1;
2846 load_pred_mv(x, ctx);
2848 nonrd_pick_sb_modes(cpi, tile, mi_row + ms, mi_col,
2849 &this_rate, &this_dist, subsize);
2851 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
2853 if (this_rate == INT_MAX) {
2856 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2857 this_rate += x->partition_cost[pl][PARTITION_HORZ];
2858 sum_rate += this_rate;
2859 sum_dist += this_dist;
2860 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2863 if (sum_rd < best_rd) {
2865 best_rate = sum_rate;
2866 best_dist = sum_dist;
2867 *(get_sb_partitioning(x, bsize)) = subsize;
2872 if (partition_vert_allowed && do_rect) {
2873 subsize = get_subsize(bsize, PARTITION_VERT);
2875 *get_sb_index(x, subsize) = 0;
2876 if (cpi->sf.adaptive_motion_search)
2877 load_pred_mv(x, ctx);
2879 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
2880 &this_rate, &this_dist, subsize);
2881 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
2882 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2883 if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) {
2884 *get_sb_index(x, subsize) = 1;
2886 load_pred_mv(x, ctx);
2888 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + ms,
2889 &this_rate, &this_dist, subsize);
2891 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
2893 if (this_rate == INT_MAX) {
2896 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2897 this_rate += x->partition_cost[pl][PARTITION_VERT];
2898 sum_rate += this_rate;
2899 sum_dist += this_dist;
2900 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
2903 if (sum_rd < best_rd) {
2904 best_rate = sum_rate;
2905 best_dist = sum_dist;
2907 *(get_sb_partitioning(x, bsize)) = subsize;
2914 if (best_rate == INT_MAX)
2917 // update mode info array
2918 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize,
2919 *(get_sb_partitioning(x, bsize)));
2921 if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) {
2922 int output_enabled = (bsize == BLOCK_64X64);
2924 // Check the projected output rate for this SB against its target
2925 // and if necessary apply a Q delta using segmentation to get
2926 // closer to the target.
2927 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
2928 select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, best_rate);
2931 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
2932 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
2933 best_rate, best_dist);
2935 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize);
2938 if (bsize == BLOCK_64X64) {
2939 assert(tp_orig < *tp);
2940 assert(best_rate < INT_MAX);
2941 assert(best_dist < INT64_MAX);
2943 assert(tp_orig == *tp);
2947 static void nonrd_use_partition(VP9_COMP *cpi,
2948 const TileInfo *const tile,
2951 int mi_row, int mi_col,
2952 BLOCK_SIZE bsize, int output_enabled,
2953 int *totrate, int64_t *totdist) {
2954 VP9_COMMON *const cm = &cpi->common;
2955 MACROBLOCK *const x = &cpi->mb;
2956 MACROBLOCKD *const xd = &x->e_mbd;
2957 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
2958 const int mis = cm->mode_info_stride;
2959 PARTITION_TYPE partition;
2962 int64_t dist = INT64_MAX;
2964 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
2967 if (bsize >= BLOCK_8X8) {
2968 subsize = mi_8x8[0]->mbmi.sb_type;
2970 subsize = BLOCK_4X4;
2973 partition = partition_lookup[bsl][subsize];
2975 switch (partition) {
2976 case PARTITION_NONE:
2977 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize);
2978 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
2980 case PARTITION_VERT:
2981 *get_sb_index(x, subsize) = 0;
2982 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize);
2983 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
2984 if (mi_col + hbs < cm->mi_cols) {
2985 *get_sb_index(x, subsize) = 1;
2986 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + hbs,
2987 &rate, &dist, subsize);
2988 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
2989 if (rate != INT_MAX && dist != INT64_MAX &&
2990 *totrate != INT_MAX && *totdist != INT64_MAX) {
2996 case PARTITION_HORZ:
2997 *get_sb_index(x, subsize) = 0;
2998 nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist, subsize);
2999 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
3000 if (mi_row + hbs < cm->mi_rows) {
3001 *get_sb_index(x, subsize) = 1;
3002 nonrd_pick_sb_modes(cpi, tile, mi_row + hbs, mi_col,
3003 &rate, &dist, subsize);
3004 (get_block_context(x, subsize))->mic.mbmi = xd->mi_8x8[0]->mbmi;
3005 if (rate != INT_MAX && dist != INT64_MAX &&
3006 *totrate != INT_MAX && *totdist != INT64_MAX) {
3012 case PARTITION_SPLIT:
3013 subsize = get_subsize(bsize, PARTITION_SPLIT);
3014 *get_sb_index(x, subsize) = 0;
3015 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col,
3016 subsize, output_enabled, totrate, totdist);
3017 *get_sb_index(x, subsize) = 1;
3018 nonrd_use_partition(cpi, tile, mi_8x8 + hbs, tp,
3019 mi_row, mi_col + hbs, subsize, output_enabled,
3021 if (rate != INT_MAX && dist != INT64_MAX &&
3022 *totrate != INT_MAX && *totdist != INT64_MAX) {
3026 *get_sb_index(x, subsize) = 2;
3027 nonrd_use_partition(cpi, tile, mi_8x8 + hbs * mis, tp,
3028 mi_row + hbs, mi_col, subsize, output_enabled,
3030 if (rate != INT_MAX && dist != INT64_MAX &&
3031 *totrate != INT_MAX && *totdist != INT64_MAX) {
3035 *get_sb_index(x, subsize) = 3;
3036 nonrd_use_partition(cpi, tile, mi_8x8 + hbs * mis + hbs, tp,
3037 mi_row + hbs, mi_col + hbs, subsize, output_enabled,
3039 if (rate != INT_MAX && dist != INT64_MAX &&
3040 *totrate != INT_MAX && *totdist != INT64_MAX) {
3046 assert(0 && "Invalid partition type.");
3049 if (bsize == BLOCK_64X64 && output_enabled) {
3050 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
3051 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
3052 *totrate, *totdist);
3053 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, bsize);
3057 static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
3058 int mi_row, TOKENEXTRA **tp) {
3059 VP9_COMMON *cm = &cpi->common;
3060 MACROBLOCKD *xd = &cpi->mb.e_mbd;
3063 // Initialize the left context for the new SB row
3064 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
3065 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
3067 // Code each SB in the row
3068 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
3069 mi_col += MI_BLOCK_SIZE) {
3071 int64_t dummy_dist = 0;
3072 const int idx_str = cm->mode_info_stride * mi_row + mi_col;
3073 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
3074 MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
3076 BLOCK_SIZE bsize = cpi->sf.partition_search_type == FIXED_PARTITION ?
3077 cpi->sf.always_this_block_size :
3078 get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
3080 cpi->mb.source_variance = UINT_MAX;
3081 vp9_zero(cpi->mb.pred_mv);
3083 // Set the partition type of the 64X64 block
3084 switch (cpi->sf.partition_search_type) {
3085 case VAR_BASED_PARTITION:
3086 choose_partitioning(cpi, tile, mi_row, mi_col);
3087 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
3088 1, &dummy_rate, &dummy_dist);
3090 case VAR_BASED_FIXED_PARTITION:
3091 case FIXED_PARTITION:
3092 set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize);
3093 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
3094 1, &dummy_rate, &dummy_dist);
3096 case REFERENCE_PARTITION:
3097 if (cpi->sf.partition_check || sb_has_motion(cm, prev_mi_8x8)) {
3098 nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
3099 &dummy_rate, &dummy_dist, 1, INT64_MAX);
3101 copy_partitioning(cm, mi_8x8, prev_mi_8x8);
3102 nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col,
3103 BLOCK_64X64, 1, &dummy_rate, &dummy_dist);
3111 // End RTC Exploration
3113 static void encode_frame_internal(VP9_COMP *cpi) {
3115 MACROBLOCK *const x = &cpi->mb;
3116 VP9_COMMON *const cm = &cpi->common;
3117 MACROBLOCKD *const xd = &x->e_mbd;
3119 // fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
3120 // cpi->common.current_video_frame, cpi->common.show_frame,
3123 vp9_zero(cm->counts.switchable_interp);
3124 vp9_zero(cpi->tx_stepdown_count);
3126 xd->mi_8x8 = cm->mi_grid_visible;
3127 // required for vp9_frame_init_quantizer
3128 xd->mi_8x8[0] = cm->mi;
3130 vp9_zero(cm->counts.mv);
3131 vp9_zero(cpi->coef_counts);
3132 vp9_zero(cm->counts.eob_branch);
3134 // Set the frame-level transform size mode.
3135 cm->tx_mode = select_tx_mode(cpi);
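// Lossless operation is only possible when the base Q index and all DC/AC
// quantizer deltas are zero; switch_lossless_mode() then swaps in the
// reversible WHT transform pair.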
3137 cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
3138 && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
3139 switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);
3141 vp9_frame_init_quantizer(cpi);
3143 vp9_initialize_rd_consts(cpi);
3144 vp9_initialize_me_consts(cpi, cm->base_qindex);
3146 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
3147 // Initialize encode frame context.
3148 init_encode_frame_mb_context(cpi);
3150 // Build a frame level activity map
3151 build_activity_map(cpi);
3154 // Re-initialize encode frame context.
3155 init_encode_frame_mb_context(cpi);
3157 vp9_zero(cpi->rd_comp_pred_diff);
3158 vp9_zero(cpi->rd_filter_diff);
3159 vp9_zero(cpi->rd_tx_select_diff);
3160 vp9_zero(cpi->rd_tx_select_threshes);
3164 if (cpi->sf.use_nonrd_pick_mode) {
3165 // Initialize internal buffer pointers for rtc coding, where non-RD
3166 // mode decision is used and hence no buffer pointer swap is needed.
3168 struct macroblock_plane *const p = x->plane;
3169 struct macroblockd_plane *const pd = xd->plane;
3170 PICK_MODE_CONTEXT *ctx = &cpi->mb.sb64_context;
3172 for (i = 0; i < MAX_MB_PLANE; ++i) {
3173 p[i].coeff = ctx->coeff_pbuf[i][0];
3174 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
3175 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
3176 p[i].eobs = ctx->eobs_pbuf[i][0];
3178 vp9_zero(x->zcoeff_blk);
3182 struct vpx_usec_timer emr_timer;
3183 vpx_usec_timer_start(&emr_timer);
3186 // Take tiles into account and give the start/end MB for each tile.
3187 int tile_col, tile_row;
3188 TOKENEXTRA *tp = cpi->tok;
3189 const int tile_cols = 1 << cm->log2_tile_cols;
3190 const int tile_rows = 1 << cm->log2_tile_rows;
3192 for (tile_row = 0; tile_row < tile_rows; tile_row++) {
3193 for (tile_col = 0; tile_col < tile_cols; tile_col++) {
3195 TOKENEXTRA *tp_old = tp;
3197 // For each row of SBs in the frame
3198 vp9_tile_init(&tile, cm, tile_row, tile_col);
3199 for (mi_row = tile.mi_row_start;
3200 mi_row < tile.mi_row_end; mi_row += MI_BLOCK_SIZE) {
3201 if (cpi->sf.use_nonrd_pick_mode && cm->frame_type != KEY_FRAME)
3202 encode_nonrd_sb_row(cpi, &tile, mi_row, &tp);
3204 encode_rd_sb_row(cpi, &tile, mi_row, &tp);
3206 cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
3207 assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols));
3212 vpx_usec_timer_mark(&emr_timer);
3213 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
3216 if (cpi->sf.skip_encode_sb) {
3218 unsigned int intra_count = 0, inter_count = 0;
3219 for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
3220 intra_count += cm->counts.intra_inter[j][0];
3221 inter_count += cm->counts.intra_inter[j][1];
3223 cpi->sf.skip_encode_frame = (intra_count << 2) < inter_count &&
3224 cm->frame_type != KEY_FRAME &&
3227 cpi->sf.skip_encode_frame = 0;
3231 // Keep record of the total distortion this time around for future use
3232 cpi->last_frame_distortion = cpi->frame_distortion;
3236 void vp9_encode_frame(VP9_COMP *cpi) {
3237 VP9_COMMON *const cm = &cpi->common;
3239 // In the longer term the encoder should be generalized to match the
3240 // decoder such that we allow compound where one of the 3 buffers has a
3241 // different sign bias and that buffer is then the fixed ref. However, this
3242 // requires further work in the rd loop. For now the only supported encoder
3243 // side behavior is where the ALT ref buffer has opposite sign bias to
3244 // the other two.
3245 if (!frame_is_intra_only(cm)) {
3246 if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
3247 cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
3248 (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
3249 cm->ref_frame_sign_bias[LAST_FRAME])) {
3250 cm->allow_comp_inter_inter = 0;
3252 cm->allow_comp_inter_inter = 1;
3253 cm->comp_fixed_ref = ALTREF_FRAME;
3254 cm->comp_var_ref[0] = LAST_FRAME;
3255 cm->comp_var_ref[1] = GOLDEN_FRAME;
3259 if (cpi->sf.frame_parameter_update) {
3261 REFERENCE_MODE reference_mode;
3262 /*
3263 * This code does a single RD pass over the whole frame assuming
3264 * either compound, single or hybrid prediction as per whatever has
3265 * worked best for that type of frame in the past.
3266 * It also predicts whether another coding mode would have worked
3267 * better than this coding mode. If that is the case, it remembers
3268 * that for subsequent frames.
3269 * It does the same analysis for transform size selection also.
3270 */
3271 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
3272 const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type];
3273 const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type];
3275 /* prediction (compound, single or hybrid) mode selection */
3276 if (frame_type == ALTREF_FRAME || !cm->allow_comp_inter_inter)
3277 reference_mode = SINGLE_REFERENCE;
3278 else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] &&
3279 mode_thresh[COMPOUND_REFERENCE] >
3280 mode_thresh[REFERENCE_MODE_SELECT] &&
3281 check_dual_ref_flags(cpi) &&
3282 cpi->static_mb_pct == 100)
3283 reference_mode = COMPOUND_REFERENCE;
3284 else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT])
3285 reference_mode = SINGLE_REFERENCE;
3287 reference_mode = REFERENCE_MODE_SELECT;
3289 if (cm->interp_filter == SWITCHABLE) {
3290 if (frame_type != ALTREF_FRAME &&
3291 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] &&
3292 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] &&
3293 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) {
3294 cm->interp_filter = EIGHTTAP_SMOOTH;
3295 } else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] &&
3296 filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) {
3297 cm->interp_filter = EIGHTTAP_SHARP;
3298 } else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) {
3299 cm->interp_filter = EIGHTTAP;
3303 cpi->mb.e_mbd.lossless = cpi->oxcf.lossless;
3304 cm->reference_mode = reference_mode;
3306 encode_frame_internal(cpi);
3308 for (i = 0; i < REFERENCE_MODES; ++i) {
3309 const int diff = (int) (cpi->rd_comp_pred_diff[i] / cm->MBs);
3310 cpi->rd_prediction_type_threshes[frame_type][i] += diff;
3311 cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
3314 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
3315 const int64_t diff = cpi->rd_filter_diff[i] / cm->MBs;
3316 cpi->rd_filter_threshes[frame_type][i] =
3317 (cpi->rd_filter_threshes[frame_type][i] + diff) / 2;
3320 for (i = 0; i < TX_MODES; ++i) {
3321 int64_t pd = cpi->rd_tx_select_diff[i];
3323 if (i == TX_MODE_SELECT)
3324 pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZES - 1), 0);
3325 diff = (int) (pd / cm->MBs);
3326 cpi->rd_tx_select_threshes[frame_type][i] += diff;
3327 cpi->rd_tx_select_threshes[frame_type][i] /= 2;
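// Each threshold update above is a running average: fold in this frame's
// per-MB rd difference, then halve, so older frames decay exponentially.
// TX_MODE_SELECT is additionally handicapped by an estimated per-frame
// tx_size signalling rate of 2048 * (TX_SIZES - 1) before its diff is
// folded in.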
3330 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
3331 int single_count_zero = 0;
3332 int comp_count_zero = 0;
3334 for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
3335 single_count_zero += cm->counts.comp_inter[i][0];
3336 comp_count_zero += cm->counts.comp_inter[i][1];
3339 if (comp_count_zero == 0) {
3340 cm->reference_mode = SINGLE_REFERENCE;
3341 vp9_zero(cm->counts.comp_inter);
3342 } else if (single_count_zero == 0) {
3343 cm->reference_mode = COMPOUND_REFERENCE;
3344 vp9_zero(cm->counts.comp_inter);
3348 if (cm->tx_mode == TX_MODE_SELECT) {
3350 int count8x8_lp = 0, count8x8_8x8p = 0;
3351 int count16x16_16x16p = 0, count16x16_lp = 0;
3354 for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
3355 count4x4 += cm->counts.tx.p32x32[i][TX_4X4];
3356 count4x4 += cm->counts.tx.p16x16[i][TX_4X4];
3357 count4x4 += cm->counts.tx.p8x8[i][TX_4X4];
3359 count8x8_lp += cm->counts.tx.p32x32[i][TX_8X8];
3360 count8x8_lp += cm->counts.tx.p16x16[i][TX_8X8];
3361 count8x8_8x8p += cm->counts.tx.p8x8[i][TX_8X8];
3363 count16x16_16x16p += cm->counts.tx.p16x16[i][TX_16X16];
3364 count16x16_lp += cm->counts.tx.p32x32[i][TX_16X16];
3365 count32x32 += cm->counts.tx.p32x32[i][TX_32X32];
3368 if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
3370 cm->tx_mode = ALLOW_8X8;
3371 reset_skip_txfm_size(cm, TX_8X8);
3372 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
3373 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
3374 cm->tx_mode = ONLY_4X4;
3375 reset_skip_txfm_size(cm, TX_4X4);
3376 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
3377 cm->tx_mode = ALLOW_32X32;
3378 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
3379 cm->tx_mode = ALLOW_16X16;
3380 reset_skip_txfm_size(cm, TX_16X16);
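// The *_lp counters tally a transform size chosen where a larger one was
// available (e.g. count8x8_lp sums 8x8 usage from the p16x16 and p32x32
// contexts), while count8x8_8x8p / count16x16_16x16p count usage at the
// maximum possible size. When some size is never used, the frame-level
// tx_mode is tightened and reset_skip_txfm_size() caps any recorded
// tx_size above the new maximum.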
3384 cpi->mb.e_mbd.lossless = cpi->oxcf.lossless;
3385 cm->reference_mode = SINGLE_REFERENCE;
3386 // Force the usage of the BILINEAR interp_filter.
3387 cm->interp_filter = BILINEAR;
3388 encode_frame_internal(cpi);
3392 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
3393 const MB_PREDICTION_MODE y_mode = mi->mbmi.mode;
3394 const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
3395 const BLOCK_SIZE bsize = mi->mbmi.sb_type;
3397 if (bsize < BLOCK_8X8) {
3399 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
3400 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
3401 for (idy = 0; idy < 2; idy += num_4x4_h)
3402 for (idx = 0; idx < 2; idx += num_4x4_w)
3403 ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
3405 ++counts->y_mode[size_group_lookup[bsize]][y_mode];
3408 ++counts->uv_mode[y_mode][uv_mode];
3411 // Experimental stub function to create a per MB zbin adjustment based on
3412 // some previously calculated measure of MB activity.
3413 static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
3415 x->act_zbin_adj = *(x->mb_activity_ptr);
3417 // Apply the masking to the RD multiplier.
3418 const int64_t act = *(x->mb_activity_ptr);
3419 const int64_t a = act + 4 * cpi->activity_avg;
3420 const int64_t b = 4 * act + cpi->activity_avg;
3422 if (act > cpi->activity_avg)
3423 x->act_zbin_adj = (int) (((int64_t) b + (a >> 1)) / a) - 1;
3425 x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b);
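// With a = act + 4 * avg and b = 4 * act + avg, the ratio b / a grows with
// act / avg: busier-than-average MBs get a positive zbin adjustment (a
// coarser dead-zone) and flatter MBs a negative one; at act ==
// activity_avg both ratios are 1 and the adjustment is zero.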
3429 static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) {
3431 if (is_inter_block(mbmi)) {
3432 if (mbmi->mode == ZEROMV) {
3433 return mbmi->ref_frame[0] != LAST_FRAME ? GF_ZEROMV_ZBIN_BOOST
3434 : LF_ZEROMV_ZBIN_BOOST;
3436 return mbmi->sb_type < BLOCK_8X8 ? SPLIT_MV_ZBIN_BOOST
3440 return INTRA_ZBIN_BOOST;
3447 static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
3448 int mi_row, int mi_col, BLOCK_SIZE bsize) {
3449 VP9_COMMON *const cm = &cpi->common;
3450 MACROBLOCK *const x = &cpi->mb;
3451 MACROBLOCKD *const xd = &x->e_mbd;
3452 MODE_INFO **mi_8x8 = xd->mi_8x8;
3453 MODE_INFO *mi = mi_8x8[0];
3454 MB_MODE_INFO *mbmi = &mi->mbmi;
3455 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize);
3456 unsigned int segment_id = mbmi->segment_id;
3457 const int mis = cm->mode_info_stride;
3458 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
3459 const int mi_height = num_8x8_blocks_high_lookup[bsize];
3461 x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8 &&
3462 cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
3463 cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
3464 cpi->sf.allow_skip_recode;
3466 x->skip_optimize = ctx->is_coded;
3468 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
3469 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
3470 x->q_index < QIDX_SKIP_THRESH);
3475 if (cm->frame_type == KEY_FRAME) {
3476 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
3477 adjust_act_zbin(cpi, x);
3478 vp9_update_zbin_extra(cpi, x);
3481 set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
3482 xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter);
3484 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
3485 // Adjust the zbin based on this MB rate.
3486 adjust_act_zbin(cpi, x);
3489 // Experimental code. Special case for gf and arf zeromv modes.
3490 // Increase zbin size to suppress noise
3491 cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi,
3492 cpi->zbin_mode_boost_enabled);
3493 vp9_update_zbin_extra(cpi, x);
3496 if (!is_inter_block(mbmi)) {
3499 for (plane = 0; plane < MAX_MB_PLANE; ++plane)
3500 vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);
3502 sum_intra_stats(&cm->counts, mi);
3503 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
3506 const int is_compound = has_second_ref(mbmi);
3507 for (ref = 0; ref < 1 + is_compound; ++ref) {
3508 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
3509 mbmi->ref_frame[ref]);
3510 vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
3511 &xd->block_refs[ref]->sf);
3513 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
3517 vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
3518 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
3522 cm->counts.skip[vp9_get_skip_context(xd)][1]++;
3523 reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
3527 if (output_enabled) {
3528 if (cm->tx_mode == TX_MODE_SELECT &&
3529 mbmi->sb_type >= BLOCK_8X8 &&
3530 !(is_inter_block(mbmi) &&
3532 vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) {
3533 ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd),
3534 &cm->counts.tx)[mbmi->tx_size];
3538 // The new intra coding scheme requires no change of transform size
3539 if (is_inter_block(&mi->mbmi)) {
3540 tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
3541 max_txsize_lookup[bsize]);
3543 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
3546 for (y = 0; y < mi_height; y++)
3547 for (x = 0; x < mi_width; x++)
3548 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
3549 mi_8x8[mis * y + x]->mbmi.tx_size = tx_size;