/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
#define GF_ZEROMV_ZBIN_BOOST 0
#define LF_ZEROMV_ZBIN_BOOST 0
#define MV_ZBIN_BOOST 0
#define SPLIT_MV_ZBIN_BOOST 0
#define INTRA_ZBIN_BOOST 0
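// Note: all of these zbin boosts are currently set to 0, so no quantizer
// zero-bin widening is applied for any of the cases above.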
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);
// Motion vector component magnitude threshold for defining fast motion.
#define FAST_MOTION_MV_THRESH 24
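// MV components are stored in eighth-pel units, so a threshold of 24
// corresponds to a magnitude of 3 full pixels.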
// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};
static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi,
                                              const struct buf_2d *ref,
                                              BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
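// The vf() call returns the variance of the whole block; the shift by
// num_pels_log2_lookup[bs] normalizes it to a per-pixel value. For a 64x64
// block, for example, the lookup value is 12, so the block variance is
// divided by 4096 with rounding.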
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);
  const uint8_t *last_y = &last->y_buffer[mi_row * MI_SIZE * last->y_stride +
                                          mi_col * MI_SIZE];
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi,
                                                   int mi_row, int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);

static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
                                                      int mi_row, int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_modeinfo_offsets(VP9_COMMON *const cm,
                                        MACROBLOCKD *const xd,
                                        int mi_row, int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
}
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        int mi_row, int mi_col, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  set_modeinfo_offsets(cm, xd, mi_row, mi_col);

  mbmi = &xd->mi[0]->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond this range do not produce a new or different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
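  // These limits are in pixel units; VP9_INTERP_EXTEND widens the range by
  // the border pixels needed by the subpel interpolation filter taps.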
  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;

  // Set up segment ID.
  if (seg->enabled) {
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}
static void duplicate_mode_info_in_sb(VP9_COMMON *const cm,
                                      MACROBLOCKD *const xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  int i, j;
  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) {
      if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
        xd->mi[j * xd->mi_stride + i] = xd->mi[0];
    }
}
static void set_block_size(VP9_COMP *const cpi,
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    MACROBLOCKD *const xd = &cpi->mb.e_mbd;
    set_modeinfo_offsets(&cpi->common, xd, mi_row, mi_col);
    xd->mi[0]->mbmi.sb_type = bsize;
    duplicate_mode_info_in_sb(&cpi->common, xd, mi_row, mi_col, bsize);
  }
}
  int64_t sum_square_error;
} partition_variance;

  partition_variance part_variances;

  partition_variance part_variances;

  partition_variance part_variances;

  partition_variance part_variances;

  partition_variance *part_variances;
static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  vpx_memset(node->split, 0, sizeof(node->split));
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];
      break;
    }
    default: {
      assert(0);
      break;
    }
  }
}
// Set variance values given the sum of squared errors, the sum of errors,
// and the sample count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->count = c;
  if (v->count > 0)
    v->variance = (int)(256 *
                        (v->sum_square_error - v->sum_error * v->sum_error /
                         v->count) / v->count);
  else
    v->variance = 0;
}
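// The expression above is the standard variance identity, scaled by 256 to
// retain fractional precision in integer arithmetic:
//   variance ~= 256 * (E[x^2] - E[x]^2) = 256 * (SSE - sum^2 / N) / N.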
void sum_2_variances(const var *a, const var *b, var *r) {
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->count + b->count, r);
}
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}
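// Because squared sums, sums and counts simply add, each sum_2_variances()
// merge above produces the exact variance statistics of the union of the two
// child blocks; nothing is lost on the way up the tree.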
static int set_vt_partitioning(VP9_COMP *cpi,
                               void *data,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  // TODO(debargha): Choose this more intelligently.
  const int64_t threshold_multiplier = 25;
  int64_t threshold = threshold_multiplier * cpi->common.base_qindex;
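  // The threshold scales linearly with the frame quantizer, so at higher
  // base_qindex values larger variances still qualify for coarse partitions.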
  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  // PARTITION_NONE is allowed only if more than half of the block, in both
  // width and height, lies inside the visible image.
  if (mi_col + block_width / 2 < cm->mi_cols &&
      mi_row + block_height / 2 < cm->mi_rows &&
      vt.part_variances->none.variance < threshold) {
    set_block_size(cpi, mi_row, mi_col, bsize);
    return 1;
  }

  // Vertical split is available on all but the bottom border.
  if (mi_row + block_height / 2 < cm->mi_rows &&
      vt.part_variances->vert[0].variance < threshold &&
      vt.part_variances->vert[1].variance < threshold) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
    set_block_size(cpi, mi_row, mi_col, subsize);
    set_block_size(cpi, mi_row, mi_col + block_width / 2, subsize);
    return 1;
  }

  // Horizontal split is available on all but the right border.
  if (mi_col + block_width / 2 < cm->mi_cols &&
      vt.part_variances->horz[0].variance < threshold &&
      vt.part_variances->horz[1].variance < threshold) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
    set_block_size(cpi, mi_row, mi_col, subsize);
    set_block_size(cpi, mi_row + block_height / 2, mi_col, subsize);
    return 1;
  }

  return 0;
}
// TODO(debargha): Fix this function and make it work as expected.
static void choose_partitioning(VP9_COMP *cpi,
                                const TileInfo *const tile,
                                int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *x = &cpi->mb;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;

  int i, j, k;
  v64x64 vt;
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
  int_mv nearest_mv, near_mv;
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
  const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;

  set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  if (cm->frame_type != KEY_FRAME) {
    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf);

    xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
    xd->mi[0]->mbmi.sb_type = BLOCK_64X64;
    vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv,
                          xd->mi[0]->mbmi.ref_mvs[LAST_FRAME],
                          &nearest_mv, &near_mv);

    xd->mi[0]->mbmi.mv[0] = nearest_mv;
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
  }

  // Fill in the entire tree of 8x8 variances for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      v16x16 *vst = &vt.split[i].split[j];
      for (k = 0; k < 4; k++) {
        int x_idx = x16_idx + ((k & 1) << 3);
        int y_idx = y16_idx + ((k >> 1) << 3);
        unsigned int sse = 0;
        int sum = 0;
        if (x_idx < pixels_wide && y_idx < pixels_high)
          vp9_get8x8var(s + y_idx * sp + x_idx, sp,
                        d + y_idx * dp + x_idx, dp, &sse, &sum);
        fill_variance(sse, sum, 64, &vst->split[k].part_variances.none);
      }
    }
  }
  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
      fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
  }
  fill_variance_tree(&vt, BLOCK_64X64);
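  // At this point the tree holds exact variance statistics for every square
  // block from 8x8 up to 64x64, accumulated bottom-up from the 8x8 leaves.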
  // Now go through the entire structure, splitting every block size until
  // we get to one whose variance is lower than our threshold, or we reach
  // the smallest allowed block size.
  if (!set_vt_partitioning(cpi, &vt, BLOCK_64X64,
                           mi_row, mi_col)) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      if (!set_vt_partitioning(cpi, &vt.split[i], BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx))) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          // NOTE: This is a temporary hack to disable 8x8 partitions, since
          // they work really badly - possibly due to a bug.
#define DISABLE_8X8_VAR_BASED_PARTITION
#ifdef DISABLE_8X8_VAR_BASED_PARTITION
          if (mi_row + y32_idx + y16_idx + 1 < cm->mi_rows &&
              mi_col + x32_idx + x16_idx + 1 < cm->mi_cols) {
            set_block_size(cpi,
                           (mi_row + y32_idx + y16_idx),
                           (mi_col + x32_idx + x16_idx),
                           BLOCK_16X16);
          } else {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              set_block_size(cpi,
                             (mi_row + y32_idx + y16_idx + y8_idx),
                             (mi_col + x32_idx + x16_idx + x8_idx),
                             BLOCK_8X8);
            }
          }
#else
          if (!set_vt_partitioning(cpi, &vt.split[i].split[j], tile,
                                   BLOCK_16X16,
                                   (mi_row + y32_idx + y16_idx),
                                   (mi_col + x32_idx + x16_idx), 2)) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              set_block_size(cpi,
                             (mi_row + y32_idx + y16_idx + y8_idx),
                             (mi_col + x32_idx + x16_idx + x8_idx),
                             BLOCK_8X8);
            }
          }
#endif
        }
      }
    }
  }
}
static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  RD_OPT *const rd_opt = &cpi->rd;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  MODE_INFO *mi_addr = xd->mi[0];
  const struct segmentation *const seg = &cm->seg;

  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->mbmi.sb_type == bsize);

  *mi_addr = *mi;

  // If segmentation is in use:
  if (seg->enabled && output_enabled) {
    // For in-frame complexity AQ, copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi_addr->mbmi.segment_id =
          vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else, for cyclic refresh mode, update the segment map, set the segment
    // id, and then update the quantizer.
    else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
      vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi,
                                        mi_row, mi_col, bsize, 1);
      vp9_init_plane_quantizers(cpi, x);
    }
  }

  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][1];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];
  }

  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  }

  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it.
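  // mb_to_right_edge and mb_to_bottom_edge are kept in 1/8-pel units, so
  // shifting right by (3 + MI_SIZE_LOG2) converts them to 8x8 mode-info
  // units, comparable with x_idx and y below.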
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
          && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi[x_idx + y * mis] = mi_addr;
      }

  if (cpi->oxcf.aq_mode)
    vp9_init_plane_quantizers(cpi, x);

  // FIXME(rbultje) I'm pretty sure this should go to the end of this block
  // (i.e. after the output_enabled)
  if (bsize < BLOCK_32X32) {
    if (bsize < BLOCK_16X16)
      ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
    ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
  }

  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }

  vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
             sizeof(uint8_t) * ctx->num_4x4_blk);

  if (!output_enabled)
    return;

  if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    for (i = 0; i < TX_MODES; i++)
      rd_opt->tx_select_diff[i] += ctx->tx_rd_diff[i];
  }

#if CONFIG_INTERNAL_STATS
  if (frame_is_intra_only(cm)) {
    static const int kf_mode_index[] = {
      THR_DC /*DC_PRED*/,
      THR_V_PRED /*V_PRED*/,
      THR_H_PRED /*H_PRED*/,
      THR_D45_PRED /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED /*D63_PRED*/,
      THR_TM /*TM_PRED*/,
    };
    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
  } else {
    // Note how often each mode is chosen as best.
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif

  if (!frame_is_intra_only(cm)) {
    if (is_inter_block(mbmi)) {
      vp9_update_mv_count(cm, xd);

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = vp9_get_pred_context_switchable_interp(xd);
        ++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
      }
    }

    rd_opt->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    rd_opt->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    rd_opt->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      rd_opt->filter_diff[i] += ctx->best_filter_diff[i];
  }
}
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
  const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
  int i;

  // Set current frame pointer.
  x->e_mbd.cur_buf = src;

  for (i = 0; i < MAX_MB_PLANE; i++)
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}
static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode, int *rate,
                                   int64_t *dist, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  INTERP_FILTER filter_ref;

  if (xd->up_available)
    filter_ref = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
  else if (xd->left_available)
    filter_ref = xd->mi[-1]->mbmi.interp_filter;
  else
    filter_ref = EIGHTTAP;

  mbmi->sb_type = bsize;
  mbmi->mode = ZEROMV;
  mbmi->tx_size = MIN(max_txsize_lookup[bsize],
                      tx_mode_to_biggest_tx_size[tx_mode]);
  mbmi->skip = 1;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->mv[0].as_int = 0;
  mbmi->interp_filter = filter_ref;

  xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
  x->skip = 1;

  *rate = 0;
  *dist = 0;
}
static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
                             int mi_row, int mi_col,
                             int *totalrate, int64_t *totaldist,
                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                             int64_t best_rd, int block) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  int i, orig_rdmult;
  double rdmult_ratio;

  vp9_clear_system_state();
  rdmult_ratio = 1.0;  // avoid uninitialized warnings

  // Use the lower precision, but faster, 32x32 fdct for mode selection.
  x->use_lp32x32fdct = 1;

  // TODO(JBB): In most other places in the code, instead of calling the
  // function and then checking whether it is the first 8x8 block, we put the
  // check in the calling function. Do that here.
  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (block != 0) {
      *totalrate = 0;
      *totaldist = 0;
      return;
    }
  }

  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  mbmi = &xd->mi[0]->mbmi;
  mbmi->sb_type = bsize;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][0];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
    p[i].eobs = ctx->eobs_pbuf[i][0];
  }

  // Set to zero to make sure we do not use the previously encoded frame stats.
  mbmi->skip = 0;

  x->source_variance = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);

  // Save rdmult before it might be changed, so it can be restored later.
  orig_rdmult = x->rdmult;

  if (aq_mode == VARIANCE_AQ) {
    const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
                                            : vp9_block_energy(cpi, x, bsize);
    if (cm->frame_type == KEY_FRAME ||
        cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
      mbmi->segment_id = vp9_vaq_segment_id(energy);
    } else {
      const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                    : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }

    rdmult_ratio = vp9_vaq_rdmult_ratio(energy);
    vp9_init_plane_quantizers(cpi, x);
    vp9_clear_system_state();
    x->rdmult = (int)round(x->rdmult * rdmult_ratio);
  } else if (aq_mode == COMPLEXITY_AQ) {
    const int mi_offset = mi_row * cm->mi_cols + mi_col;
    unsigned char complexity = cpi->complexity_map[mi_offset];
    const int is_edge = (mi_row <= 1) || (mi_row >= (cm->mi_rows - 2)) ||
                        (mi_col <= 1) || (mi_col >= (cm->mi_cols - 2));
    if (!is_edge && (complexity > 128))
      x->rdmult += ((x->rdmult * (complexity - 128)) / 256);
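    // For complexity above the midpoint of 128 this scales rdmult up by as
    // much as ~50% (complexity 255 gives rdmult * 383 / 256), biasing the
    // RD trade-off toward spending fewer bits on complex non-edge blocks.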
  } else if (aq_mode == CYCLIC_REFRESH_AQ) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                  : cm->last_frame_seg_map;
    // If segment 1, use rdmult for that segment.
    if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
  }

  // Find best coding mode & reconstruct the MB so it is available
  // as a predictor for MBs that follow in the SB.
  if (frame_is_intra_only(cm)) {
    vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx,
                              best_rd);
  } else {
    if (bsize >= BLOCK_8X8) {
      if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
        vp9_rd_pick_inter_mode_sb_seg_skip(cpi, x, totalrate, totaldist, bsize,
                                           ctx, best_rd);
      else
        vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col,
                                  totalrate, totaldist, bsize, ctx, best_rd);
    } else {
      vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, totalrate,
                                    totaldist, bsize, ctx, best_rd);
    }
  }

  x->rdmult = orig_rdmult;

  if (aq_mode == VARIANCE_AQ && *totalrate != INT_MAX) {
    vp9_clear_system_state();
    *totalrate = (int)round(*totalrate * rdmult_ratio);
  }
}
static void update_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const MODE_INFO *const mi = xd->mi[0];
  const MB_MODE_INFO *const mbmi = &mi->mbmi;

  if (!frame_is_intra_only(cm)) {
    const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
                                                     SEG_LVL_REF_FRAME);
    if (!seg_ref_active) {
      FRAME_COUNTS *const counts = &cm->counts;
      const int inter_block = is_inter_block(mbmi);

      counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;

      // If the segment reference feature is enabled we have only a single
      // reference frame allowed for the segment, so exclude it from
      // the reference frame counts used to work out probabilities.
      if (inter_block) {
        const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];

        if (cm->reference_mode == REFERENCE_MODE_SELECT)
          counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
                            [has_second_ref(mbmi)]++;

        if (has_second_ref(mbmi)) {
          counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
                          [ref0 == GOLDEN_FRAME]++;
        } else {
          counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
                            [ref0 != LAST_FRAME]++;
          if (ref0 != LAST_FRAME)
            counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
                              [ref0 != GOLDEN_FRAME]++;
        }
      }
    }
  }
}
static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
                            ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                            ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                            PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                            BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  for (p = 0; p < MAX_MB_PLANE; p++) {
    vpx_memcpy(
        xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
        a + num_4x4_blocks_wide * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
            xd->plane[p].subsampling_x);
    vpx_memcpy(
        xd->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        l + num_4x4_blocks_high * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
            xd->plane[p].subsampling_y);
  }
  vpx_memcpy(xd->above_seg_context + mi_col, sa,
             sizeof(*xd->above_seg_context) * mi_width);
  vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
             sizeof(xd->left_seg_context[0]) * mi_height);
}
static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
                         ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                         ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                         PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                         BLOCK_SIZE bsize) {
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];

  // Buffer the above/left context information of the block being searched.
  for (p = 0; p < MAX_MB_PLANE; ++p) {
    vpx_memcpy(
        a + num_4x4_blocks_wide * p,
        xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
            xd->plane[p].subsampling_x);
    vpx_memcpy(
        l + num_4x4_blocks_high * p,
        xd->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
            xd->plane[p].subsampling_y);
  }
  vpx_memcpy(sa, xd->above_seg_context + mi_col,
             sizeof(*xd->above_seg_context) * mi_width);
  vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
             sizeof(xd->left_seg_context[0]) * mi_height);
}
static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
                     TOKENEXTRA **tp, int mi_row, int mi_col,
                     int output_enabled, BLOCK_SIZE bsize,
                     PICK_MODE_CONTEXT *ctx) {
  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  update_state(cpi, ctx, mi_row, mi_col, bsize, output_enabled);
  encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize, ctx);

  if (output_enabled) {
    update_stats(cpi);

    (*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}
static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize,
                      PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
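  // hbs is half the block's width in 8x8 mode-info units, i.e. the row/col
  // offset from this block to its second horizontal or vertical sub-block.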
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize = bsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize >= BLOCK_8X8) {
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = get_subsize(bsize, pc_tree->partitioning);
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    cm->counts.partition[ctx][partition]++;

  switch (partition) {
    case PARTITION_NONE:
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->none);
      break;
    case PARTITION_VERT:
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        encode_b(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize,
                 &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->horizontal[0]);
      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        encode_b(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize,
                 &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      if (bsize == BLOCK_8X8) {
        encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                 pc_tree->leaf_split[0]);
      } else {
        encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  pc_tree->split[0]);
        encode_sb(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize,
                  pc_tree->split[1]);
        encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize,
                  pc_tree->split[2]);
        encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                  subsize, pc_tree->split[3]);
      }
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not, then return the largest allowed partition size.
static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
                                      int rows_left, int cols_left,
                                      int *bh, int *bw) {
  if (rows_left <= 0 || cols_left <= 0) {
    return MIN(bsize, BLOCK_8X8);
  }
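  // In the BLOCK_SIZES ordering each square size is followed by its two
  // rectangular variants, so stepping bsize down by 3 walks the square sizes
  // only: 64x64 -> 32x32 -> 16x16 -> 8x8 -> 4x4.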
  for (; bsize > 0; bsize -= 3) {
    *bh = num_8x8_blocks_high_lookup[bsize];
    *bw = num_8x8_blocks_wide_lookup[bsize];
    if ((*bh <= rows_left) && (*bw <= cols_left)) {
      break;
    }
  }
  return bsize;
}

static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
    int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
    BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
  int bh = bh_in;
  int r, c;
  for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
    int bw = bw_in;
    for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
      const int index = r * mis + c;
      mi_8x8[index] = mi + index;
      mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
          row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
    }
  }
}
// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed, in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                                   MODE_INFO **mi_8x8, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  int block_row, block_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
  int bh = num_8x8_blocks_high_lookup[bsize];
  int bw = num_8x8_blocks_wide_lookup[bsize];

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // Apply the requested partition size to the SB64 if it is all "in image".
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        int index = block_row * mis + block_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = bsize;
      }
    }
  } else {
    // Else this is a partial SB64.
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
                                 col8x8_remaining, bsize, mi_8x8);
  }
}
static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8,
                              MODE_INFO **prev_mi_8x8) {
  const int mis = cm->mi_stride;
  int block_row, block_col;

  for (block_row = 0; block_row < 8; ++block_row) {
    for (block_col = 0; block_col < 8; ++block_col) {
      MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col];
      const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;

      if (prev_mi) {
        const ptrdiff_t offset = prev_mi - cm->prev_mi;
        mi_8x8[block_row * mis + block_col] = cm->mi + offset;
        mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;
      }
    }
  }
}
static void constrain_copy_partitioning(VP9_COMP *const cpi,
                                        const TileInfo *const tile,
                                        MODE_INFO **mi_8x8,
                                        MODE_INFO **prev_mi_8x8,
                                        int mi_row, int mi_col,
                                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  MODE_INFO *const mi_upper_left = cm->mi + mi_row * mis + mi_col;
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  int block_row, block_col;

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // If the SB64 is all "in image":
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        const int index = block_row * mis + block_col;
        MODE_INFO *prev_mi = prev_mi_8x8[index];
        const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
        // Use the previous partition if its block size is not larger than
        // bsize.
        if (prev_mi && sb_type <= bsize) {
          int block_row2, block_col2;
          for (block_row2 = 0; block_row2 < bh; ++block_row2) {
            for (block_col2 = 0; block_col2 < bw; ++block_col2) {
              const int index2 = (block_row + block_row2) * mis +
                                 block_col + block_col2;
              prev_mi = prev_mi_8x8[index2];
              if (prev_mi) {
                const ptrdiff_t offset = prev_mi - cm->prev_mi;
                mi_8x8[index2] = cm->mi + offset;
                mi_8x8[index2]->mbmi.sb_type = prev_mi->mbmi.sb_type;
              }
            }
          }
        } else {
          // Otherwise, use a fixed partition of size bsize.
          mi_8x8[index] = mi_upper_left + index;
          mi_8x8[index]->mbmi.sb_type = bsize;
        }
      }
    }
  } else {
    // Else this is a partial SB64, copy the previous partition.
    copy_partitioning(cm, mi_8x8, prev_mi_8x8);
  }
}

const struct {
  int row;
  int col;
} coord_lookup[16] = {
  {0, 0}, {0, 2}, {2, 0}, {2, 2},
  {0, 4}, {0, 6}, {2, 4}, {2, 6},
  {4, 0}, {4, 2}, {6, 0}, {6, 2},
  {4, 4}, {4, 6}, {6, 4}, {6, 6},
};
static void set_source_var_based_partition(VP9_COMP *cpi,
                                           const TileInfo *const tile,
                                           MODE_INFO **mi_8x8,
                                           int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;

  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // In-image SB64.
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    int i, j;
    int index;
    diff d32[4];
    const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
    int is_larger_better = 0;
    int use32x32 = 0;
    unsigned int thr = cpi->source_var_thresh;

    vpx_memset(d32, 0, 4 * sizeof(diff));

    for (i = 0; i < 4; i++) {
      diff *d16[4];

      for (j = 0; j < 4; j++) {
        int b_mi_row = coord_lookup[i * 4 + j].row;
        int b_mi_col = coord_lookup[i * 4 + j].col;
        int boffset = b_mi_row / 2 * cm->mb_cols +
                      b_mi_col / 2;

        d16[j] = cpi->source_diff_var + offset + boffset;

        index = b_mi_row * mis + b_mi_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = BLOCK_16X16;

        // TODO(yunqingwang): If d16[j].var is very large, use an 8x8
        // partition size to further improve quality.
      }

      is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
                         (d16[2]->var < thr) && (d16[3]->var < thr);

      // Use 32x32 partition.
      if (is_larger_better) {
        use32x32 += 1;

        for (j = 0; j < 4; j++) {
          d32[i].sse += d16[j]->sse;
          d32[i].sum += d16[j]->sum;
        }

        d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
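        // A 32x32 block holds 1024 pixels, so (sum * sum) >> 10 is sum^2 / N;
        // d32[i].var is therefore the unnormalized variance SSE - sum^2 / N.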
        index = coord_lookup[i * 4].row * mis + coord_lookup[i * 4].col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = BLOCK_32X32;
      }
    }

    if (use32x32 == 4) {
      is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
                         (d32[2].var < thr) && (d32[3].var < thr);

      // Use 64x64 partition.
      if (is_larger_better) {
        mi_8x8[0] = mi_upper_left;
        mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
      }
    }
  } else {   // partial in-image SB64
    int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
    int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
                                 row8x8_remaining, col8x8_remaining,
                                 BLOCK_16X16, mi_8x8);
  }
}
static int is_background(VP9_COMP *cpi, const TileInfo *const tile,
                         int mi_row, int mi_col) {
  MACROBLOCK *x = &cpi->mb;
  uint8_t *src, *pre;
  int src_stride, pre_stride;

  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;

  int this_sad = 0;
  int threshold = 0;

  // This assumes the input source frames are of the same dimension.
  src_stride = cpi->Source->y_stride;
  src = cpi->Source->y_buffer + (mi_row * MI_SIZE) * src_stride +
        mi_col * MI_SIZE;
  pre_stride = cpi->Last_Source->y_stride;
  pre = cpi->Last_Source->y_buffer + (mi_row * MI_SIZE) * pre_stride +
        mi_col * MI_SIZE;

  if (row8x8_remaining >= MI_BLOCK_SIZE &&
      col8x8_remaining >= MI_BLOCK_SIZE) {
    this_sad = cpi->fn_ptr[BLOCK_64X64].sdf(src, src_stride,
                                            pre, pre_stride);
    threshold = (1 << 12);
  } else {
    int r, c;
    for (r = 0; r < row8x8_remaining; r += 2)
      for (c = 0; c < col8x8_remaining; c += 2)
        this_sad += cpi->fn_ptr[BLOCK_16X16].sdf(src, src_stride,
                                                 pre, pre_stride);
    threshold = (row8x8_remaining * col8x8_remaining) << 6;
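    // Either way the threshold works out to one unit of SAD per pixel: a
    // full SB64 covers 64 * 64 = 4096 pixels (1 << 12), and a partial SB64
    // covers row8x8_remaining * col8x8_remaining * 64 pixels.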
  }

  x->in_static_area = (this_sad < 2 * threshold);
  return x->in_static_area;
}
static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8,
                         const int motion_thresh) {
  const int mis = cm->mi_stride;
  int block_row, block_col;

  if (cm->prev_mi) {
    for (block_row = 0; block_row < 8; ++block_row) {
      for (block_col = 0; block_col < 8; ++block_col) {
        const MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col];
        if (prev_mi) {
          if (abs(prev_mi->mbmi.mv[0].as_mv.row) > motion_thresh ||
              abs(prev_mi->mbmi.mv[0].as_mv.col) > motion_thresh)
            return 1;
        }
      }
    }
  }
  return 0;
}
static void update_state_rt(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
                            int mi_row, int mi_col, int bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const struct segmentation *const seg = &cm->seg;

  *(xd->mi[0]) = ctx->mic;

  // For in-frame adaptive Q, check for resetting the segment_id and updating
  // the cyclic refresh map.
  if ((cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) && seg->enabled) {
    vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi,
                                      mi_row, mi_col, bsize, 1);
    vp9_init_plane_quantizers(cpi, x);
  }

  if (is_inter_block(mbmi)) {
    vp9_update_mv_count(cm, xd);

    if (cm->interp_filter == SWITCHABLE) {
      const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
      ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter];
    }
  }

  x->skip = ctx->skip;
  x->skip_txfm = mbmi->segment_id ? 0 : ctx->skip_txfm;
}
static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile,
                        TOKENEXTRA **tp, int mi_row, int mi_col,
                        int output_enabled, BLOCK_SIZE bsize,
                        PICK_MODE_CONTEXT *ctx) {
  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  update_state_rt(cpi, ctx, mi_row, mi_col, bsize);

#if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0 && output_enabled) {
    vp9_denoiser_denoise(&cpi->denoiser, &cpi->mb, mi_row, mi_col,
                         MAX(BLOCK_8X8, bsize), ctx);
  }
#endif

  encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize, ctx);

  if (output_enabled) {
    (*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}
static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile,
                         TOKENEXTRA **tp, int mi_row, int mi_col,
                         int output_enabled, BLOCK_SIZE bsize,
                         PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize >= BLOCK_8X8) {
    MACROBLOCKD *const xd = &cpi->mb.e_mbd;
    const int idx_str = xd->mi_stride * mi_row + mi_col;
    MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = mi_8x8[0]->mbmi.sb_type;
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    cm->counts.partition[ctx][partition]++;
  switch (partition) {
    case PARTITION_NONE:
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->none);
      break;
    case PARTITION_VERT:
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
                    subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->horizontal[0]);
      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
                    subsize, &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                   pc_tree->split[0]);
      encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
                   subsize, pc_tree->split[1]);
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
                   subsize, pc_tree->split[2]);
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                   subsize, pc_tree->split[3]);
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
static void rd_use_partition(VP9_COMP *cpi,
                             const TileInfo *const tile,
                             MODE_INFO **mi_8x8,
                             TOKENEXTRA **tp, int mi_row, int mi_col,
                             BLOCK_SIZE bsize, int *rate, int64_t *dist,
                             int do_recon, PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mis = cm->mi_stride;
  const int bsl = b_width_log2(bsize);
  const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
  const int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE subsize;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int last_part_rate = INT_MAX;
  int64_t last_part_dist = INT64_MAX;
  int64_t last_part_rd = INT64_MAX;
  int none_rate = INT_MAX;
  int64_t none_dist = INT64_MAX;
  int64_t none_rd = INT64_MAX;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT64_MAX;
  int64_t chosen_rd = INT64_MAX;
  BLOCK_SIZE sub_subsize = BLOCK_4X4;
  int splits_below = 0;
  BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
  int do_partition_search = 1;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  assert(num_4x4_blocks_wide_lookup[bsize] ==
         num_4x4_blocks_high_lookup[bsize]);

  partition = partition_lookup[bsl][bs_type];
  subsize = get_subsize(bsize, partition);

  pc_tree->partitioning = partition;
  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
    set_offsets(cpi, tile, mi_row, mi_col, bsize);
    x->mb_energy = vp9_block_energy(cpi, x, bsize);
  }
  if (do_partition_search &&
      cpi->sf.partition_search_type == SEARCH_PARTITION &&
      cpi->sf.adjust_partitioning_from_last_frame) {
    // Check if any of the sub blocks are further split.
    if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
      sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
      splits_below = 1;
      for (i = 0; i < 4; i++) {
        int jj = i >> 1, ii = i & 0x01;
        MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
        if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
          splits_below = 0;
        }
      }
    }

    // If partition is not none, try none unless each of the 4 splits is split
    // even further.
    if (partition != PARTITION_NONE && !splits_below &&
        mi_row + (mi_step >> 1) < cm->mi_rows &&
        mi_col + (mi_step >> 1) < cm->mi_cols) {
      pc_tree->partitioning = PARTITION_NONE;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize,
                       ctx, INT64_MAX, 0);

      pl = partition_plane_context(xd, mi_row, mi_col, bsize);

      if (none_rate < INT_MAX) {
        none_rate += cpi->partition_cost[pl][PARTITION_NONE];
        none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist);
      }

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      mi_8x8[0]->mbmi.sb_type = bs_type;
      pc_tree->partitioning = partition;
    }
  }
  switch (partition) {
    case PARTITION_NONE:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, bsize, ctx, INT64_MAX, 0);
      break;
    case PARTITION_HORZ:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize, &pc_tree->horizontal[0],
                       INT64_MAX, 0);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
        int rt = 0;
        int64_t dt = 0;
        PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
        update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
        rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &rt, &dt,
                         subsize, &pc_tree->horizontal[1], INT64_MAX, 1);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }

        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_VERT:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize, &pc_tree->vertical[0],
                       INT64_MAX, 0);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
        int rt = 0;
        int64_t dt = 0;
        PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
        update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
        rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &rt, &dt,
                         subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
                         INT64_MAX, 1);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_SPLIT:
      if (bsize == BLOCK_8X8) {
        rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                         &last_part_dist, subsize, pc_tree->leaf_split[0],
                         INT64_MAX, 0);
        break;
      }
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
        int x_idx = (i & 1) * (mi_step >> 1);
        int y_idx = (i >> 1) * (mi_step >> 1);
        int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;

        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;

        rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
                         mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
                         i != 3, pc_tree->split[i]);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    default:
      assert(0);
      break;
  }
  pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  if (last_part_rate < INT_MAX) {
    last_part_rate += cpi->partition_cost[pl][partition];
    last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist);
  }

  if (do_partition_search
      && cpi->sf.adjust_partitioning_from_last_frame
      && cpi->sf.partition_search_type == SEARCH_PARTITION
      && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
      && (mi_row + mi_step < cm->mi_rows ||
          mi_row + (mi_step >> 1) == cm->mi_rows)
      && (mi_col + mi_step < cm->mi_cols ||
          mi_col + (mi_step >> 1) == cm->mi_cols)) {
    BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
    chosen_rate = 0;
    chosen_dist = 0;
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
    pc_tree->partitioning = PARTITION_SPLIT;

    for (i = 0; i < 4; i++) {
      int x_idx = (i & 1) * (mi_step >> 1);
      int y_idx = (i >> 1) * (mi_step >> 1);
      int rt = 0;
      int64_t dt = 0;
      ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
      PARTITION_CONTEXT sl[8], sa[8];

      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
        continue;

      save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      pc_tree->split[i]->partitioning = PARTITION_NONE;
      rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
                       split_subsize, &pc_tree->split[i]->none,
                       INT64_MAX, i);

      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

      if (rt == INT_MAX || dt == INT64_MAX) {
        chosen_rate = INT_MAX;
        chosen_dist = INT64_MAX;
        break;
      }

      chosen_rate += rt;
      chosen_dist += dt;

      if (i != 3)
        encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0,
                  split_subsize, pc_tree->split[i]);

      pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
                                   split_subsize);
      chosen_rate += cpi->partition_cost[pl][PARTITION_NONE];
    }

    pl = partition_plane_context(xd, mi_row, mi_col, bsize);
    if (chosen_rate < INT_MAX) {
      chosen_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist);
    }
  }
  // If last_part is better, set the partitioning to that.
  if (last_part_rd < chosen_rd) {
    mi_8x8[0]->mbmi.sb_type = bsize;
    if (bsize >= BLOCK_8X8)
      pc_tree->partitioning = partition;
    chosen_rate = last_part_rate;
    chosen_dist = last_part_dist;
    chosen_rd = last_part_rd;
  }
  // If none was better, set the partitioning to that.
  if (none_rd < chosen_rd) {
    if (bsize >= BLOCK_8X8)
      pc_tree->partitioning = PARTITION_NONE;
    chosen_rate = none_rate;
    chosen_dist = none_dist;
  }

  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  // We must have chosen a partitioning and encoding or we'll fail later on.
  // No other opportunities for success.
  if (bsize == BLOCK_64X64)
    assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX);

  if (do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);

    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col,
                                    output_enabled, chosen_rate);
    }

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              chosen_rate, chosen_dist);
    encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize,
              pc_tree);
  }

  *rate = chosen_rate;
  *dist = chosen_dist;
}
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
  BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
  BLOCK_16X16
};

static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
  BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
  BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
  BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
  BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
  BLOCK_64X64
};
// Look at all the mode_info entries for blocks that are part of this
// partition and find the min and max values for sb_type.
// At the moment this is designed to work on a 64x64 SB but could be
// adjusted to use a size parameter.
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one SB64.
static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO **mi_8x8,
                                        BLOCK_SIZE *min_block_size,
                                        BLOCK_SIZE *max_block_size) {
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  int sb_width_in_blocks = MI_BLOCK_SIZE;
  int sb_height_in_blocks = MI_BLOCK_SIZE;
  int i, j;
  int index = 0;

  // Check the sb_type for each block that belongs to this region.
  for (i = 0; i < sb_height_in_blocks; ++i) {
    for (j = 0; j < sb_width_in_blocks; ++j) {
      MODE_INFO *mi = mi_8x8[index + j];
      BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
      *min_block_size = MIN(*min_block_size, sb_type);
      *max_block_size = MAX(*max_block_size, sb_type);
    }
    index += xd->mi_stride;
  }
}
// Next square block size less than or equal to the current block size.
static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
  BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
  BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
  BLOCK_64X64
};
// Look at neighboring blocks and set a min and max partition size based on
// them.
static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
                                    int mi_row, int mi_col,
                                    BLOCK_SIZE *min_block_size,
                                    BLOCK_SIZE *max_block_size) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO **mi = xd->mi;
  const int left_in_image = xd->left_available && mi[-1];
  const int above_in_image = xd->up_available && mi[-xd->mi_stride];
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  int bh, bw;
  BLOCK_SIZE min_size = BLOCK_4X4;
  BLOCK_SIZE max_size = BLOCK_64X64;
  // Trap case where we do not have a prediction.
  if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
    // Default "min to max" and "max to min".
    min_size = BLOCK_64X64;
    max_size = BLOCK_4X4;

    // NOTE: each call to get_sb_partition_size_range() uses the previous
    // passed-in values for min and max as a starting point.
    // Find the min and max partition used in previous frame at this location.
    if (cm->frame_type != KEY_FRAME) {
      MODE_INFO **const prev_mi =
          &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
      get_sb_partition_size_range(cpi, prev_mi, &min_size, &max_size);
    }
    // Find the min and max partition sizes used in the left SB64.
    if (left_in_image) {
      MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
      get_sb_partition_size_range(cpi, left_sb64_mi, &min_size, &max_size);
    }
    // Find the min and max partition sizes used in the above SB64.
    if (above_in_image) {
      MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
      get_sb_partition_size_range(cpi, above_sb64_mi, &min_size, &max_size);
    }
    // Adjust the observed min and max.
    if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
      min_size = min_partition_size[min_size];
      max_size = max_partition_size[max_size];
    }
  }

  // Check border cases where max and min from neighbors may not be legal.
  max_size = find_partition_size(max_size,
                                 row8x8_remaining, col8x8_remaining,
                                 &bh, &bw);
  min_size = MIN(min_size, max_size);

  // When use_square_partition_only is true, make sure at least one square
  // partition is allowed by selecting the next smaller square size as
  // the minimum allowed size.
  if (cpi->sf.use_square_partition_only &&
      next_square_size[max_size] < min_size) {
    min_size = next_square_size[max_size];
  }

  *min_block_size = min_size;
  *max_block_size = max_size;
}
static void auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
                                 int mi_row, int mi_col,
                                 BLOCK_SIZE *min_block_size,
                                 BLOCK_SIZE *max_block_size) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO **mi_8x8 = xd->mi;
  const int left_in_image = xd->left_available && mi_8x8[-1];
  const int above_in_image = xd->up_available &&
                             mi_8x8[-xd->mi_stride];
  int row8x8_remaining = tile->mi_row_end - mi_row;
  int col8x8_remaining = tile->mi_col_end - mi_col;
  int bh, bw;
  BLOCK_SIZE min_size = BLOCK_32X32;
  BLOCK_SIZE max_size = BLOCK_8X8;
  int bsl = mi_width_log2(BLOCK_64X64);
  const int search_range_ctrl = (((mi_row + mi_col) >> bsl) +
      get_chessboard_index(cm->current_video_frame)) & 0x1;
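  // The (mi_row + mi_col) >> bsl parity combined with a per-frame chessboard
  // index alternates SB64s between the neighbor-based range below and the
  // default range, in a checkerboard pattern that flips every frame.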
  // Trap case where we do not have a prediction.
  if (search_range_ctrl &&
      (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
    int block;
    MODE_INFO **mi;
    BLOCK_SIZE sb_type;

    // Find the min and max partition sizes used in the left SB64.
    if (left_in_image) {
      MODE_INFO *cur_mi;
      mi = &mi_8x8[-MI_BLOCK_SIZE];
      for (block = 0; block < MI_BLOCK_SIZE; ++block) {
        cur_mi = mi[block * xd->mi_stride];
        sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
        min_size = MIN(min_size, sb_type);
        max_size = MAX(max_size, sb_type);
      }
    }
    // Find the min and max partition sizes used in the above SB64.
    if (above_in_image) {
      mi = &mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE];
      for (block = 0; block < MI_BLOCK_SIZE; ++block) {
        sb_type = mi[block] ? mi[block]->mbmi.sb_type : 0;
        min_size = MIN(min_size, sb_type);
        max_size = MAX(max_size, sb_type);
      }
    }

    min_size = min_partition_size[min_size];
    max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
                                   &bh, &bw);
    min_size = MIN(min_size, max_size);
    min_size = MAX(min_size, BLOCK_8X8);
    max_size = MIN(max_size, BLOCK_32X32);
  } else {
    min_size = BLOCK_8X8;
    max_size = BLOCK_32X32;
  }

  *min_block_size = min_size;
  *max_block_size = max_size;
}
// TODO(jingning) refactor functions setting partition search range
static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
                                int mi_row, int mi_col, BLOCK_SIZE bsize,
                                BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  int idx, idy;

  MODE_INFO *mi;
  MODE_INFO **prev_mi =
      &cm->prev_mi_grid_visible[mi_row * cm->mi_stride + mi_col];
  BLOCK_SIZE bs, min_size, max_size;

  min_size = BLOCK_64X64;
  max_size = BLOCK_4X4;

  if (prev_mi) {
    for (idy = 0; idy < mi_height; ++idy) {
      for (idx = 0; idx < mi_width; ++idx) {
        mi = prev_mi[idy * cm->mi_stride + idx];
        bs = mi ? mi->mbmi.sb_type : bsize;
        min_size = MIN(min_size, bs);
        max_size = MAX(max_size, bs);
      }
    }
  }

  if (xd->left_available) {
    for (idy = 0; idy < mi_height; ++idy) {
      mi = xd->mi[idy * cm->mi_stride - 1];
      bs = mi ? mi->mbmi.sb_type : bsize;
      min_size = MIN(min_size, bs);
      max_size = MAX(max_size, bs);
    }
  }

  if (xd->up_available) {
    for (idx = 0; idx < mi_width; ++idx) {
      mi = xd->mi[idx - cm->mi_stride];
      bs = mi ? mi->mbmi.sb_type : bsize;
      min_size = MIN(min_size, bs);
      max_size = MAX(max_size, bs);
    }
  }

  if (min_size == max_size) {
    min_size = min_partition_size[min_size];
    max_size = max_partition_size[max_size];
  }

  *min_bs = min_size;
  *max_bs = max_size;
}
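
// The adaptive motion search caches the best motion vectors found for a
// parent block in its pick-mode context, so the searches for its child
// partitions can be re-seeded from the parent's estimate.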
static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
  vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
}

static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
  vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
}
#if CONFIG_FP_MB_STATS
const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
    {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
    {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
    {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
#endif
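
// Partition search notes: rd_pick_partition() recursively evaluates
// PARTITION_NONE, SPLIT, HORZ and VERT for each block and keeps whichever
// choice minimizes RDCOST(x->rdmult, x->rddiv, rate, distortion), a
// Lagrangian-style cost combining the estimated rate and distortion (see
// the RDCOST macro in vp9/encoder/vp9_rd.h). The running sum_rd of a
// candidate partition is compared against best_rd, so a branch can be
// abandoned as soon as it is already more expensive than the best answer
// found so far; the remaining budget (best_rd - sum_rd) is passed down to
// the recursive calls.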
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
                              TOKENEXTRA **tp, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, int *rate,
                              int64_t *dist, int64_t best_rd,
                              PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  int i, pl;
  BLOCK_SIZE subsize;
  int this_rate, sum_rate = 0, best_rate = INT_MAX;
  int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
  int64_t sum_rd = 0;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;

  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
  const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;

  BLOCK_SIZE min_size = cpi->sf.min_partition_size;
  BLOCK_SIZE max_size = cpi->sf.max_partition_size;

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed = !force_vert_split && yss <= xss &&
                               bsize >= BLOCK_8X8;
  int partition_vert_allowed = !force_horz_split && xss <= yss &&
                               bsize >= BLOCK_8X8;
  (void) *tp_orig;

  assert(num_8x8_blocks_wide_lookup[bsize] ==
         num_8x8_blocks_high_lookup[bsize]);

  set_offsets(cpi, tile, mi_row, mi_col, bsize);

  if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
    x->mb_energy = vp9_block_energy(cpi, x, bsize);

  if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
    int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
        + get_chessboard_index(cm->current_video_frame)) & 0x1;

    if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
      set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
  }

  // Determine partition types in search according to the speed features.
  // The threshold set here has to be of square block size.
  if (cpi->sf.auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
    partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
                               force_horz_split);
    partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
                               force_vert_split);
    do_split &= bsize > min_size;
  }
  if (cpi->sf.use_square_partition_only) {
    partition_horz_allowed &= force_horz_split;
    partition_vert_allowed &= force_vert_split;
  }

  save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);

  if (cpi->sf.disable_split_var_thresh && partition_none_allowed) {
    unsigned int source_variancey;
    vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
    source_variancey = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
    if (source_variancey < cpi->sf.disable_split_var_thresh) {
      do_split = 0;
      if (source_variancey < cpi->sf.disable_split_var_thresh / 2)
        do_rect = 0;
    }
  }

  // PARTITION_NONE
  if (partition_none_allowed) {
    rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize,
                     ctx, best_rd, 0);
    if (this_rate != INT_MAX) {
      if (bsize >= BLOCK_8X8) {
        pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rate += cpi->partition_cost[pl][PARTITION_NONE];
      }
      sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
      if (sum_rd < best_rd) {
        int64_t stop_thresh = 4096;
        int64_t stop_thresh_rd;

        best_rate = this_rate;
        best_dist = this_dist;
        best_rd = sum_rd;
        if (bsize >= BLOCK_8X8)
          pc_tree->partitioning = PARTITION_NONE;

        // Adjust threshold according to partition size.
        stop_thresh >>= 8 - (b_width_log2(bsize) +
            b_height_log2(bsize));
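        // e.g. no shift for a 64x64 block (b_width_log2 and b_height_log2
        // are 4 each, in 4x4 units, so 8 - 8 == 0) while an 8x8 block gives
        // 4096 >> 6 == 64.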
        stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh);
        // If obtained distortion is very small, choose current partition
        // and stop splitting.
        if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) {
          do_split = 0;
          do_rect = 0;
        }

#if CONFIG_FP_MB_STATS
        // Check if every 16x16 first pass block statistics has zero
        // motion and the corresponding first pass residue is small enough.
        // If that is the case, check the difference variance between the
        // current frame and the last frame. If the variance is small enough,
        // stop further splitting in RD optimization
        if (cpi->use_fp_mb_stats && do_split != 0 &&
            cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
          VP9_COMMON *cm = &cpi->common;
          int mb_row = mi_row >> 1;
          int mb_col = mi_col >> 1;
          int mb_row_end =
              MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
          int mb_col_end =
              MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
          int r, c;
          int skip = 1;

          for (r = mb_row; r < mb_row_end; r++) {
            for (c = mb_col; c < mb_col_end; c++) {
              const int mb_index = r * cm->mb_cols + c;
              if ((cpi->twopass.this_frame_mb_stats[mb_index] &
                   FPMB_NONZERO_MOTION_MASK) ||
                  !(cpi->twopass.this_frame_mb_stats[mb_index] &
                    FPMB_ERROR_LEVEL0_MASK)) {
                skip = 0;
                break;
              }
            }
            if (skip == 0)
              break;
          }
          if (skip) {
            unsigned int var;
            set_offsets(cpi, tile, mi_row, mi_col, bsize);
            var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src,
                                                 mi_row, mi_col, bsize);
            if (var < 8) {
              do_split = 0;
              do_rect = 0;
            }
          }
        }
#endif
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  // store estimated motion vector
  if (cpi->sf.adaptive_motion_search)
    store_pred_mv(x, ctx);

  // PARTITION_SPLIT
  sum_rd = 0;
  // TODO(jingning): use the motion vectors given by the above search as
  // the starting point of motion search in the following partition type check.
  if (do_split) {
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    if (bsize == BLOCK_8X8) {
      i = 4;
      if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
        pc_tree->leaf_split[0]->pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                       pc_tree->leaf_split[0], best_rd, 0);
      if (sum_rate == INT_MAX)
        sum_rd = INT64_MAX;
      else
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
    } else {
      for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
        const int x_idx = (i & 1) * mi_step;
        const int y_idx = (i >> 1) * mi_step;

        if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
          continue;

        if (cpi->sf.adaptive_motion_search)
          load_pred_mv(x, ctx);

        pc_tree->split[i]->index = i;
        rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
                          subsize, &this_rate, &this_dist,
                          best_rd - sum_rd, pc_tree->split[i]);

        if (this_rate == INT_MAX) {
          sum_rd = INT64_MAX;
        } else {
          sum_rate += this_rate;
          sum_dist += this_dist;
          sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
        }
      }
    }

    if (sum_rd < best_rd && i == 4) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rate = sum_rate;
        best_dist = sum_dist;
        best_rd = sum_rd;
        pc_tree->partitioning = PARTITION_SPLIT;
      }
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->horizontal[0].pred_interp_filter =
          ctx->mic.mbmi.interp_filter;
    rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                     &pc_tree->horizontal[0], best_rd, 0);
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);

    if (sum_rd < best_rd && mi_row + mi_step < cm->mi_rows) {
      PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
      update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);

      if (cpi->sf.adaptive_motion_search)
        load_pred_mv(x, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->horizontal[1].pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rate,
                       &this_dist, subsize, &pc_tree->horizontal[1],
                       best_rd - sum_rd, 1);
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += cpi->partition_cost[pl][PARTITION_HORZ];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rd = sum_rd;
        best_rate = sum_rate;
        best_dist = sum_dist;
        pc_tree->partitioning = PARTITION_HORZ;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);

    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->vertical[0].pred_interp_filter =
          ctx->mic.mbmi.interp_filter;
    rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize,
                     &pc_tree->vertical[0], best_rd, 0);
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
    if (sum_rd < best_rd && mi_col + mi_step < cm->mi_cols) {
      update_state(cpi, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize,
                        &pc_tree->vertical[0]);

      if (cpi->sf.adaptive_motion_search)
        load_pred_mv(x, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->vertical[1].pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rate,
                       &this_dist, subsize,
                       &pc_tree->vertical[1], best_rd - sum_rd,
                       1);
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rate += cpi->partition_cost[pl][PARTITION_VERT];
      sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      if (sum_rd < best_rd) {
        best_rate = sum_rate;
        best_dist = sum_dist;
        best_rd = sum_rd;
        pc_tree->partitioning = PARTITION_VERT;
      }
    }
    restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
  }
  // TODO(jbb): This code added so that we avoid static analysis
  // warning related to the fact that best_rd isn't used after this
  // point. This code should be refactored so that the duplicate
  // checks occur in some sub function and thus are used...
  (void) best_rd;
  *rate = best_rate;
  *dist = best_dist;

  if (best_rate < INT_MAX && best_dist < INT64_MAX && pc_tree->index != 3) {
    int output_enabled = (bsize == BLOCK_64X64);

    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map)
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled,
                                    best_rate);
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              best_rate, best_dist);

    encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree);
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rate < INT_MAX);
    assert(best_dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}
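
// Drives the RD partition search for one row of 64x64 superblocks within a
// tile: depending on the partition_search_type speed feature, a superblock
// either reuses a fixed or variance-derived partitioning via
// rd_use_partition(), copies the previous frame's partitioning, or runs the
// full recursive rd_pick_partition() search.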
static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
                             int mi_row, TOKENEXTRA **tp) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  SPEED_FEATURES *const sf = &cpi->sf;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
  vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    int dummy_rate;
    int64_t dummy_dist;
    int i;

    if (sf->adaptive_pred_interp_filter) {
      for (i = 0; i < 64; ++i)
        cpi->leaf_tree[i].pred_interp_filter = SWITCHABLE;

      for (i = 0; i < 64; ++i) {
        cpi->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
        cpi->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
        cpi->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
        cpi->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
      }
    }

    vp9_zero(cpi->mb.pred_mv);
    cpi->pc_root->index = 0;

    if ((sf->partition_search_type == SEARCH_PARTITION &&
         sf->use_lastframe_partitioning) ||
        sf->partition_search_type == FIXED_PARTITION ||
        sf->partition_search_type == VAR_BASED_PARTITION ||
        sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
      const int idx_str = cm->mi_stride * mi_row + mi_col;
      MODE_INFO **mi = cm->mi_grid_visible + idx_str;
      MODE_INFO **prev_mi = cm->prev_mi_grid_visible + idx_str;
      cpi->mb.source_variance = UINT_MAX;
      if (sf->partition_search_type == FIXED_PARTITION) {
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col,
                               sf->always_this_block_size);
        rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, cpi->pc_root);
      } else if (cpi->skippable_frame ||
                 sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
        BLOCK_SIZE bsize;
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
        set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
        rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, cpi->pc_root);
      } else if (sf->partition_search_type == VAR_BASED_PARTITION) {
        choose_partitioning(cpi, tile, mi_row, mi_col);
        rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                         &dummy_rate, &dummy_dist, 1, cpi->pc_root);
      } else {
        GF_GROUP *gf_grp = &cpi->twopass.gf_group;
        int last_was_mid_sequence_overlay = 0;
        if ((cpi->pass == 2) && (gf_grp->index)) {
          if (gf_grp->update_type[gf_grp->index - 1] == OVERLAY_UPDATE)
            last_was_mid_sequence_overlay = 1;
        }
        if ((cm->current_video_frame
             % sf->last_partitioning_redo_frequency) == 0
            || last_was_mid_sequence_overlay
            || cm->prev_mi == 0
            || cm->show_frame == 0
            || cm->frame_type == KEY_FRAME
            || cpi->rc.is_src_frame_alt_ref
            || ((sf->use_lastframe_partitioning ==
                 LAST_FRAME_PARTITION_LOW_MOTION) &&
                sb_has_motion(cm, prev_mi, sf->lf_motion_threshold))) {
          // If required set upper and lower partition size limits
          if (sf->auto_min_max_partition_size) {
            set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
            rd_auto_partition_range(cpi, tile, mi_row, mi_col,
                                    &sf->min_partition_size,
                                    &sf->max_partition_size);
          }
          rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                            &dummy_rate, &dummy_dist, INT64_MAX,
                            cpi->pc_root);
        } else {
          if (sf->constrain_copy_partition &&
              sb_has_motion(cm, prev_mi, sf->lf_motion_threshold))
            constrain_copy_partitioning(cpi, tile, mi, prev_mi,
                                        mi_row, mi_col, BLOCK_16X16);
          else
            copy_partitioning(cm, mi, prev_mi);
          rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                           &dummy_rate, &dummy_dist, 1, cpi->pc_root);
        }
      }
    } else {
      // If required set upper and lower partition size limits
      if (sf->auto_min_max_partition_size) {
        set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
        rd_auto_partition_range(cpi, tile, mi_row, mi_col,
                                &sf->min_partition_size,
                                &sf->max_partition_size);
      }
      rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                        &dummy_rate, &dummy_dist, INT64_MAX, cpi->pc_root);
    }
  }
}
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);

  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);

  vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(xd->above_context[0], 0,
             sizeof(*xd->above_context[0]) *
             2 * aligned_mi_cols * MAX_MB_PLANE);
  vpx_memset(xd->above_seg_context, 0,
             sizeof(*xd->above_seg_context) * aligned_mi_cols);
}
static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
            + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
  int mi_row, mi_col;
  const int mis = cm->mi_stride;
  MODE_INFO **mi_ptr = cm->mi_grid_visible;

  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
      if (mi_ptr[mi_col]->mbmi.tx_size > max_tx_size)
        mi_ptr[mi_col]->mbmi.tx_size = max_tx_size;
    }
  }
}
static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return INTRA_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return GOLDEN_FRAME;
  else
    return LAST_FRAME;
}
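
// Chooses the frame-level transform size mode. Lossless frames are pinned
// to ONLY_4X4 since VP9 lossless coding uses the 4x4 Walsh-Hadamard
// transform exclusively (see the fwd_txm4x4/itxm_add selection in
// encode_frame_internal() below).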
static TX_MODE select_tx_mode(const VP9_COMP *cpi) {
  if (cpi->mb.e_mbd.lossless) {
    return ONLY_4X4;
  } else if (cpi->common.current_video_frame == 0) {
    return TX_MODE_SELECT;
  } else {
    if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
      return ALLOW_32X32;
    } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
      const RD_OPT *const rd_opt = &cpi->rd;
      const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
      return rd_opt->tx_select_threshes[frame_type][ALLOW_32X32] >
                 rd_opt->tx_select_threshes[frame_type][TX_MODE_SELECT] ?
                     ALLOW_32X32 : TX_MODE_SELECT;
    } else if (cpi->sf.tx_size_search_method == USE_TX_8X8) {
      return TX_MODE_SELECT;
    } else {
      unsigned int total = 0;
      int i;
      for (i = 0; i < TX_SIZES; ++i)
        total += cpi->tx_stepdown_count[i];

      if (total) {
        const double fraction = (double)cpi->tx_stepdown_count[0] / total;
        return fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
      }
      return cpi->common.tx_mode;
    }
  }
}
static void nonrd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
                                int mi_row, int mi_col,
                                int *rate, int64_t *dist,
                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  mbmi = &xd->mi[0]->mbmi;
  mbmi->sb_type = bsize;

  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
    if (mbmi->segment_id && x->in_static_area)
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);

  if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
    set_mode_info_seg_skip(x, cm->tx_mode, rate, dist, bsize);
  else
    vp9_pick_inter_mode(cpi, x, tile, mi_row, mi_col, rate, dist, bsize, ctx);

  duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
}
static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
                              int mi_row, int mi_col,
                              BLOCK_SIZE bsize, BLOCK_SIZE subsize,
                              PC_TREE *pc_tree) {
  MACROBLOCKD *xd = &x->e_mbd;
  int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition = pc_tree->partitioning;

  assert(bsize >= BLOCK_8X8);

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  switch (partition) {
    case PARTITION_NONE:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->none.mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      break;
    case PARTITION_VERT:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->vertical[0].mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);

      if (mi_col + hbs < cm->mi_cols) {
        set_modeinfo_offsets(cm, xd, mi_row, mi_col + hbs);
        *(xd->mi[0]) = pc_tree->vertical[1].mic;
        duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, bsize);
      }
      break;
    case PARTITION_HORZ:
      set_modeinfo_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->horizontal[0].mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      if (mi_row + hbs < cm->mi_rows) {
        set_modeinfo_offsets(cm, xd, mi_row + hbs, mi_col);
        *(xd->mi[0]) = pc_tree->horizontal[1].mic;
        duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, bsize);
      }
      break;
    case PARTITION_SPLIT: {
      BLOCK_SIZE subsubsize = get_subsize(subsize, PARTITION_SPLIT);
      fill_mode_info_sb(cm, x, mi_row, mi_col, subsize,
                        subsubsize, pc_tree->split[0]);
      fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
                        subsubsize, pc_tree->split[1]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
                        subsubsize, pc_tree->split[2]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
                        subsubsize, pc_tree->split[3]);
      break;
    }
    default:
      break;
  }
}
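
// Real-time (non-RD) counterpart of rd_pick_partition(): the same recursive
// NONE/SPLIT/HORZ/VERT search, but leaf modes are picked with the fast
// vp9_pick_inter_mode() path and entropy contexts are not saved/restored
// around each candidate.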
static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
                                 TOKENEXTRA **tp, int mi_row,
                                 int mi_col, BLOCK_SIZE bsize, int *rate,
                                 int64_t *dist, int do_recon, int64_t best_rd,
                                 PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  int i;
  BLOCK_SIZE subsize = bsize;
  int this_rate, sum_rate = 0, best_rate = INT_MAX;
  int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
  int64_t sum_rd = 0;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + ms >= cm->mi_rows);
  const int force_vert_split = (mi_col + ms >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed = !force_vert_split && yss <= xss &&
                               bsize >= BLOCK_8X8;
  int partition_vert_allowed = !force_horz_split && xss <= yss &&
                               bsize >= BLOCK_8X8;
  (void) *tp_orig;

  assert(num_8x8_blocks_wide_lookup[bsize] ==
         num_8x8_blocks_high_lookup[bsize]);

  // Determine partition types in search according to the speed features.
  // The threshold set here has to be of square block size.
  if (cpi->sf.auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= cpi->sf.max_partition_size &&
                               bsize >= cpi->sf.min_partition_size);
    partition_horz_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize > cpi->sf.min_partition_size) ||
                               force_horz_split);
    partition_vert_allowed &= ((bsize <= cpi->sf.max_partition_size &&
                                bsize > cpi->sf.min_partition_size) ||
                               force_vert_split);
    do_split &= bsize > cpi->sf.min_partition_size;
  }
  if (cpi->sf.use_square_partition_only) {
    partition_horz_allowed &= force_horz_split;
    partition_vert_allowed &= force_vert_split;
  }

  // PARTITION_NONE
  if (partition_none_allowed) {
    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, bsize, ctx);
    ctx->mic.mbmi = xd->mi[0]->mbmi;
    ctx->skip_txfm = x->skip_txfm;
    ctx->skip = x->skip;

    if (this_rate != INT_MAX) {
      int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      this_rate += cpi->partition_cost[pl][PARTITION_NONE];
      sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
      if (sum_rd < best_rd) {
        int64_t stop_thresh = 4096;
        int64_t stop_thresh_rd;

        best_rate = this_rate;
        best_dist = this_dist;
        best_rd = sum_rd;
        if (bsize >= BLOCK_8X8)
          pc_tree->partitioning = PARTITION_NONE;

        // Adjust threshold according to partition size.
        stop_thresh >>= 8 - (b_width_log2(bsize) +
            b_height_log2(bsize));

        stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh);
        // If obtained distortion is very small, choose current partition
        // and stop splitting.
        if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) {
          do_split = 0;
          do_rect = 0;
        }
      }
    }
  }

  // store estimated motion vector
  store_pred_mv(x, ctx);

  // PARTITION_SPLIT
  sum_rd = 0;
  if (do_split) {
    int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
    sum_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
      const int x_idx = (i & 1) * ms;
      const int y_idx = (i >> 1) * ms;

      if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
        continue;
      load_pred_mv(x, ctx);
      nonrd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx,
                           subsize, &this_rate, &this_dist, 0,
                           best_rd - sum_rd, pc_tree->split[i]);

      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }

    if (sum_rd < best_rd) {
      best_rate = sum_rate;
      best_dist = sum_dist;
      best_rd = sum_rd;
      pc_tree->partitioning = PARTITION_SPLIT;
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
  }

  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);

    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, subsize,
                        &pc_tree->horizontal[0]);

    pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
    pc_tree->horizontal[0].skip_txfm = x->skip_txfm;
    pc_tree->horizontal[0].skip = x->skip;

    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);

    if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) {
      load_pred_mv(x, ctx);
      nonrd_pick_sb_modes(cpi, tile, mi_row + ms, mi_col,
                          &this_rate, &this_dist, subsize,
                          &pc_tree->horizontal[1]);

      pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->horizontal[1].skip_txfm = x->skip_txfm;
      pc_tree->horizontal[1].skip = x->skip;

      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rate += cpi->partition_cost[pl][PARTITION_HORZ];
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      best_rd = sum_rd;
      best_rate = sum_rate;
      best_dist = sum_dist;
      pc_tree->partitioning = PARTITION_HORZ;
    }
  }

  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);

    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);

    nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col,
                        &this_rate, &this_dist, subsize,
                        &pc_tree->vertical[0]);
    pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
    pc_tree->vertical[0].skip_txfm = x->skip_txfm;
    pc_tree->vertical[0].skip = x->skip;
    sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
    if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) {
      load_pred_mv(x, ctx);
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + ms,
                          &this_rate, &this_dist, subsize,
                          &pc_tree->vertical[1]);
      pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->vertical[1].skip_txfm = x->skip_txfm;
      pc_tree->vertical[1].skip = x->skip;
      if (this_rate == INT_MAX) {
        sum_rd = INT64_MAX;
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rate += cpi->partition_cost[pl][PARTITION_VERT];
        sum_rate += this_rate;
        sum_dist += this_dist;
        sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
      }
    }
    if (sum_rd < best_rd) {
      best_rate = sum_rate;
      best_dist = sum_dist;
      best_rd = sum_rd;
      pc_tree->partitioning = PARTITION_VERT;
    }
  }
  // TODO(JBB): The following line is here just to avoid a static warning
  // that occurs because at this point we never again reuse best_rd
  // despite setting it here. The code should be refactored to avoid this.
  (void) best_rd;

  *rate = best_rate;
  *dist = best_dist;

  if (best_rate == INT_MAX)
    return;

  // update mode info array
  subsize = get_subsize(bsize, pc_tree->partitioning);
  fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, subsize,
                    pc_tree);

  if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);

    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled,
                                    best_rate);
    }

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              best_rate, best_dist);

    encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree);
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rate < INT_MAX);
    assert(best_dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}
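
// Walks a partitioning that has already been decided (e.g. copied from the
// previous frame or produced by the variance-based chooser) instead of
// searching: it runs the fast mode decision for each leaf and accumulates
// the rate/distortion totals.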
static void nonrd_use_partition(VP9_COMP *cpi,
                                const TileInfo *const tile,
                                MODE_INFO **mi,
                                TOKENEXTRA **tp,
                                int mi_row, int mi_col,
                                BLOCK_SIZE bsize, int output_enabled,
                                int *totrate, int64_t *totdist,
                                PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  int rate = INT_MAX;
  int64_t dist = INT64_MAX;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
  partition = partition_lookup[bsl][subsize];

  switch (partition) {
    case PARTITION_NONE:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
                          subsize, &pc_tree->none);
      pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->none.skip_txfm = x->skip_txfm;
      pc_tree->none.skip = x->skip;
      break;
    case PARTITION_VERT:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
                          subsize, &pc_tree->vertical[0]);
      pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->vertical[0].skip_txfm = x->skip_txfm;
      pc_tree->vertical[0].skip = x->skip;
      if (mi_col + hbs < cm->mi_cols) {
        nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col + hbs,
                            &rate, &dist, subsize, &pc_tree->vertical[1]);
        pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->vertical[1].skip_txfm = x->skip_txfm;
        pc_tree->vertical[1].skip = x->skip;
        if (rate != INT_MAX && dist != INT64_MAX &&
            *totrate != INT_MAX && *totdist != INT64_MAX) {
          *totrate += rate;
          *totdist += dist;
        }
      }
      break;
    case PARTITION_HORZ:
      nonrd_pick_sb_modes(cpi, tile, mi_row, mi_col, totrate, totdist,
                          subsize, &pc_tree->horizontal[0]);
      pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->horizontal[0].skip_txfm = x->skip_txfm;
      pc_tree->horizontal[0].skip = x->skip;
      if (mi_row + hbs < cm->mi_rows) {
        nonrd_pick_sb_modes(cpi, tile, mi_row + hbs, mi_col,
                            &rate, &dist, subsize, &pc_tree->horizontal[1]);
        pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->horizontal[1].skip_txfm = x->skip_txfm;
        pc_tree->horizontal[1].skip = x->skip;
        if (rate != INT_MAX && dist != INT64_MAX &&
            *totrate != INT_MAX && *totdist != INT64_MAX) {
          *totrate += rate;
          *totdist += dist;
        }
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
                          subsize, output_enabled, totrate, totdist,
                          pc_tree->split[0]);
      nonrd_use_partition(cpi, tile, mi + hbs, tp,
                          mi_row, mi_col + hbs, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[1]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      nonrd_use_partition(cpi, tile, mi + hbs * mis, tp,
                          mi_row + hbs, mi_col, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[2]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      nonrd_use_partition(cpi, tile, mi + hbs * mis + hbs, tp,
                          mi_row + hbs, mi_col + hbs, subsize, output_enabled,
                          &rate, &dist, pc_tree->split[3]);
      if (rate != INT_MAX && dist != INT64_MAX &&
          *totrate != INT_MAX && *totdist != INT64_MAX) {
        *totrate += rate;
        *totdist += dist;
      }
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (bsize == BLOCK_64X64 && output_enabled) {
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              *totrate, *totdist);
    encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, bsize, pc_tree);
  }
}
static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
                                int mi_row, TOKENEXTRA **tp) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCK *x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
  vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    int dummy_rate = 0;
    int64_t dummy_dist = 0;
    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;
    MODE_INFO **prev_mi = cm->prev_mi_grid_visible + idx_str;
    BLOCK_SIZE bsize;

    x->in_static_area = 0;
    x->source_variance = UINT_MAX;
    vp9_zero(x->pred_mv);

    // Set the partition type of the 64X64 block
    switch (cpi->sf.partition_search_type) {
      case VAR_BASED_PARTITION:
        choose_partitioning(cpi, tile, mi_row, mi_col);
        nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, cpi->pc_root);
        break;
      case SOURCE_VAR_BASED_PARTITION:
        set_source_var_based_partition(cpi, tile, mi, mi_row, mi_col);
        nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, cpi->pc_root);
        break;
      case VAR_BASED_FIXED_PARTITION:
      case FIXED_PARTITION:
        bsize = cpi->sf.partition_search_type == FIXED_PARTITION ?
                cpi->sf.always_this_block_size :
                get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
        set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
        nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
                            1, &dummy_rate, &dummy_dist, cpi->pc_root);
        break;
      case REFERENCE_PARTITION:
        if (cpi->sf.partition_check ||
            !is_background(cpi, tile, mi_row, mi_col)) {
          set_modeinfo_offsets(cm, xd, mi_row, mi_col);
          auto_partition_range(cpi, tile, mi_row, mi_col,
                               &cpi->sf.min_partition_size,
                               &cpi->sf.max_partition_size);
          nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                               &dummy_rate, &dummy_dist, 1, INT64_MAX,
                               cpi->pc_root);
        } else {
          copy_partitioning(cm, mi, prev_mi);
          nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
                              BLOCK_64X64, 1, &dummy_rate, &dummy_dist,
                              cpi->pc_root);
        }
        break;
      default:
        assert(0);
        break;
    }
  }
}
// end RTC play code
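
// Histograms the per-16x16 variance of the difference between the current
// and last source frames, then derives cpi->source_var_thresh from the
// distribution. For a 16x16 block (256 pixels) the variance follows the
// usual identity var = sse - sum * sum / 256, which is the ">> 8" in the
// loop below.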
static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
  SPEED_FEATURES *const sf = &cpi->sf;
  VP9_COMMON *const cm = &cpi->common;

  const uint8_t *src = cpi->Source->y_buffer;
  const uint8_t *last_src = cpi->Last_Source->y_buffer;
  const int src_stride = cpi->Source->y_stride;
  const int last_stride = cpi->Last_Source->y_stride;

  // Pick cutoff threshold
  const int cutoff = (MIN(cm->width, cm->height) >= 720) ?
      (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
      (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
  DECLARE_ALIGNED_ARRAY(16, int, hist, VAR_HIST_BINS);
  diff *var16 = cpi->source_diff_var;

  int sum = 0;
  int i, j;

  vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));

  for (i = 0; i < cm->mb_rows; i++) {
    for (j = 0; j < cm->mb_cols; j++) {
      vp9_get16x16var(src, src_stride, last_src, last_stride,
                      &var16->sse, &var16->sum);

      var16->var = var16->sse -
          (((uint32_t)var16->sum * var16->sum) >> 8);

      if (var16->var >= VAR_HIST_MAX_BG_VAR)
        hist[VAR_HIST_BINS - 1]++;
      else
        hist[var16->var / VAR_HIST_FACTOR]++;

      src += 16;
      last_src += 16;
      var16++;
    }

    src = src - cm->mb_cols * 16 + 16 * src_stride;
    last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
  }

  cpi->source_var_thresh = 0;

  if (hist[VAR_HIST_BINS - 1] < cutoff) {
    for (i = 0; i < VAR_HIST_BINS - 1; i++) {
      sum += hist[i];

      if (sum > cutoff) {
        cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
        return 0;
      }
    }
  }

  return sf->search_type_check_frequency;
}
static void source_var_based_partition_search_method(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;

  if (cm->frame_type == KEY_FRAME) {
    // For key frame, use SEARCH_PARTITION.
    sf->partition_search_type = SEARCH_PARTITION;
  } else if (cm->intra_only) {
    sf->partition_search_type = FIXED_PARTITION;
  } else {
    if (cm->last_width != cm->width || cm->last_height != cm->height) {
      if (cpi->source_diff_var)
        vpx_free(cpi->source_diff_var);

      CHECK_MEM_ERROR(cm, cpi->source_diff_var,
                      vpx_calloc(cm->MBs, sizeof(diff)));
    }

    if (!cpi->frames_till_next_var_check)
      cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);

    if (cpi->frames_till_next_var_check > 0) {
      sf->partition_search_type = FIXED_PARTITION;
      cpi->frames_till_next_var_check--;
    }
  }
}
static int get_skip_encode_frame(const VP9_COMMON *cm) {
  unsigned int intra_count = 0, inter_count = 0;
  int j;

  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
    intra_count += cm->counts.intra_inter[j][0];
    inter_count += cm->counts.intra_inter[j][1];
  }

  return (intra_count << 2) < inter_count &&
         cm->frame_type != KEY_FRAME &&
         cm->show_frame;
}
static void encode_tiles(VP9_COMP *cpi) {
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;
  TOKENEXTRA *tok = cpi->tok;

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileInfo tile;
      TOKENEXTRA *old_tok = tok;
      int mi_row;

      vp9_tile_init(&tile, cm, tile_row, tile_col);
      for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
           mi_row += MI_BLOCK_SIZE) {
        if (cpi->sf.use_nonrd_pick_mode && !frame_is_intra_only(cm))
          encode_nonrd_sb_row(cpi, &tile, mi_row, &tok);
        else
          encode_rd_sb_row(cpi, &tile, mi_row, &tok);
      }
      cpi->tok_count[tile_row][tile_col] = (unsigned int)(tok - old_tok);
      assert(tok - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols));
    }
  }
}
#if CONFIG_FP_MB_STATS
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
                            VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
  uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
      cm->current_video_frame * cm->MBs * sizeof(uint8_t);

  if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
    return EOF;

  *this_frame_mb_stats = mb_stats_in;

  return 1;
}
#endif
static void encode_frame_internal(VP9_COMP *cpi) {
  SPEED_FEATURES *const sf = &cpi->sf;
  RD_OPT *const rd_opt = &cpi->rd;
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;

  xd->mi = cm->mi_grid_visible;
  xd->mi[0] = cm->mi;

  vp9_zero(cm->counts);
  vp9_zero(cpi->coef_counts);
  vp9_zero(cpi->tx_stepdown_count);
  vp9_zero(rd_opt->comp_pred_diff);
  vp9_zero(rd_opt->filter_diff);
  vp9_zero(rd_opt->tx_select_diff);
  vp9_zero(rd_opt->tx_select_threshes);

  cpi->mb.e_mbd.lossless = cm->base_qindex == 0 &&
                           cm->y_dc_delta_q == 0 &&
                           cm->uv_dc_delta_q == 0 &&
                           cm->uv_ac_delta_q == 0;

  cm->tx_mode = select_tx_mode(cpi);

  cpi->mb.fwd_txm4x4 = cpi->mb.e_mbd.lossless ? vp9_fwht4x4 : vp9_fdct4x4;
  cpi->mb.itxm_add = cpi->mb.e_mbd.lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;

  if (cpi->mb.e_mbd.lossless) {
    cpi->mb.optimize = 0;
    cpi->common.lf.filter_level = 0;
    cpi->zbin_mode_boost_enabled = 0;
  }

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi);
  vp9_initialize_me_consts(cpi, cm->base_qindex);
  init_encode_frame_mb_context(cpi);

  x->quant_fp = cpi->sf.use_quant_fp;

  if (sf->use_nonrd_pick_mode) {
    // Initialize internal buffer pointers for rtc coding, where non-RD
    // mode decision is used and hence no buffer pointer swap needed.
    int i;
    struct macroblock_plane *const p = x->plane;
    struct macroblockd_plane *const pd = xd->plane;
    PICK_MODE_CONTEXT *ctx = &cpi->pc_root->none;

    for (i = 0; i < MAX_MB_PLANE; ++i) {
      p[i].coeff = ctx->coeff_pbuf[i][0];
      p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
      pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
      p[i].eobs = ctx->eobs_pbuf[i][0];
    }
    vp9_zero(x->zcoeff_blk);

    if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
      source_var_based_partition_search_method(cpi);
  }

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

#if CONFIG_FP_MB_STATS
    if (cpi->use_fp_mb_stats) {
      input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
                       &cpi->twopass.this_frame_mb_stats);
    }
#endif

    encode_tiles(cpi);

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  sf->skip_encode_frame = sf->skip_encode_sb ? get_skip_encode_frame(cm) : 0;

#if 0
  // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
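
// Picks a frame-level interpolation filter: a fixed filter is chosen only
// when its accumulated threshold beats both the competing fixed filters and
// the per-block signaling option (threshes[SWITCHABLE - 1]); otherwise the
// filter is signaled per block via SWITCHABLE. ALTREF frames avoid
// EIGHTTAP_SMOOTH.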
static INTERP_FILTER get_interp_filter(
    const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
  if (!is_alt_ref &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SMOOTH;
  } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
             threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SHARP;
  } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP;
  } else {
    return SWITCHABLE;
  }
}
void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  RD_OPT *const rd_opt = &cpi->rd;

  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
  if (!frame_is_intra_only(cm)) {
    if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
         cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
        (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
         cm->ref_frame_sign_bias[LAST_FRAME])) {
      cm->allow_comp_inter_inter = 0;
    } else {
      cm->allow_comp_inter_inter = 1;
      cm->comp_fixed_ref = ALTREF_FRAME;
      cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = GOLDEN_FRAME;
    }
  }

  if (cpi->sf.frame_parameter_update) {
    int i;

    // This code does a single RD pass over the whole frame assuming
    // either compound, single or hybrid prediction as per whatever has
    // worked best for that type of frame in the past.
    // It also predicts whether another coding mode would have worked
    // better than this coding mode. If that is the case, it remembers
    // that for subsequent frames.
    // It does the same analysis for transform size selection also.
    const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
    int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
    int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
    int *const tx_thrs = rd_opt->tx_select_threshes[frame_type];
    const int is_alt_ref = frame_type == ALTREF_FRAME;

    /* prediction (compound, single or hybrid) mode selection */
    if (is_alt_ref || !cm->allow_comp_inter_inter)
      cm->reference_mode = SINGLE_REFERENCE;
    else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
             mode_thrs[COMPOUND_REFERENCE] >
                 mode_thrs[REFERENCE_MODE_SELECT] &&
             check_dual_ref_flags(cpi) &&
             cpi->static_mb_pct == 100)
      cm->reference_mode = COMPOUND_REFERENCE;
    else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
      cm->reference_mode = SINGLE_REFERENCE;
    else
      cm->reference_mode = REFERENCE_MODE_SELECT;

    if (cm->interp_filter == SWITCHABLE)
      cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);

    encode_frame_internal(cpi);

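    // Fold this frame's per-MB rd diffs into the running thresholds: each
    // update averages the old threshold with the new normalized diff, so
    // history decays geometrically with weight 1/2 per frame.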
    for (i = 0; i < REFERENCE_MODES; ++i)
      mode_thrs[i] = (mode_thrs[i] + rd_opt->comp_pred_diff[i] / cm->MBs) / 2;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      filter_thrs[i] = (filter_thrs[i] + rd_opt->filter_diff[i] / cm->MBs) / 2;

    for (i = 0; i < TX_MODES; ++i) {
      int64_t pd = rd_opt->tx_select_diff[i];
      if (i == TX_MODE_SELECT)
        pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZES - 1), 0);
      tx_thrs[i] = (tx_thrs[i] + (int)(pd / cm->MBs)) / 2;
    }

    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
        single_count_zero += cm->counts.comp_inter[i][0];
        comp_count_zero += cm->counts.comp_inter[i][1];
      }

      if (comp_count_zero == 0) {
        cm->reference_mode = SINGLE_REFERENCE;
        vp9_zero(cm->counts.comp_inter);
      } else if (single_count_zero == 0) {
        cm->reference_mode = COMPOUND_REFERENCE;
        vp9_zero(cm->counts.comp_inter);
      }
    }

    if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        count4x4 += cm->counts.tx.p32x32[i][TX_4X4];
        count4x4 += cm->counts.tx.p16x16[i][TX_4X4];
        count4x4 += cm->counts.tx.p8x8[i][TX_4X4];

        count8x8_lp += cm->counts.tx.p32x32[i][TX_8X8];
        count8x8_lp += cm->counts.tx.p16x16[i][TX_8X8];
        count8x8_8x8p += cm->counts.tx.p8x8[i][TX_8X8];

        count16x16_16x16p += cm->counts.tx.p16x16[i][TX_16X16];
        count16x16_lp += cm->counts.tx.p32x32[i][TX_16X16];
        count32x32 += cm->counts.tx.p32x32[i][TX_32X32];
      }

      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
        reset_skip_tx_size(cm, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
                 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
        cm->tx_mode = ONLY_4X4;
        reset_skip_tx_size(cm, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_16X16;
        reset_skip_tx_size(cm, TX_16X16);
      }
    }
  } else {
    cm->reference_mode = SINGLE_REFERENCE;
    encode_frame_internal(cpi);
  }
}
static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
  const PREDICTION_MODE y_mode = mi->mbmi.mode;
  const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_h)
      for (idx = 0; idx < 2; idx += num_4x4_w)
        ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }

  ++counts->uv_mode[y_mode][uv_mode];
}
static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) {
  if (enabled) {
    if (is_inter_block(mbmi)) {
      if (mbmi->mode == ZEROMV) {
        return mbmi->ref_frame[0] != LAST_FRAME ? GF_ZEROMV_ZBIN_BOOST
                                                : LF_ZEROMV_ZBIN_BOOST;
      } else {
        return mbmi->sb_type < BLOCK_8X8 ? SPLIT_MV_ZBIN_BOOST
                                         : MV_ZBIN_BOOST;
      }
    } else {
      return INTRA_ZBIN_BOOST;
    }
  } else {
    return 0;
  }
}
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO **mi_8x8 = xd->mi;
  MODE_INFO *mi = mi_8x8[0];
  MB_MODE_INFO *mbmi = &mi->mbmi;
  unsigned int segment_id = mbmi->segment_id;
  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];

  x->skip_recode = !x->select_tx_size && mbmi->sb_type >= BLOCK_8X8 &&
                   cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
                   cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
                   cpi->sf.allow_skip_recode;

  x->skip_optimize = ctx->is_coded;
  ctx->is_coded = 1;
  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
                    x->q_index < QIDX_SKIP_THRESH);

  if (x->skip_encode)
    return;

  set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);

  // Experimental code. Special case for gf and arf zeromv modes.
  // Increase zbin size to suppress noise
  cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi,
                                             cpi->zbin_mode_boost_enabled);
  vp9_update_zbin_extra(cpi, x);

  if (!is_inter_block(mbmi)) {
    int plane;
    mbmi->skip = 1;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
      vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);
    if (output_enabled)
      sum_intra_stats(&cm->counts, mi);
    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  } else {
    int ref;
    const int is_compound = has_second_ref(mbmi);
    for (ref = 0; ref < 1 + is_compound; ++ref) {
      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
                                                     mbmi->ref_frame[ref]);
      vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                           &xd->block_refs[ref]->sf);
    }
    if (!cpi->sf.reuse_inter_pred_sby)
      vp9_build_inter_predictors_sby(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));

    vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));

    if (!x->skip) {
      vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
      vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
    } else {
      mbmi->skip = 1;
      if (output_enabled &&
          !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
        cm->counts.skip[vp9_get_skip_context(xd)][1]++;
      reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
    }
  }

  if (output_enabled) {
    if (cm->tx_mode == TX_MODE_SELECT &&
        mbmi->sb_type >= BLOCK_8X8 &&
        !(is_inter_block(mbmi) &&
          (mbmi->skip ||
           vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) {
      ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd),
                      &cm->counts.tx)[mbmi->tx_size];
    } else {
      int x, y;
      TX_SIZE tx_size;
      // The new intra coding scheme requires no change of transform size
      if (is_inter_block(&mi->mbmi)) {
        tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                      max_txsize_lookup[bsize]);
      } else {
        tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
      }

      for (y = 0; y < mi_height; y++)
        for (x = 0; x < mi_width; x++)
          if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
            mi_8x8[mis * y + x]->mbmi.tx_size = tx_size;