/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_ports/system_state.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/encoder/vp9_aq_360.h"
#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);
// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
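// Comparing the source against this flat 128 reference yields exactly the
// source's own sample variance, since the constant offset cancels:
//   sum((s - 128)^2) - (sum(s - 128))^2 / N == sum(s^2) - (sum(s))^2 / N.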
static const uint8_t VP9_VAR_OFFS[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};
#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};
static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};
static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
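// Example: for BLOCK_64X64, num_pels_log2_lookup[bs] is 12 (64 * 64 = 4096 =
// 2^12), so the block variance is rounded and divided by 4096 to give an
// average per-pixel variance.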
#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
                               0, &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
                               0, &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
                               0, &sse);
      break;
  }
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse, var;
  uint8_t *last_y;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row, int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
  if (var < 8)
    return BLOCK_64X64;
  else if (var < 128)
    return BLOCK_32X32;
  else if (var < 2048)
    return BLOCK_16X16;
  else
    return BLOCK_8X8;
}
// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCK *const x,
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
  x->mbmi_ext = x->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, x, xd, mi_row, mi_col);

  mi = xd->mi[0];

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond this range do not produce a new or different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;

  // Setup segment ID.
  if (seg->enabled) {
    if (cpi->oxcf.aq_mode != VARIANCE_AQ &&
        cpi->oxcf.aq_mode != EQUATOR360_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mi->segment_id];
  } else {
    mi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }

  // required by vp9_append_sub8x8_mvs_for_idx() and vp9_find_best_ref_mvs()
  xd->tile = *tile;
}
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  int i, j;
  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) {
      if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
        xd->mi[j * xd->mi_stride + i] = xd->mi[0];
    }
}
static void set_block_size(VP9_COMP *const cpi,
                           MACROBLOCK *const x,
                           MACROBLOCKD *const xd,
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col);
    xd->mi[0]->sb_type = bsize;
  }
}
typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;
static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_4X4: {
      v4x4 *vt = (v4x4 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];
      break;
    }
    default: {
      assert(0);
      break;
    }
  }
}
// Set variance values given sum square error, sum error, count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}

static void get_variance(var *v) {
  v->variance = (int)(256 * (v->sum_square_error -
      ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
}
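// get_variance() computes 256 * (sum_sq_err - sum_err^2 / 2^n) / 2^n with
// n = log2_count, i.e. the integer sample variance of the accumulated
// samples, scaled by 256 to keep precision through the integer shifts.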
static void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  memset(&node, 0, sizeof(node));
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}
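// Note: split[0..3] are the four quadrants in raster order (0 = top-left,
// 1 = top-right, 2 = bottom-left, 3 = bottom-right), so horz[0]/horz[1]
// hold the top/bottom halves and vert[0]/vert[1] the left/right halves.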
static int set_vt_partitioning(VP9_COMP *cpi,
                               MACROBLOCK *const x,
                               MACROBLOCKD *const xd,
                               void *data,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col,
                               int64_t threshold,
                               BLOCK_SIZE bsize_min,
                               int force_split) {
  VP9_COMMON * const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  if (force_split == 1)
    return 0;

  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_min) {
    // Variance already computed to set the force_split.
    if (cm->frame_type == KEY_FRAME)
      get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    // Variance already computed to set the force_split.
    if (cm->frame_type == KEY_FRAME)
      get_variance(&vt.part_variances->none);
    // For key frame: take split for bsize above 32X32 or very high variance.
    if (cm->frame_type == KEY_FRAME &&
        (bsize > BLOCK_32X32 ||
        vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }
    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }

    return 0;
  }
  return 0;
}
// Set the variance split thresholds for the following block sizes:
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frame.
static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) {
  VP9_COMMON *const cm = &cpi->common;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int threshold_multiplier = is_key_frame ? 20 : 1;
  int64_t threshold_base = (int64_t)(threshold_multiplier *
      cpi->y_dequant[q][1]);
  if (is_key_frame) {
    thresholds[0] = threshold_base;
    thresholds[1] = threshold_base >> 2;
    thresholds[2] = threshold_base >> 2;
    thresholds[3] = threshold_base << 2;
  } else {
    // Increase base variance threshold based on estimated noise level.
    if (cpi->noise_estimate.enabled) {
      NOISE_LEVEL noise_level = vp9_noise_estimate_extract_level(
          &cpi->noise_estimate);
      if (noise_level == kHigh)
        threshold_base = 3 * threshold_base;
      else if (noise_level == kMedium)
        threshold_base = threshold_base << 1;
      else if (noise_level < kLow)
        threshold_base = (7 * threshold_base) >> 3;
    }
    if (cm->width <= 352 && cm->height <= 288) {
      thresholds[0] = threshold_base >> 3;
      thresholds[1] = threshold_base >> 1;
      thresholds[2] = threshold_base << 3;
    } else {
      thresholds[0] = threshold_base;
      thresholds[1] = (5 * threshold_base) >> 2;
      if (cm->width >= 1920 && cm->height >= 1080)
        thresholds[1] = (7 * threshold_base) >> 2;
      thresholds[2] = threshold_base << cpi->oxcf.speed;
    }
  }
}
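// Note: threshold_base scales with the luma dequant step for the given q, so
// that, roughly, at higher quantizers larger variances are tolerated before a
// split is chosen, matching the coarser detail that survives quantization.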
void vp9_set_variance_partition_thresholds(VP9_COMP *cpi, int q) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  if (sf->partition_search_type != VAR_BASED_PARTITION &&
      sf->partition_search_type != REFERENCE_PARTITION) {
    return;
  } else {
    set_vbp_thresholds(cpi, cpi->vbp_thresholds, q);
    // The thresholds below are not changed locally.
    if (is_key_frame) {
      cpi->vbp_threshold_sad = 0;
      cpi->vbp_bsize_min = BLOCK_8X8;
    } else {
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_sad = 10;
      else
        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ?
            (cpi->y_dequant[q][1] << 1) : 1000;
      cpi->vbp_bsize_min = BLOCK_16X16;
    }
    cpi->vbp_threshold_minmax = 15 + (q >> 3);
  }
}
// Compute the minmax over the 8x8 subblocks.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                              int dp, int x16_idx, int y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                              int highbd_flag,
#endif
                              int pixels_wide,
                              int pixels_high) {
  int k;
  int minmax_max = 0;
  int minmax_min = 255;
  // Loop over the 4 8x8 subblocks.
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    int min = 0;
    int max = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                              d + y8_idx * dp + x8_idx, dp,
                              &min, &max);
      } else {
        vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                       d + y8_idx * dp + x8_idx, dp,
                       &min, &max);
      }
#else
      vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                     d + y8_idx * dp + x8_idx, dp,
                     &min, &max);
#endif
      if ((max - min) > minmax_max)
        minmax_max = (max - min);
      if ((max - min) < minmax_min)
        minmax_min = (max - min);
    }
  }
  return (minmax_max - minmax_min);
}
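// The value returned above is the spread of the per-subblock (max - min)
// contrast: the largest such range among the four 8x8 subblocks minus the
// smallest. A large spread suggests mixed flat/detailed content inside the
// 16x16 area, which is used below to force a split to 8x8.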
static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x8_idx, int y8_idx, v8x8 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide,
                                 int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x4_idx = x8_idx + ((k & 1) << 2);
    int y4_idx = y8_idx + ((k >> 1) << 2);
    unsigned int sse = 0;
    int sum = 0;
    if (x4_idx < pixels_wide && y4_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      } else {
        s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      }
#else
      s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
      if (!is_key_frame)
        d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x16_idx, int y16_idx, v16x16 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide,
                                 int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    unsigned int sse = 0;
    int sum = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      } else {
        s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      }
#else
      s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
      if (!is_key_frame)
        d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
// This function chooses partitioning based on the variance between source and
// the reconstructed last frame, where variance is computed for down-sampled
// inputs.
static int choose_partitioning(VP9_COMP *cpi,
                               const TileInfo *const tile,
                               MACROBLOCK *x,
                               int mi_row, int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int i, j, k, m;
  v64x64 vt;
  v16x16 vt2[16];
  int force_split[21];
  int avg_32x32;
  int avg_16x16[4];
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
  int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
      cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};
  // For the variance computation under SVC mode, we treat the frame as key if
  // the reference (base layer frame) is a key frame (i.e., is_key_frame == 1).
  const int is_key_frame = (cm->frame_type == KEY_FRAME ||
      (is_one_pass_cbr_svc(cpi) &&
      cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame));
  // Always use 4x4 partition for key frame.
  const int use_4x4_partition = cm->frame_type == KEY_FRAME;
  const int low_res = (cm->width <= 352 && cm->height <= 288);
  int variance4x4downsample[16];

  int segment_id = CR_SEGMENT_ID_BASE;
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
                                                    cm->last_frame_seg_map;
    segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);

    if (cyclic_refresh_segment_id_boosted(segment_id)) {
      int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
      set_vbp_thresholds(cpi, thresholds, q);
    }
  }
  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;
  // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
  // 5-20 for the 16x16 blocks.
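  // Concretely: force_split[0] covers the whole 64x64, force_split[1 + i]
  // covers 32x32 quadrant i (raster order), and force_split[5 + 4 * i + j]
  // covers 16x16 sub-block j within 32x32 quadrant i (the split_index used
  // in the loops below).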
  force_split[0] = 0;

  if (!is_key_frame) {
    // In the case of spatial/temporal scalable coding, the assumption here is
    // that the temporal reference frame will always be of type LAST_FRAME.
    // TODO(marpan): If that assumption is broken, we need to revisit this code.
    MODE_INFO *mi = xd->mi[0];
    unsigned int uv_sad;
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);

    const YV12_BUFFER_CONFIG *yv12_g = NULL;
    unsigned int y_sad, y_sad_g;
    const BLOCK_SIZE bsize = BLOCK_32X32
        + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows);
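    // The BLOCK_SIZE arithmetic above relies on the enum layout: from
    // BLOCK_32X32, adding 2 steps to the 64-wide sizes and adding 1 to the
    // 64-high sizes, so the SAD block is 64x64, 64x32, 32x64 or 32x32
    // depending on how much of the superblock lies inside the frame.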
    assert(yv12 != NULL);

    if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id)) {
      // For now, GOLDEN will not be used for non-zero spatial layers, since
      // it may not be a temporal reference.
      yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    }

    if (yv12_g && yv12_g != yv12 &&
        (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
                                       x->plane[0].src.stride,
                                       xd->plane[0].pre[0].buf,
                                       xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }
    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                         &cm->frame_refs[LAST_FRAME - 1].sf);
    mi->ref_frame[0] = LAST_FRAME;
    mi->ref_frame[1] = NONE;
    mi->sb_type = BLOCK_64X64;
    mi->mv[0].as_int = 0;
    mi->interp_filter = BILINEAR;

    y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
    if (y_sad_g < y_sad) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mi->ref_frame[0] = GOLDEN_FRAME;
      mi->mv[0].as_int = 0;
      y_sad = y_sad_g;
    } else {
      x->pred_mv[LAST_FRAME] = mi->mv[0].as_mv;
    }

    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
    // Check if most of the superblock is skin content, and if so, force split
    // to 32x32. Avoid checking superblocks on/near boundary and avoid low
    // resolutions for now.
    // Note superblock may still pick 64X64 if y_sad is very small
    // (i.e., y_sad < cpi->vbp_threshold_sad) below. For now leave this as is.
#if !CONFIG_VP9_HIGHBITDEPTH
    if (cpi->use_skin_detection && !low_res && (mi_col >= 8 &&
        mi_col + 8 < cm->mi_cols && mi_row >= 8 && mi_row + 8 < cm->mi_rows)) {
      int num_16x16_skin = 0;
      int num_16x16_nonskin = 0;
      uint8_t *ysignal = x->plane[0].src.buf;
      uint8_t *usignal = x->plane[1].src.buf;
      uint8_t *vsignal = x->plane[2].src.buf;
      int spuv = x->plane[1].src.stride;
      for (i = 0; i < 4; i++) {
        for (j = 0; j < 4; j++) {
          int is_skin = vp9_compute_skin_block(ysignal,
                                               usignal,
                                               vsignal,
                                               sp,
                                               spuv,
                                               BLOCK_16X16);
          num_16x16_skin += is_skin;
          num_16x16_nonskin += (1 - is_skin);
          if (num_16x16_nonskin > 3) {
            // Exit loop if at least 4 of the 16x16 blocks are not skin.
            i = 4;
            j = 4;
            break;
          }
          ysignal += 16;
          usignal += 8;
          vsignal += 8;
        }
        ysignal += (sp << 4) - 64;
        usignal += (spuv << 3) - 32;
        vsignal += (spuv << 3) - 32;
      }
      if (num_16x16_skin > 12) {
        force_split[0] = 1;
      }
    }
#endif
    for (i = 1; i <= 2; ++i) {
      struct macroblock_plane *p = &x->plane[i];
      struct macroblockd_plane *pd = &xd->plane[i];
      const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);

      if (bs == BLOCK_INVALID)
        uv_sad = UINT_MAX;
      else
        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
                                     pd->dst.buf, pd->dst.stride);

      // TODO(marpan): Investigate if we should lower this threshold if
      // superblock is detected as skin.
      x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
    }
    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;

    // If the y_sad is very small, take 64x64 as partition and exit.
    // Don't check on boosted segment for now, as 64x64 is suppressed there.
    if (segment_id == CR_SEGMENT_ID_BASE &&
        y_sad < cpi->vbp_threshold_sad) {
      const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
      const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
      if (mi_col + block_width / 2 < cm->mi_cols &&
          mi_row + block_height / 2 < cm->mi_rows) {
        set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64);
        return 0;
      }
    }
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
          break;
        case 12:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
          break;
        case 8:
        default:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
          break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    const int i2 = i << 2;
    force_split[i + 1] = 0;
    avg_16x16[i] = 0;
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      const int split_index = 5 + i2 + j;
      v16x16 *vst = &vt.split[i].split[j];
      force_split[split_index] = 0;
      variance4x4downsample[i2 + j] = 0;
      if (!is_key_frame) {
        fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
#if CONFIG_VP9_HIGHBITDEPTH
                             xd->cur_buf->flags,
#endif
                             pixels_wide,
                             pixels_high,
                             is_key_frame);
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
        get_variance(&vt.split[i].split[j].part_variances.none);
        avg_16x16[i] += vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance >
            thresholds[2]) {
          // 16X16 variance is above threshold for split, so force split to 8x8
          // for this 16x16 block (this also forces splits for upper levels).
          force_split[split_index] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        } else if (cpi->oxcf.speed < 8 &&
                   vt.split[i].split[j].part_variances.none.variance >
                   thresholds[1] &&
                   !cyclic_refresh_segment_id_boosted(segment_id)) {
          // We have some nominal amount of 16x16 variance (based on average),
          // compute the minmax over the 8x8 sub-blocks, and if above threshold,
          // force split to 8x8 block for this 16x16 block.
          int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                                          xd->cur_buf->flags,
#endif
                                          pixels_wide, pixels_high);
          if (minmax > cpi->vbp_threshold_minmax) {
            force_split[split_index] = 1;
            force_split[i + 1] = 1;
            force_split[0] = 1;
          }
        }
      }
      if (is_key_frame || (low_res &&
          vt.split[i].split[j].part_variances.none.variance >
          (thresholds[1] << 1))) {
        force_split[split_index] = 0;
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
          v8x8 *vst2 = is_key_frame ? &vst->split[k] :
              &vt2[i2 + j].split[k];
          fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
#if CONFIG_VP9_HIGHBITDEPTH
                               xd->cur_buf->flags,
#endif
                               pixels_wide,
                               pixels_high,
                               is_key_frame);
        }
      }
    }
  }
  // Fill the rest of the variance tree by summing split partition values.
  avg_32x32 = 0;
  for (i = 0; i < 4; i++) {
    const int i2 = i << 2;
    for (j = 0; j < 4; j++) {
      if (variance4x4downsample[i2 + j] == 1) {
        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
            &vt.split[i].split[j];
        for (m = 0; m < 4; m++)
          fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
        fill_variance_tree(vtemp, BLOCK_16X16);
        // If variance of this 16x16 block is above the threshold, force block
        // to split. This also forces a split on the upper levels.
        get_variance(&vtemp->part_variances.none);
        if (vtemp->part_variances.none.variance > thresholds[2]) {
          force_split[5 + i2 + j] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        }
      }
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
    // If variance of this 32x32 block is above the threshold, or if it is
    // above (some threshold of) the average variance over the sub-16x16
    // blocks, then force this block to split. This also forces a split on
    // the upper (64x64) level.
    if (!force_split[i + 1]) {
      get_variance(&vt.split[i].part_variances.none);
      if (vt.split[i].part_variances.none.variance > thresholds[1] ||
          (!is_key_frame &&
          vt.split[i].part_variances.none.variance > (thresholds[1] >> 1) &&
          vt.split[i].part_variances.none.variance > (avg_16x16[i] >> 1))) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      }
      avg_32x32 += vt.split[i].part_variances.none.variance;
    }
  }
  if (!force_split[0]) {
    fill_variance_tree(&vt, BLOCK_64X64);
    get_variance(&vt.part_variances.none);
    // If variance of this 64x64 block is above (some threshold of) the average
    // variance over the sub-32x32 blocks, then force this block to split.
    if (!is_key_frame &&
        vt.part_variances.none.variance > (5 * avg_32x32) >> 4)
      force_split[0] = 1;
  }
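  // Note: avg_32x32 above holds the sum of the four 32x32 variances, so
  // (5 * avg_32x32) >> 4 is 5/16 of that sum, i.e. 1.25x their mean.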
  // Now go through the entire structure, splitting every block size until
  // we get to one that's got a variance lower than our threshold.
  if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
      !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                           thresholds[0], BLOCK_16X16, force_split[0])) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      const int i2 = i << 2;
      if (!set_vt_partitioning(cpi, x, xd, &vt.split[i], BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx),
                               thresholds[1], BLOCK_16X16,
                               force_split[i + 1])) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use vt2
          // in set_vt_partitioning(), otherwise use vt.
          v16x16 *vtemp = (!is_key_frame &&
                           variance4x4downsample[i2 + j] == 1) ?
                           &vt2[i2 + j] : &vt.split[i].split[j];
          if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16,
                                   mi_row + y32_idx + y16_idx,
                                   mi_col + x32_idx + x16_idx,
                                   thresholds[2],
                                   cpi->vbp_bsize_min,
                                   force_split[5 + i2 + j])) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              if (use_4x4_partition) {
                if (!set_vt_partitioning(cpi, x, xd, &vtemp->split[k],
                                         BLOCK_8X8,
                                         mi_row + y32_idx + y16_idx + y8_idx,
                                         mi_col + x32_idx + x16_idx + x8_idx,
                                         thresholds[3], BLOCK_8X8, 0)) {
                  set_block_size(cpi, x, xd,
                                 (mi_row + y32_idx + y16_idx + y8_idx),
                                 (mi_col + x32_idx + x16_idx + x8_idx),
                                 BLOCK_4X4);
                }
              } else {
                set_block_size(cpi, x, xd,
                               (mi_row + y32_idx + y16_idx + y8_idx),
                               (mi_col + x32_idx + x16_idx + x8_idx),
                               BLOCK_8X8);
              }
            }
          }
        }
      }
    }
  }
  return 0;
}
static void update_state(VP9_COMP *cpi, ThreadData *td,
                         PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  RD_COUNTS *const rdc = &td->rd_counts;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MODE_INFO *const xdmi = xd->mi[0];
  MODE_INFO *mi_addr = xd->mi[0];
  const struct segmentation *const seg = &cm->seg;
  const int bw = num_8x8_blocks_wide_lookup[mi->sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
  MV_REF *const frame_mvs =
      cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;

  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->sb_type == bsize);
  *mi_addr = *mi;
  *x->mbmi_ext = ctx->mbmi_ext;

  // If segmentation in use
  if (seg->enabled) {
    // For in frame complexity AQ copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi_addr->segment_id =
          get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else for cyclic refresh mode update the segment map, set the segment id
    // and then update the quantizer.
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
      vp9_cyclic_refresh_update_segment(cpi, xd->mi[0], mi_row,
                                        mi_col, bsize, ctx->rate, ctx->dist,
                                        x->skip, p);
    }
  }
  max_plane = is_inter_block(xdmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][1];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];
  }

  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  }
  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
          && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi[x_idx + y * mis] = mi_addr;
      }

  if (cpi->oxcf.aq_mode)
    vp9_init_plane_quantizers(cpi, x);

  if (is_inter_block(xdmi) && xdmi->sb_type < BLOCK_8X8) {
    xdmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    xdmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }

  x->skip = ctx->skip;
  memcpy(x->zcoeff_blk[xdmi->tx_size], ctx->zcoeff_blk,
         sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
  if (!output_enabled)
    return;

#if CONFIG_INTERNAL_STATS
  if (frame_is_intra_only(cm)) {
    static const int kf_mode_index[] = {
      THR_DC        /*DC_PRED*/,
      THR_V_PRED    /*V_PRED*/,
      THR_H_PRED    /*H_PRED*/,
      THR_D45_PRED  /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED  /*D63_PRED*/,
      THR_TM        /*TM_PRED*/,
    };
    ++cpi->mode_chosen_counts[kf_mode_index[xdmi->mode]];
  } else {
    // Note how often each mode is chosen as best
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif
  if (!frame_is_intra_only(cm)) {
    if (is_inter_block(xdmi)) {
      vp9_update_mv_count(td);

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = vp9_get_pred_context_switchable_interp(xd);
        ++td->counts->switchable_interp[ctx][xdmi->interp_filter];
      }
    }

    rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      rdc->filter_diff[i] += ctx->best_filter_diff[i];
  }

  for (h = 0; h < y_mis; ++h) {
    MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
    for (w = 0; w < x_mis; ++w) {
      MV_REF *const mv = frame_mv + w;
      mv->ref_frame[0] = mi->ref_frame[0];
      mv->ref_frame[1] = mi->ref_frame[1];
      mv->mv[0].as_int = mi->mv[0].as_int;
      mv->mv[1].as_int = mi->mv[1].as_int;
    }
  }
}
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
  const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
  int i;

  // Set current frame pointer.
  x->e_mbd.cur_buf = src;

  for (i = 0; i < MAX_MB_PLANE; i++)
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}
static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
                                   RD_COST *rd_cost, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  INTERP_FILTER filter_ref;

  if (xd->up_available)
    filter_ref = xd->mi[-xd->mi_stride]->interp_filter;
  else if (xd->left_available)
    filter_ref = xd->mi[-1]->interp_filter;
  else
    filter_ref = EIGHTTAP;

  mi->sb_type = bsize;
  mi->mode = ZEROMV;
  mi->tx_size =
      VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[tx_mode]);
  mi->skip = 1;
  mi->uv_mode = DC_PRED;
  mi->ref_frame[0] = LAST_FRAME;
  mi->ref_frame[1] = NONE;
  mi->mv[0].as_int = 0;
  mi->interp_filter = filter_ref;

  xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
  x->skip = 1;

  vp9_rd_cost_init(rd_cost);
}
static int set_segment_rdmult(VP9_COMP *const cpi,
                              MACROBLOCK *const x,
                              int8_t segment_id) {
  int segment_qindex;
  VP9_COMMON *const cm = &cpi->common;
  vp9_init_plane_quantizers(cpi, x);
  vpx_clear_system_state();
  segment_qindex = vp9_get_qindex(&cm->seg, segment_id,
                                  cm->base_qindex);
  return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}
static void rd_pick_sb_modes(VP9_COMP *cpi,
                             TileDataEnc *tile_data,
                             MACROBLOCK *const x,
                             int mi_row, int mi_col, RD_COST *rd_cost,
                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                             int64_t best_rd) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  int i, orig_rdmult;

  vpx_clear_system_state();

  // Use the lower precision, but faster, 32x32 fdct for mode selection.
  x->use_lp32x32fdct = 1;

  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  mi = xd->mi[0];
  mi->sb_type = bsize;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][0];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
    p[i].eobs = ctx->eobs_pbuf[i][0];
  }

  ctx->pred_pixel_ready = 0;
  // Set to zero to make sure we do not use the previous encoded frame stats
  mi->skip = 0;

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    x->source_variance =
        vp9_high_get_sby_perpixel_variance(cpi, &x->plane[0].src,
                                           bsize, xd->bd);
  } else {
    x->source_variance =
        vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
  }
#else
  x->source_variance =
      vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  // Save rdmult before it might be changed, so it can be restored later.
  orig_rdmult = x->rdmult;

  if (aq_mode == VARIANCE_AQ) {
    const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
                                            : vp9_block_energy(cpi, x, bsize);
    if (cm->frame_type == KEY_FRAME ||
        cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
      mi->segment_id = vp9_vaq_segment_id(energy);
    } else {
      const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                    : cm->last_frame_seg_map;
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
  } else if (aq_mode == EQUATOR360_AQ) {
    if (cm->frame_type == KEY_FRAME) {
      mi->segment_id = vp9_360aq_segment_id(mi_row, cm->mi_rows);
    } else {
      const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                    : cm->last_frame_seg_map;
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
  } else if (aq_mode == COMPLEXITY_AQ) {
    x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
  } else if (aq_mode == CYCLIC_REFRESH_AQ) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                  : cm->last_frame_seg_map;
    // If segment is boosted, use rdmult for that segment.
    if (cyclic_refresh_segment_id_boosted(
            get_segment_id(cm, map, bsize, mi_row, mi_col)))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
  }
  // Find best coding mode & reconstruct the MB so it is available
  // as a predictor for MBs that follow in the SB
  if (frame_is_intra_only(cm)) {
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
  } else {
    if (bsize >= BLOCK_8X8) {
      if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
        vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
                                           ctx, best_rd);
      else
        vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
                                  rd_cost, bsize, ctx, best_rd);
    } else {
      vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
                                    rd_cost, bsize, ctx, best_rd);
    }
  }

  // Examine the resulting rate and for AQ mode 2 make a segment choice.
  if ((rd_cost->rate != INT_MAX) &&
      (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
      (cm->frame_type == KEY_FRAME ||
       cpi->refresh_alt_ref_frame ||
       (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
    vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
  }

  x->rdmult = orig_rdmult;

  // TODO(jingning) The rate-distortion optimization flow needs to be
  // refactored to provide proper exit/return handling.
  if (rd_cost->rate == INT_MAX)
    rd_cost->rdcost = INT64_MAX;

  ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}
static void update_stats(VP9_COMMON *cm, ThreadData *td) {
  const MACROBLOCK *x = &td->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const MODE_INFO *const mi = xd->mi[0];
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const BLOCK_SIZE bsize = mi->sb_type;

  if (!frame_is_intra_only(cm)) {
    FRAME_COUNTS *const counts = td->counts;
    const int inter_block = is_inter_block(mi);
    const int seg_ref_active = segfeature_active(&cm->seg, mi->segment_id,
                                                 SEG_LVL_REF_FRAME);
    if (!seg_ref_active) {
      counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;
      // If the segment reference feature is enabled we have only a single
      // reference frame allowed for the segment so exclude it from
      // the reference frame counts used to work out probabilities.
      if (inter_block) {
        const MV_REFERENCE_FRAME ref0 = mi->ref_frame[0];
        if (cm->reference_mode == REFERENCE_MODE_SELECT)
          counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
                            [has_second_ref(mi)]++;

        if (has_second_ref(mi)) {
          counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
                          [ref0 == GOLDEN_FRAME]++;
        } else {
          counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
                            [ref0 != LAST_FRAME]++;
          if (ref0 != LAST_FRAME)
            counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
                              [ref0 != GOLDEN_FRAME]++;
        }
      }
    }
    if (inter_block &&
        !segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) {
      const int mode_ctx = mbmi_ext->mode_context[mi->ref_frame[0]];
      if (bsize >= BLOCK_8X8) {
        const PREDICTION_MODE mode = mi->mode;
        ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
      } else {
        const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
        const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
        int idx, idy;
        for (idy = 0; idy < 2; idy += num_4x4_h) {
          for (idx = 0; idx < 2; idx += num_4x4_w) {
            const int j = idy * 2 + idx;
            const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
            ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
          }
        }
      }
    }
  }
}
static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
                            ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                            ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                            PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                            BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  for (p = 0; p < MAX_MB_PLANE; p++) {
    memcpy(
        xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
        a + num_4x4_blocks_wide * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
        xd->plane[p].subsampling_x);
    memcpy(
        xd->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        l + num_4x4_blocks_high * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
        xd->plane[p].subsampling_y);
  }
  memcpy(xd->above_seg_context + mi_col, sa,
         sizeof(*xd->above_seg_context) * mi_width);
  memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
         sizeof(xd->left_seg_context[0]) * mi_height);
}
static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
                         ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                         ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                         PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                         BLOCK_SIZE bsize) {
  const MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];

  // buffer the above/left context information of the block in search.
  for (p = 0; p < MAX_MB_PLANE; ++p) {
    memcpy(
        a + num_4x4_blocks_wide * p,
        xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
        xd->plane[p].subsampling_x);
    memcpy(
        l + num_4x4_blocks_high * p,
        xd->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
        xd->plane[p].subsampling_y);
  }
  memcpy(sa, xd->above_seg_context + mi_col,
         sizeof(*xd->above_seg_context) * mi_width);
  memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
         sizeof(xd->left_seg_context[0]) * mi_height);
}
static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
                     ThreadData *td,
                     TOKENEXTRA **tp, int mi_row, int mi_col,
                     int output_enabled, BLOCK_SIZE bsize,
                     PICK_MODE_CONTEXT *ctx) {
  MACROBLOCK *const x = &td->mb;
  set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
  update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
  encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);

  if (output_enabled) {
    update_stats(&cpi->common, td);

    (*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}
static void encode_sb(VP9_COMP *cpi, ThreadData *td,
                      const TileInfo *const tile,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize,
                      PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize = bsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize >= BLOCK_8X8) {
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = get_subsize(bsize, pc_tree->partitioning);
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    td->counts->partition[ctx][partition]++;
  switch (partition) {
    case PARTITION_NONE:
      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->none);
      break;
    case PARTITION_VERT:
      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
                 subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->horizontal[0]);
      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
                 subsize, &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      if (bsize == BLOCK_8X8) {
        encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
                 pc_tree->leaf_split[0]);
      } else {
        encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  pc_tree->split[0]);
        encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
                  subsize, pc_tree->split[1]);
        encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
                  subsize, pc_tree->split[2]);
        encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                  subsize, pc_tree->split[3]);
      }
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not, then return the largest allowed partition size.
static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
                                      int rows_left, int cols_left,
                                      int *bh, int *bw) {
  if (rows_left <= 0 || cols_left <= 0) {
    return VPXMIN(bsize, BLOCK_8X8);
  } else {
    for (; bsize > 0; bsize -= 3) {
      *bh = num_8x8_blocks_high_lookup[bsize];
      *bw = num_8x8_blocks_wide_lookup[bsize];
      if ((*bh <= rows_left) && (*bw <= cols_left)) {
        break;
      }
    }
  }
  return bsize;
}
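// The loop above steps bsize by 3 because the square sizes sit three entries
// apart in the BLOCK_SIZE enum (BLOCK_8X8, BLOCK_16X16, BLOCK_32X32,
// BLOCK_64X64), so each iteration tries the next smaller square partition.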
static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
    int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
    BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
  int bh = bh_in;
  int r, c;
  for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
    int bw = bw_in;
    for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
      const int index = r * mis + c;
      mi_8x8[index] = mi + index;
      mi_8x8[index]->sb_type = find_partition_size(bsize,
          row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
    }
  }
}
// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                                   MODE_INFO **mi_8x8, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  int block_row, block_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
  int bh = num_8x8_blocks_high_lookup[bsize];
  int bw = num_8x8_blocks_wide_lookup[bsize];

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // Apply the requested partition size to the SB64 if it is all "in image".
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        int index = block_row * mis + block_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->sb_type = bsize;
      }
    }
  } else {
    // Else this is a partial SB64.
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
                                 col8x8_remaining, bsize, mi_8x8);
  }
}
static const struct {
  int row;
  int col;
} coord_lookup[16] = {
    // 32x32 index = 0
    {0, 0}, {0, 2}, {2, 0}, {2, 2},
    // 32x32 index = 1
    {0, 4}, {0, 6}, {2, 4}, {2, 6},
    // 32x32 index = 2
    {4, 0}, {4, 2}, {6, 0}, {6, 2},
    // 32x32 index = 3
    {4, 4}, {4, 6}, {6, 4}, {6, 6},
};
static void set_source_var_based_partition(VP9_COMP *cpi,
                                           const TileInfo *const tile,
                                           MACROBLOCK *const x,
                                           MODE_INFO **mi_8x8,
                                           int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;

  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // In-image SB64
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    int i, j;
    int index;
    diff d32[4];
    const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
    int is_larger_better = 0;
    int use32x32 = 0;
    unsigned int thr = cpi->source_var_thresh;

    memset(d32, 0, 4 * sizeof(diff));

    for (i = 0; i < 4; i++) {
      diff *d16[4];

      for (j = 0; j < 4; j++) {
        int b_mi_row = coord_lookup[i * 4 + j].row;
        int b_mi_col = coord_lookup[i * 4 + j].col;
        int boffset = b_mi_row / 2 * cm->mb_cols +
                      b_mi_col / 2;

        d16[j] = cpi->source_diff_var + offset + boffset;

        index = b_mi_row * mis + b_mi_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->sb_type = BLOCK_16X16;

        // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
        // size to further improve quality.
      }

      is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
          (d16[2]->var < thr) && (d16[3]->var < thr);

      // Use 32x32 partition
      if (is_larger_better) {
        use32x32 += 1;

        for (j = 0; j < 4; j++) {
          d32[i].sse += d16[j]->sse;
          d32[i].sum += d16[j]->sum;
        }

        d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
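        // (sum * sum) >> 10 divides by 1024, the pixel count of a 32x32
        // block, so d32[i].var is the usual unnormalized SSE - sum^2 / N.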
        index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->sb_type = BLOCK_32X32;
      }
    }

    if (use32x32 == 4) {
      is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
          (d32[2].var < thr) && (d32[3].var < thr);

      // Use 64x64 partition
      if (is_larger_better) {
        mi_8x8[0] = mi_upper_left;
        mi_8x8[0]->sb_type = BLOCK_64X64;
      }
    }
  } else {   // partial in-image SB64
    int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
    int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
                                 row8x8_remaining, col8x8_remaining,
                                 BLOCK_16X16, mi_8x8);
  }
}
static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
                            PICK_MODE_CONTEXT *ctx,
                            int mi_row, int mi_col, int bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  struct macroblock_plane *const p = x->plane;
  const struct segmentation *const seg = &cm->seg;
  const int bw = num_8x8_blocks_wide_lookup[mi->sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);

  *(xd->mi[0]) = ctx->mic;
  *(x->mbmi_ext) = ctx->mbmi_ext;

  if (seg->enabled && cpi->oxcf.aq_mode) {
    // For in frame complexity AQ or variance AQ, copy segment_id from
    // segmentation_map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ ||
        cpi->oxcf.aq_mode == VARIANCE_AQ ||
        cpi->oxcf.aq_mode == EQUATOR360_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    } else {
      // Setting segmentation map for cyclic_refresh.
      vp9_cyclic_refresh_update_segment(cpi, mi, mi_row, mi_col, bsize,
                                        ctx->rate, ctx->dist, x->skip, p);
    }
    vp9_init_plane_quantizers(cpi, x);
  }

  if (is_inter_block(mi)) {
    vp9_update_mv_count(td);
    if (cm->interp_filter == SWITCHABLE) {
      const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
      ++td->counts->switchable_interp[pred_ctx][mi->interp_filter];
    }

    if (mi->sb_type < BLOCK_8X8) {
      mi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
      mi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
    }
  }

  if (cm->use_prev_frame_mvs ||
      (cpi->svc.use_base_mv && cpi->svc.number_spatial_layers > 1
          && cpi->svc.spatial_layer_id != cpi->svc.number_spatial_layers - 1)) {
    MV_REF *const frame_mvs =
        cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
    int w, h;

    for (h = 0; h < y_mis; ++h) {
      MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
      for (w = 0; w < x_mis; ++w) {
        MV_REF *const mv = frame_mv + w;
        mv->ref_frame[0] = mi->ref_frame[0];
        mv->ref_frame[1] = mi->ref_frame[1];
        mv->mv[0].as_int = mi->mv[0].as_int;
        mv->mv[1].as_int = mi->mv[1].as_int;
      }
    }
  }

  x->skip = ctx->skip;
  x->skip_txfm[0] = mi->segment_id ? 0 : ctx->skip_txfm[0];
}
static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
                        const TileInfo *const tile,
                        TOKENEXTRA **tp, int mi_row, int mi_col,
                        int output_enabled, BLOCK_SIZE bsize,
                        PICK_MODE_CONTEXT *ctx) {
  MACROBLOCK *const x = &td->mb;
  set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
  update_state_rt(cpi, td, ctx, mi_row, mi_col, bsize);

  encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
  update_stats(&cpi->common, td);

  (*tp)->token = EOSB_TOKEN;
  (*tp)++;
}
static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
                         const TileInfo *const tile,
                         TOKENEXTRA **tp, int mi_row, int mi_col,
                         int output_enabled, BLOCK_SIZE bsize,
                         PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize >= BLOCK_8X8) {
    const int idx_str = xd->mi_stride * mi_row + mi_col;
    MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = mi_8x8[0]->sb_type;
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    td->counts->partition[ctx][partition]++;
1874 switch (partition) {
1875 case PARTITION_NONE:
1876 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1879 case PARTITION_VERT:
1880 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1881 &pc_tree->vertical[0]);
1882 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
1883 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1884 subsize, &pc_tree->vertical[1]);
1885 }
1886 break;
1887 case PARTITION_HORZ:
1888 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1889 &pc_tree->horizontal[0]);
1890 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
1891 encode_b_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1892 subsize, &pc_tree->horizontal[1]);
1893 }
1894 break;
1895 case PARTITION_SPLIT:
1896 subsize = get_subsize(bsize, PARTITION_SPLIT);
1897 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1898 pc_tree->split[0]);
1899 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1900 subsize, pc_tree->split[1]);
1901 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1902 subsize, pc_tree->split[2]);
1903 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
1904 output_enabled, subsize, pc_tree->split[3]);
1905 break;
1906 default:
1907 assert(0 && "Invalid partition type.");
1908 break;
1909 }
1911 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
1912 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
1913 }
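/* Editor's note: encode_sb_rt above is the canonical quadtree walk: a block
 * is either coded whole (NONE), as two rectangles (HORZ/VERT), or recursed
 * into four quadrants (SPLIT). A minimal standalone sketch of that traversal
 * pattern, with hypothetical chooser/visit callbacks (not libvpx API): */
#if 0
typedef enum { P_NONE, P_HORZ, P_VERT, P_SPLIT } part_t;

static void walk(int row, int col, int size,
                 part_t (*choose)(int row, int col, int size),
                 void (*visit)(int row, int col, int w, int h)) {
  const int half = size / 2;
  switch (choose(row, col, size)) {
    case P_NONE:  visit(row, col, size, size); break;
    case P_HORZ:  visit(row, col, size, half);           /* top, bottom */
                  visit(row + half, col, size, half); break;
    case P_VERT:  visit(row, col, half, size);           /* left, right */
                  visit(row, col + half, half, size); break;
    case P_SPLIT: walk(row, col, half, choose, visit);   /* 4 quadrants */
                  walk(row, col + half, half, choose, visit);
                  walk(row + half, col, half, choose, visit);
                  walk(row + half, col + half, half, choose, visit); break;
  }
}
#endif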
1915 static void rd_use_partition(VP9_COMP *cpi,
1916 ThreadData *td,
1917 TileDataEnc *tile_data,
1918 MODE_INFO **mi_8x8, TOKENEXTRA **tp,
1919 int mi_row, int mi_col,
1920 BLOCK_SIZE bsize,
1921 int *rate, int64_t *dist,
1922 int do_recon, PC_TREE *pc_tree) {
1923 VP9_COMMON *const cm = &cpi->common;
1924 TileInfo *const tile_info = &tile_data->tile_info;
1925 MACROBLOCK *const x = &td->mb;
1926 MACROBLOCKD *const xd = &x->e_mbd;
1927 const int mis = cm->mi_stride;
1928 const int bsl = b_width_log2_lookup[bsize];
1929 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
1930 const int bss = (1 << bsl) / 4;
1931 int i, pl;
1932 PARTITION_TYPE partition = PARTITION_NONE;
1933 BLOCK_SIZE subsize;
1934 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
1935 PARTITION_CONTEXT sl[8], sa[8];
1936 RD_COST last_part_rdc, none_rdc, chosen_rdc;
1937 BLOCK_SIZE sub_subsize = BLOCK_4X4;
1938 int splits_below = 0;
1939 BLOCK_SIZE bs_type = mi_8x8[0]->sb_type;
1940 int do_partition_search = 1;
1941 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
1943 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1944 return;
1946 assert(num_4x4_blocks_wide_lookup[bsize] ==
1947 num_4x4_blocks_high_lookup[bsize]);
1949 vp9_rd_cost_reset(&last_part_rdc);
1950 vp9_rd_cost_reset(&none_rdc);
1951 vp9_rd_cost_reset(&chosen_rdc);
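/* Editor's note: vp9_rd_cost_reset() marks a cost as "no valid result yet"
 * (rate = INT_MAX, dist and rdcost = INT64_MAX), while vp9_rd_cost_init()
 * zeroes a cost so terms can be accumulated. Resetting all three here means
 * any branch that is never searched automatically loses the rdcost
 * comparisons further down. */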
1953 partition = partition_lookup[bsl][bs_type];
1954 subsize = get_subsize(bsize, partition);
1956 pc_tree->partitioning = partition;
1957 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1959 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
1960 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
1961 x->mb_energy = vp9_block_energy(cpi, x, bsize);
1962 }
1964 if (do_partition_search &&
1965 cpi->sf.partition_search_type == SEARCH_PARTITION &&
1966 cpi->sf.adjust_partitioning_from_last_frame) {
1967 // Check if any of the sub blocks are further split.
1968 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
1969 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
1970 splits_below = 1;
1971 for (i = 0; i < 4; i++) {
1972 int jj = i >> 1, ii = i & 0x01;
1973 MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
1974 if (this_mi && this_mi->sb_type >= sub_subsize) {
1975 splits_below = 0;
1976 }
1977 }
1978 }
1980 // If the partition is not none, try none unless each of the 4 splits is
1981 // split even further.
1982 if (partition != PARTITION_NONE && !splits_below &&
1983 mi_row + (mi_step >> 1) < cm->mi_rows &&
1984 mi_col + (mi_step >> 1) < cm->mi_cols) {
1985 pc_tree->partitioning = PARTITION_NONE;
1986 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize,
1987 ctx, INT64_MAX);
1989 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1991 if (none_rdc.rate < INT_MAX) {
1992 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
1993 none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
1994 none_rdc.dist);
1995 }
1997 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1998 mi_8x8[0]->sb_type = bs_type;
1999 pc_tree->partitioning = partition;
2000 }
2001 }
2003 switch (partition) {
2004 case PARTITION_NONE:
2005 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2006 bsize, ctx, INT64_MAX);
2007 break;
2008 case PARTITION_HORZ:
2009 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2010 subsize, &pc_tree->horizontal[0],
2011 INT64_MAX);
2012 if (last_part_rdc.rate != INT_MAX &&
2013 bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
2014 RD_COST tmp_rdc;
2015 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2016 vp9_rd_cost_init(&tmp_rdc);
2017 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2018 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2019 rd_pick_sb_modes(cpi, tile_data, x,
2020 mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
2021 subsize, &pc_tree->horizontal[1], INT64_MAX);
2022 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2023 vp9_rd_cost_reset(&last_part_rdc);
2024 break;
2025 }
2026 last_part_rdc.rate += tmp_rdc.rate;
2027 last_part_rdc.dist += tmp_rdc.dist;
2028 last_part_rdc.rdcost += tmp_rdc.rdcost;
2029 }
2030 break;
2031 case PARTITION_VERT:
2032 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2033 subsize, &pc_tree->vertical[0], INT64_MAX);
2034 if (last_part_rdc.rate != INT_MAX &&
2035 bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
2037 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
2038 vp9_rd_cost_init(&tmp_rdc);
2039 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2040 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2041 rd_pick_sb_modes(cpi, tile_data, x,
2042 mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
2043 subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
2044 INT64_MAX);
2045 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2046 vp9_rd_cost_reset(&last_part_rdc);
2047 break;
2048 }
2049 last_part_rdc.rate += tmp_rdc.rate;
2050 last_part_rdc.dist += tmp_rdc.dist;
2051 last_part_rdc.rdcost += tmp_rdc.rdcost;
2052 }
2053 break;
2054 case PARTITION_SPLIT:
2055 if (bsize == BLOCK_8X8) {
2056 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2057 subsize, pc_tree->leaf_split[0], INT64_MAX);
2058 break;
2059 }
2060 last_part_rdc.rate = 0;
2061 last_part_rdc.dist = 0;
2062 last_part_rdc.rdcost = 0;
2063 for (i = 0; i < 4; i++) {
2064 int x_idx = (i & 1) * (mi_step >> 1);
2065 int y_idx = (i >> 1) * (mi_step >> 1);
2066 int jj = i >> 1, ii = i & 0x01;
2068 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
2069 continue;
2071 vp9_rd_cost_init(&tmp_rdc);
2072 rd_use_partition(cpi, td, tile_data,
2073 mi_8x8 + jj * bss * mis + ii * bss, tp,
2074 mi_row + y_idx, mi_col + x_idx, subsize,
2075 &tmp_rdc.rate, &tmp_rdc.dist,
2076 i != 3, pc_tree->split[i]);
2077 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2078 vp9_rd_cost_reset(&last_part_rdc);
2079 break;
2080 }
2081 last_part_rdc.rate += tmp_rdc.rate;
2082 last_part_rdc.dist += tmp_rdc.dist;
2083 }
2084 break;
2085 default:
2086 assert(0);
2087 break;
2088 }
2090 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2091 if (last_part_rdc.rate < INT_MAX) {
2092 last_part_rdc.rate += cpi->partition_cost[pl][partition];
2093 last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2094 last_part_rdc.rate, last_part_rdc.dist);
2095 }
2097 if (do_partition_search
2098 && cpi->sf.adjust_partitioning_from_last_frame
2099 && cpi->sf.partition_search_type == SEARCH_PARTITION
2100 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
2101 && (mi_row + mi_step < cm->mi_rows ||
2102 mi_row + (mi_step >> 1) == cm->mi_rows)
2103 && (mi_col + mi_step < cm->mi_cols ||
2104 mi_col + (mi_step >> 1) == cm->mi_cols)) {
2105 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
2106 chosen_rdc.rate = 0;
2107 chosen_rdc.dist = 0;
2108 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2109 pc_tree->partitioning = PARTITION_SPLIT;
2111 // Split partition.
2112 for (i = 0; i < 4; i++) {
2113 int x_idx = (i & 1) * (mi_step >> 1);
2114 int y_idx = (i >> 1) * (mi_step >> 1);
2115 RD_COST tmp_rdc;
2116 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2117 PARTITION_CONTEXT sl[8], sa[8];
2119 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
2120 continue;
2122 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2123 pc_tree->split[i]->partitioning = PARTITION_NONE;
2124 rd_pick_sb_modes(cpi, tile_data, x,
2125 mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
2126 split_subsize, &pc_tree->split[i]->none, INT64_MAX);
2128 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2130 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2131 vp9_rd_cost_reset(&chosen_rdc);
2132 break;
2133 }
2135 chosen_rdc.rate += tmp_rdc.rate;
2136 chosen_rdc.dist += tmp_rdc.dist;
2138 if (i != 3)
2139 encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
2140 split_subsize, pc_tree->split[i]);
2142 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
2143 split_subsize);
2144 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2145 }
2146 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2147 if (chosen_rdc.rate < INT_MAX) {
2148 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2149 chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2150 chosen_rdc.rate, chosen_rdc.dist);
2151 }
2152 }
2154 // If last_part is better, set the partitioning to that.
2155 if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
2156 mi_8x8[0]->sb_type = bsize;
2157 if (bsize >= BLOCK_8X8)
2158 pc_tree->partitioning = partition;
2159 chosen_rdc = last_part_rdc;
2160 }
2161 // If none was better, set the partitioning to that.
2162 if (none_rdc.rdcost < chosen_rdc.rdcost) {
2163 if (bsize >= BLOCK_8X8)
2164 pc_tree->partitioning = PARTITION_NONE;
2165 chosen_rdc = none_rdc;
2166 }
2168 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2170 // We must have chosen a partitioning and encoding or we'll fail later on.
2171 // No other opportunities for success.
2172 if (bsize == BLOCK_64X64)
2173 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
2175 if (do_recon) {
2176 int output_enabled = (bsize == BLOCK_64X64);
2177 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
2178 pc_tree);
2179 }
2181 *rate = chosen_rdc.rate;
2182 *dist = chosen_rdc.dist;
2183 }
2185 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
2186 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2187 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2188 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
2189 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
2190 BLOCK_16X16
2191 };
2193 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
2194 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
2195 BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
2196 BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
2197 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
2198 BLOCK_64X64
2199 };
2202 // Look at all the mode_info entries for blocks that are part of this
2203 // partition and find the min and max values for sb_type.
2204 // At the moment this is designed to work on a 64x64 SB but could be
2205 // adjusted to use a size parameter.
2207 // The min and max are assumed to have been initialized prior to calling this
2208 // function, so repeated calls can accumulate a min and max over more than one SB64.
2209 static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
2210 BLOCK_SIZE *min_block_size,
2211 BLOCK_SIZE *max_block_size,
2212 int bs_hist[BLOCK_SIZES]) {
2213 int sb_width_in_blocks = MI_BLOCK_SIZE;
2214 int sb_height_in_blocks = MI_BLOCK_SIZE;
2215 int i, j;
2216 int index = 0;
2218 // Check the sb_type for each block that belongs to this region.
2219 for (i = 0; i < sb_height_in_blocks; ++i) {
2220 for (j = 0; j < sb_width_in_blocks; ++j) {
2221 MODE_INFO *mi = mi_8x8[index+j];
2222 BLOCK_SIZE sb_type = mi ? mi->sb_type : 0;
2223 bs_hist[sb_type]++;
2224 *min_block_size = VPXMIN(*min_block_size, sb_type);
2225 *max_block_size = VPXMAX(*max_block_size, sb_type);
2226 }
2227 index += xd->mi_stride;
2228 }
2229 }
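/* Editor's note: callers seed *min_block_size / *max_block_size once and then
 * invoke this routine for the co-located previous-frame SB64 and the left and
 * above neighbors, so the outputs are a running min/max over all of them.
 * A standalone sketch of that accumulation pattern (hypothetical data, not
 * libvpx API): */
#if 0
#include <limits.h>
static void min_max_fold(const int *v, int n, int *mn, int *mx) {
  int i;
  for (i = 0; i < n; ++i) {
    if (v[i] < *mn) *mn = v[i];  /* only ever tightens the bounds */
    if (v[i] > *mx) *mx = v[i];
  }
}
/* int mn = INT_MAX, mx = INT_MIN;
 * min_max_fold(prev_frame_sizes, n0, &mn, &mx);
 * min_max_fold(left_sb_sizes,  n1, &mn, &mx);
 * min_max_fold(above_sb_sizes, n2, &mn, &mx);  -- mn/mx now cover all three */
#endif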
2231 // Next square block size less than or equal to the current block size.
2232 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
2233 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2234 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
2235 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
2236 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
2237 BLOCK_64X64
2238 };
2240 // Look at neighboring blocks and set a min and max partition size based on
2241 // what they chose.
2242 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2243 MACROBLOCKD *const xd,
2244 int mi_row, int mi_col,
2245 BLOCK_SIZE *min_block_size,
2246 BLOCK_SIZE *max_block_size) {
2247 VP9_COMMON *const cm = &cpi->common;
2248 MODE_INFO **mi = xd->mi;
2249 const int left_in_image = xd->left_available && mi[-1];
2250 const int above_in_image = xd->up_available && mi[-xd->mi_stride];
2251 const int row8x8_remaining = tile->mi_row_end - mi_row;
2252 const int col8x8_remaining = tile->mi_col_end - mi_col;
2253 int bh, bw;
2254 BLOCK_SIZE min_size = BLOCK_4X4;
2255 BLOCK_SIZE max_size = BLOCK_64X64;
2256 int bs_hist[BLOCK_SIZES] = {0};
2258 // Trap case where we do not have a prediction.
2259 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
2260 // Seed min with the largest size and max with the smallest so the scans below can only tighten them.
2261 min_size = BLOCK_64X64;
2262 max_size = BLOCK_4X4;
2264 // NOTE: each call to get_sb_partition_size_range() uses the previously
2265 // passed-in values for min and max as a starting point.
2266 // Find the min and max partition used in the previous frame at this location.
2267 if (cm->frame_type != KEY_FRAME) {
2268 MODE_INFO **prev_mi =
2269 &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
2270 get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
2271 }
2272 // Find the min and max partition sizes used in the left SB64
2273 if (left_in_image) {
2274 MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
2275 get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
2276 bs_hist);
2277 }
2278 // Find the min and max partition sizes used in the above SB64.
2279 if (above_in_image) {
2280 MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
2281 get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
2282 bs_hist);
2283 }
2285 // Adjust observed min and max for "relaxed" auto partition case.
2286 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
2287 min_size = min_partition_size[min_size];
2288 max_size = max_partition_size[max_size];
2289 }
2290 }
2292 // Check border cases where max and min from neighbors may not be legal.
2293 max_size = find_partition_size(max_size,
2294 row8x8_remaining, col8x8_remaining,
2295 &bh, &bw);
2296 // Test for blocks at the edge of the active image.
2297 // This may be the actual edge of the image or where there are formatting
2298 // bars.
2299 if (vp9_active_edge_sb(cpi, mi_row, mi_col)) {
2300 min_size = BLOCK_4X4;
2301 } else {
2302 min_size =
2303 VPXMIN(cpi->sf.rd_auto_partition_min_limit, VPXMIN(min_size, max_size));
2304 }
2306 // When use_square_partition_only is true, make sure at least one square
2307 // partition is allowed by selecting the next smaller square size as the
2308 // minimum block size.
2309 if (cpi->sf.use_square_partition_only &&
2310 next_square_size[max_size] < min_size) {
2311 min_size = next_square_size[max_size];
2312 }
2314 *min_block_size = min_size;
2315 *max_block_size = max_size;
2316 }
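/* Editor's note: the tail of the function reconciles three constraints: the
 * border clamp, the speed-feature minimum limit, and (under square-only
 * search) the guarantee that at least one square size fits under the
 * maximum. A condensed sketch of that reconciliation order (illustrative
 * only, not libvpx API): */
#if 0
static int reconcile_min(int min_size, int max_size, int sf_min_limit,
                         int square_only, const int *next_square) {
  if (min_size > sf_min_limit) min_size = sf_min_limit; /* sf clamp */
  if (min_size > max_size) min_size = max_size;         /* keep range valid */
  if (square_only && next_square[max_size] < min_size)
    min_size = next_square[max_size];  /* admit one square size <= max */
  return min_size;
}
#endif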
2318 // TODO(jingning) refactor functions setting partition search range
2319 static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
2320 int mi_row, int mi_col, BLOCK_SIZE bsize,
2321 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
2322 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2323 int mi_height = num_8x8_blocks_high_lookup[bsize];
2324 int idx, idy;
2326 MODE_INFO *mi;
2327 const int idx_str = cm->mi_stride * mi_row + mi_col;
2328 MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
2329 BLOCK_SIZE bs, min_size, max_size;
2331 min_size = BLOCK_64X64;
2332 max_size = BLOCK_4X4;
2334 if (prev_mi) {
2335 for (idy = 0; idy < mi_height; ++idy) {
2336 for (idx = 0; idx < mi_width; ++idx) {
2337 mi = prev_mi[idy * cm->mi_stride + idx];
2338 bs = mi ? mi->sb_type : bsize;
2339 min_size = VPXMIN(min_size, bs);
2340 max_size = VPXMAX(max_size, bs);
2341 }
2342 }
2343 }
2345 if (xd->left_available) {
2346 for (idy = 0; idy < mi_height; ++idy) {
2347 mi = xd->mi[idy * cm->mi_stride - 1];
2348 bs = mi ? mi->sb_type : bsize;
2349 min_size = VPXMIN(min_size, bs);
2350 max_size = VPXMAX(max_size, bs);
2351 }
2352 }
2354 if (xd->up_available) {
2355 for (idx = 0; idx < mi_width; ++idx) {
2356 mi = xd->mi[idx - cm->mi_stride];
2357 bs = mi ? mi->sb_type : bsize;
2358 min_size = VPXMIN(min_size, bs);
2359 max_size = VPXMAX(max_size, bs);
2360 }
2361 }
2363 if (min_size == max_size) {
2364 min_size = min_partition_size[min_size];
2365 max_size = max_partition_size[max_size];
2366 }
2368 *min_bs = min_size;
2369 *max_bs = max_size;
2370 }
2372 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2373 memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
2374 }
2376 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2377 memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
2378 }
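/* Editor's note: store_pred_mv()/load_pred_mv() are a one-level save/restore
 * of the motion-vector predictor array around nested partition searches:
 * the predictors are saved after the NONE-partition search and reloaded
 * before each rectangular or split candidate, so every branch starts from
 * the same state. Usage pattern (as in rd_pick_partition below):
 *
 *   store_pred_mv(x, ctx);   // after searching PARTITION_NONE
 *   ...
 *   load_pred_mv(x, ctx);    // before each HORZ / VERT / SPLIT candidate
 */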
2380 #if CONFIG_FP_MB_STATS
2381 const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
2382 {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
2383 const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
2384 {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
2385 const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
2386 {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
2387 const int qindex_split_threshold_lookup[BLOCK_SIZES] =
2388 {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
2389 const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
2390 {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
2392 typedef enum {
2393 MV_ZERO = 0,
2394 MV_LEFT = 1,
2395 MV_UP = 2,
2396 MV_RIGHT = 3,
2397 MV_DOWN = 4,
2398 MV_INVALID
2399 } MOTION_DIRECTION;
2401 static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
2402 if (fp_byte & FPMB_MOTION_ZERO_MASK) {
2403 return MV_ZERO;
2404 } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
2405 return MV_LEFT;
2406 } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
2407 return MV_RIGHT;
2408 } else if (fp_byte & FPMB_MOTION_UP_MASK) {
2409 return MV_UP;
2410 } else {
2411 return MV_DOWN;
2412 }
2413 }
2415 static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
2416 MOTION_DIRECTION that_mv) {
2417 if (this_mv == that_mv) {
2418 return 0;
2419 } else {
2420 return abs(this_mv - that_mv) == 2 ? 2 : 1;
2421 }
2422 }
2423 #endif
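/* Editor's note: with the MOTION_DIRECTION encoding above,
 * get_motion_inconsistency() grades a pair of directions as 0 (same),
 * 2 (enum values differ by exactly 2, e.g. left/right or up/down) or 1
 * otherwise. A few worked cases (sketch, not part of the build): */
#if 0
#include <assert.h>
static void motion_inconsistency_examples(void) {
  assert(get_motion_inconsistency(MV_LEFT, MV_LEFT) == 0);  /* same        */
  assert(get_motion_inconsistency(MV_LEFT, MV_UP) == 1);    /* |1-2| == 1  */
  assert(get_motion_inconsistency(MV_LEFT, MV_RIGHT) == 2); /* |1-3| == 2  */
  assert(get_motion_inconsistency(MV_UP, MV_DOWN) == 2);    /* |2-4| == 2  */
}
#endif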
2425 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
2426 // unlikely to be selected depending on previous rate-distortion optimization
2427 // results, for encoding speed-up.
2428 static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
2429 TileDataEnc *tile_data,
2430 TOKENEXTRA **tp, int mi_row, int mi_col,
2431 BLOCK_SIZE bsize, RD_COST *rd_cost,
2432 int64_t best_rd, PC_TREE *pc_tree) {
2433 VP9_COMMON *const cm = &cpi->common;
2434 TileInfo *const tile_info = &tile_data->tile_info;
2435 MACROBLOCK *const x = &td->mb;
2436 MACROBLOCKD *const xd = &x->e_mbd;
2437 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
2438 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2439 PARTITION_CONTEXT sl[8], sa[8];
2440 TOKENEXTRA *tp_orig = *tp;
2441 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2442 int i, pl;
2443 BLOCK_SIZE subsize;
2444 RD_COST this_rdc, sum_rdc, best_rdc;
2445 int do_split = bsize >= BLOCK_8X8;
2446 int do_rect = 1;
2448 // Override skipping rectangular partition operations for edge blocks
2449 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
2450 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
2451 const int xss = x->e_mbd.plane[1].subsampling_x;
2452 const int yss = x->e_mbd.plane[1].subsampling_y;
2454 BLOCK_SIZE min_size = x->min_partition_size;
2455 BLOCK_SIZE max_size = x->max_partition_size;
2457 #if CONFIG_FP_MB_STATS
2458 unsigned int src_diff_var = UINT_MAX;
2459 int none_complexity = 0;
2460 #endif
2462 int partition_none_allowed = !force_horz_split && !force_vert_split;
2463 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2464 bsize >= BLOCK_8X8;
2465 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2466 bsize >= BLOCK_8X8;
2468 int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
2469 int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
2471 (void)*tp_orig;
2473 assert(num_8x8_blocks_wide_lookup[bsize] ==
2474 num_8x8_blocks_high_lookup[bsize]);
2476 // Adjust dist breakout threshold according to the partition size.
2477 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
2478 b_height_log2_lookup[bsize]);
2479 rate_breakout_thr *= num_pels_log2_lookup[bsize];
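/* Editor's note: a worked instance of the scaling above. The two log2
 * lookups count 4-pel units (0 for 4x4 ... 4 for 64x64), so their sum is 8
 * for 64x64 and 2 for 8x8. With an illustrative speed-feature value of
 * 1 << 20:
 *   64x64: dist_thr >>= 8 - (4 + 4)  ->  unchanged (1 << 20)
 *   32x32: dist_thr >>= 8 - (3 + 3)  ->  1 << 18
 *    8x8:  dist_thr >>= 8 - (1 + 1)  ->  1 << 14
 * i.e. the distortion breakout shrinks with block area, while the rate
 * breakout grows with num_pels_log2_lookup[bsize]. */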
2481 vp9_rd_cost_init(&this_rdc);
2482 vp9_rd_cost_init(&sum_rdc);
2483 vp9_rd_cost_reset(&best_rdc);
2484 best_rdc.rdcost = best_rd;
2486 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2488 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
2489 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2491 if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
2492 int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
2493 + get_chessboard_index(cm->current_video_frame)) & 0x1;
2495 if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
2496 set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
2497 }
2499 // Determine which partition types to search according to the speed features.
2500 // The thresholds set here must be square block sizes.
2501 if (cpi->sf.auto_min_max_partition_size) {
2502 partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
2503 partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
2504 force_horz_split);
2505 partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
2506 force_vert_split);
2507 do_split &= bsize > min_size;
2508 }
2510 if (cpi->sf.use_square_partition_only &&
2511 bsize > cpi->sf.use_square_only_threshold) {
2512 if (cpi->use_svc) {
2513 if (!vp9_active_h_edge(cpi, mi_row, mi_step) || x->e_mbd.lossless)
2514 partition_horz_allowed &= force_horz_split;
2515 if (!vp9_active_v_edge(cpi, mi_row, mi_step) || x->e_mbd.lossless)
2516 partition_vert_allowed &= force_vert_split;
2517 } else {
2518 partition_horz_allowed &= force_horz_split;
2519 partition_vert_allowed &= force_vert_split;
2520 }
2521 }
2523 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2525 #if CONFIG_FP_MB_STATS
2526 if (cpi->use_fp_mb_stats) {
2527 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2528 src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
2529 mi_row, mi_col, bsize);
2530 }
2531 #endif
2533 #if CONFIG_FP_MB_STATS
2534 // Decide whether to split directly and skip searching PARTITION_NONE by
2535 // using the first-pass block statistics.
2536 if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
2537 partition_none_allowed && src_diff_var > 4 &&
2538 cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
2539 int mb_row = mi_row >> 1;
2540 int mb_col = mi_col >> 1;
2541 int mb_row_end =
2542 VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2543 int mb_col_end =
2544 VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2545 int r, c;
2547 // Compute a complexity measure: the inconsistency of the motion vectors
2548 // obtained from the first pass within the current block.
2549 for (r = mb_row; r < mb_row_end ; r++) {
2550 for (c = mb_col; c < mb_col_end; c++) {
2551 const int mb_index = r * cm->mb_cols + c;
2553 MOTION_DIRECTION this_mv;
2554 MOTION_DIRECTION right_mv;
2555 MOTION_DIRECTION bottom_mv;
2557 this_mv =
2558 get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);
2560 // to its right
2561 if (c != mb_col_end - 1) {
2562 right_mv = get_motion_direction_fp(
2563 cpi->twopass.this_frame_mb_stats[mb_index + 1]);
2564 none_complexity += get_motion_inconsistency(this_mv, right_mv);
2565 }
2567 // to its bottom
2568 if (r != mb_row_end - 1) {
2569 bottom_mv = get_motion_direction_fp(
2570 cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
2571 none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
2572 }
2574 // do not count its left and top neighbors to avoid double counting
2575 }
2576 }
2578 if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
2579 partition_none_allowed = 0;
2580 }
2581 }
2582 #endif
2584 // PARTITION_NONE
2585 if (partition_none_allowed) {
2586 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
2587 &this_rdc, bsize, ctx, best_rdc.rdcost);
2588 if (this_rdc.rate != INT_MAX) {
2589 if (bsize >= BLOCK_8X8) {
2590 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2591 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2592 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2593 this_rdc.rate, this_rdc.dist);
2594 }
2596 if (this_rdc.rdcost < best_rdc.rdcost) {
2597 best_rdc = this_rdc;
2598 if (bsize >= BLOCK_8X8)
2599 pc_tree->partitioning = PARTITION_NONE;
2601 // If all y, u, v transform blocks in this partition are skippable, and
2602 // the dist & rate are within the thresholds, the partition search is
2603 // terminated for the current branch of the partition search tree.
2604 if (!x->e_mbd.lossless && ctx->skippable &&
2605 ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
2606 (best_rdc.dist < dist_breakout_thr &&
2607 best_rdc.rate < rate_breakout_thr))) {
2608 do_split = 0;
2609 do_rect = 0;
2610 }
2612 #if CONFIG_FP_MB_STATS
2613 // Check whether every 16x16 first-pass block has zero motion and the
2614 // corresponding first-pass residue is small enough. If that is the case,
2615 // check the difference variance between the current frame and the last
2616 // frame; if the variance is small enough, stop further splitting in the
2617 // RD optimization.
2618 if (cpi->use_fp_mb_stats && do_split != 0 &&
2619 cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
2620 int mb_row = mi_row >> 1;
2621 int mb_col = mi_col >> 1;
2622 int mb_row_end =
2623 VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2624 int mb_col_end =
2625 VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2626 int r, c;
2628 int skip = 1;
2629 for (r = mb_row; r < mb_row_end; r++) {
2630 for (c = mb_col; c < mb_col_end; c++) {
2631 const int mb_index = r * cm->mb_cols + c;
2632 if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
2633 FPMB_MOTION_ZERO_MASK) ||
2634 !(cpi->twopass.this_frame_mb_stats[mb_index] &
2635 FPMB_ERROR_SMALL_MASK)) {
2636 skip = 0;
2637 break;
2638 }
2639 }
2640 if (skip == 0) {
2641 break;
2642 }
2643 }
2644 if (skip) {
2645 if (src_diff_var == UINT_MAX) {
2646 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2647 src_diff_var = get_sby_perpixel_diff_variance(
2648 cpi, &x->plane[0].src, mi_row, mi_col, bsize);
2649 }
2650 if (src_diff_var < 8) {
2651 do_split = 0;
2652 do_rect = 0;
2653 }
2654 }
2655 }
2656 #endif
2657 }
2658 }
2659 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2660 }
2662 // store estimated motion vector
2663 if (cpi->sf.adaptive_motion_search)
2664 store_pred_mv(x, ctx);
2667 // TODO(jingning): use the motion vectors given by the above search as
2668 // the starting point of motion search in the following partition type check.
2669 if (do_split) {
2670 subsize = get_subsize(bsize, PARTITION_SPLIT);
2671 if (bsize == BLOCK_8X8) {
2672 i = 4;
2673 if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
2674 pc_tree->leaf_split[0]->pred_interp_filter =
2675 ctx->mic.interp_filter;
2676 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2677 pc_tree->leaf_split[0], best_rdc.rdcost);
2678 if (sum_rdc.rate == INT_MAX)
2679 sum_rdc.rdcost = INT64_MAX;
2680 } else {
2681 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
2682 const int x_idx = (i & 1) * mi_step;
2683 const int y_idx = (i >> 1) * mi_step;
2685 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
2686 continue;
2688 if (cpi->sf.adaptive_motion_search)
2689 load_pred_mv(x, ctx);
2691 pc_tree->split[i]->index = i;
2692 rd_pick_partition(cpi, td, tile_data, tp,
2693 mi_row + y_idx, mi_col + x_idx,
2694 subsize, &this_rdc,
2695 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
2697 if (this_rdc.rate == INT_MAX) {
2698 sum_rdc.rdcost = INT64_MAX;
2699 break;
2700 } else {
2701 sum_rdc.rate += this_rdc.rate;
2702 sum_rdc.dist += this_rdc.dist;
2703 sum_rdc.rdcost += this_rdc.rdcost;
2704 }
2705 }
2706 }
2708 if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
2709 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2710 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2711 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2712 sum_rdc.rate, sum_rdc.dist);
2714 if (sum_rdc.rdcost < best_rdc.rdcost) {
2715 best_rdc = sum_rdc;
2716 pc_tree->partitioning = PARTITION_SPLIT;
2718 // Rate and distortion based partition search termination clause.
2719 if (!x->e_mbd.lossless &&
2720 ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
2721 (best_rdc.dist < dist_breakout_thr &&
2722 best_rdc.rate < rate_breakout_thr))) {
2723 do_rect = 0;
2724 }
2725 }
2727 // skip rectangular partition test when larger block size
2728 // gives better rd cost
2729 if ((cpi->sf.less_rectangular_check) &&
2730 ((bsize > cpi->sf.use_square_only_threshold) ||
2731 (best_rdc.dist < dist_breakout_thr)))
2732 do_rect &= !partition_none_allowed;
2734 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2735 }
2737 // PARTITION_HORZ
2738 if (partition_horz_allowed &&
2739 (do_rect || vp9_active_h_edge(cpi, mi_row, mi_step))) {
2740 subsize = get_subsize(bsize, PARTITION_HORZ);
2741 if (cpi->sf.adaptive_motion_search)
2742 load_pred_mv(x, ctx);
2743 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2744 partition_none_allowed)
2745 pc_tree->horizontal[0].pred_interp_filter =
2746 ctx->mic.interp_filter;
2747 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2748 &pc_tree->horizontal[0], best_rdc.rdcost);
2750 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
2751 bsize > BLOCK_8X8) {
2752 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2753 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2754 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2756 if (cpi->sf.adaptive_motion_search)
2757 load_pred_mv(x, ctx);
2758 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2759 partition_none_allowed)
2760 pc_tree->horizontal[1].pred_interp_filter =
2761 ctx->mic.interp_filter;
2762 rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col,
2763 &this_rdc, subsize, &pc_tree->horizontal[1],
2764 best_rdc.rdcost - sum_rdc.rdcost);
2765 if (this_rdc.rate == INT_MAX) {
2766 sum_rdc.rdcost = INT64_MAX;
2767 } else {
2768 sum_rdc.rate += this_rdc.rate;
2769 sum_rdc.dist += this_rdc.dist;
2770 sum_rdc.rdcost += this_rdc.rdcost;
2771 }
2772 }
2774 if (sum_rdc.rdcost < best_rdc.rdcost) {
2775 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2776 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
2777 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
2778 if (sum_rdc.rdcost < best_rdc.rdcost) {
2779 best_rdc = sum_rdc;
2780 pc_tree->partitioning = PARTITION_HORZ;
2782 if ((cpi->sf.less_rectangular_check) &&
2783 (bsize > cpi->sf.use_square_only_threshold))
2784 do_rect = 0;
2785 }
2786 }
2787 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2788 }
2790 if (partition_vert_allowed &&
2791 (do_rect || vp9_active_v_edge(cpi, mi_col, mi_step))) {
2792 subsize = get_subsize(bsize, PARTITION_VERT);
2794 if (cpi->sf.adaptive_motion_search)
2795 load_pred_mv(x, ctx);
2796 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2797 partition_none_allowed)
2798 pc_tree->vertical[0].pred_interp_filter =
2799 ctx->mic.interp_filter;
2800 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2801 &pc_tree->vertical[0], best_rdc.rdcost);
2802 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
2803 bsize > BLOCK_8X8) {
2804 update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
2805 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
2806 &pc_tree->vertical[0]);
2808 if (cpi->sf.adaptive_motion_search)
2809 load_pred_mv(x, ctx);
2810 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2811 partition_none_allowed)
2812 pc_tree->vertical[1].pred_interp_filter =
2813 ctx->mic.interp_filter;
2814 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step,
2815 &this_rdc, subsize,
2816 &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
2817 if (this_rdc.rate == INT_MAX) {
2818 sum_rdc.rdcost = INT64_MAX;
2819 } else {
2820 sum_rdc.rate += this_rdc.rate;
2821 sum_rdc.dist += this_rdc.dist;
2822 sum_rdc.rdcost += this_rdc.rdcost;
2823 }
2824 }
2826 if (sum_rdc.rdcost < best_rdc.rdcost) {
2827 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2828 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
2829 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2830 sum_rdc.rate, sum_rdc.dist);
2831 if (sum_rdc.rdcost < best_rdc.rdcost) {
2832 best_rdc = sum_rdc;
2833 pc_tree->partitioning = PARTITION_VERT;
2834 }
2835 }
2836 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2837 }
2839 // TODO(jbb): This code was added so that we avoid a static-analysis
2840 // warning that best_rd isn't used after this point. It should be
2841 // refactored so that the duplicate checks occur in a sub function and
2842 // are thus actually used.
2844 *rd_cost = best_rdc;
2846 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
2847 pc_tree->index != 3) {
2848 int output_enabled = (bsize == BLOCK_64X64);
2849 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
2850 pc_tree);
2851 }
2853 if (bsize == BLOCK_64X64) {
2854 assert(tp_orig < *tp);
2855 assert(best_rdc.rate < INT_MAX);
2856 assert(best_rdc.dist < INT64_MAX);
2857 } else {
2858 assert(tp_orig == *tp);
2859 }
2860 }
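/* Editor's note: every comparison in this function reduces to the scalar
 * produced by RDCOST(rdmult, rddiv, rate, dist). A minimal sketch of that
 * fixed-point weighting, assuming the VP9-style form in which rate is
 * weighted by the Lagrange multiplier with 8 fractional bits and distortion
 * is left-shifted by rddiv onto the same scale (illustrative; see vp9_rd.h
 * for the authoritative macro): */
#if 0
#include <stdint.h>
static int64_t rdcost_sketch(int rdmult, int rddiv, int rate, int64_t dist) {
  /* 128 is the rounding term, half of 1 << 8 */
  return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
}
#endif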
2862 static void encode_rd_sb_row(VP9_COMP *cpi,
2863 ThreadData *td,
2864 TileDataEnc *tile_data,
2865 int mi_row,
2866 TOKENEXTRA **tp) {
2867 VP9_COMMON *const cm = &cpi->common;
2868 TileInfo *const tile_info = &tile_data->tile_info;
2869 MACROBLOCK *const x = &td->mb;
2870 MACROBLOCKD *const xd = &x->e_mbd;
2871 SPEED_FEATURES *const sf = &cpi->sf;
2872 int mi_col;
2874 // Initialize the left context for the new SB row
2875 memset(&xd->left_context, 0, sizeof(xd->left_context));
2876 memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
2878 // Code each SB in the row
2879 for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
2880 mi_col += MI_BLOCK_SIZE) {
2881 const struct segmentation *const seg = &cm->seg;
2882 int dummy_rate;
2883 int64_t dummy_dist;
2884 RD_COST dummy_rdc;
2885 int i;
2886 int seg_skip = 0;
2888 const int idx_str = cm->mi_stride * mi_row + mi_col;
2889 MODE_INFO **mi = cm->mi_grid_visible + idx_str;
2891 if (sf->adaptive_pred_interp_filter) {
2892 for (i = 0; i < 64; ++i)
2893 td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
2895 for (i = 0; i < 64; ++i) {
2896 td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
2897 td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
2898 td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
2899 td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
2900 }
2901 }
2903 vp9_zero(x->pred_mv);
2904 td->pc_root->index = 0;
2906 if (seg->enabled) {
2907 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
2908 : cm->last_frame_seg_map;
2909 int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
2910 seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
2911 }
2913 x->source_variance = UINT_MAX;
2914 if (sf->partition_search_type == FIXED_PARTITION || seg_skip) {
2915 const BLOCK_SIZE bsize =
2916 seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
2917 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2918 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
2919 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2920 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2921 } else if (cpi->partition_search_skippable_frame) {
2922 BLOCK_SIZE bsize;
2923 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2924 bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
2925 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
2926 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2927 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2928 } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
2929 cm->frame_type != KEY_FRAME) {
2930 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
2931 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2932 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2933 } else {
2934 // If required, set upper and lower partition size limits.
2935 if (sf->auto_min_max_partition_size) {
2936 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2937 rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
2938 &x->min_partition_size,
2939 &x->max_partition_size);
2940 }
2941 rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
2942 &dummy_rdc, INT64_MAX, td->pc_root);
2943 }
2944 }
2945 }
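/* Editor's note: one MI unit covers 8x8 pixels, so stepping mi_col by
 * MI_BLOCK_SIZE (8 MI units) advances one 64x64 superblock at a time. A
 * standalone sketch of the tile walk this row function is part of
 * (hypothetical callback, not libvpx API): */
#if 0
static void walk_tile(int mi_row_start, int mi_row_end,
                      int mi_col_start, int mi_col_end,
                      void (*code_sb)(int mi_row, int mi_col)) {
  int mi_row, mi_col;
  for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += 8)
    for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += 8)
      code_sb(mi_row, mi_col);  /* one 64x64 superblock per step */
}
#endif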
2947 static void init_encode_frame_mb_context(VP9_COMP *cpi) {
2948 MACROBLOCK *const x = &cpi->td.mb;
2949 VP9_COMMON *const cm = &cpi->common;
2950 MACROBLOCKD *const xd = &x->e_mbd;
2951 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
2953 // Copy data over into macro block data structures.
2954 vp9_setup_src_planes(x, cpi->Source, 0, 0);
2956 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
2958 // Note: this memset assumes above_context[0], [1] and [2]
2959 // are allocated as part of the same buffer.
2960 memset(xd->above_context[0], 0,
2961 sizeof(*xd->above_context[0]) *
2962 2 * aligned_mi_cols * MAX_MB_PLANE);
2963 memset(xd->above_seg_context, 0,
2964 sizeof(*xd->above_seg_context) * aligned_mi_cols);
2965 }
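/* Editor's note: the single memset above relies on the three per-plane above
 * contexts being one contiguous allocation; the per-plane equivalent would
 * be (sketch):
 *
 *   for (i = 0; i < MAX_MB_PLANE; ++i)
 *     memset(xd->above_context[i], 0,
 *            sizeof(*xd->above_context[i]) * 2 * aligned_mi_cols);
 */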
2967 static int check_dual_ref_flags(VP9_COMP *cpi) {
2968 const int ref_flags = cpi->ref_frame_flags;
2970 if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
2971 return 0;
2972 } else {
2973 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
2974 + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
2975 }
2976 }
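/* Editor's note: '!!' collapses each flag bit to exactly 0 or 1, so the sum
 * counts the enabled reference frames. E.g. with LAST and ALT set but GOLD
 * clear, the sum is 1 + 0 + 1 = 2 and the function reports that at least two
 * references are active. */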
2978 static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
2979 int mi_row, mi_col;
2980 const int mis = cm->mi_stride;
2981 MODE_INFO **mi_ptr = cm->mi_grid_visible;
2983 for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
2984 for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
2985 if (mi_ptr[mi_col]->tx_size > max_tx_size)
2986 mi_ptr[mi_col]->tx_size = max_tx_size;
2987 }
2988 }
2989 }
2991 static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
2992 if (frame_is_intra_only(&cpi->common))
2993 return KEY_FRAME;
2994 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
2995 return ALTREF_FRAME;
2996 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
2997 return GOLDEN_FRAME;
2998 else
2999 return LAST_FRAME;
3000 }
3002 static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
3003 if (xd->lossless)
3004 return ONLY_4X4;
3005 if (cpi->common.frame_type == KEY_FRAME &&
3006 cpi->sf.use_nonrd_pick_mode)
3007 return ALLOW_16X16;
3008 if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
3009 return ALLOW_32X32;
3010 else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
3011 cpi->sf.tx_size_search_method == USE_TX_8X8)
3012 return TX_MODE_SELECT;
3013 else
3014 return cpi->common.tx_mode;
3015 }
3017 static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
3018 RD_COST *rd_cost, BLOCK_SIZE bsize,
3019 PICK_MODE_CONTEXT *ctx) {
3020 if (bsize < BLOCK_16X16)
3021 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
3022 else
3023 vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
3024 }
3026 static void nonrd_pick_sb_modes(VP9_COMP *cpi,
3027 TileDataEnc *tile_data, MACROBLOCK *const x,
3028 int mi_row, int mi_col, RD_COST *rd_cost,
3029 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
3030 VP9_COMMON *const cm = &cpi->common;
3031 TileInfo *const tile_info = &tile_data->tile_info;
3032 MACROBLOCKD *const xd = &x->e_mbd;
3033 MODE_INFO *mi;
3034 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
3035 mi = xd->mi[0];
3036 mi->sb_type = bsize;
3038 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
3039 if (cyclic_refresh_segment_id_boosted(mi->segment_id))
3040 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
3042 if (cm->frame_type == KEY_FRAME)
3043 hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
3044 else if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
3045 set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
3046 else if (bsize >= BLOCK_8X8)
3047 vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col,
3048 rd_cost, bsize, ctx);
3050 vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col,
3051 rd_cost, bsize, ctx);
3053 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
3055 if (rd_cost->rate == INT_MAX)
3056 vp9_rd_cost_reset(rd_cost);
3058 ctx->rate = rd_cost->rate;
3059 ctx->dist = rd_cost->dist;
3060 }
3062 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
3063 int mi_row, int mi_col,
3064 BLOCK_SIZE bsize,
3065 PC_TREE *pc_tree) {
3066 MACROBLOCKD *xd = &x->e_mbd;
3067 int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3068 PARTITION_TYPE partition = pc_tree->partitioning;
3069 BLOCK_SIZE subsize = get_subsize(bsize, partition);
3071 assert(bsize >= BLOCK_8X8);
3073 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3074 return;
3076 switch (partition) {
3077 case PARTITION_NONE:
3078 set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
3079 *(xd->mi[0]) = pc_tree->none.mic;
3080 *(x->mbmi_ext) = pc_tree->none.mbmi_ext;
3081 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
3082 break;
3083 case PARTITION_VERT:
3084 set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
3085 *(xd->mi[0]) = pc_tree->vertical[0].mic;
3086 *(x->mbmi_ext) = pc_tree->vertical[0].mbmi_ext;
3087 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
3089 if (mi_col + hbs < cm->mi_cols) {
3090 set_mode_info_offsets(cm, x, xd, mi_row, mi_col + hbs);
3091 *(xd->mi[0]) = pc_tree->vertical[1].mic;
3092 *(x->mbmi_ext) = pc_tree->vertical[1].mbmi_ext;
3093 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
3094 }
3095 break;
3096 case PARTITION_HORZ:
3097 set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
3098 *(xd->mi[0]) = pc_tree->horizontal[0].mic;
3099 *(x->mbmi_ext) = pc_tree->horizontal[0].mbmi_ext;
3100 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
3101 if (mi_row + hbs < cm->mi_rows) {
3102 set_mode_info_offsets(cm, x, xd, mi_row + hbs, mi_col);
3103 *(xd->mi[0]) = pc_tree->horizontal[1].mic;
3104 *(x->mbmi_ext) = pc_tree->horizontal[1].mbmi_ext;
3105 duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
3106 }
3107 break;
3108 case PARTITION_SPLIT: {
3109 fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
3110 fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
3111 pc_tree->split[1]);
3112 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
3113 pc_tree->split[2]);
3114 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
3115 pc_tree->split[3]);
3116 break;
3117 }
3118 default:
3119 break;
3120 }
3121 }
3123 // Reset the prediction pixel ready flag recursively.
3124 static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
3125 pc_tree->none.pred_pixel_ready = 0;
3126 pc_tree->horizontal[0].pred_pixel_ready = 0;
3127 pc_tree->horizontal[1].pred_pixel_ready = 0;
3128 pc_tree->vertical[0].pred_pixel_ready = 0;
3129 pc_tree->vertical[1].pred_pixel_ready = 0;
3131 if (bsize > BLOCK_8X8) {
3132 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
3133 int i;
3134 for (i = 0; i < 4; ++i)
3135 pred_pixel_ready_reset(pc_tree->split[i], subsize);
3136 }
3137 }
3139 static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
3140 TileDataEnc *tile_data,
3141 TOKENEXTRA **tp, int mi_row,
3142 int mi_col, BLOCK_SIZE bsize, RD_COST *rd_cost,
3143 int do_recon, int64_t best_rd,
3144 PC_TREE *pc_tree) {
3145 const SPEED_FEATURES *const sf = &cpi->sf;
3146 VP9_COMMON *const cm = &cpi->common;
3147 TileInfo *const tile_info = &tile_data->tile_info;
3148 MACROBLOCK *const x = &td->mb;
3149 MACROBLOCKD *const xd = &x->e_mbd;
3150 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
3151 TOKENEXTRA *tp_orig = *tp;
3152 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
3153 int i;
3154 BLOCK_SIZE subsize = bsize;
3155 RD_COST this_rdc, sum_rdc, best_rdc;
3156 int do_split = bsize >= BLOCK_8X8;
3157 int do_rect = 1;
3158 // Override skipping rectangular partition operations for edge blocks
3159 const int force_horz_split = (mi_row + ms >= cm->mi_rows);
3160 const int force_vert_split = (mi_col + ms >= cm->mi_cols);
3161 const int xss = x->e_mbd.plane[1].subsampling_x;
3162 const int yss = x->e_mbd.plane[1].subsampling_y;
3164 int partition_none_allowed = !force_horz_split && !force_vert_split;
3165 int partition_horz_allowed = !force_vert_split && yss <= xss &&
3166 bsize >= BLOCK_8X8;
3167 int partition_vert_allowed = !force_horz_split && xss <= yss &&
3168 bsize >= BLOCK_8X8;
3171 assert(num_8x8_blocks_wide_lookup[bsize] ==
3172 num_8x8_blocks_high_lookup[bsize]);
3174 vp9_rd_cost_init(&sum_rdc);
3175 vp9_rd_cost_reset(&best_rdc);
3176 best_rdc.rdcost = best_rd;
3178 // Determine which partition types to search according to the speed features.
3179 // The thresholds set here must be square block sizes.
3180 if (sf->auto_min_max_partition_size) {
3181 partition_none_allowed &= (bsize <= x->max_partition_size &&
3182 bsize >= x->min_partition_size);
3183 partition_horz_allowed &= ((bsize <= x->max_partition_size &&
3184 bsize > x->min_partition_size) ||
3185 force_horz_split);
3186 partition_vert_allowed &= ((bsize <= x->max_partition_size &&
3187 bsize > x->min_partition_size) ||
3188 force_vert_split);
3189 do_split &= bsize > x->min_partition_size;
3190 }
3191 if (sf->use_square_partition_only) {
3192 partition_horz_allowed &= force_horz_split;
3193 partition_vert_allowed &= force_vert_split;
3194 }
3196 ctx->pred_pixel_ready = !(partition_vert_allowed ||
3197 partition_horz_allowed ||
3198 do_split);
3200 // PARTITION_NONE
3201 if (partition_none_allowed) {
3202 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
3203 &this_rdc, bsize, ctx);
3204 ctx->mic = *xd->mi[0];
3205 ctx->mbmi_ext = *x->mbmi_ext;
3206 ctx->skip_txfm[0] = x->skip_txfm[0];
3207 ctx->skip = x->skip;
3209 if (this_rdc.rate != INT_MAX) {
3210 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3211 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
3212 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3213 this_rdc.rate, this_rdc.dist);
3214 if (this_rdc.rdcost < best_rdc.rdcost) {
3215 int64_t dist_breakout_thr = sf->partition_search_breakout_dist_thr;
3216 int64_t rate_breakout_thr = sf->partition_search_breakout_rate_thr;
3218 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
3219 b_height_log2_lookup[bsize]);
3221 rate_breakout_thr *= num_pels_log2_lookup[bsize];
3223 best_rdc = this_rdc;
3224 if (bsize >= BLOCK_8X8)
3225 pc_tree->partitioning = PARTITION_NONE;
3227 if (!x->e_mbd.lossless &&
3228 this_rdc.rate < rate_breakout_thr &&
3229 this_rdc.dist < dist_breakout_thr) {
3230 do_split = 0;
3231 do_rect = 0;
3232 }
3233 }
3234 }
3235 }
3237 // store estimated motion vector
3238 store_pred_mv(x, ctx);
3240 // PARTITION_SPLIT
3241 if (do_split) {
3242 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3243 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
3244 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
3245 subsize = get_subsize(bsize, PARTITION_SPLIT);
3246 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
3247 const int x_idx = (i & 1) * ms;
3248 const int y_idx = (i >> 1) * ms;
3250 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
3251 continue;
3252 load_pred_mv(x, ctx);
3253 nonrd_pick_partition(cpi, td, tile_data, tp,
3254 mi_row + y_idx, mi_col + x_idx,
3255 subsize, &this_rdc, 0,
3256 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
3258 if (this_rdc.rate == INT_MAX) {
3259 vp9_rd_cost_reset(&sum_rdc);
3260 } else {
3261 sum_rdc.rate += this_rdc.rate;
3262 sum_rdc.dist += this_rdc.dist;
3263 sum_rdc.rdcost += this_rdc.rdcost;
3264 }
3265 }
3267 if (sum_rdc.rdcost < best_rdc.rdcost) {
3268 best_rdc = sum_rdc;
3269 pc_tree->partitioning = PARTITION_SPLIT;
3271 // skip rectangular partition test when larger block size
3272 // gives better rd cost
3273 if (sf->less_rectangular_check)
3274 do_rect &= !partition_none_allowed;
3275 }
3276 }
3278 // PARTITION_HORZ
3279 if (partition_horz_allowed && do_rect) {
3280 subsize = get_subsize(bsize, PARTITION_HORZ);
3281 if (sf->adaptive_motion_search)
3282 load_pred_mv(x, ctx);
3283 pc_tree->horizontal[0].pred_pixel_ready = 1;
3284 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3285 &pc_tree->horizontal[0]);
3287 pc_tree->horizontal[0].mic = *xd->mi[0];
3288 pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
3289 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3290 pc_tree->horizontal[0].skip = x->skip;
3292 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
3293 load_pred_mv(x, ctx);
3294 pc_tree->horizontal[1].pred_pixel_ready = 1;
3295 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col,
3296 &this_rdc, subsize,
3297 &pc_tree->horizontal[1]);
3299 pc_tree->horizontal[1].mic = *xd->mi[0];
3300 pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
3301 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3302 pc_tree->horizontal[1].skip = x->skip;
3304 if (this_rdc.rate == INT_MAX) {
3305 vp9_rd_cost_reset(&sum_rdc);
3306 } else {
3307 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3308 this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
3309 sum_rdc.rate += this_rdc.rate;
3310 sum_rdc.dist += this_rdc.dist;
3311 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3312 sum_rdc.rate, sum_rdc.dist);
3313 }
3314 }
3316 if (sum_rdc.rdcost < best_rdc.rdcost) {
3317 best_rdc = sum_rdc;
3318 pc_tree->partitioning = PARTITION_HORZ;
3319 } else {
3320 pred_pixel_ready_reset(pc_tree, bsize);
3321 }
3322 }
3324 // PARTITION_VERT
3325 if (partition_vert_allowed && do_rect) {
3326 subsize = get_subsize(bsize, PARTITION_VERT);
3327 if (sf->adaptive_motion_search)
3328 load_pred_mv(x, ctx);
3329 pc_tree->vertical[0].pred_pixel_ready = 1;
3330 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3331 &pc_tree->vertical[0]);
3332 pc_tree->vertical[0].mic = *xd->mi[0];
3333 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
3334 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3335 pc_tree->vertical[0].skip = x->skip;
3337 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
3338 load_pred_mv(x, ctx);
3339 pc_tree->vertical[1].pred_pixel_ready = 1;
3340 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms,
3341 &this_rdc, subsize,
3342 &pc_tree->vertical[1]);
3343 pc_tree->vertical[1].mic = *xd->mi[0];
3344 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
3345 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3346 pc_tree->vertical[1].skip = x->skip;
3348 if (this_rdc.rate == INT_MAX) {
3349 vp9_rd_cost_reset(&sum_rdc);
3350 } else {
3351 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3352 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
3353 sum_rdc.rate += this_rdc.rate;
3354 sum_rdc.dist += this_rdc.dist;
3355 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3356 sum_rdc.rate, sum_rdc.dist);
3357 }
3358 }
3360 if (sum_rdc.rdcost < best_rdc.rdcost) {
3361 best_rdc = sum_rdc;
3362 pc_tree->partitioning = PARTITION_VERT;
3363 } else {
3364 pred_pixel_ready_reset(pc_tree, bsize);
3365 }
3366 }
3368 *rd_cost = best_rdc;
3370 if (best_rdc.rate == INT_MAX) {
3371 vp9_rd_cost_reset(rd_cost);
3372 return;
3373 }
3375 // update mode info array
3376 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, pc_tree);
3378 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
3379 int output_enabled = (bsize == BLOCK_64X64);
3380 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3381 pc_tree);
3382 }
3384 if (bsize == BLOCK_64X64 && do_recon) {
3385 assert(tp_orig < *tp);
3386 assert(best_rdc.rate < INT_MAX);
3387 assert(best_rdc.dist < INT64_MAX);
3388 } else {
3389 assert(tp_orig == *tp);
3390 }
3391 }
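/* Editor's note: the split loop above hands each sub-block the remaining
 * budget (best_rdc.rdcost - sum_rdc.rdcost), so recursion stops as soon as
 * the running sum can no longer beat the best whole-block cost. A sketch of
 * that pruning pattern with made-up costs (not libvpx API): */
#if 0
#include <stdint.h>
/* Returns the summed cost, or INT64_MAX once the budget is exhausted. */
static int64_t sum_quadrants(const int64_t cost[4], int64_t budget) {
  int64_t sum = 0;
  int i;
  for (i = 0; i < 4 && sum < budget; ++i)
    sum += cost[i];  /* e.g. budget 1000, costs {300, 450, 300, ...}: the
                        third add reaches 1050 and the loop exits early */
  return sum < budget ? sum : INT64_MAX;  /* INT64_MAX marks "pruned" */
}
#endif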
3393 static void nonrd_select_partition(VP9_COMP *cpi,
3394 ThreadData *td,
3395 TileDataEnc *tile_data,
3396 MODE_INFO **mi,
3397 TOKENEXTRA **tp,
3398 int mi_row, int mi_col,
3399 BLOCK_SIZE bsize, int output_enabled,
3400 RD_COST *rd_cost, PC_TREE *pc_tree) {
3401 VP9_COMMON *const cm = &cpi->common;
3402 TileInfo *const tile_info = &tile_data->tile_info;
3403 MACROBLOCK *const x = &td->mb;
3404 MACROBLOCKD *const xd = &x->e_mbd;
3405 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3406 const int mis = cm->mi_stride;
3407 PARTITION_TYPE partition;
3408 BLOCK_SIZE subsize;
3409 RD_COST this_rdc;
3411 vp9_rd_cost_reset(&this_rdc);
3412 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3413 return;
3415 subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
3416 partition = partition_lookup[bsl][subsize];
3418 if (bsize == BLOCK_32X32 && subsize == BLOCK_32X32) {
3419 x->max_partition_size = BLOCK_32X32;
3420 x->min_partition_size = BLOCK_16X16;
3421 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3422 rd_cost, 0, INT64_MAX, pc_tree);
3423 } else if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
3424 subsize >= BLOCK_16X16) {
3425 x->max_partition_size = BLOCK_32X32;
3426 x->min_partition_size = BLOCK_8X8;
3427 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3428 rd_cost, 0, INT64_MAX, pc_tree);
3429 } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
3430 x->max_partition_size = BLOCK_16X16;
3431 x->min_partition_size = BLOCK_8X8;
3432 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3433 rd_cost, 0, INT64_MAX, pc_tree);
3434 } else {
3435 switch (partition) {
3436 case PARTITION_NONE:
3437 pc_tree->none.pred_pixel_ready = 1;
3438 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3439 subsize, &pc_tree->none);
3440 pc_tree->none.mic = *xd->mi[0];
3441 pc_tree->none.mbmi_ext = *x->mbmi_ext;
3442 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3443 pc_tree->none.skip = x->skip;
3444 break;
3445 case PARTITION_VERT:
3446 pc_tree->vertical[0].pred_pixel_ready = 1;
3447 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3448 subsize, &pc_tree->vertical[0]);
3449 pc_tree->vertical[0].mic = *xd->mi[0];
3450 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
3451 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3452 pc_tree->vertical[0].skip = x->skip;
3453 if (mi_col + hbs < cm->mi_cols) {
3454 pc_tree->vertical[1].pred_pixel_ready = 1;
3455 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
3456 &this_rdc, subsize, &pc_tree->vertical[1]);
3457 pc_tree->vertical[1].mic = *xd->mi[0];
3458 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
3459 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3460 pc_tree->vertical[1].skip = x->skip;
3461 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3462 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3463 rd_cost->rate += this_rdc.rate;
3464 rd_cost->dist += this_rdc.dist;
3465 }
3466 }
3467 break;
3468 case PARTITION_HORZ:
3469 pc_tree->horizontal[0].pred_pixel_ready = 1;
3470 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3471 subsize, &pc_tree->horizontal[0]);
3472 pc_tree->horizontal[0].mic = *xd->mi[0];
3473 pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
3474 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3475 pc_tree->horizontal[0].skip = x->skip;
3476 if (mi_row + hbs < cm->mi_rows) {
3477 pc_tree->horizontal[1].pred_pixel_ready = 1;
3478 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
3479 &this_rdc, subsize, &pc_tree->horizontal[1]);
3480 pc_tree->horizontal[1].mic = *xd->mi[0];
3481 pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
3482 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3483 pc_tree->horizontal[1].skip = x->skip;
3484 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3485 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3486 rd_cost->rate += this_rdc.rate;
3487 rd_cost->dist += this_rdc.dist;
3488 }
3489 }
3490 break;
3491 case PARTITION_SPLIT:
3492 subsize = get_subsize(bsize, PARTITION_SPLIT);
3493 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3494 subsize, output_enabled, rd_cost,
3495 pc_tree->split[0]);
3496 nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp,
3497 mi_row, mi_col + hbs, subsize, output_enabled,
3498 &this_rdc, pc_tree->split[1]);
3499 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3500 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3501 rd_cost->rate += this_rdc.rate;
3502 rd_cost->dist += this_rdc.dist;
3503 }
3504 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis, tp,
3505 mi_row + hbs, mi_col, subsize, output_enabled,
3506 &this_rdc, pc_tree->split[2]);
3507 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3508 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3509 rd_cost->rate += this_rdc.rate;
3510 rd_cost->dist += this_rdc.dist;
3511 }
3512 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
3513 mi_row + hbs, mi_col + hbs, subsize,
3514 output_enabled, &this_rdc, pc_tree->split[3]);
3515 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3516 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3517 rd_cost->rate += this_rdc.rate;
3518 rd_cost->dist += this_rdc.dist;
3519 }
3520 break;
3521 default:
3522 assert(0 && "Invalid partition type.");
3523 break;
3524 }
3525 }
3527 if (bsize == BLOCK_64X64 && output_enabled)
3528 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
3529 }
3532 static void nonrd_use_partition(VP9_COMP *cpi,
3533 ThreadData *td,
3534 TileDataEnc *tile_data,
3535 MODE_INFO **mi,
3536 TOKENEXTRA **tp,
3537 int mi_row, int mi_col,
3538 BLOCK_SIZE bsize, int output_enabled,
3539 RD_COST *dummy_cost, PC_TREE *pc_tree) {
3540 VP9_COMMON *const cm = &cpi->common;
3541 TileInfo *tile_info = &tile_data->tile_info;
3542 MACROBLOCK *const x = &td->mb;
3543 MACROBLOCKD *const xd = &x->e_mbd;
3544 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3545 const int mis = cm->mi_stride;
3546 PARTITION_TYPE partition;
3547 BLOCK_SIZE subsize;
3549 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3550 return;
3552 subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
3553 partition = partition_lookup[bsl][subsize];
3555 if (output_enabled && bsize != BLOCK_4X4) {
3556 int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
3557 td->counts->partition[ctx][partition]++;
3558 }
3560 switch (partition) {
3561 case PARTITION_NONE:
3562 pc_tree->none.pred_pixel_ready = 1;
3563 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3564 subsize, &pc_tree->none);
3565 pc_tree->none.mic = *xd->mi[0];
3566 pc_tree->none.mbmi_ext = *x->mbmi_ext;
3567 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3568 pc_tree->none.skip = x->skip;
3569 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3570 subsize, &pc_tree->none);
3571 break;
3572 case PARTITION_VERT:
3573 pc_tree->vertical[0].pred_pixel_ready = 1;
3574 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3575 subsize, &pc_tree->vertical[0]);
3576 pc_tree->vertical[0].mic = *xd->mi[0];
3577 pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
3578 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3579 pc_tree->vertical[0].skip = x->skip;
3580 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3581 subsize, &pc_tree->vertical[0]);
3582 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
3583 pc_tree->vertical[1].pred_pixel_ready = 1;
3584 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
3585 dummy_cost, subsize, &pc_tree->vertical[1]);
3586 pc_tree->vertical[1].mic = *xd->mi[0];
3587 pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
3588 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3589 pc_tree->vertical[1].skip = x->skip;
3590 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
3591 output_enabled, subsize, &pc_tree->vertical[1]);
3592 }
3593 break;
    case PARTITION_HORZ:
      pc_tree->horizontal[0].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                          subsize, &pc_tree->horizontal[0]);
      pc_tree->horizontal[0].mic = *xd->mi[0];
      pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
      pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->horizontal[0].skip = x->skip;
      encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                  subsize, &pc_tree->horizontal[0]);

      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        pc_tree->horizontal[1].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
                            dummy_cost, subsize, &pc_tree->horizontal[1]);
        pc_tree->horizontal[1].mic = *xd->mi[0];
        pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
        pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->horizontal[1].skip = x->skip;
        encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
                    output_enabled, subsize, &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      if (bsize == BLOCK_8X8) {
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                            subsize, pc_tree->leaf_split[0]);
        encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col,
                    output_enabled, subsize, pc_tree->leaf_split[0]);
      } else {
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            subsize, output_enabled, dummy_cost,
                            pc_tree->split[0]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp,
                            mi_row, mi_col + hbs, subsize, output_enabled,
                            dummy_cost, pc_tree->split[1]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
                            mi_row + hbs, mi_col, subsize, output_enabled,
                            dummy_cost, pc_tree->split[2]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
                            mi_row + hbs, mi_col + hbs, subsize,
                            output_enabled, dummy_cost, pc_tree->split[3]);
      }
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
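
// nonrd_use_partition() does no partition search of its own: it walks a
// partitioning that has already been written into the mi grid (for example
// by choose_partitioning() or set_fixed_partitioning()) and encodes it
// as-is, recovering each PARTITION_* type from the sub-block size stored at
// mi[0] via partition_lookup[bsl][subsize].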
static void encode_nonrd_sb_row(VP9_COMP *cpi,
                                ThreadData *td,
                                TileDataEnc *tile_data,
                                int mi_row,
                                TOKENEXTRA **tp) {
  SPEED_FEATURES *const sf = &cpi->sf;
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int mi_col;

  // Initialize the left context for the new SB row
  memset(&xd->left_context, 0, sizeof(xd->left_context));
  memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    const struct segmentation *const seg = &cm->seg;
    RD_COST dummy_rdc;
    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;
    PARTITION_SEARCH_TYPE partition_search_type = sf->partition_search_type;
    BLOCK_SIZE bsize = BLOCK_64X64;
    int seg_skip = 0;
    x->source_variance = UINT_MAX;
    vp9_zero(x->pred_mv);
    vp9_rd_cost_init(&dummy_rdc);
    x->color_sensitivity[0] = 0;
    x->color_sensitivity[1] = 0;

    if (seg->enabled) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
      seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
      if (seg_skip) {
        partition_search_type = FIXED_PARTITION;
      }
    }
    // Set the partition type of the 64X64 block
    switch (partition_search_type) {
      case VAR_BASED_PARTITION:
        // TODO(jingning, marpan): The mode decision and encoding process
        // support both intra and inter sub8x8 block coding for RTC mode.
        // Tune the thresholds accordingly to use sub8x8 block coding for
        // coding performance improvement.
        choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case SOURCE_VAR_BASED_PARTITION:
        set_source_var_based_partition(cpi, tile_info, x, mi, mi_row, mi_col);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case FIXED_PARTITION:
        if (!seg_skip)
          bsize = sf->always_this_block_size;
        set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case REFERENCE_PARTITION:
        set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
        if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
            xd->mi[0]->segment_id) {
          // Use lower max_partition_size for low resolutions.
          if (cm->width <= 352 && cm->height <= 288)
            x->max_partition_size = BLOCK_32X32;
          else
            x->max_partition_size = BLOCK_64X64;
          x->min_partition_size = BLOCK_8X8;
          nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
                               BLOCK_64X64, &dummy_rdc, 1,
                               INT64_MAX, td->pc_root);
        } else {
          choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
          // TODO(marpan): Seems like nonrd_select_partition does not support
          // 4x4 partition. Since 4x4 is used on key frame, use this switch
          // for now.
          if (cm->frame_type == KEY_FRAME)
            nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                                BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
          else
            nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                                   BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        }
        break;
      default:
        assert(0);
        break;
    }
  }
}
// end RTC play code
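
// The SOURCE_VAR_BASED_PARTITION helpers below derive a background-variance
// threshold from a histogram of 16x16 frame-difference variances; the
// VAR_HIST_* cut-off constants are expressed as percentages of the frame's
// macroblock count (hence the divisions by 100).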
static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
  const SPEED_FEATURES *const sf = &cpi->sf;
  const VP9_COMMON *const cm = &cpi->common;

  const uint8_t *src = cpi->Source->y_buffer;
  const uint8_t *last_src = cpi->Last_Source->y_buffer;
  const int src_stride = cpi->Source->y_stride;
  const int last_stride = cpi->Last_Source->y_stride;

  // Pick cutoff threshold
  const int cutoff = (VPXMIN(cm->width, cm->height) >= 720) ?
      (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
      (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
  DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]);
  diff *var16 = cpi->source_diff_var;

  int sum = 0;
  int i, j;

  memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));

  for (i = 0; i < cm->mb_rows; i++) {
    for (j = 0; j < cm->mb_cols; j++) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        switch (cm->bit_depth) {
          case VPX_BITS_8:
            vpx_highbd_8_get16x16var(src, src_stride, last_src, last_stride,
                                     &var16->sse, &var16->sum);
            break;
          case VPX_BITS_10:
            vpx_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          case VPX_BITS_12:
            vpx_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          default:
            assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
                        " or VPX_BITS_12");
            return -1;
        }
      } else {
        vpx_get16x16var(src, src_stride, last_src, last_stride,
                        &var16->sse, &var16->sum);
      }
#else
      vpx_get16x16var(src, src_stride, last_src, last_stride,
                      &var16->sse, &var16->sum);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      var16->var = var16->sse -
          (((uint32_t)var16->sum * var16->sum) >> 8);
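      // For a 16x16 block (256 samples) this is the scaled variance
      //   256 * Var = SSE - Sum^2 / 256,
      // where Sum and SSE are taken over the source/last-source differences;
      // the >> 8 implements the division by 256.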
      if (var16->var >= VAR_HIST_MAX_BG_VAR)
        hist[VAR_HIST_BINS - 1]++;
      else
        hist[var16->var / VAR_HIST_FACTOR]++;

      src += 16;
      last_src += 16;
      var16++;
    }

    src = src - cm->mb_cols * 16 + 16 * src_stride;
    last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
  }

  cpi->source_var_thresh = 0;

  if (hist[VAR_HIST_BINS - 1] < cutoff) {
    for (i = 0; i < VAR_HIST_BINS - 1; i++) {
      sum += hist[i];

      if (sum > cm->MBs * VAR_HIST_THRESH / 100) {
        cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
        return 0;
      }
    }
  }

  return sf->search_type_check_frequency;
}
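
// Threshold selection above: if too many blocks already exceed
// VAR_HIST_MAX_BG_VAR (the last bin alone reaches the cutoff), no usable
// threshold exists and the non-zero return value schedules the next
// histogram check. Otherwise the threshold is placed at the first bin where
// the cumulative count passes VAR_HIST_THRESH percent of the frame's
// macroblocks.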
static void source_var_based_partition_search_method(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;

  if (cm->frame_type == KEY_FRAME) {
    // For key frame, use SEARCH_PARTITION.
    sf->partition_search_type = SEARCH_PARTITION;
  } else if (cm->intra_only) {
    sf->partition_search_type = FIXED_PARTITION;
  } else {
    if (cm->last_width != cm->width || cm->last_height != cm->height) {
      if (cpi->source_diff_var)
        vpx_free(cpi->source_diff_var);

      CHECK_MEM_ERROR(cm, cpi->source_diff_var,
                      vpx_calloc(cm->MBs, sizeof(diff)));
    }

    if (!cpi->frames_till_next_var_check)
      cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);

    if (cpi->frames_till_next_var_check > 0) {
      sf->partition_search_type = FIXED_PARTITION;
      cpi->frames_till_next_var_check--;
    }
  }
}
static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) {
  unsigned int intra_count = 0, inter_count = 0;
  int j;

  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
    intra_count += td->counts->intra_inter[j][0];
    inter_count += td->counts->intra_inter[j][1];
  }

  return (intra_count << 2) < inter_count &&
         cm->frame_type != KEY_FRAME &&
         cm->show_frame;
}
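
// Heuristic: (intra_count << 2) < inter_count requires inter blocks to
// outnumber intra blocks by more than 4:1 before block re-encoding may be
// skipped, and never on key frames or on frames that are not shown.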
void vp9_init_tile_data(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;
  TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
  int tile_tok = 0;

  if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
    if (cpi->tile_data != NULL)
      vpx_free(cpi->tile_data);
    CHECK_MEM_ERROR(cm, cpi->tile_data,
        vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
    cpi->allocated_tiles = tile_cols * tile_rows;

    for (tile_row = 0; tile_row < tile_rows; ++tile_row)
      for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
        TileDataEnc *tile_data =
            &cpi->tile_data[tile_row * tile_cols + tile_col];
        int i, j;
        for (i = 0; i < BLOCK_SIZES; ++i) {
          for (j = 0; j < MAX_MODES; ++j) {
            tile_data->thresh_freq_fact[i][j] = 32;
            tile_data->mode_map[i][j] = j;
          }
        }
      }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileInfo *tile_info =
          &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
      vp9_tile_init(tile_info, cm, tile_row, tile_col);

      cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
      pre_tok = cpi->tile_tok[tile_row][tile_col];
      tile_tok = allocated_tokens(*tile_info);
    }
  }
}
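
// Token buffers are carved out of one linear allocation: each tile's write
// pointer starts where the previous tile's allocated_tokens() budget ends,
// so tiles can be tokenized independently (and in parallel) without
// overlapping each other.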
void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td,
                     int tile_row, int tile_col) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  TileDataEnc *this_tile =
      &cpi->tile_data[tile_row * tile_cols + tile_col];
  const TileInfo * const tile_info = &this_tile->tile_info;
  TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
  int mi_row;

  // Set up pointers to per thread motion search counters.
  td->mb.m_search_count_ptr = &td->rd_counts.m_search_count;
  td->mb.ex_search_count_ptr = &td->rd_counts.ex_search_count;

  for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    if (cpi->sf.use_nonrd_pick_mode)
      encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
    else
      encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
  }
  cpi->tok_count[tile_row][tile_col] =
      (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
  assert(tok - cpi->tile_tok[tile_row][tile_col] <=
         allocated_tokens(*tile_info));
}
static void encode_tiles(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;

  vp9_init_tile_data(cpi);

  for (tile_row = 0; tile_row < tile_rows; ++tile_row)
    for (tile_col = 0; tile_col < tile_cols; ++tile_col)
      vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
}
#if CONFIG_FP_MB_STATS
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
                            VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
  uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
      cm->current_video_frame * cm->MBs * sizeof(uint8_t);

  if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
    return EOF;

  *this_frame_mb_stats = mb_stats_in;

  return 1;
}
#endif
static void encode_frame_internal(VP9_COMP *cpi) {
  SPEED_FEATURES *const sf = &cpi->sf;
  ThreadData *const td = &cpi->td;
  MACROBLOCK *const x = &td->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  RD_COUNTS *const rdc = &cpi->td.rd_counts;

  xd->mi = cm->mi_grid_visible;
  xd->mi[0] = cm->mi;

  vp9_zero(*td->counts);
  vp9_zero(rdc->coef_counts);
  vp9_zero(rdc->comp_pred_diff);
  vp9_zero(rdc->filter_diff);
  rdc->m_search_count = 0;   // Count of motion search hits.
  rdc->ex_search_count = 0;  // Exhaustive mesh search hits.

  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;
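
  // Lossless coding in VP9 is signalled implicitly: a zero base q-index with
  // all delta-q values zero. In that case the 4x4 Walsh-Hadamard transform
  // pair (vp9_fwht4x4 / vp9_iwht4x4_add) is selected below in place of the
  // DCT/IDCT.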
#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth)
    x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4;
  else
    x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
  x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add :
                                      vp9_highbd_idct4x4_add;
#else
  x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;

  if (xd->lossless)
    x->optimize = 0;
  cm->tx_mode = select_tx_mode(cpi, xd);

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi);
  vp9_initialize_me_consts(cpi, x, cm->base_qindex);
  init_encode_frame_mb_context(cpi);
  cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
                           cm->width == cm->last_width &&
                           cm->height == cm->last_height &&
                           !cm->intra_only &&
                           cm->last_show_frame;
  // Special case: set prev_mi to NULL when the previous mode info
  // context cannot be used.
  cm->prev_mi = cm->use_prev_frame_mvs ?
                cm->prev_mip + cm->mi_stride + 1 : NULL;

  x->quant_fp = cpi->sf.use_quant_fp;
  vp9_zero(x->skip_txfm);
  if (sf->use_nonrd_pick_mode) {
    // Initialize internal buffer pointers for rtc coding, where non-RD
    // mode decision is used and hence no buffer pointer swap is needed.
    int i;
    struct macroblock_plane *const p = x->plane;
    struct macroblockd_plane *const pd = xd->plane;
    PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none;

    for (i = 0; i < MAX_MB_PLANE; ++i) {
      p[i].coeff = ctx->coeff_pbuf[i][0];
      p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
      pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
      p[i].eobs = ctx->eobs_pbuf[i][0];
    }
    vp9_zero(x->zcoeff_blk);
  }

  if (cm->frame_type != KEY_FRAME &&
      cpi->rc.frames_since_golden == 0 &&
      !cpi->use_svc)
    cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);

  if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
    source_var_based_partition_search_method(cpi);
  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

#if CONFIG_FP_MB_STATS
    if (cpi->use_fp_mb_stats) {
      input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
                       &cpi->twopass.this_frame_mb_stats);
    }
#endif

    // If allowed, encode tiles in parallel, with one thread handling one
    // tile.
    if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
      vp9_encode_tiles_mt(cpi);
    else
      encode_tiles(cpi);

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  sf->skip_encode_frame = sf->skip_encode_sb ?
                          get_skip_encode_frame(cm, td) : 0;

#if 0
  // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
static INTERP_FILTER get_interp_filter(
    const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
  if (!is_alt_ref &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SMOOTH;
  } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
             threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SHARP;
  } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP;
  } else {
    return SWITCHABLE;
  }
}
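
// The thresholds are running per-frame-type RD averages, updated in
// vp9_encode_frame() below. A fixed filter is only chosen when it has been
// beating the SWITCHABLE threshold; EIGHTTAP_SMOOTH is additionally skipped
// for ALTREF frames, which are typically already temporally filtered.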
void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;

  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
  if (!frame_is_intra_only(cm)) {
    if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
         cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
        (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
         cm->ref_frame_sign_bias[LAST_FRAME])) {
      cpi->allow_comp_inter_inter = 0;
    } else {
      cpi->allow_comp_inter_inter = 1;
      cm->comp_fixed_ref = ALTREF_FRAME;
      cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = GOLDEN_FRAME;
    }
  }
  if (cpi->sf.frame_parameter_update) {
    int i;
    RD_OPT *const rd_opt = &cpi->rd;
    FRAME_COUNTS *counts = cpi->td.counts;
    RD_COUNTS *const rdc = &cpi->td.rd_counts;

    // This code does a single RD pass over the whole frame assuming
    // either compound, single or hybrid prediction as per whatever has
    // worked best for that type of frame in the past.
    // It also predicts whether another coding mode would have worked
    // better than this coding mode. If that is the case, it remembers
    // that for subsequent frames.
    // It does the same analysis for transform size selection also.
    const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
    int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
    int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
    const int is_alt_ref = frame_type == ALTREF_FRAME;

    /* prediction (compound, single or hybrid) mode selection */
    if (is_alt_ref || !cpi->allow_comp_inter_inter)
      cm->reference_mode = SINGLE_REFERENCE;
    else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
             mode_thrs[COMPOUND_REFERENCE] >
                 mode_thrs[REFERENCE_MODE_SELECT] &&
             check_dual_ref_flags(cpi) &&
             cpi->static_mb_pct == 100)
      cm->reference_mode = COMPOUND_REFERENCE;
    else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
      cm->reference_mode = SINGLE_REFERENCE;
    else
      cm->reference_mode = REFERENCE_MODE_SELECT;

    if (cm->interp_filter == SWITCHABLE)
      cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);

    encode_frame_internal(cpi);

    for (i = 0; i < REFERENCE_MODES; ++i)
      mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;
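
    // Both loops above are exponential moving averages with weight 1/2:
    //   thresh' = (thresh + frame_rd_diff / MBs) / 2
    // so each frame's per-MB RD difference decays geometrically in later
    // threshold values.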
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
        single_count_zero += counts->comp_inter[i][0];
        comp_count_zero += counts->comp_inter[i][1];
      }

      if (comp_count_zero == 0) {
        cm->reference_mode = SINGLE_REFERENCE;
        vp9_zero(counts->comp_inter);
      } else if (single_count_zero == 0) {
        cm->reference_mode = COMPOUND_REFERENCE;
        vp9_zero(counts->comp_inter);
      }
    }

    if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        count4x4 += counts->tx.p32x32[i][TX_4X4];
        count4x4 += counts->tx.p16x16[i][TX_4X4];
        count4x4 += counts->tx.p8x8[i][TX_4X4];

        count8x8_lp += counts->tx.p32x32[i][TX_8X8];
        count8x8_lp += counts->tx.p16x16[i][TX_8X8];
        count8x8_8x8p += counts->tx.p8x8[i][TX_8X8];

        count16x16_16x16p += counts->tx.p16x16[i][TX_16X16];
        count16x16_lp += counts->tx.p32x32[i][TX_16X16];
        count32x32 += counts->tx.p32x32[i][TX_32X32];
      }
      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
        reset_skip_tx_size(cm, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
                 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
        cm->tx_mode = ONLY_4X4;
        reset_skip_tx_size(cm, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_16X16;
        reset_skip_tx_size(cm, TX_16X16);
      }
    }
  } else {
    cm->reference_mode = SINGLE_REFERENCE;
    encode_frame_internal(cpi);
  }
}
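
// Note on the TX_MODE_SELECT collapse above: when the per-context counts show
// that some transform sizes were never chosen, the frame header can signal a
// single fixed tx mode instead of per-block choices; reset_skip_tx_size()
// then caps any recorded tx_size at the new maximum, since skip blocks may
// still carry stale larger sizes.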
static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
  const PREDICTION_MODE y_mode = mi->mode;
  const PREDICTION_MODE uv_mode = mi->uv_mode;
  const BLOCK_SIZE bsize = mi->sb_type;

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_h)
      for (idx = 0; idx < 2; idx += num_4x4_w)
        ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }

  ++counts->uv_mode[y_mode][uv_mode];
}
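
// For sub-8x8 blocks the loops stride by the number of 4x4 units each
// prediction block covers: a 4x4 block counts all four bmi entries, while a
// 4x8 or 8x4 block (one dimension spanning two units) counts two.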
static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO **mi_8x8 = xd->mi;
  MODE_INFO *mi = mi_8x8[0];
  const int seg_skip = segfeature_active(&cm->seg, mi->segment_id,
                                         SEG_LVL_SKIP);
  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];

  x->skip_recode = !x->select_tx_size && mi->sb_type >= BLOCK_8X8 &&
                   cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
                   cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
                   cpi->sf.allow_skip_recode;

  if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
    memset(x->skip_txfm, 0, sizeof(x->skip_txfm));

  x->skip_optimize = ctx->is_coded;
  ctx->is_coded = 1;
  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
                    x->q_index < QIDX_SKIP_THRESH);

  if (x->skip_encode)
    return;

  if (!is_inter_block(mi)) {
    int plane;
    mi->skip = 1;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
      vp9_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane);
    if (output_enabled)
      sum_intra_stats(td->counts, mi);
    vp9_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
  } else {
    int ref;
    const int is_compound = has_second_ref(mi);
    set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
    for (ref = 0; ref < 1 + is_compound; ++ref) {
      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
                                                     mi->ref_frame[ref]);
      assert(cfg != NULL);
      vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                           &xd->block_refs[ref]->sf);
    }
    if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
      vp9_build_inter_predictors_sby(xd, mi_row, mi_col,
                                     VPXMAX(bsize, BLOCK_8X8));

    vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col,
                                    VPXMAX(bsize, BLOCK_8X8));

    vp9_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
    vp9_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
  }

  if (output_enabled) {
    if (cm->tx_mode == TX_MODE_SELECT &&
        mi->sb_type >= BLOCK_8X8 &&
        !(is_inter_block(mi) && (mi->skip || seg_skip))) {
      ++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd),
                      &td->counts->tx)[mi->tx_size];
    } else {
      int x, y;
      TX_SIZE tx_size;
      // The new intra coding scheme requires no change of transform size
      if (is_inter_block(mi)) {
        tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                         max_txsize_lookup[bsize]);
      } else {
        tx_size = (bsize >= BLOCK_8X8) ? mi->tx_size : TX_4X4;
      }

      for (y = 0; y < mi_height; y++)
        for (x = 0; x < mi_width; x++)
          if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
            mi_8x8[mis * y + x]->tx_size = tx_size;
    }
    ++td->counts->tx.tx_totals[mi->tx_size];
    ++td->counts->tx.tx_totals[get_uv_tx_size(mi, &xd->plane[1])];
    if (cm->seg.enabled && cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_update_sb_postencode(cpi, mi, mi_row, mi_col, bsize);
  }
}