/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <limits.h>
#include <string.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_ports/system_state.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_360.h"
#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                              int output_enabled, int mi_row, int mi_col,
                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
// Machine learning-based early termination parameters.
static const double train_mean[24] = {
  303501.697372, 3042630.372158, 24.694696, 1.392182,
  689.413511, 162.027012, 1.478213, 0.0,
  135382.260230, 912738.513263, 28.845217, 1.515230,
  544.158492, 131.807995, 1.436863, 0.0,
  43682.377587, 208131.711766, 28.084737, 1.356677,
  138.254122, 119.522553, 1.252322, 0.0
};
static const double train_stdm[24] = {
  673689.212982, 5996652.516628, 0.024449, 1.989792,
  985.880847, 0.014638, 2.001898, 0.0,
  208798.775332, 1812548.443284, 0.018693, 1.838009,
  396.986910, 0.015657, 1.332541, 0.0,
  55888.847031, 448587.962714, 0.017900, 1.904776,
  98.652832, 0.016598, 1.320992, 0.0
};
// Error tolerance: 0.01% - 0.05% - 0.1%
static const double classifiers[24] = {
  0.111736, 0.289977, 0.042219, 0.204765, 0.120410, -0.143863,
  0.282376, 0.847811, 0.637161, 0.131570, 0.018636, 0.202134,
  0.112797, 0.028162, 0.182450, 1.124367, 0.386133, 0.083700,
  0.050028, 0.150873, 0.061119, 0.109318, 0.127255, 0.625211
};
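// Presumably one 8-value group per error-tolerance operating point listed
// above: features are standardized with train_mean/train_stdm before the
// linear weights in classifiers are applied.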
// This is used as a reference when computing the source variance for the
// purpose of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
};
static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
  128 * 16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH
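// Average per-pixel variance of a source block, measured against the flat
// 128-level (bit-depth scaled) reference arrays defined above.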
unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var =
      cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_perpixel_variance(VP9_COMP *cpi,
                                                const struct buf_2d *ref,
                                                BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), 0,
                               &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), 0,
                               &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), 0,
                               &sse);
      break;
  }
  return (unsigned int)ROUND64_POWER_OF_TWO((int64_t)var,
                                            num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse, var;
  uint8_t *last_y;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row, int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(
      cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
  if (var < 8)
    return BLOCK_64X64;
  else if (var < 128)
    return BLOCK_32X32;
  else if (var < 2048)
    return BLOCK_16X16;
  else
    return BLOCK_8X8;
}
// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCK *const x,
                                         MACROBLOCKD *const xd, int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
  x->mbmi_ext = x->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;
  MvLimits *const mv_limits = &x->mv_limits;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, x, xd, mi_row, mi_col);

  mi = xd->mi[0];

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond the range do not produce a new/different prediction block.
  mv_limits->row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  mv_limits->col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  mv_limits->row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  mv_limits->col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
                 cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;

  // Setup segment id.
  if (seg->enabled) {
    if (cpi->oxcf.aq_mode != VARIANCE_AQ && cpi->oxcf.aq_mode != LOOKAHEAD_AQ &&
        cpi->oxcf.aq_mode != EQUATOR360_AQ) {
      const uint8_t *const map =
          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
      mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mi->segment_id];
  } else {
    mi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }

  // Required by vp9_append_sub8x8_mvs_for_idx() and vp9_find_best_ref_mvs().
  xd->tile = *tile;
}
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width =
      VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
  const int block_height =
      VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
  const int mi_stride = xd->mi_stride;
  MODE_INFO *const src_mi = xd->mi[0];
  int i, j;

  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) xd->mi[j * mi_stride + i] = src_mi;
}
static void set_block_size(VP9_COMP *const cpi, MACROBLOCK *const x,
                           MACROBLOCKD *const xd, int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col);
    xd->mi[0]->sb_type = bsize;
  }
}
typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;
static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    default: {
      v4x4 *vt = (v4x4 *)data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++) node->split[i] = &vt->split[i];
      break;
    }
  }
}
// Set variance values given sum square error, sum error, count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}
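// get_variance() recovers the scaled integer variance from the accumulated
// sums: 256 * (E[X^2] - E[X]^2). Counts are powers of two, so the divisions
// reduce to shifts by log2_count.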
static void get_variance(var *v) {
  v->variance =
      (int)(256 * (v->sum_square_error -
                   ((v->sum_error * v->sum_error) >> v->log2_count)) >>
            v->log2_count);
}
static void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}
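// The four children in split[] are in raster order (top-left, top-right,
// bottom-left, bottom-right), which is what the horz/vert pairings in
// fill_variance_tree() below rely on.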
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  memset(&node, 0, sizeof(node));
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}
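// Returns 1 when the partition for this block is fully decided (no split,
// vertical split, or horizontal split); returns 0 when the caller should
// recurse into a four-way split.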
static int set_vt_partitioning(VP9_COMP *cpi, MACROBLOCK *const x,
                               MACROBLOCKD *const xd, void *data,
                               BLOCK_SIZE bsize, int mi_row, int mi_col,
                               int64_t threshold, BLOCK_SIZE bsize_min,
                               int force_split) {
  VP9_COMMON *const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  if (force_split == 1) return 0;

  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_min) {
    // Variance already computed to set the force_split.
    if (cm->frame_type == KEY_FRAME) get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    // Variance already computed to set the force_split.
    if (cm->frame_type == KEY_FRAME) get_variance(&vt.part_variances->none);
    // For key frame: take split for bsize above 32X32 or very high variance.
    if (cm->frame_type == KEY_FRAME &&
        (bsize > BLOCK_32X32 ||
         vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }
    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }
  }
  return 0;
}
static int64_t scale_part_thresh_sumdiff(int64_t threshold_base, int speed,
                                         int width, int height,
                                         int content_state) {
  if (speed >= 8) {
    if (width <= 640 && height <= 480)
      return (5 * threshold_base) >> 2;
    else if ((content_state == kLowSadLowSumdiff) ||
             (content_state == kHighSadLowSumdiff) ||
             (content_state == kLowVarHighSumdiff))
      return (5 * threshold_base) >> 2;
  } else if (speed == 7) {
    if ((content_state == kLowSadLowSumdiff) ||
        (content_state == kHighSadLowSumdiff) ||
        (content_state == kLowVarHighSumdiff)) {
      return (5 * threshold_base) >> 2;
    }
  }
  return threshold_base;
}
// Set the variance split thresholds for the following block sizes:
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frame.
static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q,
                               int content_state) {
  VP9_COMMON *const cm = &cpi->common;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int threshold_multiplier = is_key_frame ? 20 : 1;
  int64_t threshold_base =
      (int64_t)(threshold_multiplier * cpi->y_dequant[q][1]);

  if (is_key_frame) {
    thresholds[0] = threshold_base;
    thresholds[1] = threshold_base >> 2;
    thresholds[2] = threshold_base >> 2;
    thresholds[3] = threshold_base << 2;
  } else {
    // Increase base variance threshold based on estimated noise level.
    if (cpi->noise_estimate.enabled && cm->width >= 640 && cm->height >= 480) {
      NOISE_LEVEL noise_level =
          vp9_noise_estimate_extract_level(&cpi->noise_estimate);
      if (noise_level == kHigh)
        threshold_base = 3 * threshold_base;
      else if (noise_level == kMedium)
        threshold_base = threshold_base << 1;
      else if (noise_level < kLow)
        threshold_base = (7 * threshold_base) >> 3;
    }
#if CONFIG_VP9_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity > 0 && denoise_svc(cpi) &&
        cpi->oxcf.speed > 5 && cpi->denoiser.denoising_level >= kDenLow)
      threshold_base =
          vp9_scale_part_thresh(threshold_base, cpi->denoiser.denoising_level,
                                content_state, cpi->svc.temporal_layer_id);
    else
      threshold_base =
          scale_part_thresh_sumdiff(threshold_base, cpi->oxcf.speed, cm->width,
                                    cm->height, content_state);
#else
    // Increase base variance threshold based on content_state/sum_diff level.
    threshold_base = scale_part_thresh_sumdiff(
        threshold_base, cpi->oxcf.speed, cm->width, cm->height, content_state);
#endif
    thresholds[0] = threshold_base;
    thresholds[2] = threshold_base << cpi->oxcf.speed;
    if (cm->width <= 352 && cm->height <= 288) {
      thresholds[0] = threshold_base >> 3;
      thresholds[1] = threshold_base >> 1;
      thresholds[2] = threshold_base << 3;
    } else if (cm->width < 1280 && cm->height < 720) {
      thresholds[1] = (5 * threshold_base) >> 2;
    } else if (cm->width < 1920 && cm->height < 1080) {
      thresholds[1] = threshold_base << 1;
    } else {
      thresholds[1] = (5 * threshold_base) >> 1;
    }
  }
}
void vp9_set_variance_partition_thresholds(VP9_COMP *cpi, int q,
                                           int content_state) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  if (sf->partition_search_type != VAR_BASED_PARTITION &&
      sf->partition_search_type != REFERENCE_PARTITION) {
    return;
  } else {
    set_vbp_thresholds(cpi, cpi->vbp_thresholds, q, content_state);
    // The thresholds below are not changed locally.
    if (is_key_frame) {
      cpi->vbp_threshold_sad = 0;
      cpi->vbp_threshold_copy = 0;
      cpi->vbp_bsize_min = BLOCK_8X8;
    } else {
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_sad = 10;
      else
        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000
                                     ? (cpi->y_dequant[q][1] << 1)
                                     : 1000;
      cpi->vbp_bsize_min = BLOCK_16X16;
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_copy = 4000;
      else if (cm->width <= 640 && cm->height <= 360)
        cpi->vbp_threshold_copy = 8000;
      else
        cpi->vbp_threshold_copy = (cpi->y_dequant[q][1] << 3) > 8000
                                      ? (cpi->y_dequant[q][1] << 3)
                                      : 8000;
    }
    cpi->vbp_threshold_minmax = 15 + (q >> 3);
  }
}
// Compute the minmax over the 8x8 subblocks.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                              int dp, int x16_idx, int y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                              int highbd_flag,
#endif
                              int pixels_wide, int pixels_high) {
  int k;
  int minmax_max = 0;
  int minmax_min = 255;
  // Loop over the 4 8x8 subblocks.
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    int min = 0;
    int max = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                              d + y8_idx * dp + x8_idx, dp, &min, &max);
      } else {
        vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx,
                       dp, &min, &max);
      }
#else
      vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp,
                     &min, &max);
#endif
      if ((max - min) > minmax_max) minmax_max = (max - min);
      if ((max - min) < minmax_min) minmax_min = (max - min);
    }
  }
  return (minmax_max - minmax_min);
}
static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x8_idx, int y8_idx, v8x8 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide, int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x4_idx = x8_idx + ((k & 1) << 2);
    int y4_idx = y8_idx + ((k >> 1) << 2);
    unsigned int sse = 0;
    int sum = 0;
    if (x4_idx < pixels_wide && y4_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      } else {
        s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      }
#else
      s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
      if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x16_idx, int y16_idx, v16x16 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide, int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    unsigned int sse = 0;
    int sum = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      } else {
        s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      }
#else
      s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
      if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
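// In the two fill_variance_*avg() helpers above, each sub-block contributes a
// single sample (the difference of the source and destination averages), so
// the tree built from them measures variance over down-sampled averages
// rather than over raw pixels.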
// Check if most of the superblock is skin content, and if so, force split to
// 32x32, and set x->sb_is_skin for use in mode selection.
static int skin_sb_split(VP9_COMP *cpi, MACROBLOCK *x, const int low_res,
                         int mi_row, int mi_col, int *force_split) {
  VP9_COMMON *const cm = &cpi->common;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth) return 0;
#endif
  // Avoid checking superblocks on/near boundary and avoid low resolutions.
  // Note superblock may still pick 64X64 if y_sad is very small
  // (i.e., y_sad < cpi->vbp_threshold_sad) below. For now leave this as is.
  if (!low_res && (mi_col >= 8 && mi_col + 8 < cm->mi_cols && mi_row >= 8 &&
                   mi_row + 8 < cm->mi_rows)) {
    int num_16x16_skin = 0;
    int num_16x16_nonskin = 0;
    uint8_t *ysignal = x->plane[0].src.buf;
    uint8_t *usignal = x->plane[1].src.buf;
    uint8_t *vsignal = x->plane[2].src.buf;
    int sp = x->plane[0].src.stride;
    int spuv = x->plane[1].src.stride;
    const int block_index = mi_row * cm->mi_cols + mi_col;
    const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
    const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64];
    const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
    const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
    // Loop through the 16x16 sub-blocks.
    int i, j;
    for (i = 0; i < ymis; i += 2) {
      for (j = 0; j < xmis; j += 2) {
        int bl_index = block_index + i * cm->mi_cols + j;
        int bl_index1 = bl_index + 1;
        int bl_index2 = bl_index + cm->mi_cols;
        int bl_index3 = bl_index2 + 1;
        int consec_zeromv =
            VPXMIN(cpi->consec_zero_mv[bl_index],
                   VPXMIN(cpi->consec_zero_mv[bl_index1],
                          VPXMIN(cpi->consec_zero_mv[bl_index2],
                                 cpi->consec_zero_mv[bl_index3])));
        int is_skin = vp9_compute_skin_block(
            ysignal, usignal, vsignal, sp, spuv, BLOCK_16X16, consec_zeromv, 0);
        num_16x16_skin += is_skin;
        num_16x16_nonskin += (1 - is_skin);
        if (num_16x16_nonskin > 3) {
          // Exit loop if at least 4 of the 16x16 blocks are not skin.
          i = ymis;
          break;
        }
        ysignal += 16;
        usignal += 8;
        vsignal += 8;
      }
      ysignal += (sp << 4) - 64;
      usignal += (spuv << 3) - 32;
      vsignal += (spuv << 3) - 32;
    }
    if (num_16x16_skin > 12) {
      *force_split = 1;
      return 1;
    }
  }
  return 0;
}
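// Layout of x->variance_low[25]: index 0 covers 64x64, 1-2 the 64x32 halves,
// 3-4 the 32x64 halves, 5-8 the 32x32 quarters, and 9-24 the 16x16
// sub-blocks, matching the indexing used below.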
static void set_low_temp_var_flag(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                  v64x64 *vt, int64_t thresholds[],
                                  MV_REFERENCE_FRAME ref_frame_partition,
                                  int mi_col, int mi_row) {
  int i, j;
  VP9_COMMON *const cm = &cpi->common;
  const int mv_thr = cm->width > 640 ? 8 : 4;
  // Check temporal variance for bsize >= 16x16, if LAST_FRAME was selected and
  // int_pro mv is small. If the temporal variance is small, set the flag
  // variance_low for the block. The variance threshold can be adjusted; the
  // higher, the more aggressive.
  if (ref_frame_partition == LAST_FRAME &&
      (cpi->sf.short_circuit_low_temp_var == 1 ||
       (xd->mi[0]->mv[0].as_mv.col < mv_thr &&
        xd->mi[0]->mv[0].as_mv.col > -mv_thr &&
        xd->mi[0]->mv[0].as_mv.row < mv_thr &&
        xd->mi[0]->mv[0].as_mv.row > -mv_thr))) {
    if (xd->mi[0]->sb_type == BLOCK_64X64) {
      if ((vt->part_variances).none.variance < (thresholds[0] >> 1))
        x->variance_low[0] = 1;
    } else if (xd->mi[0]->sb_type == BLOCK_64X32) {
      for (i = 0; i < 2; i++) {
        if (vt->part_variances.horz[i].variance < (thresholds[0] >> 2))
          x->variance_low[i + 1] = 1;
      }
    } else if (xd->mi[0]->sb_type == BLOCK_32X64) {
      for (i = 0; i < 2; i++) {
        if (vt->part_variances.vert[i].variance < (thresholds[0] >> 2))
          x->variance_low[i + 3] = 1;
      }
    } else {
      for (i = 0; i < 4; i++) {
        const int idx[4][2] = { { 0, 0 }, { 0, 4 }, { 4, 0 }, { 4, 4 } };
        const int idx_str =
            cm->mi_stride * (mi_row + idx[i][0]) + mi_col + idx[i][1];
        MODE_INFO **this_mi = cm->mi_grid_visible + idx_str;

        if (cm->mi_cols <= mi_col + idx[i][1] ||
            cm->mi_rows <= mi_row + idx[i][0])
          continue;

        if ((*this_mi)->sb_type == BLOCK_32X32) {
          int64_t threshold_32x32 = (cpi->sf.short_circuit_low_temp_var == 1 ||
                                     cpi->sf.short_circuit_low_temp_var == 3)
                                        ? ((5 * thresholds[1]) >> 3)
                                        : (thresholds[1] >> 1);
          if (vt->split[i].part_variances.none.variance < threshold_32x32)
            x->variance_low[i + 5] = 1;
        } else if (cpi->sf.short_circuit_low_temp_var >= 2) {
          // For 32x16 and 16x32 blocks, the flag is set on each 16x16 block
          // inside.
          if ((*this_mi)->sb_type == BLOCK_16X16 ||
              (*this_mi)->sb_type == BLOCK_32X16 ||
              (*this_mi)->sb_type == BLOCK_16X32) {
            for (j = 0; j < 4; j++) {
              if (vt->split[i].split[j].part_variances.none.variance <
                  (thresholds[2] >> 8))
                x->variance_low[(i << 2) + j + 9] = 1;
            }
          }
        }
      }
    }
  }
}
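// Recursively copy the partitioning stored in cpi->prev_partition (one
// BLOCK_SIZE entry per mode-info unit) onto the current superblock.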
static void copy_partitioning_helper(VP9_COMP *cpi, MACROBLOCK *x,
                                     MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                     int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  BLOCK_SIZE *prev_part = cpi->prev_partition;
  int start_pos = mi_row * cm->mi_stride + mi_col;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) / 4;
  BLOCK_SIZE subsize;
  PARTITION_TYPE partition;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  partition = partition_lookup[bsl][prev_part[start_pos]];
  subsize = get_subsize(bsize, partition);

  if (subsize < BLOCK_8X8) {
    set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
        break;
      case PARTITION_HORZ:
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row + bs, mi_col, subsize);
        break;
      case PARTITION_VERT:
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row, mi_col + bs, subsize);
        break;
      case PARTITION_SPLIT:
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row, mi_col);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row + bs, mi_col);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row, mi_col + bs);
        copy_partitioning_helper(cpi, x, xd, subsize, mi_row + bs, mi_col + bs);
        break;
      default: assert(0);
    }
  }
}
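// Gate for reusing the previous frame's partition: content must have been
// stable since the last key frame, no resize pending, the segment must be the
// base (non-boosted) cyclic-refresh segment, and this superblock must not yet
// have been copied cpi->max_copied_frame times in a row.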
static int copy_partitioning(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                             int mi_row, int mi_col, int segment_id,
                             int sb_offset) {
  int svc_copy_allowed = 1;
  int frames_since_key_thresh = 1;
  if (cpi->use_svc) {
    // For SVC, don't allow copy if the base spatial layer is a key frame, or
    // if the frame is not a temporal enhancement layer frame.
    int layer = LAYER_IDS_TO_IDX(0, cpi->svc.temporal_layer_id,
                                 cpi->svc.number_temporal_layers);
    const LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer];
    if (lc->is_key_frame ||
        (cpi->svc.temporal_layer_id != cpi->svc.number_temporal_layers - 1 &&
         cpi->svc.number_temporal_layers > 1))
      svc_copy_allowed = 0;
    frames_since_key_thresh = cpi->svc.number_spatial_layers << 1;
  }
  if (cpi->rc.frames_since_key > frames_since_key_thresh && svc_copy_allowed &&
      !cpi->resize_pending && segment_id == CR_SEGMENT_ID_BASE &&
      cpi->prev_segment_id[sb_offset] == CR_SEGMENT_ID_BASE &&
      cpi->copied_frame_cnt[sb_offset] < cpi->max_copied_frame) {
    if (cpi->prev_partition != NULL) {
      copy_partitioning_helper(cpi, x, xd, BLOCK_64X64, mi_row, mi_col);
      cpi->copied_frame_cnt[sb_offset] += 1;
      memcpy(x->variance_low, &(cpi->prev_variance_low[sb_offset * 25]),
             sizeof(x->variance_low));
      return 1;
    }
  }
  return 0;
}
static void update_prev_partition(VP9_COMP *cpi, BLOCK_SIZE bsize, int mi_row,
                                  int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  BLOCK_SIZE *prev_part = cpi->prev_partition;
  int start_pos = mi_row * cm->mi_stride + mi_col;
  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) / 4;
  BLOCK_SIZE subsize;
  PARTITION_TYPE partition;
  const MODE_INFO *mi = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  mi = cm->mi_grid_visible[start_pos];
  partition = partition_lookup[bsl][mi->sb_type];
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    prev_part[start_pos] = bsize;
  } else {
    switch (partition) {
      case PARTITION_NONE: prev_part[start_pos] = bsize; break;
      case PARTITION_HORZ:
        prev_part[start_pos] = subsize;
        if (mi_row + bs < cm->mi_rows)
          prev_part[start_pos + bs * cm->mi_stride] = subsize;
        break;
      case PARTITION_VERT:
        prev_part[start_pos] = subsize;
        if (mi_col + bs < cm->mi_cols) prev_part[start_pos + bs] = subsize;
        break;
      case PARTITION_SPLIT:
        update_prev_partition(cpi, subsize, mi_row, mi_col);
        update_prev_partition(cpi, subsize, mi_row + bs, mi_col);
        update_prev_partition(cpi, subsize, mi_row, mi_col + bs);
        update_prev_partition(cpi, subsize, mi_row + bs, mi_col + bs);
        break;
      default: assert(0);
    }
  }
}
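// Mark a chroma plane as color sensitive when its SAD is significant relative
// to luma (uv_sad > y_sad / 4); the flags are consumed later during mode
// selection.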
static void chroma_check(VP9_COMP *cpi, MACROBLOCK *x, int bsize,
                         unsigned int y_sad, int is_key_frame) {
  int i;
  MACROBLOCKD *xd = &x->e_mbd;

  if (is_key_frame) return;

  // For speed >= 8, avoid the chroma check if y_sad is above threshold.
  if (cpi->oxcf.speed >= 8) {
    if (y_sad > cpi->vbp_thresholds[1] &&
        (!cpi->noise_estimate.enabled ||
         vp9_noise_estimate_extract_level(&cpi->noise_estimate) < kMedium))
      return;
  }

  for (i = 1; i <= 2; ++i) {
    unsigned int uv_sad = UINT_MAX;
    struct macroblock_plane *p = &x->plane[i];
    struct macroblockd_plane *pd = &xd->plane[i];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);

    if (bs != BLOCK_INVALID)
      uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf,
                                   pd->dst.stride);

    // TODO(marpan): Investigate if we should lower this threshold if
    // superblock is detected as skin.
    x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
  }
}
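// Classify superblock content from the SAD and variance between the current
// and last source frames: low/high SAD crossed with low/high sum-difference,
// plus special states for a large lighting change (kLowVarHighSumdiff) and
// very high motion (kVeryHighSad).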
static void avg_source_sad(VP9_COMP *cpi, MACROBLOCK *x, int shift,
                           int sb_offset) {
  unsigned int tmp_sse;
  uint64_t tmp_sad;
  unsigned int tmp_variance;
  const BLOCK_SIZE bsize = BLOCK_64X64;
  uint8_t *src_y = cpi->Source->y_buffer;
  int src_ystride = cpi->Source->y_stride;
  uint8_t *last_src_y = cpi->Last_Source->y_buffer;
  int last_src_ystride = cpi->Last_Source->y_stride;
  uint64_t avg_source_sad_threshold = 10000;
  uint64_t avg_source_sad_threshold2 = 12000;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cpi->common.use_highbitdepth) return;
#endif
  src_y += shift;
  last_src_y += shift;
  tmp_sad =
      cpi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y, last_src_ystride);
  tmp_variance = vpx_variance64x64(src_y, src_ystride, last_src_y,
                                   last_src_ystride, &tmp_sse);
  // Note: for a 64x64 block (4096 pixels), variance = sse - ((sum * sum) >>
  // 12), so tmp_sse - tmp_variance = ((sum * sum) >> 12), a measure of the
  // average frame difference (sum diff).
  if (tmp_sad < avg_source_sad_threshold)
    x->content_state_sb = ((tmp_sse - tmp_variance) < 25) ? kLowSadLowSumdiff
                                                          : kLowSadHighSumdiff;
  else
    x->content_state_sb = ((tmp_sse - tmp_variance) < 25) ? kHighSadLowSumdiff
                                                          : kHighSadHighSumdiff;

  // Detect large lighting change.
  if (tmp_variance < (tmp_sse >> 3) && (tmp_sse - tmp_variance) > 10000)
    x->content_state_sb = kLowVarHighSumdiff;

  if (tmp_sad > (avg_source_sad_threshold << 1))
    x->content_state_sb = kVeryHighSad;

  if (cpi->content_state_sb_fd != NULL) {
    if (tmp_sad < avg_source_sad_threshold2) {
      // Cap the increment to 255.
      if (cpi->content_state_sb_fd[sb_offset] < 255)
        cpi->content_state_sb_fd[sb_offset]++;
    } else {
      cpi->content_state_sb_fd[sb_offset] = 0;
    }
  }
}
// This function chooses partitioning based on the variance between the source
// and the reconstructed last frame, where variance is computed for
// down-sampled inputs.
static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                               MACROBLOCK *x, int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int i, j, k, m;
  v64x64 vt;
  v16x16 vt2[16];
  int force_split[21];
  int avg_32x32;
  int max_var_32x32 = 0;
  int min_var_32x32 = INT_MAX;
  int var_32x32;
  int avg_16x16[4];
  int maxvar_16x16[4];
  int minvar_16x16[4];
  int64_t threshold_4x4avg;
  NOISE_LEVEL noise_level = kLow;
  int content_state = 0;
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  unsigned int y_sad = UINT_MAX;
  BLOCK_SIZE bsize = BLOCK_64X64;
  // Ref frame used in partitioning.
  MV_REFERENCE_FRAME ref_frame_partition = LAST_FRAME;
  int pixels_wide = 64, pixels_high = 64;
  int64_t thresholds[4] = { cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
                            cpi->vbp_thresholds[2], cpi->vbp_thresholds[3] };

  // For the variance computation under SVC mode, we treat the frame as key if
  // the reference (base layer frame) is key frame (i.e., is_key_frame == 1).
  const int is_key_frame =
      (cm->frame_type == KEY_FRAME ||
       (is_one_pass_cbr_svc(cpi) &&
        cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame));
  // Always use 4x4 partition for key frame.
  const int use_4x4_partition = cm->frame_type == KEY_FRAME;
  const int low_res = (cm->width <= 352 && cm->height <= 288);
  int variance4x4downsample[16];
  int segment_id;
  int sb_offset = (cm->mi_stride >> 3) * (mi_row >> 3) + (mi_col >> 3);
  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
  segment_id = xd->mi[0]->segment_id;

  if (cpi->sf.use_source_sad && !is_key_frame) {
    int sb_offset2 = ((cm->mi_cols + 7) >> 3) * (mi_row >> 3) + (mi_col >> 3);
    content_state = x->content_state_sb;
    x->skip_low_source_sad = (content_state == kLowSadLowSumdiff ||
                              content_state == kLowSadHighSumdiff)
                                 ? 1
                                 : 0;
    x->lowvar_highsumdiff = (content_state == kLowVarHighSumdiff) ? 1 : 0;
    if (cpi->content_state_sb_fd != NULL)
      x->last_sb_high_content = cpi->content_state_sb_fd[sb_offset2];
    // If source_sad is low, copy the partition without computing the y_sad.
    if (x->skip_low_source_sad && cpi->sf.copy_partition_flag &&
        copy_partitioning(cpi, x, xd, mi_row, mi_col, segment_id, sb_offset)) {
      return 0;
    }
  }
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
      cyclic_refresh_segment_id_boosted(segment_id)) {
    int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
    set_vbp_thresholds(cpi, thresholds, q, content_state);
  } else {
    set_vbp_thresholds(cpi, thresholds, cm->base_qindex, content_state);
  }
  // For non-keyframes, disable 4x4 average for low resolution when speed >= 8.
  threshold_4x4avg = (cpi->oxcf.speed < 8) ? thresholds[1] << 1 : INT64_MAX;

  memset(x->variance_low, 0, sizeof(x->variance_low));

  if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
  // 5-20 for the 16x16 blocks.
  force_split[0] = 0;
  if (!is_key_frame) {
    // In the case of spatial/temporal scalable coding, the assumption here is
    // that the temporal reference frame will always be of type LAST_FRAME.
    // TODO(marpan): If that assumption is broken, we need to revisit this code.
    MODE_INFO *mi = xd->mi[0];
    YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
    const YV12_BUFFER_CONFIG *yv12_g = NULL;
    unsigned int y_sad_g, y_sad_thr, y_sad_last;
    bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 +
            (mi_row + 4 < cm->mi_rows);

    assert(yv12 != NULL);

    if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id)) {
      // For now, GOLDEN will not be used for non-zero spatial layers, since
      // it may not be a temporal reference.
      yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    }

    // Only compute y_sad_g (sad for golden reference) for speed < 8.
    if (cpi->oxcf.speed < 8 && yv12_g && yv12_g != yv12 &&
        (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(
          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
          xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }

    if (cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR &&
        cpi->rc.is_src_frame_alt_ref) {
      yv12 = get_ref_frame_buffer(cpi, ALTREF_FRAME);
      vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                           &cm->frame_refs[ALTREF_FRAME - 1].sf);
      mi->ref_frame[0] = ALTREF_FRAME;
      y_sad_g = UINT_MAX;
    } else {
      vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                           &cm->frame_refs[LAST_FRAME - 1].sf);
      mi->ref_frame[0] = LAST_FRAME;
    }
    mi->ref_frame[1] = NONE;
    mi->sb_type = BLOCK_64X64;
    mi->mv[0].as_int = 0;
    mi->interp_filter = BILINEAR;

    if (cpi->oxcf.speed >= 8 && !low_res)
      y_sad = cpi->fn_ptr[bsize].sdf(
          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
          xd->plane[0].pre[0].stride);
    else
      y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);

    y_sad_last = y_sad;
    // Pick ref frame for partitioning, bias last frame when y_sad_g and y_sad
    // are close if short_circuit_low_temp_var is on.
    y_sad_thr = cpi->sf.short_circuit_low_temp_var ? (y_sad * 7) >> 3 : y_sad;
    if (y_sad_g < y_sad_thr) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mi->ref_frame[0] = GOLDEN_FRAME;
      mi->mv[0].as_int = 0;
      y_sad = y_sad_g;
      ref_frame_partition = GOLDEN_FRAME;
    } else {
      x->pred_mv[LAST_FRAME] = mi->mv[0].as_mv;
      ref_frame_partition = LAST_FRAME;
    }

    set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);

    if (cpi->use_skin_detection)
      x->sb_is_skin =
          skin_sb_split(cpi, x, low_res, mi_row, mi_col, force_split);
    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;

    // If the y_sad is very small, take 64x64 as partition and exit.
    // Don't check on boosted segment for now, as 64x64 is suppressed there.
    if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) {
      const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
      const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
      if (mi_col + block_width / 2 < cm->mi_cols &&
          mi_row + block_height / 2 < cm->mi_rows) {
        set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64);
        x->variance_low[0] = 1;
        chroma_check(cpi, x, bsize, y_sad, is_key_frame);
        return 0;
      }
    }

    // If the y_sad is small enough, copy the partition of the superblock in
    // the last frame to the current frame, but only if the last frame is not
    // a key frame. Stop the copy every cpi->max_copied_frame to refresh the
    // partition.
    // TODO(jianj) : tune the threshold.
    if (cpi->sf.copy_partition_flag && y_sad_last < cpi->vbp_threshold_copy &&
        copy_partitioning(cpi, x, xd, mi_row, mi_col, segment_id, sb_offset)) {
      chroma_check(cpi, x, bsize, y_sad, is_key_frame);
      return 0;
    }
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break;
        case 12: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); break;
        case 8:
        default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    const int i2 = i << 2;
    force_split[i + 1] = 0;
    avg_16x16[i] = 0;
    maxvar_16x16[i] = 0;
    minvar_16x16[i] = INT_MAX;
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      const int split_index = 5 + i2 + j;
      v16x16 *vst = &vt.split[i].split[j];
      force_split[split_index] = 0;
      variance4x4downsample[i2 + j] = 0;
      if (!is_key_frame) {
        fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
#if CONFIG_VP9_HIGHBITDEPTH
                             xd->cur_buf->flags,
#endif
                             pixels_wide, pixels_high, is_key_frame);
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
        get_variance(&vt.split[i].split[j].part_variances.none);
        avg_16x16[i] += vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance < minvar_16x16[i])
          minvar_16x16[i] = vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance > maxvar_16x16[i])
          maxvar_16x16[i] = vt.split[i].split[j].part_variances.none.variance;
        if (vt.split[i].split[j].part_variances.none.variance > thresholds[2]) {
          // 16X16 variance is above threshold for split, so force split to 8x8
          // for this 16x16 block (this also forces splits for upper levels).
          force_split[split_index] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        } else if (cpi->oxcf.speed < 8 &&
                   vt.split[i].split[j].part_variances.none.variance >
                       thresholds[1] &&
                   !cyclic_refresh_segment_id_boosted(segment_id)) {
          // We have some nominal amount of 16x16 variance (based on average),
          // compute the minmax over the 8x8 sub-blocks, and if above threshold,
          // force split to 8x8 block for this 16x16 block.
          int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                                          xd->cur_buf->flags,
#endif
                                          pixels_wide, pixels_high);
          if (minmax > cpi->vbp_threshold_minmax) {
            force_split[split_index] = 1;
            force_split[i + 1] = 1;
            force_split[0] = 1;
          }
        }
      }
      if (is_key_frame ||
          (low_res && vt.split[i].split[j].part_variances.none.variance >
                          threshold_4x4avg)) {
        force_split[split_index] = 0;
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
          v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
          fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
#if CONFIG_VP9_HIGHBITDEPTH
                               xd->cur_buf->flags,
#endif
                               pixels_wide, pixels_high, is_key_frame);
        }
      }
    }
  }
  if (cpi->noise_estimate.enabled)
    noise_level = vp9_noise_estimate_extract_level(&cpi->noise_estimate);
  // Fill the rest of the variance tree by summing split partition values.
  avg_32x32 = 0;
  for (i = 0; i < 4; i++) {
    const int i2 = i << 2;
    for (j = 0; j < 4; j++) {
      if (variance4x4downsample[i2 + j] == 1) {
        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : &vt.split[i].split[j];
        for (m = 0; m < 4; m++) fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
        fill_variance_tree(vtemp, BLOCK_16X16);
        // If variance of this 16x16 block is above the threshold, force block
        // to split. This also forces a split on the upper levels.
        get_variance(&vtemp->part_variances.none);
        if (vtemp->part_variances.none.variance > thresholds[2]) {
          force_split[5 + i2 + j] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        }
      }
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
    // If variance of this 32x32 block is above the threshold, or if it is
    // above (some threshold of) the average variance over the sub-16x16
    // blocks, then force this block to split. This also forces a split on
    // the upper (64x64) level.
    if (!force_split[i + 1]) {
      get_variance(&vt.split[i].part_variances.none);
      var_32x32 = vt.split[i].part_variances.none.variance;
      max_var_32x32 = VPXMAX(var_32x32, max_var_32x32);
      min_var_32x32 = VPXMIN(var_32x32, min_var_32x32);
      if (vt.split[i].part_variances.none.variance > thresholds[1] ||
          (!is_key_frame &&
           vt.split[i].part_variances.none.variance > (thresholds[1] >> 1) &&
           vt.split[i].part_variances.none.variance > (avg_16x16[i] >> 1))) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      } else if (!is_key_frame && noise_level < kLow && cm->height <= 360 &&
                 (maxvar_16x16[i] - minvar_16x16[i]) > (thresholds[1] >> 1) &&
                 maxvar_16x16[i] > thresholds[1]) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      }
      avg_32x32 += var_32x32;
    }
  }
  if (!force_split[0]) {
    fill_variance_tree(&vt, BLOCK_64X64);
    get_variance(&vt.part_variances.none);
    // If variance of this 64x64 block is above (some threshold of) the average
    // variance over the sub-32x32 blocks, then force this block to split.
    // Only checking this for noise level >= medium for now.
    if (!is_key_frame && noise_level >= kMedium &&
        vt.part_variances.none.variance > (9 * avg_32x32) >> 5)
      force_split[0] = 1;
    // Else if the maximum 32x32 variance minus the minimum 32x32 variance in
    // a 64x64 block is greater than threshold and the maximum 32x32 variance
    // is above a minimum threshold, then force the split of a 64x64 block.
    // Only check this for low noise.
    else if (!is_key_frame && noise_level < kMedium &&
             (max_var_32x32 - min_var_32x32) > 3 * (thresholds[0] >> 3) &&
             max_var_32x32 > thresholds[0] >> 1)
      force_split[0] = 1;
  }
  // Now go through the entire structure, splitting every block size until
  // we get to one that's got a variance lower than our threshold.
  if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
      !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                           thresholds[0], BLOCK_16X16, force_split[0])) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      const int i2 = i << 2;
      if (!set_vt_partitioning(cpi, x, xd, &vt.split[i], BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx),
                               thresholds[1], BLOCK_16X16,
                               force_split[i + 1])) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use vt2
          // in set_vt_partitioning(), otherwise use vt.
          v16x16 *vtemp = (!is_key_frame && variance4x4downsample[i2 + j] == 1)
                              ? &vt2[i2 + j]
                              : &vt.split[i].split[j];
          if (!set_vt_partitioning(
                  cpi, x, xd, vtemp, BLOCK_16X16, mi_row + y32_idx + y16_idx,
                  mi_col + x32_idx + x16_idx, thresholds[2], cpi->vbp_bsize_min,
                  force_split[5 + i2 + j])) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              if (use_4x4_partition) {
                if (!set_vt_partitioning(cpi, x, xd, &vtemp->split[k],
                                         BLOCK_8X8,
                                         mi_row + y32_idx + y16_idx + y8_idx,
                                         mi_col + x32_idx + x16_idx + x8_idx,
                                         thresholds[3], BLOCK_8X8, 0)) {
                  set_block_size(
                      cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
                      (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_4X4);
                }
              } else {
                set_block_size(
                    cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
                    (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_8X8);
              }
            }
          }
        }
      }
    }
  }
  if (cm->frame_type != KEY_FRAME && cpi->sf.copy_partition_flag) {
    update_prev_partition(cpi, BLOCK_64X64, mi_row, mi_col);
    cpi->prev_segment_id[sb_offset] = segment_id;
    memcpy(&(cpi->prev_variance_low[sb_offset * 25]), x->variance_low,
           sizeof(x->variance_low));
    // Reset the counter for copy partitioning.
    if (cpi->copied_frame_cnt[sb_offset] == cpi->max_copied_frame)
      cpi->copied_frame_cnt[sb_offset] = 0;
  }

  if (cpi->sf.short_circuit_low_temp_var) {
    set_low_temp_var_flag(cpi, x, xd, &vt, thresholds, ref_frame_partition,
                          mi_col, mi_row);
  }

  chroma_check(cpi, x, bsize, y_sad, is_key_frame);
  return 0;
}
static void update_state(VP9_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  RD_COUNTS *const rdc = &td->rd_counts;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MODE_INFO *const xdmi = xd->mi[0];
  MODE_INFO *mi_addr = xd->mi[0];
  const struct segmentation *const seg = &cm->seg;
  const int bw = num_8x8_blocks_wide_lookup[mi->sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
  MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;

  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->sb_type == bsize);

  *mi_addr = *mi;
  *x->mbmi_ext = ctx->mbmi_ext;

  // If segmentation is in use:
  if (seg->enabled) {
    // For in-frame complexity AQ, copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map =
          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
      mi_addr->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else for cyclic refresh mode update the segment map, set the segment id
    // and then update the quantizer.
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
      vp9_cyclic_refresh_update_segment(cpi, xd->mi[0], mi_row, mi_col, bsize,
                                        ctx->rate, ctx->dist, x->skip, p);
    }
  }

  max_plane = is_inter_block(xdmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][1];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];
  }

  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  }

  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it.
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
          (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi[x_idx + y * mis] = mi_addr;
      }

  if (cpi->oxcf.aq_mode != NO_AQ) vp9_init_plane_quantizers(cpi, x);

  if (is_inter_block(xdmi) && xdmi->sb_type < BLOCK_8X8) {
    xdmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    xdmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }

  x->skip = ctx->skip;
  memcpy(x->zcoeff_blk[xdmi->tx_size], ctx->zcoeff_blk,
         sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);

  if (!output_enabled) return;

#if CONFIG_INTERNAL_STATS
  if (frame_is_intra_only(cm)) {
    static const int kf_mode_index[] = {
      THR_DC /*DC_PRED*/,          THR_V_PRED /*V_PRED*/,
      THR_H_PRED /*H_PRED*/,       THR_D45_PRED /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED /*D63_PRED*/,   THR_TM /*TM_PRED*/,
    };
    ++cpi->mode_chosen_counts[kf_mode_index[xdmi->mode]];
  } else {
    // Note how often each mode is chosen as best.
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif
  if (!frame_is_intra_only(cm)) {
    if (is_inter_block(xdmi)) {
      vp9_update_mv_count(td);

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = get_pred_context_switchable_interp(xd);
        ++td->counts->switchable_interp[ctx][xdmi->interp_filter];
      }
    }

    rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      rdc->filter_diff[i] += ctx->best_filter_diff[i];
  }

  for (h = 0; h < y_mis; ++h) {
    MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
    for (w = 0; w < x_mis; ++w) {
      MV_REF *const mv = frame_mv + w;
      mv->ref_frame[0] = mi->ref_frame[0];
      mv->ref_frame[1] = mi->ref_frame[1];
      mv->mv[0].as_int = mi->mv[0].as_int;
      mv->mv[1].as_int = mi->mv[1].as_int;
    }
  }
}
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
  const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
  int i;

  // Set current frame pointer.
  x->e_mbd.cur_buf = src;

  for (i = 0; i < MAX_MB_PLANE; i++)
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}
static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
                                   RD_COST *rd_cost, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  INTERP_FILTER filter_ref;

  filter_ref = get_pred_context_switchable_interp(xd);
  if (filter_ref == SWITCHABLE_FILTERS) filter_ref = EIGHTTAP;

  mi->sb_type = bsize;
  mi->mode = ZEROMV;
  mi->tx_size =
      VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[tx_mode]);
  mi->skip = 1;
  mi->uv_mode = DC_PRED;
  mi->ref_frame[0] = LAST_FRAME;
  mi->ref_frame[1] = NONE;
  mi->mv[0].as_int = 0;
  mi->interp_filter = filter_ref;

  xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
  x->skip = 1;

  vp9_rd_cost_init(rd_cost);
}
static int set_segment_rdmult(VP9_COMP *const cpi, MACROBLOCK *const x,
                              int8_t segment_id) {
  int segment_qindex;
  VP9_COMMON *const cm = &cpi->common;
  vp9_init_plane_quantizers(cpi, x);
  vpx_clear_system_state();
  segment_qindex = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
  return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}
1638 static void rd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data,
1639 MACROBLOCK *const x, int mi_row, int mi_col,
1640 RD_COST *rd_cost, BLOCK_SIZE bsize,
1641 PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
1642 VP9_COMMON *const cm = &cpi->common;
1643 TileInfo *const tile_info = &tile_data->tile_info;
1644 MACROBLOCKD *const xd = &x->e_mbd;
1646 struct macroblock_plane *const p = x->plane;
1647 struct macroblockd_plane *const pd = xd->plane;
1648 const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
1651 vpx_clear_system_state();
1653 // Use the lower precision, but faster, 32x32 fdct for mode selection.
1654 x->use_lp32x32fdct = 1;
1656 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
1658 mi->sb_type = bsize;
1660 for (i = 0; i < MAX_MB_PLANE; ++i) {
1661 p[i].coeff = ctx->coeff_pbuf[i][0];
1662 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
1663 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
1664 p[i].eobs = ctx->eobs_pbuf[i][0];
1668 ctx->pred_pixel_ready = 0;
1671 // Set to zero to make sure we do not use the previous encoded frame stats
1674 #if CONFIG_VP9_HIGHBITDEPTH
1675 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1676 x->source_variance = vp9_high_get_sby_perpixel_variance(
1677 cpi, &x->plane[0].src, bsize, xd->bd);
1679 x->source_variance =
1680 vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
1683 x->source_variance =
1684 vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
1685 #endif // CONFIG_VP9_HIGHBITDEPTH
1687 // Save rdmult before it might be changed, so it can be restored later.
1688 orig_rdmult = x->rdmult;
1690 if ((cpi->sf.tx_domain_thresh > 0.0) || (cpi->sf.quant_opt_thresh > 0.0)) {
1691 double logvar = vp9_log_block_var(cpi, x, bsize);
1692 // Check block complexity as part of descision on using pixel or transform
1693 // domain distortion in rd tests.
1694 x->block_tx_domain = cpi->sf.allow_txfm_domain_distortion &&
1695 (logvar >= cpi->sf.tx_domain_thresh);
1697 // Check block complexity as part of descision on using quantized
1698 // coefficient optimisation inside the rd loop.
1699 x->block_qcoeff_opt =
1700 cpi->sf.allow_quant_coeff_opt && (logvar <= cpi->sf.quant_opt_thresh);
1702 x->block_tx_domain = cpi->sf.allow_txfm_domain_distortion;
1703 x->block_qcoeff_opt = cpi->sf.allow_quant_coeff_opt;
1706 if (aq_mode == VARIANCE_AQ) {
1708 bsize <= BLOCK_16X16 ? x->mb_energy : vp9_block_energy(cpi, x, bsize);
1710 if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
1711 cpi->force_update_segmentation ||
1712 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
1713 mi->segment_id = vp9_vaq_segment_id(energy);
1714 } else {
1715 const uint8_t *const map =
1716 cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
1717 mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
1718 }
1719 x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
1720 } else if (aq_mode == LOOKAHEAD_AQ) {
1721 const uint8_t *const map = cpi->segmentation_map;
1723 // Deliberately leave rdmult unchanged here.
1724 mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
1725 } else if (aq_mode == EQUATOR360_AQ) {
1726 if (cm->frame_type == KEY_FRAME || cpi->force_update_segmentation) {
1727 mi->segment_id = vp9_360aq_segment_id(mi_row, cm->mi_rows);
1728 } else {
1729 const uint8_t *const map =
1730 cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
1731 mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
1732 }
1733 x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
1734 } else if (aq_mode == COMPLEXITY_AQ) {
1735 x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id);
1736 } else if (aq_mode == CYCLIC_REFRESH_AQ) {
1737 const uint8_t *const map =
1738 cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
1739 // If segment is boosted, use rdmult for that segment.
1740 if (cyclic_refresh_segment_id_boosted(
1741 get_segment_id(cm, map, bsize, mi_row, mi_col)))
1742 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
1743 }
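// Whichever AQ mode is active, mi->segment_id and x->rdmult are now
// consistent, so the mode search below prices rate against the per-segment
// lambda selected above.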
1745 // Find best coding mode & reconstruct the MB so it is available
1746 // as a predictor for MBs that follow in the SB
1747 if (frame_is_intra_only(cm)) {
1748 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
1749 } else {
1750 if (bsize >= BLOCK_8X8) {
1751 if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
1752 vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
1753 ctx, best_rd);
1754 else
1755 vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
1756 bsize, ctx, best_rd);
1757 } else {
1758 vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
1759 bsize, ctx, best_rd);
1760 }
1761 }
1763 // Examine the resulting rate and for AQ mode 2 make a segment choice.
1764 if ((rd_cost->rate != INT_MAX) && (aq_mode == COMPLEXITY_AQ) &&
1765 (bsize >= BLOCK_16X16) &&
1766 (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
1767 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
1768 vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
1769 }
1771 x->rdmult = orig_rdmult;
1773 // TODO(jingning) The rate-distortion optimization flow needs to be
1774 // refactored to provide proper exit/return handle.
1775 if (rd_cost->rate == INT_MAX) rd_cost->rdcost = INT64_MAX;
1777 ctx->rate = rd_cost->rate;
1778 ctx->dist = rd_cost->dist;
1779 }
1781 static void update_stats(VP9_COMMON *cm, ThreadData *td) {
1782 const MACROBLOCK *x = &td->mb;
1783 const MACROBLOCKD *const xd = &x->e_mbd;
1784 const MODE_INFO *const mi = xd->mi[0];
1785 const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1786 const BLOCK_SIZE bsize = mi->sb_type;
1788 if (!frame_is_intra_only(cm)) {
1789 FRAME_COUNTS *const counts = td->counts;
1790 const int inter_block = is_inter_block(mi);
1791 const int seg_ref_active =
1792 segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_REF_FRAME);
1793 if (!seg_ref_active) {
1794 counts->intra_inter[get_intra_inter_context(xd)][inter_block]++;
1795 // If the segment reference feature is enabled we have only a single
1796 // reference frame allowed for the segment so exclude it from
1797 // the reference frame counts used to work out probabilities.
1798 if (inter_block) {
1799 const MV_REFERENCE_FRAME ref0 = mi->ref_frame[0];
1800 if (cm->reference_mode == REFERENCE_MODE_SELECT)
1801 counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
1802 [has_second_ref(mi)]++;
1804 if (has_second_ref(mi)) {
1805 counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
1806 [ref0 == GOLDEN_FRAME]++;
1807 } else {
1808 counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
1809 [ref0 != LAST_FRAME]++;
1810 if (ref0 != LAST_FRAME)
1811 counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
1812 [ref0 != GOLDEN_FRAME]++;
1813 }
1814 }
1815 }
1816 if (inter_block &&
1817 !segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) {
1818 const int mode_ctx = mbmi_ext->mode_context[mi->ref_frame[0]];
1819 if (bsize >= BLOCK_8X8) {
1820 const PREDICTION_MODE mode = mi->mode;
1821 ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
1822 } else {
1823 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
1824 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
1825 int idx, idy;
1826 for (idy = 0; idy < 2; idy += num_4x4_h) {
1827 for (idx = 0; idx < 2; idx += num_4x4_w) {
1828 const int j = idy * 2 + idx;
1829 const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
1830 ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
1831 }
1832 }
1833 }
1834 }
1835 }
1836 }
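// These counters drive VP9's backward adaptation: once the frame is coded,
// the entropy contexts used for the next frame are adapted from these counts.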
1838 static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
1839 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
1840 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
1841 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
1842 BLOCK_SIZE bsize) {
1843 MACROBLOCKD *const xd = &x->e_mbd;
1844 int p;
1845 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1846 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1847 int mi_width = num_8x8_blocks_wide_lookup[bsize];
1848 int mi_height = num_8x8_blocks_high_lookup[bsize];
1849 for (p = 0; p < MAX_MB_PLANE; p++) {
1850 memcpy(xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
1851 a + num_4x4_blocks_wide * p,
1852 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
1853 xd->plane[p].subsampling_x);
1854 memcpy(xd->left_context[p] +
1855 ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
1856 l + num_4x4_blocks_high * p,
1857 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
1858 xd->plane[p].subsampling_y);
1859 }
1860 memcpy(xd->above_seg_context + mi_col, sa,
1861 sizeof(*xd->above_seg_context) * mi_width);
1862 memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
1863 sizeof(xd->left_seg_context[0]) * mi_height);
1864 }
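// save_context()/restore_context() form a pair: the partition search can
// trial-encode one candidate partitioning, then rewind the above/left
// entropy and partition contexts to evaluate an alternative from an
// identical starting state.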
1866 static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
1867 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
1868 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
1869 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
1870 BLOCK_SIZE bsize) {
1871 const MACROBLOCKD *const xd = &x->e_mbd;
1872 int p;
1873 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1874 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1875 int mi_width = num_8x8_blocks_wide_lookup[bsize];
1876 int mi_height = num_8x8_blocks_high_lookup[bsize];
1878 // buffer the above/left context information of the block in search.
1879 for (p = 0; p < MAX_MB_PLANE; ++p) {
1880 memcpy(a + num_4x4_blocks_wide * p,
1881 xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
1882 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
1883 xd->plane[p].subsampling_x);
1884 memcpy(l + num_4x4_blocks_high * p,
1885 xd->left_context[p] +
1886 ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
1887 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
1888 xd->plane[p].subsampling_y);
1889 }
1890 memcpy(sa, xd->above_seg_context + mi_col,
1891 sizeof(*xd->above_seg_context) * mi_width);
1892 memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
1893 sizeof(xd->left_seg_context[0]) * mi_height);
1894 }
1896 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, ThreadData *td,
1897 TOKENEXTRA **tp, int mi_row, int mi_col,
1898 int output_enabled, BLOCK_SIZE bsize,
1899 PICK_MODE_CONTEXT *ctx) {
1900 MACROBLOCK *const x = &td->mb;
1901 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
1902 update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
1903 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
1905 if (output_enabled) {
1906 update_stats(&cpi->common, td);
1908 (*tp)->token = EOSB_TOKEN;
1909 (*tp)++;
1910 }
1911 }
1913 static void encode_sb(VP9_COMP *cpi, ThreadData *td, const TileInfo *const tile,
1914 TOKENEXTRA **tp, int mi_row, int mi_col,
1915 int output_enabled, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
1916 VP9_COMMON *const cm = &cpi->common;
1917 MACROBLOCK *const x = &td->mb;
1918 MACROBLOCKD *const xd = &x->e_mbd;
1920 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
1921 int ctx;
1922 PARTITION_TYPE partition;
1923 BLOCK_SIZE subsize = bsize;
1925 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
1927 if (bsize >= BLOCK_8X8) {
1928 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
1929 subsize = get_subsize(bsize, pc_tree->partitioning);
1930 } else {
1931 ctx = 0;
1932 subsize = BLOCK_4X4;
1933 }
1935 partition = partition_lookup[bsl][subsize];
1936 if (output_enabled && bsize != BLOCK_4X4)
1937 td->counts->partition[ctx][partition]++;
1939 switch (partition) {
1940 case PARTITION_NONE:
1941 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1942 &pc_tree->none);
1943 break;
1944 case PARTITION_VERT:
1945 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1946 &pc_tree->vertical[0]);
1947 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
1948 encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
1949 subsize, &pc_tree->vertical[1]);
1950 }
1951 break;
1952 case PARTITION_HORZ:
1953 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1954 &pc_tree->horizontal[0]);
1955 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
1956 encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
1957 subsize, &pc_tree->horizontal[1]);
1958 }
1959 break;
1960 case PARTITION_SPLIT:
1961 if (bsize == BLOCK_8X8) {
1962 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1963 pc_tree->leaf_split[0]);
1964 } else {
1965 encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1966 pc_tree->split[0]);
1967 encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1968 subsize, pc_tree->split[1]);
1969 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1970 subsize, pc_tree->split[2]);
1971 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
1972 subsize, pc_tree->split[3]);
1973 }
1974 break;
1975 default: assert(0 && "Invalid partition type."); break;
1976 }
1978 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
1979 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
1980 }
1982 // Check to see if the given partition size is allowed for a specified number
1983 // of 8x8 block rows and columns remaining in the image.
1984 // If not, return the largest allowed partition size.
1985 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
1986 int cols_left, int *bh, int *bw) {
1987 if (rows_left <= 0 || cols_left <= 0) {
1988 return VPXMIN(bsize, BLOCK_8X8);
1989 } else {
1990 for (; bsize > 0; bsize -= 3) {
1991 *bh = num_8x8_blocks_high_lookup[bsize];
1992 *bw = num_8x8_blocks_wide_lookup[bsize];
1993 if ((*bh <= rows_left) && (*bw <= cols_left)) {
1994 break;
1995 }
1996 }
1997 }
1998 return bsize;
1999 }
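// Worked example: at a frame border with rows_left = 7 and cols_left = 8 (in
// units of 8x8 blocks), a requested BLOCK_64X64 (8x8 such blocks) does not
// fit, so the loop above steps down square sizes until BLOCK_32X32 fits.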
2001 static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, int bh_in,
2002 int bw_in, int row8x8_remaining,
2003 int col8x8_remaining, BLOCK_SIZE bsize,
2004 MODE_INFO **mi_8x8) {
2005 int bh = bh_in;
2006 int r, c = 0;
2007 for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
2008 int bw = bw_in;
2009 for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
2010 const int index = r * mis + c;
2011 mi_8x8[index] = mi + index;
2012 mi_8x8[index]->sb_type = find_partition_size(
2013 bsize, row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
2014 }
2015 }
2016 }
2018 // This function attempts to set all mode info entries in a given SB64
2019 // to the same block partition size.
2020 // However, at the bottom and right borders of the image the requested size
2021 // may not be allowed in which case this code attempts to choose the largest
2022 // allowable partition.
2023 static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
2024 MODE_INFO **mi_8x8, int mi_row, int mi_col,
2025 BLOCK_SIZE bsize) {
2026 VP9_COMMON *const cm = &cpi->common;
2027 const int mis = cm->mi_stride;
2028 const int row8x8_remaining = tile->mi_row_end - mi_row;
2029 const int col8x8_remaining = tile->mi_col_end - mi_col;
2030 int block_row, block_col;
2031 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
2032 int bh = num_8x8_blocks_high_lookup[bsize];
2033 int bw = num_8x8_blocks_wide_lookup[bsize];
2035 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
2037 // Apply the requested partition size to the SB64 if it is all "in image"
2038 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
2039 (row8x8_remaining >= MI_BLOCK_SIZE)) {
2040 for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
2041 for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
2042 int index = block_row * mis + block_col;
2043 mi_8x8[index] = mi_upper_left + index;
2044 mi_8x8[index]->sb_type = bsize;
2045 }
2046 }
2047 } else {
2048 // Else this is a partial SB64.
2049 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
2050 col8x8_remaining, bsize, mi_8x8);
2051 }
2052 }
2054 static const struct {
2055 int row;
2056 int col;
2057 } coord_lookup[16] = {
2058 // 32x32 index = 0
2059 { 0, 0 }, { 0, 2 }, { 2, 0 }, { 2, 2 },
2063 // 32x32 index = 1
2064 { 0, 4 }, { 0, 6 }, { 2, 4 }, { 2, 6 },
2068 // 32x32 index = 2
2069 { 4, 0 }, { 4, 2 }, { 6, 0 }, { 6, 2 },
2073 // 32x32 index = 3
2074 { 4, 4 }, { 4, 6 }, { 6, 4 }, { 6, 6 },
2078 };
2080 static void set_source_var_based_partition(VP9_COMP *cpi,
2081 const TileInfo *const tile,
2082 MACROBLOCK *const x,
2083 MODE_INFO **mi_8x8, int mi_row,
2084 int mi_col) {
2085 VP9_COMMON *const cm = &cpi->common;
2086 const int mis = cm->mi_stride;
2087 const int row8x8_remaining = tile->mi_row_end - mi_row;
2088 const int col8x8_remaining = tile->mi_col_end - mi_col;
2089 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
2091 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
2093 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
2096 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
2097 (row8x8_remaining >= MI_BLOCK_SIZE)) {
2098 int i, j;
2099 int index;
2100 diff d32[4];
2101 const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
2102 int is_larger_better = 0;
2103 int use32x32 = 0;
2104 unsigned int thr = cpi->source_var_thresh;
2106 memset(d32, 0, 4 * sizeof(diff));
2108 for (i = 0; i < 4; i++) {
2109 diff *d16[4];
2111 for (j = 0; j < 4; j++) {
2112 int b_mi_row = coord_lookup[i * 4 + j].row;
2113 int b_mi_col = coord_lookup[i * 4 + j].col;
2114 int boffset = b_mi_row / 2 * cm->mb_cols + b_mi_col / 2;
2116 d16[j] = cpi->source_diff_var + offset + boffset;
2118 index = b_mi_row * mis + b_mi_col;
2119 mi_8x8[index] = mi_upper_left + index;
2120 mi_8x8[index]->sb_type = BLOCK_16X16;
2122 // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
2123 // size to further improve quality.
2124 }
2126 is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
2127 (d16[2]->var < thr) && (d16[3]->var < thr);
2129 // Use 32x32 partition
2130 if (is_larger_better) {
2131 use32x32 += 1;
2133 for (j = 0; j < 4; j++) {
2134 d32[i].sse += d16[j]->sse;
2135 d32[i].sum += d16[j]->sum;
2136 }
2138 d32[i].var =
2139 (unsigned int)(d32[i].sse -
2140 (unsigned int)(((int64_t)d32[i].sum * d32[i].sum) >>
2141 10));
2143 index = coord_lookup[i * 4].row * mis + coord_lookup[i * 4].col;
2144 mi_8x8[index] = mi_upper_left + index;
2145 mi_8x8[index]->sb_type = BLOCK_32X32;
2146 }
2147 }
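// The aggregated 32x32 variance above follows the usual identity
// var = sse - sum^2 / N, with N = 1024 pixels for a 32x32 block (hence the
// >> 10).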
2149 if (use32x32 == 4) {
2150 thr <<= 1;
2151 is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
2152 (d32[2].var < thr) && (d32[3].var < thr);
2154 // Use 64x64 partition
2155 if (is_larger_better) {
2156 mi_8x8[0] = mi_upper_left;
2157 mi_8x8[0]->sb_type = BLOCK_64X64;
2158 }
2159 }
2160 } else { // partial in-image SB64
2161 int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
2162 int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
2163 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
2164 col8x8_remaining, BLOCK_16X16, mi_8x8);
2165 }
2166 }
2168 static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
2169 PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
2170 BLOCK_SIZE bsize) {
2171 VP9_COMMON *const cm = &cpi->common;
2172 MACROBLOCK *const x = &td->mb;
2173 MACROBLOCKD *const xd = &x->e_mbd;
2174 MODE_INFO *const mi = xd->mi[0];
2175 struct macroblock_plane *const p = x->plane;
2176 const struct segmentation *const seg = &cm->seg;
2177 const int bw = num_8x8_blocks_wide_lookup[mi->sb_type];
2178 const int bh = num_8x8_blocks_high_lookup[mi->sb_type];
2179 const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
2180 const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
2182 *(xd->mi[0]) = ctx->mic;
2183 *(x->mbmi_ext) = ctx->mbmi_ext;
2185 if (seg->enabled && cpi->oxcf.aq_mode != NO_AQ) {
2186 // For in frame complexity AQ or variance AQ, copy segment_id from
2187 // segmentation_map.
2188 if (cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ) {
2189 const uint8_t *const map =
2190 seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
2191 mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
2192 } else {
2193 // Setting segmentation map for cyclic_refresh.
2194 vp9_cyclic_refresh_update_segment(cpi, mi, mi_row, mi_col, bsize,
2195 ctx->rate, ctx->dist, x->skip, p);
2196 }
2197 vp9_init_plane_quantizers(cpi, x);
2198 }
2200 if (is_inter_block(mi)) {
2201 vp9_update_mv_count(td);
2202 if (cm->interp_filter == SWITCHABLE) {
2203 const int pred_ctx = get_pred_context_switchable_interp(xd);
2204 ++td->counts->switchable_interp[pred_ctx][mi->interp_filter];
2205 }
2207 if (mi->sb_type < BLOCK_8X8) {
2208 mi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
2209 mi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
2210 }
2211 }
2213 if (cm->use_prev_frame_mvs || !cm->error_resilient_mode ||
2214 (cpi->svc.use_base_mv && cpi->svc.number_spatial_layers > 1 &&
2215 cpi->svc.spatial_layer_id != cpi->svc.number_spatial_layers - 1)) {
2216 MV_REF *const frame_mvs =
2217 cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
2218 int w, h;
2220 for (h = 0; h < y_mis; ++h) {
2221 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
2222 for (w = 0; w < x_mis; ++w) {
2223 MV_REF *const mv = frame_mv + w;
2224 mv->ref_frame[0] = mi->ref_frame[0];
2225 mv->ref_frame[1] = mi->ref_frame[1];
2226 mv->mv[0].as_int = mi->mv[0].as_int;
2227 mv->mv[1].as_int = mi->mv[1].as_int;
2228 }
2229 }
2230 }
2232 x->skip = ctx->skip;
2233 x->skip_txfm[0] = mi->segment_id ? 0 : ctx->skip_txfm[0];
2234 }
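// update_state_rt() is the non-RD (real-time) counterpart of update_state():
// it commits the chosen mode info, updates segmentation and mode counts, and
// stores the motion vectors later used for temporal MV prediction.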
2236 static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
2237 const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
2238 int mi_col, int output_enabled, BLOCK_SIZE bsize,
2239 PICK_MODE_CONTEXT *ctx) {
2240 MACROBLOCK *const x = &td->mb;
2241 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
2242 update_state_rt(cpi, td, ctx, mi_row, mi_col, bsize);
2244 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
2245 update_stats(&cpi->common, td);
2247 (*tp)->token = EOSB_TOKEN;
2248 (*tp)++;
2249 }
2251 static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
2252 const TileInfo *const tile, TOKENEXTRA **tp,
2253 int mi_row, int mi_col, int output_enabled,
2254 BLOCK_SIZE bsize, PC_TREE *pc_tree) {
2255 VP9_COMMON *const cm = &cpi->common;
2256 MACROBLOCK *const x = &td->mb;
2257 MACROBLOCKD *const xd = &x->e_mbd;
2259 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
2260 int ctx;
2261 PARTITION_TYPE partition;
2262 BLOCK_SIZE subsize;
2264 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2266 if (bsize >= BLOCK_8X8) {
2267 const int idx_str = xd->mi_stride * mi_row + mi_col;
2268 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
2269 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
2270 subsize = mi_8x8[0]->sb_type;
2271 } else {
2272 ctx = 0;
2273 subsize = BLOCK_4X4;
2274 }
2276 partition = partition_lookup[bsl][subsize];
2277 if (output_enabled && bsize != BLOCK_4X4)
2278 td->counts->partition[ctx][partition]++;
2280 switch (partition) {
2281 case PARTITION_NONE:
2282 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2283 &pc_tree->none);
2284 break;
2285 case PARTITION_VERT:
2286 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2287 &pc_tree->vertical[0]);
2288 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
2289 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
2290 subsize, &pc_tree->vertical[1]);
2291 }
2292 break;
2293 case PARTITION_HORZ:
2294 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2295 &pc_tree->horizontal[0]);
2296 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
2297 encode_b_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
2298 subsize, &pc_tree->horizontal[1]);
2299 }
2300 break;
2301 case PARTITION_SPLIT:
2302 subsize = get_subsize(bsize, PARTITION_SPLIT);
2303 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
2304 pc_tree->split[0]);
2305 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
2306 subsize, pc_tree->split[1]);
2307 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
2308 subsize, pc_tree->split[2]);
2309 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
2310 output_enabled, subsize, pc_tree->split[3]);
2311 break;
2312 default: assert(0 && "Invalid partition type."); break;
2313 }
2315 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
2316 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
2317 }
2319 static void rd_use_partition(VP9_COMP *cpi, ThreadData *td,
2320 TileDataEnc *tile_data, MODE_INFO **mi_8x8,
2321 TOKENEXTRA **tp, int mi_row, int mi_col,
2322 BLOCK_SIZE bsize, int *rate, int64_t *dist,
2323 int do_recon, PC_TREE *pc_tree) {
2324 VP9_COMMON *const cm = &cpi->common;
2325 TileInfo *const tile_info = &tile_data->tile_info;
2326 MACROBLOCK *const x = &td->mb;
2327 MACROBLOCKD *const xd = &x->e_mbd;
2328 const int mis = cm->mi_stride;
2329 const int bsl = b_width_log2_lookup[bsize];
2330 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
2331 const int bss = (1 << bsl) / 4;
2332 int i, pl;
2333 PARTITION_TYPE partition = PARTITION_NONE;
2334 BLOCK_SIZE subsize;
2335 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2336 PARTITION_CONTEXT sl[8], sa[8];
2337 RD_COST last_part_rdc, none_rdc, chosen_rdc;
2338 BLOCK_SIZE sub_subsize = BLOCK_4X4;
2339 int splits_below = 0;
2340 BLOCK_SIZE bs_type = mi_8x8[0]->sb_type;
2341 int do_partition_search = 1;
2342 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2344 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2346 assert(num_4x4_blocks_wide_lookup[bsize] ==
2347 num_4x4_blocks_high_lookup[bsize]);
2349 vp9_rd_cost_reset(&last_part_rdc);
2350 vp9_rd_cost_reset(&none_rdc);
2351 vp9_rd_cost_reset(&chosen_rdc);
2353 partition = partition_lookup[bsl][bs_type];
2354 subsize = get_subsize(bsize, partition);
2356 pc_tree->partitioning = partition;
2357 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2359 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode != NO_AQ) {
2360 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2361 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2362 }
2364 if (do_partition_search &&
2365 cpi->sf.partition_search_type == SEARCH_PARTITION &&
2366 cpi->sf.adjust_partitioning_from_last_frame) {
2367 // Check if any of the sub blocks are further split.
2368 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
2369 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
2370 splits_below = 1;
2371 for (i = 0; i < 4; i++) {
2372 int jj = i >> 1, ii = i & 0x01;
2373 MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
2374 if (this_mi && this_mi->sb_type >= sub_subsize) {
2375 splits_below = 0;
2376 }
2377 }
2378 }
2380 // If partition is not none try none unless each of the 4 splits are split
2381 // even further.
2382 if (partition != PARTITION_NONE && !splits_below &&
2383 mi_row + (mi_step >> 1) < cm->mi_rows &&
2384 mi_col + (mi_step >> 1) < cm->mi_cols) {
2385 pc_tree->partitioning = PARTITION_NONE;
2386 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, ctx,
2387 INT64_MAX);
2389 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2391 if (none_rdc.rate < INT_MAX) {
2392 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2393 none_rdc.rdcost =
2394 RDCOST(x->rdmult, x->rddiv, none_rdc.rate, none_rdc.dist);
2395 }
2397 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2398 mi_8x8[0]->sb_type = bs_type;
2399 pc_tree->partitioning = partition;
2400 }
2401 }
2403 switch (partition) {
2404 case PARTITION_NONE:
2405 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, bsize,
2406 ctx, INT64_MAX);
2407 break;
2408 case PARTITION_HORZ:
2409 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2410 subsize, &pc_tree->horizontal[0], INT64_MAX);
2411 if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
2412 mi_row + (mi_step >> 1) < cm->mi_rows) {
2413 RD_COST tmp_rdc;
2414 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2415 vp9_rd_cost_init(&tmp_rdc);
2416 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2417 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2418 rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col,
2419 &tmp_rdc, subsize, &pc_tree->horizontal[1], INT64_MAX);
2420 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2421 vp9_rd_cost_reset(&last_part_rdc);
2422 break;
2423 }
2424 last_part_rdc.rate += tmp_rdc.rate;
2425 last_part_rdc.dist += tmp_rdc.dist;
2426 last_part_rdc.rdcost += tmp_rdc.rdcost;
2427 }
2428 break;
2429 case PARTITION_VERT:
2430 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2431 subsize, &pc_tree->vertical[0], INT64_MAX);
2432 if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
2433 mi_col + (mi_step >> 1) < cm->mi_cols) {
2434 RD_COST tmp_rdc;
2435 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
2436 vp9_rd_cost_init(&tmp_rdc);
2437 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2438 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2439 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1),
2440 &tmp_rdc, subsize,
2441 &pc_tree->vertical[bsize > BLOCK_8X8], INT64_MAX);
2442 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2443 vp9_rd_cost_reset(&last_part_rdc);
2444 break;
2445 }
2446 last_part_rdc.rate += tmp_rdc.rate;
2447 last_part_rdc.dist += tmp_rdc.dist;
2448 last_part_rdc.rdcost += tmp_rdc.rdcost;
2449 }
2450 break;
2451 case PARTITION_SPLIT:
2452 if (bsize == BLOCK_8X8) {
2453 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
2454 subsize, pc_tree->leaf_split[0], INT64_MAX);
2455 break;
2456 }
2457 last_part_rdc.rate = 0;
2458 last_part_rdc.dist = 0;
2459 last_part_rdc.rdcost = 0;
2460 for (i = 0; i < 4; i++) {
2461 int x_idx = (i & 1) * (mi_step >> 1);
2462 int y_idx = (i >> 1) * (mi_step >> 1);
2463 int jj = i >> 1, ii = i & 0x01;
2465 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
2466 continue;
2468 vp9_rd_cost_init(&tmp_rdc);
2469 rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss,
2470 tp, mi_row + y_idx, mi_col + x_idx, subsize,
2471 &tmp_rdc.rate, &tmp_rdc.dist, i != 3,
2472 pc_tree->split[i]);
2473 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2474 vp9_rd_cost_reset(&last_part_rdc);
2475 break;
2476 }
2477 last_part_rdc.rate += tmp_rdc.rate;
2478 last_part_rdc.dist += tmp_rdc.dist;
2479 }
2480 break;
2481 default: assert(0); break;
2482 }
2484 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2485 if (last_part_rdc.rate < INT_MAX) {
2486 last_part_rdc.rate += cpi->partition_cost[pl][partition];
2487 last_part_rdc.rdcost =
2488 RDCOST(x->rdmult, x->rddiv, last_part_rdc.rate, last_part_rdc.dist);
2489 }
2491 if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame &&
2492 cpi->sf.partition_search_type == SEARCH_PARTITION &&
2493 partition != PARTITION_SPLIT && bsize > BLOCK_8X8 &&
2494 (mi_row + mi_step < cm->mi_rows ||
2495 mi_row + (mi_step >> 1) == cm->mi_rows) &&
2496 (mi_col + mi_step < cm->mi_cols ||
2497 mi_col + (mi_step >> 1) == cm->mi_cols)) {
2498 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
2499 chosen_rdc.rate = 0;
2500 chosen_rdc.dist = 0;
2501 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2502 pc_tree->partitioning = PARTITION_SPLIT;
2505 for (i = 0; i < 4; i++) {
2506 int x_idx = (i & 1) * (mi_step >> 1);
2507 int y_idx = (i >> 1) * (mi_step >> 1);
2508 RD_COST tmp_rdc;
2509 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2510 PARTITION_CONTEXT sl[8], sa[8];
2512 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
2513 continue;
2515 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2516 pc_tree->split[i]->partitioning = PARTITION_NONE;
2517 rd_pick_sb_modes(cpi, tile_data, x, mi_row + y_idx, mi_col + x_idx,
2518 &tmp_rdc, split_subsize, &pc_tree->split[i]->none,
2519 INT64_MAX);
2521 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2523 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2524 vp9_rd_cost_reset(&chosen_rdc);
2525 break;
2526 }
2528 chosen_rdc.rate += tmp_rdc.rate;
2529 chosen_rdc.dist += tmp_rdc.dist;
2531 if (i != 3)
2532 encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
2533 split_subsize, pc_tree->split[i]);
2535 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
2536 split_subsize);
2537 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2538 }
2539 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2540 if (chosen_rdc.rate < INT_MAX) {
2541 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2542 chosen_rdc.rdcost =
2543 RDCOST(x->rdmult, x->rddiv, chosen_rdc.rate, chosen_rdc.dist);
2544 }
2545 }
2547 // If last_part is better set the partitioning to that.
2548 if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
2549 mi_8x8[0]->sb_type = bsize;
2550 if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition;
2551 chosen_rdc = last_part_rdc;
2552 }
2553 // If none was better set the partitioning to that.
2554 if (none_rdc.rdcost < chosen_rdc.rdcost) {
2555 if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
2556 chosen_rdc = none_rdc;
2557 }
2559 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2561 // We must have chosen a partitioning and encoding or we'll fail later on.
2562 // No other opportunities for success.
2563 if (bsize == BLOCK_64X64)
2564 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
2566 if (do_recon) {
2567 int output_enabled = (bsize == BLOCK_64X64);
2568 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
2569 pc_tree);
2570 }
2572 *rate = chosen_rdc.rate;
2573 *dist = chosen_rdc.dist;
2574 }
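// The two tables below relax an observed block size by roughly one square
// size step in each direction (min_partition_size[bs] is a step smaller,
// max_partition_size[bs] a step larger); they are used by the "relaxed"
// auto partition range logic further down.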
2576 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
2577 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2578 BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, BLOCK_16X16,
2579 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16
2582 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
2583 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
2584 BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
2585 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64
2588 // Look at all the mode_info entries for blocks that are part of this
2589 // partition and find the min and max values for sb_type.
2590 // At the moment this is designed to work on a 64x64 SB but could be
2591 // adjusted to use a size parameter.
2593 // The min and max are assumed to have been initialized prior to calling this
2594 // function so repeat calls can accumulate a min and max of more than one sb64.
2595 static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
2596 BLOCK_SIZE *min_block_size,
2597 BLOCK_SIZE *max_block_size,
2598 int bs_hist[BLOCK_SIZES]) {
2599 int sb_width_in_blocks = MI_BLOCK_SIZE;
2600 int sb_height_in_blocks = MI_BLOCK_SIZE;
2601 int i, j;
2602 int index = 0;
2604 // Check the sb_type for each block that belongs to this region.
2605 for (i = 0; i < sb_height_in_blocks; ++i) {
2606 for (j = 0; j < sb_width_in_blocks; ++j) {
2607 MODE_INFO *mi = mi_8x8[index + j];
2608 BLOCK_SIZE sb_type = mi ? mi->sb_type : 0;
2609 bs_hist[sb_type]++;
2610 *min_block_size = VPXMIN(*min_block_size, sb_type);
2611 *max_block_size = VPXMAX(*max_block_size, sb_type);
2612 }
2613 index += xd->mi_stride;
2614 }
2615 }
2617 // Next square block size less than or equal to the current block size.
2618 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
2619 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8,
2620 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
2621 BLOCK_32X32, BLOCK_32X32, BLOCK_64X64
2624 // Look at neighboring blocks and set a min and max partition size based on
2626 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2627 MACROBLOCKD *const xd, int mi_row,
2628 int mi_col, BLOCK_SIZE *min_block_size,
2629 BLOCK_SIZE *max_block_size) {
2630 VP9_COMMON *const cm = &cpi->common;
2631 MODE_INFO **mi = xd->mi;
2632 const int left_in_image = !!xd->left_mi;
2633 const int above_in_image = !!xd->above_mi;
2634 const int row8x8_remaining = tile->mi_row_end - mi_row;
2635 const int col8x8_remaining = tile->mi_col_end - mi_col;
2636 int bh, bw;
2637 BLOCK_SIZE min_size = BLOCK_4X4;
2638 BLOCK_SIZE max_size = BLOCK_64X64;
2639 int bs_hist[BLOCK_SIZES] = { 0 };
2641 // Trap case where we do not have a prediction.
2642 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
2643 // Default "min to max" and "max to min"
2644 min_size = BLOCK_64X64;
2645 max_size = BLOCK_4X4;
2647 // NOTE: each call to get_sb_partition_size_range() uses the previous
2648 // passed in values for min and max as a starting point.
2649 // Find the min and max partition used in previous frame at this location
2650 if (cm->frame_type != KEY_FRAME) {
2651 MODE_INFO **prev_mi =
2652 &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
2653 get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
2654 }
2655 // Find the min and max partition sizes used in the left SB64
2656 if (left_in_image) {
2657 MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
2658 get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
2659 bs_hist);
2660 }
2661 // Find the min and max partition sizes used in the above SB64.
2662 if (above_in_image) {
2663 MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
2664 get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
2665 bs_hist);
2666 }
2668 // Adjust observed min and max for "relaxed" auto partition case.
2669 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
2670 min_size = min_partition_size[min_size];
2671 max_size = max_partition_size[max_size];
2672 }
2673 }
2675 // Check border cases where max and min from neighbors may not be legal.
2676 max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
2677 &bh, &bw);
2678 // Test for blocks at the edge of the active image.
2679 // This may be the actual edge of the image or where there are formatting
2680 // bars.
2681 if (vp9_active_edge_sb(cpi, mi_row, mi_col)) {
2682 min_size = BLOCK_4X4;
2683 } else {
2684 min_size =
2685 VPXMIN(cpi->sf.rd_auto_partition_min_limit, VPXMIN(min_size, max_size));
2686 }
2688 // When use_square_partition_only is true, make sure at least one square
2689 // partition is allowed by selecting the next smaller square size as
2690 // min_size.
2691 if (cpi->sf.use_square_partition_only &&
2692 next_square_size[max_size] < min_size) {
2693 min_size = next_square_size[max_size];
2694 }
2696 *min_block_size = min_size;
2697 *max_block_size = max_size;
2698 }
2700 // TODO(jingning) refactor functions setting partition search range
2701 static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, int mi_row,
2702 int mi_col, BLOCK_SIZE bsize,
2703 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
2704 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2705 int mi_height = num_8x8_blocks_high_lookup[bsize];
2706 int idx, idy;
2708 MODE_INFO *mi;
2709 const int idx_str = cm->mi_stride * mi_row + mi_col;
2710 MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
2711 BLOCK_SIZE bs, min_size, max_size;
2713 min_size = BLOCK_64X64;
2714 max_size = BLOCK_4X4;
2716 if (prev_mi) {
2717 for (idy = 0; idy < mi_height; ++idy) {
2718 for (idx = 0; idx < mi_width; ++idx) {
2719 mi = prev_mi[idy * cm->mi_stride + idx];
2720 bs = mi ? mi->sb_type : bsize;
2721 min_size = VPXMIN(min_size, bs);
2722 max_size = VPXMAX(max_size, bs);
2723 }
2724 }
2725 }
2727 if (xd->left_mi) {
2728 for (idy = 0; idy < mi_height; ++idy) {
2729 mi = xd->mi[idy * cm->mi_stride - 1];
2730 bs = mi ? mi->sb_type : bsize;
2731 min_size = VPXMIN(min_size, bs);
2732 max_size = VPXMAX(max_size, bs);
2733 }
2734 }
2736 if (xd->above_mi) {
2737 for (idx = 0; idx < mi_width; ++idx) {
2738 mi = xd->mi[idx - cm->mi_stride];
2739 bs = mi ? mi->sb_type : bsize;
2740 min_size = VPXMIN(min_size, bs);
2741 max_size = VPXMAX(max_size, bs);
2742 }
2743 }
2745 if (min_size == max_size) {
2746 min_size = min_partition_size[min_size];
2747 max_size = max_partition_size[max_size];
2748 }
2750 *min_bs = min_size;
2751 *max_bs = max_size;
2752 }
2754 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2755 memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
2756 }
2758 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2759 memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
2760 }
2762 #if CONFIG_FP_MB_STATS
2763 const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
2764 1, 2, 2, 2, 4, 4 };
2765 const int num_16x16_blocks_high_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
2766 2, 1, 2, 4, 2, 4 };
2767 const int qindex_skip_threshold_lookup[BLOCK_SIZES] = {
2768 0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120
2770 const int qindex_split_threshold_lookup[BLOCK_SIZES] = {
2771 0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120
2773 const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = {
2774 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6
2775 };
2777 typedef enum {
2778 MV_ZERO = 0,
2779 MV_LEFT = 1,
2780 MV_UP = 2,
2781 MV_RIGHT = 3,
2782 MV_DOWN = 4,
2783 MV_INVALID
2784 } MOTION_DIRECTION;
2786 static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
2787 if (fp_byte & FPMB_MOTION_ZERO_MASK) {
2788 return MV_ZERO;
2789 } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
2790 return MV_LEFT;
2791 } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
2792 return MV_RIGHT;
2793 } else if (fp_byte & FPMB_MOTION_UP_MASK) {
2794 return MV_UP;
2795 } else {
2796 return MV_DOWN;
2797 }
2798 }
2800 static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
2801 MOTION_DIRECTION that_mv) {
2802 if (this_mv == that_mv) {
2803 return 0;
2804 } else {
2805 return abs(this_mv - that_mv) == 2 ? 2 : 1;
2806 }
2807 }
2808 #endif
2810 // Calculate the score used in machine-learning based partition search early
2811 // termination.
2812 static double compute_score(VP9_COMMON *const cm, MACROBLOCKD *const xd,
2813 PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
2814 BLOCK_SIZE bsize) {
2815 const double *clf;
2816 const double *mean;
2817 const double *sd;
2818 const int mag_mv =
2819 abs(ctx->mic.mv[0].as_mv.col) + abs(ctx->mic.mv[0].as_mv.row);
2820 const int left_in_image = !!xd->left_mi;
2821 const int above_in_image = !!xd->above_mi;
2822 MODE_INFO **prev_mi =
2823 &cm->prev_mi_grid_visible[mi_col + cm->mi_stride * mi_row];
2824 int above_par = 0; // above_partitioning
2825 int left_par = 0; // left_partitioning
2826 int last_par = 0; // last_partitioning
2827 BLOCK_SIZE context_size;
2828 double score;
2829 int offset = 0;
2831 assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]);
2833 if (above_in_image) {
2834 context_size = xd->above_mi->sb_type;
2835 if (context_size < bsize)
2836 above_par = 2;
2837 else if (context_size == bsize)
2838 above_par = 1;
2839 }
2841 if (left_in_image) {
2842 context_size = xd->left_mi->sb_type;
2843 if (context_size < bsize)
2844 left_par = 2;
2845 else if (context_size == bsize)
2846 left_par = 1;
2847 }
2849 if (prev_mi[0]) {
2850 context_size = prev_mi[0]->sb_type;
2851 if (context_size < bsize)
2852 last_par = 2;
2853 else if (context_size == bsize)
2854 last_par = 1;
2855 }
2857 if (bsize == BLOCK_64X64)
2858 offset = 0;
2859 else if (bsize == BLOCK_32X32)
2860 offset = 8;
2861 else if (bsize == BLOCK_16X16)
2862 offset = 16;
2864 // early termination score calculation
2865 clf = &classifiers[offset];
2866 mean = &train_mean[offset];
2867 sd = &train_stdm[offset];
2868 score = clf[0] * (((double)ctx->rate - mean[0]) / sd[0]) +
2869 clf[1] * (((double)ctx->dist - mean[1]) / sd[1]) +
2870 clf[2] * (((double)mag_mv / 2 - mean[2]) * sd[2]) +
2871 clf[3] * (((double)(left_par + above_par) / 2 - mean[3]) * sd[3]) +
2872 clf[4] * (((double)ctx->sum_y_eobs - mean[4]) / sd[4]) +
2873 clf[5] * (((double)cm->base_qindex - mean[5]) * sd[5]) +
2874 clf[6] * (((double)last_par - mean[6]) * sd[6]) + clf[7];
2875 return score;
2876 }
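// compute_score() is a linear classifier: each feature is normalized with
// the train_mean[]/train_stdm[] tables defined at the top of this file,
// weighted by classifiers[], and summed with a bias term (clf[7]); a
// negative score terminates further splitting of the current block.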
2878 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
2879 // unlikely to be selected depending on previous rate-distortion optimization
2880 // results, for encoding speed-up.
2881 static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
2882 TileDataEnc *tile_data, TOKENEXTRA **tp,
2883 int mi_row, int mi_col, BLOCK_SIZE bsize,
2884 RD_COST *rd_cost, int64_t best_rd,
2885 PC_TREE *pc_tree) {
2886 VP9_COMMON *const cm = &cpi->common;
2887 TileInfo *const tile_info = &tile_data->tile_info;
2888 MACROBLOCK *const x = &td->mb;
2889 MACROBLOCKD *const xd = &x->e_mbd;
2890 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
2891 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2892 PARTITION_CONTEXT sl[8], sa[8];
2893 TOKENEXTRA *tp_orig = *tp;
2894 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2895 int i;
2896 const int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2897 BLOCK_SIZE subsize;
2898 RD_COST this_rdc, sum_rdc, best_rdc;
2899 int do_split = bsize >= BLOCK_8X8;
2900 int do_rect = 1;
2901 INTERP_FILTER pred_interp_filter;
2903 // Override skipping rectangular partition operations for edge blocks
2904 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
2905 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
2906 const int xss = x->e_mbd.plane[1].subsampling_x;
2907 const int yss = x->e_mbd.plane[1].subsampling_y;
2909 BLOCK_SIZE min_size = x->min_partition_size;
2910 BLOCK_SIZE max_size = x->max_partition_size;
2912 #if CONFIG_FP_MB_STATS
2913 unsigned int src_diff_var = UINT_MAX;
2914 int none_complexity = 0;
2915 #endif
2917 int partition_none_allowed = !force_horz_split && !force_vert_split;
2918 int partition_horz_allowed =
2919 !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
2920 int partition_vert_allowed =
2921 !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
2923 int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_thr.dist;
2924 int rate_breakout_thr = cpi->sf.partition_search_breakout_thr.rate;
2928 assert(num_8x8_blocks_wide_lookup[bsize] ==
2929 num_8x8_blocks_high_lookup[bsize]);
2931 // Adjust dist breakout threshold according to the partition size.
2932 dist_breakout_thr >>=
2933 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
2934 rate_breakout_thr *= num_pels_log2_lookup[bsize];
2936 vp9_rd_cost_init(&this_rdc);
2937 vp9_rd_cost_init(&sum_rdc);
2938 vp9_rd_cost_reset(&best_rdc);
2939 best_rdc.rdcost = best_rd;
2941 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2943 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode != NO_AQ &&
2944 cpi->oxcf.aq_mode != LOOKAHEAD_AQ)
2945 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2947 if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
2948 int cb_partition_search_ctrl =
2949 ((pc_tree->index == 0 || pc_tree->index == 3) +
2950 get_chessboard_index(cm->current_video_frame)) &
2951 0x1;
2953 if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
2954 set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
2955 }
2957 // Determine partition types in search according to the speed features.
2958 // The threshold set here has to be of square block size.
2959 if (cpi->sf.auto_min_max_partition_size) {
2960 partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
2961 partition_horz_allowed &=
2962 ((bsize <= max_size && bsize > min_size) || force_horz_split);
2963 partition_vert_allowed &=
2964 ((bsize <= max_size && bsize > min_size) || force_vert_split);
2965 do_split &= bsize > min_size;
2966 }
2968 if (cpi->sf.use_square_partition_only &&
2969 bsize > cpi->sf.use_square_only_threshold) {
2970 if (cpi->use_svc) {
2971 if (!vp9_active_h_edge(cpi, mi_row, mi_step) || x->e_mbd.lossless)
2972 partition_horz_allowed &= force_horz_split;
2973 if (!vp9_active_v_edge(cpi, mi_row, mi_step) || x->e_mbd.lossless)
2974 partition_vert_allowed &= force_vert_split;
2975 } else {
2976 partition_horz_allowed &= force_horz_split;
2977 partition_vert_allowed &= force_vert_split;
2978 }
2979 }
2981 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2983 #if CONFIG_FP_MB_STATS
2984 if (cpi->use_fp_mb_stats) {
2985 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2986 src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row,
2987 mi_col, bsize);
2988 }
2989 #endif
2991 #if CONFIG_FP_MB_STATS
2992 // Decide whether we shall split directly and skip searching NONE by using
2993 // the first pass block statistics
2994 if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
2995 partition_none_allowed && src_diff_var > 4 &&
2996 cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
2997 int mb_row = mi_row >> 1;
2998 int mb_col = mi_col >> 1;
2999 int mb_row_end =
3000 VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
3001 int mb_col_end =
3002 VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
3003 int r, c;
3005 // compute a complexity measure, basically measure inconsistency of motion
3006 // vectors obtained from the first pass in the current block
3007 for (r = mb_row; r < mb_row_end; r++) {
3008 for (c = mb_col; c < mb_col_end; c++) {
3009 const int mb_index = r * cm->mb_cols + c;
3011 MOTION_DIRECTION this_mv;
3012 MOTION_DIRECTION right_mv;
3013 MOTION_DIRECTION bottom_mv;
3015 this_mv =
3016 get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);
3018 // to its right
3019 if (c != mb_col_end - 1) {
3020 right_mv = get_motion_direction_fp(
3021 cpi->twopass.this_frame_mb_stats[mb_index + 1]);
3022 none_complexity += get_motion_inconsistency(this_mv, right_mv);
3023 }
3025 // to its bottom
3026 if (r != mb_row_end - 1) {
3027 bottom_mv = get_motion_direction_fp(
3028 cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
3029 none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
3030 }
3032 // do not count its left and top neighbors to avoid double counting
3033 }
3034 }
3036 if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
3037 partition_none_allowed = 0;
3038 }
3039 }
3041 #endif
3043 if (partition_none_allowed) {
3044 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, ctx,
3045 best_rdc.rdcost);
3046 if (this_rdc.rate != INT_MAX) {
3047 if (bsize >= BLOCK_8X8) {
3048 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
3049 this_rdc.rdcost =
3050 RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
3051 }
3053 if (this_rdc.rdcost < best_rdc.rdcost) {
3054 MODE_INFO *mi = xd->mi[0];
3056 best_rdc = this_rdc;
3057 if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
3059 if (!cpi->sf.ml_partition_search_early_termination) {
3060 // If all y, u, v transform blocks in this partition are skippable,
3061 // and the dist & rate are within the thresholds, the partition search
3062 // is terminated for current branch of the partition search tree.
3063 if (!x->e_mbd.lossless && ctx->skippable &&
3064 ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
3065 (best_rdc.dist < dist_breakout_thr &&
3066 best_rdc.rate < rate_breakout_thr))) {
3067 do_split = 0;
3068 do_rect = 0;
3069 }
3070 } else {
3071 // Currently, the machine-learning based partition search early
3072 // termination is only used while bsize is 16x16, 32x32 or 64x64,
3073 // VPXMIN(cm->width, cm->height) >= 480, and speed = 0.
3074 if (!x->e_mbd.lossless &&
3075 !segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP) &&
3076 ctx->mic.mode >= INTRA_MODES && bsize >= BLOCK_16X16) {
3077 if (compute_score(cm, xd, ctx, mi_row, mi_col, bsize) < 0.0) {
3078 do_split = 0;
3079 do_rect = 0;
3080 }
3081 }
3082 }
3084 #if CONFIG_FP_MB_STATS
3085 // Check if every 16x16 first pass block statistics has zero
3086 // motion and the corresponding first pass residue is small enough.
3087 // If that is the case, check the difference variance between the
3088 // current frame and the last frame. If the variance is small enough,
3089 // stop further splitting in RD optimization
3090 if (cpi->use_fp_mb_stats && do_split != 0 &&
3091 cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
3092 int mb_row = mi_row >> 1;
3093 int mb_col = mi_col >> 1;
3094 int mb_row_end =
3095 VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
3096 int mb_col_end =
3097 VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
3098 int r, c;
3099 int skip = 1;
3101 for (r = mb_row; r < mb_row_end; r++) {
3102 for (c = mb_col; c < mb_col_end; c++) {
3103 const int mb_index = r * cm->mb_cols + c;
3104 if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
3105 FPMB_MOTION_ZERO_MASK) ||
3106 !(cpi->twopass.this_frame_mb_stats[mb_index] &
3107 FPMB_ERROR_SMALL_MASK)) {
3108 skip = 0;
3109 break;
3110 }
3111 }
3112 if (skip == 0) {
3113 break;
3114 }
3115 }
3117 if (skip) {
3118 if (src_diff_var == UINT_MAX) {
3119 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
3120 src_diff_var = get_sby_perpixel_diff_variance(
3121 cpi, &x->plane[0].src, mi_row, mi_col, bsize);
3122 }
3123 if (src_diff_var < 8) {
3124 do_split = 0;
3125 do_rect = 0;
3126 }
3127 }
3128 }
3129 #endif
3130 }
3131 }
3132 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
3133 }
3135 // store estimated motion vector
3136 if (cpi->sf.adaptive_motion_search) store_pred_mv(x, ctx);
3138 // If the interp_filter is marked as SWITCHABLE_FILTERS, it was for an
3139 // intra block and used for context purposes.
3140 if (ctx->mic.interp_filter == SWITCHABLE_FILTERS) {
3141 pred_interp_filter = EIGHTTAP;
3142 } else {
3143 pred_interp_filter = ctx->mic.interp_filter;
3144 }
3147 // TODO(jingning): use the motion vectors given by the above search as
3148 // the starting point of motion search in the following partition type check.
3149 if (do_split) {
3150 subsize = get_subsize(bsize, PARTITION_SPLIT);
3151 if (bsize == BLOCK_8X8) {
3152 i = 4;
3153 if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
3154 pc_tree->leaf_split[0]->pred_interp_filter = pred_interp_filter;
3155 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3156 pc_tree->leaf_split[0], best_rdc.rdcost);
3158 if (sum_rdc.rate == INT_MAX) sum_rdc.rdcost = INT64_MAX;
3159 } else {
3160 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
3161 const int x_idx = (i & 1) * mi_step;
3162 const int y_idx = (i >> 1) * mi_step;
3164 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
3165 continue;
3167 if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
3169 pc_tree->split[i]->index = i;
3170 rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
3171 mi_col + x_idx, subsize, &this_rdc,
3172 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
3174 if (this_rdc.rate == INT_MAX) {
3175 sum_rdc.rdcost = INT64_MAX;
3176 break;
3177 } else {
3178 sum_rdc.rate += this_rdc.rate;
3179 sum_rdc.dist += this_rdc.dist;
3180 sum_rdc.rdcost += this_rdc.rdcost;
3181 }
3182 }
3183 }
3185 if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
3186 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
3187 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
3189 if (sum_rdc.rdcost < best_rdc.rdcost) {
3190 best_rdc = sum_rdc;
3191 pc_tree->partitioning = PARTITION_SPLIT;
3193 // Rate and distortion based partition search termination clause.
3194 if (!cpi->sf.ml_partition_search_early_termination &&
3195 !x->e_mbd.lossless && ((best_rdc.dist < (dist_breakout_thr >> 2)) ||
3196 (best_rdc.dist < dist_breakout_thr &&
3197 best_rdc.rate < rate_breakout_thr))) {
3198 do_rect = 0;
3199 }
3200 }
3202 // skip rectangular partition test when larger block size
3203 // gives better rd cost
3204 if ((cpi->sf.less_rectangular_check) &&
3205 ((bsize > cpi->sf.use_square_only_threshold) ||
3206 (best_rdc.dist < dist_breakout_thr)))
3207 do_rect &= !partition_none_allowed;
3209 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
3210 }
3212 // PARTITION_HORZ
3213 if (partition_horz_allowed &&
3214 (do_rect || vp9_active_h_edge(cpi, mi_row, mi_step))) {
3215 subsize = get_subsize(bsize, PARTITION_HORZ);
3216 if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
3217 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
3218 partition_none_allowed)
3219 pc_tree->horizontal[0].pred_interp_filter = pred_interp_filter;
3220 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3221 &pc_tree->horizontal[0], best_rdc.rdcost);
3223 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
3224 bsize > BLOCK_8X8) {
3225 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
3226 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
3227 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
3229 if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
3230 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
3231 partition_none_allowed)
3232 pc_tree->horizontal[1].pred_interp_filter = pred_interp_filter;
3233 rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc,
3234 subsize, &pc_tree->horizontal[1],
3235 best_rdc.rdcost - sum_rdc.rdcost);
3236 if (this_rdc.rate == INT_MAX) {
3237 sum_rdc.rdcost = INT64_MAX;
3238 } else {
3239 sum_rdc.rate += this_rdc.rate;
3240 sum_rdc.dist += this_rdc.dist;
3241 sum_rdc.rdcost += this_rdc.rdcost;
3242 }
3243 }
3245 if (sum_rdc.rdcost < best_rdc.rdcost) {
3246 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
3247 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
3248 if (sum_rdc.rdcost < best_rdc.rdcost) {
3249 best_rdc = sum_rdc;
3250 pc_tree->partitioning = PARTITION_HORZ;
3252 if ((cpi->sf.less_rectangular_check) &&
3253 (bsize > cpi->sf.use_square_only_threshold))
3254 do_rect = 0;
3255 }
3256 }
3257 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
3258 }
3260 // PARTITION_VERT
3261 if (partition_vert_allowed &&
3262 (do_rect || vp9_active_v_edge(cpi, mi_col, mi_step))) {
3263 subsize = get_subsize(bsize, PARTITION_VERT);
3265 if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
3266 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
3267 partition_none_allowed)
3268 pc_tree->vertical[0].pred_interp_filter = pred_interp_filter;
3269 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3270 &pc_tree->vertical[0], best_rdc.rdcost);
3271 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
3272 bsize > BLOCK_8X8) {
3273 update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
3274 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
3275 &pc_tree->vertical[0]);
3277 if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
3278 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
3279 partition_none_allowed)
3280 pc_tree->vertical[1].pred_interp_filter = pred_interp_filter;
3281 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc,
3282 subsize, &pc_tree->vertical[1],
3283 best_rdc.rdcost - sum_rdc.rdcost);
3284 if (this_rdc.rate == INT_MAX) {
3285 sum_rdc.rdcost = INT64_MAX;
3286 } else {
3287 sum_rdc.rate += this_rdc.rate;
3288 sum_rdc.dist += this_rdc.dist;
3289 sum_rdc.rdcost += this_rdc.rdcost;
3290 }
3291 }
3293 if (sum_rdc.rdcost < best_rdc.rdcost) {
3294 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
3295 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
3296 if (sum_rdc.rdcost < best_rdc.rdcost) {
3297 best_rdc = sum_rdc;
3298 pc_tree->partitioning = PARTITION_VERT;
3299 }
3300 }
3301 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
3302 }
3304 // TODO(jbb): This code added so that we avoid static analysis
3305 // warning related to the fact that best_rd isn't used after this
3306 // point. This code should be refactored so that the duplicate
3307 // checks occur in some sub function and thus are used...
3308 (void)best_rd;
3309 *rd_cost = best_rdc;
3311 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
3312 pc_tree->index != 3) {
3313 int output_enabled = (bsize == BLOCK_64X64);
3314 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
3315 pc_tree);
3316 }
3318 if (bsize == BLOCK_64X64) {
3319 assert(tp_orig < *tp);
3320 assert(best_rdc.rate < INT_MAX);
3321 assert(best_rdc.dist < INT64_MAX);
3322 } else {
3323 assert(tp_orig == *tp);
3324 }
3325 }
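// rd_pick_partition() recurses down the 64x64 superblock quad-tree: at each
// level the NONE, HORZ, VERT and SPLIT candidates are priced (including the
// partition syntax cost from cpi->partition_cost[]) and the cheapest subtree
// is kept in pc_tree, with children pruned once their running cost exceeds
// the best cost found so far.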
3327 static void encode_rd_sb_row(VP9_COMP *cpi, ThreadData *td,
3328 TileDataEnc *tile_data, int mi_row,
3329 TOKENEXTRA **tp) {
3330 VP9_COMMON *const cm = &cpi->common;
3331 TileInfo *const tile_info = &tile_data->tile_info;
3332 MACROBLOCK *const x = &td->mb;
3333 MACROBLOCKD *const xd = &x->e_mbd;
3334 SPEED_FEATURES *const sf = &cpi->sf;
3335 const int mi_col_start = tile_info->mi_col_start;
3336 const int mi_col_end = tile_info->mi_col_end;
3337 int mi_col;
3338 const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
3339 const int num_sb_cols =
3340 get_num_cols(tile_data->tile_info, MI_BLOCK_SIZE_LOG2);
3341 int sb_col_in_tile;
3343 // Initialize the left context for the new SB row
3344 memset(&xd->left_context, 0, sizeof(xd->left_context));
3345 memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
3347 // Code each SB in the row
3348 for (mi_col = mi_col_start, sb_col_in_tile = 0; mi_col < mi_col_end;
3349 mi_col += MI_BLOCK_SIZE, sb_col_in_tile++) {
3350 const struct segmentation *const seg = &cm->seg;
3351 int dummy_rate;
3352 int64_t dummy_dist;
3353 RD_COST dummy_rdc;
3354 int i;
3355 int seg_skip = 0;
3357 const int idx_str = cm->mi_stride * mi_row + mi_col;
3358 MODE_INFO **mi = cm->mi_grid_visible + idx_str;
3360 (*(cpi->row_mt_sync_read_ptr))(&tile_data->row_mt_sync, sb_row,
3361 sb_col_in_tile);
3363 if (sf->adaptive_pred_interp_filter) {
3364 for (i = 0; i < 64; ++i) td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
3366 for (i = 0; i < 64; ++i) {
3367 td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
3368 td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
3369 td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
3370 td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
3371 }
3372 }
3374 vp9_zero(x->pred_mv);
3375 td->pc_root->index = 0;
3377 if (seg->enabled) {
3378 const uint8_t *const map =
3379 seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
3380 int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
3381 seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
3382 }
3384 x->source_variance = UINT_MAX;
3385 if (sf->partition_search_type == FIXED_PARTITION || seg_skip) {
3386 const BLOCK_SIZE bsize =
3387 seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
3388 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
3389 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
3390 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
3391 &dummy_rate, &dummy_dist, 1, td->pc_root);
3392 } else if (cpi->partition_search_skippable_frame) {
3393 BLOCK_SIZE bsize;
3394 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
3395 bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
3396 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
3397 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
3398 &dummy_rate, &dummy_dist, 1, td->pc_root);
3399 } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
3400 cm->frame_type != KEY_FRAME) {
3401 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
3402 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
3403 &dummy_rate, &dummy_dist, 1, td->pc_root);
3404 } else {
3405 // If required set upper and lower partition size limits
3406 if (sf->auto_min_max_partition_size) {
3407 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
3408 rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
3409 &x->min_partition_size, &x->max_partition_size);
3410 }
3411 rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
3412 &dummy_rdc, INT64_MAX, td->pc_root);
3413 }
3414 (*(cpi->row_mt_sync_write_ptr))(&tile_data->row_mt_sync, sb_row,
3415 sb_col_in_tile, num_sb_cols);
3416 }
3417 }
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->td.mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);

  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);

  vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(xd->above_context[0], 0,
         sizeof(*xd->above_context[0]) * 2 * aligned_mi_cols * MAX_MB_PLANE);
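  // The "2 *" factor: entropy contexts are tracked at 4x4 granularity, so
  // each 8-pixel MI column contributes two context entries per plane.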
  memset(xd->above_seg_context, 0,
         sizeof(*xd->above_seg_context) * aligned_mi_cols);
}
static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) +
            !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
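// Example: with ref_flags == (VP9_LAST_FLAG | VP9_GOLD_FLAG) the sum of the
// three flag tests above is 2, so compound prediction remains a candidate;
// with a single reference flag set the sum is 1 and the function returns 0.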
static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
  int mi_row, mi_col;
  const int mis = cm->mi_stride;
  MODE_INFO **mi_ptr = cm->mi_grid_visible;

  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
      if (mi_ptr[mi_col]->tx_size > max_tx_size)
        mi_ptr[mi_col]->tx_size = max_tx_size;
    }
  }
}
static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return INTRA_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return GOLDEN_FRAME;
  else
    return LAST_FRAME;
}
static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
  if (xd->lossless) return ONLY_4X4;
  if (cpi->common.frame_type == KEY_FRAME && cpi->sf.use_nonrd_pick_mode)
    return ALLOW_16X16;
  if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
    return ALLOW_32X32;
  else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
           cpi->sf.tx_size_search_method == USE_TX_8X8)
    return TX_MODE_SELECT;
  else
    return cpi->common.tx_mode;
}
static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
                                     RD_COST *rd_cost, BLOCK_SIZE bsize,
                                     PICK_MODE_CONTEXT *ctx) {
  if (bsize < BLOCK_16X16)
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
  else
    vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
}
static void nonrd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data,
                                MACROBLOCK *const x, int mi_row, int mi_col,
                                RD_COST *rd_cost, BLOCK_SIZE bsize,
                                PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  BLOCK_SIZE bs = VPXMAX(bsize, BLOCK_8X8);  // processing unit block size
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bs];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bs];
  int plane;

  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  mi = xd->mi[0];
  mi->sb_type = bsize;

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    struct macroblockd_plane *pd = &xd->plane[plane];
    memcpy(a + num_4x4_blocks_wide * plane, pd->above_context,
           (sizeof(a[0]) * num_4x4_blocks_wide) >> pd->subsampling_x);
    memcpy(l + num_4x4_blocks_high * plane, pd->left_context,
           (sizeof(l[0]) * num_4x4_blocks_high) >> pd->subsampling_y);
  }
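  // The mode search below mutates the above/left entropy contexts as it
  // evaluates candidates; the copies saved above are restored after the
  // search so neighboring blocks see contexts untouched by this speculation.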
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
    if (cyclic_refresh_segment_id_boosted(mi->segment_id))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);

  if (cm->frame_type == KEY_FRAME)
    hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
  else if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP))
    set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
  else if (bsize >= BLOCK_8X8)
    vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize, ctx);
  else
    vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col, rd_cost, bsize, ctx);

  duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    struct macroblockd_plane *pd = &xd->plane[plane];
    memcpy(pd->above_context, a + num_4x4_blocks_wide * plane,
           (sizeof(a[0]) * num_4x4_blocks_wide) >> pd->subsampling_x);
    memcpy(pd->left_context, l + num_4x4_blocks_high * plane,
           (sizeof(l[0]) * num_4x4_blocks_high) >> pd->subsampling_y);
  }

  if (rd_cost->rate == INT_MAX) vp9_rd_cost_reset(rd_cost);

  ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}
static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, int mi_row,
                              int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
  MACROBLOCKD *xd = &x->e_mbd;
  int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition = pc_tree->partitioning;
  BLOCK_SIZE subsize = get_subsize(bsize, partition);

  assert(bsize >= BLOCK_8X8);

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  switch (partition) {
    case PARTITION_NONE:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->none.mic;
      *(x->mbmi_ext) = pc_tree->none.mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      break;
    case PARTITION_VERT:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->vertical[0].mic;
      *(x->mbmi_ext) = pc_tree->vertical[0].mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);

      if (mi_col + hbs < cm->mi_cols) {
        set_mode_info_offsets(cm, x, xd, mi_row, mi_col + hbs);
        *(xd->mi[0]) = pc_tree->vertical[1].mic;
        *(x->mbmi_ext) = pc_tree->vertical[1].mbmi_ext;
        duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
      }
      break;
    case PARTITION_HORZ:
      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->horizontal[0].mic;
      *(x->mbmi_ext) = pc_tree->horizontal[0].mbmi_ext;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
      if (mi_row + hbs < cm->mi_rows) {
        set_mode_info_offsets(cm, x, xd, mi_row + hbs, mi_col);
        *(xd->mi[0]) = pc_tree->horizontal[1].mic;
        *(x->mbmi_ext) = pc_tree->horizontal[1].mbmi_ext;
        duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
      }
      break;
    case PARTITION_SPLIT: {
      fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
      fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
                        pc_tree->split[1]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
                        pc_tree->split[2]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
                        pc_tree->split[3]);
      break;
    }
    default: break;
  }
}
// Reset the prediction pixel ready flag recursively.
static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
  pc_tree->none.pred_pixel_ready = 0;
  pc_tree->horizontal[0].pred_pixel_ready = 0;
  pc_tree->horizontal[1].pred_pixel_ready = 0;
  pc_tree->vertical[0].pred_pixel_ready = 0;
  pc_tree->vertical[1].pred_pixel_ready = 0;

  if (bsize > BLOCK_8X8) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
    int i;
    for (i = 0; i < 4; ++i) pred_pixel_ready_reset(pc_tree->split[i], subsize);
  }
}
static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
                                 TileDataEnc *tile_data, TOKENEXTRA **tp,
                                 int mi_row, int mi_col, BLOCK_SIZE bsize,
                                 RD_COST *rd_cost, int do_recon,
                                 int64_t best_rd, PC_TREE *pc_tree) {
  const SPEED_FEATURES *const sf = &cpi->sf;
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  int i;
  BLOCK_SIZE subsize = bsize;
  RD_COST this_rdc, sum_rdc, best_rdc;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + ms >= cm->mi_rows);
  const int force_vert_split = (mi_col + ms >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed =
      !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
  int partition_vert_allowed =
      !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;

  assert(num_8x8_blocks_wide_lookup[bsize] ==
         num_8x8_blocks_high_lookup[bsize]);

  vp9_rd_cost_init(&sum_rdc);
  vp9_rd_cost_reset(&best_rdc);
  best_rdc.rdcost = best_rd;

  // Determine partition types in search according to the speed features.
  // The threshold set here has to be of square block size.
  if (sf->auto_min_max_partition_size) {
    partition_none_allowed &=
        (bsize <= x->max_partition_size && bsize >= x->min_partition_size);
    partition_horz_allowed &=
        ((bsize <= x->max_partition_size && bsize > x->min_partition_size) ||
         force_horz_split);
    partition_vert_allowed &=
        ((bsize <= x->max_partition_size && bsize > x->min_partition_size) ||
         force_vert_split);
    do_split &= bsize > x->min_partition_size;
  }
  if (sf->use_square_partition_only) {
    partition_horz_allowed &= force_horz_split;
    partition_vert_allowed &= force_vert_split;
  }

  ctx->pred_pixel_ready =
      !(partition_vert_allowed || partition_horz_allowed || do_split);

  // PARTITION_NONE
  if (partition_none_allowed) {
    nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize,
                        ctx);
    ctx->mic = *xd->mi[0];
    ctx->mbmi_ext = *x->mbmi_ext;
    ctx->skip_txfm[0] = x->skip_txfm[0];
    ctx->skip = x->skip;

    if (this_rdc.rate != INT_MAX) {
      int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
      this_rdc.rdcost =
          RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
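      // RDCOST folds rate and distortion into one comparable number using
      // the Lagrangian lambda in x->rdmult; in this codebase it is roughly
      // rdmult * rate / 2^8 + (dist << rddiv), so a lower value means a
      // better rate/distortion trade-off.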
      if (this_rdc.rdcost < best_rdc.rdcost) {
        int64_t dist_breakout_thr = sf->partition_search_breakout_thr.dist;
        int64_t rate_breakout_thr = sf->partition_search_breakout_thr.rate;

        dist_breakout_thr >>=
            8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);

        rate_breakout_thr *= num_pels_log2_lookup[bsize];
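        // Scaling sketch: 8 - (b_width_log2 + b_height_log2) is 0 for
        // BLOCK_64X64, 2 for BLOCK_32X32 and 4 for BLOCK_16X16, so the
        // distortion threshold is specified at 64x64 scale and drops by 4x
        // for each quartering of the block area. The rate threshold scales
        // with num_pels_log2_lookup[bsize] (12 for 64x64, 10 for 32x32).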
        best_rdc = this_rdc;
        if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;

        if (!x->e_mbd.lossless && this_rdc.rate < rate_breakout_thr &&
            this_rdc.dist < dist_breakout_thr) {
          do_split = 0;
          do_rect = 0;
        }
      }
    }
  }

  // store estimated motion vector
  store_pred_mv(x, ctx);

  // PARTITION_SPLIT
  if (do_split) {
    int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
    sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
    sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
      const int x_idx = (i & 1) * ms;
      const int y_idx = (i >> 1) * ms;

      if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
        continue;
      load_pred_mv(x, ctx);
      nonrd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
                           mi_col + x_idx, subsize, &this_rdc, 0,
                           best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);

      if (this_rdc.rate == INT_MAX) {
        vp9_rd_cost_reset(&sum_rdc);
      } else {
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost += this_rdc.rdcost;
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      pc_tree->partitioning = PARTITION_SPLIT;
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (sf->less_rectangular_check) do_rect &= !partition_none_allowed;
    }
  }
  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (sf->adaptive_motion_search) load_pred_mv(x, ctx);
    pc_tree->horizontal[0].pred_pixel_ready = 1;
    nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                        &pc_tree->horizontal[0]);

    pc_tree->horizontal[0].mic = *xd->mi[0];
    pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
    pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
    pc_tree->horizontal[0].skip = x->skip;

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
      load_pred_mv(x, ctx);
      pc_tree->horizontal[1].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col, &this_rdc,
                          subsize, &pc_tree->horizontal[1]);

      pc_tree->horizontal[1].mic = *xd->mi[0];
      pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
      pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->horizontal[1].skip = x->skip;

      if (this_rdc.rate == INT_MAX) {
        vp9_rd_cost_reset(&sum_rdc);
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost =
            RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      pc_tree->partitioning = PARTITION_HORZ;
    } else {
      pred_pixel_ready_reset(pc_tree, bsize);
    }
  }

  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);
    if (sf->adaptive_motion_search) load_pred_mv(x, ctx);
    pc_tree->vertical[0].pred_pixel_ready = 1;
    nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                        &pc_tree->vertical[0]);
    pc_tree->vertical[0].mic = *xd->mi[0];
    pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
    pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
    pc_tree->vertical[0].skip = x->skip;

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
      load_pred_mv(x, ctx);
      pc_tree->vertical[1].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms, &this_rdc,
                          subsize, &pc_tree->vertical[1]);
      pc_tree->vertical[1].mic = *xd->mi[0];
      pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
      pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->vertical[1].skip = x->skip;

      if (this_rdc.rate == INT_MAX) {
        vp9_rd_cost_reset(&sum_rdc);
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost =
            RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      pc_tree->partitioning = PARTITION_VERT;
    } else {
      pred_pixel_ready_reset(pc_tree, bsize);
    }
  }

  *rd_cost = best_rdc;

  if (best_rdc.rate == INT_MAX) {
    vp9_rd_cost_reset(rd_cost);
    return;
  }

  // update mode info array
  fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, pc_tree);

  if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);
    encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
                 pc_tree);
  }

  if (bsize == BLOCK_64X64 && do_recon) {
    assert(tp_orig < *tp);
    assert(best_rdc.rate < INT_MAX);
    assert(best_rdc.dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}
static void nonrd_select_partition(VP9_COMP *cpi, ThreadData *td,
                                   TileDataEnc *tile_data, MODE_INFO **mi,
                                   TOKENEXTRA **tp, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize, int output_enabled,
                                   RD_COST *rd_cost, PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  RD_COST this_rdc;

  vp9_rd_cost_reset(&this_rdc);
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
  partition = partition_lookup[bsl][subsize];
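  // partition_lookup inverts get_subsize(): given this block size (via bsl)
  // and the sub-block size stored in the mode info, it recovers the
  // partition type. E.g. inside a 64x64 block, sb_type 64x64 means
  // PARTITION_NONE, 64x32 PARTITION_HORZ, 32x64 PARTITION_VERT, and a
  // smaller square size PARTITION_SPLIT.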
  if (bsize == BLOCK_32X32 && subsize == BLOCK_32X32) {
    x->max_partition_size = BLOCK_32X32;
    x->min_partition_size = BLOCK_16X16;
    nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
                         0, INT64_MAX, pc_tree);
  } else if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
             subsize >= BLOCK_16X16) {
    x->max_partition_size = BLOCK_32X32;
    x->min_partition_size = BLOCK_8X8;
    nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
                         0, INT64_MAX, pc_tree);
  } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
    x->max_partition_size = BLOCK_16X16;
    x->min_partition_size = BLOCK_8X8;
    nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost,
                         0, INT64_MAX, pc_tree);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        pc_tree->none.pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
                            &pc_tree->none);
        pc_tree->none.mic = *xd->mi[0];
        pc_tree->none.mbmi_ext = *x->mbmi_ext;
        pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
        pc_tree->none.skip = x->skip;
        break;
      case PARTITION_VERT:
        pc_tree->vertical[0].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
                            &pc_tree->vertical[0]);
        pc_tree->vertical[0].mic = *xd->mi[0];
        pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
        pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->vertical[0].skip = x->skip;
        if (mi_col + hbs < cm->mi_cols) {
          pc_tree->vertical[1].pred_pixel_ready = 1;
          nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
                              &this_rdc, subsize, &pc_tree->vertical[1]);
          pc_tree->vertical[1].mic = *xd->mi[0];
          pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
          pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
          pc_tree->vertical[1].skip = x->skip;
          if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
              rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
            rd_cost->rate += this_rdc.rate;
            rd_cost->dist += this_rdc.dist;
          }
        }
        break;
      case PARTITION_HORZ:
        pc_tree->horizontal[0].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize,
                            &pc_tree->horizontal[0]);
        pc_tree->horizontal[0].mic = *xd->mi[0];
        pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
        pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->horizontal[0].skip = x->skip;
        if (mi_row + hbs < cm->mi_rows) {
          pc_tree->horizontal[1].pred_pixel_ready = 1;
          nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
                              &this_rdc, subsize, &pc_tree->horizontal[1]);
          pc_tree->horizontal[1].mic = *xd->mi[0];
          pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
          pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
          pc_tree->horizontal[1].skip = x->skip;
          if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
              rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
            rd_cost->rate += this_rdc.rate;
            rd_cost->dist += this_rdc.dist;
          }
        }
        break;
      case PARTITION_SPLIT:
        subsize = get_subsize(bsize, PARTITION_SPLIT);
        nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                               subsize, output_enabled, rd_cost,
                               pc_tree->split[0]);
        nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp, mi_row,
                               mi_col + hbs, subsize, output_enabled, &this_rdc,
                               pc_tree->split[1]);
        if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
            rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
          rd_cost->rate += this_rdc.rate;
          rd_cost->dist += this_rdc.dist;
        }
        nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis, tp,
                               mi_row + hbs, mi_col, subsize, output_enabled,
                               &this_rdc, pc_tree->split[2]);
        if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
            rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
          rd_cost->rate += this_rdc.rate;
          rd_cost->dist += this_rdc.dist;
        }
        nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
                               mi_row + hbs, mi_col + hbs, subsize,
                               output_enabled, &this_rdc, pc_tree->split[3]);
        if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
            rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
          rd_cost->rate += this_rdc.rate;
          rd_cost->dist += this_rdc.dist;
        }
        break;
      default: assert(0 && "Invalid partition type."); break;
    }
  }

  if (bsize == BLOCK_64X64 && output_enabled)
    encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
}
static void nonrd_use_partition(VP9_COMP *cpi, ThreadData *td,
                                TileDataEnc *tile_data, MODE_INFO **mi,
                                TOKENEXTRA **tp, int mi_row, int mi_col,
                                BLOCK_SIZE bsize, int output_enabled,
                                RD_COST *dummy_cost, PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4;
  partition = partition_lookup[bsl][subsize];

  if (output_enabled && bsize != BLOCK_4X4) {
    int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    td->counts->partition[ctx][partition]++;
  }

  switch (partition) {
    case PARTITION_NONE:
      pc_tree->none.pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                          subsize, &pc_tree->none);
      pc_tree->none.mic = *xd->mi[0];
      pc_tree->none.mbmi_ext = *x->mbmi_ext;
      pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
      pc_tree->none.skip = x->skip;
      encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                  subsize, &pc_tree->none);
      break;
    case PARTITION_VERT:
      pc_tree->vertical[0].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                          subsize, &pc_tree->vertical[0]);
      pc_tree->vertical[0].mic = *xd->mi[0];
      pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
      pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->vertical[0].skip = x->skip;
      encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                  subsize, &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        pc_tree->vertical[1].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, dummy_cost,
                            subsize, &pc_tree->vertical[1]);
        pc_tree->vertical[1].mic = *xd->mi[0];
        pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
        pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->vertical[1].skip = x->skip;
        encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
                    output_enabled, subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      pc_tree->horizontal[0].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                          subsize, &pc_tree->horizontal[0]);
      pc_tree->horizontal[0].mic = *xd->mi[0];
      pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
      pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->horizontal[0].skip = x->skip;
      encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                  subsize, &pc_tree->horizontal[0]);

      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        pc_tree->horizontal[1].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, dummy_cost,
                            subsize, &pc_tree->horizontal[1]);
        pc_tree->horizontal[1].mic = *xd->mi[0];
        pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
        pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->horizontal[1].skip = x->skip;
        encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
                    output_enabled, subsize, &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      if (bsize == BLOCK_8X8) {
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                            subsize, pc_tree->leaf_split[0]);
        encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                    subsize, pc_tree->leaf_split[0]);
      } else {
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, subsize,
                            output_enabled, dummy_cost, pc_tree->split[0]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp, mi_row,
                            mi_col + hbs, subsize, output_enabled, dummy_cost,
                            pc_tree->split[1]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
                            mi_row + hbs, mi_col, subsize, output_enabled,
                            dummy_cost, pc_tree->split[2]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
                            mi_row + hbs, mi_col + hbs, subsize, output_enabled,
                            dummy_cost, pc_tree->split[3]);
      }
      break;
    default: assert(0 && "Invalid partition type."); break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
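// Note on the context update above: for PARTITION_SPLIT at sizes above 8x8
// each recursive call updates its own quadrant, so updating again here would
// double-count; at 8x8 the split is handled in place (leaf_split), so this
// call is the only update.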
static void encode_nonrd_sb_row(VP9_COMP *cpi, ThreadData *td,
                                TileDataEnc *tile_data, int mi_row,
                                TOKENEXTRA **tp) {
  SPEED_FEATURES *const sf = &cpi->sf;
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mi_col_start = tile_info->mi_col_start;
  const int mi_col_end = tile_info->mi_col_end;
  int mi_col;
  const int sb_row = mi_row >> MI_BLOCK_SIZE_LOG2;
  const int num_sb_cols =
      get_num_cols(tile_data->tile_info, MI_BLOCK_SIZE_LOG2);
  int sb_col_in_tile;

  // Initialize the left context for the new SB row
  memset(&xd->left_context, 0, sizeof(xd->left_context));
  memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = mi_col_start, sb_col_in_tile = 0; mi_col < mi_col_end;
       mi_col += MI_BLOCK_SIZE, ++sb_col_in_tile) {
    const struct segmentation *const seg = &cm->seg;
    RD_COST dummy_rdc;
    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;
    PARTITION_SEARCH_TYPE partition_search_type = sf->partition_search_type;
    BLOCK_SIZE bsize = BLOCK_64X64;
    int seg_skip = 0;

    (*(cpi->row_mt_sync_read_ptr))(&tile_data->row_mt_sync, sb_row,
                                   sb_col_in_tile);

    x->source_variance = UINT_MAX;
    vp9_zero(x->pred_mv);
    vp9_rd_cost_init(&dummy_rdc);
    x->color_sensitivity[0] = 0;
    x->color_sensitivity[1] = 0;
    x->sb_is_skin = 0;
    x->skip_low_source_sad = 0;
    x->lowvar_highsumdiff = 0;
    x->content_state_sb = 0;

    if (seg->enabled) {
      const uint8_t *const map =
          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
      int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
      seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
      if (seg_skip) partition_search_type = FIXED_PARTITION;
    }

    if (cpi->compute_source_sad_onepass && cpi->sf.use_source_sad) {
      int shift = cpi->Source->y_stride * (mi_row << 3) + (mi_col << 3);
      int sb_offset2 = ((cm->mi_cols + 7) >> 3) * (mi_row >> 3) + (mi_col >> 3);
      avg_source_sad(cpi, x, shift, sb_offset2);
    }
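    // Offsets above: an MI unit is 8 pixels, so (mi_row << 3, mi_col << 3)
    // is this SB's pixel position in the source frame, and "shift" its
    // offset into the luma buffer. sb_offset2 indexes per-64x64-superblock
    // arrays: (cm->mi_cols + 7) >> 3 is the number of SB columns, and
    // (mi_row >> 3, mi_col >> 3) the SB coordinates.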
    // Set the partition type of the 64X64 block
    switch (partition_search_type) {
      case VAR_BASED_PARTITION:
        // TODO(jingning, marpan): The mode decision and encoding process
        // support both intra and inter sub8x8 block coding for RTC mode.
        // Tune the thresholds accordingly to use sub8x8 block coding for
        // coding performance improvement.
        choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case SOURCE_VAR_BASED_PARTITION:
        set_source_var_based_partition(cpi, tile_info, x, mi, mi_row, mi_col);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case FIXED_PARTITION:
        if (!seg_skip) bsize = sf->always_this_block_size;
        set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case REFERENCE_PARTITION:
        set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
        // Use nonrd_pick_partition on scene-cut for VBR mode.
        // nonrd_pick_partition does not support 4x4 partition, so avoid it
        // on key frame for now.
        if ((cpi->oxcf.rc_mode == VPX_VBR && cpi->rc.high_source_sad &&
             cm->frame_type != KEY_FRAME)) {
          // Use lower max_partition_size for low resolutions.
          if (cm->width <= 352 && cm->height <= 288)
            x->max_partition_size = BLOCK_32X32;
          else
            x->max_partition_size = BLOCK_64X64;
          x->min_partition_size = BLOCK_8X8;
          nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
                               BLOCK_64X64, &dummy_rdc, 1, INT64_MAX,
                               td->pc_root);
        } else {
          choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
          // TODO(marpan): Seems like nonrd_select_partition does not support
          // 4x4 partition. Since 4x4 is used on key frame, use this switch
          // for now.
          if (cm->frame_type == KEY_FRAME)
            nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                                BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
          else
            nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                                   BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        }
        break;
      default: assert(0); break;
    }

    (*(cpi->row_mt_sync_write_ptr))(&tile_data->row_mt_sync, sb_row,
                                    sb_col_in_tile, num_sb_cols);
  }
}
// end RTC play code
static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
  const SPEED_FEATURES *const sf = &cpi->sf;
  const VP9_COMMON *const cm = &cpi->common;

  const uint8_t *src = cpi->Source->y_buffer;
  const uint8_t *last_src = cpi->Last_Source->y_buffer;
  const int src_stride = cpi->Source->y_stride;
  const int last_stride = cpi->Last_Source->y_stride;

  // Pick cutoff threshold
  const int cutoff = (VPXMIN(cm->width, cm->height) >= 720)
                         ? (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100)
                         : (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
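  // The cutoff is a percentage of the frame's 16x16 macroblock count, with
  // separate percentages for frames at or above 720p and below. E.g. a
  // 1280x720 frame has (1280 / 16) * (720 / 16) = 80 * 45 = 3600 MBs.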
  DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]);
  diff *var16 = cpi->source_diff_var;

  int sum = 0;
  int i, j;

  memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));

  for (i = 0; i < cm->mb_rows; i++) {
    for (j = 0; j < cm->mb_cols; j++) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        switch (cm->bit_depth) {
          case VPX_BITS_8:
            vpx_highbd_8_get16x16var(src, src_stride, last_src, last_stride,
                                     &var16->sse, &var16->sum);
            break;
          case VPX_BITS_10:
            vpx_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          case VPX_BITS_12:
            vpx_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          default:
            assert(0 &&
                   "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
                   " or VPX_BITS_12");
            return -1;
        }
      } else {
        vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse,
                        &var16->sum);
      }
#else
      vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse,
                      &var16->sum);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      var16->var = var16->sse - (((uint32_t)var16->sum * var16->sum) >> 8);
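      // A 16x16 block has 256 samples, so (sum * sum) >> 8 is sum^2 / N:
      // the line above is the identity sum_sq - (sum^2) / N, i.e. N times
      // the variance of the source-vs-last-source difference for this MB.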
      if (var16->var >= VAR_HIST_MAX_BG_VAR)
        hist[VAR_HIST_BINS - 1]++;
      else
        hist[var16->var / VAR_HIST_FACTOR]++;

      src += 16;
      last_src += 16;
      var16++;
    }

    src = src - cm->mb_cols * 16 + 16 * src_stride;
    last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
  }

  cpi->source_var_thresh = 0;

  if (hist[VAR_HIST_BINS - 1] < cutoff) {
    for (i = 0; i < VAR_HIST_BINS - 1; i++) {
      sum += hist[i];

      if (sum > cutoff) {
        cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
        return 0;
      }
    }
  }

  return sf->search_type_check_frequency;
}
static void source_var_based_partition_search_method(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;

  if (cm->frame_type == KEY_FRAME) {
    // For key frame, use SEARCH_PARTITION.
    sf->partition_search_type = SEARCH_PARTITION;
  } else if (cm->intra_only) {
    sf->partition_search_type = FIXED_PARTITION;
  } else {
    if (cm->last_width != cm->width || cm->last_height != cm->height) {
      if (cpi->source_diff_var) vpx_free(cpi->source_diff_var);

      CHECK_MEM_ERROR(cm, cpi->source_diff_var,
                      vpx_calloc(cm->MBs, sizeof(diff)));
    }

    if (!cpi->frames_till_next_var_check)
      cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);

    if (cpi->frames_till_next_var_check > 0) {
      sf->partition_search_type = FIXED_PARTITION;
      cpi->frames_till_next_var_check--;
    }
  }
}
static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) {
  unsigned int intra_count = 0, inter_count = 0;
  int j;

  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
    intra_count += td->counts->intra_inter[j][0];
    inter_count += td->counts->intra_inter[j][1];
  }

  return (intra_count << 2) < inter_count && cm->frame_type != KEY_FRAME &&
         cm->show_frame;
}
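// The shift makes this "intra under ~20% of intra+inter decisions":
// (intra << 2) < inter is intra * 4 < inter, i.e. intra < (intra + inter) / 5,
// so skip-encode is only enabled on mostly-inter, shown, non-key frames.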
void vp9_init_tile_data(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;
  TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
  TOKENLIST *tplist = cpi->tplist[0][0];
  int tile_tok = 0;
  int tplist_count = 0;

  if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
    if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
    CHECK_MEM_ERROR(cm, cpi->tile_data, vpx_malloc(tile_cols * tile_rows *
                                                   sizeof(*cpi->tile_data)));
    cpi->allocated_tiles = tile_cols * tile_rows;

    for (tile_row = 0; tile_row < tile_rows; ++tile_row)
      for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
        TileDataEnc *tile_data =
            &cpi->tile_data[tile_row * tile_cols + tile_col];
        int i, j;
        for (i = 0; i < BLOCK_SIZES; ++i) {
          for (j = 0; j < MAX_MODES; ++j) {
            tile_data->thresh_freq_fact[i][j] = RD_THRESH_INIT_FACT;
            tile_data->mode_map[i][j] = j;
          }
        }
#if CONFIG_MULTITHREAD
        tile_data->row_base_thresh_freq_fact = NULL;
#endif
      }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
      TileInfo *tile_info = &this_tile->tile_info;
      vp9_tile_init(tile_info, cm, tile_row, tile_col);

      cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
      pre_tok = cpi->tile_tok[tile_row][tile_col];
      tile_tok = allocated_tokens(*tile_info);

      cpi->tplist[tile_row][tile_col] = tplist + tplist_count;
      tplist = cpi->tplist[tile_row][tile_col];
      tplist_count = get_num_vert_units(*tile_info, MI_BLOCK_SIZE_LOG2);
    }
  }
}
void vp9_encode_sb_row(VP9_COMP *cpi, ThreadData *td, int tile_row,
                       int tile_col, int mi_row) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
  const TileInfo *const tile_info = &this_tile->tile_info;
  TOKENEXTRA *tok = NULL;
  int tile_sb_row;
  int tile_mb_cols = (tile_info->mi_col_end - tile_info->mi_col_start + 1) >> 1;

  tile_sb_row = mi_cols_aligned_to_sb(mi_row - tile_info->mi_row_start) >>
                MI_BLOCK_SIZE_LOG2;
  get_start_tok(cpi, tile_row, tile_col, mi_row, &tok);
  cpi->tplist[tile_row][tile_col][tile_sb_row].start = tok;

  if (cpi->sf.use_nonrd_pick_mode)
    encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
  else
    encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);

  cpi->tplist[tile_row][tile_col][tile_sb_row].stop = tok;
  cpi->tplist[tile_row][tile_col][tile_sb_row].count =
      (unsigned int)(cpi->tplist[tile_row][tile_col][tile_sb_row].stop -
                     cpi->tplist[tile_row][tile_col][tile_sb_row].start);
  assert(tok - cpi->tplist[tile_row][tile_col][tile_sb_row].start <=
         get_token_alloc(MI_BLOCK_SIZE >> 1, tile_mb_cols));
}
void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td, int tile_row,
                     int tile_col) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
  const TileInfo *const tile_info = &this_tile->tile_info;
  const int mi_row_start = tile_info->mi_row_start;
  const int mi_row_end = tile_info->mi_row_end;
  int mi_row;

  for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += MI_BLOCK_SIZE)
    vp9_encode_sb_row(cpi, td, tile_row, tile_col, mi_row);
}
static void encode_tiles(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;

  vp9_init_tile_data(cpi);

  for (tile_row = 0; tile_row < tile_rows; ++tile_row)
    for (tile_col = 0; tile_col < tile_cols; ++tile_col)
      vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
}
#if CONFIG_FP_MB_STATS
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
                            VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
  uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
                         cm->current_video_frame * cm->MBs * sizeof(uint8_t);

  if (mb_stats_in > firstpass_mb_stats->mb_stats_end) return EOF;

  *this_frame_mb_stats = mb_stats_in;

  return 1;
}
#endif
static void encode_frame_internal(VP9_COMP *cpi) {
  SPEED_FEATURES *const sf = &cpi->sf;
  ThreadData *const td = &cpi->td;
  MACROBLOCK *const x = &td->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;

  xd->mi = cm->mi_grid_visible;
  xd->mi[0] = cm->mi;

  vp9_zero(*td->counts);
  vp9_zero(cpi->td.rd_counts);

  xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;

#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth)
    x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4;
  else
    x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
  x->highbd_itxm_add =
      xd->lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add;
#else
  x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;

  if (xd->lossless) x->optimize = 0;

  cm->tx_mode = select_tx_mode(cpi, xd);

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi);
  vp9_initialize_me_consts(cpi, x, cm->base_qindex);
  init_encode_frame_mb_context(cpi);
  cm->use_prev_frame_mvs =
      !cm->error_resilient_mode && cm->width == cm->last_width &&
      cm->height == cm->last_height && !cm->intra_only && cm->last_show_frame;
  // Special case: set prev_mi to NULL when the previous mode info
  // context cannot be used.
  cm->prev_mi =
      cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;

  x->quant_fp = cpi->sf.use_quant_fp;
  vp9_zero(x->skip_txfm);
  if (sf->use_nonrd_pick_mode) {
    // Initialize internal buffer pointers for rtc coding, where non-RD
    // mode decision is used and hence no buffer pointer swap needed.
    int i;
    struct macroblock_plane *const p = x->plane;
    struct macroblockd_plane *const pd = xd->plane;
    PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none;

    for (i = 0; i < MAX_MB_PLANE; ++i) {
      p[i].coeff = ctx->coeff_pbuf[i][0];
      p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
      pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
      p[i].eobs = ctx->eobs_pbuf[i][0];
    }
    vp9_zero(x->zcoeff_blk);

    if (cm->frame_type != KEY_FRAME && cpi->rc.frames_since_golden == 0 &&
        !(cpi->oxcf.lag_in_frames > 0 && cpi->oxcf.rc_mode == VPX_VBR) &&
        !cpi->use_svc)
      cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);

    if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
      source_var_based_partition_search_method(cpi);
  }

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

#if CONFIG_FP_MB_STATS
    if (cpi->use_fp_mb_stats) {
      input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
                       &cpi->twopass.this_frame_mb_stats);
    }
#endif

    if (!cpi->row_mt) {
      cpi->row_mt_sync_read_ptr = vp9_row_mt_sync_read_dummy;
      cpi->row_mt_sync_write_ptr = vp9_row_mt_sync_write_dummy;
      // If allowed, encode tiles in parallel, one thread per tile, when
      // row-based multi-threading is disabled.
      if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
        vp9_encode_tiles_mt(cpi);
      else
        encode_tiles(cpi);
    } else {
      cpi->row_mt_sync_read_ptr = vp9_row_mt_sync_read;
      cpi->row_mt_sync_write_ptr = vp9_row_mt_sync_write;
      vp9_encode_tiles_row_mt(cpi);
    }

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  sf->skip_encode_frame =
      sf->skip_encode_sb ? get_skip_encode_frame(cm, td) : 0;

#if 0
  // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
static INTERP_FILTER get_interp_filter(
    const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
  if (!is_alt_ref && threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SMOOTH;
  } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
             threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SHARP;
  } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP;
  } else {
    return SWITCHABLE;
  }
}
static int compute_frame_aq_offset(struct VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
  struct segmentation *const seg = &cm->seg;

  int mi_row, mi_col;
  int sum_delta = 0;
  int qdelta_index;
  int segment_id;

  for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
    MODE_INFO **mi_8x8 = mi_8x8_ptr;
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col++, mi_8x8++) {
      segment_id = mi_8x8[0]->segment_id;
      qdelta_index = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
      sum_delta += qdelta_index;
    }
    mi_8x8_ptr += cm->mi_stride;
  }

  return sum_delta / (cm->mi_rows * cm->mi_cols);
}
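// The value returned above is the mean SEG_LVL_ALT_Q delta over all 8x8 MI
// units; vp9_encode_frame() stores it in cm->seg.aq_av_offset as the frame's
// average applied AQ offset.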
void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;

  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
  if (!frame_is_intra_only(cm)) {
    if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
         cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
        (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
         cm->ref_frame_sign_bias[LAST_FRAME])) {
      cpi->allow_comp_inter_inter = 0;
    } else {
      cpi->allow_comp_inter_inter = 1;
      cm->comp_fixed_ref = ALTREF_FRAME;
      cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = GOLDEN_FRAME;
    }
  }

  if (cpi->sf.frame_parameter_update) {
    int i;
    RD_OPT *const rd_opt = &cpi->rd;
    FRAME_COUNTS *counts = cpi->td.counts;
    RD_COUNTS *const rdc = &cpi->td.rd_counts;

    // This code does a single RD pass over the whole frame assuming
    // either compound, single or hybrid prediction as per whatever has
    // worked best for that type of frame in the past.
    // It also predicts whether another coding mode would have worked
    // better than this coding mode. If that is the case, it remembers
    // that for subsequent frames.
    // It also does the same analysis for transform size selection.
    const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
    int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
    int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
    const int is_alt_ref = frame_type == ALTREF_FRAME;

    /* prediction (compound, single or hybrid) mode selection */
    if (is_alt_ref || !cpi->allow_comp_inter_inter)
      cm->reference_mode = SINGLE_REFERENCE;
    else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
             mode_thrs[COMPOUND_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT] &&
             check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
      cm->reference_mode = COMPOUND_REFERENCE;
    else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
      cm->reference_mode = SINGLE_REFERENCE;
    else
      cm->reference_mode = REFERENCE_MODE_SELECT;

    if (cm->interp_filter == SWITCHABLE)
      cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);

    encode_frame_internal(cpi);

    for (i = 0; i < REFERENCE_MODES; ++i)
      mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;
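    // Both loops above keep exponentially decaying running averages: each
    // frame the stored threshold moves halfway toward this frame's per-MB
    // rd-cost difference, so older frames' influence halves every update.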
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
        single_count_zero += counts->comp_inter[i][0];
        comp_count_zero += counts->comp_inter[i][1];
      }

      if (comp_count_zero == 0) {
        cm->reference_mode = SINGLE_REFERENCE;
        vp9_zero(counts->comp_inter);
      } else if (single_count_zero == 0) {
        cm->reference_mode = COMPOUND_REFERENCE;
        vp9_zero(counts->comp_inter);
      }
    }

    if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        count4x4 += counts->tx.p32x32[i][TX_4X4];
        count4x4 += counts->tx.p16x16[i][TX_4X4];
        count4x4 += counts->tx.p8x8[i][TX_4X4];

        count8x8_lp += counts->tx.p32x32[i][TX_8X8];
        count8x8_lp += counts->tx.p16x16[i][TX_8X8];
        count8x8_8x8p += counts->tx.p8x8[i][TX_8X8];

        count16x16_16x16p += counts->tx.p16x16[i][TX_16X16];
        count16x16_lp += counts->tx.p32x32[i][TX_16X16];
        count32x32 += counts->tx.p32x32[i][TX_32X32];
      }
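      // Counter semantics: the pNxN tables are indexed by the largest
      // transform the block allows. count8x8_8x8p tallies TX_8X8 picks where
      // 8x8 was already the maximum, while count8x8_lp (and count16x16_lp)
      // tally the same size chosen inside contexts that allowed a larger
      // transform; that distinction drives the frame-level choices below.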
      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
        reset_skip_tx_size(cm, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
                 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
        cm->tx_mode = ONLY_4X4;
        reset_skip_tx_size(cm, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_16X16;
        reset_skip_tx_size(cm, TX_16X16);
      }
    }
  } else {
    cm->reference_mode = SINGLE_REFERENCE;
    encode_frame_internal(cpi);
  }

  // If segmented AQ is enabled compute the average AQ weighting.
  if (cm->seg.enabled && (cpi->oxcf.aq_mode != NO_AQ) &&
      (cm->seg.update_map || cm->seg.update_data)) {
    cm->seg.aq_av_offset = compute_frame_aq_offset(cpi);
  }
}
static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
  const PREDICTION_MODE y_mode = mi->mode;
  const PREDICTION_MODE uv_mode = mi->uv_mode;
  const BLOCK_SIZE bsize = mi->sb_type;

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_h)
      for (idx = 0; idx < 2; idx += num_4x4_w)
        ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }

  ++counts->uv_mode[y_mode][uv_mode];
}
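// Motion vectors are stored in 1/8-pel units, so the |mv| < 8 test in
// update_zeromv_cnt() below treats anything moving less than one full pixel
// as (near-)zero motion.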
static void update_zeromv_cnt(VP9_COMP *const cpi, const MODE_INFO *const mi,
                              int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  MV mv = mi->mv[0].as_mv;
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
  const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
  const int block_index = mi_row * cm->mi_cols + mi_col;
  int x, y;
  for (y = 0; y < ymis; y++)
    for (x = 0; x < xmis; x++) {
      int map_offset = block_index + y * cm->mi_cols + x;
      if (is_inter_block(mi) && mi->segment_id <= CR_SEGMENT_ID_BOOST2) {
        if (abs(mv.row) < 8 && abs(mv.col) < 8) {
          if (cpi->consec_zero_mv[map_offset] < 255)
            cpi->consec_zero_mv[map_offset]++;
        } else {
          cpi->consec_zero_mv[map_offset] = 0;
        }
      }
    }
}
static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                              int output_enabled, int mi_row, int mi_col,
                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  const int seg_skip =
      segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP);
  x->skip_recode = !x->select_tx_size && mi->sb_type >= BLOCK_8X8 &&
                   cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
                   cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
                   cpi->sf.allow_skip_recode;

  if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
    memset(x->skip_txfm, 0, sizeof(x->skip_txfm));

  x->skip_optimize = ctx->is_coded;
  ctx->is_coded = 1;
  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
                    x->q_index < QIDX_SKIP_THRESH);

  if (x->skip_encode) return;

  if (!is_inter_block(mi)) {
    int plane;
#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
        (xd->above_mi == NULL || xd->left_mi == NULL) &&
        need_top_left[mi->uv_mode])
      assert(0);
#endif  // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
    mi->skip = 1;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
      vp9_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane, 1);
    if (output_enabled) sum_intra_stats(td->counts, mi);
    vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip,
                    VPXMAX(bsize, BLOCK_8X8));
  } else {
    int ref;
    const int is_compound = has_second_ref(mi);
    set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
    for (ref = 0; ref < 1 + is_compound; ++ref) {
      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mi->ref_frame[ref]);
      assert(cfg != NULL);
      vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                           &xd->block_refs[ref]->sf);
    }
    if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
      vp9_build_inter_predictors_sby(xd, mi_row, mi_col,
                                     VPXMAX(bsize, BLOCK_8X8));

    vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col,
                                    VPXMAX(bsize, BLOCK_8X8));

    vp9_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
    vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip,
                    VPXMAX(bsize, BLOCK_8X8));
  }

  if (seg_skip) {
    assert(mi->skip);
  }

  if (output_enabled) {
    if (cm->tx_mode == TX_MODE_SELECT && mi->sb_type >= BLOCK_8X8 &&
        !(is_inter_block(mi) && mi->skip)) {
      ++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd),
                      &td->counts->tx)[mi->tx_size];
    } else {
      // The new intra coding scheme requires no change of transform size
      if (is_inter_block(mi)) {
        mi->tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                             max_txsize_lookup[bsize]);
      } else {
        mi->tx_size = (bsize >= BLOCK_8X8) ? mi->tx_size : TX_4X4;
      }
    }

    ++td->counts->tx.tx_totals[mi->tx_size];
    ++td->counts->tx.tx_totals[get_uv_tx_size(mi, &xd->plane[1])];
    if (cm->seg.enabled && cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_update_sb_postencode(cpi, mi, mi_row, mi_col, bsize);
    if (cpi->oxcf.pass == 0 && cpi->svc.temporal_layer_id == 0)
      update_zeromv_cnt(cpi, mi, mi_row, mi_col, bsize);
  }
}