/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);
// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};

#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};

static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};

static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
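
/* Illustrative sketch (not part of the original source): a plain-C view of
 * what the vf() call above computes when the "reference" is the constant-128
 * VP9_VAR_OFFS buffer with stride 0 -- the variance of the source block about
 * 128, which the caller then normalizes to a per-pixel value. */
#if 0
static unsigned int example_block_variance_vs_128(const uint8_t *src,
                                                  int stride, int w, int h) {
  int64_t sse = 0, sum = 0;
  int r, c;
  for (r = 0; r < h; ++r) {
    for (c = 0; c < w; ++c) {
      const int diff = src[r * stride + c] - 128;
      sum += diff;
      sse += diff * diff;
    }
  }
  /* The vpx variance kernels return sse - sum^2 / N, i.e. N times the
   * sample variance for an N-pixel block. */
  return (unsigned int)(sse - ((sum * sum) / (w * h)));
}
#endif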
#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), 0, &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), 0, &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), 0, &sse);
      break;
  }
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse, var;
  uint8_t *last_y;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
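
/* Worked example (illustrative): ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs])
 * turns the block-level figure returned by vf() into a per-pixel value. For
 * BLOCK_64X64, num_pels_log2_lookup[bs] is 12 (64 * 64 = 2^12), so the return
 * value is (var + 2048) >> 12, i.e. var / 4096 rounded to nearest. */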
152 static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
155 unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
168 static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
172 unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi + idx_str;
  xd->mi[0].src_mi = &xd->mi[0];
}
194 static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
195 MACROBLOCK *const x, int mi_row, int mi_col,
197 VP9_COMMON *const cm = &cpi->common;
198 MACROBLOCKD *const xd = &x->e_mbd;
200 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
201 const int mi_height = num_8x8_blocks_high_lookup[bsize];
202 const struct segmentation *const seg = &cm->seg;
204 set_skip_context(xd, mi_row, mi_col);
206 set_mode_info_offsets(cm, xd, mi_row, mi_col);
208 mbmi = &xd->mi[0].src_mi->mbmi;
210 // Set up destination pointers.
211 vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
213 // Set up limit values for MV components.
  // MVs beyond this range do not produce a new or different prediction block.
215 x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
216 x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
217 x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
218 x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
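
  /* Worked example (illustrative): MI_SIZE is 8 pixels, so for a 64x64 block
   * at mi_row == 0 the limits above become (assuming VP9_INTERP_EXTEND == 4)
   *   mv_row_min = -((0 + 8) * 8 + 4) = -68
   *   mv_row_max = (cm->mi_rows - 0) * 8 + 4
   * i.e. a candidate MV may point anywhere that still leaves room for the
   * interpolation filter taps inside the padded reference frame; anything
   * further away cannot produce a different prediction block. */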
220 // Set up distance of MB to edge of frame in 1/8th pel units.
221 assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
222 set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
223 cm->mi_rows, cm->mi_cols);
225 // Set up source buffers.
226 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
229 x->rddiv = cpi->rd.RDDIV;
230 x->rdmult = cpi->rd.RDMULT;
234 if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
235 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
236 : cm->last_frame_seg_map;
237 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
239 vp9_init_plane_quantizers(cpi, x);
241 x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
243 mbmi->segment_id = 0;
244 x->encode_breakout = cpi->encode_breakout;
248 static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
249 int mi_row, int mi_col,
251 const int block_width = num_8x8_blocks_wide_lookup[bsize];
252 const int block_height = num_8x8_blocks_high_lookup[bsize];
254 for (j = 0; j < block_height; ++j)
255 for (i = 0; i < block_width; ++i) {
256 if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
257 xd->mi[j * xd->mi_stride + i].src_mi = &xd->mi[0];
261 static void set_block_size(VP9_COMP * const cpi,
262 MACROBLOCKD *const xd,
263 int mi_row, int mi_col,
265 if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
266 set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
267 xd->mi[0].src_mi->mbmi.sb_type = bsize;
272 int64_t sum_square_error;
282 } partition_variance;
285 partition_variance part_variances;
290 partition_variance part_variances;
295 partition_variance part_variances;
300 partition_variance part_variances;
305 partition_variance part_variances;
310 partition_variance *part_variances;
320 static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
322 node->part_variances = NULL;
325 v64x64 *vt = (v64x64 *) data;
326 node->part_variances = &vt->part_variances;
327 for (i = 0; i < 4; i++)
328 node->split[i] = &vt->split[i].part_variances.none;
332 v32x32 *vt = (v32x32 *) data;
333 node->part_variances = &vt->part_variances;
334 for (i = 0; i < 4; i++)
335 node->split[i] = &vt->split[i].part_variances.none;
339 v16x16 *vt = (v16x16 *) data;
340 node->part_variances = &vt->part_variances;
341 for (i = 0; i < 4; i++)
342 node->split[i] = &vt->split[i].part_variances.none;
346 v8x8 *vt = (v8x8 *) data;
347 node->part_variances = &vt->part_variances;
348 for (i = 0; i < 4; i++)
349 node->split[i] = &vt->split[i].part_variances.none;
353 v4x4 *vt = (v4x4 *) data;
354 node->part_variances = &vt->part_variances;
355 for (i = 0; i < 4; i++)
356 node->split[i] = &vt->split[i];
366 // Set variance values given sum square error, sum error, count.
367 static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
368 v->sum_square_error = s2;
373 static void get_variance(var *v) {
374 v->variance = (int)(256 * (v->sum_square_error -
375 ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
378 void sum_2_variances(const var *a, const var *b, var *r) {
379 assert(a->log2_count == b->log2_count);
380 fill_variance(a->sum_square_error + b->sum_square_error,
381 a->sum_error + b->sum_error, a->log2_count + 1, r);
384 static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
386 tree_to_node(data, bsize, &node);
387 sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
388 sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
389 sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
390 sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
391 sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
392 &node.part_variances->none);
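
/* Illustrative sketch (not original code) of how the helpers above interact:
 * fill_variance() stores raw sums, get_variance() converts them into a
 * 256-scaled per-sample variance, and sum_2_variances() merges two equally
 * sized partitions by adding their raw sums and bumping log2_count by one. */
#if 0
static void example_merge(void) {
  var a = {0}, b = {0}, merged;
  fill_variance(/*sum_square_error=*/5000, /*sum_error=*/40, /*log2_count=*/4, &a);
  fill_variance(/*sum_square_error=*/7000, /*sum_error=*/-8, /*log2_count=*/4, &b);
  sum_2_variances(&a, &b, &merged);  // 32 samples total, log2_count == 5
  get_variance(&merged);             // 256 * (12000 - (32 * 32 >> 5)) >> 5
}
#endif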
395 static int set_vt_partitioning(VP9_COMP *cpi,
396 MACROBLOCKD *const xd,
402 BLOCK_SIZE bsize_min,
404 VP9_COMMON * const cm = &cpi->common;
406 const int block_width = num_8x8_blocks_wide_lookup[bsize];
407 const int block_height = num_8x8_blocks_high_lookup[bsize];
409 assert(block_height == block_width);
410 tree_to_node(data, bsize, &vt);
415 // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
416 // variance is below threshold, otherwise split will be selected.
417 // No check for vert/horiz split as too few samples for variance.
418 if (bsize == bsize_min) {
419 get_variance(&vt.part_variances->none);
420 if (mi_col + block_width / 2 < cm->mi_cols &&
421 mi_row + block_height / 2 < cm->mi_rows &&
422 vt.part_variances->none.variance < threshold) {
423 set_block_size(cpi, xd, mi_row, mi_col, bsize);
427 } else if (bsize > bsize_min) {
428 // Variance is already computed for 32x32 blocks to set the force_split.
429 if (bsize != BLOCK_32X32)
430 get_variance(&vt.part_variances->none);
    // For key frame or low_res: for bsize above 32X32 or very high variance,
    // take the split.
433 if (cm->frame_type == KEY_FRAME &&
434 (bsize > BLOCK_32X32 ||
435 vt.part_variances->none.variance > (threshold << 4))) {
438 // If variance is low, take the bsize (no split).
439 if (mi_col + block_width / 2 < cm->mi_cols &&
440 mi_row + block_height / 2 < cm->mi_rows &&
441 vt.part_variances->none.variance < threshold) {
442 set_block_size(cpi, xd, mi_row, mi_col, bsize);
446 // Check vertical split.
447 if (mi_row + block_height / 2 < cm->mi_rows) {
448 get_variance(&vt.part_variances->vert[0]);
449 get_variance(&vt.part_variances->vert[1]);
450 if (vt.part_variances->vert[0].variance < threshold &&
451 vt.part_variances->vert[1].variance < threshold) {
452 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
453 set_block_size(cpi, xd, mi_row, mi_col, subsize);
454 set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
458 // Check horizontal split.
459 if (mi_col + block_width / 2 < cm->mi_cols) {
460 get_variance(&vt.part_variances->horz[0]);
461 get_variance(&vt.part_variances->horz[1]);
462 if (vt.part_variances->horz[0].variance < threshold &&
463 vt.part_variances->horz[1].variance < threshold) {
464 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
465 set_block_size(cpi, xd, mi_row, mi_col, subsize);
466 set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
477 void vp9_set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) {
478 SPEED_FEATURES *const sf = &cpi->sf;
479 if (sf->partition_search_type != VAR_BASED_PARTITION &&
480 sf->partition_search_type != REFERENCE_PARTITION) {
483 VP9_COMMON *const cm = &cpi->common;
484 const int is_key_frame = (cm->frame_type == KEY_FRAME);
485 const int threshold_multiplier = is_key_frame ? 20 : 1;
486 const int64_t threshold_base = (int64_t)(threshold_multiplier *
487 cpi->y_dequant[q][1]);
489 // TODO(marpan): Allow 4x4 partitions for inter-frames.
490 // use_4x4_partition = (variance4x4downsample[i2 + j] == 1);
491 // If 4x4 partition is not used, then 8x8 partition will be selected
492 // if variance of 16x16 block is very high, so use larger threshold
493 // for 16x16 (threshold_bsize_min) in that case.
495 // Array index: 0 - threshold_64x64; 1 - threshold_32x32;
496 // 2 - threshold_16x16; 3 - vbp_threshold_8x8;
498 thresholds[0] = threshold_base;
499 thresholds[1] = threshold_base >> 2;
500 thresholds[2] = threshold_base >> 2;
501 thresholds[3] = threshold_base << 2;
502 cpi->vbp_bsize_min = BLOCK_8X8;
504 thresholds[1] = threshold_base;
505 if (cm->width <= 352 && cm->height <= 288) {
506 thresholds[0] = threshold_base >> 2;
507 thresholds[2] = threshold_base << 3;
509 thresholds[0] = threshold_base;
510 thresholds[2] = threshold_base << cpi->oxcf.speed;
512 cpi->vbp_bsize_min = BLOCK_16X16;
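
/* Worked example (illustrative): writing the base value as B, the assignments
 * above resolve to
 *   key frame:          {B, B >> 2, B >> 2, B << 2}, vbp_bsize_min = BLOCK_8X8
 *   delta, <= 352x288:  {B >> 2, B, B << 3, -},      vbp_bsize_min = BLOCK_16X16
 *   delta, larger:      {B, B, B << speed, -},       vbp_bsize_min = BLOCK_16X16
 * where the four slots gate the 64x64, 32x32, 16x16 and 8x8 decisions in turn,
 * "-" means thresholds[3] is left untouched by that branch, and B itself
 * scales with the dequant value for q (x20 on key frames). */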
517 static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
518 int dp, int x8_idx, int y8_idx, v8x8 *vst,
519 #if CONFIG_VP9_HIGHBITDEPTH
526 for (k = 0; k < 4; k++) {
527 int x4_idx = x8_idx + ((k & 1) << 2);
528 int y4_idx = y8_idx + ((k >> 1) << 2);
529 unsigned int sse = 0;
531 if (x4_idx < pixels_wide && y4_idx < pixels_high) {
534 #if CONFIG_VP9_HIGHBITDEPTH
535 if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
536 s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
538 d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
540 s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
542 d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
545 s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
547 d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
552 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
556 static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
557 int dp, int x16_idx, int y16_idx, v16x16 *vst,
558 #if CONFIG_VP9_HIGHBITDEPTH
565 for (k = 0; k < 4; k++) {
566 int x8_idx = x16_idx + ((k & 1) << 3);
567 int y8_idx = y16_idx + ((k >> 1) << 3);
568 unsigned int sse = 0;
570 if (x8_idx < pixels_wide && y8_idx < pixels_high) {
573 #if CONFIG_VP9_HIGHBITDEPTH
574 if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
575 s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
577 d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
579 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
581 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
584 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
586 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
591 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
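
/* Illustrative sketch (an assumption -- the intermediate lines are not shown
 * here): between the avg calls and fill_variance() each 8x8 sub-block is
 * reduced to a single sample, the difference of the source and prediction
 * averages, presumably
 *   sum = s_avg - d_avg;
 *   sse = sum * sum;
 * so a 16x16 node is built from just four (diff, diff^2) pairs with
 * log2_count == 0. Working on 8x8 (or, in fill_variance_4x4avg, 4x4) averages
 * is what makes this a down-sampled variance: a 64x64 SB is summarized by 64
 * samples at this level instead of 4096 pixels. */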
// This function chooses partitioning based on the variance between the source
// and the reconstructed last frame, where the variance is computed on
// down-sampled inputs.
597 static void choose_partitioning(VP9_COMP *cpi,
598 const TileInfo *const tile,
600 int mi_row, int mi_col) {
601 VP9_COMMON * const cm = &cpi->common;
602 MACROBLOCKD *xd = &x->e_mbd;
611 int pixels_wide = 64, pixels_high = 64;
612 int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
613 cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};
615 // Always use 4x4 partition for key frame.
616 const int is_key_frame = (cm->frame_type == KEY_FRAME);
617 const int use_4x4_partition = is_key_frame;
618 const int low_res = (cm->width <= 352 && cm->height <= 288);
619 int variance4x4downsample[16];
621 int segment_id = CR_SEGMENT_ID_BASE;
622 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
623 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
624 cm->last_frame_seg_map;
625 segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
627 if (cyclic_refresh_segment_id_boosted(segment_id)) {
628 int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
629 vp9_set_vbp_thresholds(cpi, thresholds, q);
633 set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
635 if (xd->mb_to_right_edge < 0)
636 pixels_wide += (xd->mb_to_right_edge >> 3);
637 if (xd->mb_to_bottom_edge < 0)
638 pixels_high += (xd->mb_to_bottom_edge >> 3);
640 s = x->plane[0].src.buf;
641 sp = x->plane[0].src.stride;
644 MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
646 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
648 const YV12_BUFFER_CONFIG *yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
649 unsigned int y_sad, y_sad_g;
650 const BLOCK_SIZE bsize = BLOCK_32X32
651 + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows);
653 assert(yv12 != NULL);
654 if (yv12_g && yv12_g != yv12) {
655 vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
656 &cm->frame_refs[GOLDEN_FRAME - 1].sf);
657 y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
658 x->plane[0].src.stride,
659 xd->plane[0].pre[0].buf,
660 xd->plane[0].pre[0].stride);
665 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
666 &cm->frame_refs[LAST_FRAME - 1].sf);
667 mbmi->ref_frame[0] = LAST_FRAME;
668 mbmi->ref_frame[1] = NONE;
669 mbmi->sb_type = BLOCK_64X64;
670 mbmi->mv[0].as_int = 0;
671 mbmi->interp_filter = BILINEAR;
673 y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize);
674 if (y_sad_g < y_sad) {
675 vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
676 &cm->frame_refs[GOLDEN_FRAME - 1].sf);
677 mbmi->ref_frame[0] = GOLDEN_FRAME;
678 mbmi->mv[0].as_int = 0;
681 x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
684 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
686 for (i = 1; i <= 2; ++i) {
687 struct macroblock_plane *p = &x->plane[i];
688 struct macroblockd_plane *pd = &xd->plane[i];
689 const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
691 if (bs == BLOCK_INVALID)
694 uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
695 pd->dst.buf, pd->dst.stride);
697 x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
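
      /* Illustrative note: with 4:2:0 sampling each chroma plane of the 64x64
       * block covers a quarter of the luma pixels, so uv_sad > (y_sad >> 2)
       * roughly says "the per-pixel prediction error in this chroma plane
       * exceeds the luma error". E.g. y_sad = 4000 puts the bar at 1000 per
       * chroma plane; crossing it flags the block as color-sensitive for the
       * later mode decisions. */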
700 d = xd->plane[0].dst.buf;
701 dp = xd->plane[0].dst.stride;
705 #if CONFIG_VP9_HIGHBITDEPTH
706 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
709 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
712 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
716 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
720 #endif // CONFIG_VP9_HIGHBITDEPTH
723 // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
725 // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
727 for (i = 0; i < 4; i++) {
728 const int x32_idx = ((i & 1) << 5);
729 const int y32_idx = ((i >> 1) << 5);
730 const int i2 = i << 2;
731 force_split[i + 1] = 0;
732 for (j = 0; j < 4; j++) {
733 const int x16_idx = x32_idx + ((j & 1) << 4);
734 const int y16_idx = y32_idx + ((j >> 1) << 4);
735 v16x16 *vst = &vt.split[i].split[j];
736 variance4x4downsample[i2 + j] = 0;
738 fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
739 #if CONFIG_VP9_HIGHBITDEPTH
745 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
      // For low resolution, compute the variance based on 8x8 down-sampling;
      // if it is large (above the threshold), go further down to 4x4
      // down-sampling. For key frames we always go down to 4x4.
750 get_variance(&vt.split[i].split[j].part_variances.none);
752 if (is_key_frame || (low_res &&
753 vt.split[i].split[j].part_variances.none.variance >
754 (thresholds[1] << 1))) {
755 // Go down to 4x4 down-sampling for variance.
756 variance4x4downsample[i2 + j] = 1;
757 for (k = 0; k < 4; k++) {
758 int x8_idx = x16_idx + ((k & 1) << 3);
759 int y8_idx = y16_idx + ((k >> 1) << 3);
760 v8x8 *vst2 = is_key_frame ? &vst->split[k] :
761 &vt2[i2 + j].split[k];
762 fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
763 #if CONFIG_VP9_HIGHBITDEPTH
774 // Fill the rest of the variance tree by summing split partition values.
775 for (i = 0; i < 4; i++) {
776 const int i2 = i << 2;
777 for (j = 0; j < 4; j++) {
778 if (variance4x4downsample[i2 + j] == 1) {
779 v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
780 &vt.split[i].split[j];
781 for (m = 0; m < 4; m++)
782 fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
783 fill_variance_tree(vtemp, BLOCK_16X16);
786 fill_variance_tree(&vt.split[i], BLOCK_32X32);
787 // If variance of this 32x32 block is above the threshold, force the block
788 // to split. This also forces a split on the upper (64x64) level.
789 get_variance(&vt.split[i].part_variances.none);
790 if (vt.split[i].part_variances.none.variance > thresholds[1]) {
791 force_split[i + 1] = 1;
796 fill_variance_tree(&vt, BLOCK_64X64);
  // Now go through the entire structure, splitting every block size until
  // we get to one that has a variance lower than our threshold.
800 if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
801 !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
802 thresholds[0], BLOCK_16X16, force_split[0])) {
803 for (i = 0; i < 4; ++i) {
804 const int x32_idx = ((i & 1) << 2);
805 const int y32_idx = ((i >> 1) << 2);
806 const int i2 = i << 2;
807 if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
808 (mi_row + y32_idx), (mi_col + x32_idx),
809 thresholds[1], BLOCK_16X16,
810 force_split[i + 1])) {
811 for (j = 0; j < 4; ++j) {
812 const int x16_idx = ((j & 1) << 1);
813 const int y16_idx = ((j >> 1) << 1);
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use vt2
          // in set_vt_partitioning(), otherwise use vt.
817 v16x16 *vtemp = (!is_key_frame &&
818 variance4x4downsample[i2 + j] == 1) ?
819 &vt2[i2 + j] : &vt.split[i].split[j];
820 if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
821 mi_row + y32_idx + y16_idx,
822 mi_col + x32_idx + x16_idx,
823 thresholds[2], cpi->vbp_bsize_min, 0)) {
824 for (k = 0; k < 4; ++k) {
825 const int x8_idx = (k & 1);
826 const int y8_idx = (k >> 1);
827 if (use_4x4_partition) {
828 if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
830 mi_row + y32_idx + y16_idx + y8_idx,
831 mi_col + x32_idx + x16_idx + x8_idx,
832 thresholds[3], BLOCK_8X8, 0)) {
833 set_block_size(cpi, xd,
834 (mi_row + y32_idx + y16_idx + y8_idx),
835 (mi_col + x32_idx + x16_idx + x8_idx),
839 set_block_size(cpi, xd,
840 (mi_row + y32_idx + y16_idx + y8_idx),
841 (mi_col + x32_idx + x16_idx + x8_idx),
852 static void update_state(VP9_COMP *cpi, ThreadData *td,
853 PICK_MODE_CONTEXT *ctx,
854 int mi_row, int mi_col, BLOCK_SIZE bsize,
855 int output_enabled) {
857 VP9_COMMON *const cm = &cpi->common;
858 RD_COUNTS *const rdc = &td->rd_counts;
859 MACROBLOCK *const x = &td->mb;
860 MACROBLOCKD *const xd = &x->e_mbd;
861 struct macroblock_plane *const p = x->plane;
862 struct macroblockd_plane *const pd = xd->plane;
863 MODE_INFO *mi = &ctx->mic;
864 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
865 MODE_INFO *mi_addr = &xd->mi[0];
866 const struct segmentation *const seg = &cm->seg;
867 const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
868 const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
869 const int x_mis = MIN(bw, cm->mi_cols - mi_col);
870 const int y_mis = MIN(bh, cm->mi_rows - mi_row);
871 MV_REF *const frame_mvs =
872 cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
875 const int mis = cm->mi_stride;
876 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
877 const int mi_height = num_8x8_blocks_high_lookup[bsize];
880 assert(mi->mbmi.sb_type == bsize);
883 mi_addr->src_mi = mi_addr;
885 // If segmentation in use
887 // For in frame complexity AQ copy the segment id from the segment map.
888 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
889 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
890 : cm->last_frame_seg_map;
891 mi_addr->mbmi.segment_id =
892 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
894 // Else for cyclic refresh mode update the segment map, set the segment id
895 // and then update the quantizer.
896 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
897 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, mi_row,
898 mi_col, bsize, ctx->rate, ctx->dist,
903 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
904 for (i = 0; i < max_plane; ++i) {
905 p[i].coeff = ctx->coeff_pbuf[i][1];
906 p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
907 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
908 p[i].eobs = ctx->eobs_pbuf[i][1];
911 for (i = max_plane; i < MAX_MB_PLANE; ++i) {
912 p[i].coeff = ctx->coeff_pbuf[i][2];
913 p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
914 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
915 p[i].eobs = ctx->eobs_pbuf[i][2];
  // Restore the coding context of the MB to the one that was in place
  // when the mode was picked for it.
920 for (y = 0; y < mi_height; y++)
921 for (x_idx = 0; x_idx < mi_width; x_idx++)
922 if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
923 && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
924 xd->mi[x_idx + y * mis].src_mi = mi_addr;
927 if (cpi->oxcf.aq_mode)
928 vp9_init_plane_quantizers(cpi, x);
930 // FIXME(rbultje) I'm pretty sure this should go to the end of this block
931 // (i.e. after the output_enabled)
932 if (bsize < BLOCK_32X32) {
933 if (bsize < BLOCK_16X16)
934 ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
935 ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
938 if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
939 mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
940 mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
944 vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
945 sizeof(uint8_t) * ctx->num_4x4_blk);
950 if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
951 for (i = 0; i < TX_MODES; i++)
952 rdc->tx_select_diff[i] += ctx->tx_rd_diff[i];
955 #if CONFIG_INTERNAL_STATS
956 if (frame_is_intra_only(cm)) {
957 static const int kf_mode_index[] = {
959 THR_V_PRED /*V_PRED*/,
960 THR_H_PRED /*H_PRED*/,
961 THR_D45_PRED /*D45_PRED*/,
962 THR_D135_PRED /*D135_PRED*/,
963 THR_D117_PRED /*D117_PRED*/,
964 THR_D153_PRED /*D153_PRED*/,
965 THR_D207_PRED /*D207_PRED*/,
966 THR_D63_PRED /*D63_PRED*/,
969 ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
    // Note how often each mode is chosen as best.
972 ++cpi->mode_chosen_counts[ctx->best_mode_index];
975 if (!frame_is_intra_only(cm)) {
976 if (is_inter_block(mbmi)) {
977 vp9_update_mv_count(td);
979 if (cm->interp_filter == SWITCHABLE) {
980 const int ctx = vp9_get_pred_context_switchable_interp(xd);
981 ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
985 rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
986 rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
987 rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
989 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
990 rdc->filter_diff[i] += ctx->best_filter_diff[i];
993 for (h = 0; h < y_mis; ++h) {
994 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
995 for (w = 0; w < x_mis; ++w) {
996 MV_REF *const mv = frame_mv + w;
997 mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
998 mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
999 mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
1000 mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
1005 void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
1006 int mi_row, int mi_col) {
1007 uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
1008 const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride };
1011 // Set current frame pointer.
1012 x->e_mbd.cur_buf = src;
1014 for (i = 0; i < MAX_MB_PLANE; i++)
1015 setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
1016 NULL, x->e_mbd.plane[i].subsampling_x,
1017 x->e_mbd.plane[i].subsampling_y);
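
/* Illustrative sketch (an assumption about setup_pred_plane()'s internals):
 * each plane's src pointer is advanced to the pixel corresponding to
 * (mi_row, mi_col), with chroma coordinates reduced by the subsampling
 * factors, roughly
 *   x_off = (MI_SIZE * mi_col) >> subsampling_x;
 *   y_off = (MI_SIZE * mi_row) >> subsampling_y;
 *   plane[i].src.buf = buffers[i] + y_off * strides[i] + x_off;
 * so for 4:2:0 content the U/V pointers move 4 pixels for every 8 luma
 * pixels. */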
1020 static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
1021 RD_COST *rd_cost, BLOCK_SIZE bsize) {
1022 MACROBLOCKD *const xd = &x->e_mbd;
1023 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
1024 INTERP_FILTER filter_ref;
1026 if (xd->up_available)
1027 filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
1028 else if (xd->left_available)
1029 filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;
1031 filter_ref = EIGHTTAP;
1033 mbmi->sb_type = bsize;
1034 mbmi->mode = ZEROMV;
1035 mbmi->tx_size = MIN(max_txsize_lookup[bsize],
1036 tx_mode_to_biggest_tx_size[tx_mode]);
1038 mbmi->uv_mode = DC_PRED;
1039 mbmi->ref_frame[0] = LAST_FRAME;
1040 mbmi->ref_frame[1] = NONE;
1041 mbmi->mv[0].as_int = 0;
1042 mbmi->interp_filter = filter_ref;
1044 xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = 0;
1047 vp9_rd_cost_init(rd_cost);
1050 static int set_segment_rdmult(VP9_COMP *const cpi,
1051 MACROBLOCK *const x,
1052 int8_t segment_id) {
1054 VP9_COMMON *const cm = &cpi->common;
1055 vp9_init_plane_quantizers(cpi, x);
1056 vp9_clear_system_state();
1057 segment_qindex = vp9_get_qindex(&cm->seg, segment_id,
1059 return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
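
/* Illustrative note: the value returned above is the RD multiplier (lambda)
 * for the block, derived from the segment's effective quantizer rather than
 * the frame base q. A segment with a negative q delta gets a smaller
 * segment_qindex and hence a smaller rdmult, so rate is penalized less there
 * and the encoder will spend more bits per unit of distortion saved; boosted
 * (higher q) segments tilt the trade-off the other way. */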
1062 static void rd_pick_sb_modes(VP9_COMP *cpi,
1063 TileDataEnc *tile_data,
1064 MACROBLOCK *const x,
1065 int mi_row, int mi_col, RD_COST *rd_cost,
1066 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
1068 VP9_COMMON *const cm = &cpi->common;
1069 TileInfo *const tile_info = &tile_data->tile_info;
1070 MACROBLOCKD *const xd = &x->e_mbd;
1072 struct macroblock_plane *const p = x->plane;
1073 struct macroblockd_plane *const pd = xd->plane;
1074 const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
1077 vp9_clear_system_state();
1079 // Use the lower precision, but faster, 32x32 fdct for mode selection.
1080 x->use_lp32x32fdct = 1;
1082 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
1083 mbmi = &xd->mi[0].src_mi->mbmi;
1084 mbmi->sb_type = bsize;
1086 for (i = 0; i < MAX_MB_PLANE; ++i) {
1087 p[i].coeff = ctx->coeff_pbuf[i][0];
1088 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
1089 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
1090 p[i].eobs = ctx->eobs_pbuf[i][0];
1094 ctx->pred_pixel_ready = 0;
1097 // Set to zero to make sure we do not use the previous encoded frame stats
1100 #if CONFIG_VP9_HIGHBITDEPTH
1101 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1102 x->source_variance =
1103 vp9_high_get_sby_perpixel_variance(cpi, &x->plane[0].src,
1106 x->source_variance =
1107 vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
1110 x->source_variance =
1111 vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
1112 #endif // CONFIG_VP9_HIGHBITDEPTH
1114 // Save rdmult before it might be changed, so it can be restored later.
1115 orig_rdmult = x->rdmult;
1117 if (aq_mode == VARIANCE_AQ) {
1118 const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
1119 : vp9_block_energy(cpi, x, bsize);
1120 if (cm->frame_type == KEY_FRAME ||
1121 cpi->refresh_alt_ref_frame ||
1122 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
1123 mbmi->segment_id = vp9_vaq_segment_id(energy);
1125 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
1126 : cm->last_frame_seg_map;
1127 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
1129 x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
1130 } else if (aq_mode == COMPLEXITY_AQ) {
1131 x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
1132 } else if (aq_mode == CYCLIC_REFRESH_AQ) {
1133 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
1134 : cm->last_frame_seg_map;
1135 // If segment is boosted, use rdmult for that segment.
1136 if (cyclic_refresh_segment_id_boosted(
1137 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col)))
1138 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
1141 // Find best coding mode & reconstruct the MB so it is available
1142 // as a predictor for MBs that follow in the SB
1143 if (frame_is_intra_only(cm)) {
1144 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
1146 if (bsize >= BLOCK_8X8) {
1147 if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
1148 vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
1151 vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
1152 rd_cost, bsize, ctx, best_rd);
1154 vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
1155 rd_cost, bsize, ctx, best_rd);
1160 // Examine the resulting rate and for AQ mode 2 make a segment choice.
1161 if ((rd_cost->rate != INT_MAX) &&
1162 (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
1163 (cm->frame_type == KEY_FRAME ||
1164 cpi->refresh_alt_ref_frame ||
1165 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
1166 vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
1169 x->rdmult = orig_rdmult;
1171 // TODO(jingning) The rate-distortion optimization flow needs to be
1172 // refactored to provide proper exit/return handle.
1173 if (rd_cost->rate == INT_MAX)
1174 rd_cost->rdcost = INT64_MAX;
1176 ctx->rate = rd_cost->rate;
1177 ctx->dist = rd_cost->dist;
1180 static void update_stats(VP9_COMMON *cm, ThreadData *td) {
1181 const MACROBLOCK *x = &td->mb;
1182 const MACROBLOCKD *const xd = &x->e_mbd;
1183 const MODE_INFO *const mi = xd->mi[0].src_mi;
1184 const MB_MODE_INFO *const mbmi = &mi->mbmi;
1185 const BLOCK_SIZE bsize = mbmi->sb_type;
1187 if (!frame_is_intra_only(cm)) {
1188 FRAME_COUNTS *const counts = td->counts;
1189 const int inter_block = is_inter_block(mbmi);
1190 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
1192 if (!seg_ref_active) {
1193 counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;
1194 // If the segment reference feature is enabled we have only a single
1195 // reference frame allowed for the segment so exclude it from
1196 // the reference frame counts used to work out probabilities.
1198 const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
1199 if (cm->reference_mode == REFERENCE_MODE_SELECT)
1200 counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
1201 [has_second_ref(mbmi)]++;
1203 if (has_second_ref(mbmi)) {
1204 counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
1205 [ref0 == GOLDEN_FRAME]++;
1207 counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
1208 [ref0 != LAST_FRAME]++;
1209 if (ref0 != LAST_FRAME)
1210 counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
1211 [ref0 != GOLDEN_FRAME]++;
1216 !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
1217 const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
1218 if (bsize >= BLOCK_8X8) {
1219 const PREDICTION_MODE mode = mbmi->mode;
1220 ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
1222 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
1223 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
1225 for (idy = 0; idy < 2; idy += num_4x4_h) {
1226 for (idx = 0; idx < 2; idx += num_4x4_w) {
1227 const int j = idy * 2 + idx;
1228 const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
1229 ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
1237 static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
1238 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
1239 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
1240 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
1242 MACROBLOCKD *const xd = &x->e_mbd;
1244 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1245 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1246 int mi_width = num_8x8_blocks_wide_lookup[bsize];
1247 int mi_height = num_8x8_blocks_high_lookup[bsize];
1248 for (p = 0; p < MAX_MB_PLANE; p++) {
1250 xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
1251 a + num_4x4_blocks_wide * p,
1252 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
1253 xd->plane[p].subsampling_x);
1256 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
1257 l + num_4x4_blocks_high * p,
1258 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
1259 xd->plane[p].subsampling_y);
1261 vpx_memcpy(xd->above_seg_context + mi_col, sa,
1262 sizeof(*xd->above_seg_context) * mi_width);
1263 vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
1264 sizeof(xd->left_seg_context[0]) * mi_height);
1267 static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
1268 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
1269 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
1270 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
1272 const MACROBLOCKD *const xd = &x->e_mbd;
1274 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1275 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1276 int mi_width = num_8x8_blocks_wide_lookup[bsize];
1277 int mi_height = num_8x8_blocks_high_lookup[bsize];
1279 // buffer the above/left context information of the block in search.
1280 for (p = 0; p < MAX_MB_PLANE; ++p) {
1282 a + num_4x4_blocks_wide * p,
1283 xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
1284 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
1285 xd->plane[p].subsampling_x);
1287 l + num_4x4_blocks_high * p,
1289 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
1290 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
1291 xd->plane[p].subsampling_y);
1293 vpx_memcpy(sa, xd->above_seg_context + mi_col,
1294 sizeof(*xd->above_seg_context) * mi_width);
1295 vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
1296 sizeof(xd->left_seg_context[0]) * mi_height);
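
/* Illustrative usage sketch, matching the pattern in rd_use_partition() below:
 * the partition search snapshots the entropy/partition contexts, evaluates one
 * candidate partitioning, then rolls the contexts back before trying the next:
 *   save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
 *   rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &rdc, bsize, ctx, best_rd);
 *   restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
 * Only the partitioning that finally wins is re-encoded with the contexts left
 * in place. */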
1299 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
1301 TOKENEXTRA **tp, int mi_row, int mi_col,
1302 int output_enabled, BLOCK_SIZE bsize,
1303 PICK_MODE_CONTEXT *ctx) {
1304 MACROBLOCK *const x = &td->mb;
1305 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
1306 update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
1307 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
1309 if (output_enabled) {
1310 update_stats(&cpi->common, td);
1312 (*tp)->token = EOSB_TOKEN;
1317 static void encode_sb(VP9_COMP *cpi, ThreadData *td,
1318 const TileInfo *const tile,
1319 TOKENEXTRA **tp, int mi_row, int mi_col,
1320 int output_enabled, BLOCK_SIZE bsize,
1322 VP9_COMMON *const cm = &cpi->common;
1323 MACROBLOCK *const x = &td->mb;
1324 MACROBLOCKD *const xd = &x->e_mbd;
1326 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
1328 PARTITION_TYPE partition;
1329 BLOCK_SIZE subsize = bsize;
1331 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1334 if (bsize >= BLOCK_8X8) {
1335 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
1336 subsize = get_subsize(bsize, pc_tree->partitioning);
1339 subsize = BLOCK_4X4;
1342 partition = partition_lookup[bsl][subsize];
1343 if (output_enabled && bsize != BLOCK_4X4)
1344 td->counts->partition[ctx][partition]++;
1346 switch (partition) {
1347 case PARTITION_NONE:
1348 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1351 case PARTITION_VERT:
1352 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1353 &pc_tree->vertical[0]);
1354 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
1355 encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
1356 subsize, &pc_tree->vertical[1]);
1359 case PARTITION_HORZ:
1360 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1361 &pc_tree->horizontal[0]);
1362 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
1363 encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
1364 subsize, &pc_tree->horizontal[1]);
1367 case PARTITION_SPLIT:
1368 if (bsize == BLOCK_8X8) {
1369 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1370 pc_tree->leaf_split[0]);
1372 encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1374 encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1375 subsize, pc_tree->split[1]);
1376 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1377 subsize, pc_tree->split[2]);
1378 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
1379 subsize, pc_tree->split[3]);
1383 assert(0 && "Invalid partition type.");
1387 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
1388 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
1391 // Check to see if the given partition size is allowed for a specified number
1392 // of 8x8 block rows and columns remaining in the image.
// If not, then return the largest allowed partition size.
1394 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
1395 int rows_left, int cols_left,
1397 if (rows_left <= 0 || cols_left <= 0) {
1398 return MIN(bsize, BLOCK_8X8);
1400 for (; bsize > 0; bsize -= 3) {
1401 *bh = num_8x8_blocks_high_lookup[bsize];
1402 *bw = num_8x8_blocks_wide_lookup[bsize];
1403 if ((*bh <= rows_left) && (*bw <= cols_left)) {
1411 static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
1412 int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
1413 BLOCK_SIZE bsize, MODE_INFO *mi_8x8) {
1416 for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
1418 for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
1419 const int index = r * mis + c;
1420 mi_8x8[index].src_mi = mi + index;
1421 mi_8x8[index].src_mi->mbmi.sb_type = find_partition_size(bsize,
1422 row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
1427 // This function attempts to set all mode info entries in a given SB64
1428 // to the same block partition size.
1429 // However, at the bottom and right borders of the image the requested size
// may not be allowed, in which case this code attempts to choose the largest
1431 // allowable partition.
1432 static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
1433 MODE_INFO *mi_8x8, int mi_row, int mi_col,
1435 VP9_COMMON *const cm = &cpi->common;
1436 const int mis = cm->mi_stride;
1437 const int row8x8_remaining = tile->mi_row_end - mi_row;
1438 const int col8x8_remaining = tile->mi_col_end - mi_col;
1439 int block_row, block_col;
1440 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
1441 int bh = num_8x8_blocks_high_lookup[bsize];
1442 int bw = num_8x8_blocks_wide_lookup[bsize];
1444 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
1446 // Apply the requested partition size to the SB64 if it is all "in image"
1447 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
1448 (row8x8_remaining >= MI_BLOCK_SIZE)) {
1449 for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
1450 for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
1451 int index = block_row * mis + block_col;
1452 mi_8x8[index].src_mi = mi_upper_left + index;
1453 mi_8x8[index].src_mi->mbmi.sb_type = bsize;
1457 // Else this is a partial SB64.
1458 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
1459 col8x8_remaining, bsize, mi_8x8);
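
/* Worked example (illustrative, assuming the standard BLOCK_SIZE enum
 * ordering): the square sizes are three enum entries apart, which is why
 * find_partition_size() above steps "bsize -= 3" (BLOCK_64X64 -> BLOCK_32X32
 * -> BLOCK_16X16 -> BLOCK_8X8). Requesting BLOCK_64X64 with only 6 rows and 3
 * columns of 8x8 units left in the frame returns BLOCK_16X16 (bh = bw = 2),
 * the largest square size that still fits both dimensions. */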
1466 } coord_lookup[16] = {
1468 {0, 0}, {0, 2}, {2, 0}, {2, 2},
1470 {0, 4}, {0, 6}, {2, 4}, {2, 6},
1472 {4, 0}, {4, 2}, {6, 0}, {6, 2},
1474 {4, 4}, {4, 6}, {6, 4}, {6, 6},
1477 static void set_source_var_based_partition(VP9_COMP *cpi,
1478 const TileInfo *const tile,
1479 MACROBLOCK *const x,
1481 int mi_row, int mi_col) {
1482 VP9_COMMON *const cm = &cpi->common;
1483 const int mis = cm->mi_stride;
1484 const int row8x8_remaining = tile->mi_row_end - mi_row;
1485 const int col8x8_remaining = tile->mi_col_end - mi_col;
1486 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
1488 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
1490 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
1493 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
1494 (row8x8_remaining >= MI_BLOCK_SIZE)) {
1498 const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
1499 int is_larger_better = 0;
1501 unsigned int thr = cpi->source_var_thresh;
1503 vpx_memset(d32, 0, 4 * sizeof(diff));
1505 for (i = 0; i < 4; i++) {
1508 for (j = 0; j < 4; j++) {
1509 int b_mi_row = coord_lookup[i * 4 + j].row;
1510 int b_mi_col = coord_lookup[i * 4 + j].col;
1511 int boffset = b_mi_row / 2 * cm->mb_cols +
1514 d16[j] = cpi->source_diff_var + offset + boffset;
1516 index = b_mi_row * mis + b_mi_col;
1517 mi_8x8[index].src_mi = mi_upper_left + index;
1518 mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_16X16;
1520 // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
1521 // size to further improve quality.
1524 is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
1525 (d16[2]->var < thr) && (d16[3]->var < thr);
1527 // Use 32x32 partition
1528 if (is_larger_better) {
1531 for (j = 0; j < 4; j++) {
1532 d32[i].sse += d16[j]->sse;
1533 d32[i].sum += d16[j]->sum;
1536 d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
1538 index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
1539 mi_8x8[index].src_mi = mi_upper_left + index;
1540 mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_32X32;
1544 if (use32x32 == 4) {
1546 is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
1547 (d32[2].var < thr) && (d32[3].var < thr);
1549 // Use 64x64 partition
1550 if (is_larger_better) {
1551 mi_8x8[0].src_mi = mi_upper_left;
1552 mi_8x8[0].src_mi->mbmi.sb_type = BLOCK_64X64;
1555 } else { // partial in-image SB64
1556 int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
1557 int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
1558 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
1559 row8x8_remaining, col8x8_remaining, BLOCK_16X16, mi_8x8);
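
/* Illustrative note: each d32[i] aggregates the four 16x16 diff records of one
 * 32x32 quadrant, so the ">> 10" above is the usual var = SSE - sum^2 / N with
 * N = 32 * 32 = 1024 = 2^10. The same threshold "thr" is then reused at 32x32
 * and again at 64x64 to decide whether the larger partition is acceptable. */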
1563 static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
1564 PICK_MODE_CONTEXT *ctx,
1565 int mi_row, int mi_col, int bsize) {
1566 VP9_COMMON *const cm = &cpi->common;
1567 MACROBLOCK *const x = &td->mb;
1568 MACROBLOCKD *const xd = &x->e_mbd;
1569 MODE_INFO *const mi = xd->mi[0].src_mi;
1570 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
1571 const struct segmentation *const seg = &cm->seg;
1572 const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
1573 const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
1574 const int x_mis = MIN(bw, cm->mi_cols - mi_col);
1575 const int y_mis = MIN(bh, cm->mi_rows - mi_row);
1577 xd->mi[0] = ctx->mic;
1578 xd->mi[0].src_mi = &xd->mi[0];
1580 if (seg->enabled && cpi->oxcf.aq_mode) {
1581 // For in frame complexity AQ or variance AQ, copy segment_id from
1582 // segmentation_map.
1583 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ ||
1584 cpi->oxcf.aq_mode == VARIANCE_AQ ) {
1585 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
1586 : cm->last_frame_seg_map;
1587 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
1589 // Setting segmentation map for cyclic_refresh.
1590 vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize,
1591 ctx->rate, ctx->dist, x->skip);
1593 vp9_init_plane_quantizers(cpi, x);
1596 if (is_inter_block(mbmi)) {
1597 vp9_update_mv_count(td);
1598 if (cm->interp_filter == SWITCHABLE) {
1599 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
1600 ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter];
1603 if (mbmi->sb_type < BLOCK_8X8) {
1604 mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
1605 mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
1609 if (cm->use_prev_frame_mvs) {
1610 MV_REF *const frame_mvs =
1611 cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
1614 for (h = 0; h < y_mis; ++h) {
1615 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
1616 for (w = 0; w < x_mis; ++w) {
1617 MV_REF *const mv = frame_mv + w;
1618 mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
1619 mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
1620 mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
1621 mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
1626 x->skip = ctx->skip;
1627 x->skip_txfm[0] = mbmi->segment_id ? 0 : ctx->skip_txfm[0];
1630 static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
1631 const TileInfo *const tile,
1632 TOKENEXTRA **tp, int mi_row, int mi_col,
1633 int output_enabled, BLOCK_SIZE bsize,
1634 PICK_MODE_CONTEXT *ctx) {
1635 MACROBLOCK *const x = &td->mb;
1636 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
1637 update_state_rt(cpi, td, ctx, mi_row, mi_col, bsize);
1639 #if CONFIG_VP9_TEMPORAL_DENOISING
1640 if (cpi->oxcf.noise_sensitivity > 0 && output_enabled &&
1641 cpi->common.frame_type != KEY_FRAME) {
1642 vp9_denoiser_denoise(&cpi->denoiser, x, mi_row, mi_col,
1643 MAX(BLOCK_8X8, bsize), ctx);
1647 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
1648 update_stats(&cpi->common, td);
1650 (*tp)->token = EOSB_TOKEN;
1654 static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
1655 const TileInfo *const tile,
1656 TOKENEXTRA **tp, int mi_row, int mi_col,
1657 int output_enabled, BLOCK_SIZE bsize,
1659 VP9_COMMON *const cm = &cpi->common;
1660 MACROBLOCK *const x = &td->mb;
1661 MACROBLOCKD *const xd = &x->e_mbd;
1663 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
1665 PARTITION_TYPE partition;
1668 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1671 if (bsize >= BLOCK_8X8) {
1672 const int idx_str = xd->mi_stride * mi_row + mi_col;
1673 MODE_INFO *mi_8x8 = cm->mi[idx_str].src_mi;
1674 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
1675 subsize = mi_8x8[0].src_mi->mbmi.sb_type;
1678 subsize = BLOCK_4X4;
1681 partition = partition_lookup[bsl][subsize];
1682 if (output_enabled && bsize != BLOCK_4X4)
1683 td->counts->partition[ctx][partition]++;
1685 switch (partition) {
1686 case PARTITION_NONE:
1687 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1690 case PARTITION_VERT:
1691 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1692 &pc_tree->vertical[0]);
1693 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
1694 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1695 subsize, &pc_tree->vertical[1]);
1698 case PARTITION_HORZ:
1699 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1700 &pc_tree->horizontal[0]);
1701 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
1702 encode_b_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1703 subsize, &pc_tree->horizontal[1]);
1706 case PARTITION_SPLIT:
1707 subsize = get_subsize(bsize, PARTITION_SPLIT);
1708 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1710 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1711 subsize, pc_tree->split[1]);
1712 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1713 subsize, pc_tree->split[2]);
1714 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
1715 output_enabled, subsize, pc_tree->split[3]);
1718 assert(0 && "Invalid partition type.");
1722 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
1723 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
1726 static void rd_use_partition(VP9_COMP *cpi,
1728 TileDataEnc *tile_data,
1729 MODE_INFO *mi_8x8, TOKENEXTRA **tp,
1730 int mi_row, int mi_col,
1732 int *rate, int64_t *dist,
1733 int do_recon, PC_TREE *pc_tree) {
1734 VP9_COMMON *const cm = &cpi->common;
1735 TileInfo *const tile_info = &tile_data->tile_info;
1736 MACROBLOCK *const x = &td->mb;
1737 MACROBLOCKD *const xd = &x->e_mbd;
1738 const int mis = cm->mi_stride;
1739 const int bsl = b_width_log2_lookup[bsize];
1740 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
1741 const int bss = (1 << bsl) / 4;
1743 PARTITION_TYPE partition = PARTITION_NONE;
1745 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
1746 PARTITION_CONTEXT sl[8], sa[8];
1747 RD_COST last_part_rdc, none_rdc, chosen_rdc;
1748 BLOCK_SIZE sub_subsize = BLOCK_4X4;
1749 int splits_below = 0;
1750 BLOCK_SIZE bs_type = mi_8x8[0].src_mi->mbmi.sb_type;
1751 int do_partition_search = 1;
1752 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
1754 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1757 assert(num_4x4_blocks_wide_lookup[bsize] ==
1758 num_4x4_blocks_high_lookup[bsize]);
1760 vp9_rd_cost_reset(&last_part_rdc);
1761 vp9_rd_cost_reset(&none_rdc);
1762 vp9_rd_cost_reset(&chosen_rdc);
1764 partition = partition_lookup[bsl][bs_type];
1765 subsize = get_subsize(bsize, partition);
1767 pc_tree->partitioning = partition;
1768 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1770 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
1771 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
1772 x->mb_energy = vp9_block_energy(cpi, x, bsize);
1775 if (do_partition_search &&
1776 cpi->sf.partition_search_type == SEARCH_PARTITION &&
1777 cpi->sf.adjust_partitioning_from_last_frame) {
1778 // Check if any of the sub blocks are further split.
1779 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
1780 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
1782 for (i = 0; i < 4; i++) {
1783 int jj = i >> 1, ii = i & 0x01;
1784 MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss].src_mi;
1785 if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
  // If partition is not none, try none unless each of the 4 splits is itself
  // further split.
1793 if (partition != PARTITION_NONE && !splits_below &&
1794 mi_row + (mi_step >> 1) < cm->mi_rows &&
1795 mi_col + (mi_step >> 1) < cm->mi_cols) {
1796 pc_tree->partitioning = PARTITION_NONE;
1797 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize,
1800 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1802 if (none_rdc.rate < INT_MAX) {
1803 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
1804 none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
1808 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1809 mi_8x8[0].src_mi->mbmi.sb_type = bs_type;
1810 pc_tree->partitioning = partition;
1814 switch (partition) {
1815 case PARTITION_NONE:
1816 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1817 bsize, ctx, INT64_MAX);
1819 case PARTITION_HORZ:
1820 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1821 subsize, &pc_tree->horizontal[0],
1823 if (last_part_rdc.rate != INT_MAX &&
1824 bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
1826 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
1827 vp9_rd_cost_init(&tmp_rdc);
1828 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
1829 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
1830 rd_pick_sb_modes(cpi, tile_data, x,
1831 mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
1832 subsize, &pc_tree->horizontal[1], INT64_MAX);
1833 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1834 vp9_rd_cost_reset(&last_part_rdc);
1837 last_part_rdc.rate += tmp_rdc.rate;
1838 last_part_rdc.dist += tmp_rdc.dist;
1839 last_part_rdc.rdcost += tmp_rdc.rdcost;
1842 case PARTITION_VERT:
1843 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1844 subsize, &pc_tree->vertical[0], INT64_MAX);
1845 if (last_part_rdc.rate != INT_MAX &&
1846 bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
1848 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
1849 vp9_rd_cost_init(&tmp_rdc);
1850 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
1851 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
1852 rd_pick_sb_modes(cpi, tile_data, x,
1853 mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
1854 subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
1856 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1857 vp9_rd_cost_reset(&last_part_rdc);
1860 last_part_rdc.rate += tmp_rdc.rate;
1861 last_part_rdc.dist += tmp_rdc.dist;
1862 last_part_rdc.rdcost += tmp_rdc.rdcost;
1865 case PARTITION_SPLIT:
1866 if (bsize == BLOCK_8X8) {
1867 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1868 subsize, pc_tree->leaf_split[0], INT64_MAX);
1871 last_part_rdc.rate = 0;
1872 last_part_rdc.dist = 0;
1873 last_part_rdc.rdcost = 0;
1874 for (i = 0; i < 4; i++) {
1875 int x_idx = (i & 1) * (mi_step >> 1);
1876 int y_idx = (i >> 1) * (mi_step >> 1);
1877 int jj = i >> 1, ii = i & 0x01;
1879 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
1882 vp9_rd_cost_init(&tmp_rdc);
1883 rd_use_partition(cpi, td, tile_data,
1884 mi_8x8 + jj * bss * mis + ii * bss, tp,
1885 mi_row + y_idx, mi_col + x_idx, subsize,
1886 &tmp_rdc.rate, &tmp_rdc.dist,
1887 i != 3, pc_tree->split[i]);
1888 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1889 vp9_rd_cost_reset(&last_part_rdc);
1892 last_part_rdc.rate += tmp_rdc.rate;
1893 last_part_rdc.dist += tmp_rdc.dist;
1901 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1902 if (last_part_rdc.rate < INT_MAX) {
1903 last_part_rdc.rate += cpi->partition_cost[pl][partition];
1904 last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
1905 last_part_rdc.rate, last_part_rdc.dist);
1908 if (do_partition_search
1909 && cpi->sf.adjust_partitioning_from_last_frame
1910 && cpi->sf.partition_search_type == SEARCH_PARTITION
1911 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
1912 && (mi_row + mi_step < cm->mi_rows ||
1913 mi_row + (mi_step >> 1) == cm->mi_rows)
1914 && (mi_col + mi_step < cm->mi_cols ||
1915 mi_col + (mi_step >> 1) == cm->mi_cols)) {
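// As an alternative to the inherited partitioning, code this block as four
// square sub-blocks, each forced to PARTITION_NONE, and accumulate their RD
// cost for comparison.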
1916 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
1917 chosen_rdc.rate = 0;
1918 chosen_rdc.dist = 0;
1919 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1920 pc_tree->partitioning = PARTITION_SPLIT;
1923 for (i = 0; i < 4; i++) {
1924 int x_idx = (i & 1) * (mi_step >> 1);
1925 int y_idx = (i >> 1) * (mi_step >> 1);
1927 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
1928 PARTITION_CONTEXT sl[8], sa[8];
1930 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
1933 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1934 pc_tree->split[i]->partitioning = PARTITION_NONE;
1935 rd_pick_sb_modes(cpi, tile_data, x,
1936 mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
1937 split_subsize, &pc_tree->split[i]->none, INT64_MAX);
1939 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1941 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1942 vp9_rd_cost_reset(&chosen_rdc);
1946 chosen_rdc.rate += tmp_rdc.rate;
1947 chosen_rdc.dist += tmp_rdc.dist;
1950 encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
1951 split_subsize, pc_tree->split[i]);
1953 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
1955 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
1957 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1958 if (chosen_rdc.rate < INT_MAX) {
1959 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
1960 chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
1961 chosen_rdc.rate, chosen_rdc.dist);
1965 // If last_part is better, set the partitioning to that.
1966 if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
1967 mi_8x8[0].src_mi->mbmi.sb_type = bsize;
1968 if (bsize >= BLOCK_8X8)
1969 pc_tree->partitioning = partition;
1970 chosen_rdc = last_part_rdc;
1972 // If none was better, set the partitioning to that.
1973 if (none_rdc.rdcost < chosen_rdc.rdcost) {
1974 if (bsize >= BLOCK_8X8)
1975 pc_tree->partitioning = PARTITION_NONE;
1976 chosen_rdc = none_rdc;
1979 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1981 // We must have chosen a partitioning and encoding by this point, or we will
1982 // fail later on. There are no other opportunities for success.
1983 if (bsize == BLOCK_64X64)
1984 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
1987 int output_enabled = (bsize == BLOCK_64X64);
1988 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
1992 *rate = chosen_rdc.rate;
1993 *dist = chosen_rdc.dist;
1996 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
1997 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1998 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1999 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
2000 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
2004 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
2005 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
2006 BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
2007 BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
2008 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
2012 // Look at all the mode_info entries for blocks that are part of this
2013 // partition and find the min and max values for sb_type.
2014 // At the moment this is designed to work on a 64x64 SB but could be
2015 // adjusted to use a size parameter.
2017 // The min and max are assumed to have been initialized prior to calling this
2018 // function, so repeated calls can accumulate a min and max over more than one 64x64 SB.
2019 static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO *mi_8x8,
2020 BLOCK_SIZE *min_block_size,
2021 BLOCK_SIZE *max_block_size,
2022 int bs_hist[BLOCK_SIZES]) {
2023 int sb_width_in_blocks = MI_BLOCK_SIZE;
2024 int sb_height_in_blocks = MI_BLOCK_SIZE;
2028 // Check the sb_type for each block that belongs to this region.
2029 for (i = 0; i < sb_height_in_blocks; ++i) {
2030 for (j = 0; j < sb_width_in_blocks; ++j) {
2031 MODE_INFO *mi = mi_8x8[index+j].src_mi;
2032 BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
2034 *min_block_size = MIN(*min_block_size, sb_type);
2035 *max_block_size = MAX(*max_block_size, sb_type);
2037 index += xd->mi_stride;
2041 // Next square block size less than or equal to the current block size.
2042 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
2043 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2044 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
2045 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
2046 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
2050 // Look at neighboring blocks and set a min and max partition size based on what they chose.
2052 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2053 MACROBLOCKD *const xd,
2054 int mi_row, int mi_col,
2055 BLOCK_SIZE *min_block_size,
2056 BLOCK_SIZE *max_block_size) {
2057 VP9_COMMON *const cm = &cpi->common;
2058 MODE_INFO *mi = xd->mi[0].src_mi;
2059 const int left_in_image = xd->left_available && mi[-1].src_mi;
2060 const int above_in_image = xd->up_available && mi[-xd->mi_stride].src_mi;
2061 const int row8x8_remaining = tile->mi_row_end - mi_row;
2062 const int col8x8_remaining = tile->mi_col_end - mi_col;
2064 BLOCK_SIZE min_size = BLOCK_4X4;
2065 BLOCK_SIZE max_size = BLOCK_64X64;
2067 int bs_hist[BLOCK_SIZES] = {0};
2069 // Trap case where we do not have a prediction.
2070 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
2071 // Initialize min to the largest size and max to the smallest, so the scans below can only tighten them.
2072 min_size = BLOCK_64X64;
2073 max_size = BLOCK_4X4;
2075 // NOTE: each call to get_sb_partition_size_range() uses the previous
2076 // passed in values for min and max as a starting point.
2077 // Find the min and max partition used in previous frame at this location
2078 if (cm->frame_type != KEY_FRAME) {
2079 MODE_INFO *prev_mi =
2080 cm->prev_mip + cm->mi_stride + 1 + mi_row * xd->mi_stride + mi_col;
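// prev_mip includes a one-row, one-column border, hence the
// "+ cm->mi_stride + 1" offset before indexing by (mi_row, mi_col).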
2082 get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
2084 // Find the min and max partition sizes used in the left SB64
2085 if (left_in_image) {
2086 MODE_INFO *left_sb64_mi = mi[-MI_BLOCK_SIZE].src_mi;
2087 get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
2090 // Find the min and max partition sizes used in the above SB64.
2091 if (above_in_image) {
2092 MODE_INFO *above_sb64_mi = mi[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
2093 get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
2097 // adjust observed min and max
2098 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
2099 min_size = min_partition_size[min_size];
2100 max_size = max_partition_size[max_size];
2101 } else if (cpi->sf.auto_min_max_partition_size ==
2102 CONSTRAIN_NEIGHBORING_MIN_MAX) {
2103 // Adjust the search range based on the histogram of the observed
2104 // partition sizes from the left, above, and previous co-located blocks.
2106 int first_moment = 0;
2107 int second_moment = 0;
2108 int var_unnormalized = 0;
2110 for (i = 0; i < BLOCK_SIZES; i++) {
2112 first_moment += bs_hist[i] * i;
2113 second_moment += bs_hist[i] * i * i;
2116 // If the variance is small enough,
2117 // adjust the range around its mean size, which gives a tighter range.
2118 var_unnormalized = second_moment - first_moment * first_moment / sum;
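// var_unnormalized equals sum times the variance of the observed size
// indices, so the check below (var_unnormalized <= 4 * sum) is equivalent
// to requiring that variance to be at most 4.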
2119 if (var_unnormalized <= 4 * sum) {
2120 int mean = first_moment / sum;
2121 min_size = min_partition_size[mean];
2122 max_size = max_partition_size[mean];
2124 min_size = min_partition_size[min_size];
2125 max_size = max_partition_size[max_size];
2130 // Check border cases where max and min from neighbors may not be legal.
2131 max_size = find_partition_size(max_size,
2132 row8x8_remaining, col8x8_remaining,
2134 min_size = MIN(min_size, max_size);
2136 // When use_square_partition_only is true, make sure at least one square
2137 // partition is allowed by selecting the next smaller square size as the minimum.
2139 if (cpi->sf.use_square_partition_only &&
2140 next_square_size[max_size] < min_size) {
2141 min_size = next_square_size[max_size];
2144 *min_block_size = min_size;
2145 *max_block_size = max_size;
2148 static void auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2149 MACROBLOCKD *const xd,
2150 int mi_row, int mi_col,
2151 BLOCK_SIZE *min_block_size,
2152 BLOCK_SIZE *max_block_size) {
2153 VP9_COMMON *const cm = &cpi->common;
2154 MODE_INFO *mi_8x8 = xd->mi;
2155 const int left_in_image = xd->left_available && mi_8x8[-1].src_mi;
2156 const int above_in_image = xd->up_available &&
2157 mi_8x8[-xd->mi_stride].src_mi;
2158 int row8x8_remaining = tile->mi_row_end - mi_row;
2159 int col8x8_remaining = tile->mi_col_end - mi_col;
2161 BLOCK_SIZE min_size = BLOCK_32X32;
2162 BLOCK_SIZE max_size = BLOCK_8X8;
2163 int bsl = mi_width_log2_lookup[BLOCK_64X64];
2164 const int search_range_ctrl = (((mi_row + mi_col) >> bsl) +
2165 get_chessboard_index(cm->current_video_frame)) & 0x1;
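// search_range_ctrl alternates in a checkerboard pattern over 64x64 block
// positions and flips with the frame index, so the neighbor-derived range
// below is applied to roughly half of the blocks in any given frame.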
2166 // Trap case where we do not have a prediction.
2167 if (search_range_ctrl &&
2168 (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
2173 // Find the min and max partition sizes used in the left SB64.
2174 if (left_in_image) {
2176 mi = mi_8x8[-1].src_mi;
2177 for (block = 0; block < MI_BLOCK_SIZE; ++block) {
2178 cur_mi = mi[block * xd->mi_stride].src_mi;
2179 sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
2180 min_size = MIN(min_size, sb_type);
2181 max_size = MAX(max_size, sb_type);
2184 // Find the min and max partition sizes used in the above SB64.
2185 if (above_in_image) {
2186 mi = mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
2187 for (block = 0; block < MI_BLOCK_SIZE; ++block) {
2188 sb_type = mi[block].src_mi ? mi[block].src_mi->mbmi.sb_type : 0;
2189 min_size = MIN(min_size, sb_type);
2190 max_size = MAX(max_size, sb_type);
2194 min_size = min_partition_size[min_size];
2195 max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
2197 min_size = MIN(min_size, max_size);
2198 min_size = MAX(min_size, BLOCK_8X8);
2199 max_size = MIN(max_size, BLOCK_32X32);
2201 min_size = BLOCK_8X8;
2202 max_size = BLOCK_32X32;
2205 *min_block_size = min_size;
2206 *max_block_size = max_size;
2209 // TODO(jingning) refactor functions setting partition search range
2210 static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
2211 int mi_row, int mi_col, BLOCK_SIZE bsize,
2212 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
2213 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2214 int mi_height = num_8x8_blocks_high_lookup[bsize];
2218 const int idx_str = cm->mi_stride * mi_row + mi_col;
2219 MODE_INFO *prev_mi = (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi;
2222 BLOCK_SIZE bs, min_size, max_size;
2224 min_size = BLOCK_64X64;
2225 max_size = BLOCK_4X4;
2228 for (idy = 0; idy < mi_height; ++idy) {
2229 for (idx = 0; idx < mi_width; ++idx) {
2230 mi = prev_mi[idy * cm->mi_stride + idx].src_mi;
2231 bs = mi ? mi->mbmi.sb_type : bsize;
2232 min_size = MIN(min_size, bs);
2233 max_size = MAX(max_size, bs);
2238 if (xd->left_available) {
2239 for (idy = 0; idy < mi_height; ++idy) {
2240 mi = xd->mi[idy * cm->mi_stride - 1].src_mi;
2241 bs = mi ? mi->mbmi.sb_type : bsize;
2242 min_size = MIN(min_size, bs);
2243 max_size = MAX(max_size, bs);
2247 if (xd->up_available) {
2248 for (idx = 0; idx < mi_width; ++idx) {
2249 mi = xd->mi[idx - cm->mi_stride].src_mi;
2250 bs = mi ? mi->mbmi.sb_type : bsize;
2251 min_size = MIN(min_size, bs);
2252 max_size = MAX(max_size, bs);
2256 if (min_size == max_size) {
2257 min_size = min_partition_size[min_size];
2258 max_size = max_partition_size[max_size];
2265 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2266 vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
2269 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2270 vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
2273 #if CONFIG_FP_MB_STATS
2274 const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
2275 {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
2276 const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
2277 {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
2278 const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
2279 {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
2280 const int qindex_split_threshold_lookup[BLOCK_SIZES] =
2281 {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
2282 const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
2283 {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
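// These tables gate the first-pass-statistics shortcuts further down:
// forcing an early split requires base_qindex below the split threshold for
// the block size, the zero-motion skip check only runs when base_qindex is
// above the skip threshold, and complexity_16x16_blocks_threshold[] is the
// motion-inconsistency level above which PARTITION_NONE is not searched.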
2294 static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
2295 if (fp_byte & FPMB_MOTION_ZERO_MASK) {
2297 } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
2299 } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
2301 } else if (fp_byte & FPMB_MOTION_UP_MASK) {
2308 static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
2309 MOTION_DIRECTION that_mv) {
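// Identical directions are treated as consistent; directions that differ by
// 2 in the MOTION_DIRECTION enum (opposite directions, assuming the usual
// ordering of that enum) are weighted twice as heavily as any other mismatch.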
2310 if (this_mv == that_mv) {
2313 return abs(this_mv - that_mv) == 2 ? 2 : 1;
2318 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
2319 // unlikely to be selected depending on previous rate-distortion optimization
2320 // results, for encoding speed-up.
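// rd_pick_partition() recursively evaluates PARTITION_NONE, SPLIT, HORZ and
// VERT for the block at (mi_row, mi_col), subject to the speed-feature and
// edge constraints set up below, and stores the best choice in pc_tree.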
2321 static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
2322 TileDataEnc *tile_data,
2323 TOKENEXTRA **tp, int mi_row, int mi_col,
2324 BLOCK_SIZE bsize, RD_COST *rd_cost,
2325 int64_t best_rd, PC_TREE *pc_tree) {
2326 VP9_COMMON *const cm = &cpi->common;
2327 TileInfo *const tile_info = &tile_data->tile_info;
2328 MACROBLOCK *const x = &td->mb;
2329 MACROBLOCKD *const xd = &x->e_mbd;
2330 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
2331 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2332 PARTITION_CONTEXT sl[8], sa[8];
2333 TOKENEXTRA *tp_orig = *tp;
2334 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2337 RD_COST this_rdc, sum_rdc, best_rdc;
2338 int do_split = bsize >= BLOCK_8X8;
2341 // Override skipping rectangular partition operations for edge blocks
2342 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
2343 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
2344 const int xss = x->e_mbd.plane[1].subsampling_x;
2345 const int yss = x->e_mbd.plane[1].subsampling_y;
2347 BLOCK_SIZE min_size = x->min_partition_size;
2348 BLOCK_SIZE max_size = x->max_partition_size;
2350 #if CONFIG_FP_MB_STATS
2351 unsigned int src_diff_var = UINT_MAX;
2352 int none_complexity = 0;
2355 int partition_none_allowed = !force_horz_split && !force_vert_split;
2356 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2358 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2362 assert(num_8x8_blocks_wide_lookup[bsize] ==
2363 num_8x8_blocks_high_lookup[bsize]);
2365 vp9_rd_cost_init(&this_rdc);
2366 vp9_rd_cost_init(&sum_rdc);
2367 vp9_rd_cost_reset(&best_rdc);
2368 best_rdc.rdcost = best_rd;
2370 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2372 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
2373 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2375 if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
2376 int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
2377 + get_chessboard_index(cm->current_video_frame)) & 0x1;
2379 if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
2380 set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
2383 // Determine partition types in search according to the speed features.
2384 // The threshold set here has to be of square block size.
2385 if (cpi->sf.auto_min_max_partition_size) {
2386 partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
2387 partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
2389 partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
2391 do_split &= bsize > min_size;
2393 if (cpi->sf.use_square_partition_only) {
2394 partition_horz_allowed &= force_horz_split;
2395 partition_vert_allowed &= force_vert_split;
2398 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2400 #if CONFIG_FP_MB_STATS
2401 if (cpi->use_fp_mb_stats) {
2402 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2403 src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
2404 mi_row, mi_col, bsize);
2408 #if CONFIG_FP_MB_STATS
2409 // Decide whether we shall split directly and skip searching NONE by using
2410 // the first pass block statistics
2411 if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
2412 partition_none_allowed && src_diff_var > 4 &&
2413 cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
2414 int mb_row = mi_row >> 1;
2415 int mb_col = mi_col >> 1;
2417 MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2419 MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2422 // Compute a complexity measure that basically measures the inconsistency of
2423 // the motion vectors obtained from the first pass in the current block.
2424 for (r = mb_row; r < mb_row_end ; r++) {
2425 for (c = mb_col; c < mb_col_end; c++) {
2426 const int mb_index = r * cm->mb_cols + c;
2428 MOTION_DIRECTION this_mv;
2429 MOTION_DIRECTION right_mv;
2430 MOTION_DIRECTION bottom_mv;
2433 get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);
2436 if (c != mb_col_end - 1) {
2437 right_mv = get_motion_direction_fp(
2438 cpi->twopass.this_frame_mb_stats[mb_index + 1]);
2439 none_complexity += get_motion_inconsistency(this_mv, right_mv);
2443 if (r != mb_row_end - 1) {
2444 bottom_mv = get_motion_direction_fp(
2445 cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
2446 none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
2449 // do not count its left and top neighbors to avoid double counting
2453 if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
2454 partition_none_allowed = 0;
2460 if (partition_none_allowed) {
2461 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
2462 &this_rdc, bsize, ctx, best_rdc.rdcost);
2463 if (this_rdc.rate != INT_MAX) {
2464 if (bsize >= BLOCK_8X8) {
2465 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2466 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2467 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2468 this_rdc.rate, this_rdc.dist);
2471 if (this_rdc.rdcost < best_rdc.rdcost) {
2472 int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
2473 int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
2475 best_rdc = this_rdc;
2476 if (bsize >= BLOCK_8X8)
2477 pc_tree->partitioning = PARTITION_NONE;
2479 // Adjust dist breakout threshold according to the partition size.
2480 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
2481 b_height_log2_lookup[bsize]);
2483 rate_breakout_thr *= num_pels_log2_lookup[bsize];
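// Scale the breakout thresholds to the block size: the distortion threshold
// is defined for a 64x64 block and shifted down by the log2-area deficit of
// smaller blocks, while the rate threshold is multiplied by
// num_pels_log2_lookup[bsize].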
2485 // If all y, u, v transform blocks in this partition are skippable, and
2486 // the dist & rate are within the thresholds, the partition search is
2487 // terminated for the current branch of the partition search tree.
2488 // The dist & rate thresholds are set to 0 at speed 0 to disable the
2489 // early termination at that speed.
2490 if (!x->e_mbd.lossless &&
2491 (ctx->skippable && best_rdc.dist < dist_breakout_thr &&
2492 best_rdc.rate < rate_breakout_thr)) {
2497 #if CONFIG_FP_MB_STATS
2498 // Check whether every 16x16 first pass block has zero motion and its
2499 // corresponding first pass residue is small enough.
2500 // If that is the case, check the difference variance between the
2501 // current frame and the last frame. If the variance is small enough,
2502 // stop further splitting in the RD optimization.
2503 if (cpi->use_fp_mb_stats && do_split != 0 &&
2504 cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
2505 int mb_row = mi_row >> 1;
2506 int mb_col = mi_col >> 1;
2508 MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2510 MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2514 for (r = mb_row; r < mb_row_end; r++) {
2515 for (c = mb_col; c < mb_col_end; c++) {
2516 const int mb_index = r * cm->mb_cols + c;
2517 if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
2518 FPMB_MOTION_ZERO_MASK) ||
2519 !(cpi->twopass.this_frame_mb_stats[mb_index] &
2520 FPMB_ERROR_SMALL_MASK)) {
2530 if (src_diff_var == UINT_MAX) {
2531 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2532 src_diff_var = get_sby_perpixel_diff_variance(
2533 cpi, &x->plane[0].src, mi_row, mi_col, bsize);
2535 if (src_diff_var < 8) {
2544 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2547 // store estimated motion vector
2548 if (cpi->sf.adaptive_motion_search)
2549 store_pred_mv(x, ctx);
2552 // TODO(jingning): use the motion vectors given by the above search as
2553 // the starting point of motion search in the following partition type check.
2555 subsize = get_subsize(bsize, PARTITION_SPLIT);
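// PARTITION_SPLIT: an 8x8 block is handled with a single leaf-split context,
// while larger blocks recurse into rd_pick_partition() for each quadrant,
// passing down the RD budget that remains after the quadrants already coded.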
2556 if (bsize == BLOCK_8X8) {
2558 if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
2559 pc_tree->leaf_split[0]->pred_interp_filter =
2560 ctx->mic.mbmi.interp_filter;
2561 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2562 pc_tree->leaf_split[0], best_rdc.rdcost);
2563 if (sum_rdc.rate == INT_MAX)
2564 sum_rdc.rdcost = INT64_MAX;
2566 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
2567 const int x_idx = (i & 1) * mi_step;
2568 const int y_idx = (i >> 1) * mi_step;
2570 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
2573 if (cpi->sf.adaptive_motion_search)
2574 load_pred_mv(x, ctx);
2576 pc_tree->split[i]->index = i;
2577 rd_pick_partition(cpi, td, tile_data, tp,
2578 mi_row + y_idx, mi_col + x_idx,
2580 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
2582 if (this_rdc.rate == INT_MAX) {
2583 sum_rdc.rdcost = INT64_MAX;
2586 sum_rdc.rate += this_rdc.rate;
2587 sum_rdc.dist += this_rdc.dist;
2588 sum_rdc.rdcost += this_rdc.rdcost;
2593 if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
2594 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2595 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2596 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2597 sum_rdc.rate, sum_rdc.dist);
2599 if (sum_rdc.rdcost < best_rdc.rdcost) {
2601 pc_tree->partitioning = PARTITION_SPLIT;
2604 // skip rectangular partition test when larger block size
2605 // gives better rd cost
2606 if (cpi->sf.less_rectangular_check)
2607 do_rect &= !partition_none_allowed;
2609 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2613 if (partition_horz_allowed && do_rect) {
2614 subsize = get_subsize(bsize, PARTITION_HORZ);
2615 if (cpi->sf.adaptive_motion_search)
2616 load_pred_mv(x, ctx);
2617 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2618 partition_none_allowed)
2619 pc_tree->horizontal[0].pred_interp_filter =
2620 ctx->mic.mbmi.interp_filter;
2621 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2622 &pc_tree->horizontal[0], best_rdc.rdcost);
2624 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
2625 bsize > BLOCK_8X8) {
2626 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2627 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2628 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2630 if (cpi->sf.adaptive_motion_search)
2631 load_pred_mv(x, ctx);
2632 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2633 partition_none_allowed)
2634 pc_tree->horizontal[1].pred_interp_filter =
2635 ctx->mic.mbmi.interp_filter;
2636 rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col,
2637 &this_rdc, subsize, &pc_tree->horizontal[1],
2638 best_rdc.rdcost - sum_rdc.rdcost);
2639 if (this_rdc.rate == INT_MAX) {
2640 sum_rdc.rdcost = INT64_MAX;
2642 sum_rdc.rate += this_rdc.rate;
2643 sum_rdc.dist += this_rdc.dist;
2644 sum_rdc.rdcost += this_rdc.rdcost;
2648 if (sum_rdc.rdcost < best_rdc.rdcost) {
2649 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2650 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
2651 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
2652 if (sum_rdc.rdcost < best_rdc.rdcost) {
2654 pc_tree->partitioning = PARTITION_HORZ;
2657 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2660 if (partition_vert_allowed && do_rect) {
2661 subsize = get_subsize(bsize, PARTITION_VERT);
2663 if (cpi->sf.adaptive_motion_search)
2664 load_pred_mv(x, ctx);
2665 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2666 partition_none_allowed)
2667 pc_tree->vertical[0].pred_interp_filter =
2668 ctx->mic.mbmi.interp_filter;
2669 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2670 &pc_tree->vertical[0], best_rdc.rdcost);
2671 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
2672 bsize > BLOCK_8X8) {
2673 update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
2674 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
2675 &pc_tree->vertical[0]);
2677 if (cpi->sf.adaptive_motion_search)
2678 load_pred_mv(x, ctx);
2679 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2680 partition_none_allowed)
2681 pc_tree->vertical[1].pred_interp_filter =
2682 ctx->mic.mbmi.interp_filter;
2683 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step,
2685 &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
2686 if (this_rdc.rate == INT_MAX) {
2687 sum_rdc.rdcost = INT64_MAX;
2689 sum_rdc.rate += this_rdc.rate;
2690 sum_rdc.dist += this_rdc.dist;
2691 sum_rdc.rdcost += this_rdc.rdcost;
2695 if (sum_rdc.rdcost < best_rdc.rdcost) {
2696 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2697 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
2698 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2699 sum_rdc.rate, sum_rdc.dist);
2700 if (sum_rdc.rdcost < best_rdc.rdcost) {
2702 pc_tree->partitioning = PARTITION_VERT;
2705 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2708 // TODO(jbb): This code was added so that we avoid a static analysis
2709 // warning related to the fact that best_rd isn't used after this
2710 // point. This code should be refactored so that the duplicate
2711 // checks occur in some sub-function and are thus used...
2713 *rd_cost = best_rdc;
2716 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
2717 pc_tree->index != 3) {
2718 int output_enabled = (bsize == BLOCK_64X64);
2719 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
2723 if (bsize == BLOCK_64X64) {
2724 assert(tp_orig < *tp);
2725 assert(best_rdc.rate < INT_MAX);
2726 assert(best_rdc.dist < INT64_MAX);
2728 assert(tp_orig == *tp);
2732 static void encode_rd_sb_row(VP9_COMP *cpi,
2734 TileDataEnc *tile_data,
2737 VP9_COMMON *const cm = &cpi->common;
2738 TileInfo *const tile_info = &tile_data->tile_info;
2739 MACROBLOCK *const x = &td->mb;
2740 MACROBLOCKD *const xd = &x->e_mbd;
2741 SPEED_FEATURES *const sf = &cpi->sf;
2744 // Initialize the left context for the new SB row
2745 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
2746 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
2748 // Code each SB in the row
2749 for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
2750 mi_col += MI_BLOCK_SIZE) {
2751 const struct segmentation *const seg = &cm->seg;
2758 const int idx_str = cm->mi_stride * mi_row + mi_col;
2759 MODE_INFO *mi = cm->mi + idx_str;
2761 if (sf->adaptive_pred_interp_filter) {
2762 for (i = 0; i < 64; ++i)
2763 td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
2765 for (i = 0; i < 64; ++i) {
2766 td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
2767 td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
2768 td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
2769 td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
2773 vp9_zero(x->pred_mv);
2774 td->pc_root->index = 0;
2777 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
2778 : cm->last_frame_seg_map;
2779 int segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
2780 seg_skip = vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP);
2783 x->source_variance = UINT_MAX;
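// Choose how this 64x64 block gets partitioned: a fixed size (or whole-SB
// skip), a variance-derived fixed size for skippable frames, a variance
// based partitioning used as-is, or the full RD partition search.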
2784 if (sf->partition_search_type == FIXED_PARTITION || seg_skip) {
2785 const BLOCK_SIZE bsize =
2786 seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
2787 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2788 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
2789 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2790 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2791 } else if (cpi->partition_search_skippable_frame) {
2793 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2794 bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
2795 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
2796 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2797 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2798 } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
2799 cm->frame_type != KEY_FRAME) {
2800 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
2801 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2802 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2804 // If required set upper and lower partition size limits
2805 if (sf->auto_min_max_partition_size) {
2806 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2807 rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
2808 &x->min_partition_size,
2809 &x->max_partition_size);
2811 rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
2812 &dummy_rdc, INT64_MAX, td->pc_root);
2817 static void init_encode_frame_mb_context(VP9_COMP *cpi) {
2818 MACROBLOCK *const x = &cpi->td.mb;
2819 VP9_COMMON *const cm = &cpi->common;
2820 MACROBLOCKD *const xd = &x->e_mbd;
2821 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
2823 // Copy data over into macro block data structures.
2824 vp9_setup_src_planes(x, cpi->Source, 0, 0);
2826 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
2828 // Note: this memset assumes above_context[0], [1] and [2]
2829 // are allocated as part of the same buffer.
2830 vpx_memset(xd->above_context[0], 0,
2831 sizeof(*xd->above_context[0]) *
2832 2 * aligned_mi_cols * MAX_MB_PLANE);
2833 vpx_memset(xd->above_seg_context, 0,
2834 sizeof(*xd->above_seg_context) * aligned_mi_cols);
2837 static int check_dual_ref_flags(VP9_COMP *cpi) {
2838 const int ref_flags = cpi->ref_frame_flags;
2840 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
2843 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
2844 + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
2848 static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
2850 const int mis = cm->mi_stride;
2851 MODE_INFO *mi_ptr = cm->mi;
2853 for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
2854 for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
2855 if (mi_ptr[mi_col].src_mi->mbmi.tx_size > max_tx_size)
2856 mi_ptr[mi_col].src_mi->mbmi.tx_size = max_tx_size;
2861 static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
2862 if (frame_is_intra_only(&cpi->common))
2864 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
2865 return ALTREF_FRAME;
2866 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
2867 return GOLDEN_FRAME;
2872 static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
2875 if (cpi->common.frame_type == KEY_FRAME &&
2876 cpi->sf.use_nonrd_pick_mode &&
2877 cpi->sf.partition_search_type == VAR_BASED_PARTITION)
2879 if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
2881 else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
2882 cpi->sf.tx_size_search_method == USE_TX_8X8)
2883 return TX_MODE_SELECT;
2885 return cpi->common.tx_mode;
2888 static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
2889 RD_COST *rd_cost, BLOCK_SIZE bsize,
2890 PICK_MODE_CONTEXT *ctx) {
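// Blocks smaller than 16x16 get the full RD intra search; larger blocks use
// the faster non-RD intra mode picker.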
2891 if (bsize < BLOCK_16X16)
2892 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
2894 vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
2897 static void nonrd_pick_sb_modes(VP9_COMP *cpi,
2898 TileDataEnc *tile_data, MACROBLOCK *const x,
2899 int mi_row, int mi_col, RD_COST *rd_cost,
2900 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
2901 VP9_COMMON *const cm = &cpi->common;
2902 TileInfo *const tile_info = &tile_data->tile_info;
2903 MACROBLOCKD *const xd = &x->e_mbd;
2905 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2906 mbmi = &xd->mi[0].src_mi->mbmi;
2907 mbmi->sb_type = bsize;
2909 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
2910 if (cyclic_refresh_segment_id_boosted(mbmi->segment_id))
2911 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
2913 if (cm->frame_type == KEY_FRAME)
2914 hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
2915 else if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
2916 set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
2917 else if (bsize >= BLOCK_8X8)
2918 vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col,
2919 rd_cost, bsize, ctx);
2921 vp9_pick_inter_mode_sub8x8(cpi, x, tile_data, mi_row, mi_col,
2922 rd_cost, bsize, ctx);
2924 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2926 if (rd_cost->rate == INT_MAX)
2927 vp9_rd_cost_reset(rd_cost);
2929 ctx->rate = rd_cost->rate;
2930 ctx->dist = rd_cost->dist;
2933 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
2934 int mi_row, int mi_col,
2937 MACROBLOCKD *xd = &x->e_mbd;
2938 int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
2939 PARTITION_TYPE partition = pc_tree->partitioning;
2940 BLOCK_SIZE subsize = get_subsize(bsize, partition);
2942 assert(bsize >= BLOCK_8X8);
2944 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
2947 switch (partition) {
2948 case PARTITION_NONE:
2949 set_mode_info_offsets(cm, xd, mi_row, mi_col);
2950 *(xd->mi[0].src_mi) = pc_tree->none.mic;
2951 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2953 case PARTITION_VERT:
2954 set_mode_info_offsets(cm, xd, mi_row, mi_col);
2955 *(xd->mi[0].src_mi) = pc_tree->vertical[0].mic;
2956 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
2958 if (mi_col + hbs < cm->mi_cols) {
2959 set_mode_info_offsets(cm, xd, mi_row, mi_col + hbs);
2960 *(xd->mi[0].src_mi) = pc_tree->vertical[1].mic;
2961 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
2964 case PARTITION_HORZ:
2965 set_mode_info_offsets(cm, xd, mi_row, mi_col);
2966 *(xd->mi[0].src_mi) = pc_tree->horizontal[0].mic;
2967 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
2968 if (mi_row + hbs < cm->mi_rows) {
2969 set_mode_info_offsets(cm, xd, mi_row + hbs, mi_col);
2970 *(xd->mi[0].src_mi) = pc_tree->horizontal[1].mic;
2971 duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
2974 case PARTITION_SPLIT: {
2975 fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
2976 fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
2978 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
2980 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
2989 // Reset the prediction pixel ready flag recursively.
2990 static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
2991 pc_tree->none.pred_pixel_ready = 0;
2992 pc_tree->horizontal[0].pred_pixel_ready = 0;
2993 pc_tree->horizontal[1].pred_pixel_ready = 0;
2994 pc_tree->vertical[0].pred_pixel_ready = 0;
2995 pc_tree->vertical[1].pred_pixel_ready = 0;
2997 if (bsize > BLOCK_8X8) {
2998 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
3000 for (i = 0; i < 4; ++i)
3001 pred_pixel_ready_reset(pc_tree->split[i], subsize);
3005 static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
3006 TileDataEnc *tile_data,
3007 TOKENEXTRA **tp, int mi_row,
3008 int mi_col, BLOCK_SIZE bsize, RD_COST *rd_cost,
3009 int do_recon, int64_t best_rd,
3011 const SPEED_FEATURES *const sf = &cpi->sf;
3012 VP9_COMMON *const cm = &cpi->common;
3013 TileInfo *const tile_info = &tile_data->tile_info;
3014 MACROBLOCK *const x = &td->mb;
3015 MACROBLOCKD *const xd = &x->e_mbd;
3016 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
3017 TOKENEXTRA *tp_orig = *tp;
3018 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
3020 BLOCK_SIZE subsize = bsize;
3021 RD_COST this_rdc, sum_rdc, best_rdc;
3022 int do_split = bsize >= BLOCK_8X8;
3024 // Override skipping rectangular partition operations for edge blocks
3025 const int force_horz_split = (mi_row + ms >= cm->mi_rows);
3026 const int force_vert_split = (mi_col + ms >= cm->mi_cols);
3027 const int xss = x->e_mbd.plane[1].subsampling_x;
3028 const int yss = x->e_mbd.plane[1].subsampling_y;
3030 int partition_none_allowed = !force_horz_split && !force_vert_split;
3031 int partition_horz_allowed = !force_vert_split && yss <= xss &&
3033 int partition_vert_allowed = !force_horz_split && xss <= yss &&
3037 assert(num_8x8_blocks_wide_lookup[bsize] ==
3038 num_8x8_blocks_high_lookup[bsize]);
3040 vp9_rd_cost_init(&sum_rdc);
3041 vp9_rd_cost_reset(&best_rdc);
3042 best_rdc.rdcost = best_rd;
3044 // Determine partition types in search according to the speed features.
3045 // The threshold set here has to be of square block size.
3046 if (sf->auto_min_max_partition_size) {
3047 partition_none_allowed &= (bsize <= x->max_partition_size &&
3048 bsize >= x->min_partition_size);
3049 partition_horz_allowed &= ((bsize <= x->max_partition_size &&
3050 bsize > x->min_partition_size) ||
3052 partition_vert_allowed &= ((bsize <= x->max_partition_size &&
3053 bsize > x->min_partition_size) ||
3055 do_split &= bsize > x->min_partition_size;
3057 if (sf->use_square_partition_only) {
3058 partition_horz_allowed &= force_horz_split;
3059 partition_vert_allowed &= force_vert_split;
3062 ctx->pred_pixel_ready = !(partition_vert_allowed ||
3063 partition_horz_allowed ||
3067 if (partition_none_allowed) {
3068 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
3069 &this_rdc, bsize, ctx);
3070 ctx->mic.mbmi = xd->mi[0].src_mi->mbmi;
3071 ctx->skip_txfm[0] = x->skip_txfm[0];
3072 ctx->skip = x->skip;
3074 if (this_rdc.rate != INT_MAX) {
3075 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3076 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
3077 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3078 this_rdc.rate, this_rdc.dist);
3079 if (this_rdc.rdcost < best_rdc.rdcost) {
3080 int64_t dist_breakout_thr = sf->partition_search_breakout_dist_thr;
3081 int64_t rate_breakout_thr = sf->partition_search_breakout_rate_thr;
3083 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
3084 b_height_log2_lookup[bsize]);
3086 rate_breakout_thr *= num_pels_log2_lookup[bsize];
3088 best_rdc = this_rdc;
3089 if (bsize >= BLOCK_8X8)
3090 pc_tree->partitioning = PARTITION_NONE;
3092 if (!x->e_mbd.lossless &&
3093 this_rdc.rate < rate_breakout_thr &&
3094 this_rdc.dist < dist_breakout_thr) {
3102 // store estimated motion vector
3103 store_pred_mv(x, ctx);
3107 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3108 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
3109 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
3110 subsize = get_subsize(bsize, PARTITION_SPLIT);
3111 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
3112 const int x_idx = (i & 1) * ms;
3113 const int y_idx = (i >> 1) * ms;
3115 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
3117 load_pred_mv(x, ctx);
3118 nonrd_pick_partition(cpi, td, tile_data, tp,
3119 mi_row + y_idx, mi_col + x_idx,
3120 subsize, &this_rdc, 0,
3121 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
3123 if (this_rdc.rate == INT_MAX) {
3124 vp9_rd_cost_reset(&sum_rdc);
3126 sum_rdc.rate += this_rdc.rate;
3127 sum_rdc.dist += this_rdc.dist;
3128 sum_rdc.rdcost += this_rdc.rdcost;
3132 if (sum_rdc.rdcost < best_rdc.rdcost) {
3134 pc_tree->partitioning = PARTITION_SPLIT;
3136 // skip rectangular partition test when larger block size
3137 // gives better rd cost
3138 if (sf->less_rectangular_check)
3139 do_rect &= !partition_none_allowed;
3144 if (partition_horz_allowed && do_rect) {
3145 subsize = get_subsize(bsize, PARTITION_HORZ);
3146 if (sf->adaptive_motion_search)
3147 load_pred_mv(x, ctx);
3148 pc_tree->horizontal[0].pred_pixel_ready = 1;
3149 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3150 &pc_tree->horizontal[0]);
3152 pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3153 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3154 pc_tree->horizontal[0].skip = x->skip;
3156 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
3157 load_pred_mv(x, ctx);
3158 pc_tree->horizontal[1].pred_pixel_ready = 1;
3159 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col,
3161 &pc_tree->horizontal[1]);
3163 pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3164 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3165 pc_tree->horizontal[1].skip = x->skip;
3167 if (this_rdc.rate == INT_MAX) {
3168 vp9_rd_cost_reset(&sum_rdc);
3170 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3171 this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
3172 sum_rdc.rate += this_rdc.rate;
3173 sum_rdc.dist += this_rdc.dist;
3174 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3175 sum_rdc.rate, sum_rdc.dist);
3179 if (sum_rdc.rdcost < best_rdc.rdcost) {
3181 pc_tree->partitioning = PARTITION_HORZ;
3183 pred_pixel_ready_reset(pc_tree, bsize);
3188 if (partition_vert_allowed && do_rect) {
3189 subsize = get_subsize(bsize, PARTITION_VERT);
3190 if (sf->adaptive_motion_search)
3191 load_pred_mv(x, ctx);
3192 pc_tree->vertical[0].pred_pixel_ready = 1;
3193 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3194 &pc_tree->vertical[0]);
3195 pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3196 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3197 pc_tree->vertical[0].skip = x->skip;
3199 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
3200 load_pred_mv(x, ctx);
3201 pc_tree->vertical[1].pred_pixel_ready = 1;
3202 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms,
3204 &pc_tree->vertical[1]);
3205 pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3206 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3207 pc_tree->vertical[1].skip = x->skip;
3209 if (this_rdc.rate == INT_MAX) {
3210 vp9_rd_cost_reset(&sum_rdc);
3212 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3213 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
3214 sum_rdc.rate += this_rdc.rate;
3215 sum_rdc.dist += this_rdc.dist;
3216 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3217 sum_rdc.rate, sum_rdc.dist);
3221 if (sum_rdc.rdcost < best_rdc.rdcost) {
3223 pc_tree->partitioning = PARTITION_VERT;
3225 pred_pixel_ready_reset(pc_tree, bsize);
3229 *rd_cost = best_rdc;
3231 if (best_rdc.rate == INT_MAX) {
3232 vp9_rd_cost_reset(rd_cost);
3236 // update mode info array
3237 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, pc_tree);
3239 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
3240 int output_enabled = (bsize == BLOCK_64X64);
3241 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3245 if (bsize == BLOCK_64X64 && do_recon) {
3246 assert(tp_orig < *tp);
3247 assert(best_rdc.rate < INT_MAX);
3248 assert(best_rdc.dist < INT64_MAX);
3250 assert(tp_orig == *tp);
3254 static void nonrd_select_partition(VP9_COMP *cpi,
3256 TileDataEnc *tile_data,
3259 int mi_row, int mi_col,
3260 BLOCK_SIZE bsize, int output_enabled,
3261 RD_COST *rd_cost, PC_TREE *pc_tree) {
3262 VP9_COMMON *const cm = &cpi->common;
3263 TileInfo *const tile_info = &tile_data->tile_info;
3264 MACROBLOCK *const x = &td->mb;
3265 MACROBLOCKD *const xd = &x->e_mbd;
3266 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3267 const int mis = cm->mi_stride;
3268 PARTITION_TYPE partition;
3272 vp9_rd_cost_reset(&this_rdc);
3273 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3276 subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4;
3277 partition = partition_lookup[bsl][subsize];
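// Recover the partition type from the block sizes already stored in the
// mode info: partition_lookup maps the current size level and the stored
// sub-block size back to the PARTITION_* that produced it.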
3279 if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
3280 subsize >= BLOCK_16X16) {
3281 x->max_partition_size = BLOCK_32X32;
3282 x->min_partition_size = BLOCK_8X8;
3283 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3284 rd_cost, 0, INT64_MAX, pc_tree);
3285 } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
3286 x->max_partition_size = BLOCK_16X16;
3287 x->min_partition_size = BLOCK_8X8;
3288 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3289 rd_cost, 0, INT64_MAX, pc_tree);
3291 switch (partition) {
3292 case PARTITION_NONE:
3293 pc_tree->none.pred_pixel_ready = 1;
3294 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3295 subsize, &pc_tree->none);
3296 pc_tree->none.mic.mbmi = xd->mi[0].src_mi->mbmi;
3297 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3298 pc_tree->none.skip = x->skip;
3300 case PARTITION_VERT:
3301 pc_tree->vertical[0].pred_pixel_ready = 1;
3302 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3303 subsize, &pc_tree->vertical[0]);
3304 pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3305 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3306 pc_tree->vertical[0].skip = x->skip;
3307 if (mi_col + hbs < cm->mi_cols) {
3308 pc_tree->vertical[1].pred_pixel_ready = 1;
3309 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
3310 &this_rdc, subsize, &pc_tree->vertical[1]);
3311 pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3312 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3313 pc_tree->vertical[1].skip = x->skip;
3314 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3315 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3316 rd_cost->rate += this_rdc.rate;
3317 rd_cost->dist += this_rdc.dist;
3321 case PARTITION_HORZ:
3322 pc_tree->horizontal[0].pred_pixel_ready = 1;
3323 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3324 subsize, &pc_tree->horizontal[0]);
3325 pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3326 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3327 pc_tree->horizontal[0].skip = x->skip;
3328 if (mi_row + hbs < cm->mi_rows) {
3329 pc_tree->horizontal[1].pred_pixel_ready = 1;
3330 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
3331 &this_rdc, subsize, &pc_tree->horizontal[1]);
3332 pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3333 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3334 pc_tree->horizontal[1].skip = x->skip;
3335 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3336 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3337 rd_cost->rate += this_rdc.rate;
3338 rd_cost->dist += this_rdc.dist;
3342 case PARTITION_SPLIT:
3343 subsize = get_subsize(bsize, PARTITION_SPLIT);
3344 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3345 subsize, output_enabled, rd_cost,
3347 nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp,
3348 mi_row, mi_col + hbs, subsize, output_enabled,
3349 &this_rdc, pc_tree->split[1]);
3350 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3351 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3352 rd_cost->rate += this_rdc.rate;
3353 rd_cost->dist += this_rdc.dist;
3355 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis, tp,
3356 mi_row + hbs, mi_col, subsize, output_enabled,
3357 &this_rdc, pc_tree->split[2]);
3358 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3359 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3360 rd_cost->rate += this_rdc.rate;
3361 rd_cost->dist += this_rdc.dist;
3363 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
3364 mi_row + hbs, mi_col + hbs, subsize,
3365 output_enabled, &this_rdc, pc_tree->split[3]);
3366 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3367 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3368 rd_cost->rate += this_rdc.rate;
3369 rd_cost->dist += this_rdc.dist;
3373 assert(0 && "Invalid partition type.");
3378 if (bsize == BLOCK_64X64 && output_enabled)
3379 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
3383 static void nonrd_use_partition(VP9_COMP *cpi,
3385 TileDataEnc *tile_data,
3388 int mi_row, int mi_col,
3389 BLOCK_SIZE bsize, int output_enabled,
3390 RD_COST *dummy_cost, PC_TREE *pc_tree) {
3391 VP9_COMMON *const cm = &cpi->common;
3392 TileInfo *tile_info = &tile_data->tile_info;
3393 MACROBLOCK *const x = &td->mb;
3394 MACROBLOCKD *const xd = &x->e_mbd;
3395 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3396 const int mis = cm->mi_stride;
3397 PARTITION_TYPE partition;
3400 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3403 subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4;
3404 partition = partition_lookup[bsl][subsize];
3406 if (output_enabled && bsize != BLOCK_4X4) {
3407 int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
3408 td->counts->partition[ctx][partition]++;
3411 switch (partition) {
3412 case PARTITION_NONE:
3413 pc_tree->none.pred_pixel_ready = 1;
3414 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3415 subsize, &pc_tree->none);
3416 pc_tree->none.mic.mbmi = xd->mi[0].src_mi->mbmi;
3417 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3418 pc_tree->none.skip = x->skip;
3419 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3420 subsize, &pc_tree->none);
3422 case PARTITION_VERT:
3423 pc_tree->vertical[0].pred_pixel_ready = 1;
3424 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3425 subsize, &pc_tree->vertical[0]);
3426 pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3427 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3428 pc_tree->vertical[0].skip = x->skip;
3429 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3430 subsize, &pc_tree->vertical[0]);
3431 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
3432 pc_tree->vertical[1].pred_pixel_ready = 1;
3433 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
3434 dummy_cost, subsize, &pc_tree->vertical[1]);
3435 pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3436 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3437 pc_tree->vertical[1].skip = x->skip;
3438 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
3439 output_enabled, subsize, &pc_tree->vertical[1]);
3442 case PARTITION_HORZ:
3443 pc_tree->horizontal[0].pred_pixel_ready = 1;
3444 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3445 subsize, &pc_tree->horizontal[0]);
3446 pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3447 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3448 pc_tree->horizontal[0].skip = x->skip;
3449 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3450 subsize, &pc_tree->horizontal[0]);
3452 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
3453 pc_tree->horizontal[1].pred_pixel_ready = 1;
3454 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
3455 dummy_cost, subsize, &pc_tree->horizontal[1]);
3456 pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3457 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3458 pc_tree->horizontal[1].skip = x->skip;
3459 encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
3460 output_enabled, subsize, &pc_tree->horizontal[1]);
3463 case PARTITION_SPLIT:
3464 subsize = get_subsize(bsize, PARTITION_SPLIT);
3465 if (bsize == BLOCK_8X8) {
3466 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3467 subsize, pc_tree->leaf_split[0]);
3468 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col,
3469 output_enabled, subsize, pc_tree->leaf_split[0]);
3471 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3472 subsize, output_enabled, dummy_cost,
3474 nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp,
3475 mi_row, mi_col + hbs, subsize, output_enabled,
3476 dummy_cost, pc_tree->split[1]);
3477 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
3478 mi_row + hbs, mi_col, subsize, output_enabled,
3479 dummy_cost, pc_tree->split[2]);
3480 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
3481 mi_row + hbs, mi_col + hbs, subsize, output_enabled,
3482 dummy_cost, pc_tree->split[3]);
3486 assert(0 && "Invalid partition type.");
3490 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
3491 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
3494 static void encode_nonrd_sb_row(VP9_COMP *cpi,
3496 TileDataEnc *tile_data,
3499 SPEED_FEATURES *const sf = &cpi->sf;
3500 VP9_COMMON *const cm = &cpi->common;
3501 TileInfo *const tile_info = &tile_data->tile_info;
3502 MACROBLOCK *const x = &td->mb;
3503 MACROBLOCKD *const xd = &x->e_mbd;
3506 // Initialize the left context for the new SB row
3507 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
3508 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
3510 // Code each SB in the row
3511 for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
3512 mi_col += MI_BLOCK_SIZE) {
3513 const struct segmentation *const seg = &cm->seg;
3515 const int idx_str = cm->mi_stride * mi_row + mi_col;
3516 MODE_INFO *mi = cm->mi + idx_str;
3517 PARTITION_SEARCH_TYPE partition_search_type = sf->partition_search_type;
3518 BLOCK_SIZE bsize = BLOCK_64X64;
3520 x->source_variance = UINT_MAX;
3521 vp9_zero(x->pred_mv);
3522 vp9_rd_cost_init(&dummy_rdc);
3523 x->color_sensitivity[0] = 0;
3524 x->color_sensitivity[1] = 0;
3527 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
3528 : cm->last_frame_seg_map;
3529 int segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
3530 seg_skip = vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP);
3532 partition_search_type = FIXED_PARTITION;
3536 // Set the partition type of the 64X64 block
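// VAR_BASED_PARTITION and SOURCE_VAR_BASED_PARTITION derive a partitioning
// from variance and then simply use it; FIXED_PARTITION applies a single
// block size everywhere; REFERENCE_PARTITION refines a variance-based
// choice with a non-RD partition selection, except when cyclic refresh AQ
// places the block in a non-zero segment, in which case a full non-RD
// partition search is run.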
3537 switch (partition_search_type) {
3538 case VAR_BASED_PARTITION:
3539 // TODO(jingning, marpan): The mode decision and encoding process
3540 // support both intra and inter sub8x8 block coding for RTC mode.
3541 // Tune the thresholds accordingly to use sub8x8 block coding for
3542 // coding performance improvement.
3543 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
3544 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3545 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
3547 case SOURCE_VAR_BASED_PARTITION:
3548 set_source_var_based_partition(cpi, tile_info, x, mi, mi_row, mi_col);
3549 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3550 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
3552 case FIXED_PARTITION:
3554 bsize = sf->always_this_block_size;
3555 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
3556 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3557 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
3559 case REFERENCE_PARTITION:
3560 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
3561 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
3562 xd->mi[0].src_mi->mbmi.segment_id) {
3563 x->max_partition_size = BLOCK_64X64;
3564 x->min_partition_size = BLOCK_8X8;
3565 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
3566 BLOCK_64X64, &dummy_rdc, 1,
3567 INT64_MAX, td->pc_root);
3569 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
3570 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3571 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
3581 // end RTC play code
3583 static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
3584 const SPEED_FEATURES *const sf = &cpi->sf;
3585 const VP9_COMMON *const cm = &cpi->common;
3587 const uint8_t *src = cpi->Source->y_buffer;
3588 const uint8_t *last_src = cpi->Last_Source->y_buffer;
3589 const int src_stride = cpi->Source->y_stride;
3590 const int last_stride = cpi->Last_Source->y_stride;
  // Pick the cutoff threshold as a percentage of the macroblocks in the
  // frame, with different percentages for 720p-and-above and smaller frames.
3593 const int cutoff = (MIN(cm->width, cm->height) >= 720) ?
3594 (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
3595 (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
3596 DECLARE_ALIGNED_ARRAY(16, int, hist, VAR_HIST_BINS);
  diff *var16 = cpi->source_diff_var;

  int sum = 0;
  int i, j;

  vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
3604 for (i = 0; i < cm->mb_rows; i++) {
3605 for (j = 0; j < cm->mb_cols; j++) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        switch (cm->bit_depth) {
          case VPX_BITS_8:
            vp9_highbd_get16x16var(src, src_stride, last_src, last_stride,
                                   &var16->sse, &var16->sum);
            break;
          case VPX_BITS_10:
            vp9_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          case VPX_BITS_12:
            vp9_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          default:
            assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
                        " or VPX_BITS_12");
            return -1;
        }
      } else {
        vp9_get16x16var(src, src_stride, last_src, last_stride,
                        &var16->sse, &var16->sum);
      }
#else
      vp9_get16x16var(src, src_stride, last_src, last_stride,
                      &var16->sse, &var16->sum);
#endif  // CONFIG_VP9_HIGHBITDEPTH
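      // Variance of the 16x16 difference block: SSE minus the squared mean,
      // i.e. sse - (sum * sum) / 256, since a 16x16 block has 256 pixels.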
3634 var16->var = var16->sse -
3635 (((uint32_t)var16->sum * var16->sum) >> 8);
3637 if (var16->var >= VAR_HIST_MAX_BG_VAR)
3638 hist[VAR_HIST_BINS - 1]++;
      else
        hist[var16->var / VAR_HIST_FACTOR]++;

      src += 16;
      last_src += 16;
      var16++;
    }

    src = src - cm->mb_cols * 16 + 16 * src_stride;
    last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
  }
3651 cpi->source_var_thresh = 0;
  if (hist[VAR_HIST_BINS - 1] < cutoff) {
    for (i = 0; i < VAR_HIST_BINS - 1; i++) {
      sum += hist[i];
      if (sum > cutoff) {
        cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
        return 0;
      }
    }
  }

  return sf->search_type_check_frequency;
}
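// Chooses the partition search method for the current frame: key frames use a
// full partition search and intra-only frames a fixed partition; otherwise the
// frame-difference variance histogram above decides whether to fall back to a
// fixed partition for the next few frames.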
3667 static void source_var_based_partition_search_method(VP9_COMP *cpi) {
3668 VP9_COMMON *const cm = &cpi->common;
3669 SPEED_FEATURES *const sf = &cpi->sf;
3671 if (cm->frame_type == KEY_FRAME) {
3672 // For key frame, use SEARCH_PARTITION.
3673 sf->partition_search_type = SEARCH_PARTITION;
  } else if (cm->intra_only) {
    sf->partition_search_type = FIXED_PARTITION;
  } else {
    if (cm->last_width != cm->width || cm->last_height != cm->height) {
      if (cpi->source_diff_var)
        vpx_free(cpi->source_diff_var);

      CHECK_MEM_ERROR(cm, cpi->source_diff_var,
                      vpx_calloc(cm->MBs, sizeof(diff)));
    }

    if (!cpi->frames_till_next_var_check)
      cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);

    if (cpi->frames_till_next_var_check > 0) {
      sf->partition_search_type = FIXED_PARTITION;
      cpi->frames_till_next_var_check--;
    }
  }
}
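// Decides whether encoding can be skipped (skip_encode_frame speed feature):
// requires inter blocks to dominate (intra counts below a quarter of the inter
// counts), a non-key frame, and a shown frame.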
static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) {
  unsigned int intra_count = 0, inter_count = 0;
  int j;

  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
    intra_count += td->counts->intra_inter[j][0];
    inter_count += td->counts->intra_inter[j][1];
  }

  return (intra_count << 2) < inter_count &&
         cm->frame_type != KEY_FRAME &&
         cm->show_frame;
}
3709 void vp9_init_tile_data(VP9_COMP *cpi) {
3710 VP9_COMMON *const cm = &cpi->common;
3711 const int tile_cols = 1 << cm->log2_tile_cols;
3712 const int tile_rows = 1 << cm->log2_tile_rows;
3713 int tile_col, tile_row;
  TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
  int tile_tok = 0;

  if (cpi->tile_data == NULL) {
3718 CHECK_MEM_ERROR(cm, cpi->tile_data,
3719 vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
3720 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
3721 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
3722 TileDataEnc *tile_data =
3723 &cpi->tile_data[tile_row * tile_cols + tile_col];
        int i, j;
        for (i = 0; i < BLOCK_SIZES; ++i) {
          for (j = 0; j < MAX_MODES; ++j) {
            tile_data->thresh_freq_fact[i][j] = 32;
            tile_data->mode_map[i][j] = j;
          }
        }
      }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
3735 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
3736 TileInfo *tile_info =
3737 &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
3738 vp9_tile_init(tile_info, cm, tile_row, tile_col);
3740 cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
3741 pre_tok = cpi->tile_tok[tile_row][tile_col];
      tile_tok = allocated_tokens(*tile_info);
    }
  }
}
3747 void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td,
3748 int tile_row, int tile_col) {
3749 VP9_COMMON *const cm = &cpi->common;
3750 const int tile_cols = 1 << cm->log2_tile_cols;
3751 TileDataEnc *this_tile =
3752 &cpi->tile_data[tile_row * tile_cols + tile_col];
3753 const TileInfo * const tile_info = &this_tile->tile_info;
  TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
  int mi_row;

  for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
3758 mi_row += MI_BLOCK_SIZE) {
3759 if (cpi->sf.use_nonrd_pick_mode)
3760 encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
    else
      encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
  }

  cpi->tok_count[tile_row][tile_col] =
3765 (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
3766 assert(tok - cpi->tile_tok[tile_row][tile_col] <=
         allocated_tokens(*tile_info));
}
3770 static void encode_tiles(VP9_COMP *cpi) {
3771 VP9_COMMON *const cm = &cpi->common;
3772 const int tile_cols = 1 << cm->log2_tile_cols;
3773 const int tile_rows = 1 << cm->log2_tile_rows;
3774 int tile_col, tile_row;
3776 vp9_init_tile_data(cpi);
3778 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
3779 for (tile_col = 0; tile_col < tile_cols; ++tile_col)
      vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
}
3783 #if CONFIG_FP_MB_STATS
3784 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
3785 VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
3786 uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
3787 cm->current_video_frame * cm->MBs * sizeof(uint8_t);
  if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
    return EOF;

  *this_frame_mb_stats = mb_stats_in;

  return 1;
}
#endif
3798 static void encode_frame_internal(VP9_COMP *cpi) {
3799 SPEED_FEATURES *const sf = &cpi->sf;
3800 RD_OPT *const rd_opt = &cpi->rd;
3801 ThreadData *const td = &cpi->td;
3802 MACROBLOCK *const x = &td->mb;
3803 VP9_COMMON *const cm = &cpi->common;
3804 MACROBLOCKD *const xd = &x->e_mbd;
3805 RD_COUNTS *const rdc = &cpi->td.rd_counts;
  xd->mi = cm->mi;
  xd->mi[0].src_mi = &xd->mi[0];
3810 vp9_zero(*td->counts);
3811 vp9_zero(rdc->coef_counts);
3812 vp9_zero(rdc->comp_pred_diff);
3813 vp9_zero(rdc->filter_diff);
3814 vp9_zero(rdc->tx_select_diff);
3815 vp9_zero(rd_opt->tx_select_threshes);
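  // Lossless coding is only possible when the base Q index and all delta Q
  // values are zero; in that case the 4x4 Walsh-Hadamard transform is
  // selected below instead of the DCT.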
3817 xd->lossless = cm->base_qindex == 0 &&
3818 cm->y_dc_delta_q == 0 &&
3819 cm->uv_dc_delta_q == 0 &&
3820 cm->uv_ac_delta_q == 0;
3822 #if CONFIG_VP9_HIGHBITDEPTH
3823 if (cm->use_highbitdepth)
    x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vp9_highbd_fdct4x4;
  else
    x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
  x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add :
                                      vp9_highbd_idct4x4_add;
#else
  x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
3831 #endif // CONFIG_VP9_HIGHBITDEPTH
3832 x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
3837 cm->tx_mode = select_tx_mode(cpi, xd);
3839 vp9_frame_init_quantizer(cpi);
3841 vp9_initialize_rd_consts(cpi);
3842 vp9_initialize_me_consts(cpi, x, cm->base_qindex);
3843 init_encode_frame_mb_context(cpi);
3844 cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
3845 cm->width == cm->last_width &&
                           cm->height == cm->last_height &&
                           !cm->intra_only &&
                           cm->last_show_frame;
3849 // Special case: set prev_mi to NULL when the previous mode info
3850 // context cannot be used.
3851 cm->prev_mi = cm->use_prev_frame_mvs ?
3852 cm->prev_mip + cm->mi_stride + 1 : NULL;
3854 x->quant_fp = cpi->sf.use_quant_fp;
3855 vp9_zero(x->skip_txfm);
3856 if (sf->use_nonrd_pick_mode) {
3857 // Initialize internal buffer pointers for rtc coding, where non-RD
    // mode decision is used and hence no buffer pointer swap needed.
    int i;
    struct macroblock_plane *const p = x->plane;
3861 struct macroblockd_plane *const pd = xd->plane;
3862 PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none;
3864 for (i = 0; i < MAX_MB_PLANE; ++i) {
3865 p[i].coeff = ctx->coeff_pbuf[i][0];
3866 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
3867 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
      p[i].eobs = ctx->eobs_pbuf[i][0];
    }
    vp9_zero(x->zcoeff_blk);
  }

  if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
    source_var_based_partition_search_method(cpi);

  {
    struct vpx_usec_timer emr_timer;
3878 vpx_usec_timer_start(&emr_timer);
3880 #if CONFIG_FP_MB_STATS
3881 if (cpi->use_fp_mb_stats) {
3882 input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
                       &cpi->twopass.this_frame_mb_stats);
    }
#endif
3887 // If allowed, encoding tiles in parallel with one thread handling one tile.
3888 if (MIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
      vp9_encode_tiles_mt(cpi);
    else
      encode_tiles(cpi);

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  sf->skip_encode_frame = sf->skip_encode_sb ?
3898 get_skip_encode_frame(cm, td) : 0;
3901 // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
}
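// Picks the frame-level interpolation filter from the accumulated per-filter
// rate-distortion thresholds, preferring the smooth filter only for non-ARF
// frames and falling back to SWITCHABLE when no single filter dominates.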
3906 static INTERP_FILTER get_interp_filter(
3907 const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
  if (!is_alt_ref &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
3910 threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
3911 threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
3912 return EIGHTTAP_SMOOTH;
3913 } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
3914 threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
3915 return EIGHTTAP_SHARP;
  } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP;
  } else {
    return SWITCHABLE;
  }
}
3923 void vp9_encode_frame(VP9_COMP *cpi) {
3924 VP9_COMMON *const cm = &cpi->common;
3926 // In the longer term the encoder should be generalized to match the
3927 // decoder such that we allow compound where one of the 3 buffers has a
3928 // different sign bias and that buffer is then the fixed ref. However, this
3929 // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
3932 if (!frame_is_intra_only(cm)) {
3933 if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
3934 cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
3935 (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
3936 cm->ref_frame_sign_bias[LAST_FRAME])) {
3937 cpi->allow_comp_inter_inter = 0;
    } else {
      cpi->allow_comp_inter_inter = 1;
3940 cm->comp_fixed_ref = ALTREF_FRAME;
3941 cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = GOLDEN_FRAME;
    }
  }

3946 if (cpi->sf.frame_parameter_update) {
    int i;
    RD_OPT *const rd_opt = &cpi->rd;
3949 FRAME_COUNTS *counts = cpi->td.counts;
3950 RD_COUNTS *const rdc = &cpi->td.rd_counts;
3952 // This code does a single RD pass over the whole frame assuming
3953 // either compound, single or hybrid prediction as per whatever has
3954 // worked best for that type of frame in the past.
3955 // It also predicts whether another coding mode would have worked
    // better than this coding mode. If that is the case, it remembers
3957 // that for subsequent frames.
3958 // It does the same analysis for transform size selection also.
3959 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
3960 int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
3961 int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
3962 int *const tx_thrs = rd_opt->tx_select_threshes[frame_type];
3963 const int is_alt_ref = frame_type == ALTREF_FRAME;
3965 /* prediction (compound, single or hybrid) mode selection */
3966 if (is_alt_ref || !cpi->allow_comp_inter_inter)
3967 cm->reference_mode = SINGLE_REFERENCE;
3968 else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
3969 mode_thrs[COMPOUND_REFERENCE] >
3970 mode_thrs[REFERENCE_MODE_SELECT] &&
3971 check_dual_ref_flags(cpi) &&
3972 cpi->static_mb_pct == 100)
3973 cm->reference_mode = COMPOUND_REFERENCE;
3974 else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
3975 cm->reference_mode = SINGLE_REFERENCE;
3977 cm->reference_mode = REFERENCE_MODE_SELECT;
3979 if (cm->interp_filter == SWITCHABLE)
3980 cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);
3982 encode_frame_internal(cpi);
3984 for (i = 0; i < REFERENCE_MODES; ++i)
3985 mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;
3987 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
3988 filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;
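    // Update the per-transform-mode thresholds as a running average of the
    // per-MB rate-distortion difference seen this frame. TX_MODE_SELECT is
    // first charged, via RDCOST, a fixed rate term of 2048 * (TX_SIZES - 1)
    // to account for signaling the transform size.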
3990 for (i = 0; i < TX_MODES; ++i) {
3991 int64_t pd = rdc->tx_select_diff[i];
3992 if (i == TX_MODE_SELECT)
        pd -= RDCOST(cpi->td.mb.rdmult, cpi->td.mb.rddiv, 2048 * (TX_SIZES - 1),
                     0);
      tx_thrs[i] = (tx_thrs[i] + (int)(pd / cm->MBs)) / 2;
    }
3998 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
3999 int single_count_zero = 0;
4000 int comp_count_zero = 0;
4002 for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
4003 single_count_zero += counts->comp_inter[i][0];
      comp_count_zero += counts->comp_inter[i][1];
    }

    if (comp_count_zero == 0) {
4008 cm->reference_mode = SINGLE_REFERENCE;
4009 vp9_zero(counts->comp_inter);
4010 } else if (single_count_zero == 0) {
4011 cm->reference_mode = COMPOUND_REFERENCE;
        vp9_zero(counts->comp_inter);
      }
    }

4016 if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
4023 count4x4 += counts->tx.p32x32[i][TX_4X4];
4024 count4x4 += counts->tx.p16x16[i][TX_4X4];
4025 count4x4 += counts->tx.p8x8[i][TX_4X4];
4027 count8x8_lp += counts->tx.p32x32[i][TX_8X8];
4028 count8x8_lp += counts->tx.p16x16[i][TX_8X8];
4029 count8x8_8x8p += counts->tx.p8x8[i][TX_8X8];
4031 count16x16_16x16p += counts->tx.p16x16[i][TX_16X16];
4032 count16x16_lp += counts->tx.p32x32[i][TX_16X16];
        count32x32 += counts->tx.p32x32[i][TX_32X32];
      }

      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
4038 reset_skip_tx_size(cm, TX_8X8);
4039 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
4040 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
4041 cm->tx_mode = ONLY_4X4;
4042 reset_skip_tx_size(cm, TX_4X4);
4043 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
4044 cm->tx_mode = ALLOW_32X32;
4045 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
4046 cm->tx_mode = ALLOW_16X16;
        reset_skip_tx_size(cm, TX_16X16);
      }
    }
  } else {
    cm->reference_mode = SINGLE_REFERENCE;
    encode_frame_internal(cpi);
  }
}
4056 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
4057 const PREDICTION_MODE y_mode = mi->mbmi.mode;
4058 const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
4059 const BLOCK_SIZE bsize = mi->mbmi.sb_type;
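  // Sub-8x8 blocks carry a prediction mode per 4x4 unit, so each unit is
  // counted individually; 8x8 and larger blocks add a single count in their
  // block-size group.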
  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
4064 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
4065 for (idy = 0; idy < 2; idy += num_4x4_h)
4066 for (idx = 0; idx < 2; idx += num_4x4_w)
4067 ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }

  ++counts->uv_mode[y_mode][uv_mode];
}
4075 static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
4076 TOKENEXTRA **t, int output_enabled,
4077 int mi_row, int mi_col, BLOCK_SIZE bsize,
4078 PICK_MODE_CONTEXT *ctx) {
4079 VP9_COMMON *const cm = &cpi->common;
4080 MACROBLOCK *const x = &td->mb;
4081 MACROBLOCKD *const xd = &x->e_mbd;
4082 MODE_INFO *mi_8x8 = xd->mi;
4083 MODE_INFO *mi = mi_8x8;
4084 MB_MODE_INFO *mbmi = &mi->mbmi;
  const int seg_skip = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
                                             SEG_LVL_SKIP);
  const int mis = cm->mi_stride;
4088 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
4089 const int mi_height = num_8x8_blocks_high_lookup[bsize];
4091 x->skip_recode = !x->select_tx_size && mbmi->sb_type >= BLOCK_8X8 &&
4092 cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
4093 cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
4094 cpi->sf.allow_skip_recode;
4096 if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
4097 vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
4099 x->skip_optimize = ctx->is_coded;
4101 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
4102 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
4103 x->q_index < QIDX_SKIP_THRESH);
4108 set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
  if (!is_inter_block(mbmi)) {
    int plane;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
      vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);
    if (output_enabled)
      sum_intra_stats(td->counts, mi);
    vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  } else {
    int ref;
    const int is_compound = has_second_ref(mbmi);
    for (ref = 0; ref < 1 + is_compound; ++ref) {
4122 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
4123 mbmi->ref_frame[ref]);
4124 assert(cfg != NULL);
4125 vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                           &xd->block_refs[ref]->sf);
    }
    if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
4129 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
4131 vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
4133 vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
    vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  }

  if (output_enabled) {
4138 if (cm->tx_mode == TX_MODE_SELECT &&
4139 mbmi->sb_type >= BLOCK_8X8 &&
4140 !(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
4141 ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd),
                      &td->counts->tx)[mbmi->tx_size];
    } else {
      int x, y;
      TX_SIZE tx_size;
      // The new intra coding scheme requires no change of transform size
4147 if (is_inter_block(&mi->mbmi)) {
4148 tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
4149 max_txsize_lookup[bsize]);
      } else {
        tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
      }

4154 for (y = 0; y < mi_height; y++)
4155 for (x = 0; x < mi_width; x++)
4156 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
4157 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size;
    }

    ++td->counts->tx.tx_totals[mbmi->tx_size];
4160 ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])];