/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>
#include <math.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"

static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);

// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

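// (Because the reference above is a constant block, the 128 offset cancels
// when the variance function subtracts the squared mean of the differences,
// so vf() against it yields the true source variance; 128 is simply the
// mid-range value.)
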
#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};

static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH

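// (The 10- and 12-bit tables above hold the 8-bit neutral value 128 scaled
// by 1 << (bit_depth - 8), i.e. 128 * 4 and 128 * 16, so the flat reference
// matches the widened sample range at each bit depth.)
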
unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
                               0, &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
                               0, &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
                               0, &sse);
      break;
  }
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse, var;
  uint8_t *last_y;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row, int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
  if (var < 8)
    return BLOCK_64X64;
  else if (var < 128)
    return BLOCK_32X32;
  else if (var < 2048)
    return BLOCK_16X16;
  else
    return BLOCK_8X8;
}

// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
}

static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, xd, mi_row, mi_col);

  mbmi = &xd->mi[0]->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond the range do not produce a new/different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;

  // Set up segment ID.
  if (seg->enabled) {
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}

static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  int i, j;
  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) {
      if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
        xd->mi[j * xd->mi_stride + i] = xd->mi[0];
    }
}

static void set_block_size(VP9_COMP *const cpi,
                           MACROBLOCKD *const xd,
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
    xd->mi[0]->mbmi.sb_type = bsize;
  }
}

typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_4X4: {
      v4x4 *vt = (v4x4 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];
      break;
    }
    default: {
      assert(0);
      break;
    }
  }
}

// Set variance values given sum square error, sum error, count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}

static void get_variance(var *v) {
  v->variance = (int)(256 * (v->sum_square_error -
      ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
}

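// (With n = 2^log2_count samples, the expression above computes
// 256 * (sum_sq - sum^2 / n) / n, i.e. the population variance of the
// samples scaled by 256 to preserve precision in integer arithmetic.)
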
static void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}

static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}

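// (The four children are in raster order: 0 = top-left, 1 = top-right,
// 2 = bottom-left, 3 = bottom-right. horz[] therefore sums the top and
// bottom halves, vert[] the left and right halves, and none the whole
// block.)
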
static int set_vt_partitioning(VP9_COMP *cpi,
                               MACROBLOCKD *const xd,
                               void *data,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col,
                               int64_t threshold,
                               BLOCK_SIZE bsize_min,
                               int force_split) {
  VP9_COMMON *const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  const int low_res = (cm->width <= 352 && cm->height <= 288);

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  if (force_split == 1)
    return 0;

  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_min) {
    // Variance already computed to set the force_split.
    if (low_res || cm->frame_type == KEY_FRAME)
      get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    // Variance already computed to set the force_split.
    if (low_res || cm->frame_type == KEY_FRAME)
      get_variance(&vt.part_variances->none);
    // For key frame: take split for bsize above 32X32 or very high variance.
    if (cm->frame_type == KEY_FRAME &&
        (bsize > BLOCK_32X32 ||
         vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }
    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }

    return 0;
  }
  return 0;
}

// Set the variance split thresholds for the following block sizes:
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frame.
static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) {
  VP9_COMMON *const cm = &cpi->common;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int threshold_multiplier = is_key_frame ? 20 : 1;
  const int64_t threshold_base = (int64_t)(threshold_multiplier *
      cpi->y_dequant[q][1]);
  if (is_key_frame) {
    thresholds[0] = threshold_base;
    thresholds[1] = threshold_base >> 2;
    thresholds[2] = threshold_base >> 2;
    thresholds[3] = threshold_base << 2;
  } else {
    thresholds[1] = threshold_base;
    if (cm->width <= 352 && cm->height <= 288) {
      thresholds[0] = threshold_base >> 2;
      thresholds[2] = threshold_base << 3;
    } else {
      thresholds[0] = threshold_base;
      thresholds[1] = (5 * threshold_base) >> 2;
      if (cm->width >= 1920 && cm->height >= 1080)
        thresholds[1] = (7 * threshold_base) >> 2;
      thresholds[2] = threshold_base << cpi->oxcf.speed;
    }
  }
}

void vp9_set_variance_partition_thresholds(VP9_COMP *cpi, int q) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  if (sf->partition_search_type != VAR_BASED_PARTITION &&
      sf->partition_search_type != REFERENCE_PARTITION) {
    return;
  } else {
    set_vbp_thresholds(cpi, cpi->vbp_thresholds, q);
    // The thresholds below are not changed locally.
    if (is_key_frame) {
      cpi->vbp_threshold_sad = 0;
      cpi->vbp_bsize_min = BLOCK_8X8;
    } else {
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_sad = 100;
      else
        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ?
            (cpi->y_dequant[q][1] << 1) : 1000;
      cpi->vbp_bsize_min = BLOCK_16X16;
    }
    cpi->vbp_threshold_minmax = 15 + (q >> 3);
  }
}

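// (vbp_threshold_minmax above grows with the quantizer, so at higher q a
// larger spread across the 8x8 sub-blocks is needed before a split is
// forced; see compute_minmax_8x8() below.)
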
// Compute the minmax over the 8x8 subblocks.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                              int dp, int x16_idx, int y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                              int highbd_flag,
#endif
                              int pixels_wide,
                              int pixels_high) {
  int k;
  int minmax_max = 0;
  int minmax_min = 255;
  // Loop over the 4 8x8 subblocks.
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    int min = 0;
    int max = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        vp9_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                              d + y8_idx * dp + x8_idx, dp,
                              &min, &max);
      } else {
        vp9_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                       d + y8_idx * dp + x8_idx, dp,
                       &min, &max);
      }
#else
      vp9_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                     d + y8_idx * dp + x8_idx, dp,
                     &min, &max);
#endif
      if ((max - min) > minmax_max)
        minmax_max = (max - min);
      if ((max - min) < minmax_min)
        minmax_min = (max - min);
    }
  }
  return (minmax_max - minmax_min);
}
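// (compute_minmax_8x8() returns the difference between the largest and
// smallest per-8x8 (max - min) range, a cheap measure of how unevenly
// detail is distributed across the four sub-blocks of the 16x16.)
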
static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x8_idx, int y8_idx, v8x8 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide,
                                 int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x4_idx = x8_idx + ((k & 1) << 2);
    int y4_idx = y8_idx + ((k >> 1) << 2);
    unsigned int sse = 0;
    int sum = 0;
    if (x4_idx < pixels_wide && y4_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      } else {
        s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      }
#else
      s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
      if (!is_key_frame)
        d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}

static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x16_idx, int y16_idx, v16x16 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide,
                                 int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    unsigned int sse = 0;
    int sum = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      } else {
        s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      }
#else
      s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
      if (!is_key_frame)
        d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
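// (Each leaf above stores a single (s_avg - d_avg) sample with
// log2_count == 0; averaging over 8x8 (or 4x4) pixels first acts as a
// downsample of the source-vs-reconstruction difference before variances
// are accumulated up the tree.)
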
// This function chooses partitioning based on the variance between source and
// reconstructed last, where variance is computed for down-sampled inputs.
static int choose_partitioning(VP9_COMP *cpi,
                               const TileInfo *const tile,
                               MACROBLOCK *x,
                               int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int i, j, k, m;
  v64x64 vt;
  v16x16 vt2[16];
  int force_split[21];
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
  int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
      cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};

  // Always use 4x4 partition for key frame.
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int use_4x4_partition = is_key_frame;
  const int low_res = (cm->width <= 352 && cm->height <= 288);
  int variance4x4downsample[16];

  int segment_id = CR_SEGMENT_ID_BASE;
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
                                                    cm->last_frame_seg_map;
    segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);

    if (cyclic_refresh_segment_id_boosted(segment_id)) {
      int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
      set_vbp_thresholds(cpi, thresholds, q);
    }
  }

  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  if (!is_key_frame && !(is_one_pass_cbr_svc(cpi) &&
      cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)) {
    // In the case of spatial/temporal scalable coding, the assumption here is
    // that the temporal reference frame will always be of type LAST_FRAME.
    // TODO(marpan): If that assumption is broken, we need to revisit this code.
    MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
    unsigned int uv_sad;
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);

    const YV12_BUFFER_CONFIG *yv12_g = NULL;
    unsigned int y_sad, y_sad_g;
    const BLOCK_SIZE bsize = BLOCK_32X32
        + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows);
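    // (This arithmetic relies on BLOCK_32X32, BLOCK_32X64, BLOCK_64X32 and
    // BLOCK_64X64 being consecutive enum values: +2 widens the SAD block to
    // 64 columns when they fit in the frame, +1 extends it to 64 rows.)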

    assert(yv12 != NULL);

    if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id)) {
      // For now, GOLDEN will not be used for non-zero spatial layers, since
      // it may not be a temporal reference.
      yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    }

    if (yv12_g && yv12_g != yv12) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
                                       x->plane[0].src.stride,
                                       xd->plane[0].pre[0].buf,
                                       xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }

    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                         &cm->frame_refs[LAST_FRAME - 1].sf);
    mbmi->ref_frame[0] = LAST_FRAME;
    mbmi->ref_frame[1] = NONE;
    mbmi->sb_type = BLOCK_64X64;
    mbmi->mv[0].as_int = 0;
    mbmi->interp_filter = BILINEAR;

    y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
    if (y_sad_g < y_sad) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mbmi->ref_frame[0] = GOLDEN_FRAME;
      mbmi->mv[0].as_int = 0;
      y_sad = y_sad_g;
    } else {
      x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
    }

    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);

    for (i = 1; i <= 2; ++i) {
      struct macroblock_plane *p = &x->plane[i];
      struct macroblockd_plane *pd = &xd->plane[i];
      const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);

      if (bs == BLOCK_INVALID)
        uv_sad = UINT_MAX;
      else
        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
                                     pd->dst.buf, pd->dst.stride);

      x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
    }
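    // (A chroma plane is flagged as color-sensitive when its SAD exceeds a
    // quarter of the luma SAD, i.e. when chroma changes are large relative
    // to luma.)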

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;

    // If the y_sad is very small, take 64x64 as partition and exit.
    // Don't check on boosted segment for now, as 64x64 is suppressed there.
    if (segment_id == CR_SEGMENT_ID_BASE &&
        y_sad < cpi->vbp_threshold_sad) {
      const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
      const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
      if (mi_col + block_width / 2 < cm->mi_cols &&
          mi_row + block_height / 2 < cm->mi_rows) {
        set_block_size(cpi, xd, mi_row, mi_col, BLOCK_64X64);
        return 0;
      }
    }
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
          break;
        case 12:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
          break;
        case 8:
        default:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
          break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
  // 5-20 for the 16x16 blocks.
  force_split[0] = 0;
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    const int i2 = i << 2;
    force_split[i + 1] = 0;
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      const int split_index = 5 + i2 + j;
      v16x16 *vst = &vt.split[i].split[j];
      force_split[split_index] = 0;
      variance4x4downsample[i2 + j] = 0;
      if (!is_key_frame) {
        fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
#if CONFIG_VP9_HIGHBITDEPTH
                             xd->cur_buf->flags,
#endif
                             pixels_wide,
                             pixels_high,
                             is_key_frame);
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
        get_variance(&vt.split[i].split[j].part_variances.none);
        if (vt.split[i].split[j].part_variances.none.variance >
            thresholds[2]) {
          // 16X16 variance is above threshold for split, so force split to 8x8
          // for this 16x16 block (this also forces splits for upper levels).
          force_split[split_index] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        } else if (vt.split[i].split[j].part_variances.none.variance >
                   (thresholds[1] >> 1) &&
                   !cyclic_refresh_segment_id_boosted(segment_id)) {
          // We have some nominal amount of 16x16 variance (based on average),
          // compute the minmax over the 8x8 sub-blocks, and if above threshold,
          // force split to 8x8 block for this 16x16 block.
          int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                                          xd->cur_buf->flags,
#endif
                                          pixels_wide, pixels_high);
          if (minmax > cpi->vbp_threshold_minmax) {
            force_split[split_index] = 1;
            force_split[i + 1] = 1;
            force_split[0] = 1;
          }
        }
      }
      if (is_key_frame || (low_res &&
          vt.split[i].split[j].part_variances.none.variance >
          (thresholds[1] << 1))) {
        force_split[split_index] = 0;
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
          v8x8 *vst2 = is_key_frame ? &vst->split[k] :
              &vt2[i2 + j].split[k];
          fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
#if CONFIG_VP9_HIGHBITDEPTH
                               xd->cur_buf->flags,
#endif
                               pixels_wide,
                               pixels_high,
                               is_key_frame);
        }
      }
    }
  }

  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
    const int i2 = i << 2;
    for (j = 0; j < 4; j++) {
      if (variance4x4downsample[i2 + j] == 1) {
        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
            &vt.split[i].split[j];
        for (m = 0; m < 4; m++)
          fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
        fill_variance_tree(vtemp, BLOCK_16X16);
      }
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
    // If variance of this 32x32 block is above the threshold, force the block
    // to split. This also forces a split on the upper (64x64) level.
    if (!force_split[i + 1]) {
      get_variance(&vt.split[i].part_variances.none);
      if (vt.split[i].part_variances.none.variance > thresholds[1]) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      }
    }
  }
  if (!force_split[0]) {
    fill_variance_tree(&vt, BLOCK_64X64);
    get_variance(&vt.part_variances.none);
  }

  // Now go through the entire structure, splitting every block size until
  // we get to one that's got a variance lower than our threshold.
  if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
      !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                           thresholds[0], BLOCK_16X16, force_split[0])) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      const int i2 = i << 2;
      if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx),
                               thresholds[1], BLOCK_16X16,
                               force_split[i + 1])) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use vt2
          // in set_vt_partitioning(), otherwise use vt.
          v16x16 *vtemp = (!is_key_frame &&
                           variance4x4downsample[i2 + j] == 1) ?
                           &vt2[i2 + j] : &vt.split[i].split[j];
          if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
                                   mi_row + y32_idx + y16_idx,
                                   mi_col + x32_idx + x16_idx,
                                   thresholds[2],
                                   cpi->vbp_bsize_min,
                                   force_split[5 + i2 + j])) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              if (use_4x4_partition) {
                if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
                                         BLOCK_8X8,
                                         mi_row + y32_idx + y16_idx + y8_idx,
                                         mi_col + x32_idx + x16_idx + x8_idx,
                                         thresholds[3], BLOCK_8X8, 0)) {
                  set_block_size(cpi, xd,
                                 (mi_row + y32_idx + y16_idx + y8_idx),
                                 (mi_col + x32_idx + x16_idx + x8_idx),
                                 BLOCK_4X4);
                }
              } else {
                set_block_size(cpi, xd,
                               (mi_row + y32_idx + y16_idx + y8_idx),
                               (mi_col + x32_idx + x16_idx + x8_idx),
                               BLOCK_8X8);
              }
            }
          }
        }
      }
    }
  }
  return 0;
}

static void update_state(VP9_COMP *cpi, ThreadData *td,
                         PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  RD_COUNTS *const rdc = &td->rd_counts;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  MODE_INFO *mi_addr = xd->mi[0];
  const struct segmentation *const seg = &cm->seg;
  const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  MV_REF *const frame_mvs =
      cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;

  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->mbmi.sb_type == bsize);

  *mi_addr = *mi;

  // If segmentation in use.
  if (seg->enabled) {
    // For in frame complexity AQ copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi_addr->mbmi.segment_id =
          vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else for cyclic refresh mode update the segment map, set the segment id
    // and then update the quantizer.
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
      vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row,
                                        mi_col, bsize, ctx->rate, ctx->dist,
                                        x->skip);
    }
  }

  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][1];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];
  }

  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  }

  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it.
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
          && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi[x_idx + y * mis] = mi_addr;
      }

  if (cpi->oxcf.aq_mode)
    vp9_init_plane_quantizers(cpi, x);

  // FIXME(rbultje) I'm pretty sure this should go to the end of this block
  // (i.e. after the output_enabled)
  if (bsize < BLOCK_32X32) {
    if (bsize < BLOCK_16X16)
      ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
    ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
  }

  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }

  x->skip = ctx->skip;
  memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
         sizeof(uint8_t) * ctx->num_4x4_blk);

  if (!output_enabled)
    return;

  if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    for (i = 0; i < TX_MODES; i++)
      rdc->tx_select_diff[i] += ctx->tx_rd_diff[i];
  }

#if CONFIG_INTERNAL_STATS
  if (frame_is_intra_only(cm)) {
    static const int kf_mode_index[] = {
      THR_DC        /*DC_PRED*/,
      THR_V_PRED    /*V_PRED*/,
      THR_H_PRED    /*H_PRED*/,
      THR_D45_PRED  /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED  /*D63_PRED*/,
      THR_TM        /*TM_PRED*/,
    };
    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
  } else {
    // Note how often each mode chosen as best.
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif

  if (!frame_is_intra_only(cm)) {
    if (is_inter_block(mbmi)) {
      vp9_update_mv_count(td);

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = vp9_get_pred_context_switchable_interp(xd);
        ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
      }
    }

    rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      rdc->filter_diff[i] += ctx->best_filter_diff[i];
  }

  for (h = 0; h < y_mis; ++h) {
    MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
    for (w = 0; w < x_mis; ++w) {
      MV_REF *const mv = frame_mv + w;
      mv->ref_frame[0] = mi->mbmi.ref_frame[0];
      mv->ref_frame[1] = mi->mbmi.ref_frame[1];
      mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
      mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
    }
  }
}

void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
  const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
  int i;

  // Set current frame pointer.
  x->e_mbd.cur_buf = src;

  for (i = 0; i < MAX_MB_PLANE; i++)
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}

static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
                                   RD_COST *rd_cost, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  INTERP_FILTER filter_ref;

  if (xd->up_available)
    filter_ref = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
  else if (xd->left_available)
    filter_ref = xd->mi[-1]->mbmi.interp_filter;
  else
    filter_ref = EIGHTTAP;

  mbmi->sb_type = bsize;
  mbmi->mode = ZEROMV;
  mbmi->tx_size = MIN(max_txsize_lookup[bsize],
                      tx_mode_to_biggest_tx_size[tx_mode]);
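  // (i.e. the largest transform permitted by both the block size and the
  // frame-level transform mode.)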
  mbmi->skip = 1;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->mv[0].as_int = 0;
  mbmi->interp_filter = filter_ref;

  xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
  x->skip = 1;

  vp9_rd_cost_init(rd_cost);
}

static int set_segment_rdmult(VP9_COMP *const cpi,
                              MACROBLOCK *const x,
                              int8_t segment_id) {
  int segment_qindex;
  VP9_COMMON *const cm = &cpi->common;
  vp9_init_plane_quantizers(cpi, x);
  vp9_clear_system_state();
  segment_qindex = vp9_get_qindex(&cm->seg, segment_id,
                                  cm->base_qindex);
  return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}

static void rd_pick_sb_modes(VP9_COMP *cpi,
                             TileDataEnc *tile_data,
                             MACROBLOCK *const x,
                             int mi_row, int mi_col, RD_COST *rd_cost,
                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                             int64_t best_rd) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  int i, orig_rdmult;

  vp9_clear_system_state();

  // Use the lower precision, but faster, 32x32 fdct for mode selection.
  x->use_lp32x32fdct = 1;

  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  mbmi = &xd->mi[0]->mbmi;
  mbmi->sb_type = bsize;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][0];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
    p[i].eobs = ctx->eobs_pbuf[i][0];
  }
  ctx->is_coded = 0;
  ctx->skippable = 0;
  ctx->pred_pixel_ready = 0;
  x->skip_recode = 0;

  // Set to zero to make sure we do not use the previous encoded frame stats.
  mbmi->skip = 0;

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    x->source_variance =
        vp9_high_get_sby_perpixel_variance(cpi, &x->plane[0].src,
                                           bsize, xd->bd);
  } else {
    x->source_variance =
        vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
  }
#else
  x->source_variance =
      vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  // Save rdmult before it might be changed, so it can be restored later.
  orig_rdmult = x->rdmult;

  if (aq_mode == VARIANCE_AQ) {
    const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
                                            : vp9_block_energy(cpi, x, bsize);
    if (cm->frame_type == KEY_FRAME ||
        cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
      mbmi->segment_id = vp9_vaq_segment_id(energy);
    } else {
      const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                    : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
  } else if (aq_mode == COMPLEXITY_AQ) {
    x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
  } else if (aq_mode == CYCLIC_REFRESH_AQ) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                  : cm->last_frame_seg_map;
    // If segment is boosted, use rdmult for that segment.
    if (cyclic_refresh_segment_id_boosted(
            vp9_get_segment_id(cm, map, bsize, mi_row, mi_col)))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
  }

  // Find best coding mode & reconstruct the MB so it is available
  // as a predictor for MBs that follow in the SB.
  if (frame_is_intra_only(cm)) {
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
  } else {
    if (bsize >= BLOCK_8X8) {
      if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
        vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
                                           ctx, best_rd);
      else
        vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
                                  rd_cost, bsize, ctx, best_rd);
    } else {
      vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
                                    rd_cost, bsize, ctx, best_rd);
    }
  }

  // Examine the resulting rate and for AQ mode 2 make a segment choice.
  if ((rd_cost->rate != INT_MAX) &&
      (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
      (cm->frame_type == KEY_FRAME ||
       cpi->refresh_alt_ref_frame ||
       (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
    vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
  }

  x->rdmult = orig_rdmult;

  // TODO(jingning) The rate-distortion optimization flow needs to be
  // refactored to provide proper exit/return handle.
  if (rd_cost->rate == INT_MAX)
    rd_cost->rdcost = INT64_MAX;

  ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}

static void update_stats(VP9_COMMON *cm, ThreadData *td) {
  const MACROBLOCK *x = &td->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const MODE_INFO *const mi = xd->mi[0];
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (!frame_is_intra_only(cm)) {
    FRAME_COUNTS *const counts = td->counts;
    const int inter_block = is_inter_block(mbmi);
    const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
                                                     SEG_LVL_REF_FRAME);
    if (!seg_ref_active) {
      counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;
      // If the segment reference feature is enabled we have only a single
      // reference frame allowed for the segment so exclude it from
      // the reference frame counts used to work out probabilities.
      if (inter_block) {
        const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
        if (cm->reference_mode == REFERENCE_MODE_SELECT)
          counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
                            [has_second_ref(mbmi)]++;

        if (has_second_ref(mbmi)) {
          counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
                          [ref0 == GOLDEN_FRAME]++;
        } else {
          counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
                            [ref0 != LAST_FRAME]++;
          if (ref0 != LAST_FRAME)
            counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
                              [ref0 != GOLDEN_FRAME]++;
        }
      }
    }
    if (inter_block &&
        !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
      const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
      if (bsize >= BLOCK_8X8) {
        const PREDICTION_MODE mode = mbmi->mode;
        ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
      } else {
        const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
        const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
        int idx, idy;
        for (idy = 0; idy < 2; idy += num_4x4_h) {
          for (idx = 0; idx < 2; idx += num_4x4_w) {
            const int j = idy * 2 + idx;
            const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
            ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
          }
        }
      }
    }
  }
}

static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
                            ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                            ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                            PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                            BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  for (p = 0; p < MAX_MB_PLANE; p++) {
    memcpy(
        xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
        a + num_4x4_blocks_wide * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
            xd->plane[p].subsampling_x);
    memcpy(
        xd->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        l + num_4x4_blocks_high * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
            xd->plane[p].subsampling_y);
  }
  memcpy(xd->above_seg_context + mi_col, sa,
         sizeof(*xd->above_seg_context) * mi_width);
  memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
         sizeof(xd->left_seg_context[0]) * mi_height);
}

static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
                         ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
                         ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
                         PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
                         BLOCK_SIZE bsize) {
  const MACROBLOCKD *const xd = &x->e_mbd;
  int p;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];

  // Buffer the above/left context information of the block in search.
  for (p = 0; p < MAX_MB_PLANE; ++p) {
    memcpy(
        a + num_4x4_blocks_wide * p,
        xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
            xd->plane[p].subsampling_x);
    memcpy(
        l + num_4x4_blocks_high * p,
        xd->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
            xd->plane[p].subsampling_y);
  }
  memcpy(sa, xd->above_seg_context + mi_col,
         sizeof(*xd->above_seg_context) * mi_width);
  memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
         sizeof(xd->left_seg_context[0]) * mi_height);
}

static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
                     ThreadData *td,
                     TOKENEXTRA **tp, int mi_row, int mi_col,
                     int output_enabled, BLOCK_SIZE bsize,
                     PICK_MODE_CONTEXT *ctx) {
  MACROBLOCK *const x = &td->mb;
  set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
  update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
  encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);

  if (output_enabled) {
    update_stats(&cpi->common, td);

    (*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}

static void encode_sb(VP9_COMP *cpi, ThreadData *td,
                      const TileInfo *const tile,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize,
                      PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
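  // (hbs is half the block size in mi (8x8) units: b_width_log2_lookup is
  // in 4x4 units, so (1 << bsl) / 4 gives half the width in 8x8 units.)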
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize = bsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize >= BLOCK_8X8) {
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = get_subsize(bsize, pc_tree->partitioning);
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    td->counts->partition[ctx][partition]++;

  switch (partition) {
    case PARTITION_NONE:
      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->none);
      break;
    case PARTITION_VERT:
      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
                 subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->horizontal[0]);
      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
                 subsize, &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      if (bsize == BLOCK_8X8) {
        encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
                 pc_tree->leaf_split[0]);
      } else {
        encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  pc_tree->split[0]);
        encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
                  subsize, pc_tree->split[1]);
        encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
                  subsize, pc_tree->split[2]);
        encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
                  output_enabled, subsize, pc_tree->split[3]);
      }
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not then return the largest allowed partition size.
static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
                                      int rows_left, int cols_left,
                                      int *bh, int *bw) {
  if (rows_left <= 0 || cols_left <= 0) {
    return MIN(bsize, BLOCK_8X8);
  } else {
    for (; bsize > 0; bsize -= 3) {
      *bh = num_8x8_blocks_high_lookup[bsize];
      *bw = num_8x8_blocks_wide_lookup[bsize];
      if ((*bh <= rows_left) && (*bw <= cols_left)) {
        break;
      }
    }
  }
  return bsize;
}
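// (Stepping bsize down by 3 moves to the next smaller square size, since
// each size class contributes three BLOCK_SIZE entries, e.g.
// BLOCK_64X64 -> BLOCK_32X32 -> BLOCK_16X16 -> BLOCK_8X8.)
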
static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
    int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
    BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
  int bh = bh_in;
  int r, c;
  for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
    int bw = bw_in;
    for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
      const int index = r * mis + c;
      mi_8x8[index] = mi + index;
      mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
          row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
    }
  }
}

// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                                   MODE_INFO **mi_8x8, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  int block_row, block_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
  int bh = num_8x8_blocks_high_lookup[bsize];
  int bw = num_8x8_blocks_wide_lookup[bsize];

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // Apply the requested partition size to the SB64 if it is all "in image".
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
      for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
        int index = block_row * mis + block_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = bsize;
      }
    }
  } else {
    // Else this is a partial SB64.
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
                                 col8x8_remaining, bsize, mi_8x8);
  }
}

static const struct {
  int row;
  int col;
} coord_lookup[16] = {
    // 32x32 index = 0
    {0, 0}, {0, 2}, {2, 0}, {2, 2},
    // 32x32 index = 1
    {0, 4}, {0, 6}, {2, 4}, {2, 6},
    // 32x32 index = 2
    {4, 0}, {4, 2}, {6, 0}, {6, 2},
    // 32x32 index = 3
    {4, 4}, {4, 6}, {6, 4}, {6, 6},
};

static void set_source_var_based_partition(VP9_COMP *cpi,
                                           const TileInfo *const tile,
                                           MACROBLOCK *const x,
                                           MODE_INFO **mi_8x8,
                                           int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;

  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));

  // In-image SB64.
  if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
      (row8x8_remaining >= MI_BLOCK_SIZE)) {
    int i, j;
    int index;
    diff d32[4];
    const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
    int is_larger_better = 0;
    int use32x32 = 0;
    unsigned int thr = cpi->source_var_thresh;

    memset(d32, 0, 4 * sizeof(diff));

    for (i = 0; i < 4; i++) {
      diff *d16[4];

      for (j = 0; j < 4; j++) {
        int b_mi_row = coord_lookup[i * 4 + j].row;
        int b_mi_col = coord_lookup[i * 4 + j].col;
        int boffset = b_mi_row / 2 * cm->mb_cols +
                      b_mi_col / 2;

        d16[j] = cpi->source_diff_var + offset + boffset;

        index = b_mi_row * mis + b_mi_col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = BLOCK_16X16;

        // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
        // size to further improve quality.
      }

      is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
          (d16[2]->var < thr) && (d16[3]->var < thr);

      // Use 32x32 partition.
      if (is_larger_better) {
        use32x32 += 1;

        for (j = 0; j < 4; j++) {
          d32[i].sse += d16[j]->sse;
          d32[i].sum += d16[j]->sum;
        }

        d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
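        // (A 32x32 block covers 1024 pixels, hence the >> 10 when forming
        // the variance from the accumulated sse and sum.)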

        index = coord_lookup[i * 4].row * mis + coord_lookup[i * 4].col;
        mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = BLOCK_32X32;
      }
    }

    if (use32x32 == 4) {
      thr <<= 1;
      is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
          (d32[2].var < thr) && (d32[3].var < thr);

      // Use 64x64 partition.
      if (is_larger_better) {
        mi_8x8[0] = mi_upper_left;
        mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
      }
    }
  } else {   // partial in-image SB64
    int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
    int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
                                 row8x8_remaining, col8x8_remaining,
                                 BLOCK_16X16, mi_8x8);
  }
}

static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
                            PICK_MODE_CONTEXT *ctx,
                            int mi_row, int mi_col, int bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mi = xd->mi[0];
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const struct segmentation *const seg = &cm->seg;
  const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);

  *(xd->mi[0]) = ctx->mic;

  if (seg->enabled && cpi->oxcf.aq_mode) {
    // For in frame complexity AQ or variance AQ, copy segment_id from
    // segmentation_map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ ||
        cpi->oxcf.aq_mode == VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    } else {
      // Setting segmentation map for cyclic_refresh.
      vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize,
                                        ctx->rate, ctx->dist, x->skip);
    }
    vp9_init_plane_quantizers(cpi, x);
  }

  if (is_inter_block(mbmi)) {
    vp9_update_mv_count(td);
    if (cm->interp_filter == SWITCHABLE) {
      const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
      ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter];
    }

    if (mbmi->sb_type < BLOCK_8X8) {
      mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
      mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
    }
  }

  if (cm->use_prev_frame_mvs) {
    MV_REF *const frame_mvs =
        cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
    int w, h;

    for (h = 0; h < y_mis; ++h) {
      MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
      for (w = 0; w < x_mis; ++w) {
        MV_REF *const mv = frame_mv + w;
        mv->ref_frame[0] = mi->mbmi.ref_frame[0];
        mv->ref_frame[1] = mi->mbmi.ref_frame[1];
        mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
        mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
      }
    }
  }

  x->skip = ctx->skip;
  x->skip_txfm[0] = mbmi->segment_id ? 0 : ctx->skip_txfm[0];
}

static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
                        const TileInfo *const tile,
                        TOKENEXTRA **tp, int mi_row, int mi_col,
                        int output_enabled, BLOCK_SIZE bsize,
                        PICK_MODE_CONTEXT *ctx) {
  MACROBLOCK *const x = &td->mb;
  set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
  update_state_rt(cpi, td, ctx, mi_row, mi_col, bsize);

#if CONFIG_VP9_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0 && output_enabled &&
      cpi->common.frame_type != KEY_FRAME) {
    vp9_denoiser_denoise(&cpi->denoiser, x, mi_row, mi_col,
                         MAX(BLOCK_8X8, bsize), ctx);
  }
#endif

  encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
  update_stats(&cpi->common, td);

  (*tp)->token = EOSB_TOKEN;
  (*tp)++;
}

static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
                         const TileInfo *const tile,
                         TOKENEXTRA **tp, int mi_row, int mi_col,
                         int output_enabled, BLOCK_SIZE bsize,
                         PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize >= BLOCK_8X8) {
    const int idx_str = xd->mi_stride * mi_row + mi_col;
    MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = mi_8x8[0]->mbmi.sb_type;
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    td->counts->partition[ctx][partition]++;

  switch (partition) {
    case PARTITION_NONE:
      encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->none);
      break;
    case PARTITION_VERT:
      encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        encode_b_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
                    subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->horizontal[0]);
      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        encode_b_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
                    subsize, &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
                   pc_tree->split[0]);
      encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
                   subsize, pc_tree->split[1]);
      encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
                   subsize, pc_tree->split[2]);
      encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
                   output_enabled, subsize, pc_tree->split[3]);
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void rd_use_partition(VP9_COMP *cpi,
                             ThreadData *td,
                             TileDataEnc *tile_data,
                             MODE_INFO **mi_8x8, TOKENEXTRA **tp,
                             int mi_row, int mi_col,
                             BLOCK_SIZE bsize,
                             int *rate, int64_t *dist,
                             int do_recon, PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int mis = cm->mi_stride;
  const int bsl = b_width_log2_lookup[bsize];
  const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
  const int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE subsize;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  RD_COST last_part_rdc, none_rdc, chosen_rdc;
  BLOCK_SIZE sub_subsize = BLOCK_4X4;
  int splits_below = 0;
  BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
  int do_partition_search = 1;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;

1854 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1857 assert(num_4x4_blocks_wide_lookup[bsize] ==
1858 num_4x4_blocks_high_lookup[bsize]);
1860 vp9_rd_cost_reset(&last_part_rdc);
1861 vp9_rd_cost_reset(&none_rdc);
1862 vp9_rd_cost_reset(&chosen_rdc);
1864 partition = partition_lookup[bsl][bs_type];
1865 subsize = get_subsize(bsize, partition);
1867 pc_tree->partitioning = partition;
1868 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1870 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
1871 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
1872 x->mb_energy = vp9_block_energy(cpi, x, bsize);
1875 if (do_partition_search &&
1876 cpi->sf.partition_search_type == SEARCH_PARTITION &&
1877 cpi->sf.adjust_partitioning_from_last_frame) {
1878 // Check if any of the sub-blocks are split further.
1879 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
1880 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
1881 splits_below = 1;
1882 for (i = 0; i < 4; i++) {
1883 int jj = i >> 1, ii = i & 0x01;
1884 MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
1885 if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
1886 splits_below = 0;
1891 // If the partition is not PARTITION_NONE, also try PARTITION_NONE, unless
1892 // each of the 4 sub-blocks is itself split at least once further.
1893 if (partition != PARTITION_NONE && !splits_below &&
1894 mi_row + (mi_step >> 1) < cm->mi_rows &&
1895 mi_col + (mi_step >> 1) < cm->mi_cols) {
1896 pc_tree->partitioning = PARTITION_NONE;
1897 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize,
1898 ctx, INT64_MAX);
1900 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1902 if (none_rdc.rate < INT_MAX) {
1903 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
1904 none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
1905 none_rdc.dist);
1908 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1909 mi_8x8[0]->mbmi.sb_type = bs_type;
1910 pc_tree->partitioning = partition;
1914 switch (partition) {
1915 case PARTITION_NONE:
1916 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1917 bsize, ctx, INT64_MAX);
1918 break;
1919 case PARTITION_HORZ:
1920 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1921 subsize, &pc_tree->horizontal[0],
1922 INT64_MAX);
1923 if (last_part_rdc.rate != INT_MAX &&
1924 bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
1926 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
1927 vp9_rd_cost_init(&tmp_rdc);
1928 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
1929 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
1930 rd_pick_sb_modes(cpi, tile_data, x,
1931 mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
1932 subsize, &pc_tree->horizontal[1], INT64_MAX);
1933 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1934 vp9_rd_cost_reset(&last_part_rdc);
1937 last_part_rdc.rate += tmp_rdc.rate;
1938 last_part_rdc.dist += tmp_rdc.dist;
1939 last_part_rdc.rdcost += tmp_rdc.rdcost;
1940 }
1941 break;
1942 case PARTITION_VERT:
1943 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1944 subsize, &pc_tree->vertical[0], INT64_MAX);
1945 if (last_part_rdc.rate != INT_MAX &&
1946 bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
1948 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
1949 vp9_rd_cost_init(&tmp_rdc);
1950 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
1951 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
1952 rd_pick_sb_modes(cpi, tile_data, x,
1953 mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
1954 subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
1955 INT64_MAX);
1956 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1957 vp9_rd_cost_reset(&last_part_rdc);
1960 last_part_rdc.rate += tmp_rdc.rate;
1961 last_part_rdc.dist += tmp_rdc.dist;
1962 last_part_rdc.rdcost += tmp_rdc.rdcost;
1963 }
1964 break;
1965 case PARTITION_SPLIT:
1966 if (bsize == BLOCK_8X8) {
1967 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1968 subsize, pc_tree->leaf_split[0], INT64_MAX);
1969 break;
1970 }
1971 last_part_rdc.rate = 0;
1972 last_part_rdc.dist = 0;
1973 last_part_rdc.rdcost = 0;
1974 for (i = 0; i < 4; i++) {
1975 int x_idx = (i & 1) * (mi_step >> 1);
1976 int y_idx = (i >> 1) * (mi_step >> 1);
1977 int jj = i >> 1, ii = i & 0x01;
1979 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
1982 vp9_rd_cost_init(&tmp_rdc);
1983 rd_use_partition(cpi, td, tile_data,
1984 mi_8x8 + jj * bss * mis + ii * bss, tp,
1985 mi_row + y_idx, mi_col + x_idx, subsize,
1986 &tmp_rdc.rate, &tmp_rdc.dist,
1987 i != 3, pc_tree->split[i]);
1988 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1989 vp9_rd_cost_reset(&last_part_rdc);
1992 last_part_rdc.rate += tmp_rdc.rate;
1993 last_part_rdc.dist += tmp_rdc.dist;
2001 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2002 if (last_part_rdc.rate < INT_MAX) {
2003 last_part_rdc.rate += cpi->partition_cost[pl][partition];
2004 last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2005 last_part_rdc.rate, last_part_rdc.dist);
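// RDCOST (see vp9_rd.h) folds rate and distortion into one figure of merit,
// roughly rdcost = ((rate * rdmult) >> 8) + (dist << rddiv); lower is better,
// so the last_part cost computed here competes directly with the NONE and
// SPLIT alternatives evaluated elsewhere in this function.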
2008 if (do_partition_search
2009 && cpi->sf.adjust_partitioning_from_last_frame
2010 && cpi->sf.partition_search_type == SEARCH_PARTITION
2011 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
2012 && (mi_row + mi_step < cm->mi_rows ||
2013 mi_row + (mi_step >> 1) == cm->mi_rows)
2014 && (mi_col + mi_step < cm->mi_cols ||
2015 mi_col + (mi_step >> 1) == cm->mi_cols)) {
2016 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
2017 chosen_rdc.rate = 0;
2018 chosen_rdc.dist = 0;
2019 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2020 pc_tree->partitioning = PARTITION_SPLIT;
2023 for (i = 0; i < 4; i++) {
2024 int x_idx = (i & 1) * (mi_step >> 1);
2025 int y_idx = (i >> 1) * (mi_step >> 1);
2027 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2028 PARTITION_CONTEXT sl[8], sa[8];
2030 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
2033 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2034 pc_tree->split[i]->partitioning = PARTITION_NONE;
2035 rd_pick_sb_modes(cpi, tile_data, x,
2036 mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
2037 split_subsize, &pc_tree->split[i]->none, INT64_MAX);
2039 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2041 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
2042 vp9_rd_cost_reset(&chosen_rdc);
2046 chosen_rdc.rate += tmp_rdc.rate;
2047 chosen_rdc.dist += tmp_rdc.dist;
2050 encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
2051 split_subsize, pc_tree->split[i]);
2053 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
2055 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2057 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2058 if (chosen_rdc.rate < INT_MAX) {
2059 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2060 chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2061 chosen_rdc.rate, chosen_rdc.dist);
2065 // If last_part is better, set the partitioning to it.
2066 if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
2067 mi_8x8[0]->mbmi.sb_type = bsize;
2068 if (bsize >= BLOCK_8X8)
2069 pc_tree->partitioning = partition;
2070 chosen_rdc = last_part_rdc;
2072 // If none was better, set the partitioning to PARTITION_NONE.
2073 if (none_rdc.rdcost < chosen_rdc.rdcost) {
2074 if (bsize >= BLOCK_8X8)
2075 pc_tree->partitioning = PARTITION_NONE;
2076 chosen_rdc = none_rdc;
2079 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2081 // By this point we must have chosen a partitioning and an encoding, or
2082 // we'll fail later on; there are no other opportunities for success.
2083 if (bsize == BLOCK_64X64)
2084 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
2087 int output_enabled = (bsize == BLOCK_64X64);
2088 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
2092 *rate = chosen_rdc.rate;
2093 *dist = chosen_rdc.dist;
2096 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
2097 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2098 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2099 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
2100 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
2104 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
2105 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
2106 BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
2107 BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
2108 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
2112 // Look at all the mode_info entries for blocks that are part of this
2113 // partition and find the min and max values for sb_type.
2114 // At the moment this is designed to work on a 64x64 SB but could be
2115 // adjusted to use a size parameter.
2117 // The min and max are assumed to have been initialized prior to calling this
2118 // function, so repeated calls can accumulate a min and max over more than one SB64.
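// A sketch of the intended accumulation pattern (mi_a and mi_b are
// hypothetical mode-info grids for two different SB64s):
//
//   BLOCK_SIZE min_bs = BLOCK_64X64, max_bs = BLOCK_4X4;
//   get_sb_partition_size_range(xd, mi_a, &min_bs, &max_bs, bs_hist);
//   get_sb_partition_size_range(xd, mi_b, &min_bs, &max_bs, bs_hist);
//   // min_bs/max_bs now bound the block sizes seen in both SB64s.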
2119 static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
2120 BLOCK_SIZE *min_block_size,
2121 BLOCK_SIZE *max_block_size,
2122 int bs_hist[BLOCK_SIZES]) {
2123 int sb_width_in_blocks = MI_BLOCK_SIZE;
2124 int sb_height_in_blocks = MI_BLOCK_SIZE;
2128 // Check the sb_type for each block that belongs to this region.
2129 for (i = 0; i < sb_height_in_blocks; ++i) {
2130 for (j = 0; j < sb_width_in_blocks; ++j) {
2131 MODE_INFO *mi = mi_8x8[index + j];
2132 BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
2133 bs_hist[sb_type]++;
2134 *min_block_size = MIN(*min_block_size, sb_type);
2135 *max_block_size = MAX(*max_block_size, sb_type);
2137 index += xd->mi_stride;
2141 // Next square block size less than or equal to the current block size.
2142 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
2143 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
2144 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
2145 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
2146 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
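// For example, next_square_size[BLOCK_16X8] is BLOCK_8X8 while
// next_square_size[BLOCK_16X16] is BLOCK_16X16: rectangular sizes round down
// to the next square size and square sizes map to themselves.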
2150 // Look at neighboring blocks and set a min and max partition size based on
2151 // the partition sizes they used.
2152 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2153 MACROBLOCKD *const xd,
2154 int mi_row, int mi_col,
2155 BLOCK_SIZE *min_block_size,
2156 BLOCK_SIZE *max_block_size) {
2157 VP9_COMMON *const cm = &cpi->common;
2158 MODE_INFO **mi = xd->mi;
2159 const int left_in_image = xd->left_available && mi[-1];
2160 const int above_in_image = xd->up_available && mi[-xd->mi_stride];
2161 const int row8x8_remaining = tile->mi_row_end - mi_row;
2162 const int col8x8_remaining = tile->mi_col_end - mi_col;
2164 BLOCK_SIZE min_size = BLOCK_4X4;
2165 BLOCK_SIZE max_size = BLOCK_64X64;
2166 int bs_hist[BLOCK_SIZES] = {0};
2168 // Trap case where we do not have a prediction.
2169 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
2170 // Start min at the largest size and max at the smallest; the scans below tighten them.
2171 min_size = BLOCK_64X64;
2172 max_size = BLOCK_4X4;
2174 // NOTE: each call to get_sb_partition_size_range() uses the previously
2175 // passed-in values for min and max as a starting point.
2176 // Find the min and max partition sizes used in the previous frame at this location.
2177 if (cm->frame_type != KEY_FRAME) {
2178 MODE_INFO **prev_mi =
2179 &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
2180 get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
2182 // Find the min and max partition sizes used in the left SB64.
2183 if (left_in_image) {
2184 MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
2185 get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
2188 // Find the min and max partition sizes used in the above SB64.
2189 if (above_in_image) {
2190 MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
2191 get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
2195 // Adjust observed min and max for "relaxed" auto partition case.
2196 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
2197 min_size = min_partition_size[min_size];
2198 max_size = max_partition_size[max_size];
2202 // Check border cases where max and min from neighbors may not be legal.
2203 max_size = find_partition_size(max_size,
2204 row8x8_remaining, col8x8_remaining,
2206 min_size = MIN(cpi->sf.rd_auto_partition_min_limit, MIN(min_size, max_size));
2208 // When use_square_partition_only is true, make sure at least one square
2209 // partition is allowed by selecting the next smaller square size as
2210 // the minimum block size.
2211 if (cpi->sf.use_square_partition_only &&
2212 next_square_size[max_size] < min_size) {
2213 min_size = next_square_size[max_size];
2216 *min_block_size = min_size;
2217 *max_block_size = max_size;
2220 static void auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2221 MACROBLOCKD *const xd,
2222 int mi_row, int mi_col,
2223 BLOCK_SIZE *min_block_size,
2224 BLOCK_SIZE *max_block_size) {
2225 VP9_COMMON *const cm = &cpi->common;
2226 MODE_INFO **mi_8x8 = xd->mi;
2227 const int left_in_image = xd->left_available && mi_8x8[-1];
2228 const int above_in_image = xd->up_available && mi_8x8[-xd->mi_stride];
2229 int row8x8_remaining = tile->mi_row_end - mi_row;
2230 int col8x8_remaining = tile->mi_col_end - mi_col;
2232 BLOCK_SIZE min_size = BLOCK_32X32;
2233 BLOCK_SIZE max_size = BLOCK_8X8;
2234 int bsl = mi_width_log2_lookup[BLOCK_64X64];
2235 const int search_range_ctrl = (((mi_row + mi_col) >> bsl) +
2236 get_chessboard_index(cm->current_video_frame)) & 0x1;
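// search_range_ctrl alternates between 0 and 1 in a checkerboard pattern
// across SB64 positions, and get_chessboard_index() flips that pattern from
// frame to frame, so the neighbor scan below runs for roughly half of the
// SB64s in any given frame.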
2237 // Trap case where we do not have a prediction.
2238 if (search_range_ctrl &&
2239 (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
2244 // Find the min and max partition sizes used in the left SB64.
2245 if (left_in_image) {
2246 MODE_INFO *cur_mi;
2247 mi = &mi_8x8[-1];
2248 for (block = 0; block < MI_BLOCK_SIZE; ++block) {
2249 cur_mi = mi[block * xd->mi_stride];
2250 sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
2251 min_size = MIN(min_size, sb_type);
2252 max_size = MAX(max_size, sb_type);
2255 // Find the min and max partition sizes used in the above SB64.
2256 if (above_in_image) {
2257 mi = &mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE];
2258 for (block = 0; block < MI_BLOCK_SIZE; ++block) {
2259 sb_type = mi[block] ? mi[block]->mbmi.sb_type : 0;
2260 min_size = MIN(min_size, sb_type);
2261 max_size = MAX(max_size, sb_type);
2265 min_size = min_partition_size[min_size];
2266 max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
2268 min_size = MIN(min_size, max_size);
2269 min_size = MAX(min_size, BLOCK_8X8);
2270 max_size = MIN(max_size, BLOCK_32X32);
2272 min_size = BLOCK_8X8;
2273 max_size = BLOCK_32X32;
2276 *min_block_size = min_size;
2277 *max_block_size = max_size;
2280 // TODO(jingning): refactor the functions that set the partition search range.
2281 static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
2282 int mi_row, int mi_col, BLOCK_SIZE bsize,
2283 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
2284 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2285 int mi_height = num_8x8_blocks_high_lookup[bsize];
2289 const int idx_str = cm->mi_stride * mi_row + mi_col;
2290 MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
2291 BLOCK_SIZE bs, min_size, max_size;
2293 min_size = BLOCK_64X64;
2294 max_size = BLOCK_4X4;
2297 for (idy = 0; idy < mi_height; ++idy) {
2298 for (idx = 0; idx < mi_width; ++idx) {
2299 mi = prev_mi[idy * cm->mi_stride + idx];
2300 bs = mi ? mi->mbmi.sb_type : bsize;
2301 min_size = MIN(min_size, bs);
2302 max_size = MAX(max_size, bs);
2307 if (xd->left_available) {
2308 for (idy = 0; idy < mi_height; ++idy) {
2309 mi = xd->mi[idy * cm->mi_stride - 1];
2310 bs = mi ? mi->mbmi.sb_type : bsize;
2311 min_size = MIN(min_size, bs);
2312 max_size = MAX(max_size, bs);
2316 if (xd->up_available) {
2317 for (idx = 0; idx < mi_width; ++idx) {
2318 mi = xd->mi[idx - cm->mi_stride];
2319 bs = mi ? mi->mbmi.sb_type : bsize;
2320 min_size = MIN(min_size, bs);
2321 max_size = MAX(max_size, bs);
2325 if (min_size == max_size) {
2326 min_size = min_partition_size[min_size];
2327 max_size = max_partition_size[max_size];
2334 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2335 memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
2338 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2339 memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
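// The two helpers above implement a simple save/restore pattern for the
// motion-vector predictor: store_pred_mv() snapshots x->pred_mv into the
// current PICK_MODE_CONTEXT after a mode search, and load_pred_mv() restores
// that snapshot so each candidate partition starts its motion search from the
// same predictor state. Typical usage in the search functions below:
//
//   store_pred_mv(x, ctx);   // after the PARTITION_NONE search
//   ...
//   load_pred_mv(x, ctx);    // before each HORZ/VERT/SPLIT sub-search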
2342 #if CONFIG_FP_MB_STATS
2343 const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
2344 {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
2345 const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
2346 {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
2347 const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
2348 {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
2349 const int qindex_split_threshold_lookup[BLOCK_SIZES] =
2350 {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
2351 const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
2352 {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
2363 static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
2364 if (fp_byte & FPMB_MOTION_ZERO_MASK) {
2366 } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
2368 } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
2370 } else if (fp_byte & FPMB_MOTION_UP_MASK) {
2377 static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
2378 MOTION_DIRECTION that_mv) {
2379 if (this_mv == that_mv) {
2382 return abs(this_mv - that_mv) == 2 ? 2 : 1;
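// The scoring above relies on the MOTION_DIRECTION values being ordered so
// that opposite directions differ by exactly 2. The result is:
//   same direction    -> 0
//   orthogonal motion -> 1
//   opposite motion   -> 2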
2387 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
2388 // unlikely to be selected depending on previous rate-distortion optimization
2389 // results, for encoding speed-up.
2390 static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
2391 TileDataEnc *tile_data,
2392 TOKENEXTRA **tp, int mi_row, int mi_col,
2393 BLOCK_SIZE bsize, RD_COST *rd_cost,
2394 int64_t best_rd, PC_TREE *pc_tree) {
2395 VP9_COMMON *const cm = &cpi->common;
2396 TileInfo *const tile_info = &tile_data->tile_info;
2397 MACROBLOCK *const x = &td->mb;
2398 MACROBLOCKD *const xd = &x->e_mbd;
2399 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
2400 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2401 PARTITION_CONTEXT sl[8], sa[8];
2402 TOKENEXTRA *tp_orig = *tp;
2403 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2406 RD_COST this_rdc, sum_rdc, best_rdc;
2407 int do_split = bsize >= BLOCK_8X8;
2410 // Override the skipping of rectangular partition operations for edge blocks.
2411 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
2412 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
2413 const int xss = x->e_mbd.plane[1].subsampling_x;
2414 const int yss = x->e_mbd.plane[1].subsampling_y;
2416 BLOCK_SIZE min_size = x->min_partition_size;
2417 BLOCK_SIZE max_size = x->max_partition_size;
2419 #if CONFIG_FP_MB_STATS
2420 unsigned int src_diff_var = UINT_MAX;
2421 int none_complexity = 0;
2424 int partition_none_allowed = !force_horz_split && !force_vert_split;
2425 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2426 bsize >= BLOCK_8X8;
2427 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2428 bsize >= BLOCK_8X8;
2431 assert(num_8x8_blocks_wide_lookup[bsize] ==
2432 num_8x8_blocks_high_lookup[bsize]);
2434 vp9_rd_cost_init(&this_rdc);
2435 vp9_rd_cost_init(&sum_rdc);
2436 vp9_rd_cost_reset(&best_rdc);
2437 best_rdc.rdcost = best_rd;
2439 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2441 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
2442 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2444 if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
2445 int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
2446 + get_chessboard_index(cm->current_video_frame)) & 0x1;
2448 if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
2449 set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
2452 // Determine which partition types to search according to the speed features.
2453 // The thresholds set here have to be square block sizes.
2454 if (cpi->sf.auto_min_max_partition_size) {
2455 partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
2456 partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
2457 force_horz_split);
2458 partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
2459 force_vert_split);
2460 do_split &= bsize > min_size;
2462 if (cpi->sf.use_square_partition_only) {
2463 partition_horz_allowed &= force_horz_split;
2464 partition_vert_allowed &= force_vert_split;
2467 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2469 #if CONFIG_FP_MB_STATS
2470 if (cpi->use_fp_mb_stats) {
2471 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2472 src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
2473 mi_row, mi_col, bsize);
2477 #if CONFIG_FP_MB_STATS
2478 // Decide whether to split directly and skip searching NONE by using
2479 // the first-pass block statistics.
2480 if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
2481 partition_none_allowed && src_diff_var > 4 &&
2482 cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
2483 int mb_row = mi_row >> 1;
2484 int mb_col = mi_col >> 1;
2485 int mb_row_end =
2486 MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2487 int mb_col_end =
2488 MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2489 int r, c;
2491 // Compute a complexity measure: roughly, the inconsistency of the motion
2492 // vectors obtained from the first pass within the current block.
2493 for (r = mb_row; r < mb_row_end; r++) {
2494 for (c = mb_col; c < mb_col_end; c++) {
2495 const int mb_index = r * cm->mb_cols + c;
2497 MOTION_DIRECTION this_mv;
2498 MOTION_DIRECTION right_mv;
2499 MOTION_DIRECTION bottom_mv;
2501 this_mv =
2502 get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);
2505 if (c != mb_col_end - 1) {
2506 right_mv = get_motion_direction_fp(
2507 cpi->twopass.this_frame_mb_stats[mb_index + 1]);
2508 none_complexity += get_motion_inconsistency(this_mv, right_mv);
2512 if (r != mb_row_end - 1) {
2513 bottom_mv = get_motion_direction_fp(
2514 cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
2515 none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
2518 // Do not count the left and top neighbors, to avoid double counting.
2522 if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
2523 partition_none_allowed = 0;
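// For example, at BLOCK_64X64 the threshold is 6, so the accumulated
// complexity must exceed it (for instance, four opposite-direction
// neighbor pairs at 2 apiece) before the PARTITION_NONE search is skipped.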
2529 if (partition_none_allowed) {
2530 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
2531 &this_rdc, bsize, ctx, best_rdc.rdcost);
2532 if (this_rdc.rate != INT_MAX) {
2533 if (bsize >= BLOCK_8X8) {
2534 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2535 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2536 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2537 this_rdc.rate, this_rdc.dist);
2540 if (this_rdc.rdcost < best_rdc.rdcost) {
2541 int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
2542 int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
2544 best_rdc = this_rdc;
2545 if (bsize >= BLOCK_8X8)
2546 pc_tree->partitioning = PARTITION_NONE;
2548 // Adjust dist breakout threshold according to the partition size.
2549 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
2550 b_height_log2_lookup[bsize]);
2552 rate_breakout_thr *= num_pels_log2_lookup[bsize];
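// Worked example: for BLOCK_64X64 the shift above is 8 - (4 + 4) = 0, so
// the full distortion threshold is kept; for BLOCK_8X8 it is 8 - (1 + 1) = 6,
// dividing the threshold by 64. The rate threshold scales the other way,
// multiplied by num_pels_log2_lookup[bsize].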
2554 // If all y, u, v transform blocks in this partition are skippable, and
2555 // the dist & rate are within the thresholds, the partition search is
2556 // terminated for the current branch of the partition search tree.
2557 // The dist & rate thresholds are set to 0 at speed 0 to disable the
2558 // early termination at that speed.
2559 if (!x->e_mbd.lossless &&
2560 (ctx->skippable && best_rdc.dist < dist_breakout_thr &&
2561 best_rdc.rate < rate_breakout_thr)) {
2562 do_split = 0;
2563 do_rect = 0;
2566 #if CONFIG_FP_MB_STATS
2567 // Check whether every 16x16 first-pass block has zero motion and a
2568 // first-pass residue that is small enough. If that is the case, check
2569 // the difference variance between the current frame and the last frame;
2570 // if that variance is also small enough, stop further splitting in the
2571 // RD optimization.
2572 if (cpi->use_fp_mb_stats && do_split != 0 &&
2573 cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
2574 int mb_row = mi_row >> 1;
2575 int mb_col = mi_col >> 1;
2576 int mb_row_end =
2577 MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2578 int mb_col_end =
2579 MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2580 int r, c;
2583 for (r = mb_row; r < mb_row_end; r++) {
2584 for (c = mb_col; c < mb_col_end; c++) {
2585 const int mb_index = r * cm->mb_cols + c;
2586 if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
2587 FPMB_MOTION_ZERO_MASK) ||
2588 !(cpi->twopass.this_frame_mb_stats[mb_index] &
2589 FPMB_ERROR_SMALL_MASK)) {
2599 if (src_diff_var == UINT_MAX) {
2600 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2601 src_diff_var = get_sby_perpixel_diff_variance(
2602 cpi, &x->plane[0].src, mi_row, mi_col, bsize);
2604 if (src_diff_var < 8) {
2613 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2616 // Store the estimated motion vector.
2617 if (cpi->sf.adaptive_motion_search)
2618 store_pred_mv(x, ctx);
2621 // TODO(jingning): use the motion vectors given by the above search as
2622 // the starting point of motion search in the following partition type check.
2624 subsize = get_subsize(bsize, PARTITION_SPLIT);
2625 if (bsize == BLOCK_8X8) {
2627 if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
2628 pc_tree->leaf_split[0]->pred_interp_filter =
2629 ctx->mic.mbmi.interp_filter;
2630 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2631 pc_tree->leaf_split[0], best_rdc.rdcost);
2632 if (sum_rdc.rate == INT_MAX)
2633 sum_rdc.rdcost = INT64_MAX;
2635 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
2636 const int x_idx = (i & 1) * mi_step;
2637 const int y_idx = (i >> 1) * mi_step;
2639 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
2642 if (cpi->sf.adaptive_motion_search)
2643 load_pred_mv(x, ctx);
2645 pc_tree->split[i]->index = i;
2646 rd_pick_partition(cpi, td, tile_data, tp,
2647 mi_row + y_idx, mi_col + x_idx,
2648 subsize, &this_rdc,
2649 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
2651 if (this_rdc.rate == INT_MAX) {
2652 sum_rdc.rdcost = INT64_MAX;
2655 sum_rdc.rate += this_rdc.rate;
2656 sum_rdc.dist += this_rdc.dist;
2657 sum_rdc.rdcost += this_rdc.rdcost;
2662 if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
2663 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2664 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2665 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2666 sum_rdc.rate, sum_rdc.dist);
2668 if (sum_rdc.rdcost < best_rdc.rdcost) {
2669 best_rdc = sum_rdc;
2670 pc_tree->partitioning = PARTITION_SPLIT;
2673 // Skip the rectangular partition tests when the larger block size
2674 // gives a better RD cost.
2675 if (cpi->sf.less_rectangular_check)
2676 do_rect &= !partition_none_allowed;
2678 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2682 if (partition_horz_allowed && do_rect) {
2683 subsize = get_subsize(bsize, PARTITION_HORZ);
2684 if (cpi->sf.adaptive_motion_search)
2685 load_pred_mv(x, ctx);
2686 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2687 partition_none_allowed)
2688 pc_tree->horizontal[0].pred_interp_filter =
2689 ctx->mic.mbmi.interp_filter;
2690 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2691 &pc_tree->horizontal[0], best_rdc.rdcost);
2693 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
2694 bsize > BLOCK_8X8) {
2695 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2696 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2697 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2699 if (cpi->sf.adaptive_motion_search)
2700 load_pred_mv(x, ctx);
2701 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2702 partition_none_allowed)
2703 pc_tree->horizontal[1].pred_interp_filter =
2704 ctx->mic.mbmi.interp_filter;
2705 rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col,
2706 &this_rdc, subsize, &pc_tree->horizontal[1],
2707 best_rdc.rdcost - sum_rdc.rdcost);
2708 if (this_rdc.rate == INT_MAX) {
2709 sum_rdc.rdcost = INT64_MAX;
2711 sum_rdc.rate += this_rdc.rate;
2712 sum_rdc.dist += this_rdc.dist;
2713 sum_rdc.rdcost += this_rdc.rdcost;
2717 if (sum_rdc.rdcost < best_rdc.rdcost) {
2718 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2719 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
2720 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
2721 if (sum_rdc.rdcost < best_rdc.rdcost) {
2722 best_rdc = sum_rdc;
2723 pc_tree->partitioning = PARTITION_HORZ;
2726 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2729 if (partition_vert_allowed && do_rect) {
2730 subsize = get_subsize(bsize, PARTITION_VERT);
2732 if (cpi->sf.adaptive_motion_search)
2733 load_pred_mv(x, ctx);
2734 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2735 partition_none_allowed)
2736 pc_tree->vertical[0].pred_interp_filter =
2737 ctx->mic.mbmi.interp_filter;
2738 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2739 &pc_tree->vertical[0], best_rdc.rdcost);
2740 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
2741 bsize > BLOCK_8X8) {
2742 update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
2743 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
2744 &pc_tree->vertical[0]);
2746 if (cpi->sf.adaptive_motion_search)
2747 load_pred_mv(x, ctx);
2748 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2749 partition_none_allowed)
2750 pc_tree->vertical[1].pred_interp_filter =
2751 ctx->mic.mbmi.interp_filter;
2752 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step,
2753 &this_rdc, subsize,
2754 &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
2755 if (this_rdc.rate == INT_MAX) {
2756 sum_rdc.rdcost = INT64_MAX;
2758 sum_rdc.rate += this_rdc.rate;
2759 sum_rdc.dist += this_rdc.dist;
2760 sum_rdc.rdcost += this_rdc.rdcost;
2764 if (sum_rdc.rdcost < best_rdc.rdcost) {
2765 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2766 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
2767 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2768 sum_rdc.rate, sum_rdc.dist);
2769 if (sum_rdc.rdcost < best_rdc.rdcost) {
2770 best_rdc = sum_rdc;
2771 pc_tree->partitioning = PARTITION_VERT;
2774 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2777 // TODO(jbb): This code was added so that we avoid a static analysis
2778 // warning related to the fact that best_rd isn't used after this
2779 // point. This code should be refactored so that the duplicate
2780 // checks occur in some subfunction and are thus used...
2782 *rd_cost = best_rdc;
2785 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
2786 pc_tree->index != 3) {
2787 int output_enabled = (bsize == BLOCK_64X64);
2788 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
2792 if (bsize == BLOCK_64X64) {
2793 assert(tp_orig < *tp);
2794 assert(best_rdc.rate < INT_MAX);
2795 assert(best_rdc.dist < INT64_MAX);
2796 } else {
2797 assert(tp_orig == *tp);
2801 static void encode_rd_sb_row(VP9_COMP *cpi,
2803 TileDataEnc *tile_data,
2806 VP9_COMMON *const cm = &cpi->common;
2807 TileInfo *const tile_info = &tile_data->tile_info;
2808 MACROBLOCK *const x = &td->mb;
2809 MACROBLOCKD *const xd = &x->e_mbd;
2810 SPEED_FEATURES *const sf = &cpi->sf;
2813 // Initialize the left context for the new SB row
2814 memset(&xd->left_context, 0, sizeof(xd->left_context));
2815 memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
2817 // Code each SB in the row
2818 for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
2819 mi_col += MI_BLOCK_SIZE) {
2820 const struct segmentation *const seg = &cm->seg;
2827 const int idx_str = cm->mi_stride * mi_row + mi_col;
2828 MODE_INFO **mi = cm->mi_grid_visible + idx_str;
2830 if (sf->adaptive_pred_interp_filter) {
2831 for (i = 0; i < 64; ++i)
2832 td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
2834 for (i = 0; i < 64; ++i) {
2835 td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
2836 td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
2837 td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
2838 td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
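// Resetting every cached filter choice to SWITCHABLE marks it as "not yet
// decided"; the adaptive path in rd_pick_partition() only copies a concrete
// interp_filter into these contexts once a PARTITION_NONE search has
// selected one.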
2842 vp9_zero(x->pred_mv);
2843 td->pc_root->index = 0;
2846 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
2847 : cm->last_frame_seg_map;
2848 int segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
2849 seg_skip = vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP);
2852 x->source_variance = UINT_MAX;
2853 if (sf->partition_search_type == FIXED_PARTITION || seg_skip) {
2854 const BLOCK_SIZE bsize =
2855 seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
2856 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2857 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
2858 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2859 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2860 } else if (cpi->partition_search_skippable_frame) {
2862 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2863 bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
2864 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
2865 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2866 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2867 } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
2868 cm->frame_type != KEY_FRAME) {
2869 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
2870 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2871 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2873 // If required, set upper and lower partition size limits.
2874 if (sf->auto_min_max_partition_size) {
2875 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2876 rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
2877 &x->min_partition_size,
2878 &x->max_partition_size);
2880 rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
2881 &dummy_rdc, INT64_MAX, td->pc_root);
2886 static void init_encode_frame_mb_context(VP9_COMP *cpi) {
2887 MACROBLOCK *const x = &cpi->td.mb;
2888 VP9_COMMON *const cm = &cpi->common;
2889 MACROBLOCKD *const xd = &x->e_mbd;
2890 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
2892 // Copy data over into macroblock data structures.
2893 vp9_setup_src_planes(x, cpi->Source, 0, 0);
2895 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
2897 // Note: this memset assumes above_context[0], [1] and [2]
2898 // are allocated as part of the same buffer.
2899 memset(xd->above_context[0], 0,
2900 sizeof(*xd->above_context[0]) *
2901 2 * aligned_mi_cols * MAX_MB_PLANE);
2902 memset(xd->above_seg_context, 0,
2903 sizeof(*xd->above_seg_context) * aligned_mi_cols);
2906 static int check_dual_ref_flags(VP9_COMP *cpi) {
2907 const int ref_flags = cpi->ref_frame_flags;
2909 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
2912 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
2913 + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
2917 static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
2919 const int mis = cm->mi_stride;
2920 MODE_INFO **mi_ptr = cm->mi_grid_visible;
2922 for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
2923 for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
2924 if (mi_ptr[mi_col]->mbmi.tx_size > max_tx_size)
2925 mi_ptr[mi_col]->mbmi.tx_size = max_tx_size;
2930 static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
2931 if (frame_is_intra_only(&cpi->common))
2933 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
2934 return ALTREF_FRAME;
2935 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
2936 return GOLDEN_FRAME;
2941 static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
2944 if (cpi->common.frame_type == KEY_FRAME &&
2945 cpi->sf.use_nonrd_pick_mode &&
2946 cpi->sf.partition_search_type == VAR_BASED_PARTITION)
2948 if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
2950 else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
2951 cpi->sf.tx_size_search_method == USE_TX_8X8)
2952 return TX_MODE_SELECT;
2954 return cpi->common.tx_mode;
2957 static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
2958 RD_COST *rd_cost, BLOCK_SIZE bsize,
2959 PICK_MODE_CONTEXT *ctx) {
2960 if (bsize < BLOCK_16X16)
2961 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
2963 vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
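// Note: for blocks below BLOCK_16X16 this hybrid keeps the full RD intra
// search, while larger blocks use the faster non-RD vp9_pick_intra_mode().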
2966 static void nonrd_pick_sb_modes(VP9_COMP *cpi,
2967 TileDataEnc *tile_data, MACROBLOCK *const x,
2968 int mi_row, int mi_col, RD_COST *rd_cost,
2969 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
2970 VP9_COMMON *const cm = &cpi->common;
2971 TileInfo *const tile_info = &tile_data->tile_info;
2972 MACROBLOCKD *const xd = &x->e_mbd;
2974 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2975 mbmi = &xd->mi[0]->mbmi;
2976 mbmi->sb_type = bsize;
2978 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
2979 if (cyclic_refresh_segment_id_boosted(mbmi->segment_id))
2980 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
2982 if (cm->frame_type == KEY_FRAME)
2983 hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
2984 else if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
2985 set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
2986 else if (bsize >= BLOCK_8X8)
2987 vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col,
2988 rd_cost, bsize, ctx);
2990 vp9_pick_inter_mode_sub8x8(cpi, x, tile_data, mi_row, mi_col,
2991 rd_cost, bsize, ctx);
2993 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2995 if (rd_cost->rate == INT_MAX)
2996 vp9_rd_cost_reset(rd_cost);
2998 ctx->rate = rd_cost->rate;
2999 ctx->dist = rd_cost->dist;
3002 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
3003 int mi_row, int mi_col,
3006 MACROBLOCKD *xd = &x->e_mbd;
3007 int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3008 PARTITION_TYPE partition = pc_tree->partitioning;
3009 BLOCK_SIZE subsize = get_subsize(bsize, partition);
3011 assert(bsize >= BLOCK_8X8);
3013 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3014 return;
3016 switch (partition) {
3017 case PARTITION_NONE:
3018 set_mode_info_offsets(cm, xd, mi_row, mi_col);
3019 *(xd->mi[0]) = pc_tree->none.mic;
3020 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
3021 break;
3022 case PARTITION_VERT:
3023 set_mode_info_offsets(cm, xd, mi_row, mi_col);
3024 *(xd->mi[0]) = pc_tree->vertical[0].mic;
3025 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
3027 if (mi_col + hbs < cm->mi_cols) {
3028 set_mode_info_offsets(cm, xd, mi_row, mi_col + hbs);
3029 *(xd->mi[0]) = pc_tree->vertical[1].mic;
3030 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
3031 }
3032 break;
3033 case PARTITION_HORZ:
3034 set_mode_info_offsets(cm, xd, mi_row, mi_col);
3035 *(xd->mi[0]) = pc_tree->horizontal[0].mic;
3036 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
3037 if (mi_row + hbs < cm->mi_rows) {
3038 set_mode_info_offsets(cm, xd, mi_row + hbs, mi_col);
3039 *(xd->mi[0]) = pc_tree->horizontal[1].mic;
3040 duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
3041 }
3042 break;
3043 case PARTITION_SPLIT: {
3044 fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
3045 fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
3046 pc_tree->split[1]);
3047 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
3048 pc_tree->split[2]);
3049 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
3050 pc_tree->split[3]);
3058 // Reset the prediction pixel ready flag recursively.
3059 static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
3060 pc_tree->none.pred_pixel_ready = 0;
3061 pc_tree->horizontal[0].pred_pixel_ready = 0;
3062 pc_tree->horizontal[1].pred_pixel_ready = 0;
3063 pc_tree->vertical[0].pred_pixel_ready = 0;
3064 pc_tree->vertical[1].pred_pixel_ready = 0;
3066 if (bsize > BLOCK_8X8) {
3067 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
3069 for (i = 0; i < 4; ++i)
3070 pred_pixel_ready_reset(pc_tree->split[i], subsize);
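// Call sites clear a whole subtree with this helper and then set
// pred_pixel_ready back to 1 per context immediately before each
// nonrd_pick_sb_modes() call that should use fresh prediction pixels.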
3074 static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
3075 TileDataEnc *tile_data,
3076 TOKENEXTRA **tp, int mi_row,
3077 int mi_col, BLOCK_SIZE bsize, RD_COST *rd_cost,
3078 int do_recon, int64_t best_rd,
3080 const SPEED_FEATURES *const sf = &cpi->sf;
3081 VP9_COMMON *const cm = &cpi->common;
3082 TileInfo *const tile_info = &tile_data->tile_info;
3083 MACROBLOCK *const x = &td->mb;
3084 MACROBLOCKD *const xd = &x->e_mbd;
3085 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
3086 TOKENEXTRA *tp_orig = *tp;
3087 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
3089 BLOCK_SIZE subsize = bsize;
3090 RD_COST this_rdc, sum_rdc, best_rdc;
3091 int do_split = bsize >= BLOCK_8X8;
3093 // Override the skipping of rectangular partition operations for edge blocks.
3094 const int force_horz_split = (mi_row + ms >= cm->mi_rows);
3095 const int force_vert_split = (mi_col + ms >= cm->mi_cols);
3096 const int xss = x->e_mbd.plane[1].subsampling_x;
3097 const int yss = x->e_mbd.plane[1].subsampling_y;
3099 int partition_none_allowed = !force_horz_split && !force_vert_split;
3100 int partition_horz_allowed = !force_vert_split && yss <= xss &&
3101 bsize >= BLOCK_8X8;
3102 int partition_vert_allowed = !force_horz_split && xss <= yss &&
3103 bsize >= BLOCK_8X8;
3106 assert(num_8x8_blocks_wide_lookup[bsize] ==
3107 num_8x8_blocks_high_lookup[bsize]);
3109 vp9_rd_cost_init(&sum_rdc);
3110 vp9_rd_cost_reset(&best_rdc);
3111 best_rdc.rdcost = best_rd;
3113 // Determine which partition types to search according to the speed features.
3114 // The thresholds set here have to be square block sizes.
3115 if (sf->auto_min_max_partition_size) {
3116 partition_none_allowed &= (bsize <= x->max_partition_size &&
3117 bsize >= x->min_partition_size);
3118 partition_horz_allowed &= ((bsize <= x->max_partition_size &&
3119 bsize > x->min_partition_size) ||
3120 force_horz_split);
3121 partition_vert_allowed &= ((bsize <= x->max_partition_size &&
3122 bsize > x->min_partition_size) ||
3123 force_vert_split);
3124 do_split &= bsize > x->min_partition_size;
3126 if (sf->use_square_partition_only) {
3127 partition_horz_allowed &= force_horz_split;
3128 partition_vert_allowed &= force_vert_split;
3131 ctx->pred_pixel_ready = !(partition_vert_allowed ||
3132 partition_horz_allowed ||
3133 do_split);
3136 if (partition_none_allowed) {
3137 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
3138 &this_rdc, bsize, ctx);
3139 ctx->mic.mbmi = xd->mi[0]->mbmi;
3140 ctx->skip_txfm[0] = x->skip_txfm[0];
3141 ctx->skip = x->skip;
3143 if (this_rdc.rate != INT_MAX) {
3144 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3145 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
3146 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3147 this_rdc.rate, this_rdc.dist);
3148 if (this_rdc.rdcost < best_rdc.rdcost) {
3149 int64_t dist_breakout_thr = sf->partition_search_breakout_dist_thr;
3150 int64_t rate_breakout_thr = sf->partition_search_breakout_rate_thr;
3152 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
3153 b_height_log2_lookup[bsize]);
3155 rate_breakout_thr *= num_pels_log2_lookup[bsize];
3157 best_rdc = this_rdc;
3158 if (bsize >= BLOCK_8X8)
3159 pc_tree->partitioning = PARTITION_NONE;
3161 if (!x->e_mbd.lossless &&
3162 this_rdc.rate < rate_breakout_thr &&
3163 this_rdc.dist < dist_breakout_thr) {
3164 do_split = 0;
3165 do_rect = 0;
3171 // Store the estimated motion vector.
3172 store_pred_mv(x, ctx);
3176 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3177 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
3178 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
3179 subsize = get_subsize(bsize, PARTITION_SPLIT);
3180 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
3181 const int x_idx = (i & 1) * ms;
3182 const int y_idx = (i >> 1) * ms;
3184 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
3186 load_pred_mv(x, ctx);
3187 nonrd_pick_partition(cpi, td, tile_data, tp,
3188 mi_row + y_idx, mi_col + x_idx,
3189 subsize, &this_rdc, 0,
3190 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
3192 if (this_rdc.rate == INT_MAX) {
3193 vp9_rd_cost_reset(&sum_rdc);
3195 sum_rdc.rate += this_rdc.rate;
3196 sum_rdc.dist += this_rdc.dist;
3197 sum_rdc.rdcost += this_rdc.rdcost;
3201 if (sum_rdc.rdcost < best_rdc.rdcost) {
3202 best_rdc = sum_rdc;
3203 pc_tree->partitioning = PARTITION_SPLIT;
3205 // Skip the rectangular partition tests when the larger block size
3206 // gives a better RD cost.
3207 if (sf->less_rectangular_check)
3208 do_rect &= !partition_none_allowed;
3213 if (partition_horz_allowed && do_rect) {
3214 subsize = get_subsize(bsize, PARTITION_HORZ);
3215 if (sf->adaptive_motion_search)
3216 load_pred_mv(x, ctx);
3217 pc_tree->horizontal[0].pred_pixel_ready = 1;
3218 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3219 &pc_tree->horizontal[0]);
3221 pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
3222 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3223 pc_tree->horizontal[0].skip = x->skip;
3225 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
3226 load_pred_mv(x, ctx);
3227 pc_tree->horizontal[1].pred_pixel_ready = 1;
3228 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col,
3229 &this_rdc, subsize,
3230 &pc_tree->horizontal[1]);
3232 pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
3233 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3234 pc_tree->horizontal[1].skip = x->skip;
3236 if (this_rdc.rate == INT_MAX) {
3237 vp9_rd_cost_reset(&sum_rdc);
3239 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3240 this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
3241 sum_rdc.rate += this_rdc.rate;
3242 sum_rdc.dist += this_rdc.dist;
3243 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3244 sum_rdc.rate, sum_rdc.dist);
3248 if (sum_rdc.rdcost < best_rdc.rdcost) {
3249 best_rdc = sum_rdc;
3250 pc_tree->partitioning = PARTITION_HORZ;
3252 pred_pixel_ready_reset(pc_tree, bsize);
3257 if (partition_vert_allowed && do_rect) {
3258 subsize = get_subsize(bsize, PARTITION_VERT);
3259 if (sf->adaptive_motion_search)
3260 load_pred_mv(x, ctx);
3261 pc_tree->vertical[0].pred_pixel_ready = 1;
3262 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3263 &pc_tree->vertical[0]);
3264 pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
3265 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3266 pc_tree->vertical[0].skip = x->skip;
3268 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
3269 load_pred_mv(x, ctx);
3270 pc_tree->vertical[1].pred_pixel_ready = 1;
3271 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms,
3272 &this_rdc, subsize,
3273 &pc_tree->vertical[1]);
3274 pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
3275 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3276 pc_tree->vertical[1].skip = x->skip;
3278 if (this_rdc.rate == INT_MAX) {
3279 vp9_rd_cost_reset(&sum_rdc);
3281 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3282 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
3283 sum_rdc.rate += this_rdc.rate;
3284 sum_rdc.dist += this_rdc.dist;
3285 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3286 sum_rdc.rate, sum_rdc.dist);
3290 if (sum_rdc.rdcost < best_rdc.rdcost) {
3291 best_rdc = sum_rdc;
3292 pc_tree->partitioning = PARTITION_VERT;
3294 pred_pixel_ready_reset(pc_tree, bsize);
3298 *rd_cost = best_rdc;
3300 if (best_rdc.rate == INT_MAX) {
3301 vp9_rd_cost_reset(rd_cost);
3305 // Update the mode info array.
3306 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, pc_tree);
3308 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
3309 int output_enabled = (bsize == BLOCK_64X64);
3310 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3314 if (bsize == BLOCK_64X64 && do_recon) {
3315 assert(tp_orig < *tp);
3316 assert(best_rdc.rate < INT_MAX);
3317 assert(best_rdc.dist < INT64_MAX);
3318 } else {
3319 assert(tp_orig == *tp);
3323 static void nonrd_select_partition(VP9_COMP *cpi,
3325 TileDataEnc *tile_data,
3328 int mi_row, int mi_col,
3329 BLOCK_SIZE bsize, int output_enabled,
3330 RD_COST *rd_cost, PC_TREE *pc_tree) {
3331 VP9_COMMON *const cm = &cpi->common;
3332 TileInfo *const tile_info = &tile_data->tile_info;
3333 MACROBLOCK *const x = &td->mb;
3334 MACROBLOCKD *const xd = &x->e_mbd;
3335 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3336 const int mis = cm->mi_stride;
3337 PARTITION_TYPE partition;
3341 vp9_rd_cost_reset(&this_rdc);
3342 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3343 return;
3345 subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
3346 partition = partition_lookup[bsl][subsize];
3348 if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
3349 subsize >= BLOCK_16X16) {
3350 x->max_partition_size = BLOCK_32X32;
3351 x->min_partition_size = BLOCK_8X8;
3352 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3353 rd_cost, 0, INT64_MAX, pc_tree);
3354 } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
3355 x->max_partition_size = BLOCK_16X16;
3356 x->min_partition_size = BLOCK_8X8;
3357 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3358 rd_cost, 0, INT64_MAX, pc_tree);
3360 switch (partition) {
3361 case PARTITION_NONE:
3362 pc_tree->none.pred_pixel_ready = 1;
3363 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3364 subsize, &pc_tree->none);
3365 pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
3366 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3367 pc_tree->none.skip = x->skip;
3369 case PARTITION_VERT:
3370 pc_tree->vertical[0].pred_pixel_ready = 1;
3371 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3372 subsize, &pc_tree->vertical[0]);
3373 pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
3374 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3375 pc_tree->vertical[0].skip = x->skip;
3376 if (mi_col + hbs < cm->mi_cols) {
3377 pc_tree->vertical[1].pred_pixel_ready = 1;
3378 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
3379 &this_rdc, subsize, &pc_tree->vertical[1]);
3380 pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
3381 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3382 pc_tree->vertical[1].skip = x->skip;
3383 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3384 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3385 rd_cost->rate += this_rdc.rate;
3386 rd_cost->dist += this_rdc.dist;
3390 case PARTITION_HORZ:
3391 pc_tree->horizontal[0].pred_pixel_ready = 1;
3392 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3393 subsize, &pc_tree->horizontal[0]);
3394 pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
3395 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3396 pc_tree->horizontal[0].skip = x->skip;
3397 if (mi_row + hbs < cm->mi_rows) {
3398 pc_tree->horizontal[1].pred_pixel_ready = 1;
3399 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
3400 &this_rdc, subsize, &pc_tree->horizontal[1]);
3401 pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
3402 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3403 pc_tree->horizontal[1].skip = x->skip;
3404 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3405 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3406 rd_cost->rate += this_rdc.rate;
3407 rd_cost->dist += this_rdc.dist;
3411 case PARTITION_SPLIT:
3412 subsize = get_subsize(bsize, PARTITION_SPLIT);
3413 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3414 subsize, output_enabled, rd_cost,
3415 pc_tree->split[0]);
3416 nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp,
3417 mi_row, mi_col + hbs, subsize, output_enabled,
3418 &this_rdc, pc_tree->split[1]);
3419 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3420 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3421 rd_cost->rate += this_rdc.rate;
3422 rd_cost->dist += this_rdc.dist;
3424 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis, tp,
3425 mi_row + hbs, mi_col, subsize, output_enabled,
3426 &this_rdc, pc_tree->split[2]);
3427 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3428 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3429 rd_cost->rate += this_rdc.rate;
3430 rd_cost->dist += this_rdc.dist;
3432 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
3433 mi_row + hbs, mi_col + hbs, subsize,
3434 output_enabled, &this_rdc, pc_tree->split[3]);
3435 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3436 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3437 rd_cost->rate += this_rdc.rate;
3438 rd_cost->dist += this_rdc.dist;
3439 }
3440 break;
3441 default:
3442 assert(0 && "Invalid partition type.");
3447 if (bsize == BLOCK_64X64 && output_enabled)
3448 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
3452 static void nonrd_use_partition(VP9_COMP *cpi,
3454 TileDataEnc *tile_data,
3457 int mi_row, int mi_col,
3458 BLOCK_SIZE bsize, int output_enabled,
3459 RD_COST *dummy_cost, PC_TREE *pc_tree) {
3460 VP9_COMMON *const cm = &cpi->common;
3461 TileInfo *tile_info = &tile_data->tile_info;
3462 MACROBLOCK *const x = &td->mb;
3463 MACROBLOCKD *const xd = &x->e_mbd;
3464 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3465 const int mis = cm->mi_stride;
3466 PARTITION_TYPE partition;
3469 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3470 return;
3472 subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
3473 partition = partition_lookup[bsl][subsize];
3475 if (output_enabled && bsize != BLOCK_4X4) {
3476 int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
3477 td->counts->partition[ctx][partition]++;
3480 switch (partition) {
3481 case PARTITION_NONE:
3482 pc_tree->none.pred_pixel_ready = 1;
3483 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3484 subsize, &pc_tree->none);
3485 pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
3486 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3487 pc_tree->none.skip = x->skip;
3488 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3489 subsize, &pc_tree->none);
3490 break;
3491 case PARTITION_VERT:
3492 pc_tree->vertical[0].pred_pixel_ready = 1;
3493 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3494 subsize, &pc_tree->vertical[0]);
3495 pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
3496 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3497 pc_tree->vertical[0].skip = x->skip;
3498 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3499 subsize, &pc_tree->vertical[0]);
3500 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
3501 pc_tree->vertical[1].pred_pixel_ready = 1;
3502 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
3503 dummy_cost, subsize, &pc_tree->vertical[1]);
3504 pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
3505 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3506 pc_tree->vertical[1].skip = x->skip;
3507 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
3508 output_enabled, subsize, &pc_tree->vertical[1]);
3509 }
3510 break;
3511 case PARTITION_HORZ:
3512 pc_tree->horizontal[0].pred_pixel_ready = 1;
3513 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3514 subsize, &pc_tree->horizontal[0]);
3515 pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
3516 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3517 pc_tree->horizontal[0].skip = x->skip;
3518 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3519 subsize, &pc_tree->horizontal[0]);
3521 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
3522 pc_tree->horizontal[1].pred_pixel_ready = 1;
3523 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
3524 dummy_cost, subsize, &pc_tree->horizontal[1]);
3525 pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
3526 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3527 pc_tree->horizontal[1].skip = x->skip;
3528 encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
3529 output_enabled, subsize, &pc_tree->horizontal[1]);
3530 }
3531 break;
3532 case PARTITION_SPLIT:
3533 subsize = get_subsize(bsize, PARTITION_SPLIT);
3534 if (bsize == BLOCK_8X8) {
3535 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3536 subsize, pc_tree->leaf_split[0]);
3537 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col,
3538 output_enabled, subsize, pc_tree->leaf_split[0]);
3539 } else {
3540 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3541 subsize, output_enabled, dummy_cost,
3542 pc_tree->split[0]);
3543 nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp,
3544 mi_row, mi_col + hbs, subsize, output_enabled,
3545 dummy_cost, pc_tree->split[1]);
3546 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
3547 mi_row + hbs, mi_col, subsize, output_enabled,
3548 dummy_cost, pc_tree->split[2]);
3549 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
3550 mi_row + hbs, mi_col + hbs, subsize, output_enabled,
3551 dummy_cost, pc_tree->split[3]);
3552 }
3553 break;
3554 default:
3555 assert(0 && "Invalid partition type.");
3559 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
3560 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
3563 static void encode_nonrd_sb_row(VP9_COMP *cpi,
3565 TileDataEnc *tile_data,
3568 SPEED_FEATURES *const sf = &cpi->sf;
3569 VP9_COMMON *const cm = &cpi->common;
3570 TileInfo *const tile_info = &tile_data->tile_info;
3571 MACROBLOCK *const x = &td->mb;
3572 MACROBLOCKD *const xd = &x->e_mbd;
3575 // Initialize the left context for the new SB row
3576 memset(&xd->left_context, 0, sizeof(xd->left_context));
3577 memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
  // Code each SB in the row.
  for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    const struct segmentation *const seg = &cm->seg;
    RD_COST dummy_rdc;
    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;
    PARTITION_SEARCH_TYPE partition_search_type = sf->partition_search_type;
    BLOCK_SIZE bsize = BLOCK_64X64;
    int seg_skip = 0;
    x->source_variance = UINT_MAX;
    vp9_zero(x->pred_mv);
    vp9_rd_cost_init(&dummy_rdc);
    x->color_sensitivity[0] = 0;
    x->color_sensitivity[1] = 0;

    if (seg->enabled) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      int segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64,
                                          mi_row, mi_col);
      seg_skip = vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP);
      if (seg_skip) {
        partition_search_type = FIXED_PARTITION;
      }
    }
    // Set the partition type of the 64X64 block.
    switch (partition_search_type) {
      case VAR_BASED_PARTITION:
        // TODO(jingning, marpan): The mode decision and encoding process
        // support both intra and inter sub8x8 block coding for RTC mode.
        // Tune the thresholds accordingly to use sub8x8 block coding for
        // coding performance improvement.
        choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case SOURCE_VAR_BASED_PARTITION:
        set_source_var_based_partition(cpi, tile_info, x, mi, mi_row, mi_col);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case FIXED_PARTITION:
        if (!seg_skip)
          bsize = sf->always_this_block_size;
        set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case REFERENCE_PARTITION:
        set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
        if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
            xd->mi[0]->mbmi.segment_id) {
          x->max_partition_size = BLOCK_64X64;
          x->min_partition_size = BLOCK_8X8;
          nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
                               BLOCK_64X64, &dummy_rdc, 1,
                               INT64_MAX, td->pc_root);
        } else {
          choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
          nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                                 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        }
        break;
      default:
        assert(0);
        break;
    }
  }
}
// end RTC play code
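
// Builds a histogram of per-16x16-block variances of the difference between
// the current and the last source frame, then sets cpi->source_var_thresh to
// the first variance bin at which the cumulative count exceeds a
// resolution-dependent cutoff. Returns 0 once a threshold is set, otherwise
// the speed-feature check frequency so the caller re-checks later.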
static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
  const SPEED_FEATURES *const sf = &cpi->sf;
  const VP9_COMMON *const cm = &cpi->common;

  const uint8_t *src = cpi->Source->y_buffer;
  const uint8_t *last_src = cpi->Last_Source->y_buffer;
  const int src_stride = cpi->Source->y_stride;
  const int last_stride = cpi->Last_Source->y_stride;

  // Pick the cutoff threshold.
  const int cutoff = (MIN(cm->width, cm->height) >= 720) ?
      (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
      (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
  DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]);
  diff *var16 = cpi->source_diff_var;

  int sum = 0;
  int i, j;

  memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));

  for (i = 0; i < cm->mb_rows; i++) {
    for (j = 0; j < cm->mb_cols; j++) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        switch (cm->bit_depth) {
          case VPX_BITS_8:
            vpx_highbd_8_get16x16var(src, src_stride, last_src, last_stride,
                                     &var16->sse, &var16->sum);
            break;
          case VPX_BITS_10:
            vpx_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          case VPX_BITS_12:
            vpx_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          default:
            assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
                   " or VPX_BITS_12");
            return -1;
        }
      } else {
        vpx_get16x16var(src, src_stride, last_src, last_stride,
                        &var16->sse, &var16->sum);
      }
#else
      vpx_get16x16var(src, src_stride, last_src, last_stride,
                      &var16->sse, &var16->sum);
#endif  // CONFIG_VP9_HIGHBITDEPTH
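      // Block variance: sse - sum^2 / 256; the >> 8 divides the squared sum
      // by the number of pixels in a 16x16 block.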
      var16->var = var16->sse -
          (((uint32_t)var16->sum * var16->sum) >> 8);

      if (var16->var >= VAR_HIST_MAX_BG_VAR)
        hist[VAR_HIST_BINS - 1]++;
      else
        hist[var16->var / VAR_HIST_FACTOR]++;

      src += 16;
      last_src += 16;
      var16++;
    }

    src = src - cm->mb_cols * 16 + 16 * src_stride;
    last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
  }

  cpi->source_var_thresh = 0;

  if (hist[VAR_HIST_BINS - 1] < cutoff) {
    for (i = 0; i < VAR_HIST_BINS - 1; i++) {
      sum += hist[i];

      if (sum > cutoff) {
        cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
        return 0;
      }
    }
  }

  return sf->search_type_check_frequency;
}
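
// Picks the partition search method when SOURCE_VAR_BASED_PARTITION is
// configured: key frames get a full partition search, intra-only frames a
// fixed partition, and inter frames use a fixed partition while the
// countdown to the next variance-histogram check (above) is still running.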
static void source_var_based_partition_search_method(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;

  if (cm->frame_type == KEY_FRAME) {
    // For key frames, use SEARCH_PARTITION.
    sf->partition_search_type = SEARCH_PARTITION;
  } else if (cm->intra_only) {
    sf->partition_search_type = FIXED_PARTITION;
  } else {
    if (cm->last_width != cm->width || cm->last_height != cm->height) {
      if (cpi->source_diff_var)
        vpx_free(cpi->source_diff_var);

      CHECK_MEM_ERROR(cm, cpi->source_diff_var,
                      vpx_calloc(cm->MBs, sizeof(diff)));
    }

    if (!cpi->frames_till_next_var_check)
      cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);

    if (cpi->frames_till_next_var_check > 0) {
      sf->partition_search_type = FIXED_PARTITION;
      cpi->frames_till_next_var_check--;
    }
  }
}
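
// Heuristic feeding sf->skip_encode_frame: the final encode pass may only be
// skipped on shown, non-key frames where inter blocks outnumber intra blocks
// by more than four to one.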
static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) {
  unsigned int intra_count = 0, inter_count = 0;
  int j;

  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
    intra_count += td->counts->intra_inter[j][0];
    inter_count += td->counts->intra_inter[j][1];
  }

  return (intra_count << 2) < inter_count &&
         cm->frame_type != KEY_FRAME &&
         cm->show_frame;
}
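
// Lazily allocates the per-tile encoder data, seeding the mode-pruning
// thresholds and mode maps with their defaults, then (re)initializes each
// TileInfo and carves the frame's token buffer into per-tile ranges.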
void vp9_init_tile_data(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;
  TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
  int tile_tok = 0;

  if (cpi->tile_data == NULL) {
    CHECK_MEM_ERROR(cm, cpi->tile_data,
        vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
    for (tile_row = 0; tile_row < tile_rows; ++tile_row)
      for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
        TileDataEnc *tile_data =
            &cpi->tile_data[tile_row * tile_cols + tile_col];
        int i, j;
        for (i = 0; i < BLOCK_SIZES; ++i) {
          for (j = 0; j < MAX_MODES; ++j) {
            tile_data->thresh_freq_fact[i][j] = 32;
            tile_data->mode_map[i][j] = j;
          }
        }
      }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileInfo *tile_info =
          &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
      vp9_tile_init(tile_info, cm, tile_row, tile_col);

      cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
      pre_tok = cpi->tile_tok[tile_row][tile_col];
      tile_tok = allocated_tokens(*tile_info);
    }
  }
}
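
// Encodes a single tile, sending each superblock row through either the
// non-RD (RTC) path or the full RD path depending on the speed features, and
// records the token count, which must stay within the per-tile allocation
// made in vp9_init_tile_data().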
void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td,
                     int tile_row, int tile_col) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  TileDataEnc *this_tile =
      &cpi->tile_data[tile_row * tile_cols + tile_col];
  const TileInfo *const tile_info = &this_tile->tile_info;
  TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
  int mi_row;

  for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    if (cpi->sf.use_nonrd_pick_mode)
      encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
    else
      encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
  }
  cpi->tok_count[tile_row][tile_col] =
      (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
  assert(tok - cpi->tile_tok[tile_row][tile_col] <=
         allocated_tokens(*tile_info));
}

static void encode_tiles(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;

  vp9_init_tile_data(cpi);

  for (tile_row = 0; tile_row < tile_rows; ++tile_row)
    for (tile_col = 0; tile_col < tile_cols; ++tile_col)
      vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
}

#if CONFIG_FP_MB_STATS
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
                            VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
  uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
      cm->current_video_frame * cm->MBs * sizeof(uint8_t);

  if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
    return EOF;

  *this_frame_mb_stats = mb_stats_in;

  return 1;
}
#endif
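
// Per-frame setup and dispatch: clears the frame counters, selects the 4x4
// transform pair (WHT for lossless, DCT otherwise), initializes quantizer,
// RD and motion-estimation constants, and then encodes all tiles, using the
// multi-threaded tile encoder when threads and tile columns allow.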
static void encode_frame_internal(VP9_COMP *cpi) {
  SPEED_FEATURES *const sf = &cpi->sf;
  RD_OPT *const rd_opt = &cpi->rd;
  ThreadData *const td = &cpi->td;
  MACROBLOCK *const x = &td->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  RD_COUNTS *const rdc = &cpi->td.rd_counts;

  xd->mi = cm->mi_grid_visible;
  xd->mi[0] = cm->mi;

  vp9_zero(*td->counts);
  vp9_zero(rdc->coef_counts);
  vp9_zero(rdc->comp_pred_diff);
  vp9_zero(rdc->filter_diff);
  vp9_zero(rdc->tx_select_diff);
  vp9_zero(rd_opt->tx_select_threshes);

  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;
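
  // Lossless coding is only possible at the minimum quantizer with no
  // per-plane delta-Qs; when active, the 4x4 Walsh-Hadamard transform pair
  // is selected below in place of the DCT.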
#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth)
    x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vp9_highbd_fdct4x4;
  else
    x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
  x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add :
                                      vp9_highbd_idct4x4_add;
#else
  x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;

  if (xd->lossless)
    x->optimize = 0;

  cm->tx_mode = select_tx_mode(cpi, xd);

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi);
  vp9_initialize_me_consts(cpi, x, cm->base_qindex);
  init_encode_frame_mb_context(cpi);
  cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
                           cm->width == cm->last_width &&
                           cm->height == cm->last_height &&
                           !cm->intra_only &&
                           cm->last_show_frame;
  // Special case: set prev_mi to NULL when the previous mode info
  // context cannot be used.
  cm->prev_mi = cm->use_prev_frame_mvs ?
      cm->prev_mip + cm->mi_stride + 1 : NULL;

  x->quant_fp = cpi->sf.use_quant_fp;
  vp9_zero(x->skip_txfm);
  if (sf->use_nonrd_pick_mode) {
    // Initialize internal buffer pointers for rtc coding, where non-RD
    // mode decision is used and hence no buffer pointer swap is needed.
    int i;
    struct macroblock_plane *const p = x->plane;
    struct macroblockd_plane *const pd = xd->plane;
    PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none;

    for (i = 0; i < MAX_MB_PLANE; ++i) {
      p[i].coeff = ctx->coeff_pbuf[i][0];
      p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
      pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
      p[i].eobs = ctx->eobs_pbuf[i][0];
    }
    vp9_zero(x->zcoeff_blk);

    if (cm->frame_type != KEY_FRAME && cpi->rc.frames_since_golden == 0)
      cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);

    if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
      source_var_based_partition_search_method(cpi);
  }

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

#if CONFIG_FP_MB_STATS
    if (cpi->use_fp_mb_stats) {
      input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
                       &cpi->twopass.this_frame_mb_stats);
    }
#endif

    // If allowed, encode tiles in parallel, with one thread handling one
    // tile.
    if (MIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
      vp9_encode_tiles_mt(cpi);
    else
      encode_tiles(cpi);

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  sf->skip_encode_frame = sf->skip_encode_sb ?
      get_skip_encode_frame(cm, td) : 0;

#if 0
  // Keep a record of the total distortion this time around for future use.
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
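
// Maps the accumulated per-filter RD thresholds to a frame-level
// interpolation filter: a fixed filter is only chosen when its threshold
// beats the cost estimate for per-block SWITCHABLE signalling, and the
// smooth filter is additionally ruled out for alt-ref frames.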
static INTERP_FILTER get_interp_filter(
    const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
  if (!is_alt_ref &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SMOOTH;
  } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
             threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SHARP;
  } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP;
  } else {
    return SWITCHABLE;
  }
}
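
// Top-level frame encode: derives whether compound (two-reference)
// prediction is usable from the reference sign biases, runs the internal
// encode, then folds this frame's RD statistics back into the running
// prediction-mode, filter and transform-size thresholds used by future
// frames of the same type.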
void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;

  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
  if (!frame_is_intra_only(cm)) {
    if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
         cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
        (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
         cm->ref_frame_sign_bias[LAST_FRAME])) {
      cpi->allow_comp_inter_inter = 0;
    } else {
      cpi->allow_comp_inter_inter = 1;
      cm->comp_fixed_ref = ALTREF_FRAME;
      cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = GOLDEN_FRAME;
    }
  }

  if (cpi->sf.frame_parameter_update) {
    int i;
    RD_OPT *const rd_opt = &cpi->rd;
    FRAME_COUNTS *counts = cpi->td.counts;
    RD_COUNTS *const rdc = &cpi->td.rd_counts;

    // This code does a single RD pass over the whole frame assuming
    // either compound, single or hybrid prediction as per whatever has
    // worked best for that type of frame in the past.
    // It also predicts whether another coding mode would have worked
    // better than this coding mode. If that is the case, it remembers
    // that for subsequent frames.
    // It does the same analysis for transform size selection also.
    const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
    int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
    int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
    int *const tx_thrs = rd_opt->tx_select_threshes[frame_type];
    const int is_alt_ref = frame_type == ALTREF_FRAME;

    /* prediction (compound, single or hybrid) mode selection */
    if (is_alt_ref || !cpi->allow_comp_inter_inter)
      cm->reference_mode = SINGLE_REFERENCE;
    else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
             mode_thrs[COMPOUND_REFERENCE] >
                 mode_thrs[REFERENCE_MODE_SELECT] &&
             check_dual_ref_flags(cpi) &&
             cpi->static_mb_pct == 100)
      cm->reference_mode = COMPOUND_REFERENCE;
    else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
      cm->reference_mode = SINGLE_REFERENCE;
    else
      cm->reference_mode = REFERENCE_MODE_SELECT;

    if (cm->interp_filter == SWITCHABLE)
      cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);

    encode_frame_internal(cpi);

    for (i = 0; i < REFERENCE_MODES; ++i)
      mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;

    for (i = 0; i < TX_MODES; ++i) {
      int64_t pd = rdc->tx_select_diff[i];
      if (i == TX_MODE_SELECT)
        pd -= RDCOST(cpi->td.mb.rdmult, cpi->td.mb.rddiv,
                     2048 * (TX_SIZES - 1), 0);
      tx_thrs[i] = (tx_thrs[i] + (int)(pd / cm->MBs)) / 2;
    }

    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
        single_count_zero += counts->comp_inter[i][0];
        comp_count_zero += counts->comp_inter[i][1];
      }

      if (comp_count_zero == 0) {
        cm->reference_mode = SINGLE_REFERENCE;
        vp9_zero(counts->comp_inter);
      } else if (single_count_zero == 0) {
        cm->reference_mode = COMPOUND_REFERENCE;
        vp9_zero(counts->comp_inter);
      }
    }
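
    // If the transform-size counts show that some sizes were never picked,
    // tighten the frame-level tx_mode so less per-block signalling is
    // needed; reset_skip_tx_size() then caps the tx_size recorded for
    // skipped blocks to the new maximum.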
    if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        count4x4 += counts->tx.p32x32[i][TX_4X4];
        count4x4 += counts->tx.p16x16[i][TX_4X4];
        count4x4 += counts->tx.p8x8[i][TX_4X4];

        count8x8_lp += counts->tx.p32x32[i][TX_8X8];
        count8x8_lp += counts->tx.p16x16[i][TX_8X8];
        count8x8_8x8p += counts->tx.p8x8[i][TX_8X8];

        count16x16_16x16p += counts->tx.p16x16[i][TX_16X16];
        count16x16_lp += counts->tx.p32x32[i][TX_16X16];
        count32x32 += counts->tx.p32x32[i][TX_32X32];
      }
      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
        reset_skip_tx_size(cm, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
                 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
        cm->tx_mode = ONLY_4X4;
        reset_skip_tx_size(cm, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_16X16;
        reset_skip_tx_size(cm, TX_16X16);
      }
    }
  } else {
    cm->reference_mode = SINGLE_REFERENCE;
    encode_frame_internal(cpi);
  }
}
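
// Accumulates intra mode counts for a coded block: sub-8x8 blocks count each
// 4x4 sub-block prediction mode, larger blocks count the luma mode within
// their block-size group, and the chroma mode count is conditioned on the
// luma mode.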
static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
  const PREDICTION_MODE y_mode = mi->mbmi.mode;
  const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_h)
      for (idx = 0; idx < 2; idx += num_4x4_w)
        ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }

  ++counts->uv_mode[y_mode][uv_mode];
}
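
// Final encode of a block with its already-chosen mode: builds the intra or
// inter prediction, transforms, quantizes and tokenizes the residual, and,
// when output is enabled, updates the transform-size counts and propagates
// the effective tx_size into every 8x8 mode-info unit the block covers.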
static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO **mi_8x8 = xd->mi;
  MODE_INFO *mi = mi_8x8[0];
  MB_MODE_INFO *mbmi = &mi->mbmi;
  const int seg_skip = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
                                             SEG_LVL_SKIP);
  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];

  x->skip_recode = !x->select_tx_size && mbmi->sb_type >= BLOCK_8X8 &&
                   cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
                   cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
                   cpi->sf.allow_skip_recode;

  if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
    memset(x->skip_txfm, 0, sizeof(x->skip_txfm));

  x->skip_optimize = ctx->is_coded;
  ctx->is_coded = 1;
  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
                    x->q_index < QIDX_SKIP_THRESH);

  if (x->skip_encode)
    return;

  set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);

  if (!is_inter_block(mbmi)) {
    int plane;
    mbmi->skip = 1;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
      vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);
    if (output_enabled)
      sum_intra_stats(td->counts, mi);
    vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  } else {
    int ref;
    const int is_compound = has_second_ref(mbmi);
    for (ref = 0; ref < 1 + is_compound; ++ref) {
      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
                                                     mbmi->ref_frame[ref]);
      assert(cfg != NULL);
      vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                           &xd->block_refs[ref]->sf);
    }
    if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
      vp9_build_inter_predictors_sby(xd, mi_row, mi_col,
                                     MAX(bsize, BLOCK_8X8));

    vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col,
                                    MAX(bsize, BLOCK_8X8));

    vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
    vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  }

  if (output_enabled) {
    if (cm->tx_mode == TX_MODE_SELECT &&
        mbmi->sb_type >= BLOCK_8X8 &&
        !(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
      ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd),
                      &td->counts->tx)[mbmi->tx_size];
    } else {
      int x, y;
      TX_SIZE tx_size;
      // The new intra coding scheme requires no change of transform size.
      if (is_inter_block(&mi->mbmi)) {
        tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                      max_txsize_lookup[bsize]);
      } else {
        tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
      }
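
      // Write the chosen tx_size into every 8x8 mode-info unit covered by
      // the block (clipped at the frame edge), presumably so later stages
      // such as the loop filter see a consistent transform size.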
      for (y = 0; y < mi_height; y++)
        for (x = 0; x < mi_width; x++)
          if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
            mi_8x8[mis * y + x]->mbmi.tx_size = tx_size;
    }
    ++td->counts->tx.tx_totals[mbmi->tx_size];
    ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])];
  }
}