/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <limits.h>
#include <math.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
#define GF_ZEROMV_ZBIN_BOOST 0
#define LF_ZEROMV_ZBIN_BOOST 0
#define MV_ZBIN_BOOST 0
#define SPLIT_MV_ZBIN_BOOST 0
#define INTRA_ZBIN_BOOST 0
static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);
58 // This is used as a reference when computing the source variance for the
59 // purposes of activity masking.
60 // Eventually this should be replaced by custom no-reference routines,
61 // which will be faster.
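// A flat block of mid-grey (128) samples (with high-bit-depth variants
// below). It is passed to the variance functions with a stride of 0, so every
// source pixel is compared against 128 and the result measures the AC energy
// of the source block.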
62 static const uint8_t VP9_VAR_OFFS[64] = {
63 128, 128, 128, 128, 128, 128, 128, 128,
64 128, 128, 128, 128, 128, 128, 128, 128,
65 128, 128, 128, 128, 128, 128, 128, 128,
66 128, 128, 128, 128, 128, 128, 128, 128,
67 128, 128, 128, 128, 128, 128, 128, 128,
68 128, 128, 128, 128, 128, 128, 128, 128,
69 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};
73 #if CONFIG_VP9_HIGHBITDEPTH
74 static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
75 128, 128, 128, 128, 128, 128, 128, 128,
76 128, 128, 128, 128, 128, 128, 128, 128,
77 128, 128, 128, 128, 128, 128, 128, 128,
78 128, 128, 128, 128, 128, 128, 128, 128,
79 128, 128, 128, 128, 128, 128, 128, 128,
80 128, 128, 128, 128, 128, 128, 128, 128,
81 128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};
85 static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
86 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
87 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
88 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
89 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
90 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
91 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
92 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};
96 static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
97 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
98 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
99 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
100 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
101 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
102 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
103 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
106 #endif // CONFIG_VP9_HIGHBITDEPTH
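// Returns the average per-pixel variance of the source block: the block
// variance against the flat reference above is normalized by the pixel count
// of the block (via num_pels_log2_lookup) with rounding.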
static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi,
                                              const struct buf_2d *ref,
                                              BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#if CONFIG_VP9_HIGHBITDEPTH
static unsigned int high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
                               0, &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
                               0, &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
                               0, &sse);
      break;
  }
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
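// As above, but the variance is measured between the source block and the
// co-located block of the reconstructed last frame, giving a rough per-pixel
// measure of temporal change for the block.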
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse, var;
  uint8_t *last_y;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
158 static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
161 unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
174 static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
178 unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi + idx_str;
  xd->mi[0].src_mi = &xd->mi[0];
}
200 static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
201 MACROBLOCK *const x, int mi_row, int mi_col,
203 VP9_COMMON *const cm = &cpi->common;
204 MACROBLOCKD *const xd = &x->e_mbd;
206 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
207 const int mi_height = num_8x8_blocks_high_lookup[bsize];
208 const struct segmentation *const seg = &cm->seg;
210 set_skip_context(xd, mi_row, mi_col);
212 set_mode_info_offsets(cm, xd, mi_row, mi_col);
214 mbmi = &xd->mi[0].src_mi->mbmi;
216 // Set up destination pointers.
217 vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
219 // Set up limit values for MV components.
// MVs beyond this range do not produce a new or different prediction block.
221 x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
222 x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
223 x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
224 x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
226 // Set up distance of MB to edge of frame in 1/8th pel units.
227 assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
228 set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
229 cm->mi_rows, cm->mi_cols);
231 // Set up source buffers.
232 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
235 x->rddiv = cpi->rd.RDDIV;
236 x->rdmult = cpi->rd.RDMULT;
240 if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
241 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
242 : cm->last_frame_seg_map;
243 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
245 vp9_init_plane_quantizers(cpi, x);
247 x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
249 mbmi->segment_id = 0;
250 x->encode_breakout = cpi->encode_breakout;
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  int i, j;
  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) {
      if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
        xd->mi[j * xd->mi_stride + i].src_mi = &xd->mi[0];
    }
}
static void set_block_size(VP9_COMP *const cpi,
                           MACROBLOCKD *const xd,
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
    xd->mi[0].src_mi->mbmi.sb_type = bsize;
  }
}
278 int64_t sum_square_error;
288 } partition_variance;
291 partition_variance part_variances;
296 partition_variance part_variances;
301 partition_variance part_variances;
306 partition_variance part_variances;
311 partition_variance part_variances;
316 partition_variance *part_variances;
326 static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
328 node->part_variances = NULL;
331 v64x64 *vt = (v64x64 *) data;
332 node->part_variances = &vt->part_variances;
333 for (i = 0; i < 4; i++)
334 node->split[i] = &vt->split[i].part_variances.none;
338 v32x32 *vt = (v32x32 *) data;
339 node->part_variances = &vt->part_variances;
340 for (i = 0; i < 4; i++)
341 node->split[i] = &vt->split[i].part_variances.none;
345 v16x16 *vt = (v16x16 *) data;
346 node->part_variances = &vt->part_variances;
347 for (i = 0; i < 4; i++)
348 node->split[i] = &vt->split[i].part_variances.none;
352 v8x8 *vt = (v8x8 *) data;
353 node->part_variances = &vt->part_variances;
354 for (i = 0; i < 4; i++)
355 node->split[i] = &vt->split[i].part_variances.none;
359 v4x4 *vt = (v4x4 *) data;
360 node->part_variances = &vt->part_variances;
361 for (i = 0; i < 4; i++)
362 node->split[i] = &vt->split[i];
372 // Set variance values given sum square error, sum error, count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}
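// Convert the accumulated sums into a variance scaled by 256:
//   variance = 256 * (sum_square_error - sum_error^2 / count) / count,
// where the divisions by count are done as shifts by log2_count.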
static void get_variance(var *v) {
  v->variance = (int)(256 * (v->sum_square_error -
      ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
}
void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}
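// Propagate the variances of the four child blocks one level up the tree:
// they are combined into the two horizontal halves, the two vertical halves
// and the whole ("none") block of the parent.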
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}
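// Examine one node of the variance tree and, when the relevant variances are
// below the given threshold (and the block lies inside the frame), fix the
// partitioning for that block: no split, or a single horizontal or vertical
// split. Returns nonzero if a partitioning was set, zero if the caller must
// recurse into the four sub-blocks.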
401 static int set_vt_partitioning(VP9_COMP *cpi,
402 MACROBLOCKD *const xd,
408 BLOCK_SIZE bsize_min,
410 VP9_COMMON * const cm = &cpi->common;
412 const int block_width = num_8x8_blocks_wide_lookup[bsize];
413 const int block_height = num_8x8_blocks_high_lookup[bsize];
415 assert(block_height == block_width);
416 tree_to_node(data, bsize, &vt);
418 // No 64x64 blocks on segments other than base (un-boosted) segment.
419 if (segment_id != CR_SEGMENT_ID_BASE && bsize == BLOCK_64X64)
422 // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
423 // variance is below threshold, otherwise split will be selected.
424 // No check for vert/horiz split as too few samples for variance.
425 if (bsize == bsize_min) {
426 get_variance(&vt.part_variances->none);
427 if (mi_col + block_width / 2 < cm->mi_cols &&
428 mi_row + block_height / 2 < cm->mi_rows &&
429 vt.part_variances->none.variance < threshold) {
430 set_block_size(cpi, xd, mi_row, mi_col, bsize);
434 } else if (bsize > bsize_min) {
435 get_variance(&vt.part_variances->none);
// For key frame or low_res: for bsize above 32X32 or very high variance,
// take split.
438 if (cm->frame_type == KEY_FRAME &&
439 (bsize > BLOCK_32X32 ||
440 vt.part_variances->none.variance > (threshold << 4))) {
443 // If variance is low, take the bsize (no split).
444 if (mi_col + block_width / 2 < cm->mi_cols &&
445 mi_row + block_height / 2 < cm->mi_rows &&
446 vt.part_variances->none.variance < threshold) {
447 set_block_size(cpi, xd, mi_row, mi_col, bsize);
451 // Check vertical split.
452 if (mi_row + block_height / 2 < cm->mi_rows) {
453 get_variance(&vt.part_variances->vert[0]);
454 get_variance(&vt.part_variances->vert[1]);
455 if (vt.part_variances->vert[0].variance < threshold &&
456 vt.part_variances->vert[1].variance < threshold) {
457 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
458 set_block_size(cpi, xd, mi_row, mi_col, subsize);
459 set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
463 // Check horizontal split.
464 if (mi_col + block_width / 2 < cm->mi_cols) {
465 get_variance(&vt.part_variances->horz[0]);
466 get_variance(&vt.part_variances->horz[1]);
467 if (vt.part_variances->horz[0].variance < threshold &&
468 vt.part_variances->horz[1].variance < threshold) {
469 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
470 set_block_size(cpi, xd, mi_row, mi_col, subsize);
471 set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
482 void vp9_set_vbp_thresholds(VP9_COMP *cpi, int q) {
483 SPEED_FEATURES *const sf = &cpi->sf;
484 if (sf->partition_search_type != VAR_BASED_PARTITION) {
487 VP9_COMMON *const cm = &cpi->common;
488 const VP9EncoderConfig *const oxcf = &cpi->oxcf;
489 const int is_key_frame = (cm->frame_type == KEY_FRAME);
490 const int use_4x4_partition = is_key_frame;
491 const int low_res = (cm->width <= 352 && cm->height <= 288);
492 const int threshold_multiplier = is_key_frame ? 80 : 4;
493 const int64_t threshold_base = (int64_t)(threshold_multiplier *
494 vp9_convert_qindex_to_q(q, cm->bit_depth));
495 cpi->vbp_threshold = threshold_base;
496 cpi->vbp_threshold_bsize_min = threshold_base << oxcf->speed;
497 cpi->vbp_threshold_bsize_max = threshold_base;
500 cpi->vbp_threshold = threshold_base >> 2;
501 cpi->vbp_threshold_bsize_min = threshold_base << 2;
502 } else if (low_res) {
503 cpi->vbp_threshold_bsize_min = threshold_base << 3;
504 cpi->vbp_threshold_bsize_max = threshold_base >> 2;
506 // TODO(marpan): Allow 4x4 partitions for inter-frames.
507 // use_4x4_partition = (variance4x4downsample[i2 + j] == 1);
508 // If 4x4 partition is not used, then 8x8 partition will be selected
509 // if variance of 16x16 block is very high, so use larger threshold
510 // for 16x16 (threshold_bsize_min) in that case.
511 cpi->vbp_threshold_16x16 = (use_4x4_partition) ?
512 cpi->vbp_threshold : cpi->vbp_threshold_bsize_min;
    cpi->vbp_bsize_min = (use_4x4_partition) ? BLOCK_8X8 : BLOCK_16X16;
  }
}
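// Note that all of the vbp_* thresholds set above are proportional to the
// frame-level quantizer, so coarser quantization allows larger blocks to pass
// the low-variance test and remain unsplit.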
517 #if CONFIG_VP9_HIGHBITDEPTH
518 #define GLOBAL_MOTION 0
520 #define GLOBAL_MOTION 1
// This function chooses partitioning based on the variance between source and
// reconstructed last frame, where variance is computed for down-sampled
// inputs.
525 static void choose_partitioning(VP9_COMP *cpi,
526 const TileInfo *const tile,
528 int mi_row, int mi_col) {
529 VP9_COMMON * const cm = &cpi->common;
530 MACROBLOCKD *xd = &x->e_mbd;
538 int pixels_wide = 64, pixels_high = 64;
540 // Always use 4x4 partition for key frame.
541 const int is_key_frame = (cm->frame_type == KEY_FRAME);
542 const int use_4x4_partition = is_key_frame;
543 const int low_res = (cm->width <= 352 && cm->height <= 288);
544 int variance4x4downsample[16];
546 int segment_id = CR_SEGMENT_ID_BASE;
547 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
548 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
549 cm->last_frame_seg_map;
550 segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
553 set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
555 if (xd->mb_to_right_edge < 0)
556 pixels_wide += (xd->mb_to_right_edge >> 3);
557 if (xd->mb_to_bottom_edge < 0)
558 pixels_high += (xd->mb_to_bottom_edge >> 3);
560 s = x->plane[0].src.buf;
561 sp = x->plane[0].src.stride;
564 MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
570 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
571 assert(yv12 != NULL);
572 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
573 &cm->frame_refs[LAST_FRAME - 1].sf);
574 mbmi->ref_frame[0] = LAST_FRAME;
575 mbmi->ref_frame[1] = NONE;
576 mbmi->sb_type = BLOCK_64X64;
577 mbmi->mv[0].as_int = 0;
578 mbmi->interp_filter = BILINEAR;
581 if (mi_row + 4 < cm->mi_rows && mi_col + 4 < cm->mi_cols)
583 else if (mi_row + 4 < cm->mi_rows && mi_col + 4 >= cm->mi_cols)
585 else if (mi_row + 4 >= cm->mi_rows && mi_col + 4 < cm->mi_cols)
590 y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize);
593 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
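// For each chroma plane, compare its SAD against the luma SAD (or a fixed
// cutoff) and flag the block as colour sensitive when the chroma error is
// relatively large, so that later mode decisions do not ignore chroma.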
595 for (i = 1; i <= 2; ++i) {
596 struct macroblock_plane *p = &x->plane[i];
597 struct macroblockd_plane *pd = &xd->plane[i];
599 const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
601 const BLOCK_SIZE bs = get_plane_block_size(BLOCK_64X64, pd);
603 if (bs == BLOCK_INVALID)
606 uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
607 pd->dst.buf, pd->dst.stride);
610 x->color_sensitivity[i - 1] = uv_sad * 4 > y_sad;
612 x->color_sensitivity[i - 1] = (uv_sad > 512);
616 d = xd->plane[0].dst.buf;
617 dp = xd->plane[0].dst.stride;
621 #if CONFIG_VP9_HIGHBITDEPTH
622 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
625 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
628 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
632 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
636 #endif // CONFIG_VP9_HIGHBITDEPTH
639 // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
641 for (i = 0; i < 4; i++) {
642 const int x32_idx = ((i & 1) << 5);
643 const int y32_idx = ((i >> 1) << 5);
644 const int i2 = i << 2;
645 for (j = 0; j < 4; j++) {
646 const int x16_idx = x32_idx + ((j & 1) << 4);
647 const int y16_idx = y32_idx + ((j >> 1) << 4);
648 v16x16 *vst = &vt.split[i].split[j];
649 variance4x4downsample[i2 + j] = 0;
651 for (k = 0; k < 4; k++) {
652 int x8_idx = x16_idx + ((k & 1) << 3);
653 int y8_idx = y16_idx + ((k >> 1) << 3);
654 unsigned int sse = 0;
656 if (x8_idx < pixels_wide && y8_idx < pixels_high) {
658 #if CONFIG_VP9_HIGHBITDEPTH
659 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
660 s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
661 d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
663 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
664 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
667 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
668 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
// If variance is based on 8x8 down-sampling, we stop here and have only
// one sample for the 8x8 block (so pass 0 for log2_count, i.e. a count of
// 1, to fill_variance), which of course means the variance of the 8x8
// block itself is 0.
676 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
678 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
679 // For low-resolution, compute the variance based on 8x8 down-sampling,
680 // and if it is large (above the threshold) we go down for 4x4.
681 // For key frame we always go down to 4x4.
683 get_variance(&vt.split[i].split[j].part_variances.none);
685 if (is_key_frame || (low_res &&
686 vt.split[i].split[j].part_variances.none.variance >
687 (cpi->vbp_threshold << 1))) {
688 // Go down to 4x4 down-sampling for variance.
689 variance4x4downsample[i2 + j] = 1;
690 for (k = 0; k < 4; k++) {
691 int x8_idx = x16_idx + ((k & 1) << 3);
692 int y8_idx = y16_idx + ((k >> 1) << 3);
693 v8x8 *vst2 = is_key_frame ? &vst->split[k] :
694 &vt2[i2 + j].split[k];
695 for (m = 0; m < 4; m++) {
696 int x4_idx = x8_idx + ((m & 1) << 2);
697 int y4_idx = y8_idx + ((m >> 1) << 2);
698 unsigned int sse = 0;
700 if (x4_idx < pixels_wide && y4_idx < pixels_high) {
702 #if CONFIG_VP9_HIGHBITDEPTH
704 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
705 s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
706 if (cm->frame_type != KEY_FRAME)
707 d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
709 s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
710 if (cm->frame_type != KEY_FRAME)
711 d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
714 int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
716 d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
// If variance is based on 4x4 down-sampling, we stop here and have only
// one sample for the 4x4 block (so pass 0 for log2_count, i.e. a count of
// 1, to fill_variance), which means the variance of the 4x4 block itself
// is 0.
724 fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none);
731 // Fill the rest of the variance tree by summing split partition values.
732 for (i = 0; i < 4; i++) {
733 const int i2 = i << 2;
734 for (j = 0; j < 4; j++) {
735 if (variance4x4downsample[i2 + j] == 1) {
736 v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
737 &vt.split[i].split[j];
738 for (m = 0; m < 4; m++) {
739 fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
741 fill_variance_tree(vtemp, BLOCK_16X16);
744 fill_variance_tree(&vt.split[i], BLOCK_32X32);
746 fill_variance_tree(&vt, BLOCK_64X64);
// Now go through the entire structure, splitting every block size until
// we get to one whose variance is lower than our threshold.
if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
751 !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
752 cpi->vbp_threshold_bsize_max, BLOCK_16X16,
754 for (i = 0; i < 4; ++i) {
755 const int x32_idx = ((i & 1) << 2);
756 const int y32_idx = ((i >> 1) << 2);
757 const int i2 = i << 2;
758 if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
759 (mi_row + y32_idx), (mi_col + x32_idx),
761 BLOCK_16X16, segment_id)) {
762 for (j = 0; j < 4; ++j) {
763 const int x16_idx = ((j & 1) << 1);
764 const int y16_idx = ((j >> 1) << 1);
765 // For inter frames: if variance4x4downsample[] == 1 for this 16x16
766 // block, then the variance is based on 4x4 down-sampling, so use vt2
// in set_vt_partitioning(), otherwise use vt.
768 v16x16 *vtemp = (!is_key_frame &&
769 variance4x4downsample[i2 + j] == 1) ?
770 &vt2[i2 + j] : &vt.split[i].split[j];
771 if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
772 mi_row + y32_idx + y16_idx,
773 mi_col + x32_idx + x16_idx,
774 cpi->vbp_threshold_16x16,
775 cpi->vbp_bsize_min, segment_id)) {
776 for (k = 0; k < 4; ++k) {
777 const int x8_idx = (k & 1);
778 const int y8_idx = (k >> 1);
779 if (use_4x4_partition) {
780 if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
782 mi_row + y32_idx + y16_idx + y8_idx,
783 mi_col + x32_idx + x16_idx + x8_idx,
784 cpi->vbp_threshold_bsize_min,
785 BLOCK_8X8, segment_id)) {
786 set_block_size(cpi, xd,
787 (mi_row + y32_idx + y16_idx + y8_idx),
788 (mi_col + x32_idx + x16_idx + x8_idx),
792 set_block_size(cpi, xd,
793 (mi_row + y32_idx + y16_idx + y8_idx),
794 (mi_col + x32_idx + x16_idx + x8_idx),
805 static void update_state(VP9_COMP *cpi, ThreadData *td,
806 PICK_MODE_CONTEXT *ctx,
807 int mi_row, int mi_col, BLOCK_SIZE bsize,
808 int output_enabled) {
810 VP9_COMMON *const cm = &cpi->common;
811 RD_COUNTS *const rdc = &td->rd_counts;
812 MACROBLOCK *const x = &td->mb;
813 MACROBLOCKD *const xd = &x->e_mbd;
814 struct macroblock_plane *const p = x->plane;
815 struct macroblockd_plane *const pd = xd->plane;
816 MODE_INFO *mi = &ctx->mic;
817 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
818 MODE_INFO *mi_addr = &xd->mi[0];
819 const struct segmentation *const seg = &cm->seg;
820 const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
821 const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
822 const int x_mis = MIN(bw, cm->mi_cols - mi_col);
823 const int y_mis = MIN(bh, cm->mi_rows - mi_row);
824 MV_REF *const frame_mvs =
825 cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
828 const int mis = cm->mi_stride;
829 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
830 const int mi_height = num_8x8_blocks_high_lookup[bsize];
833 assert(mi->mbmi.sb_type == bsize);
836 mi_addr->src_mi = mi_addr;
// If segmentation in use
if (seg->enabled) {
840 // For in frame complexity AQ copy the segment id from the segment map.
841 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
842 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
843 : cm->last_frame_seg_map;
844 mi_addr->mbmi.segment_id =
845 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
847 // Else for cyclic refresh mode update the segment map, set the segment id
848 // and then update the quantizer.
849 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
850 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, mi_row,
851 mi_col, bsize, ctx->rate, ctx->dist);
855 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
856 for (i = 0; i < max_plane; ++i) {
857 p[i].coeff = ctx->coeff_pbuf[i][1];
858 p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
859 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
860 p[i].eobs = ctx->eobs_pbuf[i][1];
863 for (i = max_plane; i < MAX_MB_PLANE; ++i) {
864 p[i].coeff = ctx->coeff_pbuf[i][2];
865 p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
866 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
867 p[i].eobs = ctx->eobs_pbuf[i][2];
// Restore the coding context of the MB to that which was in place
// when the mode was picked for it.
872 for (y = 0; y < mi_height; y++)
873 for (x_idx = 0; x_idx < mi_width; x_idx++)
874 if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
875 && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
876 xd->mi[x_idx + y * mis].src_mi = mi_addr;
879 if (cpi->oxcf.aq_mode)
880 vp9_init_plane_quantizers(cpi, x);
882 // FIXME(rbultje) I'm pretty sure this should go to the end of this block
883 // (i.e. after the output_enabled)
884 if (bsize < BLOCK_32X32) {
885 if (bsize < BLOCK_16X16)
886 ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
887 ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
890 if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
891 mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
892 mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
896 vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
897 sizeof(uint8_t) * ctx->num_4x4_blk);
902 if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
903 for (i = 0; i < TX_MODES; i++)
904 rdc->tx_select_diff[i] += ctx->tx_rd_diff[i];
907 #if CONFIG_INTERNAL_STATS
908 if (frame_is_intra_only(cm)) {
909 static const int kf_mode_index[] = {
911 THR_V_PRED /*V_PRED*/,
912 THR_H_PRED /*H_PRED*/,
913 THR_D45_PRED /*D45_PRED*/,
914 THR_D135_PRED /*D135_PRED*/,
915 THR_D117_PRED /*D117_PRED*/,
916 THR_D153_PRED /*D153_PRED*/,
917 THR_D207_PRED /*D207_PRED*/,
918 THR_D63_PRED /*D63_PRED*/,
921 ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
} else {
  // Note how often each mode is chosen as best.
924 ++cpi->mode_chosen_counts[ctx->best_mode_index];
927 if (!frame_is_intra_only(cm)) {
928 if (is_inter_block(mbmi)) {
929 vp9_update_mv_count(td);
931 if (cm->interp_filter == SWITCHABLE) {
932 const int ctx = vp9_get_pred_context_switchable_interp(xd);
933 ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
937 rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
938 rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
939 rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
941 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
942 rdc->filter_diff[i] += ctx->best_filter_diff[i];
945 for (h = 0; h < y_mis; ++h) {
946 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
947 for (w = 0; w < x_mis; ++w) {
948 MV_REF *const mv = frame_mv + w;
949 mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
950 mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
951 mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
952 mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
957 void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
958 int mi_row, int mi_col) {
959 uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
960 const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride };
963 // Set current frame pointer.
964 x->e_mbd.cur_buf = src;
966 for (i = 0; i < MAX_MB_PLANE; i++)
967 setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}
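// Fill in the mode info for a block whose segment has the SEG_LVL_SKIP
// feature active: a zero-motion inter block predicted from the last frame,
// DC chroma prediction, and the largest transform size the block and the
// frame's tx_mode allow.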
972 static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
973 RD_COST *rd_cost, BLOCK_SIZE bsize) {
974 MACROBLOCKD *const xd = &x->e_mbd;
975 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
976 INTERP_FILTER filter_ref;
978 if (xd->up_available)
979 filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
980 else if (xd->left_available)
981 filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;
983 filter_ref = EIGHTTAP;
985 mbmi->sb_type = bsize;
987 mbmi->tx_size = MIN(max_txsize_lookup[bsize],
988 tx_mode_to_biggest_tx_size[tx_mode]);
990 mbmi->uv_mode = DC_PRED;
991 mbmi->ref_frame[0] = LAST_FRAME;
992 mbmi->ref_frame[1] = NONE;
993 mbmi->mv[0].as_int = 0;
994 mbmi->interp_filter = filter_ref;
996 xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = 0;
999 vp9_rd_cost_init(rd_cost);
1002 static int set_segment_rdmult(VP9_COMP *const cpi,
1003 MACROBLOCK *const x,
1004 int8_t segment_id) {
1006 VP9_COMMON *const cm = &cpi->common;
1007 vp9_init_plane_quantizers(cpi, x);
1008 vp9_clear_system_state();
  segment_qindex = vp9_get_qindex(&cm->seg, segment_id,
                                  cm->base_qindex);
  return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}
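// Pick the best coding mode for the block of size bsize at (mi_row, mi_col),
// returning its rate/distortion cost in rd_cost and leaving the chosen mode
// in ctx. best_rd allows the search to terminate early once it can no longer
// beat the best cost seen so far.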
1014 static void rd_pick_sb_modes(VP9_COMP *cpi,
1015 TileDataEnc *tile_data,
1016 MACROBLOCK *const x,
1017 int mi_row, int mi_col, RD_COST *rd_cost,
1018 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
1020 VP9_COMMON *const cm = &cpi->common;
1021 TileInfo *const tile_info = &tile_data->tile_info;
1022 MACROBLOCKD *const xd = &x->e_mbd;
1024 struct macroblock_plane *const p = x->plane;
1025 struct macroblockd_plane *const pd = xd->plane;
1026 const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
1029 vp9_clear_system_state();
1031 // Use the lower precision, but faster, 32x32 fdct for mode selection.
1032 x->use_lp32x32fdct = 1;
1034 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
1035 mbmi = &xd->mi[0].src_mi->mbmi;
1036 mbmi->sb_type = bsize;
1038 for (i = 0; i < MAX_MB_PLANE; ++i) {
1039 p[i].coeff = ctx->coeff_pbuf[i][0];
1040 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
1041 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
1042 p[i].eobs = ctx->eobs_pbuf[i][0];
1046 ctx->pred_pixel_ready = 0;
// Set to zero to make sure we do not use the previously encoded frame stats.
mbmi->skip = 0;
1052 #if CONFIG_VP9_HIGHBITDEPTH
1053 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1054 x->source_variance =
1055 high_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize, xd->bd);
1057 x->source_variance =
1058 get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
1061 x->source_variance = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
1062 #endif // CONFIG_VP9_HIGHBITDEPTH
1064 // Save rdmult before it might be changed, so it can be restored later.
1065 orig_rdmult = x->rdmult;
1067 if (aq_mode == VARIANCE_AQ) {
1068 const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
1069 : vp9_block_energy(cpi, x, bsize);
1070 if (cm->frame_type == KEY_FRAME ||
1071 cpi->refresh_alt_ref_frame ||
1072 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
1073 mbmi->segment_id = vp9_vaq_segment_id(energy);
1075 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
1076 : cm->last_frame_seg_map;
1077 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
1079 x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
1080 } else if (aq_mode == COMPLEXITY_AQ) {
1081 x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
1082 } else if (aq_mode == CYCLIC_REFRESH_AQ) {
1083 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
1084 : cm->last_frame_seg_map;
1085 // If segment 1, use rdmult for that segment.
1086 if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col))
1087 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
1090 // Find best coding mode & reconstruct the MB so it is available
1091 // as a predictor for MBs that follow in the SB
1092 if (frame_is_intra_only(cm)) {
1093 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
1095 if (bsize >= BLOCK_8X8) {
1096 if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
1097 vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
1100 vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
1101 rd_cost, bsize, ctx, best_rd);
1103 vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
1104 rd_cost, bsize, ctx, best_rd);
1109 // Examine the resulting rate and for AQ mode 2 make a segment choice.
1110 if ((rd_cost->rate != INT_MAX) &&
1111 (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
1112 (cm->frame_type == KEY_FRAME ||
1113 cpi->refresh_alt_ref_frame ||
1114 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
1115 vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
1118 x->rdmult = orig_rdmult;
// TODO(jingning) The rate-distortion optimization flow needs to be
// refactored to provide proper exit/return handling.
1122 if (rd_cost->rate == INT_MAX)
1123 rd_cost->rdcost = INT64_MAX;
  ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}
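// Accumulate the entropy-coding statistics (intra/inter, reference-frame and
// inter-mode counts) contributed by the block that has just been coded; these
// counts later drive the frame's probability updates.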
1129 static void update_stats(VP9_COMMON *cm, ThreadData *td) {
1130 const MACROBLOCK *x = &td->mb;
1131 const MACROBLOCKD *const xd = &x->e_mbd;
1132 const MODE_INFO *const mi = xd->mi[0].src_mi;
1133 const MB_MODE_INFO *const mbmi = &mi->mbmi;
1134 const BLOCK_SIZE bsize = mbmi->sb_type;
1136 if (!frame_is_intra_only(cm)) {
1137 FRAME_COUNTS *const counts = td->counts;
1138 const int inter_block = is_inter_block(mbmi);
1139 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
1141 if (!seg_ref_active) {
1142 counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;
1143 // If the segment reference feature is enabled we have only a single
1144 // reference frame allowed for the segment so exclude it from
1145 // the reference frame counts used to work out probabilities.
1147 const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
1148 if (cm->reference_mode == REFERENCE_MODE_SELECT)
1149 counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
1150 [has_second_ref(mbmi)]++;
1152 if (has_second_ref(mbmi)) {
1153 counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
1154 [ref0 == GOLDEN_FRAME]++;
1156 counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
1157 [ref0 != LAST_FRAME]++;
1158 if (ref0 != LAST_FRAME)
1159 counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
1160 [ref0 != GOLDEN_FRAME]++;
1165 !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
1166 const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
1167 if (bsize >= BLOCK_8X8) {
1168 const PREDICTION_MODE mode = mbmi->mode;
1169 ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
1171 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
1172 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
1174 for (idy = 0; idy < 2; idy += num_4x4_h) {
1175 for (idx = 0; idx < 2; idx += num_4x4_w) {
1176 const int j = idy * 2 + idx;
1177 const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
1178 ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
1186 static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
1187 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
1188 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
1189 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
1191 MACROBLOCKD *const xd = &x->e_mbd;
1193 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1194 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1195 int mi_width = num_8x8_blocks_wide_lookup[bsize];
1196 int mi_height = num_8x8_blocks_high_lookup[bsize];
1197 for (p = 0; p < MAX_MB_PLANE; p++) {
1199 xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
1200 a + num_4x4_blocks_wide * p,
1201 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
1202 xd->plane[p].subsampling_x);
1205 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
1206 l + num_4x4_blocks_high * p,
1207 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
1208 xd->plane[p].subsampling_y);
1210 vpx_memcpy(xd->above_seg_context + mi_col, sa,
1211 sizeof(*xd->above_seg_context) * mi_width);
1212 vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
1213 sizeof(xd->left_seg_context[0]) * mi_height);
1216 static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
1217 ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
1218 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
1219 PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
1221 const MACROBLOCKD *const xd = &x->e_mbd;
1223 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
1224 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
1225 int mi_width = num_8x8_blocks_wide_lookup[bsize];
1226 int mi_height = num_8x8_blocks_high_lookup[bsize];
1228 // buffer the above/left context information of the block in search.
1229 for (p = 0; p < MAX_MB_PLANE; ++p) {
1231 a + num_4x4_blocks_wide * p,
1232 xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
1233 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
1234 xd->plane[p].subsampling_x);
1236 l + num_4x4_blocks_high * p,
1238 + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
1239 (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
1240 xd->plane[p].subsampling_y);
1242 vpx_memcpy(sa, xd->above_seg_context + mi_col,
1243 sizeof(*xd->above_seg_context) * mi_width);
1244 vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
1245 sizeof(xd->left_seg_context[0]) * mi_height);
1248 static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
1250 TOKENEXTRA **tp, int mi_row, int mi_col,
1251 int output_enabled, BLOCK_SIZE bsize,
1252 PICK_MODE_CONTEXT *ctx) {
1253 MACROBLOCK *const x = &td->mb;
1254 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
1255 update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
1256 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
1258 if (output_enabled) {
1259 update_stats(&cpi->common, td);
1261 (*tp)->token = EOSB_TOKEN;
1266 static void encode_sb(VP9_COMP *cpi, ThreadData *td,
1267 const TileInfo *const tile,
1268 TOKENEXTRA **tp, int mi_row, int mi_col,
1269 int output_enabled, BLOCK_SIZE bsize,
1271 VP9_COMMON *const cm = &cpi->common;
1272 MACROBLOCK *const x = &td->mb;
1273 MACROBLOCKD *const xd = &x->e_mbd;
1275 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
1277 PARTITION_TYPE partition;
1278 BLOCK_SIZE subsize = bsize;
1280 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1283 if (bsize >= BLOCK_8X8) {
1284 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
1285 subsize = get_subsize(bsize, pc_tree->partitioning);
1288 subsize = BLOCK_4X4;
1291 partition = partition_lookup[bsl][subsize];
1292 if (output_enabled && bsize != BLOCK_4X4)
1293 td->counts->partition[ctx][partition]++;
1295 switch (partition) {
1296 case PARTITION_NONE:
1297 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1300 case PARTITION_VERT:
1301 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1302 &pc_tree->vertical[0]);
1303 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
1304 encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
1305 subsize, &pc_tree->vertical[1]);
1308 case PARTITION_HORZ:
1309 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1310 &pc_tree->horizontal[0]);
1311 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
1312 encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
1313 subsize, &pc_tree->horizontal[1]);
1316 case PARTITION_SPLIT:
1317 if (bsize == BLOCK_8X8) {
1318 encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
1319 pc_tree->leaf_split[0]);
1321 encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1323 encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1324 subsize, pc_tree->split[1]);
1325 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1326 subsize, pc_tree->split[2]);
1327 encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
1328 subsize, pc_tree->split[3]);
1332 assert(0 && "Invalid partition type.");
1336 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
1337 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not, then return the largest allowed partition size.
1343 static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
1344 int rows_left, int cols_left,
1346 if (rows_left <= 0 || cols_left <= 0) {
1347 return MIN(bsize, BLOCK_8X8);
1349 for (; bsize > 0; bsize -= 3) {
1350 *bh = num_8x8_blocks_high_lookup[bsize];
1351 *bw = num_8x8_blocks_wide_lookup[bsize];
1352 if ((*bh <= rows_left) && (*bw <= cols_left)) {
1360 static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
1361 int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
1362 BLOCK_SIZE bsize, MODE_INFO *mi_8x8) {
1365 for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
1367 for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
1368 const int index = r * mis + c;
1369 mi_8x8[index].src_mi = mi + index;
1370 mi_8x8[index].src_mi->mbmi.sb_type = find_partition_size(bsize,
1371 row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
1376 // This function attempts to set all mode info entries in a given SB64
1377 // to the same block partition size.
1378 // However, at the bottom and right borders of the image the requested size
1379 // may not be allowed in which case this code attempts to choose the largest
1380 // allowable partition.
1381 static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
1382 MODE_INFO *mi_8x8, int mi_row, int mi_col,
1384 VP9_COMMON *const cm = &cpi->common;
1385 const int mis = cm->mi_stride;
1386 const int row8x8_remaining = tile->mi_row_end - mi_row;
1387 const int col8x8_remaining = tile->mi_col_end - mi_col;
1388 int block_row, block_col;
1389 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
1390 int bh = num_8x8_blocks_high_lookup[bsize];
1391 int bw = num_8x8_blocks_wide_lookup[bsize];
1393 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
1395 // Apply the requested partition size to the SB64 if it is all "in image"
1396 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
1397 (row8x8_remaining >= MI_BLOCK_SIZE)) {
1398 for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
1399 for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
1400 int index = block_row * mis + block_col;
1401 mi_8x8[index].src_mi = mi_upper_left + index;
1402 mi_8x8[index].src_mi->mbmi.sb_type = bsize;
1406 // Else this is a partial SB64.
1407 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
1408 col8x8_remaining, bsize, mi_8x8);
1415 } coord_lookup[16] = {
1417 {0, 0}, {0, 2}, {2, 0}, {2, 2},
1419 {0, 4}, {0, 6}, {2, 4}, {2, 6},
1421 {4, 0}, {4, 2}, {6, 0}, {6, 2},
1423 {4, 4}, {4, 6}, {6, 4}, {6, 6},
1426 static void set_source_var_based_partition(VP9_COMP *cpi,
1427 const TileInfo *const tile,
1428 MACROBLOCK *const x,
1430 int mi_row, int mi_col) {
1431 VP9_COMMON *const cm = &cpi->common;
1432 const int mis = cm->mi_stride;
1433 const int row8x8_remaining = tile->mi_row_end - mi_row;
1434 const int col8x8_remaining = tile->mi_col_end - mi_col;
1435 MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
1437 vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
1439 assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
1442 if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
1443 (row8x8_remaining >= MI_BLOCK_SIZE)) {
1447 const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
1448 int is_larger_better = 0;
1450 unsigned int thr = cpi->source_var_thresh;
1452 vpx_memset(d32, 0, 4 * sizeof(diff));
1454 for (i = 0; i < 4; i++) {
1457 for (j = 0; j < 4; j++) {
1458 int b_mi_row = coord_lookup[i * 4 + j].row;
1459 int b_mi_col = coord_lookup[i * 4 + j].col;
1460 int boffset = b_mi_row / 2 * cm->mb_cols +
1463 d16[j] = cpi->source_diff_var + offset + boffset;
1465 index = b_mi_row * mis + b_mi_col;
1466 mi_8x8[index].src_mi = mi_upper_left + index;
1467 mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_16X16;
1469 // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
1470 // size to further improve quality.
1473 is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
1474 (d16[2]->var < thr) && (d16[3]->var < thr);
1476 // Use 32x32 partition
1477 if (is_larger_better) {
1480 for (j = 0; j < 4; j++) {
1481 d32[i].sse += d16[j]->sse;
1482 d32[i].sum += d16[j]->sum;
1485 d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
1487 index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
1488 mi_8x8[index].src_mi = mi_upper_left + index;
1489 mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_32X32;
1493 if (use32x32 == 4) {
1495 is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
1496 (d32[2].var < thr) && (d32[3].var < thr);
1498 // Use 64x64 partition
1499 if (is_larger_better) {
1500 mi_8x8[0].src_mi = mi_upper_left;
1501 mi_8x8[0].src_mi->mbmi.sb_type = BLOCK_64X64;
1504 } else { // partial in-image SB64
1505 int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
1506 int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
1507 set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
1508 row8x8_remaining, col8x8_remaining, BLOCK_16X16, mi_8x8);
1512 static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
1513 PICK_MODE_CONTEXT *ctx,
1514 int mi_row, int mi_col, int bsize) {
1515 VP9_COMMON *const cm = &cpi->common;
1516 MACROBLOCK *const x = &td->mb;
1517 MACROBLOCKD *const xd = &x->e_mbd;
1518 MODE_INFO *const mi = xd->mi[0].src_mi;
1519 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
1520 const struct segmentation *const seg = &cm->seg;
1521 const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
1522 const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
1523 const int x_mis = MIN(bw, cm->mi_cols - mi_col);
1524 const int y_mis = MIN(bh, cm->mi_rows - mi_row);
1526 xd->mi[0] = ctx->mic;
1527 xd->mi[0].src_mi = &xd->mi[0];
1529 if (seg->enabled && cpi->oxcf.aq_mode) {
1530 // For in frame complexity AQ or variance AQ, copy segment_id from
1531 // segmentation_map.
1532 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ ||
1533 cpi->oxcf.aq_mode == VARIANCE_AQ ) {
1534 const uint8_t *const map = seg->update_map ? cpi->segmentation_map
1535 : cm->last_frame_seg_map;
1536 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
1538 // Setting segmentation map for cyclic_refresh.
1539 vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize,
1540 ctx->rate, ctx->dist);
1542 vp9_init_plane_quantizers(cpi, x);
1545 if (is_inter_block(mbmi)) {
1546 vp9_update_mv_count(td);
1547 if (cm->interp_filter == SWITCHABLE) {
1548 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
1549 ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter];
1552 if (mbmi->sb_type < BLOCK_8X8) {
1553 mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
1554 mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
1558 if (cm->use_prev_frame_mvs) {
1559 MV_REF *const frame_mvs =
1560 cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
1563 for (h = 0; h < y_mis; ++h) {
1564 MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
1565 for (w = 0; w < x_mis; ++w) {
1566 MV_REF *const mv = frame_mv + w;
1567 mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
1568 mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
1569 mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
1570 mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
1575 x->skip = ctx->skip;
1576 x->skip_txfm[0] = mbmi->segment_id ? 0 : ctx->skip_txfm[0];
1579 static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
1580 const TileInfo *const tile,
1581 TOKENEXTRA **tp, int mi_row, int mi_col,
1582 int output_enabled, BLOCK_SIZE bsize,
1583 PICK_MODE_CONTEXT *ctx) {
1584 MACROBLOCK *const x = &td->mb;
1585 set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
1586 update_state_rt(cpi, td, ctx, mi_row, mi_col, bsize);
1588 #if CONFIG_VP9_TEMPORAL_DENOISING
1589 if (cpi->oxcf.noise_sensitivity > 0 && output_enabled) {
1590 vp9_denoiser_denoise(&cpi->denoiser, x, mi_row, mi_col,
1591 MAX(BLOCK_8X8, bsize), ctx);
1595 encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
1596 update_stats(&cpi->common, td);
1598 (*tp)->token = EOSB_TOKEN;
1602 static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
1603 const TileInfo *const tile,
1604 TOKENEXTRA **tp, int mi_row, int mi_col,
1605 int output_enabled, BLOCK_SIZE bsize,
1607 VP9_COMMON *const cm = &cpi->common;
1608 MACROBLOCK *const x = &td->mb;
1609 MACROBLOCKD *const xd = &x->e_mbd;
1611 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
1613 PARTITION_TYPE partition;
1616 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1619 if (bsize >= BLOCK_8X8) {
1620 const int idx_str = xd->mi_stride * mi_row + mi_col;
1621 MODE_INFO *mi_8x8 = cm->mi[idx_str].src_mi;
1622 ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
1623 subsize = mi_8x8[0].src_mi->mbmi.sb_type;
1626 subsize = BLOCK_4X4;
1629 partition = partition_lookup[bsl][subsize];
1630 if (output_enabled && bsize != BLOCK_4X4)
1631 td->counts->partition[ctx][partition]++;
1633 switch (partition) {
1634 case PARTITION_NONE:
1635 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1638 case PARTITION_VERT:
1639 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1640 &pc_tree->vertical[0]);
1641 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
1642 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1643 subsize, &pc_tree->vertical[1]);
1646 case PARTITION_HORZ:
1647 encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1648 &pc_tree->horizontal[0]);
1649 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
1650 encode_b_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1651 subsize, &pc_tree->horizontal[1]);
1654 case PARTITION_SPLIT:
1655 subsize = get_subsize(bsize, PARTITION_SPLIT);
1656 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
1658 encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
1659 subsize, pc_tree->split[1]);
1660 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
1661 subsize, pc_tree->split[2]);
1662 encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
1663 output_enabled, subsize, pc_tree->split[3]);
1666 assert(0 && "Invalid partition type.");
1670 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
1671 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
1674 static void rd_use_partition(VP9_COMP *cpi,
1676 TileDataEnc *tile_data,
1677 MODE_INFO *mi_8x8, TOKENEXTRA **tp,
1678 int mi_row, int mi_col,
1680 int *rate, int64_t *dist,
1681 int do_recon, PC_TREE *pc_tree) {
1682 VP9_COMMON *const cm = &cpi->common;
1683 TileInfo *const tile_info = &tile_data->tile_info;
1684 MACROBLOCK *const x = &td->mb;
1685 MACROBLOCKD *const xd = &x->e_mbd;
1686 const int mis = cm->mi_stride;
1687 const int bsl = b_width_log2_lookup[bsize];
1688 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
1689 const int bss = (1 << bsl) / 4;
1691 PARTITION_TYPE partition = PARTITION_NONE;
1693 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
1694 PARTITION_CONTEXT sl[8], sa[8];
1695 RD_COST last_part_rdc, none_rdc, chosen_rdc;
1696 BLOCK_SIZE sub_subsize = BLOCK_4X4;
1697 int splits_below = 0;
1698 BLOCK_SIZE bs_type = mi_8x8[0].src_mi->mbmi.sb_type;
1699 int do_partition_search = 1;
1700 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
1702 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
1705 assert(num_4x4_blocks_wide_lookup[bsize] ==
1706 num_4x4_blocks_high_lookup[bsize]);
1708 vp9_rd_cost_reset(&last_part_rdc);
1709 vp9_rd_cost_reset(&none_rdc);
1710 vp9_rd_cost_reset(&chosen_rdc);
1712 partition = partition_lookup[bsl][bs_type];
1713 subsize = get_subsize(bsize, partition);
1715 pc_tree->partitioning = partition;
1716 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1718 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
1719 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
1720 x->mb_energy = vp9_block_energy(cpi, x, bsize);
1723 if (do_partition_search &&
1724 cpi->sf.partition_search_type == SEARCH_PARTITION &&
1725 cpi->sf.adjust_partitioning_from_last_frame) {
1726 // Check if any of the sub blocks are further split.
1727 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
1728 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
1730 for (i = 0; i < 4; i++) {
1731 int jj = i >> 1, ii = i & 0x01;
1732 MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss].src_mi;
1733 if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
// If partition is not none, try none unless each of the 4 splits are split
// even further.
1741 if (partition != PARTITION_NONE && !splits_below &&
1742 mi_row + (mi_step >> 1) < cm->mi_rows &&
1743 mi_col + (mi_step >> 1) < cm->mi_cols) {
1744 pc_tree->partitioning = PARTITION_NONE;
1745 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize,
1748 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1750 if (none_rdc.rate < INT_MAX) {
1751 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
1752 none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
1756 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1757 mi_8x8[0].src_mi->mbmi.sb_type = bs_type;
1758 pc_tree->partitioning = partition;
1762 switch (partition) {
1763 case PARTITION_NONE:
1764 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1765 bsize, ctx, INT64_MAX);
1767 case PARTITION_HORZ:
1768 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1769 subsize, &pc_tree->horizontal[0],
1771 if (last_part_rdc.rate != INT_MAX &&
1772 bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
1774 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
1775 vp9_rd_cost_init(&tmp_rdc);
1776 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
1777 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
1778 rd_pick_sb_modes(cpi, tile_data, x,
1779 mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
1780 subsize, &pc_tree->horizontal[1], INT64_MAX);
1781 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1782 vp9_rd_cost_reset(&last_part_rdc);
1785 last_part_rdc.rate += tmp_rdc.rate;
1786 last_part_rdc.dist += tmp_rdc.dist;
1787 last_part_rdc.rdcost += tmp_rdc.rdcost;
1790 case PARTITION_VERT:
1791 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1792 subsize, &pc_tree->vertical[0], INT64_MAX);
1793 if (last_part_rdc.rate != INT_MAX &&
1794 bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
1796 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
1797 vp9_rd_cost_init(&tmp_rdc);
1798 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
1799 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
1800 rd_pick_sb_modes(cpi, tile_data, x,
1801 mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
1802 subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
1804 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1805 vp9_rd_cost_reset(&last_part_rdc);
1808 last_part_rdc.rate += tmp_rdc.rate;
1809 last_part_rdc.dist += tmp_rdc.dist;
1810 last_part_rdc.rdcost += tmp_rdc.rdcost;
1813 case PARTITION_SPLIT:
1814 if (bsize == BLOCK_8X8) {
1815 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
1816 subsize, pc_tree->leaf_split[0], INT64_MAX);
1819 last_part_rdc.rate = 0;
1820 last_part_rdc.dist = 0;
1821 last_part_rdc.rdcost = 0;
1822 for (i = 0; i < 4; i++) {
1823 int x_idx = (i & 1) * (mi_step >> 1);
1824 int y_idx = (i >> 1) * (mi_step >> 1);
1825 int jj = i >> 1, ii = i & 0x01;
1827 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
1830 vp9_rd_cost_init(&tmp_rdc);
1831 rd_use_partition(cpi, td, tile_data,
1832 mi_8x8 + jj * bss * mis + ii * bss, tp,
1833 mi_row + y_idx, mi_col + x_idx, subsize,
1834 &tmp_rdc.rate, &tmp_rdc.dist,
1835 i != 3, pc_tree->split[i]);
1836 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1837 vp9_rd_cost_reset(&last_part_rdc);
1840 last_part_rdc.rate += tmp_rdc.rate;
1841 last_part_rdc.dist += tmp_rdc.dist;
1849 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1850 if (last_part_rdc.rate < INT_MAX) {
1851 last_part_rdc.rate += cpi->partition_cost[pl][partition];
1852 last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
1853 last_part_rdc.rate, last_part_rdc.dist);
1856 if (do_partition_search
1857 && cpi->sf.adjust_partitioning_from_last_frame
1858 && cpi->sf.partition_search_type == SEARCH_PARTITION
1859 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
1860 && (mi_row + mi_step < cm->mi_rows ||
1861 mi_row + (mi_step >> 1) == cm->mi_rows)
1862 && (mi_col + mi_step < cm->mi_cols ||
1863 mi_col + (mi_step >> 1) == cm->mi_cols)) {
1864 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
1865 chosen_rdc.rate = 0;
1866 chosen_rdc.dist = 0;
1867 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1868 pc_tree->partitioning = PARTITION_SPLIT;
1871 for (i = 0; i < 4; i++) {
1872 int x_idx = (i & 1) * (mi_step >> 1);
1873 int y_idx = (i >> 1) * (mi_step >> 1);
1875 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
1876 PARTITION_CONTEXT sl[8], sa[8];
1878 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
1881 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1882 pc_tree->split[i]->partitioning = PARTITION_NONE;
1883 rd_pick_sb_modes(cpi, tile_data, x,
1884 mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
1885 split_subsize, &pc_tree->split[i]->none, INT64_MAX);
1887 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1889 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
1890 vp9_rd_cost_reset(&chosen_rdc);
1894 chosen_rdc.rate += tmp_rdc.rate;
1895 chosen_rdc.dist += tmp_rdc.dist;
1898 encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
1899 split_subsize, pc_tree->split[i]);
1901 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
1903 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
1905 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
1906 if (chosen_rdc.rate < INT_MAX) {
1907 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
1908 chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
1909 chosen_rdc.rate, chosen_rdc.dist);
1913 // If last_part is better, set the partitioning to that.
1914 if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
1915 mi_8x8[0].src_mi->mbmi.sb_type = bsize;
1916 if (bsize >= BLOCK_8X8)
1917 pc_tree->partitioning = partition;
1918 chosen_rdc = last_part_rdc;
1920 // If none was better, set the partitioning to that.
1921 if (none_rdc.rdcost < chosen_rdc.rdcost) {
1922 if (bsize >= BLOCK_8X8)
1923 pc_tree->partitioning = PARTITION_NONE;
1924 chosen_rdc = none_rdc;
1927 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1929 // We must have chosen a partitioning and encoding, or we'll fail later on.
1930 // There are no other opportunities for success.
1931 if (bsize == BLOCK_64X64)
1932 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
1935 int output_enabled = (bsize == BLOCK_64X64);
1936 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
1940 *rate = chosen_rdc.rate;
1941 *dist = chosen_rdc.dist;
1944 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
1945 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1946 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1947 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
1948 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
1952 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
1953 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
1954 BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
1955 BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
1956 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
1960 // Look at all the mode_info entries for blocks that are part of this
1961 // partition and find the min and max values for sb_type.
1962 // At the moment this is designed to work on a 64x64 SB but could be
1963 // adjusted to use a size parameter.
1965 // The min and max are assumed to have been initialized prior to calling this
1966 // function, so repeated calls can accumulate a min and max over more than one SB64.
1967 static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO *mi_8x8,
1968 BLOCK_SIZE *min_block_size,
1969 BLOCK_SIZE *max_block_size,
1970 int bs_hist[BLOCK_SIZES]) {
1971 int sb_width_in_blocks = MI_BLOCK_SIZE;
1972 int sb_height_in_blocks = MI_BLOCK_SIZE;
1976 // Check the sb_type for each block that belongs to this region.
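// Each 8x8 mode-info entry records the size of the coding block it belongs
// to, so taking MIN/MAX over the region yields the range of partition sizes
// actually used.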
1977 for (i = 0; i < sb_height_in_blocks; ++i) {
1978 for (j = 0; j < sb_width_in_blocks; ++j) {
1979 MODE_INFO *mi = mi_8x8[index + j].src_mi;
1980 BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
1982 *min_block_size = MIN(*min_block_size, sb_type);
1983 *max_block_size = MAX(*max_block_size, sb_type);
1985 index += xd->mi_stride;
1989 // Next square block size less than or equal to the current block size.
1990 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
1991 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
1992 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
1993 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
1994 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
1998 // Look at neighboring blocks and set a min and max partition size based on what they chose.
2000 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2001 MACROBLOCKD *const xd,
2002 int mi_row, int mi_col,
2003 BLOCK_SIZE *min_block_size,
2004 BLOCK_SIZE *max_block_size) {
2005 VP9_COMMON *const cm = &cpi->common;
2006 MODE_INFO *mi = xd->mi[0].src_mi;
2007 const int left_in_image = xd->left_available && mi[-1].src_mi;
2008 const int above_in_image = xd->up_available && mi[-xd->mi_stride].src_mi;
2009 const int row8x8_remaining = tile->mi_row_end - mi_row;
2010 const int col8x8_remaining = tile->mi_col_end - mi_col;
2012 BLOCK_SIZE min_size = BLOCK_4X4;
2013 BLOCK_SIZE max_size = BLOCK_64X64;
2015 int bs_hist[BLOCK_SIZES] = {0};
2017 // Trap case where we do not have a prediction.
2018 if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
2019 // Default min to the largest size and max to the smallest so the scans below can narrow them.
2020 min_size = BLOCK_64X64;
2021 max_size = BLOCK_4X4;
2023 // NOTE: each call to get_sb_partition_size_range() uses the previously
2024 // passed-in values for min and max as a starting point.
2025 // Find the min and max partition sizes used in the previous frame at this location
2026 if (cm->frame_type != KEY_FRAME) {
2027 MODE_INFO *prev_mi =
2028 cm->prev_mip + cm->mi_stride + 1 + mi_row * xd->mi_stride + mi_col;
2030 get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
2032 // Find the min and max partition sizes used in the left SB64
2033 if (left_in_image) {
2034 MODE_INFO *left_sb64_mi = mi[-MI_BLOCK_SIZE].src_mi;
2035 get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
2038 // Find the min and max partition sizes used in the above SB64.
2039 if (above_in_image) {
2040 MODE_INFO *above_sb64_mi = mi[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
2041 get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
2045 // adjust observed min and max
2046 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
2047 min_size = min_partition_size[min_size];
2048 max_size = max_partition_size[max_size];
2049 } else if (cpi->sf.auto_min_max_partition_size ==
2050 CONSTRAIN_NEIGHBORING_MIN_MAX) {
2051 // Adjust the search range based on the histogram of the observed
2052 // partition sizes from the left, above, and previous co-located blocks.
2054 int first_moment = 0;
2055 int second_moment = 0;
2056 int var_unnormalized = 0;
2058 for (i = 0; i < BLOCK_SIZES; i++) {
2060 first_moment += bs_hist[i] * i;
2061 second_moment += bs_hist[i] * i * i;
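// Note: second_moment - first_moment^2 / sum equals sum times the variance
// of the observed block-size indices (ignoring integer truncation), so the
// test below bounds that variance by 4.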
2064 // If the variance is small enough, adjust the range around the mean size,
2065 // which gives a tighter search range.
2066 var_unnormalized = second_moment - first_moment * first_moment / sum;
2067 if (var_unnormalized <= 4 * sum) {
2068 int mean = first_moment / sum;
2069 min_size = min_partition_size[mean];
2070 max_size = max_partition_size[mean];
2072 min_size = min_partition_size[min_size];
2073 max_size = max_partition_size[max_size];
2078 // Check border cases where max and min from neighbors may not be legal.
2079 max_size = find_partition_size(max_size,
2080 row8x8_remaining, col8x8_remaining,
2082 min_size = MIN(min_size, max_size);
2084 // When use_square_partition_only is true, make sure at least one square
2085 // partition is allowed by selecting the next smaller square size as the minimum allowed size.
2087 if (cpi->sf.use_square_partition_only &&
2088 next_square_size[max_size] < min_size) {
2089 min_size = next_square_size[max_size];
2092 *min_block_size = min_size;
2093 *max_block_size = max_size;
2096 static void auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
2097 MACROBLOCKD *const xd,
2098 int mi_row, int mi_col,
2099 BLOCK_SIZE *min_block_size,
2100 BLOCK_SIZE *max_block_size) {
2101 VP9_COMMON *const cm = &cpi->common;
2102 MODE_INFO *mi_8x8 = xd->mi;
2103 const int left_in_image = xd->left_available && mi_8x8[-1].src_mi;
2104 const int above_in_image = xd->up_available &&
2105 mi_8x8[-xd->mi_stride].src_mi;
2106 int row8x8_remaining = tile->mi_row_end - mi_row;
2107 int col8x8_remaining = tile->mi_col_end - mi_col;
2109 BLOCK_SIZE min_size = BLOCK_32X32;
2110 BLOCK_SIZE max_size = BLOCK_8X8;
2111 int bsl = mi_width_log2_lookup[BLOCK_64X64];
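// search_range_ctrl alternates between SB64s in a checkerboard pattern,
// offset by the per-frame chessboard index, so the constrained search
// range is applied only to alternating SB64s and the pattern shifts from
// frame to frame.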
2112 const int search_range_ctrl = (((mi_row + mi_col) >> bsl) +
2113 get_chessboard_index(cm->current_video_frame)) & 0x1;
2114 // Trap case where we do not have a prediction.
2115 if (search_range_ctrl &&
2116 (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
2121 // Find the min and max partition sizes used in the left SB64.
2122 if (left_in_image) {
2124 mi = mi_8x8[-1].src_mi;
2125 for (block = 0; block < MI_BLOCK_SIZE; ++block) {
2126 cur_mi = mi[block * xd->mi_stride].src_mi;
2127 sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
2128 min_size = MIN(min_size, sb_type);
2129 max_size = MAX(max_size, sb_type);
2132 // Find the min and max partition sizes used in the above SB64.
2133 if (above_in_image) {
2134 mi = mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
2135 for (block = 0; block < MI_BLOCK_SIZE; ++block) {
2136 sb_type = mi[block].src_mi ? mi[block].src_mi->mbmi.sb_type : 0;
2137 min_size = MIN(min_size, sb_type);
2138 max_size = MAX(max_size, sb_type);
2142 min_size = min_partition_size[min_size];
2143 max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
2145 min_size = MIN(min_size, max_size);
2146 min_size = MAX(min_size, BLOCK_8X8);
2147 max_size = MIN(max_size, BLOCK_32X32);
2149 min_size = BLOCK_8X8;
2150 max_size = BLOCK_32X32;
2153 *min_block_size = min_size;
2154 *max_block_size = max_size;
2157 // TODO(jingning) refactor functions setting partition search range
2158 static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
2159 int mi_row, int mi_col, BLOCK_SIZE bsize,
2160 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
2161 int mi_width = num_8x8_blocks_wide_lookup[bsize];
2162 int mi_height = num_8x8_blocks_high_lookup[bsize];
2166 const int idx_str = cm->mi_stride * mi_row + mi_col;
2167 MODE_INFO *prev_mi = (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi;
2170 BLOCK_SIZE bs, min_size, max_size;
2172 min_size = BLOCK_64X64;
2173 max_size = BLOCK_4X4;
2176 for (idy = 0; idy < mi_height; ++idy) {
2177 for (idx = 0; idx < mi_width; ++idx) {
2178 mi = prev_mi[idy * cm->mi_stride + idx].src_mi;
2179 bs = mi ? mi->mbmi.sb_type : bsize;
2180 min_size = MIN(min_size, bs);
2181 max_size = MAX(max_size, bs);
2186 if (xd->left_available) {
2187 for (idy = 0; idy < mi_height; ++idy) {
2188 mi = xd->mi[idy * cm->mi_stride - 1].src_mi;
2189 bs = mi ? mi->mbmi.sb_type : bsize;
2190 min_size = MIN(min_size, bs);
2191 max_size = MAX(max_size, bs);
2195 if (xd->up_available) {
2196 for (idx = 0; idx < mi_width; ++idx) {
2197 mi = xd->mi[idx - cm->mi_stride].src_mi;
2198 bs = mi ? mi->mbmi.sb_type : bsize;
2199 min_size = MIN(min_size, bs);
2200 max_size = MAX(max_size, bs);
2204 if (min_size == max_size) {
2205 min_size = min_partition_size[min_size];
2206 max_size = max_partition_size[max_size];
2213 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2214 vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
2217 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
2218 vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
2221 #if CONFIG_FP_MB_STATS
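// Lookup tables for the first-pass macroblock-statistics speed-up: block
// dimensions in 16x16 MB units, plus per-block-size q-index and complexity
// thresholds used by the heuristics below that skip the NONE search,
// disallow NONE, or stop further splitting.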
2222 const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
2223 {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
2224 const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
2225 {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
2226 const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
2227 {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
2228 const int qindex_split_threshold_lookup[BLOCK_SIZES] =
2229 {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
2230 const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
2231 {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
2242 static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
2243 if (fp_byte & FPMB_MOTION_ZERO_MASK) {
2245 } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
2247 } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
2249 } else if (fp_byte & FPMB_MOTION_UP_MASK) {
2256 static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
2257 MOTION_DIRECTION that_mv) {
2258 if (this_mv == that_mv) {
2261 return abs(this_mv - that_mv) == 2 ? 2 : 1;
2266 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
2267 // unlikely to be selected depending on previous rate-distortion optimization
2268 // results, for encoding speed-up.
2269 static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
2270 TileDataEnc *tile_data,
2271 TOKENEXTRA **tp, int mi_row, int mi_col,
2272 BLOCK_SIZE bsize, RD_COST *rd_cost,
2273 int64_t best_rd, PC_TREE *pc_tree) {
2274 VP9_COMMON *const cm = &cpi->common;
2275 TileInfo *const tile_info = &tile_data->tile_info;
2276 MACROBLOCK *const x = &td->mb;
2277 MACROBLOCKD *const xd = &x->e_mbd;
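// mi_step is half the block width/height in 8x8 mode-info units: the offset
// to the second half of a HORZ/VERT split and to each quadrant of a SPLIT.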
2278 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
2279 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
2280 PARTITION_CONTEXT sl[8], sa[8];
2281 TOKENEXTRA *tp_orig = *tp;
2282 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2285 RD_COST this_rdc, sum_rdc, best_rdc;
2286 int do_split = bsize >= BLOCK_8X8;
2289 // Override skipping rectangular partition operations for edge blocks
2290 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
2291 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
2292 const int xss = x->e_mbd.plane[1].subsampling_x;
2293 const int yss = x->e_mbd.plane[1].subsampling_y;
2295 BLOCK_SIZE min_size = x->min_partition_size;
2296 BLOCK_SIZE max_size = x->max_partition_size;
2298 #if CONFIG_FP_MB_STATS
2299 unsigned int src_diff_var = UINT_MAX;
2300 int none_complexity = 0;
2303 int partition_none_allowed = !force_horz_split && !force_vert_split;
2304 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2306 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2310 assert(num_8x8_blocks_wide_lookup[bsize] ==
2311 num_8x8_blocks_high_lookup[bsize]);
2313 vp9_rd_cost_init(&this_rdc);
2314 vp9_rd_cost_init(&sum_rdc);
2315 vp9_rd_cost_reset(&best_rdc);
2316 best_rdc.rdcost = best_rd;
2318 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2320 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
2321 x->mb_energy = vp9_block_energy(cpi, x, bsize);
2323 if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
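// Checkerboard 16x16 partition-range control: quadrants 0 and 3 of the
// parent 32x32 form one phase and quadrants 1 and 2 the other, with the
// phase flipped by the per-frame chessboard index; only the selected phase
// has its partition range constrained below.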
2324 int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
2325 + get_chessboard_index(cm->current_video_frame)) & 0x1;
2327 if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
2328 set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
2331 // Determine which partition types to search according to the speed features.
2332 // The thresholds set here must be square block sizes.
2333 if (cpi->sf.auto_min_max_partition_size) {
2334 partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
2335 partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
2337 partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
2339 do_split &= bsize > min_size;
2341 if (cpi->sf.use_square_partition_only) {
2342 partition_horz_allowed &= force_horz_split;
2343 partition_vert_allowed &= force_vert_split;
2346 save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2348 #if CONFIG_FP_MB_STATS
2349 if (cpi->use_fp_mb_stats) {
2350 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2351 src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
2352 mi_row, mi_col, bsize);
2356 #if CONFIG_FP_MB_STATS
2357 // Decide whether to split directly and skip searching NONE by using the
2358 // first-pass block statistics.
2359 if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
2360 partition_none_allowed && src_diff_var > 4 &&
2361 cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
2362 int mb_row = mi_row >> 1;
2363 int mb_col = mi_col >> 1;
2365 MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2367 MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2370 // Compute a complexity measure based on the inconsistency of the motion
2371 // vectors obtained from the first pass within the current block.
2372 for (r = mb_row; r < mb_row_end ; r++) {
2373 for (c = mb_col; c < mb_col_end; c++) {
2374 const int mb_index = r * cm->mb_cols + c;
2376 MOTION_DIRECTION this_mv;
2377 MOTION_DIRECTION right_mv;
2378 MOTION_DIRECTION bottom_mv;
2381 get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);
2384 if (c != mb_col_end - 1) {
2385 right_mv = get_motion_direction_fp(
2386 cpi->twopass.this_frame_mb_stats[mb_index + 1]);
2387 none_complexity += get_motion_inconsistency(this_mv, right_mv);
2391 if (r != mb_row_end - 1) {
2392 bottom_mv = get_motion_direction_fp(
2393 cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
2394 none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
2397 // do not count its left and top neighbors to avoid double counting
2401 if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
2402 partition_none_allowed = 0;
2408 if (partition_none_allowed) {
2409 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
2410 &this_rdc, bsize, ctx, best_rdc.rdcost);
2411 if (this_rdc.rate != INT_MAX) {
2412 if (bsize >= BLOCK_8X8) {
2413 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2414 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
2415 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2416 this_rdc.rate, this_rdc.dist);
2419 if (this_rdc.rdcost < best_rdc.rdcost) {
2420 int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
2421 int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
2423 best_rdc = this_rdc;
2424 if (bsize >= BLOCK_8X8)
2425 pc_tree->partitioning = PARTITION_NONE;
2427 // Adjust dist breakout threshold according to the partition size.
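// The distortion threshold is specified for a 64x64 block and is scaled
// down here in proportion to the block area; the rate threshold below is
// scaled by the log2 of the block's pixel count.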
2428 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
2429 b_height_log2_lookup[bsize]);
2431 rate_breakout_thr *= num_pels_log2_lookup[bsize];
2433 // If all y, u, v transform blocks in this partition are skippable, and
2434 // the dist & rate are within the thresholds, the partition search is
2435 // terminated for the current branch of the partition search tree.
2436 // The dist & rate thresholds are set to 0 at speed 0 to disable the
2437 // early termination at that speed.
2438 if (!x->e_mbd.lossless &&
2439 (ctx->skippable && best_rdc.dist < dist_breakout_thr &&
2440 best_rdc.rate < rate_breakout_thr)) {
2445 #if CONFIG_FP_MB_STATS
2446 // Check whether every 16x16 first-pass block's statistics show zero
2447 // motion and the corresponding first-pass residue is small enough.
2448 // If that is the case, check the difference variance between the
2449 // current frame and the last frame. If the variance is small enough,
2450 // stop further splitting in the RD optimization.
2451 if (cpi->use_fp_mb_stats && do_split != 0 &&
2452 cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
2453 int mb_row = mi_row >> 1;
2454 int mb_col = mi_col >> 1;
2456 MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
2458 MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
2462 for (r = mb_row; r < mb_row_end; r++) {
2463 for (c = mb_col; c < mb_col_end; c++) {
2464 const int mb_index = r * cm->mb_cols + c;
2465 if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
2466 FPMB_MOTION_ZERO_MASK) ||
2467 !(cpi->twopass.this_frame_mb_stats[mb_index] &
2468 FPMB_ERROR_SMALL_MASK)) {
2478 if (src_diff_var == UINT_MAX) {
2479 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2480 src_diff_var = get_sby_perpixel_diff_variance(
2481 cpi, &x->plane[0].src, mi_row, mi_col, bsize);
2483 if (src_diff_var < 8) {
2492 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2495 // store estimated motion vector
2496 if (cpi->sf.adaptive_motion_search)
2497 store_pred_mv(x, ctx);
2500 // TODO(jingning): use the motion vectors given by the above search as
2501 // the starting point of motion search in the following partition type check.
2503 subsize = get_subsize(bsize, PARTITION_SPLIT);
2504 if (bsize == BLOCK_8X8) {
2506 if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
2507 pc_tree->leaf_split[0]->pred_interp_filter =
2508 ctx->mic.mbmi.interp_filter;
2509 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2510 pc_tree->leaf_split[0], best_rdc.rdcost);
2511 if (sum_rdc.rate == INT_MAX)
2512 sum_rdc.rdcost = INT64_MAX;
2514 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
2515 const int x_idx = (i & 1) * mi_step;
2516 const int y_idx = (i >> 1) * mi_step;
2518 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
2521 if (cpi->sf.adaptive_motion_search)
2522 load_pred_mv(x, ctx);
2524 pc_tree->split[i]->index = i;
2525 rd_pick_partition(cpi, td, tile_data, tp,
2526 mi_row + y_idx, mi_col + x_idx,
2528 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
2530 if (this_rdc.rate == INT_MAX) {
2531 sum_rdc.rdcost = INT64_MAX;
2534 sum_rdc.rate += this_rdc.rate;
2535 sum_rdc.dist += this_rdc.dist;
2536 sum_rdc.rdcost += this_rdc.rdcost;
2541 if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
2542 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2543 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
2544 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2545 sum_rdc.rate, sum_rdc.dist);
2547 if (sum_rdc.rdcost < best_rdc.rdcost) {
2549 pc_tree->partitioning = PARTITION_SPLIT;
2552 // Skip the rectangular partition test when a larger block size
2553 // gives a better RD cost.
2554 if (cpi->sf.less_rectangular_check)
2555 do_rect &= !partition_none_allowed;
2557 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2561 if (partition_horz_allowed && do_rect) {
2562 subsize = get_subsize(bsize, PARTITION_HORZ);
2563 if (cpi->sf.adaptive_motion_search)
2564 load_pred_mv(x, ctx);
2565 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2566 partition_none_allowed)
2567 pc_tree->horizontal[0].pred_interp_filter =
2568 ctx->mic.mbmi.interp_filter;
2569 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2570 &pc_tree->horizontal[0], best_rdc.rdcost);
2572 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
2573 bsize > BLOCK_8X8) {
2574 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
2575 update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
2576 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
2578 if (cpi->sf.adaptive_motion_search)
2579 load_pred_mv(x, ctx);
2580 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2581 partition_none_allowed)
2582 pc_tree->horizontal[1].pred_interp_filter =
2583 ctx->mic.mbmi.interp_filter;
2584 rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col,
2585 &this_rdc, subsize, &pc_tree->horizontal[1],
2586 best_rdc.rdcost - sum_rdc.rdcost);
2587 if (this_rdc.rate == INT_MAX) {
2588 sum_rdc.rdcost = INT64_MAX;
2590 sum_rdc.rate += this_rdc.rate;
2591 sum_rdc.dist += this_rdc.dist;
2592 sum_rdc.rdcost += this_rdc.rdcost;
2596 if (sum_rdc.rdcost < best_rdc.rdcost) {
2597 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2598 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
2599 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
2600 if (sum_rdc.rdcost < best_rdc.rdcost) {
2602 pc_tree->partitioning = PARTITION_HORZ;
2605 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2608 if (partition_vert_allowed && do_rect) {
2609 subsize = get_subsize(bsize, PARTITION_VERT);
2611 if (cpi->sf.adaptive_motion_search)
2612 load_pred_mv(x, ctx);
2613 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2614 partition_none_allowed)
2615 pc_tree->vertical[0].pred_interp_filter =
2616 ctx->mic.mbmi.interp_filter;
2617 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
2618 &pc_tree->vertical[0], best_rdc.rdcost);
2619 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
2620 bsize > BLOCK_8X8) {
2621 update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
2622 encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
2623 &pc_tree->vertical[0]);
2625 if (cpi->sf.adaptive_motion_search)
2626 load_pred_mv(x, ctx);
2627 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
2628 partition_none_allowed)
2629 pc_tree->vertical[1].pred_interp_filter =
2630 ctx->mic.mbmi.interp_filter;
2631 rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step,
2633 &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
2634 if (this_rdc.rate == INT_MAX) {
2635 sum_rdc.rdcost = INT64_MAX;
2637 sum_rdc.rate += this_rdc.rate;
2638 sum_rdc.dist += this_rdc.dist;
2639 sum_rdc.rdcost += this_rdc.rdcost;
2643 if (sum_rdc.rdcost < best_rdc.rdcost) {
2644 pl = partition_plane_context(xd, mi_row, mi_col, bsize);
2645 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
2646 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
2647 sum_rdc.rate, sum_rdc.dist);
2648 if (sum_rdc.rdcost < best_rdc.rdcost) {
2650 pc_tree->partitioning = PARTITION_VERT;
2653 restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
2656 // TODO(jbb): This code was added to avoid a static analysis warning
2657 // related to the fact that best_rd isn't used after this point.
2658 // It should be refactored so that the duplicate checks occur in some
2659 // sub-function and are thus used...
2661 *rd_cost = best_rdc;
2664 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
2665 pc_tree->index != 3) {
2666 int output_enabled = (bsize == BLOCK_64X64);
2667 encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
2671 if (bsize == BLOCK_64X64) {
2672 assert(tp_orig < *tp);
2673 assert(best_rdc.rate < INT_MAX);
2674 assert(best_rdc.dist < INT64_MAX);
2676 assert(tp_orig == *tp);
2680 static void encode_rd_sb_row(VP9_COMP *cpi,
2682 TileDataEnc *tile_data,
2685 VP9_COMMON *const cm = &cpi->common;
2686 TileInfo *const tile_info = &tile_data->tile_info;
2687 MACROBLOCK *const x = &td->mb;
2688 MACROBLOCKD *const xd = &x->e_mbd;
2689 SPEED_FEATURES *const sf = &cpi->sf;
2692 // Initialize the left context for the new SB row
2693 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
2694 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
2696 // Code each SB in the row
2697 for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
2698 mi_col += MI_BLOCK_SIZE) {
2704 const int idx_str = cm->mi_stride * mi_row + mi_col;
2705 MODE_INFO *mi = cm->mi + idx_str;
2707 if (sf->adaptive_pred_interp_filter) {
2708 for (i = 0; i < 64; ++i)
2709 td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
2711 for (i = 0; i < 64; ++i) {
2712 td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
2713 td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
2714 td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
2715 td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
2719 vp9_zero(x->pred_mv);
2720 td->pc_root->index = 0;
2722 x->source_variance = UINT_MAX;
2723 if (sf->partition_search_type == FIXED_PARTITION) {
2724 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2725 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col,
2726 sf->always_this_block_size);
2727 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2728 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2729 } else if (cpi->partition_search_skippable_frame) {
2731 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2732 bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
2733 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
2734 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2735 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2736 } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
2737 cm->frame_type != KEY_FRAME) {
2738 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
2739 rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
2740 BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
2742 // If required, set upper and lower partition size limits.
2743 if (sf->auto_min_max_partition_size) {
2744 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
2745 rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
2746 &x->min_partition_size,
2747 &x->max_partition_size);
2749 rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
2750 &dummy_rdc, INT64_MAX, td->pc_root);
2755 static void init_encode_frame_mb_context(VP9_COMP *cpi) {
2756 MACROBLOCK *const x = &cpi->td.mb;
2757 VP9_COMMON *const cm = &cpi->common;
2758 MACROBLOCKD *const xd = &x->e_mbd;
2759 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
2761 // Copy data over into macroblock data structures.
2762 vp9_setup_src_planes(x, cpi->Source, 0, 0);
2764 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
2766 // Note: this memset assumes above_context[0], [1] and [2]
2767 // are allocated as part of the same buffer.
2768 vpx_memset(xd->above_context[0], 0,
2769 sizeof(*xd->above_context[0]) *
2770 2 * aligned_mi_cols * MAX_MB_PLANE);
2771 vpx_memset(xd->above_seg_context, 0,
2772 sizeof(*xd->above_seg_context) * aligned_mi_cols);
2775 static int check_dual_ref_flags(VP9_COMP *cpi) {
2776 const int ref_flags = cpi->ref_frame_flags;
2778 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
2781 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
2782 + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
2786 static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
2788 const int mis = cm->mi_stride;
2789 MODE_INFO *mi_ptr = cm->mi;
2791 for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
2792 for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
2793 if (mi_ptr[mi_col].src_mi->mbmi.tx_size > max_tx_size)
2794 mi_ptr[mi_col].src_mi->mbmi.tx_size = max_tx_size;
2799 static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
2800 if (frame_is_intra_only(&cpi->common))
2802 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
2803 return ALTREF_FRAME;
2804 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
2805 return GOLDEN_FRAME;
2810 static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
2813 if (cpi->common.frame_type == KEY_FRAME &&
2814 cpi->sf.use_nonrd_pick_mode &&
2815 cpi->sf.partition_search_type == VAR_BASED_PARTITION)
2817 if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
2819 else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
2820 cpi->sf.tx_size_search_method == USE_TX_8X8)
2821 return TX_MODE_SELECT;
2823 return cpi->common.tx_mode;
2826 static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
2827 RD_COST *rd_cost, BLOCK_SIZE bsize,
2828 PICK_MODE_CONTEXT *ctx) {
2829 if (bsize < BLOCK_16X16)
2830 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
2832 vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
2835 static void nonrd_pick_sb_modes(VP9_COMP *cpi,
2836 TileDataEnc *tile_data, MACROBLOCK *const x,
2837 int mi_row, int mi_col, RD_COST *rd_cost,
2838 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
2839 VP9_COMMON *const cm = &cpi->common;
2840 TileInfo *const tile_info = &tile_data->tile_info;
2841 MACROBLOCKD *const xd = &x->e_mbd;
2843 set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
2844 mbmi = &xd->mi[0].src_mi->mbmi;
2845 mbmi->sb_type = bsize;
2847 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
2848 if (mbmi->segment_id)
2849 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
2851 if (cm->frame_type == KEY_FRAME)
2852 hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
2853 else if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
2854 set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
2855 else if (bsize >= BLOCK_8X8)
2856 vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col,
2857 rd_cost, bsize, ctx);
2859 vp9_pick_inter_mode_sub8x8(cpi, x, tile_data, mi_row, mi_col,
2860 rd_cost, bsize, ctx);
2862 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2864 if (rd_cost->rate == INT_MAX)
2865 vp9_rd_cost_reset(rd_cost);
2867 ctx->rate = rd_cost->rate;
2868 ctx->dist = rd_cost->dist;
2871 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
2872 int mi_row, int mi_col,
2875 MACROBLOCKD *xd = &x->e_mbd;
2876 int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
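// hbs is half the block size in 8x8 mode-info units, i.e. the row/column
// offset to the second half of a horizontal/vertical split (or to each
// quadrant of a split).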
2877 PARTITION_TYPE partition = pc_tree->partitioning;
2878 BLOCK_SIZE subsize = get_subsize(bsize, partition);
2880 assert(bsize >= BLOCK_8X8);
2882 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
2885 switch (partition) {
2886 case PARTITION_NONE:
2887 set_mode_info_offsets(cm, xd, mi_row, mi_col);
2888 *(xd->mi[0].src_mi) = pc_tree->none.mic;
2889 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
2891 case PARTITION_VERT:
2892 set_mode_info_offsets(cm, xd, mi_row, mi_col);
2893 *(xd->mi[0].src_mi) = pc_tree->vertical[0].mic;
2894 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
2896 if (mi_col + hbs < cm->mi_cols) {
2897 set_mode_info_offsets(cm, xd, mi_row, mi_col + hbs);
2898 *(xd->mi[0].src_mi) = pc_tree->vertical[1].mic;
2899 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
2902 case PARTITION_HORZ:
2903 set_mode_info_offsets(cm, xd, mi_row, mi_col);
2904 *(xd->mi[0].src_mi) = pc_tree->horizontal[0].mic;
2905 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
2906 if (mi_row + hbs < cm->mi_rows) {
2907 set_mode_info_offsets(cm, xd, mi_row + hbs, mi_col);
2908 *(xd->mi[0].src_mi) = pc_tree->horizontal[1].mic;
2909 duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
2912 case PARTITION_SPLIT: {
2913 fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
2914 fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
2916 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
2918 fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
2927 // Reset the prediction pixel ready flag recursively.
2928 static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
2929 pc_tree->none.pred_pixel_ready = 0;
2930 pc_tree->horizontal[0].pred_pixel_ready = 0;
2931 pc_tree->horizontal[1].pred_pixel_ready = 0;
2932 pc_tree->vertical[0].pred_pixel_ready = 0;
2933 pc_tree->vertical[1].pred_pixel_ready = 0;
2935 if (bsize > BLOCK_8X8) {
2936 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
2938 for (i = 0; i < 4; ++i)
2939 pred_pixel_ready_reset(pc_tree->split[i], subsize);
2943 static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
2944 TileDataEnc *tile_data,
2945 TOKENEXTRA **tp, int mi_row,
2946 int mi_col, BLOCK_SIZE bsize, RD_COST *rd_cost,
2947 int do_recon, int64_t best_rd,
2949 const SPEED_FEATURES *const sf = &cpi->sf;
2950 VP9_COMMON *const cm = &cpi->common;
2951 TileInfo *const tile_info = &tile_data->tile_info;
2952 MACROBLOCK *const x = &td->mb;
2953 MACROBLOCKD *const xd = &x->e_mbd;
2954 const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
2955 TOKENEXTRA *tp_orig = *tp;
2956 PICK_MODE_CONTEXT *ctx = &pc_tree->none;
2958 BLOCK_SIZE subsize = bsize;
2959 RD_COST this_rdc, sum_rdc, best_rdc;
2960 int do_split = bsize >= BLOCK_8X8;
2962 // Override skipping rectangular partition operations for edge blocks
2963 const int force_horz_split = (mi_row + ms >= cm->mi_rows);
2964 const int force_vert_split = (mi_col + ms >= cm->mi_cols);
2965 const int xss = x->e_mbd.plane[1].subsampling_x;
2966 const int yss = x->e_mbd.plane[1].subsampling_y;
2968 int partition_none_allowed = !force_horz_split && !force_vert_split;
2969 int partition_horz_allowed = !force_vert_split && yss <= xss &&
2971 int partition_vert_allowed = !force_horz_split && xss <= yss &&
2975 assert(num_8x8_blocks_wide_lookup[bsize] ==
2976 num_8x8_blocks_high_lookup[bsize]);
2978 vp9_rd_cost_init(&sum_rdc);
2979 vp9_rd_cost_reset(&best_rdc);
2980 best_rdc.rdcost = best_rd;
2982 // Determine partition types in search according to the speed features.
2983 // The threshold set here has to be of square block size.
2984 if (sf->auto_min_max_partition_size) {
2985 partition_none_allowed &= (bsize <= x->max_partition_size &&
2986 bsize >= x->min_partition_size);
2987 partition_horz_allowed &= ((bsize <= x->max_partition_size &&
2988 bsize > x->min_partition_size) ||
2990 partition_vert_allowed &= ((bsize <= x->max_partition_size &&
2991 bsize > x->min_partition_size) ||
2993 do_split &= bsize > x->min_partition_size;
2995 if (sf->use_square_partition_only) {
2996 partition_horz_allowed &= force_horz_split;
2997 partition_vert_allowed &= force_vert_split;
3000 ctx->pred_pixel_ready = !(partition_vert_allowed ||
3001 partition_horz_allowed ||
3005 if (partition_none_allowed) {
3006 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
3007 &this_rdc, bsize, ctx);
3008 ctx->mic.mbmi = xd->mi[0].src_mi->mbmi;
3009 ctx->skip_txfm[0] = x->skip_txfm[0];
3010 ctx->skip = x->skip;
3012 if (this_rdc.rate != INT_MAX) {
3013 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3014 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
3015 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3016 this_rdc.rate, this_rdc.dist);
3017 if (this_rdc.rdcost < best_rdc.rdcost) {
3018 int64_t dist_breakout_thr = sf->partition_search_breakout_dist_thr;
3019 int64_t rate_breakout_thr = sf->partition_search_breakout_rate_thr;
3021 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
3022 b_height_log2_lookup[bsize]);
3024 rate_breakout_thr *= num_pels_log2_lookup[bsize];
3026 best_rdc = this_rdc;
3027 if (bsize >= BLOCK_8X8)
3028 pc_tree->partitioning = PARTITION_NONE;
3030 if (!x->e_mbd.lossless &&
3031 this_rdc.rate < rate_breakout_thr &&
3032 this_rdc.dist < dist_breakout_thr) {
3040 // store estimated motion vector
3041 store_pred_mv(x, ctx);
3045 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3046 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
3047 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
3048 subsize = get_subsize(bsize, PARTITION_SPLIT);
3049 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
3050 const int x_idx = (i & 1) * ms;
3051 const int y_idx = (i >> 1) * ms;
3053 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
3055 load_pred_mv(x, ctx);
3056 nonrd_pick_partition(cpi, td, tile_data, tp,
3057 mi_row + y_idx, mi_col + x_idx,
3058 subsize, &this_rdc, 0,
3059 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
3061 if (this_rdc.rate == INT_MAX) {
3062 vp9_rd_cost_reset(&sum_rdc);
3064 sum_rdc.rate += this_rdc.rate;
3065 sum_rdc.dist += this_rdc.dist;
3066 sum_rdc.rdcost += this_rdc.rdcost;
3070 if (sum_rdc.rdcost < best_rdc.rdcost) {
3072 pc_tree->partitioning = PARTITION_SPLIT;
3074 // Skip the rectangular partition test when a larger block size
3075 // gives a better RD cost.
3076 if (sf->less_rectangular_check)
3077 do_rect &= !partition_none_allowed;
3082 if (partition_horz_allowed && do_rect) {
3083 subsize = get_subsize(bsize, PARTITION_HORZ);
3084 if (sf->adaptive_motion_search)
3085 load_pred_mv(x, ctx);
3086 pc_tree->horizontal[0].pred_pixel_ready = 1;
3087 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3088 &pc_tree->horizontal[0]);
3090 pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3091 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3092 pc_tree->horizontal[0].skip = x->skip;
3094 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
3095 load_pred_mv(x, ctx);
3096 pc_tree->horizontal[1].pred_pixel_ready = 1;
3097 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col,
3099 &pc_tree->horizontal[1]);
3101 pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3102 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3103 pc_tree->horizontal[1].skip = x->skip;
3105 if (this_rdc.rate == INT_MAX) {
3106 vp9_rd_cost_reset(&sum_rdc);
3108 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3109 this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
3110 sum_rdc.rate += this_rdc.rate;
3111 sum_rdc.dist += this_rdc.dist;
3112 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3113 sum_rdc.rate, sum_rdc.dist);
3117 if (sum_rdc.rdcost < best_rdc.rdcost) {
3119 pc_tree->partitioning = PARTITION_HORZ;
3121 pred_pixel_ready_reset(pc_tree, bsize);
3126 if (partition_vert_allowed && do_rect) {
3127 subsize = get_subsize(bsize, PARTITION_VERT);
3128 if (sf->adaptive_motion_search)
3129 load_pred_mv(x, ctx);
3130 pc_tree->vertical[0].pred_pixel_ready = 1;
3131 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
3132 &pc_tree->vertical[0]);
3133 pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3134 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3135 pc_tree->vertical[0].skip = x->skip;
3137 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
3138 load_pred_mv(x, ctx);
3139 pc_tree->vertical[1].pred_pixel_ready = 1;
3140 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms,
3142 &pc_tree->vertical[1]);
3143 pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3144 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3145 pc_tree->vertical[1].skip = x->skip;
3147 if (this_rdc.rate == INT_MAX) {
3148 vp9_rd_cost_reset(&sum_rdc);
3150 int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
3151 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
3152 sum_rdc.rate += this_rdc.rate;
3153 sum_rdc.dist += this_rdc.dist;
3154 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
3155 sum_rdc.rate, sum_rdc.dist);
3159 if (sum_rdc.rdcost < best_rdc.rdcost) {
3161 pc_tree->partitioning = PARTITION_VERT;
3163 pred_pixel_ready_reset(pc_tree, bsize);
3167 *rd_cost = best_rdc;
3169 if (best_rdc.rate == INT_MAX) {
3170 vp9_rd_cost_reset(rd_cost);
3174 // update mode info array
3175 fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, pc_tree);
3177 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
3178 int output_enabled = (bsize == BLOCK_64X64);
3179 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3183 if (bsize == BLOCK_64X64 && do_recon) {
3184 assert(tp_orig < *tp);
3185 assert(best_rdc.rate < INT_MAX);
3186 assert(best_rdc.dist < INT64_MAX);
3188 assert(tp_orig == *tp);
3192 static void nonrd_select_partition(VP9_COMP *cpi,
3194 TileDataEnc *tile_data,
3197 int mi_row, int mi_col,
3198 BLOCK_SIZE bsize, int output_enabled,
3199 RD_COST *rd_cost, PC_TREE *pc_tree) {
3200 VP9_COMMON *const cm = &cpi->common;
3201 TileInfo *const tile_info = &tile_data->tile_info;
3202 MACROBLOCK *const x = &td->mb;
3203 MACROBLOCKD *const xd = &x->e_mbd;
3204 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3205 const int mis = cm->mi_stride;
3206 PARTITION_TYPE partition;
3210 vp9_rd_cost_reset(&this_rdc);
3211 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3214 subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4;
3215 partition = partition_lookup[bsl][subsize];
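// The partitioning for this block was already decided (e.g. by
// choose_partitioning), so the partition type above is recovered from the
// sub-block size stored in the first mode-info entry.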
3217 if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
3218 subsize >= BLOCK_16X16) {
3219 x->max_partition_size = BLOCK_32X32;
3220 x->min_partition_size = BLOCK_8X8;
3221 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3222 rd_cost, 0, INT64_MAX, pc_tree);
3223 } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
3224 x->max_partition_size = BLOCK_16X16;
3225 x->min_partition_size = BLOCK_8X8;
3226 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
3227 rd_cost, 0, INT64_MAX, pc_tree);
3229 switch (partition) {
3230 case PARTITION_NONE:
3231 pc_tree->none.pred_pixel_ready = 1;
3232 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3233 subsize, &pc_tree->none);
3234 pc_tree->none.mic.mbmi = xd->mi[0].src_mi->mbmi;
3235 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3236 pc_tree->none.skip = x->skip;
3238 case PARTITION_VERT:
3239 pc_tree->vertical[0].pred_pixel_ready = 1;
3240 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3241 subsize, &pc_tree->vertical[0]);
3242 pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3243 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3244 pc_tree->vertical[0].skip = x->skip;
3245 if (mi_col + hbs < cm->mi_cols) {
3246 pc_tree->vertical[1].pred_pixel_ready = 1;
3247 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
3248 &this_rdc, subsize, &pc_tree->vertical[1]);
3249 pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3250 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3251 pc_tree->vertical[1].skip = x->skip;
3252 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3253 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3254 rd_cost->rate += this_rdc.rate;
3255 rd_cost->dist += this_rdc.dist;
3259 case PARTITION_HORZ:
3260 pc_tree->horizontal[0].pred_pixel_ready = 1;
3261 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
3262 subsize, &pc_tree->horizontal[0]);
3263 pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3264 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3265 pc_tree->horizontal[0].skip = x->skip;
3266 if (mi_row + hbs < cm->mi_rows) {
3267 pc_tree->horizontal[1].pred_pixel_ready = 1;
3268 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
3269 &this_rdc, subsize, &pc_tree->horizontal[1]);
3270 pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3271 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3272 pc_tree->horizontal[1].skip = x->skip;
3273 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3274 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3275 rd_cost->rate += this_rdc.rate;
3276 rd_cost->dist += this_rdc.dist;
3280 case PARTITION_SPLIT:
3281 subsize = get_subsize(bsize, PARTITION_SPLIT);
3282 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3283 subsize, output_enabled, rd_cost,
3285 nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp,
3286 mi_row, mi_col + hbs, subsize, output_enabled,
3287 &this_rdc, pc_tree->split[1]);
3288 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3289 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3290 rd_cost->rate += this_rdc.rate;
3291 rd_cost->dist += this_rdc.dist;
3293 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis, tp,
3294 mi_row + hbs, mi_col, subsize, output_enabled,
3295 &this_rdc, pc_tree->split[2]);
3296 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3297 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3298 rd_cost->rate += this_rdc.rate;
3299 rd_cost->dist += this_rdc.dist;
3301 nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
3302 mi_row + hbs, mi_col + hbs, subsize,
3303 output_enabled, &this_rdc, pc_tree->split[3]);
3304 if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
3305 rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
3306 rd_cost->rate += this_rdc.rate;
3307 rd_cost->dist += this_rdc.dist;
3311 assert(0 && "Invalid partition type.");
3316 if (bsize == BLOCK_64X64 && output_enabled)
3317 encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
3321 static void nonrd_use_partition(VP9_COMP *cpi,
3323 TileDataEnc *tile_data,
3326 int mi_row, int mi_col,
3327 BLOCK_SIZE bsize, int output_enabled,
3328 RD_COST *dummy_cost, PC_TREE *pc_tree) {
3329 VP9_COMMON *const cm = &cpi->common;
3330 TileInfo *tile_info = &tile_data->tile_info;
3331 MACROBLOCK *const x = &td->mb;
3332 MACROBLOCKD *const xd = &x->e_mbd;
3333 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
3334 const int mis = cm->mi_stride;
3335 PARTITION_TYPE partition;
3338 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
3341 subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4;
3342 partition = partition_lookup[bsl][subsize];
3344 if (output_enabled && bsize != BLOCK_4X4) {
3345 int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
3346 td->counts->partition[ctx][partition]++;
3349 switch (partition) {
3350 case PARTITION_NONE:
3351 pc_tree->none.pred_pixel_ready = 1;
3352 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3353 subsize, &pc_tree->none);
3354 pc_tree->none.mic.mbmi = xd->mi[0].src_mi->mbmi;
3355 pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
3356 pc_tree->none.skip = x->skip;
3357 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3358 subsize, &pc_tree->none);
3360 case PARTITION_VERT:
3361 pc_tree->vertical[0].pred_pixel_ready = 1;
3362 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3363 subsize, &pc_tree->vertical[0]);
3364 pc_tree->vertical[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3365 pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
3366 pc_tree->vertical[0].skip = x->skip;
3367 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3368 subsize, &pc_tree->vertical[0]);
3369 if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
3370 pc_tree->vertical[1].pred_pixel_ready = 1;
3371 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
3372 dummy_cost, subsize, &pc_tree->vertical[1]);
3373 pc_tree->vertical[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3374 pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
3375 pc_tree->vertical[1].skip = x->skip;
3376 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
3377 output_enabled, subsize, &pc_tree->vertical[1]);
3380 case PARTITION_HORZ:
3381 pc_tree->horizontal[0].pred_pixel_ready = 1;
3382 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3383 subsize, &pc_tree->horizontal[0]);
3384 pc_tree->horizontal[0].mic.mbmi = xd->mi[0].src_mi->mbmi;
3385 pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
3386 pc_tree->horizontal[0].skip = x->skip;
3387 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
3388 subsize, &pc_tree->horizontal[0]);
3390 if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
3391 pc_tree->horizontal[1].pred_pixel_ready = 1;
3392 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
3393 dummy_cost, subsize, &pc_tree->horizontal[1]);
3394 pc_tree->horizontal[1].mic.mbmi = xd->mi[0].src_mi->mbmi;
3395 pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
3396 pc_tree->horizontal[1].skip = x->skip;
3397 encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
3398 output_enabled, subsize, &pc_tree->horizontal[1]);
3401 case PARTITION_SPLIT:
3402 subsize = get_subsize(bsize, PARTITION_SPLIT);
3403 if (bsize == BLOCK_8X8) {
3404 nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
3405 subsize, pc_tree->leaf_split[0]);
3406 encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col,
3407 output_enabled, subsize, pc_tree->leaf_split[0]);
3409 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3410 subsize, output_enabled, dummy_cost,
3412 nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp,
3413 mi_row, mi_col + hbs, subsize, output_enabled,
3414 dummy_cost, pc_tree->split[1]);
3415 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
3416 mi_row + hbs, mi_col, subsize, output_enabled,
3417 dummy_cost, pc_tree->split[2]);
3418 nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
3419 mi_row + hbs, mi_col + hbs, subsize, output_enabled,
3420 dummy_cost, pc_tree->split[3]);
3424 assert(0 && "Invalid partition type.");
3428 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
3429 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
3432 static void encode_nonrd_sb_row(VP9_COMP *cpi,
3434 TileDataEnc *tile_data,
3437 SPEED_FEATURES *const sf = &cpi->sf;
3438 VP9_COMMON *const cm = &cpi->common;
3439 TileInfo *const tile_info = &tile_data->tile_info;
3440 MACROBLOCK *const x = &td->mb;
3441 MACROBLOCKD *const xd = &x->e_mbd;
3444 // Initialize the left context for the new SB row
3445 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context));
3446 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));
3448 // Code each SB in the row
3449 for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
3450 mi_col += MI_BLOCK_SIZE) {
3452 const int idx_str = cm->mi_stride * mi_row + mi_col;
3453 MODE_INFO *mi = cm->mi + idx_str;
3455 x->source_variance = UINT_MAX;
3456 vp9_zero(x->pred_mv);
3457 vp9_rd_cost_init(&dummy_rdc);
3458 x->color_sensitivity[0] = 0;
3459 x->color_sensitivity[1] = 0;
3461 // Set the partition type of the 64X64 block
3462 switch (sf->partition_search_type) {
3463 case VAR_BASED_PARTITION:
3464 // TODO(jingning, marpan): The mode decision and encoding process
3465 // supports both intra and inter sub8x8 block coding for RTC mode.
3466 // Tune the thresholds accordingly to enable sub8x8 block coding for
3467 // a coding performance improvement.
3468 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
3469 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3470 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
3472 case SOURCE_VAR_BASED_PARTITION:
3473 set_source_var_based_partition(cpi, tile_info, x, mi, mi_row, mi_col);
3474 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3475 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
3477 case FIXED_PARTITION:
3478 bsize = sf->partition_search_type == FIXED_PARTITION ?
3479 sf->always_this_block_size :
3480 get_nonrd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
3481 set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
3482 nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3483 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
3485 case REFERENCE_PARTITION:
3486 set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
3487 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
3488 xd->mi[0].src_mi->mbmi.segment_id) {
3489 auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
3490 &x->min_partition_size,
3491 &x->max_partition_size);
3492 nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
3493 BLOCK_64X64, &dummy_rdc, 1,
3494 INT64_MAX, td->pc_root);
3496 choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
3497 nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
3498 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
3508 // End of RTC code path.
3510 static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
3511 const SPEED_FEATURES *const sf = &cpi->sf;
3512 const VP9_COMMON *const cm = &cpi->common;
3514 const uint8_t *src = cpi->Source->y_buffer;
3515 const uint8_t *last_src = cpi->Last_Source->y_buffer;
3516 const int src_stride = cpi->Source->y_stride;
3517 const int last_stride = cpi->Last_Source->y_stride;
3519 // Pick cutoff threshold
3520 const int cutoff = (MIN(cm->width, cm->height) >= 720) ?
3521 (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
3522 (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
3523 DECLARE_ALIGNED_ARRAY(16, int, hist, VAR_HIST_BINS);
3524 diff *var16 = cpi->source_diff_var;
3529 vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));
3531 for (i = 0; i < cm->mb_rows; i++) {
3532 for (j = 0; j < cm->mb_cols; j++) {
3533 #if CONFIG_VP9_HIGHBITDEPTH
3534 if (cm->use_highbitdepth) {
3535 switch (cm->bit_depth) {
3537 vp9_highbd_get16x16var(src, src_stride, last_src, last_stride,
3538 &var16->sse, &var16->sum);
3541 vp9_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
3542 &var16->sse, &var16->sum);
3545 vp9_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
3546 &var16->sse, &var16->sum);
3549 assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
3554 vp9_get16x16var(src, src_stride, last_src, last_stride,
3555 &var16->sse, &var16->sum);
3558 vp9_get16x16var(src, src_stride, last_src, last_stride,
3559 &var16->sse, &var16->sum);
3560 #endif // CONFIG_VP9_HIGHBITDEPTH
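// Per-16x16-block variance: var = SSE - sum^2 / 256, with the division by
// the 256-pixel count implemented as a right shift by 8.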
3561 var16->var = var16->sse -
3562 (((uint32_t)var16->sum * var16->sum) >> 8);
3564 if (var16->var >= VAR_HIST_MAX_BG_VAR)
3565 hist[VAR_HIST_BINS - 1]++;
3567 hist[var16->var / VAR_HIST_FACTOR]++;
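// Advance to the next macroblock row: rewind the column offset and move
// the source pointers down by 16 rows.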
3574 src = src - cm->mb_cols * 16 + 16 * src_stride;
3575 last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
3576 }
3578 cpi->source_var_thresh = 0;
3580 if (hist[VAR_HIST_BINS - 1] < cutoff) {
3581 for (i = 0; i < VAR_HIST_BINS - 1; i++) {
3582 sum += hist[i];
3584 if (sum > cutoff) {
3585 cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
3586 return 0;
3587 }
3588 }
3589 }
3591 return sf->search_type_check_frequency;
3592 }
3594 static void source_var_based_partition_search_method(VP9_COMP *cpi) {
3595 VP9_COMMON *const cm = &cpi->common;
3596 SPEED_FEATURES *const sf = &cpi->sf;
3598 if (cm->frame_type == KEY_FRAME) {
3599 // For key frame, use SEARCH_PARTITION.
3600 sf->partition_search_type = SEARCH_PARTITION;
3601 } else if (cm->intra_only) {
3602 sf->partition_search_type = FIXED_PARTITION;
3603 } else {
3604 if (cm->last_width != cm->width || cm->last_height != cm->height) {
3605 if (cpi->source_diff_var)
3606 vpx_free(cpi->source_diff_var);
3608 CHECK_MEM_ERROR(cm, cpi->source_diff_var,
3609 vpx_calloc(cm->MBs, sizeof(diff)));
3610 }
3612 if (!cpi->frames_till_next_var_check)
3613 cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);
3615 if (cpi->frames_till_next_var_check > 0) {
3616 sf->partition_search_type = FIXED_PARTITION;
3617 cpi->frames_till_next_var_check--;
3618 }
3619 }
3620 }
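// Frame-level gate for the skip_encode_sb speed feature (consumed in
// encode_frame_internal() and, via x->skip_encode, in encode_superblock()):
// block re-encoding may only be skipped on shown, non-key frames in which
// inter blocks outnumber intra blocks by more than 4 to 1, judged from the
// intra_inter counts accumulated so far.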
3622 static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) {
3623 unsigned int intra_count = 0, inter_count = 0;
3624 int j;
3626 for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
3627 intra_count += td->counts->intra_inter[j][0];
3628 inter_count += td->counts->intra_inter[j][1];
3629 }
3631 return (intra_count << 2) < inter_count &&
3632 cm->frame_type != KEY_FRAME &&
3633 cm->show_frame;
3634 }
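// Allocates (on first use) and initializes the per-tile encoder state:
// mode pruning factors (thresh_freq_fact) start at 32, mode_map starts as
// the identity mapping, and each tile is given its slice of the frame-level
// token buffer via cpi->tile_tok.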
3636 void vp9_init_tile_data(VP9_COMP *cpi) {
3637 VP9_COMMON *const cm = &cpi->common;
3638 const int tile_cols = 1 << cm->log2_tile_cols;
3639 const int tile_rows = 1 << cm->log2_tile_rows;
3640 int tile_col, tile_row;
3641 TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
3642 int tile_tok = 0;
3644 if (cpi->tile_data == NULL) {
3645 CHECK_MEM_ERROR(cm, cpi->tile_data,
3646 vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
3647 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
3648 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
3649 TileDataEnc *tile_data =
3650 &cpi->tile_data[tile_row * tile_cols + tile_col];
3651 int i, j;
3652 for (i = 0; i < BLOCK_SIZES; ++i) {
3653 for (j = 0; j < MAX_MODES; ++j) {
3654 tile_data->thresh_freq_fact[i][j] = 32;
3655 tile_data->mode_map[i][j] = j;
3656 }
3657 }
3658 }
3659 }
3661 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
3662 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
3663 TileInfo *tile_info =
3664 &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
3665 vp9_tile_init(tile_info, cm, tile_row, tile_col);
3667 cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
3668 pre_tok = cpi->tile_tok[tile_row][tile_col];
3669 tile_tok = allocated_tokens(*tile_info);
3670 }
3671 }
3672 }
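// Encodes one tile: walks its superblock rows, dispatching each row to the
// non-RD (real-time) or the RD row encoder, and records the number of
// tokens emitted for the tile in cpi->tok_count.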
3674 void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td,
3675 int tile_row, int tile_col) {
3676 VP9_COMMON *const cm = &cpi->common;
3677 const int tile_cols = 1 << cm->log2_tile_cols;
3678 TileDataEnc *this_tile =
3679 &cpi->tile_data[tile_row * tile_cols + tile_col];
3680 const TileInfo * const tile_info = &this_tile->tile_info;
3681 TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
3682 int mi_row;
3684 for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
3685 mi_row += MI_BLOCK_SIZE) {
3686 if (cpi->sf.use_nonrd_pick_mode)
3687 encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
3688 else
3689 encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
3690 }
3691 cpi->tok_count[tile_row][tile_col] =
3692 (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
3693 assert(tok - cpi->tile_tok[tile_row][tile_col] <=
3694 allocated_tokens(*tile_info));
3695 }
3697 static void encode_tiles(VP9_COMP *cpi) {
3698 VP9_COMMON *const cm = &cpi->common;
3699 const int tile_cols = 1 << cm->log2_tile_cols;
3700 const int tile_rows = 1 << cm->log2_tile_rows;
3701 int tile_col, tile_row;
3703 vp9_init_tile_data(cpi);
3705 for (tile_row = 0; tile_row < tile_rows; ++tile_row)
3706 for (tile_col = 0; tile_col < tile_cols; ++tile_col)
3707 vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
3708 }
3710 #if CONFIG_FP_MB_STATS
3711 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
3712 VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
3713 uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
3714 cm->current_video_frame * cm->MBs * sizeof(uint8_t);
3716 if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
3717 return EOF;
3719 *this_frame_mb_stats = mb_stats_in;
3721 return 1;
3722 }
3723 #endif
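// Per-frame encoder setup and the main encoding call: resets the RD and
// entropy counters, selects lossless 4x4 transforms (WHT) when the frame
// quantizer deltas are all zero, initializes quantizer/RD/ME constants,
// decides whether previous-frame motion vectors may be reused, and then
// encodes all tiles (multi-threaded when more than one tile column and
// thread are available), timing the tile encode with vpx_usec_timer.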
3725 static void encode_frame_internal(VP9_COMP *cpi) {
3726 SPEED_FEATURES *const sf = &cpi->sf;
3727 RD_OPT *const rd_opt = &cpi->rd;
3728 ThreadData *const td = &cpi->td;
3729 MACROBLOCK *const x = &td->mb;
3730 VP9_COMMON *const cm = &cpi->common;
3731 MACROBLOCKD *const xd = &x->e_mbd;
3732 RD_COUNTS *const rdc = &cpi->td.rd_counts;
3734 xd->mi = cm->mi;
3735 xd->mi[0].src_mi = &xd->mi[0];
3737 vp9_zero(*td->counts);
3738 vp9_zero(rdc->coef_counts);
3739 vp9_zero(rdc->comp_pred_diff);
3740 vp9_zero(rdc->filter_diff);
3741 vp9_zero(rdc->tx_select_diff);
3742 vp9_zero(rd_opt->tx_select_threshes);
3744 xd->lossless = cm->base_qindex == 0 &&
3745 cm->y_dc_delta_q == 0 &&
3746 cm->uv_dc_delta_q == 0 &&
3747 cm->uv_ac_delta_q == 0;
3749 #if CONFIG_VP9_HIGHBITDEPTH
3750 if (cm->use_highbitdepth)
3751 x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vp9_highbd_fdct4x4;
3752 else
3753 x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
3754 x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add :
3755 vp9_highbd_idct4x4_add;
3756 #else
3757 x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
3758 #endif // CONFIG_VP9_HIGHBITDEPTH
3759 x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
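// Lossless mode (base_qindex and all delta-q values zero) swaps the 4x4
// DCT/IDCT pair for the integer Walsh-Hadamard transform pair
// (vp9_fwht4x4 / vp9_iwht4x4_add and their high-bitdepth variants), which
// is exactly invertible.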
3764 cm->tx_mode = select_tx_mode(cpi, xd);
3766 vp9_frame_init_quantizer(cpi);
3768 vp9_initialize_rd_consts(cpi);
3769 vp9_initialize_me_consts(cpi, cm->base_qindex);
3770 init_encode_frame_mb_context(cpi);
3771 cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
3772 cm->width == cm->last_width &&
3773 cm->height == cm->last_height &&
3774 !cm->intra_only &&
3775 cm->last_show_frame;
3776 // Special case: set prev_mi to NULL when the previous mode info
3777 // context cannot be used.
3778 cm->prev_mi = cm->use_prev_frame_mvs ?
3779 cm->prev_mip + cm->mi_stride + 1 : NULL;
3781 x->quant_fp = cpi->sf.use_quant_fp;
3782 vp9_zero(x->skip_txfm);
3783 if (sf->use_nonrd_pick_mode) {
3784 // Initialize internal buffer pointers for rtc coding, where non-RD
3785 // mode decision is used and hence no buffer pointer swap is needed.
3786 int i;
3787 struct macroblock_plane *const p = x->plane;
3788 struct macroblockd_plane *const pd = xd->plane;
3789 PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none;
3791 for (i = 0; i < MAX_MB_PLANE; ++i) {
3792 p[i].coeff = ctx->coeff_pbuf[i][0];
3793 p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
3794 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
3795 p[i].eobs = ctx->eobs_pbuf[i][0];
3796 }
3797 vp9_zero(x->zcoeff_blk);
3799 if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
3800 source_var_based_partition_search_method(cpi);
3801 }
3803 {
3804 struct vpx_usec_timer emr_timer;
3805 vpx_usec_timer_start(&emr_timer);
3807 #if CONFIG_FP_MB_STATS
3808 if (cpi->use_fp_mb_stats) {
3809 input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
3810 &cpi->twopass.this_frame_mb_stats);
3811 }
3812 #endif
3814 // If allowed, encode tiles in parallel, with one thread handling one tile.
3815 if (MIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
3816 vp9_encode_tiles_mt(cpi);
3817 else
3818 encode_tiles(cpi);
3820 vpx_usec_timer_mark(&emr_timer);
3821 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
3822 }
3824 sf->skip_encode_frame = sf->skip_encode_sb ?
3825 get_skip_encode_frame(cm, td) : 0;
3828 // Keep record of the total distortion this time around for future use
3829 cpi->last_frame_distortion = cpi->frame_distortion;
3831 }
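// Chooses a fixed interpolation filter for the frame from the running
// per-filter RD thresholds maintained in vp9_encode_frame() below. The
// smooth filter is only considered for non-ALT_REF frames, and SWITCHABLE
// is kept whenever no single filter beats the switchable threshold.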
3833 static INTERP_FILTER get_interp_filter(
3834 const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
3835 if (!is_alt_ref &&
3836 threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
3837 threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
3838 threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
3839 return EIGHTTAP_SMOOTH;
3840 } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
3841 threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
3842 return EIGHTTAP_SHARP;
3843 } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
3844 return EIGHTTAP;
3845 } else {
3846 return SWITCHABLE;
3847 }
3848 }
3850 void vp9_encode_frame(VP9_COMP *cpi) {
3851 VP9_COMMON *const cm = &cpi->common;
3853 // In the longer term the encoder should be generalized to match the
3854 // decoder such that we allow compound where one of the 3 buffers has a
3855 // different sign bias and that buffer is then the fixed ref. However, this
3856 // requires further work in the rd loop. For now the only supported encoder
3857 // side behavior is where the ALT ref buffer has opposite sign bias to
3858 // the other two.
3859 if (!frame_is_intra_only(cm)) {
3860 if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
3861 cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
3862 (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
3863 cm->ref_frame_sign_bias[LAST_FRAME])) {
3864 cpi->allow_comp_inter_inter = 0;
3865 } else {
3866 cpi->allow_comp_inter_inter = 1;
3867 cm->comp_fixed_ref = ALTREF_FRAME;
3868 cm->comp_var_ref[0] = LAST_FRAME;
3869 cm->comp_var_ref[1] = GOLDEN_FRAME;
3870 }
3871 }
3873 if (cpi->sf.frame_parameter_update) {
3874 int i;
3875 RD_OPT *const rd_opt = &cpi->rd;
3876 FRAME_COUNTS *counts = cpi->td.counts;
3877 RD_COUNTS *const rdc = &cpi->td.rd_counts;
3879 // This code does a single RD pass over the whole frame assuming
3880 // either compound, single or hybrid prediction as per whatever has
3881 // worked best for that type of frame in the past.
3882 // It also predicts whether another coding mode would have worked
3883 // better than this coding mode. If that is the case, it remembers
3884 // that for subsequent frames.
3885 // It does the same analysis for transform size selection as well.
3886 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
3887 int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
3888 int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
3889 int *const tx_thrs = rd_opt->tx_select_threshes[frame_type];
3890 const int is_alt_ref = frame_type == ALTREF_FRAME;
3892 /* prediction (compound, single or hybrid) mode selection */
3893 if (is_alt_ref || !cpi->allow_comp_inter_inter)
3894 cm->reference_mode = SINGLE_REFERENCE;
3895 else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
3896 mode_thrs[COMPOUND_REFERENCE] >
3897 mode_thrs[REFERENCE_MODE_SELECT] &&
3898 check_dual_ref_flags(cpi) &&
3899 cpi->static_mb_pct == 100)
3900 cm->reference_mode = COMPOUND_REFERENCE;
3901 else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
3902 cm->reference_mode = SINGLE_REFERENCE;
3903 else
3904 cm->reference_mode = REFERENCE_MODE_SELECT;
3906 if (cm->interp_filter == SWITCHABLE)
3907 cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);
3909 encode_frame_internal(cpi);
3911 for (i = 0; i < REFERENCE_MODES; ++i)
3912 mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;
3914 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
3915 filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;
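// Threshold update used above for both prediction-mode and filter
// thresholds: a running average of the per-frame RD cost differences,
// normalized per macroblock,
//   thresh_new = (thresh_old + diff / MBs) / 2,
// i.e. an exponential moving average with weight 1/2, so the influence of
// older frames decays geometrically.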
3917 for (i = 0; i < TX_MODES; ++i) {
3918 int64_t pd = rdc->tx_select_diff[i];
3919 if (i == TX_MODE_SELECT)
3920 pd -= RDCOST(cpi->td.mb.rdmult, cpi->td.mb.rddiv, 2048 * (TX_SIZES - 1),
3921 0);
3922 tx_thrs[i] = (tx_thrs[i] + (int)(pd / cm->MBs)) / 2;
3923 }
3925 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
3926 int single_count_zero = 0;
3927 int comp_count_zero = 0;
3929 for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
3930 single_count_zero += counts->comp_inter[i][0];
3931 comp_count_zero += counts->comp_inter[i][1];
3932 }
3934 if (comp_count_zero == 0) {
3935 cm->reference_mode = SINGLE_REFERENCE;
3936 vp9_zero(counts->comp_inter);
3937 } else if (single_count_zero == 0) {
3938 cm->reference_mode = COMPOUND_REFERENCE;
3939 vp9_zero(counts->comp_inter);
3940 }
3941 }
3943 if (cm->tx_mode == TX_MODE_SELECT) {
3944 int count4x4 = 0;
3945 int count8x8_lp = 0, count8x8_8x8p = 0;
3946 int count16x16_16x16p = 0, count16x16_lp = 0;
3947 int count32x32 = 0;
3949 for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
3950 count4x4 += counts->tx.p32x32[i][TX_4X4];
3951 count4x4 += counts->tx.p16x16[i][TX_4X4];
3952 count4x4 += counts->tx.p8x8[i][TX_4X4];
3954 count8x8_lp += counts->tx.p32x32[i][TX_8X8];
3955 count8x8_lp += counts->tx.p16x16[i][TX_8X8];
3956 count8x8_8x8p += counts->tx.p8x8[i][TX_8X8];
3958 count16x16_16x16p += counts->tx.p16x16[i][TX_16X16];
3959 count16x16_lp += counts->tx.p32x32[i][TX_16X16];
3960 count32x32 += counts->tx.p32x32[i][TX_32X32];
3961 }
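// Given the transform-size usage counts gathered above, TX_MODE_SELECT can
// be demoted for the frame when per-block size signalling turned out to be
// unnecessary (e.g. every block used the same size, or always the largest
// allowed size); reset_skip_tx_size() then clamps any stored tx sizes to
// the new maximum.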
3962 if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
3963 count32x32 == 0) {
3964 cm->tx_mode = ALLOW_8X8;
3965 reset_skip_tx_size(cm, TX_8X8);
3966 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
3967 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
3968 cm->tx_mode = ONLY_4X4;
3969 reset_skip_tx_size(cm, TX_4X4);
3970 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
3971 cm->tx_mode = ALLOW_32X32;
3972 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
3973 cm->tx_mode = ALLOW_16X16;
3974 reset_skip_tx_size(cm, TX_16X16);
3975 }
3976 }
3977 } else {
3978 cm->reference_mode = SINGLE_REFERENCE;
3979 encode_frame_internal(cpi);
3980 }
3981 }
3983 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
3984 const PREDICTION_MODE y_mode = mi->mbmi.mode;
3985 const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
3986 const BLOCK_SIZE bsize = mi->mbmi.sb_type;
3988 if (bsize < BLOCK_8X8) {
3989 int idx, idy;
3990 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
3991 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
3992 for (idy = 0; idy < 2; idy += num_4x4_h)
3993 for (idx = 0; idx < 2; idx += num_4x4_w)
3994 ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
3995 } else {
3996 ++counts->y_mode[size_group_lookup[bsize]][y_mode];
3997 }
3999 ++counts->uv_mode[y_mode][uv_mode];
4000 }
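// Final encode of one superblock using the mode info already chosen by the
// partition/mode search: rebuilds the intra or inter prediction,
// transforms/quantizes/tokenizes the residual, and (when output is enabled)
// updates the entropy counts and writes the selected transform size back
// into the mode-info grid.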
4002 static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
4003 TOKENEXTRA **t, int output_enabled,
4004 int mi_row, int mi_col, BLOCK_SIZE bsize,
4005 PICK_MODE_CONTEXT *ctx) {
4006 VP9_COMMON *const cm = &cpi->common;
4007 MACROBLOCK *const x = &td->mb;
4008 MACROBLOCKD *const xd = &x->e_mbd;
4009 MODE_INFO *mi_8x8 = xd->mi;
4010 MODE_INFO *mi = mi_8x8;
4011 MB_MODE_INFO *mbmi = &mi->mbmi;
4012 const int seg_skip = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
4013 SEG_LVL_SKIP);
4014 const int mis = cm->mi_stride;
4015 const int mi_width = num_8x8_blocks_wide_lookup[bsize];
4016 const int mi_height = num_8x8_blocks_high_lookup[bsize];
4018 x->skip_recode = !x->select_tx_size && mbmi->sb_type >= BLOCK_8X8 &&
4019 cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
4020 cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
4021 cpi->sf.allow_skip_recode;
4023 if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
4024 vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
4026 x->skip_optimize = ctx->is_coded;
4027 ctx->is_coded = 1;
4028 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
4029 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
4030 x->q_index < QIDX_SKIP_THRESH);
4032 if (x->skip_encode)
4033 return;
4035 set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
4037 if (!is_inter_block(mbmi)) {
4038 int plane;
4039 mbmi->skip = 1;
4040 for (plane = 0; plane < MAX_MB_PLANE; ++plane)
4041 vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);
4042 if (output_enabled)
4043 sum_intra_stats(td->counts, mi);
4044 vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8));
4045 } else {
4046 int ref;
4047 const int is_compound = has_second_ref(mbmi);
4048 for (ref = 0; ref < 1 + is_compound; ++ref) {
4049 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
4050 mbmi->ref_frame[ref]);
4051 assert(cfg != NULL);
4052 vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
4053 &xd->block_refs[ref]->sf);
4054 }
4055 if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
4056 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
4058 vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
4060 vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
4061 vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8));
4062 }
4064 if (output_enabled) {
4065 if (cm->tx_mode == TX_MODE_SELECT &&
4066 mbmi->sb_type >= BLOCK_8X8 &&
4067 !(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
4068 ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd),
4069 &td->counts->tx)[mbmi->tx_size];
4070 } else {
4071 int x, y;
4072 TX_SIZE tx_size;
4073 // The new intra coding scheme requires no change of transform size
4074 if (is_inter_block(&mi->mbmi)) {
4075 tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
4076 max_txsize_lookup[bsize]);
4077 } else {
4078 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
4079 }
4081 for (y = 0; y < mi_height; y++)
4082 for (x = 0; x < mi_width; x++)
4083 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
4084 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size;
4085 }
4086 ++td->counts->tx.tx_totals[mbmi->tx_size];
4087 ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])];