From: Marco
Date: Mon, 16 Mar 2015 02:19:29 +0000 (-0700)
Subject: Update to variance partition.
X-Git-Tag: v1.4.0~23
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=e52109158aab3109db76c36117445f8d0e521244;p=libvpx

Update to variance partition.

Use force_split to constrain the partition selection. This is
used because in the top-down approach to variance partition, a
block size may be selected even though one of its subblocks may
have high variance. In this patch the selection of the 64x64
block size will only be allowed if the variance of all the 32x32
subblocks are also below the threshold.

Still testing, but some visual improvement for areas near
slow moving boundary can be seen.
Metrics for RTC set increase by about ~0.5%.

Change-Id: Iab3e7b19bf70f534236f7a43fd873895a2bb261d
---

diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index b3deb46a2..ac9de21cc 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -400,7 +400,7 @@ static int set_vt_partitioning(VP9_COMP *cpi,
                                int mi_col,
                                int64_t threshold,
                                BLOCK_SIZE bsize_min,
-                               int segment_id) {
+                               int force_split) {
   VP9_COMMON * const cm = &cpi->common;
   variance_node vt;
   const int block_width = num_8x8_blocks_wide_lookup[bsize];
@@ -409,8 +409,7 @@ static int set_vt_partitioning(VP9_COMP *cpi,
   assert(block_height == block_width);
   tree_to_node(data, bsize, &vt);
 
-  // No 64x64 blocks on segments other than base (un-boosted) segment.
-  if (cyclic_refresh_segment_id_boosted(segment_id) && bsize == BLOCK_64X64)
+  if (force_split)
     return 0;
 
   // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
@@ -426,7 +425,9 @@ static int set_vt_partitioning(VP9_COMP *cpi,
     }
     return 0;
   } else if (bsize > bsize_min) {
-    get_variance(&vt.part_variances->none);
+    // Variance is already computed for 32x32 blocks to set the force_split.
+    if (bsize != BLOCK_32X32)
+      get_variance(&vt.part_variances->none);
     // For key frame or low_res: for bsize above 32X32 or very high variance,
     // take split.
     if (cm->frame_type == KEY_FRAME &&
@@ -522,6 +523,7 @@ static void choose_partitioning(VP9_COMP *cpi,
   int i, j, k, m;
   v64x64 vt;
   v16x16 vt2[16];
+  int force_split[5];
   uint8_t *s;
   const uint8_t *d;
   int sp;
@@ -639,12 +641,15 @@ static void choose_partitioning(VP9_COMP *cpi,
 #endif  // CONFIG_VP9_HIGHBITDEPTH
   }
 
+  // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks.
+  force_split[0] = 0;
   // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
   // for splits.
   for (i = 0; i < 4; i++) {
     const int x32_idx = ((i & 1) << 5);
     const int y32_idx = ((i >> 1) << 5);
     const int i2 = i << 2;
+    force_split[i + 1] = 0;
     for (j = 0; j < 4; j++) {
       const int x16_idx = x32_idx + ((j & 1) << 4);
       const int y16_idx = y32_idx + ((j >> 1) << 4);
@@ -731,6 +736,11 @@ static void choose_partitioning(VP9_COMP *cpi,
     }
   }
 
+  // No 64x64 blocks on segments other than base (un-boosted) segment,
+  // so force split.
+  if (cyclic_refresh_segment_id_boosted(segment_id))
+    force_split[0] = 1;
+
   // Fill the rest of the variance tree by summing split partition values.
   for (i = 0; i < 4; i++) {
     const int i2 = i << 2;
@@ -738,22 +748,29 @@ static void choose_partitioning(VP9_COMP *cpi,
       if (variance4x4downsample[i2 + j] == 1) {
         v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
             &vt.split[i].split[j];
-        for (m = 0; m < 4; m++) {
+        for (m = 0; m < 4; m++)
           fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
-        }
         fill_variance_tree(vtemp, BLOCK_16X16);
       }
     }
     fill_variance_tree(&vt.split[i], BLOCK_32X32);
+    // If variance of this 32x32 block is above the threshold, force the block
+    // to split. This also forces a split on the upper (64x64) level.
+    get_variance(&vt.split[i].part_variances.none);
+    if (vt.split[i].part_variances.none.variance > cpi->vbp_threshold) {
+      force_split[i + 1] = 1;
+      force_split[0] = 1;
+    }
   }
-  fill_variance_tree(&vt, BLOCK_64X64);
+  if (!force_split[0])
+    fill_variance_tree(&vt, BLOCK_64X64);
 
   // Now go through the entire structure, splitting every block size until
   // we get to one that's got a variance lower than our threshold.
   if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
       !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                            cpi->vbp_threshold_bsize_max, BLOCK_16X16,
-                           segment_id)) {
+                           force_split[0])) {
     for (i = 0; i < 4; ++i) {
       const int x32_idx = ((i & 1) << 2);
       const int y32_idx = ((i >> 1) << 2);
@@ -761,7 +778,7 @@ static void choose_partitioning(VP9_COMP *cpi,
       if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
                                (mi_row + y32_idx), (mi_col + x32_idx),
                                cpi->vbp_threshold,
-                               BLOCK_16X16, segment_id)) {
+                               BLOCK_16X16, force_split[i + 1])) {
         for (j = 0; j < 4; ++j) {
           const int x16_idx = ((j & 1) << 1);
           const int y16_idx = ((j >> 1) << 1);
@@ -775,7 +792,7 @@ static void choose_partitioning(VP9_COMP *cpi,
                                    mi_row + y32_idx + y16_idx,
                                    mi_col + x32_idx + x16_idx,
                                    cpi->vbp_threshold_16x16,
-                                   cpi->vbp_bsize_min, segment_id)) {
+                                   cpi->vbp_bsize_min, 0)) {
             for (k = 0; k < 4; ++k) {
               const int x8_idx = (k & 1);
               const int y8_idx = (k >> 1);
@@ -785,7 +802,7 @@ static void choose_partitioning(VP9_COMP *cpi,
                                        mi_row + y32_idx + y16_idx + y8_idx,
                                        mi_col + x32_idx + x16_idx + x8_idx,
                                        cpi->vbp_threshold_bsize_min,
-                                       BLOCK_8X8, segment_id)) {
+                                       BLOCK_8X8, 0)) {
                 set_block_size(cpi, xd,
                                (mi_row + y32_idx + y16_idx + y8_idx),
                                (mi_col + x32_idx + x16_idx + x8_idx),