eval "${var}_name=$name"
eval "${var}_guid=$guid"
- # assume that all projects have the same list of possible configurations,
- # so overwriting old config_lists is not a problem
if [ "$sfx" = "vcproj" ]; then
- config_list=`grep -A1 '<Configuration' $file |
+ cur_config_list=`grep -A1 '<Configuration' $file |
grep Name | cut -d\" -f2`
else
- config_list=`grep -B1 'Label="Configuration"' $file |
+ cur_config_list=`grep -B1 'Label="Configuration"' $file |
grep Condition | cut -d\' -f4`
fi
+ new_config_list=$(for i in $config_list $cur_config_list; do
+ echo $i
+ done | sort | uniq)
+ if [ "$config_list" != "" ] && [ "$config_list" != "$new_config_list" ]; then
+ mixed_platforms=1
+ fi
+ config_list="$new_config_list"
+ eval "${var}_config_list=\"$cur_config_list\""
proj_list="${proj_list} ${var}"
}
indent_push
IFS_bak=${IFS}
IFS=$'\r'$'\n'
+ if [ "$mixed_platforms" != "" ]; then
+ config_list="
+Release|Mixed Platforms
+Debug|Mixed Platforms"
+ fi
for config in ${config_list}; do
echo "${indent}$config = $config"
done
indent_push
for proj in ${proj_list}; do
eval "local proj_guid=\${${proj}_guid}"
+ eval "local proj_config_list=\${${proj}_config_list}"
IFS=$'\r'$'\n'
- for config in ${config_list}; do
- echo "${indent}${proj_guid}.${config}.ActiveCfg = ${config}"
- echo "${indent}${proj_guid}.${config}.Build.0 = ${config}"
+ for config in ${proj_config_list}; do
+ if [ "$mixed_platforms" != "" ]; then
+ local c=${config%%|*}
+ echo "${indent}${proj_guid}.${c}|Mixed Platforms.ActiveCfg = ${config}"
+ echo "${indent}${proj_guid}.${c}|Mixed Platforms.Build.0 = ${config}"
+ else
+ echo "${indent}${proj_guid}.${config}.ActiveCfg = ${config}"
+ echo "${indent}${proj_guid}.${config}.Build.0 = ${config}"
+ fi
done
IFS=${IFS_bak}
multiple_arf
non420
alpha
- balanced_coeftree
"
CONFIG_LIST="
external_build
MACRO
IDCT8x8_1D
; stage 1
- vdup.16 d0, r3; ; duplicate cospi_28_64
- vdup.16 d1, r4; ; duplicate cospi_4_64
+ vdup.16 d0, r3 ; duplicate cospi_28_64
+ vdup.16 d1, r4 ; duplicate cospi_4_64
; input[1] * cospi_28_64
vmull.s16 q2, d18, d0
vqrshrn.s32 d14, q2, #14 ; >> 14
vqrshrn.s32 d15, q3, #14 ; >> 14
- vdup.16 d0, r5; ; duplicate cospi_12_64
- vdup.16 d1, r6; ; duplicate cospi_20_64
+ vdup.16 d0, r5 ; duplicate cospi_12_64
+ vdup.16 d1, r6 ; duplicate cospi_20_64
; input[5] * cospi_12_64
vmull.s16 q2, d26, d0
vqrshrn.s32 d13, q1, #14 ; >> 14
; stage 2 & stage 3 - even half
- vdup.16 d0, r7; ; duplicate cospi_16_64
+ vdup.16 d0, r7 ; duplicate cospi_16_64
; input[0] * cospi_16_64
vmull.s16 q2, d16, d0
vqrshrn.s32 d23, q3, #14 ; >> 14
; input[1] * cospi_24_64 - input[3] * cospi_8_64
- vdup.16 d0, r8; ; duplicate cospi_24_64
- vdup.16 d1, r9; ; duplicate cospi_8_64
+ vdup.16 d0, r8 ; duplicate cospi_24_64
+ vdup.16 d1, r9 ; duplicate cospi_8_64
; input[1] * cospi_24_64
vmull.s16 q2, d20, d0
vadd.s16 q7, q7, q6 ; step2[7] = step1[6] + step1[7]
; stage 3 -odd half
- vdup.16 d16, r7; ; duplicate cospi_16_64
+ vdup.16 d16, r7 ; duplicate cospi_16_64
; step2[6] * cospi_16_64
vmull.s16 q9, d28, d16
vqrshrn.s32 d13, q10, #14 ; >> 14
; stage 4
- vadd.s16 q8, q0, q7; ; output[0] = step1[0] + step1[7];
- vadd.s16 q9, q1, q6; ; output[1] = step1[1] + step1[6];
- vadd.s16 q10, q2, q5; ; output[2] = step1[2] + step1[5];
- vadd.s16 q11, q3, q4; ; output[3] = step1[3] + step1[4];
- vsub.s16 q12, q3, q4; ; output[4] = step1[3] - step1[4];
- vsub.s16 q13, q2, q5; ; output[5] = step1[2] - step1[5];
- vsub.s16 q14, q1, q6; ; output[6] = step1[1] - step1[6];
- vsub.s16 q15, q0, q7; ; output[7] = step1[0] - step1[7];
+ vadd.s16 q8, q0, q7 ; output[0] = step1[0] + step1[7];
+ vadd.s16 q9, q1, q6 ; output[1] = step1[1] + step1[6];
+ vadd.s16 q10, q2, q5 ; output[2] = step1[2] + step1[5];
+ vadd.s16 q11, q3, q4 ; output[3] = step1[3] + step1[4];
+ vsub.s16 q12, q3, q4 ; output[4] = step1[3] - step1[4];
+ vsub.s16 q13, q2, q5 ; output[5] = step1[2] - step1[5];
+ vsub.s16 q14, q1, q6 ; output[6] = step1[1] - step1[6];
+ vsub.s16 q15, q0, q7 ; output[7] = step1[0] - step1[7];
MEND
; Transpose a 8x8 16bit data matrix. Datas are loaded in q8-q15.
mov r0, r1
; load destination data
- vld1.u8 {d0}, [r1], r2
- vld1.u8 {d1}, [r1], r2
- vld1.s16 {d2}, [r1], r2
- vld1.s16 {d3}, [r1], r2
- vld1.s16 {d4}, [r1], r2
- vld1.s16 {d5}, [r1], r2
- vld1.s16 {d6}, [r1], r2
- vld1.s16 {d7}, [r1]
+ vld1.64 {d0}, [r1], r2
+ vld1.64 {d1}, [r1], r2
+ vld1.64 {d2}, [r1], r2
+ vld1.64 {d3}, [r1], r2
+ vld1.64 {d4}, [r1], r2
+ vld1.64 {d5}, [r1], r2
+ vld1.64 {d6}, [r1], r2
+ vld1.64 {d7}, [r1]
; ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]
vaddw.u8 q8, q8, d0
#include "vp9/common/vp9_treecoder.h"
#define BLOCK_SIZE_GROUPS 4
-
-#define PREDICTION_PROBS 3
-
#define MBSKIP_CONTEXTS 3
/* Segment Feature Masks */
extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT];
-static INLINE TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
- MODE_INFO *const mi = xd->mode_info_context;
- MB_MODE_INFO *const mbmi = &mi->mbmi;
+static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
+ const MACROBLOCKD *xd, int ib) {
+ const MODE_INFO *const mi = xd->mode_info_context;
+ const MB_MODE_INFO *const mbmi = &mi->mbmi;
- if (xd->lossless || mbmi->ref_frame[0] != INTRA_FRAME)
+ if (plane_type != PLANE_TYPE_Y_WITH_DC ||
+ xd->lossless ||
+ mbmi->ref_frame[0] != INTRA_FRAME)
return DCT_DCT;
return mode2txfm_map[mbmi->sb_type < BLOCK_SIZE_SB8X8 ?
mi->bmi[ib].as_mode : mbmi->mode];
}
-static INLINE TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd) {
- return mode2txfm_map[xd->mode_info_context->mbmi.mode];
+static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type,
+ const MACROBLOCKD *xd) {
+ return plane_type == PLANE_TYPE_Y_WITH_DC ?
+ mode2txfm_map[xd->mode_info_context->mbmi.mode] : DCT_DCT;
}
-static INLINE TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd) {
- return mode2txfm_map[xd->mode_info_context->mbmi.mode];
+static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type,
+ const MACROBLOCKD *xd) {
+ return plane_type == PLANE_TYPE_Y_WITH_DC ?
+ mode2txfm_map[xd->mode_info_context->mbmi.mode] : DCT_DCT;
}
static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) {
const int num_8x8_blocks_high_lookup[BLOCK_SIZE_TYPES] =
{1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8};
+// MIN(3, MIN(b_width_log2(bsize), b_height_log2(bsize)))
+const int size_group_lookup[BLOCK_SIZE_TYPES] =
+ {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3};
+
+
const PARTITION_TYPE partition_lookup[][BLOCK_SIZE_TYPES] = {
{ // 4X4
// 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
extern const int num_8x8_blocks_high_lookup[BLOCK_SIZE_TYPES];
extern const int num_4x4_blocks_high_lookup[BLOCK_SIZE_TYPES];
extern const int num_4x4_blocks_wide_lookup[BLOCK_SIZE_TYPES];
-extern const PARTITION_TYPE
- partition_lookup[][BLOCK_SIZE_TYPES];
+extern const int size_group_lookup[BLOCK_SIZE_TYPES];
+
+extern const PARTITION_TYPE partition_lookup[][BLOCK_SIZE_TYPES];
extern const BLOCK_SIZE_TYPE subsize_lookup[PARTITION_TYPES][BLOCK_SIZE_TYPES];
* be found in the AUTHORS file in the root of the source tree.
*/
-
/*Generated file, included by vp9_entropy.c*/
-
-#if CONFIG_BALANCED_COEFTREE
-static const vp9_coeff_probs_model default_coef_probs_4x4[BLOCK_TYPES] = {
- { /* block Type 0 */
- { /* Intra */
- { /* Coeff Band 0 */
- { 6, 213, 178 },
- { 26, 113, 132 },
- { 34, 17, 68 }
- }, { /* Coeff Band 1 */
- { 66, 96, 178 },
- { 63, 96, 174 },
- { 67, 54, 154 },
- { 62, 28, 126 },
- { 48, 9, 84 },
- { 20, 1, 32 }
- }, { /* Coeff Band 2 */
- { 64, 144, 206 },
- { 70, 99, 191 },
- { 69, 36, 152 },
- { 55, 9, 106 },
- { 35, 1, 60 },
- { 14, 1, 22 }
- }, { /* Coeff Band 3 */
- { 82, 154, 222 },
- { 83, 112, 205 },
- { 81, 31, 164 },
- { 62, 7, 118 },
- { 42, 1, 74 },
- { 18, 1, 30 }
- }, { /* Coeff Band 4 */
- { 52, 179, 233 },
- { 64, 132, 214 },
- { 73, 36, 170 },
- { 59, 8, 116 },
- { 38, 1, 65 },
- { 15, 1, 26 }
- }, { /* Coeff Band 5 */
- { 29, 175, 238 },
- { 26, 169, 223 },
- { 41, 80, 182 },
- { 39, 32, 127 },
- { 26, 10, 69 },
- { 11, 2, 28 }
- }
- }, { /* Inter */
- { /* Coeff Band 0 */
- { 21, 226, 234 },
- { 52, 182, 212 },
- { 80, 112, 177 }
- }, { /* Coeff Band 1 */
- { 111, 164, 243 },
- { 88, 152, 231 },
- { 90, 43, 186 },
- { 70, 12, 132 },
- { 44, 2, 76 },
- { 19, 1, 33 }
- }, { /* Coeff Band 2 */
- { 96, 185, 246 },
- { 99, 127, 231 },
- { 88, 21, 177 },
- { 64, 5, 122 },
- { 38, 1, 69 },
- { 18, 1, 30 }
- }, { /* Coeff Band 3 */
- { 84, 206, 249 },
- { 94, 147, 237 },
- { 95, 33, 187 },
- { 71, 8, 131 },
- { 47, 1, 83 },
- { 26, 1, 44 }
- }, { /* Coeff Band 4 */
- { 38, 221, 252 },
- { 58, 177, 241 },
- { 78, 46, 188 },
- { 59, 9, 122 },
- { 34, 1, 66 },
- { 18, 1, 34 }
- }, { /* Coeff Band 5 */
- { 21, 216, 253 },
- { 21, 206, 244 },
- { 42, 93, 200 },
- { 43, 41, 146 },
- { 36, 13, 93 },
- { 31, 1, 55 }
- }
- }
- }, { /* block Type 1 */
- { /* Intra */
- { /* Coeff Band 0 */
- { 7, 213, 219 },
- { 23, 139, 182 },
- { 38, 60, 125 }
- }, { /* Coeff Band 1 */
- { 69, 156, 220 },
- { 52, 178, 213 },
- { 69, 111, 190 },
- { 69, 58, 155 },
- { 58, 21, 104 },
- { 39, 7, 60 }
- }, { /* Coeff Band 2 */
- { 68, 189, 228 },
- { 70, 158, 221 },
- { 83, 64, 189 },
- { 73, 18, 141 },
- { 48, 4, 88 },
- { 23, 1, 41 }
- }, { /* Coeff Band 3 */
- { 99, 194, 236 },
- { 91, 138, 224 },
- { 91, 53, 189 },
- { 74, 20, 142 },
- { 48, 6, 90 },
- { 22, 1, 41 }
- }, { /* Coeff Band 4 */
- { 52, 203, 244 },
- { 60, 168, 231 },
- { 75, 62, 189 },
- { 61, 18, 132 },
- { 38, 4, 72 },
- { 17, 1, 39 }
- }, { /* Coeff Band 5 */
- { 33, 192, 247 },
- { 31, 185, 234 },
- { 46, 85, 185 },
- { 39, 35, 132 },
- { 28, 15, 80 },
- { 13, 5, 38 }
- }
- }, { /* Inter */
- { /* Coeff Band 0 */
- { 5, 247, 246 },
- { 28, 209, 228 },
- { 65, 137, 203 }
- }, { /* Coeff Band 1 */
- { 69, 208, 250 },
- { 54, 207, 242 },
- { 81, 92, 204 },
- { 70, 54, 153 },
- { 58, 40, 108 },
- { 58, 35, 71 }
- }, { /* Coeff Band 2 */
- { 65, 215, 250 },
- { 72, 185, 239 },
- { 92, 50, 197 },
- { 75, 14, 147 },
- { 49, 2, 99 },
- { 26, 1, 53 }
- }, { /* Coeff Band 3 */
- { 70, 220, 251 },
- { 76, 186, 241 },
- { 90, 65, 198 },
- { 75, 26, 151 },
- { 58, 12, 112 },
- { 34, 6, 49 }
- }, { /* Coeff Band 4 */
- { 34, 224, 253 },
- { 44, 204, 245 },
- { 69, 85, 204 },
- { 64, 31, 150 },
- { 44, 2, 78 },
- { 1, 1, 128 }
- }, { /* Coeff Band 5 */
- { 25, 216, 253 },
- { 21, 215, 248 },
- { 47, 108, 214 },
- { 47, 48, 160 },
- { 26, 20, 90 },
- { 64, 171, 128 }
- }
- }
- }
-};
-static const vp9_coeff_probs_model default_coef_probs_8x8[BLOCK_TYPES] = {
- { /* block Type 0 */
- { /* Intra */
- { /* Coeff Band 0 */
- { 9, 203, 199 },
- { 26, 92, 128 },
- { 28, 11, 55 }
- }, { /* Coeff Band 1 */
- { 99, 54, 160 },
- { 78, 99, 155 },
- { 80, 44, 138 },
- { 71, 17, 115 },
- { 51, 5, 80 },
- { 27, 1, 40 }
- }, { /* Coeff Band 2 */
- { 135, 81, 190 },
- { 113, 61, 182 },
- { 93, 16, 153 },
- { 70, 4, 115 },
- { 41, 1, 68 },
- { 16, 1, 27 }
- }, { /* Coeff Band 3 */
- { 155, 103, 214 },
- { 129, 48, 199 },
- { 95, 10, 159 },
- { 63, 1, 110 },
- { 32, 1, 58 },
- { 12, 1, 21 }
- }, { /* Coeff Band 4 */
- { 163, 149, 231 },
- { 137, 69, 213 },
- { 95, 11, 164 },
- { 62, 3, 108 },
- { 32, 1, 57 },
- { 13, 1, 22 }
- }, { /* Coeff Band 5 */
- { 136, 189, 239 },
- { 123, 102, 223 },
- { 97, 19, 170 },
- { 66, 4, 111 },
- { 38, 1, 60 },
- { 18, 1, 26 }
- }
- }, { /* Inter */
- { /* Coeff Band 0 */
- { 24, 226, 244 },
- { 54, 178, 211 },
- { 80, 74, 152 }
- }, { /* Coeff Band 1 */
- { 145, 153, 236 },
- { 101, 163, 223 },
- { 108, 50, 187 },
- { 90, 22, 145 },
- { 66, 8, 97 },
- { 42, 4, 50 }
- }, { /* Coeff Band 2 */
- { 150, 159, 238 },
- { 128, 90, 218 },
- { 94, 9, 163 },
- { 64, 3, 110 },
- { 34, 1, 61 },
- { 13, 1, 24 }
- }, { /* Coeff Band 3 */
- { 151, 162, 242 },
- { 135, 80, 222 },
- { 93, 9, 166 },
- { 61, 3, 111 },
- { 31, 1, 59 },
- { 12, 1, 22 }
- }, { /* Coeff Band 4 */
- { 161, 170, 245 },
- { 140, 84, 228 },
- { 99, 8, 174 },
- { 64, 1, 116 },
- { 34, 1, 63 },
- { 14, 1, 26 }
- }, { /* Coeff Band 5 */
- { 138, 197, 246 },
- { 127, 109, 233 },
- { 100, 16, 179 },
- { 66, 3, 119 },
- { 37, 1, 66 },
- { 16, 1, 30 }
- }
- }
- }, { /* block Type 1 */
- { /* Intra */
- { /* Coeff Band 0 */
- { 6, 216, 212 },
- { 25, 134, 171 },
- { 43, 48, 118 }
- }, { /* Coeff Band 1 */
- { 93, 112, 209 },
- { 66, 159, 206 },
- { 82, 78, 184 },
- { 75, 28, 148 },
- { 46, 4, 82 },
- { 18, 1, 28 }
- }, { /* Coeff Band 2 */
- { 108, 148, 220 },
- { 90, 130, 216 },
- { 92, 40, 186 },
- { 73, 10, 135 },
- { 46, 1, 79 },
- { 20, 1, 35 }
- }, { /* Coeff Band 3 */
- { 125, 173, 232 },
- { 109, 117, 223 },
- { 97, 31, 183 },
- { 71, 7, 127 },
- { 44, 1, 76 },
- { 21, 1, 36 }
- }, { /* Coeff Band 4 */
- { 133, 195, 236 },
- { 112, 121, 224 },
- { 97, 23, 178 },
- { 69, 3, 122 },
- { 42, 1, 72 },
- { 19, 1, 34 }
- }, { /* Coeff Band 5 */
- { 132, 180, 238 },
- { 119, 102, 225 },
- { 101, 18, 179 },
- { 71, 3, 124 },
- { 42, 1, 70 },
- { 17, 1, 28 }
- }
- }, { /* Inter */
- { /* Coeff Band 0 */
- { 5, 242, 250 },
- { 26, 198, 226 },
- { 58, 98, 168 }
- }, { /* Coeff Band 1 */
- { 82, 201, 246 },
- { 50, 219, 237 },
- { 94, 107, 205 },
- { 89, 61, 167 },
- { 77, 31, 131 },
- { 57, 14, 91 }
- }, { /* Coeff Band 2 */
- { 99, 202, 247 },
- { 96, 165, 234 },
- { 100, 31, 190 },
- { 72, 8, 131 },
- { 41, 1, 72 },
- { 14, 1, 24 }
- }, { /* Coeff Band 3 */
- { 108, 204, 248 },
- { 107, 156, 235 },
- { 103, 27, 186 },
- { 71, 4, 124 },
- { 39, 1, 66 },
- { 14, 1, 19 }
- }, { /* Coeff Band 4 */
- { 120, 211, 248 },
- { 118, 149, 234 },
- { 107, 19, 182 },
- { 72, 3, 126 },
- { 40, 1, 69 },
- { 16, 1, 24 }
- }, { /* Coeff Band 5 */
- { 127, 199, 245 },
- { 122, 125, 232 },
- { 112, 20, 186 },
- { 82, 3, 136 },
- { 55, 1, 88 },
- { 10, 1, 38 }
- }
- }
- }
-};
-static const vp9_coeff_probs_model default_coef_probs_16x16[BLOCK_TYPES] = {
- { /* block Type 0 */
- { /* Intra */
- { /* Coeff Band 0 */
- { 25, 9, 101 },
- { 25, 2, 67 },
- { 15, 1, 28 }
- }, { /* Coeff Band 1 */
- { 67, 30, 118 },
- { 61, 56, 116 },
- { 60, 31, 105 },
- { 52, 11, 85 },
- { 34, 2, 54 },
- { 14, 1, 22 }
- }, { /* Coeff Band 2 */
- { 107, 58, 149 },
- { 92, 53, 147 },
- { 78, 14, 123 },
- { 56, 3, 87 },
- { 35, 1, 56 },
- { 17, 1, 27 }
- }, { /* Coeff Band 3 */
- { 142, 61, 171 },
- { 111, 30, 162 },
- { 80, 4, 128 },
- { 53, 1, 87 },
- { 31, 1, 52 },
- { 14, 1, 24 }
- }, { /* Coeff Band 4 */
- { 171, 73, 200 },
- { 129, 28, 184 },
- { 86, 3, 140 },
- { 54, 1, 90 },
- { 28, 1, 49 },
- { 12, 1, 21 }
- }, { /* Coeff Band 5 */
- { 193, 129, 227 },
- { 148, 28, 200 },
- { 90, 2, 144 },
- { 53, 1, 90 },
- { 28, 1, 50 },
- { 13, 1, 22 }
- }
- }, { /* Inter */
- { /* Coeff Band 0 */
- { 60, 7, 234 },
- { 64, 4, 184 },
- { 56, 1, 104 }
- }, { /* Coeff Band 1 */
- { 150, 111, 210 },
- { 87, 185, 202 },
- { 101, 81, 177 },
- { 90, 34, 142 },
- { 67, 11, 95 },
- { 38, 2, 51 }
- }, { /* Coeff Band 2 */
- { 153, 139, 218 },
- { 120, 72, 195 },
- { 90, 11, 147 },
- { 63, 3, 101 },
- { 39, 1, 61 },
- { 20, 1, 33 }
- }, { /* Coeff Band 3 */
- { 171, 132, 223 },
- { 131, 56, 200 },
- { 92, 6, 147 },
- { 58, 1, 95 },
- { 32, 1, 52 },
- { 14, 1, 23 }
- }, { /* Coeff Band 4 */
- { 183, 137, 227 },
- { 139, 48, 204 },
- { 91, 3, 148 },
- { 55, 1, 91 },
- { 28, 1, 47 },
- { 13, 1, 21 }
- }, { /* Coeff Band 5 */
- { 198, 149, 234 },
- { 153, 32, 208 },
- { 95, 2, 148 },
- { 55, 1, 90 },
- { 30, 1, 51 },
- { 16, 1, 25 }
- }
- }
- }, { /* block Type 1 */
- { /* Intra */
- { /* Coeff Band 0 */
- { 7, 209, 217 },
- { 31, 106, 151 },
- { 40, 21, 86 }
- }, { /* Coeff Band 1 */
- { 101, 71, 184 },
- { 74, 131, 177 },
- { 88, 50, 158 },
- { 78, 16, 129 },
- { 51, 2, 82 },
- { 18, 1, 29 }
- }, { /* Coeff Band 2 */
- { 116, 115, 199 },
- { 102, 88, 191 },
- { 94, 22, 160 },
- { 74, 6, 122 },
- { 47, 1, 77 },
- { 18, 1, 30 }
- }, { /* Coeff Band 3 */
- { 157, 124, 210 },
- { 130, 53, 201 },
- { 102, 10, 165 },
- { 73, 1, 120 },
- { 42, 1, 69 },
- { 16, 1, 27 }
- }, { /* Coeff Band 4 */
- { 174, 147, 225 },
- { 134, 67, 212 },
- { 100, 10, 168 },
- { 66, 1, 111 },
- { 36, 1, 60 },
- { 16, 1, 27 }
- }, { /* Coeff Band 5 */
- { 185, 165, 232 },
- { 147, 56, 214 },
- { 105, 5, 165 },
- { 66, 1, 108 },
- { 35, 1, 59 },
- { 16, 1, 27 }
- }
- }, { /* Inter */
- { /* Coeff Band 0 */
- { 3, 232, 245 },
- { 18, 162, 210 },
- { 38, 64, 131 }
- }, { /* Coeff Band 1 */
- { 84, 187, 239 },
- { 35, 231, 231 },
- { 82, 150, 209 },
- { 87, 97, 181 },
- { 81, 64, 151 },
- { 67, 60, 119 }
- }, { /* Coeff Band 2 */
- { 107, 185, 239 },
- { 100, 149, 224 },
- { 107, 34, 185 },
- { 83, 12, 141 },
- { 49, 4, 92 },
- { 21, 1, 40 }
- }, { /* Coeff Band 3 */
- { 125, 184, 243 },
- { 121, 127, 228 },
- { 113, 25, 185 },
- { 82, 6, 134 },
- { 48, 1, 82 },
- { 26, 1, 38 }
- }, { /* Coeff Band 4 */
- { 143, 185, 245 },
- { 133, 115, 231 },
- { 114, 14, 184 },
- { 77, 3, 126 },
- { 43, 1, 68 },
- { 34, 1, 40 }
- }, { /* Coeff Band 5 */
- { 170, 194, 241 },
- { 151, 80, 226 },
- { 118, 9, 180 },
- { 81, 1, 130 },
- { 51, 1, 78 },
- { 18, 1, 49 }
- }
- }
- }
-};
-static const vp9_coeff_probs_model default_coef_probs_32x32[BLOCK_TYPES] = {
- { /* block Type 0 */
- { /* Intra */
- { /* Coeff Band 0 */
- { 29, 42, 137 },
- { 26, 3, 60 },
- { 13, 1, 23 }
- }, { /* Coeff Band 1 */
- { 69, 36, 122 },
- { 63, 57, 123 },
- { 60, 33, 112 },
- { 52, 11, 90 },
- { 32, 2, 52 },
- { 10, 1, 15 }
- }, { /* Coeff Band 2 */
- { 107, 55, 143 },
- { 86, 69, 143 },
- { 74, 24, 116 },
- { 52, 5, 78 },
- { 29, 1, 44 },
- { 12, 1, 18 }
- }, { /* Coeff Band 3 */
- { 137, 71, 160 },
- { 107, 34, 152 },
- { 73, 6, 114 },
- { 44, 1, 69 },
- { 25, 1, 40 },
- { 12, 1, 18 }
- }, { /* Coeff Band 4 */
- { 165, 70, 174 },
- { 118, 24, 159 },
- { 74, 3, 117 },
- { 45, 1, 73 },
- { 26, 1, 43 },
- { 12, 1, 19 }
- }, { /* Coeff Band 5 */
- { 220, 93, 223 },
- { 153, 10, 187 },
- { 86, 2, 131 },
- { 49, 1, 79 },
- { 26, 1, 43 },
- { 12, 1, 20 }
- }
- }, { /* Inter */
- { /* Coeff Band 0 */
- { 30, 58, 227 },
- { 35, 10, 172 },
- { 24, 23, 112 }
- }, { /* Coeff Band 1 */
- { 117, 145, 219 },
- { 51, 221, 216 },
- { 75, 169, 196 },
- { 88, 96, 165 },
- { 77, 43, 117 },
- { 53, 18, 60 }
- }, { /* Coeff Band 2 */
- { 128, 176, 225 },
- { 108, 114, 202 },
- { 92, 19, 152 },
- { 65, 4, 103 },
- { 38, 1, 61 },
- { 19, 1, 30 }
- }, { /* Coeff Band 3 */
- { 146, 184, 228 },
- { 122, 95, 205 },
- { 92, 11, 149 },
- { 62, 1, 98 },
- { 35, 1, 57 },
- { 17, 1, 26 }
- }, { /* Coeff Band 4 */
- { 165, 192, 230 },
- { 132, 81, 206 },
- { 93, 6, 147 },
- { 58, 1, 94 },
- { 32, 1, 52 },
- { 15, 1, 24 }
- }, { /* Coeff Band 5 */
- { 204, 223, 234 },
- { 156, 49, 204 },
- { 97, 3, 145 },
- { 59, 1, 92 },
- { 33, 1, 52 },
- { 15, 1, 24 }
- }
- }
- }, { /* block Type 1 */
- { /* Intra */
- { /* Coeff Band 0 */
- { 7, 184, 200 },
- { 25, 67, 113 },
- { 30, 9, 59 }
- }, { /* Coeff Band 1 */
- { 92, 42, 158 },
- { 65, 121, 159 },
- { 77, 56, 146 },
- { 70, 22, 120 },
- { 47, 4, 76 },
- { 18, 1, 26 }
- }, { /* Coeff Band 2 */
- { 113, 81, 177 },
- { 96, 75, 167 },
- { 84, 24, 136 },
- { 63, 8, 100 },
- { 37, 1, 58 },
- { 13, 1, 19 }
- }, { /* Coeff Band 3 */
- { 147, 85, 194 },
- { 119, 36, 178 },
- { 88, 8, 139 },
- { 59, 1, 93 },
- { 31, 1, 49 },
- { 10, 1, 18 }
- }, { /* Coeff Band 4 */
- { 169, 108, 210 },
- { 131, 41, 191 },
- { 92, 5, 144 },
- { 56, 1, 88 },
- { 29, 1, 47 },
- { 14, 1, 22 }
- }, { /* Coeff Band 5 */
- { 210, 106, 223 },
- { 148, 14, 192 },
- { 89, 2, 138 },
- { 52, 1, 84 },
- { 29, 1, 47 },
- { 14, 1, 23 }
- }
- }, { /* Inter */
- { /* Coeff Band 0 */
- { 3, 207, 245 },
- { 12, 102, 213 },
- { 18, 33, 144 }
- }, { /* Coeff Band 1 */
- { 85, 205, 245 },
- { 18, 249, 242 },
- { 59, 221, 229 },
- { 91, 166, 213 },
- { 88, 117, 183 },
- { 70, 95, 149 }
- }, { /* Coeff Band 2 */
- { 114, 193, 241 },
- { 104, 155, 221 },
- { 100, 33, 181 },
- { 78, 10, 132 },
- { 43, 2, 75 },
- { 15, 1, 48 }
- }, { /* Coeff Band 3 */
- { 118, 198, 244 },
- { 117, 142, 224 },
- { 111, 25, 179 },
- { 83, 4, 134 },
- { 57, 1, 84 },
- { 1, 1, 1 }
- }, { /* Coeff Band 4 */
- { 144, 201, 248 },
- { 136, 130, 234 },
- { 124, 12, 188 },
- { 83, 1, 130 },
- { 61, 1, 66 },
- { 64, 171, 128 }
- }, { /* Coeff Band 5 */
- { 174, 227, 250 },
- { 165, 118, 242 },
- { 132, 21, 197 },
- { 84, 3, 134 },
- { 70, 1, 69 },
- { 1, 1, 1 }
- }
- }
- }
-};
-#else
static const vp9_coeff_probs_model default_coef_probs_4x4[BLOCK_TYPES] = {
{ /* block Type 0 */
{ /* Intra */
}
}
};
-#endif
+
const vp9_tree_index vp9_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
{
-#if CONFIG_BALANCED_COEFTREE
- -ZERO_TOKEN, 2, /* 0 = ZERO */
- -DCT_EOB_TOKEN, 4, /* 1 = EOB */
-#else
-DCT_EOB_TOKEN, 2, /* 0 = EOB */
-ZERO_TOKEN, 4, /* 1 = ZERO */
-#endif
-ONE_TOKEN, 6, /* 2 = ONE */
8, 12, /* 3 = LOW_VAL */
-TWO_TOKEN, 10, /* 4 = TWO */
};
const vp9_tree_index vp9_coefmodel_tree[6] = {
-#if CONFIG_BALANCED_COEFTREE
- -ZERO_TOKEN, 2,
- -DCT_EOB_MODEL_TOKEN, 4,
-#else
-DCT_EOB_MODEL_TOKEN, 2, /* 0 = EOB */
-ZERO_TOKEN, 4, /* 1 = ZERO */
-#endif
-ONE_TOKEN, -TWO_TOKEN,
};
#include "vp9/common/vp9_default_coef_probs.h"
void vp9_default_coef_probs(VP9_COMMON *pc) {
- vpx_memcpy(pc->fc.coef_probs[TX_4X4], default_coef_probs_4x4,
- sizeof(pc->fc.coef_probs[TX_4X4]));
- vpx_memcpy(pc->fc.coef_probs[TX_8X8], default_coef_probs_8x8,
- sizeof(pc->fc.coef_probs[TX_8X8]));
- vpx_memcpy(pc->fc.coef_probs[TX_16X16], default_coef_probs_16x16,
- sizeof(pc->fc.coef_probs[TX_16X16]));
- vpx_memcpy(pc->fc.coef_probs[TX_32X32], default_coef_probs_32x32,
- sizeof(pc->fc.coef_probs[TX_32X32]));
+ vp9_copy(pc->fc.coef_probs[TX_4X4], default_coef_probs_4x4);
+ vp9_copy(pc->fc.coef_probs[TX_8X8], default_coef_probs_8x8);
+ vp9_copy(pc->fc.coef_probs[TX_16X16], default_coef_probs_16x16);
+ vp9_copy(pc->fc.coef_probs[TX_32X32], default_coef_probs_32x32);
}
// Neighborhood 5-tuples for various scans and blocksizes,
#define COEF_MAX_UPDATE_FACTOR_AFTER_KEY 128
static void adapt_coef_probs(VP9_COMMON *cm, TX_SIZE txfm_size,
- int count_sat, int update_factor) {
+ unsigned int count_sat,
+ unsigned int update_factor) {
FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
vp9_coeff_probs_model *dst_coef_probs = cm->fc.coef_probs[txfm_size];
vp9_coeff_count_model *coef_counts = cm->counts.coef[txfm_size];
unsigned int (*eob_branch_count)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
cm->counts.eob_branch[txfm_size];
- int t, i, j, k, l, count;
- int factor;
+ int t, i, j, k, l;
unsigned int branch_ct[UNCONSTRAINED_NODES][2];
vp9_prob coef_probs[UNCONSTRAINED_NODES];
int entropy_nodes_adapt = UNCONSTRAINED_NODES;
for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
if (l >= 3 && k == 0)
continue;
- vp9_tree_probs_from_distribution(
- vp9_coefmodel_tree,
- coef_probs, branch_ct,
- coef_counts[i][j][k][l], 0);
-#if CONFIG_BALANCED_COEFTREE
- branch_ct[1][1] = eob_branch_count[i][j][k][l] - branch_ct[1][0];
- coef_probs[1] = get_binary_prob(branch_ct[1][0], branch_ct[1][1]);
-#else
+ vp9_tree_probs_from_distribution(vp9_coefmodel_tree, coef_probs,
+ branch_ct, coef_counts[i][j][k][l],
+ 0);
branch_ct[0][1] = eob_branch_count[i][j][k][l] - branch_ct[0][0];
coef_probs[0] = get_binary_prob(branch_ct[0][0], branch_ct[0][1]);
-#endif
- for (t = 0; t < entropy_nodes_adapt; ++t) {
- count = branch_ct[t][0] + branch_ct[t][1];
- count = count > count_sat ? count_sat : count;
- factor = (update_factor * count / count_sat);
- dst_coef_probs[i][j][k][l][t] =
- weighted_prob(pre_coef_probs[i][j][k][l][t],
- coef_probs[t], factor);
- }
+ for (t = 0; t < entropy_nodes_adapt; ++t)
+ dst_coef_probs[i][j][k][l][t] = merge_probs(
+ pre_coef_probs[i][j][k][l][t], coef_probs[t],
+ branch_ct[t], count_sat, update_factor);
}
}
void vp9_adapt_coef_probs(VP9_COMMON *cm) {
TX_SIZE t;
- int count_sat;
- int update_factor; /* denominator 256 */
+ unsigned int count_sat, update_factor;
- if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
+ if (cm->frame_type == KEY_FRAME || cm->intra_only) {
update_factor = COEF_MAX_UPDATE_FACTOR_KEY;
count_sat = COEF_COUNT_SAT_KEY;
} else if (cm->last_frame_type == KEY_FRAME) {
coefficient band (and since zigzag positions 0, 1, and 2 are in
distinct bands). */
-/*# define DC_TOKEN_CONTEXTS 3*/ /* 00, 0!0, !0!0 */
#define PREV_COEF_CONTEXTS 6
// #define ENTROPY_STATS
vp9_inter_mode_tree, NEARESTMV);
}
-void vp9_init_mode_contexts(VP9_COMMON *pc) {
- vp9_zero(pc->counts.inter_mode);
- vp9_copy(pc->fc.inter_mode_probs, default_inter_mode_probs);
-}
-
void vp9_accum_mv_refs(VP9_COMMON *pc,
MB_PREDICTION_MODE m,
const int context) {
#define COUNT_SAT 20
#define MAX_UPDATE_FACTOR 128
-static int update_ct(vp9_prob pre_prob, vp9_prob prob,
- unsigned int ct[2]) {
- const int count = MIN(ct[0] + ct[1], COUNT_SAT);
- const int factor = MAX_UPDATE_FACTOR * count / COUNT_SAT;
- return weighted_prob(pre_prob, prob, factor);
+static int update_ct(vp9_prob pre_prob, vp9_prob prob, unsigned int ct[2]) {
+ return merge_probs(pre_prob, prob, ct, COUNT_SAT, MAX_UPDATE_FACTOR);
}
static int update_ct2(vp9_prob pre_prob, unsigned int ct[2]) {
- return update_ct(pre_prob, get_binary_prob(ct[0], ct[1]), ct);
-}
-
-void vp9_adapt_mode_context(VP9_COMMON *pc) {
- int i, j;
- FRAME_CONTEXT *const fc = &pc->fc;
- FRAME_CONTEXT *const pre_fc = &pc->frame_contexts[pc->frame_context_idx];
- FRAME_COUNTS *const counts = &pc->counts;
-
- for (j = 0; j < INTER_MODE_CONTEXTS; j++)
- for (i = 0; i < VP9_INTER_MODES - 1; i++)
- fc->inter_mode_probs[j][i] = update_ct2(pre_fc->inter_mode_probs[j][i],
- counts->inter_mode[j][i]);
+ return merge_probs2(pre_prob, ct, COUNT_SAT, MAX_UPDATE_FACTOR);
}
static void update_mode_probs(int n_modes,
fc->single_ref_prob[i][j] = update_ct2(pre_fc->single_ref_prob[i][j],
counts->single_ref[i][j]);
+ for (j = 0; j < INTER_MODE_CONTEXTS; j++)
+ for (i = 0; i < VP9_INTER_MODES - 1; i++)
+ fc->inter_mode_probs[j][i] = update_ct2(pre_fc->inter_mode_probs[j][i],
+ counts->inter_mode[j][i]);
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
update_mode_probs(VP9_INTRA_MODES, vp9_intra_mode_tree,
counts->y_mode[i], pre_fc->y_mode_prob[i],
vp9_zero(xd->lf.last_mode_deltas);
set_default_lf_deltas(xd);
- vp9_default_coef_probs(cm);
- vp9_init_mbmode_probs(cm);
-
- vp9_init_mv_probs(cm);
-
// To force update of the sharpness
xd->lf.last_sharpness_level = -1;
- vp9_init_mode_contexts(cm);
+ vp9_default_coef_probs(cm);
+ vp9_init_mbmode_probs(cm);
+ vp9_init_mv_probs(cm);
+ vp9_copy(cm->fc.inter_mode_probs, default_inter_mode_probs);
if (cm->frame_type == KEY_FRAME ||
cm->error_resilient_mode || cm->reset_frame_context == 3) {
void vp9_entropy_mode_init();
-int vp9_mv_cont(const int_mv *l, const int_mv *a);
-
void vp9_setup_past_independence(struct VP9Common *cm, MACROBLOCKD *xd);
void vp9_init_mbmode_probs(struct VP9Common *x);
-void vp9_init_mode_contexts(struct VP9Common *pc);
-
-void vp9_adapt_mode_context(struct VP9Common *pc);
-
void vp9_adapt_mode_probs(struct VP9Common *);
-void vp9_accum_mv_refs(struct VP9Common *pc, MB_PREDICTION_MODE m, int context);
+void vp9_accum_mv_refs(struct VP9Common *pc,
+ MB_PREDICTION_MODE m,
+ const int context);
void tx_counts_to_branch_counts_32x32(unsigned int *tx_count_32x32p,
unsigned int (*ct_32x32p)[2]);
#define MV_MAX_UPDATE_FACTOR 128
/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
-#define COMPANDED_MVREF_THRESH 8
+#define COMPANDED_MVREF_THRESH 8
const vp9_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2] = {
-MV_JOINT_ZERO, 2,
return mv_class_base(c) + offset;
}
-static void inc_mv_component_count(int v, nmv_component_counts *comp_counts,
- int incr) {
- assert (v != 0);
- comp_counts->mvcount[MV_MAX + v] += incr;
-}
-
static void inc_mv_component(int v, nmv_component_counts *comp_counts,
int incr, int usehp) {
int s, z, c, o, d, e, f;
}
}
-void vp9_inc_mv(const MV *mv, nmv_context_counts *mvctx) {
+void vp9_inc_mv(const MV *mv, nmv_context_counts *counts) {
const MV_JOINT_TYPE j = vp9_get_mv_joint(mv);
- mvctx->joints[j]++;
+ ++counts->joints[j];
+
if (mv_joint_vertical(j))
- inc_mv_component_count(mv->row, &mvctx->comps[0], 1);
+ ++counts->comps[0].mvcount[MV_MAX + mv->row];
if (mv_joint_horizontal(j))
- inc_mv_component_count(mv->col, &mvctx->comps[1], 1);
+ ++counts->comps[1].mvcount[MV_MAX + mv->col];
}
-static void adapt_prob(vp9_prob *dest, vp9_prob prep, unsigned int ct[2]) {
- const int count = MIN(ct[0] + ct[1], MV_COUNT_SAT);
- if (count) {
- const vp9_prob newp = get_binary_prob(ct[0], ct[1]);
- const int factor = MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT;
- *dest = weighted_prob(prep, newp, factor);
- } else {
- *dest = prep;
- }
+static vp9_prob adapt_prob(vp9_prob prep, const unsigned int ct[2]) {
+ return merge_probs2(prep, ct, MV_COUNT_SAT, MV_MAX_UPDATE_FACTOR);
}
void vp9_counts_process(nmv_context_counts *nmv_count, int usehp) {
vp9_prob this_probs[],
const vp9_prob last_probs[],
const unsigned int num_events[]) {
- vp9_prob this_prob;
- const uint32_t left = tree[i] <= 0
+
+ const unsigned int left = tree[i] <= 0
? num_events[-tree[i]]
: adapt_probs(tree[i], tree, this_probs, last_probs, num_events);
- const uint32_t right = tree[i + 1] <= 0
+ const unsigned int right = tree[i + 1] <= 0
? num_events[-tree[i + 1]]
: adapt_probs(tree[i + 1], tree, this_probs, last_probs, num_events);
-
- uint32_t weight = left + right;
- if (weight) {
- this_prob = get_binary_prob(left, right);
- weight = weight > MV_COUNT_SAT ? MV_COUNT_SAT : weight;
- this_prob = weighted_prob(last_probs[i >> 1], this_prob,
- MV_MAX_UPDATE_FACTOR * weight / MV_COUNT_SAT);
- } else {
- this_prob = last_probs[i >> 1];
- }
- this_probs[i >> 1] = this_prob;
+ const unsigned int ct[2] = { left, right };
+ this_probs[i >> 1] = adapt_prob(last_probs[i >> 1], ct);
return left + right;
}
adapt_probs(0, vp9_mv_joint_tree, ctx->joints, pre_ctx->joints, cts->joints);
for (i = 0; i < 2; ++i) {
- adapt_prob(&ctx->comps[i].sign, pre_ctx->comps[i].sign, cts->comps[i].sign);
+ ctx->comps[i].sign = adapt_prob(pre_ctx->comps[i].sign, cts->comps[i].sign);
adapt_probs(0, vp9_mv_class_tree, ctx->comps[i].classes,
pre_ctx->comps[i].classes, cts->comps[i].classes);
adapt_probs(0, vp9_mv_class0_tree, ctx->comps[i].class0,
pre_ctx->comps[i].class0, cts->comps[i].class0);
for (j = 0; j < MV_OFFSET_BITS; ++j)
- adapt_prob(&ctx->comps[i].bits[j], pre_ctx->comps[i].bits[j],
- cts->comps[i].bits[j]);
- }
+ ctx->comps[i].bits[j] = adapt_prob(pre_ctx->comps[i].bits[j],
+ cts->comps[i].bits[j]);
- for (i = 0; i < 2; ++i) {
for (j = 0; j < CLASS0_SIZE; ++j)
adapt_probs(0, vp9_mv_fp_tree, ctx->comps[i].class0_fp[j],
pre_ctx->comps[i].class0_fp[j], cts->comps[i].class0_fp[j]);
adapt_probs(0, vp9_mv_fp_tree, ctx->comps[i].fp, pre_ctx->comps[i].fp,
cts->comps[i].fp);
- }
- if (usehp) {
- for (i = 0; i < 2; ++i) {
- adapt_prob(&ctx->comps[i].class0_hp, pre_ctx->comps[i].class0_hp,
- cts->comps[i].class0_hp);
- adapt_prob(&ctx->comps[i].hp, pre_ctx->comps[i].hp, cts->comps[i].hp);
+ if (usehp) {
+ ctx->comps[i].class0_hp = adapt_prob(pre_ctx->comps[i].class0_hp,
+ cts->comps[i].class0_hp);
+ ctx->comps[i].hp = adapt_prob(pre_ctx->comps[i].hp, cts->comps[i].hp);
}
}
}
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "vp9/common/vp9_extend.h"
#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_extend.h"
+
static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
uint8_t *dst, int dst_pitch,
int w, int h,
const int src_y_offset = srcy * src->y_stride + srcx;
const int dst_y_offset = srcy * dst->y_stride + srcx;
- const int et_uv = (et_y + 1) >> 1;
- const int el_uv = (el_y + 1) >> 1;
- const int eb_uv = (eb_y + 1) >> 1;
- const int er_uv = (er_y + 1) >> 1;
+ const int et_uv = ROUND_POWER_OF_TWO(et_y, 1);
+ const int el_uv = ROUND_POWER_OF_TWO(el_y, 1);
+ const int eb_uv = ROUND_POWER_OF_TWO(eb_y, 1);
+ const int er_uv = ROUND_POWER_OF_TWO(er_y, 1);
const int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
const int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
- const int srch_uv = (srch + 1) >> 1;
- const int srcw_uv = (srcw + 1) >> 1;
+ const int srch_uv = ROUND_POWER_OF_TWO(srch, 1);
+ const int srcw_uv = ROUND_POWER_OF_TWO(srcw, 1);
copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
dst->y_buffer + dst_y_offset, dst->y_stride,
}
}
-void vp9_dc_only_idct_add_c(int input_dc, uint8_t *pred_ptr,
- uint8_t *dst_ptr, int pitch, int stride) {
- int a1;
- int r, c;
- int16_t out = dct_const_round_shift(input_dc * cospi_16_64);
- out = dct_const_round_shift(out * cospi_16_64);
- a1 = ROUND_POWER_OF_TWO(out, 4);
-
- for (r = 0; r < 4; r++) {
- for (c = 0; c < 4; c++)
- dst_ptr[c] = clip_pixel(a1 + pred_ptr[c]);
-
- dst_ptr += stride;
- pred_ptr += pitch;
- }
-}
-
static void idct8_1d(int16_t *input, int16_t *output) {
int16_t step1[8], step2[8];
int temp1, temp2;
}
}
+void vp9_short_idct8x8_1_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
+ int i, j;
+ int a1;
+ int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ a1 = ROUND_POWER_OF_TWO(out, 5);
+ for (j = 0; j < 8; ++j) {
+ for (i = 0; i < 8; ++i)
+ dest[i] = clip_pixel(dest[i] + a1);
+ dest += dest_stride;
+ }
+}
+
static void iadst4_1d(int16_t *input, int16_t *output) {
int s0, s1, s2, s3, s4, s5, s6, s7;
}
}
-void vp9_short_idct1_8x8_c(int16_t *input, int16_t *output) {
- int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
- out = dct_const_round_shift(out * cospi_16_64);
- output[0] = ROUND_POWER_OF_TWO(out, 5);
-}
-
static void idct16_1d(int16_t *input, int16_t *output) {
int16_t step1[16], step2[16];
int temp1, temp2;
MV_REFERENCE_FRAME c2_ref_frame;
int candidate_scores[MAX_MV_REF_CANDIDATES] = { 0 };
int refmv_count = 0;
- int split_count = 0;
const int (*mv_ref_search)[2] = mv_ref_blocks[mbmi->sb_type];
const int mi_col = get_mi_col(xd);
const int mi_row = get_mi_row(xd);
add_candidate_mv(mv_ref_list, candidate_scores,
&refmv_count, c_refmv, 16);
}
- split_count += (candidate_mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 &&
- candidate_mi->mbmi.ref_frame[0] != INTRA_FRAME);
      // Count number of neighbours coded intra and zeromv
intra_count += (candidate_mi->mbmi.mode < NEARESTMV);
const int ref = cm->active_ref_idx[i];
struct scale_factors *const sf = &cm->active_ref_scale[i];
if (ref >= NUM_YV12_BUFFERS) {
- memset(sf, 0, sizeof(*sf));
+ vp9_zero(*sf);
} else {
YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
vp9_setup_scale_factors_for_frame(sf,
# dct
#
prototype void vp9_short_idct4x4_1_add "int16_t *input, uint8_t *dest, int dest_stride"
-specialize vp9_short_idct4x4_1_add
+specialize vp9_short_idct4x4_1_add sse2
prototype void vp9_short_idct4x4_add "int16_t *input, uint8_t *dest, int dest_stride"
specialize vp9_short_idct4x4_add sse2
+prototype void vp9_short_idct8x8_1_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct8x8_1_add sse2
+
prototype void vp9_short_idct8x8_add "int16_t *input, uint8_t *dest, int dest_stride"
specialize vp9_short_idct8x8_add sse2 neon
prototype void vp9_short_idct10_8x8_add "int16_t *input, uint8_t *dest, int dest_stride"
specialize vp9_short_idct10_8x8_add sse2
-prototype void vp9_short_idct1_8x8 "int16_t *input, int16_t *output"
-specialize vp9_short_idct1_8x8
-
prototype void vp9_short_idct16x16_add "int16_t *input, uint8_t *dest, int dest_stride"
specialize vp9_short_idct16x16_add sse2
specialize vp9_idct4_1d sse2
# dct and add
-prototype void vp9_dc_only_idct_add "int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride"
-specialize vp9_dc_only_idct_add sse2 neon
-
prototype void vp9_short_iwalsh4x4_1_add "int16_t *input, uint8_t *dest, int dest_stride"
specialize vp9_short_iwalsh4x4_1_add
return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8);
}
+static INLINE vp9_prob merge_probs(vp9_prob pre_prob, vp9_prob prob,
+ const unsigned int ct[2],
+ unsigned int count_sat,
+ unsigned int max_update_factor) {
+ const unsigned int count = MIN(ct[0] + ct[1], count_sat);
+ const unsigned int factor = max_update_factor * count / count_sat;
+ return weighted_prob(pre_prob, prob, factor);
+}
+
+static INLINE vp9_prob merge_probs2(vp9_prob pre_prob,
+ const unsigned int ct[2],
+ unsigned int count_sat,
+ unsigned int max_update_factor) {
+ return merge_probs(pre_prob, get_binary_prob(ct[0], ct[1]), ct, count_sat,
+ max_update_factor);
+}
+
+
#endif // VP9_COMMON_VP9_TREECODER_H_
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_idct.h"
-// In order to improve performance, clip absolute diff values to [0, 255],
-// which allows to keep the additions/subtractions in 8 bits.
-void vp9_dc_only_idct_add_sse2(int input_dc, uint8_t *pred_ptr,
- uint8_t *dst_ptr, int pitch, int stride) {
- int a1;
- int16_t out;
- uint8_t abs_diff;
- __m128i p0, p1, p2, p3;
- unsigned int extended_diff;
- __m128i diff;
-
- out = dct_const_round_shift(input_dc * cospi_16_64);
- out = dct_const_round_shift(out * cospi_16_64);
- a1 = ROUND_POWER_OF_TWO(out, 4);
-
- // Read prediction data.
- p0 = _mm_cvtsi32_si128 (*(const int *)(pred_ptr + 0 * pitch));
- p1 = _mm_cvtsi32_si128 (*(const int *)(pred_ptr + 1 * pitch));
- p2 = _mm_cvtsi32_si128 (*(const int *)(pred_ptr + 2 * pitch));
- p3 = _mm_cvtsi32_si128 (*(const int *)(pred_ptr + 3 * pitch));
-
- // Unpack prediction data, and store 4x4 array in 1 XMM register.
- p0 = _mm_unpacklo_epi32(p0, p1);
- p2 = _mm_unpacklo_epi32(p2, p3);
- p0 = _mm_unpacklo_epi64(p0, p2);
-
- // Clip dc value to [0, 255] range. Then, do addition or subtraction
- // according to its sign.
- if (a1 >= 0) {
- abs_diff = (a1 > 255) ? 255 : a1;
- extended_diff = abs_diff * 0x01010101u;
- diff = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_diff), 0);
-
- p1 = _mm_adds_epu8(p0, diff);
- } else {
- abs_diff = (a1 < -255) ? 255 : -a1;
- extended_diff = abs_diff * 0x01010101u;
- diff = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_diff), 0);
-
- p1 = _mm_subs_epu8(p0, diff);
- }
-
- // Store results to dst.
- *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
- dst_ptr += stride;
-
- p1 = _mm_srli_si128(p1, 4);
- *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
- dst_ptr += stride;
-
- p1 = _mm_srli_si128(p1, 4);
- *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
- dst_ptr += stride;
-
- p1 = _mm_srli_si128(p1, 4);
- *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
-}
-
void vp9_short_idct4x4_add_sse2(int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i eight = _mm_set1_epi16(8);
RECON_AND_STORE4X4(dest, input3);
}
+void vp9_short_idct4x4_1_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ __m128i dc_value;
+ const __m128i zero = _mm_setzero_si128();
+ int a;
+
+ a = dct_const_round_shift(input[0] * cospi_16_64);
+ a = dct_const_round_shift(a * cospi_16_64);
+ a = ROUND_POWER_OF_TWO(a, 4);
+
+ dc_value = _mm_set1_epi16(a);
+
+ RECON_AND_STORE4X4(dest, dc_value);
+ RECON_AND_STORE4X4(dest, dc_value);
+ RECON_AND_STORE4X4(dest, dc_value);
+ RECON_AND_STORE4X4(dest, dc_value);
+}
+
void vp9_idct4_1d_sse2(int16_t *input, int16_t *output) {
const __m128i zero = _mm_setzero_si128();
const __m128i c1 = _mm_setr_epi16((int16_t)cospi_16_64, (int16_t)cospi_16_64,
{ \
__m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
d0 = _mm_unpacklo_epi8(d0, zero); \
- in_x = _mm_add_epi16(in_x, d0); \
- in_x = _mm_packus_epi16(in_x, in_x); \
- _mm_storel_epi64((__m128i *)(dest), in_x); \
+ d0 = _mm_add_epi16(in_x, d0); \
+ d0 = _mm_packus_epi16(d0, d0); \
+ _mm_storel_epi64((__m128i *)(dest), d0); \
dest += stride; \
}
RECON_AND_STORE(dest, in7);
}
+void vp9_short_idct8x8_1_add_sse2(int16_t *input, uint8_t *dest, int stride) {
+ __m128i dc_value;
+ const __m128i zero = _mm_setzero_si128();
+ int a;
+
+ a = dct_const_round_shift(input[0] * cospi_16_64);
+ a = dct_const_round_shift(a * cospi_16_64);
+ a = ROUND_POWER_OF_TWO(a, 5);
+
+ dc_value = _mm_set1_epi16(a);
+
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+ RECON_AND_STORE(dest, dc_value);
+}
+
// perform 8x8 transpose
static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
return vp9_switchable_interp[index];
}
-static void read_intra_block_modes(VP9D_COMP *pbi, MODE_INFO *mi,
- vp9_reader *r) {
+static void read_intra_block_part(VP9D_COMP *pbi, MODE_INFO *mi,
+ vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE_TYPE bsize = mi->mbmi.sb_type;
- const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
if (bsize >= BLOCK_SIZE_SB8X8) {
- const int size_group = MIN(3, MIN(bwl, bhl));
+ const int size_group = size_group_lookup[bsize];
mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob[size_group]);
cm->counts.y_mode[size_group][mbmi->mode]++;
} else {
// Only 4x4, 4x8, 8x4 blocks
- const int bw = 1 << bwl, bh = 1 << bhl;
+ const int bw = 1 << b_width_log2(bsize), bh = 1 << b_height_log2(bsize);
int idx, idy;
for (idy = 0; idy < 2; idy += bh) {
return ref;
}
-static void read_inter_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
- int mi_row, int mi_col, vp9_reader *r) {
+static void read_inter_block_part(VP9D_COMP *pbi, MODE_INFO *mi,
+ vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
nmv_context *const nmvc = &cm->fc.nmvc;
MB_MODE_INFO *const mbmi = &mi->mbmi;
-
int_mv *const mv0 = &mbmi->mv[0];
int_mv *const mv1 = &mbmi->mv[1];
- const BLOCK_SIZE_TYPE bsize = mi->mbmi.sb_type;
+ const BLOCK_SIZE_TYPE bsize = mbmi->sb_type;
const int bw = 1 << b_width_log2(bsize);
const int bh = 1 << b_height_log2(bsize);
- int idx, idy;
+ int_mv nearest, nearby, best_mv;
+ int_mv nearest_second, nearby_second, best_mv_second;
+ vp9_prob *mv_ref_p;
+ MV_REFERENCE_FRAME ref0, ref1;
- mbmi->segment_id = read_inter_segment_id(pbi, mi_row, mi_col, r);
- mbmi->mb_skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r);
- mbmi->ref_frame[0] = read_reference_frame(pbi, mbmi->segment_id, r);
- mbmi->ref_frame[1] = NONE;
- mbmi->txfm_size = read_tx_size(pbi, cm->tx_mode, bsize,
- (!mbmi->mb_skip_coeff || mbmi->ref_frame[0] == INTRA_FRAME), r);
+ read_ref_frame(pbi, r, mbmi->segment_id, mbmi->ref_frame);
+ ref0 = mbmi->ref_frame[0];
+ ref1 = mbmi->ref_frame[1];
- if (mbmi->ref_frame[0] != INTRA_FRAME) {
- int_mv nearest, nearby, best_mv;
- int_mv nearest_second, nearby_second, best_mv_second;
- vp9_prob *mv_ref_p;
- MV_REFERENCE_FRAME ref0, ref1;
+ vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
+ ref0, mbmi->ref_mvs[ref0], cm->ref_frame_sign_bias);
- read_ref_frame(pbi, r, mbmi->segment_id, mbmi->ref_frame);
- ref0 = mbmi->ref_frame[0];
- ref1 = mbmi->ref_frame[1];
+ mv_ref_p = cm->fc.inter_mode_probs[mbmi->mb_mode_context[ref0]];
- vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
- ref0, mbmi->ref_mvs[ref0], cm->ref_frame_sign_bias);
+ if (vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ mbmi->mode = ZEROMV;
+ } else if (bsize >= BLOCK_SIZE_SB8X8) {
+ mbmi->mode = read_inter_mode(r, mv_ref_p);
+ vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref0]);
+ }
+ mbmi->uv_mode = DC_PRED;
- mv_ref_p = cm->fc.inter_mode_probs[mbmi->mb_mode_context[ref0]];
+ // nearest, nearby
+ if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
+ vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref0], &nearest, &nearby);
+ best_mv.as_int = mbmi->ref_mvs[ref0][0].as_int;
+ }
- if (vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- mbmi->mode = ZEROMV;
- } else if (bsize >= BLOCK_SIZE_SB8X8) {
- mbmi->mode = read_inter_mode(r, mv_ref_p);
- vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref0]);
- }
- mbmi->uv_mode = DC_PRED;
+ mbmi->interp_filter = cm->mcomp_filter_type == SWITCHABLE
+ ? read_switchable_filter_type(pbi, r)
+ : cm->mcomp_filter_type;
+
+ if (ref1 > INTRA_FRAME) {
+ vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
+ ref1, mbmi->ref_mvs[ref1], cm->ref_frame_sign_bias);
- // nearest, nearby
if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
- vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref0], &nearest, &nearby);
- best_mv.as_int = mbmi->ref_mvs[ref0][0].as_int;
+ vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref1],
+ &nearest_second, &nearby_second);
+ best_mv_second.as_int = mbmi->ref_mvs[ref1][0].as_int;
}
+ }
- mbmi->interp_filter = cm->mcomp_filter_type == SWITCHABLE
- ? read_switchable_filter_type(pbi, r)
- : cm->mcomp_filter_type;
-
- if (ref1 > INTRA_FRAME) {
- vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
- ref1, mbmi->ref_mvs[ref1], cm->ref_frame_sign_bias);
- if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
- vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref1],
- &nearest_second, &nearby_second);
- best_mv_second.as_int = mbmi->ref_mvs[ref1][0].as_int;
- }
- }
+ if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+ int idx, idy;
+ for (idy = 0; idy < 2; idy += bh) {
+ for (idx = 0; idx < 2; idx += bw) {
+ int_mv blockmv, secondmv;
+ const int j = idy * 2 + idx;
+ const int blockmode = read_inter_mode(r, mv_ref_p);
+ vp9_accum_mv_refs(cm, blockmode, mbmi->mb_mode_context[ref0]);
+ if (blockmode == NEARESTMV || blockmode == NEARMV) {
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0);
+ if (ref1 > 0)
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second,
+ &nearby_second, j, 1);
+ }
- if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
- for (idy = 0; idy < 2; idy += bh) {
- for (idx = 0; idx < 2; idx += bw) {
- int_mv blockmv, secondmv;
- const int j = idy * 2 + idx;
- const int blockmode = read_inter_mode(r, mv_ref_p);
+ switch (blockmode) {
+ case NEWMV:
+ read_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
+ &cm->counts.mv, xd->allow_high_precision_mv);
- vp9_accum_mv_refs(cm, blockmode, mbmi->mb_mode_context[ref0]);
- if (blockmode == NEARESTMV || blockmode == NEARMV) {
- vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0);
if (ref1 > 0)
- vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second,
- &nearby_second, j, 1);
- }
-
- switch (blockmode) {
- case NEWMV:
- read_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc,
+ read_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
&cm->counts.mv, xd->allow_high_precision_mv);
-
- if (ref1 > 0)
- read_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
- &cm->counts.mv, xd->allow_high_precision_mv);
- break;
- case NEARESTMV:
- blockmv.as_int = nearest.as_int;
- if (ref1 > 0)
- secondmv.as_int = nearest_second.as_int;
- break;
- case NEARMV:
- blockmv.as_int = nearby.as_int;
- if (ref1 > 0)
- secondmv.as_int = nearby_second.as_int;
- break;
- case ZEROMV:
- blockmv.as_int = 0;
- if (ref1 > 0)
- secondmv.as_int = 0;
- break;
- default:
- assert(!"Invalid inter mode value");
- }
- mi->bmi[j].as_mv[0].as_int = blockmv.as_int;
- if (ref1 > 0)
- mi->bmi[j].as_mv[1].as_int = secondmv.as_int;
-
- if (bh == 2)
- mi->bmi[j + 2] = mi->bmi[j];
- if (bw == 2)
- mi->bmi[j + 1] = mi->bmi[j];
- mi->mbmi.mode = blockmode;
+ break;
+ case NEARESTMV:
+ blockmv.as_int = nearest.as_int;
+ if (ref1 > 0)
+ secondmv.as_int = nearest_second.as_int;
+ break;
+ case NEARMV:
+ blockmv.as_int = nearby.as_int;
+ if (ref1 > 0)
+ secondmv.as_int = nearby_second.as_int;
+ break;
+ case ZEROMV:
+ blockmv.as_int = 0;
+ if (ref1 > 0)
+ secondmv.as_int = 0;
+ break;
+ default:
+ assert(!"Invalid inter mode value");
}
- }
-
- mv0->as_int = mi->bmi[3].as_mv[0].as_int;
- mv1->as_int = mi->bmi[3].as_mv[1].as_int;
- } else {
- const int mb_to_top_edge = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
- const int mb_to_bottom_edge = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
- const int mb_to_left_edge = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
- const int mb_to_right_edge = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
-
- switch (mbmi->mode) {
- case NEARMV:
- // Clip "next_nearest" so that it does not extend to far out of image
- assign_and_clamp_mv(mv0, &nearby, mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
- if (ref1 > 0)
- assign_and_clamp_mv(mv1, &nearby_second, mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
- break;
-
- case NEARESTMV:
- // Clip "next_nearest" so that it does not extend to far out of image
- assign_and_clamp_mv(mv0, &nearest, mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
- if (ref1 > 0)
- assign_and_clamp_mv(mv1, &nearest_second, mb_to_left_edge,
- mb_to_right_edge,
- mb_to_top_edge,
- mb_to_bottom_edge);
- break;
-
- case ZEROMV:
- mv0->as_int = 0;
- if (ref1 > 0)
- mv1->as_int = 0;
- break;
+ mi->bmi[j].as_mv[0].as_int = blockmv.as_int;
+ if (ref1 > 0)
+ mi->bmi[j].as_mv[1].as_int = secondmv.as_int;
- case NEWMV:
- read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv,
- xd->allow_high_precision_mv);
- if (ref1 > 0)
- read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc,
- &cm->counts.mv, xd->allow_high_precision_mv);
- break;
- default:
- assert(!"Invalid inter mode value");
+ if (bh == 2)
+ mi->bmi[j + 2] = mi->bmi[j];
+ if (bw == 2)
+ mi->bmi[j + 1] = mi->bmi[j];
+ mi->mbmi.mode = blockmode;
}
}
+
+ mv0->as_int = mi->bmi[3].as_mv[0].as_int;
+ mv1->as_int = mi->bmi[3].as_mv[1].as_int;
} else {
- mv0->as_int = 0; // required for left and above block mv
- read_intra_block_modes(pbi, mi, r);
+ const int mb_to_top_edge = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
+ const int mb_to_bottom_edge = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
+ const int mb_to_left_edge = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
+ const int mb_to_right_edge = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
+
+ switch (mbmi->mode) {
+ case NEARMV:
+      // Clip "next_nearest" so that it does not extend too far out of image
+ assign_and_clamp_mv(mv0, &nearby, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ if (ref1 > 0)
+ assign_and_clamp_mv(mv1, &nearby_second, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ break;
+
+ case NEARESTMV:
+      // Clip "next_nearest" so that it does not extend too far out of image
+ assign_and_clamp_mv(mv0, &nearest, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ if (ref1 > 0)
+ assign_and_clamp_mv(mv1, &nearest_second, mb_to_left_edge,
+ mb_to_right_edge,
+ mb_to_top_edge,
+ mb_to_bottom_edge);
+ break;
+
+ case ZEROMV:
+ mv0->as_int = 0;
+ if (ref1 > 0)
+ mv1->as_int = 0;
+ break;
+
+ case NEWMV:
+ read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv,
+ xd->allow_high_precision_mv);
+ if (ref1 > 0)
+ read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc,
+ &cm->counts.mv, xd->allow_high_precision_mv);
+ break;
+ default:
+ assert(!"Invalid inter mode value");
+ }
}
}
+static void read_inter_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
+ int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ int intra_block;
+
+ mbmi->segment_id = read_inter_segment_id(pbi, mi_row, mi_col, r);
+ mbmi->mb_skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r);
+ mbmi->ref_frame[0] = read_reference_frame(pbi, mbmi->segment_id, r);
+ mbmi->ref_frame[1] = NONE;
+ intra_block = mbmi->ref_frame[0] == INTRA_FRAME;
+ mbmi->txfm_size = read_tx_size(pbi, cm->tx_mode, mbmi->sb_type,
+ !mbmi->mb_skip_coeff || intra_block, r);
+ mbmi->mv[0].as_int = 0;
+ mbmi->mv[1].as_int = 0;
+
+ if (intra_block)
+ read_intra_block_part(pbi, mi, r);
+ else
+ read_inter_block_part(pbi, mi, r);
+}
+
static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) {
int i;
#define VP9_DECODER_VP9_DECODEMV_H_
#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
void vp9_prepare_read_mode_info(VP9D_COMP* pbi, vp9_reader *r);
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_dsubexp.h"
+#include "vp9/decoder/vp9_idct_blk.h"
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
+#include "vp9/decoder/vp9_treereader.h"
static int read_be32(const uint8_t *p) {
return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
struct macroblockd_plane *pd = &xd->plane[plane];
int16_t* const qcoeff = BLOCK_OFFSET(pd->qcoeff, block, 16);
const int stride = pd->dst.stride;
+ const int eob = pd->eobs[block];
const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
block, ss_txfrm_size);
uint8_t* const dst = raster_block_offset_uint8(xd, bsize, plane,
raster_block,
pd->dst.buf, stride);
- TX_TYPE tx_type;
-
switch (ss_txfrm_size / 2) {
- case TX_4X4:
- tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
+ case TX_4X4: {
+ const TX_TYPE tx_type = get_tx_type_4x4(pd->plane_type, xd, raster_block);
if (tx_type == DCT_DCT)
- xd->itxm_add(qcoeff, dst, stride, pd->eobs[block]);
+ xd->itxm_add(qcoeff, dst, stride, eob);
else
- vp9_iht_add_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
+ vp9_iht_add_c(tx_type, qcoeff, dst, stride, eob);
break;
+ }
case TX_8X8:
- tx_type = plane == 0 ? get_tx_type_8x8(xd) : DCT_DCT;
- vp9_iht_add_8x8_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
+ vp9_iht_add_8x8_c(get_tx_type_8x8(pd->plane_type, xd), qcoeff, dst,
+ stride, eob);
break;
case TX_16X16:
- tx_type = plane == 0 ? get_tx_type_16x16(xd) : DCT_DCT;
- vp9_iht_add_16x16_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
+ vp9_iht_add_16x16_c(get_tx_type_16x16(pd->plane_type, xd), qcoeff, dst,
+ stride, eob);
break;
case TX_32X32:
- vp9_idct_add_32x32(qcoeff, dst, stride, pd->eobs[block]);
+ vp9_idct_add_32x32(qcoeff, dst, stride, eob);
break;
}
}
if (!keyframe && !pc->intra_only) {
vp9_adapt_mode_probs(pc);
- vp9_adapt_mode_context(pc);
vp9_adapt_mv_probs(pc, xd->allow_high_precision_mv);
}
}
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_seg_common.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_treereader.h"
-#if CONFIG_BALANCED_COEFTREE
-#define ZERO_CONTEXT_NODE 0
-#define EOB_CONTEXT_NODE 1
-#else
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
-#endif
-
#define ONE_CONTEXT_NODE 2
#define LOW_VAL_CONTEXT_NODE 3
#define TWO_CONTEXT_NODE 4
FRAME_CONTEXT *const fc = &cm->fc;
FRAME_COUNTS *const counts = &cm->counts;
ENTROPY_CONTEXT above_ec, left_ec;
- int pt, c = 0;
- int band;
- vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES];
+ const int ref = xd->mode_info_context->mbmi.ref_frame[0] != INTRA_FRAME;
+ int band, pt, c = 0;
+ vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES] =
+ fc->coef_probs[txfm_size][type][ref];
vp9_prob coef_probs_full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
- uint8_t load_map[COEF_BANDS][PREV_COEF_CONTEXTS] = {
- {0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0},
- };
-
+ uint8_t load_map[COEF_BANDS][PREV_COEF_CONTEXTS] = { { 0 } };
vp9_prob *prob;
- vp9_coeff_count_model *coef_counts;
- const int ref = xd->mode_info_context->mbmi.ref_frame[0] != INTRA_FRAME;
+ vp9_coeff_count_model *coef_counts = counts->coef[txfm_size];
const int16_t *scan, *nb;
uint8_t token_cache[1024];
const uint8_t * band_translate;
-#if CONFIG_BALANCED_COEFTREE
- int skip_eob_node = 0;
-#endif
- coef_probs = fc->coef_probs[txfm_size][type][ref];
- coef_counts = counts->coef[txfm_size];
switch (txfm_size) {
default:
case TX_4X4: {
- const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ?
- get_tx_type_4x4(xd, block_idx) : DCT_DCT;
- scan = get_scan_4x4(tx_type);
+ scan = get_scan_4x4(get_tx_type_4x4(type, xd, block_idx));
above_ec = A[0] != 0;
left_ec = L[0] != 0;
band_translate = vp9_coefband_trans_4x4;
break;
}
case TX_8X8: {
- const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ?
- get_tx_type_8x8(xd) : DCT_DCT;
- scan = get_scan_8x8(tx_type);
+ scan = get_scan_8x8(get_tx_type_8x8(type, xd));
above_ec = (A[0] + A[1]) != 0;
left_ec = (L[0] + L[1]) != 0;
band_translate = vp9_coefband_trans_8x8plus;
break;
}
case TX_16X16: {
- const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ?
- get_tx_type_16x16(xd) : DCT_DCT;
- scan = get_scan_16x16(tx_type);
+ scan = get_scan_16x16(get_tx_type_16x16(type, xd));
above_ec = (A[0] + A[1] + A[2] + A[3]) != 0;
left_ec = (L[0] + L[1] + L[2] + L[3]) != 0;
band_translate = vp9_coefband_trans_8x8plus;
pt = get_coef_context(nb, token_cache, c);
band = get_coef_band(band_translate, c);
prob = coef_probs[band][pt];
-#if !CONFIG_BALANCED_COEFTREE
counts->eob_branch[txfm_size][type][ref][band][pt]++;
if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
break;
SKIP_START:
-#endif
if (c >= seg_eob)
break;
if (c)
if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
INCREMENT_COUNT(ZERO_TOKEN);
++c;
-#if CONFIG_BALANCED_COEFTREE
- skip_eob_node = 1;
- continue;
-#else
goto SKIP_START;
-#endif
- }
-#if CONFIG_BALANCED_COEFTREE
- if (!skip_eob_node) {
- fc->eob_branch_counts[txfm_size][type][ref][band][pt]++;
- if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
- break;
}
- skip_eob_node = 0;
-#endif
// ONE_CONTEXT_NODE_0_
if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
#define VP9_DECODER_VP9_DETOKENIZE_H_
#include "vp9/decoder/vp9_onyxd_int.h"
+#include "vp9/decoder/vp9_dboolhuff.h"
int vp9_decode_tokens(VP9D_COMP* pbi, vp9_reader *r, BLOCK_SIZE_TYPE bsize);
vp9_short_idct4x4_add(input, dest, stride);
vpx_memset(input, 0, 32);
} else {
- vp9_dc_only_idct_add(input[0], dest, dest, stride, stride);
+ vp9_short_idct4x4_1_add(input, dest, stride);
((int *)input)[0] = 0;
}
}
if (eob) {
if (eob == 1) {
// DC only DCT coefficient
- int16_t in = input[0];
- int16_t out;
-
- // Note: the idct1 will need to be modified accordingly whenever
- // vp9_short_idct8x8_c() is modified.
- vp9_short_idct1_8x8_c(&in, &out);
+ vp9_short_idct8x8_1_add(input, dest, stride);
input[0] = 0;
-
- vp9_add_constant_residual_8x8(out, dest, stride);
} else {
vp9_short_idct8x8_add(input, dest, stride);
vpx_memset(input, 0, 128);
* be found in the AUTHORS file in the root of the source tree.
*/
-
-#include <stdio.h>
#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
#include "vp9/common/vp9_onyxc_int.h"
#if CONFIG_POSTPROC
if (!pbi)
return NULL;
- vpx_memset(pbi, 0, sizeof(VP9D_COMP));
+ vp9_zero(*pbi);
if (setjmp(pbi->common.error.jmp)) {
pbi->common.error.setjmp = 0;
#include "vp9/common/vp9_onyxc_int.h"
-#include "vp9/decoder/vp9_idct_blk.h"
#include "vp9/decoder/vp9_onyxd.h"
-#include "vp9/decoder/vp9_treereader.h"
typedef struct VP9Decompressor {
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
#include "vp9/common/vp9_treecoder.h"
#include "vp9/decoder/vp9_dboolhuff.h"
-#define vp9_read_prob(r) ((vp9_prob)vp9_read_literal(r, 8))
#define vp9_read_and_apply_sign(r, value) (vp9_read_bit(r) ? -(value) : (value))
// Intent of tree data structure is to make decoding trivial.
assert(pp != 0);
/* skip one or two nodes */
-#if !CONFIG_BALANCED_COEFTREE
if (p->skip_eob_node) {
n -= p->skip_eob_node;
i = 2 * p->skip_eob_node;
}
-#endif
do {
const int bb = (v >> --n) & 1;
-#if CONFIG_BALANCED_COEFTREE
- if (i == 2 && p->skip_eob_node) {
- i += 2;
- assert(bb == 1);
- continue;
- }
-#endif
vp9_write(bc, bb, pp[i >> 1]);
i = vp9_coef_tree[i + bb];
} while (n);
const MB_PREDICTION_MODE mode = mi->mode;
const int segment_id = mi->segment_id;
int skip_coeff;
+ const BLOCK_SIZE_TYPE bsize = mi->sb_type;
- xd->prev_mode_info_context = pc->prev_mi + (m - pc->mi);
x->partition_info = x->pi + (m - pc->mi);
#ifdef ENTROPY_STATS
if (seg->update_map) {
if (seg->temporal_update) {
- const int pred_flag = xd->mode_info_context->mbmi.seg_id_predicted;
+ const int pred_flag = mi->seg_id_predicted;
vp9_prob pred_prob = vp9_get_pred_prob_seg_id(xd);
vp9_write(bc, pred_flag, pred_prob);
if (!pred_flag)
vp9_write(bc, rf != INTRA_FRAME,
vp9_get_pred_prob_intra_inter(pc, xd));
- if (mi->sb_type >= BLOCK_SIZE_SB8X8 && pc->tx_mode == TX_MODE_SELECT &&
+ if (bsize >= BLOCK_SIZE_SB8X8 && pc->tx_mode == TX_MODE_SELECT &&
!(rf != INTRA_FRAME &&
(skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
- write_selected_txfm_size(cpi, mi->txfm_size, mi->sb_type, bc);
+ write_selected_txfm_size(cpi, mi->txfm_size, bsize, bc);
}
if (rf == INTRA_FRAME) {
active_section = 6;
#endif
- if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
- const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
- const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
- const int bsl = MIN(bwl, bhl);
- write_intra_mode(bc, mode, pc->fc.y_mode_prob[MIN(3, bsl)]);
+ if (bsize >= BLOCK_SIZE_SB8X8) {
+ write_intra_mode(bc, mode, pc->fc.y_mode_prob[size_group_lookup[bsize]]);
} else {
int idx, idy;
- int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mi->sb_type];
- int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mi->sb_type];
+ int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
for (idy = 0; idy < 2; idy += num_4x4_blocks_high)
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
// If segment skip is not enabled code the mode.
if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
- if (mi->sb_type >= BLOCK_SIZE_SB8X8) {
+ if (bsize >= BLOCK_SIZE_SB8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p);
vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
}
assert(mi->interp_filter == cpi->common.mcomp_filter_type);
}
- if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+ if (bsize < BLOCK_SIZE_SB8X8) {
int j;
MB_PREDICTION_MODE blockmode;
int_mv blockmv;
- int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mi->sb_type];
- int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mi->sb_type];
+ int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
j = idy * 2 + idx;
- blockmode = cpi->mb.partition_info->bmi[j].mode;
+ blockmode = x->partition_info->bmi[j].mode;
blockmv = m->bmi[j].as_mv[0];
write_sb_mv_ref(bc, blockmode, mv_ref_p);
vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]);
coef_counts[i][j][k][l], 0);
vpx_memcpy(coef_probs[i][j][k][l], full_probs,
sizeof(vp9_prob) * UNCONSTRAINED_NODES);
-#if CONFIG_BALANCED_COEFTREE
- coef_branch_ct[i][j][k][l][1][1] = eob_branch_ct[i][j][k][l] -
- coef_branch_ct[i][j][k][l][1][0];
- coef_probs[i][j][k][l][1] =
- get_binary_prob(coef_branch_ct[i][j][k][l][1][0],
- coef_branch_ct[i][j][k][l][1][1]);
-#else
coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
coef_branch_ct[i][j][k][l][0][0];
coef_probs[i][j][k][l][0] =
get_binary_prob(coef_branch_ct[i][j][k][l][0][0],
coef_branch_ct[i][j][k][l][0][1]);
-#endif
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing) {
int t;
cpi->seg0_progress = ((y * mb_cols + x * 4 + p32 + p16 + tile_progress)
<< 16) / cm->MBs;
}
+
+ x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
} else {
mbmi->segment_id = 0;
+ x->encode_breakout = cpi->oxcf.encode_breakout;
}
}
int bhl = b_height_log2(bsize);
int bsl = (bwl > bhl ? bwl : bhl);
- int bs = (1 << bsl) / 2; //
+ int bs = (1 << bsl) / 2; // Block size in units of 8 pels.
MODE_INFO *m2 = m + mi_row * mis + mi_col;
for (row = 0; row < bs; row++) {
for (col = 0; col < bs; col++) {
int dp;
int pixels_wide = 64, pixels_high = 64;
- vpx_memset(&vt, 0, sizeof(vt));
-
+ vp9_zero(vt);
set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
if (xd->mb_to_right_edge < 0)
}
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
+ x->fast_ms = 0;
+ x->pred_mv.as_int = 0;
+ x->subblock_ref = 0;
+
if (cpi->sf.adjust_partitioning_from_last_frame) {
// Check if any of the sub blocks are further split.
if (partition == PARTITION_SPLIT && subsize > BLOCK_SIZE_SB8X8) {
*dist = chosen_dist;
}
+// Per-sb_type bounds used by rd_auto_partition_range() below: indexed by a
+// neighbouring block's sb_type, they give the smallest / largest partition
+// size the current block will be allowed to consider.
+static BLOCK_SIZE_TYPE min_partition_size[BLOCK_SIZE_TYPES] =
+  { BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
+    BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8,
+    BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16 };
+static BLOCK_SIZE_TYPE max_partition_size[BLOCK_SIZE_TYPES] =
+  { BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
+    BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, BLOCK_64X64,
+    BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64 };
+
+
+// Look at neighbouring blocks and set a min and max partition size based on
+// what they chose.
+static void rd_auto_partition_range(VP9_COMP *cpi,
+                                    BLOCK_SIZE_TYPE * min_block_size,
+                                    BLOCK_SIZE_TYPE * max_block_size) {
+  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+  const MODE_INFO *const mi = xd->mode_info_context;
+  const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
+  const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+  const int left_in_image = xd->left_available && left_mbmi->mb_in_image;
+  const int above_in_image = xd->up_available && above_mbmi->mb_in_image;
+
+  // Frequency check: every auto_min_max_partition_interval-th call the
+  // counter is reset and the full partition range is searched; on the
+  // remaining calls the counter is decremented and the neighbour-derived
+  // limits below apply.
+  if (cpi->sf.auto_min_max_partition_count <= 0) {
+    cpi->sf.auto_min_max_partition_count =
+        cpi->sf.auto_min_max_partition_interval;
+    *min_block_size = BLOCK_4X4;
+    *max_block_size = BLOCK_64X64;
+    return;
+  } else {
+    --cpi->sf.auto_min_max_partition_count;
+  }
+
+  // Check for edge cases: with no usable neighbour fall back to the full
+  // range; with one neighbour use its lookup values; with both, take the
+  // lookup of the smaller sb_type for the min and of the larger for the max.
+  if (!left_in_image && !above_in_image) {
+    *min_block_size = BLOCK_4X4;
+    *max_block_size = BLOCK_64X64;
+  } else if (!left_in_image) {
+    *min_block_size = min_partition_size[above_mbmi->sb_type];
+    *max_block_size = max_partition_size[above_mbmi->sb_type];
+  } else if (!above_in_image) {
+    *min_block_size = min_partition_size[left_mbmi->sb_type];
+    *max_block_size = max_partition_size[left_mbmi->sb_type];
+  } else {
+    *min_block_size =
+        min_partition_size[MIN(left_mbmi->sb_type, above_mbmi->sb_type)];
+    *max_block_size =
+        max_partition_size[MAX(left_mbmi->sb_type, above_mbmi->sb_type)];
+  }
+}
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previously rate-distortion optimization
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// PARTITION_SPLIT
- if (!cpi->sf.use_partitions_greater_than
- || (cpi->sf.use_partitions_greater_than
- && bsize > cpi->sf.greater_than_block_size)) {
+ if (!cpi->sf.auto_min_max_partition_size ||
+ bsize >= cpi->sf.min_partition_size) {
if (bsize > BLOCK_SIZE_SB8X8) {
int r4 = 0;
int64_t d4 = 0, sum_rd = 0;
((use_8x8 && bsize == BLOCK_SIZE_MB16X16) ||
bsize == BLOCK_SIZE_SB32X32 || bsize == BLOCK_SIZE_SB64X64)) {
int ref0 = 0, ref1 = 0, ref2 = 0, ref3 = 0;
+ PICK_MODE_CONTEXT *block_context = NULL;
if (bsize == BLOCK_SIZE_MB16X16) {
- ref0 = x->sb8x8_context[xd->sb_index][xd->mb_index][0].mic.mbmi.
- ref_frame[0];
- ref1 = x->sb8x8_context[xd->sb_index][xd->mb_index][1].mic.mbmi.
- ref_frame[0];
- ref2 = x->sb8x8_context[xd->sb_index][xd->mb_index][2].mic.mbmi.
- ref_frame[0];
- ref3 = x->sb8x8_context[xd->sb_index][xd->mb_index][3].mic.mbmi.
- ref_frame[0];
+ block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
} else if (bsize == BLOCK_SIZE_SB32X32) {
- ref0 = x->mb_context[xd->sb_index][0].mic.mbmi.ref_frame[0];
- ref1 = x->mb_context[xd->sb_index][1].mic.mbmi.ref_frame[0];
- ref2 = x->mb_context[xd->sb_index][2].mic.mbmi.ref_frame[0];
- ref3 = x->mb_context[xd->sb_index][3].mic.mbmi.ref_frame[0];
+ block_context = x->mb_context[xd->sb_index];
} else if (bsize == BLOCK_SIZE_SB64X64) {
- ref0 = x->sb32_context[0].mic.mbmi.ref_frame[0];
- ref1 = x->sb32_context[1].mic.mbmi.ref_frame[0];
- ref2 = x->sb32_context[2].mic.mbmi.ref_frame[0];
- ref3 = x->sb32_context[3].mic.mbmi.ref_frame[0];
+ block_context = x->sb32_context;
+ }
+
+ if (block_context) {
+ ref0 = block_context[0].mic.mbmi.ref_frame[0];
+ ref1 = block_context[1].mic.mbmi.ref_frame[0];
+ ref2 = block_context[2].mic.mbmi.ref_frame[0];
+ ref3 = block_context[3].mic.mbmi.ref_frame[0];
}
// Currently, only consider 4 inter ref frames.
int d01, d23, d02, d13; // motion vector distance between 2 blocks
// Get each subblock's motion vectors.
- if (bsize == BLOCK_SIZE_MB16X16) {
- mvr0 = x->sb8x8_context[xd->sb_index][xd->mb_index][0].mic.mbmi.mv[0].
- as_mv.row;
- mvc0 = x->sb8x8_context[xd->sb_index][xd->mb_index][0].mic.mbmi.mv[0].
- as_mv.col;
- mvr1 = x->sb8x8_context[xd->sb_index][xd->mb_index][1].mic.mbmi.mv[0].
- as_mv.row;
- mvc1 = x->sb8x8_context[xd->sb_index][xd->mb_index][1].mic.mbmi.mv[0].
- as_mv.col;
- mvr2 = x->sb8x8_context[xd->sb_index][xd->mb_index][2].mic.mbmi.mv[0].
- as_mv.row;
- mvc2 = x->sb8x8_context[xd->sb_index][xd->mb_index][2].mic.mbmi.mv[0].
- as_mv.col;
- mvr3 = x->sb8x8_context[xd->sb_index][xd->mb_index][3].mic.mbmi.mv[0].
- as_mv.row;
- mvc3 = x->sb8x8_context[xd->sb_index][xd->mb_index][3].mic.mbmi.mv[0].
- as_mv.col;
- } else if (bsize == BLOCK_SIZE_SB32X32) {
- mvr0 = x->mb_context[xd->sb_index][0].mic.mbmi.mv[0].as_mv.row;
- mvc0 = x->mb_context[xd->sb_index][0].mic.mbmi.mv[0].as_mv.col;
- mvr1 = x->mb_context[xd->sb_index][1].mic.mbmi.mv[0].as_mv.row;
- mvc1 = x->mb_context[xd->sb_index][1].mic.mbmi.mv[0].as_mv.col;
- mvr2 = x->mb_context[xd->sb_index][2].mic.mbmi.mv[0].as_mv.row;
- mvc2 = x->mb_context[xd->sb_index][2].mic.mbmi.mv[0].as_mv.col;
- mvr3 = x->mb_context[xd->sb_index][3].mic.mbmi.mv[0].as_mv.row;
- mvc3 = x->mb_context[xd->sb_index][3].mic.mbmi.mv[0].as_mv.col;
- } else if (bsize == BLOCK_SIZE_SB64X64) {
- mvr0 = x->sb32_context[0].mic.mbmi.mv[0].as_mv.row;
- mvc0 = x->sb32_context[0].mic.mbmi.mv[0].as_mv.col;
- mvr1 = x->sb32_context[1].mic.mbmi.mv[0].as_mv.row;
- mvc1 = x->sb32_context[1].mic.mbmi.mv[0].as_mv.col;
- mvr2 = x->sb32_context[2].mic.mbmi.mv[0].as_mv.row;
- mvc2 = x->sb32_context[2].mic.mbmi.mv[0].as_mv.col;
- mvr3 = x->sb32_context[3].mic.mbmi.mv[0].as_mv.row;
- mvc3 = x->sb32_context[3].mic.mbmi.mv[0].as_mv.col;
- }
+ mvr0 = block_context[0].mic.mbmi.mv[0].as_mv.row;
+ mvc0 = block_context[0].mic.mbmi.mv[0].as_mv.col;
+ mvr1 = block_context[1].mic.mbmi.mv[0].as_mv.row;
+ mvc1 = block_context[1].mic.mbmi.mv[0].as_mv.col;
+ mvr2 = block_context[2].mic.mbmi.mv[0].as_mv.row;
+ mvc2 = block_context[2].mic.mbmi.mv[0].as_mv.col;
+ mvr3 = block_context[3].mic.mbmi.mv[0].as_mv.row;
+ mvc3 = block_context[3].mic.mbmi.mv[0].as_mv.col;
// Adjust sign if ref is alt_ref
if (cm->ref_frame_sign_bias[ref0]) {
}
}
- if (!cpi->sf.use_partitions_less_than
- || (cpi->sf.use_partitions_less_than
- && bsize <= cpi->sf.less_than_block_size)) {
+ if (!cpi->sf.use_max_partition_size ||
+ bsize <= cpi->sf.max_partition_size) {
int larger_is_better = 0;
// PARTITION_NONE
if ((mi_row + (ms >> 1) < cm->mi_rows) &&
|| cpi->common.show_frame == 0
|| cpi->common.frame_type == KEY_FRAME
|| cpi->is_src_frame_alt_ref) {
+ // If required set upper and lower partition size limits
+ if (cpi->sf.auto_min_max_partition_size) {
+ rd_auto_partition_range(cpi,
+ &cpi->sf.min_partition_size,
+ &cpi->sf.max_partition_size);
+ }
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
} else {
}
}
} else {
+ // If required set upper and lower partition size limits
+ if (cpi->sf.auto_min_max_partition_size) {
+ rd_auto_partition_range(cpi,
+ &cpi->sf.min_partition_size,
+ &cpi->sf.max_partition_size);
+ }
+
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
}
// In the longer term the encoder should be generalized to match the
// decoder such that we allow compound where one of the 3 buffers has a
- // differnt sign bias and that buffer is then the fixed ref. However, this
+ // different sign bias and that buffer is then the fixed ref. However, this
// requires further work in the rd loop. For now the only supported encoder
- // side behaviour is where the ALT ref buffer has oppositie sign bias to
+ // side behaviour is where the ALT ref buffer has opposite sign bias to
// the other two.
if ((cm->ref_frame_sign_bias[ALTREF_FRAME]
== cm->ref_frame_sign_bias[GOLDEN_FRAME])
/* filter type selection */
// FIXME(rbultje) for some odd reason, we often select smooth_filter
// as default filter for ARF overlay frames. This is a REALLY BAD
- // IDEA so we explicitely disable it here.
+ // IDEA so we explicitly disable it here.
if (frame_type != 3 &&
cpi->rd_filter_threshes[frame_type][1] >
cpi->rd_filter_threshes[frame_type][0] &&
xd->inv_txm4x4_add(dqcoeff, dest, stride);
}
+// Inverse 8x8 transform + add with an eob shortcut: when at most one
+// coefficient is present (eob <= 1, i.e. DC only) the cheaper DC-only
+// idct variant is used.  NOTE(review): xd is unused in this body.
+static void inverse_transform_b_8x8_add(MACROBLOCKD *xd, int eob,
+                                        int16_t *dqcoeff, uint8_t *dest,
+                                        int stride) {
+  if (eob <= 1)
+    vp9_short_idct8x8_1_add(dqcoeff, dest, stride);
+  else
+    vp9_short_idct8x8_add(dqcoeff, dest, stride);
+}
static void subtract_plane(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize, int plane) {
struct macroblock_plane *const p = &x->plane[plane];
qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16);
switch (tx_size) {
default:
- case TX_4X4: {
- const TX_TYPE tx_type = plane == 0 ? get_tx_type_4x4(xd, ib) : DCT_DCT;
+ case TX_4X4:
default_eob = 16;
- scan = get_scan_4x4(tx_type);
+ scan = get_scan_4x4(get_tx_type_4x4(type, xd, ib));
band_translate = vp9_coefband_trans_4x4;
break;
- }
- case TX_8X8: {
- const TX_TYPE tx_type = plane == 0 ? get_tx_type_8x8(xd) : DCT_DCT;
- scan = get_scan_8x8(tx_type);
+ case TX_8X8:
+ scan = get_scan_8x8(get_tx_type_8x8(type, xd));
default_eob = 64;
band_translate = vp9_coefband_trans_8x8plus;
break;
- }
- case TX_16X16: {
- const TX_TYPE tx_type = plane == 0 ? get_tx_type_16x16(xd) : DCT_DCT;
- scan = get_scan_16x16(tx_type);
+ case TX_16X16:
+ scan = get_scan_16x16(get_tx_type_16x16(type, xd));
default_eob = 256;
band_translate = vp9_coefband_trans_8x8plus;
break;
- }
case TX_32X32:
scan = vp9_default_scan_32x32;
default_eob = 1024;
args->ctx);
}
-void vp9_optimize_init(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
- struct optimize_ctx *ctx) {
- int p;
-
- for (p = 0; p < MAX_MB_PLANE; p++) {
- const struct macroblockd_plane* const plane = &xd->plane[p];
- const int bwl = b_width_log2(bsize) - plane->subsampling_x;
- const int bhl = b_height_log2(bsize) - plane->subsampling_y;
- const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
- const TX_SIZE tx_size = p ? get_uv_tx_size(mbmi)
- : mbmi->txfm_size;
- int i, j;
-
- for (i = 0; i < 1 << bwl; i += 1 << tx_size) {
- int c = 0;
- ctx->ta[p][i] = 0;
- for (j = 0; j < 1 << tx_size && !c; j++) {
- c = ctx->ta[p][i] |= plane->above_context[i + j];
- }
- }
- for (i = 0; i < 1 << bhl; i += 1 << tx_size) {
- int c = 0;
- ctx->tl[p][i] = 0;
- for (j = 0; j < 1 << tx_size && !c; j++) {
- c = ctx->tl[p][i] |= plane->left_context[i + j];
- }
- }
+// Seed the optimize_ctx above/left entropy contexts for one plane of a
+// block of size 'bsize'.  For 4x4 transforms the per-4x4 contexts are
+// copied 1:1; for larger transforms several adjacent contexts are folded
+// into a single 0/1 flag per transform block (nonzero if any source
+// context is nonzero) via a wide integer load.
+// NOTE(review): the uint16/32/64 loads assume ENTROPY_CONTEXT is one byte
+// so that a load of N bytes covers N contexts — TODO confirm, and note the
+// type-punned read relies on suitable alignment of the context arrays.
+void optimize_init_b(int plane, BLOCK_SIZE_TYPE bsize, void *arg) {
+  const struct encode_b_args* const args = arg;
+  const MACROBLOCKD *xd = &args->x->e_mbd;
+  const struct macroblockd_plane* const pd = &xd->plane[plane];
+  // Plane dimensions in 4x4 units, adjusted for chroma subsampling.
+  const int bwl = b_width_log2(bsize) - pd->subsampling_x;
+  const int bhl = b_height_log2(bsize) - pd->subsampling_y;
+  const int bw = 1 << bwl, bh = 1 << bhl;
+  const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+  const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) : mbmi->txfm_size;
+  int i;
+
+  switch (tx_size) {
+    case TX_4X4:
+      // One context per 4x4 block: straight copy.
+      vpx_memcpy(args->ctx->ta[plane], pd->above_context,
+                 sizeof(ENTROPY_CONTEXT) * bw);
+      vpx_memcpy(args->ctx->tl[plane], pd->left_context,
+                 sizeof(ENTROPY_CONTEXT) * bh);
+      break;
+    case TX_8X8:
+      // Fold pairs of contexts into one flag per 8x8 transform block.
+      for (i = 0; i < bw; i += 2)
+        args->ctx->ta[plane][i] = !!*(uint16_t *)&pd->above_context[i];
+      for (i = 0; i < bh; i += 2)
+        args->ctx->tl[plane][i] = !!*(uint16_t *)&pd->left_context[i];
+      break;
+    case TX_16X16:
+      // Fold groups of four contexts per 16x16 transform block.
+      for (i = 0; i < bw; i += 4)
+        args->ctx->ta[plane][i] = !!*(uint32_t *)&pd->above_context[i];
+      for (i = 0; i < bh; i += 4)
+        args->ctx->tl[plane][i] = !!*(uint32_t *)&pd->left_context[i];
+      break;
+    case TX_32X32:
+      // Fold groups of eight contexts per 32x32 transform block.
+      for (i = 0; i < bw; i += 8)
+        args->ctx->ta[plane][i] = !!*(uint64_t *)&pd->above_context[i];
+      for (i = 0; i < bh; i += 8)
+        args->ctx->tl[plane][i] = !!*(uint64_t *)&pd->left_context[i];
+      break;
+    default:
+      assert(0);
  }
}
void vp9_optimize_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
struct optimize_ctx ctx;
struct encode_b_args arg = {cm, x, &ctx};
- vp9_optimize_init(&x->e_mbd, bsize, &ctx);
+ optimize_init_b(0, bsize, &arg);
foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0, optimize_block, &arg);
}
BLOCK_SIZE_TYPE bsize) {
struct optimize_ctx ctx;
struct encode_b_args arg = {cm, x, &ctx};
- vp9_optimize_init(&x->e_mbd, bsize, &ctx);
+ int i;
+ for (i = 1; i < MAX_MB_PLANE; ++i)
+ optimize_init_b(i, bsize, &arg);
+
foreach_transformed_block_uv(&x->e_mbd, bsize, optimize_block, &arg);
}
vp9_short_idct16x16_add(dqcoeff, dst, pd->dst.stride);
break;
case TX_8X8:
- vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride);
+ inverse_transform_b_8x8_add(xd, pd->eobs[block], dqcoeff,
+ dst, pd->dst.stride);
break;
case TX_4X4:
// this is like vp9_short_idct4x4 but has a special case around eob<=1
vp9_subtract_sby(x, bsize);
if (x->optimize)
- vp9_optimize_init(xd, bsize, &ctx);
+ optimize_init_b(0, bsize, &arg);
foreach_transformed_block_in_plane(xd, bsize, 0, encode_block, &arg);
}
struct encode_b_args arg = {cm, x, &ctx};
vp9_subtract_sbuv(x, bsize);
- if (x->optimize)
- vp9_optimize_init(xd, bsize, &ctx);
+ if (x->optimize) {
+ int i;
+ for (i = 1; i < MAX_MB_PLANE; ++i)
+ optimize_init_b(i, bsize, &arg);
+ }
foreach_transformed_block_uv(xd, bsize, encode_block, &arg);
}
struct encode_b_args arg = {cm, x, &ctx};
vp9_subtract_sb(x, bsize);
- if (x->optimize)
- vp9_optimize_init(xd, bsize, &ctx);
+
+ if (x->optimize) {
+ int i;
+ for (i = 0; i < MAX_MB_PLANE; ++i)
+ optimize_init_b(i, bsize, &arg);
+ }
foreach_transformed_block(xd, bsize, encode_block, &arg);
}
vp9_short_idct32x32_add(dqcoeff, dst, pd->dst.stride);
break;
case TX_16X16:
- tx_type = plane == 0 ? get_tx_type_16x16(xd) : DCT_DCT;
+ tx_type = get_tx_type_16x16(pd->plane_type, xd);
scan = get_scan_16x16(tx_type);
iscan = get_iscan_16x16(tx_type);
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
}
break;
case TX_8X8:
- tx_type = plane == 0 ? get_tx_type_8x8(xd) : DCT_DCT;
+ tx_type = get_tx_type_8x8(pd->plane_type, xd);
scan = get_scan_8x8(tx_type);
iscan = get_iscan_8x8(tx_type);
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
pd->dequant, p->zbin_extra, eob, scan, iscan);
if (!x->skip_encode && *eob) {
if (tx_type == DCT_DCT)
- vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride);
+ inverse_transform_b_8x8_add(xd, *eob, dqcoeff, dst, pd->dst.stride);
else
vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type);
}
break;
case TX_4X4:
- tx_type = plane == 0 ? get_tx_type_4x4(xd, block) : DCT_DCT;
+ tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
scan = get_scan_4x4(tx_type);
iscan = get_iscan_4x4(tx_type);
if (mbmi->sb_type < BLOCK_SIZE_SB8X8 && plane == 0) {
// this is like vp9_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
// case.
- inverse_transform_b_4x4_add(xd, *eob, dqcoeff,
- dst, pd->dst.stride);
+ inverse_transform_b_4x4_add(xd, *eob, dqcoeff, dst, pd->dst.stride);
else
vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type);
}
struct optimize_ctx *ctx;
};
-void vp9_optimize_init(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
- struct optimize_ctx *ctx);
void vp9_optimize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
int ss_txfrm_size, VP9_COMMON *cm, MACROBLOCK *x,
struct optimize_ctx *ctx);
adjust_active_maxq(cpi->active_worst_quality, tmp_q);
}
#endif
-
- vpx_memset(&this_frame, 0, sizeof(FIRSTPASS_STATS));
+ vp9_zero(this_frame);
if (EOF == input_stats(cpi, &this_frame))
return;
double kf_group_coded_err = 0.0;
double recent_loop_decay[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
- vpx_memset(&next_frame, 0, sizeof(next_frame)); // assure clean
+ vp9_zero(next_frame);
vp9_clear_system_state(); // __asm emms;
start_position = cpi->twopass.stats_in;
int_mv arf_top_mv, gld_top_mv;
MODE_INFO mi_local;
- // Make sure the mi context starts in a consistent state.
- memset(&mi_local, 0, sizeof(mi_local));
+ vp9_zero(mi_local);
// Set up limit values for motion vectors to prevent them extending outside the UMV borders
arf_top_mv.as_int = 0;
vp9_enable_segfeature(&xd->seg, 0, SEG_LVL_SKIP);
vp9_enable_segfeature(&xd->seg, 1, SEG_LVL_SKIP);
}
- // Enable data udpate
+ // Enable data update
xd->seg.update_data = 1;
} else {
// All other frames.
sf->use_one_partition_size_always = 0;
sf->less_rectangular_check = 0;
sf->use_square_partition_only = 0;
- sf->use_partitions_less_than = 0;
- sf->less_than_block_size = BLOCK_SIZE_MB16X16;
- sf->use_partitions_greater_than = 0;
- sf->greater_than_block_size = BLOCK_SIZE_SB8X8;
+ sf->auto_min_max_partition_size = 0;
+ sf->auto_min_max_partition_interval = 0;
+ sf->auto_min_max_partition_count = 0;
+ sf->use_max_partition_size = 0;
+ sf->max_partition_size = BLOCK_64X64;
+ sf->use_min_partition_size = 0;
+ sf->min_partition_size = BLOCK_4X4;
sf->adjust_partitioning_from_last_frame = 0;
sf->last_partitioning_redo_frequency = 4;
sf->disable_splitmv = 0;
sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH |
FLAG_SKIP_INTRA_BESTINTER |
FLAG_SKIP_COMP_BESTINTRA;
- sf->last_chroma_intra_mode = H_PRED;
+ sf->use_uv_intra_rd_estimate = 1;
sf->use_rd_breakout = 1;
sf->skip_encode_sb = 1;
sf->auto_mv_step_size = 1;
+
+ sf->auto_min_max_partition_size = 1;
+ sf->use_max_partition_size = 1;
+ sf->use_min_partition_size = 1;
+ sf->auto_min_max_partition_interval = 1;
}
if (speed == 2) {
sf->adjust_thresholds_by_speed = 1;
FLAG_SKIP_COMP_BESTINTRA |
FLAG_SKIP_COMP_REFMISMATCH;
sf->last_chroma_intra_mode = DC_PRED;
+ sf->use_uv_intra_rd_estimate = 1;
sf->use_rd_breakout = 1;
sf->skip_encode_sb = 1;
- sf->use_uv_intra_rd_estimate = 1;
sf->using_small_partition_info = 1;
sf->disable_splitmv =
(MIN(cpi->common.width, cpi->common.height) >= 720)? 1 : 0;
if (speed == 2) {
sf->first_step = 0;
sf->comp_inter_joint_search_thresh = BLOCK_SIZE_SB8X8;
- sf->use_partitions_less_than = 1;
- sf->less_than_block_size = BLOCK_SIZE_MB16X16;
+ sf->use_max_partition_size = 1;
+ sf->max_partition_size = BLOCK_SIZE_MB16X16;
}
if (speed == 3) {
sf->first_step = 0;
sf->comp_inter_joint_search_thresh = BLOCK_SIZE_SB8X8;
- sf->use_partitions_greater_than = 1;
- sf->greater_than_block_size = BLOCK_SIZE_SB8X8;
+ sf->use_min_partition_size = 1;
+ sf->min_partition_size = BLOCK_SIZE_SB8X8;
}
*/
cm = &cpi->common;
- vpx_memset(cpi, 0, sizeof(VP9_COMP));
+ vp9_zero(*cpi);
if (setjmp(cm->error.jmp)) {
VP9_PTR ptr = ctx.ptr;
}
#endif
loop_count = 0;
- vpx_memset(cpi->rd_tx_select_threshes, 0, sizeof(cpi->rd_tx_select_threshes));
+ vp9_zero(cpi->rd_tx_select_threshes);
if (cm->frame_type != KEY_FRAME) {
/* TODO: Decide this more intelligently */
if (!cpi->common.error_resilient_mode &&
!cpi->common.frame_parallel_decoding_mode) {
vp9_adapt_mode_probs(&cpi->common);
- vp9_adapt_mode_context(&cpi->common);
vp9_adapt_mv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
}
}
int unused_mode_skip_lvl;
int reference_masking;
BLOCK_SIZE_TYPE always_this_block_size;
- int use_partitions_greater_than;
- BLOCK_SIZE_TYPE greater_than_block_size;
- int use_partitions_less_than;
- BLOCK_SIZE_TYPE less_than_block_size;
+ int auto_min_max_partition_size;
+ int auto_min_max_partition_interval;
+ int auto_min_max_partition_count;
+ int use_min_partition_size;
+ BLOCK_SIZE_TYPE min_partition_size;
+ int use_max_partition_size;
+ BLOCK_SIZE_TYPE max_partition_size;
int adjust_partitioning_from_last_frame;
int last_partitioning_redo_frequency;
int disable_splitmv;
vp9_model_to_full_probs(p[t][i][j][k][l], probs);
vp9_cost_tokens((int *)c[t][i][j][0][k][l], probs,
vp9_coef_tree);
-#if CONFIG_BALANCED_COEFTREE
- // Replace the eob node prob with a very small value so that the
- // cost approximately equals the cost without the eob node
- probs[1] = 1;
- vp9_cost_tokens((int *)c[t][i][j][1][k][l], probs, vp9_coef_tree);
-#else
vp9_cost_tokens_skip((int *)c[t][i][j][1][k][l], probs,
vp9_coef_tree);
assert(c[t][i][j][0][k][l][DCT_EOB_TOKEN] ==
c[t][i][j][1][k][l][DCT_EOB_TOKEN]);
-#endif
}
}
sizeof(ENTROPY_CONTEXT) * bw);
vpx_memcpy(&args.t_left, pd->left_context,
sizeof(ENTROPY_CONTEXT) * bh);
- get_scan_nb_4x4(get_tx_type_4x4(xd, 0), &args.scan, &args.nb);
+ get_scan_nb_4x4(get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, 0),
+ &args.scan, &args.nb);
break;
case TX_8X8:
for (i = 0; i < bw; i += 2)
args.t_above[i] = !!*(uint16_t *)&pd->above_context[i];
for (i = 0; i < bh; i += 2)
args.t_left[i] = !!*(uint16_t *)&pd->left_context[i];
- get_scan_nb_8x8(get_tx_type_8x8(xd), &args.scan, &args.nb);
+ get_scan_nb_8x8(get_tx_type_8x8(PLANE_TYPE_Y_WITH_DC, xd),
+ &args.scan, &args.nb);
break;
case TX_16X16:
for (i = 0; i < bw; i += 4)
args.t_above[i] = !!*(uint32_t *)&pd->above_context[i];
for (i = 0; i < bh; i += 4)
args.t_left[i] = !!*(uint32_t *)&pd->left_context[i];
- get_scan_nb_16x16(get_tx_type_16x16(xd), &args.scan, &args.nb);
+ get_scan_nb_16x16(get_tx_type_16x16(PLANE_TYPE_Y_WITH_DC, xd),
+ &args.scan, &args.nb);
break;
case TX_32X32:
for (i = 0; i < bw; i += 8)
src, src_stride,
dst, dst_stride);
- tx_type = get_tx_type_4x4(xd, block);
+ tx_type = get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, block);
if (tx_type != DCT_DCT) {
vp9_short_fht4x4(src_diff, coeff, 8, tx_type);
x->quantize_b_4x4(x, block, tx_type, 16);
x->quantize_b_4x4(x, block, tx_type, 16);
}
- scan = get_scan_4x4(get_tx_type_4x4(xd, block));
+ scan = get_scan_4x4(get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, block));
ratey += cost_coeffs(cm, x, 0, block, PLANE_TYPE_Y_WITH_DC,
tempa + idx, templ + idy, TX_4X4, scan,
vp9_get_coef_neighbors_handle(scan));
MB_MODE_INFO *mbmi = &mi->mbmi;
int mode_idx;
- vpx_memset(bsi, 0, sizeof(*bsi));
+ vp9_zero(*bsi);
bsi->segment_rd = best_rd;
bsi->ref_mv = best_ref_mv;
if (cpi->common.mcomp_filter_type == SWITCHABLE)
*rate2 += get_switchable_rate(cm, x);
- if (cpi->active_map_enabled && x->active_ptr[0] == 0)
- x->skip = 1;
- else if (x->encode_breakout) {
- const BLOCK_SIZE_TYPE y_size = get_plane_block_size(bsize, &xd->plane[0]);
- const BLOCK_SIZE_TYPE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
-
- unsigned int var, sse;
- int threshold = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1] >> 4);
-
-
- if (threshold < x->encode_breakout)
- threshold = x->encode_breakout;
-
- var = cpi->fn_ptr[y_size].vf(x->plane[0].src.buf, x->plane[0].src.stride,
- xd->plane[0].dst.buf, xd->plane[0].dst.stride,
- &sse);
-
- if ((int)sse < threshold) {
- unsigned int q2dc = xd->plane[0].dequant[0];
- // If there is no codeable 2nd order dc
- // or a very small uniform pixel change change
- if ((sse - var < q2dc * q2dc >> 4) ||
- (sse / 2 > var && sse - var < 64)) {
- // Check u and v to make sure skip is ok
- int sse2;
- unsigned int sse2u, sse2v;
- var = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf,
- x->plane[1].src.stride,
- xd->plane[1].dst.buf,
- xd->plane[1].dst.stride, &sse2u);
- var = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf,
- x->plane[2].src.stride,
- xd->plane[2].dst.buf,
- xd->plane[2].dst.stride, &sse2v);
- sse2 = sse2u + sse2v;
-
- if (sse2 * 2 < threshold) {
- x->skip = 1;
- *distortion = sse + sse2;
- *rate2 = 500;
-
- // for best yrd calculation
- *rate_uv = 0;
- *distortion_uv = sse2;
-
- *disable_skip = 1;
- this_rd = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+ if (!is_comp_pred) {
+ if (cpi->active_map_enabled && x->active_ptr[0] == 0)
+ x->skip = 1;
+ else if (x->encode_breakout) {
+ const BLOCK_SIZE_TYPE y_size = get_plane_block_size(bsize, &xd->plane[0]);
+ const BLOCK_SIZE_TYPE uv_size = get_plane_block_size(bsize,
+ &xd->plane[1]);
+ unsigned int var, sse;
+ // Skipping threshold for ac.
+ unsigned int thresh_ac;
+ // The encode_breakout input
+ unsigned int encode_breakout = x->encode_breakout << 4;
+
+ // Calculate threshold according to dequant value.
+ thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) / 9;
+
+ // Set a maximum for threshold to avoid big PSNR loss in low bitrate case.
+ if (thresh_ac > 36000)
+ thresh_ac = 36000;
+
+ // Use encode_breakout input if it is bigger than internal threshold.
+ if (thresh_ac < encode_breakout)
+ thresh_ac = encode_breakout;
+
+ var = cpi->fn_ptr[y_size].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+ xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, &sse);
+
+ // Adjust threshold according to partition size.
+ thresh_ac >>= 8 - (b_width_log2_lookup[bsize] +
+ b_height_log2_lookup[bsize]);
+
+ // Y skipping condition checking
+ if (sse < thresh_ac || sse == 0) {
+ // Skipping threshold for dc
+ unsigned int thresh_dc;
+
+ thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
+
+ // dc skipping checking
+ if ((sse - var) < thresh_dc || sse == var) {
+ unsigned int sse_u, sse_v;
+ unsigned int var_u, var_v;
+
+ var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf,
+ x->plane[1].src.stride,
+ xd->plane[1].dst.buf,
+ xd->plane[1].dst.stride, &sse_u);
+
+ // U skipping condition checking
+ if ((sse_u * 4 < thresh_ac || sse_u == 0) &&
+ (sse_u - var_u < thresh_dc || sse_u == var_u)) {
+ var_v = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf,
+ x->plane[2].src.stride,
+ xd->plane[2].dst.buf,
+ xd->plane[2].dst.stride, &sse_v);
+
+ // V skipping condition checking
+ if ((sse_v * 4 < thresh_ac || sse_v == 0) &&
+ (sse_v - var_v < thresh_dc || sse_v == var_v)) {
+ x->skip = 1;
+
+ *rate2 = 500;
+ *rate_uv = 0;
+
+ // Scaling factor for SSE from spatial domain to frequency domain
+ // is 16. Adjust distortion accordingly.
+ *distortion_uv = (sse_u + sse_v) << 4;
+ *distortion = (sse << 4) + *distortion_uv;
+
+ *disable_skip = 1;
+ this_rd = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+ }
+ }
}
}
}
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
const BLOCK_SIZE_TYPE block_size = get_plane_block_size(bsize, &xd->plane[0]);
MB_PREDICTION_MODE this_mode;
- MV_REFERENCE_FRAME ref_frame;
+ MV_REFERENCE_FRAME ref_frame, second_ref_frame;
unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
int comp_pred, i;
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
for (i = 0; i < NB_TXFM_MODES; ++i)
txfm_cache[i] = INT64_MAX;
+ x->skip = 0;
this_mode = vp9_mode_order[mode_index].mode;
ref_frame = vp9_mode_order[mode_index].ref_frame;
+ second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
- // Slip modes that have been masked off but always consider first mode.
+ // Skip modes that have been masked off but always consider first mode.
if ( mode_index && (bsize > cpi->sf.unused_mode_skip_lvl) &&
(cpi->unused_mode_skip_mask & (1 << mode_index)) )
continue;
- // Skip if the current refernce frame has been masked off
+ // Skip if the current reference frame has been masked off
if (cpi->sf.reference_masking && !cpi->set_ref_frame_mask &&
(cpi->ref_frame_mask & (1 << ref_frame)))
continue;
// Do not allow compound prediction if the segment level reference
// frame feature is in use as in this case there can only be one reference.
- if ((vp9_mode_order[mode_index].second_ref_frame > INTRA_FRAME) &&
+ if ((second_ref_frame > INTRA_FRAME) &&
vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_REF_FRAME))
continue;
- x->skip = 0;
-
// Skip some checking based on small partitions' result.
if (x->fast_ms > 1 && !ref_frame)
continue;
if (!(mode_mask & (1 << this_mode))) {
continue;
}
- if (vp9_mode_order[mode_index].second_ref_frame != NONE
- && !(ref_frame_mask
- & (1 << vp9_mode_order[mode_index].second_ref_frame))) {
+ if (second_ref_frame != NONE
+ && !(ref_frame_mask & (1 << second_ref_frame))) {
continue;
}
}
mbmi->ref_frame[0] = ref_frame;
- mbmi->ref_frame[1] = vp9_mode_order[mode_index].second_ref_frame;
+ mbmi->ref_frame[1] = second_ref_frame;
if (!(ref_frame == INTRA_FRAME
|| (cpi->ref_frame_flags & flag_list[ref_frame]))) {
continue;
}
- if (!(mbmi->ref_frame[1] == NONE
- || (cpi->ref_frame_flags & flag_list[mbmi->ref_frame[1]]))) {
+ if (!(second_ref_frame == NONE
+ || (cpi->ref_frame_flags & flag_list[second_ref_frame]))) {
continue;
}
- comp_pred = mbmi->ref_frame[1] > INTRA_FRAME;
+ comp_pred = second_ref_frame > INTRA_FRAME;
if (comp_pred) {
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA)
if (vp9_mode_order[best_mode_index].ref_frame == INTRA_FRAME)
continue;
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_REFMISMATCH)
- if (vp9_mode_order[mode_index].ref_frame != best_inter_ref_frame &&
- vp9_mode_order[mode_index].second_ref_frame != best_inter_ref_frame)
+ if (ref_frame != best_inter_ref_frame &&
+ second_ref_frame != best_inter_ref_frame)
continue;
}
// TODO(jingning, jkoleszar): scaling reference frame not supported for
// SPLITMV.
- if (mbmi->ref_frame[0] > 0 &&
- (scale_factor[mbmi->ref_frame[0]].x_scale_fp != VP9_REF_NO_SCALE ||
- scale_factor[mbmi->ref_frame[0]].y_scale_fp != VP9_REF_NO_SCALE) &&
+ if (ref_frame > 0 &&
+ (scale_factor[ref_frame].x_scale_fp != VP9_REF_NO_SCALE ||
+ scale_factor[ref_frame].y_scale_fp != VP9_REF_NO_SCALE) &&
this_mode == SPLITMV)
continue;
- if (mbmi->ref_frame[1] > 0 &&
- (scale_factor[mbmi->ref_frame[1]].x_scale_fp != VP9_REF_NO_SCALE ||
- scale_factor[mbmi->ref_frame[1]].y_scale_fp != VP9_REF_NO_SCALE) &&
+ if (second_ref_frame > 0 &&
+ (scale_factor[second_ref_frame].x_scale_fp != VP9_REF_NO_SCALE ||
+ scale_factor[second_ref_frame].y_scale_fp != VP9_REF_NO_SCALE) &&
this_mode == SPLITMV)
continue;
- set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
- scale_factor);
+ set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
mbmi->mode = this_mode;
mbmi->uv_mode = DC_PRED;
continue;
if (comp_pred) {
- if (!(cpi->ref_frame_flags & flag_list[mbmi->ref_frame[1]]))
+ if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
continue;
- set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
- scale_factor);
+ set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
mode_excluded = mode_excluded
? mode_excluded
: cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
} else {
- // mbmi->ref_frame[1] = vp9_mode_order[mode_index].ref_frame[1];
- if (ref_frame != INTRA_FRAME) {
- if (mbmi->ref_frame[1] != INTRA_FRAME)
- mode_excluded =
- mode_excluded ?
- mode_excluded : cm->comp_pred_mode == COMP_PREDICTION_ONLY;
+ if (ref_frame != INTRA_FRAME && second_ref_frame != INTRA_FRAME) {
+ mode_excluded =
+ mode_excluded ?
+ mode_excluded : cm->comp_pred_mode == COMP_PREDICTION_ONLY;
}
}
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
if (comp_pred)
- xd->plane[i].pre[1] = yv12_mb[mbmi->ref_frame[1]][i];
+ xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
}
// If the segment reference frame feature is enabled....
rate2 += intra_cost_penalty;
distortion2 = distortion_y + distortion_uv;
} else if (this_mode == SPLITMV) {
- const int is_comp_pred = mbmi->ref_frame[1] > 0;
+ const int is_comp_pred = second_ref_frame > 0;
int rate;
int64_t distortion;
int64_t this_rd_thresh;
int tmp_best_skippable = 0;
int switchable_filter_index;
int_mv *second_ref = is_comp_pred ?
- &mbmi->ref_mvs[mbmi->ref_frame[1]][0] : NULL;
+ &mbmi->ref_mvs[second_ref_frame][0] : NULL;
union b_mode_info tmp_best_bmodes[16];
MB_MODE_INFO tmp_best_mbmode;
PARTITION_INFO tmp_best_partition;
if (vp9_mode_order[best_mode_index].ref_frame == INTRA_FRAME)
continue;
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_COMP_REFMISMATCH)
- if (vp9_mode_order[mode_index].ref_frame != best_inter_ref_frame &&
- vp9_mode_order[mode_index].second_ref_frame !=
- best_inter_ref_frame)
+ if (ref_frame != best_inter_ref_frame &&
+ second_ref_frame != best_inter_ref_frame)
continue;
}
- this_rd_thresh = (mbmi->ref_frame[0] == LAST_FRAME) ?
+ this_rd_thresh = (ref_frame == LAST_FRAME) ?
cpi->rd_threshes[bsize][THR_NEWMV] :
cpi->rd_threshes[bsize][THR_NEWA];
- this_rd_thresh = (mbmi->ref_frame[0] == GOLDEN_FRAME) ?
+ this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
cpi->rd_threshes[bsize][THR_NEWG] : this_rd_thresh;
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
tmp_rd = rd_pick_best_mbsegmentation(cpi, x,
- &mbmi->ref_mvs[mbmi->ref_frame[0]][0],
+ &mbmi->ref_mvs[ref_frame][0],
second_ref,
best_yrd,
&rate, &rate_y, &distortion,
// Handles the special case when a filter that is not in the
// switchable list (bilinear, 6-tap) is indicated at the frame level
tmp_rd = rd_pick_best_mbsegmentation(cpi, x,
- &mbmi->ref_mvs[mbmi->ref_frame[0]][0],
+ &mbmi->ref_mvs[ref_frame][0],
second_ref,
best_yrd,
&rate, &rate_y, &distortion,
txfm_cache[i] = txfm_cache[ONLY_4X4];
}
} else {
- compmode_cost = vp9_cost_bit(comp_mode_p,
- mbmi->ref_frame[1] > INTRA_FRAME);
+ compmode_cost = vp9_cost_bit(comp_mode_p, second_ref_frame > INTRA_FRAME);
this_rd = handle_inter_mode(cpi, x, bsize,
txfm_cache,
&rate2, &distortion2, &skippable,
// Estimate the reference frame signaling cost and add it
// to the rolling cost variable.
- if (mbmi->ref_frame[1] > INTRA_FRAME) {
- rate2 += ref_costs_comp[mbmi->ref_frame[0]];
+ if (second_ref_frame > INTRA_FRAME) {
+ rate2 += ref_costs_comp[ref_frame];
} else {
- rate2 += ref_costs_single[mbmi->ref_frame[0]];
+ rate2 += ref_costs_single[ref_frame];
}
if (!disable_skip) {
rate2 += prob_skip_cost;
}
}
- } else if (mb_skip_allowed && ref_frame != INTRA_FRAME &&
- !xd->lossless) {
+ } else if (mb_skip_allowed && ref_frame != INTRA_FRAME && !xd->lossless) {
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
// Add in the cost of the no skip flag.
// best_inter_mode = xd->mode_info_context->mbmi.mode;
}
- if (!disable_skip && mbmi->ref_frame[0] == INTRA_FRAME) {
+ if (!disable_skip && ref_frame == INTRA_FRAME) {
for (i = 0; i < NB_PREDICTION_TYPES; ++i)
best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++)
|| distortion2 < mode_distortions[this_mode]) {
mode_distortions[this_mode] = distortion2;
}
- if (frame_distortions[mbmi->ref_frame[0]] == -1
- || distortion2 < frame_distortions[mbmi->ref_frame[0]]) {
- frame_distortions[mbmi->ref_frame[0]] = distortion2;
+ if (frame_distortions[ref_frame] == -1
+ || distortion2 < frame_distortions[ref_frame]) {
+ frame_distortions[ref_frame] = distortion2;
}
}
}
/* keep record of best compound/single-only prediction */
- if (!disable_skip && mbmi->ref_frame[0] != INTRA_FRAME) {
+ if (!disable_skip && ref_frame != INTRA_FRAME) {
int single_rd, hybrid_rd, single_rate, hybrid_rate;
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
- if (mbmi->ref_frame[1] <= INTRA_FRAME &&
+ if (second_ref_frame <= INTRA_FRAME &&
single_rd < best_pred_rd[SINGLE_PREDICTION_ONLY]) {
best_pred_rd[SINGLE_PREDICTION_ONLY] = single_rd;
- } else if (mbmi->ref_frame[1] > INTRA_FRAME &&
+ } else if (second_ref_frame > INTRA_FRAME &&
single_rd < best_pred_rd[COMP_PREDICTION_ONLY]) {
best_pred_rd[COMP_PREDICTION_ONLY] = single_rd;
}
}
/* keep record of best filter type */
- if (!mode_excluded && !disable_skip && mbmi->ref_frame[0] != INTRA_FRAME &&
+ if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
cm->mcomp_filter_type != BILINEAR) {
int64_t ref = cpi->rd_filter_cache[cm->mcomp_filter_type == SWITCHABLE ?
VP9_SWITCHABLE_FILTERS :
if (early_term)
break;
- if (x->skip && !mode_excluded)
+ if (x->skip && !comp_pred)
break;
}
+
if (best_rd >= best_rd_so_far)
return INT64_MAX;
VP9_COMP *cpi = args->cpi;
MACROBLOCKD *xd = args->xd;
TOKENEXTRA **tp = args->tp;
- PLANE_TYPE type = plane ? PLANE_TYPE_UV : PLANE_TYPE_Y_WITH_DC;
TX_SIZE tx_size = ss_txfrm_size / 2;
int dry_run = args->dry_run;
int c = 0, rc = 0;
TOKENEXTRA *t = *tp; /* store tokens starting here */
const int eob = xd->plane[plane].eobs[block];
+ const PLANE_TYPE type = xd->plane[plane].plane_type;
const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16);
const BLOCK_SIZE_TYPE sb_type = (mbmi->sb_type < BLOCK_SIZE_SB8X8) ?
BLOCK_SIZE_SB8X8 : mbmi->sb_type;
coef_probs = cpi->common.fc.coef_probs[tx_size];
switch (tx_size) {
default:
- case TX_4X4: {
- const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ?
- get_tx_type_4x4(xd, block) : DCT_DCT;
+ case TX_4X4:
above_ec = A[0] != 0;
left_ec = L[0] != 0;
seg_eob = 16;
- scan = get_scan_4x4(tx_type);
+ scan = get_scan_4x4(get_tx_type_4x4(type, xd, block));
band_translate = vp9_coefband_trans_4x4;
break;
- }
- case TX_8X8: {
- const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ?
- get_tx_type_8x8(xd) : DCT_DCT;
+ case TX_8X8:
above_ec = (A[0] + A[1]) != 0;
left_ec = (L[0] + L[1]) != 0;
seg_eob = 64;
- scan = get_scan_8x8(tx_type);
+ scan = get_scan_8x8(get_tx_type_8x8(type, xd));
band_translate = vp9_coefband_trans_8x8plus;
break;
- }
- case TX_16X16: {
- const TX_TYPE tx_type = type == PLANE_TYPE_Y_WITH_DC ?
- get_tx_type_16x16(xd) : DCT_DCT;
+ case TX_16X16:
above_ec = (A[0] + A[1] + A[2] + A[3]) != 0;
left_ec = (L[0] + L[1] + L[2] + L[3]) != 0;
seg_eob = 256;
- scan = get_scan_16x16(tx_type);
+ scan = get_scan_16x16(get_tx_type_16x16(type, xd));
band_translate = vp9_coefband_trans_8x8plus;
break;
- }
case TX_32X32:
above_ec = (A[0] + A[1] + A[2] + A[3] + A[4] + A[5] + A[6] + A[7]) != 0;
left_ec = (L[0] + L[1] + L[2] + L[3] + L[4] + L[5] + L[6] + L[7]) != 0;
t->context_tree = coef_probs[type][ref][band][pt];
t->skip_eob_node = (c > 0) && (token_cache[scan[c - 1]] == 0);
-#if CONFIG_BALANCED_COEFTREE
- assert(token <= ZERO_TOKEN ||
- vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
-#else
assert(vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
-#endif
if (!dry_run) {
++counts[type][ref][band][pt][token];
-#if CONFIG_BALANCED_COEFTREE
- if (!t->skip_eob_node && token > ZERO_TOKEN)
-#else
if (!t->skip_eob_node)
-#endif
++cpi->common.counts.eob_branch[tx_size][type][ref][band][pt];
}
token_cache[scan[c]] = vp9_pt_energy_class[token];