/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <limits.h>
#include <stdint.h>

#include "vpx_config.h"
#include "./vpx_scale_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vp8_rtcd.h"
#include "vp8/common/onyxc_int.h"
#include "vp8/common/blockd.h"
#include "vp8/common/systemdependent.h"
#include "vp8/encoder/quantize.h"
#include "vp8/common/alloccommon.h"
#include "firstpass.h"
#include "vpx_dsp/psnr.h"
#include "vpx_scale/vpx_scale.h"
#include "vp8/common/extend.h"
#include "vp8/common/quant_common.h"
#include "segmentation.h"
#include "vp8/common/postproc.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"
#include "vpx_ports/system_state.h"
#include "vpx_ports/vpx_timer.h"
#if ARCH_ARM && CONFIG_RUNTIME_CPU_DETECT
#include "vpx_ports/arm.h"
#endif
#if CONFIG_MULTI_RES_ENCODING
#include "mr_dissim.h"
#endif
#include "encodeframe.h"
50 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
51 extern int vp8_update_coef_context(VP8_COMP *cpi);
52 extern void vp8_update_coef_probs(VP8_COMP *cpi);
55 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
56 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
57 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
59 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
60 YV12_BUFFER_CONFIG *post, int filt_lvl,
61 int low_var_thresh, int flag);
62 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
63 extern unsigned int vp8_get_processor_freq();
64 extern void print_tree_update_probs();
65 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
66 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
68 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
70 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
72 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
74 static void set_default_lf_deltas(VP8_COMP *cpi);
76 extern const int vp8_gf_interval_table[101];
78 #if CONFIG_INTERNAL_STATS
80 #include "vpx_dsp/ssim.h"
86 #ifdef OUTPUT_YUV_DENOISED
87 FILE *yuv_denoised_file;
97 extern int skip_true_count;
98 extern int skip_false_count;
101 #ifdef VP8_ENTROPY_STATS
102 extern int intra_mode_stats[10][10][10];
106 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0 };
108 unsigned int tot_pm = 0;
109 unsigned int cnt_pm = 0;
110 unsigned int tot_ef = 0;
111 unsigned int cnt_ef = 0;
115 extern unsigned __int64 Sectionbits[50];
116 extern int y_modes[5];
117 extern int uv_modes[4];
118 extern int b_modes[10];
120 extern int inter_y_modes[10];
121 extern int inter_uv_modes[4];
122 extern unsigned int inter_b_modes[15];
125 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
127 extern const int qrounding_factors[129];
128 extern const int qzbin_factors[129];
129 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
130 extern const int vp8cx_base_skip_false_prob[128];
132 /* Tables relating active max Q to active min Q */
133 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
137 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
138 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
139 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
140 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
142 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
144 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
145 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
146 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
147 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
148 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
149 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
151 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
152 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
153 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
154 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
155 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
156 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
157 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
158 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
160 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
161 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
162 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
163 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
164 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
165 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
166 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
167 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
169 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
170 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
171 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
172 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
173 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
174 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
175 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
176 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
178 static const unsigned char inter_minq[QINDEX_RANGE] = {
179 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
180 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
181 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
182 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
183 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
184 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
185 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
188 #ifdef PACKET_TESTING
189 extern FILE *vpxlogc;
192 static void save_layer_context(VP8_COMP *cpi) {
193 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
195 /* Save layer dependent coding state */
196 lc->target_bandwidth = cpi->target_bandwidth;
197 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
198 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
199 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
200 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
201 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
202 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
203 lc->buffer_level = cpi->buffer_level;
204 lc->bits_off_target = cpi->bits_off_target;
205 lc->total_actual_bits = cpi->total_actual_bits;
206 lc->worst_quality = cpi->worst_quality;
207 lc->active_worst_quality = cpi->active_worst_quality;
208 lc->best_quality = cpi->best_quality;
209 lc->active_best_quality = cpi->active_best_quality;
210 lc->ni_av_qi = cpi->ni_av_qi;
211 lc->ni_tot_qi = cpi->ni_tot_qi;
212 lc->ni_frames = cpi->ni_frames;
213 lc->avg_frame_qindex = cpi->avg_frame_qindex;
214 lc->rate_correction_factor = cpi->rate_correction_factor;
215 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
216 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
217 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
218 lc->inter_frame_target = cpi->inter_frame_target;
219 lc->total_byte_count = cpi->total_byte_count;
220 lc->filter_level = cpi->common.filter_level;
222 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
224 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
225 sizeof(cpi->mb.count_mb_ref_frame_usage));
228 static void restore_layer_context(VP8_COMP *cpi, const int layer) {
229 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
231 /* Restore layer dependent coding state */
232 cpi->current_layer = layer;
233 cpi->target_bandwidth = lc->target_bandwidth;
234 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
235 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
236 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
237 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
238 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
239 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
240 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
241 cpi->buffer_level = lc->buffer_level;
242 cpi->bits_off_target = lc->bits_off_target;
243 cpi->total_actual_bits = lc->total_actual_bits;
244 cpi->active_worst_quality = lc->active_worst_quality;
245 cpi->active_best_quality = lc->active_best_quality;
246 cpi->ni_av_qi = lc->ni_av_qi;
247 cpi->ni_tot_qi = lc->ni_tot_qi;
248 cpi->ni_frames = lc->ni_frames;
249 cpi->avg_frame_qindex = lc->avg_frame_qindex;
250 cpi->rate_correction_factor = lc->rate_correction_factor;
251 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
252 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
253 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
254 cpi->inter_frame_target = lc->inter_frame_target;
255 cpi->total_byte_count = lc->total_byte_count;
256 cpi->common.filter_level = lc->filter_level;
258 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
260 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
261 sizeof(cpi->mb.count_mb_ref_frame_usage));
/* Scale |val| by num/denom using 64-bit intermediates so that val*num
 * cannot overflow a 32-bit int (used for buffer levels scaled by bitrate).
 * NOTE: the listing had lost the llnum/llval declarations — restored.
 * Caller must guarantee denom != 0 (division by zero is UB).
 */
static int rescale(int val, int num, int denom) {
  int64_t llnum = num;
  int64_t llden = denom;
  int64_t llval = val;

  return (int)(llval * llnum / llden);
}
272 static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
274 double prev_layer_framerate) {
275 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
277 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
278 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
280 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
281 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
282 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
284 lc->starting_buffer_level =
285 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
287 if (oxcf->optimal_buffer_level == 0) {
288 lc->optimal_buffer_level = lc->target_bandwidth / 8;
290 lc->optimal_buffer_level =
291 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
294 if (oxcf->maximum_buffer_size == 0) {
295 lc->maximum_buffer_size = lc->target_bandwidth / 8;
297 lc->maximum_buffer_size =
298 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
301 /* Work out the average size of a frame within this layer */
303 lc->avg_frame_size_for_layer =
304 (int)((cpi->oxcf.target_bitrate[layer] -
305 cpi->oxcf.target_bitrate[layer - 1]) *
306 1000 / (lc->framerate - prev_layer_framerate));
309 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
310 lc->active_best_quality = cpi->oxcf.best_allowed_q;
311 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
313 lc->buffer_level = lc->starting_buffer_level;
314 lc->bits_off_target = lc->starting_buffer_level;
316 lc->total_actual_bits = 0;
320 lc->rate_correction_factor = 1.0;
321 lc->key_frame_rate_correction_factor = 1.0;
322 lc->gf_rate_correction_factor = 1.0;
323 lc->inter_frame_target = 0;
326 // Upon a run-time change in temporal layers, reset the layer context parameters
327 // for any "new" layers. For "existing" layers, let them inherit the parameters
328 // from the previous layer state (at the same layer #). In future we may want
329 // to better map the previous layer state(s) to the "new" ones.
330 static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
331 const int prev_num_layers) {
333 double prev_layer_framerate = 0;
334 const int curr_num_layers = cpi->oxcf.number_of_layers;
335 // If the previous state was 1 layer, get current layer context from cpi.
336 // We need this to set the layer context for the new layers below.
337 if (prev_num_layers == 1) {
338 cpi->current_layer = 0;
339 save_layer_context(cpi);
341 for (i = 0; i < curr_num_layers; ++i) {
342 LAYER_CONTEXT *lc = &cpi->layer_context[i];
343 if (i >= prev_num_layers) {
344 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
346 // The initial buffer levels are set based on their starting levels.
347 // We could set the buffer levels based on the previous state (normalized
348 // properly by the layer bandwidths) but we would need to keep track of
349 // the previous set of layer bandwidths (i.e., target_bitrate[i])
350 // before the layer change. For now, reset to the starting levels.
352 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
353 lc->bits_off_target = lc->buffer_level;
354 // TDOD(marpan): Should we set the rate_correction_factor and
355 // active_worst/best_quality to values derived from the previous layer
356 // state (to smooth-out quality dips/rate fluctuation at transition)?
358 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
359 // is not set for 1 layer, and the restore_layer_context/save_context()
360 // are not called in the encoding loop, so we need to call it here to
361 // pass the layer context state to |cpi|.
362 if (curr_num_layers == 1) {
363 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
365 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
366 lc->bits_off_target = lc->buffer_level;
367 restore_layer_context(cpi, 0);
369 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
373 static void setup_features(VP8_COMP *cpi) {
374 // If segmentation enabled set the update flags
375 if (cpi->mb.e_mbd.segmentation_enabled) {
376 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
377 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
379 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
380 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
383 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
384 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
385 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
386 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
387 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
388 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
389 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
390 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
392 set_default_lf_deltas(cpi);
395 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
/* One-time global encoder initialisation, guarded so repeated calls are
 * no-ops.
 * NOTE(review): the listing was truncated; upstream also invokes the RTCD
 * (runtime CPU dispatch) initialiser inside this guard — confirm against
 * the full source before relying on this reconstruction.
 */
void vp8_initialize_enc(void) {
  static volatile int init_done = 0;

  if (!init_done) {
    vp8_init_intra_predictors();
    init_done = 1;
  }
}
407 static void dealloc_compressor_data(VP8_COMP *cpi) {
408 vpx_free(cpi->tplist);
411 /* Delete last frame MV storage buffers */
415 vpx_free(cpi->lf_ref_frame_sign_bias);
416 cpi->lf_ref_frame_sign_bias = 0;
418 vpx_free(cpi->lf_ref_frame);
419 cpi->lf_ref_frame = 0;
421 /* Delete sementation map */
422 vpx_free(cpi->segmentation_map);
423 cpi->segmentation_map = 0;
425 vpx_free(cpi->active_map);
428 vp8_de_alloc_frame_buffers(&cpi->common);
430 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
431 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
432 dealloc_raw_frame_buffers(cpi);
437 /* Structure used to monitor GF usage */
438 vpx_free(cpi->gf_active_flags);
439 cpi->gf_active_flags = 0;
441 /* Activity mask based per mb zbin adjustments */
442 vpx_free(cpi->mb_activity_map);
443 cpi->mb_activity_map = 0;
445 vpx_free(cpi->mb.pip);
448 #if CONFIG_MULTITHREAD
449 vpx_free(cpi->mt_current_mb_col);
450 cpi->mt_current_mb_col = NULL;
454 static void enable_segmentation(VP8_COMP *cpi) {
455 /* Set the appropriate feature bit */
456 cpi->mb.e_mbd.segmentation_enabled = 1;
457 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
458 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
460 static void disable_segmentation(VP8_COMP *cpi) {
461 /* Clear the appropriate feature bit */
462 cpi->mb.e_mbd.segmentation_enabled = 0;
465 /* Valid values for a segment are 0 to 3
466 * Segmentation map is arrange as [Rows][Columns]
468 static void set_segmentation_map(VP8_COMP *cpi,
469 unsigned char *segmentation_map) {
470 /* Copy in the new segmentation map */
471 memcpy(cpi->segmentation_map, segmentation_map,
472 (cpi->common.mb_rows * cpi->common.mb_cols));
474 /* Signal that the map should be updated. */
475 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
476 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
479 /* The values given for each segment can be either deltas (from the default
480 * value chosen for the frame) or absolute values.
482 * Valid range for abs values is:
483 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
484 * Valid range for delta values are:
485 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
487 * abs_delta = SEGMENT_DELTADATA (deltas)
488 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
491 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
492 unsigned char abs_delta) {
493 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
494 memcpy(cpi->segment_feature_data, feature_data,
495 sizeof(cpi->segment_feature_data));
498 /* A simple function to cyclically refresh the background at a lower Q */
499 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
500 unsigned char *seg_map = cpi->segmentation_map;
501 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
503 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
504 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
506 cpi->cyclic_refresh_q = Q / 2;
508 if (cpi->oxcf.screen_content_mode) {
509 // Modify quality ramp-up based on Q. Above some Q level, increase the
510 // number of blocks to be refreshed, and reduce it below the thredhold.
511 // Turn-off under certain conditions (i.e., away from key frame, and if
512 // we are at good quality (low Q) and most of the blocks were
514 // in previous frame.
515 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
516 if (Q >= qp_thresh) {
517 cpi->cyclic_refresh_mode_max_mbs_perframe =
518 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
519 } else if (cpi->frames_since_key > 250 && Q < 20 &&
520 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
521 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
523 cpi->cyclic_refresh_mode_max_mbs_perframe =
524 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
526 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
529 // Set every macroblock to be eligible for update.
530 // For key frame this will reset seg map to 0.
531 memset(cpi->segmentation_map, 0, mbs_in_frame);
533 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
534 /* Cycle through the macro_block rows */
535 /* MB loop to set local segmentation map */
536 i = cpi->cyclic_refresh_mode_index;
537 assert(i < mbs_in_frame);
539 /* If the MB is as a candidate for clean up then mark it for
540 * possible boost/refresh (segment 1) The segment id may get
541 * reset to 0 later if the MB gets coded anything other than
542 * last frame 0,0 as only (last frame 0,0) MBs are eligable for
543 * refresh : that is to say Mbs likely to be background blocks.
545 if (cpi->cyclic_refresh_map[i] == 0) {
548 } else if (cpi->cyclic_refresh_map[i] < 0) {
549 cpi->cyclic_refresh_map[i]++;
553 if (i == mbs_in_frame) i = 0;
555 } while (block_count && i != cpi->cyclic_refresh_mode_index);
557 cpi->cyclic_refresh_mode_index = i;
559 #if CONFIG_TEMPORAL_DENOISING
560 if (cpi->oxcf.noise_sensitivity > 0) {
561 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
562 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
563 (cpi->frames_since_key >
564 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
565 // Under aggressive denoising, use segmentation to turn off loop
566 // filter below some qp thresh. The filter is reduced for all
567 // blocks that have been encoded as ZEROMV LAST x frames in a row,
568 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
569 // This is to avoid "dot" artifacts that can occur from repeated
570 // loop filtering on noisy input source.
571 cpi->cyclic_refresh_q = Q;
572 // lf_adjustment = -MAX_LOOP_FILTER;
574 for (i = 0; i < mbs_in_frame; ++i) {
575 seg_map[i] = (cpi->consec_zero_last[i] >
576 cpi->denoiser.denoise_pars.consec_zerolast)
585 /* Activate segmentation. */
586 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
587 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
588 enable_segmentation(cpi);
590 /* Set up the quant segment data */
591 feature_data[MB_LVL_ALT_Q][0] = 0;
592 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
593 feature_data[MB_LVL_ALT_Q][2] = 0;
594 feature_data[MB_LVL_ALT_Q][3] = 0;
596 /* Set up the loop segment data */
597 feature_data[MB_LVL_ALT_LF][0] = 0;
598 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
599 feature_data[MB_LVL_ALT_LF][2] = 0;
600 feature_data[MB_LVL_ALT_LF][3] = 0;
602 /* Initialise the feature data structure */
603 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
606 static void set_default_lf_deltas(VP8_COMP *cpi) {
607 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
608 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
610 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
611 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
613 /* Test of ref frame deltas */
614 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
615 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
616 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
617 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
619 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
621 if (cpi->oxcf.Mode == MODE_REALTIME) {
622 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
624 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
627 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
628 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
/* Convenience macros for mapping speed and mode into a continuous
 * interval: GOOD-quality speeds occupy [1..6], realtime speeds [7..].
 * Arguments are now parenthesised to be safe with expression operands.
 */
#define GOOD(x) ((x) + 1)
#define RT(x) ((x) + 7)

/* Walk a (value, limit) pair table and return the value of the first pair
 * whose limit exceeds |speed|. Tables end with an INT_MAX limit so the scan
 * always terminates.
 * NOTE: the listing had lost the body's interior and the tables' closing
 * braces — restored; table values are unchanged.
 */
static int speed_map(int speed, const int *map) {
  int res;

  do {
    res = *map++;
  } while (speed >= *map++);
  return res;
}

static const int thresh_mult_map_znn[] = {
  /* map common to zero, nearest, and near */
  0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
};

static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500,    GOOD(3),
                                              2000, RT(0),   1000,    RT(1),
                                              2000, RT(7),   INT_MAX, INT_MAX };

static const int thresh_mult_map_bpred[] = { 2000,    GOOD(0), 2500, GOOD(2),
                                             5000,    GOOD(3), 7500, RT(0),
                                             2500,    RT(1),   5000, RT(6),
                                             INT_MAX, INT_MAX };

static const int thresh_mult_map_tm[] = { 1000,    GOOD(2), 1500, GOOD(3),
                                          2000,    RT(0),   0,    RT(1),
                                          1000,    RT(2),   2000, RT(7),
                                          INT_MAX, INT_MAX };

static const int thresh_mult_map_new1[] = { 1000,  GOOD(2), 2000,
                                            RT(0), 2000,    INT_MAX };

static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
                                            2500, GOOD(5), 4000, RT(0),
                                            2000, RT(2),   2500, RT(5),
                                            4000, INT_MAX };

static const int thresh_mult_map_split1[] = {
  2500,  GOOD(0), 1700,  GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
  RT(0), 5000,    RT(1), 10000,   RT(2), 25000,   RT(3), INT_MAX, INT_MAX
};

static const int thresh_mult_map_split2[] = {
  5000,  GOOD(0), 4500,  GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
  RT(0), 10000,   RT(1), 20000,   RT(2), 50000,   RT(3), INT_MAX, INT_MAX
};

static const int mode_check_freq_map_zn2[] = {
  /* {zero,nearest}{2,3} */
  0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
};

static const int mode_check_freq_map_vhbpred[] = {
  0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
};

static const int mode_check_freq_map_near2[] = {
  0,      GOOD(5), 2,      RT(0),  0,      RT(3),  2,
  RT(10), 1 << 2,  RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
};

static const int mode_check_freq_map_new1[] = {
  0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
};

static const int mode_check_freq_map_new2[] = { 0,      GOOD(5), 4,      RT(0),
                                                4,      RT(10),  1 << 3, RT(11),
                                                1 << 4, RT(12),  1 << 5,
                                                INT_MAX };

static const int mode_check_freq_map_split1[] = {
  0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
};

static const int mode_check_freq_map_split2[] = {
  0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
};
/* Configure the encoder's speed/quality trade-off features from the current
 * compressor mode and speed setting, then wire up the matching motion-search,
 * DCT, quantizer and sub-pixel-refinement function pointers.
 *
 * NOTE(review): this listing is truncated — several lines are missing
 * (e.g. the declarations of `i`, `ref_frames`, `min` and `thresh`, the
 * Mode/Speed clamping branches, the per-Mode switch skeleton and various
 * closing braces), so the code below is not compilable as shown. Comments
 * describe only the statements that survive; structure between them is
 * inferred with caution.
 */
void vp8_set_speed_features(VP8_COMP *cpi) {
  SPEED_FEATURES *sf = &cpi->sf;
  int Mode = cpi->compressor_speed;
  int Speed = cpi->Speed;
  VP8_COMMON *cm = &cpi->common;
  /* Remembered so the quantizer is re-initialised only when the choice of
   * improved vs fast quant actually changes (see end of function). */
  int last_improved_quant = sf->improved_quant;

  /* Initialise default mode frequency sampling variables */
  for (i = 0; i < MAX_MODES; ++i) {
    cpi->mode_check_freq[i] = 0;

  cpi->mb.mbs_tested_so_far = 0;
  cpi->mb.mbs_zero_last_dot_suppress = 0;

  /* best quality defaults */
  sf->search_method = NSTEP;
  sf->improved_quant = 1;
  sf->improved_dct = 1;
  sf->quarter_pixel_search = 1;
  sf->half_pixel_search = 1;
  sf->iterative_sub_pixel = 1;
  sf->optimize_coefficients = 1;
  sf->use_fastquant_for_pick = 0;
  sf->no_skip_block4x4_search = 1;
  sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
  sf->improved_mv_pred = 1;

  /* default thresholds to 0 */
  for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;

  /* Count enabled references */
  if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
  if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
  if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;

  /* Convert speed to continuous range, with clamping */
  } else if (Mode == 2) {
    if (Speed > 5) Speed = 5;

  /* Mode-decision RD thresholds; 0 keeps a mode always eligible. */
  sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
      sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */

  sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
          sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
              speed_map(Speed, thresh_mult_map_znn);

  sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
      speed_map(Speed, thresh_mult_map_vhpred);
  sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
  sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
  sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
  sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
      speed_map(Speed, thresh_mult_map_new2);
  sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
  sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
      speed_map(Speed, thresh_mult_map_split2);

  // Special case for temporal layers.
  // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
  // used as second reference. We don't modify thresholds for ALTREF case
  // since ALTREF is usually used as long-term reference in temporal layers.
  if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
      (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
      (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
    if (cpi->closest_reference_frame == GOLDEN_FRAME) {
      sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
      sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
      /* NOTE(review): an else branch likely separated the >>3 and >>1
       * cases here in the untruncated source — confirm. */
      sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
      sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;

  /* Mode-check frequencies: 0 = evaluate the mode on every macroblock. */
  cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
      cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
          cpi->mode_check_freq[THR_DC] = 0; /* always */

  cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
      cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
          speed_map(Speed, mode_check_freq_map_zn2);

  cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
      speed_map(Speed, mode_check_freq_map_near2);

  cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
      cpi->mode_check_freq[THR_B_PRED] =
          speed_map(Speed, mode_check_freq_map_vhbpred);
  cpi->mode_check_freq[THR_NEW1] = speed_map(Speed, mode_check_freq_map_new1);
  cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
      speed_map(Speed, mode_check_freq_map_new2);
  cpi->mode_check_freq[THR_SPLIT1] =
      speed_map(Speed, mode_check_freq_map_split1);
  cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
      speed_map(Speed, mode_check_freq_map_split2);

/* NOTE(review): the enclosing switch over the compressor mode is missing
 * from this listing; the case labels below belong to it. */
#if !CONFIG_REALTIME_ONLY
  case 0: /* best quality mode */
    sf->max_step_search_steps = MAX_MVSEARCH_STEPS;

      /* Disable coefficient optimization above speed 0 */
      sf->optimize_coefficients = 0;
      sf->use_fastquant_for_pick = 1;
      sf->no_skip_block4x4_search = 0;

      sf->improved_quant = 0;
      sf->improved_dct = 0;

      /* Only do recode loop on key frames, golden frames and
      sf->recode_loop = 0; /* recode loop off */
      sf->RD = 0;          /* Turn rd off */

      sf->auto_filter = 0; /* Faster selection of loop filter */

      sf->optimize_coefficients = 0;

      sf->iterative_sub_pixel = 1;
      sf->search_method = NSTEP;

      sf->improved_quant = 0;
      sf->improved_dct = 0;

      sf->use_fastquant_for_pick = 1;
      sf->no_skip_block4x4_search = 0;

      if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */

      sf->auto_filter = 0; /* Faster selection of loop filter */
      sf->search_method = HEX;
      sf->iterative_sub_pixel = 0;

        /* Adaptive skip-threshold derivation from the per-MB error-bin
         * histogram (declarations of `min`, `thresh`, `total_skip` are
         * among the truncated lines). */
        unsigned int sum = 0;
        unsigned int total_mbs = cm->MBs;
        unsigned int total_skip;

        if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;

        for (i = 0; i < min; ++i) {
          sum += cpi->mb.error_bins[i];

        /* i starts from 2 to make sure thresh started from 2048 */
        for (; i < 1024; ++i) {
          sum += cpi->mb.error_bins[i];
            (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
        if (thresh < 2000) thresh = 2000;

        if (ref_frames > 1) {
          sf->thresh_mult[THR_NEW1] = thresh;
          sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
          sf->thresh_mult[THR_NEAR1] = thresh >> 1;

        if (ref_frames > 2) {
          sf->thresh_mult[THR_NEW2] = thresh << 1;
          sf->thresh_mult[THR_NEAREST2] = thresh;
          sf->thresh_mult[THR_NEAR2] = thresh;

        if (ref_frames > 3) {
          sf->thresh_mult[THR_NEW3] = thresh << 1;
          sf->thresh_mult[THR_NEAREST3] = thresh;
          sf->thresh_mult[THR_NEAR3] = thresh;

      sf->improved_mv_pred = 0;

      if (Speed > 8) sf->quarter_pixel_search = 0;

      if (cm->version == 0) {
        cm->filter_type = NORMAL_LOOPFILTER;

        if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
        cm->filter_type = SIMPLE_LOOPFILTER;

      /* This has a big hit on quality. Last resort */
      if (Speed >= 15) sf->half_pixel_search = 0;

      memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));

  /* Slow quant, dct and trellis not worthwhile for first pass
   * so make sure they are always turned off.
   */
  if (cpi->pass == 1) {
    sf->improved_quant = 0;
    sf->optimize_coefficients = 0;
    sf->improved_dct = 0;

  /* Initialise the motion-search tables for the selected search method. */
  if (cpi->sf.search_method == NSTEP) {
    vp8_init3smotion_compensation(&cpi->mb,
                                  cm->yv12_fb[cm->lst_fb_idx].y_stride);
  } else if (cpi->sf.search_method == DIAMOND) {
    vp8_init_dsmotion_compensation(&cpi->mb,
                                   cm->yv12_fb[cm->lst_fb_idx].y_stride);

  if (cpi->sf.improved_dct) {
    cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
    cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
    /* No fast FDCT defined for any platform at this time. */
    cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
    cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;

  cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;

  /* Select slow (rate-accurate) or fast quantizer. */
  if (cpi->sf.improved_quant) {
    cpi->mb.quantize_b = vp8_regular_quantize_b;
    cpi->mb.quantize_b = vp8_fast_quantize_b;

  if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);

  /* Pick the sub-pixel refinement routine by decreasing precision. */
  if (cpi->sf.iterative_sub_pixel == 1) {
    cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
  } else if (cpi->sf.quarter_pixel_search) {
    cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
  } else if (cpi->sf.half_pixel_search) {
    cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
    cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;

  /* Trellis coefficient optimization is never used in the first pass. */
  if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
    cpi->mb.optimize = 1;
    cpi->mb.optimize = 0;

  /* Full-pixel-only streams never do sub-pixel refinement. */
  if (cpi->common.full_pixel) {
    cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;

  frames_at_speed[cpi->Speed]++;
/* Allocates the raw-source-side buffers used ahead of encoding: the
 * lookahead queue (sized by oxcf.lag_in_frames) and, when temporal alt-ref
 * filtering is compiled in (VP8_TEMPORAL_ALT_REF), an MB-aligned altref
 * accumulation buffer. Allocation failure aborts via vpx_internal_error().
 * NOTE(review): this chunk is a lossy extraction -- each line keeps its
 * original line number and some lines (blanks/closing braces) are missing;
 * code tokens below are preserved verbatim. */
1029 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1030 #if VP8_TEMPORAL_ALT_REF
/* Round configured dimensions up to a whole number of 16x16 macroblocks. */
1031 int width = (cpi->oxcf.Width + 15) & ~15;
1032 int height = (cpi->oxcf.Height + 15) & ~15;
1035 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1036 cpi->oxcf.lag_in_frames);
1037 if (!cpi->lookahead) {
1038 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1039 "Failed to allocate lag buffers");
1042 #if VP8_TEMPORAL_ALT_REF
/* Altref buffer consumed by the temporal filter. */
1044 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1045 VP8BORDERINPIXELS)) {
1046 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1047 "Failed to allocate altref buffer");
/* Releases the buffers created by alloc_raw_frame_buffers(): the altref
 * buffer (when VP8_TEMPORAL_ALT_REF is compiled in) and the lookahead
 * queue. NOTE(review): lossy extraction -- closing lines missing. */
1053 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1054 #if VP8_TEMPORAL_ALT_REF
1055 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1057 vp8_lookahead_destroy(cpi->lookahead);
/* (Re)allocates the per-macroblock PARTITION_INFO array:
 * (mb_cols + 1) * (mb_rows + 1) zeroed entries, with cpi->mb.pi pointed
 * past the one-row/one-column border (mode_info_stride + 1).
 * Returns 1 on allocation failure; the success return path is not visible
 * in this extract (presumably returns 0 -- TODO confirm against upstream).
 * NOTE(review): lossy extraction -- the assignment line for cpi->mb.pip
 * preceding the vpx_calloc() call is missing here. */
1060 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1061 vpx_free(cpi->mb.pip);
1064 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1065 sizeof(PARTITION_INFO));
1066 if (!cpi->mb.pip) return 1;
1068 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
/* Allocates all encoder state that scales with the frame size: common
 * frame buffers, partition data, loop-filter-picker and scaled-source
 * YV12 buffers, the token buffer, GF usage maps, MV-prediction arrays
 * (with a 2-unit border), segmentation and active maps, multithread sync
 * arrays, per-row token lists, and (optionally) the temporal denoiser.
 * Each previously-allocated array is vpx_free()d before reallocation, so
 * this is safe to call again on a resolution change (see
 * vp8_change_config). All failures abort via vpx_internal_error() or
 * CHECK_MEM_ERROR.
 * NOTE(review): lossy extraction -- line-number prefixes retained and
 * some lines (braces, #else/#endif, CHECK_MEM_ERROR openers) missing. */
1073 void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1074 VP8_COMMON *cm = &cpi->common;
1076 int width = cm->Width;
1077 int height = cm->Height;
1079 if (vp8_alloc_frame_buffers(cm, width, height)) {
1080 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1081 "Failed to allocate frame buffers");
1084 if (vp8_alloc_partition_data(cpi)) {
1085 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1086 "Failed to allocate partition data");
/* Round dimensions up to a multiple of 16 before sizing YV12 buffers. */
1089 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1091 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1093 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1094 VP8BORDERINPIXELS)) {
1095 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1096 "Failed to allocate last frame buffer");
1099 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1100 VP8BORDERINPIXELS)) {
1101 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1102 "Failed to allocate scaled source buffer");
/* Token buffer: 24 tokens * 16 entries per MB (fixed 8-MB budget in the
 * on-the-fly bitpacking realtime configuration). */
1108 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1109 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1111 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1113 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1116 /* Data used for real time vc mode to see if gf needs refreshing */
1117 cpi->zeromv_count = 0;
1119 /* Structures used to monitor GF usage */
1120 vpx_free(cpi->gf_active_flags);
1122 cpi->gf_active_flags,
1123 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1124 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1126 vpx_free(cpi->mb_activity_map);
1128 cpi->mb_activity_map,
1129 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1131 /* allocate memory for storing last frame's MVs for MV prediction. */
1132 vpx_free(cpi->lfmv);
1133 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1134 sizeof(*cpi->lfmv)));
1135 vpx_free(cpi->lf_ref_frame_sign_bias);
1136 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1137 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1138 sizeof(*cpi->lf_ref_frame_sign_bias)));
1139 vpx_free(cpi->lf_ref_frame);
1140 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1141 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1142 sizeof(*cpi->lf_ref_frame)));
1144 /* Create the encoder segmentation map and set all entries to 0 */
1145 vpx_free(cpi->segmentation_map);
1147 cpi->segmentation_map,
1148 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1149 cpi->cyclic_refresh_mode_index = 0;
1150 vpx_free(cpi->active_map);
1151 CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1152 sizeof(*cpi->active_map)));
/* All macroblocks start out active (1 = encode). */
1153 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
/* Multithreading: per-row sync granularity grows with frame width. */
1155 #if CONFIG_MULTITHREAD
1157 cpi->mt_sync_range = 1;
1158 } else if (width <= 1280) {
1159 cpi->mt_sync_range = 4;
1160 } else if (width <= 2560) {
1161 cpi->mt_sync_range = 8;
1163 cpi->mt_sync_range = 16;
1166 if (cpi->oxcf.multi_threaded > 1) {
1167 vpx_free(cpi->mt_current_mb_col);
1168 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1169 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1174 vpx_free(cpi->tplist);
1175 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1177 #if CONFIG_TEMPORAL_DENOISING
1178 if (cpi->oxcf.noise_sensitivity > 0) {
1179 vp8_denoiser_free(&cpi->denoiser);
1180 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1181 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1182 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1183 "Failed to allocate denoiser");
/* Maps the external 0..63 quantizer scale exposed by the public API onto
 * the internal 0..127 VP8 quantizer index. Monotonically non-decreasing;
 * 64 entries, one per external level.
 * (Reconstructed from a mangled extract: the 64 table values were all
 * visible; only the closing brace line was lost.) */
static const int q_trans[] = {
  0,   1,   2,   3,   4,   5,   7,   8,   9,   10,  12,  13,  15,  17,  18,  19,
  20,  21,  23,  24,  25,  26,  27,  28,  29,  30,  31,  33,  35,  37,  39,  41,
  43,  45,  47,  49,  51,  53,  55,  57,  59,  61,  64,  67,  70,  73,  76,  79,
  82,  85,  88,  91,  94,  97,  100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
};

/* Inverse of the q_trans[] mapping.
 *
 * Returns the smallest external quantizer level i (0..63) such that
 * q_trans[i] >= x; any x above the table maximum (127) clamps to 63.
 */
int vp8_reverse_trans(int x) {
  int i;

  for (i = 0; i < 64; ++i) {
    if (q_trans[i] >= x) return i;
  }

  return 63;
}
/* Recomputes rate-control parameters that depend on the frame rate:
 * per-frame and minimum-frame bandwidth, and the maximum GF/ARF interval
 * (floored at 12, extended for static scenes, and clamped by the
 * lookahead lag when alt-ref is in use). Framerates below 0.1 are treated
 * as 30 as a defensive default.
 * NOTE(review): lossy extraction -- line-number prefixes retained; blank
 * lines and closing braces missing. */
1206 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1207 if (framerate < .1) framerate = 30;
1209 cpi->framerate = framerate;
1210 cpi->output_framerate = framerate;
1211 cpi->per_frame_bandwidth =
1212 (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1213 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
/* Minimum frame bandwidth is a two-pass-VBR percentage of the average. */
1214 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1215 cpi->oxcf.two_pass_vbrmin_section / 100);
1217 /* Set Maximum gf/arf interval */
1218 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1220 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1222 /* Extended interval for genuinely static scenes */
1223 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1225 /* Special conditions when altr ref frame enabled in lagged compress mode */
1226 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1227 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1228 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1231 if (cpi->twopass.static_scene_max_gf_interval >
1232 cpi->oxcf.lag_in_frames - 1) {
1233 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1237 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1238 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
/* One-time configuration performed at encoder creation time: derives an
 * initial framerate guess from the timebase reciprocal (capped at 180,
 * else 30), sets reference-frame and refresh flags, applies the full
 * configuration via vp8_change_config(), and seeds rate-control state
 * (active q range, buffer levels, rolling bit counters), temporal-layer
 * contexts, and the fixed_divide[] reciprocal table used by the temporal
 * alt-ref filter.
 * NOTE(review): lossy extraction -- line-number prefixes retained;
 * declarations, braces and #else/#endif lines missing. */
1242 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1243 VP8_COMMON *cm = &cpi->common;
1248 cpi->auto_adjust_gold_quantizer = 1;
1250 cm->version = oxcf->Version;
1251 vp8_setup_version(cm);
1253 /* Frame rate is not available on the first frame, as it's derived from
1254 * the observed timestamps. The actual value used here doesn't matter
1255 * too much, as it will adapt quickly.
1257 if (oxcf->timebase.num > 0) {
1259 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1261 cpi->framerate = 30;
1264 /* If the reciprocal of the timebase seems like a reasonable framerate,
1265 * then use that as a guess, otherwise use 30.
1267 if (cpi->framerate > 180) cpi->framerate = 30;
1269 cpi->ref_framerate = cpi->framerate;
1271 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1273 cm->refresh_golden_frame = 0;
1274 cm->refresh_last_frame = 1;
1275 cm->refresh_entropy_probs = 1;
1277 /* change includes all joint functionality */
1278 vp8_change_config(cpi, oxcf);
1280 /* Initialize active best and worst q and average q values. */
1281 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1282 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1283 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1285 /* Initialise the starting buffer levels */
1286 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1287 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1289 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1290 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1291 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1292 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1294 cpi->total_actual_bits = 0;
1295 cpi->total_target_vs_actual = 0;
1297 /* Temporal scalabilty */
1298 if (cpi->oxcf.number_of_layers > 1) {
1300 double prev_layer_framerate = 0;
1302 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1303 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1304 prev_layer_framerate =
1305 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
/* Reciprocal table (0x80000 / i) for fast division in the temporal
 * alt-ref filter; index 0 is unused and left as 0. */
1309 #if VP8_TEMPORAL_ALT_REF
1313 cpi->fixed_divide[0] = 0;
1315 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
/* Refreshes each temporal layer's snapshot (framerate, target bandwidth,
 * buffer levels, and the average frame size attributable to the layer)
 * after a configuration change. Buffer levels in ms are rescaled against
 * each layer's own target bandwidth; zero optimal/maximum buffer settings
 * default to target_bandwidth / 8.
 * NOTE(review): lossy extraction -- the i == 0 branch of the average
 * frame size computation and several braces are missing here. */
1320 static void update_layer_contexts(VP8_COMP *cpi) {
1321 VP8_CONFIG *oxcf = &cpi->oxcf;
1323 /* Update snapshots of the layer contexts to reflect new parameters */
1324 if (oxcf->number_of_layers > 1) {
1326 double prev_layer_framerate = 0;
1328 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1329 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1330 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1332 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
1333 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1335 lc->starting_buffer_level = rescale(
1336 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
1338 if (oxcf->optimal_buffer_level == 0) {
1339 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1341 lc->optimal_buffer_level = rescale(
1342 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1345 if (oxcf->maximum_buffer_size == 0) {
1346 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1348 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1349 lc->target_bandwidth, 1000);
1352 /* Work out the average size of a frame within this layer */
1354 lc->avg_frame_size_for_layer =
1355 (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1356 1000 / (lc->framerate - prev_layer_framerate));
1359 prev_layer_framerate = lc->framerate;
/* Applies a (possibly changed) VP8_CONFIG to a live encoder instance:
 * clamps cpu_used per encoding mode, translates all external 0..63 q
 * values through q_trans[], sets token partitioning and segment
 * breakouts, derives buffer levels from the target bandwidth, clamps the
 * active q range, handles temporal-layer-count changes, applies
 * scaling/resizing (reallocating buffers when the MB-aligned size no
 * longer matches), and (re)allocates the denoiser when enabled.
 * NOTE(review): lossy extraction -- line-number prefixes retained; case
 * labels, braces, declarations, and break statements are missing. */
1364 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1365 VP8_COMMON *cm = &cpi->common;
1367 unsigned int prev_number_of_layers;
1373 if (cm->version != oxcf->Version) {
1374 cm->version = oxcf->Version;
1375 vp8_setup_version(cm);
/* Remember previous geometry/layer count to detect changes below. */
1378 last_w = cpi->oxcf.Width;
1379 last_h = cpi->oxcf.Height;
1380 prev_number_of_layers = cpi->oxcf.number_of_layers;
/* Per-mode compressor speed and cpu_used clamping. */
1384 switch (cpi->oxcf.Mode) {
1387 cpi->compressor_speed = 2;
1389 if (cpi->oxcf.cpu_used < -16) {
1390 cpi->oxcf.cpu_used = -16;
1393 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1397 case MODE_GOODQUALITY:
1399 cpi->compressor_speed = 1;
1401 if (cpi->oxcf.cpu_used < -5) {
1402 cpi->oxcf.cpu_used = -5;
1405 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1409 case MODE_BESTQUALITY:
1411 cpi->compressor_speed = 0;
1414 case MODE_FIRSTPASS:
1416 cpi->compressor_speed = 1;
1418 case MODE_SECONDPASS:
1420 cpi->compressor_speed = 1;
1422 if (cpi->oxcf.cpu_used < -5) {
1423 cpi->oxcf.cpu_used = -5;
1426 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1429 case MODE_SECONDPASS_BEST:
1431 cpi->compressor_speed = 0;
1435 if (cpi->pass == 0) cpi->auto_worst_q = 1;
/* External API q values (0..63) -> internal q indices (0..127). */
1437 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1438 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1439 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1441 if (oxcf->fixed_q >= 0) {
1442 if (oxcf->worst_allowed_q < 0) {
1443 cpi->oxcf.fixed_q = q_trans[0];
1445 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1448 if (oxcf->alt_q < 0) {
1449 cpi->oxcf.alt_q = q_trans[0];
1451 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1454 if (oxcf->key_q < 0) {
1455 cpi->oxcf.key_q = q_trans[0];
1457 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1460 if (oxcf->gold_q < 0) {
1461 cpi->oxcf.gold_q = q_trans[0];
1463 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1467 cpi->baseline_gf_interval =
1468 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1470 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1471 cpi->oxcf.token_partitions = 3;
1474 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1475 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1478 setup_features(cpi);
1483 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1484 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1488 /* At the moment the first order values may not be > MAXQ */
1489 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1491 /* local file playback mode == really big buffer */
1492 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1493 cpi->oxcf.starting_buffer_level = 60000;
1494 cpi->oxcf.optimal_buffer_level = 60000;
1495 cpi->oxcf.maximum_buffer_size = 240000;
1496 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1497 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1498 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1501 /* Convert target bandwidth from Kbit/s to Bit/s */
1502 cpi->oxcf.target_bandwidth *= 1000;
1504 cpi->oxcf.starting_buffer_level = rescale(
1505 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1507 /* Set or reset optimal and maximum buffer levels. */
1508 if (cpi->oxcf.optimal_buffer_level == 0) {
1509 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1511 cpi->oxcf.optimal_buffer_level = rescale(
1512 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1515 if (cpi->oxcf.maximum_buffer_size == 0) {
1516 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1518 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1519 cpi->oxcf.target_bandwidth, 1000);
1521 // Under a configuration change, where maximum_buffer_size may change,
1522 // keep buffer level clipped to the maximum allowed buffer size.
1523 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1524 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1525 cpi->buffer_level = cpi->bits_off_target;
1528 /* Set up frame rate and related parameters rate control values. */
1529 vp8_new_framerate(cpi, cpi->framerate);
1531 /* Set absolute upper and lower quality limits */
1532 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1533 cpi->best_quality = cpi->oxcf.best_allowed_q;
1535 /* active values should only be modified if out of new range */
1536 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1537 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1540 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1541 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1543 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1544 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1547 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1548 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1551 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1553 cpi->cq_target_quality = cpi->oxcf.cq_level;
1555 /* Only allow dropped frames in buffered mode */
1556 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1558 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1560 // Check if the number of temporal layers has changed, and if so reset the
1561 // pattern counter and set/initialize the temporal layer context for the
1562 // new layer configuration.
1563 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1564 // If the number of temporal layers are changed we must start at the
1565 // base of the pattern cycle, so set the layer id to 0 and reset
1566 // the temporal pattern counter.
1567 if (cpi->temporal_layer_id > 0) {
1568 cpi->temporal_layer_id = 0;
1570 cpi->temporal_pattern_counter = 0;
1571 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1574 if (!cpi->initial_width) {
1575 cpi->initial_width = cpi->oxcf.Width;
1576 cpi->initial_height = cpi->oxcf.Height;
/* Resizing above the initial allocation is not supported. */
1579 cm->Width = cpi->oxcf.Width;
1580 cm->Height = cpi->oxcf.Height;
1581 assert(cm->Width <= cpi->initial_width);
1582 assert(cm->Height <= cpi->initial_height);
1584 /* TODO(jkoleszar): if an internal spatial resampling is active,
1585 * and we downsize the input image, maybe we should clear the
1586 * internal scale immediately rather than waiting for it to
1590 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1591 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1593 cm->sharpness_level = cpi->oxcf.Sharpness;
1595 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
1596 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
1597 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
1599 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1600 Scale2Ratio(cm->vert_scale, &vr, &vs);
1602 /* always go to the next whole number */
1603 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1604 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1607 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1608 cpi->force_next_frame_intra = 1;
/* Reallocate when the MB-aligned geometry no longer matches the frame
 * buffers (or nothing has been allocated yet). */
1611 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1612 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1613 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1614 dealloc_raw_frame_buffers(cpi);
1615 alloc_raw_frame_buffers(cpi);
1616 vp8_alloc_compressor_data(cpi);
1619 if (cpi->oxcf.fixed_q >= 0) {
1620 cpi->last_q[0] = cpi->oxcf.fixed_q;
1621 cpi->last_q[1] = cpi->oxcf.fixed_q;
1624 cpi->Speed = cpi->oxcf.cpu_used;
1626 /* force to allowlag to 0 if lag_in_frames is 0; */
1627 if (cpi->oxcf.lag_in_frames == 0) {
1628 cpi->oxcf.allow_lag = 0;
1630 /* Limit on lag buffers as these are not currently dynamically allocated */
1631 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1632 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1636 cpi->alt_ref_source = NULL;
1637 cpi->is_src_frame_alt_ref = 0;
1639 #if CONFIG_TEMPORAL_DENOISING
1640 if (cpi->oxcf.noise_sensitivity) {
1641 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1642 int width = (cpi->oxcf.Width + 15) & ~15;
1643 int height = (cpi->oxcf.Height + 15) & ~15;
1644 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1645 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1646 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1647 "Failed to allocate denoiser");
1654 /* Experimental RD Code */
1655 cpi->frame_distortion = 0;
1656 cpi->last_frame_distortion = 0;
/* Fallback log2f() in terms of natural log when the platform lacks one;
 * M_LOG2_E here is ln(2) used as the divisor. */
1661 #define M_LOG2_E 0.693147180559945309417
1663 #define log2f(x) (log(x) / (float)M_LOG2_E)
/* Fills the two MV SAD cost tables, indexed -i..+i, with a logarithmic
 * cost curve 256 * (2 * (log2f(8 * i) + .6)); index 0 is pinned to 300.
 * Tables are symmetric (cost depends on |i| only) and identical for both
 * MV components.
 * NOTE(review): lossy extraction -- the declaration/initialization of i
 * and the opening of the do-loop are missing from this extract. */
1665 static void cal_mvsadcosts(int *mvsadcost[2]) {
1668 mvsadcost[0][0] = 300;
1669 mvsadcost[1][0] = 300;
1672 double z = 256 * (2 * (log2f(8 * i) + .6));
1673 mvsadcost[0][i] = (int)z;
1674 mvsadcost[1][i] = (int)z;
1675 mvsadcost[0][-i] = (int)z;
1676 mvsadcost[1][-i] = (int)z;
1677 } while (++i <= mvfp_max);
/* Allocates and fully initializes a VP8_COMP encoder instance: zeroed
 * allocation, error-handling setjmp target, configuration via
 * init_config(), rate-control and cyclic-refresh state, first/second
 * pass setup, speed features, encoder worker threads, SAD/variance
 * function pointer tables, quantizer and loop-filter init, and RD cost
 * table wiring into the MACROBLOCK. On any internal error the setjmp
 * handler tears the instance down via vp8_remove_compressor().
 * NOTE(review): lossy extraction -- line-number prefixes retained;
 * declarations, braces, #else/#endif lines and the final return are
 * missing from this extract. */
1680 struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
1686 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1687 /* Check that the CPI instance is valid */
1692 memset(cpi, 0, sizeof(VP8_COMP));
/* Error trampoline: any vpx_internal_error() during setup lands here. */
1694 if (setjmp(cm->error.jmp)) {
1695 cpi->common.error.setjmp = 0;
1696 vp8_remove_compressor(&cpi);
1700 cpi->common.error.setjmp = 1;
1702 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
1703 (MAX_MVSEARCH_STEPS * 8) + 1));
1705 vp8_create_common(&cpi->common);
1707 init_config(cpi, oxcf);
1709 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1710 sizeof(vp8cx_base_skip_false_prob));
1711 cpi->common.current_video_frame = 0;
1712 cpi->temporal_pattern_counter = 0;
1713 cpi->temporal_layer_id = -1;
1714 cpi->kf_overspend_bits = 0;
1715 cpi->kf_bitrate_adjustment = 0;
1716 cpi->frames_till_gf_update_due = 0;
1717 cpi->gf_overspend_bits = 0;
1718 cpi->non_gf_bitrate_adjustment = 0;
1719 cpi->prob_last_coded = 128;
1720 cpi->prob_gf_coded = 128;
1721 cpi->prob_intra_coded = 63;
1723 /* Prime the recent reference frame usage counters.
1724 * Hereafter they will be maintained as a sort of moving average
1726 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1727 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1728 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1729 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1731 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1732 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1734 cpi->twopass.gf_decay_rate = 0;
1735 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1737 cpi->gold_is_last = 0;
1738 cpi->alt_is_last = 0;
1739 cpi->gold_is_alt = 0;
1741 cpi->active_map_enabled = 0;
1744 /* Experimental code for lagged and one pass */
1745 /* Initialise one_pass GF frames stats */
1746 /* Update stats used for GF selection */
1749 cpi->one_pass_frame_index = 0;
1751 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1753 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1754 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1755 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1756 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1757 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1758 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1759 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1760 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1761 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1766 cpi->mse_source_denoised = 0;
1768 /* Should we use the cyclic refresh method.
1769 * Currently there is no external control for this.
1770 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1772 cpi->cyclic_refresh_mode_enabled =
1773 (cpi->oxcf.error_resilient_mode ||
1774 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1775 cpi->oxcf.Mode <= 2));
/* Cyclic refresh budget per frame: fraction of MBs depends on the
 * number of temporal layers. */
1776 cpi->cyclic_refresh_mode_max_mbs_perframe =
1777 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1778 if (cpi->oxcf.number_of_layers == 1) {
1779 cpi->cyclic_refresh_mode_max_mbs_perframe =
1780 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1781 } else if (cpi->oxcf.number_of_layers == 2) {
1782 cpi->cyclic_refresh_mode_max_mbs_perframe =
1783 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1785 cpi->cyclic_refresh_mode_index = 0;
1786 cpi->cyclic_refresh_q = 32;
1788 if (cpi->cyclic_refresh_mode_enabled) {
1789 CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
1790 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1792 cpi->cyclic_refresh_map = (signed char *)NULL;
1795 CHECK_MEM_ERROR(cpi->consec_zero_last,
1796 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1797 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1798 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1800 #ifdef VP8_ENTROPY_STATS
1801 init_context_counters();
1804 /*Initialize the feed-forward activity masking.*/
1805 cpi->activity_avg = 90 << 12;
1807 /* Give a sensible default for the first frame. */
1808 cpi->frames_since_key = 8;
1809 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1810 cpi->this_key_frame_forced = 0;
1811 cpi->next_key_frame_forced = 0;
1813 cpi->source_alt_ref_pending = 0;
1814 cpi->source_alt_ref_active = 0;
1815 cpi->common.refresh_alt_ref_frame = 0;
1817 cpi->force_maxqp = 0;
1819 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1820 #if CONFIG_INTERNAL_STATS
1821 cpi->b_calculate_ssimg = 0;
1826 if (cpi->b_calculate_psnr) {
1827 cpi->total_sq_error = 0.0;
1828 cpi->total_sq_error2 = 0.0;
1833 cpi->totalp_y = 0.0;
1834 cpi->totalp_u = 0.0;
1835 cpi->totalp_v = 0.0;
1837 cpi->tot_recode_hits = 0;
1838 cpi->summed_quality = 0;
1839 cpi->summed_weights = 0;
1844 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1846 cpi->frames_till_gf_update_due = 0;
1847 cpi->key_frame_count = 1;
1849 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1852 cpi->total_byte_count = 0;
1854 cpi->drop_frame = 0;
1856 cpi->rate_correction_factor = 1.0;
1857 cpi->key_frame_rate_correction_factor = 1.0;
1858 cpi->gf_rate_correction_factor = 1.0;
1859 cpi->twopass.est_max_qcorrection_factor = 1.0;
1861 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1862 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1865 #ifdef OUTPUT_YUV_SRC
1866 yuv_file = fopen("bd.yuv", "ab");
1868 #ifdef OUTPUT_YUV_DENOISED
1869 yuv_denoised_file = fopen("denoised.yuv", "ab");
1873 framepsnr = fopen("framepsnr.stt", "a");
1874 kf_list = fopen("kf_list.stt", "w");
1877 cpi->output_pkt_list = oxcf->output_pkt_list;
1879 #if !CONFIG_REALTIME_ONLY
/* First/second pass bootstrap: pass 2 wires up the stats buffer
 * provided by the application. */
1881 if (cpi->pass == 1) {
1882 vp8_init_first_pass(cpi);
1883 } else if (cpi->pass == 2) {
1884 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1885 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1887 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1888 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1889 cpi->twopass.stats_in_end =
1890 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1891 vp8_init_second_pass(cpi);
1896 if (cpi->compressor_speed == 2) {
1897 cpi->avg_encode_time = 0;
1898 cpi->avg_pick_mode_time = 0;
1901 vp8_set_speed_features(cpi);
1903 /* Set starting values of RD threshold multipliers (128 = *1) */
1904 for (i = 0; i < MAX_MODES; ++i) {
1905 cpi->mb.rd_thresh_mult[i] = 128;
1908 #ifdef VP8_ENTROPY_STATS
1909 init_mv_ref_counts();
1912 #if CONFIG_MULTITHREAD
1913 if (vp8cx_create_encoder_threads(cpi)) {
1914 vp8_remove_compressor(&cpi);
/* SAD / variance / sub-pixel-variance dispatch tables per block size. */
1919 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
1920 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
1921 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
1922 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
1923 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
1924 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
1926 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
1927 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
1928 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
1929 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
1930 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
1931 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
1933 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
1934 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
1935 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
1936 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
1937 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
1938 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
1940 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
1941 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
1942 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
1943 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
1944 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
1945 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
1947 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
1948 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
1949 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
1950 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
1951 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
1952 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
1954 #if ARCH_X86 || ARCH_X86_64
1955 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
1956 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
1957 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
1958 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
1959 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
1962 cpi->full_search_sad = vp8_full_search_sad;
1963 cpi->diamond_search_sad = vp8_diamond_search_sad;
1964 cpi->refining_search_sad = vp8_refining_search_sad;
1966 /* make sure frame 1 is okay */
1967 cpi->mb.error_bins[0] = cpi->common.MBs;
1969 /* vp8cx_init_quantizer() is first called here. Add check in
1970 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
1971 * called later when needed. This will avoid unnecessary calls of
1972 * vp8cx_init_quantizer() for every frame.
1974 vp8cx_init_quantizer(cpi);
1976 vp8_loop_filter_init(cm);
1978 cpi->common.error.setjmp = 0;
1980 #if CONFIG_MULTI_RES_ENCODING
1982 /* Calculate # of MBs in a row in lower-resolution level image. */
1983 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
1987 /* setup RD costs to MACROBLOCK struct */
/* MV cost tables are indexed -mv_max..+mv_max; point at the center. */
1989 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
1990 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
1991 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
1992 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
1994 cal_mvsadcosts(cpi->mb.mvsadcost);
1996 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
1997 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
1998 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
1999 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2000 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2002 /* setup block ptrs & offsets */
2003 vp8_setup_block_ptrs(&cpi->mb);
2004 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2009 void vp8_remove_compressor(VP8_COMP **ptr) {
2010 VP8_COMP *cpi = *ptr;
2014 if (cpi && (cpi->common.current_video_frame > 0)) {
2015 #if !CONFIG_REALTIME_ONLY
2017 if (cpi->pass == 2) {
2018 vp8_end_second_pass(cpi);
2023 #ifdef VP8_ENTROPY_STATS
2024 print_context_counters();
2025 print_tree_update_probs();
2026 print_mode_context();
2029 #if CONFIG_INTERNAL_STATS
2031 if (cpi->pass != 1) {
2032 FILE *f = fopen("opsnr.stt", "a");
2033 double time_encoded =
2034 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2036 double total_encode_time =
2037 (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
2038 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2039 const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
2040 const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
2042 if (cpi->b_calculate_psnr) {
2043 if (cpi->oxcf.number_of_layers > 1) {
2047 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2048 "GLPsnrP\tVPXSSIM\n");
2049 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2051 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2052 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2053 cpi->common.Width * cpi->common.Height;
2055 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2056 double total_psnr2 =
2057 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2059 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2062 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2064 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2065 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2066 total_psnr2, total_ssim);
2070 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2072 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2073 double total_psnr2 =
2074 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2076 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2079 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2080 "GLPsnrP\tVPXSSIM\n");
2082 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2084 dr, cpi->total / cpi->count, total_psnr,
2085 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2090 f = fopen("qskip.stt", "a");
2091 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2100 if (cpi->compressor_speed == 2) {
2102 FILE *f = fopen("cxspeed.stt", "a");
2103 cnt_pm /= cpi->common.MBs;
2105 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2115 extern int count_mb_seg[4];
2116 FILE *f = fopen("modes.stt", "a");
2117 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2118 (double)count / (double)1000;
2119 fprintf(f, "intra_mode in Intra Frames:\n");
2120 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2121 y_modes[2], y_modes[3], y_modes[4]);
2122 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2123 uv_modes[2], uv_modes[3]);
2128 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2133 fprintf(f, "Modes in Inter Frames:\n");
2134 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2135 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2136 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2137 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2139 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2140 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2145 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2149 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2150 count_mb_seg[2], count_mb_seg[3]);
2151 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2152 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2153 inter_b_modes[NEW4X4]);
2159 #ifdef VP8_ENTROPY_STATS
2162 FILE *fmode = fopen("modecontext.c", "w");
2164 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2165 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2167 "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2169 for (i = 0; i < 10; ++i) {
2170 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2172 for (j = 0; j < 10; ++j) {
2173 fprintf(fmode, " {");
2175 for (k = 0; k < 10; ++k) {
2176 if (!intra_mode_stats[i][j][k])
2177 fprintf(fmode, " %5d, ", 1);
2179 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2182 fprintf(fmode, "}, /* left_mode %d */\n", j);
2185 fprintf(fmode, " },\n");
2188 fprintf(fmode, "};\n");
2193 #if defined(SECTIONBITS_OUTPUT)
2197 FILE *f = fopen("tokenbits.stt", "a");
2199 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2209 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2210 printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
2211 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2216 #if CONFIG_MULTITHREAD
2217 vp8cx_remove_encoder_threads(cpi);
2220 #if CONFIG_TEMPORAL_DENOISING
2221 vp8_denoiser_free(&cpi->denoiser);
2223 dealloc_compressor_data(cpi);
2224 vpx_free(cpi->mb.ss);
2226 vpx_free(cpi->cyclic_refresh_map);
2227 vpx_free(cpi->consec_zero_last);
2228 vpx_free(cpi->consec_zero_last_mvbias);
2230 vp8_remove_common(&cpi->common);
2234 #ifdef OUTPUT_YUV_SRC
2237 #ifdef OUTPUT_YUV_DENOISED
2238 fclose(yuv_denoised_file);
/* Sum of squared error (SSE) between two pixel planes.
 *
 * orig, orig_stride   : source plane base pointer and row stride.
 * recon, recon_stride : reconstructed plane base pointer and row stride.
 * cols, rows          : plane dimensions in pixels; need not be multiples
 *                       of 16 (edge pixels are handled separately).
 *
 * Full 16x16 blocks use the optimized vpx_mse16x16(); the right and bottom
 * borders of odd-sized planes are accumulated pixel by pixel.
 * Returns the total SSE over the whole plane.
 */
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
                                 unsigned char *recon, int recon_stride,
                                 unsigned int cols, unsigned int rows) {
  unsigned int row, col;
  uint64_t total_sse = 0;
  int diff;

  for (row = 0; row + 16 <= rows; row += 16) {
    for (col = 0; col + 16 <= cols; col += 16) {
      unsigned int sse;

      vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
      total_sse += sse;
    }

    /* Handle odd-sized width */
    if (col < cols) {
      unsigned int border_row, border_col;
      unsigned char *border_orig = orig;
      unsigned char *border_recon = recon;

      for (border_row = 0; border_row < 16; ++border_row) {
        for (border_col = col; border_col < cols; ++border_col) {
          diff = border_orig[border_col] - border_recon[border_col];
          total_sse += diff * diff;
        }

        border_orig += orig_stride;
        border_recon += recon_stride;
      }
    }

    orig += orig_stride * 16;
    recon += recon_stride * 16;
  }

  /* Handle odd-sized height */
  for (; row < rows; ++row) {
    for (col = 0; col < cols; ++col) {
      diff = orig[col] - recon[col];
      total_sse += diff * diff;
    }

    orig += orig_stride;
    recon += recon_stride;
  }

  /* Clear down MMX/SIMD state before returning to float-using callers. */
  vpx_clear_system_state();
  return total_sse;
}
2306 static void generate_psnr_packet(VP8_COMP *cpi) {
2307 YV12_BUFFER_CONFIG *orig = cpi->Source;
2308 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2309 struct vpx_codec_cx_pkt pkt;
2312 unsigned int width = cpi->common.Width;
2313 unsigned int height = cpi->common.Height;
2315 pkt.kind = VPX_CODEC_PSNR_PKT;
2316 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2317 recon->y_stride, width, height);
2318 pkt.data.psnr.sse[0] = sse;
2319 pkt.data.psnr.sse[1] = sse;
2320 pkt.data.psnr.samples[0] = width * height;
2321 pkt.data.psnr.samples[1] = width * height;
2323 width = (width + 1) / 2;
2324 height = (height + 1) / 2;
2326 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2327 recon->uv_stride, width, height);
2328 pkt.data.psnr.sse[0] += sse;
2329 pkt.data.psnr.sse[2] = sse;
2330 pkt.data.psnr.samples[0] += width * height;
2331 pkt.data.psnr.samples[2] = width * height;
2333 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2334 recon->uv_stride, width, height);
2335 pkt.data.psnr.sse[0] += sse;
2336 pkt.data.psnr.sse[3] = sse;
2337 pkt.data.psnr.samples[0] += width * height;
2338 pkt.data.psnr.samples[3] = width * height;
2340 for (i = 0; i < 4; ++i) {
2341 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2342 (double)(pkt.data.psnr.sse[i]));
2345 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2348 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2349 if (ref_frame_flags > 7) return -1;
2351 cpi->ref_frame_flags = ref_frame_flags;
2354 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2355 if (ref_frame_flags > 7) return -1;
2357 cpi->common.refresh_golden_frame = 0;
2358 cpi->common.refresh_alt_ref_frame = 0;
2359 cpi->common.refresh_last_frame = 0;
2361 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2363 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2365 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2370 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2371 YV12_BUFFER_CONFIG *sd) {
2372 VP8_COMMON *cm = &cpi->common;
2375 if (ref_frame_flag == VP8_LAST_FRAME) {
2376 ref_fb_idx = cm->lst_fb_idx;
2377 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2378 ref_fb_idx = cm->gld_fb_idx;
2379 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2380 ref_fb_idx = cm->alt_fb_idx;
2385 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2389 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2390 YV12_BUFFER_CONFIG *sd) {
2391 VP8_COMMON *cm = &cpi->common;
2395 if (ref_frame_flag == VP8_LAST_FRAME) {
2396 ref_fb_idx = cm->lst_fb_idx;
2397 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2398 ref_fb_idx = cm->gld_fb_idx;
2399 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2400 ref_fb_idx = cm->alt_fb_idx;
2405 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2409 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2410 VP8_COMMON *cm = &cpi->common;
2411 cm->refresh_entropy_probs = update;
#if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
/* Debug helper: append one raw I420 frame (Y plane then U then V) to
 * yuv_file, writing y_width/uv_width visible bytes per row and skipping
 * the stride padding.
 */
void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) {
  unsigned char *src = s->y_buffer;
  int h = s->y_height;

  do {
    fwrite(src, s->y_width, 1, yuv_file);
    src += s->y_stride;
  } while (--h);

  src = s->u_buffer;
  h = s->uv_height;

  do {
    fwrite(src, s->uv_width, 1, yuv_file);
    src += s->uv_stride;
  } while (--h);

  src = s->v_buffer;
  h = s->uv_height;

  do {
    fwrite(src, s->uv_width, 1, yuv_file);
    src += s->uv_stride;
  } while (--h);
}
#endif
2444 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2445 VP8_COMMON *cm = &cpi->common;
2447 /* are we resizing the image */
2448 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2449 #if CONFIG_SPATIAL_RESAMPLING
2450 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2451 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2454 if (cm->vert_scale == 3) {
2460 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2461 Scale2Ratio(cm->vert_scale, &vr, &vs);
2463 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2464 tmp_height, hs, hr, vs, vr, 0);
2466 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2467 cpi->Source = &cpi->scaled_source;
2474 static int resize_key_frame(VP8_COMP *cpi) {
2475 #if CONFIG_SPATIAL_RESAMPLING
2476 VP8_COMMON *cm = &cpi->common;
2478 /* Do we need to apply resampling for one pass cbr.
2479 * In one pass this is more limited than in two pass cbr.
2480 * The test and any change is only made once per key frame sequence.
2482 if (cpi->oxcf.allow_spatial_resampling &&
2483 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2484 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2485 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2486 int new_width, new_height;
2488 /* If we are below the resample DOWN watermark then scale down a
2491 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2492 cpi->oxcf.optimal_buffer_level / 100)) {
2494 (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2495 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2497 /* Should we now start scaling back up */
2498 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2499 cpi->oxcf.optimal_buffer_level / 100)) {
2501 (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2502 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2505 /* Get the new height and width */
2506 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2507 Scale2Ratio(cm->vert_scale, &vr, &vs);
2508 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2509 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2511 /* If the image size has changed we need to reallocate the buffers
2512 * and resample the source image
2514 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2515 cm->Width = new_width;
2516 cm->Height = new_height;
2517 vp8_alloc_compressor_data(cpi);
2518 scale_and_extend_source(cpi->un_scaled_source, cpi);
2527 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2528 VP8_COMMON *cm = &cpi->common;
2530 /* Select an interval before next GF or altref */
2531 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2533 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2534 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2536 /* Set the bits per frame that we should try and recover in
2537 * subsequent inter frames to account for the extra GF spend...
2538 * note that his does not apply for GF updates that occur
2539 * coincident with a key frame as the extra cost of key frames is
2540 * dealt with elsewhere.
2542 cpi->gf_overspend_bits += cpi->projected_frame_size;
2543 cpi->non_gf_bitrate_adjustment =
2544 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2547 /* Update data structure that monitors level of reference to last GF */
2548 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2549 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2551 /* this frame refreshes means next frames don't unless specified by user */
2552 cpi->frames_since_golden = 0;
2554 /* Clear the alternate reference update pending flag. */
2555 cpi->source_alt_ref_pending = 0;
2557 /* Set the alternate reference frame active flag */
2558 cpi->source_alt_ref_active = 1;
2560 static void update_golden_frame_stats(VP8_COMP *cpi) {
2561 VP8_COMMON *cm = &cpi->common;
2563 /* Update the Golden frame usage counts. */
2564 if (cm->refresh_golden_frame) {
2565 /* Select an interval before next GF */
2566 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2568 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2569 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2571 /* Set the bits per frame that we should try and recover in
2572 * subsequent inter frames to account for the extra GF spend...
2573 * note that his does not apply for GF updates that occur
2574 * coincident with a key frame as the extra cost of key frames
2575 * is dealt with elsewhere.
2577 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2578 /* Calcluate GF bits to be recovered
2579 * Projected size - av frame bits available for inter
2580 * frames for clip as a whole
2582 cpi->gf_overspend_bits +=
2583 (cpi->projected_frame_size - cpi->inter_frame_target);
2586 cpi->non_gf_bitrate_adjustment =
2587 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2590 /* Update data structure that monitors level of reference to last GF */
2591 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2592 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2594 /* this frame refreshes means next frames don't unless specified by
2597 cm->refresh_golden_frame = 0;
2598 cpi->frames_since_golden = 0;
2600 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2601 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2602 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2603 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2605 /* ******** Fixed Q test code only ************ */
2606 /* If we are going to use the ALT reference for the next group of
2607 * frames set a flag to say so.
2609 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2610 !cpi->common.refresh_alt_ref_frame) {
2611 cpi->source_alt_ref_pending = 1;
2612 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2615 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2617 /* Decrement count down till next gf */
2618 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2620 } else if (!cpi->common.refresh_alt_ref_frame) {
2621 /* Decrement count down till next gf */
2622 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2624 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2626 cpi->frames_since_golden++;
2628 if (cpi->frames_since_golden > 1) {
2629 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2630 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2631 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2632 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2633 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2634 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2635 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2636 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2641 /* This function updates the reference frame probability estimates that
2642 * will be used during mode selection
2644 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2645 VP8_COMMON *cm = &cpi->common;
2647 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2648 const int rf_intra = rfct[INTRA_FRAME];
2649 const int rf_inter =
2650 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2652 if (cm->frame_type == KEY_FRAME) {
2653 cpi->prob_intra_coded = 255;
2654 cpi->prob_last_coded = 128;
2655 cpi->prob_gf_coded = 128;
2656 } else if (!(rf_intra + rf_inter)) {
2657 cpi->prob_intra_coded = 63;
2658 cpi->prob_last_coded = 128;
2659 cpi->prob_gf_coded = 128;
2662 /* update reference frame costs since we can do better than what we got
2665 if (cpi->oxcf.number_of_layers == 1) {
2666 if (cpi->common.refresh_alt_ref_frame) {
2667 cpi->prob_intra_coded += 40;
2668 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2669 cpi->prob_last_coded = 200;
2670 cpi->prob_gf_coded = 1;
2671 } else if (cpi->frames_since_golden == 0) {
2672 cpi->prob_last_coded = 214;
2673 } else if (cpi->frames_since_golden == 1) {
2674 cpi->prob_last_coded = 192;
2675 cpi->prob_gf_coded = 220;
2676 } else if (cpi->source_alt_ref_active) {
2677 cpi->prob_gf_coded -= 20;
2679 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2681 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2685 #if !CONFIG_REALTIME_ONLY
2686 /* 1 = key, 0 = inter */
2687 static int decide_key_frame(VP8_COMP *cpi) {
2688 VP8_COMMON *cm = &cpi->common;
2690 int code_key_frame = 0;
2694 if (cpi->Speed > 11) return 0;
2696 /* Clear down mmx registers */
2697 vpx_clear_system_state();
2699 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2700 double change = 1.0 *
2701 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2702 (1 + cpi->last_intra_error);
2705 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2706 (1 + cpi->last_prediction_error);
2707 double minerror = cm->MBs * 256;
2709 cpi->last_intra_error = cpi->mb.intra_error;
2710 cpi->last_prediction_error = cpi->mb.prediction_error;
2712 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2713 cpi->mb.prediction_error > minerror &&
2714 (change > .25 || change2 > .25)) {
2715 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2716 * cpi->last_frame_percent_intra + 3*/
2723 /* If the following are true we might as well code a key frame */
2724 if (((cpi->this_frame_percent_intra == 100) &&
2725 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2726 ((cpi->this_frame_percent_intra > 95) &&
2727 (cpi->this_frame_percent_intra >=
2728 (cpi->last_frame_percent_intra + 5)))) {
2731 /* in addition if the following are true and this is not a golden frame
2732 * then code a key frame Note that on golden frames there often seems
2733 * to be a pop in intra useage anyway hence this restriction is
2734 * designed to prevent spurious key frames. The Intra pop needs to be
2737 else if (((cpi->this_frame_percent_intra > 60) &&
2738 (cpi->this_frame_percent_intra >
2739 (cpi->last_frame_percent_intra * 2))) ||
2740 ((cpi->this_frame_percent_intra > 75) &&
2741 (cpi->this_frame_percent_intra >
2742 (cpi->last_frame_percent_intra * 3 / 2))) ||
2743 ((cpi->this_frame_percent_intra > 90) &&
2744 (cpi->this_frame_percent_intra >
2745 (cpi->last_frame_percent_intra + 10)))) {
2746 if (!cm->refresh_golden_frame) code_key_frame = 1;
2749 return code_key_frame;
2752 static void Pass1Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
2753 unsigned int *frame_flags) {
2757 vp8_set_quantizer(cpi, 26);
2759 vp8_first_pass(cpi);
2764 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2767 /* write the frame */
2772 sprintf(filename, "cx\\y%04d.raw", this_frame);
2773 yframe = fopen(filename, "wb");
2775 for (i = 0; i < frame->y_height; ++i)
2776 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2779 sprintf(filename, "cx\\u%04d.raw", this_frame);
2780 yframe = fopen(filename, "wb");
2782 for (i = 0; i < frame->uv_height; ++i)
2783 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2786 sprintf(filename, "cx\\v%04d.raw", this_frame);
2787 yframe = fopen(filename, "wb");
2789 for (i = 0; i < frame->uv_height; ++i)
2790 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2795 /* return of 0 means drop frame */
2797 #if !CONFIG_REALTIME_ONLY
/* Function to test for conditions that indicate we should loop
 * back and recode a frame.
 */
2801 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2802 int maxq, int minq) {
2803 int force_recode = 0;
2804 VP8_COMMON *cm = &cpi->common;
2806 /* Is frame recode allowed at all
2807 * Yes if either recode mode 1 is selected or mode two is selcted
2808 * and the frame is a key frame. golden frame or alt_ref_frame
2810 if ((cpi->sf.recode_loop == 1) ||
2811 ((cpi->sf.recode_loop == 2) &&
2812 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2813 cm->refresh_alt_ref_frame))) {
2814 /* General over and under shoot tests */
2815 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2816 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2819 /* Special Constrained quality tests */
2820 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2821 /* Undershoot and below auto cq level */
2822 if ((q > cpi->cq_target_quality) &&
2823 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2826 /* Severe undershoot and between auto and user cq level */
2827 else if ((q > cpi->oxcf.cq_level) &&
2828 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2829 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2831 cpi->active_best_quality = cpi->oxcf.cq_level;
2836 return force_recode;
2838 #endif // !CONFIG_REALTIME_ONLY
2840 static void update_reference_frames(VP8_COMP *cpi) {
2841 VP8_COMMON *cm = &cpi->common;
2842 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2844 /* At this point the new frame has been encoded.
2845 * If any buffer copy / swapping is signaled it should be done here.
2848 if (cm->frame_type == KEY_FRAME) {
2849 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2851 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2852 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2854 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2856 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2857 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2858 } else /* For non key frames */
2860 if (cm->refresh_alt_ref_frame) {
2861 assert(!cm->copy_buffer_to_arf);
2863 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2864 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2865 cm->alt_fb_idx = cm->new_fb_idx;
2867 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2868 } else if (cm->copy_buffer_to_arf) {
2869 assert(!(cm->copy_buffer_to_arf & ~0x3));
2871 if (cm->copy_buffer_to_arf == 1) {
2872 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2873 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2874 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2875 cm->alt_fb_idx = cm->lst_fb_idx;
2877 cpi->current_ref_frames[ALTREF_FRAME] =
2878 cpi->current_ref_frames[LAST_FRAME];
2880 } else /* if (cm->copy_buffer_to_arf == 2) */
2882 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2883 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2884 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2885 cm->alt_fb_idx = cm->gld_fb_idx;
2887 cpi->current_ref_frames[ALTREF_FRAME] =
2888 cpi->current_ref_frames[GOLDEN_FRAME];
2893 if (cm->refresh_golden_frame) {
2894 assert(!cm->copy_buffer_to_gf);
2896 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2897 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2898 cm->gld_fb_idx = cm->new_fb_idx;
2900 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2901 } else if (cm->copy_buffer_to_gf) {
2902 assert(!(cm->copy_buffer_to_arf & ~0x3));
2904 if (cm->copy_buffer_to_gf == 1) {
2905 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2906 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2907 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2908 cm->gld_fb_idx = cm->lst_fb_idx;
2910 cpi->current_ref_frames[GOLDEN_FRAME] =
2911 cpi->current_ref_frames[LAST_FRAME];
2913 } else /* if (cm->copy_buffer_to_gf == 2) */
2915 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2916 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2917 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2918 cm->gld_fb_idx = cm->alt_fb_idx;
2920 cpi->current_ref_frames[GOLDEN_FRAME] =
2921 cpi->current_ref_frames[ALTREF_FRAME];
2927 if (cm->refresh_last_frame) {
2928 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2929 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2930 cm->lst_fb_idx = cm->new_fb_idx;
2932 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
2935 #if CONFIG_TEMPORAL_DENOISING
2936 if (cpi->oxcf.noise_sensitivity) {
2937 /* we shouldn't have to keep multiple copies as we know in advance which
2938 * buffer we should start - for now to get something up and running
2939 * I've chosen to copy the buffers
2941 if (cm->frame_type == KEY_FRAME) {
2943 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
2944 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
2945 } else /* For non key frames */
2947 vp8_yv12_extend_frame_borders(
2948 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
2950 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
2951 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2952 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
2954 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
2955 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2956 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
2958 if (cm->refresh_last_frame) {
2959 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2960 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
2963 if (cpi->oxcf.noise_sensitivity == 4)
2964 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
2969 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
2970 YV12_BUFFER_CONFIG *dest,
2976 int min_consec_zero_last = 10;
2977 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
2978 unsigned char *src = source->y_buffer;
2979 unsigned char *dst = dest->y_buffer;
2981 /* Loop through the Y plane, every |skip| blocks along rows and colmumns,
2982 * summing the square differences, and only for blocks that have been
2983 * zero_last mode at least |x| frames in a row.
2985 for (i = 0; i < source->y_height; i += 16 * skip) {
2986 int block_index_row = (i >> 4) * cpi->common.mb_cols;
2987 for (j = 0; j < source->y_width; j += 16 * skip) {
2988 int index = block_index_row + (j >> 4);
2989 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
2991 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
2992 dest->y_stride, &sse);
2996 src += 16 * skip * source->y_stride;
2997 dst += 16 * skip * dest->y_stride;
2999 // Only return non-zero if we have at least ~1/16 samples for estimate.
3000 if (num_blocks > (tot_num_blocks >> 4)) {
3001 return (Total / num_blocks);
#if CONFIG_TEMPORAL_DENOISING
/* Periodically estimate source noise as a normalized mean square
 * difference over stable (ZEROMV-LAST) blocks, and switch the denoiser
 * between normal and aggressive modes based on the estimate, the average
 * QP and the target bitrate.
 */
static void process_denoiser_mode_change(VP8_COMP *cpi) {
  const VP8_COMMON *const cm = &cpi->common;
  int i, j;
  unsigned int total = 0;
  int num_blocks = 0;
  // Number of blocks skipped along row/column in computing the
  // nmse (normalized mean square error) of source.
  int skip = 2;
  // Only select blocks for computing nmse that have been encoded
  // as ZERO LAST min_consec_zero_last frames in a row.
  // Scale with number of temporal layers.
  int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
  // Decision is tested for changing the denoising mode every
  // num_mode_change times this function is called. Note that this
  // function called every 8 frames, so (8 * num_mode_change) is number
  // of frames where denoising mode change is tested for switch.
  int num_mode_change = 20;
  // Framerate factor, to compensate for larger mse at lower framerates.
  // Use ref_framerate, which is full source framerate for temporal layers.
  // TODO(marpan): Adjust this factor.
  int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
  int tot_num_blocks = cm->mb_rows * cm->mb_cols;
  int ystride = cpi->Source->y_stride;
  unsigned char *src = cpi->Source->y_buffer;
  unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
  static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
                                                  128, 128, 128, 128, 128, 128,
                                                  128, 128, 128, 128 };
  int bandwidth = (int)(cpi->target_bandwidth);
  // For temporal layers, use full bandwidth (top layer).
  if (cpi->oxcf.number_of_layers > 1) {
    LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
    bandwidth = (int)(lc->target_bandwidth);
  }
  // Loop through the Y plane, every skip blocks along rows and columns,
  // summing the normalized mean square error, only for blocks that have
  // been encoded as ZEROMV LAST at least min_consec_zero_last least frames in
  // a row and have small sum difference between current and previous frame.
  // Normalization here is by the contrast of the current frame block.
  for (i = 0; i < cm->Height; i += 16 * skip) {
    int block_index_row = (i >> 4) * cm->mb_cols;
    for (j = 0; j < cm->Width; j += 16 * skip) {
      int index = block_index_row + (j >> 4);
      if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
        unsigned int sse;
        const unsigned int var =
            vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
        // Only consider this block as valid for noise measurement
        // if the sum_diff average of the current and previous frame
        // is small (to avoid effects from lighting change).
        if ((sse - var) < 128) {
          unsigned int sse2;
          const unsigned int act =
              vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
          if (act > 0) total += sse / act;
          num_blocks++;
        }
      }
    }
    src += 16 * skip * ystride;
    dst += 16 * skip * ystride;
  }
  total = total * fac_framerate / 100;

  // Only consider this frame as valid sample if we have computed nmse over
  // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
  // application inputs duplicate frames, or contrast is all zero).
  if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
    // Update the recursive mean square source_diff.
    total = (total << 8) / num_blocks;
    if (cpi->denoiser.nmse_source_diff_count == 0) {
      // First sample in new interval.
      cpi->denoiser.nmse_source_diff = total;
      cpi->denoiser.qp_avg = cm->base_qindex;
    } else {
      // For subsequent samples, use average with weight ~1/4 for new sample.
      cpi->denoiser.nmse_source_diff =
          (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
      cpi->denoiser.qp_avg =
          (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
    }
    cpi->denoiser.nmse_source_diff_count++;
  }
  // Check for changing the denoiser mode, when we have obtained #samples =
  // num_mode_change. Condition the change also on the bitrate and QP.
  if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
    // Check for going up: from normal to aggressive mode.
    if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
        (cpi->denoiser.nmse_source_diff >
         cpi->denoiser.threshold_aggressive_mode) &&
        (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
         bandwidth > cpi->denoiser.bitrate_threshold)) {
      vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
    } else {
      // Check for going down: from aggressive to normal mode.
      if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
           (cpi->denoiser.nmse_source_diff <
            cpi->denoiser.threshold_aggressive_mode)) ||
          ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
           (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
            bandwidth < cpi->denoiser.bitrate_threshold))) {
        vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
      }
    }
    // Reset metric and counter for next interval.
    cpi->denoiser.nmse_source_diff = 0;
    cpi->denoiser.qp_avg = 0;
    cpi->denoiser.nmse_source_diff_count = 0;
  }
}
#endif
/* Choose a loop-filter strength for the just-encoded frame (fast or full
 * search depending on cpi->sf.auto_filter) and, when worthwhile, run the
 * in-loop deblocking filter over the reconstructed frame before its borders
 * are extended for use as a reference.
 * NOTE(review): this view of the function is missing several physical lines
 * (else branches / #endif / closing braces elided by extraction); comments
 * below only describe what the visible statements establish. */
3120 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3121 const FRAME_TYPE frame_type = cm->frame_type;
/* If this frame refreshes no reference buffer (last, golden, alt-ref all 0),
 * remember that so the actual filtering pass below can be skipped: nothing
 * downstream would ever read the filtered reconstruction. */
3123 int update_any_ref_buffers = 1;
3124 if (cpi->common.refresh_last_frame == 0 &&
3125 cpi->common.refresh_golden_frame == 0 &&
3126 cpi->common.refresh_alt_ref_frame == 0) {
3127 update_any_ref_buffers = 0;
/* presumably this zeroing sits on a "loop filter disabled" branch whose
 * condition is elided from this view — TODO confirm against full source. */
3131 cm->filter_level = 0;
3133 struct vpx_usec_timer timer;
3135 vpx_clear_system_state();
/* Time the filter-level search; the elapsed time is accumulated into
 * cpi->time_pick_lpf below. */
3137 vpx_usec_timer_start(&timer);
/* auto_filter == 0 selects the fast level search; the other branch (partially
 * elided here) uses the full vp8cx_pick_filter_level() search. */
3138 if (cpi->sf.auto_filter == 0) {
3139 #if CONFIG_TEMPORAL_DENOISING
3140 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3141 // Use the denoised buffer for selecting base loop filter level.
3142 // Denoised signal for current frame is stored in INTRA_FRAME.
3143 // No denoising on key frames.
3144 vp8cx_pick_filter_level_fast(
3145 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3147 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
/* Fallback when temporal denoising is compiled out: pick from the source. */
3150 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3153 #if CONFIG_TEMPORAL_DENOISING
3154 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3155 // Use the denoised buffer for selecting base loop filter level.
3156 // Denoised signal for current frame is stored in INTRA_FRAME.
3157 // No denoising on key frames.
3158 vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3161 vp8cx_pick_filter_level(cpi->Source, cpi);
3164 vp8cx_pick_filter_level(cpi->Source, cpi);
/* A non-zero chosen level also drives the alternate (per-segment) LF level. */
3168 if (cm->filter_level > 0) {
3169 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3172 vpx_usec_timer_mark(&timer);
3173 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3176 #if CONFIG_MULTITHREAD
/* Other encoder threads wait on this semaphore for filter_level to be set
 * before they proceed — post it as soon as the level search is done. */
3177 if (cpi->b_multi_threaded) {
3178 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3182 // No need to apply loop-filter if the encoded frame does not update
3183 // any reference buffers.
3184 if (cm->filter_level > 0 && update_any_ref_buffers) {
3185 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
/* Always extend the frame borders so the reconstruction can be used as a
 * prediction reference, whether or not it was filtered. */
3188 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3191 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3192 unsigned char *dest,
3193 unsigned char *dest_end,
3194 unsigned int *frame_flags) {
3196 int frame_over_shoot_limit;
3197 int frame_under_shoot_limit;
3202 VP8_COMMON *cm = &cpi->common;
3203 int active_worst_qchanged = 0;
3205 #if !CONFIG_REALTIME_ONLY
3209 int zbin_oq_low = 0;
3212 int overshoot_seen = 0;
3213 int undershoot_seen = 0;
3216 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3217 cpi->oxcf.optimal_buffer_level / 100);
3218 int drop_mark75 = drop_mark * 2 / 3;
3219 int drop_mark50 = drop_mark / 4;
3220 int drop_mark25 = drop_mark / 8;
3222 /* Clear down mmx registers to allow floating point in what follows */
3223 vpx_clear_system_state();
3225 if (cpi->force_next_frame_intra) {
3226 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3227 cpi->force_next_frame_intra = 0;
3230 /* For an alt ref frame in 2 pass we skip the call to the second pass
3231 * function that sets the target bandwidth
3233 switch (cpi->pass) {
3234 #if !CONFIG_REALTIME_ONLY
3236 if (cpi->common.refresh_alt_ref_frame) {
3237 /* Per frame bit target for the alt ref frame */
3238 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3239 /* per second target bitrate */
3240 cpi->target_bandwidth =
3241 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3244 #endif // !CONFIG_REALTIME_ONLY
3246 cpi->per_frame_bandwidth =
3247 (int)(cpi->target_bandwidth / cpi->output_framerate);
3251 /* Default turn off buffer to buffer copying */
3252 cm->copy_buffer_to_gf = 0;
3253 cm->copy_buffer_to_arf = 0;
3255 /* Clear zbin over-quant value and mode boost values. */
3256 cpi->mb.zbin_over_quant = 0;
3257 cpi->mb.zbin_mode_boost = 0;
3259 /* Enable or disable mode based tweaking of the zbin
3260 * For 2 Pass Only used where GF/ARF prediction quality
3261 * is above a threshold
3263 cpi->mb.zbin_mode_boost_enabled = 1;
3264 if (cpi->pass == 2) {
3265 if (cpi->gfu_boost <= 400) {
3266 cpi->mb.zbin_mode_boost_enabled = 0;
3270 /* Current default encoder behaviour for the altref sign bias */
3271 if (cpi->source_alt_ref_active) {
3272 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3274 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3277 /* Check to see if a key frame is signaled
3278 * For two pass with auto key frame enabled cm->frame_type may already
3279 * be set, but not for one pass.
3281 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3282 (cpi->oxcf.auto_key &&
3283 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3284 /* Key frame from VFW/auto-keyframe/first frame */
3285 cm->frame_type = KEY_FRAME;
3286 #if CONFIG_TEMPORAL_DENOISING
3287 if (cpi->oxcf.noise_sensitivity == 4) {
3288 // For adaptive mode, reset denoiser to normal mode on key frame.
3289 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3294 #if CONFIG_MULTI_RES_ENCODING
3295 if (cpi->oxcf.mr_total_resolutions > 1) {
3296 LOWER_RES_FRAME_INFO *low_res_frame_info =
3297 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3299 if (cpi->oxcf.mr_encoder_id) {
3300 // TODO(marpan): This constraint shouldn't be needed, as we would like
3301 // to allow for key frame setting (forced or periodic) defined per
3302 // spatial layer. For now, keep this in.
3303 cm->frame_type = low_res_frame_info->frame_type;
3305 // Check if lower resolution is available for motion vector reuse.
3306 if (cm->frame_type != KEY_FRAME) {
3307 cpi->mr_low_res_mv_avail = 1;
3308 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3310 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3311 cpi->mr_low_res_mv_avail &=
3312 (cpi->current_ref_frames[LAST_FRAME] ==
3313 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3315 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3316 cpi->mr_low_res_mv_avail &=
3317 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3318 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3320 // Don't use altref to determine whether low res is available.
3321 // TODO (marpan): Should we make this type of condition on a
3322 // per-reference frame basis?
3324 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3325 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3326 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3331 // On a key frame: For the lowest resolution, keep track of the key frame
3332 // counter value. For the higher resolutions, reset the current video
3333 // frame counter to that of the lowest resolution.
3334 // This is done to the handle the case where we may stop/start encoding
3335 // higher layer(s). The restart-encoding of higher layer is only signaled
3336 // by a key frame for now.
3337 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3338 if (cm->frame_type == KEY_FRAME) {
3339 if (cpi->oxcf.mr_encoder_id) {
3340 // If the initial starting value of the buffer level is zero (this can
3341 // happen because we may have not started encoding this higher stream),
3342 // then reset it to non-zero value based on |starting_buffer_level|.
3343 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3345 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3346 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3347 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3348 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3349 lc->bits_off_target = lc->starting_buffer_level;
3350 lc->buffer_level = lc->starting_buffer_level;
3353 cpi->common.current_video_frame =
3354 low_res_frame_info->key_frame_counter_value;
3356 low_res_frame_info->key_frame_counter_value =
3357 cpi->common.current_video_frame;
3363 // Find the reference frame closest to the current frame.
3364 cpi->closest_reference_frame = LAST_FRAME;
3365 if (cm->frame_type != KEY_FRAME) {
3367 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3368 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3369 closest_ref = LAST_FRAME;
3370 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3371 closest_ref = GOLDEN_FRAME;
3372 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3373 closest_ref = ALTREF_FRAME;
3375 for (i = 1; i <= 3; ++i) {
3376 vpx_ref_frame_type_t ref_frame_type =
3377 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3378 if (cpi->ref_frame_flags & ref_frame_type) {
3379 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3380 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3385 cpi->closest_reference_frame = closest_ref;
3388 /* Set various flags etc to special state if it is a key frame */
3389 if (cm->frame_type == KEY_FRAME) {
3392 // Set the loop filter deltas and segmentation map update
3393 setup_features(cpi);
3395 /* The alternate reference frame cannot be active for a key frame */
3396 cpi->source_alt_ref_active = 0;
3398 /* Reset the RD threshold multipliers to default of * 1 (128) */
3399 for (i = 0; i < MAX_MODES; ++i) {
3400 cpi->mb.rd_thresh_mult[i] = 128;
3403 // Reset the zero_last counter to 0 on key frame.
3404 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3405 memset(cpi->consec_zero_last_mvbias, 0,
3406 (cpi->common.mb_rows * cpi->common.mb_cols));
3410 /* Experimental code for lagged compress and one pass
3411 * Initialise one_pass GF frames stats
3412 * Update stats used for GF selection
3415 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3417 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3418 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3419 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3420 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3421 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3422 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3423 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3424 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3425 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3429 update_rd_ref_frame_probs(cpi);
3431 if (cpi->drop_frames_allowed) {
3432 /* The reset to decimation 0 is only done here for one pass.
3433 * Once it is set two pass leaves decimation on till the next kf.
3435 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3436 cpi->decimation_factor--;
3439 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3440 cpi->decimation_factor = 1;
3442 } else if (cpi->buffer_level < drop_mark25 &&
3443 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3444 cpi->decimation_factor = 3;
3445 } else if (cpi->buffer_level < drop_mark50 &&
3446 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3447 cpi->decimation_factor = 2;
3448 } else if (cpi->buffer_level < drop_mark75 &&
3449 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3450 cpi->decimation_factor = 1;
3454 /* The following decimates the frame rate according to a regular
3455 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3456 * prevent buffer under-run in CBR mode. Alternatively it might be
3457 * desirable in some situations to drop frame rate but throw more bits
3460 * Note that dropping a key frame can be problematic if spatial
3461 * resampling is also active
3463 if (cpi->decimation_factor > 0) {
3464 switch (cpi->decimation_factor) {
3466 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3469 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3472 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3476 /* Note that we should not throw out a key frame (especially when
3477 * spatial resampling is enabled).
3479 if (cm->frame_type == KEY_FRAME) {
3480 cpi->decimation_count = cpi->decimation_factor;
3481 } else if (cpi->decimation_count > 0) {
3482 cpi->decimation_count--;
3484 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3485 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3486 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3489 #if CONFIG_MULTI_RES_ENCODING
3490 vp8_store_drop_frame_info(cpi);
3493 cm->current_video_frame++;
3494 cpi->frames_since_key++;
3495 // We advance the temporal pattern for dropped frames.
3496 cpi->temporal_pattern_counter++;
3498 #if CONFIG_INTERNAL_STATS
3502 cpi->buffer_level = cpi->bits_off_target;
3504 if (cpi->oxcf.number_of_layers > 1) {
3507 /* Propagate bits saved by dropping the frame to higher
3510 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3511 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3512 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3513 if (lc->bits_off_target > lc->maximum_buffer_size) {
3514 lc->bits_off_target = lc->maximum_buffer_size;
3516 lc->buffer_level = lc->bits_off_target;
3522 cpi->decimation_count = cpi->decimation_factor;
3525 cpi->decimation_count = 0;
3528 /* Decide how big to make the frame */
3529 if (!vp8_pick_frame_size(cpi)) {
3530 /*TODO: 2 drop_frame and return code could be put together. */
3531 #if CONFIG_MULTI_RES_ENCODING
3532 vp8_store_drop_frame_info(cpi);
3534 cm->current_video_frame++;
3535 cpi->frames_since_key++;
3536 // We advance the temporal pattern for dropped frames.
3537 cpi->temporal_pattern_counter++;
3541 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3542 * This has a knock on effect on active best quality as well.
3543 * For CBR if the buffer reaches its maximum level then we can no longer
3544 * save up bits for later frames so we might as well use them up
3545 * on the current frame.
3547 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3548 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3549 cpi->buffered_mode) {
3550 /* Max adjustment is 1/4 */
3551 int Adjustment = cpi->active_worst_quality / 4;
3556 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3557 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3558 cpi->oxcf.optimal_buffer_level) /
3561 if (buff_lvl_step) {
3563 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3570 cpi->active_worst_quality -= Adjustment;
3572 if (cpi->active_worst_quality < cpi->active_best_quality) {
3573 cpi->active_worst_quality = cpi->active_best_quality;
3578 /* Set an active best quality and if necessary active worst quality
3579 * There is some odd behavior for one pass here that needs attention.
3581 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3582 vpx_clear_system_state();
3584 Q = cpi->active_worst_quality;
3586 if (cm->frame_type == KEY_FRAME) {
3587 if (cpi->pass == 2) {
3588 if (cpi->gfu_boost > 600) {
3589 cpi->active_best_quality = kf_low_motion_minq[Q];
3591 cpi->active_best_quality = kf_high_motion_minq[Q];
3594 /* Special case for key frames forced because we have reached
3595 * the maximum key frame interval. Here force the Q to a range
3596 * based on the ambient Q to reduce the risk of popping
3598 if (cpi->this_key_frame_forced) {
3599 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3600 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3601 } else if (cpi->active_best_quality<cpi->avg_frame_qindex>> 2) {
3602 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3606 /* One pass more conservative */
3608 cpi->active_best_quality = kf_high_motion_minq[Q];
3612 else if (cpi->oxcf.number_of_layers == 1 &&
3613 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3614 /* Use the lower of cpi->active_worst_quality and recent
3615 * average Q as basis for GF/ARF Q limit unless last frame was
3618 if ((cpi->frames_since_key > 1) &&
3619 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3620 Q = cpi->avg_frame_qindex;
3623 /* For constrained quality dont allow Q less than the cq level */
3624 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3625 (Q < cpi->cq_target_quality)) {
3626 Q = cpi->cq_target_quality;
3629 if (cpi->pass == 2) {
3630 if (cpi->gfu_boost > 1000) {
3631 cpi->active_best_quality = gf_low_motion_minq[Q];
3632 } else if (cpi->gfu_boost < 400) {
3633 cpi->active_best_quality = gf_high_motion_minq[Q];
3635 cpi->active_best_quality = gf_mid_motion_minq[Q];
3638 /* Constrained quality use slightly lower active best. */
3639 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3640 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3643 /* One pass more conservative */
3645 cpi->active_best_quality = gf_high_motion_minq[Q];
3648 cpi->active_best_quality = inter_minq[Q];
3650 /* For the constant/constrained quality mode we dont want
3651 * q to fall below the cq level.
3653 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3654 (cpi->active_best_quality < cpi->cq_target_quality)) {
3655 /* If we are strongly undershooting the target rate in the last
3656 * frames then use the user passed in cq value not the auto
3659 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3660 cpi->active_best_quality = cpi->oxcf.cq_level;
3662 cpi->active_best_quality = cpi->cq_target_quality;
3667 /* If CBR and the buffer is as full then it is reasonable to allow
3668 * higher quality on the frames to prevent bits just going to waste.
3670 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3671 /* Note that the use of >= here elliminates the risk of a devide
3672 * by 0 error in the else if clause
3674 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3675 cpi->active_best_quality = cpi->best_quality;
3677 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3679 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3680 (cpi->oxcf.maximum_buffer_size -
3681 cpi->oxcf.optimal_buffer_level));
3682 int min_qadjustment =
3683 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3685 cpi->active_best_quality -= min_qadjustment;
3689 /* Make sure constrained quality mode limits are adhered to for the first
3690 * few frames of one pass encodes
3692 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3693 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3694 cpi->common.refresh_alt_ref_frame) {
3695 cpi->active_best_quality = cpi->best_quality;
3696 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3697 cpi->active_best_quality = cpi->cq_target_quality;
3701 /* Clip the active best and worst quality values to limits */
3702 if (cpi->active_worst_quality > cpi->worst_quality) {
3703 cpi->active_worst_quality = cpi->worst_quality;
3706 if (cpi->active_best_quality < cpi->best_quality) {
3707 cpi->active_best_quality = cpi->best_quality;
3710 if (cpi->active_worst_quality < cpi->active_best_quality) {
3711 cpi->active_worst_quality = cpi->active_best_quality;
3714 /* Determine initial Q to try */
3715 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3717 #if !CONFIG_REALTIME_ONLY
3719 /* Set highest allowed value for Zbin over quant */
3720 if (cm->frame_type == KEY_FRAME) {
3722 } else if ((cpi->oxcf.number_of_layers == 1) &&
3723 ((cm->refresh_alt_ref_frame ||
3724 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3727 zbin_oq_high = ZBIN_OQ_MAX;
3731 /* Setup background Q adjustment for error resilient mode.
3732 * For multi-layer encodes only enable this for the base layer.
3734 if (cpi->cyclic_refresh_mode_enabled) {
3735 // Special case for screen_content_mode with golden frame updates.
3737 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3738 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3739 cyclic_background_refresh(cpi, Q, 0);
3741 disable_segmentation(cpi);
3745 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3746 &frame_over_shoot_limit);
3748 #if !CONFIG_REALTIME_ONLY
3749 /* Limit Q range for the adaptive loop. */
3750 bottom_index = cpi->active_best_quality;
3751 top_index = cpi->active_worst_quality;
3752 q_low = cpi->active_best_quality;
3753 q_high = cpi->active_worst_quality;
3756 vp8_save_coding_context(cpi);
3760 scale_and_extend_source(cpi->un_scaled_source, cpi);
3762 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3763 // Option to apply spatial blur under the aggressive or adaptive
3764 // (temporal denoising) mode.
3765 if (cpi->oxcf.noise_sensitivity >= 3) {
3766 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3767 vp8_de_noise(cm, cpi->Source, cpi->Source,
3768 cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
3773 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3775 if (cpi->oxcf.noise_sensitivity > 0) {
3779 switch (cpi->oxcf.noise_sensitivity) {
3780 case 1: l = 20; break;
3781 case 2: l = 40; break;
3782 case 3: l = 60; break;
3783 case 4: l = 80; break;
3784 case 5: l = 100; break;
3785 case 6: l = 150; break;
3788 if (cm->frame_type == KEY_FRAME) {
3789 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3791 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3793 src = cpi->Source->y_buffer;
3795 if (cpi->Source->y_stride < 0) {
3796 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3803 #ifdef OUTPUT_YUV_SRC
3804 vp8_write_yuv_frame(yuv_file, cpi->Source);
3808 vpx_clear_system_state();
3810 vp8_set_quantizer(cpi, Q);
3812 /* setup skip prob for costing in mode/mv decision */
3813 if (cpi->common.mb_no_coeff_skip) {
3814 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3816 if (cm->frame_type != KEY_FRAME) {
3817 if (cpi->common.refresh_alt_ref_frame) {
3818 if (cpi->last_skip_false_probs[2] != 0) {
3819 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3823 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3824 cpi->last_skip_probs_q[2])<=16 )
3825 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3826 else if (cpi->last_skip_false_probs[2]!=0)
3827 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3828 cpi->prob_skip_false ) / 2;
3830 } else if (cpi->common.refresh_golden_frame) {
3831 if (cpi->last_skip_false_probs[1] != 0) {
3832 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3836 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3837 cpi->last_skip_probs_q[1])<=16 )
3838 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3839 else if (cpi->last_skip_false_probs[1]!=0)
3840 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3841 cpi->prob_skip_false ) / 2;
3844 if (cpi->last_skip_false_probs[0] != 0) {
3845 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3849 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3850 cpi->last_skip_probs_q[0])<=16 )
3851 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3852 else if(cpi->last_skip_false_probs[0]!=0)
3853 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3854 cpi->prob_skip_false ) / 2;
3858 /* as this is for cost estimate, let's make sure it does not
3859 * go extreme eitehr way
3861 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3863 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3865 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3866 cpi->prob_skip_false = 1;
3874 FILE *f = fopen("skip.stt", "a");
3875 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3882 if (cm->frame_type == KEY_FRAME) {
3883 if (resize_key_frame(cpi)) {
3884 /* If the frame size has changed, need to reset Q, quantizer,
3885 * and background refresh.
3887 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3888 if (cpi->cyclic_refresh_mode_enabled) {
3889 if (cpi->current_layer == 0) {
3890 cyclic_background_refresh(cpi, Q, 0);
3892 disable_segmentation(cpi);
3895 // Reset the zero_last counter to 0 on key frame.
3896 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3897 memset(cpi->consec_zero_last_mvbias, 0,
3898 (cpi->common.mb_rows * cpi->common.mb_cols));
3899 vp8_set_quantizer(cpi, Q);
3902 vp8_setup_key_frame(cpi);
3905 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3907 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3909 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3910 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3913 if (cm->refresh_entropy_probs == 0) {
3914 /* save a copy for later refresh */
3915 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3918 vp8_update_coef_context(cpi);
3920 vp8_update_coef_probs(cpi);
3922 /* transform / motion compensation build reconstruction frame
3923 * +pack coef partitions
3925 vp8_encode_frame(cpi);
3927 /* cpi->projected_frame_size is not needed for RT mode */
3930 /* transform / motion compensation build reconstruction frame */
3931 vp8_encode_frame(cpi);
3933 if (cpi->oxcf.screen_content_mode == 2) {
3934 if (vp8_drop_encodedframe_overshoot(cpi, Q)) return;
3937 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3938 cpi->projected_frame_size =
3939 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3941 vpx_clear_system_state();
3943 /* Test to see if the stats generated for this frame indicate that
3944 * we should have coded a key frame (assuming that we didn't)!
3947 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
3948 cpi->compressor_speed != 2) {
3949 #if !CONFIG_REALTIME_ONLY
3950 if (decide_key_frame(cpi)) {
3951 /* Reset all our sizing numbers and recode */
3952 cm->frame_type = KEY_FRAME;
3954 vp8_pick_frame_size(cpi);
3956 /* Clear the Alt reference frame active flag when we have
3959 cpi->source_alt_ref_active = 0;
3961 // Set the loop filter deltas and segmentation map update
3962 setup_features(cpi);
3964 vp8_restore_coding_context(cpi);
3966 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3968 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3969 &frame_over_shoot_limit);
3971 /* Limit Q range for the adaptive loop. */
3972 bottom_index = cpi->active_best_quality;
3973 top_index = cpi->active_worst_quality;
3974 q_low = cpi->active_best_quality;
3975 q_high = cpi->active_worst_quality;
3985 vpx_clear_system_state();
3987 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
3989 /* Are we are overshooting and up against the limit of active max Q. */
3990 if (((cpi->pass != 2) ||
3991 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
3992 (Q == cpi->active_worst_quality) &&
3993 (cpi->active_worst_quality < cpi->worst_quality) &&
3994 (cpi->projected_frame_size > frame_over_shoot_limit)) {
3995 int over_size_percent =
3996 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
3997 frame_over_shoot_limit;
3999 /* If so is there any scope for relaxing it */
4000 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4001 (over_size_percent > 0)) {
4002 cpi->active_worst_quality++;
4003 /* Assume 1 qstep = about 4% on frame size. */
4004 over_size_percent = (int)(over_size_percent * 0.96);
4006 #if !CONFIG_REALTIME_ONLY
4007 top_index = cpi->active_worst_quality;
4008 #endif // !CONFIG_REALTIME_ONLY
4009 /* If we have updated the active max Q do not call
4010 * vp8_update_rate_correction_factors() this loop.
4012 active_worst_qchanged = 1;
4014 active_worst_qchanged = 0;
4017 #if CONFIG_REALTIME_ONLY
4020 /* Special case handling for forced key frames */
4021 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4023 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4025 /* The key frame is not good enough */
4026 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4028 q_high = (Q > q_low) ? (Q - 1) : q_low;
4031 Q = (q_high + q_low) >> 1;
4033 /* The key frame is much better than the previous frame */
4034 else if (kf_err < (cpi->ambient_err >> 1)) {
4036 q_low = (Q < q_high) ? (Q + 1) : q_high;
4039 Q = (q_high + q_low + 1) >> 1;
4042 /* Clamp Q to upper and lower limits: */
4045 } else if (Q < q_low) {
4052 /* Is the projected frame size out of range and are we allowed
4053 * to attempt to recode.
4055 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4056 frame_under_shoot_limit, Q, top_index,
4061 /* Frame size out of permitted range. Update correction factor
4062 * & compute new Q to try...
4065 /* Frame is too large */
4066 if (cpi->projected_frame_size > cpi->this_frame_target) {
4067 /* Raise Qlow as to at least the current value */
4068 q_low = (Q < q_high) ? (Q + 1) : q_high;
4070 /* If we are using over quant do the same for zbin_oq_low */
4071 if (cpi->mb.zbin_over_quant > 0) {
4072 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4073 ? (cpi->mb.zbin_over_quant + 1)
4077 if (undershoot_seen) {
4078 /* Update rate_correction_factor unless
4079 * cpi->active_worst_quality has changed.
4081 if (!active_worst_qchanged) {
4082 vp8_update_rate_correction_factors(cpi, 1);
4085 Q = (q_high + q_low + 1) / 2;
4087 /* Adjust cpi->zbin_over_quant (only allowed when Q
4091 cpi->mb.zbin_over_quant = 0;
4093 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4094 ? (cpi->mb.zbin_over_quant + 1)
4096 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4099 /* Update rate_correction_factor unless
4100 * cpi->active_worst_quality has changed.
4102 if (!active_worst_qchanged) {
4103 vp8_update_rate_correction_factors(cpi, 0);
4106 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4108 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4110 vp8_update_rate_correction_factors(cpi, 0);
4111 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4118 /* Frame is too small */
4120 if (cpi->mb.zbin_over_quant == 0) {
4121 /* Lower q_high if not using over quant */
4122 q_high = (Q > q_low) ? (Q - 1) : q_low;
4124 /* else lower zbin_oq_high */
4125 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4126 ? (cpi->mb.zbin_over_quant - 1)
4130 if (overshoot_seen) {
4131 /* Update rate_correction_factor unless
4132 * cpi->active_worst_quality has changed.
4134 if (!active_worst_qchanged) {
4135 vp8_update_rate_correction_factors(cpi, 1);
4138 Q = (q_high + q_low) / 2;
4140 /* Adjust cpi->zbin_over_quant (only allowed when Q
4144 cpi->mb.zbin_over_quant = 0;
4146 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4149 /* Update rate_correction_factor unless
4150 * cpi->active_worst_quality has changed.
4152 if (!active_worst_qchanged) {
4153 vp8_update_rate_correction_factors(cpi, 0);
4156 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4158 /* Special case reset for qlow for constrained quality.
4159 * This should only trigger where there is very substantial
4160 * undershoot on a frame and the auto cq level is above
4161 * the user passsed in value.
4163 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4168 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4170 vp8_update_rate_correction_factors(cpi, 0);
4171 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4176 undershoot_seen = 1;
4179 /* Clamp Q to upper and lower limits: */
4182 } else if (Q < q_low) {
4186 /* Clamp cpi->zbin_over_quant */
4187 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low)
4189 : (cpi->mb.zbin_over_quant > zbin_oq_high)
4191 : cpi->mb.zbin_over_quant;
4197 #endif // CONFIG_REALTIME_ONLY
4199 if (cpi->is_src_frame_alt_ref) Loop = 0;
4202 vp8_restore_coding_context(cpi);
4204 #if CONFIG_INTERNAL_STATS
4205 cpi->tot_recode_hits++;
4208 } while (Loop == 1);
4211 /* Experimental code for lagged and one pass
4212 * Update stats used for one pass GF selection
4215 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4216 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4217 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4221 /* Special case code to reduce pulsing when key frames are forced at a
4222 * fixed interval. Note the reconstruction error if it is the frame before
4223 * the force key frame
4225 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4227 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4230 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4231 * Last frame has one more line(add to bottom) and one more column(add to
4232 * right) than cm->mip. The edge elements are initialized to 0.
4234 #if CONFIG_MULTI_RES_ENCODING
4235 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4237 if (cm->show_frame) /* do not save for altref frame */
4242 /* Point to beginning of allocated MODE_INFO arrays. */
4243 MODE_INFO *tmp = cm->mip;
4245 if (cm->frame_type != KEY_FRAME) {
4246 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4247 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4248 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4249 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4250 tmp->mbmi.mv.as_int;
4253 cpi->lf_ref_frame_sign_bias[mb_col +
4254 mb_row * (cm->mode_info_stride + 1)] =
4255 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4256 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4257 tmp->mbmi.ref_frame;
4264 /* Count last ref frame 0,0 usage on current encoded frame. */
4268 /* Point to beginning of MODE_INFO arrays. */
4269 MODE_INFO *tmp = cm->mi;
4271 cpi->zeromv_count = 0;
4273 if (cm->frame_type != KEY_FRAME) {
4274 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4275 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4276 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4277 cpi->zeromv_count++;
4286 #if CONFIG_MULTI_RES_ENCODING
4287 vp8_cal_dissimilarity(cpi);
4290 /* Update the GF useage maps.
4291 * This is done after completing the compression of a frame when all
4292 * modes etc. are finalized but before loop filter
4294 if (cpi->oxcf.number_of_layers == 1) {
4295 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4298 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4302 FILE *f = fopen("gfactive.stt", "a");
4303 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4308 /* For inter frames the current default behavior is that when
4309 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4310 * This is purely an encoder decision at present.
4312 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) {
4313 cm->copy_buffer_to_arf = 2;
4315 cm->copy_buffer_to_arf = 0;
4318 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4320 #if CONFIG_TEMPORAL_DENOISING
4321 // Get some measure of the amount of noise, by measuring the (partial) mse
4322 // between source and denoised buffer, for y channel. Partial refers to
4323 // computing the sse for a sub-sample of the frame (i.e., skip x blocks along
4325 // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
4326 // Do this every ~8 frames, to further reduce complexity.
4327 // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity <
4329 // should be removed in favor of the process_denoiser_mode_change() function
4331 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4332 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4333 cm->frame_type != KEY_FRAME) {
4334 cpi->mse_source_denoised = measure_square_diff_partial(
4335 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4338 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4339 // of source diff (between current and previous frame), and determine if we
4340 // should switch the denoiser mode. Sampling refers to computing the mse for
4341 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4342 // only for blocks in that set that have used ZEROMV LAST, along with some
4343 // constraint on the sum diff between blocks. This process is called every
4344 // ~8 frames, to further reduce complexity.
4345 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4346 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4347 process_denoiser_mode_change(cpi);
4351 #if CONFIG_MULTITHREAD
4352 if (cpi->b_multi_threaded) {
4353 /* start loopfilter in separate thread */
4354 sem_post(&cpi->h_event_start_lpf);
4355 cpi->b_lpf_running = 1;
4359 vp8_loopfilter_frame(cpi, cm);
4362 update_reference_frames(cpi);
4364 #ifdef OUTPUT_YUV_DENOISED
4365 vp8_write_yuv_frame(yuv_denoised_file,
4366 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4369 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4370 if (cpi->oxcf.error_resilient_mode) {
4371 cm->refresh_entropy_probs = 0;
4375 #if CONFIG_MULTITHREAD
4376 /* wait that filter_level is picked so that we can continue with stream
4378 if (cpi->b_multi_threaded) sem_wait(&cpi->h_event_end_lpf);
4381 /* build the bitstream */
4382 vp8_pack_bitstream(cpi, dest, dest_end, size);
4384 /* Move storing frame_type out of the above loop since it is also
4385 * needed in motion search besides loopfilter */
4386 cm->last_frame_type = cm->frame_type;
4388 /* Update rate control heuristics */
4389 cpi->total_byte_count += (*size);
4390 cpi->projected_frame_size = (int)(*size) << 3;
4392 if (cpi->oxcf.number_of_layers > 1) {
4394 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4395 cpi->layer_context[i].total_byte_count += (*size);
4399 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4401 cpi->last_q[cm->frame_type] = cm->base_qindex;
4403 if (cm->frame_type == KEY_FRAME) {
4404 vp8_adjust_key_frame_context(cpi);
4407 /* Keep a record of ambient average Q. */
4408 if (cm->frame_type != KEY_FRAME) {
4409 cpi->avg_frame_qindex =
4410 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4413 /* Keep a record from which we can calculate the average Q excluding
4414 * GF updates and key frames
4416 if ((cm->frame_type != KEY_FRAME) &&
4417 ((cpi->oxcf.number_of_layers > 1) ||
4418 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4421 /* Calculate the average Q for normal inter frames (not key or GFU
4424 if (cpi->pass == 2) {
4425 cpi->ni_tot_qi += Q;
4426 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4428 /* Damp value for first few frames */
4429 if (cpi->ni_frames > 150) {
4430 cpi->ni_tot_qi += Q;
4431 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4433 /* For one pass, early in the clip ... average the current frame Q
4434 * value with the worstq entered by the user as a dampening measure
4437 cpi->ni_tot_qi += Q;
4439 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4442 /* If the average Q is higher than what was used in the last
4443 * frame (after going through the recode loop to keep the frame
4444 * size within range) then use the last frame value - 1. The -1
4445 * is designed to stop Q and hence the data rate, from
4446 * progressively falling away during difficult sections, but at
4447 * the same time reduce the number of itterations around the
4450 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4454 /* Update the buffer level variable. */
4455 /* Non-viewable frames are a special case and are treated as pure overhead. */
4456 if (!cm->show_frame) {
4457 cpi->bits_off_target -= cpi->projected_frame_size;
4459 cpi->bits_off_target +=
4460 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4463 /* Clip the buffer level to the maximum specified buffer size */
4464 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4465 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4468 // If the frame dropper is not enabled, don't let the buffer level go below
4469 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4470 // this for screen content input.
4471 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4472 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4473 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4476 /* Rolling monitors of whether we are over or underspending used to
4477 * help regulate min and Max Q in two pass.
4479 cpi->rolling_target_bits =
4480 ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4481 cpi->rolling_actual_bits =
4482 ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4483 cpi->long_rolling_target_bits =
4484 ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4485 cpi->long_rolling_actual_bits =
4486 ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) /
4489 /* Actual bits spent */
4490 cpi->total_actual_bits += cpi->projected_frame_size;
4493 cpi->total_target_vs_actual +=
4494 (cpi->this_frame_target - cpi->projected_frame_size);
4496 cpi->buffer_level = cpi->bits_off_target;
4498 /* Propagate values to higher temporal layers */
4499 if (cpi->oxcf.number_of_layers > 1) {
4502 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4503 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4504 int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
4505 cpi->projected_frame_size);
4507 lc->bits_off_target += bits_off_for_this_layer;
4509 /* Clip buffer level to maximum buffer size for the layer */
4510 if (lc->bits_off_target > lc->maximum_buffer_size) {
4511 lc->bits_off_target = lc->maximum_buffer_size;
4514 lc->total_actual_bits += cpi->projected_frame_size;
4515 lc->total_target_vs_actual += bits_off_for_this_layer;
4516 lc->buffer_level = lc->bits_off_target;
4520 /* Update bits left to the kf and gf groups to account for overshoot
4521 * or undershoot on these frames
4523 if (cm->frame_type == KEY_FRAME) {
4524 cpi->twopass.kf_group_bits +=
4525 cpi->this_frame_target - cpi->projected_frame_size;
4527 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4528 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4529 cpi->twopass.gf_group_bits +=
4530 cpi->this_frame_target - cpi->projected_frame_size;
4532 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4535 if (cm->frame_type != KEY_FRAME) {
4536 if (cpi->common.refresh_alt_ref_frame) {
4537 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4538 cpi->last_skip_probs_q[2] = cm->base_qindex;
4539 } else if (cpi->common.refresh_golden_frame) {
4540 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4541 cpi->last_skip_probs_q[1] = cm->base_qindex;
4543 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4544 cpi->last_skip_probs_q[0] = cm->base_qindex;
4546 /* update the baseline */
4547 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4551 #if 0 && CONFIG_INTERNAL_STATS
4553 FILE *f = fopen("tmp.stt", "a");
4555 vpx_clear_system_state();
4557 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4558 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4559 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4560 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4561 cpi->common.current_video_frame, cpi->this_frame_target,
4562 cpi->projected_frame_size,
4563 (cpi->projected_frame_size - cpi->this_frame_target),
4564 cpi->total_target_vs_actual,
4566 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4567 cpi->total_actual_bits, cm->base_qindex,
4568 cpi->active_best_quality, cpi->active_worst_quality,
4569 cpi->ni_av_qi, cpi->cq_target_quality,
4570 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4571 cm->frame_type, cpi->gfu_boost,
4572 cpi->twopass.est_max_qcorrection_factor,
4573 cpi->twopass.bits_left,
4574 cpi->twopass.total_left_stats.coded_error,
4575 (double)cpi->twopass.bits_left /
4576 cpi->twopass.total_left_stats.coded_error,
4577 cpi->tot_recode_hits);
4579 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4580 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4581 "%8.2lf %"PRId64" %10.3lf %8d\n",
4582 cpi->common.current_video_frame, cpi->this_frame_target,
4583 cpi->projected_frame_size,
4584 (cpi->projected_frame_size - cpi->this_frame_target),
4585 cpi->total_target_vs_actual,
4587 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4588 cpi->total_actual_bits, cm->base_qindex,
4589 cpi->active_best_quality, cpi->active_worst_quality,
4590 cpi->ni_av_qi, cpi->cq_target_quality,
4591 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4592 cm->frame_type, cpi->gfu_boost,
4593 cpi->twopass.est_max_qcorrection_factor,
4594 cpi->twopass.bits_left,
4595 cpi->twopass.total_left_stats.coded_error,
4596 cpi->tot_recode_hits);
4601 FILE *fmodes = fopen("Modes.stt", "a");
4603 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4604 cpi->common.current_video_frame,
4605 cm->frame_type, cm->refresh_golden_frame,
4606 cm->refresh_alt_ref_frame);
4608 fprintf(fmodes, "\n");
4616 if (cm->refresh_golden_frame == 1) {
4617 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4619 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4622 if (cm->refresh_alt_ref_frame == 1) {
4623 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4625 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4628 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4629 cpi->gold_is_last = 1;
4630 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4631 /* 1 refreshed but not the other */
4632 cpi->gold_is_last = 0;
4635 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4636 cpi->alt_is_last = 1;
4637 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4638 /* 1 refreshed but not the other */
4639 cpi->alt_is_last = 0;
4642 if (cm->refresh_alt_ref_frame &
4643 cm->refresh_golden_frame) { /* both refreshed */
4644 cpi->gold_is_alt = 1;
4645 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4646 /* 1 refreshed but not the other */
4647 cpi->gold_is_alt = 0;
4650 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4652 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4654 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4656 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4658 if (!cpi->oxcf.error_resilient_mode) {
4659 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4660 (cm->frame_type != KEY_FRAME)) {
4661 /* Update the alternate reference frame stats as appropriate. */
4662 update_alt_ref_frame_stats(cpi);
4664 /* Update the Golden frame stats as appropriate. */
4665 update_golden_frame_stats(cpi);
4669 if (cm->frame_type == KEY_FRAME) {
4670 /* Tell the caller that the frame was coded as a key frame */
4671 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4673 /* As this frame is a key frame the next defaults to an inter frame. */
4674 cm->frame_type = INTER_FRAME;
4676 cpi->last_frame_percent_intra = 100;
4678 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4680 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4683 /* Clear the one shot update flags for segmentation map and mode/ref
4684 * loop filter deltas.
4686 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4687 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4688 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4690 /* Dont increment frame counters if this was an altref buffer update
4693 if (cm->show_frame) {
4694 cm->current_video_frame++;
4695 cpi->frames_since_key++;
4696 cpi->temporal_pattern_counter++;
4699 /* reset to normal state now that we are done. */
4705 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4706 recon_file = fopen(filename, "wb");
4707 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4708 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4714 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4716 #if !CONFIG_REALTIME_ONLY
/* Second-pass encode entry point.
 * Runs the two-pass statistics update (vp8_second_pass) for non-alt-ref
 * frames, encodes the frame at the chosen data rate, and keeps the
 * remaining two-pass bit budget (twopass.bits_left) in sync with the bits
 * actually produced.
 * NOTE(review): cpi->common.refresh_alt_ref_frame is deliberately re-read
 * after the encode call below — encode_frame_to_data_rate can modify the
 * flag, so the two tests are not equivalent and must not be merged.
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbering, e.g. the closing braces); code left byte-for-byte untouched.
 */
4717 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4718 unsigned char *dest_end, unsigned int *frame_flags) {
4719 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4721 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
/* Charge the bits just written against the remaining two-pass budget. */
4722 cpi->twopass.bits_left -= 8 * (int)(*size);
4724 if (!cpi->common.refresh_alt_ref_frame) {
/* Credit back the guaranteed minimum per-frame allocation (VBR minimum
 * section rate expressed as bits for one frame at the current framerate). */
4725 double two_pass_min_rate =
4726 (double)(cpi->oxcf.target_bandwidth *
4727 cpi->oxcf.two_pass_vbrmin_section / 100);
4728 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
/* Accept a raw (uncompressed) source frame from the application and push it
 * into the encoder's lookahead queue, accumulating the time spent into
 * cpi->time_receive_data.
 * NOTE(review): the parameter list tail (presumably the frame end time),
 * the error-return path of the vp8_lookahead_push failure branch, and the
 * final return statement are not visible in this listing (elided lines) —
 * confirm against the full file.
 */
4733 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4734 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4736 struct vpx_usec_timer timer;
4739 vpx_usec_timer_start(&timer);
4741 /* Reinit the lookahead buffer if the frame size changes */
4742 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
/* A lookahead deeper than one frame cannot cope with mid-stream size
 * changes, hence the lag assertion before reallocating. */
4743 assert(cpi->oxcf.lag_in_frames < 2);
4744 dealloc_raw_frame_buffers(cpi);
4745 alloc_raw_frame_buffers(cpi);
/* Queue the frame; the per-MB active map is attached only when enabled. */
4748 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4749 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4752 vpx_usec_timer_mark(&timer);
4753 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4758 static int frame_is_reference(const VP8_COMP *cpi) {
4759 const VP8_COMMON *cm = &cpi->common;
4760 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4762 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4763 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4764 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4765 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4766 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
/* Top-level per-frame encode call used by the encoder API layer.
 * Pops the next raw frame from the lookahead (possibly substituting the
 * temporally-filtered alt-ref buffer), adapts the frame-rate estimate from
 * incoming timestamps, dispatches to the appropriate pass's encode path,
 * then refreshes entropy-context snapshots, layer state and (when enabled)
 * internal PSNR/SSIM statistics.
 * Errors raised deeper in the encoder longjmp back to the setjmp trap below
 * and are reported as VPX_CODEC_CORRUPT_FRAME.
 * NOTE(review): this listing has many elided lines (gaps in the embedded
 * line numbering — closing braces, #endif lines, some statements and the
 * return paths). Code is left byte-for-byte untouched; only comments added.
 */
4769 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4770 size_t *size, unsigned char *dest,
4771 unsigned char *dest_end, int64_t *time_stamp,
4772 int64_t *time_end, int flush) {
4774 struct vpx_usec_timer tsctimer;
4775 struct vpx_usec_timer ticktimer;
4776 struct vpx_usec_timer cmptimer;
4777 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4779 if (!cpi) return -1;
/* Error trap: internal encoder errors longjmp here; clear state and report
 * the frame as corrupt to the caller. */
4783 if (setjmp(cpi->common.error.jmp)) {
4784 cpi->common.error.setjmp = 0;
4785 vpx_clear_system_state();
4786 return VPX_CODEC_CORRUPT_FRAME;
4789 cpi->common.error.setjmp = 1;
4791 vpx_usec_timer_start(&cmptimer);
4795 #if !CONFIG_REALTIME_ONLY
4796 /* Should we code an alternate reference frame */
4797 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4798 cpi->source_alt_ref_pending) {
/* Peek forward to the frame the GF update is due on; if temporal filtering
 * is enabled the filtered alt_ref_buffer replaces the raw source. */
4799 if ((cpi->source = vp8_lookahead_peek(
4800 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4801 cpi->alt_ref_source = cpi->source;
4802 if (cpi->oxcf.arnr_max_frames > 0) {
4803 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4804 force_src_buffer = &cpi->alt_ref_buffer;
4806 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4807 cm->refresh_alt_ref_frame = 1;
4808 cm->refresh_golden_frame = 0;
4809 cm->refresh_last_frame = 0;
4811 /* Clear Pending alt Ref flag. */
4812 cpi->source_alt_ref_pending = 0;
4813 cpi->is_src_frame_alt_ref = 0;
4819 /* Read last frame source if we are encoding first pass. */
4820 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4821 if ((cpi->last_source =
4822 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
/* Pull the next frame to encode from the lookahead queue. */
4827 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
4830 cpi->is_src_frame_alt_ref =
4831 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4833 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4838 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4839 cpi->un_scaled_source = cpi->Source;
4840 *time_stamp = cpi->source->ts_start;
4841 *time_end = cpi->source->ts_end;
4842 *frame_flags = cpi->source->flags;
4844 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4845 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4849 #if !CONFIG_REALTIME_ONLY
/* On flush at the end of pass 1, emit the final first-pass stats packet. */
4851 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4852 vp8_end_first_pass(cpi); /* get last stats packet */
4853 cpi->twopass.first_pass_done = 1;
4861 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4862 cpi->first_time_stamp_ever = cpi->source->ts_start;
4863 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4866 /* adjust frame rates based on timestamps given */
4867 if (cm->show_frame) {
4868 int64_t this_duration;
4871 if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
4872 this_duration = cpi->source->ts_end - cpi->source->ts_start;
4875 int64_t last_duration;
4877 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
4878 last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4879 /* do a step update if the duration changes by 10% */
4880 if (last_duration) {
4881 step = (int)(((this_duration - last_duration) * 10 / last_duration));
4885 if (this_duration) {
4887 cpi->ref_framerate = 10000000.0 / this_duration;
4889 double avg_duration, interval;
4891 /* Average this frame's rate into the last second's average
4892 * frame rate. If we haven't seen 1 second yet, then average
4893 * over the whole interval seen.
/* Timestamps are in 1/10,000,000-second units here; cap the averaging
 * window at one second. */
4895 interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
4896 if (interval > 10000000.0) interval = 10000000;
4898 avg_duration = 10000000.0 / cpi->ref_framerate;
4899 avg_duration *= (interval - avg_duration + this_duration);
4900 avg_duration /= interval;
4902 cpi->ref_framerate = 10000000.0 / avg_duration;
4904 #if CONFIG_MULTI_RES_ENCODING
4905 if (cpi->oxcf.mr_total_resolutions > 1) {
4906 LOWER_RES_FRAME_INFO *low_res_frame_info =
4907 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
4908 // Frame rate should be the same for all spatial layers in
4909 // multi-res-encoding (simulcast), so we constrain the frame for
4910 // higher layers to be that of lowest resolution. This is needed
4911 // as he application may decide to skip encoding a high layer and
4912 // then start again, in which case a big jump in time-stamps will
4913 // be received for that high layer, which will yield an incorrect
4914 // frame rate (from time-stamp adjustment in above calculation).
4915 if (cpi->oxcf.mr_encoder_id) {
4916 cpi->ref_framerate = low_res_frame_info->low_res_framerate;
4918 // Keep track of frame rate for lowest resolution.
4919 low_res_frame_info->low_res_framerate = cpi->ref_framerate;
4923 if (cpi->oxcf.number_of_layers > 1) {
4926 /* Update frame rates for each layer */
4927 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
4928 for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
4930 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4931 lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
4934 vp8_new_framerate(cpi, cpi->ref_framerate);
4938 cpi->last_time_stamp_seen = cpi->source->ts_start;
4939 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
/* Temporal layers: refresh per-layer contexts and switch to the context of
 * the layer this frame belongs to (explicit id or periodic pattern). */
4942 if (cpi->oxcf.number_of_layers > 1) {
4945 update_layer_contexts(cpi);
4947 /* Restore layer specific context & set frame rate */
4948 if (cpi->temporal_layer_id >= 0) {
4949 layer = cpi->temporal_layer_id;
4953 .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
4955 restore_layer_context(cpi, layer);
4956 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
/* Real-time mode (speed 2): time the encode for adaptive speed control. */
4959 if (cpi->compressor_speed == 2) {
4960 vpx_usec_timer_start(&tsctimer);
4961 vpx_usec_timer_start(&ticktimer);
4964 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;
4966 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
/* On-the-fly bitpacking: carve the destination buffer into a control
 * partition (1/10th) plus num_part token partitions. */
4969 const int num_part = (1 << cm->multi_token_partition);
4970 /* the available bytes in dest */
4971 const unsigned long dest_size = dest_end - dest;
4972 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
4974 unsigned char *dp = dest;
4976 cpi->partition_d[0] = dp;
4977 dp += dest_size / 10; /* reserve 1/10 for control partition */
4978 cpi->partition_d_end[0] = dp;
4980 for (i = 0; i < num_part; ++i) {
4981 cpi->partition_d[i + 1] = dp;
4982 dp += tok_part_buff_size;
4983 cpi->partition_d_end[i + 1] = dp;
4988 /* start with a 0 size frame */
4991 /* Clear down mmx registers */
4992 vpx_clear_system_state();
4994 cm->frame_type = INTER_FRAME;
4995 cm->frame_flags = *frame_flags;
4999 if (cm->refresh_alt_ref_frame)
5001 cm->refresh_golden_frame = 0;
5002 cm->refresh_last_frame = 0;
5006 cm->refresh_golden_frame = 0;
5007 cm->refresh_last_frame = 1;
5011 /* find a free buffer for the new frame */
5014 for (; i < NUM_YV12_BUFFERS; ++i) {
5015 if (!cm->yv12_fb[i].flags) {
5021 assert(i < NUM_YV12_BUFFERS);
/* Dispatch by encoding pass: 1 = first pass, 2 = second pass; otherwise the
 * one-pass / realtime path. */
5023 switch (cpi->pass) {
5024 #if !CONFIG_REALTIME_ONLY
5025 case 1: Pass1Encode(cpi, size, dest, frame_flags); break;
5026 case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
5027 #endif // !CONFIG_REALTIME_ONLY
5029 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
/* Update exponential moving averages of encode / mode-pick time (7/8 old,
 * 1/8 new) used by the adaptive speed logic. */
5033 if (cpi->compressor_speed == 2) {
5034 unsigned int duration, duration2;
5035 vpx_usec_timer_mark(&tsctimer);
5036 vpx_usec_timer_mark(&ticktimer);
5038 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5039 duration2 = (unsigned int)((double)duration / 2);
5041 if (cm->frame_type != KEY_FRAME) {
5042 if (cpi->avg_encode_time == 0) {
5043 cpi->avg_encode_time = duration;
5045 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5051 if (cpi->avg_pick_mode_time == 0) {
5052 cpi->avg_pick_mode_time = duration2;
5054 cpi->avg_pick_mode_time =
5055 (7 * cpi->avg_pick_mode_time + duration2) >> 3;
/* If this frame did not persist its entropy probabilities, roll the frame
 * context back to the saved copy. */
5061 if (cm->refresh_entropy_probs == 0) {
5062 memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5065 /* Save the contexts separately for alt ref, gold and last. */
5066 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5067 if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5069 if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5071 if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5073 /* if its a dropped frame honor the requests on subsequent frames */
5075 cpi->droppable = !frame_is_reference(cpi);
5077 /* return to normal state */
5078 cm->refresh_entropy_probs = 1;
5079 cm->refresh_alt_ref_frame = 0;
5080 cm->refresh_golden_frame = 0;
5081 cm->refresh_last_frame = 1;
5082 cm->frame_type = INTER_FRAME;
5085 /* Save layer specific state */
5086 if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi);
5088 vpx_usec_timer_mark(&cmptimer);
5089 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5091 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
5092 generate_psnr_packet(cpi);
5095 #if CONFIG_INTERNAL_STATS
/* Internal quality statistics: accumulate per-frame (and per-layer) PSNR /
 * SSIM on the reconstructed and deblocked frames. */
5097 if (cpi->pass != 1) {
5098 cpi->bytes += *size;
5100 if (cm->show_frame) {
5101 cpi->common.show_frame_mi = cpi->common.mi;
5104 if (cpi->b_calculate_psnr) {
5105 uint64_t ye, ue, ve;
5107 YV12_BUFFER_CONFIG *orig = cpi->Source;
5108 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5109 unsigned int y_width = cpi->common.Width;
5110 unsigned int y_height = cpi->common.Height;
5111 unsigned int uv_width = (y_width + 1) / 2;
5112 unsigned int uv_height = (y_height + 1) / 2;
5113 int y_samples = y_height * y_width;
5114 int uv_samples = uv_height * uv_width;
5115 int t_samples = y_samples + 2 * uv_samples;
5118 ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
5119 recon->y_stride, y_width, y_height);
5121 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
5122 recon->uv_stride, uv_width, uv_height);
5124 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
5125 recon->uv_stride, uv_width, uv_height);
5127 sq_error = (double)(ye + ue + ve);
5129 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5131 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5132 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5133 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5134 cpi->total_sq_error += sq_error;
5135 cpi->total += frame_psnr;
5138 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5140 double frame_psnr2, frame_ssim2 = 0;
/* Second measurement on the deblocked (post-processed) reconstruction. */
5143 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
5144 cm->filter_level * 10 / 6, 1, 0);
5145 vpx_clear_system_state();
5147 ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
5148 pp->y_stride, y_width, y_height);
5150 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
5151 pp->uv_stride, uv_width, uv_height);
5153 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
5154 pp->uv_stride, uv_width, uv_height);
5156 sq_error2 = (double)(ye + ue + ve);
5158 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5160 cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5161 cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5162 cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5163 cpi->total_sq_error2 += sq_error2;
5164 cpi->totalp += frame_psnr2;
5167 vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);
5169 cpi->summed_quality += frame_ssim2 * weight;
5170 cpi->summed_weights += weight;
5172 if (cpi->oxcf.number_of_layers > 1) {
5175 for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
5176 cpi->frames_in_layer[i]++;
5178 cpi->bytes_in_layer[i] += *size;
5179 cpi->sum_psnr[i] += frame_psnr;
5180 cpi->sum_psnr_p[i] += frame_psnr2;
5181 cpi->total_error2[i] += sq_error;
5182 cpi->total_error2_p[i] += sq_error2;
5183 cpi->sum_ssim[i] += frame_ssim2 * weight;
5184 cpi->sum_weights[i] += weight;
5195 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5197 skiptruecount += cpi->skip_true_count;
5198 skipfalsecount += cpi->skip_false_count;
5206 FILE *f = fopen("skip.stt", "a");
5207 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5209 if (cpi->is_src_frame_alt_ref == 1)
5210 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5218 cpi->common.error.setjmp = 0;
5220 #if CONFIG_MULTITHREAD
5221 /* wait for the lpf thread done */
5222 if (cpi->b_multi_threaded && cpi->b_lpf_running) {
5223 sem_wait(&cpi->h_event_end_lpf);
5224 cpi->b_lpf_running = 0;
/* Return the encoder's reconstructed ("preview") frame to the caller.
 * Alt-ref updates are not previewable; otherwise either the post-processed
 * frame (via vp8_post_proc_frame) or a shallow copy of frame_to_show with
 * the display dimensions patched in is returned.
 * NOTE(review): the #if CONFIG_POSTPROC alternation, the early-return for
 * the alt-ref case, and the return statements are not visible in this
 * listing (elided lines) — structure should be confirmed against the full
 * file. Code left byte-for-byte untouched.
 */
5231 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
5232 vp8_ppflags_t *flags) {
5233 if (cpi->common.refresh_alt_ref_frame) {
5239 cpi->common.show_frame_mi = cpi->common.mi;
5240 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5244 if (cpi->common.frame_to_show) {
/* Shallow copy: dest aliases the reconstruction buffers; only the display
 * dimensions are overwritten. */
5245 *dest = *cpi->common.frame_to_show;
5246 dest->y_width = cpi->common.Width;
5247 dest->y_height = cpi->common.Height;
5248 dest->uv_height = cpi->common.Height / 2;
5255 vpx_clear_system_state();
/* Configure a region-of-interest map via the segmentation feature.
 * map        : per-MB segment ids (rows * cols entries)
 * rows, cols : must match the encoder's MB grid
 * delta_q    : per-segment quantizer deltas in the external 0..63 Q range
 * delta_lf   : per-segment loop-filter deltas (range-checked to +/-63)
 * threshold  : per-segment encode-breakout thresholds
 * Returns -1 on incompatible state or out-of-range inputs.
 * NOTE(review): the declaration of loop index i, the (presumed) NULL-map
 * branch around disable_segmentation, and the return statements are not
 * visible in this listing (elided lines). Code left byte-for-byte
 * untouched.
 */
5260 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5261 unsigned int cols, int delta_q[4], int delta_lf[4],
5262 unsigned int threshold[4]) {
5263 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5264 int internal_delta_q[MAX_MB_SEGMENTS];
5265 const int range = 63;
5268 // This method is currently incompatible with the cyclic refresh method
5269 if (cpi->cyclic_refresh_mode_enabled) return -1;
5271 // Check number of rows and columns match
5272 if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
5276 // Range check the delta Q values and convert the external Q range values
5277 // to internal ones.
5278 if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
5279 (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) {
5283 // Range check the delta lf values
5284 if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
5285 (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) {
5290 disable_segmentation(cpi);
5294 // Translate the external delta q values to internal values.
/* q_trans maps the external 0..63 Q scale onto internal qindex units; sign
 * of the delta is preserved. */
5295 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
5296 internal_delta_q[i] =
5297 (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
5300 /* Set the segmentation Map */
5301 set_segmentation_map(cpi, map);
5303 /* Activate segmentation. */
5304 enable_segmentation(cpi);
5306 /* Set up the quant segment data */
5307 feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5308 feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5309 feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5310 feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5312 /* Set up the loop segment data s */
5313 feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5314 feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5315 feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5316 feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
5318 cpi->segment_encode_breakout[0] = threshold[0];
5319 cpi->segment_encode_breakout[1] = threshold[1];
5320 cpi->segment_encode_breakout[2] = threshold[2];
5321 cpi->segment_encode_breakout[3] = threshold[3];
5323 /* Initialise the feature data structure */
5324 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
/* Install a per-MB active map (rows * cols bytes) controlling which
 * macroblocks the encoder treats as active; enables the map only when the
 * supplied dimensions match the encoder's MB grid, otherwise disables it.
 * NOTE(review): the return statements (presumably 0 on success, nonzero on
 * mismatch) are not visible in this listing (elided lines).
 */
5329 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5330 unsigned int cols) {
5331 if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
5333 memcpy(cpi->active_map, map, rows * cols);
5334 cpi->active_map_enabled = 1;
5336 cpi->active_map_enabled = 0;
/* Set the encoder's internal down-scaling mode per axis. Each mode is
 * accepted only when it is within the supported range (<= ONETWO, i.e. up
 * to 1/2 scaling).
 * NOTE(review): the rejection branches and return statements are not
 * visible in this listing (elided lines).
 */
5345 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
5346 VPX_SCALING vert_mode) {
5347 if (horiz_mode <= ONETWO) {
5348 cpi->common.horiz_scale = horiz_mode;
5353 if (vert_mode <= ONETWO) {
5354 cpi->common.vert_scale = vert_mode;
/* Sum of squared error between the Y planes of two frames, computed over
 * 16x16 blocks with vpx_mse16x16 and accumulated across the plane.
 * NOTE(review): the declarations of the loop indices and the accumulator
 * (Total), the trailing arguments of the vpx_mse16x16 call (presumably an
 * sse out-parameter), and the final return are not visible in this listing
 * (elided lines) — confirm against the full file.
 */
5362 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
5366 unsigned char *src = source->y_buffer;
5367 unsigned char *dst = dest->y_buffer;
5369 /* Loop through the Y plane raw and reconstruction data summing
5370 * (square differences)
5372 for (i = 0; i < source->y_height; i += 16) {
5373 for (j = 0; j < source->y_width; j += 16) {
5375 Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
/* Advance both pointers one 16-row block strip at a time. */
5379 src += 16 * source->y_stride;
5380 dst += 16 * dest->y_stride;
5386 int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }