2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "vp8/common/onyxc_int.h"
16 #include "vp8/common/blockd.h"
18 #include "vp8/common/systemdependent.h"
19 #include "vp8/encoder/quantize.h"
20 #include "vp8/common/alloccommon.h"
22 #include "firstpass.h"
23 #include "vpx_dsp/psnr.h"
24 #include "vpx_scale/vpx_scale.h"
25 #include "vp8/common/extend.h"
27 #include "vp8/common/quant_common.h"
28 #include "segmentation.h"
30 #include "vp8/common/postproc.h"
32 #include "vpx_mem/vpx_mem.h"
33 #include "vp8/common/reconintra.h"
34 #include "vp8/common/swapyv12buffer.h"
35 #include "vp8/common/threading.h"
36 #include "vpx_ports/system_state.h"
37 #include "vpx_ports/vpx_timer.h"
39 #include "vpx_ports/arm.h"
41 #if CONFIG_MULTI_RES_ENCODING
42 #include "mr_dissim.h"
44 #include "encodeframe.h"
50 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
51 extern int vp8_update_coef_context(VP8_COMP *cpi);
52 extern void vp8_update_coef_probs(VP8_COMP *cpi);
55 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
56 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
57 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
59 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
60 YV12_BUFFER_CONFIG *post, int filt_lvl,
61 int low_var_thresh, int flag);
62 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
63 extern unsigned int vp8_get_processor_freq();
64 extern void print_tree_update_probs();
65 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
66 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
68 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
70 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
72 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
74 static void set_default_lf_deltas(VP8_COMP *cpi);
76 extern const int vp8_gf_interval_table[101];
78 #if CONFIG_INTERNAL_STATS
80 #include "vpx_dsp/ssim.h"
86 #ifdef OUTPUT_YUV_DENOISED
87 FILE *yuv_denoised_file;
97 extern int skip_true_count;
98 extern int skip_false_count;
101 #ifdef VP8_ENTROPY_STATS
102 extern int intra_mode_stats[10][10][10];
106 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0 };
108 unsigned int tot_pm = 0;
109 unsigned int cnt_pm = 0;
110 unsigned int tot_ef = 0;
111 unsigned int cnt_ef = 0;
115 extern unsigned __int64 Sectionbits[50];
116 extern int y_modes[5];
117 extern int uv_modes[4];
118 extern int b_modes[10];
120 extern int inter_y_modes[10];
121 extern int inter_uv_modes[4];
122 extern unsigned int inter_b_modes[15];
125 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
127 extern const int qrounding_factors[129];
128 extern const int qzbin_factors[129];
129 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
130 extern const int vp8cx_base_skip_false_prob[128];
132 /* Tables relating active max Q to active min Q */
133 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
137 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
138 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
139 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
140 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
142 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
144 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
145 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
146 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
147 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
148 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
149 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
151 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
152 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
153 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
154 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
155 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
156 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
157 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
158 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
160 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
161 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
162 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
163 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
164 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
165 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
166 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
167 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
169 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
170 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
171 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
172 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
173 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
174 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
175 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
176 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
178 static const unsigned char inter_minq[QINDEX_RANGE] = {
179 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
180 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
181 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
182 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
183 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
184 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
185 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
188 #ifdef PACKET_TESTING
189 extern FILE *vpxlogc;
/* Snapshot the rate-control and coding state of the currently active
 * temporal layer into its LAYER_CONTEXT slot, so restore_layer_context()
 * can later reinstate it when the encoder switches back to this layer.
 * NOTE(review): this extract appears truncated (the closing brace is not
 * visible); confirm against the full source before relying on boundaries.
 */
192 static void save_layer_context(VP8_COMP *cpi) {
193 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
195 /* Save layer dependent coding state */
196 lc->target_bandwidth = cpi->target_bandwidth;
197 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
198 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
199 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
200 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
201 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
202 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
203 lc->buffer_level = cpi->buffer_level;
204 lc->bits_off_target = cpi->bits_off_target;
205 lc->total_actual_bits = cpi->total_actual_bits;
206 lc->worst_quality = cpi->worst_quality;
207 lc->active_worst_quality = cpi->active_worst_quality;
208 lc->best_quality = cpi->best_quality;
209 lc->active_best_quality = cpi->active_best_quality;
210 lc->ni_av_qi = cpi->ni_av_qi;
211 lc->ni_tot_qi = cpi->ni_tot_qi;
212 lc->ni_frames = cpi->ni_frames;
213 lc->avg_frame_qindex = cpi->avg_frame_qindex;
214 lc->rate_correction_factor = cpi->rate_correction_factor;
215 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
216 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
217 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
218 lc->inter_frame_target = cpi->inter_frame_target;
219 lc->total_byte_count = cpi->total_byte_count;
220 lc->filter_level = cpi->common.filter_level;
222 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
/* Per-reference-frame usage counts are copied wholesale; the sizeof uses the
 * cpi-side array so both sides must have identical dimensions. */
224 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
225 sizeof(cpi->mb.count_mb_ref_frame_usage));
/* Inverse of save_layer_context(): make |layer| the active temporal layer
 * and copy its saved rate-control/coding state back into |cpi|.
 * Note the asymmetry with save_layer_context(): worst_quality/best_quality
 * are NOT restored here (only the active_* variants are), as visible below.
 * NOTE(review): extract appears truncated — closing brace not visible.
 */
228 static void restore_layer_context(VP8_COMP *cpi, const int layer) {
229 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
231 /* Restore layer dependent coding state */
232 cpi->current_layer = layer;
233 cpi->target_bandwidth = lc->target_bandwidth;
234 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
235 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
236 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
237 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
238 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
239 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
240 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
241 cpi->buffer_level = lc->buffer_level;
242 cpi->bits_off_target = lc->bits_off_target;
243 cpi->total_actual_bits = lc->total_actual_bits;
244 cpi->active_worst_quality = lc->active_worst_quality;
245 cpi->active_best_quality = lc->active_best_quality;
246 cpi->ni_av_qi = lc->ni_av_qi;
247 cpi->ni_tot_qi = lc->ni_tot_qi;
248 cpi->ni_frames = lc->ni_frames;
249 cpi->avg_frame_qindex = lc->avg_frame_qindex;
250 cpi->rate_correction_factor = lc->rate_correction_factor;
251 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
252 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
253 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
254 cpi->inter_frame_target = lc->inter_frame_target;
255 cpi->total_byte_count = lc->total_byte_count;
256 cpi->common.filter_level = lc->filter_level;
258 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
260 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
261 sizeof(cpi->mb.count_mb_ref_frame_usage));
/* Scale val by num/denom using 64-bit intermediates so the multiply cannot
 * overflow 32 bits before the divide.
 * NOTE(review): extract is truncated here — the declarations of llval/llnum
 * (presumably int64_t copies of val/num) and the closing brace are missing
 * from this view; confirm against the full source.
 */
264 static int rescale(int val, int num, int denom) {
266 int64_t llden = denom;
269 return (int)(llval * llnum / llden);
/* Initialise the LAYER_CONTEXT for one temporal layer: derive its framerate
 * and target bandwidth from the global config, convert the ms-denominated
 * buffer levels into bits via rescale(), and reset its rate-control state.
 * NOTE(review): extract is truncated — the |layer| parameter declaration and
 * some lines (including the closing brace) are not visible in this view.
 */
272 static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
274 double prev_layer_framerate) {
275 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
277 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
278 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
280 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
281 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
282 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
284 lc->starting_buffer_level =
285 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
/* A zero config value means "no explicit level": default to 1/8 second of
 * bandwidth (bits). Otherwise convert the ms value to bits. */
287 if (oxcf->optimal_buffer_level == 0) {
288 lc->optimal_buffer_level = lc->target_bandwidth / 8;
290 lc->optimal_buffer_level =
291 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
294 if (oxcf->maximum_buffer_size == 0) {
295 lc->maximum_buffer_size = lc->target_bandwidth / 8;
297 lc->maximum_buffer_size =
298 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
301 /* Work out the average size of a frame within this layer */
/* Layer bitrates/framerates are cumulative, so the per-layer share is the
 * delta against the previous layer. NOTE(review): divides by
 * (framerate - prev_layer_framerate); presumably guaranteed nonzero by the
 * caller's config validation — confirm. */
303 lc->avg_frame_size_for_layer =
304 (int)((cpi->oxcf.target_bitrate[layer] -
305 cpi->oxcf.target_bitrate[layer - 1]) *
306 1000 / (lc->framerate - prev_layer_framerate));
309 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
310 lc->active_best_quality = cpi->oxcf.best_allowed_q;
311 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
313 lc->buffer_level = lc->starting_buffer_level;
314 lc->bits_off_target = lc->starting_buffer_level;
316 lc->total_actual_bits = 0;
/* Fresh layer: neutral correction factors and no inter-frame target yet. */
320 lc->rate_correction_factor = 1.0;
321 lc->key_frame_rate_correction_factor = 1.0;
322 lc->gf_rate_correction_factor = 1.0;
323 lc->inter_frame_target = 0;
326 // Upon a run-time change in temporal layers, reset the layer context parameters
327 // for any "new" layers. For "existing" layers, let them inherit the parameters
328 // from the previous layer state (at the same layer #). In future we may want
329 // to better map the previous layer state(s) to the "new" ones.
/* NOTE(review): extract is truncated — the declaration of loop index |i|,
 * some closing braces, and parts of several statements are not visible. */
330 static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
331 const int prev_num_layers) {
333 double prev_layer_framerate = 0;
334 const int curr_num_layers = cpi->oxcf.number_of_layers;
335 // If the previous state was 1 layer, get current layer context from cpi.
336 // We need this to set the layer context for the new layers below.
337 if (prev_num_layers == 1) {
338 cpi->current_layer = 0;
339 save_layer_context(cpi);
341 for (i = 0; i < curr_num_layers; ++i) {
342 LAYER_CONTEXT *lc = &cpi->layer_context[i];
343 if (i >= prev_num_layers) {
344 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
346 // The initial buffer levels are set based on their starting levels.
347 // We could set the buffer levels based on the previous state (normalized
348 // properly by the layer bandwidths) but we would need to keep track of
349 // the previous set of layer bandwidths (i.e., target_bitrate[i])
350 // before the layer change. For now, reset to the starting levels.
352 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
353 lc->bits_off_target = lc->buffer_level;
354 // TODO(marpan): Should we set the rate_correction_factor and
355 // active_worst/best_quality to values derived from the previous layer
356 // state (to smooth-out quality dips/rate fluctuation at transition)?
358 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
359 // is not set for 1 layer, and the restore_layer_context/save_context()
360 // are not called in the encoding loop, so we need to call it here to
361 // pass the layer context state to |cpi|.
362 if (curr_num_layers == 1) {
363 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
365 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
366 lc->bits_off_target = lc->buffer_level;
367 restore_layer_context(cpi, 0);
/* Remember this layer's framerate for the next iteration's delta math. */
369 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
/* Per-frame feature setup: flag the segmentation map/data for update when
 * segmentation is on, clear all loop-filter deltas, then install the
 * default mode/ref loop-filter deltas.
 * NOTE(review): extract appears truncated — the else keyword between the
 * two flag-assignment pairs and the closing brace are not visible.
 */
373 static void setup_features(VP8_COMP *cpi) {
374 // If segmentation enabled set the update flags
375 if (cpi->mb.e_mbd.segmentation_enabled) {
376 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
377 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
379 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
380 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
383 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
384 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
385 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
386 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
/* The last_* arrays are sized via the non-last arrays; both pairs must have
 * matching sizes for these memsets to be correct. */
387 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
388 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
389 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
390 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
392 set_default_lf_deltas(cpi);
395 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
/* One-time global encoder initialisation, guarded by a static flag.
 * NOTE(review): extract is truncated — the init_done check/set and the
 * closing brace are not visible in this view; presumably the usual
 * "if (!init_done) { ... init_done = 1; }" pattern — confirm.
 */
397 void vp8_initialize_enc(void) {
398 static volatile int init_done = 0;
402 vp8_init_intra_predictors();
/* Free all per-compressor allocations: MV/reference bookkeeping buffers,
 * segmentation and activity maps, frame buffers, lookahead buffers, and
 * (when multithreaded) per-row mutexes and sync state. Pointers are nulled
 * after freeing so a double call is harmless.
 * NOTE(review): extract is truncated — loop index declaration, some closing
 * braces, and intervening statements are not visible.
 */
407 static void dealloc_compressor_data(VP8_COMP *cpi) {
408 vpx_free(cpi->tplist);
411 /* Delete last frame MV storage buffers */
415 vpx_free(cpi->lf_ref_frame_sign_bias);
416 cpi->lf_ref_frame_sign_bias = 0;
418 vpx_free(cpi->lf_ref_frame);
419 cpi->lf_ref_frame = 0;
421 /* Delete segmentation map */
422 vpx_free(cpi->segmentation_map);
423 cpi->segmentation_map = 0;
425 vpx_free(cpi->active_map);
428 vp8_de_alloc_frame_buffers(&cpi->common);
430 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
431 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
432 dealloc_raw_frame_buffers(cpi);
437 /* Structure used to monitor GF usage */
438 vpx_free(cpi->gf_active_flags);
439 cpi->gf_active_flags = 0;
441 /* Activity mask based per mb zbin adjustments */
442 vpx_free(cpi->mb_activity_map);
443 cpi->mb_activity_map = 0;
445 vpx_free(cpi->mb.pip);
448 #if CONFIG_MULTITHREAD
449 /* De-allocate mutex */
450 if (cpi->pmutex != NULL) {
451 VP8_COMMON *const pc = &cpi->common;
/* One mutex per macroblock row; destroy each before freeing the array. */
454 for (i = 0; i < pc->mb_rows; ++i) {
455 pthread_mutex_destroy(&cpi->pmutex[i]);
457 vpx_free(cpi->pmutex);
461 vpx_free(cpi->mt_current_mb_col);
462 cpi->mt_current_mb_col = NULL;
/* Turn segmentation on and mark both the map and the segment data as
 * needing to be signalled in the next coded frame.
 * NOTE(review): closing brace not visible in this truncated extract.
 */
466 static void enable_segmentation(VP8_COMP *cpi) {
467 /* Set the appropriate feature bit */
468 cpi->mb.e_mbd.segmentation_enabled = 1;
469 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
470 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
/* Turn segmentation off; the update flags are left untouched here.
 * NOTE(review): closing brace not visible in this truncated extract.
 */
472 static void disable_segmentation(VP8_COMP *cpi) {
473 /* Clear the appropriate feature bit */
474 cpi->mb.e_mbd.segmentation_enabled = 0;
477 /* Valid values for a segment are 0 to 3
478 * Segmentation map is arrange as [Rows][Columns]
/* Copies the caller's per-macroblock segment-id map (mb_rows * mb_cols
 * bytes) into the compressor and flags it for transmission.
 * NOTE(review): extract truncated — the comment terminator above and the
 * function's closing brace are not visible. */
480 static void set_segmentation_map(VP8_COMP *cpi,
481 unsigned char *segmentation_map) {
482 /* Copy in the new segmentation map */
483 memcpy(cpi->segmentation_map, segmentation_map,
484 (cpi->common.mb_rows * cpi->common.mb_cols));
486 /* Signal that the map should be updated. */
487 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
488 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
491 /* The values given for each segment can be either deltas (from the default
492 * value chosen for the frame) or absolute values.
494 * Valid range for abs values is:
495 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
496 * Valid range for delta values are:
497 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
499 * abs_delta = SEGMENT_DELTADATA (deltas)
500 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
/* Stores the delta/absolute interpretation flag and copies the full
 * per-segment feature table into the compressor.
 * NOTE(review): extract truncated — comment terminator and closing brace
 * not visible. */
503 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
504 unsigned char abs_delta) {
505 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
506 memcpy(cpi->segment_feature_data, feature_data,
507 sizeof(cpi->segment_feature_data));
510 /* A simple function to cyclically refresh the background at a lower Q */
/* Walks the frame's macroblocks in a rotating window, marking likely
 * background MBs (segment 1) for a quality boost at cyclic_refresh_q, and —
 * under aggressive temporal denoising — uses segmentation to relax the loop
 * filter on long-standing ZEROMV-LAST blocks. Finishes by enabling
 * segmentation and installing the quant/loop-filter segment deltas.
 * NOTE(review): extract heavily truncated — loop index declaration, the
 * do { } opener, several assignments/braces are not visible.
 */
511 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
512 unsigned char *seg_map = cpi->segmentation_map;
513 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
515 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
516 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
/* Refreshed blocks get roughly half the frame Q. */
518 cpi->cyclic_refresh_q = Q / 2;
520 if (cpi->oxcf.screen_content_mode) {
521 // Modify quality ramp-up based on Q. Above some Q level, increase the
522 // number of blocks to be refreshed, and reduce it below the threshold.
523 // Turn-off under certain conditions (i.e., away from key frame, and if
524 // we are at good quality (low Q) and most of the blocks were
526 // in previous frame.
527 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
528 if (Q >= qp_thresh) {
529 cpi->cyclic_refresh_mode_max_mbs_perframe =
530 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
531 } else if (cpi->frames_since_key > 250 && Q < 20 &&
532 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
533 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
535 cpi->cyclic_refresh_mode_max_mbs_perframe =
536 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
538 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
541 // Set every macroblock to be eligible for update.
542 // For key frame this will reset seg map to 0.
543 memset(cpi->segmentation_map, 0, mbs_in_frame);
545 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
546 /* Cycle through the macro_block rows */
547 /* MB loop to set local segmentation map */
548 i = cpi->cyclic_refresh_mode_index;
549 assert(i < mbs_in_frame);
551 /* If the MB is as a candidate for clean up then mark it for
552 * possible boost/refresh (segment 1) The segment id may get
553 * reset to 0 later if the MB gets coded anything other than
554 * last frame 0,0 as only (last frame 0,0) MBs are eligible for
555 * refresh : that is to say Mbs likely to be background blocks.
/* Negative map entries count down frames until the MB becomes a refresh
 * candidate again. */
557 if (cpi->cyclic_refresh_map[i] == 0) {
560 } else if (cpi->cyclic_refresh_map[i] < 0) {
561 cpi->cyclic_refresh_map[i]++;
/* Wrap the scan position and stop after one full revolution. */
565 if (i == mbs_in_frame) i = 0;
567 } while (block_count && i != cpi->cyclic_refresh_mode_index);
/* Resume the scan from here on the next frame. */
569 cpi->cyclic_refresh_mode_index = i;
571 #if CONFIG_TEMPORAL_DENOISING
572 if (cpi->oxcf.noise_sensitivity > 0) {
573 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
574 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
575 (cpi->frames_since_key >
576 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
577 // Under aggressive denoising, use segmentation to turn off loop
578 // filter below some qp thresh. The filter is reduced for all
579 // blocks that have been encoded as ZEROMV LAST x frames in a row,
580 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
581 // This is to avoid "dot" artifacts that can occur from repeated
582 // loop filtering on noisy input source.
583 cpi->cyclic_refresh_q = Q;
584 // lf_adjustment = -MAX_LOOP_FILTER;
586 for (i = 0; i < mbs_in_frame; ++i) {
587 seg_map[i] = (cpi->consec_zero_last[i] >
588 cpi->denoiser.denoise_pars.consec_zerolast)
597 /* Activate segmentation. */
598 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
599 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
600 enable_segmentation(cpi);
602 /* Set up the quant segment data */
/* Segment 1 carries the Q delta (boost); segments 0/2/3 are neutral. */
603 feature_data[MB_LVL_ALT_Q][0] = 0;
604 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
605 feature_data[MB_LVL_ALT_Q][2] = 0;
606 feature_data[MB_LVL_ALT_Q][3] = 0;
608 /* Set up the loop segment data */
609 feature_data[MB_LVL_ALT_LF][0] = 0;
610 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
611 feature_data[MB_LVL_ALT_LF][2] = 0;
612 feature_data[MB_LVL_ALT_LF][3] = 0;
614 /* Initialise the feature data structure */
615 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
/* Install the default loop-filter delta tables: intra frames get a stronger
 * filter (+2), golden/altref a weaker one (-2); among modes, BPRED and
 * split-MV get +4, zero-MV gets a large negative delta in realtime mode.
 * NOTE(review): extract truncated — the else keyword between the two
 * mode_lf_deltas[1] assignments and the closing brace are not visible.
 */
618 static void set_default_lf_deltas(VP8_COMP *cpi) {
619 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
620 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
622 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
623 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
625 /* Test of ref frame deltas */
626 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
627 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
628 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
629 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
631 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
633 if (cpi->oxcf.Mode == MODE_REALTIME) {
634 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
636 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
639 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
640 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
643 /* Convenience macros for mapping speed and mode into a continuous
646 #define GOOD(x) (x + 1)
647 #define RT(x) (x + 7)
/* Look up a value for |speed| in a (value, threshold) interleaved table
 * built with the GOOD()/RT() macros, scanning until the speed threshold
 * exceeds |speed|.
 * NOTE(review): extract heavily truncated — the local result variable, the
 * do { } opener, the return, and the closing brace are missing from this
 * view; confirm semantics against the full source.
 */
649 static int speed_map(int speed, const int *map) {
654 } while (speed >= *map++);
658 static const int thresh_mult_map_znn[] = {
659 /* map common to zero, nearest, and near */
660 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
663 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
664 2000, RT(0), 1000, RT(1),
665 2000, RT(7), INT_MAX, INT_MAX };
667 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
668 5000, GOOD(3), 7500, RT(0),
669 2500, RT(1), 5000, RT(6),
672 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
673 2000, RT(0), 0, RT(1),
674 1000, RT(2), 2000, RT(7),
677 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
678 RT(0), 2000, INT_MAX };
680 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
681 2500, GOOD(5), 4000, RT(0),
682 2000, RT(2), 2500, RT(5),
685 static const int thresh_mult_map_split1[] = {
686 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
687 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
690 static const int thresh_mult_map_split2[] = {
691 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
692 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
695 static const int mode_check_freq_map_zn2[] = {
696 /* {zero,nearest}{2,3} */
697 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
700 static const int mode_check_freq_map_vhbpred[] = {
701 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
704 static const int mode_check_freq_map_near2[] = {
705 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
706 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
709 static const int mode_check_freq_map_new1[] = {
710 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
713 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
715 1 << 3, RT(11), 1 << 4, RT(12),
718 static const int mode_check_freq_map_split1[] = {
719 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
722 static const int mode_check_freq_map_split2[] = {
723 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
/* Configure all encoder speed/quality trade-offs from the compressor speed
 * mode and Speed setting: mode-search threshold multipliers and check
 * frequencies (via the speed_map tables), search method, sub-pixel search
 * depth, quantizer/DCT variants, loop-filter type, and the function
 * pointers that realise those choices.
 * NOTE(review): extract heavily truncated — the switch statement and most
 * case labels, several local declarations (i, ref_frames, min, thresh,
 * total_skip), else keywords, and many closing braces are not visible in
 * this view; structural comments below are hedged accordingly.
 */
726 void vp8_set_speed_features(VP8_COMP *cpi) {
727 SPEED_FEATURES *sf = &cpi->sf;
728 int Mode = cpi->compressor_speed;
729 int Speed = cpi->Speed;
731 VP8_COMMON *cm = &cpi->common;
/* Remembered so the quantizer is only re-initialised if the choice of
 * improved_quant actually changes (see the check near the end). */
732 int last_improved_quant = sf->improved_quant;
735 /* Initialise default mode frequency sampling variables */
736 for (i = 0; i < MAX_MODES; ++i) {
737 cpi->mode_check_freq[i] = 0;
740 cpi->mb.mbs_tested_so_far = 0;
741 cpi->mb.mbs_zero_last_dot_suppress = 0;
743 /* best quality defaults */
745 sf->search_method = NSTEP;
746 sf->improved_quant = 1;
747 sf->improved_dct = 1;
750 sf->quarter_pixel_search = 1;
751 sf->half_pixel_search = 1;
752 sf->iterative_sub_pixel = 1;
753 sf->optimize_coefficients = 1;
754 sf->use_fastquant_for_pick = 0;
755 sf->no_skip_block4x4_search = 1;
758 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
759 sf->improved_mv_pred = 1;
761 /* default thresholds to 0 */
762 for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
764 /* Count enabled references */
766 if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
767 if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
768 if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
770 /* Convert speed to continuous range, with clamping */
773 } else if (Mode == 2) {
776 if (Speed > 5) Speed = 5;
/* Threshold multipliers: LAST-frame zero/nearest/near and DC are always
 * evaluated; everything else is looked up by speed. */
780 sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
781 sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
783 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
784 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
785 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
786 speed_map(Speed, thresh_mult_map_znn);
788 sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
789 speed_map(Speed, thresh_mult_map_vhpred);
790 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
791 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
792 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
793 sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
794 speed_map(Speed, thresh_mult_map_new2);
795 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
796 sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
797 speed_map(Speed, thresh_mult_map_split2);
799 // Special case for temporal layers.
800 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
801 // used as second reference. We don't modify thresholds for ALTREF case
802 // since ALTREF is usually used as long-term reference in temporal layers.
803 if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
804 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
805 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
806 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
807 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
808 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
809 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
811 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
812 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
813 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
/* Mode check frequencies: 0 means "check every macroblock". */
817 cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
818 cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
819 cpi->mode_check_freq[THR_DC] = 0; /* always */
821 cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
822 cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
823 speed_map(Speed, mode_check_freq_map_zn2);
825 cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
826 speed_map(Speed, mode_check_freq_map_near2);
828 cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
829 cpi->mode_check_freq[THR_B_PRED] =
830 speed_map(Speed, mode_check_freq_map_vhbpred);
831 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed, mode_check_freq_map_new1);
832 cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
833 speed_map(Speed, mode_check_freq_map_new2);
834 cpi->mode_check_freq[THR_SPLIT1] =
835 speed_map(Speed, mode_check_freq_map_split1);
836 cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
837 speed_map(Speed, mode_check_freq_map_split2);
/* Presumably the start of a switch (Mode) here in the full source. */
840 #if !CONFIG_REALTIME_ONLY
841 case 0: /* best quality mode */
843 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
848 /* Disable coefficient optimization above speed 0 */
849 sf->optimize_coefficients = 0;
850 sf->use_fastquant_for_pick = 1;
851 sf->no_skip_block4x4_search = 0;
857 sf->improved_quant = 0;
858 sf->improved_dct = 0;
860 /* Only do recode loop on key frames, golden frames and
868 sf->recode_loop = 0; /* recode loop off */
869 sf->RD = 0; /* Turn rd off */
873 sf->auto_filter = 0; /* Faster selection of loop filter */
879 sf->optimize_coefficients = 0;
882 sf->iterative_sub_pixel = 1;
883 sf->search_method = NSTEP;
886 sf->improved_quant = 0;
887 sf->improved_dct = 0;
889 sf->use_fastquant_for_pick = 1;
890 sf->no_skip_block4x4_search = 0;
894 if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
902 sf->auto_filter = 0; /* Faster selection of loop filter */
903 sf->search_method = HEX;
904 sf->iterative_sub_pixel = 0;
/* Adaptive thresholding from the per-MB error histogram (error_bins):
 * derive a skip estimate and raise NEW-mv thresholds accordingly. */
908 unsigned int sum = 0;
909 unsigned int total_mbs = cm->MBs;
911 unsigned int total_skip;
915 if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
919 for (i = 0; i < min; ++i) {
920 sum += cpi->mb.error_bins[i];
926 /* i starts from 2 to make sure thresh started from 2048 */
927 for (; i < 1024; ++i) {
928 sum += cpi->mb.error_bins[i];
931 (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
939 if (thresh < 2000) thresh = 2000;
941 if (ref_frames > 1) {
942 sf->thresh_mult[THR_NEW1] = thresh;
943 sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
944 sf->thresh_mult[THR_NEAR1] = thresh >> 1;
947 if (ref_frames > 2) {
948 sf->thresh_mult[THR_NEW2] = thresh << 1;
949 sf->thresh_mult[THR_NEAREST2] = thresh;
950 sf->thresh_mult[THR_NEAR2] = thresh;
953 if (ref_frames > 3) {
954 sf->thresh_mult[THR_NEW3] = thresh << 1;
955 sf->thresh_mult[THR_NEAREST3] = thresh;
956 sf->thresh_mult[THR_NEAR3] = thresh;
959 sf->improved_mv_pred = 0;
962 if (Speed > 8) sf->quarter_pixel_search = 0;
964 if (cm->version == 0) {
965 cm->filter_type = NORMAL_LOOPFILTER;
967 if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
969 cm->filter_type = SIMPLE_LOOPFILTER;
972 /* This has a big hit on quality. Last resort */
973 if (Speed >= 15) sf->half_pixel_search = 0;
975 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
979 /* Slow quant, dct and trellis not worthwhile for first pass
980 * so make sure they are always turned off.
982 if (cpi->pass == 1) {
983 sf->improved_quant = 0;
984 sf->optimize_coefficients = 0;
985 sf->improved_dct = 0;
/* Install motion-search setup matching the chosen search method. */
988 if (cpi->sf.search_method == NSTEP) {
989 vp8_init3smotion_compensation(&cpi->mb,
990 cm->yv12_fb[cm->lst_fb_idx].y_stride);
991 } else if (cpi->sf.search_method == DIAMOND) {
992 vp8_init_dsmotion_compensation(&cpi->mb,
993 cm->yv12_fb[cm->lst_fb_idx].y_stride);
996 if (cpi->sf.improved_dct) {
997 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
998 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1000 /* No fast FDCT defined for any platform at this time. */
1001 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1002 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1005 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1007 if (cpi->sf.improved_quant) {
1008 cpi->mb.quantize_b = vp8_regular_quantize_b;
1010 cpi->mb.quantize_b = vp8_fast_quantize_b;
/* Only rebuild quantizer tables when the quantizer variant changed. */
1012 if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
/* Sub-pixel refinement: iterative > quarter-pel > half-pel > none. */
1014 if (cpi->sf.iterative_sub_pixel == 1) {
1015 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1016 } else if (cpi->sf.quarter_pixel_search) {
1017 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1018 } else if (cpi->sf.half_pixel_search) {
1019 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1021 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1024 if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
1025 cpi->mb.optimize = 1;
1027 cpi->mb.optimize = 0;
/* Full-pixel-only streams never do fractional refinement. */
1030 if (cpi->common.full_pixel) {
1031 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1035 frames_at_speed[cpi->Speed]++;
/* Allocate the raw-source lookahead ring buffer and, when temporal alt-ref
 * filtering is compiled in, the MB-aligned altref frame buffer.
 * Any allocation failure aborts encoding via vpx_internal_error(), which
 * longjmps back to the setjmp point in vp8_create_compressor. */
1041 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1042 #if VP8_TEMPORAL_ALT_REF
/* Round the configured dimensions up to a whole number of 16x16 macroblocks. */
1043 int width = (cpi->oxcf.Width + 15) & ~15;
1044 int height = (cpi->oxcf.Height + 15) & ~15;
/* Lookahead depth is driven by lag_in_frames; the unpadded source size is
 * passed here (the lookahead module does its own alignment internally —
 * TODO confirm against vp8_lookahead_init). */
1047 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1048 cpi->oxcf.lag_in_frames);
1049 if (!cpi->lookahead) {
1050 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1051 "Failed to allocate lag buffers");
1054 #if VP8_TEMPORAL_ALT_REF
/* Altref working buffer uses the MB-aligned dimensions computed above. */
1056 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1057 VP8BORDERINPIXELS)) {
1058 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1059 "Failed to allocate altref buffer");
/* Release the buffers created by alloc_raw_frame_buffers(): the altref
 * frame (when compiled in) and the lookahead queue.  Safe to call before
 * a successful allocation only if the destroy helpers tolerate it —
 * NOTE(review): confirm vp8_lookahead_destroy handles a NULL lookahead. */
1065 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1066 #if VP8_TEMPORAL_ALT_REF
1067 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1069 vp8_lookahead_destroy(cpi->lookahead);
/* (Re)allocate the per-macroblock PARTITION_INFO array.
 * Returns 1 on allocation failure, 0 on success (success path elided here).
 * The +1 on both dimensions provides a border row/column; cpi->mb.pi is
 * then pointed one stride+1 into the buffer so that index 0 is the first
 * real macroblock while border entries remain addressable at -1 offsets. */
1072 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
/* Free any previous allocation first — this function is called again on
 * resolution changes (see vp8_alloc_compressor_data). */
1073 vpx_free(cpi->mb.pip);
1076 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1077 sizeof(PARTITION_INFO));
1078 if (!cpi->mb.pip) return 1;
1080 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
/* Allocate (or reallocate, after a size/config change) all per-frame-size
 * encoder state: frame buffers, partition data, token buffer, GF/activity
 * maps, MV-prediction arrays, segmentation/active maps, multithread sync
 * structures, and the temporal denoiser.  All failures abort via
 * vpx_internal_error()/CHECK_MEM_ERROR, longjmp'ing to the caller's
 * setjmp handler. */
1085 void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1086 VP8_COMMON *cm = &cpi->common;
1088 int width = cm->Width;
1089 int height = cm->Height;
1090 #if CONFIG_MULTITHREAD
/* Remember the previous row count so the old per-row mutexes can be
 * destroyed before the new set is created below. */
1091 int prev_mb_rows = cm->mb_rows;
1094 if (vp8_alloc_frame_buffers(cm, width, height)) {
1095 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1096 "Failed to allocate frame buffers");
1099 if (vp8_alloc_partition_data(cpi)) {
1100 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1101 "Failed to allocate partition data");
/* Round dimensions up to a multiple of 16 for the scratch frame buffers. */
1104 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1106 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1108 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1109 VP8BORDERINPIXELS)) {
1110 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1111 "Failed to allocate last frame buffer");
1114 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1115 VP8BORDERINPIXELS)) {
1116 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1117 "Failed to allocate scaled source buffer");
/* Token buffer sizing: 24 partitions-worth of 16 tokens per MB.  In the
 * on-the-fly-bitpacking realtime build only a small per-thread scratch
 * area is needed; otherwise size for the whole frame. */
1123 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1124 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1126 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1128 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1131 /* Data used for real time vc mode to see if gf needs refreshing */
1132 cpi->zeromv_count = 0;
1134 /* Structures used to monitor GF usage */
1135 vpx_free(cpi->gf_active_flags);
1137 cpi->gf_active_flags,
1138 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1139 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1141 vpx_free(cpi->mb_activity_map);
1143 cpi->mb_activity_map,
1144 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1146 /* allocate memory for storing last frame's MVs for MV prediction. */
/* The +2 on each dimension gives a one-MB border on every side. */
1147 vpx_free(cpi->lfmv);
1148 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1149 sizeof(*cpi->lfmv)));
1150 vpx_free(cpi->lf_ref_frame_sign_bias);
1151 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1152 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1153 sizeof(*cpi->lf_ref_frame_sign_bias)));
1154 vpx_free(cpi->lf_ref_frame);
1155 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1156 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1157 sizeof(*cpi->lf_ref_frame)));
1159 /* Create the encoder segmentation map and set all entries to 0 */
1160 vpx_free(cpi->segmentation_map);
1162 cpi->segmentation_map,
1163 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1164 cpi->cyclic_refresh_mode_index = 0;
1165 vpx_free(cpi->active_map);
1166 CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1167 sizeof(*cpi->active_map)));
/* All MBs start active (1 = encode normally). */
1168 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1170 #if CONFIG_MULTITHREAD
/* Row-sync granularity for threaded encoding scales with frame width:
 * wider frames sync less often per row. */
1172 cpi->mt_sync_range = 1;
1173 } else if (width <= 1280) {
1174 cpi->mt_sync_range = 4;
1175 } else if (width <= 2560) {
1176 cpi->mt_sync_range = 8;
1178 cpi->mt_sync_range = 16;
1181 if (cpi->oxcf.multi_threaded > 1) {
1184 /* De-allocate and re-allocate mutex */
1185 if (cpi->pmutex != NULL) {
/* Destroy with the OLD row count captured above, not the new cm->mb_rows. */
1186 for (i = 0; i < prev_mb_rows; ++i) {
1187 pthread_mutex_destroy(&cpi->pmutex[i]);
1189 vpx_free(cpi->pmutex);
1193 CHECK_MEM_ERROR(cpi->pmutex,
1194 vpx_malloc(sizeof(*cpi->pmutex) * cm->mb_rows));
1196 for (i = 0; i < cm->mb_rows; ++i) {
1197 pthread_mutex_init(&cpi->pmutex[i], NULL);
1201 vpx_free(cpi->mt_current_mb_col);
1202 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1203 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1208 vpx_free(cpi->tplist);
1209 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1211 #if CONFIG_TEMPORAL_DENOISING
1212 if (cpi->oxcf.noise_sensitivity > 0) {
/* Free-then-allocate so a resize gets a denoiser of the new dimensions. */
1213 vp8_denoiser_free(&cpi->denoiser);
1214 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1215 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1216 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1217 "Failed to allocate denoiser");
/* Monotonic lookup table mapping the 64-step quality range exposed in
 * VP8_CONFIG (0..63) onto the codec's internal quantizer index (0..127).
 * Used in vp8_change_config() to translate worst/best/fixed/alt/key/gold
 * q settings, and inverted by vp8_reverse_trans() below. */
1224 static const int q_trans[] = {
1225 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
1226 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
1227 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
1228 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
/* Inverse of the q_trans[] mapping: return the smallest external quality
 * index (0..63) whose internal quantizer value is >= x.  Relies on
 * q_trans[] being monotonically non-decreasing. */
1231 int vp8_reverse_trans(int x) {
1234 for (i = 0; i < 64; ++i) {
1235 if (q_trans[i] >= x) return i;
/* Update all rate-control state derived from the frame rate: per-frame
 * bandwidth budgets and the maximum golden/alt-ref frame interval.
 * Called whenever the observed or configured frame rate changes. */
1240 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
/* Guard against nonsense/zero rates (avoids divide-by-zero below). */
1241 if (framerate < .1) framerate = 30;
1243 cpi->framerate = framerate;
1244 cpi->output_framerate = framerate;
1245 cpi->per_frame_bandwidth =
1246 (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1247 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
/* Minimum per-frame budget is a configured percentage of the average. */
1248 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1249 cpi->oxcf.two_pass_vbrmin_section / 100);
1251 /* Set Maximum gf/arf interval */
1252 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1254 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1256 /* Extended interval for genuinely static scenes */
1257 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1259 /* Special conditions when altr ref frame enabled in lagged compress mode */
/* With alt-ref + lag, the GF interval cannot exceed the lag window. */
1260 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1261 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1262 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1265 if (cpi->twopass.static_scene_max_gf_interval >
1266 cpi->oxcf.lag_in_frames - 1) {
1267 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
/* Finally cap the normal interval by the static-scene interval. */
1271 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1272 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
/* One-time configuration of a freshly created compressor instance:
 * seed frame rate / reference flags / buffer levels, then delegate the
 * shared configuration logic to vp8_change_config().  Runs only from
 * vp8_create_compressor(). */
1276 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1277 VP8_COMMON *cm = &cpi->common;
1282 cpi->auto_adjust_gold_quantizer = 1;
1284 cm->version = oxcf->Version;
1285 vp8_setup_version(cm);
1287 /* Frame rate is not available on the first frame, as it's derived from
1288 * the observed timestamps. The actual value used here doesn't matter
1289 * too much, as it will adapt quickly.
1291 if (oxcf->timebase.num > 0) {
1293 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1295 cpi->framerate = 30;
1298 /* If the reciprocal of the timebase seems like a reasonable framerate,
1299 * then use that as a guess, otherwise use 30.
1301 if (cpi->framerate > 180) cpi->framerate = 30;
1303 cpi->ref_framerate = cpi->framerate;
/* Allow all three reference frames by default. */
1305 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1307 cm->refresh_golden_frame = 0;
1308 cm->refresh_last_frame = 1;
1309 cm->refresh_entropy_probs = 1;
1311 /* change includes all joint functionality */
1312 vp8_change_config(cpi, oxcf);
1314 /* Initialize active best and worst q and average q values. */
1315 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1316 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1317 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1319 /* Initialise the starting buffer levels */
1320 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1321 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1323 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1324 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1325 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1326 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1328 cpi->total_actual_bits = 0;
1329 cpi->total_target_vs_actual = 0;
1331 /* Temporal scalabilty */
1332 if (cpi->oxcf.number_of_layers > 1) {
1334 double prev_layer_framerate = 0;
1336 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1337 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1338 prev_layer_framerate =
1339 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1343 #if VP8_TEMPORAL_ALT_REF
/* Precompute 0x80000/i reciprocals used by the temporal filter;
 * index 0 is unused and left as 0. */
1347 cpi->fixed_divide[0] = 0;
1349 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
/* Refresh each temporal layer's snapshot (frame rate, target bandwidth,
 * buffer levels, average frame size) after a configuration change.
 * No-op unless more than one temporal layer is configured. */
1354 static void update_layer_contexts(VP8_COMP *cpi) {
1355 VP8_CONFIG *oxcf = &cpi->oxcf;
1357 /* Update snapshots of the layer contexts to reflect new parameters */
1358 if (oxcf->number_of_layers > 1) {
1360 double prev_layer_framerate = 0;
1362 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1363 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1364 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1366 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
/* target_bitrate is in kbit/s; layer bandwidth is stored in bit/s. */
1367 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1369 lc->starting_buffer_level = rescale(
1370 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
/* A zero optimal/maximum buffer setting means "derive a default"
 * (one eighth of the layer bandwidth). */
1372 if (oxcf->optimal_buffer_level == 0) {
1373 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1375 lc->optimal_buffer_level = rescale(
1376 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1379 if (oxcf->maximum_buffer_size == 0) {
1380 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1382 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1383 lc->target_bandwidth, 1000);
1386 /* Work out the average size of a frame within this layer */
/* Uses the bitrate/framerate *delta* relative to the layer below,
 * since target_bitrate[] values are cumulative across layers. */
1388 lc->avg_frame_size_for_layer =
1389 (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1390 1000 / (lc->framerate - prev_layer_framerate));
1393 prev_layer_framerate = lc->framerate;
/* Apply a (possibly new) VP8_CONFIG to a live compressor.  Translates
 * external quality values through q_trans[], normalizes buffer levels to
 * bits, clamps q/sharpness/lag ranges, reacts to resolution and
 * temporal-layer-count changes (reallocating buffers where needed), and
 * refreshes rate-control state.  Also called once from init_config().
 * Statement order matters here: several later steps consume values
 * normalized by earlier ones (e.g. target_bandwidth is converted to
 * bit/s before the rescale() calls). */
1398 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1399 VP8_COMMON *cm = &cpi->common;
1401 unsigned int prev_number_of_layers;
1407 if (cm->version != oxcf->Version) {
1408 cm->version = oxcf->Version;
1409 vp8_setup_version(cm);
/* Capture pre-change values so resolution / layer-count changes can be
 * detected after cpi->oxcf is overwritten. */
1412 last_w = cpi->oxcf.Width;
1413 last_h = cpi->oxcf.Height;
1414 prev_number_of_layers = cpi->oxcf.number_of_layers;
/* Map the encoding mode onto a compressor speed class and clamp the
 * user cpu_used setting to the range valid for that mode. */
1418 switch (cpi->oxcf.Mode) {
1421 cpi->compressor_speed = 2;
1423 if (cpi->oxcf.cpu_used < -16) {
1424 cpi->oxcf.cpu_used = -16;
1427 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1431 case MODE_GOODQUALITY:
1433 cpi->compressor_speed = 1;
1435 if (cpi->oxcf.cpu_used < -5) {
1436 cpi->oxcf.cpu_used = -5;
1439 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1443 case MODE_BESTQUALITY:
1445 cpi->compressor_speed = 0;
1448 case MODE_FIRSTPASS:
1450 cpi->compressor_speed = 1;
1452 case MODE_SECONDPASS:
1454 cpi->compressor_speed = 1;
1456 if (cpi->oxcf.cpu_used < -5) {
1457 cpi->oxcf.cpu_used = -5;
1460 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1463 case MODE_SECONDPASS_BEST:
1465 cpi->compressor_speed = 0;
1469 if (cpi->pass == 0) cpi->auto_worst_q = 1;
/* Translate external 0..63 quality values to internal 0..127 qindex. */
1471 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1472 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1473 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1475 if (oxcf->fixed_q >= 0) {
/* Negative per-frame-type q values fall back to index 0. */
1476 if (oxcf->worst_allowed_q < 0) {
1477 cpi->oxcf.fixed_q = q_trans[0];
1479 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1482 if (oxcf->alt_q < 0) {
1483 cpi->oxcf.alt_q = q_trans[0];
1485 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1488 if (oxcf->key_q < 0) {
1489 cpi->oxcf.key_q = q_trans[0];
1491 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1494 if (oxcf->gold_q < 0) {
1495 cpi->oxcf.gold_q = q_trans[0];
1497 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1501 cpi->baseline_gf_interval =
1502 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1504 // GF behavior for 1 pass CBR, used when error_resilience is off.
1505 if (!cpi->oxcf.error_resilient_mode &&
1506 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1507 cpi->oxcf.Mode == MODE_REALTIME)
1508 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1510 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
/* On-the-fly bitpacking requires the maximum partition count. */
1511 cpi->oxcf.token_partitions = 3;
1514 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1515 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1518 setup_features(cpi);
1523 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1524 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1528 /* At the moment the first order values may not be > MAXQ */
1529 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1531 /* local file playback mode == really big buffer */
1532 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1533 cpi->oxcf.starting_buffer_level = 60000;
1534 cpi->oxcf.optimal_buffer_level = 60000;
1535 cpi->oxcf.maximum_buffer_size = 240000;
1536 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1537 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1538 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1541 /* Convert target bandwidth from Kbit/s to Bit/s */
1542 cpi->oxcf.target_bandwidth *= 1000;
/* Buffer levels arrive in milliseconds; rescale() converts them to bits
 * at the (now bit/s) target bandwidth. */
1544 cpi->oxcf.starting_buffer_level = rescale(
1545 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1547 /* Set or reset optimal and maximum buffer levels. */
1548 if (cpi->oxcf.optimal_buffer_level == 0) {
1549 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1551 cpi->oxcf.optimal_buffer_level = rescale(
1552 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1555 if (cpi->oxcf.maximum_buffer_size == 0) {
1556 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1558 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1559 cpi->oxcf.target_bandwidth, 1000);
1561 // Under a configuration change, where maximum_buffer_size may change,
1562 // keep buffer level clipped to the maximum allowed buffer size.
1563 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1564 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1565 cpi->buffer_level = cpi->bits_off_target;
1568 /* Set up frame rate and related parameters rate control values. */
1569 vp8_new_framerate(cpi, cpi->framerate);
1571 /* Set absolute upper and lower quality limits */
1572 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1573 cpi->best_quality = cpi->oxcf.best_allowed_q;
1575 /* active values should only be modified if out of new range */
1576 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1577 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1580 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1581 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1583 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1584 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1587 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1588 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1591 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1593 cpi->cq_target_quality = cpi->oxcf.cq_level;
1595 /* Only allow dropped frames in buffered mode */
1596 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1598 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1600 // Check if the number of temporal layers has changed, and if so reset the
1601 // pattern counter and set/initialize the temporal layer context for the
1602 // new layer configuration.
1603 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1604 // If the number of temporal layers are changed we must start at the
1605 // base of the pattern cycle, so set the layer id to 0 and reset
1606 // the temporal pattern counter.
1607 if (cpi->temporal_layer_id > 0) {
1608 cpi->temporal_layer_id = 0;
1610 cpi->temporal_pattern_counter = 0;
1611 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1614 if (!cpi->initial_width) {
1615 cpi->initial_width = cpi->oxcf.Width;
1616 cpi->initial_height = cpi->oxcf.Height;
/* Downscaling below the initial size is allowed; growing beyond it is not. */
1619 cm->Width = cpi->oxcf.Width;
1620 cm->Height = cpi->oxcf.Height;
1621 assert(cm->Width <= cpi->initial_width);
1622 assert(cm->Height <= cpi->initial_height);
1624 /* TODO(jkoleszar): if an internal spatial resampling is active,
1625 * and we downsize the input image, maybe we should clear the
1626 * internal scale immediately rather than waiting for it to
1630 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1631 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1633 cm->sharpness_level = cpi->oxcf.Sharpness;
1635 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
1636 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
1637 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
1639 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1640 Scale2Ratio(cm->vert_scale, &vr, &vs);
1642 /* always go to the next whole number */
1643 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1644 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1647 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1648 cpi->force_next_frame_intra = 1;
/* Reallocate when the MB-aligned size no longer matches the last-frame
 * buffer, or when it was never allocated (y_width == 0). */
1651 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1652 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1653 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1654 dealloc_raw_frame_buffers(cpi);
1655 alloc_raw_frame_buffers(cpi);
1656 vp8_alloc_compressor_data(cpi);
1659 if (cpi->oxcf.fixed_q >= 0) {
1660 cpi->last_q[0] = cpi->oxcf.fixed_q;
1661 cpi->last_q[1] = cpi->oxcf.fixed_q;
1664 cpi->Speed = cpi->oxcf.cpu_used;
1666 /* force to allowlag to 0 if lag_in_frames is 0; */
1667 if (cpi->oxcf.lag_in_frames == 0) {
1668 cpi->oxcf.allow_lag = 0;
1670 /* Limit on lag buffers as these are not currently dynamically allocated */
1671 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1672 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1676 cpi->alt_ref_source = NULL;
1677 cpi->is_src_frame_alt_ref = 0;
1679 #if CONFIG_TEMPORAL_DENOISING
/* Late denoiser allocation: only if enabled and not already allocated. */
1680 if (cpi->oxcf.noise_sensitivity) {
1681 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1682 int width = (cpi->oxcf.Width + 15) & ~15;
1683 int height = (cpi->oxcf.Height + 15) & ~15;
1684 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1685 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1686 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1687 "Failed to allocate denoiser");
1694 /* Experimental RD Code */
1695 cpi->frame_distortion = 0;
1696 cpi->last_frame_distortion = 0;
/* Fallback log2f() for toolchains without the C99 math function.
 * NOTE(review): despite the name, the value 0.6931... is ln(2) (M_LN2),
 * not log2(e); the formula log(x)/ln(2) == log2(x) is still correct. */
1701 #define M_LOG2_E 0.693147180559945309417
1703 #define log2f(x) (log(x) / (float)M_LOG2_E)
/* Fill the two MV SAD cost tables with a logarithmic cost curve,
 * symmetric about zero.  The mvsadcost[0]/[1] pointers are assumed to
 * point at the *center* of their arrays (see the "+ mvfp_max + 1" setup
 * in vp8_create_compressor), which is why negative indices [-i] are
 * valid here. */
1705 static void cal_mvsadcosts(int *mvsadcost[2]) {
/* Fixed cost for the zero motion vector in both components. */
1708 mvsadcost[0][0] = 300;
1709 mvsadcost[1][0] = 300;
/* Cost grows with log2 of the (eighth-pel-scaled) magnitude. */
1712 double z = 256 * (2 * (log2f(8 * i) + .6));
1713 mvsadcost[0][i] = (int)z;
1714 mvsadcost[1][i] = (int)z;
1715 mvsadcost[0][-i] = (int)z;
1716 mvsadcost[1][-i] = (int)z;
1717 } while (++i <= mvfp_max);
/* Allocate and fully initialize a VP8 encoder instance.
 * Returns the new VP8_COMP on success; on any allocation/initialization
 * failure the internal error path longjmps to the setjmp below, which
 * tears the partially built instance down via vp8_remove_compressor and
 * returns 0.  Initialization order matters: init_config() must run before
 * the rate-control fields it feeds are consumed further down. */
1720 struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
1726 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1727 /* Check that the CPI instance is valid */
1732 memset(cpi, 0, sizeof(VP8_COMP));
/* All vpx_internal_error() calls during setup land here. */
1734 if (setjmp(cm->error.jmp)) {
1735 cpi->common.error.setjmp = 0;
1736 vp8_remove_compressor(&cpi);
1740 cpi->common.error.setjmp = 1;
/* Motion search site list: 8 candidates per step plus the center. */
1742 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
1743 (MAX_MVSEARCH_STEPS * 8) + 1));
1745 vp8_create_common(&cpi->common);
1747 init_config(cpi, oxcf);
1749 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1750 sizeof(vp8cx_base_skip_false_prob));
1751 cpi->common.current_video_frame = 0;
1752 cpi->temporal_pattern_counter = 0;
1753 cpi->temporal_layer_id = -1;
1754 cpi->kf_overspend_bits = 0;
1755 cpi->kf_bitrate_adjustment = 0;
1756 cpi->frames_till_gf_update_due = 0;
1757 cpi->gf_overspend_bits = 0;
1758 cpi->non_gf_bitrate_adjustment = 0;
/* Starting coding probabilities (out of 255) for reference selection. */
1759 cpi->prob_last_coded = 128;
1760 cpi->prob_gf_coded = 128;
1761 cpi->prob_intra_coded = 63;
1763 /* Prime the recent reference frame usage counters.
1764 * Hereafter they will be maintained as a sort of moving average
1766 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1767 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1768 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1769 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1771 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1772 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1774 cpi->twopass.gf_decay_rate = 0;
1775 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1777 cpi->gold_is_last = 0;
1778 cpi->alt_is_last = 0;
1779 cpi->gold_is_alt = 0;
1781 cpi->active_map_enabled = 0;
1784 /* Experimental code for lagged and one pass */
1785 /* Initialise one_pass GF frames stats */
1786 /* Update stats used for GF selection */
1789 cpi->one_pass_frame_index = 0;
1791 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1793 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1794 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1795 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1796 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1797 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1798 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1799 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1800 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1801 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1806 cpi->mse_source_denoised = 0;
1808 /* Should we use the cyclic refresh method.
1809 * Currently there is no external control for this.
1810 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1812 cpi->cyclic_refresh_mode_enabled =
1813 (cpi->oxcf.error_resilient_mode ||
1814 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1815 cpi->oxcf.Mode <= 2));
/* Fraction of MBs refreshed per frame depends on layer count. */
1816 cpi->cyclic_refresh_mode_max_mbs_perframe =
1817 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1818 if (cpi->oxcf.number_of_layers == 1) {
1819 cpi->cyclic_refresh_mode_max_mbs_perframe =
1820 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1821 } else if (cpi->oxcf.number_of_layers == 2) {
1822 cpi->cyclic_refresh_mode_max_mbs_perframe =
1823 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1825 cpi->cyclic_refresh_mode_index = 0;
1826 cpi->cyclic_refresh_q = 32;
1828 // GF behavior for 1 pass CBR, used when error_resilience is off.
1829 cpi->gf_update_onepass_cbr = 0;
1830 cpi->gf_noboost_onepass_cbr = 0;
1831 if (!cpi->oxcf.error_resilient_mode &&
1832 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1833 cpi->gf_update_onepass_cbr = 1;
1834 cpi->gf_noboost_onepass_cbr = 1;
/* GF interval derived from the cyclic refresh rate, clamped to [6,40]. */
1835 cpi->gf_interval_onepass_cbr =
1836 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1837 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1838 cpi->cyclic_refresh_mode_max_mbs_perframe)
1840 cpi->gf_interval_onepass_cbr =
1841 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
1842 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1845 if (cpi->cyclic_refresh_mode_enabled) {
1846 CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
1847 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1849 cpi->cyclic_refresh_map = (signed char *)NULL;
1852 CHECK_MEM_ERROR(cpi->consec_zero_last,
1853 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1854 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1855 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1857 #ifdef VP8_ENTROPY_STATS
1858 init_context_counters();
1861 /*Initialize the feed-forward activity masking.*/
1862 cpi->activity_avg = 90 << 12;
1864 /* Give a sensible default for the first frame. */
1865 cpi->frames_since_key = 8;
1866 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1867 cpi->this_key_frame_forced = 0;
1868 cpi->next_key_frame_forced = 0;
1870 cpi->source_alt_ref_pending = 0;
1871 cpi->source_alt_ref_active = 0;
1872 cpi->common.refresh_alt_ref_frame = 0;
1874 cpi->force_maxqp = 0;
1876 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1877 #if CONFIG_INTERNAL_STATS
1878 cpi->b_calculate_ssimg = 0;
1883 if (cpi->b_calculate_psnr) {
1884 cpi->total_sq_error = 0.0;
1885 cpi->total_sq_error2 = 0.0;
1890 cpi->totalp_y = 0.0;
1891 cpi->totalp_u = 0.0;
1892 cpi->totalp_v = 0.0;
1894 cpi->tot_recode_hits = 0;
1895 cpi->summed_quality = 0;
1896 cpi->summed_weights = 0;
/* Sentinel: "no timestamp seen yet" (INT32_MAX). */
1901 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1903 cpi->frames_till_gf_update_due = 0;
1904 cpi->key_frame_count = 1;
1906 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1909 cpi->total_byte_count = 0;
1911 cpi->drop_frame = 0;
1913 cpi->rate_correction_factor = 1.0;
1914 cpi->key_frame_rate_correction_factor = 1.0;
1915 cpi->gf_rate_correction_factor = 1.0;
1916 cpi->twopass.est_max_qcorrection_factor = 1.0;
1918 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1919 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1922 #ifdef OUTPUT_YUV_SRC
1923 yuv_file = fopen("bd.yuv", "ab");
1925 #ifdef OUTPUT_YUV_DENOISED
1926 yuv_denoised_file = fopen("denoised.yuv", "ab");
1930 framepsnr = fopen("framepsnr.stt", "a");
1931 kf_list = fopen("kf_list.stt", "w");
1934 cpi->output_pkt_list = oxcf->output_pkt_list;
1936 #if !CONFIG_REALTIME_ONLY
/* Two-pass setup: pass 1 collects stats; pass 2 consumes the stats
 * buffer supplied in oxcf->two_pass_stats_in. */
1938 if (cpi->pass == 1) {
1939 vp8_init_first_pass(cpi);
1940 } else if (cpi->pass == 2) {
1941 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1942 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1944 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1945 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1946 cpi->twopass.stats_in_end =
1947 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1948 vp8_init_second_pass(cpi);
1953 if (cpi->compressor_speed == 2) {
1954 cpi->avg_encode_time = 0;
1955 cpi->avg_pick_mode_time = 0;
1958 vp8_set_speed_features(cpi);
1960 /* Set starting values of RD threshold multipliers (128 = *1) */
1961 for (i = 0; i < MAX_MODES; ++i) {
1962 cpi->mb.rd_thresh_mult[i] = 128;
1965 #ifdef VP8_ENTROPY_STATS
1966 init_mv_ref_counts();
1969 #if CONFIG_MULTITHREAD
1970 if (vp8cx_create_encoder_threads(cpi)) {
1971 vp8_remove_compressor(&cpi);
/* Wire up the SAD/variance function pointers for each block size. */
1976 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
1977 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
1978 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
1979 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
1980 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
1981 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
1983 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
1984 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
1985 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
1986 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
1987 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
1988 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
1990 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
1991 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
1992 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
1993 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
1994 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
1995 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
1997 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
1998 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
1999 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2000 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
2001 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
2002 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2004 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2005 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2006 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2007 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
2008 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
2009 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2011 #if ARCH_X86 || ARCH_X86_64
2012 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2013 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2014 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2015 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2016 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2019 cpi->full_search_sad = vp8_full_search_sad;
2020 cpi->diamond_search_sad = vp8_diamond_search_sad;
2021 cpi->refining_search_sad = vp8_refining_search_sad;
2023 /* make sure frame 1 is okay */
2024 cpi->mb.error_bins[0] = cpi->common.MBs;
2026 /* vp8cx_init_quantizer() is first called here. Add check in
2027 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2028 * called later when needed. This will avoid unnecessary calls of
2029 * vp8cx_init_quantizer() for every frame.
2031 vp8cx_init_quantizer(cpi);
2033 vp8_loop_filter_init(cm);
2035 cpi->common.error.setjmp = 0;
2037 #if CONFIG_MULTI_RES_ENCODING
2039 /* Calculate # of MBs in a row in lower-resolution level image. */
2040 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2044 /* setup RD costs to MACROBLOCK struct */
/* MV cost pointers are offset to the middle of their arrays so they can
 * be indexed by signed MV components (see cal_mvsadcosts). */
2046 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2047 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2048 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2049 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2051 cal_mvsadcosts(cpi->mb.mvsadcost);
2053 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2054 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2055 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2056 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2057 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2059 /* setup block ptrs & offsets */
2060 vp8_setup_block_ptrs(&cpi->mb);
2061 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
/* Tear down a VP8 encoder instance: flush optional two-pass / statistics
 * output, free per-compressor allocations and remove the common state.
 * NOTE(review): this copy of the function appears to have lost several
 * lines (closing braces, some fclose() calls, local declarations such as
 * `i`, `total_psnr`, `samples`) — compare against upstream libvpx
 * onyx_if.c before building.
 */
void vp8_remove_compressor(VP8_COMP **ptr) {
  VP8_COMP *cpi = *ptr;
  /* Only emit end-of-stream statistics if at least one frame was coded. */
  if (cpi && (cpi->common.current_video_frame > 0)) {
#if !CONFIG_REALTIME_ONLY
    if (cpi->pass == 2) {
      /* Finalize two-pass rate control state. */
      vp8_end_second_pass(cpi);
#ifdef VP8_ENTROPY_STATS
    /* Dump accumulated entropy-coder statistics. */
    print_context_counters();
    print_tree_update_probs();
    print_mode_context();
#if CONFIG_INTERNAL_STATS
    if (cpi->pass != 1) {
      /* Append overall PSNR/SSIM/bitrate summary to opsnr.stt. */
      FILE *f = fopen("opsnr.stt", "a");
      double time_encoded =
          (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
      double total_encode_time =
          (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
      /* Achieved data rate in kbit/s and its error vs. the target. */
      double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
      const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
      const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
      if (cpi->b_calculate_psnr) {
        if (cpi->oxcf.number_of_layers > 1) {
          /* Per-temporal-layer summary. */
              "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
              "GLPsnrP\tVPXSSIM\n");
          for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
                (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
            /* 3/2 * W * H per frame: Y plane plus half-resolution U and V. */
            double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
                             cpi->common.Width * cpi->common.Height;
                vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
            double total_psnr2 =
                vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
                100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
                "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
                i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
                total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
                total_psnr2, total_ssim);
          /* Single-layer summary. */
              3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
              vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
          double total_psnr2 =
              vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
              100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
              "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
              "GLPsnrP\tVPXSSIM\n");
              "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
              dr, cpi->total / cpi->count, total_psnr,
              cpi->totalp / cpi->count, total_psnr2, total_ssim);
      /* Quantizer range / skip statistics.
       * NOTE(review): the earlier `f` does not appear to be fclose()d in
       * this copy before being reassigned — verify against upstream. */
      f = fopen("qskip.stt", "a");
      fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
    if (cpi->compressor_speed == 2) {
      /* Real-time speed histogram (frames encoded at each speed setting). */
      FILE *f = fopen("cxspeed.stt", "a");
      cnt_pm /= cpi->common.MBs;
      for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
      /* Mode-usage statistics (file-scope counters updated during encode). */
      extern int count_mb_seg[4];
      FILE *f = fopen("modes.stt", "a");
      double dr = (double)cpi->framerate * (double)bytes * (double)8 /
                  (double)count / (double)1000;
      fprintf(f, "intra_mode in Intra Frames:\n");
      fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
              y_modes[2], y_modes[3], y_modes[4]);
      fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
              uv_modes[2], uv_modes[3]);
      for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
      fprintf(f, "Modes in Inter Frames:\n");
      fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
              inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
              inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
              inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
      fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
              inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
      for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
      fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
              count_mb_seg[2], count_mb_seg[3]);
      fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
              inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
              inter_b_modes[NEW4X4]);
#ifdef VP8_ENTROPY_STATS
    /* Emit collected key-frame b-mode counts as a compilable C table. */
    FILE *fmode = fopen("modecontext.c", "w");
    fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
    fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
        "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
    for (i = 0; i < 10; ++i) {
      fprintf(fmode, " { /* Above Mode : %d */\n", i);
      for (j = 0; j < 10; ++j) {
        fprintf(fmode, " {");
        for (k = 0; k < 10; ++k) {
          /* A zero count is written as 1 so the table has no zero probs. */
          if (!intra_mode_stats[i][j][k])
            fprintf(fmode, " %5d, ", 1);
            fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
        fprintf(fmode, "}, /* left_mode %d */\n", j);
      fprintf(fmode, " },\n");
    fprintf(fmode, "};\n");
#if defined(SECTIONBITS_OUTPUT)
    /* Per-section bit accounting (fixed-point, /256 to get bits). */
    FILE *f = fopen("tokenbits.stt", "a");
    for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
    /* Timing summary to stdout (milliseconds). */
    printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
    printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
    printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
#if CONFIG_MULTITHREAD
  /* Stop and join worker threads before freeing their shared state. */
  vp8cx_remove_encoder_threads(cpi);
#if CONFIG_TEMPORAL_DENOISING
  vp8_denoiser_free(&cpi->denoiser);
  /* Free per-compressor buffers, then the shared common state. */
  dealloc_compressor_data(cpi);
  vpx_free(cpi->mb.ss);
  vpx_free(cpi->cyclic_refresh_map);
  vpx_free(cpi->consec_zero_last);
  vpx_free(cpi->consec_zero_last_mvbias);
  vp8_remove_common(&cpi->common);
#ifdef OUTPUT_YUV_SRC
#ifdef OUTPUT_YUV_DENOISED
  fclose(yuv_denoised_file);
/* Sum of squared differences between two image planes.
 *
 * The bulk of the plane is covered with SIMD-accelerated 16x16 MSE blocks;
 * a trailing right column (odd width) and bottom rows (odd height) are
 * handled with scalar loops so every pixel is counted exactly once.
 *
 * orig/recon        : plane base pointers
 * orig_stride/recon_stride : bytes per row of each plane
 * cols/rows         : plane dimensions in pixels
 * Returns the total SSE over the cols x rows region.
 */
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
                                 unsigned char *recon, int recon_stride,
                                 unsigned int cols, unsigned int rows) {
  unsigned int row, col;
  uint64_t total_sse = 0;
  int diff;

  for (row = 0; row + 16 <= rows; row += 16) {
    for (col = 0; col + 16 <= cols; col += 16) {
      unsigned int sse;

      vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
      total_sse += sse;
    }

    /* Handle odd-sized width */
    if (col < cols) {
      unsigned int border_row, border_col;
      unsigned char *border_orig = orig;
      unsigned char *border_recon = recon;

      for (border_row = 0; border_row < 16; ++border_row) {
        for (border_col = col; border_col < cols; ++border_col) {
          diff = border_orig[border_col] - border_recon[border_col];
          total_sse += diff * diff;
        }

        border_orig += orig_stride;
        border_recon += recon_stride;
      }
    }

    orig += orig_stride * 16;
    recon += recon_stride * 16;
  }

  /* Handle odd-sized height */
  for (; row < rows; ++row) {
    for (col = 0; col < cols; ++col) {
      diff = orig[col] - recon[col];
      total_sse += diff * diff;
    }

    orig += orig_stride;
    recon += recon_stride;
  }

  /* Reset MMX/x87 state after SIMD use. */
  vpx_clear_system_state();
  return total_sse;
}
2363 static void generate_psnr_packet(VP8_COMP *cpi) {
2364 YV12_BUFFER_CONFIG *orig = cpi->Source;
2365 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2366 struct vpx_codec_cx_pkt pkt;
2369 unsigned int width = cpi->common.Width;
2370 unsigned int height = cpi->common.Height;
2372 pkt.kind = VPX_CODEC_PSNR_PKT;
2373 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2374 recon->y_stride, width, height);
2375 pkt.data.psnr.sse[0] = sse;
2376 pkt.data.psnr.sse[1] = sse;
2377 pkt.data.psnr.samples[0] = width * height;
2378 pkt.data.psnr.samples[1] = width * height;
2380 width = (width + 1) / 2;
2381 height = (height + 1) / 2;
2383 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2384 recon->uv_stride, width, height);
2385 pkt.data.psnr.sse[0] += sse;
2386 pkt.data.psnr.sse[2] = sse;
2387 pkt.data.psnr.samples[0] += width * height;
2388 pkt.data.psnr.samples[2] = width * height;
2390 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2391 recon->uv_stride, width, height);
2392 pkt.data.psnr.sse[0] += sse;
2393 pkt.data.psnr.sse[3] = sse;
2394 pkt.data.psnr.samples[0] += width * height;
2395 pkt.data.psnr.samples[3] = width * height;
2397 for (i = 0; i < 4; ++i) {
2398 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2399 (double)(pkt.data.psnr.sse[i]));
2402 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2405 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2406 if (ref_frame_flags > 7) return -1;
2408 cpi->ref_frame_flags = ref_frame_flags;
2411 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2412 if (ref_frame_flags > 7) return -1;
2414 cpi->common.refresh_golden_frame = 0;
2415 cpi->common.refresh_alt_ref_frame = 0;
2416 cpi->common.refresh_last_frame = 0;
2418 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2420 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2422 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2427 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2428 YV12_BUFFER_CONFIG *sd) {
2429 VP8_COMMON *cm = &cpi->common;
2432 if (ref_frame_flag == VP8_LAST_FRAME) {
2433 ref_fb_idx = cm->lst_fb_idx;
2434 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2435 ref_fb_idx = cm->gld_fb_idx;
2436 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2437 ref_fb_idx = cm->alt_fb_idx;
2442 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2446 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2447 YV12_BUFFER_CONFIG *sd) {
2448 VP8_COMMON *cm = &cpi->common;
2452 if (ref_frame_flag == VP8_LAST_FRAME) {
2453 ref_fb_idx = cm->lst_fb_idx;
2454 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2455 ref_fb_idx = cm->gld_fb_idx;
2456 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2457 ref_fb_idx = cm->alt_fb_idx;
2462 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2466 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2467 VP8_COMMON *cm = &cpi->common;
2468 cm->refresh_entropy_probs = update;
2473 #if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
2474 void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) {
2475 unsigned char *src = s->y_buffer;
2476 int h = s->y_height;
2479 fwrite(src, s->y_width, 1, yuv_file);
2487 fwrite(src, s->uv_width, 1, yuv_file);
2488 src += s->uv_stride;
2495 fwrite(src, s->uv_width, 1, yuv_file);
2496 src += s->uv_stride;
2501 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2502 VP8_COMMON *cm = &cpi->common;
2504 /* are we resizing the image */
2505 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2506 #if CONFIG_SPATIAL_RESAMPLING
2507 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2508 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2511 if (cm->vert_scale == 3) {
2517 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2518 Scale2Ratio(cm->vert_scale, &vr, &vs);
2520 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2521 tmp_height, hs, hr, vs, vr, 0);
2523 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2524 cpi->Source = &cpi->scaled_source;
2531 static int resize_key_frame(VP8_COMP *cpi) {
2532 #if CONFIG_SPATIAL_RESAMPLING
2533 VP8_COMMON *cm = &cpi->common;
2535 /* Do we need to apply resampling for one pass cbr.
2536 * In one pass this is more limited than in two pass cbr.
2537 * The test and any change is only made once per key frame sequence.
2539 if (cpi->oxcf.allow_spatial_resampling &&
2540 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2541 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2542 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2543 int new_width, new_height;
2545 /* If we are below the resample DOWN watermark then scale down a
2548 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2549 cpi->oxcf.optimal_buffer_level / 100)) {
2551 (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2552 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2554 /* Should we now start scaling back up */
2555 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2556 cpi->oxcf.optimal_buffer_level / 100)) {
2558 (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2559 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2562 /* Get the new height and width */
2563 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2564 Scale2Ratio(cm->vert_scale, &vr, &vs);
2565 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2566 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2568 /* If the image size has changed we need to reallocate the buffers
2569 * and resample the source image
2571 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2572 cm->Width = new_width;
2573 cm->Height = new_height;
2574 vp8_alloc_compressor_data(cpi);
2575 scale_and_extend_source(cpi->un_scaled_source, cpi);
2584 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2585 VP8_COMMON *cm = &cpi->common;
2587 /* Select an interval before next GF or altref */
2588 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2590 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2591 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2593 /* Set the bits per frame that we should try and recover in
2594 * subsequent inter frames to account for the extra GF spend...
2595 * note that his does not apply for GF updates that occur
2596 * coincident with a key frame as the extra cost of key frames is
2597 * dealt with elsewhere.
2599 cpi->gf_overspend_bits += cpi->projected_frame_size;
2600 cpi->non_gf_bitrate_adjustment =
2601 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2604 /* Update data structure that monitors level of reference to last GF */
2605 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2606 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2608 /* this frame refreshes means next frames don't unless specified by user */
2609 cpi->frames_since_golden = 0;
2611 /* Clear the alternate reference update pending flag. */
2612 cpi->source_alt_ref_pending = 0;
2614 /* Set the alternate reference frame active flag */
2615 cpi->source_alt_ref_active = 1;
2617 static void update_golden_frame_stats(VP8_COMP *cpi) {
2618 VP8_COMMON *cm = &cpi->common;
2620 /* Update the Golden frame usage counts. */
2621 if (cm->refresh_golden_frame) {
2622 /* Select an interval before next GF */
2623 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2625 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2626 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2628 /* Set the bits per frame that we should try and recover in
2629 * subsequent inter frames to account for the extra GF spend...
2630 * note that his does not apply for GF updates that occur
2631 * coincident with a key frame as the extra cost of key frames
2632 * is dealt with elsewhere.
2634 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2635 /* Calcluate GF bits to be recovered
2636 * Projected size - av frame bits available for inter
2637 * frames for clip as a whole
2639 cpi->gf_overspend_bits +=
2640 (cpi->projected_frame_size - cpi->inter_frame_target);
2643 cpi->non_gf_bitrate_adjustment =
2644 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2647 /* Update data structure that monitors level of reference to last GF */
2648 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2649 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2651 /* this frame refreshes means next frames don't unless specified by
2654 cm->refresh_golden_frame = 0;
2655 cpi->frames_since_golden = 0;
2657 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2658 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2659 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2660 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2662 /* ******** Fixed Q test code only ************ */
2663 /* If we are going to use the ALT reference for the next group of
2664 * frames set a flag to say so.
2666 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2667 !cpi->common.refresh_alt_ref_frame) {
2668 cpi->source_alt_ref_pending = 1;
2669 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2672 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2674 /* Decrement count down till next gf */
2675 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2677 } else if (!cpi->common.refresh_alt_ref_frame) {
2678 /* Decrement count down till next gf */
2679 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2681 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2683 cpi->frames_since_golden++;
2685 if (cpi->frames_since_golden > 1) {
2686 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2687 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2688 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2689 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2690 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2691 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2692 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2693 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2698 /* This function updates the reference frame probability estimates that
2699 * will be used during mode selection
2701 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2702 VP8_COMMON *cm = &cpi->common;
2704 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2705 const int rf_intra = rfct[INTRA_FRAME];
2706 const int rf_inter =
2707 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2709 if (cm->frame_type == KEY_FRAME) {
2710 cpi->prob_intra_coded = 255;
2711 cpi->prob_last_coded = 128;
2712 cpi->prob_gf_coded = 128;
2713 } else if (!(rf_intra + rf_inter)) {
2714 cpi->prob_intra_coded = 63;
2715 cpi->prob_last_coded = 128;
2716 cpi->prob_gf_coded = 128;
2719 /* update reference frame costs since we can do better than what we got
2722 if (cpi->oxcf.number_of_layers == 1) {
2723 if (cpi->common.refresh_alt_ref_frame) {
2724 cpi->prob_intra_coded += 40;
2725 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2726 cpi->prob_last_coded = 200;
2727 cpi->prob_gf_coded = 1;
2728 } else if (cpi->frames_since_golden == 0) {
2729 cpi->prob_last_coded = 214;
2730 } else if (cpi->frames_since_golden == 1) {
2731 cpi->prob_last_coded = 192;
2732 cpi->prob_gf_coded = 220;
2733 } else if (cpi->source_alt_ref_active) {
2734 cpi->prob_gf_coded -= 20;
2736 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2738 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2742 #if !CONFIG_REALTIME_ONLY
2743 /* 1 = key, 0 = inter */
2744 static int decide_key_frame(VP8_COMP *cpi) {
2745 VP8_COMMON *cm = &cpi->common;
2747 int code_key_frame = 0;
2751 if (cpi->Speed > 11) return 0;
2753 /* Clear down mmx registers */
2754 vpx_clear_system_state();
2756 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2757 double change = 1.0 *
2758 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2759 (1 + cpi->last_intra_error);
2762 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2763 (1 + cpi->last_prediction_error);
2764 double minerror = cm->MBs * 256;
2766 cpi->last_intra_error = cpi->mb.intra_error;
2767 cpi->last_prediction_error = cpi->mb.prediction_error;
2769 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2770 cpi->mb.prediction_error > minerror &&
2771 (change > .25 || change2 > .25)) {
2772 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2773 * cpi->last_frame_percent_intra + 3*/
2780 /* If the following are true we might as well code a key frame */
2781 if (((cpi->this_frame_percent_intra == 100) &&
2782 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2783 ((cpi->this_frame_percent_intra > 95) &&
2784 (cpi->this_frame_percent_intra >=
2785 (cpi->last_frame_percent_intra + 5)))) {
2788 /* in addition if the following are true and this is not a golden frame
2789 * then code a key frame Note that on golden frames there often seems
2790 * to be a pop in intra useage anyway hence this restriction is
2791 * designed to prevent spurious key frames. The Intra pop needs to be
2794 else if (((cpi->this_frame_percent_intra > 60) &&
2795 (cpi->this_frame_percent_intra >
2796 (cpi->last_frame_percent_intra * 2))) ||
2797 ((cpi->this_frame_percent_intra > 75) &&
2798 (cpi->this_frame_percent_intra >
2799 (cpi->last_frame_percent_intra * 3 / 2))) ||
2800 ((cpi->this_frame_percent_intra > 90) &&
2801 (cpi->this_frame_percent_intra >
2802 (cpi->last_frame_percent_intra + 10)))) {
2803 if (!cm->refresh_golden_frame) code_key_frame = 1;
2806 return code_key_frame;
2809 static void Pass1Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
2810 unsigned int *frame_flags) {
2814 vp8_set_quantizer(cpi, 26);
2816 vp8_first_pass(cpi);
2821 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2824 /* write the frame */
2829 sprintf(filename, "cx\\y%04d.raw", this_frame);
2830 yframe = fopen(filename, "wb");
2832 for (i = 0; i < frame->y_height; ++i)
2833 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2836 sprintf(filename, "cx\\u%04d.raw", this_frame);
2837 yframe = fopen(filename, "wb");
2839 for (i = 0; i < frame->uv_height; ++i)
2840 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2843 sprintf(filename, "cx\\v%04d.raw", this_frame);
2844 yframe = fopen(filename, "wb");
2846 for (i = 0; i < frame->uv_height; ++i)
2847 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2852 /* return of 0 means drop frame */
2854 #if !CONFIG_REALTIME_ONLY
2855 /* Function to test for conditions that indeicate we should loop
2856 * back and recode a frame.
2858 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2859 int maxq, int minq) {
2860 int force_recode = 0;
2861 VP8_COMMON *cm = &cpi->common;
2863 /* Is frame recode allowed at all
2864 * Yes if either recode mode 1 is selected or mode two is selcted
2865 * and the frame is a key frame. golden frame or alt_ref_frame
2867 if ((cpi->sf.recode_loop == 1) ||
2868 ((cpi->sf.recode_loop == 2) &&
2869 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2870 cm->refresh_alt_ref_frame))) {
2871 /* General over and under shoot tests */
2872 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2873 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2876 /* Special Constrained quality tests */
2877 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2878 /* Undershoot and below auto cq level */
2879 if ((q > cpi->cq_target_quality) &&
2880 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2883 /* Severe undershoot and between auto and user cq level */
2884 else if ((q > cpi->oxcf.cq_level) &&
2885 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2886 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2888 cpi->active_best_quality = cpi->oxcf.cq_level;
2893 return force_recode;
2895 #endif // !CONFIG_REALTIME_ONLY
2897 static void update_reference_frames(VP8_COMP *cpi) {
2898 VP8_COMMON *cm = &cpi->common;
2899 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2901 /* At this point the new frame has been encoded.
2902 * If any buffer copy / swapping is signaled it should be done here.
2905 if (cm->frame_type == KEY_FRAME) {
2906 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2908 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2909 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2911 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2913 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2914 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2915 } else /* For non key frames */
2917 if (cm->refresh_alt_ref_frame) {
2918 assert(!cm->copy_buffer_to_arf);
2920 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2921 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2922 cm->alt_fb_idx = cm->new_fb_idx;
2924 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2925 } else if (cm->copy_buffer_to_arf) {
2926 assert(!(cm->copy_buffer_to_arf & ~0x3));
2928 if (cm->copy_buffer_to_arf == 1) {
2929 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2930 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2931 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2932 cm->alt_fb_idx = cm->lst_fb_idx;
2934 cpi->current_ref_frames[ALTREF_FRAME] =
2935 cpi->current_ref_frames[LAST_FRAME];
2937 } else /* if (cm->copy_buffer_to_arf == 2) */
2939 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2940 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2941 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2942 cm->alt_fb_idx = cm->gld_fb_idx;
2944 cpi->current_ref_frames[ALTREF_FRAME] =
2945 cpi->current_ref_frames[GOLDEN_FRAME];
2950 if (cm->refresh_golden_frame) {
2951 assert(!cm->copy_buffer_to_gf);
2953 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2954 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2955 cm->gld_fb_idx = cm->new_fb_idx;
2957 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2958 } else if (cm->copy_buffer_to_gf) {
2959 assert(!(cm->copy_buffer_to_arf & ~0x3));
2961 if (cm->copy_buffer_to_gf == 1) {
2962 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2963 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2964 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2965 cm->gld_fb_idx = cm->lst_fb_idx;
2967 cpi->current_ref_frames[GOLDEN_FRAME] =
2968 cpi->current_ref_frames[LAST_FRAME];
2970 } else /* if (cm->copy_buffer_to_gf == 2) */
2972 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2973 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2974 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2975 cm->gld_fb_idx = cm->alt_fb_idx;
2977 cpi->current_ref_frames[GOLDEN_FRAME] =
2978 cpi->current_ref_frames[ALTREF_FRAME];
2984 if (cm->refresh_last_frame) {
2985 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2986 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2987 cm->lst_fb_idx = cm->new_fb_idx;
2989 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
2992 #if CONFIG_TEMPORAL_DENOISING
2993 if (cpi->oxcf.noise_sensitivity) {
2994 /* we shouldn't have to keep multiple copies as we know in advance which
2995 * buffer we should start - for now to get something up and running
2996 * I've chosen to copy the buffers
2998 if (cm->frame_type == KEY_FRAME) {
3000 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
3001 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
3002 } else /* For non key frames */
3004 vp8_yv12_extend_frame_borders(
3005 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
3007 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
3008 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3009 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
3011 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
3012 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3013 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
3015 if (cm->refresh_last_frame) {
3016 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3017 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3020 if (cpi->oxcf.noise_sensitivity == 4)
3021 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
3026 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
3027 YV12_BUFFER_CONFIG *dest,
3033 int min_consec_zero_last = 10;
3034 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
3035 unsigned char *src = source->y_buffer;
3036 unsigned char *dst = dest->y_buffer;
3038 /* Loop through the Y plane, every |skip| blocks along rows and colmumns,
3039 * summing the square differences, and only for blocks that have been
3040 * zero_last mode at least |x| frames in a row.
3042 for (i = 0; i < source->y_height; i += 16 * skip) {
3043 int block_index_row = (i >> 4) * cpi->common.mb_cols;
3044 for (j = 0; j < source->y_width; j += 16 * skip) {
3045 int index = block_index_row + (j >> 4);
3046 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3048 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
3049 dest->y_stride, &sse);
3053 src += 16 * skip * source->y_stride;
3054 dst += 16 * skip * dest->y_stride;
3056 // Only return non-zero if we have at least ~1/16 samples for estimate.
3057 if (num_blocks > (tot_num_blocks >> 4)) {
3058 return (Total / num_blocks);
3064 #if CONFIG_TEMPORAL_DENOISING
3065 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3066 const VP8_COMMON *const cm = &cpi->common;
3070 // Number of blocks skipped along row/column in computing the
3071 // nmse (normalized mean square error) of source.
3073 // Only select blocks for computing nmse that have been encoded
3074 // as ZERO LAST min_consec_zero_last frames in a row.
3075 // Scale with number of temporal layers.
3076 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3077 // Decision is tested for changing the denoising mode every
3078 // num_mode_change times this function is called. Note that this
3079 // function called every 8 frames, so (8 * num_mode_change) is number
3080 // of frames where denoising mode change is tested for switch.
3081 int num_mode_change = 20;
3082 // Framerate factor, to compensate for larger mse at lower framerates.
3083 // Use ref_framerate, which is full source framerate for temporal layers.
3084 // TODO(marpan): Adjust this factor.
3085 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3086 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3087 int ystride = cpi->Source->y_stride;
3088 unsigned char *src = cpi->Source->y_buffer;
3089 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
3090 static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
3091 128, 128, 128, 128, 128, 128,
3092 128, 128, 128, 128 };
3093 int bandwidth = (int)(cpi->target_bandwidth);
3094 // For temporal layers, use full bandwidth (top layer).
3095 if (cpi->oxcf.number_of_layers > 1) {
3096 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3097 bandwidth = (int)(lc->target_bandwidth);
3099 // Loop through the Y plane, every skip blocks along rows and columns,
3100 // summing the normalized mean square error, only for blocks that have
3101 // been encoded as ZEROMV LAST at least min_consec_zero_last least frames in
3102 // a row and have small sum difference between current and previous frame.
3103 // Normalization here is by the contrast of the current frame block.
3104 for (i = 0; i < cm->Height; i += 16 * skip) {
3105 int block_index_row = (i >> 4) * cm->mb_cols;
3106 for (j = 0; j < cm->Width; j += 16 * skip) {
3107 int index = block_index_row + (j >> 4);
3108 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3110 const unsigned int var =
3111 vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
3112 // Only consider this block as valid for noise measurement
3113 // if the sum_diff average of the current and previous frame
3114 // is small (to avoid effects from lighting change).
3115 if ((sse - var) < 128) {
3117 const unsigned int act =
3118 vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
3119 if (act > 0) total += sse / act;
3124 src += 16 * skip * ystride;
3125 dst += 16 * skip * ystride;
3127 total = total * fac_framerate / 100;
3129 // Only consider this frame as valid sample if we have computed nmse over
3130 // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
3131 // application inputs duplicate frames, or contrast is all zero).
3132 if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
3133 // Update the recursive mean square source_diff.
3134 total = (total << 8) / num_blocks;
3135 if (cpi->denoiser.nmse_source_diff_count == 0) {
3136 // First sample in new interval.
3137 cpi->denoiser.nmse_source_diff = total;
3138 cpi->denoiser.qp_avg = cm->base_qindex;
3140 // For subsequent samples, use average with weight ~1/4 for new sample.
3141 cpi->denoiser.nmse_source_diff =
3142 (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
3143 cpi->denoiser.qp_avg =
3144 (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
3146 cpi->denoiser.nmse_source_diff_count++;
3148 // Check for changing the denoiser mode, when we have obtained #samples =
3149 // num_mode_change. Condition the change also on the bitrate and QP.
3150 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3151 // Check for going up: from normal to aggressive mode.
3152 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3153 (cpi->denoiser.nmse_source_diff >
3154 cpi->denoiser.threshold_aggressive_mode) &&
3155 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3156 bandwidth > cpi->denoiser.bitrate_threshold)) {
3157 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3159 // Check for going down: from aggressive to normal mode.
3160 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3161 (cpi->denoiser.nmse_source_diff <
3162 cpi->denoiser.threshold_aggressive_mode)) ||
3163 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3164 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3165 bandwidth < cpi->denoiser.bitrate_threshold))) {
3166 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3169 // Reset metric and counter for next interval.
3170 cpi->denoiser.nmse_source_diff = 0;
3171 cpi->denoiser.qp_avg = 0;
3172 cpi->denoiser.nmse_source_diff_count = 0;
3177 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3178 const FRAME_TYPE frame_type = cm->frame_type;
3180 int update_any_ref_buffers = 1;
3181 if (cpi->common.refresh_last_frame == 0 &&
3182 cpi->common.refresh_golden_frame == 0 &&
3183 cpi->common.refresh_alt_ref_frame == 0) {
3184 update_any_ref_buffers = 0;
3188 cm->filter_level = 0;
3190 struct vpx_usec_timer timer;
3192 vpx_clear_system_state();
3194 vpx_usec_timer_start(&timer);
3195 if (cpi->sf.auto_filter == 0) {
3196 #if CONFIG_TEMPORAL_DENOISING
3197 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3198 // Use the denoised buffer for selecting base loop filter level.
3199 // Denoised signal for current frame is stored in INTRA_FRAME.
3200 // No denoising on key frames.
3201 vp8cx_pick_filter_level_fast(
3202 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3204 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3207 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3210 #if CONFIG_TEMPORAL_DENOISING
3211 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3212 // Use the denoised buffer for selecting base loop filter level.
3213 // Denoised signal for current frame is stored in INTRA_FRAME.
3214 // No denoising on key frames.
3215 vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3218 vp8cx_pick_filter_level(cpi->Source, cpi);
3221 vp8cx_pick_filter_level(cpi->Source, cpi);
3225 if (cm->filter_level > 0) {
3226 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3229 vpx_usec_timer_mark(&timer);
3230 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3233 #if CONFIG_MULTITHREAD
3234 if (cpi->b_multi_threaded) {
3235 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3239 // No need to apply loop-filter if the encoded frame does not update
3240 // any reference buffers.
3241 if (cm->filter_level > 0 && update_any_ref_buffers) {
3242 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3245 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3248 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3249 unsigned char *dest,
3250 unsigned char *dest_end,
3251 unsigned int *frame_flags) {
3253 int frame_over_shoot_limit;
3254 int frame_under_shoot_limit;
3259 VP8_COMMON *cm = &cpi->common;
3260 int active_worst_qchanged = 0;
3262 #if !CONFIG_REALTIME_ONLY
3266 int zbin_oq_low = 0;
3269 int overshoot_seen = 0;
3270 int undershoot_seen = 0;
3273 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3274 cpi->oxcf.optimal_buffer_level / 100);
3275 int drop_mark75 = drop_mark * 2 / 3;
3276 int drop_mark50 = drop_mark / 4;
3277 int drop_mark25 = drop_mark / 8;
3279 /* Clear down mmx registers to allow floating point in what follows */
3280 vpx_clear_system_state();
3282 if (cpi->force_next_frame_intra) {
3283 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3284 cpi->force_next_frame_intra = 0;
3287 /* For an alt ref frame in 2 pass we skip the call to the second pass
3288 * function that sets the target bandwidth
3290 switch (cpi->pass) {
3291 #if !CONFIG_REALTIME_ONLY
3293 if (cpi->common.refresh_alt_ref_frame) {
3294 /* Per frame bit target for the alt ref frame */
3295 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3296 /* per second target bitrate */
3297 cpi->target_bandwidth =
3298 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3301 #endif // !CONFIG_REALTIME_ONLY
3303 cpi->per_frame_bandwidth =
3304 (int)(cpi->target_bandwidth / cpi->output_framerate);
3308 /* Default turn off buffer to buffer copying */
3309 cm->copy_buffer_to_gf = 0;
3310 cm->copy_buffer_to_arf = 0;
3312 /* Clear zbin over-quant value and mode boost values. */
3313 cpi->mb.zbin_over_quant = 0;
3314 cpi->mb.zbin_mode_boost = 0;
3316 /* Enable or disable mode based tweaking of the zbin
3317 * For 2 Pass Only used where GF/ARF prediction quality
3318 * is above a threshold
3320 cpi->mb.zbin_mode_boost_enabled = 1;
3321 if (cpi->pass == 2) {
3322 if (cpi->gfu_boost <= 400) {
3323 cpi->mb.zbin_mode_boost_enabled = 0;
3327 /* Current default encoder behaviour for the altref sign bias */
3328 if (cpi->source_alt_ref_active) {
3329 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3331 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3334 /* Check to see if a key frame is signaled
3335 * For two pass with auto key frame enabled cm->frame_type may already
3336 * be set, but not for one pass.
3338 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3339 (cpi->oxcf.auto_key &&
3340 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3341 /* Key frame from VFW/auto-keyframe/first frame */
3342 cm->frame_type = KEY_FRAME;
3343 #if CONFIG_TEMPORAL_DENOISING
3344 if (cpi->oxcf.noise_sensitivity == 4) {
3345 // For adaptive mode, reset denoiser to normal mode on key frame.
3346 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3351 #if CONFIG_MULTI_RES_ENCODING
3352 if (cpi->oxcf.mr_total_resolutions > 1) {
3353 LOWER_RES_FRAME_INFO *low_res_frame_info =
3354 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3356 if (cpi->oxcf.mr_encoder_id) {
3357 // TODO(marpan): This constraint shouldn't be needed, as we would like
3358 // to allow for key frame setting (forced or periodic) defined per
3359 // spatial layer. For now, keep this in.
3360 cm->frame_type = low_res_frame_info->frame_type;
3362 // Check if lower resolution is available for motion vector reuse.
3363 if (cm->frame_type != KEY_FRAME) {
3364 cpi->mr_low_res_mv_avail = 1;
3365 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3367 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3368 cpi->mr_low_res_mv_avail &=
3369 (cpi->current_ref_frames[LAST_FRAME] ==
3370 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3372 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3373 cpi->mr_low_res_mv_avail &=
3374 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3375 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3377 // Don't use altref to determine whether low res is available.
3378 // TODO (marpan): Should we make this type of condition on a
3379 // per-reference frame basis?
3381 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3382 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3383 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3388 // On a key frame: For the lowest resolution, keep track of the key frame
3389 // counter value. For the higher resolutions, reset the current video
3390 // frame counter to that of the lowest resolution.
3391 // This is done to the handle the case where we may stop/start encoding
3392 // higher layer(s). The restart-encoding of higher layer is only signaled
3393 // by a key frame for now.
3394 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3395 if (cm->frame_type == KEY_FRAME) {
3396 if (cpi->oxcf.mr_encoder_id) {
3397 // If the initial starting value of the buffer level is zero (this can
3398 // happen because we may have not started encoding this higher stream),
3399 // then reset it to non-zero value based on |starting_buffer_level|.
3400 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3402 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3403 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3404 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3405 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3406 lc->bits_off_target = lc->starting_buffer_level;
3407 lc->buffer_level = lc->starting_buffer_level;
3410 cpi->common.current_video_frame =
3411 low_res_frame_info->key_frame_counter_value;
3413 low_res_frame_info->key_frame_counter_value =
3414 cpi->common.current_video_frame;
3420 // Find the reference frame closest to the current frame.
3421 cpi->closest_reference_frame = LAST_FRAME;
3422 if (cm->frame_type != KEY_FRAME) {
3424 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3425 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3426 closest_ref = LAST_FRAME;
3427 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3428 closest_ref = GOLDEN_FRAME;
3429 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3430 closest_ref = ALTREF_FRAME;
3432 for (i = 1; i <= 3; ++i) {
3433 vpx_ref_frame_type_t ref_frame_type =
3434 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3435 if (cpi->ref_frame_flags & ref_frame_type) {
3436 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3437 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3442 cpi->closest_reference_frame = closest_ref;
3445 /* Set various flags etc to special state if it is a key frame */
3446 if (cm->frame_type == KEY_FRAME) {
3449 // Set the loop filter deltas and segmentation map update
3450 setup_features(cpi);
3452 /* The alternate reference frame cannot be active for a key frame */
3453 cpi->source_alt_ref_active = 0;
3455 /* Reset the RD threshold multipliers to default of * 1 (128) */
3456 for (i = 0; i < MAX_MODES; ++i) {
3457 cpi->mb.rd_thresh_mult[i] = 128;
3460 // Reset the zero_last counter to 0 on key frame.
3461 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3462 memset(cpi->consec_zero_last_mvbias, 0,
3463 (cpi->common.mb_rows * cpi->common.mb_cols));
3467 /* Experimental code for lagged compress and one pass
3468 * Initialise one_pass GF frames stats
3469 * Update stats used for GF selection
3472 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3474 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3475 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3476 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3477 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3478 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3479 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3480 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3481 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3482 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3486 update_rd_ref_frame_probs(cpi);
3488 if (cpi->drop_frames_allowed) {
3489 /* The reset to decimation 0 is only done here for one pass.
3490 * Once it is set two pass leaves decimation on till the next kf.
3492 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3493 cpi->decimation_factor--;
3496 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3497 cpi->decimation_factor = 1;
3499 } else if (cpi->buffer_level < drop_mark25 &&
3500 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3501 cpi->decimation_factor = 3;
3502 } else if (cpi->buffer_level < drop_mark50 &&
3503 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3504 cpi->decimation_factor = 2;
3505 } else if (cpi->buffer_level < drop_mark75 &&
3506 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3507 cpi->decimation_factor = 1;
3511 /* The following decimates the frame rate according to a regular
3512 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3513 * prevent buffer under-run in CBR mode. Alternatively it might be
3514 * desirable in some situations to drop frame rate but throw more bits
3517 * Note that dropping a key frame can be problematic if spatial
3518 * resampling is also active
3520 if (cpi->decimation_factor > 0) {
3521 switch (cpi->decimation_factor) {
3523 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3526 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3529 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3533 /* Note that we should not throw out a key frame (especially when
3534 * spatial resampling is enabled).
3536 if (cm->frame_type == KEY_FRAME) {
3537 cpi->decimation_count = cpi->decimation_factor;
3538 } else if (cpi->decimation_count > 0) {
3539 cpi->decimation_count--;
3541 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3542 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3543 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3546 #if CONFIG_MULTI_RES_ENCODING
3547 vp8_store_drop_frame_info(cpi);
3550 cm->current_video_frame++;
3551 cpi->frames_since_key++;
3552 // We advance the temporal pattern for dropped frames.
3553 cpi->temporal_pattern_counter++;
3555 #if CONFIG_INTERNAL_STATS
3559 cpi->buffer_level = cpi->bits_off_target;
3561 if (cpi->oxcf.number_of_layers > 1) {
3564 /* Propagate bits saved by dropping the frame to higher
3567 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3568 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3569 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3570 if (lc->bits_off_target > lc->maximum_buffer_size) {
3571 lc->bits_off_target = lc->maximum_buffer_size;
3573 lc->buffer_level = lc->bits_off_target;
3579 cpi->decimation_count = cpi->decimation_factor;
3582 cpi->decimation_count = 0;
3585 /* Decide how big to make the frame */
3586 if (!vp8_pick_frame_size(cpi)) {
3587 /*TODO: 2 drop_frame and return code could be put together. */
3588 #if CONFIG_MULTI_RES_ENCODING
3589 vp8_store_drop_frame_info(cpi);
3591 cm->current_video_frame++;
3592 cpi->frames_since_key++;
3593 // We advance the temporal pattern for dropped frames.
3594 cpi->temporal_pattern_counter++;
3598 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3599 * This has a knock on effect on active best quality as well.
3600 * For CBR if the buffer reaches its maximum level then we can no longer
3601 * save up bits for later frames so we might as well use them up
3602 * on the current frame.
3604 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3605 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3606 cpi->buffered_mode) {
3607 /* Max adjustment is 1/4 */
3608 int Adjustment = cpi->active_worst_quality / 4;
3613 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3614 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3615 cpi->oxcf.optimal_buffer_level) /
3618 if (buff_lvl_step) {
3620 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3627 cpi->active_worst_quality -= Adjustment;
3629 if (cpi->active_worst_quality < cpi->active_best_quality) {
3630 cpi->active_worst_quality = cpi->active_best_quality;
3635 /* Set an active best quality and if necessary active worst quality
3636 * There is some odd behavior for one pass here that needs attention.
3638 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3639 vpx_clear_system_state();
3641 Q = cpi->active_worst_quality;
3643 if (cm->frame_type == KEY_FRAME) {
3644 if (cpi->pass == 2) {
3645 if (cpi->gfu_boost > 600) {
3646 cpi->active_best_quality = kf_low_motion_minq[Q];
3648 cpi->active_best_quality = kf_high_motion_minq[Q];
3651 /* Special case for key frames forced because we have reached
3652 * the maximum key frame interval. Here force the Q to a range
3653 * based on the ambient Q to reduce the risk of popping
3655 if (cpi->this_key_frame_forced) {
3656 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3657 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3658 } else if (cpi->active_best_quality<cpi->avg_frame_qindex>> 2) {
3659 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3663 /* One pass more conservative */
3665 cpi->active_best_quality = kf_high_motion_minq[Q];
3669 else if (cpi->oxcf.number_of_layers == 1 &&
3670 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3671 /* Use the lower of cpi->active_worst_quality and recent
3672 * average Q as basis for GF/ARF Q limit unless last frame was
3675 if ((cpi->frames_since_key > 1) &&
3676 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3677 Q = cpi->avg_frame_qindex;
3680 /* For constrained quality dont allow Q less than the cq level */
3681 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3682 (Q < cpi->cq_target_quality)) {
3683 Q = cpi->cq_target_quality;
3686 if (cpi->pass == 2) {
3687 if (cpi->gfu_boost > 1000) {
3688 cpi->active_best_quality = gf_low_motion_minq[Q];
3689 } else if (cpi->gfu_boost < 400) {
3690 cpi->active_best_quality = gf_high_motion_minq[Q];
3692 cpi->active_best_quality = gf_mid_motion_minq[Q];
3695 /* Constrained quality use slightly lower active best. */
3696 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3697 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3700 /* One pass more conservative */
3702 cpi->active_best_quality = gf_high_motion_minq[Q];
3705 cpi->active_best_quality = inter_minq[Q];
3707 /* For the constant/constrained quality mode we dont want
3708 * q to fall below the cq level.
3710 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3711 (cpi->active_best_quality < cpi->cq_target_quality)) {
3712 /* If we are strongly undershooting the target rate in the last
3713 * frames then use the user passed in cq value not the auto
3716 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3717 cpi->active_best_quality = cpi->oxcf.cq_level;
3719 cpi->active_best_quality = cpi->cq_target_quality;
3724 /* If CBR and the buffer is as full then it is reasonable to allow
3725 * higher quality on the frames to prevent bits just going to waste.
3727 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3728 /* Note that the use of >= here elliminates the risk of a devide
3729 * by 0 error in the else if clause
3731 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3732 cpi->active_best_quality = cpi->best_quality;
3734 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3736 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3737 (cpi->oxcf.maximum_buffer_size -
3738 cpi->oxcf.optimal_buffer_level));
3739 int min_qadjustment =
3740 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3742 cpi->active_best_quality -= min_qadjustment;
3746 /* Make sure constrained quality mode limits are adhered to for the first
3747 * few frames of one pass encodes
3749 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3750 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3751 cpi->common.refresh_alt_ref_frame) {
3752 cpi->active_best_quality = cpi->best_quality;
3753 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3754 cpi->active_best_quality = cpi->cq_target_quality;
3758 /* Clip the active best and worst quality values to limits */
3759 if (cpi->active_worst_quality > cpi->worst_quality) {
3760 cpi->active_worst_quality = cpi->worst_quality;
3763 if (cpi->active_best_quality < cpi->best_quality) {
3764 cpi->active_best_quality = cpi->best_quality;
3767 if (cpi->active_worst_quality < cpi->active_best_quality) {
3768 cpi->active_worst_quality = cpi->active_best_quality;
3771 /* Determine initial Q to try */
3772 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3774 #if !CONFIG_REALTIME_ONLY
3776 /* Set highest allowed value for Zbin over quant */
3777 if (cm->frame_type == KEY_FRAME) {
3779 } else if ((cpi->oxcf.number_of_layers == 1) &&
3780 ((cm->refresh_alt_ref_frame ||
3781 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3784 zbin_oq_high = ZBIN_OQ_MAX;
3788 /* Setup background Q adjustment for error resilient mode.
3789 * For multi-layer encodes only enable this for the base layer.
3791 if (cpi->cyclic_refresh_mode_enabled) {
3792 // Special case for screen_content_mode with golden frame updates.
3794 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3795 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3796 cyclic_background_refresh(cpi, Q, 0);
3798 disable_segmentation(cpi);
3802 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3803 &frame_over_shoot_limit);
3805 #if !CONFIG_REALTIME_ONLY
3806 /* Limit Q range for the adaptive loop. */
3807 bottom_index = cpi->active_best_quality;
3808 top_index = cpi->active_worst_quality;
3809 q_low = cpi->active_best_quality;
3810 q_high = cpi->active_worst_quality;
3813 vp8_save_coding_context(cpi);
3817 scale_and_extend_source(cpi->un_scaled_source, cpi);
3819 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3820 // Option to apply spatial blur under the aggressive or adaptive
3821 // (temporal denoising) mode.
3822 if (cpi->oxcf.noise_sensitivity >= 3) {
3823 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3824 vp8_de_noise(cm, cpi->Source, cpi->Source,
3825 cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
3830 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3832 if (cpi->oxcf.noise_sensitivity > 0) {
3836 switch (cpi->oxcf.noise_sensitivity) {
3837 case 1: l = 20; break;
3838 case 2: l = 40; break;
3839 case 3: l = 60; break;
3840 case 4: l = 80; break;
3841 case 5: l = 100; break;
3842 case 6: l = 150; break;
3845 if (cm->frame_type == KEY_FRAME) {
3846 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3848 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3850 src = cpi->Source->y_buffer;
3852 if (cpi->Source->y_stride < 0) {
3853 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3860 #ifdef OUTPUT_YUV_SRC
3861 vp8_write_yuv_frame(yuv_file, cpi->Source);
3865 vpx_clear_system_state();
3867 vp8_set_quantizer(cpi, Q);
3869 /* setup skip prob for costing in mode/mv decision */
3870 if (cpi->common.mb_no_coeff_skip) {
3871 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3873 if (cm->frame_type != KEY_FRAME) {
3874 if (cpi->common.refresh_alt_ref_frame) {
3875 if (cpi->last_skip_false_probs[2] != 0) {
3876 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3880 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3881 cpi->last_skip_probs_q[2])<=16 )
3882 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3883 else if (cpi->last_skip_false_probs[2]!=0)
3884 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3885 cpi->prob_skip_false ) / 2;
3887 } else if (cpi->common.refresh_golden_frame) {
3888 if (cpi->last_skip_false_probs[1] != 0) {
3889 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3893 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3894 cpi->last_skip_probs_q[1])<=16 )
3895 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3896 else if (cpi->last_skip_false_probs[1]!=0)
3897 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3898 cpi->prob_skip_false ) / 2;
3901 if (cpi->last_skip_false_probs[0] != 0) {
3902 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3906 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3907 cpi->last_skip_probs_q[0])<=16 )
3908 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3909 else if(cpi->last_skip_false_probs[0]!=0)
3910 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3911 cpi->prob_skip_false ) / 2;
3915 /* as this is for cost estimate, let's make sure it does not
3916 * go extreme eitehr way
3918 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3920 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3922 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3923 cpi->prob_skip_false = 1;
3931 FILE *f = fopen("skip.stt", "a");
3932 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3939 if (cm->frame_type == KEY_FRAME) {
3940 if (resize_key_frame(cpi)) {
3941 /* If the frame size has changed, need to reset Q, quantizer,
3942 * and background refresh.
3944 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3945 if (cpi->cyclic_refresh_mode_enabled) {
3946 if (cpi->current_layer == 0) {
3947 cyclic_background_refresh(cpi, Q, 0);
3949 disable_segmentation(cpi);
3952 // Reset the zero_last counter to 0 on key frame.
3953 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3954 memset(cpi->consec_zero_last_mvbias, 0,
3955 (cpi->common.mb_rows * cpi->common.mb_cols));
3956 vp8_set_quantizer(cpi, Q);
3959 vp8_setup_key_frame(cpi);
3962 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3964 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3966 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3967 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3970 if (cm->refresh_entropy_probs == 0) {
3971 /* save a copy for later refresh */
3972 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3975 vp8_update_coef_context(cpi);
3977 vp8_update_coef_probs(cpi);
3979 /* transform / motion compensation build reconstruction frame
3980 * +pack coef partitions
3982 vp8_encode_frame(cpi);
3984 /* cpi->projected_frame_size is not needed for RT mode */
3987 /* transform / motion compensation build reconstruction frame */
3988 vp8_encode_frame(cpi);
3989 if (cpi->oxcf.screen_content_mode == 2) {
3990 if (vp8_drop_encodedframe_overshoot(cpi, Q)) return;
3993 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3994 cpi->projected_frame_size =
3995 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3997 vpx_clear_system_state();
3999 /* Test to see if the stats generated for this frame indicate that
4000 * we should have coded a key frame (assuming that we didn't)!
4003 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
4004 cpi->compressor_speed != 2) {
4005 #if !CONFIG_REALTIME_ONLY
4006 if (decide_key_frame(cpi)) {
4007 /* Reset all our sizing numbers and recode */
4008 cm->frame_type = KEY_FRAME;
4010 vp8_pick_frame_size(cpi);
4012 /* Clear the Alt reference frame active flag when we have
4015 cpi->source_alt_ref_active = 0;
4017 // Set the loop filter deltas and segmentation map update
4018 setup_features(cpi);
4020 vp8_restore_coding_context(cpi);
4022 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4024 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
4025 &frame_over_shoot_limit);
4027 /* Limit Q range for the adaptive loop. */
4028 bottom_index = cpi->active_best_quality;
4029 top_index = cpi->active_worst_quality;
4030 q_low = cpi->active_best_quality;
4031 q_high = cpi->active_worst_quality;
4041 vpx_clear_system_state();
4043 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4045 /* Are we are overshooting and up against the limit of active max Q. */
4046 if (((cpi->pass != 2) ||
4047 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4048 (Q == cpi->active_worst_quality) &&
4049 (cpi->active_worst_quality < cpi->worst_quality) &&
4050 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4051 int over_size_percent =
4052 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4053 frame_over_shoot_limit;
4055 /* If so is there any scope for relaxing it */
4056 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4057 (over_size_percent > 0)) {
4058 cpi->active_worst_quality++;
4059 /* Assume 1 qstep = about 4% on frame size. */
4060 over_size_percent = (int)(over_size_percent * 0.96);
4062 #if !CONFIG_REALTIME_ONLY
4063 top_index = cpi->active_worst_quality;
4064 #endif // !CONFIG_REALTIME_ONLY
4065 /* If we have updated the active max Q do not call
4066 * vp8_update_rate_correction_factors() this loop.
4068 active_worst_qchanged = 1;
4070 active_worst_qchanged = 0;
4073 #if CONFIG_REALTIME_ONLY
4076 /* Special case handling for forced key frames */
4077 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4079 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4081 /* The key frame is not good enough */
4082 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4084 q_high = (Q > q_low) ? (Q - 1) : q_low;
4087 Q = (q_high + q_low) >> 1;
4089 /* The key frame is much better than the previous frame */
4090 else if (kf_err < (cpi->ambient_err >> 1)) {
4092 q_low = (Q < q_high) ? (Q + 1) : q_high;
4095 Q = (q_high + q_low + 1) >> 1;
4098 /* Clamp Q to upper and lower limits: */
4101 } else if (Q < q_low) {
4108 /* Is the projected frame size out of range and are we allowed
4109 * to attempt to recode.
4111 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4112 frame_under_shoot_limit, Q, top_index,
4117 /* Frame size out of permitted range. Update correction factor
4118 * & compute new Q to try...
4121 /* Frame is too large */
4122 if (cpi->projected_frame_size > cpi->this_frame_target) {
4123 /* Raise Qlow as to at least the current value */
4124 q_low = (Q < q_high) ? (Q + 1) : q_high;
4126 /* If we are using over quant do the same for zbin_oq_low */
4127 if (cpi->mb.zbin_over_quant > 0) {
4128 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4129 ? (cpi->mb.zbin_over_quant + 1)
4133 if (undershoot_seen) {
4134 /* Update rate_correction_factor unless
4135 * cpi->active_worst_quality has changed.
4137 if (!active_worst_qchanged) {
4138 vp8_update_rate_correction_factors(cpi, 1);
4141 Q = (q_high + q_low + 1) / 2;
4143 /* Adjust cpi->zbin_over_quant (only allowed when Q
4147 cpi->mb.zbin_over_quant = 0;
4149 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4150 ? (cpi->mb.zbin_over_quant + 1)
4152 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4155 /* Update rate_correction_factor unless
4156 * cpi->active_worst_quality has changed.
4158 if (!active_worst_qchanged) {
4159 vp8_update_rate_correction_factors(cpi, 0);
4162 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4164 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4166 vp8_update_rate_correction_factors(cpi, 0);
4167 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4174 /* Frame is too small */
4176 if (cpi->mb.zbin_over_quant == 0) {
4177 /* Lower q_high if not using over quant */
4178 q_high = (Q > q_low) ? (Q - 1) : q_low;
4180 /* else lower zbin_oq_high */
4181 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4182 ? (cpi->mb.zbin_over_quant - 1)
4186 if (overshoot_seen) {
4187 /* Update rate_correction_factor unless
4188 * cpi->active_worst_quality has changed.
4190 if (!active_worst_qchanged) {
4191 vp8_update_rate_correction_factors(cpi, 1);
4194 Q = (q_high + q_low) / 2;
4196 /* Adjust cpi->zbin_over_quant (only allowed when Q
4200 cpi->mb.zbin_over_quant = 0;
4202 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4205 /* Update rate_correction_factor unless
4206 * cpi->active_worst_quality has changed.
4208 if (!active_worst_qchanged) {
4209 vp8_update_rate_correction_factors(cpi, 0);
4212 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4214 /* Special case reset for qlow for constrained quality.
4215 * This should only trigger where there is very substantial
4216 * undershoot on a frame and the auto cq level is above
4217 * the user passsed in value.
4219 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4224 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4226 vp8_update_rate_correction_factors(cpi, 0);
4227 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4232 undershoot_seen = 1;
4235 /* Clamp Q to upper and lower limits: */
4238 } else if (Q < q_low) {
4242 /* Clamp cpi->zbin_over_quant */
4243 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low)
4245 : (cpi->mb.zbin_over_quant > zbin_oq_high)
4247 : cpi->mb.zbin_over_quant;
4253 #endif // CONFIG_REALTIME_ONLY
4255 if (cpi->is_src_frame_alt_ref) Loop = 0;
4258 vp8_restore_coding_context(cpi);
4260 #if CONFIG_INTERNAL_STATS
4261 cpi->tot_recode_hits++;
4264 } while (Loop == 1);
4266 #if defined(DROP_UNCODED_FRAMES)
4267 /* if there are no coded macroblocks at all drop this frame */
4268 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4269 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4270 cpi->common.current_video_frame++;
4271 cpi->frames_since_key++;
4272 cpi->drop_frame_count++;
4273 // We advance the temporal pattern for dropped frames.
4274 cpi->temporal_pattern_counter++;
4277 cpi->drop_frame_count = 0;
4281 /* Experimental code for lagged and one pass
4282 * Update stats used for one pass GF selection
4285 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4286 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4287 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4291 /* Special case code to reduce pulsing when key frames are forced at a
4292 * fixed interval. Note the reconstruction error if it is the frame before
4293 * the force key frame
4295 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4297 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4300 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4301 * Last frame has one more line(add to bottom) and one more column(add to
4302 * right) than cm->mip. The edge elements are initialized to 0.
4304 #if CONFIG_MULTI_RES_ENCODING
4305 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4307 if (cm->show_frame) /* do not save for altref frame */
4312 /* Point to beginning of allocated MODE_INFO arrays. */
4313 MODE_INFO *tmp = cm->mip;
4315 if (cm->frame_type != KEY_FRAME) {
4316 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4317 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4318 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4319 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4320 tmp->mbmi.mv.as_int;
4323 cpi->lf_ref_frame_sign_bias[mb_col +
4324 mb_row * (cm->mode_info_stride + 1)] =
4325 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4326 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4327 tmp->mbmi.ref_frame;
4334 /* Count last ref frame 0,0 usage on current encoded frame. */
4338 /* Point to beginning of MODE_INFO arrays. */
4339 MODE_INFO *tmp = cm->mi;
4341 cpi->zeromv_count = 0;
4343 if (cm->frame_type != KEY_FRAME) {
4344 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4345 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4346 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4347 cpi->zeromv_count++;
4356 #if CONFIG_MULTI_RES_ENCODING
4357 vp8_cal_dissimilarity(cpi);
4360 /* Update the GF useage maps.
4361 * This is done after completing the compression of a frame when all
4362 * modes etc. are finalized but before loop filter
4364 if (cpi->oxcf.number_of_layers == 1) {
4365 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4368 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4372 FILE *f = fopen("gfactive.stt", "a");
4373 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4378 /* For inter frames the current default behavior is that when
4379 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4380 * This is purely an encoder decision at present.
4382 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) {
4383 cm->copy_buffer_to_arf = 2;
4385 cm->copy_buffer_to_arf = 0;
4388 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4390 #if CONFIG_TEMPORAL_DENOISING
4391 // Get some measure of the amount of noise, by measuring the (partial) mse
4392 // between source and denoised buffer, for y channel. Partial refers to
4393 // computing the sse for a sub-sample of the frame (i.e., skip x blocks along
4395 // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
4396 // Do this every ~8 frames, to further reduce complexity.
4397 // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity <
4399 // should be removed in favor of the process_denoiser_mode_change() function
4401 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4402 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4403 cm->frame_type != KEY_FRAME) {
4404 cpi->mse_source_denoised = measure_square_diff_partial(
4405 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4408 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4409 // of source diff (between current and previous frame), and determine if we
4410 // should switch the denoiser mode. Sampling refers to computing the mse for
4411 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4412 // only for blocks in that set that have used ZEROMV LAST, along with some
4413 // constraint on the sum diff between blocks. This process is called every
4414 // ~8 frames, to further reduce complexity.
4415 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4416 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4417 process_denoiser_mode_change(cpi);
4421 #if CONFIG_MULTITHREAD
4422 if (cpi->b_multi_threaded) {
4423 /* start loopfilter in separate thread */
4424 sem_post(&cpi->h_event_start_lpf);
4425 cpi->b_lpf_running = 1;
4429 vp8_loopfilter_frame(cpi, cm);
4432 update_reference_frames(cpi);
4434 #ifdef OUTPUT_YUV_DENOISED
4435 vp8_write_yuv_frame(yuv_denoised_file,
4436 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4439 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4440 if (cpi->oxcf.error_resilient_mode) {
4441 cm->refresh_entropy_probs = 0;
4445 #if CONFIG_MULTITHREAD
4446 /* wait until filter_level is picked so that we can continue with stream
4448 if (cpi->b_multi_threaded) sem_wait(&cpi->h_event_end_lpf);
4451 /* build the bitstream */
4452 vp8_pack_bitstream(cpi, dest, dest_end, size);
4454 /* Move storing frame_type out of the above loop since it is also
4455 * needed in motion search besides loopfilter */
4456 cm->last_frame_type = cm->frame_type;
4458 /* Update rate control heuristics */
4459 cpi->total_byte_count += (*size);
4460 cpi->projected_frame_size = (int)(*size) << 3;
4462 if (cpi->oxcf.number_of_layers > 1) {
4464 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4465 cpi->layer_context[i].total_byte_count += (*size);
4469 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4471 cpi->last_q[cm->frame_type] = cm->base_qindex;
4473 if (cm->frame_type == KEY_FRAME) {
4474 vp8_adjust_key_frame_context(cpi);
4477 /* Keep a record of ambient average Q. */
4478 if (cm->frame_type != KEY_FRAME) {
4479 cpi->avg_frame_qindex =
4480 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4483 /* Keep a record from which we can calculate the average Q excluding
4484 * GF updates and key frames
4486 if ((cm->frame_type != KEY_FRAME) &&
4487 ((cpi->oxcf.number_of_layers > 1) ||
4488 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4491 /* Calculate the average Q for normal inter frames (not key or GFU
4494 if (cpi->pass == 2) {
4495 cpi->ni_tot_qi += Q;
4496 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4498 /* Damp value for first few frames */
4499 if (cpi->ni_frames > 150) {
4500 cpi->ni_tot_qi += Q;
4501 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4503 /* For one pass, early in the clip ... average the current frame Q
4504 * value with the worstq entered by the user as a dampening measure
4507 cpi->ni_tot_qi += Q;
4509 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4512 /* If the average Q is higher than what was used in the last
4513 * frame (after going through the recode loop to keep the frame
4514 * size within range) then use the last frame value - 1. The -1
4515 * is designed to stop Q and hence the data rate, from
4516 * progressively falling away during difficult sections, but at
4517 * the same time reduce the number of iterations around the
4520 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4524 /* Update the buffer level variable. */
4525 /* Non-viewable frames are a special case and are treated as pure overhead. */
4526 if (!cm->show_frame) {
4527 cpi->bits_off_target -= cpi->projected_frame_size;
4529 cpi->bits_off_target +=
4530 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4533 /* Clip the buffer level to the maximum specified buffer size */
4534 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4535 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4538 // If the frame dropper is not enabled, don't let the buffer level go below
4539 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4540 // this for screen content input.
4541 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4542 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4543 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4546 /* Rolling monitors of whether we are over or underspending used to
4547 * help regulate min and Max Q in two pass.
4549 cpi->rolling_target_bits =
4550 ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4551 cpi->rolling_actual_bits =
4552 ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4553 cpi->long_rolling_target_bits =
4554 ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4555 cpi->long_rolling_actual_bits =
4556 ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) /
4559 /* Actual bits spent */
4560 cpi->total_actual_bits += cpi->projected_frame_size;
4563 cpi->total_target_vs_actual +=
4564 (cpi->this_frame_target - cpi->projected_frame_size);
4566 cpi->buffer_level = cpi->bits_off_target;
4568 /* Propagate values to higher temporal layers */
4569 if (cpi->oxcf.number_of_layers > 1) {
4572 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4573 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4574 int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
4575 cpi->projected_frame_size);
4577 lc->bits_off_target += bits_off_for_this_layer;
4579 /* Clip buffer level to maximum buffer size for the layer */
4580 if (lc->bits_off_target > lc->maximum_buffer_size) {
4581 lc->bits_off_target = lc->maximum_buffer_size;
4584 lc->total_actual_bits += cpi->projected_frame_size;
4585 lc->total_target_vs_actual += bits_off_for_this_layer;
4586 lc->buffer_level = lc->bits_off_target;
4590 /* Update bits left to the kf and gf groups to account for overshoot
4591 * or undershoot on these frames
4593 if (cm->frame_type == KEY_FRAME) {
4594 cpi->twopass.kf_group_bits +=
4595 cpi->this_frame_target - cpi->projected_frame_size;
4597 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4598 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4599 cpi->twopass.gf_group_bits +=
4600 cpi->this_frame_target - cpi->projected_frame_size;
4602 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4605 if (cm->frame_type != KEY_FRAME) {
4606 if (cpi->common.refresh_alt_ref_frame) {
4607 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4608 cpi->last_skip_probs_q[2] = cm->base_qindex;
4609 } else if (cpi->common.refresh_golden_frame) {
4610 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4611 cpi->last_skip_probs_q[1] = cm->base_qindex;
4613 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4614 cpi->last_skip_probs_q[0] = cm->base_qindex;
4616 /* update the baseline */
4617 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4621 #if 0 && CONFIG_INTERNAL_STATS
4623 FILE *f = fopen("tmp.stt", "a");
4625 vpx_clear_system_state();
4627 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4628 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4629 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4630 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4631 cpi->common.current_video_frame, cpi->this_frame_target,
4632 cpi->projected_frame_size,
4633 (cpi->projected_frame_size - cpi->this_frame_target),
4634 cpi->total_target_vs_actual,
4636 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4637 cpi->total_actual_bits, cm->base_qindex,
4638 cpi->active_best_quality, cpi->active_worst_quality,
4639 cpi->ni_av_qi, cpi->cq_target_quality,
4640 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4641 cm->frame_type, cpi->gfu_boost,
4642 cpi->twopass.est_max_qcorrection_factor,
4643 cpi->twopass.bits_left,
4644 cpi->twopass.total_left_stats.coded_error,
4645 (double)cpi->twopass.bits_left /
4646 cpi->twopass.total_left_stats.coded_error,
4647 cpi->tot_recode_hits);
4649 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4650 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4651 "%8.2lf %"PRId64" %10.3lf %8d\n",
4652 cpi->common.current_video_frame, cpi->this_frame_target,
4653 cpi->projected_frame_size,
4654 (cpi->projected_frame_size - cpi->this_frame_target),
4655 cpi->total_target_vs_actual,
4657 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4658 cpi->total_actual_bits, cm->base_qindex,
4659 cpi->active_best_quality, cpi->active_worst_quality,
4660 cpi->ni_av_qi, cpi->cq_target_quality,
4661 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4662 cm->frame_type, cpi->gfu_boost,
4663 cpi->twopass.est_max_qcorrection_factor,
4664 cpi->twopass.bits_left,
4665 cpi->twopass.total_left_stats.coded_error,
4666 cpi->tot_recode_hits);
4671 FILE *fmodes = fopen("Modes.stt", "a");
4673 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4674 cpi->common.current_video_frame,
4675 cm->frame_type, cm->refresh_golden_frame,
4676 cm->refresh_alt_ref_frame);
4678 fprintf(fmodes, "\n");
4686 if (cm->refresh_golden_frame == 1) {
4687 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4689 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4692 if (cm->refresh_alt_ref_frame == 1) {
4693 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4695 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4698 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4699 cpi->gold_is_last = 1;
4700 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4701 /* 1 refreshed but not the other */
4702 cpi->gold_is_last = 0;
4705 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4706 cpi->alt_is_last = 1;
4707 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4708 /* 1 refreshed but not the other */
4709 cpi->alt_is_last = 0;
4712 if (cm->refresh_alt_ref_frame &
4713 cm->refresh_golden_frame) { /* both refreshed */
4714 cpi->gold_is_alt = 1;
4715 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4716 /* 1 refreshed but not the other */
4717 cpi->gold_is_alt = 0;
4720 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4722 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4724 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4726 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4728 if (!cpi->oxcf.error_resilient_mode) {
4729 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4730 (cm->frame_type != KEY_FRAME)) {
4731 /* Update the alternate reference frame stats as appropriate. */
4732 update_alt_ref_frame_stats(cpi);
4734 /* Update the Golden frame stats as appropriate. */
4735 update_golden_frame_stats(cpi);
4739 if (cm->frame_type == KEY_FRAME) {
4740 /* Tell the caller that the frame was coded as a key frame */
4741 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4743 /* As this frame is a key frame the next defaults to an inter frame. */
4744 cm->frame_type = INTER_FRAME;
4746 cpi->last_frame_percent_intra = 100;
4748 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4750 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4753 /* Clear the one shot update flags for segmentation map and mode/ref
4754 * loop filter deltas.
4756 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4757 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4758 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4760 /* Dont increment frame counters if this was an altref buffer update
4763 if (cm->show_frame) {
4764 cm->current_video_frame++;
4765 cpi->frames_since_key++;
4766 cpi->temporal_pattern_counter++;
4769 /* reset to normal state now that we are done. */
4775 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4776 recon_file = fopen(filename, "wb");
4777 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4778 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4784 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4786 #if !CONFIG_REALTIME_ONLY
/* Final (second) pass encode of a single frame under two-pass rate control.
 * NOTE(review): this chunk is a sampled listing -- lines are missing between
 * the numbered fragments below, so only the visible statements are documented.
 * Runs second-pass stat consumption, encodes the frame, then adjusts the
 * remaining two-pass bit budget. */
4787 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4788 unsigned char *dest_end, unsigned int *frame_flags) {
/* Consume second-pass stats only for real source frames; an alt-ref
 * refresh is an internally generated frame. */
4789 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4791 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
/* Deduct the bits just spent (*size is in bytes, hence * 8). */
4792 cpi->twopass.bits_left -= 8 * (int)(*size);
4794 if (!cpi->common.refresh_alt_ref_frame) {
/* Credit back one frame interval's worth of the guaranteed VBR
 * minimum section rate (two_pass_vbrmin_section is a percentage). */
4795 double two_pass_min_rate =
4796 (double)(cpi->oxcf.target_bandwidth *
4797 cpi->oxcf.two_pass_vbrmin_section / 100);
4798 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
/* Accept one raw source frame from the application and push it into the
 * encoder's lookahead queue. Also accumulates time spent receiving data.
 * NOTE(review): sampled listing -- some parameter lines and return paths
 * are elided between the numbered fragments. */
4803 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4804 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4806 struct vpx_usec_timer timer;
4809 vpx_usec_timer_start(&timer);
4811 /* Reinit the lookahead buffer if the frame size changes */
4812 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
/* Mid-stream resize is only supported without lag (lookahead depth < 2). */
4813 assert(cpi->oxcf.lag_in_frames < 2);
4814 dealloc_raw_frame_buffers(cpi);
4815 alloc_raw_frame_buffers(cpi);
/* Queue the frame; the active map (if enabled) rides along with it. */
4818 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4819 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4822 vpx_usec_timer_mark(&timer);
4823 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
/* Return non-zero if the just-coded frame updates any decoder state
 * (reference buffers, entropy probs, segmentation or loop-filter deltas),
 * i.e. it cannot be dropped by a decoder without causing drift. */
4828 static int frame_is_reference(const VP8_COMP *cpi) {
4829 const VP8_COMMON *cm = &cpi->common;
4830 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4832 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4833 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4834 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4835 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4836 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
/* Top-level per-frame entry point: pulls a source frame from the lookahead,
 * selects frame type / reference refreshes, dispatches to the appropriate
 * pass encoder, and finalizes rate-control, context and stats bookkeeping.
 * NOTE(review): sampled listing -- many lines are elided between the
 * numbered fragments, so several blocks below are visibly incomplete. */
4839 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4840 size_t *size, unsigned char *dest,
4841 unsigned char *dest_end, int64_t *time_stamp,
4842 int64_t *time_end, int flush) {
4844 struct vpx_usec_timer tsctimer;
4845 struct vpx_usec_timer ticktimer;
4846 struct vpx_usec_timer cmptimer;
4847 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4849 if (!cpi) return -1;
/* Error trampoline: internal fatal errors longjmp back here. */
4853 if (setjmp(cpi->common.error.jmp)) {
4854 cpi->common.error.setjmp = 0;
4855 vpx_clear_system_state();
4856 return VPX_CODEC_CORRUPT_FRAME;
4859 cpi->common.error.setjmp = 1;
4861 vpx_usec_timer_start(&cmptimer);
4865 #if !CONFIG_REALTIME_ONLY
4866 /* Should we code an alternate reference frame */
4867 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4868 cpi->source_alt_ref_pending) {
4869 if ((cpi->source = vp8_lookahead_peek(
4870 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4871 cpi->alt_ref_source = cpi->source;
4872 if (cpi->oxcf.arnr_max_frames > 0) {
/* Build the temporally filtered alt-ref in cpi->alt_ref_buffer and
 * use it as the source for this (non-displayed) frame. */
4873 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4874 force_src_buffer = &cpi->alt_ref_buffer;
4876 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4877 cm->refresh_alt_ref_frame = 1;
4878 cm->refresh_golden_frame = 0;
4879 cm->refresh_last_frame = 0;
4881 /* Clear Pending alt Ref flag. */
4882 cpi->source_alt_ref_pending = 0;
4883 cpi->is_src_frame_alt_ref = 0;
4889 /* Read last frame source if we are encoding first pass. */
4890 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4891 if ((cpi->last_source =
4892 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
4897 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
/* Popping the frame previously used to build the alt-ref buffer. */
4900 cpi->is_src_frame_alt_ref =
4901 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4903 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4908 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4909 cpi->un_scaled_source = cpi->Source;
4910 *time_stamp = cpi->source->ts_start;
4911 *time_end = cpi->source->ts_end;
4912 *frame_flags = cpi->source->flags;
4914 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4915 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4919 #if !CONFIG_REALTIME_ONLY
/* Flushing at the end of pass 1: emit the final stats packet once. */
4921 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4922 vp8_end_first_pass(cpi); /* get last stats packet */
4923 cpi->twopass.first_pass_done = 1;
4931 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4932 cpi->first_time_stamp_ever = cpi->source->ts_start;
4933 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4936 /* adjust frame rates based on timestamps given */
4937 if (cm->show_frame) {
4938 int64_t this_duration;
4941 if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
4942 this_duration = cpi->source->ts_end - cpi->source->ts_start;
4945 int64_t last_duration;
4947 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
4948 last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4949 /* do a step update if the duration changes by 10% */
4950 if (last_duration) {
4951 step = (int)(((this_duration - last_duration) * 10 / last_duration));
4955 if (this_duration) {
/* Timestamps are in 0.1us units: 10000000.0 ticks per second. */
4957 cpi->ref_framerate = 10000000.0 / this_duration;
4959 double avg_duration, interval;
4961 /* Average this frame's rate into the last second's average
4962 * frame rate. If we haven't seen 1 second yet, then average
4963 * over the whole interval seen.
4965 interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
4966 if (interval > 10000000.0) interval = 10000000;
4968 avg_duration = 10000000.0 / cpi->ref_framerate;
4969 avg_duration *= (interval - avg_duration + this_duration);
4970 avg_duration /= interval;
4972 cpi->ref_framerate = 10000000.0 / avg_duration;
4974 #if CONFIG_MULTI_RES_ENCODING
4975 if (cpi->oxcf.mr_total_resolutions > 1) {
4976 LOWER_RES_FRAME_INFO *low_res_frame_info =
4977 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
4978 // Frame rate should be the same for all spatial layers in
4979 // multi-res-encoding (simulcast), so we constrain the frame for
4980 // higher layers to be that of lowest resolution. This is needed
4981 // as the application may decide to skip encoding a high layer and
4982 // then start again, in which case a big jump in time-stamps will
4983 // be received for that high layer, which will yield an incorrect
4984 // frame rate (from time-stamp adjustment in above calculation).
4985 if (cpi->oxcf.mr_encoder_id) {
4986 cpi->ref_framerate = low_res_frame_info->low_res_framerate;
4988 // Keep track of frame rate for lowest resolution.
4989 low_res_frame_info->low_res_framerate = cpi->ref_framerate;
4993 if (cpi->oxcf.number_of_layers > 1) {
4996 /* Update frame rates for each layer */
4997 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
4998 for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
5000 LAYER_CONTEXT *lc = &cpi->layer_context[i];
5001 lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
5004 vp8_new_framerate(cpi, cpi->ref_framerate);
5008 cpi->last_time_stamp_seen = cpi->source->ts_start;
5009 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
/* Temporal layers: pick the layer for this frame and restore its state. */
5012 if (cpi->oxcf.number_of_layers > 1) {
5015 update_layer_contexts(cpi);
5017 /* Restore layer specific context & set frame rate */
5018 if (cpi->temporal_layer_id >= 0) {
5019 layer = cpi->temporal_layer_id;
5023 .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
5025 restore_layer_context(cpi, layer);
5026 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
/* compressor_speed == 2 is realtime mode: track encode timing. */
5029 if (cpi->compressor_speed == 2) {
5030 vpx_usec_timer_start(&tsctimer);
5031 vpx_usec_timer_start(&ticktimer);
5034 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;
5036 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
/* On-the-fly bitpacking: pre-carve dest into per-partition buffers,
 * reserving 1/10 of the space for the control partition. */
5039 const int num_part = (1 << cm->multi_token_partition);
5040 /* the available bytes in dest */
5041 const unsigned long dest_size = dest_end - dest;
5042 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
5044 unsigned char *dp = dest;
5046 cpi->partition_d[0] = dp;
5047 dp += dest_size / 10; /* reserve 1/10 for control partition */
5048 cpi->partition_d_end[0] = dp;
5050 for (i = 0; i < num_part; ++i) {
5051 cpi->partition_d[i + 1] = dp;
5052 dp += tok_part_buff_size;
5053 cpi->partition_d_end[i + 1] = dp;
5058 /* start with a 0 size frame */
5061 /* Clear down mmx registers */
5062 vpx_clear_system_state();
5064 cm->frame_type = INTER_FRAME;
5065 cm->frame_flags = *frame_flags;
5069 if (cm->refresh_alt_ref_frame)
5071 cm->refresh_golden_frame = 0;
5072 cm->refresh_last_frame = 0;
5076 cm->refresh_golden_frame = 0;
5077 cm->refresh_last_frame = 1;
5081 /* find a free buffer for the new frame */
5084 for (; i < NUM_YV12_BUFFERS; ++i) {
5085 if (!cm->yv12_fb[i].flags) {
/* The encoder holds references, so a free buffer must always exist. */
5091 assert(i < NUM_YV12_BUFFERS);
/* Dispatch by pass: 1 = first pass, 2 = two-pass final, else one-pass. */
5093 switch (cpi->pass) {
5094 #if !CONFIG_REALTIME_ONLY
5095 case 1: Pass1Encode(cpi, size, dest, frame_flags); break;
5096 case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
5097 #endif // !CONFIG_REALTIME_ONLY
5099 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5103 if (cpi->compressor_speed == 2) {
5104 unsigned int duration, duration2;
5105 vpx_usec_timer_mark(&tsctimer);
5106 vpx_usec_timer_mark(&ticktimer);
5108 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5109 duration2 = (unsigned int)((double)duration / 2);
5111 if (cm->frame_type != KEY_FRAME) {
/* Exponential moving averages (7/8 old + 1/8 new) of encode timing. */
5112 if (cpi->avg_encode_time == 0) {
5113 cpi->avg_encode_time = duration;
5115 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5121 if (cpi->avg_pick_mode_time == 0) {
5122 cpi->avg_pick_mode_time = duration2;
5124 cpi->avg_pick_mode_time =
5125 (7 * cpi->avg_pick_mode_time + duration2) >> 3;
/* If entropy probs were not persisted, restore the saved context. */
5131 if (cm->refresh_entropy_probs == 0) {
5132 memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5135 /* Save the contexts separately for alt ref, gold and last. */
5136 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5137 if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5139 if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5141 if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5143 /* if its a dropped frame honor the requests on subsequent frames */
5145 cpi->droppable = !frame_is_reference(cpi);
5147 /* return to normal state */
5148 cm->refresh_entropy_probs = 1;
5149 cm->refresh_alt_ref_frame = 0;
5150 cm->refresh_golden_frame = 0;
5151 cm->refresh_last_frame = 1;
5152 cm->frame_type = INTER_FRAME;
5155 /* Save layer specific state */
5156 if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi);
5158 vpx_usec_timer_mark(&cmptimer);
5159 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5161 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
5162 generate_psnr_packet(cpi);
5165 #if CONFIG_INTERNAL_STATS
5167 if (cpi->pass != 1) {
5168 cpi->bytes += *size;
5170 if (cm->show_frame) {
5171 cpi->common.show_frame_mi = cpi->common.mi;
/* PSNR accounting: source vs reconstruction, per plane then combined. */
5174 if (cpi->b_calculate_psnr) {
5175 uint64_t ye, ue, ve;
5177 YV12_BUFFER_CONFIG *orig = cpi->Source;
5178 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5179 unsigned int y_width = cpi->common.Width;
5180 unsigned int y_height = cpi->common.Height;
5181 unsigned int uv_width = (y_width + 1) / 2;
5182 unsigned int uv_height = (y_height + 1) / 2;
5183 int y_samples = y_height * y_width;
5184 int uv_samples = uv_height * uv_width;
5185 int t_samples = y_samples + 2 * uv_samples;
5188 ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
5189 recon->y_stride, y_width, y_height);
5191 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
5192 recon->uv_stride, uv_width, uv_height);
5194 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
5195 recon->uv_stride, uv_width, uv_height);
5197 sq_error = (double)(ye + ue + ve);
5199 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5201 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5202 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5203 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5204 cpi->total_sq_error += sq_error;
5205 cpi->total += frame_psnr;
/* Second PSNR/SSIM pass against the deblocked (post-processed) frame. */
5208 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5210 double frame_psnr2, frame_ssim2 = 0;
5213 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
5214 cm->filter_level * 10 / 6, 1, 0);
5215 vpx_clear_system_state();
5217 ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
5218 pp->y_stride, y_width, y_height);
5220 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
5221 pp->uv_stride, uv_width, uv_height);
5223 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
5224 pp->uv_stride, uv_width, uv_height);
5226 sq_error2 = (double)(ye + ue + ve);
5228 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5230 cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5231 cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5232 cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5233 cpi->total_sq_error2 += sq_error2;
5234 cpi->totalp += frame_psnr2;
5237 vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);
5239 cpi->summed_quality += frame_ssim2 * weight;
5240 cpi->summed_weights += weight;
/* Propagate per-frame stats to this and all higher temporal layers. */
5242 if (cpi->oxcf.number_of_layers > 1) {
5245 for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
5246 cpi->frames_in_layer[i]++;
5248 cpi->bytes_in_layer[i] += *size;
5249 cpi->sum_psnr[i] += frame_psnr;
5250 cpi->sum_psnr_p[i] += frame_psnr2;
5251 cpi->total_error2[i] += sq_error;
5252 cpi->total_error2_p[i] += sq_error2;
5253 cpi->sum_ssim[i] += frame_ssim2 * weight;
5254 cpi->sum_weights[i] += weight;
/* Debug-only skip statistics (normally compiled out). */
5265 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5267 skiptruecount += cpi->skip_true_count;
5268 skipfalsecount += cpi->skip_false_count;
5276 FILE *f = fopen("skip.stt", "a");
5277 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5279 if (cpi->is_src_frame_alt_ref == 1)
5280 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5288 cpi->common.error.setjmp = 0;
5290 #if CONFIG_MULTITHREAD
5291 /* wait for the lpf thread to be done */
5292 if (cpi->b_multi_threaded && cpi->b_lpf_running) {
5293 sem_wait(&cpi->h_event_end_lpf);
5294 cpi->b_lpf_running = 0;
/* Return the most recently reconstructed displayable frame to the caller,
 * optionally post-processed (when CONFIG_POSTPROC paths are compiled in).
 * NOTE(review): sampled listing -- the early-return for an alt-ref refresh
 * and the postproc #if/#else structure are partly elided below. */
5301 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
5302 vp8_ppflags_t *flags) {
/* An alt-ref refresh is not a displayable frame. */
5303 if (cpi->common.refresh_alt_ref_frame) {
5309 cpi->common.show_frame_mi = cpi->common.mi;
5310 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5314 if (cpi->common.frame_to_show) {
/* Shallow copy of the buffer descriptor, with display (not aligned)
 * dimensions patched in. */
5315 *dest = *cpi->common.frame_to_show;
5316 dest->y_width = cpi->common.Width;
5317 dest->y_height = cpi->common.Height;
5318 dest->uv_height = cpi->common.Height / 2;
5325 vpx_clear_system_state();
/* Configure region-of-interest segmentation: a per-MB segment map plus
 * per-segment delta Q, delta loop-filter and encode-breakout thresholds.
 * Returns 0 on success, -1 on validation failure (visible checks below).
 * NOTE(review): sampled listing -- the NULL-map/disable path and the
 * return statements are elided between the numbered fragments. */
5330 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5331 unsigned int cols, int delta_q[4], int delta_lf[4],
5332 unsigned int threshold[4]) {
5333 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5334 int internal_delta_q[MAX_MB_SEGMENTS];
/* Deltas are range-checked against +/-63 in the external Q domain. */
5335 const int range = 63;
5338 // This method is currently incompatible with the cyclic refresh method
5339 if (cpi->cyclic_refresh_mode_enabled) return -1;
5341 // Check number of rows and columns match
5342 if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
5346 // Range check the delta Q values and convert the external Q range values
5347 // to internal ones.
5348 if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
5349 (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) {
5353 // Range check the delta lf values
5354 if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
5355 (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) {
5360 disable_segmentation(cpi);
5364 // Translate the external delta q values to internal values.
5365 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
/* q_trans maps external 0..63 Q to internal qindex; sign is preserved. */
5366 internal_delta_q[i] =
5367 (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
5370 /* Set the segmentation Map */
5371 set_segmentation_map(cpi, map);
5373 /* Activate segmentation. */
5374 enable_segmentation(cpi);
5376 /* Set up the quant segment data */
5377 feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5378 feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5379 feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5380 feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5382 /* Set up the loop segment data s */
5383 feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5384 feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5385 feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5386 feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
/* Per-segment encode-breakout (early-skip) thresholds. */
5388 cpi->segment_encode_breakout[0] = threshold[0];
5389 cpi->segment_encode_breakout[1] = threshold[1];
5390 cpi->segment_encode_breakout[2] = threshold[2];
5391 cpi->segment_encode_breakout[3] = threshold[3];
5393 /* Initialise the feature data structure */
5394 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
/* Install a per-macroblock active map (1 = encode, 0 = skip/static).
 *
 * The map is copied only when its dimensions match the current MB grid.
 * NOTE(review): the `if (map) { ... } else { ... }` skeleton and the
 * return statements are missing from this excerpt — presumably a NULL map
 * disables the feature and a dimension mismatch returns -1; verify against
 * the full source.
 */
5399 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5400 unsigned int cols) {
5401 if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
/* One byte per macroblock, row-major. */
5403 memcpy(cpi->active_map, map, rows * cols);
5404 cpi->active_map_enabled = 1;
5406 cpi->active_map_enabled = 0;
/* Set the encoder's internal down-scaling factors for each axis.
 *
 * Modes range from NORMAL (no scaling) up to ONETWO (1/2 size); values
 * beyond ONETWO are rejected.
 * NOTE(review): the rejecting `else`/`return -1;` arms, the success
 * `return 0;`, and the closing brace are missing from this excerpt —
 * verify against the full source.
 */
5415 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
5416 VPX_SCALING vert_mode) {
5417 if (horiz_mode <= ONETWO) {
5418 cpi->common.horiz_scale = horiz_mode;
5423 if (vert_mode <= ONETWO) {
5424 cpi->common.vert_scale = vert_mode;
/* Sum of squared error between two frames' Y planes.
 *
 * Walks the luma plane in 16x16 tiles, accumulating the MSE-kernel output
 * for each tile. Both buffers are addressed through their own strides, so
 * source and reconstruction may have different padding.
 * NOTE(review): the declarations of `i`, `j`, `Total` and `sse`, the
 * trailing `&sse);` argument of the vpx_mse16x16 call, and the
 * `return Total;` are missing from this excerpt — verify against the
 * full source.
 */
5432 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
5436 unsigned char *src = source->y_buffer;
5437 unsigned char *dst = dest->y_buffer;
5439 /* Loop through the Y plane raw and reconstruction data summing
5440 * (square differences)
5442 for (i = 0; i < source->y_height; i += 16) {
5443 for (j = 0; j < source->y_width; j += 16) {
/* vpx_mse16x16 returns the per-block SSE via its out-parameter. */
5445 Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
/* Advance both cursors one 16-pixel row of blocks. */
5449 src += 16 * source->y_stride;
5450 dst += 16 * dest->y_stride;
5456 int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }