/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "vp8/common/onyxc_int.h"
16 #include "vp8/common/blockd.h"
18 #include "vp8/common/systemdependent.h"
19 #include "vp8/encoder/quantize.h"
20 #include "vp8/common/alloccommon.h"
22 #include "firstpass.h"
23 #include "vpx_dsp/psnr.h"
24 #include "vpx_scale/vpx_scale.h"
25 #include "vp8/common/extend.h"
27 #include "vp8/common/quant_common.h"
28 #include "segmentation.h"
30 #include "vp8/common/postproc.h"
32 #include "vpx_mem/vpx_mem.h"
33 #include "vp8/common/reconintra.h"
34 #include "vp8/common/swapyv12buffer.h"
35 #include "vp8/common/threading.h"
36 #include "vpx_ports/system_state.h"
37 #include "vpx_ports/vpx_timer.h"
39 #include "vpx_ports/arm.h"
41 #if CONFIG_MULTI_RES_ENCODING
42 #include "mr_dissim.h"
44 #include "encodeframe.h"
50 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
51 extern int vp8_update_coef_context(VP8_COMP *cpi);
52 extern void vp8_update_coef_probs(VP8_COMP *cpi);
55 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
56 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
57 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
59 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
60 YV12_BUFFER_CONFIG *post, int filt_lvl,
61 int low_var_thresh, int flag);
62 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
63 extern unsigned int vp8_get_processor_freq();
64 extern void print_tree_update_probs();
65 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
66 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
68 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
70 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
72 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
74 static void set_default_lf_deltas(VP8_COMP *cpi);
76 extern const int vp8_gf_interval_table[101];
78 #if CONFIG_INTERNAL_STATS
80 #include "vpx_dsp/ssim.h"
86 #ifdef OUTPUT_YUV_DENOISED
87 FILE *yuv_denoised_file;
97 extern int skip_true_count;
98 extern int skip_false_count;
101 #ifdef VP8_ENTROPY_STATS
102 extern int intra_mode_stats[10][10][10];
106 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0 };
108 unsigned int tot_pm = 0;
109 unsigned int cnt_pm = 0;
110 unsigned int tot_ef = 0;
111 unsigned int cnt_ef = 0;
115 extern unsigned __int64 Sectionbits[50];
116 extern int y_modes[5];
117 extern int uv_modes[4];
118 extern int b_modes[10];
120 extern int inter_y_modes[10];
121 extern int inter_uv_modes[4];
122 extern unsigned int inter_b_modes[15];
125 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
127 extern const int qrounding_factors[129];
128 extern const int qzbin_factors[129];
129 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
130 extern const int vp8cx_base_skip_false_prob[128];
132 /* Tables relating active max Q to active min Q */
133 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
137 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
138 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
139 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
140 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
142 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
144 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
145 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
146 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
147 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
148 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
149 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
151 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
152 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
153 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
154 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
155 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
156 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
157 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
158 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
160 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
161 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
162 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
163 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
164 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
165 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
166 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
167 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
169 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
170 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
171 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
172 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
173 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
174 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
175 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
176 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
178 static const unsigned char inter_minq[QINDEX_RANGE] = {
179 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
180 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
181 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
182 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
183 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
184 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
185 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
188 #ifdef PACKET_TESTING
189 extern FILE *vpxlogc;
192 static void save_layer_context(VP8_COMP *cpi) {
193 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
195 /* Save layer dependent coding state */
196 lc->target_bandwidth = cpi->target_bandwidth;
197 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
198 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
199 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
200 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
201 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
202 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
203 lc->buffer_level = cpi->buffer_level;
204 lc->bits_off_target = cpi->bits_off_target;
205 lc->total_actual_bits = cpi->total_actual_bits;
206 lc->worst_quality = cpi->worst_quality;
207 lc->active_worst_quality = cpi->active_worst_quality;
208 lc->best_quality = cpi->best_quality;
209 lc->active_best_quality = cpi->active_best_quality;
210 lc->ni_av_qi = cpi->ni_av_qi;
211 lc->ni_tot_qi = cpi->ni_tot_qi;
212 lc->ni_frames = cpi->ni_frames;
213 lc->avg_frame_qindex = cpi->avg_frame_qindex;
214 lc->rate_correction_factor = cpi->rate_correction_factor;
215 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
216 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
217 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
218 lc->inter_frame_target = cpi->inter_frame_target;
219 lc->total_byte_count = cpi->total_byte_count;
220 lc->filter_level = cpi->common.filter_level;
222 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
224 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
225 sizeof(cpi->mb.count_mb_ref_frame_usage));
228 static void restore_layer_context(VP8_COMP *cpi, const int layer) {
229 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
231 /* Restore layer dependent coding state */
232 cpi->current_layer = layer;
233 cpi->target_bandwidth = lc->target_bandwidth;
234 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
235 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
236 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
237 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
238 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
239 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
240 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
241 cpi->buffer_level = lc->buffer_level;
242 cpi->bits_off_target = lc->bits_off_target;
243 cpi->total_actual_bits = lc->total_actual_bits;
244 cpi->active_worst_quality = lc->active_worst_quality;
245 cpi->active_best_quality = lc->active_best_quality;
246 cpi->ni_av_qi = lc->ni_av_qi;
247 cpi->ni_tot_qi = lc->ni_tot_qi;
248 cpi->ni_frames = lc->ni_frames;
249 cpi->avg_frame_qindex = lc->avg_frame_qindex;
250 cpi->rate_correction_factor = lc->rate_correction_factor;
251 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
252 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
253 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
254 cpi->inter_frame_target = lc->inter_frame_target;
255 cpi->total_byte_count = lc->total_byte_count;
256 cpi->common.filter_level = lc->filter_level;
258 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
260 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
261 sizeof(cpi->mb.count_mb_ref_frame_usage));
264 static int rescale(int val, int num, int denom) {
266 int64_t llden = denom;
269 return (int)(llval * llnum / llden);
272 static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
274 double prev_layer_framerate) {
275 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
277 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
278 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
280 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
281 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
282 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
284 lc->starting_buffer_level =
285 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
287 if (oxcf->optimal_buffer_level == 0) {
288 lc->optimal_buffer_level = lc->target_bandwidth / 8;
290 lc->optimal_buffer_level =
291 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
294 if (oxcf->maximum_buffer_size == 0) {
295 lc->maximum_buffer_size = lc->target_bandwidth / 8;
297 lc->maximum_buffer_size =
298 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
301 /* Work out the average size of a frame within this layer */
303 lc->avg_frame_size_for_layer =
304 (int)((cpi->oxcf.target_bitrate[layer] -
305 cpi->oxcf.target_bitrate[layer - 1]) *
306 1000 / (lc->framerate - prev_layer_framerate));
309 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
310 lc->active_best_quality = cpi->oxcf.best_allowed_q;
311 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
313 lc->buffer_level = lc->starting_buffer_level;
314 lc->bits_off_target = lc->starting_buffer_level;
316 lc->total_actual_bits = 0;
320 lc->rate_correction_factor = 1.0;
321 lc->key_frame_rate_correction_factor = 1.0;
322 lc->gf_rate_correction_factor = 1.0;
323 lc->inter_frame_target = 0;
326 // Upon a run-time change in temporal layers, reset the layer context parameters
327 // for any "new" layers. For "existing" layers, let them inherit the parameters
328 // from the previous layer state (at the same layer #). In future we may want
329 // to better map the previous layer state(s) to the "new" ones.
330 static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
331 const int prev_num_layers) {
333 double prev_layer_framerate = 0;
334 const int curr_num_layers = cpi->oxcf.number_of_layers;
335 // If the previous state was 1 layer, get current layer context from cpi.
336 // We need this to set the layer context for the new layers below.
337 if (prev_num_layers == 1) {
338 cpi->current_layer = 0;
339 save_layer_context(cpi);
341 for (i = 0; i < curr_num_layers; ++i) {
342 LAYER_CONTEXT *lc = &cpi->layer_context[i];
343 if (i >= prev_num_layers) {
344 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
346 // The initial buffer levels are set based on their starting levels.
347 // We could set the buffer levels based on the previous state (normalized
348 // properly by the layer bandwidths) but we would need to keep track of
349 // the previous set of layer bandwidths (i.e., target_bitrate[i])
350 // before the layer change. For now, reset to the starting levels.
352 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
353 lc->bits_off_target = lc->buffer_level;
354 // TDOD(marpan): Should we set the rate_correction_factor and
355 // active_worst/best_quality to values derived from the previous layer
356 // state (to smooth-out quality dips/rate fluctuation at transition)?
358 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
359 // is not set for 1 layer, and the restore_layer_context/save_context()
360 // are not called in the encoding loop, so we need to call it here to
361 // pass the layer context state to |cpi|.
362 if (curr_num_layers == 1) {
363 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
365 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
366 lc->bits_off_target = lc->buffer_level;
367 restore_layer_context(cpi, 0);
369 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
373 static void setup_features(VP8_COMP *cpi) {
374 // If segmentation enabled set the update flags
375 if (cpi->mb.e_mbd.segmentation_enabled) {
376 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
377 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
379 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
380 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
383 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
384 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
385 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
386 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
387 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
388 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
389 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
390 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
392 set_default_lf_deltas(cpi);
395 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
/* One-time global encoder initialisation (RTCD dispatch tables and intra
 * predictor setup), guarded so repeated calls are no-ops.
 * NOTE(review): the guard lines were lost in truncation; restored here —
 * confirm the exact set of *_rtcd() calls against the project history. */
void vp8_initialize_enc(void) {
  static volatile int init_done = 0;

  if (!init_done) {
    vpx_dsp_rtcd();
    vp8_init_intra_predictors();
    init_done = 1;
  }
}
/* Release all buffers and synchronization objects owned by the compressor
 * instance. vpx_free() tolerates NULL, so this is safe on partially
 * initialized state. NOTE(review): several closing braces/#endif of this
 * function are missing from this capture; code is preserved as-is. */
static void dealloc_compressor_data(VP8_COMP *cpi) {
  vpx_free(cpi->tplist);

  /* Delete last frame MV storage buffers */
  vpx_free(cpi->lf_ref_frame_sign_bias);
  cpi->lf_ref_frame_sign_bias = 0;

  vpx_free(cpi->lf_ref_frame);
  cpi->lf_ref_frame = 0;

  /* Delete segmentation map */
  vpx_free(cpi->segmentation_map);
  cpi->segmentation_map = 0;

  vpx_free(cpi->active_map);

  vp8_de_alloc_frame_buffers(&cpi->common);

  vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
  vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
  dealloc_raw_frame_buffers(cpi);

  /* Structure used to monitor GF usage */
  vpx_free(cpi->gf_active_flags);
  cpi->gf_active_flags = 0;

  /* Activity mask based per mb zbin adjustments */
  vpx_free(cpi->mb_activity_map);
  cpi->mb_activity_map = 0;

  vpx_free(cpi->mb.pip);

#if CONFIG_MULTITHREAD
  /* De-allocate mutex */
  if (cpi->pmutex != NULL) {
    VP8_COMMON *const pc = &cpi->common;
    /* one row-level mutex per macroblock row */
    for (i = 0; i < pc->mb_rows; ++i) {
      pthread_mutex_destroy(&cpi->pmutex[i]);
    vpx_free(cpi->pmutex);

  vpx_free(cpi->mt_current_mb_col);
  cpi->mt_current_mb_col = NULL;
466 static void enable_segmentation(VP8_COMP *cpi) {
467 /* Set the appropriate feature bit */
468 cpi->mb.e_mbd.segmentation_enabled = 1;
469 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
470 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
472 static void disable_segmentation(VP8_COMP *cpi) {
473 /* Clear the appropriate feature bit */
474 cpi->mb.e_mbd.segmentation_enabled = 0;
477 /* Valid values for a segment are 0 to 3
478 * Segmentation map is arrange as [Rows][Columns]
480 static void set_segmentation_map(VP8_COMP *cpi,
481 unsigned char *segmentation_map) {
482 /* Copy in the new segmentation map */
483 memcpy(cpi->segmentation_map, segmentation_map,
484 (cpi->common.mb_rows * cpi->common.mb_cols));
486 /* Signal that the map should be updated. */
487 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
488 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
491 /* The values given for each segment can be either deltas (from the default
492 * value chosen for the frame) or absolute values.
494 * Valid range for abs values is:
495 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
496 * Valid range for delta values are:
497 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
499 * abs_delta = SEGMENT_DELTADATA (deltas)
500 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
503 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
504 unsigned char abs_delta) {
505 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
506 memcpy(cpi->segment_feature_data, feature_data,
507 sizeof(cpi->segment_feature_data));
/* A simple function to cyclically refresh the background at a lower Q.
 * Builds a two-segment map (segment 1 = refresh candidates) and installs
 * delta-Q / delta-loop-filter data for it. NOTE(review): the do/while loop
 * framing and parts of the denoiser ternary are missing from this capture;
 * code is preserved as-is. */
static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
  unsigned char *seg_map = cpi->segmentation_map;
  signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
  int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
  int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;

  /* refreshed blocks get a boosted (lower) quantizer */
  cpi->cyclic_refresh_q = Q / 2;

  if (cpi->oxcf.screen_content_mode) {
    // Modify quality ramp-up based on Q. Above some Q level, increase the
    // number of blocks to be refreshed, and reduce it below the threshold.
    // Turn-off under certain conditions (i.e., away from key frame, and if
    // we are at good quality (low Q) and most of the blocks were
    // in previous frame.
    int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
    if (Q >= qp_thresh) {
      cpi->cyclic_refresh_mode_max_mbs_perframe =
          (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
    } else if (cpi->frames_since_key > 250 && Q < 20 &&
               cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
      cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
      cpi->cyclic_refresh_mode_max_mbs_perframe =
          (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
    block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;

  // Set every macroblock to be eligible for update.
  // For key frame this will reset seg map to 0.
  memset(cpi->segmentation_map, 0, mbs_in_frame);

  if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
    /* Cycle through the macro_block rows */
    /* MB loop to set local segmentation map */
    i = cpi->cyclic_refresh_mode_index;
    assert(i < mbs_in_frame);
    /* If the MB is as a candidate for clean up then mark it for
     * possible boost/refresh (segment 1) The segment id may get
     * reset to 0 later if the MB gets coded anything other than
     * last frame 0,0 as only (last frame 0,0) MBs are eligible for
     * refresh : that is to say Mbs likely to be background blocks.
     */
    if (cpi->cyclic_refresh_map[i] == 0) {
    } else if (cpi->cyclic_refresh_map[i] < 0) {
      /* negative entries count down toward eligibility */
      cpi->cyclic_refresh_map[i]++;
    /* wrap the scan position at the end of the frame */
    if (i == mbs_in_frame) i = 0;
  } while (block_count && i != cpi->cyclic_refresh_mode_index);

  /* resume the scan here on the next frame */
  cpi->cyclic_refresh_mode_index = i;

#if CONFIG_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0) {
    if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
        Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
        (cpi->frames_since_key >
         2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
      // Under aggressive denoising, use segmentation to turn off loop
      // filter below some qp thresh. The filter is reduced for all
      // blocks that have been encoded as ZEROMV LAST x frames in a row,
      // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
      // This is to avoid "dot" artifacts that can occur from repeated
      // loop filtering on noisy input source.
      cpi->cyclic_refresh_q = Q;
      // lf_adjustment = -MAX_LOOP_FILTER;
      for (i = 0; i < mbs_in_frame; ++i) {
        seg_map[i] = (cpi->consec_zero_last[i] >
                      cpi->denoiser.denoise_pars.consec_zerolast)

  /* Activate segmentation. */
  cpi->mb.e_mbd.update_mb_segmentation_map = 1;
  cpi->mb.e_mbd.update_mb_segmentation_data = 1;
  enable_segmentation(cpi);

  /* Set up the quant segment data */
  feature_data[MB_LVL_ALT_Q][0] = 0;
  feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
  feature_data[MB_LVL_ALT_Q][2] = 0;
  feature_data[MB_LVL_ALT_Q][3] = 0;

  /* Set up the loop segment data */
  feature_data[MB_LVL_ALT_LF][0] = 0;
  feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
  feature_data[MB_LVL_ALT_LF][2] = 0;
  feature_data[MB_LVL_ALT_LF][3] = 0;

  /* Initialise the feature data structure */
  set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
618 static void set_default_lf_deltas(VP8_COMP *cpi) {
619 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
620 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
622 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
623 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
625 /* Test of ref frame deltas */
626 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
627 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
628 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
629 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
631 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
633 if (cpi->oxcf.Mode == MODE_REALTIME) {
634 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
636 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
639 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
640 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
/* Convenience macros for mapping speed and mode into a continuous
 * speed scale for the speed_map() tables: GOOD-quality speeds occupy
 * [1, 7) and realtime (RT) speeds occupy [7, ...).
 * (Defect fixed: the comment terminator was lost in truncation.) */
#define GOOD(x) (x + 1)
#define RT(x) (x + 7)
/* Walk a (value, speed-threshold) pair table and return the last value
 * whose threshold has not yet been exceeded by |speed|. Tables must end
 * with an INT_MAX threshold so the walk always terminates.
 * (Defect fixed: the result variable, loop body and closing brace were
 * lost in truncation and are restored.) */
static int speed_map(int speed, const int *map) {
  int res;

  do {
    res = *map++;
  } while (speed >= *map++);
  return res;
}
658 static const int thresh_mult_map_znn[] = {
659 /* map common to zero, nearest, and near */
660 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
663 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
664 2000, RT(0), 1000, RT(1),
665 2000, RT(7), INT_MAX, INT_MAX };
667 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
668 5000, GOOD(3), 7500, RT(0),
669 2500, RT(1), 5000, RT(6),
672 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
673 2000, RT(0), 0, RT(1),
674 1000, RT(2), 2000, RT(7),
677 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
678 RT(0), 2000, INT_MAX };
680 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
681 2500, GOOD(5), 4000, RT(0),
682 2000, RT(2), 2500, RT(5),
685 static const int thresh_mult_map_split1[] = {
686 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
687 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
690 static const int thresh_mult_map_split2[] = {
691 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
692 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
695 static const int mode_check_freq_map_zn2[] = {
696 /* {zero,nearest}{2,3} */
697 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
700 static const int mode_check_freq_map_vhbpred[] = {
701 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
704 static const int mode_check_freq_map_near2[] = {
705 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
706 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
709 static const int mode_check_freq_map_new1[] = {
710 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
713 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
715 1 << 3, RT(11), 1 << 4, RT(12),
718 static const int mode_check_freq_map_split1[] = {
719 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
722 static const int mode_check_freq_map_split2[] = {
723 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
/* Configure the encoder's speed/quality trade-off features (cpi->sf) from
 * the compressor mode and Speed settings, then bind the matching motion
 * search, FDCT and quantizer function pointers on cpi->mb.
 * NOTE(review): the switch(Mode) framing, several case labels and many
 * closing braces are missing from this capture; code is preserved as-is. */
void vp8_set_speed_features(VP8_COMP *cpi) {
  SPEED_FEATURES *sf = &cpi->sf;
  int Mode = cpi->compressor_speed;
  int Speed = cpi->Speed;
  VP8_COMMON *cm = &cpi->common;
  /* remembered so the quantizer is re-initialised only on a change */
  int last_improved_quant = sf->improved_quant;

  /* Initialise default mode frequency sampling variables */
  for (i = 0; i < MAX_MODES; ++i) {
    cpi->mode_check_freq[i] = 0;

  cpi->mb.mbs_tested_so_far = 0;
  cpi->mb.mbs_zero_last_dot_suppress = 0;

  /* best quality defaults */
  sf->search_method = NSTEP;
  sf->improved_quant = 1;
  sf->improved_dct = 1;
  sf->quarter_pixel_search = 1;
  sf->half_pixel_search = 1;
  sf->iterative_sub_pixel = 1;
  sf->optimize_coefficients = 1;
  sf->use_fastquant_for_pick = 0;
  sf->no_skip_block4x4_search = 1;
  sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
  sf->improved_mv_pred = 1;

  /* default thresholds to 0 */
  for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;

  /* Count enabled references */
  if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
  if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
  if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;

  /* Convert speed to continuous range, with clamping */
  } else if (Mode == 2) {
    if (Speed > 5) Speed = 5;

  sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
      sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */

  sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
          sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
              speed_map(Speed, thresh_mult_map_znn);

  sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
      speed_map(Speed, thresh_mult_map_vhpred);
  sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
  sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
  sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
  sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
      speed_map(Speed, thresh_mult_map_new2);
  sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
  sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
      speed_map(Speed, thresh_mult_map_split2);

  // Special case for temporal layers.
  // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
  // used as second reference. We don't modify thresholds for ALTREF case
  // since ALTREF is usually used as long-term reference in temporal layers.
  if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
      (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
      (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
    if (cpi->closest_reference_frame == GOLDEN_FRAME) {
      sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
      sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
      sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
      sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;

  cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
      cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
          cpi->mode_check_freq[THR_DC] = 0; /* always */

  cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
      cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
          speed_map(Speed, mode_check_freq_map_zn2);

  cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
      speed_map(Speed, mode_check_freq_map_near2);

  cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
      cpi->mode_check_freq[THR_B_PRED] =
          speed_map(Speed, mode_check_freq_map_vhbpred);
  cpi->mode_check_freq[THR_NEW1] = speed_map(Speed, mode_check_freq_map_new1);
  cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
      speed_map(Speed, mode_check_freq_map_new2);
  cpi->mode_check_freq[THR_SPLIT1] =
      speed_map(Speed, mode_check_freq_map_split1);
  cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
      speed_map(Speed, mode_check_freq_map_split2);

#if !CONFIG_REALTIME_ONLY
    case 0: /* best quality mode */
      sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
      /* Disable coefficient optimization above speed 0 */
      sf->optimize_coefficients = 0;
      sf->use_fastquant_for_pick = 1;
      sf->no_skip_block4x4_search = 0;
      sf->improved_quant = 0;
      sf->improved_dct = 0;
      /* Only do recode loop on key frames, golden frames and
       * (remainder of comment lost in truncation) */
      sf->recode_loop = 0; /* recode loop off */
      sf->RD = 0; /* Turn rd off */
      sf->auto_filter = 0; /* Faster selection of loop filter */
      sf->optimize_coefficients = 0;
      sf->iterative_sub_pixel = 1;
      sf->search_method = NSTEP;
      sf->improved_quant = 0;
      sf->improved_dct = 0;
      sf->use_fastquant_for_pick = 1;
      sf->no_skip_block4x4_search = 0;
      if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
      sf->auto_filter = 0; /* Faster selection of loop filter */
      sf->search_method = HEX;
      sf->iterative_sub_pixel = 0;
        unsigned int sum = 0;
        unsigned int total_mbs = cm->MBs;
        unsigned int total_skip;
        if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
        for (i = 0; i < min; ++i) {
          sum += cpi->mb.error_bins[i];
        /* i starts from 2 to make sure thresh started from 2048 */
        for (; i < 1024; ++i) {
          sum += cpi->mb.error_bins[i];
            (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
        if (thresh < 2000) thresh = 2000;
        if (ref_frames > 1) {
          sf->thresh_mult[THR_NEW1] = thresh;
          sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
          sf->thresh_mult[THR_NEAR1] = thresh >> 1;
        if (ref_frames > 2) {
          sf->thresh_mult[THR_NEW2] = thresh << 1;
          sf->thresh_mult[THR_NEAREST2] = thresh;
          sf->thresh_mult[THR_NEAR2] = thresh;
        if (ref_frames > 3) {
          sf->thresh_mult[THR_NEW3] = thresh << 1;
          sf->thresh_mult[THR_NEAREST3] = thresh;
          sf->thresh_mult[THR_NEAR3] = thresh;
        sf->improved_mv_pred = 0;
      if (Speed > 8) sf->quarter_pixel_search = 0;
      if (cm->version == 0) {
        cm->filter_type = NORMAL_LOOPFILTER;
        if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
        cm->filter_type = SIMPLE_LOOPFILTER;
      /* This has a big hit on quality. Last resort */
      if (Speed >= 15) sf->half_pixel_search = 0;
      memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));

  /* Slow quant, dct and trellis not worthwhile for first pass
   * so make sure they are always turned off.
   */
  if (cpi->pass == 1) {
    sf->improved_quant = 0;
    sf->optimize_coefficients = 0;
    sf->improved_dct = 0;

  if (cpi->sf.search_method == NSTEP) {
    vp8_init3smotion_compensation(&cpi->mb,
                                  cm->yv12_fb[cm->lst_fb_idx].y_stride);
  } else if (cpi->sf.search_method == DIAMOND) {
    vp8_init_dsmotion_compensation(&cpi->mb,
                                   cm->yv12_fb[cm->lst_fb_idx].y_stride);

  if (cpi->sf.improved_dct) {
    cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
    cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
    /* No fast FDCT defined for any platform at this time. */
    cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
    cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;

  cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;

  if (cpi->sf.improved_quant) {
    cpi->mb.quantize_b = vp8_regular_quantize_b;
    cpi->mb.quantize_b = vp8_fast_quantize_b;

  /* re-init the quantizer only when the improved_quant choice changed */
  if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);

  /* Bind the sub-pixel motion search matching the enabled refinements. */
  if (cpi->sf.iterative_sub_pixel == 1) {
    cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
  } else if (cpi->sf.quarter_pixel_search) {
    cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
  } else if (cpi->sf.half_pixel_search) {
    cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
    cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;

  if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
    cpi->mb.optimize = 1;
    cpi->mb.optimize = 0;

  if (cpi->common.full_pixel) {
    cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;

  frames_at_speed[cpi->Speed]++;
/* Allocate the raw input-side buffers for the encoder instance:
 * the lookahead queue (sized by oxcf.lag_in_frames) and, when temporal
 * altref filtering is compiled in, the altref YV12 buffer rounded up to
 * whole macroblocks. Allocation failure aborts via vpx_internal_error()
 * (longjmp back to the caller's setjmp point). */
1041 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1042 #if VP8_TEMPORAL_ALT_REF
/* Round dimensions up to a multiple of 16 so the buffer covers whole MBs. */
1043 int width = (cpi->oxcf.Width + 15) & ~15;
1044 int height = (cpi->oxcf.Height + 15) & ~15;
1047 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1048 cpi->oxcf.lag_in_frames);
1049 if (!cpi->lookahead) {
1050 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1051 "Failed to allocate lag buffers");
1054 #if VP8_TEMPORAL_ALT_REF
1056 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1057 VP8BORDERINPIXELS)) {
1058 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1059 "Failed to allocate altref buffer");
/* Release the raw input-side buffers created by alloc_raw_frame_buffers():
 * the optional altref buffer and the lookahead queue. Safe to call during
 * reconfiguration before re-allocating at a new size. */
1065 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1066 #if VP8_TEMPORAL_ALT_REF
1067 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1069 vp8_lookahead_destroy(cpi->lookahead);
/* (Re)allocate the per-macroblock PARTITION_INFO array.
 * Returns 1 on allocation failure, 0 on success (non-zero = error, matching
 * the callers' `if (vp8_alloc_partition_data(cpi))` checks).
 * The array is padded by one row/column; cpi->mb.pi is offset past the
 * first (border) row so MB (0,0) indexes cleanly. */
1072 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
/* Free any previous allocation first; vpx_free(NULL) is a no-op. */
1073 vpx_free(cpi->mb.pip);
1076 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1077 sizeof(PARTITION_INFO));
1078 if (!cpi->mb.pip) return 1;
/* NOTE(review): stride here is mode_info_stride — presumably equal to
 * mb_cols + 1 to match the allocation above; confirm in alloccommon. */
1080 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
/* Allocate (or re-allocate after a size change) all per-frame working
 * storage for the compressor: common frame buffers, partition data,
 * loop-filter/scaled-source frames, token buffer, GF-activity maps,
 * MV-prediction arrays, segmentation/active maps, multithread sync
 * structures, and (optionally) the temporal denoiser.
 * Any allocation failure aborts via vpx_internal_error(). */
1085 void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1086 VP8_COMMON *cm = &cpi->common;
1088 int width = cm->Width;
1089 int height = cm->Height;
1090 #if CONFIG_MULTITHREAD
/* Remember the old row count so the old per-row mutexes can be destroyed. */
1091 int prev_mb_rows = cm->mb_rows;
1094 if (vp8_alloc_frame_buffers(cm, width, height)) {
1095 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1096 "Failed to allocate frame buffers");
1099 if (vp8_alloc_partition_data(cpi)) {
1100 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1101 "Failed to allocate partition data");
/* Round dimensions up to whole macroblocks for the auxiliary frames. */
1104 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1106 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1108 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1109 VP8BORDERINPIXELS)) {
1110 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1111 "Failed to allocate last frame buffer");
1114 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1115 VP8BORDERINPIXELS)) {
1116 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1117 "Failed to allocate scaled source buffer");
1123 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
/* On-the-fly packing needs only a small bounded token buffer. */
1124 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
/* Worst case: 24 coded blocks per MB, 16 tokens per block. */
1126 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1128 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1131 /* Data used for real time vc mode to see if gf needs refreshing */
1132 cpi->zeromv_count = 0;
1134 /* Structures used to monitor GF usage */
1135 vpx_free(cpi->gf_active_flags);
1137 cpi->gf_active_flags,
1138 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1139 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1141 vpx_free(cpi->mb_activity_map);
1143 cpi->mb_activity_map,
1144 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1146 /* allocate memory for storing last frame's MVs for MV prediction. */
/* +2 in each dimension gives a one-MB border on every side. */
1147 vpx_free(cpi->lfmv);
1148 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1149 sizeof(*cpi->lfmv)));
1150 vpx_free(cpi->lf_ref_frame_sign_bias);
1151 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1152 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1153 sizeof(*cpi->lf_ref_frame_sign_bias)));
1154 vpx_free(cpi->lf_ref_frame);
1155 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1156 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1157 sizeof(*cpi->lf_ref_frame)));
1159 /* Create the encoder segmentation map and set all entries to 0 */
1160 vpx_free(cpi->segmentation_map);
1162 cpi->segmentation_map,
1163 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1164 cpi->cyclic_refresh_mode_index = 0;
1165 vpx_free(cpi->active_map);
1166 CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1167 sizeof(*cpi->active_map)));
/* Default: every macroblock is active (1). */
1168 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1170 #if CONFIG_MULTITHREAD
/* Row-sync granularity scales with frame width (narrower frames sync
 * more often). NOTE(review): the first width threshold is on an elided
 * line above this branch. */
1172 cpi->mt_sync_range = 1;
1173 } else if (width <= 1280) {
1174 cpi->mt_sync_range = 4;
1175 } else if (width <= 2560) {
1176 cpi->mt_sync_range = 8;
1178 cpi->mt_sync_range = 16;
1181 if (cpi->oxcf.multi_threaded > 1) {
1184 /* De-allocate and re-allocate mutex */
1185 if (cpi->pmutex != NULL) {
1186 for (i = 0; i < prev_mb_rows; ++i) {
1187 pthread_mutex_destroy(&cpi->pmutex[i]);
1189 vpx_free(cpi->pmutex);
1193 CHECK_MEM_ERROR(cpi->pmutex,
1194 vpx_malloc(sizeof(*cpi->pmutex) * cm->mb_rows));
1196 for (i = 0; i < cm->mb_rows; ++i) {
1197 pthread_mutex_init(&cpi->pmutex[i], NULL);
1201 vpx_free(cpi->mt_current_mb_col);
1202 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1203 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
/* One token list per MB row, used by the packing stage. */
1208 vpx_free(cpi->tplist);
1209 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1211 #if CONFIG_TEMPORAL_DENOISING
1212 if (cpi->oxcf.noise_sensitivity > 0) {
/* Re-create the denoiser at the (possibly new) frame size. */
1213 vp8_denoiser_free(&cpi->denoiser);
1214 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1215 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1216 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1217 "Failed to allocate denoiser");
/* Maps the user-visible quantizer range (0..63) onto VP8's internal
 * quantizer index range (0..127). The mapping is monotonic and
 * non-linear: finer steps at low Q, coarser steps at high Q. */
1224 static const int q_trans[] = {
1225 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
1226 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
1227 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
1228 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
/* Inverse of q_trans[]: given an internal quantizer index x, return the
 * smallest user-level quantizer (0..63) whose q_trans entry is >= x. */
1231 int vp8_reverse_trans(int x) {
1234 for (i = 0; i < 64; ++i) {
1235 if (q_trans[i] >= x) return i;
/* Update all rate-control quantities that depend on the frame rate:
 * per-frame bandwidth budgets and the maximum golden/altref frame
 * interval. Called at init and whenever the observed frame rate changes. */
1240 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
/* Guard against a degenerate/unset rate; fall back to 30 fps. */
1241 if (framerate < .1) framerate = 30;
1243 cpi->framerate = framerate;
1244 cpi->output_framerate = framerate;
1245 cpi->per_frame_bandwidth =
1246 (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1247 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
/* Minimum per-frame budget as a percentage of the average (VBR floor). */
1248 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1249 cpi->oxcf.two_pass_vbrmin_section / 100);
1251 /* Set Maximum gf/arf interval */
1252 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1254 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1256 /* Extended interval for genuinely static scenes */
1257 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1259 /* Special conditions when altr ref frame enabled in lagged compress mode */
/* With lag, the GF/ARF interval cannot exceed the lookahead depth. */
1260 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1261 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1262 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1265 if (cpi->twopass.static_scene_max_gf_interval >
1266 cpi->oxcf.lag_in_frames - 1) {
1267 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1271 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1272 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
/* One-time initialization of the compressor from the user configuration:
 * sets the bitstream version, an initial frame-rate guess, reference
 * flags, then delegates shared config handling to vp8_change_config()
 * and seeds the rate-control state and temporal-layer contexts. */
1276 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1277 VP8_COMMON *cm = &cpi->common;
1282 cpi->auto_adjust_gold_quantizer = 1;
1284 cm->version = oxcf->Version;
1285 vp8_setup_version(cm);
1287 /* Frame rate is not available on the first frame, as it's derived from
1288 * the observed timestamps. The actual value used here doesn't matter
1289 * too much, as it will adapt quickly.
1291 if (oxcf->timebase.num > 0) {
1293 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1295 cpi->framerate = 30;
1298 /* If the reciprocal of the timebase seems like a reasonable framerate,
1299 * then use that as a guess, otherwise use 30.
1301 if (cpi->framerate > 180) cpi->framerate = 30;
1303 cpi->ref_framerate = cpi->framerate;
/* All three reference frames are usable by default. */
1305 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1307 cm->refresh_golden_frame = 0;
1308 cm->refresh_last_frame = 1;
1309 cm->refresh_entropy_probs = 1;
1311 /* change includes all joint functionality */
1312 vp8_change_config(cpi, oxcf);
1314 /* Initialize active best and worst q and average q values. */
1315 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1316 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1317 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1319 /* Initialise the starting buffer levels */
1320 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1321 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1323 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1324 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1325 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1326 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1328 cpi->total_actual_bits = 0;
1329 cpi->total_target_vs_actual = 0;
1331 /* Temporal scalabilty */
1332 if (cpi->oxcf.number_of_layers > 1) {
1334 double prev_layer_framerate = 0;
1336 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1337 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1338 prev_layer_framerate =
1339 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1343 #if VP8_TEMPORAL_ALT_REF
/* Precompute a fixed-point reciprocal table (0x80000 / i) used by the
 * temporal filter; entry 0 is unused. */
1347 cpi->fixed_divide[0] = 0;
1349 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
/* Refresh the per-temporal-layer rate-control snapshots after a
 * configuration change: recompute each layer's frame rate, target
 * bandwidth, buffer levels, and average frame size. No-op when only a
 * single layer is configured. */
1354 static void update_layer_contexts(VP8_COMP *cpi) {
1355 VP8_CONFIG *oxcf = &cpi->oxcf;
1357 /* Update snapshots of the layer contexts to reflect new parameters */
1358 if (oxcf->number_of_layers > 1) {
1360 double prev_layer_framerate = 0;
1362 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1363 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1364 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1366 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
/* Configured target is in kbit/s; contexts store bit/s. */
1367 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1369 lc->starting_buffer_level = rescale(
1370 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
/* Zero means "unset": default the buffer levels to bandwidth/8
 * (i.e. one eighth of a second at the target rate). */
1372 if (oxcf->optimal_buffer_level == 0) {
1373 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1375 lc->optimal_buffer_level = rescale(
1376 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1379 if (oxcf->maximum_buffer_size == 0) {
1380 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1382 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1383 lc->target_bandwidth, 1000);
1386 /* Work out the average size of a frame within this layer */
/* Uses the bitrate/framerate *increments* over the previous layer,
 * since target_bitrate[] is cumulative across layers. */
1388 lc->avg_frame_size_for_layer =
1389 (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1390 1000 / (lc->framerate - prev_layer_framerate));
1393 prev_layer_framerate = lc->framerate;
/* Apply a (possibly changed) user configuration to a live compressor.
 * Translates user quantizer values through q_trans[], clamps cpu_used per
 * encoding mode, derives buffer levels from the target bandwidth,
 * re-derives frame-rate-dependent rate control, handles temporal-layer
 * count changes, and re-allocates frame buffers when the coded size
 * changed. Shared between first-time init and runtime reconfiguration. */
1398 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1399 VP8_COMMON *cm = &cpi->common;
1401 unsigned int prev_number_of_layers;
1407 if (cm->version != oxcf->Version) {
1408 cm->version = oxcf->Version;
1409 vp8_setup_version(cm);
/* Remember the previous geometry/layer count to detect changes below. */
1412 last_w = cpi->oxcf.Width;
1413 last_h = cpi->oxcf.Height;
1414 prev_number_of_layers = cpi->oxcf.number_of_layers;
/* Per-mode compressor speed and cpu_used clamping. */
1418 switch (cpi->oxcf.Mode) {
1421 cpi->compressor_speed = 2;
1423 if (cpi->oxcf.cpu_used < -16) {
1424 cpi->oxcf.cpu_used = -16;
1427 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1431 case MODE_GOODQUALITY:
1433 cpi->compressor_speed = 1;
1435 if (cpi->oxcf.cpu_used < -5) {
1436 cpi->oxcf.cpu_used = -5;
1439 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1443 case MODE_BESTQUALITY:
1445 cpi->compressor_speed = 0;
1448 case MODE_FIRSTPASS:
1450 cpi->compressor_speed = 1;
1452 case MODE_SECONDPASS:
1454 cpi->compressor_speed = 1;
1456 if (cpi->oxcf.cpu_used < -5) {
1457 cpi->oxcf.cpu_used = -5;
1460 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1463 case MODE_SECONDPASS_BEST:
1465 cpi->compressor_speed = 0;
1469 if (cpi->pass == 0) cpi->auto_worst_q = 1;
/* Translate user-level quantizers (0..63) to internal indices (0..127). */
1471 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1472 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1473 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1475 if (oxcf->fixed_q >= 0) {
/* Negative per-frame-type Q values mean "unset"; fall back to index 0. */
1476 if (oxcf->worst_allowed_q < 0) {
1477 cpi->oxcf.fixed_q = q_trans[0];
1479 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1482 if (oxcf->alt_q < 0) {
1483 cpi->oxcf.alt_q = q_trans[0];
1485 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1488 if (oxcf->key_q < 0) {
1489 cpi->oxcf.key_q = q_trans[0];
1491 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1494 if (oxcf->gold_q < 0) {
1495 cpi->oxcf.gold_q = q_trans[0];
1497 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1501 cpi->baseline_gf_interval =
1502 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1504 // GF behavior for 1 pass CBR, used when error_resilience is off.
1505 if (!cpi->oxcf.error_resilient_mode &&
1506 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1507 cpi->oxcf.Mode == MODE_REALTIME)
1508 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1510 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
/* On-the-fly packing requires the maximum (8) token partitions. */
1511 cpi->oxcf.token_partitions = 3;
1514 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1515 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1518 setup_features(cpi);
1523 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1524 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1528 /* At the moment the first order values may not be > MAXQ */
1529 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1531 /* local file playback mode == really big buffer */
1532 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1533 cpi->oxcf.starting_buffer_level = 60000;
1534 cpi->oxcf.optimal_buffer_level = 60000;
1535 cpi->oxcf.maximum_buffer_size = 240000;
1536 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1537 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1538 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1541 /* Convert target bandwidth from Kbit/s to Bit/s */
1542 cpi->oxcf.target_bandwidth *= 1000;
1544 cpi->oxcf.starting_buffer_level = rescale(
1545 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1547 /* Set or reset optimal and maximum buffer levels. */
1548 if (cpi->oxcf.optimal_buffer_level == 0) {
1549 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1551 cpi->oxcf.optimal_buffer_level = rescale(
1552 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1555 if (cpi->oxcf.maximum_buffer_size == 0) {
1556 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1558 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1559 cpi->oxcf.target_bandwidth, 1000);
1561 // Under a configuration change, where maximum_buffer_size may change,
1562 // keep buffer level clipped to the maximum allowed buffer size.
1563 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1564 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1565 cpi->buffer_level = cpi->bits_off_target;
1568 /* Set up frame rate and related parameters rate control values. */
1569 vp8_new_framerate(cpi, cpi->framerate);
1571 /* Set absolute upper and lower quality limits */
1572 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1573 cpi->best_quality = cpi->oxcf.best_allowed_q;
1575 /* active values should only be modified if out of new range */
1576 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1577 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1580 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1581 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1583 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1584 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1587 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1588 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1591 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1593 cpi->cq_target_quality = cpi->oxcf.cq_level;
1595 /* Only allow dropped frames in buffered mode */
1596 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1598 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1600 // Check if the number of temporal layers has changed, and if so reset the
1601 // pattern counter and set/initialize the temporal layer context for the
1602 // new layer configuration.
1603 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1604 // If the number of temporal layers are changed we must start at the
1605 // base of the pattern cycle, so set the layer id to 0 and reset
1606 // the temporal pattern counter.
1607 if (cpi->temporal_layer_id > 0) {
1608 cpi->temporal_layer_id = 0;
1610 cpi->temporal_pattern_counter = 0;
1611 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1614 if (!cpi->initial_width) {
1615 cpi->initial_width = cpi->oxcf.Width;
1616 cpi->initial_height = cpi->oxcf.Height;
/* Resizing above the initial allocation is not supported. */
1619 cm->Width = cpi->oxcf.Width;
1620 cm->Height = cpi->oxcf.Height;
1621 assert(cm->Width <= cpi->initial_width);
1622 assert(cm->Height <= cpi->initial_height);
1624 /* TODO(jkoleszar): if an internal spatial resampling is active,
1625 * and we downsize the input image, maybe we should clear the
1626 * internal scale immediately rather than waiting for it to
1630 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1631 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1633 cm->sharpness_level = cpi->oxcf.Sharpness;
1635 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
1638 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1639 Scale2Ratio(cm->vert_scale, &vr, &vs);
1641 /* always go to the next whole number */
1642 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1643 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1646 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1647 cpi->force_next_frame_intra = 1;
/* Re-allocate buffers if the MB-aligned size no longer matches the
 * existing reference frame allocation (or none exists yet). */
1650 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1651 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1652 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1653 dealloc_raw_frame_buffers(cpi);
1654 alloc_raw_frame_buffers(cpi);
1655 vp8_alloc_compressor_data(cpi);
1658 if (cpi->oxcf.fixed_q >= 0) {
1659 cpi->last_q[0] = cpi->oxcf.fixed_q;
1660 cpi->last_q[1] = cpi->oxcf.fixed_q;
1663 cpi->Speed = cpi->oxcf.cpu_used;
1665 /* force to allowlag to 0 if lag_in_frames is 0; */
1666 if (cpi->oxcf.lag_in_frames == 0) {
1667 cpi->oxcf.allow_lag = 0;
1669 /* Limit on lag buffers as these are not currently dynamically allocated */
1670 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1671 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1675 cpi->alt_ref_source = NULL;
1676 cpi->is_src_frame_alt_ref = 0;
1678 #if CONFIG_TEMPORAL_DENOISING
/* Lazily allocate the denoiser the first time noise filtering is on. */
1679 if (cpi->oxcf.noise_sensitivity) {
1680 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1681 int width = (cpi->oxcf.Width + 15) & ~15;
1682 int height = (cpi->oxcf.Height + 15) & ~15;
1683 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1684 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1685 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1686 "Failed to allocate denoiser");
1693 /* Experimental RD Code */
1694 cpi->frame_distortion = 0;
1695 cpi->last_frame_distortion = 0;
1700 #define M_LOG2_E 0.693147180559945309417
1702 #define log2f(x) (log(x) / (float)M_LOG2_E)
/* Fill the symmetric SAD-based motion-vector cost tables used by the
 * full/diamond search. Cost grows logarithmically with MV magnitude
 * (256 * 2 * (log2(8*i) + 0.6)); the tables are indexed from -mvfp_max
 * to +mvfp_max, with a fixed cost of 300 at zero. */
1704 static void cal_mvsadcosts(int *mvsadcost[2]) {
1707 mvsadcost[0][0] = 300;
1708 mvsadcost[1][0] = 300;
1711 double z = 256 * (2 * (log2f(8 * i) + .6));
1712 mvsadcost[0][i] = (int)z;
1713 mvsadcost[1][i] = (int)z;
/* Negative indices are valid: callers pass pointers offset to the
 * middle of the underlying arrays. */
1714 mvsadcost[0][-i] = (int)z;
1715 mvsadcost[1][-i] = (int)z;
1716 } while (++i <= mvfp_max);
/* Create and fully initialize a VP8 compressor instance.
 * Allocates the VP8_COMP, installs a setjmp error handler (any
 * vpx_internal_error during init longjmps here and tears the instance
 * down), applies the user config, seeds rate control / cyclic refresh /
 * statistics state, wires up the SAD/variance function pointers and RD
 * cost tables, and spins up encoder threads when enabled.
 * Returns the new instance, or NULL on failure. */
1719 struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
1725 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1726 /* Check that the CPI instance is valid */
1731 memset(cpi, 0, sizeof(VP8_COMP));
/* Error trampoline: failures below longjmp back to this point. */
1733 if (setjmp(cm->error.jmp)) {
1734 cpi->common.error.setjmp = 0;
1735 vp8_remove_compressor(&cpi);
1739 cpi->common.error.setjmp = 1;
1741 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
1742 (MAX_MVSEARCH_STEPS * 8) + 1));
1744 vp8_create_common(&cpi->common);
1746 init_config(cpi, oxcf);
1748 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1749 sizeof(vp8cx_base_skip_false_prob));
1750 cpi->common.current_video_frame = 0;
1751 cpi->temporal_pattern_counter = 0;
1752 cpi->temporal_layer_id = -1;
1753 cpi->kf_overspend_bits = 0;
1754 cpi->kf_bitrate_adjustment = 0;
1755 cpi->frames_till_gf_update_due = 0;
1756 cpi->gf_overspend_bits = 0;
1757 cpi->non_gf_bitrate_adjustment = 0;
/* Initial coding probabilities: 128 = even odds (out of 255). */
1758 cpi->prob_last_coded = 128;
1759 cpi->prob_gf_coded = 128;
1760 cpi->prob_intra_coded = 63;
1762 /* Prime the recent reference frame usage counters.
1763 * Hereafter they will be maintained as a sort of moving average
1765 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1766 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1767 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1768 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1770 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1771 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1773 cpi->twopass.gf_decay_rate = 0;
1774 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1776 cpi->gold_is_last = 0;
1777 cpi->alt_is_last = 0;
1778 cpi->gold_is_alt = 0;
1780 cpi->active_map_enabled = 0;
1783 /* Experimental code for lagged and one pass */
1784 /* Initialise one_pass GF frames stats */
1785 /* Update stats used for GF selection */
1788 cpi->one_pass_frame_index = 0;
1790 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1792 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1793 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1794 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1795 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1796 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1797 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1798 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1799 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1800 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1805 cpi->mse_source_denoised = 0;
1807 /* Should we use the cyclic refresh method.
1808 * Currently there is no external control for this.
1809 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1811 cpi->cyclic_refresh_mode_enabled =
1812 (cpi->oxcf.error_resilient_mode ||
1813 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1814 cpi->oxcf.Mode <= 2));
/* Fraction of the frame refreshed per frame depends on layer count. */
1815 cpi->cyclic_refresh_mode_max_mbs_perframe =
1816 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1817 if (cpi->oxcf.number_of_layers == 1) {
1818 cpi->cyclic_refresh_mode_max_mbs_perframe =
1819 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1820 } else if (cpi->oxcf.number_of_layers == 2) {
1821 cpi->cyclic_refresh_mode_max_mbs_perframe =
1822 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1824 cpi->cyclic_refresh_mode_index = 0;
1825 cpi->cyclic_refresh_q = 32;
1827 // GF behavior for 1 pass CBR, used when error_resilience is off.
1828 cpi->gf_update_onepass_cbr = 0;
1829 cpi->gf_noboost_onepass_cbr = 0;
1830 if (!cpi->oxcf.error_resilient_mode &&
1831 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1832 cpi->gf_update_onepass_cbr = 1;
1833 cpi->gf_noboost_onepass_cbr = 1;
/* GF interval = roughly two full cyclic-refresh sweeps of the frame,
 * clamped to [6, 40]. */
1834 cpi->gf_interval_onepass_cbr =
1835 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1836 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1837 cpi->cyclic_refresh_mode_max_mbs_perframe)
1839 cpi->gf_interval_onepass_cbr =
1840 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
1841 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1844 if (cpi->cyclic_refresh_mode_enabled) {
1845 CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
1846 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1848 cpi->cyclic_refresh_map = (signed char *)NULL;
1851 CHECK_MEM_ERROR(cpi->consec_zero_last,
1852 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1853 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1854 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1856 #ifdef VP8_ENTROPY_STATS
1857 init_context_counters();
1860 /*Initialize the feed-forward activity masking.*/
1861 cpi->activity_avg = 90 << 12;
1863 /* Give a sensible default for the first frame. */
1864 cpi->frames_since_key = 8;
1865 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1866 cpi->this_key_frame_forced = 0;
1867 cpi->next_key_frame_forced = 0;
1869 cpi->source_alt_ref_pending = 0;
1870 cpi->source_alt_ref_active = 0;
1871 cpi->common.refresh_alt_ref_frame = 0;
1873 cpi->force_maxqp = 0;
1875 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1876 #if CONFIG_INTERNAL_STATS
1877 cpi->b_calculate_ssimg = 0;
1882 if (cpi->b_calculate_psnr) {
1883 cpi->total_sq_error = 0.0;
1884 cpi->total_sq_error2 = 0.0;
1889 cpi->totalp_y = 0.0;
1890 cpi->totalp_u = 0.0;
1891 cpi->totalp_v = 0.0;
1893 cpi->tot_recode_hits = 0;
1894 cpi->summed_quality = 0;
1895 cpi->summed_weights = 0;
1900 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1902 cpi->frames_till_gf_update_due = 0;
1903 cpi->key_frame_count = 1;
1905 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1908 cpi->total_byte_count = 0;
1910 cpi->drop_frame = 0;
/* Rate-correction factors start neutral (1.0). */
1912 cpi->rate_correction_factor = 1.0;
1913 cpi->key_frame_rate_correction_factor = 1.0;
1914 cpi->gf_rate_correction_factor = 1.0;
1915 cpi->twopass.est_max_qcorrection_factor = 1.0;
1917 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1918 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1921 #ifdef OUTPUT_YUV_SRC
1922 yuv_file = fopen("bd.yuv", "ab");
1924 #ifdef OUTPUT_YUV_DENOISED
1925 yuv_denoised_file = fopen("denoised.yuv", "ab");
1929 framepsnr = fopen("framepsnr.stt", "a");
1930 kf_list = fopen("kf_list.stt", "w");
1933 cpi->output_pkt_list = oxcf->output_pkt_list;
1935 #if !CONFIG_REALTIME_ONLY
1937 if (cpi->pass == 1) {
1938 vp8_init_first_pass(cpi);
1939 } else if (cpi->pass == 2) {
/* Two-pass: point the stats reader at the first-pass packet buffer. */
1940 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1941 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1943 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1944 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1945 cpi->twopass.stats_in_end =
1946 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1947 vp8_init_second_pass(cpi);
1952 if (cpi->compressor_speed == 2) {
1953 cpi->avg_encode_time = 0;
1954 cpi->avg_pick_mode_time = 0;
1957 vp8_set_speed_features(cpi);
1959 /* Set starting values of RD threshold multipliers (128 = *1) */
1960 for (i = 0; i < MAX_MODES; ++i) {
1961 cpi->mb.rd_thresh_mult[i] = 128;
1964 #ifdef VP8_ENTROPY_STATS
1965 init_mv_ref_counts();
1968 #if CONFIG_MULTITHREAD
1969 if (vp8cx_create_encoder_threads(cpi)) {
1970 vp8_remove_compressor(&cpi);
/* Per-block-size SAD / variance / subpel-variance function pointers,
 * resolved to the best available implementation by the RTCD layer. */
1975 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
1976 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
1977 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
1978 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
1979 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
1980 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
1982 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
1983 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
1984 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
1985 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
1986 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
1987 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
1989 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
1990 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
1991 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
1992 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
1993 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
1994 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
1996 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
1997 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
1998 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
1999 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
2000 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
2001 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2003 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2004 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2005 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2006 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
2007 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
2008 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2010 #if ARCH_X86 || ARCH_X86_64
2011 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2012 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2013 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2014 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2015 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2018 cpi->full_search_sad = vp8_full_search_sad;
2019 cpi->diamond_search_sad = vp8_diamond_search_sad;
2020 cpi->refining_search_sad = vp8_refining_search_sad;
2022 /* make sure frame 1 is okay */
2023 cpi->mb.error_bins[0] = cpi->common.MBs;
2025 /* vp8cx_init_quantizer() is first called here. Add check in
2026 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2027 * called later when needed. This will avoid unnecessary calls of
2028 * vp8cx_init_quantizer() for every frame.
2030 vp8cx_init_quantizer(cpi);
2032 vp8_loop_filter_init(cm);
2034 cpi->common.error.setjmp = 0;
2036 #if CONFIG_MULTI_RES_ENCODING
2038 /* Calculate # of MBs in a row in lower-resolution level image. */
2039 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2043 /* setup RD costs to MACROBLOCK struct */
/* Cost tables are indexed by signed MV components: point mid-array. */
2045 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2046 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2047 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2048 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2050 cal_mvsadcosts(cpi->mb.mvsadcost);
2052 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2053 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2054 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2055 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2056 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2058 /* setup block ptrs & offsets */
2059 vp8_setup_block_ptrs(&cpi->mb);
2060 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
/* Tear down an encoder instance: if at least one frame was encoded, dump any
 * compiled-in statistics (two-pass, entropy, internal PSNR/SSIM, speed and
 * mode counts), then release all compressor-owned allocations and the shared
 * VP8_COMMON state.  *ptr is the instance handle owned by the caller.
 * NOTE(review): this listing is elided — several closing braces, fclose()
 * calls and #endif lines of the original are not shown at these lines.
 */
2065 void vp8_remove_compressor(VP8_COMP **ptr) {
2066 VP8_COMP *cpi = *ptr;
2070 if (cpi && (cpi->common.current_video_frame > 0)) {
2071 #if !CONFIG_REALTIME_ONLY
/* Finalize two-pass rate-control state before any stats are printed. */
2073 if (cpi->pass == 2) {
2074 vp8_end_second_pass(cpi);
2079 #ifdef VP8_ENTROPY_STATS
2080 print_context_counters();
2081 print_tree_update_probs();
2082 print_mode_context();
/* Internal quality stats: append a summary line (or per-layer lines) of
 * bitrate, PSNR variants and SSIM to opsnr.stt. */
2085 #if CONFIG_INTERNAL_STATS
2087 if (cpi->pass != 1) {
2088 FILE *f = fopen("opsnr.stt", "a");
2089 double time_encoded =
2090 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2092 double total_encode_time =
2093 (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
2094 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2095 const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
2096 const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
2098 if (cpi->b_calculate_psnr) {
2099 if (cpi->oxcf.number_of_layers > 1) {
2103 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2104 "GLPsnrP\tVPXSSIM\n");
2105 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2107 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
/* samples = 3/2 * W * H per frame: Y plane plus two quarter-size
 * chroma planes of a 4:2:0 frame. */
2108 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2109 cpi->common.Width * cpi->common.Height;
2111 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2112 double total_psnr2 =
2113 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2115 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2118 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2120 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2121 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2122 total_psnr2, total_ssim);
2126 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2128 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2129 double total_psnr2 =
2130 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2132 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2135 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2136 "GLPsnrP\tVPXSSIM\n");
2138 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2140 dr, cpi->total / cpi->count, total_psnr,
2141 cpi->totalp / cpi->count, total_psnr2, total_ssim);
/* Quantizer/skip diagnostics.  NOTE(review): the matching fclose(f)
 * calls for these stats files are in elided lines — verify upstream. */
2146 f = fopen("qskip.stt", "a");
2147 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
/* Speed statistics (compressor_speed == 2 is the realtime speed mode). */
2156 if (cpi->compressor_speed == 2) {
2158 FILE *f = fopen("cxspeed.stt", "a");
2159 cnt_pm /= cpi->common.MBs;
2161 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
/* Mode usage statistics: counts of intra/inter Y, UV, 4x4 sub-block and
 * macroblock-partition modes accumulated over the whole encode. */
2171 extern int count_mb_seg[4];
2172 FILE *f = fopen("modes.stt", "a");
2173 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2174 (double)count / (double)1000;
2175 fprintf(f, "intra_mode in Intra Frames:\n");
2176 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2177 y_modes[2], y_modes[3], y_modes[4]);
2178 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2179 uv_modes[2], uv_modes[3]);
2184 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2189 fprintf(f, "Modes in Inter Frames:\n");
2190 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2191 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2192 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2193 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2195 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2196 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2201 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2205 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2206 count_mb_seg[2], count_mb_seg[3]);
2207 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2208 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2209 inter_b_modes[NEW4X4]);
/* Emit the gathered key-frame B-mode context counts as a compilable C
 * table (modecontext.c); zero counts are written as 1 so the generated
 * probabilities never divide by zero. */
2215 #ifdef VP8_ENTROPY_STATS
2218 FILE *fmode = fopen("modecontext.c", "w");
2220 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2221 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2223 "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2225 for (i = 0; i < 10; ++i) {
2226 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2228 for (j = 0; j < 10; ++j) {
2229 fprintf(fmode, " {");
2231 for (k = 0; k < 10; ++k) {
2232 if (!intra_mode_stats[i][j][k])
2233 fprintf(fmode, " %5d, ", 1);
2235 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2238 fprintf(fmode, "}, /* left_mode %d */\n", j);
2241 fprintf(fmode, " },\n");
2244 fprintf(fmode, "};\n");
2249 #if defined(SECTIONBITS_OUTPUT)
2253 FILE *f = fopen("tokenbits.stt", "a");
2255 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
/* Timing summary printed to stdout (values accumulated in microseconds,
 * reported in milliseconds). */
2265 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2266 printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
2267 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
/* Resource teardown: worker threads, denoiser buffers, per-instance
 * allocations, then the shared common state. */
2272 #if CONFIG_MULTITHREAD
2273 vp8cx_remove_encoder_threads(cpi);
2276 #if CONFIG_TEMPORAL_DENOISING
2277 vp8_denoiser_free(&cpi->denoiser);
2279 dealloc_compressor_data(cpi);
2280 vpx_free(cpi->mb.ss);
2282 vpx_free(cpi->cyclic_refresh_map);
2283 vpx_free(cpi->consec_zero_last);
2284 vpx_free(cpi->consec_zero_last_mvbias);
2286 vp8_remove_common(&cpi->common);
/* Close debug YUV dump files opened elsewhere when these build options
 * are enabled. */
2290 #ifdef OUTPUT_YUV_SRC
2293 #ifdef OUTPUT_YUV_DENOISED
2294 fclose(yuv_denoised_file);
/* Sum of squared differences between two same-sized planes.
 * orig/recon point at the top-left pixel; *_stride is the row pitch in
 * bytes; cols/rows give the plane dimensions in pixels.  Full 16x16 blocks
 * use the optimized vpx_mse16x16(); partial blocks on the right/bottom
 * edges are summed pixel by pixel.  Returns the total SSE for the plane.
 */
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
                                 unsigned char *recon, int recon_stride,
                                 unsigned int cols, unsigned int rows) {
  unsigned int row, col;
  uint64_t total_sse = 0;
  int diff;

  for (row = 0; row + 16 <= rows; row += 16) {
    for (col = 0; col + 16 <= cols; col += 16) {
      unsigned int sse;

      vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
      total_sse += sse;
    }

    /* Handle odd-sized width */
    if (col < cols) {
      unsigned int border_row, border_col;
      unsigned char *border_orig = orig;
      unsigned char *border_recon = recon;

      for (border_row = 0; border_row < 16; ++border_row) {
        for (border_col = col; border_col < cols; ++border_col) {
          diff = border_orig[border_col] - border_recon[border_col];
          total_sse += diff * diff;
        }

        border_orig += orig_stride;
        border_recon += recon_stride;
      }
    }

    orig += orig_stride * 16;
    recon += recon_stride * 16;
  }

  /* Handle odd-sized height */
  for (; row < rows; ++row) {
    for (col = 0; col < cols; ++col) {
      diff = orig[col] - recon[col];
      total_sse += diff * diff;
    }

    orig += orig_stride;
    recon += recon_stride;
  }

  /* SIMD kernels may leave the FPU in MMX state; reset before returning. */
  vpx_clear_system_state();
  return total_sse;
}
2362 static void generate_psnr_packet(VP8_COMP *cpi) {
2363 YV12_BUFFER_CONFIG *orig = cpi->Source;
2364 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2365 struct vpx_codec_cx_pkt pkt;
2368 unsigned int width = cpi->common.Width;
2369 unsigned int height = cpi->common.Height;
2371 pkt.kind = VPX_CODEC_PSNR_PKT;
2372 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2373 recon->y_stride, width, height);
2374 pkt.data.psnr.sse[0] = sse;
2375 pkt.data.psnr.sse[1] = sse;
2376 pkt.data.psnr.samples[0] = width * height;
2377 pkt.data.psnr.samples[1] = width * height;
2379 width = (width + 1) / 2;
2380 height = (height + 1) / 2;
2382 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2383 recon->uv_stride, width, height);
2384 pkt.data.psnr.sse[0] += sse;
2385 pkt.data.psnr.sse[2] = sse;
2386 pkt.data.psnr.samples[0] += width * height;
2387 pkt.data.psnr.samples[2] = width * height;
2389 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2390 recon->uv_stride, width, height);
2391 pkt.data.psnr.sse[0] += sse;
2392 pkt.data.psnr.sse[3] = sse;
2393 pkt.data.psnr.samples[0] += width * height;
2394 pkt.data.psnr.samples[3] = width * height;
2396 for (i = 0; i < 4; ++i) {
2397 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2398 (double)(pkt.data.psnr.sse[i]));
2401 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2404 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2405 if (ref_frame_flags > 7) return -1;
2407 cpi->ref_frame_flags = ref_frame_flags;
2410 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2411 if (ref_frame_flags > 7) return -1;
2413 cpi->common.refresh_golden_frame = 0;
2414 cpi->common.refresh_alt_ref_frame = 0;
2415 cpi->common.refresh_last_frame = 0;
2417 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2419 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2421 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2426 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2427 YV12_BUFFER_CONFIG *sd) {
2428 VP8_COMMON *cm = &cpi->common;
2431 if (ref_frame_flag == VP8_LAST_FRAME) {
2432 ref_fb_idx = cm->lst_fb_idx;
2433 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2434 ref_fb_idx = cm->gld_fb_idx;
2435 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2436 ref_fb_idx = cm->alt_fb_idx;
2441 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2445 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2446 YV12_BUFFER_CONFIG *sd) {
2447 VP8_COMMON *cm = &cpi->common;
2451 if (ref_frame_flag == VP8_LAST_FRAME) {
2452 ref_fb_idx = cm->lst_fb_idx;
2453 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2454 ref_fb_idx = cm->gld_fb_idx;
2455 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2456 ref_fb_idx = cm->alt_fb_idx;
2461 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2465 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2466 VP8_COMMON *cm = &cpi->common;
2467 cm->refresh_entropy_probs = update;
#if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
/* Debug helper: append the Y, U and V planes of s to yuv_file as raw bytes,
 * one row at a time (strides may exceed the visible widths, so rows are
 * written individually).
 */
void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) {
  unsigned char *src = s->y_buffer;
  int h = s->y_height;

  do {
    fwrite(src, s->y_width, 1, yuv_file);
    src += s->y_stride;
  } while (--h);

  src = s->u_buffer;
  h = s->uv_height;

  do {
    fwrite(src, s->uv_width, 1, yuv_file);
    src += s->uv_stride;
  } while (--h);

  src = s->v_buffer;
  h = s->uv_height;

  do {
    fwrite(src, s->uv_width, 1, yuv_file);
    src += s->uv_stride;
  } while (--h);
}
#endif
2500 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2501 VP8_COMMON *cm = &cpi->common;
2503 /* are we resizing the image */
2504 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2505 #if CONFIG_SPATIAL_RESAMPLING
2509 if (cm->vert_scale == 3) {
2515 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2516 Scale2Ratio(cm->vert_scale, &vr, &vs);
2518 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2519 tmp_height, hs, hr, vs, vr, 0);
2521 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2522 cpi->Source = &cpi->scaled_source;
2529 static int resize_key_frame(VP8_COMP *cpi) {
2530 #if CONFIG_SPATIAL_RESAMPLING
2531 VP8_COMMON *cm = &cpi->common;
2533 /* Do we need to apply resampling for one pass cbr.
2534 * In one pass this is more limited than in two pass cbr.
2535 * The test and any change is only made once per key frame sequence.
2537 if (cpi->oxcf.allow_spatial_resampling &&
2538 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2540 int new_width, new_height;
2542 /* If we are below the resample DOWN watermark then scale down a
2545 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2546 cpi->oxcf.optimal_buffer_level / 100)) {
2548 (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2549 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2551 /* Should we now start scaling back up */
2552 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2553 cpi->oxcf.optimal_buffer_level / 100)) {
2555 (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2556 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2559 /* Get the new height and width */
2560 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2561 Scale2Ratio(cm->vert_scale, &vr, &vs);
2562 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2563 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2565 /* If the image size has changed we need to reallocate the buffers
2566 * and resample the source image
2568 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2569 cm->Width = new_width;
2570 cm->Height = new_height;
2571 vp8_alloc_compressor_data(cpi);
2572 scale_and_extend_source(cpi->un_scaled_source, cpi);
2581 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2582 VP8_COMMON *cm = &cpi->common;
2584 /* Select an interval before next GF or altref */
2585 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2587 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2588 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2590 /* Set the bits per frame that we should try and recover in
2591 * subsequent inter frames to account for the extra GF spend...
2592 * note that his does not apply for GF updates that occur
2593 * coincident with a key frame as the extra cost of key frames is
2594 * dealt with elsewhere.
2596 cpi->gf_overspend_bits += cpi->projected_frame_size;
2597 cpi->non_gf_bitrate_adjustment =
2598 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2601 /* Update data structure that monitors level of reference to last GF */
2602 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2603 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2605 /* this frame refreshes means next frames don't unless specified by user */
2606 cpi->frames_since_golden = 0;
2608 /* Clear the alternate reference update pending flag. */
2609 cpi->source_alt_ref_pending = 0;
2611 /* Set the alternate reference frame active flag */
2612 cpi->source_alt_ref_active = 1;
2614 static void update_golden_frame_stats(VP8_COMP *cpi) {
2615 VP8_COMMON *cm = &cpi->common;
2617 /* Update the Golden frame usage counts. */
2618 if (cm->refresh_golden_frame) {
2619 /* Select an interval before next GF */
2620 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2622 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2623 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2625 /* Set the bits per frame that we should try and recover in
2626 * subsequent inter frames to account for the extra GF spend...
2627 * note that his does not apply for GF updates that occur
2628 * coincident with a key frame as the extra cost of key frames
2629 * is dealt with elsewhere.
2631 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2632 /* Calcluate GF bits to be recovered
2633 * Projected size - av frame bits available for inter
2634 * frames for clip as a whole
2636 cpi->gf_overspend_bits +=
2637 (cpi->projected_frame_size - cpi->inter_frame_target);
2640 cpi->non_gf_bitrate_adjustment =
2641 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2644 /* Update data structure that monitors level of reference to last GF */
2645 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2646 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2648 /* this frame refreshes means next frames don't unless specified by
2651 cm->refresh_golden_frame = 0;
2652 cpi->frames_since_golden = 0;
2654 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2655 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2656 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2657 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2659 /* ******** Fixed Q test code only ************ */
2660 /* If we are going to use the ALT reference for the next group of
2661 * frames set a flag to say so.
2663 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2664 !cpi->common.refresh_alt_ref_frame) {
2665 cpi->source_alt_ref_pending = 1;
2666 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2669 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2671 /* Decrement count down till next gf */
2672 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2674 } else if (!cpi->common.refresh_alt_ref_frame) {
2675 /* Decrement count down till next gf */
2676 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2678 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2680 cpi->frames_since_golden++;
2682 if (cpi->frames_since_golden > 1) {
2683 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2684 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2685 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2686 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2687 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2688 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2689 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2690 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2695 /* This function updates the reference frame probability estimates that
2696 * will be used during mode selection
2698 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2699 VP8_COMMON *cm = &cpi->common;
2701 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2702 const int rf_intra = rfct[INTRA_FRAME];
2703 const int rf_inter =
2704 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2706 if (cm->frame_type == KEY_FRAME) {
2707 cpi->prob_intra_coded = 255;
2708 cpi->prob_last_coded = 128;
2709 cpi->prob_gf_coded = 128;
2710 } else if (!(rf_intra + rf_inter)) {
2711 cpi->prob_intra_coded = 63;
2712 cpi->prob_last_coded = 128;
2713 cpi->prob_gf_coded = 128;
2716 /* update reference frame costs since we can do better than what we got
2719 if (cpi->oxcf.number_of_layers == 1) {
2720 if (cpi->common.refresh_alt_ref_frame) {
2721 cpi->prob_intra_coded += 40;
2722 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2723 cpi->prob_last_coded = 200;
2724 cpi->prob_gf_coded = 1;
2725 } else if (cpi->frames_since_golden == 0) {
2726 cpi->prob_last_coded = 214;
2727 } else if (cpi->frames_since_golden == 1) {
2728 cpi->prob_last_coded = 192;
2729 cpi->prob_gf_coded = 220;
2730 } else if (cpi->source_alt_ref_active) {
2731 cpi->prob_gf_coded -= 20;
2733 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2735 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2739 #if !CONFIG_REALTIME_ONLY
2740 /* 1 = key, 0 = inter */
2741 static int decide_key_frame(VP8_COMP *cpi) {
2742 VP8_COMMON *cm = &cpi->common;
2744 int code_key_frame = 0;
2748 if (cpi->Speed > 11) return 0;
2750 /* Clear down mmx registers */
2751 vpx_clear_system_state();
2753 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2754 double change = 1.0 *
2755 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2756 (1 + cpi->last_intra_error);
2759 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2760 (1 + cpi->last_prediction_error);
2761 double minerror = cm->MBs * 256;
2763 cpi->last_intra_error = cpi->mb.intra_error;
2764 cpi->last_prediction_error = cpi->mb.prediction_error;
2766 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2767 cpi->mb.prediction_error > minerror &&
2768 (change > .25 || change2 > .25)) {
2769 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2770 * cpi->last_frame_percent_intra + 3*/
2777 /* If the following are true we might as well code a key frame */
2778 if (((cpi->this_frame_percent_intra == 100) &&
2779 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2780 ((cpi->this_frame_percent_intra > 95) &&
2781 (cpi->this_frame_percent_intra >=
2782 (cpi->last_frame_percent_intra + 5)))) {
2785 /* in addition if the following are true and this is not a golden frame
2786 * then code a key frame Note that on golden frames there often seems
2787 * to be a pop in intra useage anyway hence this restriction is
2788 * designed to prevent spurious key frames. The Intra pop needs to be
2791 else if (((cpi->this_frame_percent_intra > 60) &&
2792 (cpi->this_frame_percent_intra >
2793 (cpi->last_frame_percent_intra * 2))) ||
2794 ((cpi->this_frame_percent_intra > 75) &&
2795 (cpi->this_frame_percent_intra >
2796 (cpi->last_frame_percent_intra * 3 / 2))) ||
2797 ((cpi->this_frame_percent_intra > 90) &&
2798 (cpi->this_frame_percent_intra >
2799 (cpi->last_frame_percent_intra + 10)))) {
2800 if (!cm->refresh_golden_frame) code_key_frame = 1;
2803 return code_key_frame;
2806 static void Pass1Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
2807 unsigned int *frame_flags) {
2811 vp8_set_quantizer(cpi, 26);
2813 vp8_first_pass(cpi);
2818 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2821 /* write the frame */
2826 sprintf(filename, "cx\\y%04d.raw", this_frame);
2827 yframe = fopen(filename, "wb");
2829 for (i = 0; i < frame->y_height; ++i)
2830 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2833 sprintf(filename, "cx\\u%04d.raw", this_frame);
2834 yframe = fopen(filename, "wb");
2836 for (i = 0; i < frame->uv_height; ++i)
2837 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2840 sprintf(filename, "cx\\v%04d.raw", this_frame);
2841 yframe = fopen(filename, "wb");
2843 for (i = 0; i < frame->uv_height; ++i)
2844 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2849 /* return of 0 means drop frame */
2851 #if !CONFIG_REALTIME_ONLY
2852 /* Function to test for conditions that indeicate we should loop
2853 * back and recode a frame.
2855 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2856 int maxq, int minq) {
2857 int force_recode = 0;
2858 VP8_COMMON *cm = &cpi->common;
2860 /* Is frame recode allowed at all
2861 * Yes if either recode mode 1 is selected or mode two is selcted
2862 * and the frame is a key frame. golden frame or alt_ref_frame
2864 if ((cpi->sf.recode_loop == 1) ||
2865 ((cpi->sf.recode_loop == 2) &&
2866 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2867 cm->refresh_alt_ref_frame))) {
2868 /* General over and under shoot tests */
2869 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2870 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2873 /* Special Constrained quality tests */
2874 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2875 /* Undershoot and below auto cq level */
2876 if ((q > cpi->cq_target_quality) &&
2877 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2880 /* Severe undershoot and between auto and user cq level */
2881 else if ((q > cpi->oxcf.cq_level) &&
2882 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2883 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2885 cpi->active_best_quality = cpi->oxcf.cq_level;
2890 return force_recode;
2892 #endif // !CONFIG_REALTIME_ONLY
/* Apply the reference-buffer refresh/copy decisions signaled for the frame
 * just encoded: on a key frame all three references point at the new
 * buffer; otherwise alt-ref, golden and last are individually refreshed
 * from the new buffer or copied from another reference.  Buffer sharing is
 * by index into cm->yv12_fb[] with per-buffer usage flags.
 * NOTE(review): this listing is elided — several closing braces and blank
 * lines of the original are not shown at these lines.
 */
2894 static void update_reference_frames(VP8_COMP *cpi) {
2895 VP8_COMMON *cm = &cpi->common;
2896 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2898 /* At this point the new frame has been encoded.
2899 * If any buffer copy / swapping is signaled it should be done here.
/* Key frame: the new buffer becomes golden AND alt-ref simultaneously. */
2902 if (cm->frame_type == KEY_FRAME) {
2903 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2905 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2906 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2908 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
/* Record which display frame each reference now corresponds to. */
2910 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2911 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2912 } else /* For non key frames */
/* Alt-ref handling: refresh from the new frame, or copy from last (1) or
 * golden (2) per copy_buffer_to_arf. */
2914 if (cm->refresh_alt_ref_frame) {
2915 assert(!cm->copy_buffer_to_arf);
2917 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2918 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2919 cm->alt_fb_idx = cm->new_fb_idx;
2921 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2922 } else if (cm->copy_buffer_to_arf) {
2923 assert(!(cm->copy_buffer_to_arf & ~0x3));
2925 if (cm->copy_buffer_to_arf == 1) {
2926 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2927 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2928 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2929 cm->alt_fb_idx = cm->lst_fb_idx;
2931 cpi->current_ref_frames[ALTREF_FRAME] =
2932 cpi->current_ref_frames[LAST_FRAME];
2934 } else /* if (cm->copy_buffer_to_arf == 2) */
2936 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2937 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2938 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2939 cm->alt_fb_idx = cm->gld_fb_idx;
2941 cpi->current_ref_frames[ALTREF_FRAME] =
2942 cpi->current_ref_frames[GOLDEN_FRAME];
/* Golden handling: refresh from the new frame, or copy from last (1) or
 * alt-ref (2) per copy_buffer_to_gf. */
2947 if (cm->refresh_golden_frame) {
2948 assert(!cm->copy_buffer_to_gf);
2950 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2951 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2952 cm->gld_fb_idx = cm->new_fb_idx;
2954 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2955 } else if (cm->copy_buffer_to_gf) {
/* NOTE(review): this asserts copy_buffer_to_arf inside the
 * copy_buffer_to_gf branch — likely intended copy_buffer_to_gf;
 * confirm against upstream before changing. */
2956 assert(!(cm->copy_buffer_to_arf & ~0x3));
2958 if (cm->copy_buffer_to_gf == 1) {
2959 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2960 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2961 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2962 cm->gld_fb_idx = cm->lst_fb_idx;
2964 cpi->current_ref_frames[GOLDEN_FRAME] =
2965 cpi->current_ref_frames[LAST_FRAME];
2967 } else /* if (cm->copy_buffer_to_gf == 2) */
2969 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2970 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2971 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2972 cm->gld_fb_idx = cm->alt_fb_idx;
2974 cpi->current_ref_frames[GOLDEN_FRAME] =
2975 cpi->current_ref_frames[ALTREF_FRAME];
/* Last-frame handling: applies on key and non-key frames alike. */
2981 if (cm->refresh_last_frame) {
2982 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2983 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2984 cm->lst_fb_idx = cm->new_fb_idx;
2986 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
/* Keep the denoiser's running-average buffers in step with whatever
 * reference buffers were just refreshed or copied. */
2989 #if CONFIG_TEMPORAL_DENOISING
2990 if (cpi->oxcf.noise_sensitivity) {
2991 /* we shouldn't have to keep multiple copies as we know in advance which
2992 * buffer we should start - for now to get something up and running
2993 * I've chosen to copy the buffers
2995 if (cm->frame_type == KEY_FRAME) {
2997 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
2998 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
2999 } else /* For non key frames */
3001 vp8_yv12_extend_frame_borders(
3002 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
3004 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
3005 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3006 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
3008 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
3009 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3010 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
3012 if (cm->refresh_last_frame) {
3013 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3014 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
/* Mode 4 additionally tracks the raw previous source frame. */
3017 if (cpi->oxcf.noise_sensitivity == 4)
3018 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
3023 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
3024 YV12_BUFFER_CONFIG *dest,
3030 int min_consec_zero_last = 10;
3031 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
3032 unsigned char *src = source->y_buffer;
3033 unsigned char *dst = dest->y_buffer;
3035 /* Loop through the Y plane, every |skip| blocks along rows and colmumns,
3036 * summing the square differences, and only for blocks that have been
3037 * zero_last mode at least |x| frames in a row.
3039 for (i = 0; i < source->y_height; i += 16 * skip) {
3040 int block_index_row = (i >> 4) * cpi->common.mb_cols;
3041 for (j = 0; j < source->y_width; j += 16 * skip) {
3042 int index = block_index_row + (j >> 4);
3043 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3045 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
3046 dest->y_stride, &sse);
3050 src += 16 * skip * source->y_stride;
3051 dst += 16 * skip * dest->y_stride;
3053 // Only return non-zero if we have at least ~1/16 samples for estimate.
3054 if (num_blocks > (tot_num_blocks >> 4)) {
3055 return (Total / num_blocks);
3061 #if CONFIG_TEMPORAL_DENOISING
/* Periodically re-evaluate the temporal denoiser strength: estimate source
 * noise as a contrast-normalized mean square difference (nmse) between the
 * current and previous source frames over stable (long-ZEROMV) blocks, keep
 * a recursive average of nmse and QP, and after num_mode_change samples
 * switch between kDenoiserOnYUV and kDenoiserOnYUVAggressive based on the
 * nmse, average QP and bitrate thresholds.
 * NOTE(review): this listing is elided — local declarations (i, j, total,
 * num_blocks, skip, sse, sse2) and several braces are not shown here.
 */
3062 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3063 const VP8_COMMON *const cm = &cpi->common;
3067 // Number of blocks skipped along row/column in computing the
3068 // nmse (normalized mean square error) of source.
3070 // Only select blocks for computing nmse that have been encoded
3071 // as ZERO LAST min_consec_zero_last frames in a row.
3072 // Scale with number of temporal layers.
3073 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3074 // Decision is tested for changing the denoising mode every
3075 // num_mode_change times this function is called. Note that this
3076 // function called every 8 frames, so (8 * num_mode_change) is number
3077 // of frames where denoising mode change is tested for switch.
3078 int num_mode_change = 20;
3079 // Framerate factor, to compensate for larger mse at lower framerates.
3080 // Use ref_framerate, which is full source framerate for temporal layers.
3081 // TODO(marpan): Adjust this factor.
3082 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3083 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3084 int ystride = cpi->Source->y_stride;
3085 unsigned char *src = cpi->Source->y_buffer;
3086 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
// Flat mid-gray 16x16 block; variance against it measures block contrast.
3087 static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
3088 128, 128, 128, 128, 128, 128,
3089 128, 128, 128, 128 };
3090 int bandwidth = (int)(cpi->target_bandwidth);
3091 // For temporal layers, use full bandwidth (top layer).
3092 if (cpi->oxcf.number_of_layers > 1) {
3093 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3094 bandwidth = (int)(lc->target_bandwidth);
3096 // Loop through the Y plane, every skip blocks along rows and columns,
3097 // summing the normalized mean square error, only for blocks that have
3098 // been encoded as ZEROMV LAST at least min_consec_zero_last least frames in
3099 // a row and have small sum difference between current and previous frame.
3100 // Normalization here is by the contrast of the current frame block.
3101 for (i = 0; i < cm->Height; i += 16 * skip) {
3102 int block_index_row = (i >> 4) * cm->mb_cols;
3103 for (j = 0; j < cm->Width; j += 16 * skip) {
3104 int index = block_index_row + (j >> 4);
3105 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3107 const unsigned int var =
3108 vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
3109 // Only consider this block as valid for noise measurement
3110 // if the sum_diff average of the current and previous frame
3111 // is small (to avoid effects from lighting change).
3112 if ((sse - var) < 128) {
3114 const unsigned int act =
3115 vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
3116 if (act > 0) total += sse / act;
3121 src += 16 * skip * ystride;
3122 dst += 16 * skip * ystride;
3124 total = total * fac_framerate / 100;
3126 // Only consider this frame as valid sample if we have computed nmse over
3127 // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
3128 // application inputs duplicate frames, or contrast is all zero).
3129 if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
3130 // Update the recursive mean square source_diff.
3131 total = (total << 8) / num_blocks;
3132 if (cpi->denoiser.nmse_source_diff_count == 0) {
3133 // First sample in new interval.
3134 cpi->denoiser.nmse_source_diff = total;
3135 cpi->denoiser.qp_avg = cm->base_qindex;
3137 // For subsequent samples, use average with weight ~1/4 for new sample.
3138 cpi->denoiser.nmse_source_diff =
3139 (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
3140 cpi->denoiser.qp_avg =
3141 (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
3143 cpi->denoiser.nmse_source_diff_count++;
3145 // Check for changing the denoiser mode, when we have obtained #samples =
3146 // num_mode_change. Condition the change also on the bitrate and QP.
3147 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3148 // Check for going up: from normal to aggressive mode.
3149 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3150 (cpi->denoiser.nmse_source_diff >
3151 cpi->denoiser.threshold_aggressive_mode) &&
3152 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3153 bandwidth > cpi->denoiser.bitrate_threshold)) {
3154 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3156 // Check for going down: from aggressive to normal mode.
3157 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3158 (cpi->denoiser.nmse_source_diff <
3159 cpi->denoiser.threshold_aggressive_mode)) ||
3160 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3161 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3162 bandwidth < cpi->denoiser.bitrate_threshold))) {
3163 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3166 // Reset metric and counter for next interval.
3167 cpi->denoiser.nmse_source_diff = 0;
3168 cpi->denoiser.qp_avg = 0;
3169 cpi->denoiser.nmse_source_diff_count = 0;
/*
 * vp8_loopfilter_frame: choose a loop-filter strength for the current
 * encoded frame and (conditionally) run the in-loop deblocking filter on
 * the reconstructed frame, then extend its borders for later prediction.
 *
 * NOTE(review): this chunk appears to have physical lines elided (the
 * interior line numbers are non-contiguous), so several matching `else`,
 * `#else`, `#endif` and closing-brace lines are not visible here. The
 * comments below describe only the statements actually present; elided
 * control-flow counterparts are flagged as assumptions to confirm.
 */
3174 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3175   const FRAME_TYPE frame_type = cm->frame_type;
/* The loop filter only matters if this frame is written into at least one
 * reference buffer; a frame that refreshes no references is never used for
 * prediction, so filtering it would be wasted work. */
3177   int update_any_ref_buffers = 1;
3178   if (cpi->common.refresh_last_frame == 0 &&
3179       cpi->common.refresh_golden_frame == 0 &&
3180       cpi->common.refresh_alt_ref_frame == 0) {
3181     update_any_ref_buffers = 0;
/* NOTE(review): the next assignment is presumably the body of a
 * "loop filter disabled" branch (e.g. `if (cm->no_lpf)`) whose condition
 * line is elided from this view — confirm against the full file. */
3185     cm->filter_level = 0;
/* Otherwise: time how long filter-level selection takes (accumulated into
 * cpi->time_pick_lpf below). */
3187     struct vpx_usec_timer timer;
3189     vpx_clear_system_state();
3191     vpx_usec_timer_start(&timer);
/* auto_filter == 0 selects the fast (approximate) level picker; the
 * full-search picker calls below are presumably the elided else branch. */
3192     if (cpi->sf.auto_filter == 0) {
3193 #if CONFIG_TEMPORAL_DENOISING
3194       if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3195         // Use the denoised buffer for selecting base loop filter level.
3196         // Denoised signal for current frame is stored in INTRA_FRAME.
3197         // No denoising on key frames.
3198         vp8cx_pick_filter_level_fast(
3199             &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
/* Fallback (key frame or denoiser off): pick fast level from the source. */
3201         vp8cx_pick_filter_level_fast(cpi->Source, cpi);
/* NOTE(review): duplicate call below is presumably the
 * !CONFIG_TEMPORAL_DENOISING compile path — confirm the elided #else. */
3204       vp8cx_pick_filter_level_fast(cpi->Source, cpi);
/* Full (slower) filter-level search path, mirroring the fast path above. */
3207 #if CONFIG_TEMPORAL_DENOISING
3208       if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3209         // Use the denoised buffer for selecting base loop filter level.
3210         // Denoised signal for current frame is stored in INTRA_FRAME.
3211         // No denoising on key frames.
3212         vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3215         vp8cx_pick_filter_level(cpi->Source, cpi);
3218       vp8cx_pick_filter_level(cpi->Source, cpi);
/* If a non-zero level was chosen, record it as the alternate LF level. */
3222     if (cm->filter_level > 0) {
3223       vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3226     vpx_usec_timer_mark(&timer);
3227     cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
/* In multithreaded encoding, worker threads wait on this semaphore for the
 * filter level to be decided before they continue. */
3230 #if CONFIG_MULTITHREAD
3231   if (cpi->b_multi_threaded) {
3232     sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3236   // No need to apply loop-filter if the encoded frame does not update
3237   // any reference buffers.
3238   if (cm->filter_level > 0 && update_any_ref_buffers) {
3239     vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
/* Replicate edge pixels outward so motion search/prediction can read
 * beyond the frame boundary. */
3242   vp8_yv12_extend_frame_borders(cm->frame_to_show);
3245 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3246 unsigned char *dest,
3247 unsigned char *dest_end,
3248 unsigned int *frame_flags) {
3250 int frame_over_shoot_limit;
3251 int frame_under_shoot_limit;
3256 VP8_COMMON *cm = &cpi->common;
3257 int active_worst_qchanged = 0;
3259 #if !CONFIG_REALTIME_ONLY
3263 int zbin_oq_low = 0;
3266 int overshoot_seen = 0;
3267 int undershoot_seen = 0;
3270 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3271 cpi->oxcf.optimal_buffer_level / 100);
3272 int drop_mark75 = drop_mark * 2 / 3;
3273 int drop_mark50 = drop_mark / 4;
3274 int drop_mark25 = drop_mark / 8;
3276 /* Clear down mmx registers to allow floating point in what follows */
3277 vpx_clear_system_state();
3279 if (cpi->force_next_frame_intra) {
3280 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3281 cpi->force_next_frame_intra = 0;
3284 /* For an alt ref frame in 2 pass we skip the call to the second pass
3285 * function that sets the target bandwidth
3287 switch (cpi->pass) {
3288 #if !CONFIG_REALTIME_ONLY
3290 if (cpi->common.refresh_alt_ref_frame) {
3291 /* Per frame bit target for the alt ref frame */
3292 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3293 /* per second target bitrate */
3294 cpi->target_bandwidth =
3295 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3298 #endif // !CONFIG_REALTIME_ONLY
3300 cpi->per_frame_bandwidth =
3301 (int)(cpi->target_bandwidth / cpi->output_framerate);
3305 /* Default turn off buffer to buffer copying */
3306 cm->copy_buffer_to_gf = 0;
3307 cm->copy_buffer_to_arf = 0;
3309 /* Clear zbin over-quant value and mode boost values. */
3310 cpi->mb.zbin_over_quant = 0;
3311 cpi->mb.zbin_mode_boost = 0;
3313 /* Enable or disable mode based tweaking of the zbin
3314 * For 2 Pass Only used where GF/ARF prediction quality
3315 * is above a threshold
3317 cpi->mb.zbin_mode_boost_enabled = 1;
3318 if (cpi->pass == 2) {
3319 if (cpi->gfu_boost <= 400) {
3320 cpi->mb.zbin_mode_boost_enabled = 0;
3324 /* Current default encoder behaviour for the altref sign bias */
3325 if (cpi->source_alt_ref_active) {
3326 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3328 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3331 /* Check to see if a key frame is signaled
3332 * For two pass with auto key frame enabled cm->frame_type may already
3333 * be set, but not for one pass.
3335 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3336 (cpi->oxcf.auto_key &&
3337 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3338 /* Key frame from VFW/auto-keyframe/first frame */
3339 cm->frame_type = KEY_FRAME;
3340 #if CONFIG_TEMPORAL_DENOISING
3341 if (cpi->oxcf.noise_sensitivity == 4) {
3342 // For adaptive mode, reset denoiser to normal mode on key frame.
3343 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3348 #if CONFIG_MULTI_RES_ENCODING
3349 if (cpi->oxcf.mr_total_resolutions > 1) {
3350 LOWER_RES_FRAME_INFO *low_res_frame_info =
3351 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3353 if (cpi->oxcf.mr_encoder_id) {
3354 // TODO(marpan): This constraint shouldn't be needed, as we would like
3355 // to allow for key frame setting (forced or periodic) defined per
3356 // spatial layer. For now, keep this in.
3357 cm->frame_type = low_res_frame_info->frame_type;
3359 // Check if lower resolution is available for motion vector reuse.
3360 if (cm->frame_type != KEY_FRAME) {
3361 cpi->mr_low_res_mv_avail = 1;
3362 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3364 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3365 cpi->mr_low_res_mv_avail &=
3366 (cpi->current_ref_frames[LAST_FRAME] ==
3367 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3369 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3370 cpi->mr_low_res_mv_avail &=
3371 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3372 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3374 // Don't use altref to determine whether low res is available.
3375 // TODO (marpan): Should we make this type of condition on a
3376 // per-reference frame basis?
3378 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3379 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3380 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3385 // On a key frame: For the lowest resolution, keep track of the key frame
3386 // counter value. For the higher resolutions, reset the current video
3387 // frame counter to that of the lowest resolution.
3388 // This is done to the handle the case where we may stop/start encoding
3389 // higher layer(s). The restart-encoding of higher layer is only signaled
3390 // by a key frame for now.
3391 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3392 if (cm->frame_type == KEY_FRAME) {
3393 if (cpi->oxcf.mr_encoder_id) {
3394 // If the initial starting value of the buffer level is zero (this can
3395 // happen because we may have not started encoding this higher stream),
3396 // then reset it to non-zero value based on |starting_buffer_level|.
3397 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3399 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3400 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3401 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3402 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3403 lc->bits_off_target = lc->starting_buffer_level;
3404 lc->buffer_level = lc->starting_buffer_level;
3407 cpi->common.current_video_frame =
3408 low_res_frame_info->key_frame_counter_value;
3410 low_res_frame_info->key_frame_counter_value =
3411 cpi->common.current_video_frame;
3417 // Find the reference frame closest to the current frame.
3418 cpi->closest_reference_frame = LAST_FRAME;
3419 if (cm->frame_type != KEY_FRAME) {
3421 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3422 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3423 closest_ref = LAST_FRAME;
3424 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3425 closest_ref = GOLDEN_FRAME;
3426 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3427 closest_ref = ALTREF_FRAME;
3429 for (i = 1; i <= 3; ++i) {
3430 vpx_ref_frame_type_t ref_frame_type =
3431 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3432 if (cpi->ref_frame_flags & ref_frame_type) {
3433 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3434 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3439 cpi->closest_reference_frame = closest_ref;
3442 /* Set various flags etc to special state if it is a key frame */
3443 if (cm->frame_type == KEY_FRAME) {
3446 // Set the loop filter deltas and segmentation map update
3447 setup_features(cpi);
3449 /* The alternate reference frame cannot be active for a key frame */
3450 cpi->source_alt_ref_active = 0;
3452 /* Reset the RD threshold multipliers to default of * 1 (128) */
3453 for (i = 0; i < MAX_MODES; ++i) {
3454 cpi->mb.rd_thresh_mult[i] = 128;
3457 // Reset the zero_last counter to 0 on key frame.
3458 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3459 memset(cpi->consec_zero_last_mvbias, 0,
3460 (cpi->common.mb_rows * cpi->common.mb_cols));
3464 /* Experimental code for lagged compress and one pass
3465 * Initialise one_pass GF frames stats
3466 * Update stats used for GF selection
3469 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3471 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3472 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3473 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3474 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3475 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3476 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3477 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3478 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3479 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3483 update_rd_ref_frame_probs(cpi);
3485 if (cpi->drop_frames_allowed) {
3486 /* The reset to decimation 0 is only done here for one pass.
3487 * Once it is set two pass leaves decimation on till the next kf.
3489 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3490 cpi->decimation_factor--;
3493 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3494 cpi->decimation_factor = 1;
3496 } else if (cpi->buffer_level < drop_mark25 &&
3497 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3498 cpi->decimation_factor = 3;
3499 } else if (cpi->buffer_level < drop_mark50 &&
3500 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3501 cpi->decimation_factor = 2;
3502 } else if (cpi->buffer_level < drop_mark75 &&
3503 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3504 cpi->decimation_factor = 1;
3508 /* The following decimates the frame rate according to a regular
3509 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3510 * prevent buffer under-run in CBR mode. Alternatively it might be
3511 * desirable in some situations to drop frame rate but throw more bits
3514 * Note that dropping a key frame can be problematic if spatial
3515 * resampling is also active
3517 if (cpi->decimation_factor > 0) {
3518 switch (cpi->decimation_factor) {
3520 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3523 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3526 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3530 /* Note that we should not throw out a key frame (especially when
3531 * spatial resampling is enabled).
3533 if (cm->frame_type == KEY_FRAME) {
3534 cpi->decimation_count = cpi->decimation_factor;
3535 } else if (cpi->decimation_count > 0) {
3536 cpi->decimation_count--;
3538 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3539 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3540 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3543 #if CONFIG_MULTI_RES_ENCODING
3544 vp8_store_drop_frame_info(cpi);
3547 cm->current_video_frame++;
3548 cpi->frames_since_key++;
3549 // We advance the temporal pattern for dropped frames.
3550 cpi->temporal_pattern_counter++;
3552 #if CONFIG_INTERNAL_STATS
3556 cpi->buffer_level = cpi->bits_off_target;
3558 if (cpi->oxcf.number_of_layers > 1) {
3561 /* Propagate bits saved by dropping the frame to higher
3564 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3565 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3566 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3567 if (lc->bits_off_target > lc->maximum_buffer_size) {
3568 lc->bits_off_target = lc->maximum_buffer_size;
3570 lc->buffer_level = lc->bits_off_target;
3576 cpi->decimation_count = cpi->decimation_factor;
3579 cpi->decimation_count = 0;
3582 /* Decide how big to make the frame */
3583 if (!vp8_pick_frame_size(cpi)) {
3584 /*TODO: 2 drop_frame and return code could be put together. */
3585 #if CONFIG_MULTI_RES_ENCODING
3586 vp8_store_drop_frame_info(cpi);
3588 cm->current_video_frame++;
3589 cpi->frames_since_key++;
3590 // We advance the temporal pattern for dropped frames.
3591 cpi->temporal_pattern_counter++;
3595 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3596 * This has a knock on effect on active best quality as well.
3597 * For CBR if the buffer reaches its maximum level then we can no longer
3598 * save up bits for later frames so we might as well use them up
3599 * on the current frame.
3601 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3602 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3603 cpi->buffered_mode) {
3604 /* Max adjustment is 1/4 */
3605 int Adjustment = cpi->active_worst_quality / 4;
3610 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3611 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3612 cpi->oxcf.optimal_buffer_level) /
3615 if (buff_lvl_step) {
3617 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3624 cpi->active_worst_quality -= Adjustment;
3626 if (cpi->active_worst_quality < cpi->active_best_quality) {
3627 cpi->active_worst_quality = cpi->active_best_quality;
3632 /* Set an active best quality and if necessary active worst quality
3633 * There is some odd behavior for one pass here that needs attention.
3635 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3636 vpx_clear_system_state();
3638 Q = cpi->active_worst_quality;
3640 if (cm->frame_type == KEY_FRAME) {
3641 if (cpi->pass == 2) {
3642 if (cpi->gfu_boost > 600) {
3643 cpi->active_best_quality = kf_low_motion_minq[Q];
3645 cpi->active_best_quality = kf_high_motion_minq[Q];
3648 /* Special case for key frames forced because we have reached
3649 * the maximum key frame interval. Here force the Q to a range
3650 * based on the ambient Q to reduce the risk of popping
3652 if (cpi->this_key_frame_forced) {
3653 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3654 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3655 } else if (cpi->active_best_quality<cpi->avg_frame_qindex>> 2) {
3656 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3660 /* One pass more conservative */
3662 cpi->active_best_quality = kf_high_motion_minq[Q];
3666 else if (cpi->oxcf.number_of_layers == 1 &&
3667 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3668 /* Use the lower of cpi->active_worst_quality and recent
3669 * average Q as basis for GF/ARF Q limit unless last frame was
3672 if ((cpi->frames_since_key > 1) &&
3673 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3674 Q = cpi->avg_frame_qindex;
3677 /* For constrained quality dont allow Q less than the cq level */
3678 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3679 (Q < cpi->cq_target_quality)) {
3680 Q = cpi->cq_target_quality;
3683 if (cpi->pass == 2) {
3684 if (cpi->gfu_boost > 1000) {
3685 cpi->active_best_quality = gf_low_motion_minq[Q];
3686 } else if (cpi->gfu_boost < 400) {
3687 cpi->active_best_quality = gf_high_motion_minq[Q];
3689 cpi->active_best_quality = gf_mid_motion_minq[Q];
3692 /* Constrained quality use slightly lower active best. */
3693 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3694 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3697 /* One pass more conservative */
3699 cpi->active_best_quality = gf_high_motion_minq[Q];
3702 cpi->active_best_quality = inter_minq[Q];
3704 /* For the constant/constrained quality mode we dont want
3705 * q to fall below the cq level.
3707 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3708 (cpi->active_best_quality < cpi->cq_target_quality)) {
3709 /* If we are strongly undershooting the target rate in the last
3710 * frames then use the user passed in cq value not the auto
3713 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3714 cpi->active_best_quality = cpi->oxcf.cq_level;
3716 cpi->active_best_quality = cpi->cq_target_quality;
3721 /* If CBR and the buffer is as full then it is reasonable to allow
3722 * higher quality on the frames to prevent bits just going to waste.
3724 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3725 /* Note that the use of >= here elliminates the risk of a devide
3726 * by 0 error in the else if clause
3728 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3729 cpi->active_best_quality = cpi->best_quality;
3731 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3733 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3734 (cpi->oxcf.maximum_buffer_size -
3735 cpi->oxcf.optimal_buffer_level));
3736 int min_qadjustment =
3737 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3739 cpi->active_best_quality -= min_qadjustment;
3743 /* Make sure constrained quality mode limits are adhered to for the first
3744 * few frames of one pass encodes
3746 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3747 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3748 cpi->common.refresh_alt_ref_frame) {
3749 cpi->active_best_quality = cpi->best_quality;
3750 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3751 cpi->active_best_quality = cpi->cq_target_quality;
3755 /* Clip the active best and worst quality values to limits */
3756 if (cpi->active_worst_quality > cpi->worst_quality) {
3757 cpi->active_worst_quality = cpi->worst_quality;
3760 if (cpi->active_best_quality < cpi->best_quality) {
3761 cpi->active_best_quality = cpi->best_quality;
3764 if (cpi->active_worst_quality < cpi->active_best_quality) {
3765 cpi->active_worst_quality = cpi->active_best_quality;
3768 /* Determine initial Q to try */
3769 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3771 #if !CONFIG_REALTIME_ONLY
3773 /* Set highest allowed value for Zbin over quant */
3774 if (cm->frame_type == KEY_FRAME) {
3776 } else if ((cpi->oxcf.number_of_layers == 1) &&
3777 ((cm->refresh_alt_ref_frame ||
3778 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3781 zbin_oq_high = ZBIN_OQ_MAX;
3785 /* Setup background Q adjustment for error resilient mode.
3786 * For multi-layer encodes only enable this for the base layer.
3788 if (cpi->cyclic_refresh_mode_enabled) {
3789 // Special case for screen_content_mode with golden frame updates.
3791 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3792 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3793 cyclic_background_refresh(cpi, Q, 0);
3795 disable_segmentation(cpi);
3799 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3800 &frame_over_shoot_limit);
3802 #if !CONFIG_REALTIME_ONLY
3803 /* Limit Q range for the adaptive loop. */
3804 bottom_index = cpi->active_best_quality;
3805 top_index = cpi->active_worst_quality;
3806 q_low = cpi->active_best_quality;
3807 q_high = cpi->active_worst_quality;
3810 vp8_save_coding_context(cpi);
3814 scale_and_extend_source(cpi->un_scaled_source, cpi);
3816 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3817 // Option to apply spatial blur under the aggressive or adaptive
3818 // (temporal denoising) mode.
3819 if (cpi->oxcf.noise_sensitivity >= 3) {
3820 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3821 vp8_de_noise(cm, cpi->Source, cpi->Source,
3822 cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
3827 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3829 if (cpi->oxcf.noise_sensitivity > 0) {
3833 switch (cpi->oxcf.noise_sensitivity) {
3834 case 1: l = 20; break;
3835 case 2: l = 40; break;
3836 case 3: l = 60; break;
3837 case 4: l = 80; break;
3838 case 5: l = 100; break;
3839 case 6: l = 150; break;
3842 if (cm->frame_type == KEY_FRAME) {
3843 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3845 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3847 src = cpi->Source->y_buffer;
3849 if (cpi->Source->y_stride < 0) {
3850 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3857 #ifdef OUTPUT_YUV_SRC
3858 vp8_write_yuv_frame(yuv_file, cpi->Source);
3862 vpx_clear_system_state();
3864 vp8_set_quantizer(cpi, Q);
3866 /* setup skip prob for costing in mode/mv decision */
3867 if (cpi->common.mb_no_coeff_skip) {
3868 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3870 if (cm->frame_type != KEY_FRAME) {
3871 if (cpi->common.refresh_alt_ref_frame) {
3872 if (cpi->last_skip_false_probs[2] != 0) {
3873 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3877 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3878 cpi->last_skip_probs_q[2])<=16 )
3879 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3880 else if (cpi->last_skip_false_probs[2]!=0)
3881 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3882 cpi->prob_skip_false ) / 2;
3884 } else if (cpi->common.refresh_golden_frame) {
3885 if (cpi->last_skip_false_probs[1] != 0) {
3886 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3890 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3891 cpi->last_skip_probs_q[1])<=16 )
3892 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3893 else if (cpi->last_skip_false_probs[1]!=0)
3894 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3895 cpi->prob_skip_false ) / 2;
3898 if (cpi->last_skip_false_probs[0] != 0) {
3899 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3903 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3904 cpi->last_skip_probs_q[0])<=16 )
3905 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3906 else if(cpi->last_skip_false_probs[0]!=0)
3907 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3908 cpi->prob_skip_false ) / 2;
3912 /* as this is for cost estimate, let's make sure it does not
3913 * go extreme eitehr way
3915 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3917 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3919 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3920 cpi->prob_skip_false = 1;
3928 FILE *f = fopen("skip.stt", "a");
3929 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3936 if (cm->frame_type == KEY_FRAME) {
3937 if (resize_key_frame(cpi)) {
3938 /* If the frame size has changed, need to reset Q, quantizer,
3939 * and background refresh.
3941 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3942 if (cpi->cyclic_refresh_mode_enabled) {
3943 if (cpi->current_layer == 0) {
3944 cyclic_background_refresh(cpi, Q, 0);
3946 disable_segmentation(cpi);
3949 // Reset the zero_last counter to 0 on key frame.
3950 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3951 memset(cpi->consec_zero_last_mvbias, 0,
3952 (cpi->common.mb_rows * cpi->common.mb_cols));
3953 vp8_set_quantizer(cpi, Q);
3956 vp8_setup_key_frame(cpi);
3959 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3961 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3963 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3964 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3967 if (cm->refresh_entropy_probs == 0) {
3968 /* save a copy for later refresh */
3969 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3972 vp8_update_coef_context(cpi);
3974 vp8_update_coef_probs(cpi);
3976 /* transform / motion compensation build reconstruction frame
3977 * +pack coef partitions
3979 vp8_encode_frame(cpi);
3981 /* cpi->projected_frame_size is not needed for RT mode */
3984 /* transform / motion compensation build reconstruction frame */
3985 vp8_encode_frame(cpi);
3986 if (cpi->oxcf.screen_content_mode == 2) {
3987 if (vp8_drop_encodedframe_overshoot(cpi, Q)) return;
3990 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3991 cpi->projected_frame_size =
3992 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3994 vpx_clear_system_state();
3996 /* Test to see if the stats generated for this frame indicate that
3997 * we should have coded a key frame (assuming that we didn't)!
4000 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
4001 cpi->compressor_speed != 2) {
4002 #if !CONFIG_REALTIME_ONLY
4003 if (decide_key_frame(cpi)) {
4004 /* Reset all our sizing numbers and recode */
4005 cm->frame_type = KEY_FRAME;
4007 vp8_pick_frame_size(cpi);
4009 /* Clear the Alt reference frame active flag when we have
4012 cpi->source_alt_ref_active = 0;
4014 // Set the loop filter deltas and segmentation map update
4015 setup_features(cpi);
4017 vp8_restore_coding_context(cpi);
4019 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4021 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
4022 &frame_over_shoot_limit);
4024 /* Limit Q range for the adaptive loop. */
4025 bottom_index = cpi->active_best_quality;
4026 top_index = cpi->active_worst_quality;
4027 q_low = cpi->active_best_quality;
4028 q_high = cpi->active_worst_quality;
4038 vpx_clear_system_state();
4040 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4042 /* Are we are overshooting and up against the limit of active max Q. */
4043 if (((cpi->pass != 2) ||
4044 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4045 (Q == cpi->active_worst_quality) &&
4046 (cpi->active_worst_quality < cpi->worst_quality) &&
4047 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4048 int over_size_percent =
4049 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4050 frame_over_shoot_limit;
4052 /* If so is there any scope for relaxing it */
4053 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4054 (over_size_percent > 0)) {
4055 cpi->active_worst_quality++;
4056 /* Assume 1 qstep = about 4% on frame size. */
4057 over_size_percent = (int)(over_size_percent * 0.96);
4059 #if !CONFIG_REALTIME_ONLY
4060 top_index = cpi->active_worst_quality;
4061 #endif // !CONFIG_REALTIME_ONLY
4062 /* If we have updated the active max Q do not call
4063 * vp8_update_rate_correction_factors() this loop.
4065 active_worst_qchanged = 1;
4067 active_worst_qchanged = 0;
4070 #if CONFIG_REALTIME_ONLY
4073 /* Special case handling for forced key frames */
4074 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4076 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4078 /* The key frame is not good enough */
4079 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4081 q_high = (Q > q_low) ? (Q - 1) : q_low;
4084 Q = (q_high + q_low) >> 1;
4086 /* The key frame is much better than the previous frame */
4087 else if (kf_err < (cpi->ambient_err >> 1)) {
4089 q_low = (Q < q_high) ? (Q + 1) : q_high;
4092 Q = (q_high + q_low + 1) >> 1;
4095 /* Clamp Q to upper and lower limits: */
4098 } else if (Q < q_low) {
4105 /* Is the projected frame size out of range and are we allowed
4106 * to attempt to recode.
4108 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4109 frame_under_shoot_limit, Q, top_index,
4114 /* Frame size out of permitted range. Update correction factor
4115 * & compute new Q to try...
4118 /* Frame is too large */
4119 if (cpi->projected_frame_size > cpi->this_frame_target) {
4120 /* Raise Qlow as to at least the current value */
4121 q_low = (Q < q_high) ? (Q + 1) : q_high;
4123 /* If we are using over quant do the same for zbin_oq_low */
4124 if (cpi->mb.zbin_over_quant > 0) {
4125 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4126 ? (cpi->mb.zbin_over_quant + 1)
4130 if (undershoot_seen) {
4131 /* Update rate_correction_factor unless
4132 * cpi->active_worst_quality has changed.
4134 if (!active_worst_qchanged) {
4135 vp8_update_rate_correction_factors(cpi, 1);
4138 Q = (q_high + q_low + 1) / 2;
4140 /* Adjust cpi->zbin_over_quant (only allowed when Q
4144 cpi->mb.zbin_over_quant = 0;
4146 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4147 ? (cpi->mb.zbin_over_quant + 1)
4149 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4152 /* Update rate_correction_factor unless
4153 * cpi->active_worst_quality has changed.
4155 if (!active_worst_qchanged) {
4156 vp8_update_rate_correction_factors(cpi, 0);
4159 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4161 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4163 vp8_update_rate_correction_factors(cpi, 0);
4164 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4171 /* Frame is too small */
4173 if (cpi->mb.zbin_over_quant == 0) {
4174 /* Lower q_high if not using over quant */
4175 q_high = (Q > q_low) ? (Q - 1) : q_low;
4177 /* else lower zbin_oq_high */
4178 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4179 ? (cpi->mb.zbin_over_quant - 1)
4183 if (overshoot_seen) {
4184 /* Update rate_correction_factor unless
4185 * cpi->active_worst_quality has changed.
4187 if (!active_worst_qchanged) {
4188 vp8_update_rate_correction_factors(cpi, 1);
4191 Q = (q_high + q_low) / 2;
4193 /* Adjust cpi->zbin_over_quant (only allowed when Q
4197 cpi->mb.zbin_over_quant = 0;
4199 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4202 /* Update rate_correction_factor unless
4203 * cpi->active_worst_quality has changed.
4205 if (!active_worst_qchanged) {
4206 vp8_update_rate_correction_factors(cpi, 0);
4209 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4211 /* Special case reset for qlow for constrained quality.
4212 * This should only trigger where there is very substantial
4213 * undershoot on a frame and the auto cq level is above
4214 * the user passsed in value.
4216 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4221 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4223 vp8_update_rate_correction_factors(cpi, 0);
4224 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4229 undershoot_seen = 1;
4232 /* Clamp Q to upper and lower limits: */
4235 } else if (Q < q_low) {
4239 /* Clamp cpi->zbin_over_quant */
4240 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low)
4242 : (cpi->mb.zbin_over_quant > zbin_oq_high)
4244 : cpi->mb.zbin_over_quant;
4250 #endif // CONFIG_REALTIME_ONLY
4252 if (cpi->is_src_frame_alt_ref) Loop = 0;
4255 vp8_restore_coding_context(cpi);
4257 #if CONFIG_INTERNAL_STATS
4258 cpi->tot_recode_hits++;
4261 } while (Loop == 1);
4263 #if defined(DROP_UNCODED_FRAMES)
4264 /* if there are no coded macroblocks at all drop this frame */
4265 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4266 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4267 cpi->common.current_video_frame++;
4268 cpi->frames_since_key++;
4269 cpi->drop_frame_count++;
4270 // We advance the temporal pattern for dropped frames.
4271 cpi->temporal_pattern_counter++;
4274 cpi->drop_frame_count = 0;
4278 /* Experimental code for lagged and one pass
4279 * Update stats used for one pass GF selection
4282 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4283 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4284 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4288 /* Special case code to reduce pulsing when key frames are forced at a
4289 * fixed interval. Note the reconstruction error if it is the frame before
4290 * the force key frame
4292 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4294 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4297 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4298 * Last frame has one more line(add to bottom) and one more column(add to
4299 * right) than cm->mip. The edge elements are initialized to 0.
4301 #if CONFIG_MULTI_RES_ENCODING
4302 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4304 if (cm->show_frame) /* do not save for altref frame */
4309 /* Point to beginning of allocated MODE_INFO arrays. */
4310 MODE_INFO *tmp = cm->mip;
4312 if (cm->frame_type != KEY_FRAME) {
4313 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4314 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4315 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4316 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4317 tmp->mbmi.mv.as_int;
4320 cpi->lf_ref_frame_sign_bias[mb_col +
4321 mb_row * (cm->mode_info_stride + 1)] =
4322 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4323 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4324 tmp->mbmi.ref_frame;
4331 /* Count last ref frame 0,0 usage on current encoded frame. */
4335 /* Point to beginning of MODE_INFO arrays. */
4336 MODE_INFO *tmp = cm->mi;
4338 cpi->zeromv_count = 0;
4340 if (cm->frame_type != KEY_FRAME) {
4341 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4342 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4343 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4344 cpi->zeromv_count++;
4353 #if CONFIG_MULTI_RES_ENCODING
4354 vp8_cal_dissimilarity(cpi);
4357 /* Update the GF useage maps.
4358 * This is done after completing the compression of a frame when all
4359 * modes etc. are finalized but before loop filter
4361 if (cpi->oxcf.number_of_layers == 1) {
4362 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4365 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4369 FILE *f = fopen("gfactive.stt", "a");
4370 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4375 /* For inter frames the current default behavior is that when
4376 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4377 * This is purely an encoder decision at present.
4379 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) {
4380 cm->copy_buffer_to_arf = 2;
4382 cm->copy_buffer_to_arf = 0;
4385 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4387 #if CONFIG_TEMPORAL_DENOISING
4388 // Get some measure of the amount of noise, by measuring the (partial) mse
4389 // between source and denoised buffer, for y channel. Partial refers to
4390 // computing the sse for a sub-sample of the frame (i.e., skip x blocks along
4392 // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
4393 // Do this every ~8 frames, to further reduce complexity.
4394 // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity <
4396 // should be removed in favor of the process_denoiser_mode_change() function
4398 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4399 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4400 cm->frame_type != KEY_FRAME) {
4401 cpi->mse_source_denoised = measure_square_diff_partial(
4402 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4405 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4406 // of source diff (between current and previous frame), and determine if we
4407 // should switch the denoiser mode. Sampling refers to computing the mse for
4408 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4409 // only for blocks in that set that have used ZEROMV LAST, along with some
4410 // constraint on the sum diff between blocks. This process is called every
4411 // ~8 frames, to further reduce complexity.
4412 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4413 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4414 process_denoiser_mode_change(cpi);
4418 #if CONFIG_MULTITHREAD
4419 if (cpi->b_multi_threaded) {
4420 /* start loopfilter in separate thread */
4421 sem_post(&cpi->h_event_start_lpf);
4422 cpi->b_lpf_running = 1;
4426 vp8_loopfilter_frame(cpi, cm);
4429 update_reference_frames(cpi);
4431 #ifdef OUTPUT_YUV_DENOISED
4432 vp8_write_yuv_frame(yuv_denoised_file,
4433 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4436 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4437 if (cpi->oxcf.error_resilient_mode) {
4438 cm->refresh_entropy_probs = 0;
4442 #if CONFIG_MULTITHREAD
4443 /* wait that filter_level is picked so that we can continue with stream
4445 if (cpi->b_multi_threaded) sem_wait(&cpi->h_event_end_lpf);
4448 /* build the bitstream */
4449 vp8_pack_bitstream(cpi, dest, dest_end, size);
4451 /* Move storing frame_type out of the above loop since it is also
4452 * needed in motion search besides loopfilter */
4453 cm->last_frame_type = cm->frame_type;
4455 /* Update rate control heuristics */
4456 cpi->total_byte_count += (*size);
4457 cpi->projected_frame_size = (int)(*size) << 3;
4459 if (cpi->oxcf.number_of_layers > 1) {
4461 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4462 cpi->layer_context[i].total_byte_count += (*size);
4466 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4468 cpi->last_q[cm->frame_type] = cm->base_qindex;
4470 if (cm->frame_type == KEY_FRAME) {
4471 vp8_adjust_key_frame_context(cpi);
4474 /* Keep a record of ambient average Q. */
4475 if (cm->frame_type != KEY_FRAME) {
4476 cpi->avg_frame_qindex =
4477 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4480 /* Keep a record from which we can calculate the average Q excluding
4481 * GF updates and key frames
4483 if ((cm->frame_type != KEY_FRAME) &&
4484 ((cpi->oxcf.number_of_layers > 1) ||
4485 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4488 /* Calculate the average Q for normal inter frames (not key or GFU
4491 if (cpi->pass == 2) {
4492 cpi->ni_tot_qi += Q;
4493 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4495 /* Damp value for first few frames */
4496 if (cpi->ni_frames > 150) {
4497 cpi->ni_tot_qi += Q;
4498 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4500 /* For one pass, early in the clip ... average the current frame Q
4501 * value with the worstq entered by the user as a dampening measure
4504 cpi->ni_tot_qi += Q;
4506 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4509 /* If the average Q is higher than what was used in the last
4510 * frame (after going through the recode loop to keep the frame
4511 * size within range) then use the last frame value - 1. The -1
4512 * is designed to stop Q and hence the data rate, from
4513 * progressively falling away during difficult sections, but at
4514 * the same time reduce the number of iterations around the
4517 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4521 /* Update the buffer level variable. */
4522 /* Non-viewable frames are a special case and are treated as pure overhead. */
4523 if (!cm->show_frame) {
4524 cpi->bits_off_target -= cpi->projected_frame_size;
4526 cpi->bits_off_target +=
4527 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4530 /* Clip the buffer level to the maximum specified buffer size */
4531 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4532 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4535 // If the frame dropper is not enabled, don't let the buffer level go below
4536 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4537 // this for screen content input.
4538 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4539 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4540 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4543 /* Rolling monitors of whether we are over or underspending used to
4544 * help regulate min and Max Q in two pass.
4546 cpi->rolling_target_bits =
4547 ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4548 cpi->rolling_actual_bits =
4549 ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4550 cpi->long_rolling_target_bits =
4551 ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4552 cpi->long_rolling_actual_bits =
4553 ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) /
4556 /* Actual bits spent */
4557 cpi->total_actual_bits += cpi->projected_frame_size;
4560 cpi->total_target_vs_actual +=
4561 (cpi->this_frame_target - cpi->projected_frame_size);
4563 cpi->buffer_level = cpi->bits_off_target;
4565 /* Propagate values to higher temporal layers */
4566 if (cpi->oxcf.number_of_layers > 1) {
4569 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4570 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4571 int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
4572 cpi->projected_frame_size);
4574 lc->bits_off_target += bits_off_for_this_layer;
4576 /* Clip buffer level to maximum buffer size for the layer */
4577 if (lc->bits_off_target > lc->maximum_buffer_size) {
4578 lc->bits_off_target = lc->maximum_buffer_size;
4581 lc->total_actual_bits += cpi->projected_frame_size;
4582 lc->total_target_vs_actual += bits_off_for_this_layer;
4583 lc->buffer_level = lc->bits_off_target;
4587 /* Update bits left to the kf and gf groups to account for overshoot
4588 * or undershoot on these frames
4590 if (cm->frame_type == KEY_FRAME) {
4591 cpi->twopass.kf_group_bits +=
4592 cpi->this_frame_target - cpi->projected_frame_size;
4594 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4595 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4596 cpi->twopass.gf_group_bits +=
4597 cpi->this_frame_target - cpi->projected_frame_size;
4599 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4602 if (cm->frame_type != KEY_FRAME) {
4603 if (cpi->common.refresh_alt_ref_frame) {
4604 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4605 cpi->last_skip_probs_q[2] = cm->base_qindex;
4606 } else if (cpi->common.refresh_golden_frame) {
4607 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4608 cpi->last_skip_probs_q[1] = cm->base_qindex;
4610 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4611 cpi->last_skip_probs_q[0] = cm->base_qindex;
4613 /* update the baseline */
4614 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4618 #if 0 && CONFIG_INTERNAL_STATS
4620 FILE *f = fopen("tmp.stt", "a");
4622 vpx_clear_system_state();
4624 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4625 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4626 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4627 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4628 cpi->common.current_video_frame, cpi->this_frame_target,
4629 cpi->projected_frame_size,
4630 (cpi->projected_frame_size - cpi->this_frame_target),
4631 cpi->total_target_vs_actual,
4633 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4634 cpi->total_actual_bits, cm->base_qindex,
4635 cpi->active_best_quality, cpi->active_worst_quality,
4636 cpi->ni_av_qi, cpi->cq_target_quality,
4637 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4638 cm->frame_type, cpi->gfu_boost,
4639 cpi->twopass.est_max_qcorrection_factor,
4640 cpi->twopass.bits_left,
4641 cpi->twopass.total_left_stats.coded_error,
4642 (double)cpi->twopass.bits_left /
4643 cpi->twopass.total_left_stats.coded_error,
4644 cpi->tot_recode_hits);
4646 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4647 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4648 "%8.2lf %"PRId64" %10.3lf %8d\n",
4649 cpi->common.current_video_frame, cpi->this_frame_target,
4650 cpi->projected_frame_size,
4651 (cpi->projected_frame_size - cpi->this_frame_target),
4652 cpi->total_target_vs_actual,
4654 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4655 cpi->total_actual_bits, cm->base_qindex,
4656 cpi->active_best_quality, cpi->active_worst_quality,
4657 cpi->ni_av_qi, cpi->cq_target_quality,
4658 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4659 cm->frame_type, cpi->gfu_boost,
4660 cpi->twopass.est_max_qcorrection_factor,
4661 cpi->twopass.bits_left,
4662 cpi->twopass.total_left_stats.coded_error,
4663 cpi->tot_recode_hits);
4668 FILE *fmodes = fopen("Modes.stt", "a");
4670 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4671 cpi->common.current_video_frame,
4672 cm->frame_type, cm->refresh_golden_frame,
4673 cm->refresh_alt_ref_frame);
4675 fprintf(fmodes, "\n");
4683 if (cm->refresh_golden_frame == 1) {
4684 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4686 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4689 if (cm->refresh_alt_ref_frame == 1) {
4690 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4692 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4695 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4696 cpi->gold_is_last = 1;
4697 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4698 /* 1 refreshed but not the other */
4699 cpi->gold_is_last = 0;
4702 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4703 cpi->alt_is_last = 1;
4704 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4705 /* 1 refreshed but not the other */
4706 cpi->alt_is_last = 0;
4709 if (cm->refresh_alt_ref_frame &
4710 cm->refresh_golden_frame) { /* both refreshed */
4711 cpi->gold_is_alt = 1;
4712 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4713 /* 1 refreshed but not the other */
4714 cpi->gold_is_alt = 0;
4717 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4719 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4721 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4723 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4725 if (!cpi->oxcf.error_resilient_mode) {
4726 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4727 (cm->frame_type != KEY_FRAME)) {
4728 /* Update the alternate reference frame stats as appropriate. */
4729 update_alt_ref_frame_stats(cpi);
4731 /* Update the Golden frame stats as appropriate. */
4732 update_golden_frame_stats(cpi);
4736 if (cm->frame_type == KEY_FRAME) {
4737 /* Tell the caller that the frame was coded as a key frame */
4738 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4740 /* As this frame is a key frame the next defaults to an inter frame. */
4741 cm->frame_type = INTER_FRAME;
4743 cpi->last_frame_percent_intra = 100;
4745 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4747 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4750 /* Clear the one shot update flags for segmentation map and mode/ref
4751 * loop filter deltas.
4753 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4754 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4755 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4757 /* Dont increment frame counters if this was an altref buffer update
4760 if (cm->show_frame) {
4761 cm->current_video_frame++;
4762 cpi->frames_since_key++;
4763 cpi->temporal_pattern_counter++;
4766 /* reset to normal state now that we are done. */
4772 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4773 recon_file = fopen(filename, "wb");
4774 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4775 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4781 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4783 #if !CONFIG_REALTIME_ONLY
4784 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4785 unsigned char *dest_end, unsigned int *frame_flags) {
4786 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4788 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4789 cpi->twopass.bits_left -= 8 * (int)(*size);
4791 if (!cpi->common.refresh_alt_ref_frame) {
4792 double two_pass_min_rate =
4793 (double)(cpi->oxcf.target_bandwidth *
4794 cpi->oxcf.two_pass_vbrmin_section / 100);
4795 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4800 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4801 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4803 struct vpx_usec_timer timer;
4806 vpx_usec_timer_start(&timer);
4808 /* Reinit the lookahead buffer if the frame size changes */
4809 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4810 assert(cpi->oxcf.lag_in_frames < 2);
4811 dealloc_raw_frame_buffers(cpi);
4812 alloc_raw_frame_buffers(cpi);
4815 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4816 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4819 vpx_usec_timer_mark(&timer);
4820 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4825 static int frame_is_reference(const VP8_COMP *cpi) {
4826 const VP8_COMMON *cm = &cpi->common;
4827 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4829 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4830 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4831 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4832 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4833 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4836 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4837 size_t *size, unsigned char *dest,
4838 unsigned char *dest_end, int64_t *time_stamp,
4839 int64_t *time_end, int flush) {
4841 struct vpx_usec_timer tsctimer;
4842 struct vpx_usec_timer ticktimer;
4843 struct vpx_usec_timer cmptimer;
4844 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4846 if (!cpi) return -1;
4850 if (setjmp(cpi->common.error.jmp)) {
4851 cpi->common.error.setjmp = 0;
4852 vpx_clear_system_state();
4853 return VPX_CODEC_CORRUPT_FRAME;
4856 cpi->common.error.setjmp = 1;
4858 vpx_usec_timer_start(&cmptimer);
4862 #if !CONFIG_REALTIME_ONLY
4863 /* Should we code an alternate reference frame */
4864 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4865 cpi->source_alt_ref_pending) {
4866 if ((cpi->source = vp8_lookahead_peek(
4867 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4868 cpi->alt_ref_source = cpi->source;
4869 if (cpi->oxcf.arnr_max_frames > 0) {
4870 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4871 force_src_buffer = &cpi->alt_ref_buffer;
4873 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4874 cm->refresh_alt_ref_frame = 1;
4875 cm->refresh_golden_frame = 0;
4876 cm->refresh_last_frame = 0;
4878 /* Clear Pending alt Ref flag. */
4879 cpi->source_alt_ref_pending = 0;
4880 cpi->is_src_frame_alt_ref = 0;
4886 /* Read last frame source if we are encoding first pass. */
4887 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4888 if ((cpi->last_source =
4889 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
4894 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
4897 cpi->is_src_frame_alt_ref =
4898 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4900 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4905 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4906 cpi->un_scaled_source = cpi->Source;
4907 *time_stamp = cpi->source->ts_start;
4908 *time_end = cpi->source->ts_end;
4909 *frame_flags = cpi->source->flags;
4911 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4912 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4916 #if !CONFIG_REALTIME_ONLY
4918 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4919 vp8_end_first_pass(cpi); /* get last stats packet */
4920 cpi->twopass.first_pass_done = 1;
4928 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4929 cpi->first_time_stamp_ever = cpi->source->ts_start;
4930 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4933 /* adjust frame rates based on timestamps given */
4934 if (cm->show_frame) {
4935 int64_t this_duration;
4938 if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
4939 this_duration = cpi->source->ts_end - cpi->source->ts_start;
4942 int64_t last_duration;
4944 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
4945 last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4946 /* do a step update if the duration changes by 10% */
4947 if (last_duration) {
4948 step = (int)(((this_duration - last_duration) * 10 / last_duration));
4952 if (this_duration) {
4954 cpi->ref_framerate = 10000000.0 / this_duration;
4956 double avg_duration, interval;
4958 /* Average this frame's rate into the last second's average
4959 * frame rate. If we haven't seen 1 second yet, then average
4960 * over the whole interval seen.
4962 interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
4963 if (interval > 10000000.0) interval = 10000000;
4965 avg_duration = 10000000.0 / cpi->ref_framerate;
4966 avg_duration *= (interval - avg_duration + this_duration);
4967 avg_duration /= interval;
4969 cpi->ref_framerate = 10000000.0 / avg_duration;
4971 #if CONFIG_MULTI_RES_ENCODING
4972 if (cpi->oxcf.mr_total_resolutions > 1) {
4973 LOWER_RES_FRAME_INFO *low_res_frame_info =
4974 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
4975 // Frame rate should be the same for all spatial layers in
4976 // multi-res-encoding (simulcast), so we constrain the frame for
4977 // higher layers to be that of lowest resolution. This is needed
4978 // as the application may decide to skip encoding a high layer and
4979 // then start again, in which case a big jump in time-stamps will
4980 // be received for that high layer, which will yield an incorrect
4981 // frame rate (from time-stamp adjustment in above calculation).
4982 if (cpi->oxcf.mr_encoder_id) {
4983 cpi->ref_framerate = low_res_frame_info->low_res_framerate;
4985 // Keep track of frame rate for lowest resolution.
4986 low_res_frame_info->low_res_framerate = cpi->ref_framerate;
4990 if (cpi->oxcf.number_of_layers > 1) {
4993 /* Update frame rates for each layer */
4994 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
4995 for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
4997 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4998 lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
5001 vp8_new_framerate(cpi, cpi->ref_framerate);
5005 cpi->last_time_stamp_seen = cpi->source->ts_start;
5006 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
5009 if (cpi->oxcf.number_of_layers > 1) {
5012 update_layer_contexts(cpi);
5014 /* Restore layer specific context & set frame rate */
5015 if (cpi->temporal_layer_id >= 0) {
5016 layer = cpi->temporal_layer_id;
5020 .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
5022 restore_layer_context(cpi, layer);
5023 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
5026 if (cpi->compressor_speed == 2) {
5027 vpx_usec_timer_start(&tsctimer);
5028 vpx_usec_timer_start(&ticktimer);
5031 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;
5033 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
5036 const int num_part = (1 << cm->multi_token_partition);
5037 /* the available bytes in dest */
5038 const unsigned long dest_size = dest_end - dest;
5039 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
5041 unsigned char *dp = dest;
5043 cpi->partition_d[0] = dp;
5044 dp += dest_size / 10; /* reserve 1/10 for control partition */
5045 cpi->partition_d_end[0] = dp;
5047 for (i = 0; i < num_part; ++i) {
5048 cpi->partition_d[i + 1] = dp;
5049 dp += tok_part_buff_size;
5050 cpi->partition_d_end[i + 1] = dp;
5055 /* start with a 0 size frame */
5058 /* Clear down mmx registers */
5059 vpx_clear_system_state();
5061 cm->frame_type = INTER_FRAME;
5062 cm->frame_flags = *frame_flags;
5066 if (cm->refresh_alt_ref_frame)
5068 cm->refresh_golden_frame = 0;
5069 cm->refresh_last_frame = 0;
5073 cm->refresh_golden_frame = 0;
5074 cm->refresh_last_frame = 1;
5078 /* find a free buffer for the new frame */
5081 for (; i < NUM_YV12_BUFFERS; ++i) {
5082 if (!cm->yv12_fb[i].flags) {
5088 assert(i < NUM_YV12_BUFFERS);
5090 switch (cpi->pass) {
5091 #if !CONFIG_REALTIME_ONLY
5092 case 1: Pass1Encode(cpi, size, dest, frame_flags); break;
5093 case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
5094 #endif // !CONFIG_REALTIME_ONLY
5096 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5100 if (cpi->compressor_speed == 2) {
5101 unsigned int duration, duration2;
5102 vpx_usec_timer_mark(&tsctimer);
5103 vpx_usec_timer_mark(&ticktimer);
5105 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5106 duration2 = (unsigned int)((double)duration / 2);
5108 if (cm->frame_type != KEY_FRAME) {
5109 if (cpi->avg_encode_time == 0) {
5110 cpi->avg_encode_time = duration;
5112 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5118 if (cpi->avg_pick_mode_time == 0) {
5119 cpi->avg_pick_mode_time = duration2;
5121 cpi->avg_pick_mode_time =
5122 (7 * cpi->avg_pick_mode_time + duration2) >> 3;
5128 if (cm->refresh_entropy_probs == 0) {
5129 memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5132 /* Save the contexts separately for alt ref, gold and last. */
5133 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5134 if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5136 if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5138 if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5140 /* if its a dropped frame honor the requests on subsequent frames */
5142 cpi->droppable = !frame_is_reference(cpi);
5144 /* return to normal state */
5145 cm->refresh_entropy_probs = 1;
5146 cm->refresh_alt_ref_frame = 0;
5147 cm->refresh_golden_frame = 0;
5148 cm->refresh_last_frame = 1;
5149 cm->frame_type = INTER_FRAME;
5152 /* Save layer specific state */
5153 if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi);
5155 vpx_usec_timer_mark(&cmptimer);
5156 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5158 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
5159 generate_psnr_packet(cpi);
5162 #if CONFIG_INTERNAL_STATS
5164 if (cpi->pass != 1) {
5165 cpi->bytes += *size;
5167 if (cm->show_frame) {
5168 cpi->common.show_frame_mi = cpi->common.mi;
5171 if (cpi->b_calculate_psnr) {
5172 uint64_t ye, ue, ve;
5174 YV12_BUFFER_CONFIG *orig = cpi->Source;
5175 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5176 unsigned int y_width = cpi->common.Width;
5177 unsigned int y_height = cpi->common.Height;
5178 unsigned int uv_width = (y_width + 1) / 2;
5179 unsigned int uv_height = (y_height + 1) / 2;
5180 int y_samples = y_height * y_width;
5181 int uv_samples = uv_height * uv_width;
5182 int t_samples = y_samples + 2 * uv_samples;
5185 ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
5186 recon->y_stride, y_width, y_height);
5188 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
5189 recon->uv_stride, uv_width, uv_height);
5191 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
5192 recon->uv_stride, uv_width, uv_height);
5194 sq_error = (double)(ye + ue + ve);
5196 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5198 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5199 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5200 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5201 cpi->total_sq_error += sq_error;
5202 cpi->total += frame_psnr;
5205 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5207 double frame_psnr2, frame_ssim2 = 0;
5210 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
5211 cm->filter_level * 10 / 6, 1, 0);
5212 vpx_clear_system_state();
5214 ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
5215 pp->y_stride, y_width, y_height);
5217 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
5218 pp->uv_stride, uv_width, uv_height);
5220 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
5221 pp->uv_stride, uv_width, uv_height);
5223 sq_error2 = (double)(ye + ue + ve);
5225 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5227 cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5228 cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5229 cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5230 cpi->total_sq_error2 += sq_error2;
5231 cpi->totalp += frame_psnr2;
5234 vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);
5236 cpi->summed_quality += frame_ssim2 * weight;
5237 cpi->summed_weights += weight;
5239 if (cpi->oxcf.number_of_layers > 1) {
5242 for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
5243 cpi->frames_in_layer[i]++;
5245 cpi->bytes_in_layer[i] += *size;
5246 cpi->sum_psnr[i] += frame_psnr;
5247 cpi->sum_psnr_p[i] += frame_psnr2;
5248 cpi->total_error2[i] += sq_error;
5249 cpi->total_error2_p[i] += sq_error2;
5250 cpi->sum_ssim[i] += frame_ssim2 * weight;
5251 cpi->sum_weights[i] += weight;
5262 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5264 skiptruecount += cpi->skip_true_count;
5265 skipfalsecount += cpi->skip_false_count;
5273 FILE *f = fopen("skip.stt", "a");
5274 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5276 if (cpi->is_src_frame_alt_ref == 1)
5277 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5285 cpi->common.error.setjmp = 0;
5287 #if CONFIG_MULTITHREAD
5288 /* wait for the lpf thread done */
5289 if (cpi->b_multi_threaded && cpi->b_lpf_running) {
5290 sem_wait(&cpi->h_event_end_lpf);
5291 cpi->b_lpf_running = 0;
5298 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
5299 vp8_ppflags_t *flags) {
5300 if (cpi->common.refresh_alt_ref_frame) {
5306 cpi->common.show_frame_mi = cpi->common.mi;
5307 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5311 if (cpi->common.frame_to_show) {
5312 *dest = *cpi->common.frame_to_show;
5313 dest->y_width = cpi->common.Width;
5314 dest->y_height = cpi->common.Height;
5315 dest->uv_height = cpi->common.Height / 2;
5322 vpx_clear_system_state();
5327 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5328 unsigned int cols, int delta_q[4], int delta_lf[4],
5329 unsigned int threshold[4]) {
5330 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5331 int internal_delta_q[MAX_MB_SEGMENTS];
5332 const int range = 63;
5335 // This method is currently incompatible with the cyclic refresh method
5336 if (cpi->cyclic_refresh_mode_enabled) return -1;
5338 // Check number of rows and columns match
5339 if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
5343 // Range check the delta Q values and convert the external Q range values
5344 // to internal ones.
5345 if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
5346 (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) {
5350 // Range check the delta lf values
5351 if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
5352 (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) {
5357 disable_segmentation(cpi);
5361 // Translate the external delta q values to internal values.
5362 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
5363 internal_delta_q[i] =
5364 (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
5367 /* Set the segmentation Map */
5368 set_segmentation_map(cpi, map);
5370 /* Activate segmentation. */
5371 enable_segmentation(cpi);
5373 /* Set up the quant segment data */
5374 feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5375 feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5376 feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5377 feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5379 /* Set up the loop segment data s */
5380 feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5381 feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5382 feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5383 feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
5385 cpi->segment_encode_breakout[0] = threshold[0];
5386 cpi->segment_encode_breakout[1] = threshold[1];
5387 cpi->segment_encode_breakout[2] = threshold[2];
5388 cpi->segment_encode_breakout[3] = threshold[3];
5390 /* Initialise the feature data structure */
5391 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
5396 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5397 unsigned int cols) {
5398 if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
5400 memcpy(cpi->active_map, map, rows * cols);
5401 cpi->active_map_enabled = 1;
5403 cpi->active_map_enabled = 0;
5412 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
5413 VPX_SCALING vert_mode) {
5414 if (horiz_mode <= ONETWO) {
5415 cpi->common.horiz_scale = horiz_mode;
5420 if (vert_mode <= ONETWO) {
5421 cpi->common.vert_scale = vert_mode;
5429 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
5433 unsigned char *src = source->y_buffer;
5434 unsigned char *dst = dest->y_buffer;
5436 /* Loop through the Y plane raw and reconstruction data summing
5437 * (square differences)
5439 for (i = 0; i < source->y_height; i += 16) {
5440 for (j = 0; j < source->y_width; j += 16) {
5442 Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
5446 src += 16 * source->y_stride;
5447 dst += 16 * dest->y_stride;
5453 int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }