/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "vp8/common/onyxc_int.h"
16 #include "vp8/common/blockd.h"
18 #include "vp8/common/systemdependent.h"
19 #include "vp8/encoder/quantize.h"
20 #include "vp8/common/alloccommon.h"
22 #include "firstpass.h"
23 #include "vpx_dsp/psnr.h"
24 #include "vpx_scale/vpx_scale.h"
25 #include "vp8/common/extend.h"
27 #include "vp8/common/quant_common.h"
28 #include "segmentation.h"
30 #include "vp8/common/postproc.h"
32 #include "vpx_mem/vpx_mem.h"
33 #include "vp8/common/reconintra.h"
34 #include "vp8/common/swapyv12buffer.h"
35 #include "vp8/common/threading.h"
36 #include "vpx_ports/system_state.h"
37 #include "vpx_ports/vpx_timer.h"
39 #include "vpx_ports/arm.h"
41 #if CONFIG_MULTI_RES_ENCODING
42 #include "mr_dissim.h"
44 #include "encodeframe.h"
51 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
52 extern int vp8_update_coef_context(VP8_COMP *cpi);
53 extern void vp8_update_coef_probs(VP8_COMP *cpi);
56 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
57 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
58 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
60 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
61 YV12_BUFFER_CONFIG *post, int filt_lvl,
62 int low_var_thresh, int flag);
63 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
64 extern unsigned int vp8_get_processor_freq();
65 extern void print_tree_update_probs();
66 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
67 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
69 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
71 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
73 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
75 static void set_default_lf_deltas(VP8_COMP *cpi);
77 extern const int vp8_gf_interval_table[101];
/* Internal-statistics / debug instrumentation declarations.
 * NOTE(review): the #if / #ifdef regions below appear to be missing their
 * closing #endif lines in this listing — confirm against the full file.
 */
79 #if CONFIG_INTERNAL_STATS
81 #include "vpx_dsp/ssim.h"
87 #ifdef OUTPUT_YUV_DENOISED
/* Debug dump target for denoised frames (only when OUTPUT_YUV_DENOISED). */
88 FILE *yuv_denoised_file;
98 extern int skip_true_count;
99 extern int skip_false_count;
102 #ifdef VP8_ENTROPY_STATS
103 extern int intra_mode_stats[10][10][10];
/* Per-speed frame counters and misc. debug accumulators. */
107 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0 };
109 unsigned int tot_pm = 0;
110 unsigned int cnt_pm = 0;
111 unsigned int tot_ef = 0;
112 unsigned int cnt_ef = 0;
116 extern unsigned __int64 Sectionbits[50];
/* Mode usage counters, defined elsewhere in the encoder. */
117 extern int y_modes[5];
118 extern int uv_modes[4];
119 extern int b_modes[10];
121 extern int inter_y_modes[10];
122 extern int inter_uv_modes[4];
123 extern unsigned int inter_b_modes[15];
126 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
/* Quantizer setup tables / entry point (see quantize.c). */
128 extern const int qrounding_factors[129];
129 extern const int qzbin_factors[129];
130 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
131 extern const int vp8cx_base_skip_false_prob[128];
133 /* Tables relating active max Q to active min Q */
134 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
138 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
139 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
140 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
141 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
143 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
144 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
145 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
146 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
147 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
148 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
149 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
150 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
152 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
153 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
154 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
155 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
156 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
157 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
158 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
159 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
161 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
162 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
163 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
164 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
165 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
166 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
167 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
168 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
170 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
171 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
172 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
173 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
174 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
175 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
176 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
177 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
179 static const unsigned char inter_minq[QINDEX_RANGE] = {
180 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
181 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
182 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
183 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
184 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
185 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
186 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
/* Debug packet log stream; defined elsewhere when PACKET_TESTING is set. */
#ifdef PACKET_TESTING
extern FILE *vpxlogc;
#endif
193 static void save_layer_context(VP8_COMP *cpi) {
194 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
196 /* Save layer dependent coding state */
197 lc->target_bandwidth = cpi->target_bandwidth;
198 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
199 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
200 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
201 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
202 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
203 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
204 lc->buffer_level = cpi->buffer_level;
205 lc->bits_off_target = cpi->bits_off_target;
206 lc->total_actual_bits = cpi->total_actual_bits;
207 lc->worst_quality = cpi->worst_quality;
208 lc->active_worst_quality = cpi->active_worst_quality;
209 lc->best_quality = cpi->best_quality;
210 lc->active_best_quality = cpi->active_best_quality;
211 lc->ni_av_qi = cpi->ni_av_qi;
212 lc->ni_tot_qi = cpi->ni_tot_qi;
213 lc->ni_frames = cpi->ni_frames;
214 lc->avg_frame_qindex = cpi->avg_frame_qindex;
215 lc->rate_correction_factor = cpi->rate_correction_factor;
216 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
217 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
218 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
219 lc->inter_frame_target = cpi->inter_frame_target;
220 lc->total_byte_count = cpi->total_byte_count;
221 lc->filter_level = cpi->common.filter_level;
223 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
225 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
226 sizeof(cpi->mb.count_mb_ref_frame_usage));
229 static void restore_layer_context(VP8_COMP *cpi, const int layer) {
230 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
232 /* Restore layer dependent coding state */
233 cpi->current_layer = layer;
234 cpi->target_bandwidth = lc->target_bandwidth;
235 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
236 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
237 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
238 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
239 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
240 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
241 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
242 cpi->buffer_level = lc->buffer_level;
243 cpi->bits_off_target = lc->bits_off_target;
244 cpi->total_actual_bits = lc->total_actual_bits;
245 cpi->active_worst_quality = lc->active_worst_quality;
246 cpi->active_best_quality = lc->active_best_quality;
247 cpi->ni_av_qi = lc->ni_av_qi;
248 cpi->ni_tot_qi = lc->ni_tot_qi;
249 cpi->ni_frames = lc->ni_frames;
250 cpi->avg_frame_qindex = lc->avg_frame_qindex;
251 cpi->rate_correction_factor = lc->rate_correction_factor;
252 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
253 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
254 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
255 cpi->inter_frame_target = lc->inter_frame_target;
256 cpi->total_byte_count = lc->total_byte_count;
257 cpi->common.filter_level = lc->filter_level;
259 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
261 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
262 sizeof(cpi->mb.count_mb_ref_frame_usage));
/* Scale `val` by the ratio num / denom, performing the multiply in 64-bit
 * arithmetic so the intermediate product cannot overflow for the bitrate /
 * buffer-level magnitudes used by the rate controller. Integer division
 * truncates toward zero.
 */
static int rescale(int val, int num, int denom) {
  int64_t llnum = num;
  int64_t llden = denom;
  int64_t llval = val;

  return (int)(llval * llnum / llden);
}
/* Initialise the LAYER_CONTEXT for one temporal layer: derive its framerate
 * and target bandwidth from the encoder config, convert the configured
 * buffer levels (given in ms) into bits for this layer's bandwidth, and
 * reset its rate-control state.
 * NOTE(review): this listing appears to be missing interior lines (the
 * `const int layer` parameter line, `} else {` branches and closing braces)
 * — confirm against the full file before editing.
 */
273 static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
275 double prev_layer_framerate) {
276 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
278 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
279 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
/* Buffer levels are configured in milliseconds; keep the ms values too. */
281 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
282 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
283 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
/* Convert ms -> bits at this layer's bandwidth (rescale avoids overflow). */
285 lc->starting_buffer_level =
286 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
/* A configured level of 0 means "derive a default from the bandwidth". */
288 if (oxcf->optimal_buffer_level == 0) {
289 lc->optimal_buffer_level = lc->target_bandwidth / 8;
291 lc->optimal_buffer_level =
292 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
295 if (oxcf->maximum_buffer_size == 0) {
296 lc->maximum_buffer_size = lc->target_bandwidth / 8;
298 lc->maximum_buffer_size =
299 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
302 /* Work out the average size of a frame within this layer */
/* Uses the bitrate delta over the layer below divided by the framerate
 * delta, i.e. only the frames unique to this layer. */
304 lc->avg_frame_size_for_layer =
305 (int)((cpi->oxcf.target_bitrate[layer] -
306 cpi->oxcf.target_bitrate[layer - 1]) *
307 1000 / (lc->framerate - prev_layer_framerate));
/* Reset per-layer quality tracking to the configured extremes. */
310 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
311 lc->active_best_quality = cpi->oxcf.best_allowed_q;
312 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
314 lc->buffer_level = lc->starting_buffer_level;
315 lc->bits_off_target = lc->starting_buffer_level;
317 lc->total_actual_bits = 0;
/* Rate-correction factors start at unity (no correction). */
321 lc->rate_correction_factor = 1.0;
322 lc->key_frame_rate_correction_factor = 1.0;
323 lc->gf_rate_correction_factor = 1.0;
324 lc->inter_frame_target = 0;
327 // Upon a run-time change in temporal layers, reset the layer context parameters
328 // for any "new" layers. For "existing" layers, let them inherit the parameters
329 // from the previous layer state (at the same layer #). In future we may want
330 // to better map the previous layer state(s) to the "new" ones.
// NOTE(review): interior lines (loop index declaration, assignment heads and
// closing braces) appear to be elided in this listing — confirm against the
// full file.
331 static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
332 const int prev_num_layers) {
334 double prev_layer_framerate = 0;
335 const int curr_num_layers = cpi->oxcf.number_of_layers;
336 // If the previous state was 1 layer, get current layer context from cpi.
337 // We need this to set the layer context for the new layers below.
338 if (prev_num_layers == 1) {
339 cpi->current_layer = 0;
340 save_layer_context(cpi);
342 for (i = 0; i < curr_num_layers; ++i) {
343 LAYER_CONTEXT *lc = &cpi->layer_context[i];
// Layers beyond the previous count are brand new: fully (re)initialise.
344 if (i >= prev_num_layers) {
345 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
347 // The initial buffer levels are set based on their starting levels.
348 // We could set the buffer levels based on the previous state (normalized
349 // properly by the layer bandwidths) but we would need to keep track of
350 // the previous set of layer bandwidths (i.e., target_bitrate[i])
351 // before the layer change. For now, reset to the starting levels.
353 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
354 lc->bits_off_target = lc->buffer_level;
355 // TODO(marpan): Should we set the rate_correction_factor and
356 // active_worst/best_quality to values derived from the previous layer
357 // state (to smooth-out quality dips/rate fluctuation at transition)?
359 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
360 // is not set for 1 layer, and the restore_layer_context/save_context()
361 // are not called in the encoding loop, so we need to call it here to
362 // pass the layer context state to |cpi|.
363 if (curr_num_layers == 1) {
364 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
366 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
367 lc->bits_off_target = lc->buffer_level;
368 restore_layer_context(cpi, 0);
// Track this layer's framerate for the next iteration's delta computation.
370 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
374 static void setup_features(VP8_COMP *cpi) {
375 // If segmentation enabled set the update flags
376 if (cpi->mb.e_mbd.segmentation_enabled) {
377 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
378 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
380 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
381 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
384 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
385 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
386 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
387 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
388 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
389 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
390 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
391 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
393 set_default_lf_deltas(cpi);
/* Forward declaration; definition appears later in the file. */
396 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
/* One-time global encoder initialisation (intra predictor tables, etc.).
 * NOTE(review): the guard around the init work (checking/setting init_done)
 * appears to be elided in this listing — confirm against the full file.
 */
398 void vp8_initialize_enc(void) {
399 static volatile int init_done = 0;
403 vp8_init_intra_predictors();
/* Release all per-compressor allocations: MV/ref-frame bookkeeping, the
 * segmentation and activity maps, frame buffers, GF usage flags, and the
 * multithreading mutexes/state.
 * NOTE(review): several free/NULL-reset pairs appear to be elided in this
 * listing — confirm against the full file.
 */
408 static void dealloc_compressor_data(VP8_COMP *cpi) {
409 vpx_free(cpi->tplist);
412 /* Delete last frame MV storage buffers */
416 vpx_free(cpi->lf_ref_frame_sign_bias);
417 cpi->lf_ref_frame_sign_bias = 0;
419 vpx_free(cpi->lf_ref_frame);
420 cpi->lf_ref_frame = 0;
422 /* Delete segmentation map */
423 vpx_free(cpi->segmentation_map);
424 cpi->segmentation_map = 0;
426 vpx_free(cpi->active_map);
429 vp8_de_alloc_frame_buffers(&cpi->common);
431 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
432 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
433 dealloc_raw_frame_buffers(cpi);
438 /* Structure used to monitor GF usage */
439 vpx_free(cpi->gf_active_flags);
440 cpi->gf_active_flags = 0;
442 /* Activity mask based per mb zbin adjustments */
443 vpx_free(cpi->mb_activity_map);
444 cpi->mb_activity_map = 0;
446 vpx_free(cpi->mb.pip);
449 #if CONFIG_MULTITHREAD
450 /* De-allocate mutex */
451 if (cpi->pmutex != NULL) {
452 VP8_COMMON *const pc = &cpi->common;
/* One mutex per macroblock row; destroy each before freeing the array. */
455 for (i = 0; i < pc->mb_rows; ++i) {
456 pthread_mutex_destroy(&cpi->pmutex[i]);
458 vpx_free(cpi->pmutex);
462 vpx_free(cpi->mt_current_mb_col);
463 cpi->mt_current_mb_col = NULL;
467 static void enable_segmentation(VP8_COMP *cpi) {
468 /* Set the appropriate feature bit */
469 cpi->mb.e_mbd.segmentation_enabled = 1;
470 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
471 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
473 static void disable_segmentation(VP8_COMP *cpi) {
474 /* Clear the appropriate feature bit */
475 cpi->mb.e_mbd.segmentation_enabled = 0;
/* Valid values for a segment are 0 to 3
 * Segmentation map is arranged as [Rows][Columns]
 */
481 static void set_segmentation_map(VP8_COMP *cpi,
482 unsigned char *segmentation_map) {
483 /* Copy in the new segmentation map */
484 memcpy(cpi->segmentation_map, segmentation_map,
485 (cpi->common.mb_rows * cpi->common.mb_cols));
487 /* Signal that the map should be updated. */
488 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
489 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
/* The values given for each segment can be either deltas (from the default
 * value chosen for the frame) or absolute values.
 *
 * Valid range for abs values is:
 *    (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
 * Valid range for delta values are:
 *    (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
 *
 * abs_delta = SEGMENT_DELTADATA (deltas)
 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
 */
504 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
505 unsigned char abs_delta) {
506 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
507 memcpy(cpi->segment_feature_data, feature_data,
508 sizeof(cpi->segment_feature_data));
511 /* A simple function to cyclically refresh the background at a lower Q */
/* NOTE(review): the MB-scan loop body (segment assignment, increments and
 * braces) appears to be elided in this listing — confirm against the full
 * file before editing.
 */
512 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
513 unsigned char *seg_map = cpi->segmentation_map;
514 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
516 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
517 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
/* Refreshed blocks get roughly half the frame Q as a quality boost. */
519 cpi->cyclic_refresh_q = Q / 2;
521 if (cpi->oxcf.screen_content_mode) {
522 // Modify quality ramp-up based on Q. Above some Q level, increase the
523 // number of blocks to be refreshed, and reduce it below the threshold.
524 // Turn-off under certain conditions (i.e., away from key frame, and if
525 // we are at good quality (low Q) and most of the blocks were
527 // in previous frame.
528 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
529 if (Q >= qp_thresh) {
530 cpi->cyclic_refresh_mode_max_mbs_perframe =
531 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
532 } else if (cpi->frames_since_key > 250 && Q < 20 &&
533 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
534 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
536 cpi->cyclic_refresh_mode_max_mbs_perframe =
537 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
539 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
542 // Set every macroblock to be eligible for update.
543 // For key frame this will reset seg map to 0.
544 memset(cpi->segmentation_map, 0, mbs_in_frame);
546 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
547 /* Cycle through the macro_block rows */
548 /* MB loop to set local segmentation map */
/* Resume the scan where the previous frame's refresh left off. */
549 i = cpi->cyclic_refresh_mode_index;
550 assert(i < mbs_in_frame);
552 /* If the MB is as a candidate for clean up then mark it for
553 * possible boost/refresh (segment 1) The segment id may get
554 * reset to 0 later if the MB gets coded anything other than
555 * last frame 0,0 as only (last frame 0,0) MBs are eligable for
556 * refresh : that is to say Mbs likely to be background blocks.
 */
558 if (cpi->cyclic_refresh_map[i] == 0) {
561 } else if (cpi->cyclic_refresh_map[i] < 0) {
562 cpi->cyclic_refresh_map[i]++;
/* Wrap the scan index back to the first MB. */
566 if (i == mbs_in_frame) i = 0;
568 } while (block_count && i != cpi->cyclic_refresh_mode_index);
/* Remember where to resume next frame. */
570 cpi->cyclic_refresh_mode_index = i;
572 #if CONFIG_TEMPORAL_DENOISING
573 if (cpi->oxcf.noise_sensitivity > 0) {
574 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
575 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
576 (cpi->frames_since_key >
577 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
578 // Under aggressive denoising, use segmentation to turn off loop
579 // filter below some qp thresh. The filter is reduced for all
580 // blocks that have been encoded as ZEROMV LAST x frames in a row,
581 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
582 // This is to avoid "dot" artifacts that can occur from repeated
583 // loop filtering on noisy input source.
584 cpi->cyclic_refresh_q = Q;
585 // lf_adjustment = -MAX_LOOP_FILTER;
587 for (i = 0; i < mbs_in_frame; ++i) {
588 seg_map[i] = (cpi->consec_zero_last[i] >
589 cpi->denoiser.denoise_pars.consec_zerolast)
598 /* Activate segmentation. */
599 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
600 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
601 enable_segmentation(cpi);
603 /* Set up the quant segment data */
604 feature_data[MB_LVL_ALT_Q][0] = 0;
/* Segment 1 (refresh blocks) gets the lower cyclic_refresh_q as a delta. */
605 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
606 feature_data[MB_LVL_ALT_Q][2] = 0;
607 feature_data[MB_LVL_ALT_Q][3] = 0;
609 /* Set up the loop segment data */
610 feature_data[MB_LVL_ALT_LF][0] = 0;
611 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
612 feature_data[MB_LVL_ALT_LF][2] = 0;
613 feature_data[MB_LVL_ALT_LF][3] = 0;
615 /* Initialise the feature data structure */
616 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
619 static void set_default_lf_deltas(VP8_COMP *cpi) {
620 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
621 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
623 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
624 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
626 /* Test of ref frame deltas */
627 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
628 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
629 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
630 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
632 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
634 if (cpi->oxcf.Mode == MODE_REALTIME) {
635 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
637 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
640 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
641 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
/* Convenience macros for mapping speed and mode into a continuous
 * range.
 */
/* Map a good-quality speed (offset 1) or a real-time speed (offset 7) onto
 * the single continuous speed axis used by the speed_map tables below.
 * Arguments are parenthesised so expression arguments expand safely.
 */
#define GOOD(x) ((x) + 1)
#define RT(x) ((x) + 7)
/* Walk a table of { value, speed-threshold } pairs and return the last
 * value whose following threshold is still <= `speed`. Tables terminate
 * with an INT_MAX threshold, so the scan always stops.
 */
static int speed_map(int speed, const int *map) {
  int res;

  do {
    res = *map++;
  } while (speed >= *map++);
  return res;
}
/* Speed-feature lookup tables consumed by speed_map(): alternating
 * { value, threshold } pairs on the continuous GOOD()/RT() speed axis.
 * NOTE(review): several arrays appear to be truncated mid-initializer in
 * this listing (missing trailing entries and "};") — confirm against the
 * full file before editing.
 */
659 static const int thresh_mult_map_znn[] = {
660 /* map common to zero, nearest, and near */
661 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
664 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
665 2000, RT(0), 1000, RT(1),
666 2000, RT(7), INT_MAX, INT_MAX };
668 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
669 5000, GOOD(3), 7500, RT(0),
670 2500, RT(1), 5000, RT(6),
673 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
674 2000, RT(0), 0, RT(1),
675 1000, RT(2), 2000, RT(7),
678 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
679 RT(0), 2000, INT_MAX };
681 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
682 2500, GOOD(5), 4000, RT(0),
683 2000, RT(2), 2500, RT(5),
686 static const int thresh_mult_map_split1[] = {
687 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
688 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
691 static const int thresh_mult_map_split2[] = {
692 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
693 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
696 static const int mode_check_freq_map_zn2[] = {
697 /* {zero,nearest}{2,3} */
698 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
701 static const int mode_check_freq_map_vhbpred[] = {
702 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
705 static const int mode_check_freq_map_near2[] = {
706 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
707 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
710 static const int mode_check_freq_map_new1[] = {
711 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
714 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
716 1 << 3, RT(11), 1 << 4, RT(12),
719 static const int mode_check_freq_map_split1[] = {
720 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
723 static const int mode_check_freq_map_split2[] = {
724 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
/* Configure all speed/quality trade-off features for the current
 * compressor speed and mode: mode-search thresholds and check frequencies,
 * motion-search parameters, DCT/quantizer variants, and the fractional-MV
 * search routine.
 * NOTE(review): the switch-statement skeleton (case labels, breaks and
 * braces) and several local declarations appear to be elided in this
 * listing — confirm against the full file before editing.
 */
727 void vp8_set_speed_features(VP8_COMP *cpi) {
728 SPEED_FEATURES *sf = &cpi->sf;
729 int Mode = cpi->compressor_speed;
730 int Speed = cpi->Speed;
732 VP8_COMMON *cm = &cpi->common;
/* Remember the previous setting so the quantizer is only re-initialised
 * when improved_quant actually changes (see end of function). */
733 int last_improved_quant = sf->improved_quant;
736 /* Initialise default mode frequency sampling variables */
737 for (i = 0; i < MAX_MODES; ++i) {
738 cpi->mode_check_freq[i] = 0;
741 cpi->mb.mbs_tested_so_far = 0;
742 cpi->mb.mbs_zero_last_dot_suppress = 0;
744 /* best quality defaults */
746 sf->search_method = NSTEP;
747 sf->improved_quant = 1;
748 sf->improved_dct = 1;
751 sf->quarter_pixel_search = 1;
752 sf->half_pixel_search = 1;
753 sf->iterative_sub_pixel = 1;
754 sf->optimize_coefficients = 1;
755 sf->use_fastquant_for_pick = 0;
756 sf->no_skip_block4x4_search = 1;
759 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
760 sf->improved_mv_pred = 1;
762 /* default thresholds to 0 */
763 for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
765 /* Count enabled references */
767 if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
768 if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
769 if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
771 /* Convert speed to continuous range, with clamping */
774 } else if (Mode == 2) {
777 if (Speed > 5) Speed = 5;
/* Mode-search thresholds: larger values prune a mode sooner. */
781 sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
782 sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
784 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
785 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
786 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
787 speed_map(Speed, thresh_mult_map_znn);
789 sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
790 speed_map(Speed, thresh_mult_map_vhpred);
791 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
792 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
793 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
794 sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
795 speed_map(Speed, thresh_mult_map_new2);
796 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
797 sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
798 speed_map(Speed, thresh_mult_map_split2);
800 // Special case for temporal layers.
801 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
802 // used as second reference. We don't modify thresholds for ALTREF case
803 // since ALTREF is usually used as long-term reference in temporal layers.
804 if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
805 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
806 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
807 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
808 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
809 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
810 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
812 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
813 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
814 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
/* Mode-check frequencies: a value of n skips a mode on (n-1)/n MBs. */
818 cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
819 cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
820 cpi->mode_check_freq[THR_DC] = 0; /* always */
822 cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
823 cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
824 speed_map(Speed, mode_check_freq_map_zn2);
826 cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
827 speed_map(Speed, mode_check_freq_map_near2);
829 cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
830 cpi->mode_check_freq[THR_B_PRED] =
831 speed_map(Speed, mode_check_freq_map_vhbpred);
832 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed, mode_check_freq_map_new1);
833 cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
834 speed_map(Speed, mode_check_freq_map_new2);
835 cpi->mode_check_freq[THR_SPLIT1] =
836 speed_map(Speed, mode_check_freq_map_split1);
837 cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
838 speed_map(Speed, mode_check_freq_map_split2);
841 #if !CONFIG_REALTIME_ONLY
842 case 0: /* best quality mode */
844 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
849 /* Disable coefficient optimization above speed 0 */
850 sf->optimize_coefficients = 0;
851 sf->use_fastquant_for_pick = 1;
852 sf->no_skip_block4x4_search = 0;
858 sf->improved_quant = 0;
859 sf->improved_dct = 0;
861 /* Only do recode loop on key frames, golden frames and alt ref frames. */
869 sf->recode_loop = 0; /* recode loop off */
870 sf->RD = 0; /* Turn rd off */
874 sf->auto_filter = 0; /* Faster selection of loop filter */
880 sf->optimize_coefficients = 0;
883 sf->iterative_sub_pixel = 1;
884 sf->search_method = NSTEP;
887 sf->improved_quant = 0;
888 sf->improved_dct = 0;
890 sf->use_fastquant_for_pick = 1;
891 sf->no_skip_block4x4_search = 0;
895 if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
903 sf->auto_filter = 0; /* Faster selection of loop filter */
904 sf->search_method = HEX;
905 sf->iterative_sub_pixel = 0;
/* Adaptive encode-breakout threshold, derived from the error-bin
 * histogram accumulated over previous frames. */
909 unsigned int sum = 0;
910 unsigned int total_mbs = cm->MBs;
912 unsigned int total_skip;
916 if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
920 for (i = 0; i < min; ++i) {
921 sum += cpi->mb.error_bins[i];
927 /* i starts from 2 to make sure thresh started from 2048 */
928 for (; i < 1024; ++i) {
929 sum += cpi->mb.error_bins[i];
932 (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
940 if (thresh < 2000) thresh = 2000;
942 if (ref_frames > 1) {
943 sf->thresh_mult[THR_NEW1] = thresh;
944 sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
945 sf->thresh_mult[THR_NEAR1] = thresh >> 1;
948 if (ref_frames > 2) {
949 sf->thresh_mult[THR_NEW2] = thresh << 1;
950 sf->thresh_mult[THR_NEAREST2] = thresh;
951 sf->thresh_mult[THR_NEAR2] = thresh;
954 if (ref_frames > 3) {
955 sf->thresh_mult[THR_NEW3] = thresh << 1;
956 sf->thresh_mult[THR_NEAREST3] = thresh;
957 sf->thresh_mult[THR_NEAR3] = thresh;
960 sf->improved_mv_pred = 0;
963 if (Speed > 8) sf->quarter_pixel_search = 0;
965 if (cm->version == 0) {
966 cm->filter_type = NORMAL_LOOPFILTER;
968 if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
970 cm->filter_type = SIMPLE_LOOPFILTER;
973 /* This has a big hit on quality. Last resort */
974 if (Speed >= 15) sf->half_pixel_search = 0;
976 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
980 /* Slow quant, dct and trellis not worthwhile for first pass
981 * so make sure they are always turned off.
 */
983 if (cpi->pass == 1) {
984 sf->improved_quant = 0;
985 sf->optimize_coefficients = 0;
986 sf->improved_dct = 0;
/* Bind the motion-search initialiser matching the chosen search method. */
989 if (cpi->sf.search_method == NSTEP) {
990 vp8_init3smotion_compensation(&cpi->mb,
991 cm->yv12_fb[cm->lst_fb_idx].y_stride);
992 } else if (cpi->sf.search_method == DIAMOND) {
993 vp8_init_dsmotion_compensation(&cpi->mb,
994 cm->yv12_fb[cm->lst_fb_idx].y_stride);
997 if (cpi->sf.improved_dct) {
998 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
999 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1001 /* No fast FDCT defined for any platform at this time. */
1002 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1003 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1006 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1008 if (cpi->sf.improved_quant) {
1009 cpi->mb.quantize_b = vp8_regular_quantize_b;
1011 cpi->mb.quantize_b = vp8_fast_quantize_b;
1013 if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
/* Select the fractional-MV refinement routine by decreasing precision. */
1015 if (cpi->sf.iterative_sub_pixel == 1) {
1016 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1017 } else if (cpi->sf.quarter_pixel_search) {
1018 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1019 } else if (cpi->sf.half_pixel_search) {
1020 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1022 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1025 if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
1026 cpi->mb.optimize = 1;
1028 cpi->mb.optimize = 0;
/* Full-pixel-only streams never refine fractional MVs. */
1031 if (cpi->common.full_pixel) {
1032 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1036 frames_at_speed[cpi->Speed]++;
1042 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1043 #if VP8_TEMPORAL_ALT_REF
1044 int width = (cpi->oxcf.Width + 15) & ~15;
1045 int height = (cpi->oxcf.Height + 15) & ~15;
1048 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1049 cpi->oxcf.lag_in_frames);
1050 if (!cpi->lookahead) {
1051 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1052 "Failed to allocate lag buffers");
1055 #if VP8_TEMPORAL_ALT_REF
1057 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1058 VP8BORDERINPIXELS)) {
1059 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1060 "Failed to allocate altref buffer");
1066 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1067 #if VP8_TEMPORAL_ALT_REF
1068 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1070 vp8_lookahead_destroy(cpi->lookahead);
1073 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1074 vpx_free(cpi->mb.pip);
1077 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1078 sizeof(PARTITION_INFO));
1079 if (!cpi->mb.pip) return 1;
1081 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
/* (Re)allocate every buffer whose size depends on the coded frame size:
 * common frame buffers, partition data, token buffer, GF-usage and
 * activity maps, MV-prediction arrays, segmentation/active maps,
 * multithread sync state and (optionally) the denoiser.  Any allocation
 * failure aborts via vpx_internal_error() on cpi->common.error. */
1086 void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1087 VP8_COMMON *cm = &cpi->common;
1089 int width = cm->Width;
1090 int height = cm->Height;
1091 #if CONFIG_MULTITHREAD
/* Remember the previous row count so existing row mutexes can be torn down. */
1092 int prev_mb_rows = cm->mb_rows;
1095 if (vp8_alloc_frame_buffers(cm, width, height)) {
1096 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1097 "Failed to allocate frame buffers");
1100 if (vp8_alloc_partition_data(cpi)) {
1101 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1102 "Failed to allocate partition data");
/* Round the internal working size up to whole macroblocks (16 pixels). */
1105 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1107 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1109 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1110 VP8BORDERINPIXELS)) {
1111 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1112 "Failed to allocate last frame buffer");
1115 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1116 VP8BORDERINPIXELS)) {
1117 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1118 "Failed to allocate scaled source buffer");
/* Token buffer: 24 tokens per each of 16 coefficient positions per MB
 * (on-the-fly bitpacking only needs one MB per thread in flight). */
1124 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1125 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1127 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1129 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1132 /* Data used for real time vc mode to see if gf needs refreshing */
1133 cpi->zeromv_count = 0;
1135 /* Structures used to monitor GF usage */
1136 vpx_free(cpi->gf_active_flags);
1138 cpi->gf_active_flags,
1139 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1140 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1142 vpx_free(cpi->mb_activity_map);
1144 cpi->mb_activity_map,
1145 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1147 /* allocate memory for storing last frame's MVs for MV prediction. */
/* The +2 on each dimension leaves a one-MB border around the frame. */
1148 vpx_free(cpi->lfmv);
1149 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1150 sizeof(*cpi->lfmv)));
1151 vpx_free(cpi->lf_ref_frame_sign_bias);
1152 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1153 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1154 sizeof(*cpi->lf_ref_frame_sign_bias)));
1155 vpx_free(cpi->lf_ref_frame);
1156 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1157 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1158 sizeof(*cpi->lf_ref_frame)));
1160 /* Create the encoder segmentation map and set all entries to 0 */
1161 vpx_free(cpi->segmentation_map);
1163 cpi->segmentation_map,
1164 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1165 cpi->cyclic_refresh_mode_index = 0;
1166 vpx_free(cpi->active_map);
1167 CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
1168 sizeof(*cpi->active_map)));
/* All macroblocks start out active (1 == encode this MB). */
1169 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1171 #if CONFIG_MULTITHREAD
/* Row-thread sync granularity scales with frame width. */
1173 cpi->mt_sync_range = 1;
1174 } else if (width <= 1280) {
1175 cpi->mt_sync_range = 4;
1176 } else if (width <= 2560) {
1177 cpi->mt_sync_range = 8;
1179 cpi->mt_sync_range = 16;
1182 if (cpi->oxcf.multi_threaded > 1) {
1185 /* De-allocate and re-allocate mutex */
1186 if (cpi->pmutex != NULL) {
1187 for (i = 0; i < prev_mb_rows; ++i) {
1188 pthread_mutex_destroy(&cpi->pmutex[i]);
1190 vpx_free(cpi->pmutex);
1194 CHECK_MEM_ERROR(cpi->pmutex,
1195 vpx_malloc(sizeof(*cpi->pmutex) * cm->mb_rows));
1197 for (i = 0; i < cm->mb_rows; ++i) {
1198 pthread_mutex_init(&cpi->pmutex[i], NULL);
/* Per-row progress markers consumed by the row worker threads. */
1202 vpx_free(cpi->mt_current_mb_col);
1203 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1204 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
/* Per-row token lists used by the bitstream packing stage. */
1209 vpx_free(cpi->tplist);
1210 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1212 #if CONFIG_TEMPORAL_DENOISING
1213 if (cpi->oxcf.noise_sensitivity > 0) {
/* Free-then-allocate so a resize never leaks the old denoiser state. */
1214 vp8_denoiser_free(&cpi->denoiser);
1215 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1216 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1217 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1218 "Failed to allocate denoiser");
/* Mapping from the 0..63 quality level exposed through the public API to
 * the internal 0..127 quantizer index used by the VP8 bitstream. */
static const int q_trans[] = {
  0,  1,  2,  3,  4,  5,  7,  8,  9,  10, 12, 13, 15, 17, 18, 19,
  20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
  43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
  82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
};

/* Map an internal quantizer index (0..127) back to the user-level 0..63
 * quality scale: returns the smallest i such that q_trans[i] >= x.
 * Inputs above q_trans[63] saturate to 63.
 * NOTE(review): restored the elided table terminator, loop braces and
 * `return 63;` fallback from the truncated extraction. */
int vp8_reverse_trans(int x) {
  int i;

  for (i = 0; i < 64; ++i) {
    if (q_trans[i] >= x) return i;
  }

  return 63;
}
1241 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1242 if (framerate < .1) framerate = 30;
1244 cpi->framerate = framerate;
1245 cpi->output_framerate = framerate;
1246 cpi->per_frame_bandwidth =
1247 (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1248 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1249 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1250 cpi->oxcf.two_pass_vbrmin_section / 100);
1252 /* Set Maximum gf/arf interval */
1253 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1255 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1257 /* Extended interval for genuinely static scenes */
1258 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1260 /* Special conditions when altr ref frame enabled in lagged compress mode */
1261 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1262 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1263 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1266 if (cpi->twopass.static_scene_max_gf_interval >
1267 cpi->oxcf.lag_in_frames - 1) {
1268 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1272 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1273 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1277 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1278 VP8_COMMON *cm = &cpi->common;
1283 cpi->auto_adjust_gold_quantizer = 1;
1285 cm->version = oxcf->Version;
1286 vp8_setup_version(cm);
1288 /* Frame rate is not available on the first frame, as it's derived from
1289 * the observed timestamps. The actual value used here doesn't matter
1290 * too much, as it will adapt quickly.
1292 if (oxcf->timebase.num > 0) {
1294 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1296 cpi->framerate = 30;
1299 /* If the reciprocal of the timebase seems like a reasonable framerate,
1300 * then use that as a guess, otherwise use 30.
1302 if (cpi->framerate > 180) cpi->framerate = 30;
1304 cpi->ref_framerate = cpi->framerate;
1306 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1308 cm->refresh_golden_frame = 0;
1309 cm->refresh_last_frame = 1;
1310 cm->refresh_entropy_probs = 1;
1312 /* change includes all joint functionality */
1313 vp8_change_config(cpi, oxcf);
1315 /* Initialize active best and worst q and average q values. */
1316 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1317 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1318 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1320 /* Initialise the starting buffer levels */
1321 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1322 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1324 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1325 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1326 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1327 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1329 cpi->total_actual_bits = 0;
1330 cpi->total_target_vs_actual = 0;
1332 /* Temporal scalabilty */
1333 if (cpi->oxcf.number_of_layers > 1) {
1335 double prev_layer_framerate = 0;
1337 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1338 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1339 prev_layer_framerate =
1340 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1344 #if VP8_TEMPORAL_ALT_REF
1348 cpi->fixed_divide[0] = 0;
1350 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
1355 static void update_layer_contexts(VP8_COMP *cpi) {
1356 VP8_CONFIG *oxcf = &cpi->oxcf;
1358 /* Update snapshots of the layer contexts to reflect new parameters */
1359 if (oxcf->number_of_layers > 1) {
1361 double prev_layer_framerate = 0;
1363 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1364 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1365 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1367 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
1368 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1370 lc->starting_buffer_level = rescale(
1371 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
1373 if (oxcf->optimal_buffer_level == 0) {
1374 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1376 lc->optimal_buffer_level = rescale(
1377 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1380 if (oxcf->maximum_buffer_size == 0) {
1381 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1383 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1384 lc->target_bandwidth, 1000);
1387 /* Work out the average size of a frame within this layer */
1389 lc->avg_frame_size_for_layer =
1390 (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1391 1000 / (lc->framerate - prev_layer_framerate));
1394 prev_layer_framerate = lc->framerate;
/* Apply a (possibly changed) encoder configuration to a live compressor:
 * clamps cpu_used per encoding mode, maps user 0-63 q values through
 * q_trans, rescales buffer levels against the target bitrate, resets
 * temporal-layer state when the layer count changes, and reallocates
 * frame buffers when the coded size changes. */
1399 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
1400 VP8_COMMON *cm = &cpi->common;
1402 unsigned int prev_number_of_layers;
1408 if (cm->version != oxcf->Version) {
1409 cm->version = oxcf->Version;
1410 vp8_setup_version(cm);
/* Remember the old size/layer count so we can detect changes below. */
1413 last_w = cpi->oxcf.Width;
1414 last_h = cpi->oxcf.Height;
1415 prev_number_of_layers = cpi->oxcf.number_of_layers;
/* Per-mode compressor speed and cpu_used clamping. */
1419 switch (cpi->oxcf.Mode) {
1422 cpi->compressor_speed = 2;
1424 if (cpi->oxcf.cpu_used < -16) {
1425 cpi->oxcf.cpu_used = -16;
1428 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1432 case MODE_GOODQUALITY:
1434 cpi->compressor_speed = 1;
1436 if (cpi->oxcf.cpu_used < -5) {
1437 cpi->oxcf.cpu_used = -5;
1440 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1444 case MODE_BESTQUALITY:
1446 cpi->compressor_speed = 0;
1449 case MODE_FIRSTPASS:
1451 cpi->compressor_speed = 1;
1453 case MODE_SECONDPASS:
1455 cpi->compressor_speed = 1;
1457 if (cpi->oxcf.cpu_used < -5) {
1458 cpi->oxcf.cpu_used = -5;
1461 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1464 case MODE_SECONDPASS_BEST:
1466 cpi->compressor_speed = 0;
1470 if (cpi->pass == 0) cpi->auto_worst_q = 1;
/* Translate user 0-63 quality values to internal 0-127 q indices. */
1472 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1473 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1474 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1476 if (oxcf->fixed_q >= 0) {
/* Negative per-frame-type q values mean "use q_trans[0]". */
1477 if (oxcf->worst_allowed_q < 0) {
1478 cpi->oxcf.fixed_q = q_trans[0];
1480 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1483 if (oxcf->alt_q < 0) {
1484 cpi->oxcf.alt_q = q_trans[0];
1486 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1489 if (oxcf->key_q < 0) {
1490 cpi->oxcf.key_q = q_trans[0];
1492 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1495 if (oxcf->gold_q < 0) {
1496 cpi->oxcf.gold_q = q_trans[0];
1498 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1502 cpi->baseline_gf_interval =
1503 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1505 // GF behavior for 1 pass CBR, used when error_resilience is off.
1506 if (!cpi->oxcf.error_resilient_mode &&
1507 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1508 cpi->oxcf.Mode == MODE_REALTIME)
1509 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1511 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1512 cpi->oxcf.token_partitions = 3;
1515 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1516 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1519 setup_features(cpi);
/* All segments share the same encode-breakout threshold by default. */
1524 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1525 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1529 /* At the moment the first order values may not be > MAXQ */
1530 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1532 /* local file playback mode == really big buffer */
1533 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1534 cpi->oxcf.starting_buffer_level = 60000;
1535 cpi->oxcf.optimal_buffer_level = 60000;
1536 cpi->oxcf.maximum_buffer_size = 240000;
1537 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1538 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1539 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1542 /* Convert target bandwidth from Kbit/s to Bit/s */
1543 cpi->oxcf.target_bandwidth *= 1000;
/* Buffer levels arrive in milliseconds; rescale to bits at this bitrate. */
1545 cpi->oxcf.starting_buffer_level = rescale(
1546 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1548 /* Set or reset optimal and maximum buffer levels. */
1549 if (cpi->oxcf.optimal_buffer_level == 0) {
1550 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1552 cpi->oxcf.optimal_buffer_level = rescale(
1553 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1556 if (cpi->oxcf.maximum_buffer_size == 0) {
1557 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1559 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1560 cpi->oxcf.target_bandwidth, 1000);
1562 // Under a configuration change, where maximum_buffer_size may change,
1563 // keep buffer level clipped to the maximum allowed buffer size.
1564 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1565 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1566 cpi->buffer_level = cpi->bits_off_target;
1569 /* Set up frame rate and related parameters rate control values. */
1570 vp8_new_framerate(cpi, cpi->framerate);
1572 /* Set absolute upper and lower quality limits */
1573 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1574 cpi->best_quality = cpi->oxcf.best_allowed_q;
1576 /* active values should only be modified if out of new range */
1577 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1578 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1581 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1582 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1584 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1585 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1588 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1589 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1592 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1594 cpi->cq_target_quality = cpi->oxcf.cq_level;
1596 /* Only allow dropped frames in buffered mode */
1597 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1599 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1601 // Check if the number of temporal layers has changed, and if so reset the
1602 // pattern counter and set/initialize the temporal layer context for the
1603 // new layer configuration.
1604 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1605 // If the number of temporal layers are changed we must start at the
1606 // base of the pattern cycle, so set the layer id to 0 and reset
1607 // the temporal pattern counter.
1608 if (cpi->temporal_layer_id > 0) {
1609 cpi->temporal_layer_id = 0;
1611 cpi->temporal_pattern_counter = 0;
1612 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
/* First call records the original size; later calls may only shrink. */
1615 if (!cpi->initial_width) {
1616 cpi->initial_width = cpi->oxcf.Width;
1617 cpi->initial_height = cpi->oxcf.Height;
1620 cm->Width = cpi->oxcf.Width;
1621 cm->Height = cpi->oxcf.Height;
1622 assert(cm->Width <= cpi->initial_width);
1623 assert(cm->Height <= cpi->initial_height);
1625 /* TODO(jkoleszar): if an internal spatial resampling is active,
1626 * and we downsize the input image, maybe we should clear the
1627 * internal scale immediately rather than waiting for it to
1631 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1632 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1634 cm->sharpness_level = cpi->oxcf.Sharpness;
1636 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
1639 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1640 Scale2Ratio(cm->vert_scale, &vr, &vs);
1642 /* always go to the next whole number */
1643 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1644 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1647 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1648 cpi->force_next_frame_intra = 1;
/* Reallocate when the MB-aligned size differs from the current buffers. */
1651 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1652 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1653 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1654 dealloc_raw_frame_buffers(cpi);
1655 alloc_raw_frame_buffers(cpi);
1656 vp8_alloc_compressor_data(cpi);
1659 if (cpi->oxcf.fixed_q >= 0) {
1660 cpi->last_q[0] = cpi->oxcf.fixed_q;
1661 cpi->last_q[1] = cpi->oxcf.fixed_q;
1664 cpi->Speed = cpi->oxcf.cpu_used;
1666 /* force to allowlag to 0 if lag_in_frames is 0; */
1667 if (cpi->oxcf.lag_in_frames == 0) {
1668 cpi->oxcf.allow_lag = 0;
1670 /* Limit on lag buffers as these are not currently dynamically allocated */
1671 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1672 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1676 cpi->alt_ref_source = NULL;
1677 cpi->is_src_frame_alt_ref = 0;
1679 #if CONFIG_TEMPORAL_DENOISING
1680 if (cpi->oxcf.noise_sensitivity) {
/* Lazily allocate the denoiser the first time noise filtering is on. */
1681 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1682 int width = (cpi->oxcf.Width + 15) & ~15;
1683 int height = (cpi->oxcf.Height + 15) & ~15;
1684 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1685 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1686 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1687 "Failed to allocate denoiser");
1694 /* Experimental RD Code */
1695 cpi->frame_distortion = 0;
1696 cpi->last_frame_distortion = 0;
1701 #define M_LOG2_E 0.693147180559945309417
1703 #define log2f(x) (log(x) / (float)M_LOG2_E)
1705 static void cal_mvsadcosts(int *mvsadcost[2]) {
1708 mvsadcost[0][0] = 300;
1709 mvsadcost[1][0] = 300;
1712 double z = 256 * (2 * (log2f(8 * i) + .6));
1713 mvsadcost[0][i] = (int)z;
1714 mvsadcost[1][i] = (int)z;
1715 mvsadcost[0][-i] = (int)z;
1716 mvsadcost[1][-i] = (int)z;
1717 } while (++i <= mvfp_max);
/* Allocate and fully initialize a VP8 compressor instance from the user
 * configuration: rate-control state, cyclic refresh, first/second pass
 * setup, speed features, SAD/variance function pointers, RD cost tables
 * and (optionally) encoder worker threads.  Errors unwind through the
 * setjmp handler installed on cm->error and destroy the instance. */
1720 struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
1726 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1727 /* Check that the CPI instance is valid */
1732 memset(cpi, 0, sizeof(VP8_COMP));
/* Any vpx_internal_error() raised during init lands here. */
1734 if (setjmp(cm->error.jmp)) {
1735 cpi->common.error.setjmp = 0;
1736 vp8_remove_compressor(&cpi);
1740 cpi->common.error.setjmp = 1;
1742 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
1743 (MAX_MVSEARCH_STEPS * 8) + 1));
1745 vp8_create_common(&cpi->common);
1747 init_config(cpi, oxcf);
1749 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1750 sizeof(vp8cx_base_skip_false_prob));
1751 cpi->common.current_video_frame = 0;
1752 cpi->temporal_pattern_counter = 0;
1753 cpi->temporal_layer_id = -1;
1754 cpi->kf_overspend_bits = 0;
1755 cpi->kf_bitrate_adjustment = 0;
1756 cpi->frames_till_gf_update_due = 0;
1757 cpi->gf_overspend_bits = 0;
1758 cpi->non_gf_bitrate_adjustment = 0;
/* Starting coded-with probabilities (128 = 50%). */
1759 cpi->prob_last_coded = 128;
1760 cpi->prob_gf_coded = 128;
1761 cpi->prob_intra_coded = 63;
1763 /* Prime the recent reference frame usage counters.
1764 * Hereafter they will be maintained as a sort of moving average
1766 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1767 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1768 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1769 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1771 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1772 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1774 cpi->twopass.gf_decay_rate = 0;
1775 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1777 cpi->gold_is_last = 0;
1778 cpi->alt_is_last = 0;
1779 cpi->gold_is_alt = 0;
1781 cpi->active_map_enabled = 0;
1784 /* Experimental code for lagged and one pass */
1785 /* Initialise one_pass GF frames stats */
1786 /* Update stats used for GF selection */
1789 cpi->one_pass_frame_index = 0;
1791 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1793 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1794 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1795 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1796 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1797 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1798 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1799 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1800 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1801 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1806 cpi->mse_source_denoised = 0;
1808 /* Should we use the cyclic refresh method.
1809 * Currently there is no external control for this.
1810 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1812 cpi->cyclic_refresh_mode_enabled =
1813 (cpi->oxcf.error_resilient_mode ||
1814 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1815 cpi->oxcf.Mode <= 2));
/* Cap on how many MBs may be refreshed per frame, by layer count. */
1816 cpi->cyclic_refresh_mode_max_mbs_perframe =
1817 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1818 if (cpi->oxcf.number_of_layers == 1) {
1819 cpi->cyclic_refresh_mode_max_mbs_perframe =
1820 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1821 } else if (cpi->oxcf.number_of_layers == 2) {
1822 cpi->cyclic_refresh_mode_max_mbs_perframe =
1823 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1825 cpi->cyclic_refresh_mode_index = 0;
1826 cpi->cyclic_refresh_q = 32;
1828 // GF behavior for 1 pass CBR, used when error_resilience is off.
1829 cpi->gf_update_onepass_cbr = 0;
1830 cpi->gf_noboost_onepass_cbr = 0;
1831 if (!cpi->oxcf.error_resilient_mode &&
1832 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1833 cpi->gf_update_onepass_cbr = 1;
1834 cpi->gf_noboost_onepass_cbr = 1;
/* GF interval derived from cyclic-refresh rate, clamped to [6, 40]. */
1835 cpi->gf_interval_onepass_cbr =
1836 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1837 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1838 cpi->cyclic_refresh_mode_max_mbs_perframe)
1840 cpi->gf_interval_onepass_cbr =
1841 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
1842 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1845 if (cpi->cyclic_refresh_mode_enabled) {
1846 CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
1847 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1849 cpi->cyclic_refresh_map = (signed char *)NULL;
/* Per-MB counters of consecutive zero-MV frames on LAST ref. */
1852 CHECK_MEM_ERROR(cpi->consec_zero_last,
1853 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1854 CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
1855 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1857 #ifdef VP8_ENTROPY_STATS
1858 init_context_counters();
1861 /*Initialize the feed-forward activity masking.*/
1862 cpi->activity_avg = 90 << 12;
1864 /* Give a sensible default for the first frame. */
1865 cpi->frames_since_key = 8;
1866 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1867 cpi->this_key_frame_forced = 0;
1868 cpi->next_key_frame_forced = 0;
1870 cpi->source_alt_ref_pending = 0;
1871 cpi->source_alt_ref_active = 0;
1872 cpi->common.refresh_alt_ref_frame = 0;
1874 cpi->force_maxqp = 0;
1876 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1877 #if CONFIG_INTERNAL_STATS
1878 cpi->b_calculate_ssimg = 0;
1883 if (cpi->b_calculate_psnr) {
1884 cpi->total_sq_error = 0.0;
1885 cpi->total_sq_error2 = 0.0;
1890 cpi->totalp_y = 0.0;
1891 cpi->totalp_u = 0.0;
1892 cpi->totalp_v = 0.0;
1894 cpi->tot_recode_hits = 0;
1895 cpi->summed_quality = 0;
1896 cpi->summed_weights = 0;
/* Sentinel: no timestamp has been seen yet. */
1901 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1903 cpi->frames_till_gf_update_due = 0;
1904 cpi->key_frame_count = 1;
1906 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1909 cpi->total_byte_count = 0;
1911 cpi->drop_frame = 0;
/* Rate-control correction factors all start at unity. */
1913 cpi->rate_correction_factor = 1.0;
1914 cpi->key_frame_rate_correction_factor = 1.0;
1915 cpi->gf_rate_correction_factor = 1.0;
1916 cpi->twopass.est_max_qcorrection_factor = 1.0;
1918 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1919 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1922 #ifdef OUTPUT_YUV_SRC
1923 yuv_file = fopen("bd.yuv", "ab");
1925 #ifdef OUTPUT_YUV_DENOISED
1926 yuv_denoised_file = fopen("denoised.yuv", "ab");
1930 framepsnr = fopen("framepsnr.stt", "a");
1931 kf_list = fopen("kf_list.stt", "w");
1934 cpi->output_pkt_list = oxcf->output_pkt_list;
1936 #if !CONFIG_REALTIME_ONLY
1938 if (cpi->pass == 1) {
1939 vp8_init_first_pass(cpi);
1940 } else if (cpi->pass == 2) {
/* Wire up the caller-supplied first-pass stats packets. */
1941 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1942 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1944 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1945 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1946 cpi->twopass.stats_in_end =
1947 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1948 vp8_init_second_pass(cpi);
1953 if (cpi->compressor_speed == 2) {
1954 cpi->avg_encode_time = 0;
1955 cpi->avg_pick_mode_time = 0;
1958 vp8_set_speed_features(cpi);
1960 /* Set starting values of RD threshold multipliers (128 = *1) */
1961 for (i = 0; i < MAX_MODES; ++i) {
1962 cpi->mb.rd_thresh_mult[i] = 128;
1965 #ifdef VP8_ENTROPY_STATS
1966 init_mv_ref_counts();
1969 #if CONFIG_MULTITHREAD
1970 if (vp8cx_create_encoder_threads(cpi)) {
1971 vp8_remove_compressor(&cpi);
/* SAD / variance / sub-pixel-variance function pointers per block size. */
1976 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
1977 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
1978 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
1979 cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
1980 cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
1981 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
1983 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
1984 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
1985 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
1986 cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
1987 cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
1988 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
1990 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
1991 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
1992 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
1993 cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
1994 cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
1995 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
1997 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
1998 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
1999 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2000 cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
2001 cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
2002 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2004 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2005 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2006 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2007 cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
2008 cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
2009 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2011 #if ARCH_X86 || ARCH_X86_64
2012 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2013 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2014 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2015 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2016 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2019 cpi->full_search_sad = vp8_full_search_sad;
2020 cpi->diamond_search_sad = vp8_diamond_search_sad;
2021 cpi->refining_search_sad = vp8_refining_search_sad;
2023 /* make sure frame 1 is okay */
2024 cpi->mb.error_bins[0] = cpi->common.MBs;
2026 /* vp8cx_init_quantizer() is first called here. Add check in
2027 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2028 * called later when needed. This will avoid unnecessary calls of
2029 * vp8cx_init_quantizer() for every frame.
2031 vp8cx_init_quantizer(cpi);
2033 vp8_loop_filter_init(cm);
2035 cpi->common.error.setjmp = 0;
2037 #if CONFIG_MULTI_RES_ENCODING
2039 /* Calculate # of MBs in a row in lower-resolution level image. */
2040 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2044 /* setup RD costs to MACROBLOCK struct */
/* Cost tables are indexed by signed MV components; point at the centers. */
2046 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2047 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2048 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2049 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2051 cal_mvsadcosts(cpi->mb.mvsadcost);
2053 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2054 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2055 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2056 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2057 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2059 /* setup block ptrs & offsets */
2060 vp8_setup_block_ptrs(&cpi->mb);
2061 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2066 void vp8_remove_compressor(VP8_COMP **ptr) {
2067 VP8_COMP *cpi = *ptr;
2071 if (cpi && (cpi->common.current_video_frame > 0)) {
2072 #if !CONFIG_REALTIME_ONLY
2074 if (cpi->pass == 2) {
2075 vp8_end_second_pass(cpi);
2080 #ifdef VP8_ENTROPY_STATS
2081 print_context_counters();
2082 print_tree_update_probs();
2083 print_mode_context();
2086 #if CONFIG_INTERNAL_STATS
2088 if (cpi->pass != 1) {
2089 FILE *f = fopen("opsnr.stt", "a");
2090 double time_encoded =
2091 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2093 double total_encode_time =
2094 (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
2095 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2096 const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
2097 const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
2099 if (cpi->b_calculate_psnr) {
2100 if (cpi->oxcf.number_of_layers > 1) {
2104 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2105 "GLPsnrP\tVPXSSIM\n");
2106 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2108 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2109 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2110 cpi->common.Width * cpi->common.Height;
2112 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2113 double total_psnr2 =
2114 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2116 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2119 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2121 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2122 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2123 total_psnr2, total_ssim);
2127 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2129 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2130 double total_psnr2 =
2131 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2133 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2136 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2137 "GLPsnrP\tVPXSSIM\n");
2139 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2141 dr, cpi->total / cpi->count, total_psnr,
2142 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2147 f = fopen("qskip.stt", "a");
2148 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2157 if (cpi->compressor_speed == 2) {
2159 FILE *f = fopen("cxspeed.stt", "a");
2160 cnt_pm /= cpi->common.MBs;
2162 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2172 extern int count_mb_seg[4];
2173 FILE *f = fopen("modes.stt", "a");
2174 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2175 (double)count / (double)1000;
2176 fprintf(f, "intra_mode in Intra Frames:\n");
2177 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2178 y_modes[2], y_modes[3], y_modes[4]);
2179 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2180 uv_modes[2], uv_modes[3]);
2185 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2190 fprintf(f, "Modes in Inter Frames:\n");
2191 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2192 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2193 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2194 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2196 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2197 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2202 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2206 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2207 count_mb_seg[2], count_mb_seg[3]);
2208 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2209 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2210 inter_b_modes[NEW4X4]);
2216 #ifdef VP8_ENTROPY_STATS
2219 FILE *fmode = fopen("modecontext.c", "w");
2221 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2222 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2224 "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2226 for (i = 0; i < 10; ++i) {
2227 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2229 for (j = 0; j < 10; ++j) {
2230 fprintf(fmode, " {");
2232 for (k = 0; k < 10; ++k) {
2233 if (!intra_mode_stats[i][j][k])
2234 fprintf(fmode, " %5d, ", 1);
2236 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2239 fprintf(fmode, "}, /* left_mode %d */\n", j);
2242 fprintf(fmode, " },\n");
2245 fprintf(fmode, "};\n");
2250 #if defined(SECTIONBITS_OUTPUT)
2254 FILE *f = fopen("tokenbits.stt", "a");
2256 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2266 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2267 printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
2268 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2273 #if CONFIG_MULTITHREAD
2274 vp8cx_remove_encoder_threads(cpi);
2277 #if CONFIG_TEMPORAL_DENOISING
2278 vp8_denoiser_free(&cpi->denoiser);
2280 dealloc_compressor_data(cpi);
2281 vpx_free(cpi->mb.ss);
2283 vpx_free(cpi->cyclic_refresh_map);
2284 vpx_free(cpi->consec_zero_last);
2285 vpx_free(cpi->consec_zero_last_mvbias);
2287 vp8_remove_common(&cpi->common);
2291 #ifdef OUTPUT_YUV_SRC
2294 #ifdef OUTPUT_YUV_DENOISED
2295 fclose(yuv_denoised_file);
/* Sum of squared differences between two planes of |cols| x |rows| pixels.
 * Full 16x16 blocks use the optimized vpx_mse16x16(); right/bottom edges of
 * odd-sized planes are handled with scalar loops. Returns the total SSE.
 */
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
                                 unsigned char *recon, int recon_stride,
                                 unsigned int cols, unsigned int rows) {
  unsigned int row, col;
  uint64_t total_sse = 0;
  int diff;

  for (row = 0; row + 16 <= rows; row += 16) {
    for (col = 0; col + 16 <= cols; col += 16) {
      unsigned int sse;

      vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
      total_sse += sse;
    }

    /* Handle odd-sized width */
    if (col < cols) {
      unsigned int border_row, border_col;
      unsigned char *border_orig = orig;
      unsigned char *border_recon = recon;

      for (border_row = 0; border_row < 16; ++border_row) {
        for (border_col = col; border_col < cols; ++border_col) {
          diff = border_orig[border_col] - border_recon[border_col];
          total_sse += diff * diff;
        }

        border_orig += orig_stride;
        border_recon += recon_stride;
      }
    }

    orig += orig_stride * 16;
    recon += recon_stride * 16;
  }

  /* Handle odd-sized height */
  for (; row < rows; ++row) {
    for (col = 0; col < cols; ++col) {
      diff = orig[col] - recon[col];
      total_sse += diff * diff;
    }

    orig += orig_stride;
    recon += recon_stride;
  }

  /* Clear SIMD state potentially left dirty by vpx_mse16x16(). */
  vpx_clear_system_state();
  return total_sse;
}
2363 static void generate_psnr_packet(VP8_COMP *cpi) {
2364 YV12_BUFFER_CONFIG *orig = cpi->Source;
2365 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2366 struct vpx_codec_cx_pkt pkt;
2369 unsigned int width = cpi->common.Width;
2370 unsigned int height = cpi->common.Height;
2372 pkt.kind = VPX_CODEC_PSNR_PKT;
2373 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2374 recon->y_stride, width, height);
2375 pkt.data.psnr.sse[0] = sse;
2376 pkt.data.psnr.sse[1] = sse;
2377 pkt.data.psnr.samples[0] = width * height;
2378 pkt.data.psnr.samples[1] = width * height;
2380 width = (width + 1) / 2;
2381 height = (height + 1) / 2;
2383 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2384 recon->uv_stride, width, height);
2385 pkt.data.psnr.sse[0] += sse;
2386 pkt.data.psnr.sse[2] = sse;
2387 pkt.data.psnr.samples[0] += width * height;
2388 pkt.data.psnr.samples[2] = width * height;
2390 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2391 recon->uv_stride, width, height);
2392 pkt.data.psnr.sse[0] += sse;
2393 pkt.data.psnr.sse[3] = sse;
2394 pkt.data.psnr.samples[0] += width * height;
2395 pkt.data.psnr.samples[3] = width * height;
2397 for (i = 0; i < 4; ++i) {
2398 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2399 (double)(pkt.data.psnr.sse[i]));
2402 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2405 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2406 if (ref_frame_flags > 7) return -1;
2408 cpi->ref_frame_flags = ref_frame_flags;
2411 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2412 if (ref_frame_flags > 7) return -1;
2414 cpi->common.refresh_golden_frame = 0;
2415 cpi->common.refresh_alt_ref_frame = 0;
2416 cpi->common.refresh_last_frame = 0;
2418 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2420 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2422 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2427 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2428 YV12_BUFFER_CONFIG *sd) {
2429 VP8_COMMON *cm = &cpi->common;
2432 if (ref_frame_flag == VP8_LAST_FRAME) {
2433 ref_fb_idx = cm->lst_fb_idx;
2434 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2435 ref_fb_idx = cm->gld_fb_idx;
2436 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2437 ref_fb_idx = cm->alt_fb_idx;
2442 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2446 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2447 YV12_BUFFER_CONFIG *sd) {
2448 VP8_COMMON *cm = &cpi->common;
2452 if (ref_frame_flag == VP8_LAST_FRAME) {
2453 ref_fb_idx = cm->lst_fb_idx;
2454 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2455 ref_fb_idx = cm->gld_fb_idx;
2456 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2457 ref_fb_idx = cm->alt_fb_idx;
2462 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2466 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2467 VP8_COMMON *cm = &cpi->common;
2468 cm->refresh_entropy_probs = update;
#if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
/* Debug helper: append the Y, U and V planes of |s| to |yuv_file|,
 * cropping each row to the plane width (strides may be larger).
 */
void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) {
  unsigned char *src = s->y_buffer;
  int h = s->y_height;

  do {
    fwrite(src, s->y_width, 1, yuv_file);
    src += s->y_stride;
  } while (--h);

  src = s->u_buffer;
  h = s->uv_height;

  do {
    fwrite(src, s->uv_width, 1, yuv_file);
    src += s->uv_stride;
  } while (--h);

  src = s->v_buffer;
  h = s->uv_height;

  do {
    fwrite(src, s->uv_width, 1, yuv_file);
    src += s->uv_stride;
  } while (--h);
}
#endif
2501 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2502 VP8_COMMON *cm = &cpi->common;
2504 /* are we resizing the image */
2505 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2506 #if CONFIG_SPATIAL_RESAMPLING
2510 if (cm->vert_scale == 3) {
2516 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2517 Scale2Ratio(cm->vert_scale, &vr, &vs);
2519 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2520 tmp_height, hs, hr, vs, vr, 0);
2522 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2523 cpi->Source = &cpi->scaled_source;
2530 static int resize_key_frame(VP8_COMP *cpi) {
2531 #if CONFIG_SPATIAL_RESAMPLING
2532 VP8_COMMON *cm = &cpi->common;
2534 /* Do we need to apply resampling for one pass cbr.
2535 * In one pass this is more limited than in two pass cbr.
2536 * The test and any change is only made once per key frame sequence.
2538 if (cpi->oxcf.allow_spatial_resampling &&
2539 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2541 int new_width, new_height;
2543 /* If we are below the resample DOWN watermark then scale down a
2546 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2547 cpi->oxcf.optimal_buffer_level / 100)) {
2549 (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2550 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2552 /* Should we now start scaling back up */
2553 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2554 cpi->oxcf.optimal_buffer_level / 100)) {
2556 (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2557 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2560 /* Get the new height and width */
2561 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2562 Scale2Ratio(cm->vert_scale, &vr, &vs);
2563 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2564 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2566 /* If the image size has changed we need to reallocate the buffers
2567 * and resample the source image
2569 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2570 cm->Width = new_width;
2571 cm->Height = new_height;
2572 vp8_alloc_compressor_data(cpi);
2573 scale_and_extend_source(cpi->un_scaled_source, cpi);
2582 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2583 VP8_COMMON *cm = &cpi->common;
2585 /* Select an interval before next GF or altref */
2586 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2588 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2589 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2591 /* Set the bits per frame that we should try and recover in
2592 * subsequent inter frames to account for the extra GF spend...
2593 * note that his does not apply for GF updates that occur
2594 * coincident with a key frame as the extra cost of key frames is
2595 * dealt with elsewhere.
2597 cpi->gf_overspend_bits += cpi->projected_frame_size;
2598 cpi->non_gf_bitrate_adjustment =
2599 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2602 /* Update data structure that monitors level of reference to last GF */
2603 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2604 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2606 /* this frame refreshes means next frames don't unless specified by user */
2607 cpi->frames_since_golden = 0;
2609 /* Clear the alternate reference update pending flag. */
2610 cpi->source_alt_ref_pending = 0;
2612 /* Set the alternate reference frame active flag */
2613 cpi->source_alt_ref_active = 1;
2615 static void update_golden_frame_stats(VP8_COMP *cpi) {
2616 VP8_COMMON *cm = &cpi->common;
2618 /* Update the Golden frame usage counts. */
2619 if (cm->refresh_golden_frame) {
2620 /* Select an interval before next GF */
2621 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2623 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2624 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2626 /* Set the bits per frame that we should try and recover in
2627 * subsequent inter frames to account for the extra GF spend...
2628 * note that his does not apply for GF updates that occur
2629 * coincident with a key frame as the extra cost of key frames
2630 * is dealt with elsewhere.
2632 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2633 /* Calcluate GF bits to be recovered
2634 * Projected size - av frame bits available for inter
2635 * frames for clip as a whole
2637 cpi->gf_overspend_bits +=
2638 (cpi->projected_frame_size - cpi->inter_frame_target);
2641 cpi->non_gf_bitrate_adjustment =
2642 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2645 /* Update data structure that monitors level of reference to last GF */
2646 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2647 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2649 /* this frame refreshes means next frames don't unless specified by
2652 cm->refresh_golden_frame = 0;
2653 cpi->frames_since_golden = 0;
2655 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2656 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2657 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2658 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2660 /* ******** Fixed Q test code only ************ */
2661 /* If we are going to use the ALT reference for the next group of
2662 * frames set a flag to say so.
2664 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2665 !cpi->common.refresh_alt_ref_frame) {
2666 cpi->source_alt_ref_pending = 1;
2667 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2670 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2672 /* Decrement count down till next gf */
2673 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2675 } else if (!cpi->common.refresh_alt_ref_frame) {
2676 /* Decrement count down till next gf */
2677 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2679 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2681 cpi->frames_since_golden++;
2683 if (cpi->frames_since_golden > 1) {
2684 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2685 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2686 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2687 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2688 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2689 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2690 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2691 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2696 /* This function updates the reference frame probability estimates that
2697 * will be used during mode selection
2699 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2700 VP8_COMMON *cm = &cpi->common;
2702 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2703 const int rf_intra = rfct[INTRA_FRAME];
2704 const int rf_inter =
2705 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2707 if (cm->frame_type == KEY_FRAME) {
2708 cpi->prob_intra_coded = 255;
2709 cpi->prob_last_coded = 128;
2710 cpi->prob_gf_coded = 128;
2711 } else if (!(rf_intra + rf_inter)) {
2712 cpi->prob_intra_coded = 63;
2713 cpi->prob_last_coded = 128;
2714 cpi->prob_gf_coded = 128;
2717 /* update reference frame costs since we can do better than what we got
2720 if (cpi->oxcf.number_of_layers == 1) {
2721 if (cpi->common.refresh_alt_ref_frame) {
2722 cpi->prob_intra_coded += 40;
2723 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2724 cpi->prob_last_coded = 200;
2725 cpi->prob_gf_coded = 1;
2726 } else if (cpi->frames_since_golden == 0) {
2727 cpi->prob_last_coded = 214;
2728 } else if (cpi->frames_since_golden == 1) {
2729 cpi->prob_last_coded = 192;
2730 cpi->prob_gf_coded = 220;
2731 } else if (cpi->source_alt_ref_active) {
2732 cpi->prob_gf_coded -= 20;
2734 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2736 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2740 #if !CONFIG_REALTIME_ONLY
2741 /* 1 = key, 0 = inter */
2742 static int decide_key_frame(VP8_COMP *cpi) {
2743 VP8_COMMON *cm = &cpi->common;
2745 int code_key_frame = 0;
2749 if (cpi->Speed > 11) return 0;
2751 /* Clear down mmx registers */
2752 vpx_clear_system_state();
2754 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2755 double change = 1.0 *
2756 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2757 (1 + cpi->last_intra_error);
2760 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2761 (1 + cpi->last_prediction_error);
2762 double minerror = cm->MBs * 256;
2764 cpi->last_intra_error = cpi->mb.intra_error;
2765 cpi->last_prediction_error = cpi->mb.prediction_error;
2767 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2768 cpi->mb.prediction_error > minerror &&
2769 (change > .25 || change2 > .25)) {
2770 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2771 * cpi->last_frame_percent_intra + 3*/
2778 /* If the following are true we might as well code a key frame */
2779 if (((cpi->this_frame_percent_intra == 100) &&
2780 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2781 ((cpi->this_frame_percent_intra > 95) &&
2782 (cpi->this_frame_percent_intra >=
2783 (cpi->last_frame_percent_intra + 5)))) {
2786 /* in addition if the following are true and this is not a golden frame
2787 * then code a key frame Note that on golden frames there often seems
2788 * to be a pop in intra useage anyway hence this restriction is
2789 * designed to prevent spurious key frames. The Intra pop needs to be
2792 else if (((cpi->this_frame_percent_intra > 60) &&
2793 (cpi->this_frame_percent_intra >
2794 (cpi->last_frame_percent_intra * 2))) ||
2795 ((cpi->this_frame_percent_intra > 75) &&
2796 (cpi->this_frame_percent_intra >
2797 (cpi->last_frame_percent_intra * 3 / 2))) ||
2798 ((cpi->this_frame_percent_intra > 90) &&
2799 (cpi->this_frame_percent_intra >
2800 (cpi->last_frame_percent_intra + 10)))) {
2801 if (!cm->refresh_golden_frame) code_key_frame = 1;
2804 return code_key_frame;
2807 static void Pass1Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
2808 unsigned int *frame_flags) {
2812 vp8_set_quantizer(cpi, 26);
2814 vp8_first_pass(cpi);
#if 0
/* Debug-only helper (normally compiled out): dump the Y, U and V planes of
 * |frame| to cx\{y,u,v}NNNN.raw, one file per plane.
 */
void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
{
  /* write the frame */
  FILE *yframe;
  int i;
  char filename[255];

  sprintf(filename, "cx\\y%04d.raw", this_frame);
  yframe = fopen(filename, "wb");

  for (i = 0; i < frame->y_height; ++i)
    fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);

  fclose(yframe);

  sprintf(filename, "cx\\u%04d.raw", this_frame);
  yframe = fopen(filename, "wb");

  for (i = 0; i < frame->uv_height; ++i)
    fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);

  fclose(yframe);

  sprintf(filename, "cx\\v%04d.raw", this_frame);
  yframe = fopen(filename, "wb");

  for (i = 0; i < frame->uv_height; ++i)
    fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);

  fclose(yframe);
}
#endif
2850 /* return of 0 means drop frame */
2852 #if !CONFIG_REALTIME_ONLY
2853 /* Function to test for conditions that indeicate we should loop
2854 * back and recode a frame.
2856 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2857 int maxq, int minq) {
2858 int force_recode = 0;
2859 VP8_COMMON *cm = &cpi->common;
2861 /* Is frame recode allowed at all
2862 * Yes if either recode mode 1 is selected or mode two is selcted
2863 * and the frame is a key frame. golden frame or alt_ref_frame
2865 if ((cpi->sf.recode_loop == 1) ||
2866 ((cpi->sf.recode_loop == 2) &&
2867 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2868 cm->refresh_alt_ref_frame))) {
2869 /* General over and under shoot tests */
2870 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2871 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2874 /* Special Constrained quality tests */
2875 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2876 /* Undershoot and below auto cq level */
2877 if ((q > cpi->cq_target_quality) &&
2878 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2881 /* Severe undershoot and between auto and user cq level */
2882 else if ((q > cpi->oxcf.cq_level) &&
2883 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2884 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2886 cpi->active_best_quality = cpi->oxcf.cq_level;
2891 return force_recode;
2893 #endif // !CONFIG_REALTIME_ONLY
2895 static void update_reference_frames(VP8_COMP *cpi) {
2896 VP8_COMMON *cm = &cpi->common;
2897 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2899 /* At this point the new frame has been encoded.
2900 * If any buffer copy / swapping is signaled it should be done here.
2903 if (cm->frame_type == KEY_FRAME) {
2904 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2906 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2907 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2909 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2911 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2912 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2913 } else /* For non key frames */
2915 if (cm->refresh_alt_ref_frame) {
2916 assert(!cm->copy_buffer_to_arf);
2918 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2919 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2920 cm->alt_fb_idx = cm->new_fb_idx;
2922 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2923 } else if (cm->copy_buffer_to_arf) {
2924 assert(!(cm->copy_buffer_to_arf & ~0x3));
2926 if (cm->copy_buffer_to_arf == 1) {
2927 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2928 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2929 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2930 cm->alt_fb_idx = cm->lst_fb_idx;
2932 cpi->current_ref_frames[ALTREF_FRAME] =
2933 cpi->current_ref_frames[LAST_FRAME];
2935 } else /* if (cm->copy_buffer_to_arf == 2) */
2937 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2938 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2939 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2940 cm->alt_fb_idx = cm->gld_fb_idx;
2942 cpi->current_ref_frames[ALTREF_FRAME] =
2943 cpi->current_ref_frames[GOLDEN_FRAME];
2948 if (cm->refresh_golden_frame) {
2949 assert(!cm->copy_buffer_to_gf);
2951 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2952 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2953 cm->gld_fb_idx = cm->new_fb_idx;
2955 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2956 } else if (cm->copy_buffer_to_gf) {
2957 assert(!(cm->copy_buffer_to_arf & ~0x3));
2959 if (cm->copy_buffer_to_gf == 1) {
2960 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2961 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2962 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2963 cm->gld_fb_idx = cm->lst_fb_idx;
2965 cpi->current_ref_frames[GOLDEN_FRAME] =
2966 cpi->current_ref_frames[LAST_FRAME];
2968 } else /* if (cm->copy_buffer_to_gf == 2) */
2970 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2971 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2972 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2973 cm->gld_fb_idx = cm->alt_fb_idx;
2975 cpi->current_ref_frames[GOLDEN_FRAME] =
2976 cpi->current_ref_frames[ALTREF_FRAME];
2982 if (cm->refresh_last_frame) {
2983 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2984 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2985 cm->lst_fb_idx = cm->new_fb_idx;
2987 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
2990 #if CONFIG_TEMPORAL_DENOISING
2991 if (cpi->oxcf.noise_sensitivity) {
2992 /* we shouldn't have to keep multiple copies as we know in advance which
2993 * buffer we should start - for now to get something up and running
2994 * I've chosen to copy the buffers
2996 if (cm->frame_type == KEY_FRAME) {
2998 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
2999 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
3000 } else /* For non key frames */
3002 vp8_yv12_extend_frame_borders(
3003 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
3005 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
3006 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3007 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
3009 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
3010 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3011 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
3013 if (cm->refresh_last_frame) {
3014 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3015 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3018 if (cpi->oxcf.noise_sensitivity == 4)
3019 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
3024 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
3025 YV12_BUFFER_CONFIG *dest,
3031 int min_consec_zero_last = 10;
3032 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
3033 unsigned char *src = source->y_buffer;
3034 unsigned char *dst = dest->y_buffer;
3036 /* Loop through the Y plane, every |skip| blocks along rows and colmumns,
3037 * summing the square differences, and only for blocks that have been
3038 * zero_last mode at least |x| frames in a row.
3040 for (i = 0; i < source->y_height; i += 16 * skip) {
3041 int block_index_row = (i >> 4) * cpi->common.mb_cols;
3042 for (j = 0; j < source->y_width; j += 16 * skip) {
3043 int index = block_index_row + (j >> 4);
3044 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3046 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
3047 dest->y_stride, &sse);
3051 src += 16 * skip * source->y_stride;
3052 dst += 16 * skip * dest->y_stride;
3054 // Only return non-zero if we have at least ~1/16 samples for estimate.
3055 if (num_blocks > (tot_num_blocks >> 4)) {
3056 assert(num_blocks != 0);
3057 return (Total / num_blocks);
#if CONFIG_TEMPORAL_DENOISING
/* Periodically (every 8 frames) measure the contrast-normalized mean square
 * source difference over stable (long-time ZEROMV-LAST) blocks, and use the
 * running average together with average QP and bitrate to switch the
 * denoiser between normal and aggressive YUV modes.
 */
static void process_denoiser_mode_change(VP8_COMP *cpi) {
  const VP8_COMMON *const cm = &cpi->common;
  int i, j;
  unsigned int total = 0;
  int num_blocks = 0;
  // Number of blocks skipped along row/column in computing the
  // nmse (normalized mean square error) of source.
  int skip = 2;
  // Only select blocks for computing nmse that have been encoded
  // as ZERO LAST min_consec_zero_last frames in a row.
  // Scale with number of temporal layers.
  int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
  // Decision is tested for changing the denoising mode every
  // num_mode_change times this function is called. Note that this
  // function called every 8 frames, so (8 * num_mode_change) is number
  // of frames where denoising mode change is tested for switch.
  int num_mode_change = 20;
  // Framerate factor, to compensate for larger mse at lower framerates.
  // Use ref_framerate, which is full source framerate for temporal layers.
  // TODO(marpan): Adjust this factor.
  int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
  int tot_num_blocks = cm->mb_rows * cm->mb_cols;
  int ystride = cpi->Source->y_stride;
  unsigned char *src = cpi->Source->y_buffer;
  unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
  static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
                                                  128, 128, 128, 128, 128, 128,
                                                  128, 128, 128, 128 };
  int bandwidth = (int)(cpi->target_bandwidth);
  // For temporal layers, use full bandwidth (top layer).
  if (cpi->oxcf.number_of_layers > 1) {
    LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
    bandwidth = (int)(lc->target_bandwidth);
  }
  // Loop through the Y plane, every skip blocks along rows and columns,
  // summing the normalized mean square error, only for blocks that have
  // been encoded as ZEROMV LAST at least min_consec_zero_last least frames in
  // a row and have small sum difference between current and previous frame.
  // Normalization here is by the contrast of the current frame block.
  for (i = 0; i < cm->Height; i += 16 * skip) {
    int block_index_row = (i >> 4) * cm->mb_cols;
    for (j = 0; j < cm->Width; j += 16 * skip) {
      int index = block_index_row + (j >> 4);
      if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
        unsigned int sse;
        const unsigned int var =
            vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
        // Only consider this block as valid for noise measurement
        // if the sum_diff average of the current and previous frame
        // is small (to avoid effects from lighting change).
        if ((sse - var) < 128) {
          unsigned int sse2;
          const unsigned int act =
              vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
          if (act > 0) total += sse / act;
          num_blocks++;
        }
      }
    }
    src += 16 * skip * ystride;
    dst += 16 * skip * ystride;
  }
  total = total * fac_framerate / 100;

  // Only consider this frame as valid sample if we have computed nmse over
  // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
  // application inputs duplicate frames, or contrast is all zero).
  if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
    // Update the recursive mean square source_diff.
    total = (total << 8) / num_blocks;
    if (cpi->denoiser.nmse_source_diff_count == 0) {
      // First sample in new interval.
      cpi->denoiser.nmse_source_diff = total;
      cpi->denoiser.qp_avg = cm->base_qindex;
    } else {
      // For subsequent samples, use average with weight ~1/4 for new sample.
      cpi->denoiser.nmse_source_diff =
          (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
      cpi->denoiser.qp_avg =
          (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
    }
    cpi->denoiser.nmse_source_diff_count++;
  }
  // Check for changing the denoiser mode, when we have obtained #samples =
  // num_mode_change. Condition the change also on the bitrate and QP.
  if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
    // Check for going up: from normal to aggressive mode.
    if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
        (cpi->denoiser.nmse_source_diff >
         cpi->denoiser.threshold_aggressive_mode) &&
        (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
         bandwidth > cpi->denoiser.bitrate_threshold)) {
      vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
    } else {
      // Check for going down: from aggressive to normal mode.
      if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
           (cpi->denoiser.nmse_source_diff <
            cpi->denoiser.threshold_aggressive_mode)) ||
          ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
           (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
            bandwidth < cpi->denoiser.bitrate_threshold))) {
        vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
      }
    }
    // Reset metric and counter for next interval.
    cpi->denoiser.nmse_source_diff = 0;
    cpi->denoiser.qp_avg = 0;
    cpi->denoiser.nmse_source_diff_count = 0;
  }
}
#endif
/* Choose a loop filter level for the current frame and, when the frame
 * refreshes at least one reference buffer, run the in-loop deblocking
 * filter over the reconstruction before its borders are extended.
 *
 * NOTE(review): this chunk lost several lines in extraction (else
 * branches, closing braces, #else/#endif markers); the remaining code is
 * preserved verbatim and the control structure below is therefore only
 * partially visible — confirm against the full source before editing.
 */
void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
  const FRAME_TYPE frame_type = cm->frame_type;
  /* Filtering only matters if this encoded frame will be used as a
   * reference later; detect the "no buffer refreshed" case up front. */
  int update_any_ref_buffers = 1;
  if (cpi->common.refresh_last_frame == 0 &&
      cpi->common.refresh_golden_frame == 0 &&
      cpi->common.refresh_alt_ref_frame == 0) {
    update_any_ref_buffers = 0;
  /* presumably part of a branch that disables filtering entirely —
   * the guarding condition was lost in extraction; verify upstream. */
  cm->filter_level = 0;
  struct vpx_usec_timer timer;
  vpx_clear_system_state();
  /* Time the filter-level search; accumulated into cpi->time_pick_lpf. */
  vpx_usec_timer_start(&timer);
  /* auto_filter == 0 selects the fast level picker; the slower
   * vp8cx_pick_filter_level() calls further down appear to belong to the
   * opposite branch (its else was lost in extraction). */
  if (cpi->sf.auto_filter == 0) {
#if CONFIG_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
      // Use the denoised buffer for selecting base loop filter level.
      // Denoised signal for current frame is stored in INTRA_FRAME.
      // No denoising on key frames.
      vp8cx_pick_filter_level_fast(
          &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
      vp8cx_pick_filter_level_fast(cpi->Source, cpi);
    vp8cx_pick_filter_level_fast(cpi->Source, cpi);
#if CONFIG_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
      // Use the denoised buffer for selecting base loop filter level.
      // Denoised signal for current frame is stored in INTRA_FRAME.
      // No denoising on key frames.
      vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
      vp8cx_pick_filter_level(cpi->Source, cpi);
    vp8cx_pick_filter_level(cpi->Source, cpi);
  /* A positive picked level feeds the alternate-loop-filter setup. */
  if (cm->filter_level > 0) {
    vp8cx_set_alt_lf_level(cpi, cm->filter_level);
  vpx_usec_timer_mark(&timer);
  cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
#if CONFIG_MULTITHREAD
  if (cpi->b_multi_threaded) {
    sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
  // No need to apply loop-filter if the encoded frame does not update
  // any reference buffers.
  if (cm->filter_level > 0 && update_any_ref_buffers) {
    vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
  /* Extend borders so later motion search can read beyond frame edges. */
  vp8_yv12_extend_frame_borders(cm->frame_to_show);
3247 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3248 unsigned char *dest,
3249 unsigned char *dest_end,
3250 unsigned int *frame_flags) {
3252 int frame_over_shoot_limit;
3253 int frame_under_shoot_limit;
3258 VP8_COMMON *cm = &cpi->common;
3259 int active_worst_qchanged = 0;
3261 #if !CONFIG_REALTIME_ONLY
3265 int zbin_oq_low = 0;
3268 int overshoot_seen = 0;
3269 int undershoot_seen = 0;
3272 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3273 cpi->oxcf.optimal_buffer_level / 100);
3274 int drop_mark75 = drop_mark * 2 / 3;
3275 int drop_mark50 = drop_mark / 4;
3276 int drop_mark25 = drop_mark / 8;
3278 /* Clear down mmx registers to allow floating point in what follows */
3279 vpx_clear_system_state();
3281 if (cpi->force_next_frame_intra) {
3282 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3283 cpi->force_next_frame_intra = 0;
3286 /* For an alt ref frame in 2 pass we skip the call to the second pass
3287 * function that sets the target bandwidth
3289 switch (cpi->pass) {
3290 #if !CONFIG_REALTIME_ONLY
3292 if (cpi->common.refresh_alt_ref_frame) {
3293 /* Per frame bit target for the alt ref frame */
3294 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3295 /* per second target bitrate */
3296 cpi->target_bandwidth =
3297 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3300 #endif // !CONFIG_REALTIME_ONLY
3302 cpi->per_frame_bandwidth =
3303 (int)(cpi->target_bandwidth / cpi->output_framerate);
3307 /* Default turn off buffer to buffer copying */
3308 cm->copy_buffer_to_gf = 0;
3309 cm->copy_buffer_to_arf = 0;
3311 /* Clear zbin over-quant value and mode boost values. */
3312 cpi->mb.zbin_over_quant = 0;
3313 cpi->mb.zbin_mode_boost = 0;
3315 /* Enable or disable mode based tweaking of the zbin
3316 * For 2 Pass Only used where GF/ARF prediction quality
3317 * is above a threshold
3319 cpi->mb.zbin_mode_boost_enabled = 1;
3320 if (cpi->pass == 2) {
3321 if (cpi->gfu_boost <= 400) {
3322 cpi->mb.zbin_mode_boost_enabled = 0;
3326 /* Current default encoder behaviour for the altref sign bias */
3327 if (cpi->source_alt_ref_active) {
3328 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3330 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3333 /* Check to see if a key frame is signaled
3334 * For two pass with auto key frame enabled cm->frame_type may already
3335 * be set, but not for one pass.
3337 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3338 (cpi->oxcf.auto_key &&
3339 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3340 /* Key frame from VFW/auto-keyframe/first frame */
3341 cm->frame_type = KEY_FRAME;
3342 #if CONFIG_TEMPORAL_DENOISING
3343 if (cpi->oxcf.noise_sensitivity == 4) {
3344 // For adaptive mode, reset denoiser to normal mode on key frame.
3345 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3350 #if CONFIG_MULTI_RES_ENCODING
3351 if (cpi->oxcf.mr_total_resolutions > 1) {
3352 LOWER_RES_FRAME_INFO *low_res_frame_info =
3353 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3355 if (cpi->oxcf.mr_encoder_id) {
3356 // TODO(marpan): This constraint shouldn't be needed, as we would like
3357 // to allow for key frame setting (forced or periodic) defined per
3358 // spatial layer. For now, keep this in.
3359 cm->frame_type = low_res_frame_info->frame_type;
3361 // Check if lower resolution is available for motion vector reuse.
3362 if (cm->frame_type != KEY_FRAME) {
3363 cpi->mr_low_res_mv_avail = 1;
3364 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3366 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3367 cpi->mr_low_res_mv_avail &=
3368 (cpi->current_ref_frames[LAST_FRAME] ==
3369 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3371 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3372 cpi->mr_low_res_mv_avail &=
3373 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3374 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3376 // Don't use altref to determine whether low res is available.
3377 // TODO (marpan): Should we make this type of condition on a
3378 // per-reference frame basis?
3380 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3381 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3382 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3387 // On a key frame: For the lowest resolution, keep track of the key frame
3388 // counter value. For the higher resolutions, reset the current video
3389 // frame counter to that of the lowest resolution.
// This is done to handle the case where we may stop/start encoding
3391 // higher layer(s). The restart-encoding of higher layer is only signaled
3392 // by a key frame for now.
3393 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3394 if (cm->frame_type == KEY_FRAME) {
3395 if (cpi->oxcf.mr_encoder_id) {
3396 // If the initial starting value of the buffer level is zero (this can
3397 // happen because we may have not started encoding this higher stream),
3398 // then reset it to non-zero value based on |starting_buffer_level|.
3399 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3401 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3402 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3403 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3404 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3405 lc->bits_off_target = lc->starting_buffer_level;
3406 lc->buffer_level = lc->starting_buffer_level;
3409 cpi->common.current_video_frame =
3410 low_res_frame_info->key_frame_counter_value;
3412 low_res_frame_info->key_frame_counter_value =
3413 cpi->common.current_video_frame;
3419 // Find the reference frame closest to the current frame.
3420 cpi->closest_reference_frame = LAST_FRAME;
3421 if (cm->frame_type != KEY_FRAME) {
3423 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3424 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3425 closest_ref = LAST_FRAME;
3426 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3427 closest_ref = GOLDEN_FRAME;
3428 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3429 closest_ref = ALTREF_FRAME;
3431 for (i = 1; i <= 3; ++i) {
3432 vpx_ref_frame_type_t ref_frame_type =
3433 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3434 if (cpi->ref_frame_flags & ref_frame_type) {
3435 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3436 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3441 cpi->closest_reference_frame = closest_ref;
3444 /* Set various flags etc to special state if it is a key frame */
3445 if (cm->frame_type == KEY_FRAME) {
3448 // Set the loop filter deltas and segmentation map update
3449 setup_features(cpi);
3451 /* The alternate reference frame cannot be active for a key frame */
3452 cpi->source_alt_ref_active = 0;
3454 /* Reset the RD threshold multipliers to default of * 1 (128) */
3455 for (i = 0; i < MAX_MODES; ++i) {
3456 cpi->mb.rd_thresh_mult[i] = 128;
3459 // Reset the zero_last counter to 0 on key frame.
3460 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3461 memset(cpi->consec_zero_last_mvbias, 0,
3462 (cpi->common.mb_rows * cpi->common.mb_cols));
3466 /* Experimental code for lagged compress and one pass
3467 * Initialise one_pass GF frames stats
3468 * Update stats used for GF selection
3471 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3473 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3474 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3475 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3476 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3477 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3478 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3479 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3480 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3481 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3485 update_rd_ref_frame_probs(cpi);
3487 if (cpi->drop_frames_allowed) {
3488 /* The reset to decimation 0 is only done here for one pass.
3489 * Once it is set two pass leaves decimation on till the next kf.
3491 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3492 cpi->decimation_factor--;
3495 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3496 cpi->decimation_factor = 1;
3498 } else if (cpi->buffer_level < drop_mark25 &&
3499 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3500 cpi->decimation_factor = 3;
3501 } else if (cpi->buffer_level < drop_mark50 &&
3502 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3503 cpi->decimation_factor = 2;
3504 } else if (cpi->buffer_level < drop_mark75 &&
3505 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3506 cpi->decimation_factor = 1;
3510 /* The following decimates the frame rate according to a regular
3511 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3512 * prevent buffer under-run in CBR mode. Alternatively it might be
3513 * desirable in some situations to drop frame rate but throw more bits
3516 * Note that dropping a key frame can be problematic if spatial
3517 * resampling is also active
3519 if (cpi->decimation_factor > 0) {
3520 switch (cpi->decimation_factor) {
3522 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3525 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3528 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3532 /* Note that we should not throw out a key frame (especially when
3533 * spatial resampling is enabled).
3535 if (cm->frame_type == KEY_FRAME) {
3536 cpi->decimation_count = cpi->decimation_factor;
3537 } else if (cpi->decimation_count > 0) {
3538 cpi->decimation_count--;
3540 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3541 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3542 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3545 #if CONFIG_MULTI_RES_ENCODING
3546 vp8_store_drop_frame_info(cpi);
3549 cm->current_video_frame++;
3550 cpi->frames_since_key++;
3551 // We advance the temporal pattern for dropped frames.
3552 cpi->temporal_pattern_counter++;
3554 #if CONFIG_INTERNAL_STATS
3558 cpi->buffer_level = cpi->bits_off_target;
3560 if (cpi->oxcf.number_of_layers > 1) {
3563 /* Propagate bits saved by dropping the frame to higher
3566 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3567 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3568 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3569 if (lc->bits_off_target > lc->maximum_buffer_size) {
3570 lc->bits_off_target = lc->maximum_buffer_size;
3572 lc->buffer_level = lc->bits_off_target;
3578 cpi->decimation_count = cpi->decimation_factor;
3581 cpi->decimation_count = 0;
3584 /* Decide how big to make the frame */
3585 if (!vp8_pick_frame_size(cpi)) {
3586 /*TODO: 2 drop_frame and return code could be put together. */
3587 #if CONFIG_MULTI_RES_ENCODING
3588 vp8_store_drop_frame_info(cpi);
3590 cm->current_video_frame++;
3591 cpi->frames_since_key++;
3592 // We advance the temporal pattern for dropped frames.
3593 cpi->temporal_pattern_counter++;
3597 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3598 * This has a knock on effect on active best quality as well.
3599 * For CBR if the buffer reaches its maximum level then we can no longer
3600 * save up bits for later frames so we might as well use them up
3601 * on the current frame.
3603 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3604 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3605 cpi->buffered_mode) {
3606 /* Max adjustment is 1/4 */
3607 int Adjustment = cpi->active_worst_quality / 4;
3612 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3613 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3614 cpi->oxcf.optimal_buffer_level) /
3617 if (buff_lvl_step) {
3619 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3626 cpi->active_worst_quality -= Adjustment;
3628 if (cpi->active_worst_quality < cpi->active_best_quality) {
3629 cpi->active_worst_quality = cpi->active_best_quality;
3634 /* Set an active best quality and if necessary active worst quality
3635 * There is some odd behavior for one pass here that needs attention.
3637 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3638 vpx_clear_system_state();
3640 Q = cpi->active_worst_quality;
3642 if (cm->frame_type == KEY_FRAME) {
3643 if (cpi->pass == 2) {
3644 if (cpi->gfu_boost > 600) {
3645 cpi->active_best_quality = kf_low_motion_minq[Q];
3647 cpi->active_best_quality = kf_high_motion_minq[Q];
3650 /* Special case for key frames forced because we have reached
3651 * the maximum key frame interval. Here force the Q to a range
3652 * based on the ambient Q to reduce the risk of popping
3654 if (cpi->this_key_frame_forced) {
3655 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3656 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3657 } else if (cpi->active_best_quality<cpi->avg_frame_qindex>> 2) {
3658 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3662 /* One pass more conservative */
3664 cpi->active_best_quality = kf_high_motion_minq[Q];
3668 else if (cpi->oxcf.number_of_layers == 1 &&
3669 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3670 /* Use the lower of cpi->active_worst_quality and recent
3671 * average Q as basis for GF/ARF Q limit unless last frame was
3674 if ((cpi->frames_since_key > 1) &&
3675 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3676 Q = cpi->avg_frame_qindex;
3679 /* For constrained quality dont allow Q less than the cq level */
3680 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3681 (Q < cpi->cq_target_quality)) {
3682 Q = cpi->cq_target_quality;
3685 if (cpi->pass == 2) {
3686 if (cpi->gfu_boost > 1000) {
3687 cpi->active_best_quality = gf_low_motion_minq[Q];
3688 } else if (cpi->gfu_boost < 400) {
3689 cpi->active_best_quality = gf_high_motion_minq[Q];
3691 cpi->active_best_quality = gf_mid_motion_minq[Q];
3694 /* Constrained quality use slightly lower active best. */
3695 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3696 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3699 /* One pass more conservative */
3701 cpi->active_best_quality = gf_high_motion_minq[Q];
3704 cpi->active_best_quality = inter_minq[Q];
3706 /* For the constant/constrained quality mode we dont want
3707 * q to fall below the cq level.
3709 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3710 (cpi->active_best_quality < cpi->cq_target_quality)) {
3711 /* If we are strongly undershooting the target rate in the last
3712 * frames then use the user passed in cq value not the auto
3715 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3716 cpi->active_best_quality = cpi->oxcf.cq_level;
3718 cpi->active_best_quality = cpi->cq_target_quality;
3723 /* If CBR and the buffer is as full then it is reasonable to allow
3724 * higher quality on the frames to prevent bits just going to waste.
3726 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
/* Note that the use of >= here eliminates the risk of a divide
3728 * by 0 error in the else if clause
3730 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3731 cpi->active_best_quality = cpi->best_quality;
3733 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3735 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3736 (cpi->oxcf.maximum_buffer_size -
3737 cpi->oxcf.optimal_buffer_level));
3738 int min_qadjustment =
3739 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3741 cpi->active_best_quality -= min_qadjustment;
3745 /* Make sure constrained quality mode limits are adhered to for the first
3746 * few frames of one pass encodes
3748 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3749 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3750 cpi->common.refresh_alt_ref_frame) {
3751 cpi->active_best_quality = cpi->best_quality;
3752 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3753 cpi->active_best_quality = cpi->cq_target_quality;
3757 /* Clip the active best and worst quality values to limits */
3758 if (cpi->active_worst_quality > cpi->worst_quality) {
3759 cpi->active_worst_quality = cpi->worst_quality;
3762 if (cpi->active_best_quality < cpi->best_quality) {
3763 cpi->active_best_quality = cpi->best_quality;
3766 if (cpi->active_worst_quality < cpi->active_best_quality) {
3767 cpi->active_worst_quality = cpi->active_best_quality;
3770 /* Determine initial Q to try */
3771 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3773 #if !CONFIG_REALTIME_ONLY
3775 /* Set highest allowed value for Zbin over quant */
3776 if (cm->frame_type == KEY_FRAME) {
3778 } else if ((cpi->oxcf.number_of_layers == 1) &&
3779 ((cm->refresh_alt_ref_frame ||
3780 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3783 zbin_oq_high = ZBIN_OQ_MAX;
3787 /* Setup background Q adjustment for error resilient mode.
3788 * For multi-layer encodes only enable this for the base layer.
3790 if (cpi->cyclic_refresh_mode_enabled) {
3791 // Special case for screen_content_mode with golden frame updates.
3793 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3794 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3795 cyclic_background_refresh(cpi, Q, 0);
3797 disable_segmentation(cpi);
3801 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3802 &frame_over_shoot_limit);
3804 #if !CONFIG_REALTIME_ONLY
3805 /* Limit Q range for the adaptive loop. */
3806 bottom_index = cpi->active_best_quality;
3807 top_index = cpi->active_worst_quality;
3808 q_low = cpi->active_best_quality;
3809 q_high = cpi->active_worst_quality;
3812 vp8_save_coding_context(cpi);
3816 scale_and_extend_source(cpi->un_scaled_source, cpi);
3818 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3819 // Option to apply spatial blur under the aggressive or adaptive
3820 // (temporal denoising) mode.
3821 if (cpi->oxcf.noise_sensitivity >= 3) {
3822 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3823 vp8_de_noise(cm, cpi->Source, cpi->Source,
3824 cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
3829 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3831 if (cpi->oxcf.noise_sensitivity > 0) {
3835 switch (cpi->oxcf.noise_sensitivity) {
3836 case 1: l = 20; break;
3837 case 2: l = 40; break;
3838 case 3: l = 60; break;
3839 case 4: l = 80; break;
3840 case 5: l = 100; break;
3841 case 6: l = 150; break;
3844 if (cm->frame_type == KEY_FRAME) {
3845 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3847 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3849 src = cpi->Source->y_buffer;
3851 if (cpi->Source->y_stride < 0) {
3852 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3859 #ifdef OUTPUT_YUV_SRC
3860 vp8_write_yuv_frame(yuv_file, cpi->Source);
3864 vpx_clear_system_state();
3866 vp8_set_quantizer(cpi, Q);
3868 /* setup skip prob for costing in mode/mv decision */
3869 if (cpi->common.mb_no_coeff_skip) {
3870 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3872 if (cm->frame_type != KEY_FRAME) {
3873 if (cpi->common.refresh_alt_ref_frame) {
3874 if (cpi->last_skip_false_probs[2] != 0) {
3875 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3879 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3880 cpi->last_skip_probs_q[2])<=16 )
3881 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3882 else if (cpi->last_skip_false_probs[2]!=0)
3883 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3884 cpi->prob_skip_false ) / 2;
3886 } else if (cpi->common.refresh_golden_frame) {
3887 if (cpi->last_skip_false_probs[1] != 0) {
3888 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3892 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3893 cpi->last_skip_probs_q[1])<=16 )
3894 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3895 else if (cpi->last_skip_false_probs[1]!=0)
3896 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3897 cpi->prob_skip_false ) / 2;
3900 if (cpi->last_skip_false_probs[0] != 0) {
3901 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3905 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3906 cpi->last_skip_probs_q[0])<=16 )
3907 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3908 else if(cpi->last_skip_false_probs[0]!=0)
3909 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3910 cpi->prob_skip_false ) / 2;
3914 /* as this is for cost estimate, let's make sure it does not
* go extreme either way
3917 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3919 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3921 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3922 cpi->prob_skip_false = 1;
3930 FILE *f = fopen("skip.stt", "a");
3931 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3938 if (cm->frame_type == KEY_FRAME) {
3939 if (resize_key_frame(cpi)) {
3940 /* If the frame size has changed, need to reset Q, quantizer,
3941 * and background refresh.
3943 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3944 if (cpi->cyclic_refresh_mode_enabled) {
3945 if (cpi->current_layer == 0) {
3946 cyclic_background_refresh(cpi, Q, 0);
3948 disable_segmentation(cpi);
3951 // Reset the zero_last counter to 0 on key frame.
3952 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3953 memset(cpi->consec_zero_last_mvbias, 0,
3954 (cpi->common.mb_rows * cpi->common.mb_cols));
3955 vp8_set_quantizer(cpi, Q);
3958 vp8_setup_key_frame(cpi);
3961 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3963 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3965 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3966 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3969 if (cm->refresh_entropy_probs == 0) {
3970 /* save a copy for later refresh */
3971 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3974 vp8_update_coef_context(cpi);
3976 vp8_update_coef_probs(cpi);
3978 /* transform / motion compensation build reconstruction frame
3979 * +pack coef partitions
3981 vp8_encode_frame(cpi);
3983 /* cpi->projected_frame_size is not needed for RT mode */
3986 /* transform / motion compensation build reconstruction frame */
3987 vp8_encode_frame(cpi);
3988 if (cpi->oxcf.screen_content_mode == 2) {
3989 if (vp8_drop_encodedframe_overshoot(cpi, Q)) return;
3992 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3993 cpi->projected_frame_size =
3994 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3996 vpx_clear_system_state();
3998 /* Test to see if the stats generated for this frame indicate that
3999 * we should have coded a key frame (assuming that we didn't)!
4002 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
4003 cpi->compressor_speed != 2) {
4004 #if !CONFIG_REALTIME_ONLY
4005 if (decide_key_frame(cpi)) {
4006 /* Reset all our sizing numbers and recode */
4007 cm->frame_type = KEY_FRAME;
4009 vp8_pick_frame_size(cpi);
4011 /* Clear the Alt reference frame active flag when we have
4014 cpi->source_alt_ref_active = 0;
4016 // Set the loop filter deltas and segmentation map update
4017 setup_features(cpi);
4019 vp8_restore_coding_context(cpi);
4021 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4023 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
4024 &frame_over_shoot_limit);
4026 /* Limit Q range for the adaptive loop. */
4027 bottom_index = cpi->active_best_quality;
4028 top_index = cpi->active_worst_quality;
4029 q_low = cpi->active_best_quality;
4030 q_high = cpi->active_worst_quality;
4040 vpx_clear_system_state();
4042 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4044 /* Are we are overshooting and up against the limit of active max Q. */
4045 if (((cpi->pass != 2) ||
4046 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4047 (Q == cpi->active_worst_quality) &&
4048 (cpi->active_worst_quality < cpi->worst_quality) &&
4049 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4050 int over_size_percent =
4051 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4052 frame_over_shoot_limit;
4054 /* If so is there any scope for relaxing it */
4055 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4056 (over_size_percent > 0)) {
4057 cpi->active_worst_quality++;
4058 /* Assume 1 qstep = about 4% on frame size. */
4059 over_size_percent = (int)(over_size_percent * 0.96);
4061 #if !CONFIG_REALTIME_ONLY
4062 top_index = cpi->active_worst_quality;
4063 #endif // !CONFIG_REALTIME_ONLY
4064 /* If we have updated the active max Q do not call
4065 * vp8_update_rate_correction_factors() this loop.
4067 active_worst_qchanged = 1;
4069 active_worst_qchanged = 0;
4072 #if CONFIG_REALTIME_ONLY
4075 /* Special case handling for forced key frames */
4076 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4078 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4080 /* The key frame is not good enough */
4081 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4083 q_high = (Q > q_low) ? (Q - 1) : q_low;
4086 Q = (q_high + q_low) >> 1;
4088 /* The key frame is much better than the previous frame */
4089 else if (kf_err < (cpi->ambient_err >> 1)) {
4091 q_low = (Q < q_high) ? (Q + 1) : q_high;
4094 Q = (q_high + q_low + 1) >> 1;
4097 /* Clamp Q to upper and lower limits: */
4100 } else if (Q < q_low) {
4107 /* Is the projected frame size out of range and are we allowed
4108 * to attempt to recode.
4110 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4111 frame_under_shoot_limit, Q, top_index,
4116 /* Frame size out of permitted range. Update correction factor
4117 * & compute new Q to try...
4120 /* Frame is too large */
4121 if (cpi->projected_frame_size > cpi->this_frame_target) {
4122 /* Raise Qlow as to at least the current value */
4123 q_low = (Q < q_high) ? (Q + 1) : q_high;
4125 /* If we are using over quant do the same for zbin_oq_low */
4126 if (cpi->mb.zbin_over_quant > 0) {
4127 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4128 ? (cpi->mb.zbin_over_quant + 1)
4132 if (undershoot_seen) {
4133 /* Update rate_correction_factor unless
4134 * cpi->active_worst_quality has changed.
4136 if (!active_worst_qchanged) {
4137 vp8_update_rate_correction_factors(cpi, 1);
4140 Q = (q_high + q_low + 1) / 2;
4142 /* Adjust cpi->zbin_over_quant (only allowed when Q
4146 cpi->mb.zbin_over_quant = 0;
4148 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4149 ? (cpi->mb.zbin_over_quant + 1)
4151 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4154 /* Update rate_correction_factor unless
4155 * cpi->active_worst_quality has changed.
4157 if (!active_worst_qchanged) {
4158 vp8_update_rate_correction_factors(cpi, 0);
4161 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4163 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4165 vp8_update_rate_correction_factors(cpi, 0);
4166 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4173 /* Frame is too small */
4175 if (cpi->mb.zbin_over_quant == 0) {
4176 /* Lower q_high if not using over quant */
4177 q_high = (Q > q_low) ? (Q - 1) : q_low;
4179 /* else lower zbin_oq_high */
4180 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4181 ? (cpi->mb.zbin_over_quant - 1)
4185 if (overshoot_seen) {
4186 /* Update rate_correction_factor unless
4187 * cpi->active_worst_quality has changed.
4189 if (!active_worst_qchanged) {
4190 vp8_update_rate_correction_factors(cpi, 1);
4193 Q = (q_high + q_low) / 2;
4195 /* Adjust cpi->zbin_over_quant (only allowed when Q
4199 cpi->mb.zbin_over_quant = 0;
4201 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4204 /* Update rate_correction_factor unless
4205 * cpi->active_worst_quality has changed.
4207 if (!active_worst_qchanged) {
4208 vp8_update_rate_correction_factors(cpi, 0);
4211 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4213 /* Special case reset for qlow for constrained quality.
4214 * This should only trigger where there is very substantial
4215 * undershoot on a frame and the auto cq level is above
* the user passed in value.
4218 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4223 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4225 vp8_update_rate_correction_factors(cpi, 0);
4226 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4231 undershoot_seen = 1;
4234 /* Clamp Q to upper and lower limits: */
4237 } else if (Q < q_low) {
4241 /* Clamp cpi->zbin_over_quant */
4242 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low)
4244 : (cpi->mb.zbin_over_quant > zbin_oq_high)
4246 : cpi->mb.zbin_over_quant;
4252 #endif // CONFIG_REALTIME_ONLY
4254 if (cpi->is_src_frame_alt_ref) Loop = 0;
4257 vp8_restore_coding_context(cpi);
4259 #if CONFIG_INTERNAL_STATS
4260 cpi->tot_recode_hits++;
4263 } while (Loop == 1);
4265 #if defined(DROP_UNCODED_FRAMES)
4266 /* if there are no coded macroblocks at all drop this frame */
4267 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4268 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4269 cpi->common.current_video_frame++;
4270 cpi->frames_since_key++;
4271 cpi->drop_frame_count++;
4272 // We advance the temporal pattern for dropped frames.
4273 cpi->temporal_pattern_counter++;
4276 cpi->drop_frame_count = 0;
4280 /* Experimental code for lagged and one pass
4281 * Update stats used for one pass GF selection
4284 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4285 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4286 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4290 /* Special case code to reduce pulsing when key frames are forced at a
4291 * fixed interval. Note the reconstruction error if it is the frame before
4292 * the force key frame
4294 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4296 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4299 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4300 * Last frame has one more line(add to bottom) and one more column(add to
4301 * right) than cm->mip. The edge elements are initialized to 0.
4303 #if CONFIG_MULTI_RES_ENCODING
4304 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4306 if (cm->show_frame) /* do not save for altref frame */
4311 /* Point to beginning of allocated MODE_INFO arrays. */
4312 MODE_INFO *tmp = cm->mip;
4314 if (cm->frame_type != KEY_FRAME) {
4315 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4316 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4317 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4318 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4319 tmp->mbmi.mv.as_int;
4322 cpi->lf_ref_frame_sign_bias[mb_col +
4323 mb_row * (cm->mode_info_stride + 1)] =
4324 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4325 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4326 tmp->mbmi.ref_frame;
4333 /* Count last ref frame 0,0 usage on current encoded frame. */
4337 /* Point to beginning of MODE_INFO arrays. */
4338 MODE_INFO *tmp = cm->mi;
4340 cpi->zeromv_count = 0;
4342 if (cm->frame_type != KEY_FRAME) {
4343 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4344 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4345 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4346 cpi->zeromv_count++;
4355 #if CONFIG_MULTI_RES_ENCODING
4356 vp8_cal_dissimilarity(cpi);
4359 /* Update the GF useage maps.
4360 * This is done after completing the compression of a frame when all
4361 * modes etc. are finalized but before loop filter
4363 if (cpi->oxcf.number_of_layers == 1) {
4364 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4367 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4371 FILE *f = fopen("gfactive.stt", "a");
4372 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4377 /* For inter frames the current default behavior is that when
4378 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4379 * This is purely an encoder decision at present.
4381 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) {
4382 cm->copy_buffer_to_arf = 2;
4384 cm->copy_buffer_to_arf = 0;
4387 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4389 #if CONFIG_TEMPORAL_DENOISING
4390 // Get some measure of the amount of noise, by measuring the (partial) mse
4391 // between source and denoised buffer, for y channel. Partial refers to
4392 // computing the sse for a sub-sample of the frame (i.e., skip x blocks along
4394 // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
4395 // Do this every ~8 frames, to further reduce complexity.
4396 // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity <
4398 // should be removed in favor of the process_denoiser_mode_change() function
4400 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4401 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4402 cm->frame_type != KEY_FRAME) {
4403 cpi->mse_source_denoised = measure_square_diff_partial(
4404 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4407 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4408 // of source diff (between current and previous frame), and determine if we
4409 // should switch the denoiser mode. Sampling refers to computing the mse for
4410 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4411 // only for blocks in that set that have used ZEROMV LAST, along with some
4412 // constraint on the sum diff between blocks. This process is called every
4413 // ~8 frames, to further reduce complexity.
4414 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4415 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4416 process_denoiser_mode_change(cpi);
4420 #if CONFIG_MULTITHREAD
4421 if (cpi->b_multi_threaded) {
4422 /* start loopfilter in separate thread */
4423 sem_post(&cpi->h_event_start_lpf);
4424 cpi->b_lpf_running = 1;
4428 vp8_loopfilter_frame(cpi, cm);
4431 update_reference_frames(cpi);
4433 #ifdef OUTPUT_YUV_DENOISED
4434 vp8_write_yuv_frame(yuv_denoised_file,
4435 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4438 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4439 if (cpi->oxcf.error_resilient_mode) {
4440 cm->refresh_entropy_probs = 0;
4444 #if CONFIG_MULTITHREAD
4445 /* wait that filter_level is picked so that we can continue with stream
4447 if (cpi->b_multi_threaded) sem_wait(&cpi->h_event_end_lpf);
4450 /* build the bitstream */
4451 vp8_pack_bitstream(cpi, dest, dest_end, size);
4453 /* Move storing frame_type out of the above loop since it is also
4454 * needed in motion search besides loopfilter */
4455 cm->last_frame_type = cm->frame_type;
4457 /* Update rate control heuristics */
4458 cpi->total_byte_count += (*size);
4459 cpi->projected_frame_size = (int)(*size) << 3;
4461 if (cpi->oxcf.number_of_layers > 1) {
4463 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4464 cpi->layer_context[i].total_byte_count += (*size);
4468 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4470 cpi->last_q[cm->frame_type] = cm->base_qindex;
4472 if (cm->frame_type == KEY_FRAME) {
4473 vp8_adjust_key_frame_context(cpi);
4476 /* Keep a record of ambient average Q. */
4477 if (cm->frame_type != KEY_FRAME) {
4478 cpi->avg_frame_qindex =
4479 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4482 /* Keep a record from which we can calculate the average Q excluding
4483 * GF updates and key frames
4485 if ((cm->frame_type != KEY_FRAME) &&
4486 ((cpi->oxcf.number_of_layers > 1) ||
4487 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4490 /* Calculate the average Q for normal inter frames (not key or GFU
4493 if (cpi->pass == 2) {
4494 cpi->ni_tot_qi += Q;
4495 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4497 /* Damp value for first few frames */
4498 if (cpi->ni_frames > 150) {
4499 cpi->ni_tot_qi += Q;
4500 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4502 /* For one pass, early in the clip ... average the current frame Q
4503 * value with the worstq entered by the user as a dampening measure
4506 cpi->ni_tot_qi += Q;
4508 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4511 /* If the average Q is higher than what was used in the last
4512 * frame (after going through the recode loop to keep the frame
4513 * size within range) then use the last frame value - 1. The -1
4514 * is designed to stop Q and hence the data rate, from
4515 * progressively falling away during difficult sections, but at
4516 * the same time reduce the number of iterations around the
4519 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4523 /* Update the buffer level variable. */
4524 /* Non-viewable frames are a special case and are treated as pure overhead. */
4525 if (!cm->show_frame) {
4526 cpi->bits_off_target -= cpi->projected_frame_size;
4528 cpi->bits_off_target +=
4529 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4532 /* Clip the buffer level to the maximum specified buffer size */
4533 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4534 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4537 // If the frame dropper is not enabled, don't let the buffer level go below
4538 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4539 // this for screen content input.
4540 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4541 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4542 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4545 /* Rolling monitors of whether we are over or underspending used to
4546 * help regulate min and Max Q in two pass.
4548 cpi->rolling_target_bits =
4549 ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4550 cpi->rolling_actual_bits =
4551 ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4552 cpi->long_rolling_target_bits =
4553 ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4554 cpi->long_rolling_actual_bits =
4555 ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) /
4558 /* Actual bits spent */
4559 cpi->total_actual_bits += cpi->projected_frame_size;
4562 cpi->total_target_vs_actual +=
4563 (cpi->this_frame_target - cpi->projected_frame_size);
4565 cpi->buffer_level = cpi->bits_off_target;
4567 /* Propagate values to higher temporal layers */
4568 if (cpi->oxcf.number_of_layers > 1) {
4571 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4572 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4573 int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
4574 cpi->projected_frame_size);
4576 lc->bits_off_target += bits_off_for_this_layer;
4578 /* Clip buffer level to maximum buffer size for the layer */
4579 if (lc->bits_off_target > lc->maximum_buffer_size) {
4580 lc->bits_off_target = lc->maximum_buffer_size;
4583 lc->total_actual_bits += cpi->projected_frame_size;
4584 lc->total_target_vs_actual += bits_off_for_this_layer;
4585 lc->buffer_level = lc->bits_off_target;
4589 /* Update bits left to the kf and gf groups to account for overshoot
4590 * or undershoot on these frames
4592 if (cm->frame_type == KEY_FRAME) {
4593 cpi->twopass.kf_group_bits +=
4594 cpi->this_frame_target - cpi->projected_frame_size;
4596 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4597 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4598 cpi->twopass.gf_group_bits +=
4599 cpi->this_frame_target - cpi->projected_frame_size;
4601 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4604 if (cm->frame_type != KEY_FRAME) {
4605 if (cpi->common.refresh_alt_ref_frame) {
4606 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4607 cpi->last_skip_probs_q[2] = cm->base_qindex;
4608 } else if (cpi->common.refresh_golden_frame) {
4609 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4610 cpi->last_skip_probs_q[1] = cm->base_qindex;
4612 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4613 cpi->last_skip_probs_q[0] = cm->base_qindex;
4615 /* update the baseline */
4616 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4620 #if 0 && CONFIG_INTERNAL_STATS
4622 FILE *f = fopen("tmp.stt", "a");
4624 vpx_clear_system_state();
4626 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4627 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4628 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4629 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4630 cpi->common.current_video_frame, cpi->this_frame_target,
4631 cpi->projected_frame_size,
4632 (cpi->projected_frame_size - cpi->this_frame_target),
4633 cpi->total_target_vs_actual,
4635 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4636 cpi->total_actual_bits, cm->base_qindex,
4637 cpi->active_best_quality, cpi->active_worst_quality,
4638 cpi->ni_av_qi, cpi->cq_target_quality,
4639 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4640 cm->frame_type, cpi->gfu_boost,
4641 cpi->twopass.est_max_qcorrection_factor,
4642 cpi->twopass.bits_left,
4643 cpi->twopass.total_left_stats.coded_error,
4644 (double)cpi->twopass.bits_left /
4645 cpi->twopass.total_left_stats.coded_error,
4646 cpi->tot_recode_hits);
4648 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4649 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4650 "%8.2lf %"PRId64" %10.3lf %8d\n",
4651 cpi->common.current_video_frame, cpi->this_frame_target,
4652 cpi->projected_frame_size,
4653 (cpi->projected_frame_size - cpi->this_frame_target),
4654 cpi->total_target_vs_actual,
4656 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4657 cpi->total_actual_bits, cm->base_qindex,
4658 cpi->active_best_quality, cpi->active_worst_quality,
4659 cpi->ni_av_qi, cpi->cq_target_quality,
4660 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4661 cm->frame_type, cpi->gfu_boost,
4662 cpi->twopass.est_max_qcorrection_factor,
4663 cpi->twopass.bits_left,
4664 cpi->twopass.total_left_stats.coded_error,
4665 cpi->tot_recode_hits);
4670 FILE *fmodes = fopen("Modes.stt", "a");
4672 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4673 cpi->common.current_video_frame,
4674 cm->frame_type, cm->refresh_golden_frame,
4675 cm->refresh_alt_ref_frame);
4677 fprintf(fmodes, "\n");
4685 if (cm->refresh_golden_frame == 1) {
4686 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4688 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4691 if (cm->refresh_alt_ref_frame == 1) {
4692 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4694 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4697 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4698 cpi->gold_is_last = 1;
4699 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4700 /* 1 refreshed but not the other */
4701 cpi->gold_is_last = 0;
4704 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4705 cpi->alt_is_last = 1;
4706 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4707 /* 1 refreshed but not the other */
4708 cpi->alt_is_last = 0;
4711 if (cm->refresh_alt_ref_frame &
4712 cm->refresh_golden_frame) { /* both refreshed */
4713 cpi->gold_is_alt = 1;
4714 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4715 /* 1 refreshed but not the other */
4716 cpi->gold_is_alt = 0;
4719 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4721 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4723 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4725 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4727 if (!cpi->oxcf.error_resilient_mode) {
4728 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4729 (cm->frame_type != KEY_FRAME)) {
4730 /* Update the alternate reference frame stats as appropriate. */
4731 update_alt_ref_frame_stats(cpi);
4733 /* Update the Golden frame stats as appropriate. */
4734 update_golden_frame_stats(cpi);
4738 if (cm->frame_type == KEY_FRAME) {
4739 /* Tell the caller that the frame was coded as a key frame */
4740 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4742 /* As this frame is a key frame the next defaults to an inter frame. */
4743 cm->frame_type = INTER_FRAME;
4745 cpi->last_frame_percent_intra = 100;
4747 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4749 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4752 /* Clear the one shot update flags for segmentation map and mode/ref
4753 * loop filter deltas.
4755 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4756 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4757 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4759 /* Dont increment frame counters if this was an altref buffer update
4762 if (cm->show_frame) {
4763 cm->current_video_frame++;
4764 cpi->frames_since_key++;
4765 cpi->temporal_pattern_counter++;
4768 /* reset to normal state now that we are done. */
4774 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4775 recon_file = fopen(filename, "wb");
4776 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4777 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4783 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4785 #if !CONFIG_REALTIME_ONLY
4786 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4787 unsigned char *dest_end, unsigned int *frame_flags) {
4788 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4790 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4791 cpi->twopass.bits_left -= 8 * (int)(*size);
4793 if (!cpi->common.refresh_alt_ref_frame) {
4794 double two_pass_min_rate =
4795 (double)(cpi->oxcf.target_bandwidth *
4796 cpi->oxcf.two_pass_vbrmin_section / 100);
4797 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4802 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4803 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4805 struct vpx_usec_timer timer;
4808 vpx_usec_timer_start(&timer);
4810 /* Reinit the lookahead buffer if the frame size changes */
4811 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4812 assert(cpi->oxcf.lag_in_frames < 2);
4813 dealloc_raw_frame_buffers(cpi);
4814 alloc_raw_frame_buffers(cpi);
4817 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4818 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4821 vpx_usec_timer_mark(&timer);
4822 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4827 static int frame_is_reference(const VP8_COMP *cpi) {
4828 const VP8_COMMON *cm = &cpi->common;
4829 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4831 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4832 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4833 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4834 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4835 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4838 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4839 size_t *size, unsigned char *dest,
4840 unsigned char *dest_end, int64_t *time_stamp,
4841 int64_t *time_end, int flush) {
4843 struct vpx_usec_timer tsctimer;
4844 struct vpx_usec_timer ticktimer;
4845 struct vpx_usec_timer cmptimer;
4846 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4848 if (!cpi) return -1;
4852 if (setjmp(cpi->common.error.jmp)) {
4853 cpi->common.error.setjmp = 0;
4854 vpx_clear_system_state();
4855 return VPX_CODEC_CORRUPT_FRAME;
4858 cpi->common.error.setjmp = 1;
4860 vpx_usec_timer_start(&cmptimer);
4864 #if !CONFIG_REALTIME_ONLY
4865 /* Should we code an alternate reference frame */
4866 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4867 cpi->source_alt_ref_pending) {
4868 if ((cpi->source = vp8_lookahead_peek(
4869 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4870 cpi->alt_ref_source = cpi->source;
4871 if (cpi->oxcf.arnr_max_frames > 0) {
4872 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4873 force_src_buffer = &cpi->alt_ref_buffer;
4875 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4876 cm->refresh_alt_ref_frame = 1;
4877 cm->refresh_golden_frame = 0;
4878 cm->refresh_last_frame = 0;
4880 /* Clear Pending alt Ref flag. */
4881 cpi->source_alt_ref_pending = 0;
4882 cpi->is_src_frame_alt_ref = 0;
4888 /* Read last frame source if we are encoding first pass. */
4889 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4890 if ((cpi->last_source =
4891 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
4896 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
4899 cpi->is_src_frame_alt_ref =
4900 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4902 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4907 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4908 cpi->un_scaled_source = cpi->Source;
4909 *time_stamp = cpi->source->ts_start;
4910 *time_end = cpi->source->ts_end;
4911 *frame_flags = cpi->source->flags;
4913 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4914 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4918 #if !CONFIG_REALTIME_ONLY
4920 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4921 vp8_end_first_pass(cpi); /* get last stats packet */
4922 cpi->twopass.first_pass_done = 1;
4930 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4931 cpi->first_time_stamp_ever = cpi->source->ts_start;
4932 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4935 /* adjust frame rates based on timestamps given */
4936 if (cm->show_frame) {
4937 int64_t this_duration;
4940 if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
4941 this_duration = cpi->source->ts_end - cpi->source->ts_start;
4944 int64_t last_duration;
4946 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
4947 last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4948 /* do a step update if the duration changes by 10% */
4949 if (last_duration) {
4950 step = (int)(((this_duration - last_duration) * 10 / last_duration));
4954 if (this_duration) {
4956 cpi->ref_framerate = 10000000.0 / this_duration;
4958 double avg_duration, interval;
4960 /* Average this frame's rate into the last second's average
4961 * frame rate. If we haven't seen 1 second yet, then average
4962 * over the whole interval seen.
4964 interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
4965 if (interval > 10000000.0) interval = 10000000;
4967 avg_duration = 10000000.0 / cpi->ref_framerate;
4968 avg_duration *= (interval - avg_duration + this_duration);
4969 avg_duration /= interval;
4971 cpi->ref_framerate = 10000000.0 / avg_duration;
4973 #if CONFIG_MULTI_RES_ENCODING
4974 if (cpi->oxcf.mr_total_resolutions > 1) {
4975 LOWER_RES_FRAME_INFO *low_res_frame_info =
4976 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
4977 // Frame rate should be the same for all spatial layers in
4978 // multi-res-encoding (simulcast), so we constrain the frame for
4979 // higher layers to be that of lowest resolution. This is needed
4980 // as the application may decide to skip encoding a high layer and
4981 // then start again, in which case a big jump in time-stamps will
4982 // be received for that high layer, which will yield an incorrect
4983 // frame rate (from time-stamp adjustment in above calculation).
4984 if (cpi->oxcf.mr_encoder_id) {
4985 cpi->ref_framerate = low_res_frame_info->low_res_framerate;
4987 // Keep track of frame rate for lowest resolution.
4988 low_res_frame_info->low_res_framerate = cpi->ref_framerate;
4992 if (cpi->oxcf.number_of_layers > 1) {
4995 /* Update frame rates for each layer */
4996 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
4997 for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
4999 LAYER_CONTEXT *lc = &cpi->layer_context[i];
5000 lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
5003 vp8_new_framerate(cpi, cpi->ref_framerate);
5007 cpi->last_time_stamp_seen = cpi->source->ts_start;
5008 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
5011 if (cpi->oxcf.number_of_layers > 1) {
5014 update_layer_contexts(cpi);
5016 /* Restore layer specific context & set frame rate */
5017 if (cpi->temporal_layer_id >= 0) {
5018 layer = cpi->temporal_layer_id;
5022 .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
5024 restore_layer_context(cpi, layer);
5025 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
5028 if (cpi->compressor_speed == 2) {
5029 vpx_usec_timer_start(&tsctimer);
5030 vpx_usec_timer_start(&ticktimer);
5033 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;
5035 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
5038 const int num_part = (1 << cm->multi_token_partition);
5039 /* the available bytes in dest */
5040 const unsigned long dest_size = dest_end - dest;
5041 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
5043 unsigned char *dp = dest;
5045 cpi->partition_d[0] = dp;
5046 dp += dest_size / 10; /* reserve 1/10 for control partition */
5047 cpi->partition_d_end[0] = dp;
5049 for (i = 0; i < num_part; ++i) {
5050 cpi->partition_d[i + 1] = dp;
5051 dp += tok_part_buff_size;
5052 cpi->partition_d_end[i + 1] = dp;
5057 /* start with a 0 size frame */
5060 /* Clear down mmx registers */
5061 vpx_clear_system_state();
5063 cm->frame_type = INTER_FRAME;
5064 cm->frame_flags = *frame_flags;
5068 if (cm->refresh_alt_ref_frame)
5070 cm->refresh_golden_frame = 0;
5071 cm->refresh_last_frame = 0;
5075 cm->refresh_golden_frame = 0;
5076 cm->refresh_last_frame = 1;
5080 /* find a free buffer for the new frame */
5083 for (; i < NUM_YV12_BUFFERS; ++i) {
5084 if (!cm->yv12_fb[i].flags) {
5090 assert(i < NUM_YV12_BUFFERS);
5092 switch (cpi->pass) {
5093 #if !CONFIG_REALTIME_ONLY
5094 case 1: Pass1Encode(cpi, size, dest, frame_flags); break;
5095 case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
5096 #endif // !CONFIG_REALTIME_ONLY
5098 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5102 if (cpi->compressor_speed == 2) {
5103 unsigned int duration, duration2;
5104 vpx_usec_timer_mark(&tsctimer);
5105 vpx_usec_timer_mark(&ticktimer);
5107 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5108 duration2 = (unsigned int)((double)duration / 2);
5110 if (cm->frame_type != KEY_FRAME) {
5111 if (cpi->avg_encode_time == 0) {
5112 cpi->avg_encode_time = duration;
5114 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5120 if (cpi->avg_pick_mode_time == 0) {
5121 cpi->avg_pick_mode_time = duration2;
5123 cpi->avg_pick_mode_time =
5124 (7 * cpi->avg_pick_mode_time + duration2) >> 3;
5130 if (cm->refresh_entropy_probs == 0) {
5131 memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5134 /* Save the contexts separately for alt ref, gold and last. */
5135 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5136 if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5138 if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5140 if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5142 /* if its a dropped frame honor the requests on subsequent frames */
5144 cpi->droppable = !frame_is_reference(cpi);
5146 /* return to normal state */
5147 cm->refresh_entropy_probs = 1;
5148 cm->refresh_alt_ref_frame = 0;
5149 cm->refresh_golden_frame = 0;
5150 cm->refresh_last_frame = 1;
5151 cm->frame_type = INTER_FRAME;
5154 /* Save layer specific state */
5155 if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi);
5157 vpx_usec_timer_mark(&cmptimer);
5158 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5160 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
5161 generate_psnr_packet(cpi);
5164 #if CONFIG_INTERNAL_STATS
5166 if (cpi->pass != 1) {
5167 cpi->bytes += *size;
5169 if (cm->show_frame) {
5170 cpi->common.show_frame_mi = cpi->common.mi;
5173 if (cpi->b_calculate_psnr) {
5174 uint64_t ye, ue, ve;
5176 YV12_BUFFER_CONFIG *orig = cpi->Source;
5177 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5178 unsigned int y_width = cpi->common.Width;
5179 unsigned int y_height = cpi->common.Height;
5180 unsigned int uv_width = (y_width + 1) / 2;
5181 unsigned int uv_height = (y_height + 1) / 2;
5182 int y_samples = y_height * y_width;
5183 int uv_samples = uv_height * uv_width;
5184 int t_samples = y_samples + 2 * uv_samples;
5187 ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
5188 recon->y_stride, y_width, y_height);
5190 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
5191 recon->uv_stride, uv_width, uv_height);
5193 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
5194 recon->uv_stride, uv_width, uv_height);
5196 sq_error = (double)(ye + ue + ve);
5198 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5200 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5201 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5202 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5203 cpi->total_sq_error += sq_error;
5204 cpi->total += frame_psnr;
5207 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5209 double frame_psnr2, frame_ssim2 = 0;
5212 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
5213 cm->filter_level * 10 / 6, 1, 0);
5214 vpx_clear_system_state();
5216 ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
5217 pp->y_stride, y_width, y_height);
5219 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
5220 pp->uv_stride, uv_width, uv_height);
5222 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
5223 pp->uv_stride, uv_width, uv_height);
5225 sq_error2 = (double)(ye + ue + ve);
5227 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5229 cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5230 cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5231 cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5232 cpi->total_sq_error2 += sq_error2;
5233 cpi->totalp += frame_psnr2;
5236 vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);
5238 cpi->summed_quality += frame_ssim2 * weight;
5239 cpi->summed_weights += weight;
5241 if (cpi->oxcf.number_of_layers > 1) {
5244 for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
5245 cpi->frames_in_layer[i]++;
5247 cpi->bytes_in_layer[i] += *size;
5248 cpi->sum_psnr[i] += frame_psnr;
5249 cpi->sum_psnr_p[i] += frame_psnr2;
5250 cpi->total_error2[i] += sq_error;
5251 cpi->total_error2_p[i] += sq_error2;
5252 cpi->sum_ssim[i] += frame_ssim2 * weight;
5253 cpi->sum_weights[i] += weight;
5264 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5266 skiptruecount += cpi->skip_true_count;
5267 skipfalsecount += cpi->skip_false_count;
5275 FILE *f = fopen("skip.stt", "a");
5276 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5278 if (cpi->is_src_frame_alt_ref == 1)
5279 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5287 cpi->common.error.setjmp = 0;
5289 #if CONFIG_MULTITHREAD
5290 /* wait for the lpf thread done */
5291 if (cpi->b_multi_threaded && cpi->b_lpf_running) {
5292 sem_wait(&cpi->h_event_end_lpf);
5293 cpi->b_lpf_running = 0;
5300 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
5301 vp8_ppflags_t *flags) {
5302 if (cpi->common.refresh_alt_ref_frame) {
5308 cpi->common.show_frame_mi = cpi->common.mi;
5309 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5313 if (cpi->common.frame_to_show) {
5314 *dest = *cpi->common.frame_to_show;
5315 dest->y_width = cpi->common.Width;
5316 dest->y_height = cpi->common.Height;
5317 dest->uv_height = cpi->common.Height / 2;
5324 vpx_clear_system_state();
/* Configure region-of-interest coding via the segmentation feature.
 *
 * map        - per-macroblock segment ids (rows * cols bytes)
 * rows, cols - dimensions of the map; must match the coded frame in MBs
 * delta_q    - per-segment quantizer deltas (external Q scale, range +/-63)
 * delta_lf   - per-segment loop-filter deltas (range +/-63)
 * threshold  - per-segment encode-breakout thresholds
 *
 * NOTE(review): elided dump — the `return -1;` bodies of the validation
 * branches, the early-exit when `map` is NULL (presumably around line 5359),
 * the loop variable declaration and the final `return 0;` are not visible.
 */
5329 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5330                    unsigned int cols, int delta_q[4], int delta_lf[4],
5331                    unsigned int threshold[4]) {
5332   signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5333   int internal_delta_q[MAX_MB_SEGMENTS];
/* Both q and lf deltas are clamped to the codec's +/-63 delta range. */
5334   const int range = 63;
5337   // This method is currently incompatible with the cyclic refresh method
5338   if (cpi->cyclic_refresh_mode_enabled) return -1;
5340   // Check number of rows and columns match
5341   if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
5345   // Range check the delta Q values and convert the external Q range values
5346   // to internal ones.
5347   if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
5348       (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) {
5352   // Range check the delta lf values
5353   if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
5354       (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) {
/* Presumably reached when `map` is NULL: turn the feature off entirely. */
5359     disable_segmentation(cpi);
5363   // Translate the external delta q values to internal values.
/* q_trans[] maps the external 0-63 Q scale onto internal qindex; the sign
 * of the delta is applied after the table lookup. */
5364   for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
5365     internal_delta_q[i] =
5366         (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
5369   /* Set the segmentation Map */
5370   set_segmentation_map(cpi, map);
5372   /* Activate segmentation. */
5373   enable_segmentation(cpi);
5375   /* Set up the quant segment data */
5376   feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5377   feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5378   feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5379   feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5381   /* Set up the loop segment data s */
5382   feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5383   feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5384   feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5385   feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
/* Per-segment encode-breakout: skip-block SSE thresholds used by the
 * mode-decision code. */
5387   cpi->segment_encode_breakout[0] = threshold[0];
5388   cpi->segment_encode_breakout[1] = threshold[1];
5389   cpi->segment_encode_breakout[2] = threshold[2];
5390   cpi->segment_encode_breakout[3] = threshold[3];
5392   /* Initialise the feature data structure */
/* DELTADATA: values are deltas from the frame baseline, not absolutes. */
5393   set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
/* Install a per-macroblock active map (1 = encode, 0 = skip/static).
 *
 * The map is only accepted when its dimensions match the coded frame in
 * macroblocks; otherwise the feature is disabled.
 * NOTE(review): elided dump — the else/closing braces and the return
 * statements (presumably 0 on success, -1 on mismatch) are not visible.
 */
5398 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5399                        unsigned int cols) {
5400   if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
/* rows * cols bytes, one flag per macroblock. */
5402     memcpy(cpi->active_map, map, rows * cols);
5403     cpi->active_map_enabled = 1;
/* Dimension mismatch: leave the previous map unused. */
5405     cpi->active_map_enabled = 0;
/* Set internal spatial-resampling factors for the encoder.
 *
 * Each mode is a VPX_SCALING ratio; values up to ONETWO (one-half) are
 * accepted independently for the horizontal and vertical direction.
 * NOTE(review): elided dump — the rejection branches (presumably
 * `return -1;` for out-of-range modes) and the final return are not
 * visible here.
 */
5414 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
5415                           VPX_SCALING vert_mode) {
5416   if (horiz_mode <= ONETWO) {
5417     cpi->common.horiz_scale = horiz_mode;
5422   if (vert_mode <= ONETWO) {
5423     cpi->common.vert_scale = vert_mode;
/* Sum-of-squared-error between the Y planes of two frames, accumulated in
 * 16x16 macroblock tiles via the optimized vpx_mse16x16 kernel.
 *
 * NOTE(review): elided dump — the declarations of the accumulator (`Total`)
 * and loop counters (`i`, `j`), the `&sse` out-argument line of the
 * vpx_mse16x16 call, the closing braces and the final `return Total;` are
 * not visible here.  Dimensions are assumed to be multiples of 16, as the
 * codec's padded buffers guarantee.
 */
5431 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
5435   unsigned char *src = source->y_buffer;
5436   unsigned char *dst = dest->y_buffer;
5438   /* Loop through the Y plane raw and reconstruction data summing
5439    * (square differences)
/* Row loop steps one macroblock (16 luma rows) at a time. */
5441   for (i = 0; i < source->y_height; i += 16) {
5442     for (j = 0; j < source->y_width; j += 16) {
5444       Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
/* Advance both pointers by 16 rows using each frame's own stride. */
5448     src += 16 * source->y_stride;
5449     dst += 16 * dest->y_stride;
5455 int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }