/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "vp8/common/onyxc_int.h"
16 #include "vp8/common/blockd.h"
18 #include "vp8/common/systemdependent.h"
19 #include "vp8/encoder/quantize.h"
20 #include "vp8/common/alloccommon.h"
22 #include "firstpass.h"
23 #include "vpx_dsp/psnr.h"
24 #include "vpx_scale/vpx_scale.h"
25 #include "vp8/common/extend.h"
27 #include "vp8/common/quant_common.h"
28 #include "segmentation.h"
30 #include "vp8/common/postproc.h"
32 #include "vpx_mem/vpx_mem.h"
33 #include "vp8/common/reconintra.h"
34 #include "vp8/common/swapyv12buffer.h"
35 #include "vp8/common/threading.h"
36 #include "vpx_ports/vpx_timer.h"
38 #include "vpx_ports/arm.h"
40 #if CONFIG_MULTI_RES_ENCODING
41 #include "mr_dissim.h"
43 #include "encodeframe.h"
49 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
50 extern int vp8_update_coef_context(VP8_COMP *cpi);
51 extern void vp8_update_coef_probs(VP8_COMP *cpi);
54 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
55 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
56 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
58 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
59 YV12_BUFFER_CONFIG *post, int filt_lvl,
60 int low_var_thresh, int flag);
61 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
62 extern unsigned int vp8_get_processor_freq();
63 extern void print_tree_update_probs();
64 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
65 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
67 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
69 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
71 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
73 static void set_default_lf_deltas(VP8_COMP *cpi);
75 extern const int vp8_gf_interval_table[101];
77 #if CONFIG_INTERNAL_STATS
79 #include "vpx_dsp/ssim.h"
85 #ifdef OUTPUT_YUV_DENOISED
86 FILE *yuv_denoised_file;
96 extern int skip_true_count;
97 extern int skip_false_count;
100 #ifdef VP8_ENTROPY_STATS
101 extern int intra_mode_stats[10][10][10];
/* Per-speed-setting frame counter: frames_at_speed[s] is incremented in
 * vp8_set_speed_features() each time a frame is coded at speed s.
 * tot_pm/cnt_pm and tot_ef/cnt_ef are running totals and counts; presumably
 * pick-mode and encode-frame timing stats -- TODO(review) confirm against
 * the code that updates them (not visible in this chunk). */
unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                     0, 0, 0, 0, 0, 0, 0, 0 };
unsigned int tot_pm = 0;
unsigned int cnt_pm = 0;
unsigned int tot_ef = 0;
unsigned int cnt_ef = 0;
114 extern unsigned __int64 Sectionbits[50];
115 extern int y_modes[5];
116 extern int uv_modes[4];
117 extern int b_modes[10];
119 extern int inter_y_modes[10];
120 extern int inter_uv_modes[4];
121 extern unsigned int inter_b_modes[15];
124 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
126 extern const int qrounding_factors[129];
127 extern const int qzbin_factors[129];
128 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
129 extern const int vp8cx_base_skip_false_prob[128];
131 /* Tables relating active max Q to active min Q */
132 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
136 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
137 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
138 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
139 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
141 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
142 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
144 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
145 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
146 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
147 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
148 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
150 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
151 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
152 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
153 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
154 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
155 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
156 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
157 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
159 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
160 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
161 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
162 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
163 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
164 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
165 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
166 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
168 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
169 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
170 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
171 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
172 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
173 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
174 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
175 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
177 static const unsigned char inter_minq[QINDEX_RANGE] = {
178 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
179 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
180 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
181 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
182 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
183 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
184 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
187 #ifdef PACKET_TESTING
188 extern FILE *vpxlogc;
191 static void save_layer_context(VP8_COMP *cpi) {
192 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
194 /* Save layer dependent coding state */
195 lc->target_bandwidth = cpi->target_bandwidth;
196 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
197 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
198 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
199 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
200 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
201 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
202 lc->buffer_level = cpi->buffer_level;
203 lc->bits_off_target = cpi->bits_off_target;
204 lc->total_actual_bits = cpi->total_actual_bits;
205 lc->worst_quality = cpi->worst_quality;
206 lc->active_worst_quality = cpi->active_worst_quality;
207 lc->best_quality = cpi->best_quality;
208 lc->active_best_quality = cpi->active_best_quality;
209 lc->ni_av_qi = cpi->ni_av_qi;
210 lc->ni_tot_qi = cpi->ni_tot_qi;
211 lc->ni_frames = cpi->ni_frames;
212 lc->avg_frame_qindex = cpi->avg_frame_qindex;
213 lc->rate_correction_factor = cpi->rate_correction_factor;
214 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
215 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
216 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
217 lc->inter_frame_target = cpi->inter_frame_target;
218 lc->total_byte_count = cpi->total_byte_count;
219 lc->filter_level = cpi->common.filter_level;
221 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
223 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
224 sizeof(cpi->mb.count_mb_ref_frame_usage));
227 static void restore_layer_context(VP8_COMP *cpi, const int layer) {
228 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
230 /* Restore layer dependent coding state */
231 cpi->current_layer = layer;
232 cpi->target_bandwidth = lc->target_bandwidth;
233 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
234 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
235 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
236 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
237 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
238 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
239 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
240 cpi->buffer_level = lc->buffer_level;
241 cpi->bits_off_target = lc->bits_off_target;
242 cpi->total_actual_bits = lc->total_actual_bits;
243 cpi->active_worst_quality = lc->active_worst_quality;
244 cpi->active_best_quality = lc->active_best_quality;
245 cpi->ni_av_qi = lc->ni_av_qi;
246 cpi->ni_tot_qi = lc->ni_tot_qi;
247 cpi->ni_frames = lc->ni_frames;
248 cpi->avg_frame_qindex = lc->avg_frame_qindex;
249 cpi->rate_correction_factor = lc->rate_correction_factor;
250 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
251 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
252 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
253 cpi->inter_frame_target = lc->inter_frame_target;
254 cpi->total_byte_count = lc->total_byte_count;
255 cpi->common.filter_level = lc->filter_level;
257 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
259 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
260 sizeof(cpi->mb.count_mb_ref_frame_usage));
/* Return val * num / denom, computing the intermediate product in 64 bits
 * so that the 32-bit multiplication cannot overflow. The quotient is
 * truncated back to int; callers are expected to pass values whose scaled
 * result fits in an int.
 */
static int rescale(int val, int num, int denom) {
  int64_t llnum = num;
  int64_t llden = denom;
  int64_t llval = val;

  return (int)(llval * llnum / llden);
}
271 static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
273 double prev_layer_framerate) {
274 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
276 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
277 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
279 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
280 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
281 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
283 lc->starting_buffer_level =
284 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
286 if (oxcf->optimal_buffer_level == 0) {
287 lc->optimal_buffer_level = lc->target_bandwidth / 8;
289 lc->optimal_buffer_level =
290 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
293 if (oxcf->maximum_buffer_size == 0) {
294 lc->maximum_buffer_size = lc->target_bandwidth / 8;
296 lc->maximum_buffer_size =
297 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
300 /* Work out the average size of a frame within this layer */
302 lc->avg_frame_size_for_layer =
303 (int)((cpi->oxcf.target_bitrate[layer] -
304 cpi->oxcf.target_bitrate[layer - 1]) *
305 1000 / (lc->framerate - prev_layer_framerate));
308 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
309 lc->active_best_quality = cpi->oxcf.best_allowed_q;
310 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
312 lc->buffer_level = lc->starting_buffer_level;
313 lc->bits_off_target = lc->starting_buffer_level;
315 lc->total_actual_bits = 0;
319 lc->rate_correction_factor = 1.0;
320 lc->key_frame_rate_correction_factor = 1.0;
321 lc->gf_rate_correction_factor = 1.0;
322 lc->inter_frame_target = 0;
325 // Upon a run-time change in temporal layers, reset the layer context parameters
326 // for any "new" layers. For "existing" layers, let them inherit the parameters
327 // from the previous layer state (at the same layer #). In future we may want
328 // to better map the previous layer state(s) to the "new" ones.
329 static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
330 const int prev_num_layers) {
332 double prev_layer_framerate = 0;
333 const int curr_num_layers = cpi->oxcf.number_of_layers;
334 // If the previous state was 1 layer, get current layer context from cpi.
335 // We need this to set the layer context for the new layers below.
336 if (prev_num_layers == 1) {
337 cpi->current_layer = 0;
338 save_layer_context(cpi);
340 for (i = 0; i < curr_num_layers; ++i) {
341 LAYER_CONTEXT *lc = &cpi->layer_context[i];
342 if (i >= prev_num_layers) {
343 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
345 // The initial buffer levels are set based on their starting levels.
346 // We could set the buffer levels based on the previous state (normalized
347 // properly by the layer bandwidths) but we would need to keep track of
348 // the previous set of layer bandwidths (i.e., target_bitrate[i])
349 // before the layer change. For now, reset to the starting levels.
351 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
352 lc->bits_off_target = lc->buffer_level;
353 // TDOD(marpan): Should we set the rate_correction_factor and
354 // active_worst/best_quality to values derived from the previous layer
355 // state (to smooth-out quality dips/rate fluctuation at transition)?
357 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
358 // is not set for 1 layer, and the restore_layer_context/save_context()
359 // are not called in the encoding loop, so we need to call it here to
360 // pass the layer context state to |cpi|.
361 if (curr_num_layers == 1) {
362 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
364 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
365 lc->bits_off_target = lc->buffer_level;
366 restore_layer_context(cpi, 0);
368 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
372 static void setup_features(VP8_COMP *cpi) {
373 // If segmentation enabled set the update flags
374 if (cpi->mb.e_mbd.segmentation_enabled) {
375 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
376 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
378 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
379 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
382 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
383 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
384 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
385 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
386 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
387 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
388 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
389 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
391 set_default_lf_deltas(cpi);
394 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
/* One-time global encoder initialisation. Guarded by a static flag so that
 * repeated calls (one per encoder instance) only run the RTCD and intra
 * predictor setup once per process.
 * NOTE(review): the body of the once-guard was lost in extraction; this
 * reconstruction follows the upstream libvpx form -- confirm against the
 * project's checked-in version.
 */
void vp8_initialize_enc(void) {
  static volatile int init_done = 0;

  if (!init_done) {
    vpx_rtcd();
    vp8_init_intra_predictors();
    init_done = 1;
  }
}
406 static void dealloc_compressor_data(VP8_COMP *cpi) {
407 vpx_free(cpi->tplist);
410 /* Delete last frame MV storage buffers */
414 vpx_free(cpi->lf_ref_frame_sign_bias);
415 cpi->lf_ref_frame_sign_bias = 0;
417 vpx_free(cpi->lf_ref_frame);
418 cpi->lf_ref_frame = 0;
420 /* Delete sementation map */
421 vpx_free(cpi->segmentation_map);
422 cpi->segmentation_map = 0;
424 vpx_free(cpi->active_map);
427 vp8_de_alloc_frame_buffers(&cpi->common);
429 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
430 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
431 dealloc_raw_frame_buffers(cpi);
436 /* Structure used to monitor GF usage */
437 vpx_free(cpi->gf_active_flags);
438 cpi->gf_active_flags = 0;
440 /* Activity mask based per mb zbin adjustments */
441 vpx_free(cpi->mb_activity_map);
442 cpi->mb_activity_map = 0;
444 vpx_free(cpi->mb.pip);
447 #if CONFIG_MULTITHREAD
448 vpx_free(cpi->mt_current_mb_col);
449 cpi->mt_current_mb_col = NULL;
453 static void enable_segmentation(VP8_COMP *cpi) {
454 /* Set the appropriate feature bit */
455 cpi->mb.e_mbd.segmentation_enabled = 1;
456 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
457 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
459 static void disable_segmentation(VP8_COMP *cpi) {
460 /* Clear the appropriate feature bit */
461 cpi->mb.e_mbd.segmentation_enabled = 0;
464 /* Valid values for a segment are 0 to 3
465 * Segmentation map is arrange as [Rows][Columns]
467 static void set_segmentation_map(VP8_COMP *cpi,
468 unsigned char *segmentation_map) {
469 /* Copy in the new segmentation map */
470 memcpy(cpi->segmentation_map, segmentation_map,
471 (cpi->common.mb_rows * cpi->common.mb_cols));
473 /* Signal that the map should be updated. */
474 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
475 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
478 /* The values given for each segment can be either deltas (from the default
479 * value chosen for the frame) or absolute values.
481 * Valid range for abs values is:
482 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
483 * Valid range for delta values are:
484 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
486 * abs_delta = SEGMENT_DELTADATA (deltas)
487 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
490 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
491 unsigned char abs_delta) {
492 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
493 memcpy(cpi->segment_feature_data, feature_data,
494 sizeof(cpi->segment_feature_data));
497 /* A simple function to cyclically refresh the background at a lower Q */
498 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
499 unsigned char *seg_map = cpi->segmentation_map;
500 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
502 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
503 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
505 cpi->cyclic_refresh_q = Q / 2;
507 if (cpi->oxcf.screen_content_mode) {
508 // Modify quality ramp-up based on Q. Above some Q level, increase the
509 // number of blocks to be refreshed, and reduce it below the thredhold.
510 // Turn-off under certain conditions (i.e., away from key frame, and if
511 // we are at good quality (low Q) and most of the blocks were
513 // in previous frame.
514 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
515 if (Q >= qp_thresh) {
516 cpi->cyclic_refresh_mode_max_mbs_perframe =
517 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
518 } else if (cpi->frames_since_key > 250 && Q < 20 &&
519 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
520 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
522 cpi->cyclic_refresh_mode_max_mbs_perframe =
523 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
525 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
528 // Set every macroblock to be eligible for update.
529 // For key frame this will reset seg map to 0.
530 memset(cpi->segmentation_map, 0, mbs_in_frame);
532 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
533 /* Cycle through the macro_block rows */
534 /* MB loop to set local segmentation map */
535 i = cpi->cyclic_refresh_mode_index;
536 assert(i < mbs_in_frame);
538 /* If the MB is as a candidate for clean up then mark it for
539 * possible boost/refresh (segment 1) The segment id may get
540 * reset to 0 later if the MB gets coded anything other than
541 * last frame 0,0 as only (last frame 0,0) MBs are eligable for
542 * refresh : that is to say Mbs likely to be background blocks.
544 if (cpi->cyclic_refresh_map[i] == 0) {
547 } else if (cpi->cyclic_refresh_map[i] < 0) {
548 cpi->cyclic_refresh_map[i]++;
552 if (i == mbs_in_frame) i = 0;
554 } while (block_count && i != cpi->cyclic_refresh_mode_index);
556 cpi->cyclic_refresh_mode_index = i;
558 #if CONFIG_TEMPORAL_DENOISING
559 if (cpi->oxcf.noise_sensitivity > 0) {
560 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
561 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
562 (cpi->frames_since_key >
563 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
564 // Under aggressive denoising, use segmentation to turn off loop
565 // filter below some qp thresh. The filter is reduced for all
566 // blocks that have been encoded as ZEROMV LAST x frames in a row,
567 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
568 // This is to avoid "dot" artifacts that can occur from repeated
569 // loop filtering on noisy input source.
570 cpi->cyclic_refresh_q = Q;
571 // lf_adjustment = -MAX_LOOP_FILTER;
573 for (i = 0; i < mbs_in_frame; ++i) {
574 seg_map[i] = (cpi->consec_zero_last[i] >
575 cpi->denoiser.denoise_pars.consec_zerolast)
584 /* Activate segmentation. */
585 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
586 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
587 enable_segmentation(cpi);
589 /* Set up the quant segment data */
590 feature_data[MB_LVL_ALT_Q][0] = 0;
591 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
592 feature_data[MB_LVL_ALT_Q][2] = 0;
593 feature_data[MB_LVL_ALT_Q][3] = 0;
595 /* Set up the loop segment data */
596 feature_data[MB_LVL_ALT_LF][0] = 0;
597 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
598 feature_data[MB_LVL_ALT_LF][2] = 0;
599 feature_data[MB_LVL_ALT_LF][3] = 0;
601 /* Initialise the feature data structure */
602 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
605 static void set_default_lf_deltas(VP8_COMP *cpi) {
606 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
607 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
609 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
610 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
612 /* Test of ref frame deltas */
613 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
614 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
615 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
616 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
618 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
620 if (cpi->oxcf.Mode == MODE_REALTIME) {
621 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
623 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
626 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
627 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
630 /* Convenience macros for mapping speed and mode into a continuous
633 #define GOOD(x) (x + 1)
634 #define RT(x) (x + 7)
636 static int speed_map(int speed, const int *map) {
641 } while (speed >= *map++);
645 static const int thresh_mult_map_znn[] = {
646 /* map common to zero, nearest, and near */
647 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
650 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
651 2000, RT(0), 1000, RT(1),
652 2000, RT(7), INT_MAX, INT_MAX };
654 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
655 5000, GOOD(3), 7500, RT(0),
656 2500, RT(1), 5000, RT(6),
659 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
660 2000, RT(0), 0, RT(1),
661 1000, RT(2), 2000, RT(7),
664 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
665 RT(0), 2000, INT_MAX };
667 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
668 2500, GOOD(5), 4000, RT(0),
669 2000, RT(2), 2500, RT(5),
672 static const int thresh_mult_map_split1[] = {
673 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
674 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
677 static const int thresh_mult_map_split2[] = {
678 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
679 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
682 static const int mode_check_freq_map_zn2[] = {
683 /* {zero,nearest}{2,3} */
684 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
687 static const int mode_check_freq_map_vhbpred[] = {
688 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
691 static const int mode_check_freq_map_near2[] = {
692 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
693 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
696 static const int mode_check_freq_map_new1[] = {
697 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
700 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
702 1 << 3, RT(11), 1 << 4, RT(12),
705 static const int mode_check_freq_map_split1[] = {
706 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
709 static const int mode_check_freq_map_split2[] = {
710 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
/* Configure the encoder's speed/quality trade-off settings (cpi->sf) from
 * the compressor mode and Speed, then bind the function pointers (FDCT,
 * quantizer, fractional-MV search) that those settings select.
 * NOTE(review): this chunk appears to have lost lines in extraction --
 * braces are unbalanced and `i`, `ref_frames`, `min`, `thresh`, and the
 * mode switch skeleton are referenced without visible declarations.
 * Compare against the upstream file before modifying; code below is kept
 * byte-for-byte as found. */
void vp8_set_speed_features(VP8_COMP *cpi) {
  SPEED_FEATURES *sf = &cpi->sf;
  int Mode = cpi->compressor_speed;
  int Speed = cpi->Speed;
  VP8_COMMON *cm = &cpi->common;
  /* Remember prior setting so the quantizer tables are only rebuilt below
   * when improved_quant actually changes. */
  int last_improved_quant = sf->improved_quant;
  /* Initialise default mode frequency sampling variables */
  for (i = 0; i < MAX_MODES; ++i) {
    cpi->mode_check_freq[i] = 0;
  cpi->mb.mbs_tested_so_far = 0;
  cpi->mb.mbs_zero_last_dot_suppress = 0;
  /* best quality defaults */
  sf->search_method = NSTEP;
  sf->improved_quant = 1;
  sf->improved_dct = 1;
  sf->quarter_pixel_search = 1;
  sf->half_pixel_search = 1;
  sf->iterative_sub_pixel = 1;
  sf->optimize_coefficients = 1;
  sf->use_fastquant_for_pick = 0;
  sf->no_skip_block4x4_search = 1;
  sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
  sf->improved_mv_pred = 1;
  /* default thresholds to 0 */
  for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
  /* Count enabled references */
  if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
  if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
  if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
  /* Convert speed to continuous range, with clamping */
  } else if (Mode == 2) {
    if (Speed > 5) Speed = 5;
  /* Interpolate mode-decision thresholds from the speed_map tables. */
  sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
      sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
  sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
          sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
              speed_map(Speed, thresh_mult_map_znn);
  sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
      speed_map(Speed, thresh_mult_map_vhpred);
  sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
  sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
  sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
  sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
      speed_map(Speed, thresh_mult_map_new2);
  sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
  sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
      speed_map(Speed, thresh_mult_map_split2);
  // Special case for temporal layers.
  // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
  // used as second reference. We don't modify thresholds for ALTREF case
  // since ALTREF is usually used as long-term reference in temporal layers.
  if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
      (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
      (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
    if (cpi->closest_reference_frame == GOLDEN_FRAME) {
      sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
      sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
      sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
      sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
      sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
  /* Mode-check frequencies: how often (in MBs) each mode is re-examined. */
  cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
      cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
          cpi->mode_check_freq[THR_DC] = 0; /* always */
  cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
      cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
          speed_map(Speed, mode_check_freq_map_zn2);
  cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
      speed_map(Speed, mode_check_freq_map_near2);
  cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
      cpi->mode_check_freq[THR_B_PRED] =
          speed_map(Speed, mode_check_freq_map_vhbpred);
  cpi->mode_check_freq[THR_NEW1] = speed_map(Speed, mode_check_freq_map_new1);
  cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
      speed_map(Speed, mode_check_freq_map_new2);
  cpi->mode_check_freq[THR_SPLIT1] =
      speed_map(Speed, mode_check_freq_map_split1);
  cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
      speed_map(Speed, mode_check_freq_map_split2);
#if !CONFIG_REALTIME_ONLY
    case 0: /* best quality mode */
      sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
        /* Disable coefficient optimization above speed 0 */
        sf->optimize_coefficients = 0;
        sf->use_fastquant_for_pick = 1;
        sf->no_skip_block4x4_search = 0;
        sf->improved_quant = 0;
        sf->improved_dct = 0;
      /* Only do recode loop on key frames, golden frames and
        sf->recode_loop = 0; /* recode loop off */
        sf->RD = 0; /* Turn rd off */
        sf->auto_filter = 0; /* Faster selection of loop filter */
      sf->optimize_coefficients = 0;
      sf->iterative_sub_pixel = 1;
      sf->search_method = NSTEP;
        sf->improved_quant = 0;
        sf->improved_dct = 0;
        sf->use_fastquant_for_pick = 1;
        sf->no_skip_block4x4_search = 0;
      if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
        sf->auto_filter = 0; /* Faster selection of loop filter */
        sf->search_method = HEX;
        sf->iterative_sub_pixel = 0;
        /* Adapt the encode-breakout threshold from the error-bin histogram
         * collected on previous frames. */
        unsigned int sum = 0;
        unsigned int total_mbs = cm->MBs;
        unsigned int total_skip;
        if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
        for (i = 0; i < min; ++i) {
          sum += cpi->mb.error_bins[i];
        /* i starts from 2 to make sure thresh started from 2048 */
        for (; i < 1024; ++i) {
          sum += cpi->mb.error_bins[i];
            (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
        if (thresh < 2000) thresh = 2000;
        if (ref_frames > 1) {
          sf->thresh_mult[THR_NEW1] = thresh;
          sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
          sf->thresh_mult[THR_NEAR1] = thresh >> 1;
        if (ref_frames > 2) {
          sf->thresh_mult[THR_NEW2] = thresh << 1;
          sf->thresh_mult[THR_NEAREST2] = thresh;
          sf->thresh_mult[THR_NEAR2] = thresh;
        if (ref_frames > 3) {
          sf->thresh_mult[THR_NEW3] = thresh << 1;
          sf->thresh_mult[THR_NEAREST3] = thresh;
          sf->thresh_mult[THR_NEAR3] = thresh;
        sf->improved_mv_pred = 0;
      if (Speed > 8) sf->quarter_pixel_search = 0;
      if (cm->version == 0) {
        cm->filter_type = NORMAL_LOOPFILTER;
        if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
        cm->filter_type = SIMPLE_LOOPFILTER;
      /* This has a big hit on quality. Last resort */
      if (Speed >= 15) sf->half_pixel_search = 0;
      memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
  /* Slow quant, dct and trellis not worthwhile for first pass
   * so make sure they are always turned off.
  if (cpi->pass == 1) {
    sf->improved_quant = 0;
    sf->optimize_coefficients = 0;
    sf->improved_dct = 0;
  /* Bind the motion-search initialiser matching the chosen search method. */
  if (cpi->sf.search_method == NSTEP) {
    vp8_init3smotion_compensation(&cpi->mb,
                                  cm->yv12_fb[cm->lst_fb_idx].y_stride);
  } else if (cpi->sf.search_method == DIAMOND) {
    vp8_init_dsmotion_compensation(&cpi->mb,
                                   cm->yv12_fb[cm->lst_fb_idx].y_stride);
  if (cpi->sf.improved_dct) {
    cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
    cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
    /* No fast FDCT defined for any platform at this time. */
    cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
    cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
  cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
  if (cpi->sf.improved_quant) {
    cpi->mb.quantize_b = vp8_regular_quantize_b;
    cpi->mb.quantize_b = vp8_fast_quantize_b;
  /* Rebuild quantizer tables only if the quantizer choice changed. */
  if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
  /* Select the fractional-MV refinement routine, most precise first. */
  if (cpi->sf.iterative_sub_pixel == 1) {
    cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
  } else if (cpi->sf.quarter_pixel_search) {
    cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
  } else if (cpi->sf.half_pixel_search) {
    cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
    cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
  if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
    cpi->mb.optimize = 1;
    cpi->mb.optimize = 0;
  /* Full-pixel-only coding overrides any sub-pixel search choice. */
  if (cpi->common.full_pixel) {
    cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
  frames_at_speed[cpi->Speed]++;
1028 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1029 #if VP8_TEMPORAL_ALT_REF
1030 int width = (cpi->oxcf.Width + 15) & ~15;
1031 int height = (cpi->oxcf.Height + 15) & ~15;
1034 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1035 cpi->oxcf.lag_in_frames);
1036 if (!cpi->lookahead) {
1037 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1038 "Failed to allocate lag buffers");
1041 #if VP8_TEMPORAL_ALT_REF
1043 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1044 VP8BORDERINPIXELS)) {
1045 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1046 "Failed to allocate altref buffer");
1052 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1053 #if VP8_TEMPORAL_ALT_REF
1054 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1056 vp8_lookahead_destroy(cpi->lookahead);
1059 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1060 vpx_free(cpi->mb.pip);
1063 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1064 sizeof(PARTITION_INFO));
1065 if (!cpi->mb.pip) return 1;
1067 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
/* Allocate (or reallocate) the compressor's per-frame working storage:
 * reference/working frame buffers, partition info, the token buffer,
 * GF-usage monitors, MV-prediction arrays, segmentation/active maps,
 * multithread sync state and (optionally) the temporal denoiser.
 * All failures are reported via vpx_internal_error().
 * NOTE(review): this extract is missing some closing braces and
 * #else/#endif lines between the visible statements.
 */
void vp8_alloc_compressor_data(VP8_COMP *cpi) {
  VP8_COMMON *cm = &cpi->common;

  int width = cm->Width;
  int height = cm->Height;

  if (vp8_alloc_frame_buffers(cm, width, height)) {
    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffers");

  if (vp8_alloc_partition_data(cpi)) {
    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate partition data");

  /* Round dimensions up to whole macroblocks before sizing scratch frames. */
  if ((width & 0xf) != 0) width += 16 - (width & 0xf);

  if ((height & 0xf) != 0) height += 16 - (height & 0xf);

  if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
                                  VP8BORDERINPIXELS)) {
    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate last frame buffer");

  if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
                                  VP8BORDERINPIXELS)) {
    vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate scaled source buffer");

  /* Token buffer: fixed small size for on-the-fly packing, otherwise
   * sized for the whole frame (24 tokens per 16 coeffs per MB). */
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
  unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
  CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));

  /* Data used for real time vc mode to see if gf needs refreshing */
  cpi->zeromv_count = 0;

  /* Structures used to monitor GF usage */
  vpx_free(cpi->gf_active_flags);
      cpi->gf_active_flags,
      vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
  cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

  vpx_free(cpi->mb_activity_map);
      cpi->mb_activity_map,
      vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));

  /* allocate memory for storing last frame's MVs for MV prediction. */
  vpx_free(cpi->lfmv);
  CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
                                        sizeof(*cpi->lfmv)));
  vpx_free(cpi->lf_ref_frame_sign_bias);
  CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
                  vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
                             sizeof(*cpi->lf_ref_frame_sign_bias)));
  vpx_free(cpi->lf_ref_frame);
  CHECK_MEM_ERROR(cpi->lf_ref_frame,
                  vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
                             sizeof(*cpi->lf_ref_frame)));

  /* Create the encoder segmentation map and set all entries to 0 */
  vpx_free(cpi->segmentation_map);
      cpi->segmentation_map,
      vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
  cpi->cyclic_refresh_mode_index = 0;
  vpx_free(cpi->active_map);
  CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
                                              sizeof(*cpi->active_map)));
  /* All macroblocks start out active. */
  memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));

#if CONFIG_MULTITHREAD
  /* Row-thread sync interval widens with frame width (values 1/4/8/16). */
  cpi->mt_sync_range = 1;
  } else if (width <= 1280) {
    cpi->mt_sync_range = 4;
  } else if (width <= 2560) {
    cpi->mt_sync_range = 8;
    cpi->mt_sync_range = 16;

  if (cpi->oxcf.multi_threaded > 1) {
    vpx_free(cpi->mt_current_mb_col);
    CHECK_MEM_ERROR(cpi->mt_current_mb_col,
                    vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));

  vpx_free(cpi->tplist);
  CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));

#if CONFIG_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity > 0) {
    vp8_denoiser_free(&cpi->denoiser);
    if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
                              cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
      vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                         "Failed to allocate denoiser");
/* Mapping from the 0-63 quantizer range exposed through the public API
 * to the codec's internal 0-127 quantizer index.
 */
static const int q_trans[] = {
  0,  1,  2,  3,  4,  5,  7,  8,  9,  10, 12, 13, 15, 17, 18, 19,
  20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
  43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
  82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
};

/* Invert q_trans: return the smallest external q whose internal index
 * is at least x.  Saturates to 63 for inputs above the table's range.
 */
int vp8_reverse_trans(int x) {
  int q;

  for (q = 0; q < 64; ++q) {
    if (q_trans[q] >= x) return q;
  }

  return 63;
}
1205 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1206 if (framerate < .1) framerate = 30;
1208 cpi->framerate = framerate;
1209 cpi->output_framerate = framerate;
1210 cpi->per_frame_bandwidth =
1211 (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1212 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1213 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1214 cpi->oxcf.two_pass_vbrmin_section / 100);
1216 /* Set Maximum gf/arf interval */
1217 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1219 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1221 /* Extended interval for genuinely static scenes */
1222 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1224 /* Special conditions when altr ref frame enabled in lagged compress mode */
1225 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1226 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1227 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1230 if (cpi->twopass.static_scene_max_gf_interval >
1231 cpi->oxcf.lag_in_frames - 1) {
1232 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1236 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1237 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
/* One-time configuration at compressor creation: record the bitstream
 * version, guess an initial frame rate from the timebase, set default
 * reference-frame refresh flags, then delegate shared configuration to
 * vp8_change_config() and seed rate-control/buffer state from it.
 * NOTE(review): this extract drops a few lines; in particular the
 * assignment head before the timebase division below is missing.
 */
static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
  VP8_COMMON *cm = &cpi->common;

  cpi->auto_adjust_gold_quantizer = 1;

  cm->version = oxcf->Version;
  vp8_setup_version(cm);

  /* Frame rate is not available on the first frame, as it's derived from
   * the observed timestamps. The actual value used here doesn't matter
   * too much, as it will adapt quickly.
   */
  if (oxcf->timebase.num > 0) {
    (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
    cpi->framerate = 30;

  /* If the reciprocal of the timebase seems like a reasonable framerate,
   * then use that as a guess, otherwise use 30.
   */
  if (cpi->framerate > 180) cpi->framerate = 30;

  cpi->ref_framerate = cpi->framerate;

  cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;

  cm->refresh_golden_frame = 0;
  cm->refresh_last_frame = 1;
  cm->refresh_entropy_probs = 1;

  /* change includes all joint functionality */
  vp8_change_config(cpi, oxcf);

  /* Initialize active best and worst q and average q values. */
  cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
  cpi->active_best_quality = cpi->oxcf.best_allowed_q;
  cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;

  /* Initialise the starting buffer levels */
  cpi->buffer_level = cpi->oxcf.starting_buffer_level;
  cpi->bits_off_target = cpi->oxcf.starting_buffer_level;

  cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
  cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
  cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
  cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;

  cpi->total_actual_bits = 0;
  cpi->total_target_vs_actual = 0;

  /* Temporal scalabilty */
  if (cpi->oxcf.number_of_layers > 1) {
    double prev_layer_framerate = 0;

    for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
      init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
      prev_layer_framerate =
          cpi->output_framerate / cpi->oxcf.rate_decimator[i];

#if VP8_TEMPORAL_ALT_REF
  /* Precomputed reciprocal table used by the temporal filter. */
  cpi->fixed_divide[0] = 0;

  for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
/* Refresh the per-temporal-layer snapshots (frame rate, bandwidth and
 * buffer levels) after a configuration change.  Only relevant when more
 * than one temporal layer is configured.
 * NOTE(review): the else-branch lines for the zero buffer-level cases
 * are not visible in this extract.
 */
static void update_layer_contexts(VP8_COMP *cpi) {
  VP8_CONFIG *oxcf = &cpi->oxcf;

  /* Update snapshots of the layer contexts to reflect new parameters */
  if (oxcf->number_of_layers > 1) {
    double prev_layer_framerate = 0;

    assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
    for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
      LAYER_CONTEXT *lc = &cpi->layer_context[i];

      lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
      lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;

      lc->starting_buffer_level = rescale(
          (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);

      /* Zero optimal level means "derive from bandwidth" (bw/8). */
      if (oxcf->optimal_buffer_level == 0) {
        lc->optimal_buffer_level = lc->target_bandwidth / 8;
        lc->optimal_buffer_level = rescale(
            (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);

      if (oxcf->maximum_buffer_size == 0) {
        lc->maximum_buffer_size = lc->target_bandwidth / 8;
        lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
                                          lc->target_bandwidth, 1000);

      /* Work out the average size of a frame within this layer */
      lc->avg_frame_size_for_layer =
          (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
                1000 / (lc->framerate - prev_layer_framerate));

      prev_layer_framerate = lc->framerate;
/* Apply a new (or changed) encoder configuration to a live compressor:
 * clamps cpu_used per mode, translates external quantizer values through
 * q_trans, rescales buffer levels against the target bandwidth, resets
 * temporal-layer state when the layer count changes, and reallocates
 * frame buffers when the coded size changes.
 * NOTE(review): this extract drops many closing braces, case labels and
 * else keywords between the visible statements.
 */
void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
  VP8_COMMON *cm = &cpi->common;

  unsigned int prev_number_of_layers;

  if (cm->version != oxcf->Version) {
    cm->version = oxcf->Version;
    vp8_setup_version(cm);

  /* Remember previous geometry/layering so changes can be detected below. */
  last_w = cpi->oxcf.Width;
  last_h = cpi->oxcf.Height;
  prev_number_of_layers = cpi->oxcf.number_of_layers;

  /* Map the requested mode onto a compressor speed and clamp cpu_used to
   * the range each mode supports. */
  switch (cpi->oxcf.Mode) {
      cpi->compressor_speed = 2;

      if (cpi->oxcf.cpu_used < -16) {
        cpi->oxcf.cpu_used = -16;

      if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;

    case MODE_GOODQUALITY:
      cpi->compressor_speed = 1;

      if (cpi->oxcf.cpu_used < -5) {
        cpi->oxcf.cpu_used = -5;

      if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;

    case MODE_BESTQUALITY:
      cpi->compressor_speed = 0;

    case MODE_FIRSTPASS:
      cpi->compressor_speed = 1;

    case MODE_SECONDPASS:
      cpi->compressor_speed = 1;

      if (cpi->oxcf.cpu_used < -5) {
        cpi->oxcf.cpu_used = -5;

      if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;

    case MODE_SECONDPASS_BEST:
      cpi->compressor_speed = 0;

  if (cpi->pass == 0) cpi->auto_worst_q = 1;

  /* Translate external 0-63 quantizer values to internal 0-127 indices. */
  cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
  cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
  cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];

  /* Negative per-frame-type q values fall back to index 0. */
  if (oxcf->fixed_q >= 0) {
    if (oxcf->worst_allowed_q < 0) {
      cpi->oxcf.fixed_q = q_trans[0];
      cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];

    if (oxcf->alt_q < 0) {
      cpi->oxcf.alt_q = q_trans[0];
      cpi->oxcf.alt_q = q_trans[oxcf->alt_q];

    if (oxcf->key_q < 0) {
      cpi->oxcf.key_q = q_trans[0];
      cpi->oxcf.key_q = q_trans[oxcf->key_q];

    if (oxcf->gold_q < 0) {
      cpi->oxcf.gold_q = q_trans[0];
      cpi->oxcf.gold_q = q_trans[oxcf->gold_q];

  cpi->baseline_gf_interval =
      cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  cpi->oxcf.token_partitions = 3;

  if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
    cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;

  setup_features(cpi);

  for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
    cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;

  /* At the moment the first order values may not be > MAXQ */
  if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;

  /* local file playback mode == really big buffer */
  if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
    cpi->oxcf.starting_buffer_level = 60000;
    cpi->oxcf.optimal_buffer_level = 60000;
    cpi->oxcf.maximum_buffer_size = 240000;
    cpi->oxcf.starting_buffer_level_in_ms = 60000;
    cpi->oxcf.optimal_buffer_level_in_ms = 60000;
    cpi->oxcf.maximum_buffer_size_in_ms = 240000;

  /* Convert target bandwidth from Kbit/s to Bit/s */
  cpi->oxcf.target_bandwidth *= 1000;

  cpi->oxcf.starting_buffer_level = rescale(
      (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);

  /* Set or reset optimal and maximum buffer levels. */
  if (cpi->oxcf.optimal_buffer_level == 0) {
    cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
    cpi->oxcf.optimal_buffer_level = rescale(
        (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);

  if (cpi->oxcf.maximum_buffer_size == 0) {
    cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
    cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
                                            cpi->oxcf.target_bandwidth, 1000);

  // Under a configuration change, where maximum_buffer_size may change,
  // keep buffer level clipped to the maximum allowed buffer size.
  if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
    cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
    cpi->buffer_level = cpi->bits_off_target;

  /* Set up frame rate and related parameters rate control values. */
  vp8_new_framerate(cpi, cpi->framerate);

  /* Set absolute upper and lower quality limits */
  cpi->worst_quality = cpi->oxcf.worst_allowed_q;
  cpi->best_quality = cpi->oxcf.best_allowed_q;

  /* active values should only be modified if out of new range */
  if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
    cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;

  else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
    cpi->active_worst_quality = cpi->oxcf.best_allowed_q;

  if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
    cpi->active_best_quality = cpi->oxcf.best_allowed_q;

  else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
    cpi->active_best_quality = cpi->oxcf.worst_allowed_q;

  cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;

  cpi->cq_target_quality = cpi->oxcf.cq_level;

  /* Only allow dropped frames in buffered mode */
  cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;

  cpi->target_bandwidth = cpi->oxcf.target_bandwidth;

  // Check if the number of temporal layers has changed, and if so reset the
  // pattern counter and set/initialize the temporal layer context for the
  // new layer configuration.
  if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
    // If the number of temporal layers are changed we must start at the
    // base of the pattern cycle, so set the layer id to 0 and reset
    // the temporal pattern counter.
    if (cpi->temporal_layer_id > 0) {
      cpi->temporal_layer_id = 0;

    cpi->temporal_pattern_counter = 0;
    reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);

  if (!cpi->initial_width) {
    cpi->initial_width = cpi->oxcf.Width;
    cpi->initial_height = cpi->oxcf.Height;

  cm->Width = cpi->oxcf.Width;
  cm->Height = cpi->oxcf.Height;
  assert(cm->Width <= cpi->initial_width);
  assert(cm->Height <= cpi->initial_height);

  /* TODO(jkoleszar): if an internal spatial resampling is active,
   * and we downsize the input image, maybe we should clear the
   * internal scale immediately rather than waiting for it to
   * take effect on a later frame. */

  /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
  if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;

  cm->sharpness_level = cpi->oxcf.Sharpness;

  if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
    int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
    int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);

    Scale2Ratio(cm->horiz_scale, &hr, &hs);
    Scale2Ratio(cm->vert_scale, &vr, &vs);

    /* always go to the next whole number */
    cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
    cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;

  if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
    cpi->force_next_frame_intra = 1;

  /* Reallocate everything when the macroblock-aligned coded size no
   * longer matches the existing reference buffers. */
  if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
      ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
      cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
    dealloc_raw_frame_buffers(cpi);
    alloc_raw_frame_buffers(cpi);
    vp8_alloc_compressor_data(cpi);

  if (cpi->oxcf.fixed_q >= 0) {
    cpi->last_q[0] = cpi->oxcf.fixed_q;
    cpi->last_q[1] = cpi->oxcf.fixed_q;

  cpi->Speed = cpi->oxcf.cpu_used;

  /* force to allowlag to 0 if lag_in_frames is 0; */
  if (cpi->oxcf.lag_in_frames == 0) {
    cpi->oxcf.allow_lag = 0;
  /* Limit on lag buffers as these are not currently dynamically allocated */
  else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
    cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;

  cpi->alt_ref_source = NULL;
  cpi->is_src_frame_alt_ref = 0;

#if CONFIG_TEMPORAL_DENOISING
  /* Lazily create the denoiser the first time noise filtering is enabled. */
  if (cpi->oxcf.noise_sensitivity) {
    if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
      int width = (cpi->oxcf.Width + 15) & ~15;
      int height = (cpi->oxcf.Height + 15) & ~15;
      if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
                                cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
        vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate denoiser");

  /* Experimental RD Code */
  cpi->frame_distortion = 0;
  cpi->last_frame_distortion = 0;
/* NOTE(review): despite its name, M_LOG2_E here holds ln(2) (~0.6931),
 * not log2(e).  The log2f() fallback below divides the natural log by
 * ln(2) — the correct change of base — so the arithmetic is right even
 * though the macro name is misleading. */
#define M_LOG2_E 0.693147180559945309417
/* Fallback log2f() for toolchains whose math library lacks one. */
#define log2f(x) (log(x) / (float)M_LOG2_E)
1664 static void cal_mvsadcosts(int *mvsadcost[2]) {
1667 mvsadcost[0][0] = 300;
1668 mvsadcost[1][0] = 300;
1671 double z = 256 * (2 * (log2f(8 * i) + .6));
1672 mvsadcost[0][i] = (int)z;
1673 mvsadcost[1][i] = (int)z;
1674 mvsadcost[0][-i] = (int)z;
1675 mvsadcost[1][-i] = (int)z;
1676 } while (++i <= mvfp_max);
/* Allocate and fully initialize a VP8 compressor instance: zeroed
 * allocation, error longjmp target, common state, configuration,
 * rate-control seeds, cyclic-refresh maps, optional two-pass input,
 * speed features, encoder threads, SAD/variance function pointers and
 * RD cost tables.  Returns the new instance (NULL paths and the final
 * return are outside this extract).
 * NOTE(review): this extract drops declarations (cpi, cm, i), several
 * closing braces and some #else/#endif lines.
 */
struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
  cpi = vpx_memalign(32, sizeof(VP8_COMP));

  /* Check that the CPI instance is valid */

  memset(cpi, 0, sizeof(VP8_COMP));

  /* Any vpx_internal_error() raised during setup lands here and tears
   * the partially-built compressor down. */
  if (setjmp(cm->error.jmp)) {
    cpi->common.error.setjmp = 0;
    vp8_remove_compressor(&cpi);

  cpi->common.error.setjmp = 1;

  CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
                                         (MAX_MVSEARCH_STEPS * 8) + 1));

  vp8_create_common(&cpi->common);

  init_config(cpi, oxcf);

  memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
         sizeof(vp8cx_base_skip_false_prob));
  cpi->common.current_video_frame = 0;
  cpi->temporal_pattern_counter = 0;
  cpi->temporal_layer_id = -1;
  cpi->kf_overspend_bits = 0;
  cpi->kf_bitrate_adjustment = 0;
  cpi->frames_till_gf_update_due = 0;
  cpi->gf_overspend_bits = 0;
  cpi->non_gf_bitrate_adjustment = 0;
  cpi->prob_last_coded = 128;
  cpi->prob_gf_coded = 128;
  cpi->prob_intra_coded = 63;

  /* Prime the recent reference frame usage counters.
   * Hereafter they will be maintained as a sort of moving average
   */
  cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
  cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
  cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
  cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;

  /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
  cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;

  cpi->twopass.gf_decay_rate = 0;
  cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;

  cpi->gold_is_last = 0;
  cpi->alt_is_last = 0;
  cpi->gold_is_alt = 0;

  cpi->active_map_enabled = 0;

  /* Experimental code for lagged and one pass */
  /* Initialise one_pass GF frames stats */
  /* Update stats used for GF selection */

  cpi->one_pass_frame_index = 0;

  for (i = 0; i < MAX_LAG_BUFFERS; ++i)
    cpi->one_pass_frame_stats[i].frames_so_far = 0;
    cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
    cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
    cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
    cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
    cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
    cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
    cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
    cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;

  cpi->mse_source_denoised = 0;

  /* Should we use the cyclic refresh method.
   * Currently this is tied to error resilliant mode
   */
  cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode;
  cpi->cyclic_refresh_mode_max_mbs_perframe =
      (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
  if (cpi->oxcf.number_of_layers == 1) {
    cpi->cyclic_refresh_mode_max_mbs_perframe =
        (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
  } else if (cpi->oxcf.number_of_layers == 2) {
    cpi->cyclic_refresh_mode_max_mbs_perframe =
        (cpi->common.mb_rows * cpi->common.mb_cols) / 10;

  cpi->cyclic_refresh_mode_index = 0;
  cpi->cyclic_refresh_q = 32;

  if (cpi->cyclic_refresh_mode_enabled) {
    CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
                    vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
    cpi->cyclic_refresh_map = (signed char *)NULL;

  /* Per-MB counters of consecutive zero-motion frames. */
  CHECK_MEM_ERROR(cpi->consec_zero_last,
                  vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
  CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
                  vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));

#ifdef VP8_ENTROPY_STATS
  init_context_counters();

  /*Initialize the feed-forward activity masking.*/
  cpi->activity_avg = 90 << 12;

  /* Give a sensible default for the first frame. */
  cpi->frames_since_key = 8;
  cpi->key_frame_frequency = cpi->oxcf.key_freq;
  cpi->this_key_frame_forced = 0;
  cpi->next_key_frame_forced = 0;

  cpi->source_alt_ref_pending = 0;
  cpi->source_alt_ref_active = 0;
  cpi->common.refresh_alt_ref_frame = 0;

  cpi->force_maxqp = 0;

  cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
#if CONFIG_INTERNAL_STATS
  cpi->b_calculate_ssimg = 0;

  if (cpi->b_calculate_psnr) {
    cpi->total_sq_error = 0.0;
    cpi->total_sq_error2 = 0.0;
    cpi->totalp_y = 0.0;
    cpi->totalp_u = 0.0;
    cpi->totalp_v = 0.0;
    cpi->tot_recode_hits = 0;
    cpi->summed_quality = 0;
    cpi->summed_weights = 0;

  cpi->first_time_stamp_ever = 0x7FFFFFFF;

  cpi->frames_till_gf_update_due = 0;
  cpi->key_frame_count = 1;

  cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
  cpi->total_byte_count = 0;

  cpi->drop_frame = 0;

  cpi->rate_correction_factor = 1.0;
  cpi->key_frame_rate_correction_factor = 1.0;
  cpi->gf_rate_correction_factor = 1.0;
  cpi->twopass.est_max_qcorrection_factor = 1.0;

  for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
    cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;

#ifdef OUTPUT_YUV_SRC
  yuv_file = fopen("bd.yuv", "ab");
#ifdef OUTPUT_YUV_DENOISED
  yuv_denoised_file = fopen("denoised.yuv", "ab");
  framepsnr = fopen("framepsnr.stt", "a");
  kf_list = fopen("kf_list.stt", "w");

  cpi->output_pkt_list = oxcf->output_pkt_list;

#if !CONFIG_REALTIME_ONLY
  /* Two-pass setup: pass 1 collects stats, pass 2 consumes them. */
  if (cpi->pass == 1) {
    vp8_init_first_pass(cpi);
  } else if (cpi->pass == 2) {
    size_t packet_sz = sizeof(FIRSTPASS_STATS);
    int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);

    cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
    cpi->twopass.stats_in = cpi->twopass.stats_in_start;
    cpi->twopass.stats_in_end =
        (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
    vp8_init_second_pass(cpi);

  if (cpi->compressor_speed == 2) {
    cpi->avg_encode_time = 0;
    cpi->avg_pick_mode_time = 0;

  vp8_set_speed_features(cpi);

  /* Set starting values of RD threshold multipliers (128 = *1) */
  for (i = 0; i < MAX_MODES; ++i) {
    cpi->mb.rd_thresh_mult[i] = 128;

#ifdef VP8_ENTROPY_STATS
  init_mv_ref_counts();

#if CONFIG_MULTITHREAD
  if (vp8cx_create_encoder_threads(cpi)) {
    vp8_remove_compressor(&cpi);

  /* SAD/variance function pointers per block size. */
  cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
  cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
  cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
  cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
  cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
  cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;

  cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
  cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
  cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
  cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
  cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
  cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;

  cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
  cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
  cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
  cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
  cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
  cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;

  cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
  cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
  cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
  cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
  cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
  cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;

  cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
  cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
  cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
  cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
  cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
  cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;

#if ARCH_X86 || ARCH_X86_64
  cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
  cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
  cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
  cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
  cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;

  cpi->full_search_sad = vp8_full_search_sad;
  cpi->diamond_search_sad = vp8_diamond_search_sad;
  cpi->refining_search_sad = vp8_refining_search_sad;

  /* make sure frame 1 is okay */
  cpi->mb.error_bins[0] = cpi->common.MBs;

  /* vp8cx_init_quantizer() is first called here. Add check in
   * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
   * called later when needed. This will avoid unnecessary calls of
   * vp8cx_init_quantizer() for every frame.
   */
  vp8cx_init_quantizer(cpi);

  vp8_loop_filter_init(cm);

  cpi->common.error.setjmp = 0;

#if CONFIG_MULTI_RES_ENCODING

  /* Calculate # of MBs in a row in lower-resolution level image. */
  if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);

  /* setup RD costs to MACROBLOCK struct */

  cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
  cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
  cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
  cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];

  cal_mvsadcosts(cpi->mb.mvsadcost);

  cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
  cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
  cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
  cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
  cpi->mb.token_costs = cpi->rd_costs.token_costs;

  /* setup block ptrs & offsets */
  vp8_setup_block_ptrs(&cpi->mb);
  vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2004 void vp8_remove_compressor(VP8_COMP **ptr) {
2005 VP8_COMP *cpi = *ptr;
2009 if (cpi && (cpi->common.current_video_frame > 0)) {
2010 #if !CONFIG_REALTIME_ONLY
2012 if (cpi->pass == 2) {
2013 vp8_end_second_pass(cpi);
2018 #ifdef VP8_ENTROPY_STATS
2019 print_context_counters();
2020 print_tree_update_probs();
2021 print_mode_context();
2024 #if CONFIG_INTERNAL_STATS
2026 if (cpi->pass != 1) {
2027 FILE *f = fopen("opsnr.stt", "a");
2028 double time_encoded =
2029 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2031 double total_encode_time =
2032 (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
2033 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2034 const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
2035 const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
2037 if (cpi->b_calculate_psnr) {
2038 if (cpi->oxcf.number_of_layers > 1) {
2042 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2043 "GLPsnrP\tVPXSSIM\n");
2044 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2046 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2047 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2048 cpi->common.Width * cpi->common.Height;
2050 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2051 double total_psnr2 =
2052 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2054 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2057 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2059 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2060 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2061 total_psnr2, total_ssim);
2065 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2067 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2068 double total_psnr2 =
2069 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2071 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2074 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2075 "GLPsnrP\tVPXSSIM\n");
2077 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2079 dr, cpi->total / cpi->count, total_psnr,
2080 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2085 f = fopen("qskip.stt", "a");
2086 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2095 if (cpi->compressor_speed == 2) {
2097 FILE *f = fopen("cxspeed.stt", "a");
2098 cnt_pm /= cpi->common.MBs;
2100 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2110 extern int count_mb_seg[4];
2111 FILE *f = fopen("modes.stt", "a");
2112 double dr = (double)cpi->framerate * (double)bytes * (double)8 /
2113 (double)count / (double)1000;
2114 fprintf(f, "intra_mode in Intra Frames:\n");
2115 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2116 y_modes[2], y_modes[3], y_modes[4]);
2117 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2118 uv_modes[2], uv_modes[3]);
2123 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2128 fprintf(f, "Modes in Inter Frames:\n");
2129 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2130 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2131 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2132 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2134 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2135 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2140 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2144 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2145 count_mb_seg[2], count_mb_seg[3]);
2146 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2147 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2148 inter_b_modes[NEW4X4]);
2154 #ifdef VP8_ENTROPY_STATS
2157 FILE *fmode = fopen("modecontext.c", "w");
2159 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2160 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2162 "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2164 for (i = 0; i < 10; ++i) {
2165 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2167 for (j = 0; j < 10; ++j) {
2168 fprintf(fmode, " {");
2170 for (k = 0; k < 10; ++k) {
2171 if (!intra_mode_stats[i][j][k])
2172 fprintf(fmode, " %5d, ", 1);
2174 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2177 fprintf(fmode, "}, /* left_mode %d */\n", j);
2180 fprintf(fmode, " },\n");
2183 fprintf(fmode, "};\n");
2188 #if defined(SECTIONBITS_OUTPUT)
2192 FILE *f = fopen("tokenbits.stt", "a");
2194 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2204 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2205 printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
2206 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2211 #if CONFIG_MULTITHREAD
2212 vp8cx_remove_encoder_threads(cpi);
2215 #if CONFIG_TEMPORAL_DENOISING
2216 vp8_denoiser_free(&cpi->denoiser);
2218 dealloc_compressor_data(cpi);
2219 vpx_free(cpi->mb.ss);
2221 vpx_free(cpi->cyclic_refresh_map);
2222 vpx_free(cpi->consec_zero_last);
2223 vpx_free(cpi->consec_zero_last_mvbias);
2225 vp8_remove_common(&cpi->common);
2229 #ifdef OUTPUT_YUV_SRC
2232 #ifdef OUTPUT_YUV_DENOISED
2233 fclose(yuv_denoised_file);
/* Sum of squared differences between two same-sized planes.
 * Whole 16x16 tiles go through the optimized vpx_mse16x16(); the
 * right and bottom partial borders are accumulated with scalar math.
 * Strides may differ between the two planes.
 */
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
                                 unsigned char *recon, int recon_stride,
                                 unsigned int cols, unsigned int rows) {
  unsigned int row, col;
  uint64_t total_sse = 0;
  int diff;

  for (row = 0; row + 16 <= rows; row += 16) {
    for (col = 0; col + 16 <= cols; col += 16) {
      unsigned int sse;

      vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
      total_sse += sse;
    }

    /* Handle odd-sized width: scalar SSE over the right border strip. */
    if (col < cols) {
      unsigned int strip_row, strip_col;
      unsigned char *strip_orig = orig;
      unsigned char *strip_recon = recon;

      for (strip_row = 0; strip_row < 16; ++strip_row) {
        for (strip_col = col; strip_col < cols; ++strip_col) {
          diff = strip_orig[strip_col] - strip_recon[strip_col];
          total_sse += diff * diff;
        }

        strip_orig += orig_stride;
        strip_recon += recon_stride;
      }
    }

    orig += orig_stride * 16;
    recon += recon_stride * 16;
  }

  /* Handle odd-sized height: scalar SSE over any remaining rows. */
  for (; row < rows; ++row) {
    for (col = 0; col < cols; ++col) {
      diff = orig[col] - recon[col];
      total_sse += diff * diff;
    }

    orig += orig_stride;
    recon += recon_stride;
  }

  vp8_clear_system_state();
  return total_sse;
}
2301 static void generate_psnr_packet(VP8_COMP *cpi) {
2302 YV12_BUFFER_CONFIG *orig = cpi->Source;
2303 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2304 struct vpx_codec_cx_pkt pkt;
2307 unsigned int width = cpi->common.Width;
2308 unsigned int height = cpi->common.Height;
2310 pkt.kind = VPX_CODEC_PSNR_PKT;
2311 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2312 recon->y_stride, width, height);
2313 pkt.data.psnr.sse[0] = sse;
2314 pkt.data.psnr.sse[1] = sse;
2315 pkt.data.psnr.samples[0] = width * height;
2316 pkt.data.psnr.samples[1] = width * height;
2318 width = (width + 1) / 2;
2319 height = (height + 1) / 2;
2321 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2322 recon->uv_stride, width, height);
2323 pkt.data.psnr.sse[0] += sse;
2324 pkt.data.psnr.sse[2] = sse;
2325 pkt.data.psnr.samples[0] += width * height;
2326 pkt.data.psnr.samples[2] = width * height;
2328 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2329 recon->uv_stride, width, height);
2330 pkt.data.psnr.sse[0] += sse;
2331 pkt.data.psnr.sse[3] = sse;
2332 pkt.data.psnr.samples[0] += width * height;
2333 pkt.data.psnr.samples[3] = width * height;
2335 for (i = 0; i < 4; ++i) {
2336 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2337 (double)(pkt.data.psnr.sse[i]));
2340 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2343 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2344 if (ref_frame_flags > 7) return -1;
2346 cpi->ref_frame_flags = ref_frame_flags;
2349 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2350 if (ref_frame_flags > 7) return -1;
2352 cpi->common.refresh_golden_frame = 0;
2353 cpi->common.refresh_alt_ref_frame = 0;
2354 cpi->common.refresh_last_frame = 0;
2356 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2358 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2360 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2365 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2366 YV12_BUFFER_CONFIG *sd) {
2367 VP8_COMMON *cm = &cpi->common;
2370 if (ref_frame_flag == VP8_LAST_FRAME) {
2371 ref_fb_idx = cm->lst_fb_idx;
2372 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2373 ref_fb_idx = cm->gld_fb_idx;
2374 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2375 ref_fb_idx = cm->alt_fb_idx;
2380 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2384 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2385 YV12_BUFFER_CONFIG *sd) {
2386 VP8_COMMON *cm = &cpi->common;
2390 if (ref_frame_flag == VP8_LAST_FRAME) {
2391 ref_fb_idx = cm->lst_fb_idx;
2392 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2393 ref_fb_idx = cm->gld_fb_idx;
2394 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2395 ref_fb_idx = cm->alt_fb_idx;
2400 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2404 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2405 VP8_COMMON *cm = &cpi->common;
2406 cm->refresh_entropy_probs = update;
2411 #if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
2412 void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) {
2413 unsigned char *src = s->y_buffer;
2414 int h = s->y_height;
2417 fwrite(src, s->y_width, 1, yuv_file);
2425 fwrite(src, s->uv_width, 1, yuv_file);
2426 src += s->uv_stride;
2433 fwrite(src, s->uv_width, 1, yuv_file);
2434 src += s->uv_stride;
/* Scale the raw input frame sd to the coded size when spatial resampling
 * is active, extend its borders, and point cpi->Source at the scaled copy.
 */
static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
  VP8_COMMON *cm = &cpi->common;
  /* are we resizing the image */
  if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
#if CONFIG_SPATIAL_RESAMPLING
    /* hr/hs and vr/vs are the horizontal and vertical scaling ratio
     * numerator/denominator pairs produced by Scale2Ratio() below. */
    int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
    int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
    /* NOTE(review): vert_scale == 3 gets special handling here —
     * presumably the maximum (one-half) vertical scale; confirm against
     * the Scale2Ratio() table. */
    if (cm->vert_scale == 3) {
    Scale2Ratio(cm->horiz_scale, &hr, &hs);
    Scale2Ratio(cm->vert_scale, &vr, &vs);
    /* Resample into cpi->scaled_source, using temp_scale_frame as
     * intermediate storage for the scaler. */
    vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
                    tmp_height, hs, hr, vs, vr, 0);
    vp8_yv12_extend_frame_borders(&cpi->scaled_source);
    cpi->Source = &cpi->scaled_source;
/* One-pass CBR spatial-resampling decision, evaluated at key frames.
 * Adjusts cm->horiz_scale / cm->vert_scale one step at a time based on
 * buffer fullness, and reallocates + rescales when the coded size changes.
 */
static int resize_key_frame(VP8_COMP *cpi) {
#if CONFIG_SPATIAL_RESAMPLING
  VP8_COMMON *cm = &cpi->common;
  /* Do we need to apply resampling for one pass cbr.
   * In one pass this is more limited than in two pass cbr.
   * The test and any change is only made once per key frame sequence.
  if (cpi->oxcf.allow_spatial_resampling &&
      (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
    int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
    int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
    int new_width, new_height;
    /* If we are below the resample DOWN watermark then scale down a
    if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
                             cpi->oxcf.optimal_buffer_level / 100)) {
        /* Step one notch toward the maximum (one-half) scale. */
        (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
      cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
    /* Should we now start scaling back up */
    else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
                                  cpi->oxcf.optimal_buffer_level / 100)) {
        /* Step one notch back toward full (NORMAL) resolution. */
        (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
      cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
    /* Get the new height and width */
    Scale2Ratio(cm->horiz_scale, &hr, &hs);
    Scale2Ratio(cm->vert_scale, &vr, &vs);
    /* Ceiling division: round the scaled dimensions up. */
    new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
    new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
    /* If the image size has changed we need to reallocate the buffers
     * and resample the source image
    if ((cm->Width != new_width) || (cm->Height != new_height)) {
      cm->Width = new_width;
      cm->Height = new_height;
      vp8_alloc_compressor_data(cpi);
      scale_and_extend_source(cpi->un_scaled_source, cpi);
/* Book-keeping after coding an alt-ref frame: charge its cost to the
 * GF overspend account, reset GF usage tracking, and mark the alt ref
 * as active (no longer pending).
 */
static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
  VP8_COMMON *cm = &cpi->common;
  /* Select an interval before next GF or altref */
  if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
  if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
    cpi->current_gf_interval = cpi->frames_till_gf_update_due;
    /* Set the bits per frame that we should try and recover in
     * subsequent inter frames to account for the extra GF spend...
     * note that this does not apply for GF updates that occur
     * coincident with a key frame as the extra cost of key frames is
     * dealt with elsewhere.
    cpi->gf_overspend_bits += cpi->projected_frame_size;
    cpi->non_gf_bitrate_adjustment =
        cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
  /* Update data structure that monitors level of reference to last GF */
  memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
  cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
  /* this frame refreshes means next frames don't unless specified by user */
  cpi->frames_since_golden = 0;
  /* Clear the alternate reference update pending flag. */
  cpi->source_alt_ref_pending = 0;
  /* Set the alternate reference frame active flag */
  cpi->source_alt_ref_active = 1;
/* Book-keeping after each frame for golden-frame usage: charge GF cost
 * to the overspend account on refresh, maintain the countdown to the
 * next GF/altref, and accumulate per-reference usage counts otherwise.
 */
static void update_golden_frame_stats(VP8_COMP *cpi) {
  VP8_COMMON *cm = &cpi->common;
  /* Update the Golden frame usage counts. */
  if (cm->refresh_golden_frame) {
    /* Select an interval before next GF */
    if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
    if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
      cpi->current_gf_interval = cpi->frames_till_gf_update_due;
      /* Set the bits per frame that we should try and recover in
       * subsequent inter frames to account for the extra GF spend...
       * note that this does not apply for GF updates that occur
       * coincident with a key frame as the extra cost of key frames
       * is dealt with elsewhere.
      if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
        /* Calculate GF bits to be recovered
         * Projected size - av frame bits available for inter
         * frames for clip as a whole
        cpi->gf_overspend_bits +=
            (cpi->projected_frame_size - cpi->inter_frame_target);
      cpi->non_gf_bitrate_adjustment =
          cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
    /* Update data structure that monitors level of reference to last GF */
    memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
    cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
    /* this frame refreshes means next frames don't unless specified by
    cm->refresh_golden_frame = 0;
    cpi->frames_since_golden = 0;
    /* Reset the rolling per-reference usage counters. */
    cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
    cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
    cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
    cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
    /* ******** Fixed Q test code only ************ */
    /* If we are going to use the ALT reference for the next group of
     * frames set a flag to say so.
    if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
        !cpi->common.refresh_alt_ref_frame) {
      cpi->source_alt_ref_pending = 1;
      cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
    if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
    /* Decrement count down till next gf */
    if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
  } else if (!cpi->common.refresh_alt_ref_frame) {
    /* Decrement count down till next gf */
    if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
    if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
    cpi->frames_since_golden++;
    /* After the first frame since the GF, fold the macroblock-level
     * reference counts into the rolling usage totals. */
    if (cpi->frames_since_golden > 1) {
      cpi->recent_ref_frame_usage[INTRA_FRAME] +=
          cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
      cpi->recent_ref_frame_usage[LAST_FRAME] +=
          cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
      cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
          cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
      cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
          cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
/* This function updates the reference frame probability estimates
 * (prob_intra_coded / prob_last_coded / prob_gf_coded) that will be
 * used during mode selection for the next frame.
static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
  VP8_COMMON *cm = &cpi->common;
  /* Per-macroblock reference usage counts gathered while coding. */
  const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
  const int rf_intra = rfct[INTRA_FRAME];
  const int rf_inter =
      rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
  if (cm->frame_type == KEY_FRAME) {
    /* Key frames are all intra; seed inter probs at the midpoint. */
    cpi->prob_intra_coded = 255;
    cpi->prob_last_coded = 128;
    cpi->prob_gf_coded = 128;
  } else if (!(rf_intra + rf_inter)) {
    /* No usage statistics available: fall back to defaults. */
    cpi->prob_intra_coded = 63;
    cpi->prob_last_coded = 128;
    cpi->prob_gf_coded = 128;
  /* update reference frame costs since we can do better than what we got
  if (cpi->oxcf.number_of_layers == 1) {
    if (cpi->common.refresh_alt_ref_frame) {
      cpi->prob_intra_coded += 40;
      if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
      cpi->prob_last_coded = 200;
      cpi->prob_gf_coded = 1;
    } else if (cpi->frames_since_golden == 0) {
      cpi->prob_last_coded = 214;
    } else if (cpi->frames_since_golden == 1) {
      cpi->prob_last_coded = 192;
      cpi->prob_gf_coded = 220;
    } else if (cpi->source_alt_ref_active) {
      /* Bias progressively toward the alt ref while it stays active. */
      cpi->prob_gf_coded -= 20;
      if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
    if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2680 #if !CONFIG_REALTIME_ONLY
/* 1 = key, 0 = inter */
/* Heuristic decision on whether the current frame should be recoded as
 * a key frame, based on intra-coding prevalence and prediction-error
 * trends.  Only used for lower-speed / realtime configurations.
 */
static int decide_key_frame(VP8_COMP *cpi) {
  VP8_COMMON *cm = &cpi->common;
  int code_key_frame = 0;
  /* Never force a key frame at the very highest speed settings. */
  if (cpi->Speed > 11) return 0;
  /* Clear down mmx registers */
  vp8_clear_system_state();
  if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
    /* Relative change in intra error vs. the previous frame. */
    double change = 1.0 *
                    abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
                    (1 + cpi->last_intra_error);
        /* Relative change in prediction error vs. the previous frame. */
        abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
        (1 + cpi->last_prediction_error);
    double minerror = cm->MBs * 256;
    cpi->last_intra_error = cpi->mb.intra_error;
    cpi->last_prediction_error = cpi->mb.prediction_error;
    /* Cheap intra relative to inter, significant error, and a big swing
     * in either metric -> treat as a scene change candidate. */
    if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
        cpi->mb.prediction_error > minerror &&
        (change > .25 || change2 > .25)) {
      /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
       * cpi->last_frame_percent_intra + 3*/
  /* If the following are true we might as well code a key frame */
  if (((cpi->this_frame_percent_intra == 100) &&
       (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
      ((cpi->this_frame_percent_intra > 95) &&
       (cpi->this_frame_percent_intra >=
        (cpi->last_frame_percent_intra + 5)))) {
  /* in addition if the following are true and this is not a golden frame
   * then code a key frame Note that on golden frames there often seems
   * to be a pop in intra usage anyway hence this restriction is
   * designed to prevent spurious key frames. The Intra pop needs to be
  else if (((cpi->this_frame_percent_intra > 60) &&
            (cpi->this_frame_percent_intra >
             (cpi->last_frame_percent_intra * 2))) ||
           ((cpi->this_frame_percent_intra > 75) &&
            (cpi->this_frame_percent_intra >
             (cpi->last_frame_percent_intra * 3 / 2))) ||
           ((cpi->this_frame_percent_intra > 90) &&
            (cpi->this_frame_percent_intra >
             (cpi->last_frame_percent_intra + 10)))) {
    if (!cm->refresh_golden_frame) code_key_frame = 1;
  return code_key_frame;
/* First-pass encode: run the statistics-gathering pass at a fixed
 * quantizer.  No bitstream is produced at this stage.
 */
static void Pass1Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
                        unsigned int *frame_flags) {
  /* Fixed Q (26) for consistent first-pass statistics. */
  vp8_set_quantizer(cpi, 26);
  vp8_first_pass(cpi);
/* Debug helper: dump the Y, U and V planes of a frame to three raw
 * files named cx\{y,u,v}NNNN.raw (Windows-style path separators).
 * NOTE(review): fopen() results are used unchecked — debug-only code.
 */
void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
  /* write the frame */
  sprintf(filename, "cx\\y%04d.raw", this_frame);
  yframe = fopen(filename, "wb");
  for (i = 0; i < frame->y_height; ++i)
    fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
  sprintf(filename, "cx\\u%04d.raw", this_frame);
  yframe = fopen(filename, "wb");
  for (i = 0; i < frame->uv_height; ++i)
    fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
  sprintf(filename, "cx\\v%04d.raw", this_frame);
  yframe = fopen(filename, "wb");
  for (i = 0; i < frame->uv_height; ++i)
    fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2790 /* return of 0 means drop frame */
2792 #if !CONFIG_REALTIME_ONLY
/* Function to test for conditions that indicate we should loop
 * back and recode a frame.  Returns non-zero to force a recode.
static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
                            int maxq, int minq) {
  int force_recode = 0;
  VP8_COMMON *cm = &cpi->common;
  /* Is frame recode allowed at all
   * Yes if either recode mode 1 is selected or mode two is selected
   * and the frame is a key frame. golden frame or alt_ref_frame
  if ((cpi->sf.recode_loop == 1) ||
      ((cpi->sf.recode_loop == 2) &&
       ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
        cm->refresh_alt_ref_frame))) {
    /* General over and under shoot tests */
    if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
        ((cpi->projected_frame_size < low_limit) && (q > minq))) {
    /* Special Constrained quality tests */
    else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
      /* Undershoot and below auto cq level */
      if ((q > cpi->cq_target_quality) &&
          (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
      /* Severe undershoot and between auto and user cq level */
      else if ((q > cpi->oxcf.cq_level) &&
               (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
               (cpi->active_best_quality > cpi->oxcf.cq_level)) {
        /* Clamp the active quality range down to the user CQ level. */
        cpi->active_best_quality = cpi->oxcf.cq_level;
  return force_recode;
2833 #endif // !CONFIG_REALTIME_ONLY
/* After a frame has been encoded, commit the signaled reference-buffer
 * refreshes and copies: rotate the last/golden/altref frame-buffer
 * indices, maintain the per-buffer flag bits, and (with temporal
 * denoising) keep the denoiser running-average buffers in sync.
 */
static void update_reference_frames(VP8_COMP *cpi) {
  VP8_COMMON *cm = &cpi->common;
  YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
  /* At this point the new frame has been encoded.
   * If any buffer copy / swapping is signaled it should be done here.
  if (cm->frame_type == KEY_FRAME) {
    /* A key frame refreshes every reference at once. */
    yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
    yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
    yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
    cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
    cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
    cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
  } else /* For non key frames */
    if (cm->refresh_alt_ref_frame) {
      assert(!cm->copy_buffer_to_arf);
      /* The new frame becomes the alt ref. */
      cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
      cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
      cm->alt_fb_idx = cm->new_fb_idx;
      cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
    } else if (cm->copy_buffer_to_arf) {
      assert(!(cm->copy_buffer_to_arf & ~0x3));
      if (cm->copy_buffer_to_arf == 1) {
        /* 1: alt ref takes over the last-frame buffer (index swap only,
         * no pixel copy). */
        if (cm->alt_fb_idx != cm->lst_fb_idx) {
          yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
          yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
          cm->alt_fb_idx = cm->lst_fb_idx;
          cpi->current_ref_frames[ALTREF_FRAME] =
              cpi->current_ref_frames[LAST_FRAME];
      } else /* if (cm->copy_buffer_to_arf == 2) */
        /* 2: alt ref takes over the golden buffer. */
        if (cm->alt_fb_idx != cm->gld_fb_idx) {
          yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
          yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
          cm->alt_fb_idx = cm->gld_fb_idx;
          cpi->current_ref_frames[ALTREF_FRAME] =
              cpi->current_ref_frames[GOLDEN_FRAME];
    if (cm->refresh_golden_frame) {
      assert(!cm->copy_buffer_to_gf);
      /* The new frame becomes the golden frame. */
      cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
      cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
      cm->gld_fb_idx = cm->new_fb_idx;
      cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
    } else if (cm->copy_buffer_to_gf) {
      /* NOTE(review): tests copy_buffer_to_arf here, not
       * copy_buffer_to_gf — looks like a copy/paste slip; the valid
       * range check presumably was intended for copy_buffer_to_gf. */
      assert(!(cm->copy_buffer_to_arf & ~0x3));
      if (cm->copy_buffer_to_gf == 1) {
        /* 1: golden takes over the last-frame buffer. */
        if (cm->gld_fb_idx != cm->lst_fb_idx) {
          yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
          yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
          cm->gld_fb_idx = cm->lst_fb_idx;
          cpi->current_ref_frames[GOLDEN_FRAME] =
              cpi->current_ref_frames[LAST_FRAME];
      } else /* if (cm->copy_buffer_to_gf == 2) */
        /* 2: golden takes over the alt-ref buffer. */
        if (cm->alt_fb_idx != cm->gld_fb_idx) {
          yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
          yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
          cm->gld_fb_idx = cm->alt_fb_idx;
          cpi->current_ref_frames[GOLDEN_FRAME] =
              cpi->current_ref_frames[ALTREF_FRAME];
  if (cm->refresh_last_frame) {
    cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
    cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
    cm->lst_fb_idx = cm->new_fb_idx;
    cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
#if CONFIG_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity) {
    /* we shouldn't have to keep multiple copies as we know in advance which
     * buffer we should start - for now to get something up and running
     * I've chosen to copy the buffers
    if (cm->frame_type == KEY_FRAME) {
      for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
        vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
    } else /* For non key frames */
      vp8_yv12_extend_frame_borders(
          &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
      if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
        vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
                            &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
      if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
        vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
                            &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
      if (cm->refresh_last_frame) {
        vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
                            &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
    if (cpi->oxcf.noise_sensitivity == 4)
      vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
/* Average per-block squared difference between source and dest Y
 * planes, sampled every |skip| 16x16 blocks and restricted to blocks
 * coded as zero-motion-on-last for several consecutive frames.
 * Returns 0 when too few blocks qualify for a reliable estimate.
 */
static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
                                       YV12_BUFFER_CONFIG *dest,
  /* Blocks must have been ZEROMV-LAST for at least this many frames. */
  int min_consec_zero_last = 10;
  /* Total number of 16x16 blocks in the Y plane. */
  int tot_num_blocks = (source->y_height * source->y_width) >> 8;
  unsigned char *src = source->y_buffer;
  unsigned char *dst = dest->y_buffer;
  /* Loop through the Y plane, every |skip| blocks along rows and columns,
   * summing the square differences, and only for blocks that have been
   * zero_last mode at least |x| frames in a row.
  for (i = 0; i < source->y_height; i += 16 * skip) {
    int block_index_row = (i >> 4) * cpi->common.mb_cols;
    for (j = 0; j < source->y_width; j += 16 * skip) {
      int index = block_index_row + (j >> 4);
      if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
        Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
                              dest->y_stride, &sse);
    src += 16 * skip * source->y_stride;
    dst += 16 * skip * dest->y_stride;
  // Only return non-zero if we have at least ~1/16 samples for estimate.
  if (num_blocks > (tot_num_blocks >> 4)) {
    return (Total / num_blocks);
3002 #if CONFIG_TEMPORAL_DENOISING
/* Adaptively switch the temporal denoiser between normal and aggressive
 * YUV modes, driven by a recursively-averaged normalized source-diff
 * metric, the average QP, and the configured bitrate.  Called
 * periodically (every 8 frames) from the encode loop.
 */
static void process_denoiser_mode_change(VP8_COMP *cpi) {
  const VP8_COMMON *const cm = &cpi->common;
  // Number of blocks skipped along row/column in computing the
  // nmse (normalized mean square error) of source.
  // Only select blocks for computing nmse that have been encoded
  // as ZERO LAST min_consec_zero_last frames in a row.
  // Scale with number of temporal layers.
  int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
  // Decision is tested for changing the denoising mode every
  // num_mode_change times this function is called. Note that this
  // function called every 8 frames, so (8 * num_mode_change) is number
  // of frames where denoising mode change is tested for switch.
  int num_mode_change = 20;
  // Framerate factor, to compensate for larger mse at lower framerates.
  // Use ref_framerate, which is full source framerate for temporal layers.
  // TODO(marpan): Adjust this factor.
  int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
  int tot_num_blocks = cm->mb_rows * cm->mb_cols;
  int ystride = cpi->Source->y_stride;
  unsigned char *src = cpi->Source->y_buffer;
  unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
  // Flat mid-grey 16x1 row used to measure block contrast ("activity").
  static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
                                                  128, 128, 128, 128, 128, 128,
                                                  128, 128, 128, 128 };
  int bandwidth = (int)(cpi->target_bandwidth);
  // For temporal layers, use full bandwidth (top layer).
  if (cpi->oxcf.number_of_layers > 1) {
    LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
    bandwidth = (int)(lc->target_bandwidth);
  // Loop through the Y plane, every skip blocks along rows and columns,
  // summing the normalized mean square error, only for blocks that have
  // been encoded as ZEROMV LAST at least min_consec_zero_last least frames in
  // a row and have small sum difference between current and previous frame.
  // Normalization here is by the contrast of the current frame block.
  for (i = 0; i < cm->Height; i += 16 * skip) {
    int block_index_row = (i >> 4) * cm->mb_cols;
    for (j = 0; j < cm->Width; j += 16 * skip) {
      int index = block_index_row + (j >> 4);
      if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
        const unsigned int var =
            vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
        // Only consider this block as valid for noise measurement
        // if the sum_diff average of the current and previous frame
        // is small (to avoid effects from lighting change).
        if ((sse - var) < 128) {
          const unsigned int act =
              vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
          if (act > 0) total += sse / act;
    src += 16 * skip * ystride;
    dst += 16 * skip * ystride;
  total = total * fac_framerate / 100;
  // Only consider this frame as valid sample if we have computed nmse over
  // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
  // application inputs duplicate frames, or contrast is all zero).
  if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
    // Update the recursive mean square source_diff.
    total = (total << 8) / num_blocks;
    if (cpi->denoiser.nmse_source_diff_count == 0) {
      // First sample in new interval.
      cpi->denoiser.nmse_source_diff = total;
      cpi->denoiser.qp_avg = cm->base_qindex;
      // For subsequent samples, use average with weight ~1/4 for new sample.
      cpi->denoiser.nmse_source_diff =
          (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
      cpi->denoiser.qp_avg =
          (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
    cpi->denoiser.nmse_source_diff_count++;
  // Check for changing the denoiser mode, when we have obtained #samples =
  // num_mode_change. Condition the change also on the bitrate and QP.
  if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
    // Check for going up: from normal to aggressive mode.
    if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
        (cpi->denoiser.nmse_source_diff >
         cpi->denoiser.threshold_aggressive_mode) &&
        (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
         bandwidth > cpi->denoiser.bitrate_threshold)) {
      vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
      // Check for going down: from aggressive to normal mode.
      if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
           (cpi->denoiser.nmse_source_diff <
            cpi->denoiser.threshold_aggressive_mode)) ||
          ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
           (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
            bandwidth < cpi->denoiser.bitrate_threshold))) {
        vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
    // Reset metric and counter for next interval.
    cpi->denoiser.nmse_source_diff = 0;
    cpi->denoiser.qp_avg = 0;
    cpi->denoiser.nmse_source_diff_count = 0;
/* Choose the loop-filter strength for the frame just coded, apply the
 * loop filter (only when some reference buffer will be updated), and
 * extend the borders of the frame that will be shown/referenced.
 */
void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
  const FRAME_TYPE frame_type = cm->frame_type;
  int update_any_ref_buffers = 1;
  if (cpi->common.refresh_last_frame == 0 &&
      cpi->common.refresh_golden_frame == 0 &&
      cpi->common.refresh_alt_ref_frame == 0) {
    update_any_ref_buffers = 0;
    cm->filter_level = 0;
    struct vpx_usec_timer timer;
    vp8_clear_system_state();
    /* Time the filter-level search for the encoder's stats. */
    vpx_usec_timer_start(&timer);
    if (cpi->sf.auto_filter == 0) {
#if CONFIG_TEMPORAL_DENOISING
      if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
        // Use the denoised buffer for selecting base loop filter level.
        // Denoised signal for current frame is stored in INTRA_FRAME.
        // No denoising on key frames.
        vp8cx_pick_filter_level_fast(
            &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
        vp8cx_pick_filter_level_fast(cpi->Source, cpi);
      /* Fast (approximate) filter-level search. */
      vp8cx_pick_filter_level_fast(cpi->Source, cpi);
#if CONFIG_TEMPORAL_DENOISING
      if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
        // Use the denoised buffer for selecting base loop filter level.
        // Denoised signal for current frame is stored in INTRA_FRAME.
        // No denoising on key frames.
        vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
        vp8cx_pick_filter_level(cpi->Source, cpi);
      /* Full filter-level search. */
      vp8cx_pick_filter_level(cpi->Source, cpi);
    if (cm->filter_level > 0) {
      vp8cx_set_alt_lf_level(cpi, cm->filter_level);
    vpx_usec_timer_mark(&timer);
    cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
#if CONFIG_MULTITHREAD
  if (cpi->b_multi_threaded) {
    sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
  // No need to apply loop-filter if the encoded frame does not update
  // any reference buffers.
  if (cm->filter_level > 0 && update_any_ref_buffers) {
    vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
  vp8_yv12_extend_frame_borders(cm->frame_to_show);
3186 static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
3187 unsigned char *dest,
3188 unsigned char *dest_end,
3189 unsigned int *frame_flags) {
3191 int frame_over_shoot_limit;
3192 int frame_under_shoot_limit;
3197 VP8_COMMON *cm = &cpi->common;
3198 int active_worst_qchanged = 0;
3200 #if !CONFIG_REALTIME_ONLY
3204 int zbin_oq_low = 0;
3207 int overshoot_seen = 0;
3208 int undershoot_seen = 0;
3211 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3212 cpi->oxcf.optimal_buffer_level / 100);
3213 int drop_mark75 = drop_mark * 2 / 3;
3214 int drop_mark50 = drop_mark / 4;
3215 int drop_mark25 = drop_mark / 8;
3217 /* Clear down mmx registers to allow floating point in what follows */
3218 vp8_clear_system_state();
3220 if (cpi->force_next_frame_intra) {
3221 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3222 cpi->force_next_frame_intra = 0;
3225 /* For an alt ref frame in 2 pass we skip the call to the second pass
3226 * function that sets the target bandwidth
3228 switch (cpi->pass) {
3229 #if !CONFIG_REALTIME_ONLY
3231 if (cpi->common.refresh_alt_ref_frame) {
3232 /* Per frame bit target for the alt ref frame */
3233 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3234 /* per second target bitrate */
3235 cpi->target_bandwidth =
3236 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3239 #endif // !CONFIG_REALTIME_ONLY
3241 cpi->per_frame_bandwidth =
3242 (int)(cpi->target_bandwidth / cpi->output_framerate);
3246 /* Default turn off buffer to buffer copying */
3247 cm->copy_buffer_to_gf = 0;
3248 cm->copy_buffer_to_arf = 0;
3250 /* Clear zbin over-quant value and mode boost values. */
3251 cpi->mb.zbin_over_quant = 0;
3252 cpi->mb.zbin_mode_boost = 0;
3254 /* Enable or disable mode based tweaking of the zbin
3255 * For 2 Pass Only used where GF/ARF prediction quality
3256 * is above a threshold
3258 cpi->mb.zbin_mode_boost_enabled = 1;
3259 if (cpi->pass == 2) {
3260 if (cpi->gfu_boost <= 400) {
3261 cpi->mb.zbin_mode_boost_enabled = 0;
3265 /* Current default encoder behaviour for the altref sign bias */
3266 if (cpi->source_alt_ref_active) {
3267 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3269 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3272 /* Check to see if a key frame is signaled
3273 * For two pass with auto key frame enabled cm->frame_type may already
3274 * be set, but not for one pass.
3276 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3277 (cpi->oxcf.auto_key &&
3278 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3279 /* Key frame from VFW/auto-keyframe/first frame */
3280 cm->frame_type = KEY_FRAME;
3281 #if CONFIG_TEMPORAL_DENOISING
3282 if (cpi->oxcf.noise_sensitivity == 4) {
3283 // For adaptive mode, reset denoiser to normal mode on key frame.
3284 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3289 #if CONFIG_MULTI_RES_ENCODING
3290 if (cpi->oxcf.mr_total_resolutions > 1) {
3291 LOWER_RES_FRAME_INFO *low_res_frame_info =
3292 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3294 if (cpi->oxcf.mr_encoder_id) {
3295 // TODO(marpan): This constraint shouldn't be needed, as we would like
3296 // to allow for key frame setting (forced or periodic) defined per
3297 // spatial layer. For now, keep this in.
3298 cm->frame_type = low_res_frame_info->frame_type;
3300 // Check if lower resolution is available for motion vector reuse.
3301 if (cm->frame_type != KEY_FRAME) {
3302 cpi->mr_low_res_mv_avail = 1;
3303 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3305 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3306 cpi->mr_low_res_mv_avail &=
3307 (cpi->current_ref_frames[LAST_FRAME] ==
3308 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3310 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3311 cpi->mr_low_res_mv_avail &=
3312 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3313 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3315 // Don't use altref to determine whether low res is available.
3316 // TODO (marpan): Should we make this type of condition on a
3317 // per-reference frame basis?
3319 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3320 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3321 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3326 // On a key frame: For the lowest resolution, keep track of the key frame
3327 // counter value. For the higher resolutions, reset the current video
3328 // frame counter to that of the lowest resolution.
3329   // This is done to handle the case where we may stop/start encoding
3330 // higher layer(s). The restart-encoding of higher layer is only signaled
3331 // by a key frame for now.
3332 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3333 if (cm->frame_type == KEY_FRAME) {
3334 if (cpi->oxcf.mr_encoder_id) {
3335 // If the initial starting value of the buffer level is zero (this can
3336 // happen because we may have not started encoding this higher stream),
3337 // then reset it to non-zero value based on |starting_buffer_level|.
3338 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3340 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3341 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3342 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3343 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3344 lc->bits_off_target = lc->starting_buffer_level;
3345 lc->buffer_level = lc->starting_buffer_level;
3348 cpi->common.current_video_frame =
3349 low_res_frame_info->key_frame_counter_value;
3351 low_res_frame_info->key_frame_counter_value =
3352 cpi->common.current_video_frame;
3358 // Find the reference frame closest to the current frame.
3359 cpi->closest_reference_frame = LAST_FRAME;
3360 if (cm->frame_type != KEY_FRAME) {
3362 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3363 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3364 closest_ref = LAST_FRAME;
3365 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3366 closest_ref = GOLDEN_FRAME;
3367 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3368 closest_ref = ALTREF_FRAME;
3370 for (i = 1; i <= 3; ++i) {
3371 vpx_ref_frame_type_t ref_frame_type =
3372 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3373 if (cpi->ref_frame_flags & ref_frame_type) {
3374 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3375 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3380 cpi->closest_reference_frame = closest_ref;
3383 /* Set various flags etc to special state if it is a key frame */
3384 if (cm->frame_type == KEY_FRAME) {
3387 // Set the loop filter deltas and segmentation map update
3388 setup_features(cpi);
3390 /* The alternate reference frame cannot be active for a key frame */
3391 cpi->source_alt_ref_active = 0;
3393 /* Reset the RD threshold multipliers to default of * 1 (128) */
3394 for (i = 0; i < MAX_MODES; ++i) {
3395 cpi->mb.rd_thresh_mult[i] = 128;
3398 // Reset the zero_last counter to 0 on key frame.
3399 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3400 memset(cpi->consec_zero_last_mvbias, 0,
3401 (cpi->common.mb_rows * cpi->common.mb_cols));
3405 /* Experimental code for lagged compress and one pass
3406 * Initialise one_pass GF frames stats
3407 * Update stats used for GF selection
3410 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3412 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3413 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3414 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3415 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3416 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3417 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3418 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3419 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3420 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3424 update_rd_ref_frame_probs(cpi);
3426 if (cpi->drop_frames_allowed) {
3427 /* The reset to decimation 0 is only done here for one pass.
3428 * Once it is set two pass leaves decimation on till the next kf.
3430 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) {
3431 cpi->decimation_factor--;
3434 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3435 cpi->decimation_factor = 1;
3437 } else if (cpi->buffer_level < drop_mark25 &&
3438 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3439 cpi->decimation_factor = 3;
3440 } else if (cpi->buffer_level < drop_mark50 &&
3441 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3442 cpi->decimation_factor = 2;
3443 } else if (cpi->buffer_level < drop_mark75 &&
3444 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3445 cpi->decimation_factor = 1;
3449 /* The following decimates the frame rate according to a regular
3450 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3451 * prevent buffer under-run in CBR mode. Alternatively it might be
3452 * desirable in some situations to drop frame rate but throw more bits
3455 * Note that dropping a key frame can be problematic if spatial
3456 * resampling is also active
3458 if (cpi->decimation_factor > 0) {
3459 switch (cpi->decimation_factor) {
3461 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3464 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3467 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3471 /* Note that we should not throw out a key frame (especially when
3472 * spatial resampling is enabled).
3474 if (cm->frame_type == KEY_FRAME) {
3475 cpi->decimation_count = cpi->decimation_factor;
3476 } else if (cpi->decimation_count > 0) {
3477 cpi->decimation_count--;
3479 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3480 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3481 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3484 #if CONFIG_MULTI_RES_ENCODING
3485 vp8_store_drop_frame_info(cpi);
3488 cm->current_video_frame++;
3489 cpi->frames_since_key++;
3490 // We advance the temporal pattern for dropped frames.
3491 cpi->temporal_pattern_counter++;
3493 #if CONFIG_INTERNAL_STATS
3497 cpi->buffer_level = cpi->bits_off_target;
3499 if (cpi->oxcf.number_of_layers > 1) {
3502 /* Propagate bits saved by dropping the frame to higher
3505 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3506 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3507 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3508 if (lc->bits_off_target > lc->maximum_buffer_size) {
3509 lc->bits_off_target = lc->maximum_buffer_size;
3511 lc->buffer_level = lc->bits_off_target;
3517 cpi->decimation_count = cpi->decimation_factor;
3520 cpi->decimation_count = 0;
3523 /* Decide how big to make the frame */
3524 if (!vp8_pick_frame_size(cpi)) {
3525 /*TODO: 2 drop_frame and return code could be put together. */
3526 #if CONFIG_MULTI_RES_ENCODING
3527 vp8_store_drop_frame_info(cpi);
3529 cm->current_video_frame++;
3530 cpi->frames_since_key++;
3531 // We advance the temporal pattern for dropped frames.
3532 cpi->temporal_pattern_counter++;
3536 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3537 * This has a knock on effect on active best quality as well.
3538 * For CBR if the buffer reaches its maximum level then we can no longer
3539 * save up bits for later frames so we might as well use them up
3540 * on the current frame.
3542 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3543 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3544 cpi->buffered_mode) {
3545 /* Max adjustment is 1/4 */
3546 int Adjustment = cpi->active_worst_quality / 4;
3551 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3552 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3553 cpi->oxcf.optimal_buffer_level) /
3556 if (buff_lvl_step) {
3558 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3565 cpi->active_worst_quality -= Adjustment;
3567 if (cpi->active_worst_quality < cpi->active_best_quality) {
3568 cpi->active_worst_quality = cpi->active_best_quality;
3573 /* Set an active best quality and if necessary active worst quality
3574 * There is some odd behavior for one pass here that needs attention.
3576 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3577 vp8_clear_system_state();
3579 Q = cpi->active_worst_quality;
3581 if (cm->frame_type == KEY_FRAME) {
3582 if (cpi->pass == 2) {
3583 if (cpi->gfu_boost > 600) {
3584 cpi->active_best_quality = kf_low_motion_minq[Q];
3586 cpi->active_best_quality = kf_high_motion_minq[Q];
3589 /* Special case for key frames forced because we have reached
3590 * the maximum key frame interval. Here force the Q to a range
3591 * based on the ambient Q to reduce the risk of popping
3593 if (cpi->this_key_frame_forced) {
3594 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3595 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3596 } else if (cpi->active_best_quality<cpi->avg_frame_qindex>> 2) {
3597 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3601 /* One pass more conservative */
3603 cpi->active_best_quality = kf_high_motion_minq[Q];
3607 else if (cpi->oxcf.number_of_layers == 1 &&
3608 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3609 /* Use the lower of cpi->active_worst_quality and recent
3610 * average Q as basis for GF/ARF Q limit unless last frame was
3613 if ((cpi->frames_since_key > 1) &&
3614 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3615 Q = cpi->avg_frame_qindex;
3618 /* For constrained quality dont allow Q less than the cq level */
3619 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3620 (Q < cpi->cq_target_quality)) {
3621 Q = cpi->cq_target_quality;
3624 if (cpi->pass == 2) {
3625 if (cpi->gfu_boost > 1000) {
3626 cpi->active_best_quality = gf_low_motion_minq[Q];
3627 } else if (cpi->gfu_boost < 400) {
3628 cpi->active_best_quality = gf_high_motion_minq[Q];
3630 cpi->active_best_quality = gf_mid_motion_minq[Q];
3633 /* Constrained quality use slightly lower active best. */
3634 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3635 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3638 /* One pass more conservative */
3640 cpi->active_best_quality = gf_high_motion_minq[Q];
3643 cpi->active_best_quality = inter_minq[Q];
3645 /* For the constant/constrained quality mode we dont want
3646 * q to fall below the cq level.
3648 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3649 (cpi->active_best_quality < cpi->cq_target_quality)) {
3650 /* If we are strongly undershooting the target rate in the last
3651 * frames then use the user passed in cq value not the auto
3654 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3655 cpi->active_best_quality = cpi->oxcf.cq_level;
3657 cpi->active_best_quality = cpi->cq_target_quality;
3662 /* If CBR and the buffer is as full then it is reasonable to allow
3663 * higher quality on the frames to prevent bits just going to waste.
3665 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
3666     /* Note that the use of >= here eliminates the risk of a divide
3667      * by 0 error in the else if clause
3669 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3670 cpi->active_best_quality = cpi->best_quality;
3672 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3674 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3675 (cpi->oxcf.maximum_buffer_size -
3676 cpi->oxcf.optimal_buffer_level));
3677 int min_qadjustment =
3678 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3680 cpi->active_best_quality -= min_qadjustment;
3684 /* Make sure constrained quality mode limits are adhered to for the first
3685 * few frames of one pass encodes
3687 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3688 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3689 cpi->common.refresh_alt_ref_frame) {
3690 cpi->active_best_quality = cpi->best_quality;
3691 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3692 cpi->active_best_quality = cpi->cq_target_quality;
3696 /* Clip the active best and worst quality values to limits */
3697 if (cpi->active_worst_quality > cpi->worst_quality) {
3698 cpi->active_worst_quality = cpi->worst_quality;
3701 if (cpi->active_best_quality < cpi->best_quality) {
3702 cpi->active_best_quality = cpi->best_quality;
3705 if (cpi->active_worst_quality < cpi->active_best_quality) {
3706 cpi->active_worst_quality = cpi->active_best_quality;
3709 /* Determine initial Q to try */
3710 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3712 #if !CONFIG_REALTIME_ONLY
3714 /* Set highest allowed value for Zbin over quant */
3715 if (cm->frame_type == KEY_FRAME) {
3717 } else if ((cpi->oxcf.number_of_layers == 1) &&
3718 ((cm->refresh_alt_ref_frame ||
3719 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3722 zbin_oq_high = ZBIN_OQ_MAX;
3726 /* Setup background Q adjustment for error resilient mode.
3727 * For multi-layer encodes only enable this for the base layer.
3729 if (cpi->cyclic_refresh_mode_enabled) {
3730 // Special case for screen_content_mode with golden frame updates.
3732 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3733 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3734 cyclic_background_refresh(cpi, Q, 0);
3736 disable_segmentation(cpi);
3740 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3741 &frame_over_shoot_limit);
3743 #if !CONFIG_REALTIME_ONLY
3744 /* Limit Q range for the adaptive loop. */
3745 bottom_index = cpi->active_best_quality;
3746 top_index = cpi->active_worst_quality;
3747 q_low = cpi->active_best_quality;
3748 q_high = cpi->active_worst_quality;
3751 vp8_save_coding_context(cpi);
3755 scale_and_extend_source(cpi->un_scaled_source, cpi);
3757 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3758 // Option to apply spatial blur under the aggressive or adaptive
3759 // (temporal denoising) mode.
3760 if (cpi->oxcf.noise_sensitivity >= 3) {
3761 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3762 vp8_de_noise(cm, cpi->Source, cpi->Source,
3763 cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0);
3768 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3770 if (cpi->oxcf.noise_sensitivity > 0) {
3774 switch (cpi->oxcf.noise_sensitivity) {
3775 case 1: l = 20; break;
3776 case 2: l = 40; break;
3777 case 3: l = 60; break;
3778 case 4: l = 80; break;
3779 case 5: l = 100; break;
3780 case 6: l = 150; break;
3783 if (cm->frame_type == KEY_FRAME) {
3784 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3786 vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1);
3788 src = cpi->Source->y_buffer;
3790 if (cpi->Source->y_stride < 0) {
3791 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3798 #ifdef OUTPUT_YUV_SRC
3799 vp8_write_yuv_frame(yuv_file, cpi->Source);
3803 vp8_clear_system_state();
3805 vp8_set_quantizer(cpi, Q);
3807 /* setup skip prob for costing in mode/mv decision */
3808 if (cpi->common.mb_no_coeff_skip) {
3809 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3811 if (cm->frame_type != KEY_FRAME) {
3812 if (cpi->common.refresh_alt_ref_frame) {
3813 if (cpi->last_skip_false_probs[2] != 0) {
3814 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3818 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3819 cpi->last_skip_probs_q[2])<=16 )
3820 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3821 else if (cpi->last_skip_false_probs[2]!=0)
3822 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3823 cpi->prob_skip_false ) / 2;
3825 } else if (cpi->common.refresh_golden_frame) {
3826 if (cpi->last_skip_false_probs[1] != 0) {
3827 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3831 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3832 cpi->last_skip_probs_q[1])<=16 )
3833 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3834 else if (cpi->last_skip_false_probs[1]!=0)
3835 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3836 cpi->prob_skip_false ) / 2;
3839 if (cpi->last_skip_false_probs[0] != 0) {
3840 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3844 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3845 cpi->last_skip_probs_q[0])<=16 )
3846 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3847 else if(cpi->last_skip_false_probs[0]!=0)
3848 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3849 cpi->prob_skip_false ) / 2;
3853       /* as this is a cost estimate, make sure it does not
3854        * go extreme either way
3856 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3858 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3860 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3861 cpi->prob_skip_false = 1;
3869 FILE *f = fopen("skip.stt", "a");
3870 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3877 if (cm->frame_type == KEY_FRAME) {
3878 if (resize_key_frame(cpi)) {
3879 /* If the frame size has changed, need to reset Q, quantizer,
3880 * and background refresh.
3882 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3883 if (cpi->cyclic_refresh_mode_enabled) {
3884 if (cpi->current_layer == 0) {
3885 cyclic_background_refresh(cpi, Q, 0);
3887 disable_segmentation(cpi);
3890 // Reset the zero_last counter to 0 on key frame.
3891 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3892 memset(cpi->consec_zero_last_mvbias, 0,
3893 (cpi->common.mb_rows * cpi->common.mb_cols));
3894 vp8_set_quantizer(cpi, Q);
3897 vp8_setup_key_frame(cpi);
3900 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3902 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3904 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3905 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3908 if (cm->refresh_entropy_probs == 0) {
3909 /* save a copy for later refresh */
3910 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3913 vp8_update_coef_context(cpi);
3915 vp8_update_coef_probs(cpi);
3917 /* transform / motion compensation build reconstruction frame
3918 * +pack coef partitions
3920 vp8_encode_frame(cpi);
3922 /* cpi->projected_frame_size is not needed for RT mode */
3925 /* transform / motion compensation build reconstruction frame */
3926 vp8_encode_frame(cpi);
3928 if (cpi->oxcf.screen_content_mode == 2) {
3929 if (vp8_drop_encodedframe_overshoot(cpi, Q)) return;
3932 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3933 cpi->projected_frame_size =
3934 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3936 vp8_clear_system_state();
3938 /* Test to see if the stats generated for this frame indicate that
3939 * we should have coded a key frame (assuming that we didn't)!
3942 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
3943 cpi->compressor_speed != 2) {
3944 #if !CONFIG_REALTIME_ONLY
3945 if (decide_key_frame(cpi)) {
3946 /* Reset all our sizing numbers and recode */
3947 cm->frame_type = KEY_FRAME;
3949 vp8_pick_frame_size(cpi);
3951 /* Clear the Alt reference frame active flag when we have
3954 cpi->source_alt_ref_active = 0;
3956 // Set the loop filter deltas and segmentation map update
3957 setup_features(cpi);
3959 vp8_restore_coding_context(cpi);
3961 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3963 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3964 &frame_over_shoot_limit);
3966 /* Limit Q range for the adaptive loop. */
3967 bottom_index = cpi->active_best_quality;
3968 top_index = cpi->active_worst_quality;
3969 q_low = cpi->active_best_quality;
3970 q_high = cpi->active_worst_quality;
3980 vp8_clear_system_state();
3982 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
3984   /* Are we overshooting and up against the limit of active max Q. */
3985 if (((cpi->pass != 2) ||
3986 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
3987 (Q == cpi->active_worst_quality) &&
3988 (cpi->active_worst_quality < cpi->worst_quality) &&
3989 (cpi->projected_frame_size > frame_over_shoot_limit)) {
3990 int over_size_percent =
3991 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
3992 frame_over_shoot_limit;
3994 /* If so is there any scope for relaxing it */
3995 while ((cpi->active_worst_quality < cpi->worst_quality) &&
3996 (over_size_percent > 0)) {
3997 cpi->active_worst_quality++;
3998 /* Assume 1 qstep = about 4% on frame size. */
3999 over_size_percent = (int)(over_size_percent * 0.96);
4001 #if !CONFIG_REALTIME_ONLY
4002 top_index = cpi->active_worst_quality;
4003 #endif // !CONFIG_REALTIME_ONLY
4004 /* If we have updated the active max Q do not call
4005 * vp8_update_rate_correction_factors() this loop.
4007 active_worst_qchanged = 1;
4009 active_worst_qchanged = 0;
4012 #if CONFIG_REALTIME_ONLY
4015 /* Special case handling for forced key frames */
4016 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4018 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4020 /* The key frame is not good enough */
4021 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4023 q_high = (Q > q_low) ? (Q - 1) : q_low;
4026 Q = (q_high + q_low) >> 1;
4028 /* The key frame is much better than the previous frame */
4029 else if (kf_err < (cpi->ambient_err >> 1)) {
4031 q_low = (Q < q_high) ? (Q + 1) : q_high;
4034 Q = (q_high + q_low + 1) >> 1;
4037 /* Clamp Q to upper and lower limits: */
4040 } else if (Q < q_low) {
4047 /* Is the projected frame size out of range and are we allowed
4048 * to attempt to recode.
4050 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4051 frame_under_shoot_limit, Q, top_index,
4056 /* Frame size out of permitted range. Update correction factor
4057 * & compute new Q to try...
4060 /* Frame is too large */
4061 if (cpi->projected_frame_size > cpi->this_frame_target) {
4062 /* Raise Qlow as to at least the current value */
4063 q_low = (Q < q_high) ? (Q + 1) : q_high;
4065 /* If we are using over quant do the same for zbin_oq_low */
4066 if (cpi->mb.zbin_over_quant > 0) {
4067 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4068 ? (cpi->mb.zbin_over_quant + 1)
4072 if (undershoot_seen) {
4073 /* Update rate_correction_factor unless
4074 * cpi->active_worst_quality has changed.
4076 if (!active_worst_qchanged) {
4077 vp8_update_rate_correction_factors(cpi, 1);
4080 Q = (q_high + q_low + 1) / 2;
4082 /* Adjust cpi->zbin_over_quant (only allowed when Q
4086 cpi->mb.zbin_over_quant = 0;
4088 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4089 ? (cpi->mb.zbin_over_quant + 1)
4091 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4094 /* Update rate_correction_factor unless
4095 * cpi->active_worst_quality has changed.
4097 if (!active_worst_qchanged) {
4098 vp8_update_rate_correction_factors(cpi, 0);
4101 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4103 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4105 vp8_update_rate_correction_factors(cpi, 0);
4106 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4113 /* Frame is too small */
4115 if (cpi->mb.zbin_over_quant == 0) {
4116 /* Lower q_high if not using over quant */
4117 q_high = (Q > q_low) ? (Q - 1) : q_low;
4119 /* else lower zbin_oq_high */
4120 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4121 ? (cpi->mb.zbin_over_quant - 1)
4125 if (overshoot_seen) {
4126 /* Update rate_correction_factor unless
4127 * cpi->active_worst_quality has changed.
4129 if (!active_worst_qchanged) {
4130 vp8_update_rate_correction_factors(cpi, 1);
4133 Q = (q_high + q_low) / 2;
4135 /* Adjust cpi->zbin_over_quant (only allowed when Q
4139 cpi->mb.zbin_over_quant = 0;
4141 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4144 /* Update rate_correction_factor unless
4145 * cpi->active_worst_quality has changed.
4147 if (!active_worst_qchanged) {
4148 vp8_update_rate_correction_factors(cpi, 0);
4151 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4153 /* Special case reset for qlow for constrained quality.
4154 * This should only trigger where there is very substantial
4155 * undershoot on a frame and the auto cq level is above
4156           * the user passed in value.
4158 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4163 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4165 vp8_update_rate_correction_factors(cpi, 0);
4166 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4171 undershoot_seen = 1;
4174 /* Clamp Q to upper and lower limits: */
4177 } else if (Q < q_low) {
4181 /* Clamp cpi->zbin_over_quant */
4182 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low)
4184 : (cpi->mb.zbin_over_quant > zbin_oq_high)
4186 : cpi->mb.zbin_over_quant;
4192 #endif // CONFIG_REALTIME_ONLY
4194 if (cpi->is_src_frame_alt_ref) Loop = 0;
4197 vp8_restore_coding_context(cpi);
4199 #if CONFIG_INTERNAL_STATS
4200 cpi->tot_recode_hits++;
4203 } while (Loop == 1);
4206 /* Experimental code for lagged and one pass
4207 * Update stats used for one pass GF selection
4210 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4211 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4212 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4216 /* Special case code to reduce pulsing when key frames are forced at a
4217 * fixed interval. Note the reconstruction error if it is the frame before
4218 * the force key frame
4220 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4222 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4225 /* This frame's MVs are saved and will be used in next frame's MV predictor.
4226 * Last frame has one more line(add to bottom) and one more column(add to
4227 * right) than cm->mip. The edge elements are initialized to 0.
4229 #if CONFIG_MULTI_RES_ENCODING
4230 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4232 if (cm->show_frame) /* do not save for altref frame */
4237 /* Point to beginning of allocated MODE_INFO arrays. */
4238 MODE_INFO *tmp = cm->mip;
4240 if (cm->frame_type != KEY_FRAME) {
4241 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4242 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4243 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4244 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4245 tmp->mbmi.mv.as_int;
4248 cpi->lf_ref_frame_sign_bias[mb_col +
4249 mb_row * (cm->mode_info_stride + 1)] =
4250 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4251 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4252 tmp->mbmi.ref_frame;
4259 /* Count last ref frame 0,0 usage on current encoded frame. */
4263 /* Point to beginning of MODE_INFO arrays. */
4264 MODE_INFO *tmp = cm->mi;
4266 cpi->zeromv_count = 0;
4268 if (cm->frame_type != KEY_FRAME) {
4269 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4270 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4271 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4272 cpi->zeromv_count++;
4281 #if CONFIG_MULTI_RES_ENCODING
4282 vp8_cal_dissimilarity(cpi);
4285 /* Update the GF useage maps.
4286 * This is done after completing the compression of a frame when all
4287 * modes etc. are finalized but before loop filter
4289 if (cpi->oxcf.number_of_layers == 1) {
4290 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4293 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4297 FILE *f = fopen("gfactive.stt", "a");
4298 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4303 /* For inter frames the current default behavior is that when
4304 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4305 * This is purely an encoder decision at present.
4307 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) {
4308 cm->copy_buffer_to_arf = 2;
4310 cm->copy_buffer_to_arf = 0;
4313 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4315 #if CONFIG_TEMPORAL_DENOISING
4316 // Get some measure of the amount of noise, by measuring the (partial) mse
4317 // between source and denoised buffer, for y channel. Partial refers to
4318 // computing the sse for a sub-sample of the frame (i.e., skip x blocks along
4320 // and only for blocks in that set that are consecutive ZEROMV_LAST mode.
4321 // Do this every ~8 frames, to further reduce complexity.
4322 // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity <
4324 // should be removed in favor of the process_denoiser_mode_change() function
4326 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4327 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4328 cm->frame_type != KEY_FRAME) {
4329 cpi->mse_source_denoised = measure_square_diff_partial(
4330 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4333 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4334 // of source diff (between current and previous frame), and determine if we
4335 // should switch the denoiser mode. Sampling refers to computing the mse for
4336 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4337 // only for blocks in that set that have used ZEROMV LAST, along with some
4338 // constraint on the sum diff between blocks. This process is called every
4339 // ~8 frames, to further reduce complexity.
4340 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4341 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4342 process_denoiser_mode_change(cpi);
4346 #if CONFIG_MULTITHREAD
4347 if (cpi->b_multi_threaded) {
4348 /* start loopfilter in separate thread */
4349 sem_post(&cpi->h_event_start_lpf);
4350 cpi->b_lpf_running = 1;
4354 vp8_loopfilter_frame(cpi, cm);
4357 update_reference_frames(cpi);
4359 #ifdef OUTPUT_YUV_DENOISED
4360 vp8_write_yuv_frame(yuv_denoised_file,
4361 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4364 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4365 if (cpi->oxcf.error_resilient_mode) {
4366 cm->refresh_entropy_probs = 0;
4370 #if CONFIG_MULTITHREAD
4371 /* wait that filter_level is picked so that we can continue with stream
4373 if (cpi->b_multi_threaded) sem_wait(&cpi->h_event_end_lpf);
4376 /* build the bitstream */
4377 vp8_pack_bitstream(cpi, dest, dest_end, size);
4379 /* Move storing frame_type out of the above loop since it is also
4380 * needed in motion search besides loopfilter */
4381 cm->last_frame_type = cm->frame_type;
4383 /* Update rate control heuristics */
4384 cpi->total_byte_count += (*size);
4385 cpi->projected_frame_size = (int)(*size) << 3;
4387 if (cpi->oxcf.number_of_layers > 1) {
4389 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4390 cpi->layer_context[i].total_byte_count += (*size);
4394 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4396 cpi->last_q[cm->frame_type] = cm->base_qindex;
4398 if (cm->frame_type == KEY_FRAME) {
4399 vp8_adjust_key_frame_context(cpi);
4402 /* Keep a record of ambient average Q. */
4403 if (cm->frame_type != KEY_FRAME) {
4404 cpi->avg_frame_qindex =
4405 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4408 /* Keep a record from which we can calculate the average Q excluding
4409 * GF updates and key frames
4411 if ((cm->frame_type != KEY_FRAME) &&
4412 ((cpi->oxcf.number_of_layers > 1) ||
4413 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4416 /* Calculate the average Q for normal inter frames (not key or GFU
4419 if (cpi->pass == 2) {
4420 cpi->ni_tot_qi += Q;
4421 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4423 /* Damp value for first few frames */
4424 if (cpi->ni_frames > 150) {
4425 cpi->ni_tot_qi += Q;
4426 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4428 /* For one pass, early in the clip ... average the current frame Q
4429 * value with the worstq entered by the user as a dampening measure
4432 cpi->ni_tot_qi += Q;
4434 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4437 /* If the average Q is higher than what was used in the last
4438 * frame (after going through the recode loop to keep the frame
4439 * size within range) then use the last frame value - 1. The -1
4440 * is designed to stop Q and hence the data rate, from
4441 * progressively falling away during difficult sections, but at
4442 * the same time reduce the number of iterations around the
4445 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4449 /* Update the buffer level variable. */
4450 /* Non-viewable frames are a special case and are treated as pure overhead. */
4451 if (!cm->show_frame) {
4452 cpi->bits_off_target -= cpi->projected_frame_size;
4454 cpi->bits_off_target +=
4455 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4458 /* Clip the buffer level to the maximum specified buffer size */
4459 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4460 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4463 // If the frame dropper is not enabled, don't let the buffer level go below
4464 // some threshold, given here by -|maximum_buffer_size|. For now we only do
4465 // this for screen content input.
4466 if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode &&
4467 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4468 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4471 /* Rolling monitors of whether we are over or underspending used to
4472 * help regulate min and Max Q in two pass.
4474 cpi->rolling_target_bits =
4475 ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4476 cpi->rolling_actual_bits =
4477 ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4478 cpi->long_rolling_target_bits =
4479 ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4480 cpi->long_rolling_actual_bits =
4481 ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) /
4484 /* Actual bits spent */
4485 cpi->total_actual_bits += cpi->projected_frame_size;
4488 cpi->total_target_vs_actual +=
4489 (cpi->this_frame_target - cpi->projected_frame_size);
4491 cpi->buffer_level = cpi->bits_off_target;
4493 /* Propagate values to higher temporal layers */
4494 if (cpi->oxcf.number_of_layers > 1) {
4497 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4498 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4499 int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
4500 cpi->projected_frame_size);
4502 lc->bits_off_target += bits_off_for_this_layer;
4504 /* Clip buffer level to maximum buffer size for the layer */
4505 if (lc->bits_off_target > lc->maximum_buffer_size) {
4506 lc->bits_off_target = lc->maximum_buffer_size;
4509 lc->total_actual_bits += cpi->projected_frame_size;
4510 lc->total_target_vs_actual += bits_off_for_this_layer;
4511 lc->buffer_level = lc->bits_off_target;
4515 /* Update bits left to the kf and gf groups to account for overshoot
4516 * or undershoot on these frames
4518 if (cm->frame_type == KEY_FRAME) {
4519 cpi->twopass.kf_group_bits +=
4520 cpi->this_frame_target - cpi->projected_frame_size;
4522 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4523 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4524 cpi->twopass.gf_group_bits +=
4525 cpi->this_frame_target - cpi->projected_frame_size;
4527 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4530 if (cm->frame_type != KEY_FRAME) {
4531 if (cpi->common.refresh_alt_ref_frame) {
4532 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4533 cpi->last_skip_probs_q[2] = cm->base_qindex;
4534 } else if (cpi->common.refresh_golden_frame) {
4535 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4536 cpi->last_skip_probs_q[1] = cm->base_qindex;
4538 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4539 cpi->last_skip_probs_q[0] = cm->base_qindex;
4541 /* update the baseline */
4542 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4546 #if 0 && CONFIG_INTERNAL_STATS
4548 FILE *f = fopen("tmp.stt", "a");
4550 vp8_clear_system_state();
4552 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4553 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4554 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4555 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4556 cpi->common.current_video_frame, cpi->this_frame_target,
4557 cpi->projected_frame_size,
4558 (cpi->projected_frame_size - cpi->this_frame_target),
4559 cpi->total_target_vs_actual,
4561 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4562 cpi->total_actual_bits, cm->base_qindex,
4563 cpi->active_best_quality, cpi->active_worst_quality,
4564 cpi->ni_av_qi, cpi->cq_target_quality,
4565 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4566 cm->frame_type, cpi->gfu_boost,
4567 cpi->twopass.est_max_qcorrection_factor,
4568 cpi->twopass.bits_left,
4569 cpi->twopass.total_left_stats.coded_error,
4570 (double)cpi->twopass.bits_left /
4571 cpi->twopass.total_left_stats.coded_error,
4572 cpi->tot_recode_hits);
4574 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4575 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4576 "%8.2lf %"PRId64" %10.3lf %8d\n",
4577 cpi->common.current_video_frame, cpi->this_frame_target,
4578 cpi->projected_frame_size,
4579 (cpi->projected_frame_size - cpi->this_frame_target),
4580 cpi->total_target_vs_actual,
4582 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4583 cpi->total_actual_bits, cm->base_qindex,
4584 cpi->active_best_quality, cpi->active_worst_quality,
4585 cpi->ni_av_qi, cpi->cq_target_quality,
4586 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4587 cm->frame_type, cpi->gfu_boost,
4588 cpi->twopass.est_max_qcorrection_factor,
4589 cpi->twopass.bits_left,
4590 cpi->twopass.total_left_stats.coded_error,
4591 cpi->tot_recode_hits);
4596 FILE *fmodes = fopen("Modes.stt", "a");
4598 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4599 cpi->common.current_video_frame,
4600 cm->frame_type, cm->refresh_golden_frame,
4601 cm->refresh_alt_ref_frame);
4603 fprintf(fmodes, "\n");
4611 if (cm->refresh_golden_frame == 1) {
4612 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4614 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4617 if (cm->refresh_alt_ref_frame == 1) {
4618 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4620 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4623 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4624 cpi->gold_is_last = 1;
4625 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4626 /* 1 refreshed but not the other */
4627 cpi->gold_is_last = 0;
4630 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4631 cpi->alt_is_last = 1;
4632 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4633 /* 1 refreshed but not the other */
4634 cpi->alt_is_last = 0;
4637 if (cm->refresh_alt_ref_frame &
4638 cm->refresh_golden_frame) { /* both refreshed */
4639 cpi->gold_is_alt = 1;
4640 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4641 /* 1 refreshed but not the other */
4642 cpi->gold_is_alt = 0;
4645 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4647 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4649 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4651 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4653 if (!cpi->oxcf.error_resilient_mode) {
4654 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4655 (cm->frame_type != KEY_FRAME)) {
4656 /* Update the alternate reference frame stats as appropriate. */
4657 update_alt_ref_frame_stats(cpi);
4659 /* Update the Golden frame stats as appropriate. */
4660 update_golden_frame_stats(cpi);
4664 if (cm->frame_type == KEY_FRAME) {
4665 /* Tell the caller that the frame was coded as a key frame */
4666 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4668 /* As this frame is a key frame the next defaults to an inter frame. */
4669 cm->frame_type = INTER_FRAME;
4671 cpi->last_frame_percent_intra = 100;
4673 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4675 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4678 /* Clear the one shot update flags for segmentation map and mode/ref
4679 * loop filter deltas.
4681 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4682 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4683 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4685 /* Dont increment frame counters if this was an altref buffer update
4688 if (cm->show_frame) {
4689 cm->current_video_frame++;
4690 cpi->frames_since_key++;
4691 cpi->temporal_pattern_counter++;
4694 /* reset to normal state now that we are done. */
4700 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4701 recon_file = fopen(filename, "wb");
4702 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4703 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4709 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4711 #if !CONFIG_REALTIME_ONLY
4712 static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
4713 unsigned char *dest_end, unsigned int *frame_flags) {
4714 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4716 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4717 cpi->twopass.bits_left -= 8 * (int)(*size);
4719 if (!cpi->common.refresh_alt_ref_frame) {
4720 double two_pass_min_rate =
4721 (double)(cpi->oxcf.target_bandwidth *
4722 cpi->oxcf.two_pass_vbrmin_section / 100);
4723 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4728 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
4729 YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
4731 struct vpx_usec_timer timer;
4734 vpx_usec_timer_start(&timer);
4736 /* Reinit the lookahead buffer if the frame size changes */
4737 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4738 assert(cpi->oxcf.lag_in_frames < 2);
4739 dealloc_raw_frame_buffers(cpi);
4740 alloc_raw_frame_buffers(cpi);
4743 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4744 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4747 vpx_usec_timer_mark(&timer);
4748 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4753 static int frame_is_reference(const VP8_COMP *cpi) {
4754 const VP8_COMMON *cm = &cpi->common;
4755 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4757 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4758 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4759 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4760 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4761 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
/* Top-level per-frame encode entry point.  Pulls the next source frame from
 * the lookahead, selects the encoding pass, produces one compressed frame
 * into dest[], and updates timing, rate-control and statistics state.
 * Returns 0 on success, -1 when no encoder/frame is available, or an error
 * code on internal failure.
 * NOTE(review): this block is an elided excerpt; lines are missing between
 * the numbered statements, so it is annotated in place rather than
 * restructured. */
4764 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
4765 size_t *size, unsigned char *dest,
4766 unsigned char *dest_end, int64_t *time_stamp,
4767 int64_t *time_end, int flush) {
4769 struct vpx_usec_timer tsctimer;
4770 struct vpx_usec_timer ticktimer;
4771 struct vpx_usec_timer cmptimer;
4772 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4774 if (!cpi) return -1;
/* Error trampoline: internal vpx_internal_error() longjmp()s back here. */
4778 if (setjmp(cpi->common.error.jmp)) {
4779 cpi->common.error.setjmp = 0;
4780 vp8_clear_system_state();
4781 return VPX_CODEC_CORRUPT_FRAME;
4784 cpi->common.error.setjmp = 1;
4786 vpx_usec_timer_start(&cmptimer);
4790 #if !CONFIG_REALTIME_ONLY
4791 /* Should we code an alternate reference frame */
4792 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4793 cpi->source_alt_ref_pending) {
4794 if ((cpi->source = vp8_lookahead_peek(
4795 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4796 cpi->alt_ref_source = cpi->source;
4797 if (cpi->oxcf.arnr_max_frames > 0) {
/* Temporally filter surrounding frames into alt_ref_buffer for the ARF. */
4798 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4799 force_src_buffer = &cpi->alt_ref_buffer;
4801 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4802 cm->refresh_alt_ref_frame = 1;
4803 cm->refresh_golden_frame = 0;
4804 cm->refresh_last_frame = 0;
4806 /* Clear Pending alt Ref flag. */
4807 cpi->source_alt_ref_pending = 0;
4808 cpi->is_src_frame_alt_ref = 0;
4814 /* Read last frame source if we are encoding first pass. */
4815 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4816 if ((cpi->last_source =
4817 vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
/* Pop the next frame to encode; falls through when the lookahead is empty. */
4822 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
4825 cpi->is_src_frame_alt_ref =
4826 cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);
4828 if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
4833 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4834 cpi->un_scaled_source = cpi->Source;
4835 *time_stamp = cpi->source->ts_start;
4836 *time_end = cpi->source->ts_end;
4837 *frame_flags = cpi->source->flags;
4839 if (cpi->pass == 1 && cm->current_video_frame > 0) {
4840 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4844 #if !CONFIG_REALTIME_ONLY
4846 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
4847 vp8_end_first_pass(cpi); /* get last stats packet */
4848 cpi->twopass.first_pass_done = 1;
4856 if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
4857 cpi->first_time_stamp_ever = cpi->source->ts_start;
4858 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
4861 /* adjust frame rates based on timestamps given */
4862 if (cm->show_frame) {
4863 int64_t this_duration;
4866 if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
4867 this_duration = cpi->source->ts_end - cpi->source->ts_start;
4870 int64_t last_duration;
4872 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
4873 last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
4874 /* do a step update if the duration changes by 10% */
4875 if (last_duration) {
4876 step = (int)(((this_duration - last_duration) * 10 / last_duration));
4880 if (this_duration) {
4882 cpi->ref_framerate = 10000000.0 / this_duration;
4884 double avg_duration, interval;
4886 /* Average this frame's rate into the last second's average
4887 * frame rate. If we haven't seen 1 second yet, then average
4888 * over the whole interval seen.
 */
4890 interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
4891 if (interval > 10000000.0) interval = 10000000;
4893 avg_duration = 10000000.0 / cpi->ref_framerate;
4894 avg_duration *= (interval - avg_duration + this_duration);
4895 avg_duration /= interval;
4897 cpi->ref_framerate = 10000000.0 / avg_duration;
4899 #if CONFIG_MULTI_RES_ENCODING
4900 if (cpi->oxcf.mr_total_resolutions > 1) {
4901 LOWER_RES_FRAME_INFO *low_res_frame_info =
4902 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
4903 // Frame rate should be the same for all spatial layers in
4904 // multi-res-encoding (simulcast), so we constrain the frame for
4905 // higher layers to be that of lowest resolution. This is needed
4906 // as the application may decide to skip encoding a high layer and
4907 // then start again, in which case a big jump in time-stamps will
4908 // be received for that high layer, which will yield an incorrect
4909 // frame rate (from time-stamp adjustment in above calculation).
4910 if (cpi->oxcf.mr_encoder_id) {
4911 cpi->ref_framerate = low_res_frame_info->low_res_framerate;
4913 // Keep track of frame rate for lowest resolution.
4914 low_res_frame_info->low_res_framerate = cpi->ref_framerate;
4918 if (cpi->oxcf.number_of_layers > 1) {
4921 /* Update frame rates for each layer */
4922 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
4923 for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
4925 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4926 lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
4929 vp8_new_framerate(cpi, cpi->ref_framerate);
4933 cpi->last_time_stamp_seen = cpi->source->ts_start;
4934 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
/* Temporal layers: pick the layer for this frame and restore its RC state. */
4937 if (cpi->oxcf.number_of_layers > 1) {
4940 update_layer_contexts(cpi);
4942 /* Restore layer specific context & set frame rate */
4943 if (cpi->temporal_layer_id >= 0) {
4944 layer = cpi->temporal_layer_id;
4948 .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
4950 restore_layer_context(cpi, layer);
4951 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
4954 if (cpi->compressor_speed == 2) {
4955 vpx_usec_timer_start(&tsctimer);
4956 vpx_usec_timer_start(&ticktimer);
4959 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;
4961 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
/* On-the-fly bitpacking: carve dest[] into per-token-partition buffers. */
4964 const int num_part = (1 << cm->multi_token_partition);
4965 /* the available bytes in dest */
4966 const unsigned long dest_size = dest_end - dest;
4967 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
4969 unsigned char *dp = dest;
4971 cpi->partition_d[0] = dp;
4972 dp += dest_size / 10; /* reserve 1/10 for control partition */
4973 cpi->partition_d_end[0] = dp;
4975 for (i = 0; i < num_part; ++i) {
4976 cpi->partition_d[i + 1] = dp;
4977 dp += tok_part_buff_size;
4978 cpi->partition_d_end[i + 1] = dp;
4983 /* start with a 0 size frame */
4986 /* Clear down mmx registers */
4987 vp8_clear_system_state();
4989 cm->frame_type = INTER_FRAME;
4990 cm->frame_flags = *frame_flags;
4994 if (cm->refresh_alt_ref_frame)
4996 cm->refresh_golden_frame = 0;
4997 cm->refresh_last_frame = 0;
5001 cm->refresh_golden_frame = 0;
5002 cm->refresh_last_frame = 1;
5006 /* find a free buffer for the new frame */
5009 for (; i < NUM_YV12_BUFFERS; ++i) {
5010 if (!cm->yv12_fb[i].flags) {
5016 assert(i < NUM_YV12_BUFFERS);
/* Dispatch on encoding pass: 1 = first-pass stats gathering, 2 = second
 * pass, default = single-pass / realtime encode. */
5018 switch (cpi->pass) {
5019 #if !CONFIG_REALTIME_ONLY
5020 case 1: Pass1Encode(cpi, size, dest, frame_flags); break;
5021 case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
5022 #endif // !CONFIG_REALTIME_ONLY
5024 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5028 if (cpi->compressor_speed == 2) {
5029 unsigned int duration, duration2;
5030 vpx_usec_timer_mark(&tsctimer);
5031 vpx_usec_timer_mark(&ticktimer);
5033 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5034 duration2 = (unsigned int)((double)duration / 2);
5036 if (cm->frame_type != KEY_FRAME) {
5037 if (cpi->avg_encode_time == 0) {
5038 cpi->avg_encode_time = duration;
/* Exponential moving average with weight 1/8 on the new sample. */
5040 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5046 if (cpi->avg_pick_mode_time == 0) {
5047 cpi->avg_pick_mode_time = duration2;
5049 cpi->avg_pick_mode_time =
5050 (7 * cpi->avg_pick_mode_time + duration2) >> 3;
/* If this frame did not commit its entropy updates, roll the context back. */
5056 if (cm->refresh_entropy_probs == 0) {
5057 memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5060 /* Save the contexts separately for alt ref, gold and last. */
5061 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5062 if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5064 if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5066 if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5068 /* if its a dropped frame honor the requests on subsequent frames */
5070 cpi->droppable = !frame_is_reference(cpi);
5072 /* return to normal state */
5073 cm->refresh_entropy_probs = 1;
5074 cm->refresh_alt_ref_frame = 0;
5075 cm->refresh_golden_frame = 0;
5076 cm->refresh_last_frame = 1;
5077 cm->frame_type = INTER_FRAME;
5080 /* Save layer specific state */
5081 if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi);
5083 vpx_usec_timer_mark(&cmptimer);
5084 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5086 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
5087 generate_psnr_packet(cpi);
/* Everything below is optional bookkeeping for internal quality metrics
 * (PSNR/SSIM accumulation) and debug output; it does not affect the
 * emitted bitstream. */
5090 #if CONFIG_INTERNAL_STATS
5092 if (cpi->pass != 1) {
5093 cpi->bytes += *size;
5095 if (cm->show_frame) {
5096 cpi->common.show_frame_mi = cpi->common.mi;
5099 if (cpi->b_calculate_psnr) {
5100 uint64_t ye, ue, ve;
5102 YV12_BUFFER_CONFIG *orig = cpi->Source;
5103 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5104 unsigned int y_width = cpi->common.Width;
5105 unsigned int y_height = cpi->common.Height;
5106 unsigned int uv_width = (y_width + 1) / 2;
5107 unsigned int uv_height = (y_height + 1) / 2;
5108 int y_samples = y_height * y_width;
5109 int uv_samples = uv_height * uv_width;
5110 int t_samples = y_samples + 2 * uv_samples;
5113 ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
5114 recon->y_stride, y_width, y_height);
5116 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
5117 recon->uv_stride, uv_width, uv_height);
5119 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
5120 recon->uv_stride, uv_width, uv_height);
5122 sq_error = (double)(ye + ue + ve);
5124 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5126 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5127 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5128 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5129 cpi->total_sq_error += sq_error;
5130 cpi->total += frame_psnr;
/* Second PSNR/SSIM measurement against the deblocked (post-processed)
 * reconstruction. */
5133 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5135 double frame_psnr2, frame_ssim2 = 0;
5138 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
5139 cm->filter_level * 10 / 6, 1, 0);
5140 vp8_clear_system_state();
5142 ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
5143 pp->y_stride, y_width, y_height);
5145 ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
5146 pp->uv_stride, uv_width, uv_height);
5148 ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
5149 pp->uv_stride, uv_width, uv_height);
5151 sq_error2 = (double)(ye + ue + ve);
5153 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5155 cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5156 cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5157 cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5158 cpi->total_sq_error2 += sq_error2;
5159 cpi->totalp += frame_psnr2;
5162 vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);
5164 cpi->summed_quality += frame_ssim2 * weight;
5165 cpi->summed_weights += weight;
5167 if (cpi->oxcf.number_of_layers > 1) {
/* Fold this frame's metrics into every layer at or above the current one. */
5170 for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
5171 cpi->frames_in_layer[i]++;
5173 cpi->bytes_in_layer[i] += *size;
5174 cpi->sum_psnr[i] += frame_psnr;
5175 cpi->sum_psnr_p[i] += frame_psnr2;
5176 cpi->total_error2[i] += sq_error;
5177 cpi->total_error2_p[i] += sq_error2;
5178 cpi->sum_ssim[i] += frame_ssim2 * weight;
5179 cpi->sum_weights[i] += weight;
5190 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5192 skiptruecount += cpi->skip_true_count;
5193 skipfalsecount += cpi->skip_false_count;
5201 FILE *f = fopen("skip.stt", "a");
5202 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5204 if (cpi->is_src_frame_alt_ref == 1)
5205 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5213 cpi->common.error.setjmp = 0;
5215 #if CONFIG_MULTITHREAD
5216 /* wait for the lpf thread done */
5217 if (cpi->b_multi_threaded && cpi->b_lpf_running) {
5218 sem_wait(&cpi->h_event_end_lpf);
5219 cpi->b_lpf_running = 0;
/* Return the most recent displayable frame in *dest for preview purposes.
 * Fails (returns non-zero) while an altref update is in flight, since that
 * frame is not intended for display.  With postproc enabled the frame is
 * run through vp8_post_proc_frame(); otherwise the reconstruction buffer
 * is shallow-copied and its visible dimensions patched up.
 * NOTE(review): the #if CONFIG_POSTPROC / #else structure of this excerpt
 * is elided; branch boundaries below are not all visible. */
5226 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
5227 vp8_ppflags_t *flags) {
5228 if (cpi->common.refresh_alt_ref_frame) {
5234 cpi->common.show_frame_mi = cpi->common.mi;
5235 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5239 if (cpi->common.frame_to_show) {
/* Shallow copy: dest aliases the encoder's reconstruction buffers. */
5240 *dest = *cpi->common.frame_to_show;
5241 dest->y_width = cpi->common.Width;
5242 dest->y_height = cpi->common.Height;
5243 dest->uv_height = cpi->common.Height / 2;
5250 vp8_clear_system_state();
/* Configure region-of-interest coding from a per-macroblock segment map.
 * map         - rows*cols bytes, one segment id per macroblock
 * delta_q     - per-segment quantizer deltas, external Q scale, |x| <= 63
 * delta_lf    - per-segment loop-filter deltas, |x| <= 63
 * threshold   - per-segment encode-breakout thresholds
 * Returns 0 on success, -1 on validation failure.
 * NOTE(review): early-return lines and the NULL-map (disable) branch are
 * elided from this excerpt; the checks below presumably return -1 and a
 * NULL map presumably disables segmentation — confirm against upstream. */
5255 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5256 unsigned int cols, int delta_q[4], int delta_lf[4],
5257 unsigned int threshold[4]) {
5258 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5259 int internal_delta_q[MAX_MB_SEGMENTS];
5260 const int range = 63;
5263 // This method is currently incompatible with the cyclic refresh method
5264 if (cpi->cyclic_refresh_mode_enabled) return -1;
5266 // Check number of rows and columns match
5267 if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
5271 // Range check the delta Q values and convert the external Q range values
5272 // to internal ones.
5273 if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
5274 (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) {
5278 // Range check the delta lf values
5279 if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
5280 (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) {
5285 disable_segmentation(cpi);
5289 // Translate the external delta q values to internal values.
5290 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
5291 internal_delta_q[i] =
5292 (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
5295 /* Set the segmentation Map */
5296 set_segmentation_map(cpi, map);
5298 /* Activate segmentation. */
5299 enable_segmentation(cpi);
5301 /* Set up the quant segment data */
5302 feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5303 feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5304 feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5305 feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5307 /* Set up the loop segment data s */
5308 feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5309 feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5310 feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5311 feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
5313 cpi->segment_encode_breakout[0] = threshold[0];
5314 cpi->segment_encode_breakout[1] = threshold[1];
5315 cpi->segment_encode_breakout[2] = threshold[2];
5316 cpi->segment_encode_breakout[3] = threshold[3];
5318 /* Initialise the feature data structure */
5319 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
5324 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
5325 unsigned int cols) {
5326 if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
5328 memcpy(cpi->active_map, map, rows * cols);
5329 cpi->active_map_enabled = 1;
5331 cpi->active_map_enabled = 0;
5340 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode,
5341 VPX_SCALING vert_mode) {
5342 if (horiz_mode <= ONETWO) {
5343 cpi->common.horiz_scale = horiz_mode;
5348 if (vert_mode <= ONETWO) {
5349 cpi->common.vert_scale = vert_mode;
5357 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
5361 unsigned char *src = source->y_buffer;
5362 unsigned char *dst = dest->y_buffer;
5364 /* Loop through the Y plane raw and reconstruction data summing
5365 * (square differences)
5367 for (i = 0; i < source->y_height; i += 16) {
5368 for (j = 0; j < source->y_width; j += 16) {
5370 Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
5374 src += 16 * source->y_stride;
5375 dst += 16 * dest->y_stride;
5381 int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }