/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_dsp/bitwriter_buffer.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_ports/system_state.h"
#include "vp10/common/entropy.h"
#include "vp10/common/entropymode.h"
#include "vp10/common/entropymv.h"
#include "vp10/common/mvref_common.h"
#include "vp10/common/pred_common.h"
#include "vp10/common/seg_common.h"
#include "vp10/common/tile_common.h"

#include "vp10/encoder/cost.h"
#include "vp10/encoder/bitstream.h"
#include "vp10/encoder/encodemv.h"
#include "vp10/encoder/mcomp.h"
#include "vp10/encoder/segmentation.h"
#include "vp10/encoder/subexp.h"
#include "vp10/encoder/tokenize.h"
static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
  {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
  {62, 6}, {2, 2}
};
static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
    {{0, 1}, {2, 2}, {3, 2}};
static const struct vp10_token partition_encodings[PARTITION_TYPES] =
    {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
static const struct vp10_token inter_mode_encodings[INTER_MODES] =
    {{2, 2}, {6, 3}, {0, 1}, {7, 3}};
static const struct vp10_token palette_size_encodings[] = {
  {0, 1}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, {62, 6}, {63, 6},
};
static const struct vp10_token
palette_color_encodings[PALETTE_MAX_SIZE - 1][8] = {
  {{0, 1}, {1, 1}},                                            // 2 colors
  {{0, 1}, {2, 2}, {3, 2}},                                    // 3 colors
  {{0, 1}, {2, 2}, {6, 3}, {7, 3}},                            // 4 colors
  {{0, 1}, {2, 2}, {6, 3}, {14, 4}, {15, 4}},                  // 5 colors
  {{0, 1}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, {31, 5}},         // 6 colors
  {{0, 1}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, {62, 6}, {63, 6}},  // 7 colors
  {{0, 1}, {2, 2}, {6, 3}, {14, 4},
   {30, 5}, {62, 6}, {126, 7}, {127, 7}},                      // 8 colors
};
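// write_uniform() below emits a value v in [0, n) with a near-uniform code:
// the first m = 2^l - n values use l - 1 bits and the rest use l bits.
// Worked example: with n = 5 (l = 3, m = 3) the codewords for v = 0..4 are
// 00, 01, 10, 110, 111.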
static INLINE void write_uniform(vpx_writer *w, int n, int v) {
  int l = get_unsigned_bits(n);
  int m = (1 << l) - n;
  if (l == 0)
    return;
  if (v < m) {
    vpx_write_literal(w, v, l - 1);
  } else {
    vpx_write_literal(w, m + ((v - m) >> 1), l - 1);
    vpx_write_literal(w, (v - m) & 1, 1);
  }
}
static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode,
                             const vpx_prob *probs) {
  vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}
static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode,
                             const vpx_prob *probs) {
  assert(is_inter_mode(mode));
  vp10_write_token(w, vp10_inter_mode_tree, probs,
                   &inter_mode_encodings[INTER_OFFSET(mode)]);
}
static void encode_unsigned_max(struct vpx_write_bit_buffer *wb,
                                int data, int max) {
  vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
}
static void prob_diff_update(const vpx_tree_index *tree,
                             vpx_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vpx_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}
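// The selected transform size is sent as a truncated unary code: each
// vpx_write() below rules out one size, and the code is capped at the
// largest transform allowed for the block size (max_tx_size).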
static void write_selected_tx_size(const VP10_COMMON *cm,
                                   const MACROBLOCKD *xd, vpx_writer *w) {
  TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
  BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cm->fc->tx_probs);
  vpx_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vpx_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vpx_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}
static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, vpx_writer *w) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    vpx_write(w, skip, vp10_get_skip_prob(cm, xd));
    return skip;
  }
}
static void update_skip_probs(VP10_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}
static void update_switchable_interp_probs(VP10_COMMON *cm, vpx_writer *w,
                                           FRAME_COUNTS *counts) {
  int j;

  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp10_switchable_interp_tree,
                     cm->fc->switchable_interp_prob[j],
                     counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}
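// The first color index of a palette-coded block is signaled separately
// (see write_uniform() in write_palette_mode_info()), so only the remaining
// rows * cols - 1 indices are packed as tokens here.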
static void pack_palette_tokens(vpx_writer *w, TOKENEXTRA **tp,
                                BLOCK_SIZE bsize, int n) {
  int rows = 4 * num_4x4_blocks_high_lookup[bsize];
  int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
  int i;
  TOKENEXTRA *p = *tp;

  for (i = 0; i < rows * cols - 1; ++i) {
    vp10_write_token(w, vp10_palette_color_tree[n - 2], p->context_tree,
                     &palette_color_encodings[n - 2][p->token]);
    ++p;
  }

  *tp = p;
}
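// Each coefficient token is coded in up to three parts: a prefix drawn from
// vp10_coef_tree (split between the unconstrained nodes, which use
// per-context probabilities, and the pareto-modeled constrained nodes), the
// token's extra magnitude bits, and finally a sign bit.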
static void pack_mb_tokens(vpx_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *const stop,
                           vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
  TOKENEXTRA *p = *tp;
#if !CONFIG_MISC_FIXES
  (void) tx;
#endif

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp10_token *const a = &vp10_coef_encodings[t];
    int i = 0;
    int v = a->value;
    int n = a->len;
#if CONFIG_VP9_HIGHBITDEPTH
    const vp10_extra_bit *b;
    if (bit_depth == VPX_BITS_12)
      b = &vp10_extra_bits_high12[t];
    else if (bit_depth == VPX_BITS_10)
      b = &vp10_extra_bits_high10[t];
    else
      b = &vp10_extra_bits[t];
#else
    const vp10_extra_bit *const b = &vp10_extra_bits[t];
    (void) bit_depth;
#endif  // CONFIG_VP9_HIGHBITDEPTH

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains.  It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes.  The first treed write takes care of the
    // unconstrained nodes.  The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      vp10_write_tree(w, vp10_coef_tree, p->context_tree, bits, len, i);
      vp10_write_tree(w, vp10_coef_con_tree,
                      vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                      v, n - len, 0);
    } else {
      vp10_write_tree(w, vp10_coef_tree, p->context_tree, v, n, i);
    }

    if (b->base_val) {
      const int e = p->extra, l = b->len;
#if CONFIG_MISC_FIXES
      int skip_bits =
          (b->base_val == CAT6_MIN_VAL) ? TX_SIZES - 1 - tx : 0;
#else
      int skip_bits = 0;
#endif
      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          if (skip_bits) {
            skip_bits--;
            assert(!bb);
          } else {
            vpx_write(w, bb, pb[i >> 1]);
          }
          i = b->tree[i + bb];
        } while (n);
      }

      vpx_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p;
#if !CONFIG_MISC_FIXES
  (*tp)++;
#endif
}
static void write_segment_id(vpx_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp10_write_tree(w, vp10_segment_tree, seg->tree_probs, segment_id, 3, 0);
}
// This function encodes the reference frame
static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
                             vpx_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
           get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vpx_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vpx_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp10_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vpx_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vpx_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}
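// Writes the mode info for one inter-frame block in bitstream order:
// segment id, skip flag, intra/inter flag, transform size, then either the
// intra modes or the reference frames, inter mode(s), interpolation filter
// and motion vectors.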
static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
                                vpx_writer *w) {
  VP10_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc->nmvc;
  const MACROBLOCK *const x = &cpi->td.mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vpx_prob pred_prob = vp10_get_pred_prob_seg_id(seg, xd);
      vpx_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cm, xd, segment_id, mi, w);

  if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vpx_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter && skip)) {
    write_selected_tx_size(cm, xd, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
    const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);

    // If segment skip is not enabled code the mode.
    if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp10_get_pred_context_switchable_interp(xd);
      vp10_write_token(w, vp10_switchable_interp_tree,
                       cm->fc->switchable_interp_prob[ctx],
                       &switchable_interp_encodings[mbmi->interp_filter]);
      ++cpi->interp_filter_selected[0][mbmi->interp_filter];
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                             &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                             nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                         &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                         allow_hp);
      }
    }
  }
}
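// Palette signaling for a block: a use-palette bit (contexted on whether
// the above/left blocks used a palette), the palette size, the raw color
// values, and finally the first color index.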
static void write_palette_mode_info(const VP10_COMMON *cm,
                                    const MACROBLOCKD *xd,
                                    const MODE_INFO *const mi,
                                    vpx_writer *w) {
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const PALETTE_MODE_INFO *pmi = &mbmi->palette_mode_info;
  int palette_ctx = 0;
  int n, i;

  n = pmi->palette_size[0];
  if (above_mi)
    palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
  if (left_mi)
    palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
  vpx_write(w, n > 0,
            vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
  if (n > 0) {
    vp10_write_token(w, vp10_palette_size_tree,
                     vp10_default_palette_y_size_prob[bsize - BLOCK_8X8],
                     &palette_size_encodings[n - 2]);
    for (i = 0; i < n; ++i)
      vpx_write_literal(w, pmi->palette_colors[i],
                        cm->bit_depth);
    write_uniform(w, n, pmi->palette_first_color_idx[0]);
  }
}
static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO **mi_8x8, vpx_writer *w) {
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, mbmi->segment_id);

  write_skip(cm, xd, mbmi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cm, xd, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mbmi->uv_mode, vp10_kf_uv_mode_prob[mbmi->mode]);

  if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools &&
      mbmi->mode == DC_PRED)
    write_palette_mode_info(cm, xd, mi, w);
}
static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
                          vpx_writer *w, TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
                          int mi_row, int mi_col) {
  const VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  MODE_INFO *m;
  int plane;

  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cm, xd, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  if (m->mbmi.palette_mode_info.palette_size[0] > 0) {
    assert(*tok < tok_end);
    pack_palette_tokens(w, tok, m->mbmi.sb_type,
                        m->mbmi.palette_mode_info.palette_size[0]);
    assert(*tok < tok_end);
  }

  assert(*tok < tok_end);
  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    TX_SIZE tx = plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane])
                       : m->mbmi.tx_size;
    pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx);
    assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
    (*tok)++;
  }
}
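// Partition signaling: blocks fully inside the frame use the full 4-ary
// partition tree. At the bottom or right frame edge only SPLIT and the
// partition matching the edge direction are legal, so a single bit
// distinguishes them; in the bottom-right corner SPLIT is implied and
// nothing is coded.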
static void write_partition(const VP10_COMMON *const cm,
                            const MACROBLOCKD *const xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vpx_prob *const probs = xd->partition_probs[ctx];
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vpx_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vpx_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}
static void write_modes_sb(VP10_COMP *cpi,
                           const TileInfo *const tile, vpx_writer *w,
                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const MODE_INFO *m = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
static void write_modes(VP10_COMP *cpi,
                        const TileInfo *const tile, vpx_writer *w,
                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
  const VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  int mi_row, mi_col;

  set_partition_probs(cm, xd);

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp10_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}
static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
                                    vp10_coeff_stats *coef_branch_ct,
                                    vp10_coeff_probs_model *coef_probs) {
  vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp10_tree_probs_from_distribution(vp10_coef_tree,
                                            coef_branch_ct[i][j][k][l],
                                            coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}
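// Coefficient probability updates. In the TWO_LOOP strategy, a first "dry
// run" loop totals the rate savings of every candidate update; the updates
// are only written (in a second loop) if they pay for their own signaling
// cost. ONE_LOOP_REDUCED decides and writes in a single pass instead.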
static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp10_coeff_stats *frame_branch_ct,
                                     vp10_coeff_probs_model *new_coef_probs) {
  vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
  const vpx_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  int stepsize = cpi->sf.coeff_prob_appx_step;

  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp10_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp10_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp10_cost_zero(upd));
                else
                  savings -= (int)(vp10_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vpx_write_bit(bc, 0);
        return;
      }
      vpx_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vpx_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp10_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp10_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vpx_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp10_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case ONE_LOOP_REDUCED: {
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;

                if (t == PIVOT_NODE) {
                  s = vp10_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                } else {
                  s = vp10_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                }

                if (s > 0 && newp != *oldp)
                  u = 1;
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vpx_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vpx_write(bc, 0, upd);
                }
                vpx_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp10_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vpx_write_bit(bc, 0);  // no updates
      }
      return;
    }
    default:
      assert(0);
  }
}
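// A single "no update" bit is written for any transform size whose
// statistics are too sparse for updated probabilities to be worthwhile.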
static void update_coef_probs(VP10_COMP *cpi, vpx_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
    vp10_coeff_stats frame_branch_ct[PLANE_TYPES];
    vp10_coeff_probs_model frame_coef_probs[PLANE_TYPES];
    if (cpi->td.counts->tx.tx_totals[tx_size] <= 20 ||
        (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
      vpx_write_bit(w, 0);
    } else {
      build_tree_distribution(cpi, tx_size, frame_branch_ct,
                              frame_coef_probs);
      update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
                               frame_coef_probs);
    }
  }
}
static void encode_loopfilter(struct loopfilter *lf,
                              struct vpx_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vpx_wb_write_literal(wb, lf->filter_level, 6);
  vpx_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_FRAMES; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vpx_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vpx_wb_write_inv_signed_literal(wb, delta, 6);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vpx_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vpx_wb_write_inv_signed_literal(wb, delta, 6);
        }
      }
    }
  }
}
static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vpx_wb_write_bit(wb, 1);
    vpx_wb_write_inv_signed_literal(wb, delta_q, CONFIG_MISC_FIXES ? 6 : 4);
  } else {
    vpx_wb_write_bit(wb, 0);
  }
}
static void encode_quantization(const VP10_COMMON *const cm,
                                struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}
static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
                                struct vpx_write_bit_buffer *wb) {
  int i, j;

  const struct segmentation *seg = &cm->seg;

  vpx_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
    vpx_wb_write_bit(wb, seg->update_map);
  } else {
    assert(seg->update_map == 1);
  }
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp10_choose_segmap_coding_method(cm, xd);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vpx_wb_write_bit(wb, update);
      if (update)
        vpx_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
      vpx_wb_write_bit(wb, seg->temporal_update);
    } else {
      assert(seg->temporal_update == 0);
    }
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vpx_wb_write_bit(wb, update);
        if (update)
          vpx_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vpx_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vpx_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = segfeature_active(seg, i, j);
        vpx_wb_write_bit(wb, active);
        if (active) {
          const int data = get_segdata(seg, i, j);
          const int data_max = vp10_seg_feature_data_max(j);

          if (vp10_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vpx_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}
#if CONFIG_MISC_FIXES
static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_bit(wb, mode == TX_MODE_SELECT);
  if (mode != TX_MODE_SELECT)
    vpx_wb_write_literal(wb, mode, 2);
}
#endif
static void update_txfm_probs(VP10_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
#if !CONFIG_MISC_FIXES
  // Mode
  vpx_write_literal(w, VPXMIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vpx_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
#endif

  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
                                   ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
                                   ct_32x32p[j]);
    }
  }
}
static void write_interp_filter(INTERP_FILTER filter,
                                struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vpx_wb_write_literal(wb, filter, 2);
}
static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += counts->switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}
static void write_tile_info(const VP10_COMMON *const cm,
                            struct vpx_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vpx_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vpx_wb_write_bit(wb, 0);

  // rows
  vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
static int get_refresh_mask(VP10_COMP *cpi) {
  if (vp10_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot and,
    // if we're updating the GF with the current decoded frame, we save it
    // instead to the ARF slot.
    // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}
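// Tiles are written in raster order. Every tile except the last is prefixed
// with its size as a 4-byte little-endian value; with CONFIG_MISC_FIXES
// these prefixes may later be squeezed down to mag + 1 bytes by
// remux_tiles().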
static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
                           unsigned int *max_tile_sz) {
  VP10_COMMON *const cm = &cpi->common;
  vpx_writer residual_bc;
  int tile_row, tile_col;
  TOKENEXTRA *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  unsigned int max_tile = 0;

  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      int tile_idx = tile_row * tile_cols + tile_col;
      TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];

      tok_end = cpi->tile_tok[tile_row][tile_col] +
          cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vpx_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
                  &residual_bc, &tok, tok_end);
      assert(tok == tok_end);
      vpx_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_le32(data_ptr + total_size, residual_bc.pos);
        max_tile = max_tile > residual_bc.pos ? max_tile : residual_bc.pos;
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }
  *max_tile_sz = max_tile;

  return total_size;
}
static void write_render_size(const VP10_COMMON *cm,
                              struct vpx_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->render_width ||
                             cm->height != cm->render_height;
  vpx_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vpx_wb_write_literal(wb, cm->render_width - 1, 16);
    vpx_wb_write_literal(wb, cm->render_height - 1, 16);
  }
}
static void write_frame_size(const VP10_COMMON *cm,
                             struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, cm->width - 1, 16);
  vpx_wb_write_literal(wb, cm->height - 1, 16);

  write_render_size(cm, wb);
}
static void write_frame_size_with_refs(VP10_COMP *cpi,
                                       struct vpx_write_bit_buffer *wb) {
  VP10_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);

    found = cm->width == cfg->y_crop_width &&
            cm->height == cfg->y_crop_height;
#if CONFIG_MISC_FIXES
    found &= cm->render_width == cfg->render_width &&
             cm->render_height == cfg->render_height;
#endif
    vpx_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vpx_wb_write_literal(wb, cm->width - 1, 16);
    vpx_wb_write_literal(wb, cm->height - 1, 16);

#if CONFIG_MISC_FIXES
    write_render_size(cm, wb);
#endif
  }

#if !CONFIG_MISC_FIXES
  write_render_size(cm, wb);
#endif
}
static void write_sync_code(struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, VP10_SYNC_CODE_0, 8);
  vpx_wb_write_literal(wb, VP10_SYNC_CODE_1, 8);
  vpx_wb_write_literal(wb, VP10_SYNC_CODE_2, 8);
}
static void write_profile(BITSTREAM_PROFILE profile,
                          struct vpx_write_bit_buffer *wb) {
  switch (profile) {
    case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
    case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
    case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
    case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
    default: assert(0);
  }
}
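// Profiles 1 and 3 signal chroma subsampling explicitly; profiles 0 and 2
// imply 4:2:0. For sRGB content (4:4:4 only) no subsampling bits are sent.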
static void write_bitdepth_colorspace_sampling(
    VP10_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
  if (cm->profile >= PROFILE_2) {
    assert(cm->bit_depth > VPX_BITS_8);
    vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
  }
  vpx_wb_write_literal(wb, cm->color_space, 3);
  if (cm->color_space != VPX_CS_SRGB) {
    // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
    vpx_wb_write_bit(wb, cm->color_range);
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
      vpx_wb_write_bit(wb, cm->subsampling_x);
      vpx_wb_write_bit(wb, cm->subsampling_y);
      vpx_wb_write_bit(wb, 0);  // unused
    } else {
      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
    }
  } else {
    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
    vpx_wb_write_bit(wb, 0);  // unused
  }
}
static void write_uncompressed_header(VP10_COMP *cpi,
                                      struct vpx_write_bit_buffer *wb) {
  VP10_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  vpx_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

  vpx_wb_write_bit(wb, 0);  // show_existing_frame
  vpx_wb_write_bit(wb, cm->frame_type);
  vpx_wb_write_bit(wb, cm->show_frame);
  vpx_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    write_sync_code(wb);
    write_bitdepth_colorspace_sampling(cm, wb);
    write_frame_size(cm, wb);
    if (frame_is_intra_only(cm))
      vpx_wb_write_bit(wb, cm->allow_screen_content_tools);
  } else {
    if (!cm->show_frame)
      vpx_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode) {
#if CONFIG_MISC_FIXES
      if (cm->intra_only) {
        vpx_wb_write_bit(wb,
                         cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
      } else {
        vpx_wb_write_bit(wb,
                         cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
        if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
          vpx_wb_write_bit(wb,
                           cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
      }
#else
      static const int reset_frame_context_conv_tbl[3] = { 0, 2, 3 };

      vpx_wb_write_literal(wb,
          reset_frame_context_conv_tbl[cm->reset_frame_context], 2);
#endif
    }

    if (cm->intra_only) {
      write_sync_code(wb);

      // Note for profile 0, 420 8bpp is assumed.
      if (cm->profile > PROFILE_0) {
        write_bitdepth_colorspace_sampling(cm, wb);
      }

      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
        vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vpx_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm, cpi->td.counts);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vpx_wb_write_bit(wb,
                     cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF);
#if CONFIG_MISC_FIXES
    if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF)
#endif
      vpx_wb_write_bit(wb, cm->refresh_frame_context !=
                               REFRESH_FRAME_CONTEXT_BACKWARD);
  }

  vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cm, xd, wb);
#if CONFIG_MISC_FIXES
  if (xd->lossless)
    cm->tx_mode = TX_4X4;
  else
    write_txfm_mode(cm->tx_mode, wb);
  if (cpi->allow_comp_inter_inter) {
    const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
    const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;

    vpx_wb_write_bit(wb, use_hybrid_pred);
    if (!use_hybrid_pred)
      vpx_wb_write_bit(wb, use_compound_pred);
  }
#endif

  write_tile_info(cm, wb);
}
static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
  VP10_COMMON *const cm = &cpi->common;
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = cpi->td.counts;
  vpx_writer header_bc;

  vpx_start_encode(&header_bc, data);

#if !CONFIG_MISC_FIXES
  if (cpi->td.mb.e_mbd.lossless)
    cm->tx_mode = TX_4X4;
  else
    update_txfm_probs(cm, &header_bc, counts);
#else
  update_txfm_probs(cm, &header_bc, counts);
#endif
  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc, counts);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
                       counts->inter_mode[i], INTER_MODES, &header_bc);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc, counts);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp10_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                 counts->intra_inter[i]);

    if (cpi->allow_comp_inter_inter) {
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
#if !CONFIG_MISC_FIXES
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;

      vpx_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vpx_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                       counts->comp_inter[i]);
      }
#else
      if (use_hybrid_pred)
        for (i = 0; i < COMP_INTER_CONTEXTS; i++)
          vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                     counts->comp_inter[i]);
#endif
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                   counts->single_ref[i][0]);
        vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                   counts->single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp10_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                   counts->comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
                       counts->y_mode[i], INTRA_MODES, &header_bc);

#if CONFIG_MISC_FIXES
    for (i = 0; i < INTRA_MODES; ++i)
      prob_diff_update(vp10_intra_mode_tree, cm->fc->uv_mode_prob[i],
                       counts->uv_mode[i], INTRA_MODES, &header_bc);
#endif

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
                       counts->partition[i], PARTITION_TYPES, &header_bc);

    vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
                         &counts->mv);
  }

  vpx_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}
#if CONFIG_MISC_FIXES
static int remux_tiles(uint8_t *dest, const int sz,
                       const int n_tiles, const int mag) {
  int rpos = 0, wpos = 0, n;

  for (n = 0; n < n_tiles; n++) {
    int tile_sz;

    if (n == n_tiles - 1) {
      tile_sz = sz - rpos;
    } else {
      tile_sz = mem_get_le32(&dest[rpos]);
      rpos += 4;
      switch (mag) {
        case 0: dest[wpos] = tile_sz; break;
        case 1: mem_put_le16(&dest[wpos], tile_sz); break;
        case 2: mem_put_le24(&dest[wpos], tile_sz); break;
        case 3:  // remuxing should only happen if mag < 3
        default: assert("Invalid value for tile size magnitude" && 0);
      }
      wpos += mag + 1;
    }

    memmove(&dest[wpos], &dest[rpos], tile_sz);
    wpos += tile_sz;
    rpos += tile_sz;
  }

  assert(rpos > wpos);

  return wpos;
}
#endif
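// Final packet layout: the uncompressed header, then a 2-bit tile-size
// magnitude (only when tiles are present) and the 16-bit compressed-header
// size back-filled via saved_wb, then the compressed header and tile data.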
void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size) {
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size, data_sz;
  struct vpx_write_bit_buffer wb = {data, 0};
  struct vpx_write_bit_buffer saved_wb;
  unsigned int max_tile;
#if CONFIG_MISC_FIXES
  VP10_COMMON *const cm = &cpi->common;
  const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols;
  const int have_tiles = n_log2_tiles > 0;
#else
  const int have_tiles = 0;  // we have tiles, but we don't want to write a
                             // tile size marker in the header
#endif

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  // The first part size is not known in advance; reserve room and
  // back-fill it below.
  vpx_wb_write_literal(&wb, 0, 16 + have_tiles * 2);

  uncompressed_hdr_size = vpx_wb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vpx_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;

  data_sz = encode_tiles(cpi, data, &max_tile);
#if CONFIG_MISC_FIXES
  if (max_tile > 0) {
    int mag;
    unsigned int mask;

    // Choose the (tile size) magnitude
    for (mag = 0, mask = 0xff; mag < 4; mag++) {
      if (max_tile <= mask)
        break;
      mask <<= 8;
      mask |= 0xff;
    }
    assert(n_log2_tiles > 0);
    vpx_wb_write_literal(&saved_wb, mag, 2);
    if (mag < 3)
      data_sz = remux_tiles(data, data_sz, 1 << n_log2_tiles, mag);
  } else {
    assert(n_log2_tiles == 0);
  }
#endif
  data += data_sz;

  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vpx_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  *size = data - dest;
}