/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_dsp/bitwriter_buffer.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_ports/system_state.h"

#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_tokenize.h"

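// Fixed token encodings for the symbol trees used below: each entry is a
// {bit pattern, length} pair giving the bits written for one symbol.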
static const struct vp9_token intra_mode_encodings[INTRA_MODES] = {
  {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7}
};
static const struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
    {{0, 1}, {2, 2}, {3, 2}};
static const struct vp9_token partition_encodings[PARTITION_TYPES] =
    {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
static const struct vp9_token inter_mode_encodings[INTER_MODES] =
    {{2, 2}, {6, 3}, {0, 1}, {7, 3}};

static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode,
                             const vpx_prob *probs) {
  vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}

static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode,
                             const vpx_prob *probs) {
  assert(is_inter_mode(mode));
  vp9_write_token(w, vp9_inter_mode_tree, probs,
                  &inter_mode_encodings[INTER_OFFSET(mode)]);
}

static void encode_unsigned_max(struct vpx_write_bit_buffer *wb,
                                int data, int max) {
  vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
}

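// Computes branch counts for |tree| from |counts| and conditionally writes
// a differential update for each of the n - 1 node probabilities.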
static void prob_diff_update(const vpx_tree_index *tree,
                             vpx_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vpx_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}

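// Signals the transform size chosen for the current block as a sequence of
// up to three bool-coded bits, stopping early once the maximum transform
// size allowed for the block is reached.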
static void write_selected_tx_size(const VP9_COMMON *cm,
                                   const MACROBLOCKD *xd, vpx_writer *w) {
  TX_SIZE tx_size = xd->mi[0]->tx_size;
  BLOCK_SIZE bsize = xd->mi[0]->sb_type;
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cm->fc->tx_probs);
  vpx_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vpx_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vpx_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}

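// Writes the skip flag unless the segment forces SEG_LVL_SKIP, in which case
// the flag is implicit; returns the skip value the decoder will infer.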
static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, vpx_writer *w) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->skip;
    vpx_write(w, skip, vp9_get_skip_prob(cm, xd));
    return skip;
  }
}

static void update_skip_probs(VP9_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}

static void update_switchable_interp_probs(VP9_COMMON *cm, vpx_writer *w,
                                           FRAME_COUNTS *counts) {
  int j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp9_switchable_interp_tree,
                     cm->fc->switchable_interp_prob[j],
                     counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}

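// Serializes the coefficient tokens for one block: EOB and zero runs are
// coded directly off the first two context-tree nodes, larger tokens go
// through the constrained tree plus category extra bits, and *tp is
// advanced past the EOSB marker on return.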
static void pack_mb_tokens(vpx_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *const stop,
                           vpx_bit_depth_t bit_depth) {
  const TOKENEXTRA *p;
  const vp9_extra_bit *const extra_bits =
#if CONFIG_VP9_HIGHBITDEPTH
      (bit_depth == VPX_BITS_12) ? vp9_extra_bits_high12 :
      (bit_depth == VPX_BITS_10) ? vp9_extra_bits_high10 :
                                   vp9_extra_bits;
#else
      vp9_extra_bits;
  (void) bit_depth;
#endif  // CONFIG_VP9_HIGHBITDEPTH

  for (p = *tp; p < stop && p->token != EOSB_TOKEN; ++p) {
    if (p->token == EOB_TOKEN) {
      vpx_write(w, 0, p->context_tree[0]);
      continue;
    }
    vpx_write(w, 1, p->context_tree[0]);
    while (p->token == ZERO_TOKEN) {
      vpx_write(w, 0, p->context_tree[1]);
      ++p;
      if (p == stop || p->token == EOSB_TOKEN) {
        *tp = (TOKENEXTRA*)(uintptr_t)p + (p->token == EOSB_TOKEN);
        return;
      }
    }

    {
      const int t = p->token;
      const vpx_prob *const context_tree = p->context_tree;
      assert(t != ZERO_TOKEN);
      assert(t != EOB_TOKEN);
      assert(t != EOSB_TOKEN);
      vpx_write(w, 1, context_tree[1]);
      if (t == ONE_TOKEN) {
        vpx_write(w, 0, context_tree[2]);
        vpx_write_bit(w, p->extra & 1);
      } else {  // t >= TWO_TOKEN && t < EOB_TOKEN
        const struct vp9_token *const a = &vp9_coef_encodings[t];
        const int v = a->value;
        const int n = a->len;
        const int e = p->extra;
        vpx_write(w, 1, context_tree[2]);
        vp9_write_tree(w, vp9_coef_con_tree,
                       vp9_pareto8_full[context_tree[PIVOT_NODE] - 1], v,
                       n - UNCONSTRAINED_NODES, 0);
        if (t >= CATEGORY1_TOKEN) {
          const vp9_extra_bit *const b = &extra_bits[t];
          const unsigned char *pb = b->prob;
          int v = e >> 1;
          int n = b->len;  // number of bits in v, assumed nonzero
          do {
            const int bb = (v >> --n) & 1;
            vpx_write(w, bb, *pb++);
          } while (n);
        }
        vpx_write_bit(w, e & 1);
      }
    }
  }
  *tp = (TOKENEXTRA*)(uintptr_t)p + (p->token == EOSB_TOKEN);
}

static void write_segment_id(vpx_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}

// This function encodes the reference frame
static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                             vpx_writer *w) {
  const MODE_INFO *const mi = xd->mi[0];
  const int is_compound = has_second_ref(mi);
  const int segment_id = mi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mi->ref_frame[0] ==
               get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vpx_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vpx_write(w, mi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mi->ref_frame[0] != LAST_FRAME;
      vpx_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mi->ref_frame[0] != GOLDEN_FRAME;
        vpx_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}

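// Writes the mode info for a non-key-frame block: segment id, skip flag,
// intra/inter flag and transform size, then either the intra modes or the
// reference frames, inter mode(s), interpolation filter and motion vectors.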
static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
                                vpx_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc->nmvc;
  const MACROBLOCK *const x = &cpi->td.mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const PREDICTION_MODE mode = mi->mode;
  const int segment_id = mi->segment_id;
  const BLOCK_SIZE bsize = mi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mi);
  const int is_compound = has_second_ref(mi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mi->seg_id_predicted;
      vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vpx_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cm, xd, segment_id, mi, w);

  if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vpx_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter && skip)) {
    write_selected_tx_size(cm, xd, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mi->uv_mode, cm->fc->uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi_ext->mode_context[mi->ref_frame[0]];
    const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);

    // If segment skip is not enabled code the mode.
    if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc->switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mi->interp_filter]);
      ++cpi->interp_filter_selected[0][mi->interp_filter];
    } else {
      assert(mi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi_ext->ref_mvs[mi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mi->mv[ref].as_mv,
                        &mbmi_ext->ref_mvs[mi->ref_frame[ref]][0].as_mv, nmvc,
                        allow_hp);
      }
    }
  }
}

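// Key-frame counterpart of pack_inter_mode_mvs(): intra modes only, with
// the Y mode probabilities conditioned on the above/left neighbor modes.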
static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO **mi_8x8, vpx_writer *w) {
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = mi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, mi->segment_id);

  write_skip(cm, xd, mi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cm, xd, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mi->uv_mode, vp9_kf_uv_mode_prob[mi->mode]);
}

static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vpx_writer *w, TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
                          int mi_row, int mi_col) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  MODE_INFO *m;

  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  cpi->td.mb.mbmi_ext = cpi->td.mb.mbmi_ext_base +
      (mi_row * cm->mi_cols + mi_col);

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cm, xd, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end, cm->bit_depth);
}

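// Codes the partition type; at frame edges where only some partitions are
// legal, only the distinguishing bit (or nothing at all) is written.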
static void write_partition(const VP9_COMMON *const cm,
                            const MACROBLOCKD *const xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vpx_prob *const probs = xd->partition_probs[ctx];
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vpx_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vpx_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}

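// Recursively writes one superblock quadrant: the partition symbol first,
// then either the leaf blocks or the four sub-quadrants.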
static void write_modes_sb(VP9_COMP *cpi,
                           const TileInfo *const tile, vpx_writer *w,
                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  const int bsl = b_width_log2_lookup[bsize];
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const MODE_INFO *m = NULL;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];

  partition = partition_lookup[bsl][m->sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void write_modes(VP9_COMP *cpi,
                        const TileInfo *const tile, vpx_writer *w,
                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  int mi_row, mi_col;

  set_partition_probs(cm, xd);

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}

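// Converts the raw coefficient token counts gathered during encoding into
// per-branch counts and, for the model's unconstrained nodes, binary
// probabilities for this frame.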
static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
                                    vp9_coeff_stats *coef_branch_ct,
                                    vp9_coeff_probs_model *coef_probs) {
  vp9_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}

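// Decides which coefficient probabilities are worth updating and writes the
// updates. TWO_LOOP first measures the total savings in a dry run before
// writing anything; ONE_LOOP_REDUCED commits updates in a single pass.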
static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp9_coeff_stats *frame_branch_ct,
                                     vp9_coeff_probs_model *new_coef_probs) {
  vp9_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
  const vpx_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  int stepsize = cpi->sf.coeff_prob_appx_step;

  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vpx_write_bit(bc, 0);
        return;
      }
      vpx_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vpx_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vpx_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case ONE_LOOP_REDUCED: {
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vpx_prob newp = new_coef_probs[i][j][k][l][t];
                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;

                if (t == PIVOT_NODE) {
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                } else {
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                }

                if (s > 0 && newp != *oldp)
                  u = 1;
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vpx_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vpx_write(bc, 0, upd);
                }
                vpx_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vpx_write_bit(bc, 0);  // no updates
      }
      return;
    }
    default:
      assert(0);
  }
}

static void update_coef_probs(VP9_COMP *cpi, vpx_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
    vp9_coeff_stats frame_branch_ct[PLANE_TYPES];
    vp9_coeff_probs_model frame_coef_probs[PLANE_TYPES];
    if (cpi->td.counts->tx.tx_totals[tx_size] <= 20 ||
        (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
      vpx_write_bit(w, 0);
    } else {
      build_tree_distribution(cpi, tx_size, frame_branch_ct,
                              frame_coef_probs);
      update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
                               frame_coef_probs);
    }
  }
}

static void encode_loopfilter(struct loopfilter *lf,
                              struct vpx_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vpx_wb_write_literal(wb, lf->filter_level, 6);
  vpx_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vpx_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vpx_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vpx_wb_write_bit(wb, delta < 0);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vpx_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vpx_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vpx_wb_write_bit(wb, delta < 0);
        }
      }
    }
  }
}

static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vpx_wb_write_bit(wb, 1);
    vpx_wb_write_literal(wb, abs(delta_q), 4);
    vpx_wb_write_bit(wb, delta_q < 0);
  } else {
    vpx_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(const VP9_COMMON *const cm,
                                struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}

static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd,
                                struct vpx_write_bit_buffer *wb) {
  int i, j;

  const struct segmentation *seg = &cm->seg;

  vpx_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vpx_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cm, xd);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vpx_wb_write_bit(wb, update);
      if (update)
        vpx_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vpx_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vpx_wb_write_bit(wb, update);
        if (update)
          vpx_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vpx_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vpx_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = segfeature_active(seg, i, j);
        vpx_wb_write_bit(wb, active);
        if (active) {
          const int data = get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vpx_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}

static void encode_txfm_probs(VP9_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
  // Mode
  vpx_write_literal(w, VPXMIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vpx_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}

static void write_interp_filter(INTERP_FILTER filter,
                                struct vpx_write_bit_buffer *wb) {
  const int filter_to_literal[] = { 1, 0, 2, 3 };

  vpx_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vpx_wb_write_literal(wb, filter_to_literal[filter], 2);
}

static void fix_interp_filter(VP9_COMMON *cm, FRAME_COUNTS *counts) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += counts->switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}

static void write_tile_info(const VP9_COMMON *const cm,
                            struct vpx_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vpx_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vpx_wb_write_bit(wb, 0);

  // rows
  vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
}

static int get_refresh_mask(VP9_COMP *cpi) {
  if (vp9_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot and,
    // if we're updating the GF with the current decoded frame, we save it
    // instead to the ARF slot.
    // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}

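// Packs each tile's modes and tokens with its own bool coder; every tile
// except the last is preceded by a 4-byte big-endian size so the decoder
// can locate tile boundaries.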
static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vpx_writer residual_bc;
  int tile_row, tile_col;
  TOKENEXTRA *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      int tile_idx = tile_row * tile_cols + tile_col;
      TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];

      tok_end = cpi->tile_tok[tile_row][tile_col] +
          cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vpx_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
                  &residual_bc, &tok, tok_end);
      assert(tok == tok_end);
      vpx_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}

static void write_render_size(const VP9_COMMON *cm,
                              struct vpx_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->render_width ||
                             cm->height != cm->render_height;
  vpx_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vpx_wb_write_literal(wb, cm->render_width - 1, 16);
    vpx_wb_write_literal(wb, cm->render_height - 1, 16);
  }
}

static void write_frame_size(const VP9_COMMON *cm,
                             struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, cm->width - 1, 16);
  vpx_wb_write_literal(wb, cm->height - 1, 16);

  write_render_size(cm, wb);
}

static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vpx_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);

    // Set "found" to 0 for temporal svc and for spatial svc key frame
    if (cpi->use_svc &&
        ((cpi->svc.number_temporal_layers > 1 &&
          cpi->oxcf.rc_mode == VPX_CBR) ||
         (cpi->svc.number_spatial_layers > 1 &&
          cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) ||
         (is_two_pass_svc(cpi) &&
          cpi->svc.encode_empty_frame_state == ENCODING &&
          cpi->svc.layer_context[0].frames_from_key_frame <
          cpi->svc.number_temporal_layers + 1))) {
      found = 0;
    } else if (cfg != NULL) {
      found = cm->width == cfg->y_crop_width &&
              cm->height == cfg->y_crop_height;
    }
    vpx_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vpx_wb_write_literal(wb, cm->width - 1, 16);
    vpx_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_render_size(cm, wb);
}

static void write_sync_code(struct vpx_write_bit_buffer *wb) {
  vpx_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vpx_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vpx_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}

static void write_profile(BITSTREAM_PROFILE profile,
                          struct vpx_write_bit_buffer *wb) {
  switch (profile) {
    case PROFILE_0:
      vpx_wb_write_literal(wb, 0, 2);
      break;
    case PROFILE_1:
      vpx_wb_write_literal(wb, 2, 2);
      break;
    case PROFILE_2:
      vpx_wb_write_literal(wb, 1, 2);
      break;
    case PROFILE_3:
      vpx_wb_write_literal(wb, 6, 3);
      break;
    default:
      assert(0);
  }
}

static void write_bitdepth_colorspace_sampling(
    VP9_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
  if (cm->profile >= PROFILE_2) {
    assert(cm->bit_depth > VPX_BITS_8);
    vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
  }
  vpx_wb_write_literal(wb, cm->color_space, 3);
  if (cm->color_space != VPX_CS_SRGB) {
    // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
    vpx_wb_write_bit(wb, cm->color_range);
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
      vpx_wb_write_bit(wb, cm->subsampling_x);
      vpx_wb_write_bit(wb, cm->subsampling_y);
      vpx_wb_write_bit(wb, 0);  // unused
    } else {
      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
    }
  } else {
    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
    vpx_wb_write_bit(wb, 0);  // unused
  }
}

static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vpx_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  vpx_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

  vpx_wb_write_bit(wb, 0);  // show_existing_frame
  vpx_wb_write_bit(wb, cm->frame_type);
  vpx_wb_write_bit(wb, cm->show_frame);
  vpx_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    write_sync_code(wb);
    write_bitdepth_colorspace_sampling(cm, wb);
    write_frame_size(cm, wb);
  } else {
    // In spatial svc if it's not error_resilient_mode then we need to code all
    // visible frames as invisible. But we need to keep the show_frame flag so
    // that the publisher could know whether it is supposed to be visible.
    // So we will code the show_frame flag as it is. Then code the intra_only
    // bit here. This will make the bitstream incompatible. In the player we
    // will change the show_frame flag to 0, then add a one-byte frame with the
    // show_existing_frame flag which tells the decoder which frame we want to
    // show.
    if (!cm->show_frame)
      vpx_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vpx_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);

      // Note for profile 0, 420 8bpp is assumed.
      if (cm->profile > PROFILE_0) {
        write_bitdepth_colorspace_sampling(cm, wb);
      }

      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
        vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vpx_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm, cpi->td.counts);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vpx_wb_write_bit(wb, cm->refresh_frame_context);
    vpx_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cm, xd, wb);

  write_tile_info(cm, wb);
}

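// Writes the compressed header: transform mode, coefficient and skip
// probability updates and, for inter frames, the mode, filter, reference
// and motion vector probability updates. Returns its size in bytes.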
static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = cpi->td.counts;
  vpx_writer header_bc;

  vpx_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc, counts);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc, counts);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
                       counts->inter_mode[i], INTER_MODES, &header_bc);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc, counts);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                counts->intra_inter[i]);

    if (cpi->allow_comp_inter_inter) {
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;

      vpx_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vpx_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      counts->comp_inter[i]);
      }
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  counts->single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  counts->single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  counts->comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc->y_mode_prob[i],
                       counts->y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       counts->partition[i], PARTITION_TYPES, &header_bc);

    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
                        &counts->mv);
  }

  vpx_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}

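// Assembles the final frame bitstream: uncompressed header, a 16-bit field
// holding the compressed header size (back-filled once known), the
// compressed header, then the tile data.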
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size;
  struct vpx_write_bit_buffer wb = {data, 0};
  struct vpx_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  vpx_wb_write_literal(&wb, 0, 16);  // don't yet know the first partition size

  uncompressed_hdr_size = vpx_wb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vpx_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vpx_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}