/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>  // qsort()

#include "./vp9_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_scale/vpx_scale.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_thread_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_thread.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_dsubexp.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#include "vp9/decoder/vp9_reader.h"

#define MAX_VP9_HEADER_SIZE 80
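
// Compound (two-reference) prediction is only allowed when the reference
// frames do not all share the same sign bias, i.e. at least one reference
// lies on the other temporal side of the current frame. When it is allowed,
// setup_compound_reference_mode() picks the reference whose sign bias
// differs from the other two as the fixed compound reference and uses the
// remaining two as the variable compound references.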
static int is_compound_reference_allowed(const VP9_COMMON *cm) {
  int i;
  for (i = 1; i < REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
      return 1;

  return 0;
}

static void setup_compound_reference_mode(VP9_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
          cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
                 cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return len != 0 && len <= (size_t)(end - start);
}

static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}
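
// The transform mode is coded as a 2-bit literal; the largest literal value
// (ALLOW_32X32) is extended by one more bit to distinguish it from
// TX_MODE_SELECT. A rough sketch of the read, mirroring read_tx_mode():
//   TX_MODE mode = (TX_MODE)vp9_read_literal(r, 2);
//   if (mode == ALLOW_32X32) mode += vp9_read_bit(r);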
static TX_MODE read_tx_mode(vp9_reader *r) {
  TX_MODE tx_mode = vp9_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32)
    tx_mode += vp9_read_bit(r);
  return tx_mode;
}
static void read_tx_mode_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}
static REFERENCE_MODE read_frame_reference_mode(const VP9_COMMON *cm,
                                                vp9_reader *r) {
  if (is_compound_reference_allowed(cm)) {
    return vp9_read_bit(r) ? (vp9_read_bit(r) ? REFERENCE_MODE_SELECT
                                              : COMPOUND_REFERENCE)
                           : SINGLE_REFERENCE;
  } else {
    return SINGLE_REFERENCE;
  }
}
static void read_frame_reference_mode_probs(VP9_COMMON *cm, vp9_reader *r) {
  FRAME_CONTEXT *const fc = cm->fc;
  int i;

  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_inter_prob[i]);

  if (cm->reference_mode != COMPOUND_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i) {
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][1]);
    }

  if (cm->reference_mode != SINGLE_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_ref_prob[i]);
}
static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) {
  int i;
  for (i = 0; i < n; ++i)
    if (vp9_read(r, MV_UPDATE_PROB))
      p[i] = (vp9_read_literal(r, 7) << 1) | 1;
}
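
// Each motion-vector probability is updated only when a preceding bit coded
// with MV_UPDATE_PROB indicates a change; the new value is sent as a 7-bit
// literal and mapped to an odd probability in [1, 255] via (v << 1) | 1.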
static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) {
  int i, j;

  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
    update_mv_probs(comp_ctx->fp, 3, r);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      nmv_component *const comp_ctx = &ctx->comps[i];
      update_mv_probs(&comp_ctx->class0_hp, 1, r);
      update_mv_probs(&comp_ctx->hp, 1, r);
    }
  }
}
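
// Applies the inverse transform for one block and adds the result to the
// prediction in 'dst'. Lossless blocks use the 4x4 Walsh-Hadamard transform;
// other blocks use the hybrid DCT/ADST transforms selected by get_tx_type().
// After the transform, the dequantized coefficients are cleared; the amount
// cleared is reduced for small 'eob' values, since only coefficients that
// could actually have been written need to be zeroed.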
185 static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
186 TX_SIZE tx_size, uint8_t *dst, int stride,
188 struct macroblockd_plane *const pd = &xd->plane[plane];
190 TX_TYPE tx_type = DCT_DCT;
191 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
192 #if CONFIG_VP9_HIGHBITDEPTH
193 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
196 vp9_highbd_iwht4x4_add(dqcoeff, dst, stride, eob, xd->bd);
198 const PLANE_TYPE plane_type = pd->plane_type;
201 tx_type = get_tx_type_4x4(plane_type, xd, block);
202 vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
205 tx_type = get_tx_type(plane_type, xd);
206 vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
209 tx_type = get_tx_type(plane_type, xd);
210 vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
214 vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd);
217 assert(0 && "Invalid transform size");
223 vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
225 const PLANE_TYPE plane_type = pd->plane_type;
228 tx_type = get_tx_type_4x4(plane_type, xd, block);
229 vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
232 tx_type = get_tx_type(plane_type, xd);
233 vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
236 tx_type = get_tx_type(plane_type, xd);
237 vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
241 vp9_idct32x32_add(dqcoeff, dst, stride, eob);
244 assert(0 && "Invalid transform size");
252 vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
254 const PLANE_TYPE plane_type = pd->plane_type;
257 tx_type = get_tx_type_4x4(plane_type, xd, block);
258 vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
261 tx_type = get_tx_type(plane_type, xd);
262 vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
265 tx_type = get_tx_type(plane_type, xd);
266 vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
270 vp9_idct32x32_add(dqcoeff, dst, stride, eob);
273 assert(0 && "Invalid transform size");
277 #endif // CONFIG_VP9_HIGHBITDEPTH
280 memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
282 if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
283 memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
284 else if (tx_size == TX_32X32 && eob <= 34)
285 memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
287 memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
295 FRAME_COUNTS *counts;
300 static void predict_and_reconstruct_intra_block(int plane, int block,
301 BLOCK_SIZE plane_bsize,
302 TX_SIZE tx_size, void *arg) {
303 struct intra_args *const args = (struct intra_args *)arg;
304 VP9_COMMON *const cm = args->cm;
305 MACROBLOCKD *const xd = args->xd;
306 struct macroblockd_plane *const pd = &xd->plane[plane];
307 MODE_INFO *const mi = xd->mi[0];
308 const PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block)
312 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
313 dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];
315 vp9_predict_intra_block(xd, block >> (tx_size << 1),
316 b_width_log2_lookup[plane_bsize], tx_size, mode,
317 dst, pd->dst.stride, dst, pd->dst.stride,
320 if (!mi->mbmi.skip) {
321 const int eob = vp9_decode_block_tokens(cm, xd, args->counts, plane, block,
322 plane_bsize, x, y, tx_size,
323 args->r, args->seg_id);
324 inverse_transform_block(xd, plane, block, tx_size, dst, pd->dst.stride,
333 FRAME_COUNTS *counts;
338 static void reconstruct_inter_block(int plane, int block,
339 BLOCK_SIZE plane_bsize,
340 TX_SIZE tx_size, void *arg) {
341 struct inter_args *args = (struct inter_args *)arg;
342 VP9_COMMON *const cm = args->cm;
343 MACROBLOCKD *const xd = args->xd;
344 struct macroblockd_plane *const pd = &xd->plane[plane];
346 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
347 eob = vp9_decode_block_tokens(cm, xd, args->counts, plane, block, plane_bsize,
348 x, y, tx_size, args->r, args->seg_id);
349 inverse_transform_block(xd, plane, block, tx_size,
350 &pd->dst.buf[4 * y * pd->dst.stride + 4 * x],
351 pd->dst.stride, eob);
352 *args->eobtotal += eob;
355 static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
356 const TileInfo *const tile,
357 BLOCK_SIZE bsize, int mi_row, int mi_col) {
358 const int bw = num_8x8_blocks_wide_lookup[bsize];
359 const int bh = num_8x8_blocks_high_lookup[bsize];
360 const int x_mis = MIN(bw, cm->mi_cols - mi_col);
361 const int y_mis = MIN(bh, cm->mi_rows - mi_row);
362 const int offset = mi_row * cm->mi_stride + mi_col;
365 xd->mi = cm->mi_grid_visible + offset;
366 xd->mi[0] = &cm->mi[offset];
367 xd->mi[0]->mbmi.sb_type = bsize;
368 for (y = 0; y < y_mis; ++y)
369 for (x = !y; x < x_mis; ++x) {
370 xd->mi[y * cm->mi_stride + x] = xd->mi[0];
373 set_skip_context(xd, mi_row, mi_col);
375 // Distance of Mb to the various image edges. These are specified to 8th pel
376 // as they are always compared to values that are in 1/8th pel units
377 set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
379 vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
380 return &xd->mi[0]->mbmi;
383 static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
384 FRAME_COUNTS *counts,
385 const TileInfo *const tile,
386 int mi_row, int mi_col,
387 vp9_reader *r, BLOCK_SIZE bsize) {
388 VP9_COMMON *const cm = &pbi->common;
389 const int less8x8 = bsize < BLOCK_8X8;
390 MB_MODE_INFO *mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
391 vp9_read_mode_info(pbi, xd, counts, tile, mi_row, mi_col, r);
397 reset_skip_context(xd, bsize);
400 if (!is_inter_block(mbmi)) {
401 struct intra_args arg = {cm, xd, counts, r, mbmi->segment_id};
402 vp9_foreach_transformed_block(xd, bsize,
403 predict_and_reconstruct_intra_block, &arg);
406 vp9_dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col, bsize);
411 struct inter_args arg = {cm, xd, r, counts, &eobtotal, mbmi->segment_id};
412 vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
413 if (!less8x8 && eobtotal == 0)
414 mbmi->skip = 1; // skip loopfilter
418 xd->corrupted |= vp9_reader_has_error(r);
static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd,
                                     FRAME_COUNTS *counts, int hbs,
                                     int mi_row, int mi_col, BLOCK_SIZE bsize,
                                     vp9_reader *r) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;
  PARTITION_TYPE p;

  if (has_rows && has_cols)
    p = (PARTITION_TYPE)vp9_read_tree(r, vp9_partition_tree, probs);
  else if (!has_rows && has_cols)
    p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  else if (has_rows && !has_cols)
    p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
  else
    p = PARTITION_SPLIT;

  if (!cm->frame_parallel_decoding_mode)
    ++counts->partition[ctx][p];

  return p;
}
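
// Recursively decodes one partition of the superblock. Each node of the
// partition tree is split into NONE, HORZ, VERT or SPLIT sub-partitions;
// SPLIT recurses into the four quadrants, the others decode one or two
// blocks directly. The partition context is only updated for block sizes
// of 8x8 and above.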
446 static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
447 FRAME_COUNTS *counts,
448 const TileInfo *const tile,
449 int mi_row, int mi_col,
450 vp9_reader* r, BLOCK_SIZE bsize) {
451 VP9_COMMON *const cm = &pbi->common;
452 const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
453 PARTITION_TYPE partition;
454 BLOCK_SIZE subsize, uv_subsize;
456 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
459 partition = read_partition(cm, xd, counts, hbs, mi_row, mi_col, bsize, r);
460 subsize = get_subsize(bsize, partition);
461 uv_subsize = ss_size_lookup[subsize][cm->subsampling_x][cm->subsampling_y];
462 if (subsize >= BLOCK_8X8 && uv_subsize == BLOCK_INVALID)
463 vpx_internal_error(xd->error_info,
464 VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
465 if (subsize < BLOCK_8X8) {
466 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
470 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
473 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
474 if (mi_row + hbs < cm->mi_rows)
475 decode_block(pbi, xd, counts, tile, mi_row + hbs, mi_col, r, subsize);
478 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
479 if (mi_col + hbs < cm->mi_cols)
480 decode_block(pbi, xd, counts, tile, mi_row, mi_col + hbs, r, subsize);
482 case PARTITION_SPLIT:
483 decode_partition(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
484 decode_partition(pbi, xd, counts, tile, mi_row, mi_col + hbs, r,
486 decode_partition(pbi, xd, counts, tile, mi_row + hbs, mi_col, r,
488 decode_partition(pbi, xd, counts, tile, mi_row + hbs, mi_col + hbs, r,
492 assert(0 && "Invalid partition type");
496 // update partition context
497 if (bsize >= BLOCK_8X8 &&
498 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
499 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
502 static void setup_token_decoder(const uint8_t *data,
503 const uint8_t *data_end,
505 struct vpx_internal_error_info *error_info,
507 vpx_decrypt_cb decrypt_cb,
508 void *decrypt_state) {
509 // Validate the calculated partition length. If the buffer
510 // described by the partition can't be fully read, then restrict
511 // it to the portion that can be (for EC mode) or throw an error.
512 if (!read_is_valid(data, read_size, data_end))
513 vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
514 "Truncated packet or corrupt tile length");
516 if (vp9_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
517 vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
518 "Failed to allocate bool decoder %d", 1);
static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
                                   vp9_reader *r) {
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < PLANE_TYPES; ++i)
      for (j = 0; j < REF_TYPES; ++j)
        for (k = 0; k < COEF_BANDS; ++k)
          for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
            for (m = 0; m < UNCONSTRAINED_NODES; ++m)
              vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}

static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
                            vp9_reader *r) {
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    read_coef_probs_common(fc->coef_probs[tx_size], r);
}
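
// Reads the segmentation portion of the uncompressed header: the enable
// flag, an optional segmentation-map update (tree and temporal-prediction
// probabilities, each sent as an optional 8-bit literal), and an optional
// per-segment feature-data update in absolute or delta form.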
542 static void setup_segmentation(struct segmentation *seg,
543 struct vp9_read_bit_buffer *rb) {
547 seg->update_data = 0;
549 seg->enabled = vp9_rb_read_bit(rb);
553 // Segmentation map update
554 seg->update_map = vp9_rb_read_bit(rb);
555 if (seg->update_map) {
556 for (i = 0; i < SEG_TREE_PROBS; i++)
557 seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
560 seg->temporal_update = vp9_rb_read_bit(rb);
561 if (seg->temporal_update) {
562 for (i = 0; i < PREDICTION_PROBS; i++)
563 seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
566 for (i = 0; i < PREDICTION_PROBS; i++)
567 seg->pred_probs[i] = MAX_PROB;
571 // Segmentation data update
572 seg->update_data = vp9_rb_read_bit(rb);
573 if (seg->update_data) {
574 seg->abs_delta = vp9_rb_read_bit(rb);
576 vp9_clearall_segfeatures(seg);
578 for (i = 0; i < MAX_SEGMENTS; i++) {
579 for (j = 0; j < SEG_LVL_MAX; j++) {
581 const int feature_enabled = vp9_rb_read_bit(rb);
582 if (feature_enabled) {
583 vp9_enable_segfeature(seg, i, j);
584 data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
585 if (vp9_is_segfeature_signed(j))
586 data = vp9_rb_read_bit(rb) ? -data : data;
588 vp9_set_segdata(seg, i, j, data);
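
// Reads the loop filter parameters: a 6-bit filter level, a 3-bit sharpness
// level, and optional per-reference / per-mode filter level deltas coded as
// 6-bit signed literals.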
static void setup_loopfilter(struct loopfilter *lf,
                             struct vp9_read_bit_buffer *rb) {
  lf->filter_level = vp9_rb_read_literal(rb, 6);
  lf->sharpness_level = vp9_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      int i;

      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
    }
  }
}
static INLINE int read_delta_q(struct vp9_read_bit_buffer *rb) {
  return vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
}

static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vp9_read_bit_buffer *rb) {
  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  cm->y_dc_delta_q = read_delta_q(rb);
  cm->uv_dc_delta_q = read_delta_q(rb);
  cm->uv_ac_delta_q = read_delta_q(rb);
  cm->dequant_bit_depth = cm->bit_depth;
  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

#if CONFIG_VP9_HIGHBITDEPTH
  xd->bd = (int)cm->bit_depth;
#endif
}
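
// Builds the per-segment dequantization tables. Each segment's effective
// qindex comes from vp9_get_qindex(); index 0 of each table holds the DC
// dequantizer and index 1 the AC dequantizer for the plane.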
641 static void setup_segmentation_dequant(VP9_COMMON *const cm) {
642 // Build y/uv dequant values based on segmentation.
643 if (cm->seg.enabled) {
645 for (i = 0; i < MAX_SEGMENTS; ++i) {
646 const int qindex = vp9_get_qindex(&cm->seg, i, cm->base_qindex);
647 cm->y_dequant[i][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q,
649 cm->y_dequant[i][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
650 cm->uv_dequant[i][0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q,
652 cm->uv_dequant[i][1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q,
656 const int qindex = cm->base_qindex;
657 // When segmentation is disabled, only the first value is used. The
658 // remaining are don't cares.
659 cm->y_dequant[0][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
660 cm->y_dequant[0][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
661 cm->uv_dequant[0][0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q,
663 cm->uv_dequant[0][1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q,
static INTERP_FILTER read_interp_filter(struct vp9_read_bit_buffer *rb) {
  const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH,
                                              EIGHTTAP,
                                              EIGHTTAP_SHARP,
                                              BILINEAR };
  return vp9_rb_read_bit(rb) ? SWITCHABLE
                             : literal_to_filter[vp9_rb_read_literal(rb, 2)];
}

void vp9_read_frame_size(struct vp9_read_bit_buffer *rb,
                         int *width, int *height) {
  *width = vp9_rb_read_literal(rb, 16) + 1;
  *height = vp9_rb_read_literal(rb, 16) + 1;
}

static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    vp9_read_frame_size(rb, &cm->display_width, &cm->display_height);
}
static void resize_mv_buffer(VP9_COMMON *cm) {
  vpx_free(cm->cur_frame->mvs);
  cm->cur_frame->mi_rows = cm->mi_rows;
  cm->cur_frame->mi_cols = cm->mi_cols;
  cm->cur_frame->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
                                            sizeof(*cm->cur_frame->mvs));
}
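
// Resizes the mode-info and context buffers when the coded frame size
// changes. Buffers are only reallocated when the new dimensions exceed the
// previously allocated ones; otherwise the existing allocation is reused
// and reinitialized.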
698 static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
699 #if CONFIG_SIZE_LIMIT
700 if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
701 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
702 "Width and height beyond allowed size.");
704 if (cm->width != width || cm->height != height) {
705 const int new_mi_rows =
706 ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
707 const int new_mi_cols =
708 ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
710 // Allocations in vp9_alloc_context_buffers() depend on individual
711 // dimensions as well as the overall size.
712 if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
713 if (vp9_alloc_context_buffers(cm, width, height))
714 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
715 "Failed to allocate context buffers");
717 vp9_set_mb_mi(cm, width, height);
719 vp9_init_context_buffers(cm);
723 if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
724 cm->mi_cols > cm->cur_frame->mi_cols) {
725 resize_mv_buffer(cm);
729 static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
731 BufferPool *const pool = cm->buffer_pool;
732 vp9_read_frame_size(rb, &width, &height);
733 resize_context_buffers(cm, width, height);
734 setup_display_size(cm, rb);
736 lock_buffer_pool(pool);
737 if (vp9_realloc_frame_buffer(
738 get_frame_new_buffer(cm), cm->width, cm->height,
739 cm->subsampling_x, cm->subsampling_y,
740 #if CONFIG_VP9_HIGHBITDEPTH
741 cm->use_highbitdepth,
743 VP9_DEC_BORDER_IN_PIXELS,
745 &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
747 unlock_buffer_pool(pool);
748 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
749 "Failed to allocate frame buffer");
751 unlock_buffer_pool(pool);
753 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
754 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
755 pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
756 pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
                                          int ref_xss, int ref_yss,
                                          vpx_bit_depth_t this_bit_depth,
                                          int this_xss, int this_yss) {
  return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
         ref_yss == this_yss;
}
767 static void setup_frame_size_with_refs(VP9_COMMON *cm,
768 struct vp9_read_bit_buffer *rb) {
771 int has_valid_ref_frame = 0;
772 BufferPool *const pool = cm->buffer_pool;
773 for (i = 0; i < REFS_PER_FRAME; ++i) {
774 if (vp9_rb_read_bit(rb)) {
775 YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
776 width = buf->y_crop_width;
777 height = buf->y_crop_height;
784 vp9_read_frame_size(rb, &width, &height);
786 if (width <= 0 || height <= 0)
787 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
788 "Invalid frame size");
790 // Check to make sure at least one of frames that this frame references
791 // has valid dimensions.
792 for (i = 0; i < REFS_PER_FRAME; ++i) {
793 RefBuffer *const ref_frame = &cm->frame_refs[i];
794 has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width,
795 ref_frame->buf->y_crop_height,
798 if (!has_valid_ref_frame)
799 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
800 "Referenced frame has invalid size");
801 for (i = 0; i < REFS_PER_FRAME; ++i) {
802 RefBuffer *const ref_frame = &cm->frame_refs[i];
803 if (!valid_ref_frame_img_fmt(
804 ref_frame->buf->bit_depth,
805 ref_frame->buf->subsampling_x,
806 ref_frame->buf->subsampling_y,
810 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
811 "Referenced frame has incompatible color format");
814 resize_context_buffers(cm, width, height);
815 setup_display_size(cm, rb);
817 lock_buffer_pool(pool);
818 if (vp9_realloc_frame_buffer(
819 get_frame_new_buffer(cm), cm->width, cm->height,
820 cm->subsampling_x, cm->subsampling_y,
821 #if CONFIG_VP9_HIGHBITDEPTH
822 cm->use_highbitdepth,
824 VP9_DEC_BORDER_IN_PIXELS,
826 &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
828 unlock_buffer_pool(pool);
829 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
830 "Failed to allocate frame buffer");
832 unlock_buffer_pool(pool);
834 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
835 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
836 pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
837 pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  if (cm->log2_tile_cols > 6)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid number of tile columns");

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}
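
// Tiles are laid out in raster order in the bitstream. Every tile except
// the last is preceded by a 4-byte big-endian size field (see
// get_tile_buffer()); the last tile simply extends to the end of the frame
// data.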
typedef struct TileBuffer {
  const uint8_t *data;
  size_t size;
  int col;  // only used with multi-threaded decoding
} TileBuffer;

// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
868 static void get_tile_buffer(const uint8_t *const data_end,
870 struct vpx_internal_error_info *error_info,
871 const uint8_t **data,
872 vpx_decrypt_cb decrypt_cb, void *decrypt_state,
877 if (!read_is_valid(*data, 4, data_end))
878 vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
879 "Truncated packet or corrupt tile length");
883 decrypt_cb(decrypt_state, *data, be_data, 4);
884 size = mem_get_be32(be_data);
886 size = mem_get_be32(*data);
890 if (size > (size_t)(data_end - *data))
891 vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
892 "Truncated packet or corrupt tile size");
894 size = data_end - *data;
903 static void get_tile_buffers(VP9Decoder *pbi,
904 const uint8_t *data, const uint8_t *data_end,
905 int tile_cols, int tile_rows,
906 TileBuffer (*tile_buffers)[1 << 6]) {
909 for (r = 0; r < tile_rows; ++r) {
910 for (c = 0; c < tile_cols; ++c) {
911 const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
912 TileBuffer *const buf = &tile_buffers[r][c];
914 get_tile_buffer(data_end, is_last, &pbi->common.error, &data,
915 pbi->decrypt_cb, pbi->decrypt_state, buf);
920 static const uint8_t *decode_tiles(VP9Decoder *pbi,
922 const uint8_t *data_end) {
923 VP9_COMMON *const cm = &pbi->common;
924 const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
925 const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
926 const int tile_cols = 1 << cm->log2_tile_cols;
927 const int tile_rows = 1 << cm->log2_tile_rows;
928 TileBuffer tile_buffers[4][1 << 6];
929 int tile_row, tile_col;
931 TileData *tile_data = NULL;
933 if (cm->lf.filter_level && pbi->lf_worker.data1 == NULL) {
934 CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
935 vpx_memalign(32, sizeof(LFWorkerData)));
936 pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
937 if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
938 vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
939 "Loop filter thread creation failed");
943 if (cm->lf.filter_level) {
944 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
945 // Be sure to sync as we might be resuming after a failed frame decode.
946 winterface->sync(&pbi->lf_worker);
947 vp9_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
951 assert(tile_rows <= 4);
952 assert(tile_cols <= (1 << 6));
954 // Note: this memset assumes above_context[0], [1] and [2]
955 // are allocated as part of the same buffer.
956 memset(cm->above_context, 0,
957 sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);
959 memset(cm->above_seg_context, 0,
960 sizeof(*cm->above_seg_context) * aligned_cols);
962 get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
964 if (pbi->tile_data == NULL ||
965 (tile_cols * tile_rows) != pbi->total_tiles) {
966 vpx_free(pbi->tile_data);
970 vpx_memalign(32, tile_cols * tile_rows * (sizeof(*pbi->tile_data))));
971 pbi->total_tiles = tile_rows * tile_cols;
974 // Load all tile information into tile_data.
975 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
976 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
978 const TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
979 tile_data = pbi->tile_data + tile_cols * tile_row + tile_col;
981 tile_data->xd = pbi->mb;
982 tile_data->xd.corrupted = 0;
983 vp9_tile_init(&tile, tile_data->cm, tile_row, tile_col);
984 setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
985 &tile_data->bit_reader, pbi->decrypt_cb,
987 init_macroblockd(cm, &tile_data->xd);
991 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
993 vp9_tile_set_row(&tile, cm, tile_row);
994 for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
995 mi_row += MI_BLOCK_SIZE) {
996 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
997 const int col = pbi->inv_tile_order ?
998 tile_cols - tile_col - 1 : tile_col;
999 tile_data = pbi->tile_data + tile_cols * tile_row + col;
1000 vp9_tile_set_col(&tile, tile_data->cm, col);
1001 vp9_zero(tile_data->xd.left_context);
1002 vp9_zero(tile_data->xd.left_seg_context);
1003 for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
1004 mi_col += MI_BLOCK_SIZE) {
1005 decode_partition(pbi, &tile_data->xd, &cm->counts, &tile, mi_row,
1006 mi_col, &tile_data->bit_reader, BLOCK_64X64);
1008 pbi->mb.corrupted |= tile_data->xd.corrupted;
1009 if (pbi->mb.corrupted)
1010 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1011 "Failed to decode tile data");
1013 // Loopfilter one row.
1014 if (cm->lf.filter_level) {
1015 const int lf_start = mi_row - MI_BLOCK_SIZE;
1016 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
1018 // delay the loopfilter by 1 macroblock row.
1019 if (lf_start < 0) continue;
1021 // decoding has completed: finish up the loop filter in this thread.
1022 if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue;
1024 winterface->sync(&pbi->lf_worker);
1025 lf_data->start = lf_start;
1026 lf_data->stop = mi_row;
1027 if (pbi->max_threads > 1) {
1028 winterface->launch(&pbi->lf_worker);
1030 winterface->execute(&pbi->lf_worker);
    // After loopfiltering, the last 7 row pixels in each superblock row may
    // still be changed by the longest loopfilter of the next superblock
    // row.
    if (pbi->frame_parallel_decode)
      vp9_frameworker_broadcast(pbi->cur_buf,
                                mi_row << MI_BLOCK_SIZE_LOG2);
1042 // Loopfilter remaining rows in the frame.
1043 if (cm->lf.filter_level) {
1044 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
1045 winterface->sync(&pbi->lf_worker);
1046 lf_data->start = lf_data->stop;
1047 lf_data->stop = cm->mi_rows;
1048 winterface->execute(&pbi->lf_worker);
1051 // Get last tile data.
1052 tile_data = pbi->tile_data + tile_cols * tile_rows - 1;
1054 if (pbi->frame_parallel_decode)
1055 vp9_frameworker_broadcast(pbi->cur_buf, INT_MAX);
1056 return vp9_reader_find_end(&tile_data->bit_reader);
1059 static int tile_worker_hook(TileWorkerData *const tile_data,
1060 const TileInfo *const tile) {
1063 if (setjmp(tile_data->error_info.jmp)) {
1064 tile_data->error_info.setjmp = 0;
1065 tile_data->xd.corrupted = 1;
1069 tile_data->error_info.setjmp = 1;
1070 tile_data->xd.error_info = &tile_data->error_info;
1072 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
1073 mi_row += MI_BLOCK_SIZE) {
1074 vp9_zero(tile_data->xd.left_context);
1075 vp9_zero(tile_data->xd.left_seg_context);
1076 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
1077 mi_col += MI_BLOCK_SIZE) {
1078 decode_partition(tile_data->pbi, &tile_data->xd, &tile_data->counts,
1079 tile, mi_row, mi_col, &tile_data->bit_reader,
1083 return !tile_data->xd.corrupted;
// sorts in descending order
static int compare_tile_buffers(const void *a, const void *b) {
  const TileBuffer *const buf1 = (const TileBuffer*)a;
  const TileBuffer *const buf2 = (const TileBuffer*)b;
  return (int)(buf2->size - buf1->size);
}
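
// Multi-threaded tile decoding. Tile columns are sorted by size in
// descending order (larger tiles are presumably harder to decode) and then
// rearranged so that within each group of 'num_workers' tiles the largest
// one is handled by the main thread via execute() while the rest are
// launched on worker threads.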
1093 static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
1094 const uint8_t *data,
1095 const uint8_t *data_end) {
1096 VP9_COMMON *const cm = &pbi->common;
1097 const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
1098 const uint8_t *bit_reader_end = NULL;
1099 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
1100 const int tile_cols = 1 << cm->log2_tile_cols;
1101 const int tile_rows = 1 << cm->log2_tile_rows;
1102 const int num_workers = MIN(pbi->max_threads & ~1, tile_cols);
1103 TileBuffer tile_buffers[1][1 << 6];
1105 int final_worker = -1;
1107 assert(tile_cols <= (1 << 6));
1108 assert(tile_rows == 1);
1111 // TODO(jzern): See if we can remove the restriction of passing in max
1112 // threads to the decoder.
1113 if (pbi->num_tile_workers == 0) {
1114 const int num_threads = pbi->max_threads & ~1;
1116 // TODO(jzern): Allocate one less worker, as in the current code we only
1117 // use num_threads - 1 workers.
1118 CHECK_MEM_ERROR(cm, pbi->tile_workers,
1119 vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
1120 // Ensure tile data offsets will be properly aligned. This may fail on
1121 // platforms without DECLARE_ALIGNED().
1122 assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
1123 CHECK_MEM_ERROR(cm, pbi->tile_worker_data,
1124 vpx_memalign(32, num_threads *
1125 sizeof(*pbi->tile_worker_data)));
1126 CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
1127 vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
1128 for (i = 0; i < num_threads; ++i) {
1129 VP9Worker *const worker = &pbi->tile_workers[i];
1130 ++pbi->num_tile_workers;
1132 winterface->init(worker);
1133 if (i < num_threads - 1 && !winterface->reset(worker)) {
1134 vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
1135 "Tile decoder thread creation failed");
1140 // Reset tile decoding hook
1141 for (n = 0; n < num_workers; ++n) {
1142 VP9Worker *const worker = &pbi->tile_workers[n];
1143 winterface->sync(worker);
1144 worker->hook = (VP9WorkerHook)tile_worker_hook;
1145 worker->data1 = &pbi->tile_worker_data[n];
1146 worker->data2 = &pbi->tile_worker_info[n];
1149 // Note: this memset assumes above_context[0], [1] and [2]
1150 // are allocated as part of the same buffer.
1151 memset(cm->above_context, 0,
1152 sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
1153 memset(cm->above_seg_context, 0,
1154 sizeof(*cm->above_seg_context) * aligned_mi_cols);
1156 // Load tile data into tile_buffers
1157 get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
1159 // Sort the buffers based on size in descending order.
1160 qsort(tile_buffers[0], tile_cols, sizeof(tile_buffers[0][0]),
1161 compare_tile_buffers);
1163 // Rearrange the tile buffers such that per-tile group the largest, and
1164 // presumably the most difficult, tile will be decoded in the main thread.
1165 // This should help minimize the number of instances where the main thread is
1166 // waiting for a worker to complete.
1168 int group_start = 0;
1169 while (group_start < tile_cols) {
1170 const TileBuffer largest = tile_buffers[0][group_start];
1171 const int group_end = MIN(group_start + num_workers, tile_cols) - 1;
1172 memmove(tile_buffers[0] + group_start, tile_buffers[0] + group_start + 1,
1173 (group_end - group_start) * sizeof(tile_buffers[0][0]));
1174 tile_buffers[0][group_end] = largest;
1175 group_start = group_end + 1;
1179 // Initialize thread frame counts.
1180 if (!cm->frame_parallel_decoding_mode) {
1183 for (i = 0; i < num_workers; ++i) {
1184 TileWorkerData *const tile_data =
1185 (TileWorkerData*)pbi->tile_workers[i].data1;
1186 vp9_zero(tile_data->counts);
1191 while (n < tile_cols) {
1193 for (i = 0; i < num_workers && n < tile_cols; ++i) {
1194 VP9Worker *const worker = &pbi->tile_workers[i];
1195 TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
1196 TileInfo *const tile = (TileInfo*)worker->data2;
1197 TileBuffer *const buf = &tile_buffers[0][n];
1199 tile_data->pbi = pbi;
1200 tile_data->xd = pbi->mb;
1201 tile_data->xd.corrupted = 0;
1202 vp9_tile_init(tile, cm, 0, buf->col);
1203 setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
1204 &tile_data->bit_reader, pbi->decrypt_cb,
1205 pbi->decrypt_state);
1206 init_macroblockd(cm, &tile_data->xd);
1208 worker->had_error = 0;
1209 if (i == num_workers - 1 || n == tile_cols - 1) {
1210 winterface->execute(worker);
1212 winterface->launch(worker);
1215 if (buf->col == tile_cols - 1) {
1222 for (; i > 0; --i) {
1223 VP9Worker *const worker = &pbi->tile_workers[i - 1];
1224 // TODO(jzern): The tile may have specific error data associated with
1225 // its vpx_internal_error_info which could be propagated to the main info
1226 // in cm. Additionally once the threads have been synced and an error is
1227 // detected, there's no point in continuing to decode tiles.
1228 pbi->mb.corrupted |= !winterface->sync(worker);
1230 if (final_worker > -1) {
1231 TileWorkerData *const tile_data =
1232 (TileWorkerData*)pbi->tile_workers[final_worker].data1;
1233 bit_reader_end = vp9_reader_find_end(&tile_data->bit_reader);
1237 // Accumulate thread frame counts.
1238 if (n >= tile_cols && !cm->frame_parallel_decoding_mode) {
1239 for (i = 0; i < num_workers; ++i) {
1240 TileWorkerData *const tile_data =
1241 (TileWorkerData*)pbi->tile_workers[i].data1;
1242 vp9_accumulate_frame_counts(cm, &tile_data->counts, 1);
1247 return bit_reader_end;
static void error_handler(void *data) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}

int vp9_read_sync_code(struct vp9_read_bit_buffer *const rb) {
  return vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_0 &&
         vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_1 &&
         vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_2;
}

BITSTREAM_PROFILE vp9_read_profile(struct vp9_read_bit_buffer *rb) {
  int profile = vp9_rb_read_bit(rb);
  profile |= vp9_rb_read_bit(rb) << 1;
  if (profile > 2)
    profile += vp9_rb_read_bit(rb);
  return (BITSTREAM_PROFILE) profile;
}
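
// Reads bit depth, color space and chroma subsampling. Profiles 2 and 3
// signal 10- or 12-bit depth with one extra bit. For non-sRGB color spaces,
// profiles 1 and 3 carry explicit subsampling bits while profiles 0 and 2
// are fixed to 4:2:0; for sRGB, 4:4:4 is assumed and only profiles 1 and 3
// are accepted.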
1269 static void read_bitdepth_colorspace_sampling(
1270 VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
1271 if (cm->profile >= PROFILE_2) {
1272 cm->bit_depth = vp9_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
1273 #if CONFIG_VP9_HIGHBITDEPTH
1274 cm->use_highbitdepth = 1;
1277 cm->bit_depth = VPX_BITS_8;
1278 #if CONFIG_VP9_HIGHBITDEPTH
1279 cm->use_highbitdepth = 0;
1282 cm->color_space = vp9_rb_read_literal(rb, 3);
1283 if (cm->color_space != VPX_CS_SRGB) {
1284 vp9_rb_read_bit(rb); // [16,235] (including xvycc) vs [0,255] range
1285 if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
1286 cm->subsampling_x = vp9_rb_read_bit(rb);
1287 cm->subsampling_y = vp9_rb_read_bit(rb);
1288 if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
1289 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1290 "4:2:0 color not supported in profile 1 or 3");
1291 if (vp9_rb_read_bit(rb))
1292 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1293 "Reserved bit set");
1295 cm->subsampling_y = cm->subsampling_x = 1;
1298 if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
1299 // Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed.
1300 // 4:2:2 or 4:4:0 chroma sampling is not allowed.
1301 cm->subsampling_y = cm->subsampling_x = 0;
1302 if (vp9_rb_read_bit(rb))
1303 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1304 "Reserved bit set");
1306 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1307 "4:4:4 color not supported in profile 0 or 2");
1312 static size_t read_uncompressed_header(VP9Decoder *pbi,
1313 struct vp9_read_bit_buffer *rb) {
1314 VP9_COMMON *const cm = &pbi->common;
1315 BufferPool *const pool = cm->buffer_pool;
1316 RefCntBuffer *const frame_bufs = pool->frame_bufs;
1317 int i, mask, ref_index = 0;
1320 cm->last_frame_type = cm->frame_type;
1322 if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
1323 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1324 "Invalid frame marker");
1326 cm->profile = vp9_read_profile(rb);
1328 if (cm->profile >= MAX_PROFILES)
1329 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1330 "Unsupported bitstream profile");
1332 cm->show_existing_frame = vp9_rb_read_bit(rb);
1333 if (cm->show_existing_frame) {
1334 // Show an existing frame directly.
1335 const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
1336 lock_buffer_pool(pool);
1337 if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
1338 unlock_buffer_pool(pool);
1339 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1340 "Buffer %d does not contain a decoded frame",
1344 ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
1345 unlock_buffer_pool(pool);
1346 pbi->refresh_frame_flags = 0;
1347 cm->lf.filter_level = 0;
1350 if (pbi->frame_parallel_decode) {
1351 for (i = 0; i < REF_FRAMES; ++i)
1352 cm->next_ref_frame_map[i] = cm->ref_frame_map[i];
1357 cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
1358 cm->show_frame = vp9_rb_read_bit(rb);
1359 cm->error_resilient_mode = vp9_rb_read_bit(rb);
1361 if (cm->frame_type == KEY_FRAME) {
1362 if (!vp9_read_sync_code(rb))
1363 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1364 "Invalid frame sync code");
1366 read_bitdepth_colorspace_sampling(cm, rb);
1367 pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;
1369 for (i = 0; i < REFS_PER_FRAME; ++i) {
1370 cm->frame_refs[i].idx = INVALID_IDX;
1371 cm->frame_refs[i].buf = NULL;
1374 setup_frame_size(cm, rb);
1375 if (pbi->need_resync) {
1376 memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
1377 pbi->need_resync = 0;
1380 cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);
1382 cm->reset_frame_context = cm->error_resilient_mode ?
1383 0 : vp9_rb_read_literal(rb, 2);
1385 if (cm->intra_only) {
1386 if (!vp9_read_sync_code(rb))
1387 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1388 "Invalid frame sync code");
1389 if (cm->profile > PROFILE_0) {
1390 read_bitdepth_colorspace_sampling(cm, rb);
1392 // NOTE: The intra-only frame header does not include the specification
1393 // of either the color format or color sub-sampling in profile 0. VP9
1394 // specifies that the default color format should be YUV 4:2:0 in this
1395 // case (normative).
1396 cm->color_space = VPX_CS_BT_601;
1397 cm->subsampling_y = cm->subsampling_x = 1;
1398 cm->bit_depth = VPX_BITS_8;
1399 #if CONFIG_VP9_HIGHBITDEPTH
1400 cm->use_highbitdepth = 0;
1404 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
1405 setup_frame_size(cm, rb);
1406 if (pbi->need_resync) {
1407 memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
1408 pbi->need_resync = 0;
1410 } else if (pbi->need_resync != 1) { /* Skip if need resync */
1411 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
1412 for (i = 0; i < REFS_PER_FRAME; ++i) {
1413 const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
1414 const int idx = cm->ref_frame_map[ref];
1415 RefBuffer *const ref_frame = &cm->frame_refs[i];
1416 ref_frame->idx = idx;
1417 ref_frame->buf = &frame_bufs[idx].buf;
1418 cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
1421 setup_frame_size_with_refs(cm, rb);
1423 cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
1424 cm->interp_filter = read_interp_filter(rb);
1426 for (i = 0; i < REFS_PER_FRAME; ++i) {
1427 RefBuffer *const ref_buf = &cm->frame_refs[i];
1428 #if CONFIG_VP9_HIGHBITDEPTH
1429 vp9_setup_scale_factors_for_frame(&ref_buf->sf,
1430 ref_buf->buf->y_crop_width,
1431 ref_buf->buf->y_crop_height,
1432 cm->width, cm->height,
1433 cm->use_highbitdepth);
1435 vp9_setup_scale_factors_for_frame(&ref_buf->sf,
1436 ref_buf->buf->y_crop_width,
1437 ref_buf->buf->y_crop_height,
1438 cm->width, cm->height);
1443 #if CONFIG_VP9_HIGHBITDEPTH
1444 get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
1446 get_frame_new_buffer(cm)->color_space = cm->color_space;
1448 if (pbi->need_resync) {
1449 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1450 "Keyframe / intra-only frame required to reset decoder"
1454 if (!cm->error_resilient_mode) {
1455 cm->refresh_frame_context = vp9_rb_read_bit(rb);
1456 cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
1458 cm->refresh_frame_context = 0;
1459 cm->frame_parallel_decoding_mode = 1;
1462 // This flag will be overridden by the call to vp9_setup_past_independence
1463 // below, forcing the use of context 0 for those frame types.
1464 cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
1466 // Generate next_ref_frame_map.
1467 lock_buffer_pool(pool);
1468 for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
1470 cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
1471 ++frame_bufs[cm->new_fb_idx].ref_count;
1473 cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
1475 // Current thread holds the reference frame.
1476 if (cm->ref_frame_map[ref_index] >= 0)
1477 ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
1481 for (; ref_index < REF_FRAMES; ++ref_index) {
1482 cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
1483 // Current thread holds the reference frame.
1484 if (cm->ref_frame_map[ref_index] >= 0)
1485 ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
1487 unlock_buffer_pool(pool);
1488 pbi->hold_ref_buf = 1;
1490 if (frame_is_intra_only(cm) || cm->error_resilient_mode)
1491 vp9_setup_past_independence(cm);
1493 setup_loopfilter(&cm->lf, rb);
1494 setup_quantization(cm, &pbi->mb, rb);
1495 setup_segmentation(&cm->seg, rb);
1496 setup_segmentation_dequant(cm);
1498 setup_tile_info(cm, rb);
1499 sz = vp9_rb_read_literal(rb, 16);
1502 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1503 "Invalid header size");
1508 static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
1509 size_t partition_size) {
1510 VP9_COMMON *const cm = &pbi->common;
1511 MACROBLOCKD *const xd = &pbi->mb;
1512 FRAME_CONTEXT *const fc = cm->fc;
1516 if (vp9_reader_init(&r, data, partition_size, pbi->decrypt_cb,
1517 pbi->decrypt_state))
1518 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1519 "Failed to allocate bool decoder 0");
1521 cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
1522 if (cm->tx_mode == TX_MODE_SELECT)
1523 read_tx_mode_probs(&fc->tx_probs, &r);
1524 read_coef_probs(fc, cm->tx_mode, &r);
1526 for (k = 0; k < SKIP_CONTEXTS; ++k)
1527 vp9_diff_update_prob(&r, &fc->skip_probs[k]);
1529 if (!frame_is_intra_only(cm)) {
1530 nmv_context *const nmvc = &fc->nmvc;
1533 read_inter_mode_probs(fc, &r);
1535 if (cm->interp_filter == SWITCHABLE)
1536 read_switchable_interp_probs(fc, &r);
1538 for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
1539 vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);
1541 cm->reference_mode = read_frame_reference_mode(cm, &r);
1542 if (cm->reference_mode != SINGLE_REFERENCE)
1543 setup_compound_reference_mode(cm);
1544 read_frame_reference_mode_probs(cm, &r);
1546 for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
1547 for (i = 0; i < INTRA_MODES - 1; ++i)
1548 vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
1550 for (j = 0; j < PARTITION_CONTEXTS; ++j)
1551 for (i = 0; i < PARTITION_TYPES - 1; ++i)
1552 vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);
1554 read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
1557 return vp9_reader_has_error(&r);
1561 #define debug_check_frame_counts(cm) (void)0
1563 // Counts should only be incremented when frame_parallel_decoding_mode and
1564 // error_resilient_mode are disabled.
1565 static void debug_check_frame_counts(const VP9_COMMON *const cm) {
1566 FRAME_COUNTS zero_counts;
1567 vp9_zero(zero_counts);
1568 assert(cm->frame_parallel_decoding_mode || cm->error_resilient_mode);
1569 assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
1570 sizeof(cm->counts.y_mode)));
1571 assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
1572 sizeof(cm->counts.uv_mode)));
1573 assert(!memcmp(cm->counts.partition, zero_counts.partition,
1574 sizeof(cm->counts.partition)));
1575 assert(!memcmp(cm->counts.coef, zero_counts.coef,
1576 sizeof(cm->counts.coef)));
1577 assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
1578 sizeof(cm->counts.eob_branch)));
1579 assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
1580 sizeof(cm->counts.switchable_interp)));
1581 assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
1582 sizeof(cm->counts.inter_mode)));
1583 assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
1584 sizeof(cm->counts.intra_inter)));
1585 assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
1586 sizeof(cm->counts.comp_inter)));
1587 assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
1588 sizeof(cm->counts.single_ref)));
1589 assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
1590 sizeof(cm->counts.comp_ref)));
1591 assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
1592 assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
1593 assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
static struct vp9_read_bit_buffer* init_read_bit_buffer(
    VP9Decoder *pbi,
    struct vp9_read_bit_buffer *rb,
    const uint8_t *data,
    const uint8_t *data_end,
    uint8_t *clear_data /* buffer size MAX_VP9_HEADER_SIZE */) {
  rb->bit_offset = 0;
  rb->error_handler = error_handler;
  rb->error_handler_data = &pbi->common;
  if (pbi->decrypt_cb) {
    const int n = (int)MIN(MAX_VP9_HEADER_SIZE, data_end - data);
    pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
    rb->bit_buffer = clear_data;
    rb->bit_buffer_end = clear_data + n;
  } else {
    rb->bit_buffer = data;
    rb->bit_buffer_end = data_end;
  }
  return rb;
}
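
// Top-level frame decode: reads the uncompressed and compressed headers,
// decodes the tile data (multi-threaded when more than one tile column and
// more than one thread are available), runs the loop filter, and finally
// adapts the probability contexts unless error-resilient or
// frame-parallel-decoding mode disables backward adaptation.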
1618 void vp9_decode_frame(VP9Decoder *pbi,
1619 const uint8_t *data, const uint8_t *data_end,
1620 const uint8_t **p_data_end) {
1621 VP9_COMMON *const cm = &pbi->common;
1622 MACROBLOCKD *const xd = &pbi->mb;
1623 struct vp9_read_bit_buffer rb = { NULL, NULL, 0, NULL, 0};
1624 int context_updated = 0;
1625 uint8_t clear_data[MAX_VP9_HEADER_SIZE];
1626 const size_t first_partition_size = read_uncompressed_header(pbi,
1627 init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
1628 const int tile_rows = 1 << cm->log2_tile_rows;
1629 const int tile_cols = 1 << cm->log2_tile_cols;
1630 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
1631 xd->cur_buf = new_fb;
1633 if (!first_partition_size) {
1634 // showing a frame directly
1635 *p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
1639 data += vp9_rb_bytes_read(&rb);
1640 if (!read_is_valid(data, first_partition_size, data_end))
1641 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1642 "Truncated packet or corrupt header length");
1644 cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
1645 cm->width == cm->last_width &&
1646 cm->height == cm->last_height &&
1648 cm->last_show_frame;
1650 vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
1652 *cm->fc = cm->frame_contexts[cm->frame_context_idx];
1653 if (!cm->fc->initialized)
1654 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1655 "Uninitialized entropy context.");
1657 vp9_zero(cm->counts);
1660 new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
1661 if (new_fb->corrupted)
1662 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1663 "Decode failed. Frame data header is corrupted.");
1665 if (cm->lf.filter_level) {
1666 vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
1669 // If encoded in frame parallel mode, frame context is ready after decoding
1670 // the frame header.
1671 if (pbi->frame_parallel_decode && cm->frame_parallel_decoding_mode) {
1672 VP9Worker *const worker = pbi->frame_worker_owner;
1673 FrameWorkerData *const frame_worker_data = worker->data1;
1674 if (cm->refresh_frame_context) {
1675 context_updated = 1;
1676 cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
1678 vp9_frameworker_lock_stats(worker);
1679 pbi->cur_buf->row = -1;
1680 pbi->cur_buf->col = -1;
1681 frame_worker_data->frame_context_ready = 1;
1682 // Signal the main thread that context is ready.
1683 vp9_frameworker_signal_stats(worker);
1684 vp9_frameworker_unlock_stats(worker);
1687 if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) {
1688 // Multi-threaded tile decoder
1689 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
1690 if (!xd->corrupted) {
1691 // If multiple threads are used to decode tiles, then we use those threads
1692 // to do parallel loopfiltering.
1693 vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
1694 0, 0, pbi->tile_workers, pbi->num_tile_workers,
1697 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1698 "Decode failed. Frame data is corrupted.");
1702 *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
1705 if (!xd->corrupted) {
1706 if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
1707 vp9_adapt_coef_probs(cm);
1709 if (!frame_is_intra_only(cm)) {
1710 vp9_adapt_mode_probs(cm);
1711 vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
1714 debug_check_frame_counts(cm);
1717 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1718 "Decode failed. Frame data is corrupted.");
1721 // Non frame parallel update frame context here.
1722 if (cm->refresh_frame_context && !context_updated)
1723 cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
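
// Builds an extended copy of a reference block whose motion-compensated
// source region falls partly outside the reference frame: pixels outside
// the frame are filled by replicating the nearest edge pixel, so that the
// interpolation filters below can read a fully valid region.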
1726 static void build_mc_border(const uint8_t *src, int src_stride,
1727 uint8_t *dst, int dst_stride,
1728 int x, int y, int b_w, int b_h, int w, int h) {
1729 // Get a pointer to the start of the real data for this row.
1730 const uint8_t *ref_row = src - x - y * src_stride;
1733 ref_row += (h - 1) * src_stride;
1735 ref_row += y * src_stride;
1738 int right = 0, copy;
1739 int left = x < 0 ? -x : 0;
1745 right = x + b_w - w;
1750 copy = b_w - left - right;
1753 memset(dst, ref_row[0], left);
1756 memcpy(dst + left, ref_row + x + left, copy);
1759 memset(dst + left + copy, ref_row[w - 1], right);
1765 ref_row += src_stride;
1769 #if CONFIG_VP9_HIGHBITDEPTH
1770 static void high_build_mc_border(const uint8_t *src8, int src_stride,
1771 uint16_t *dst, int dst_stride,
1772 int x, int y, int b_w, int b_h,
1774 // Get a pointer to the start of the real data for this row.
1775 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
1776 const uint16_t *ref_row = src - x - y * src_stride;
1779 ref_row += (h - 1) * src_stride;
1781 ref_row += y * src_stride;
1784 int right = 0, copy;
1785 int left = x < 0 ? -x : 0;
1791 right = x + b_w - w;
1796 copy = b_w - left - right;
1799 vpx_memset16(dst, ref_row[0], left);
1802 memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
1805 vpx_memset16(dst + left + copy, ref_row[w - 1], right);
1811 ref_row += src_stride;
1814 #endif // CONFIG_VP9_HIGHBITDEPTH
1816 #if CONFIG_VP9_HIGHBITDEPTH
1817 static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
1818 int x0, int y0, int b_w, int b_h,
1819 int frame_width, int frame_height,
1821 uint8_t *const dst, int dst_buf_stride,
1822 int subpel_x, int subpel_y,
1823 const InterpKernel *kernel,
1824 const struct scale_factors *sf,
1826 int w, int h, int ref, int xs, int ys) {
1827 DECLARE_ALIGNED(16, uint16_t, mc_buf_high[80 * 2 * 80 * 2]);
1828 const uint8_t *buf_ptr;
1830 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1831 high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w,
1832 x0, y0, b_w, b_h, frame_width, frame_height);
1833 buf_ptr = CONVERT_TO_BYTEPTR(mc_buf_high) + border_offset;
1835 build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w,
1836 x0, y0, b_w, b_h, frame_width, frame_height);
1837 buf_ptr = ((uint8_t *)mc_buf_high) + border_offset;
1840 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1841 high_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
1842 subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
1844 inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
1845 subpel_y, sf, w, h, ref, kernel, xs, ys);
#else
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                               int x0, int y0, int b_w, int b_h,
                               int frame_width, int frame_height,
                               int border_offset,
                               uint8_t *const dst, int dst_buf_stride,
                               int subpel_x, int subpel_y,
                               const InterpKernel *kernel,
                               const struct scale_factors *sf,
                               int w, int h, int ref, int xs, int ys) {
  DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]);
  const uint8_t *buf_ptr;

  build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w,
                  x0, y0, b_w, b_h, frame_width, frame_height);
  buf_ptr = mc_buf + border_offset;

  inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
                  subpel_y, sf, w, h, ref, kernel, xs, ys);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
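// Per-block decoder-side motion compensation: computes the reference block
// position (with optional reference scaling), extends the border when the
// block reaches outside the frame, and, under frame-parallel decoding, waits
// for the needed reference rows before predicting.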
static void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd,
                                       int plane, int bw, int bh, int x,
                                       int y, int w, int h, int mi_x, int mi_y,
                                       const InterpKernel *kernel,
                                       const struct scale_factors *sf,
                                       struct buf_2d *pre_buf,
                                       struct buf_2d *dst_buf, const MV* mv,
                                       RefCntBuffer *ref_frame_buf,
                                       int is_scaled, int ref) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
  MV32 scaled_mv;
  int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height,
      buf_stride, subpel_x, subpel_y;
  uint8_t *ref_frame, *buf_ptr;

  // Get reference frame pointer, width and height.
  if (plane == 0) {
    frame_width = ref_frame_buf->buf.y_crop_width;
    frame_height = ref_frame_buf->buf.y_crop_height;
    ref_frame = ref_frame_buf->buf.y_buffer;
  } else {
    frame_width = ref_frame_buf->buf.uv_crop_width;
    frame_height = ref_frame_buf->buf.uv_crop_height;
    ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
                           : ref_frame_buf->buf.v_buffer;
  }

  if (is_scaled) {
    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, mv, bw, bh,
                                               pd->subsampling_x,
                                               pd->subsampling_y);
    // Co-ordinate of containing block to pixel precision.
    int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
    int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));

    // Co-ordinate of the block to 1/16th pixel precision.
    x0_16 = (x_start + x) << SUBPEL_BITS;
    y0_16 = (y_start + y) << SUBPEL_BITS;

    // Co-ordinate of current block in reference frame
    // to 1/16th pixel precision.
    x0_16 = sf->scale_value_x(x0_16, sf);
    y0_16 = sf->scale_value_y(y0_16, sf);

    // Map the top left corner of the block into the reference frame.
    x0 = sf->scale_value_x(x_start + x, sf);
    y0 = sf->scale_value_y(y_start + y, sf);

    // Scale the MV and incorporate the sub-pixel offset of the block
    // in the reference frame.
    scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
    xs = sf->x_step_q4;
    ys = sf->y_step_q4;
  } else {
    // Co-ordinate of containing block to pixel precision.
    x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
    y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;

    // Co-ordinate of the block to 1/16th pixel precision.
    x0_16 = x0 << SUBPEL_BITS;
    y0_16 = y0 << SUBPEL_BITS;

    scaled_mv.row = mv->row * (1 << (1 - pd->subsampling_y));
    scaled_mv.col = mv->col * (1 << (1 - pd->subsampling_x));
    xs = ys = 16;
  }
  subpel_x = scaled_mv.col & SUBPEL_MASK;
  subpel_y = scaled_mv.row & SUBPEL_MASK;

  // Calculate the top left corner of the best matching block in the
  // reference frame.
  x0 += scaled_mv.col >> SUBPEL_BITS;
  y0 += scaled_mv.row >> SUBPEL_BITS;
  x0_16 += scaled_mv.col;
  y0_16 += scaled_mv.row;

  // Get reference block pointer.
  buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
  buf_stride = pre_buf->stride;
  // Do border extension if there is motion or the
  // width/height is not a multiple of 8 pixels.
  if (is_scaled || scaled_mv.col || scaled_mv.row ||
      (frame_width & 0x7) || (frame_height & 0x7)) {
    // Get reference block bottom right vertical coordinate.
    int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;

    // Get reference block bottom right horizontal coordinate.
    int x1 = (x0_16 + (w - 1) * xs) >> SUBPEL_BITS;
    int x_pad = 0, y_pad = 0;

    if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
      x0 -= VP9_INTERP_EXTEND - 1;
      x1 += VP9_INTERP_EXTEND;
      x_pad = 1;
    }

    if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
      y0 -= VP9_INTERP_EXTEND - 1;
      y1 += VP9_INTERP_EXTEND;
      y_pad = 1;
    }

    // Wait until reference block is ready. Pad 7 more pixels as the last 7
    // pixels of each superblock row can be changed by the next superblock row.
    if (pbi->frame_parallel_decode)
      vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
                           MAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));

    // Skip border extension if block is inside the frame.
    if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
        y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
      // Extend the border.
      const uint8_t *const buf_ptr1 = ref_frame + y0 * buf_stride + x0;
      const int b_w = x1 - x0 + 1;
      const int b_h = y1 - y0 + 1;
      const int border_offset = y_pad * 3 * b_w + x_pad * 3;

      extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h,
                         frame_width, frame_height, border_offset,
                         dst, dst_buf->stride,
                         subpel_x, subpel_y,
                         kernel, sf,
#if CONFIG_VP9_HIGHBITDEPTH
                         xd,
#endif
                         w, h, ref, xs, ys);
      return;
    }
  } else {
    // Wait until reference block is ready. Pad 7 more pixels as the last 7
    // pixels of each superblock row can be changed by the next superblock row.
    if (pbi->frame_parallel_decode) {
      const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;
      vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
                           MAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
    }
  }
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                         subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
  } else {
    inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);
  }
#else
  inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                  subpel_y, sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH
}
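// Builds the inter predictors for a whole mode-info unit: loops over planes
// and references, handling sub-8x8 partitions by predicting each 4x4
// sub-block from the motion vector returned by average_split_mvs().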
void vp9_dec_build_inter_predictors_sb(VP9Decoder *const pbi, MACROBLOCKD *xd,
                                       int mi_row, int mi_col,
                                       BLOCK_SIZE bsize) {
  int plane;
  const int mi_x = mi_col * MI_SIZE;
  const int mi_y = mi_row * MI_SIZE;
  const MODE_INFO *mi = xd->mi[0];
  const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
  const BLOCK_SIZE sb_type = mi->mbmi.sb_type;
  const int is_compound = has_second_ref(&mi->mbmi);

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
                                                        &xd->plane[plane]);
    struct macroblockd_plane *const pd = &xd->plane[plane];
    struct buf_2d *const dst_buf = &pd->dst;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];

    const int bw = 4 * num_4x4_w;
    const int bh = 4 * num_4x4_h;
    int ref;

    for (ref = 0; ref < 1 + is_compound; ++ref) {
      const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
      struct buf_2d *const pre_buf = &pd->pre[ref];
      const int idx = xd->block_refs[ref]->idx;
      BufferPool *const pool = pbi->common.buffer_pool;
      RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
      const int is_scaled = vp9_is_scaled(sf);

      if (sb_type < BLOCK_8X8) {
        int i = 0, x, y;
        assert(bsize == BLOCK_8X8);
        for (y = 0; y < num_4x4_h; ++y) {
          for (x = 0; x < num_4x4_w; ++x) {
            const MV mv = average_split_mvs(pd, mi, ref, i++);
            dec_build_inter_predictors(pbi, xd, plane, bw, bh,
                                       4 * x, 4 * y, 4, 4, mi_x, mi_y, kernel,
                                       sf, pre_buf, dst_buf, &mv,
                                       ref_frame_buf, is_scaled, ref);
          }
        }
      } else {
        const MV mv = mi->mbmi.mv[ref].as_mv;
        dec_build_inter_predictors(pbi, xd, plane, bw, bh,
                                   0, 0, bw, bh, mi_x, mi_y, kernel,
                                   sf, pre_buf, dst_buf, &mv, ref_frame_buf,