/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>  // qsort()

#include "./vp9_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_scale/vpx_scale.h"
21 #include "vp9/common/vp9_alloccommon.h"
22 #include "vp9/common/vp9_common.h"
23 #include "vp9/common/vp9_entropy.h"
24 #include "vp9/common/vp9_entropymode.h"
25 #include "vp9/common/vp9_idct.h"
26 #include "vp9/common/vp9_thread_common.h"
27 #include "vp9/common/vp9_pred_common.h"
28 #include "vp9/common/vp9_quant_common.h"
29 #include "vp9/common/vp9_reconintra.h"
30 #include "vp9/common/vp9_reconinter.h"
31 #include "vp9/common/vp9_seg_common.h"
32 #include "vp9/common/vp9_thread.h"
33 #include "vp9/common/vp9_tile_common.h"
35 #include "vp9/decoder/vp9_decodeframe.h"
36 #include "vp9/decoder/vp9_detokenize.h"
37 #include "vp9/decoder/vp9_decodemv.h"
38 #include "vp9/decoder/vp9_decoder.h"
39 #include "vp9/decoder/vp9_dsubexp.h"
40 #include "vp9/decoder/vp9_read_bit_buffer.h"
41 #include "vp9/decoder/vp9_reader.h"
43 #define MAX_VP9_HEADER_SIZE 80
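// Compound prediction is only allowed when the reference frames do not all
// share the same sign bias, i.e. at least one of them lies on the other
// temporal side of the current frame.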
static int is_compound_reference_allowed(const VP9_COMMON *cm) {
  int i;
  for (i = 1; i < REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
      return 1;
  return 0;
}
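// When compound references are in use, the reference whose sign bias differs
// from the other two becomes the fixed reference; the remaining two are the
// variable references a block can choose between.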
static void setup_compound_reference_mode(VP9_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
          cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
                 cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return len != 0 && len <= (size_t)(end - start);
}
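// Reads a value coded with just enough bits for 'max' and clamps the result;
// e.g. with max = 6 this reads 3 bits and maps a decoded 7 back to 6.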
static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}
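// The transform mode is a 2-bit literal; the top value (ALLOW_32X32) is
// extended by one more bit to distinguish ALLOW_32X32 from TX_MODE_SELECT.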
static TX_MODE read_tx_mode(vp9_reader *r) {
  TX_MODE tx_mode = vp9_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32)
    tx_mode += vp9_read_bit(r);
  return tx_mode;
}
static void read_tx_mode_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}
static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}
static REFERENCE_MODE read_frame_reference_mode(const VP9_COMMON *cm,
                                                vp9_reader *r) {
  if (is_compound_reference_allowed(cm)) {
    return vp9_read_bit(r) ? (vp9_read_bit(r) ? REFERENCE_MODE_SELECT
                                              : COMPOUND_REFERENCE)
                           : SINGLE_REFERENCE;
  } else {
    return SINGLE_REFERENCE;
  }
}
static void read_frame_reference_mode_probs(VP9_COMMON *cm, vp9_reader *r) {
  FRAME_CONTEXT *const fc = cm->fc;
  int i;

  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_inter_prob[i]);

  if (cm->reference_mode != COMPOUND_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i) {
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][1]);
    }

  if (cm->reference_mode != SINGLE_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_ref_prob[i]);
}
static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) {
  int i;
  for (i = 0; i < n; ++i)
    if (vp9_read(r, MV_UPDATE_PROB))
      p[i] = (vp9_read_literal(r, 7) << 1) | 1;
}
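// MV probabilities are updated per component (row, then column): sign, class,
// class-0, offset bits, then the fractional-pel probabilities and, when high
// precision is allowed, the 1/8-pel probabilities.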
static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) {
  int i, j;

  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
    update_mv_probs(comp_ctx->fp, 3, r);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      nmv_component *const comp_ctx = &ctx->comps[i];
      update_mv_probs(&comp_ctx->class0_hp, 1, r);
      update_mv_probs(&comp_ctx->hp, 1, r);
    }
  }
}
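// Applies the inverse transform selected by tx_size (and, for ADST-capable
// sizes, the per-block tx_type), then clears only as many dequantized
// coefficients as the eob could have touched so the buffer is clean for the
// next block.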
184 static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
185 TX_SIZE tx_size, uint8_t *dst, int stride,
187 struct macroblockd_plane *const pd = &xd->plane[plane];
189 TX_TYPE tx_type = DCT_DCT;
190 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
191 #if CONFIG_VP9_HIGHBITDEPTH
192 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
195 vp9_highbd_iwht4x4_add(dqcoeff, dst, stride, eob, xd->bd);
197 const PLANE_TYPE plane_type = pd->plane_type;
200 tx_type = get_tx_type_4x4(plane_type, xd, block);
201 vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
204 tx_type = get_tx_type(plane_type, xd);
205 vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
208 tx_type = get_tx_type(plane_type, xd);
209 vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
213 vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd);
216 assert(0 && "Invalid transform size");
222 vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
224 const PLANE_TYPE plane_type = pd->plane_type;
227 tx_type = get_tx_type_4x4(plane_type, xd, block);
228 vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
231 tx_type = get_tx_type(plane_type, xd);
232 vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
235 tx_type = get_tx_type(plane_type, xd);
236 vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
240 vp9_idct32x32_add(dqcoeff, dst, stride, eob);
243 assert(0 && "Invalid transform size");
251 vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
253 const PLANE_TYPE plane_type = pd->plane_type;
256 tx_type = get_tx_type_4x4(plane_type, xd, block);
257 vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
260 tx_type = get_tx_type(plane_type, xd);
261 vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
264 tx_type = get_tx_type(plane_type, xd);
265 vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
269 vp9_idct32x32_add(dqcoeff, dst, stride, eob);
272 assert(0 && "Invalid transform size");
276 #endif // CONFIG_VP9_HIGHBITDEPTH
    if (eob == 1) {
      vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
    } else {
      if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
        vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
      else if (tx_size == TX_32X32 && eob <= 34)
        vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
      else
        vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
    }
  }
}
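// Context threaded through vp9_foreach_transformed_block() when
// reconstructing intra blocks.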
struct intra_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  FRAME_COUNTS *counts;
  vp9_reader *r;
  const int16_t *const y_dequant;
  const int16_t *const uv_dequant;
};
static void predict_and_reconstruct_intra_block(int plane, int block,
                                                BLOCK_SIZE plane_bsize,
                                                TX_SIZE tx_size, void *arg) {
  struct intra_args *const args = (struct intra_args *)arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  MODE_INFO *const mi = xd->mi[0].src_mi;
  const PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block)
                                            : mi->mbmi.uv_mode;
  const int16_t *const dequant = (plane == 0) ? args->y_dequant
                                              : args->uv_dequant;
  int x, y;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];

  vp9_predict_intra_block(xd, block >> (tx_size << 1),
                          b_width_log2_lookup[plane_bsize], tx_size, mode,
                          dst, pd->dst.stride, dst, pd->dst.stride,
                          x, y, plane);

  if (!mi->mbmi.skip) {
    const int eob = vp9_decode_block_tokens(cm, xd, args->counts, plane, block,
                                            plane_bsize, x, y, tx_size,
                                            args->r, dequant);
    inverse_transform_block(xd, plane, block, tx_size, dst, pd->dst.stride,
                            eob);
  }
}
struct inter_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
  FRAME_COUNTS *counts;
  int *eobtotal;
  const int16_t *const y_dequant;
  const int16_t *const uv_dequant;
};
static void reconstruct_inter_block(int plane, int block,
                                    BLOCK_SIZE plane_bsize,
                                    TX_SIZE tx_size, void *arg) {
  struct inter_args *args = (struct inter_args *)arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int16_t *const dequant = (plane == 0) ? args->y_dequant
                                              : args->uv_dequant;
  int x, y, eob;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  eob = vp9_decode_block_tokens(cm, xd, args->counts, plane, block, plane_bsize,
                                x, y, tx_size, args->r, dequant);
  inverse_transform_block(xd, plane, block, tx_size,
                          &pd->dst.buf[4 * y * pd->dst.stride + 4 * x],
                          pd->dst.stride, eob);
  *args->eobtotal += eob;
}
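// Points xd->mi at the mode-info entry for this block, makes every 8x8 cell
// covered by the block reference that entry, and sets up the skip context,
// edge distances and destination planes.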
360 static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
361 const TileInfo *const tile,
362 BLOCK_SIZE bsize, int mi_row, int mi_col) {
363 const int bw = num_8x8_blocks_wide_lookup[bsize];
364 const int bh = num_8x8_blocks_high_lookup[bsize];
365 const int x_mis = MIN(bw, cm->mi_cols - mi_col);
366 const int y_mis = MIN(bh, cm->mi_rows - mi_row);
367 const int offset = mi_row * cm->mi_stride + mi_col;
370 xd->mi = cm->mi + offset;
371 xd->mi[0].src_mi = &xd->mi[0]; // Point to self.
372 xd->mi[0].mbmi.sb_type = bsize;
374 for (y = 0; y < y_mis; ++y)
375 for (x = !y; x < x_mis; ++x) {
376 xd->mi[y * cm->mi_stride + x].src_mi = &xd->mi[0];
379 set_skip_context(xd, mi_row, mi_col);
381 // Distance of Mb to the various image edges. These are specified to 8th pel
382 // as they are always compared to values that are in 1/8th pel units
383 set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
385 vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
386 return &xd->mi[0].mbmi;
389 static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
390 FRAME_COUNTS *counts,
391 const TileInfo *const tile,
392 int mi_row, int mi_col,
393 vp9_reader *r, BLOCK_SIZE bsize) {
394 VP9_COMMON *const cm = &pbi->common;
395 const int less8x8 = bsize < BLOCK_8X8;
396 int16_t y_dequant[2], uv_dequant[2];
397 int qindex = cm->base_qindex;
398 MB_MODE_INFO *mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
399 vp9_read_mode_info(pbi, xd, counts, tile, mi_row, mi_col, r);
405 reset_skip_context(xd, bsize);
406 } else if (cm->seg.enabled) {
407 qindex = vp9_get_qindex(&cm->seg, mbmi->segment_id, cm->base_qindex);
410 y_dequant[0] = vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
411 y_dequant[1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
412 uv_dequant[0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
413 uv_dequant[1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
415 if (!is_inter_block(mbmi)) {
    struct intra_args arg = {cm, xd, counts, r, y_dequant, uv_dequant};
417 vp9_foreach_transformed_block(xd, bsize,
418 predict_and_reconstruct_intra_block, &arg);
421 vp9_dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col, bsize);
426 struct inter_args arg = {cm, xd, r, counts, &eobtotal, y_dequant,
428 vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
429 if (!less8x8 && eobtotal == 0)
430 mbmi->skip = 1; // skip loopfilter
  xd->corrupted |= vp9_reader_has_error(r);
}
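// The partition symbol is only coded with the full tree when the block fits
// inside the frame; at the right or bottom edge the choice collapses to
// split-vs-horizontal or split-vs-vertical, and a block outside both edges
// must split.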
437 static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd,
438 FRAME_COUNTS *counts, int hbs,
439 int mi_row, int mi_col, BLOCK_SIZE bsize,
441 const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
442 const vp9_prob *const probs = get_partition_probs(cm, ctx);
443 const int has_rows = (mi_row + hbs) < cm->mi_rows;
444 const int has_cols = (mi_col + hbs) < cm->mi_cols;
447 if (has_rows && has_cols)
448 p = (PARTITION_TYPE)vp9_read_tree(r, vp9_partition_tree, probs);
449 else if (!has_rows && has_cols)
450 p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
451 else if (has_rows && !has_cols)
452 p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
456 if (!cm->frame_parallel_decoding_mode)
457 ++counts->partition[ctx][p];
462 static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
463 FRAME_COUNTS *counts,
464 const TileInfo *const tile,
465 int mi_row, int mi_col,
466 vp9_reader* r, BLOCK_SIZE bsize) {
467 VP9_COMMON *const cm = &pbi->common;
468 const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
469 PARTITION_TYPE partition;
470 BLOCK_SIZE subsize, uv_subsize;
472 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
475 partition = read_partition(cm, xd, counts, hbs, mi_row, mi_col, bsize, r);
476 subsize = get_subsize(bsize, partition);
477 uv_subsize = ss_size_lookup[subsize][cm->subsampling_x][cm->subsampling_y];
478 if (subsize >= BLOCK_8X8 && uv_subsize == BLOCK_INVALID)
479 vpx_internal_error(xd->error_info,
480 VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
481 if (subsize < BLOCK_8X8) {
482 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
486 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
489 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
490 if (mi_row + hbs < cm->mi_rows)
491 decode_block(pbi, xd, counts, tile, mi_row + hbs, mi_col, r, subsize);
494 decode_block(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
495 if (mi_col + hbs < cm->mi_cols)
496 decode_block(pbi, xd, counts, tile, mi_row, mi_col + hbs, r, subsize);
498 case PARTITION_SPLIT:
499 decode_partition(pbi, xd, counts, tile, mi_row, mi_col, r, subsize);
500 decode_partition(pbi, xd, counts, tile, mi_row, mi_col + hbs, r,
502 decode_partition(pbi, xd, counts, tile, mi_row + hbs, mi_col, r,
504 decode_partition(pbi, xd, counts, tile, mi_row + hbs, mi_col + hbs, r,
508 assert(0 && "Invalid partition type");
512 // update partition context
513 if (bsize >= BLOCK_8X8 &&
514 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
515 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
518 static void setup_token_decoder(const uint8_t *data,
519 const uint8_t *data_end,
521 struct vpx_internal_error_info *error_info,
523 vpx_decrypt_cb decrypt_cb,
524 void *decrypt_state) {
525 // Validate the calculated partition length. If the buffer
526 // described by the partition can't be fully read, then restrict
527 // it to the portion that can be (for EC mode) or throw an error.
528 if (!read_is_valid(data, read_size, data_end))
529 vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
530 "Truncated packet or corrupt tile length");
532 if (vp9_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
533 vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
534 "Failed to allocate bool decoder %d", 1);
537 static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
542 for (i = 0; i < PLANE_TYPES; ++i)
543 for (j = 0; j < REF_TYPES; ++j)
544 for (k = 0; k < COEF_BANDS; ++k)
545 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
546 for (m = 0; m < UNCONSTRAINED_NODES; ++m)
547 vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
550 static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
552 const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
554 for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
555 read_coef_probs_common(fc->coef_probs[tx_size], r);
558 static void setup_segmentation(struct segmentation *seg,
559 struct vp9_read_bit_buffer *rb) {
563 seg->update_data = 0;
565 seg->enabled = vp9_rb_read_bit(rb);
569 // Segmentation map update
570 seg->update_map = vp9_rb_read_bit(rb);
571 if (seg->update_map) {
572 for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                               : MAX_PROB;
576 seg->temporal_update = vp9_rb_read_bit(rb);
577 if (seg->temporal_update) {
578 for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                                 : MAX_PROB;
    } else {
582 for (i = 0; i < PREDICTION_PROBS; i++)
583 seg->pred_probs[i] = MAX_PROB;
587 // Segmentation data update
588 seg->update_data = vp9_rb_read_bit(rb);
589 if (seg->update_data) {
590 seg->abs_delta = vp9_rb_read_bit(rb);
592 vp9_clearall_segfeatures(seg);
594 for (i = 0; i < MAX_SEGMENTS; i++) {
595 for (j = 0; j < SEG_LVL_MAX; j++) {
597 const int feature_enabled = vp9_rb_read_bit(rb);
598 if (feature_enabled) {
599 vp9_enable_segfeature(seg, i, j);
600 data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
601 if (vp9_is_segfeature_signed(j))
602 data = vp9_rb_read_bit(rb) ? -data : data;
604 vp9_set_segdata(seg, i, j, data);
610 static void setup_loopfilter(struct loopfilter *lf,
611 struct vp9_read_bit_buffer *rb) {
612 lf->filter_level = vp9_rb_read_literal(rb, 6);
613 lf->sharpness_level = vp9_rb_read_literal(rb, 3);
615 // Read in loop filter deltas applied at the MB level based on mode or ref
617 lf->mode_ref_delta_update = 0;
619 lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
620 if (lf->mode_ref_delta_enabled) {
621 lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
622 if (lf->mode_ref_delta_update) {
625 for (i = 0; i < MAX_REF_LF_DELTAS; i++)
626 if (vp9_rb_read_bit(rb))
627 lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
629 for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
630 if (vp9_rb_read_bit(rb))
631 lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
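// Delta-Q values are present only when a flag bit is set and are coded as
// 4-bit signed literals.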
static INLINE int read_delta_q(struct vp9_read_bit_buffer *rb) {
  return vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
}
640 static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
641 struct vp9_read_bit_buffer *rb) {
642 cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
643 cm->y_dc_delta_q = read_delta_q(rb);
644 cm->uv_dc_delta_q = read_delta_q(rb);
645 cm->uv_ac_delta_q = read_delta_q(rb);
646 cm->dequant_bit_depth = cm->bit_depth;
647 xd->lossless = cm->base_qindex == 0 &&
648 cm->y_dc_delta_q == 0 &&
649 cm->uv_dc_delta_q == 0 &&
650 cm->uv_ac_delta_q == 0;
651 #if CONFIG_VP9_HIGHBITDEPTH
652 xd->bd = (int)cm->bit_depth;
static INTERP_FILTER read_interp_filter(struct vp9_read_bit_buffer *rb) {
  const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH,
                                              EIGHTTAP,
                                              EIGHTTAP_SHARP,
                                              BILINEAR };
  return vp9_rb_read_bit(rb) ? SWITCHABLE
                             : literal_to_filter[vp9_rb_read_literal(rb, 2)];
}
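// Frame dimensions are coded minus one: a 16-bit literal of 1919 means a
// width of 1920.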
void vp9_read_frame_size(struct vp9_read_bit_buffer *rb,
                         int *width, int *height) {
  *width = vp9_rb_read_literal(rb, 16) + 1;
  *height = vp9_rb_read_literal(rb, 16) + 1;
}
static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    vp9_read_frame_size(rb, &cm->display_width, &cm->display_height);
}
static void resize_mv_buffer(VP9_COMMON *cm) {
  vpx_free(cm->cur_frame->mvs);
  cm->cur_frame->mi_rows = cm->mi_rows;
  cm->cur_frame->mi_cols = cm->mi_cols;
  cm->cur_frame->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
                                            sizeof(*cm->cur_frame->mvs));
}
686 static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
687 #if CONFIG_SIZE_LIMIT
688 if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
689 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
690 "Width and height beyond allowed size.");
692 if (cm->width != width || cm->height != height) {
693 const int new_mi_rows =
694 ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
695 const int new_mi_cols =
696 ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
698 // Allocations in vp9_alloc_context_buffers() depend on individual
699 // dimensions as well as the overall size.
700 if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
701 if (vp9_alloc_context_buffers(cm, width, height))
702 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
703 "Failed to allocate context buffers");
705 vp9_set_mb_mi(cm, width, height);
707 vp9_init_context_buffers(cm);
711 if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
712 cm->mi_cols > cm->cur_frame->mi_cols) {
713 resize_mv_buffer(cm);
717 static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
719 BufferPool *const pool = cm->buffer_pool;
720 vp9_read_frame_size(rb, &width, &height);
721 resize_context_buffers(cm, width, height);
722 setup_display_size(cm, rb);
724 lock_buffer_pool(pool);
725 if (vp9_realloc_frame_buffer(
726 get_frame_new_buffer(cm), cm->width, cm->height,
727 cm->subsampling_x, cm->subsampling_y,
728 #if CONFIG_VP9_HIGHBITDEPTH
729 cm->use_highbitdepth,
731 VP9_DEC_BORDER_IN_PIXELS,
733 &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
735 unlock_buffer_pool(pool);
736 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
737 "Failed to allocate frame buffer");
739 unlock_buffer_pool(pool);
741 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
742 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
743 pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
744 pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
                                          int ref_xss, int ref_yss,
                                          vpx_bit_depth_t this_bit_depth,
                                          int this_xss, int this_yss) {
  return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
         ref_yss == this_yss;
}
755 static void setup_frame_size_with_refs(VP9_COMMON *cm,
756 struct vp9_read_bit_buffer *rb) {
759 int has_valid_ref_frame = 0;
760 BufferPool *const pool = cm->buffer_pool;
761 for (i = 0; i < REFS_PER_FRAME; ++i) {
762 if (vp9_rb_read_bit(rb)) {
763 YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
764 width = buf->y_crop_width;
765 height = buf->y_crop_height;
772 vp9_read_frame_size(rb, &width, &height);
774 if (width <= 0 || height <= 0)
775 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
776 "Invalid frame size");
778 // Check to make sure at least one of frames that this frame references
779 // has valid dimensions.
780 for (i = 0; i < REFS_PER_FRAME; ++i) {
781 RefBuffer *const ref_frame = &cm->frame_refs[i];
782 has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width,
783 ref_frame->buf->y_crop_height,
786 if (!has_valid_ref_frame)
787 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
788 "Referenced frame has invalid size");
789 for (i = 0; i < REFS_PER_FRAME; ++i) {
790 RefBuffer *const ref_frame = &cm->frame_refs[i];
791 if (!valid_ref_frame_img_fmt(
792 ref_frame->buf->bit_depth,
793 ref_frame->buf->subsampling_x,
794 ref_frame->buf->subsampling_y,
798 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
799 "Referenced frame has incompatible color format");
802 resize_context_buffers(cm, width, height);
803 setup_display_size(cm, rb);
805 lock_buffer_pool(pool);
806 if (vp9_realloc_frame_buffer(
807 get_frame_new_buffer(cm), cm->width, cm->height,
808 cm->subsampling_x, cm->subsampling_y,
809 #if CONFIG_VP9_HIGHBITDEPTH
810 cm->use_highbitdepth,
812 VP9_DEC_BORDER_IN_PIXELS,
814 &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
816 unlock_buffer_pool(pool);
817 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
818 "Failed to allocate frame buffer");
820 unlock_buffer_pool(pool);
822 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
823 pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
824 pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
825 pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  if (cm->log2_tile_cols > 6)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid number of tile columns");

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}
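// Describes one coded tile: where its data starts, how many bytes it
// occupies, and (for multi-threaded decoding) which tile column it belongs to.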
848 typedef struct TileBuffer {
851 int col; // only used with multi-threaded decoding
854 // Reads the next tile returning its size and adjusting '*data' accordingly
855 // based on 'is_last'.
856 static void get_tile_buffer(const uint8_t *const data_end,
858 struct vpx_internal_error_info *error_info,
859 const uint8_t **data,
860 vpx_decrypt_cb decrypt_cb, void *decrypt_state,
865 if (!read_is_valid(*data, 4, data_end))
866 vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
867 "Truncated packet or corrupt tile length");
871 decrypt_cb(decrypt_state, *data, be_data, 4);
872 size = mem_get_be32(be_data);
874 size = mem_get_be32(*data);
878 if (size > (size_t)(data_end - *data))
879 vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
880 "Truncated packet or corrupt tile size");
882 size = data_end - *data;
891 static void get_tile_buffers(VP9Decoder *pbi,
892 const uint8_t *data, const uint8_t *data_end,
893 int tile_cols, int tile_rows,
894 TileBuffer (*tile_buffers)[1 << 6]) {
897 for (r = 0; r < tile_rows; ++r) {
898 for (c = 0; c < tile_cols; ++c) {
899 const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
900 TileBuffer *const buf = &tile_buffers[r][c];
902 get_tile_buffer(data_end, is_last, &pbi->common.error, &data,
903 pbi->decrypt_cb, pbi->decrypt_state, buf);
908 static const uint8_t *decode_tiles(VP9Decoder *pbi,
910 const uint8_t *data_end) {
911 VP9_COMMON *const cm = &pbi->common;
912 const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
913 const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
914 const int tile_cols = 1 << cm->log2_tile_cols;
915 const int tile_rows = 1 << cm->log2_tile_rows;
916 TileBuffer tile_buffers[4][1 << 6];
917 int tile_row, tile_col;
919 TileData *tile_data = NULL;
921 if (cm->lf.filter_level && pbi->lf_worker.data1 == NULL) {
922 CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
923 vpx_memalign(32, sizeof(LFWorkerData)));
924 pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
925 if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
926 vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
927 "Loop filter thread creation failed");
931 if (cm->lf.filter_level) {
932 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
933 // Be sure to sync as we might be resuming after a failed frame decode.
934 winterface->sync(&pbi->lf_worker);
935 vp9_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
939 assert(tile_rows <= 4);
940 assert(tile_cols <= (1 << 6));
942 // Note: this memset assumes above_context[0], [1] and [2]
943 // are allocated as part of the same buffer.
944 vpx_memset(cm->above_context, 0,
945 sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);
947 vpx_memset(cm->above_seg_context, 0,
948 sizeof(*cm->above_seg_context) * aligned_cols);
950 get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
952 if (pbi->tile_data == NULL ||
953 (tile_cols * tile_rows) != pbi->total_tiles) {
954 vpx_free(pbi->tile_data);
958 vpx_memalign(32, tile_cols * tile_rows * (sizeof(*pbi->tile_data))));
959 pbi->total_tiles = tile_rows * tile_cols;
962 // Load all tile information into tile_data.
963 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
964 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
966 const TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
967 tile_data = pbi->tile_data + tile_cols * tile_row + tile_col;
969 tile_data->xd = pbi->mb;
970 tile_data->xd.corrupted = 0;
971 vp9_tile_init(&tile, tile_data->cm, tile_row, tile_col);
972 setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
973 &tile_data->bit_reader, pbi->decrypt_cb,
975 init_macroblockd(cm, &tile_data->xd);
979 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
981 vp9_tile_set_row(&tile, cm, tile_row);
982 for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
983 mi_row += MI_BLOCK_SIZE) {
984 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
985 const int col = pbi->inv_tile_order ?
986 tile_cols - tile_col - 1 : tile_col;
987 tile_data = pbi->tile_data + tile_cols * tile_row + col;
988 vp9_tile_set_col(&tile, tile_data->cm, col);
989 vp9_zero(tile_data->xd.left_context);
990 vp9_zero(tile_data->xd.left_seg_context);
991 for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
992 mi_col += MI_BLOCK_SIZE) {
993 decode_partition(pbi, &tile_data->xd, &cm->counts, &tile, mi_row,
994 mi_col, &tile_data->bit_reader, BLOCK_64X64);
996 pbi->mb.corrupted |= tile_data->xd.corrupted;
997 if (pbi->mb.corrupted)
998 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
999 "Failed to decode tile data");
1001 // Loopfilter one row.
1002 if (cm->lf.filter_level) {
1003 const int lf_start = mi_row - MI_BLOCK_SIZE;
1004 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
1006 // delay the loopfilter by 1 macroblock row.
1007 if (lf_start < 0) continue;
1009 // decoding has completed: finish up the loop filter in this thread.
1010 if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue;
1012 winterface->sync(&pbi->lf_worker);
1013 lf_data->start = lf_start;
1014 lf_data->stop = mi_row;
1015 if (pbi->max_threads > 1) {
1016 winterface->launch(&pbi->lf_worker);
1018 winterface->execute(&pbi->lf_worker);
1021 // After loopfiltering, the last 7 row pixels in each superblock row may
1022 // still be changed by the longest loopfilter of the next superblock
1024 if (pbi->frame_parallel_decode)
1025 vp9_frameworker_broadcast(pbi->cur_buf,
1026 mi_row << MI_BLOCK_SIZE_LOG2);
1030 // Loopfilter remaining rows in the frame.
1031 if (cm->lf.filter_level) {
1032 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
1033 winterface->sync(&pbi->lf_worker);
1034 lf_data->start = lf_data->stop;
1035 lf_data->stop = cm->mi_rows;
1036 winterface->execute(&pbi->lf_worker);
1039 // Get last tile data.
1040 tile_data = pbi->tile_data + tile_cols * tile_rows - 1;
1042 if (pbi->frame_parallel_decode)
1043 vp9_frameworker_broadcast(pbi->cur_buf, INT_MAX);
1044 return vp9_reader_find_end(&tile_data->bit_reader);
1047 static int tile_worker_hook(TileWorkerData *const tile_data,
1048 const TileInfo *const tile) {
1051 if (setjmp(tile_data->error_info.jmp)) {
1052 tile_data->error_info.setjmp = 0;
1053 tile_data->xd.corrupted = 1;
1057 tile_data->error_info.setjmp = 1;
1058 tile_data->xd.error_info = &tile_data->error_info;
1060 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
1061 mi_row += MI_BLOCK_SIZE) {
1062 vp9_zero(tile_data->xd.left_context);
1063 vp9_zero(tile_data->xd.left_seg_context);
1064 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
1065 mi_col += MI_BLOCK_SIZE) {
1066 decode_partition(tile_data->pbi, &tile_data->xd, &tile_data->counts,
1067 tile, mi_row, mi_col, &tile_data->bit_reader,
1071 return !tile_data->xd.corrupted;
// Sorts in descending order.
static int compare_tile_buffers(const void *a, const void *b) {
  const TileBuffer *const buf1 = (const TileBuffer*)a;
  const TileBuffer *const buf2 = (const TileBuffer*)b;
  if (buf1->size < buf2->size) {
    return 1;
  } else if (buf1->size == buf2->size) {
    return 0;
  } else {
    return -1;
  }
}
1087 static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
1088 const uint8_t *data,
1089 const uint8_t *data_end) {
1090 VP9_COMMON *const cm = &pbi->common;
1091 const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
1092 const uint8_t *bit_reader_end = NULL;
1093 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
1094 const int tile_cols = 1 << cm->log2_tile_cols;
1095 const int tile_rows = 1 << cm->log2_tile_rows;
1096 const int num_workers = MIN(pbi->max_threads & ~1, tile_cols);
1097 TileBuffer tile_buffers[1][1 << 6];
1099 int final_worker = -1;
1101 assert(tile_cols <= (1 << 6));
1102 assert(tile_rows == 1);
1105 // TODO(jzern): See if we can remove the restriction of passing in max
1106 // threads to the decoder.
1107 if (pbi->num_tile_workers == 0) {
1108 const int num_threads = pbi->max_threads & ~1;
1110 // TODO(jzern): Allocate one less worker, as in the current code we only
1111 // use num_threads - 1 workers.
1112 CHECK_MEM_ERROR(cm, pbi->tile_workers,
1113 vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
1114 // Ensure tile data offsets will be properly aligned. This may fail on
1115 // platforms without DECLARE_ALIGNED().
1116 assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
1117 CHECK_MEM_ERROR(cm, pbi->tile_worker_data,
1118 vpx_memalign(32, num_threads *
1119 sizeof(*pbi->tile_worker_data)));
1120 CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
1121 vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
1122 for (i = 0; i < num_threads; ++i) {
1123 VP9Worker *const worker = &pbi->tile_workers[i];
1124 ++pbi->num_tile_workers;
1126 winterface->init(worker);
1127 if (i < num_threads - 1 && !winterface->reset(worker)) {
1128 vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
1129 "Tile decoder thread creation failed");
1134 // Reset tile decoding hook
1135 for (n = 0; n < num_workers; ++n) {
1136 VP9Worker *const worker = &pbi->tile_workers[n];
1137 winterface->sync(worker);
1138 worker->hook = (VP9WorkerHook)tile_worker_hook;
1139 worker->data1 = &pbi->tile_worker_data[n];
1140 worker->data2 = &pbi->tile_worker_info[n];
1143 // Note: this memset assumes above_context[0], [1] and [2]
1144 // are allocated as part of the same buffer.
1145 vpx_memset(cm->above_context, 0,
1146 sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
1147 vpx_memset(cm->above_seg_context, 0,
1148 sizeof(*cm->above_seg_context) * aligned_mi_cols);
1150 // Load tile data into tile_buffers
1151 get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
1153 // Sort the buffers based on size in descending order.
1154 qsort(tile_buffers[0], tile_cols, sizeof(tile_buffers[0][0]),
1155 compare_tile_buffers);
1157 // Rearrange the tile buffers such that per-tile group the largest, and
1158 // presumably the most difficult, tile will be decoded in the main thread.
1159 // This should help minimize the number of instances where the main thread is
1160 // waiting for a worker to complete.
1162 int group_start = 0;
1163 while (group_start < tile_cols) {
1164 const TileBuffer largest = tile_buffers[0][group_start];
1165 const int group_end = MIN(group_start + num_workers, tile_cols) - 1;
1166 memmove(tile_buffers[0] + group_start, tile_buffers[0] + group_start + 1,
1167 (group_end - group_start) * sizeof(tile_buffers[0][0]));
1168 tile_buffers[0][group_end] = largest;
1169 group_start = group_end + 1;
1173 // Initialize thread frame counts.
1174 if (!cm->frame_parallel_decoding_mode) {
1177 for (i = 0; i < num_workers; ++i) {
1178 TileWorkerData *const tile_data =
1179 (TileWorkerData*)pbi->tile_workers[i].data1;
1180 vp9_zero(tile_data->counts);
1185 while (n < tile_cols) {
1187 for (i = 0; i < num_workers && n < tile_cols; ++i) {
1188 VP9Worker *const worker = &pbi->tile_workers[i];
1189 TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
1190 TileInfo *const tile = (TileInfo*)worker->data2;
1191 TileBuffer *const buf = &tile_buffers[0][n];
1193 tile_data->pbi = pbi;
1194 tile_data->xd = pbi->mb;
1195 tile_data->xd.corrupted = 0;
1196 vp9_tile_init(tile, cm, 0, buf->col);
1197 setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
1198 &tile_data->bit_reader, pbi->decrypt_cb,
1199 pbi->decrypt_state);
1200 init_macroblockd(cm, &tile_data->xd);
1202 worker->had_error = 0;
1203 if (i == num_workers - 1 || n == tile_cols - 1) {
1204 winterface->execute(worker);
1206 winterface->launch(worker);
1209 if (buf->col == tile_cols - 1) {
1216 for (; i > 0; --i) {
1217 VP9Worker *const worker = &pbi->tile_workers[i - 1];
1218 // TODO(jzern): The tile may have specific error data associated with
1219 // its vpx_internal_error_info which could be propagated to the main info
1220 // in cm. Additionally once the threads have been synced and an error is
1221 // detected, there's no point in continuing to decode tiles.
1222 pbi->mb.corrupted |= !winterface->sync(worker);
1224 if (final_worker > -1) {
1225 TileWorkerData *const tile_data =
1226 (TileWorkerData*)pbi->tile_workers[final_worker].data1;
1227 bit_reader_end = vp9_reader_find_end(&tile_data->bit_reader);
1231 // Accumulate thread frame counts.
1232 if (n >= tile_cols && !cm->frame_parallel_decoding_mode) {
1233 for (i = 0; i < num_workers; ++i) {
1234 TileWorkerData *const tile_data =
1235 (TileWorkerData*)pbi->tile_workers[i].data1;
1236 vp9_accumulate_frame_counts(cm, &tile_data->counts, 1);
1241 return bit_reader_end;
static void error_handler(void *data) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}

int vp9_read_sync_code(struct vp9_read_bit_buffer *const rb) {
  return vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_0 &&
         vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_1 &&
         vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_2;
}
BITSTREAM_PROFILE vp9_read_profile(struct vp9_read_bit_buffer *rb) {
  int profile = vp9_rb_read_bit(rb);
  profile |= vp9_rb_read_bit(rb) << 1;
  if (profile > 2)
    profile += vp9_rb_read_bit(rb);
  return (BITSTREAM_PROFILE) profile;
}
1263 static void read_bitdepth_colorspace_sampling(
1264 VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
1265 if (cm->profile >= PROFILE_2) {
1266 cm->bit_depth = vp9_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
1267 #if CONFIG_VP9_HIGHBITDEPTH
1268 cm->use_highbitdepth = 1;
1271 cm->bit_depth = VPX_BITS_8;
1272 #if CONFIG_VP9_HIGHBITDEPTH
1273 cm->use_highbitdepth = 0;
1276 cm->color_space = vp9_rb_read_literal(rb, 3);
1277 if (cm->color_space != VPX_CS_SRGB) {
1278 vp9_rb_read_bit(rb); // [16,235] (including xvycc) vs [0,255] range
1279 if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
1280 cm->subsampling_x = vp9_rb_read_bit(rb);
1281 cm->subsampling_y = vp9_rb_read_bit(rb);
1282 if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
1283 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1284 "4:2:0 color not supported in profile 1 or 3");
1285 if (vp9_rb_read_bit(rb))
1286 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1287 "Reserved bit set");
1289 cm->subsampling_y = cm->subsampling_x = 1;
1292 if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
1293 // Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed.
1294 // 4:2:2 or 4:4:0 chroma sampling is not allowed.
1295 cm->subsampling_y = cm->subsampling_x = 0;
1296 if (vp9_rb_read_bit(rb))
1297 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1298 "Reserved bit set");
1300 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1301 "4:4:4 color not supported in profile 0 or 2");
1306 static size_t read_uncompressed_header(VP9Decoder *pbi,
1307 struct vp9_read_bit_buffer *rb) {
1308 VP9_COMMON *const cm = &pbi->common;
1309 RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
1310 BufferPool *const pool = pbi->common.buffer_pool;
1311 int i, mask, ref_index = 0;
1314 cm->last_frame_type = cm->frame_type;
1316 if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
1317 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1318 "Invalid frame marker");
1320 cm->profile = vp9_read_profile(rb);
1322 if (cm->profile >= MAX_PROFILES)
1323 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1324 "Unsupported bitstream profile");
1326 cm->show_existing_frame = vp9_rb_read_bit(rb);
1327 if (cm->show_existing_frame) {
1328 // Show an existing frame directly.
1329 const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
1330 lock_buffer_pool(pool);
1331 if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
1332 unlock_buffer_pool(pool);
1333 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1334 "Buffer %d does not contain a decoded frame",
1338 ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
1339 unlock_buffer_pool(pool);
1340 pbi->refresh_frame_flags = 0;
1341 cm->lf.filter_level = 0;
1344 if (pbi->frame_parallel_decode) {
1345 for (i = 0; i < REF_FRAMES; ++i)
1346 cm->next_ref_frame_map[i] = cm->ref_frame_map[i];
1351 cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
1352 cm->show_frame = vp9_rb_read_bit(rb);
1353 cm->error_resilient_mode = vp9_rb_read_bit(rb);
1355 if (cm->frame_type == KEY_FRAME) {
1356 if (!vp9_read_sync_code(rb))
1357 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1358 "Invalid frame sync code");
1360 read_bitdepth_colorspace_sampling(cm, rb);
1361 pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;
1363 for (i = 0; i < REFS_PER_FRAME; ++i) {
1364 cm->frame_refs[i].idx = INVALID_IDX;
1365 cm->frame_refs[i].buf = NULL;
1368 setup_frame_size(cm, rb);
1369 if (pbi->need_resync) {
1370 vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
1371 pbi->need_resync = 0;
1374 cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);
1376 cm->reset_frame_context = cm->error_resilient_mode ?
1377 0 : vp9_rb_read_literal(rb, 2);
1379 if (cm->intra_only) {
1380 if (!vp9_read_sync_code(rb))
1381 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1382 "Invalid frame sync code");
1383 if (cm->profile > PROFILE_0) {
1384 read_bitdepth_colorspace_sampling(cm, rb);
1386 // NOTE: The intra-only frame header does not include the specification
1387 // of either the color format or color sub-sampling in profile 0. VP9
1388 // specifies that the default color format should be YUV 4:2:0 in this
1389 // case (normative).
1390 cm->color_space = VPX_CS_BT_601;
1391 cm->subsampling_y = cm->subsampling_x = 1;
1392 cm->bit_depth = VPX_BITS_8;
1393 #if CONFIG_VP9_HIGHBITDEPTH
1394 cm->use_highbitdepth = 0;
1398 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
1399 setup_frame_size(cm, rb);
1400 if (pbi->need_resync) {
1401 vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
1402 pbi->need_resync = 0;
1404 } else if (pbi->need_resync != 1) { /* Skip if need resync */
1405 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
1406 for (i = 0; i < REFS_PER_FRAME; ++i) {
1407 const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
1408 const int idx = cm->ref_frame_map[ref];
1409 RefBuffer *const ref_frame = &cm->frame_refs[i];
1410 ref_frame->idx = idx;
1411 ref_frame->buf = &frame_bufs[idx].buf;
1412 cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
1415 setup_frame_size_with_refs(cm, rb);
1417 cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
1418 cm->interp_filter = read_interp_filter(rb);
1420 for (i = 0; i < REFS_PER_FRAME; ++i) {
1421 RefBuffer *const ref_buf = &cm->frame_refs[i];
1422 #if CONFIG_VP9_HIGHBITDEPTH
1423 vp9_setup_scale_factors_for_frame(&ref_buf->sf,
1424 ref_buf->buf->y_crop_width,
1425 ref_buf->buf->y_crop_height,
1426 cm->width, cm->height,
1427 cm->use_highbitdepth);
1429 vp9_setup_scale_factors_for_frame(&ref_buf->sf,
1430 ref_buf->buf->y_crop_width,
1431 ref_buf->buf->y_crop_height,
1432 cm->width, cm->height);
1437 #if CONFIG_VP9_HIGHBITDEPTH
1438 get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
1440 get_frame_new_buffer(cm)->color_space = cm->color_space;
1442 if (pbi->need_resync) {
1443 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1444 "Keyframe / intra-only frame required to reset decoder"
1448 if (!cm->error_resilient_mode) {
1449 cm->refresh_frame_context = vp9_rb_read_bit(rb);
1450 cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
1452 cm->refresh_frame_context = 0;
1453 cm->frame_parallel_decoding_mode = 1;
1456 // This flag will be overridden by the call to vp9_setup_past_independence
1457 // below, forcing the use of context 0 for those frame types.
1458 cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
1460 // Generate next_ref_frame_map.
1461 lock_buffer_pool(pool);
1462 for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
1464 cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
1465 ++frame_bufs[cm->new_fb_idx].ref_count;
1467 cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
1469 // Current thread holds the reference frame.
1470 if (cm->ref_frame_map[ref_index] >= 0)
1471 ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
1475 for (; ref_index < REF_FRAMES; ++ref_index) {
1476 cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
1477 // Current thread holds the reference frame.
1478 if (cm->ref_frame_map[ref_index] >= 0)
1479 ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
1481 unlock_buffer_pool(pool);
1482 pbi->hold_ref_buf = 1;
1484 if (frame_is_intra_only(cm) || cm->error_resilient_mode)
1485 vp9_setup_past_independence(cm);
1487 setup_loopfilter(&cm->lf, rb);
1488 setup_quantization(cm, &pbi->mb, rb);
1489 setup_segmentation(&cm->seg, rb);
1491 setup_tile_info(cm, rb);
1492 sz = vp9_rb_read_literal(rb, 16);
1495 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1496 "Invalid header size");
1501 static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
1502 size_t partition_size) {
1503 VP9_COMMON *const cm = &pbi->common;
1504 MACROBLOCKD *const xd = &pbi->mb;
1505 FRAME_CONTEXT *const fc = cm->fc;
1509 if (vp9_reader_init(&r, data, partition_size, pbi->decrypt_cb,
1510 pbi->decrypt_state))
1511 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1512 "Failed to allocate bool decoder 0");
1514 cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
1515 if (cm->tx_mode == TX_MODE_SELECT)
1516 read_tx_mode_probs(&fc->tx_probs, &r);
1517 read_coef_probs(fc, cm->tx_mode, &r);
1519 for (k = 0; k < SKIP_CONTEXTS; ++k)
1520 vp9_diff_update_prob(&r, &fc->skip_probs[k]);
1522 if (!frame_is_intra_only(cm)) {
1523 nmv_context *const nmvc = &fc->nmvc;
1526 read_inter_mode_probs(fc, &r);
1528 if (cm->interp_filter == SWITCHABLE)
1529 read_switchable_interp_probs(fc, &r);
1531 for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
1532 vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);
1534 cm->reference_mode = read_frame_reference_mode(cm, &r);
1535 if (cm->reference_mode != SINGLE_REFERENCE)
1536 setup_compound_reference_mode(cm);
1537 read_frame_reference_mode_probs(cm, &r);
1539 for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
1540 for (i = 0; i < INTRA_MODES - 1; ++i)
1541 vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
1543 for (j = 0; j < PARTITION_CONTEXTS; ++j)
1544 for (i = 0; i < PARTITION_TYPES - 1; ++i)
1545 vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);
1547 read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
1550 return vp9_reader_has_error(&r);
#ifdef NDEBUG
#define debug_check_frame_counts(cm) (void)0
#else  // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
static void debug_check_frame_counts(const VP9_COMMON *const cm) {
1559 FRAME_COUNTS zero_counts;
1560 vp9_zero(zero_counts);
1561 assert(cm->frame_parallel_decoding_mode || cm->error_resilient_mode);
1562 assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
1563 sizeof(cm->counts.y_mode)));
1564 assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
1565 sizeof(cm->counts.uv_mode)));
1566 assert(!memcmp(cm->counts.partition, zero_counts.partition,
1567 sizeof(cm->counts.partition)));
1568 assert(!memcmp(cm->counts.coef, zero_counts.coef,
1569 sizeof(cm->counts.coef)));
1570 assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
1571 sizeof(cm->counts.eob_branch)));
1572 assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
1573 sizeof(cm->counts.switchable_interp)));
1574 assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
1575 sizeof(cm->counts.inter_mode)));
1576 assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
1577 sizeof(cm->counts.intra_inter)));
1578 assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
1579 sizeof(cm->counts.comp_inter)));
1580 assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
1581 sizeof(cm->counts.single_ref)));
1582 assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
1583 sizeof(cm->counts.comp_ref)));
1584 assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
1585 assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
  assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
}
#endif  // NDEBUG
static struct vp9_read_bit_buffer* init_read_bit_buffer(
    VP9Decoder *pbi,
    struct vp9_read_bit_buffer *rb,
    const uint8_t *data,
    const uint8_t *data_end,
    uint8_t *clear_data /* buffer size MAX_VP9_HEADER_SIZE */) {
  rb->bit_offset = 0;
  rb->error_handler = error_handler;
  rb->error_handler_data = &pbi->common;
  if (pbi->decrypt_cb) {
    const int n = (int)MIN(MAX_VP9_HEADER_SIZE, data_end - data);
    pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
    rb->bit_buffer = clear_data;
    rb->bit_buffer_end = clear_data + n;
  } else {
    rb->bit_buffer = data;
    rb->bit_buffer_end = data_end;
  }
  return rb;
}
1611 void vp9_decode_frame(VP9Decoder *pbi,
1612 const uint8_t *data, const uint8_t *data_end,
1613 const uint8_t **p_data_end) {
1614 VP9_COMMON *const cm = &pbi->common;
1615 MACROBLOCKD *const xd = &pbi->mb;
1616 struct vp9_read_bit_buffer rb = { NULL, NULL, 0, NULL, 0};
1617 int context_updated = 0;
1618 uint8_t clear_data[MAX_VP9_HEADER_SIZE];
1619 const size_t first_partition_size = read_uncompressed_header(pbi,
1620 init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
1621 const int tile_rows = 1 << cm->log2_tile_rows;
1622 const int tile_cols = 1 << cm->log2_tile_cols;
1623 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
1624 xd->cur_buf = new_fb;
1626 if (!first_partition_size) {
1627 // showing a frame directly
1628 *p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
1632 data += vp9_rb_bytes_read(&rb);
1633 if (!read_is_valid(data, first_partition_size, data_end))
1634 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1635 "Truncated packet or corrupt header length");
1637 cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
1638 cm->width == cm->last_width &&
1639 cm->height == cm->last_height &&
1641 cm->last_show_frame;
1643 vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
1645 *cm->fc = cm->frame_contexts[cm->frame_context_idx];
1646 if (!cm->fc->initialized)
1647 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1648 "Uninitialized entropy context.");
1650 vp9_zero(cm->counts);
1653 new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
1654 if (new_fb->corrupted)
1655 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1656 "Decode failed. Frame data header is corrupted.");
1658 if (cm->lf.filter_level) {
1659 vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
1662 // If encoded in frame parallel mode, frame context is ready after decoding
1663 // the frame header.
1664 if (pbi->frame_parallel_decode && cm->frame_parallel_decoding_mode) {
1665 VP9Worker *const worker = pbi->frame_worker_owner;
1666 FrameWorkerData *const frame_worker_data = worker->data1;
1667 if (cm->refresh_frame_context) {
1668 context_updated = 1;
1669 cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
1671 vp9_frameworker_lock_stats(worker);
1672 pbi->cur_buf->row = -1;
1673 pbi->cur_buf->col = -1;
1674 frame_worker_data->frame_context_ready = 1;
1675 // Signal the main thread that context is ready.
1676 vp9_frameworker_signal_stats(worker);
1677 vp9_frameworker_unlock_stats(worker);
1680 if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) {
1681 // Multi-threaded tile decoder
1682 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
1683 if (!xd->corrupted) {
1684 // If multiple threads are used to decode tiles, then we use those threads
1685 // to do parallel loopfiltering.
1686 vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
1687 0, 0, pbi->tile_workers, pbi->num_tile_workers,
1690 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1691 "Decode failed. Frame data is corrupted.");
1695 *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
1698 if (!xd->corrupted) {
1699 if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
1700 vp9_adapt_coef_probs(cm);
1702 if (!frame_is_intra_only(cm)) {
1703 vp9_adapt_mode_probs(cm);
1704 vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
1707 debug_check_frame_counts(cm);
1710 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1711 "Decode failed. Frame data is corrupted.");
1714 // Non frame parallel update frame context here.
1715 if (cm->refresh_frame_context && !context_updated)
1716 cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
1719 static void build_mc_border(const uint8_t *src, int src_stride,
1720 uint8_t *dst, int dst_stride,
1721 int x, int y, int b_w, int b_h, int w, int h) {
1722 // Get a pointer to the start of the real data for this row.
1723 const uint8_t *ref_row = src - x - y * src_stride;
1726 ref_row += (h - 1) * src_stride;
1728 ref_row += y * src_stride;
1731 int right = 0, copy;
1732 int left = x < 0 ? -x : 0;
1738 right = x + b_w - w;
1743 copy = b_w - left - right;
1746 memset(dst, ref_row[0], left);
1749 memcpy(dst + left, ref_row + x + left, copy);
1752 memset(dst + left + copy, ref_row[w - 1], right);
1758 ref_row += src_stride;
1762 #if CONFIG_VP9_HIGHBITDEPTH
1763 static void high_build_mc_border(const uint8_t *src8, int src_stride,
1764 uint16_t *dst, int dst_stride,
1765 int x, int y, int b_w, int b_h,
1767 // Get a pointer to the start of the real data for this row.
1768 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
1769 const uint16_t *ref_row = src - x - y * src_stride;
1772 ref_row += (h - 1) * src_stride;
1774 ref_row += y * src_stride;
1777 int right = 0, copy;
1778 int left = x < 0 ? -x : 0;
1784 right = x + b_w - w;
1789 copy = b_w - left - right;
1792 vpx_memset16(dst, ref_row[0], left);
1795 memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
1798 vpx_memset16(dst + left + copy, ref_row[w - 1], right);
1804 ref_row += src_stride;
1807 #endif // CONFIG_VP9_HIGHBITDEPTH
1809 void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd,
1810 int plane, int block, int bw, int bh, int x,
1811 int y, int w, int h, int mi_x, int mi_y) {
1812 struct macroblockd_plane *const pd = &xd->plane[plane];
1813 const MODE_INFO *mi = xd->mi[0].src_mi;
1814 const int is_compound = has_second_ref(&mi->mbmi);
1815 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
1818 for (ref = 0; ref < 1 + is_compound; ++ref) {
1819 const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
1820 struct buf_2d *const pre_buf = &pd->pre[ref];
1821 struct buf_2d *const dst_buf = &pd->dst;
1822 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
1823 const MV mv = mi->mbmi.sb_type < BLOCK_8X8
1824 ? average_split_mvs(pd, mi, ref, block)
1825 : mi->mbmi.mv[ref].as_mv;
1827 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
1832 int xs, ys, x0, y0, x0_16, y0_16, y1, frame_width, frame_height,
1833 buf_stride, subpel_x, subpel_y;
1834 uint8_t *ref_frame, *buf_ptr;
1835 const int idx = xd->block_refs[ref]->idx;
1836 BufferPool *const pool = pbi->common.buffer_pool;
1837 RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
1838 const int is_scaled = vp9_is_scaled(sf);
1840 // Get reference frame pointer, width and height.
1842 frame_width = ref_frame_buf->buf.y_crop_width;
1843 frame_height = ref_frame_buf->buf.y_crop_height;
1844 ref_frame = ref_frame_buf->buf.y_buffer;
1846 frame_width = ref_frame_buf->buf.uv_crop_width;
1847 frame_height = ref_frame_buf->buf.uv_crop_height;
1848 ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
1849 : ref_frame_buf->buf.v_buffer;
1853 // Co-ordinate of containing block to pixel precision.
1854 int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
1855 int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
1857 // Co-ordinate of the block to 1/16th pixel precision.
1858 x0_16 = (x_start + x) << SUBPEL_BITS;
1859 y0_16 = (y_start + y) << SUBPEL_BITS;
1861 // Co-ordinate of current block in reference frame
1862 // to 1/16th pixel precision.
1863 x0_16 = sf->scale_value_x(x0_16, sf);
1864 y0_16 = sf->scale_value_y(y0_16, sf);
1866 // Map the top left corner of the block into the reference frame.
1867 x0 = sf->scale_value_x(x_start + x, sf);
1868 y0 = sf->scale_value_y(y_start + y, sf);
1870 // Scale the MV and incorporate the sub-pixel offset of the block
1871 // in the reference frame.
1872 scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
1876 // Co-ordinate of containing block to pixel precision.
1877 x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
1878 y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
1880 // Co-ordinate of the block to 1/16th pixel precision.
1881 x0_16 = x0 << SUBPEL_BITS;
1882 y0_16 = y0 << SUBPEL_BITS;
1884 scaled_mv.row = mv_q4.row;
1885 scaled_mv.col = mv_q4.col;
1888 subpel_x = scaled_mv.col & SUBPEL_MASK;
1889 subpel_y = scaled_mv.row & SUBPEL_MASK;
1891 // Calculate the top left corner of the best matching block in the
1893 x0 += scaled_mv.col >> SUBPEL_BITS;
1894 y0 += scaled_mv.row >> SUBPEL_BITS;
1895 x0_16 += scaled_mv.col;
1896 y0_16 += scaled_mv.row;
1898 // Get reference block pointer.
1899 buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
1900 buf_stride = pre_buf->stride;
1902 // Get reference block bottom right vertical coordinate.
1903 y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
1905 // Do border extension if there is motion or the
1906 // width/height is not a multiple of 8 pixels.
1907 if (is_scaled || scaled_mv.col || scaled_mv.row ||
1908 (frame_width & 0x7) || (frame_height & 0x7)) {
1909 // Get reference block bottom right horizontal coordinate.
1910 int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
1911 int x_pad = 0, y_pad = 0;
1913 if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
1914 x0 -= VP9_INTERP_EXTEND - 1;
1915 x1 += VP9_INTERP_EXTEND;
1919 if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
1920 y0 -= VP9_INTERP_EXTEND - 1;
1921 y1 += VP9_INTERP_EXTEND;
1925 // Wait until reference block is ready. Pad 7 more pixels as last 7
1926 // pixels of each superblock row can be changed by next superblock row.
1927 if (pbi->frame_parallel_decode)
1928 vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
1929 MAX(0, (y1 + 7) << (plane == 0 ? 0 : 1)));
1931 // Skip border extension if block is inside the frame.
1932 if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
1933 y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
1934 uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
1935 // Extend the border.
1936 #if CONFIG_VP9_HIGHBITDEPTH
1937 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1938 high_build_mc_border(buf_ptr1,
1948 buf_stride = x1 - x0 + 1;
1949 buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
1950 y_pad * 3 * buf_stride + x_pad * 3;
1952 build_mc_border(buf_ptr1,
1962 buf_stride = x1 - x0 + 1;
1963 buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
1966 build_mc_border(buf_ptr1,
1976 buf_stride = x1 - x0 + 1;
1977 buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
1978 #endif // CONFIG_VP9_HIGHBITDEPTH
1981 // Wait until reference block is ready. Pad 7 more pixels as last 7
1982 // pixels of each superblock row can be changed by next superblock row.
1983 if (pbi->frame_parallel_decode)
1984 vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
1985 MAX(0, (y1 + 7) << (plane == 0 ? 0 : 1)));
1987 #if CONFIG_VP9_HIGHBITDEPTH
1988 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1989 high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
1990 subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
1992 inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
1993 subpel_y, sf, w, h, ref, kernel, xs, ys);
1996 inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
1997 subpel_y, sf, w, h, ref, kernel, xs, ys);
1998 #endif // CONFIG_VP9_HIGHBITDEPTH
2002 void vp9_dec_build_inter_predictors_sb(VP9Decoder *const pbi, MACROBLOCKD *xd,
2003 int mi_row, int mi_col,
2006 const int mi_x = mi_col * MI_SIZE;
2007 const int mi_y = mi_row * MI_SIZE;
2008 for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
2009 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
2011 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
2012 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
2013 const int bw = 4 * num_4x4_w;
2014 const int bh = 4 * num_4x4_h;
2016 if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
2018 assert(bsize == BLOCK_8X8);
2019 for (y = 0; y < num_4x4_h; ++y)
2020 for (x = 0; x < num_4x4_w; ++x)
2021 dec_build_inter_predictors(pbi, xd, plane, i++, bw, bh,
2022 4 * x, 4 * y, 4, 4, mi_x, mi_y);
2024 dec_build_inter_predictors(pbi, xd, plane, 0, bw, bh,
2025 0, 0, bw, bh, mi_x, mi_y);