spatial_svc
fp_mb_stats
emulate_hardware
+ var_tx
+ ext_tx
misc_fixes
- ext_intra
+ universal_hp
- ext_ipred_bltr
"
CONFIG_LIST="
dependency_tracking
const MODE_INFO *const mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
- if (plane_type != PLANE_TYPE_Y || xd->lossless[mbmi->segment_id] ||
- is_inter_block(mbmi) || mbmi->tx_size >= TX_32X32)
+#if CONFIG_EXT_TX
+ if (xd->lossless || tx_size >= TX_32X32)
return DCT_DCT;
+ if (mbmi->sb_type >= BLOCK_8X8) {
+ if (plane_type == PLANE_TYPE_Y || is_inter_block(mbmi))
+ return mbmi->tx_type;
+ }
- if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mbmi) ||
- tx_size >= TX_32X32)
+ if (is_inter_block(mbmi))
+ return DCT_DCT;
+ else
+ return intra_mode_to_tx_type_lookup[plane_type == PLANE_TYPE_Y ?
+ get_y_mode(mi, block_idx) : mbmi->uv_mode];
+#else
++ if (plane_type != PLANE_TYPE_Y || xd->lossless[mbmi->segment_id] ||
++ is_inter_block(mbmi) || tx_size >= TX_32X32)
+ return DCT_DCT;
return intra_mode_to_tx_type_lookup[get_y_mode(mi, block_idx)];
+#endif // CONFIG_EXT_TX
}
void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
}
}
- static INLINE const vpx_prob *get_y_mode_probs(const MODE_INFO *mi,
- const MODE_INFO *above_mi,
- const MODE_INFO *left_mi,
- int block) {
- const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, block);
- const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, block);
- return vp10_kf_y_mode_prob[above][left];
- }
-
typedef void (*foreach_transformed_block_visitor)(int plane, int block,
+ int blk_row, int blk_col,
BLOCK_SIZE plane_bsize,
TX_SIZE tx_size,
void *arg);
{ 149, 144, },
};
+#if CONFIG_EXT_TX
+// Coding tree for the extended transform-type symbol.  Standard
+// vpx_tree_index layout: negative entries are leaves (the negated TX_TYPE
+// value), non-negative entries are the index of the next node pair.
+const vpx_tree_index vp10_tx_type_tree[TREE_SIZE(TX_TYPES)] = {
+ -IDTX, 2,
+ -DCT_DCT, 4,
+ -DST_DST, 6,
+ 8, 18,
+ 10, 12,
+ -DST_DCT, -DCT_DST,
+ 14, 16,
+ -ADST_DCT, -DCT_ADST,
+ -FLIPADST_DCT, -DCT_FLIPADST,
+ 20, 26,
+ 22, 24,
+ -DST_ADST, -ADST_DST,
+ -DST_FLIPADST, -FLIPADST_DST,
+ 28, 30,
+ -ADST_ADST, -FLIPADST_FLIPADST,
+ -ADST_FLIPADST, -FLIPADST_ADST
+};
+
+// Default probabilities for the inter tx_type tree, one row per coded
+// transform size (EXT_TX_SIZES covers TX_4X4..TX_16X16).
+// NOTE(review): all three rows are identical and mostly 128 (uniform) --
+// these look like placeholder values pending real tuning.
+static const vpx_prob
+default_inter_tx_type_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
+ { 12, 112, 16, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128 },
+ { 12, 112, 16, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128 },
+ { 12, 112, 16, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128 },
+};
+
+static const vpx_prob
+default_intra_tx_type_prob[EXT_TX_SIZES][INTRA_MODES][TX_TYPES - 1] = {
+ {
+ { 8, 11, 24, 112, 87, 137, 127, 134,
+ 128, 86, 128, 124, 125, 133, 176, 123, },
+ { 10, 9, 39, 106, 73, 155, 163, 228,
+ 35, 62, 129, 127, 133, 114, 213, 234, },
+ { 10, 9, 14, 88, 91, 127, 151, 51,
+ 210, 89, 126, 58, 52, 116, 217, 24, },
+ { 9, 6, 29, 113, 98, 131, 149, 210,
+ 119, 60, 124, 93, 90, 143, 170, 197, },
+ { 8, 8, 38, 101, 111, 166, 167, 141,
+ 130, 105, 128, 75, 75, 118, 197, 117, },
+ { 7, 8, 39, 91, 101, 153, 166, 200,
+ 99, 77, 123, 90, 83, 144, 224, 192, },
+ { 7, 10, 26, 86, 119, 154, 130, 101,
+ 152, 91, 129, 75, 79, 137, 219, 77, },
+ { 10, 13, 20, 86, 102, 162, 112, 76,
+ 171, 86, 134, 122, 106, 124, 196, 44, },
+ { 8, 9, 33, 108, 100, 144, 148, 215,
+ 77, 60, 125, 125, 128, 126, 198, 220, },
+ { 3, 10, 29, 111, 69, 141, 204, 141,
+ 139, 93, 120, 75, 77, 163, 242, 124, },
+ },
+ {
+ { 2, 53, 18, 147, 96, 98, 136, 133,
+ 131, 120, 153, 163, 169, 137, 173, 124, },
+ { 4, 18, 34, 133, 54, 130, 179, 228,
+ 28, 72, 153, 164, 168, 118, 227, 239, },
+ { 4, 18, 13, 125, 72, 110, 176, 36,
+ 221, 104, 148, 75, 72, 117, 225, 19, },
+ { 8, 33, 24, 162, 113, 99, 147, 226,
+ 103, 85, 153, 143, 153, 124, 155, 210, },
+ { 2, 15, 35, 107, 127, 158, 192, 128,
+ 126, 116, 151, 95, 88, 182, 241, 119, },
+ { 3, 15, 36, 112, 100, 146, 194, 189,
+ 90, 98, 152, 99, 100, 165, 235, 175, },
+ { 3, 16, 29, 109, 103, 140, 182, 76,
+ 173, 104, 147, 82, 85, 159, 235, 70, },
+ { 9, 24, 14, 120, 86, 156, 161, 34,
+ 177, 121, 142, 128, 128, 126, 185, 37, },
+ { 5, 24, 29, 152, 98, 99, 174, 228,
+ 82, 76, 147, 149, 128, 132, 191, 225, },
+ { 2, 15, 29, 111, 77, 126, 200, 135,
+ 117, 93, 152, 96, 84, 191, 245, 135, },
+ },
+ {
+ { 2, 69, 13, 173, 111, 69, 137, 159,
+ 159, 146, 151, 193, 203, 131, 180, 123, },
+ { 1, 12, 33, 164, 32, 98, 204, 242,
+ 23, 99, 149, 215, 232, 110, 239, 245, },
+ { 1, 17, 9, 136, 82, 83, 171, 28,
+ 231, 128, 135, 76, 64, 118, 235, 17, },
+ { 4, 41, 17, 195, 131, 58, 161, 237,
+ 141, 97, 153, 189, 191, 117, 182, 202, },
+ { 2, 17, 36, 104, 149, 137, 217, 139,
+ 191, 119, 125, 107, 115, 223, 249, 110, },
+ { 2, 14, 24, 127, 91, 135, 219, 198,
+ 113, 91, 164, 125, 173, 211, 250, 116, },
+ { 3, 19, 24, 120, 102, 130, 209, 81,
+ 187, 95, 143, 102, 50, 190, 244, 56, },
+ { 4, 27, 10, 128, 91, 157, 181, 33,
+ 181, 150, 141, 141, 166, 114, 215, 25, },
+ { 2, 34, 27, 187, 102, 77, 210, 245,
+ 113, 107, 136, 184, 188, 121, 210, 234, },
+ { 1, 15, 22, 141, 59, 94, 208, 133,
+ 154, 95, 152, 112, 105, 191, 242, 111, },
+ },
+};
+#endif // CONFIG_EXT_TX
+ #if CONFIG_MISC_FIXES
+ // FIXME(someone) need real defaults here
+ static const struct segmentation_probs default_seg_probs = {
+ { 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128 },
+ };
+ #endif
static void init_mode_probs(FRAME_CONTEXT *fc) {
- vp10_copy(fc->uv_mode_prob, default_if_uv_probs);
+ vp10_copy(fc->uv_mode_prob, default_uv_probs);
vp10_copy(fc->y_mode_prob, default_if_y_probs);
vp10_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
vp10_copy(fc->partition_prob, default_partition_probs);
fc->tx_probs = default_tx_probs;
vp10_copy(fc->skip_probs, default_skip_probs);
vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
+#if CONFIG_EXT_TX
+ vp10_copy(fc->inter_tx_type_prob, default_inter_tx_type_prob);
+ vp10_copy(fc->intra_tx_type_prob, default_intra_tx_type_prob);
+#endif // CONFIG_EXT_TX
+ #if CONFIG_MISC_FIXES
+ vp10_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
+ vp10_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
+ #endif
}
const vpx_tree_index vp10_switchable_interp_tree
fc->skip_probs[i] = mode_mv_merge_probs(
pre_fc->skip_probs[i], counts->skip[i]);
+#if CONFIG_EXT_TX
+ for (i = TX_4X4; i <= TX_16X16; ++i) {
+ vpx_tree_merge_probs(vp10_tx_type_tree, pre_fc->inter_tx_type_prob[i],
+ counts->inter_tx_type[i], fc->inter_tx_type_prob[i]);
+
+ for (j = 0; j < INTRA_MODES; ++j)
+ vpx_tree_merge_probs(vp10_tx_type_tree, pre_fc->intra_tx_type_prob[i][j],
+ counts->intra_tx_type[i][j],
+ fc->intra_tx_type_prob[i][j]);
+ }
+#endif // CONFIG_EXT_TX
+ #if CONFIG_MISC_FIXES
+ if (cm->seg.temporal_update) {
+ for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+ fc->seg.pred_probs[i] = mode_mv_merge_probs(pre_fc->seg.pred_probs[i],
+ counts->seg.pred[i]);
+
+ vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+ counts->seg.tree_mispred, fc->seg.tree_probs);
+ } else {
+ vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+ counts->seg.tree_total, fc->seg.tree_probs);
+ }
+
+ for (i = 0; i < INTRA_MODES; ++i)
+ vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
+ counts->uv_mode[i], fc->uv_mode_prob[i]);
+
+ for (i = 0; i < PARTITION_CONTEXTS; i++)
+ vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
+ counts->partition[i], fc->partition_prob[i]);
+ #endif
}
static void set_default_lf_deltas(struct loopfilter *lf) {
struct tx_probs tx_probs;
vpx_prob skip_probs[SKIP_CONTEXTS];
nmv_context nmvc;
+#if CONFIG_EXT_TX
+ vpx_prob inter_tx_type_prob[EXT_TX_SIZES][TX_TYPES - 1];
+ vpx_prob intra_tx_type_prob[EXT_TX_SIZES][INTRA_MODES][TX_TYPES - 1];
+#endif // CONFIG_EXT_TX
+ #if CONFIG_MISC_FIXES
+ struct segmentation_probs seg;
+ #endif
int initialized;
} FRAME_CONTEXT;
struct tx_counts tx;
unsigned int skip[SKIP_CONTEXTS][2];
nmv_context_counts mv;
+#if CONFIG_EXT_TX
+ unsigned int inter_tx_type[EXT_TX_SIZES][TX_TYPES];
+ unsigned int intra_tx_type[EXT_TX_SIZES][INTRA_MODES][TX_TYPES];
+#endif // CONFIG_EXT_TX
+ #if CONFIG_MISC_FIXES
+ struct seg_counts seg;
+ #endif
} FRAME_COUNTS;
- extern const vpx_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
extern const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES]
[INTRA_MODES - 1];
+ #if !CONFIG_MISC_FIXES
+ extern const vpx_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
extern const vpx_prob vp10_kf_partition_probs[PARTITION_CONTEXTS]
[PARTITION_TYPES - 1];
+ #endif
+ extern const vpx_prob
+ vp10_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES][PALETTE_Y_MODE_CONTEXTS];
+ extern const vpx_prob
+ vp10_default_palette_y_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+ extern const vpx_prob
+ vp10_default_palette_uv_size_prob[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+ extern const vpx_prob vp10_default_palette_y_color_prob
+ [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1];
+ extern const vpx_prob vp10_default_palette_uv_color_prob
+ [PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1];
+
extern const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
extern const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)];
extern const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)];
void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
unsigned int (*ct_8x8p)[2]);
+#if CONFIG_EXT_TX
+extern const vpx_tree_index vp10_tx_type_tree[TREE_SIZE(TX_TYPES)];
+#endif // CONFIG_EXT_TX
+
+ // Returns ceil(log2(n)) for n >= 2, and 1 for n <= 2, so the result is
+ // always at least one bit.  NOTE(review): vp10_ceil_log2(1) == 1, unlike
+ // the mathematical ceil(log2(1)) == 0 -- callers appear to rely on a
+ // one-bit minimum; confirm before "fixing".
+ static INLINE int vp10_ceil_log2(int n) {
+ int i = 1, p = 2;
+ while (p < n) {
+ i++;
+ p = p << 1;
+ }
+ return i;
+ }
+
+ int vp10_get_palette_color_context(const uint8_t *color_map, int cols,
+ int r, int c, int n, int *color_order);
+
#ifdef __cplusplus
} // extern "C"
#endif
case ADST_ADST:
vp10_iht4x4_16_add(input, dest, stride, tx_type);
break;
- break;
+#if CONFIG_EXT_TX
+ case FLIPADST_DCT:
+ flipud(dest, stride, 4);
+ vp10_iht4x4_16_add(input, dest, stride, ADST_DCT);
+ flipud(dest, stride, 4);
++ break;
+ case DCT_FLIPADST:
+ fliplr(dest, stride, 4);
+ vp10_iht4x4_16_add(input, dest, stride, DCT_ADST);
+ fliplr(dest, stride, 4);
+ break;
+ case FLIPADST_FLIPADST:
+ fliplrud(dest, stride, 4);
+ vp10_iht4x4_16_add(input, dest, stride, ADST_ADST);
+ fliplrud(dest, stride, 4);
+ break;
+ case ADST_FLIPADST:
+ fliplr(dest, stride, 4);
+ vp10_iht4x4_16_add(input, dest, stride, ADST_ADST);
+ fliplr(dest, stride, 4);
+ break;
+ case FLIPADST_ADST:
+ flipud(dest, stride, 4);
+ vp10_iht4x4_16_add(input, dest, stride, ADST_ADST);
+ flipud(dest, stride, 4);
+ break;
+ case DST_DST:
+ case DST_DCT:
+ case DCT_DST:
+ case DST_ADST:
+ case ADST_DST:
+ // Use C version since DST only exists in C code
+ vp10_iht4x4_16_add_c(input, dest, stride, tx_type);
+ break;
+ case FLIPADST_DST:
+ flipud(dest, stride, 4);
+ vp10_iht4x4_16_add_c(input, dest, stride, ADST_DST);
+ flipud(dest, stride, 4);
+ break;
+ case DST_FLIPADST:
+ fliplr(dest, stride, 4);
+ vp10_iht4x4_16_add_c(input, dest, stride, DST_ADST);
+ fliplr(dest, stride, 4);
+ break;
+ case IDTX:
+ inv_idtx_add_c(input, dest, stride, 4);
+ break;
+#endif // CONFIG_EXT_TX
default:
assert(0);
break;
case ADST_ADST:
vp10_highbd_iht4x4_16_add(input, dest, stride, tx_type, bd);
break;
- break;
+#if CONFIG_EXT_TX
+ case FLIPADST_DCT:
+ flipud16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ vp10_highbd_iht4x4_16_add(input, dest, stride, ADST_DCT, bd);
+ flipud16(CONVERT_TO_SHORTPTR(dest), stride, 4);
++ break;
+ case DCT_FLIPADST:
+ fliplr16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ vp10_highbd_iht4x4_16_add(input, dest, stride, DCT_ADST, bd);
+ fliplr16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ break;
+ case FLIPADST_FLIPADST:
+ fliplrud16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ vp10_highbd_iht4x4_16_add(input, dest, stride, ADST_ADST, bd);
+ fliplrud16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ break;
+ case ADST_FLIPADST:
+ fliplr16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ vp10_highbd_iht4x4_16_add(input, dest, stride, ADST_ADST, bd);
+ fliplr16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ break;
+ case FLIPADST_ADST:
+ flipud16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ vp10_highbd_iht4x4_16_add(input, dest, stride, ADST_ADST, bd);
+ flipud16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ break;
+ case DST_DST:
+ case DST_DCT:
+ case DCT_DST:
+ case DST_ADST:
+ case ADST_DST:
+ // Use C version since DST only exists in C code
+ vp10_highbd_iht4x4_16_add_c(input, dest, stride, tx_type, bd);
+ break;
+ case FLIPADST_DST:
+ flipud16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ vp10_highbd_iht4x4_16_add_c(input, dest, stride, ADST_DST, bd);
+ flipud16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ break;
+ case DST_FLIPADST:
+ fliplr16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ vp10_highbd_iht4x4_16_add_c(input, dest, stride, DST_ADST, bd);
+ fliplr16(CONVERT_TO_SHORTPTR(dest), stride, 4);
+ break;
+ case IDTX:
+ highbd_inv_idtx_add_c(input, dest, stride, 4, bd);
+ break;
+#endif // CONFIG_EXT_TX
default:
assert(0);
break;
uint8_t *dst, int stride,
int eob, int block) {
struct macroblockd_plane *const pd = &xd->plane[plane];
- TX_TYPE tx_type = get_tx_type(pd->plane_type, xd, block);
+ TX_TYPE tx_type = get_tx_type(pd->plane_type, xd, block, tx_size);
+ const int seg_id = xd->mi[0]->mbmi.segment_id;
if (eob > 0) {
tran_low_t *const dqcoeff = pd->dqcoeff;
#if CONFIG_VP9_HIGHBITDEPTH
for (j = 0; j < PARTITION_CONTEXTS; ++j)
for (i = 0; i < PARTITION_TYPES - 1; ++i)
vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+ #endif
read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
+#if CONFIG_EXT_TX
+ read_ext_tx_probs(fc, &r);
+#endif
}
return vpx_reader_has_error(&r);
return NEARESTMV + mode;
}
- static int read_segment_id(vpx_reader *r, const struct segmentation *seg) {
- return vpx_read_tree(r, vp10_segment_tree, seg->tree_probs);
+ static int read_segment_id(vpx_reader *r,
+ const struct segmentation_probs *segp) {
+ return vpx_read_tree(r, vp10_segment_tree, segp->tree_probs);
}
+#if CONFIG_VAR_TX
+// Recursively reads the variable-transform-size split decisions for an
+// inter block.  At each level one bit is read: 1 = split the current
+// tx_size into four quadrants (TX_8X8 splits directly to TX_4X4 and
+// stops), 0 = keep tx_size for this region.  The chosen size for each
+// region is recorded per 8x8 unit in mbmi->inter_tx_size[] (indexed on an
+// 8-wide grid, hence the "* 8" below), and mbmi->tx_size ends up holding
+// the last size written.  blk_row/blk_col are in 4x4-block units relative
+// to the block origin.  Regions entirely outside the visible frame are
+// skipped (no bit read).
+static void read_tx_size_inter(VP10_COMMON *cm, MACROBLOCKD *xd,
+ MB_MODE_INFO *mbmi,
+ TX_SIZE tx_size, int blk_row, int blk_col,
+ vpx_reader *r) {
+ int is_split = 0;
+ const int tx_idx = (blk_row >> 1) * 8 + (blk_col >> 1);
+ int max_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
+ int max_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
+ // NOTE(review): mb_to_*_edge appear to be in 1/8-pel units, so >> 5
+ // converts to 4x4-block units (>>3 to pixels, >>2 to 4x4 blocks) --
+ // confirm against the setup code that fills these fields.
+ if (xd->mb_to_bottom_edge < 0)
+ max_blocks_high += xd->mb_to_bottom_edge >> 5;
+ if (xd->mb_to_right_edge < 0)
+ max_blocks_wide += xd->mb_to_right_edge >> 5;
+
+ if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide)
+ return;
+
+ is_split = vpx_read_bit(r);
+
+ if (is_split) {
+ BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+ int bsl = b_width_log2_lookup[bsize];
+ int i;
+ // TX_8X8 cannot split further than TX_4X4; record it and stop.
+ if (tx_size == TX_8X8) {
+ mbmi->inter_tx_size[tx_idx] = TX_4X4;
+ mbmi->tx_size = mbmi->inter_tx_size[tx_idx];
+ return;
+ }
+
+ assert(bsl > 0);
+ --bsl;
+ // Recurse into the four quadrants (i>>1 selects row, i&1 column).
+ for (i = 0; i < 4; ++i) {
+ int offsetr = blk_row + ((i >> 1) << bsl);
+ int offsetc = blk_col + ((i & 0x01) << bsl);
+ read_tx_size_inter(cm, xd, mbmi, tx_size - 1, offsetr, offsetc, r);
+ }
+ } else {
+ mbmi->inter_tx_size[tx_idx] = tx_size;
+ mbmi->tx_size = mbmi->inter_tx_size[tx_idx];
+ }
+}
+#endif
+
static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
TX_SIZE max_tx_size, vpx_reader *r) {
FRAME_COUNTS *counts = xd->counts;
break;
default:
mbmi->mode = read_intra_mode(r,
- get_y_mode_probs(mi, above_mi, left_mi, 0));
+ get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
}
- mbmi->uv_mode = read_intra_mode(r, vp10_kf_uv_mode_prob[mbmi->mode]);
+ mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode);
+
+ mbmi->palette_mode_info.palette_size[0] = 0;
+ mbmi->palette_mode_info.palette_size[1] = 0;
+ if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools &&
+ mbmi->mode == DC_PRED)
+ read_palette_mode_info(cm, xd, r);
++
+
+#if CONFIG_EXT_TX
+ if (mbmi->tx_size <= TX_16X16 && cm->base_qindex > 0 &&
+ mbmi->sb_type >= BLOCK_8X8 && !mbmi->skip &&
+ !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ mbmi->tx_type =
+ vpx_read_tree(r, vp10_tx_type_tree,
+ cm->fc->intra_tx_type_prob[mbmi->tx_size][mbmi->mode]);
+ } else {
+ mbmi->tx_type = DCT_DCT;
+ }
+#endif // CONFIG_EXT_TX
}
static int read_mv_component(vpx_reader *r,
{{0, 1}, {2, 2}, {6, 3}, {7, 3}};
static const struct vp10_token inter_mode_encodings[INTER_MODES] =
{{2, 2}, {6, 3}, {0, 1}, {7, 3}};
+ static const struct vp10_token palette_size_encodings[] = {
+ {0, 1}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, {62, 6}, {63, 6},
+ };
+ static const struct vp10_token
+ palette_color_encodings[PALETTE_MAX_SIZE - 1][PALETTE_MAX_SIZE] = {
+ {{0, 1}, {1, 1}}, // 2 colors
+ {{0, 1}, {2, 2}, {3, 2}}, // 3 colors
+ {{0, 1}, {2, 2}, {6, 3}, {7, 3}}, // 4 colors
+ {{0, 1}, {2, 2}, {6, 3}, {14, 4}, {15, 4}}, // 5 colors
+ {{0, 1}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, {31, 5}}, // 6 colors
+ {{0, 1}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, {62, 6}, {63, 6}}, // 7 colors
+ {{0, 1}, {2, 2}, {6, 3}, {14, 4},
+ {30, 5}, {62, 6}, {126, 7}, {127, 7}}, // 8 colors
+ };
+
+ // Writes v (expected 0 <= v < n) with a truncated binary (near-uniform)
+ // code: with l = get_unsigned_bits(n) and m = (1 << l) - n, the first m
+ // values take l - 1 bits and the remaining n - m values take l bits
+ // (l - 1 bits plus a trailing parity bit).  Writes nothing when l == 0.
+ static INLINE void write_uniform(vpx_writer *w, int n, int v) {
+ int l = get_unsigned_bits(n);
+ int m = (1 << l) - n;
+ if (l == 0)
+ return;
+ if (v < m) {
+ vpx_write_literal(w, v, l - 1);
+ } else {
+ vpx_write_literal(w, m + ((v - m) >> 1), l - 1);
+ vpx_write_literal(w, (v - m) & 1, 1);
+ }
+ }
+#if CONFIG_EXT_TX
+static struct vp10_token tx_type_encodings[TX_TYPES];
+#endif // CONFIG_EXT_TX
+
+// One-time initialization of encoder-side token tables: builds the
+// tx_type token encodings from vp10_tx_type_tree when EXT_TX is enabled
+// (otherwise a no-op).  Must be called before any tokens are packed.
+// Declared as (void): an empty parameter list () is an old-style
+// unprototyped declarator in C and disables argument checking.
+void vp10_encode_token_init(void) {
+#if CONFIG_EXT_TX
+ vp10_tokens_from_tree(tx_type_encodings, vp10_tx_type_tree);
+#endif // CONFIG_EXT_TX
+}
+
static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode,
const vpx_prob *probs) {
vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}
+#if CONFIG_EXT_TX
+// Encoder-side backward update of the EXT_TX tx_type probabilities.
+// For the inter table and then the intra table, sums the estimated bit
+// savings of a probability update over tx sizes TX_4X4..TX_16X16, writes
+// one group flag (coded with GROUP_DIFF_UPDATE_PROB) saying whether the
+// whole group is updated, and if set writes the per-entry diff updates.
+static void update_ext_tx_probs(VP10_COMMON *cm, vpx_writer *w) {
+ // Cost of signaling "update" minus "no update" -- the savings must at
+ // least cover the flag itself.
+ const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+ int i, j;
+ int savings = 0;
+ int do_update = 0;
+ // Inter tx_type probabilities: one table per tx size.
+ for (i = TX_4X4; i <= TX_16X16; ++i) {
+ savings += prob_diff_update_savings(
+ vp10_tx_type_tree, cm->fc->inter_tx_type_prob[i],
+ cm->counts.inter_tx_type[i], TX_TYPES);
+ }
+ do_update = savings > savings_thresh;
+ vpx_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ if (do_update) {
+ for (i = TX_4X4; i <= TX_16X16; ++i) {
+ prob_diff_update(vp10_tx_type_tree, cm->fc->inter_tx_type_prob[i],
+ cm->counts.inter_tx_type[i], TX_TYPES, w);
+ }
+ }
+
+ savings = 0;
+ do_update = 0;
+
+ // Intra tx_type probabilities: one table per (tx size, intra mode).
+ for (i = TX_4X4; i <= TX_16X16; ++i)
+ for (j = 0; j < INTRA_MODES; ++j)
+ savings += prob_diff_update_savings(
+ vp10_tx_type_tree, cm->fc->intra_tx_type_prob[i][j],
+ cm->counts.intra_tx_type[i][j], TX_TYPES);
+ do_update = savings > savings_thresh;
+ vpx_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ if (do_update) {
+ for (i = TX_4X4; i <= TX_16X16; ++i)
+ for (j = 0; j < INTRA_MODES; ++j)
+ prob_diff_update(vp10_tx_type_tree, cm->fc->intra_tx_type_prob[i][j],
+ cm->counts.intra_tx_type[i][j], TX_TYPES, w);
+ }
+}
+#endif // CONFIG_EXT_TX
+
+ // Writes the palette color-index tokens for one block: rows*cols - 1 of
+ // them, i.e. one per pixel except the first -- the first pixel's index
+ // is presumably signaled separately via write_uniform of
+ // palette_first_color_idx (see write_palette_mode_info); confirm.
+ // n is the palette size (the n - 2 indexing implies n >= 2).  Advances
+ // *tp past the consumed tokens.
+ static void pack_palette_tokens(vpx_writer *w, TOKENEXTRA **tp,
+ BLOCK_SIZE bsize, int n) {
+ int rows = 4 * num_4x4_blocks_high_lookup[bsize];
+ int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
+ int i;
+ TOKENEXTRA *p = *tp;
+
+ for (i = 0; i < rows * cols -1; ++i) {
+ vp10_write_token(w, vp10_palette_color_tree[n - 2], p->context_tree,
+ &palette_color_encodings[n - 2][p->token]);
+ ++p;
+ }
+
+ *tp = p;
+ }
+
static void pack_mb_tokens(vpx_writer *w,
TOKENEXTRA **tp, const TOKENEXTRA *const stop,
- vpx_bit_depth_t bit_depth) {
+ vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
TOKENEXTRA *p = *tp;
+ #if !CONFIG_MISC_FIXES
+ (void) tx;
+ #endif
while (p < stop && p->token != EOSB_TOKEN) {
const int t = p->token;
}
}
}
+
+#if CONFIG_EXT_TX
+ if (mbmi->tx_size <= TX_16X16 && cm->base_qindex > 0 &&
+ bsize >= BLOCK_8X8 && !mbmi->skip &&
+ !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ if (is_inter)
+ vp10_write_token(w, vp10_tx_type_tree,
+ cm->fc->inter_tx_type_prob[mbmi->tx_size],
+ &tx_type_encodings[mbmi->tx_type]);
+ else
+ vp10_write_token(w, vp10_tx_type_tree,
+ cm->fc->intra_tx_type_prob[mbmi->tx_size][mbmi->mode],
+ &tx_type_encodings[mbmi->tx_type]);
+ }
+#endif // CONFIG_EXT_TX
}
+ // Signals whether this block uses palette coding for the Y plane and, if
+ // so, the palette size, the n base colors (cm->bit_depth literal bits
+ // each), and the color index of the first pixel.  The context for the
+ // palette-enable flag is how many of the above/left neighbors used a Y
+ // palette (0..2).  NOTE(review): the [bsize - BLOCK_8X8] indexing
+ // assumes bsize >= BLOCK_8X8, which the caller gates on.
+ static void write_palette_mode_info(const VP10_COMMON *cm,
+ const MACROBLOCKD *xd,
+ const MODE_INFO *const mi,
+ vpx_writer *w) {
+ const MB_MODE_INFO *const mbmi = &mi->mbmi;
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
+ const BLOCK_SIZE bsize = mbmi->sb_type;
+ const PALETTE_MODE_INFO *pmi = &mbmi->palette_mode_info;
+ int palette_ctx = 0;
+ int n, i;
+
+ n = pmi->palette_size[0];
+ if (above_mi)
+ palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+ if (left_mi)
+ palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+ vpx_write(w, n > 0,
+ vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
+ if (n > 0) {
+ vp10_write_token(w, vp10_palette_size_tree,
+ vp10_default_palette_y_size_prob[bsize - BLOCK_8X8],
+ &palette_size_encodings[n - 2]);
+ for (i = 0; i < n; ++i)
+ vpx_write_literal(w, pmi->palette_colors[i],
+ cm->bit_depth);
+ write_uniform(w, n, pmi->palette_first_color_idx[0]);
+ }
+ }
+
static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO **mi_8x8, vpx_writer *w) {
const struct segmentation *const seg = &cm->seg;
}
}
- write_intra_mode(w, mbmi->uv_mode, vp10_kf_uv_mode_prob[mbmi->mode]);
+ write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mbmi->mode]);
+
+ if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools &&
+ mbmi->mode == DC_PRED)
+ write_palette_mode_info(cm, xd, mi, w);
++
+
+#if CONFIG_EXT_TX
+ if (mbmi->tx_size <= TX_16X16 && cm->base_qindex > 0 &&
+ bsize >= BLOCK_8X8 && !mbmi->skip &&
+ !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ vp10_write_token(w, vp10_tx_type_tree,
+ cm->fc->intra_tx_type_prob[mbmi->tx_size][mbmi->mode],
+ &tx_type_encodings[mbmi->tx_type]);
+ }
+#endif // CONFIG_EXT_TX
}
static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
#include "vp10/encoder/encoder.h"
- void vp10_pack_bitstream(VP10_COMP *cpi, uint8_t *dest, size_t *size);
+ void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size);
+void vp10_encode_token_init(void);
+
static INLINE int vp10_preserve_existing_gf(VP10_COMP *cpi) {
return !cpi->multi_arf_allowed && cpi->refresh_golden_frame &&
cpi->rc.is_src_frame_alt_ref;
}
}
-void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
- int diff_stride, TX_TYPE tx_type, int lossless) {
- if (lossless) {
- vp10_fwht4x4(src_diff, coeff, diff_stride);
- } else {
- switch (tx_type) {
- case DCT_DCT:
- vpx_fdct4x4(src_diff, coeff, diff_stride);
- break;
- case ADST_DCT:
- case DCT_ADST:
- case ADST_ADST:
- vp10_fht4x4(src_diff, coeff, diff_stride, tx_type);
+static void fwd_txfm_8x8_1(const int16_t *src_diff, tran_low_t *coeff,
+ int diff_stride, TX_TYPE tx_type) {
+#if CONFIG_EXT_TX
+ int16_t src_diff2[64];
+#endif // CONFIG_EXT_TX
+ switch (tx_type) {
+ case DCT_DCT:
+ case ADST_DCT:
+ case DCT_ADST:
+ case ADST_ADST:
+ vpx_fdct8x8_1(src_diff, coeff, diff_stride);
+ break;
+#if CONFIG_EXT_TX
+ case FLIPADST_DCT:
+ copy_flipud(src_diff, diff_stride, 8, src_diff2, 8);
+ vp10_fht8x8(src_diff2, coeff, 8, ADST_DCT);
+ break;
+ case DCT_FLIPADST:
+ copy_fliplr(src_diff, diff_stride, 8, src_diff2, 8);
+ vp10_fht8x8(src_diff2, coeff, 8, DCT_ADST);
+ break;
+ case FLIPADST_FLIPADST:
+ copy_fliplrud(src_diff, diff_stride, 8, src_diff2, 8);
+ vp10_fht8x8(src_diff2, coeff, 8, ADST_ADST);
+ break;
+ case ADST_FLIPADST:
+ copy_fliplr(src_diff, diff_stride, 8, src_diff2, 8);
+ vp10_fht8x8(src_diff2, coeff, 8, ADST_ADST);
+ break;
+ case FLIPADST_ADST:
+ copy_flipud(src_diff, diff_stride, 8, src_diff2, 8);
+ vp10_fht8x8(src_diff2, coeff, 8, ADST_ADST);
+ break;
+ case DST_DST:
+ case DCT_DST:
+ case DST_DCT:
+ case DST_ADST:
+ case ADST_DST:
+ // Use C version since DST exists only in C
+ vp10_fht8x8_c(src_diff, coeff, diff_stride, tx_type);
- break;
+ break;
+ case DST_FLIPADST:
+ copy_fliplr(src_diff, diff_stride, 8, src_diff2, 8);
+ vp10_fht8x8_c(src_diff2, coeff, 8, DST_ADST);
+ break;
+ case FLIPADST_DST:
+ copy_flipud(src_diff, diff_stride, 8, src_diff2, 8);
+ vp10_fht8x8_c(src_diff2, coeff, 8, ADST_DST);
+ break;
+ case IDTX:
+ fwd_idtx_c(src_diff, coeff, diff_stride, 8);
+ break;
+#endif // CONFIG_EXT_TX
- default:
- assert(0);
- break;
+ default:
+ assert(0);
+ break;
- }
}
}
assert(tx_type == DCT_DCT);
vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
} else {
- switch (tx_type) {
- case DCT_DCT:
+#if CONFIG_EXT_TX
+ int16_t src_diff2[16];
+#endif // CONFIG_EXT_TX
- vpx_highbd_fdct4x4(src_diff, coeff, diff_stride);
+ switch (tx_type) {
+ case DCT_DCT:
- break;
- case ADST_DCT:
- case DCT_ADST:
- case ADST_ADST:
- vp10_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
- break;
+ vpx_highbd_fdct4x4(src_diff, coeff, diff_stride);
- default:
- assert(0);
- break;
+ break;
+ case ADST_DCT:
+ case DCT_ADST:
+ case ADST_ADST:
+ vp10_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
+ break;
+#if CONFIG_EXT_TX
+ case FLIPADST_DCT:
+ copy_flipud(src_diff, diff_stride, 4, src_diff2, 4);
+ vp10_highbd_fht4x4(src_diff2, coeff, 4, ADST_DCT);
+ break;
+ case DCT_FLIPADST:
+ copy_fliplr(src_diff, diff_stride, 4, src_diff2, 4);
+ vp10_highbd_fht4x4(src_diff2, coeff, 4, DCT_ADST);
+ break;
+ case FLIPADST_FLIPADST:
+ copy_fliplrud(src_diff, diff_stride, 4, src_diff2, 4);
+ vp10_highbd_fht4x4(src_diff2, coeff, 4, ADST_ADST);
+ break;
+ case ADST_FLIPADST:
+ copy_fliplr(src_diff, diff_stride, 4, src_diff2, 4);
+ vp10_highbd_fht4x4(src_diff2, coeff, 4, ADST_ADST);
+ break;
+ case FLIPADST_ADST:
+ copy_flipud(src_diff, diff_stride, 4, src_diff2, 4);
+ vp10_highbd_fht4x4(src_diff2, coeff, 4, ADST_ADST);
+ break;
+ case DST_DST:
+ case DCT_DST:
+ case DST_DCT:
+ case DST_ADST:
+ case ADST_DST:
+ // Use C version since DST exists only in C
+ vp10_highbd_fht4x4_c(src_diff, coeff, diff_stride, tx_type);
+ break;
+ case DST_FLIPADST:
+ copy_fliplr(src_diff, diff_stride, 4, src_diff2, 4);
+ vp10_highbd_fht4x4_c(src_diff2, coeff, 4, DST_ADST);
+ break;
+ case FLIPADST_DST:
+ copy_flipud(src_diff, diff_stride, 4, src_diff2, 4);
+ vp10_highbd_fht4x4_c(src_diff2, coeff, 4, ADST_DST);
+ break;
+ case IDTX:
+ fwd_idtx_c(src_diff, coeff, diff_stride, 4);
+ break;
+#endif // CONFIG_EXT_TX
+ default:
+ assert(0);
+ break;
}
}
}
break;
}
}
- xd->lossless);
+
+// DC-only ("_1") forward 32x32 transform for high bitdepth input.  Only
+// DCT_DCT is implemented; any other tx_type asserts.  The ADST cases are
+// listed explicitly (before the identical default) to document that they
+// are intentionally unsupported at this size.
+static void highbd_fwd_txfm_32x32_1(const int16_t *src_diff,
+ tran_low_t *coeff, int diff_stride,
+ TX_TYPE tx_type) {
+ switch (tx_type) {
+ case DCT_DCT:
+ vpx_highbd_fdct32x32_1(src_diff, coeff, diff_stride);
+ break;
+ case ADST_DCT:
+ case DCT_ADST:
+ case ADST_ADST:
+ assert(0);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
+ int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ const struct macroblock_plane *const p = &x->plane[plane];
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+ TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+ const scan_order *const scan_order =
+ get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+ tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+ tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+ tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+ uint16_t *const eob = &p->eobs[block];
+ const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+ const int16_t *src_diff;
+ src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ switch (tx_size) {
+ case TX_32X32:
+ highbd_fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride,
+ tx_type);
+ vp10_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
+ p->round_fp, p->quant_fp, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant,
+ eob, scan_order->scan,
+ scan_order->iscan);
+ break;
+ case TX_16X16:
+ highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
+ vp10_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ break;
+ case TX_8X8:
+ highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
+ vp10_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ break;
+ case TX_4X4:
+ vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+ xd->lossless);
+ vp10_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ break;
+ default:
+ assert(0);
+ }
+ return;
+ }
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ switch (tx_size) {
+ case TX_32X32:
+ fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride, tx_type);
+ vp10_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
+ break;
+ case TX_16X16:
+ fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
+ vp10_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ break;
+ case TX_8X8:
+ fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
+ vp10_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ break;
+ case TX_4X4:
+ vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
++ xd->lossless[xd->mi[0]->mbmi.segment_id]);
+ vp10_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+// Forward-transform one block and quantize ONLY its DC coefficient
+// (AC coefficients are skipped) -- used on fast/approximate RD paths.
+void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
+ int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ const struct macroblock_plane *const p = &x->plane[plane];
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+ TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+ tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+ tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+ tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+ uint16_t *const eob = &p->eobs[block];
+ const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+ const int16_t *src_diff;
+ src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ switch (tx_size) {
+ case TX_32X32:
+ highbd_fwd_txfm_32x32_1(src_diff, coeff, diff_stride, tx_type);
+ vpx_highbd_quantize_dc_32x32(coeff, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ break;
+ case TX_16X16:
+ highbd_fwd_txfm_16x16_1(src_diff, coeff, diff_stride, tx_type);
+ vpx_highbd_quantize_dc(coeff, 256, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ break;
+ case TX_8X8:
+ highbd_fwd_txfm_8x8_1(src_diff, coeff, diff_stride, tx_type);
+ vpx_highbd_quantize_dc(coeff, 64, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ break;
+ case TX_4X4:
+ vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
++ xd->lossless[xd->mi[0]->mbmi.segment_id]);
+ vpx_highbd_quantize_dc(coeff, 16, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ break;
+ default:
+ assert(0);
+ }
+ return;
+ }
#endif // CONFIG_VP9_HIGHBITDEPTH
- xd->lossless);
+ switch (tx_size) {
+ case TX_32X32:
+ fwd_txfm_32x32_1(src_diff, coeff, diff_stride, tx_type);
+ vpx_quantize_dc_32x32(coeff, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ break;
+ case TX_16X16:
+ fwd_txfm_16x16_1(src_diff, coeff, diff_stride, tx_type);
+ vpx_quantize_dc(coeff, 256, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ break;
+ case TX_8X8:
+ fwd_txfm_8x8_1(src_diff, coeff, diff_stride, tx_type);
+ vpx_quantize_dc(coeff, 64, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ break;
+ case TX_4X4:
+ vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
++ xd->lossless[xd->mi[0]->mbmi.segment_id]);
+ vpx_quantize_dc(coeff, 16, x->skip_block, p->round,
+ p->quant_fp[0], qcoeff, dqcoeff,
+ pd->dequant[0], eob);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
void vp10_xform_quant(MACROBLOCK *x, int plane, int block,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+ int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
- TX_TYPE tx_type = get_tx_type(plane_type, xd, block);
- const scan_order *const scan_order = get_scan(tx_size, tx_type);
+ TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+ const scan_order *const scan_order = get_scan(tx_size, tx_type, 0);
PREDICTION_MODE mode;
const int bwl = b_width_log2_lookup[plane_bsize];
+ const int bhl = b_height_log2_lookup[plane_bsize];
const int diff_stride = 4 * (1 << bwl);
uint8_t *src, *dst;
int16_t *src_diff;
uint16_t *eob = &p->eobs[block];
const int src_stride = p->src.stride;
const int dst_stride = pd->dst.stride;
- int i, j;
- txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
- dst = &pd->dst.buf[4 * (j * dst_stride + i)];
- src = &p->src.buf[4 * (j * src_stride + i)];
- src_diff = &p->src_diff[4 * (j * diff_stride + i)];
+ dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
+ src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
+ src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
- vp10_predict_intra_block(xd, bwl, tx_size, mode, dst, dst_stride,
- dst, dst_stride, blk_col, blk_row, plane);
+ vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride,
- dst, dst_stride, i, j, plane);
++ dst, dst_stride, blk_col, blk_row, plane);
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
// which is significant (not just an optimization) for the lossless
// case.
vp10_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, tx_type,
- xd->lossless);
- xd->lossless[mbmi->segment_id]);
++ xd->lossless[xd->mi[0]->mbmi.segment_id]);
}
break;
default:
int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES];
int switchable_interp_costs[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];
int partition_cost[PARTITION_CONTEXTS][PARTITION_TYPES];
+ int palette_y_size_cost[PALETTE_BLOCK_SIZES][PALETTE_SIZES];
+ int palette_uv_size_cost[PALETTE_BLOCK_SIZES][PALETTE_SIZES];
+ int palette_y_color_cost[PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS]
+ [PALETTE_COLORS];
+ int palette_uv_color_cost[PALETTE_MAX_SIZE - 1][PALETTE_COLOR_CONTEXTS]
+ [PALETTE_COLORS];
+#if CONFIG_EXT_TX
+ int inter_tx_type_costs[EXT_TX_SIZES][TX_TYPES];
+ int intra_tx_type_costs[EXT_TX_SIZES][INTRA_MODES][TX_TYPES];
+#endif // CONFIG_EXT_TX
int multi_arf_allowed;
int multi_arf_enabled;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
vp10_cost_tokens(cpi->switchable_interp_costs[i],
fc->switchable_interp_prob[i], vp10_switchable_interp_tree);
+
+ for (i = 0; i < PALETTE_BLOCK_SIZES; ++i) {
+ vp10_cost_tokens(cpi->palette_y_size_cost[i],
+ vp10_default_palette_y_size_prob[i],
+ vp10_palette_size_tree);
+ vp10_cost_tokens(cpi->palette_uv_size_cost[i],
+ vp10_default_palette_uv_size_prob[i],
+ vp10_palette_size_tree);
+ }
+
+ for (i = 0; i < PALETTE_MAX_SIZE - 1; ++i)
+ for (j = 0; j < PALETTE_COLOR_CONTEXTS; ++j) {
+ vp10_cost_tokens(cpi->palette_y_color_cost[i][j],
+ vp10_default_palette_y_color_prob[i][j],
+ vp10_palette_color_tree[i]);
+ vp10_cost_tokens(cpi->palette_uv_color_cost[i][j],
+ vp10_default_palette_uv_color_prob[i][j],
+ vp10_palette_color_tree[i]);
+ }
+#if CONFIG_EXT_TX
+ for (i = TX_4X4; i <= TX_16X16; ++i) {
+ vp10_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_tx_type_prob[i],
+ vp10_tx_type_tree);
+ for (j = 0; j < INTRA_MODES; ++j)
+ vp10_cost_tokens(cpi->intra_tx_type_costs[i][j],
+ fc->intra_tx_type_prob[i][j], vp10_tx_type_tree);
+ }
+#endif // CONFIG_EXT_TX
}
static void fill_token_costs(vp10_coeff_cost *c,
txfm_rd_in_plane(x, rate, distortion, skip,
sse, ref_best_rd, 0, bs,
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
+
+#if CONFIG_EXT_TX
+ if (bs >= BLOCK_8X8 && mbmi->tx_size <= TX_16X16 &&
+ !xd->lossless && *rate != INT_MAX) {
+ if (is_inter_block(mbmi))
+ *rate += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
+ else
+ *rate += cpi->intra_tx_type_costs[mbmi->tx_size]
+ [mbmi->mode][mbmi->tx_type];
+ }
+#endif // CONFIG_EXT_TX
}
+ // RD helper that forces the smallest transform size: pins tx_size to
+ // TX_4X4 and evaluates rate/distortion/skip/sse for the block at that
+ // size via txfm_rd_in_plane (no transform-size search is performed).
+ static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
+ int *rate, int64_t *distortion,
+ int *skip, int64_t *sse,
+ int64_t ref_best_rd,
+ BLOCK_SIZE bs) {
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+
+ // Side effect visible to the caller: mbmi->tx_size is left at TX_4X4.
+ mbmi->tx_size = TX_4X4;
+
+ txfm_rd_in_plane(x, rate, distortion, skip,
+ sse, ref_best_rd, 0, bs,
+ mbmi->tx_size, cpi->sf.use_fast_coef_costing);
+ }
+
static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
int *rate,
int64_t *distortion,
col + idx, row + idy, 0);
vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride,
dst, dst_stride, xd->bd);
- if (xd->lossless) {
+ if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
- TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
- const scan_order *so = get_scan(TX_4X4, tx_type);
+ TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
+ const scan_order *so = get_scan(TX_4X4, tx_type, 0);
+#if CONFIG_VAR_TX
+ const int coeff_ctx = combine_entropy_contexts(*(tempa + idx),
+ *(templ + idy));
+#endif
vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
- ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
+ ratey += cost_coeffs(x, 0, block,
+#if CONFIG_VAR_TX
+ coeff_ctx,
+#else
+ tempa + idx, templ + idy,
+#endif
+ TX_4X4,
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
dst, dst_stride, col + idx, row + idy, 0);
vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
- if (xd->lossless) {
+ if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
- TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
- const scan_order *so = get_scan(TX_4X4, tx_type);
+ TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
+ const scan_order *so = get_scan(TX_4X4, tx_type, 0);
+#if CONFIG_VAR_TX
+ int coeff_ctx = combine_entropy_contexts(*(tempa + idx),
+ *(templ + idy));
+#endif
vp10_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
- ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
+#if CONFIG_VAR_TX
+ ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
+ so->neighbors, cpi->sf.use_fast_coef_costing);
+ *(tempa + idx) = !(p->eobs[block] == 0);
+ *(templ + idy) = !(p->eobs[block] == 0);
+#else
+ ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy,
+ TX_4X4,
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
+#endif
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next;
vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
int this_rate, this_rate_tokenonly, s;
int64_t this_distortion, this_rd;
TX_SIZE best_tx = TX_4X4;
+#if CONFIG_EXT_TX
+ TX_TYPE best_tx_type = DCT_DCT;
+#endif // CONFIG_EXT_TX
int *bmode_costs;
+ PALETTE_MODE_INFO palette_mode_info;
+ uint8_t *best_palette_color_map = cpi->common.allow_screen_content_tools ?
+ x->palette_buffer->best_palette_color_map : NULL;
+ int rows = 4 * num_4x4_blocks_high_lookup[bsize];
+ int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
+ int palette_ctx = 0;
const MODE_INFO *above_mi = xd->above_mi;
const MODE_INFO *left_mi = xd->left_mi;
const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0);
}
}
+ if (cpi->common.allow_screen_content_tools)
+ rd_pick_palette_intra_sby(cpi, x, bsize, palette_ctx, bmode_costs[DC_PRED],
+ &palette_mode_info, best_palette_color_map,
+ &best_tx, &mode_selected, &best_rd);
+
mic->mbmi.mode = mode_selected;
mic->mbmi.tx_size = best_tx;
+#if CONFIG_EXT_TX
+ mic->mbmi.tx_type = best_tx_type;
+#endif // CONFIG_EXT_TX
+ mic->mbmi.palette_mode_info.palette_size[0] =
+ palette_mode_info.palette_size[0];
+ if (palette_mode_info.palette_size[0] > 0) {
+ memcpy(mic->mbmi.palette_mode_info.palette_colors,
+ palette_mode_info.palette_colors,
+ PALETTE_MAX_SIZE * sizeof(palette_mode_info.palette_colors[0]));
+ memcpy(xd->plane[0].color_index_map, best_palette_color_map,
+ rows * cols * sizeof(best_palette_color_map[0]));
+ }
return best_rd;
}
return segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
}
-static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
+ // Emit one palette color-index token per pixel of the block into *t,
+ // in raster order, skipping the top-left pixel (its index is coded
+ // separately). Each token is the pixel's rank within the
+ // neighbor-derived color order for its context.
+ void vp10_tokenize_palette_sb(struct ThreadData *const td,
+ BLOCK_SIZE bsize, int plane,
+ TOKENEXTRA **t) {
+ MACROBLOCK *const x = &td->mb;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+ uint8_t *color_map = xd->plane[0].color_index_map;
+ PALETTE_MODE_INFO *pmi = &mbmi->palette_mode_info;
+ int n = pmi->palette_size[plane != 0];
+ int i, j, k;
+ int color_new_idx = -1, color_ctx, color_order[PALETTE_MAX_SIZE];
+ int rows = 4 * num_4x4_blocks_high_lookup[bsize];
+ int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
+
+ for (i = 0; i < rows; ++i) {
+ for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
+ // Rank the stored palette index against the context's color order;
+ // the rank (not the raw index) is what gets entropy-coded.
+ color_ctx = vp10_get_palette_color_context(color_map, cols, i, j, n,
+ color_order);
+ for (k = 0; k < n; ++k)
+ if (color_map[i * cols + j] == color_order[k]) {
+ color_new_idx = k;
+ break;
+ }
+ assert(color_new_idx >= 0 && color_new_idx < n);
+
+ // NOTE(review): the plane-0 color map and the Y color probabilities
+ // are used regardless of `plane` -- presumably only the luma plane
+ // is supported here; confirm before enabling UV palette tokens.
+ (*t)->token = color_new_idx;
+ (*t)->context_tree = vp10_default_palette_y_color_prob[n - 2][color_ctx];
+ (*t)->skip_eob_node = 0;
+ ++(*t);
+ }
+ }
+ }
+
+static void tokenize_b(int plane, int block, int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, void *arg) {
struct tokenize_b_args* const args = arg;
VP10_COMP *cpi = args->cpi;
struct VP10_COMP;
struct ThreadData;
+#if CONFIG_VAR_TX
+void vp10_tokenize_sb_inter(struct VP10_COMP *cpi, struct ThreadData *td,
+ TOKENEXTRA **t, int dry_run, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
+#endif
+
+ void vp10_tokenize_palette_sb(struct ThreadData *const td,
+ BLOCK_SIZE bsize, int plane,
+ TOKENEXTRA **t);
void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
#define VPX_CTRL_VP9E_SET_MAX_GF_INTERVAL
VPX_CTRL_USE_TYPE(VP9E_GET_ACTIVEMAP, vpx_active_map_t *)
+ #define VPX_CTRL_VP9E_GET_ACTIVEMAP
- /*!\brief
- *
- * TODO(rbultje) : add support of the control in ffmpeg
- */
- #define VPX_CTRL_VP9E_SET_COLOR_RANGE
VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_RANGE, int)
+ #define VPX_CTRL_VP9E_SET_COLOR_RANGE
VPX_CTRL_USE_TYPE(VP9E_SET_SVC_REF_FRAME_CONFIG, vpx_svc_ref_frame_config_t *)
+ #define VPX_CTRL_VP9E_SET_SVC_REF_FRAME_CONFIG
-VPX_CTRL_USE_TYPE(VP9E_SET_RENDER_SIZE, int *)
+/*!\brief
+ *
+ * TODO(rbultje) : add support of the control in ffmpeg
+ */
#define VPX_CTRL_VP9E_SET_RENDER_SIZE
-
+VPX_CTRL_USE_TYPE(VP9E_SET_RENDER_SIZE, int *)
+ /*!\endcond */
/*! @} - end defgroup vp8_encoder */
#ifdef __cplusplus
} // extern "C"