For local symbols, make them static instead.
Change-Id: I13d60947a46f711bc8991e16100cea2a13e3a22e
#include "vpx_mem/vpx_mem.h"
-const unsigned char vp8_block2left[25] = {
+const unsigned char vp9_block2left[25] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
};
-const unsigned char vp8_block2above[25] = {
+const unsigned char vp9_block2above[25] = {
0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8
};
-const unsigned char vp8_block2left_8x8[25] = {
+const unsigned char vp9_block2left_8x8[25] = {
0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
};
-const unsigned char vp8_block2above_8x8[25] = {
+const unsigned char vp9_block2above_8x8[25] = {
0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
};
ENTROPY_CONTEXT y2;
} ENTROPY_CONTEXT_PLANES;
-extern const unsigned char vp8_block2left[25];
-extern const unsigned char vp8_block2above[25];
-extern const unsigned char vp8_block2left_8x8[25];
-extern const unsigned char vp8_block2above_8x8[25];
+extern const unsigned char vp9_block2left[25];
+extern const unsigned char vp9_block2above[25];
+extern const unsigned char vp9_block2left_8x8[25];
+extern const unsigned char vp9_block2above_8x8[25];
#define VP8_COMBINEENTROPYCONTEXTS( Dest, A, B) \
Dest = ((A)!=0) + ((B)!=0);
};
// Update probabilities for the nodes in the token entropy tree.
-const vp8_prob tree_update_probs[vp8_coef_tree_dimen] = {
+const vp8_prob tree_update_probs[vp9_coef_tree_dimen] = {
{
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
#include "coefupdateprobs.h"
-const int vp8_i8x8_block[4] = {0, 2, 8, 10};
+const int vp9_i8x8_block[4] = {0, 2, 8, 10};
-DECLARE_ALIGNED(16, const unsigned char, vp8_norm[256]) = {
+DECLARE_ALIGNED(16, const unsigned char, vp9_norm[256]) = {
0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
-DECLARE_ALIGNED(16, const int, vp8_coef_bands[16]) = {
+DECLARE_ALIGNED(16, const int, vp9_coef_bands[16]) = {
0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7
};
-DECLARE_ALIGNED(16, cuchar, vp8_prev_token_class[MAX_ENTROPY_TOKENS]) = {
+DECLARE_ALIGNED(16, cuchar, vp9_prev_token_class[MAX_ENTROPY_TOKENS]) = {
0, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 0
};
-DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) = {
+DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d[16]) = {
0, 1, 4, 8,
5, 2, 3, 6,
9, 12, 13, 10,
7, 11, 14, 15,
};
-DECLARE_ALIGNED(16, const int, vp8_col_scan[16]) = {
+DECLARE_ALIGNED(16, const int, vp9_col_scan[16]) = {
0, 4, 8, 12,
1, 5, 9, 13,
2, 6, 10, 14,
3, 7, 11, 15
};
-DECLARE_ALIGNED(16, const int, vp8_row_scan[16]) = {
+DECLARE_ALIGNED(16, const int, vp9_row_scan[16]) = {
0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
};
-DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5,
+DECLARE_ALIGNED(64, const int, vp9_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5,
5, 3, 6, 3, 5, 4, 6, 6,
6, 5, 5, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7
};
-DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]) = {
+DECLARE_ALIGNED(64, const int, vp9_default_zig_zag1d_8x8[64]) = {
0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
};
// Table can be optimized.
-DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]) = {
+DECLARE_ALIGNED(16, const int, vp9_coef_bands_16x16[256]) = {
0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6, 6,
6, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
};
-DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]) = {
+DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]) = {
0, 1, 16, 32, 17, 2, 3, 18, 33, 48, 64, 49, 34, 19, 4, 5,
20, 35, 50, 65, 80, 96, 81, 66, 51, 36, 21, 6, 7, 22, 37, 52,
67, 82, 97, 112, 128, 113, 98, 83, 68, 53, 38, 23, 8, 9, 24, 39,
/* Array indices are identical to previously-existing CONTEXT_NODE indices */
-const vp8_tree_index vp8_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
+const vp8_tree_index vp9_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
{
-DCT_EOB_TOKEN, 2, /* 0 = EOB */
-ZERO_TOKEN, 4, /* 1 = ZERO */
-DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */
};
-struct vp8_token_struct vp8_coef_encodings[MAX_ENTROPY_TOKENS];
+struct vp8_token_struct vp9_coef_encodings[MAX_ENTROPY_TOKENS];
/* Trees for extra bits. Probabilities are constant and
do not depend on previously encoded bits */
init_bit_tree(cat6, 13);
}
-vp8_extra_bit_struct vp8_extra_bits[12] = {
+vp8_extra_bit_struct vp9_extra_bits[12] = {
{ 0, 0, 0, 0},
{ 0, 0, 0, 1},
{ 0, 0, 0, 2},
void vp9_coef_tree_initialize() {
init_bit_trees();
- vp9_tokens_from_tree(vp8_coef_encodings, vp8_coef_tree);
+ vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree);
}
// #define COEF_COUNT_TESTING
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, cm->fc.coef_counts [i][j][k],
256, 1);
for (t = 0; t < ENTROPY_NODES; ++t) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, cm->fc.hybrid_coef_counts [i][j][k],
256, 1);
for (t = 0; t < ENTROPY_NODES; ++t) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, cm->fc.coef_counts_8x8 [i][j][k],
256, 1);
for (t = 0; t < ENTROPY_NODES; ++t) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, cm->fc.hybrid_coef_counts_8x8 [i][j][k],
256, 1);
for (t = 0; t < ENTROPY_NODES; ++t) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, cm->fc.coef_counts_16x16[i][j][k], 256, 1);
for (t = 0; t < ENTROPY_NODES; ++t) {
int prob;
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, cm->fc.hybrid_coef_counts_16x16[i][j][k], 256, 1);
for (t = 0; t < ENTROPY_NODES; ++t) {
int prob;
//#define SUBMVREF_COUNT 5
//#define VP8_NUMMBSPLITS 4
-extern const int vp8_i8x8_block[4];
+extern const int vp9_i8x8_block[4];
/* Coefficient token alphabet */
#define ENTROPY_NODES 11
#define EOSB_TOKEN 127 /* Not signalled, encoder only */
-extern const vp8_tree_index vp8_coef_tree[];
+extern const vp8_tree_index vp9_coef_tree[];
-extern struct vp8_token_struct vp8_coef_encodings[MAX_ENTROPY_TOKENS];
+extern struct vp8_token_struct vp9_coef_encodings[MAX_ENTROPY_TOKENS];
typedef struct {
vp8_tree_p tree;
int base_val;
} vp8_extra_bit_struct;
-extern vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */
+extern vp8_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */
#define PROB_UPDATE_BASELINE_COST 7
position within the 4x4 DCT. */
#define COEF_BANDS 8
-extern DECLARE_ALIGNED(16, const int, vp8_coef_bands[16]);
-extern DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]);
-extern DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]);
+extern DECLARE_ALIGNED(16, const int, vp9_coef_bands[16]);
+extern DECLARE_ALIGNED(64, const int, vp9_coef_bands_8x8[64]);
+extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_16x16[256]);
/* Inside dimension is 3-valued measure of nearby complexity, that is,
the extent to which nearby coefficients are nonzero. For the first
#define SUBEXP_PARAM 4 /* Subexponential code parameter */
#define MODULUS_PARAM 13 /* Modulus parameter */
-extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]);
+extern DECLARE_ALIGNED(16, const unsigned char, vp9_prev_token_class[MAX_ENTROPY_TOKENS]);
struct VP8Common;
void vp9_default_coef_probs(struct VP8Common *);
-extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]);
+extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d[16]);
-extern DECLARE_ALIGNED(16, const int, vp8_col_scan[16]);
-extern DECLARE_ALIGNED(16, const int, vp8_row_scan[16]);
+extern DECLARE_ALIGNED(16, const int, vp9_col_scan[16]);
+extern DECLARE_ALIGNED(16, const int, vp9_row_scan[16]);
extern short vp8_default_zig_zag_mask[16];
-extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]);
+extern DECLARE_ALIGNED(64, const int, vp9_default_zig_zag1d_8x8[64]);
void vp9_coef_tree_initialize(void);
-extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]);
+extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]);
void vp9_adapt_coef_probs(struct VP8Common *);
#endif
#include "vpx_mem/vpx_mem.h"
-const unsigned int kf_y_mode_cts[8][VP8_YMODES] = {
+static const unsigned int kf_y_mode_cts[8][VP8_YMODES] = {
/* DC V H D45 135 117 153 D27 D63 TM i8x8 BPRED */
{12, 6, 5, 5, 5, 5, 5, 5, 5, 2, 22, 200},
{25, 13, 13, 7, 7, 7, 7, 7, 7, 6, 27, 160},
return SUBMVREF_NORMAL;
}
-const vp8_prob vp8_sub_mv_ref_prob [VP8_SUBMVREFS - 1] = { 180, 162, 25};
+const vp8_prob vp9_sub_mv_ref_prob [VP8_SUBMVREFS - 1] = { 180, 162, 25};
-const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1] = {
+const vp8_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1] = {
{ 147, 136, 18 },
{ 106, 145, 1 },
{ 179, 121, 1 },
-vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS] = {
+vp9_mbsplit vp9_mbsplits [VP8_NUMMBSPLITS] = {
{
0, 0, 0, 0,
0, 0, 0, 0,
},
};
-const int vp8_mbsplit_count [VP8_NUMMBSPLITS] = { 2, 2, 4, 16};
+const int vp9_mbsplit_count [VP8_NUMMBSPLITS] = { 2, 2, 4, 16};
-const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS - 1] = { 110, 111, 150};
+const vp8_prob vp9_mbsplit_probs [VP8_NUMMBSPLITS - 1] = { 110, 111, 150};
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
-const vp8_tree_index vp8_bmode_tree[VP8_BINTRAMODES * 2 - 2] = /* INTRAMODECONTEXTNODE value */
+const vp8_tree_index vp9_bmode_tree[VP8_BINTRAMODES * 2 - 2] = /* INTRAMODECONTEXTNODE value */
{
-B_DC_PRED, 2, /* 0 = DC_NODE */
-B_TM_PRED, 4, /* 1 = TM_NODE */
/* Again, these trees use the same probability indices as their
explicitly-programmed predecessors. */
-const vp8_tree_index vp8_ymode_tree[VP8_YMODES * 2 - 2] = {
+const vp8_tree_index vp9_ymode_tree[VP8_YMODES * 2 - 2] = {
2, 14,
-DC_PRED, 4,
6, 8,
-B_PRED, -I8X8_PRED
};
-const vp8_tree_index vp8_kf_ymode_tree[VP8_YMODES * 2 - 2] = {
+const vp8_tree_index vp9_kf_ymode_tree[VP8_YMODES * 2 - 2] = {
2, 14,
-DC_PRED, 4,
6, 8,
-B_PRED, -I8X8_PRED
};
-const vp8_tree_index vp8_i8x8_mode_tree[VP8_I8X8_MODES * 2 - 2] = {
+const vp8_tree_index vp9_i8x8_mode_tree[VP8_I8X8_MODES * 2 - 2] = {
2, 14,
-DC_PRED, 4,
6, 8,
-H_PRED, -TM_PRED
};
-const vp8_tree_index vp8_uv_mode_tree[VP8_UV_MODES * 2 - 2] = {
+const vp8_tree_index vp9_uv_mode_tree[VP8_UV_MODES * 2 - 2] = {
2, 14,
-DC_PRED, 4,
6, 8,
-H_PRED, -TM_PRED
};
-const vp8_tree_index vp8_mbsplit_tree[6] = {
+const vp8_tree_index vp9_mbsplit_tree[6] = {
-PARTITIONING_4X4, 2,
-PARTITIONING_8X8, 4,
-PARTITIONING_16X8, -PARTITIONING_8X16,
};
-const vp8_tree_index vp8_mv_ref_tree[8] = {
+const vp8_tree_index vp9_mv_ref_tree[8] = {
-ZEROMV, 2,
-NEARESTMV, 4,
-NEARMV, 6,
};
#if CONFIG_SUPERBLOCKS
-const vp8_tree_index vp8_sb_mv_ref_tree[6] = {
+const vp8_tree_index vp9_sb_mv_ref_tree[6] = {
-ZEROMV, 2,
-NEARESTMV, 4,
-NEARMV, -NEWMV
};
#endif
-const vp8_tree_index vp8_sub_mv_ref_tree[6] = {
+const vp8_tree_index vp9_sub_mv_ref_tree[6] = {
-LEFT4X4, 2,
-ABOVE4X4, 4,
-ZERO4X4, -NEW4X4
};
-struct vp8_token_struct vp8_bmode_encodings [VP8_BINTRAMODES];
-struct vp8_token_struct vp8_ymode_encodings [VP8_YMODES];
+struct vp8_token_struct vp9_bmode_encodings [VP8_BINTRAMODES];
+struct vp8_token_struct vp9_ymode_encodings [VP8_YMODES];
#if CONFIG_SUPERBLOCKS
-struct vp8_token_struct vp8_sb_kf_ymode_encodings [VP8_I32X32_MODES];
+struct vp8_token_struct vp9_sb_kf_ymode_encodings [VP8_I32X32_MODES];
#endif
-struct vp8_token_struct vp8_kf_ymode_encodings [VP8_YMODES];
-struct vp8_token_struct vp8_uv_mode_encodings [VP8_UV_MODES];
-struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_I8X8_MODES];
-struct vp8_token_struct vp8_mbsplit_encodings [VP8_NUMMBSPLITS];
+struct vp8_token_struct vp9_kf_ymode_encodings [VP8_YMODES];
+struct vp8_token_struct vp9_uv_mode_encodings [VP8_UV_MODES];
+struct vp8_token_struct vp9_i8x8_mode_encodings [VP8_I8X8_MODES];
+struct vp8_token_struct vp9_mbsplit_encodings [VP8_NUMMBSPLITS];
-struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS];
+struct vp8_token_struct vp9_mv_ref_encoding_array [VP8_MVREFS];
#if CONFIG_SUPERBLOCKS
-struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS];
+struct vp8_token_struct vp9_sb_mv_ref_encoding_array [VP8_MVREFS];
#endif
-struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
+struct vp8_token_struct vp9_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
void vp9_init_mbmode_probs(VP8_COMMON *x) {
unsigned int bct [VP8_YMODES] [2]; /* num Ymodes > num UV modes */
- vp9_tree_probs_from_distribution(VP8_YMODES, vp8_ymode_encodings,
- vp8_ymode_tree, x->fc.ymode_prob, bct, y_mode_cts, 256, 1);
+ vp9_tree_probs_from_distribution(VP8_YMODES, vp9_ymode_encodings,
+ vp9_ymode_tree, x->fc.ymode_prob, bct, y_mode_cts, 256, 1);
{
int i;
for (i = 0; i < 8; i++) {
vp9_tree_probs_from_distribution(
- VP8_YMODES, vp8_kf_ymode_encodings, vp8_kf_ymode_tree,
+ VP8_YMODES, vp9_kf_ymode_encodings, vp9_kf_ymode_tree,
x->kf_ymode_prob[i], bct, kf_y_mode_cts[i],
256, 1);
#if CONFIG_SUPERBLOCKS
vp9_tree_probs_from_distribution(
- VP8_I32X32_MODES, vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree,
+ VP8_I32X32_MODES, vp9_sb_kf_ymode_encodings, vp8_sb_ymode_tree,
x->sb_kf_ymode_prob[i], bct, kf_y_mode_cts[i],
256, 1);
#endif
int i;
for (i = 0; i < VP8_YMODES; i++) {
vp9_tree_probs_from_distribution(
- VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree,
+ VP8_UV_MODES, vp9_uv_mode_encodings, vp9_uv_mode_tree,
x->kf_uv_mode_prob[i], bct, kf_uv_mode_cts[i],
256, 1);
vp9_tree_probs_from_distribution(
- VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree,
+ VP8_UV_MODES, vp9_uv_mode_encodings, vp9_uv_mode_tree,
x->fc.uv_mode_prob[i], bct, uv_mode_cts[i],
256, 1);
}
}
vp9_tree_probs_from_distribution(
- VP8_I8X8_MODES, vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree,
+ VP8_I8X8_MODES, vp9_i8x8_mode_encodings, vp9_i8x8_mode_tree,
x->fc.i8x8_mode_prob, bct, i8x8_mode_cts,
256, 1);
- vpx_memcpy(x->fc.sub_mv_ref_prob, vp8_sub_mv_ref_prob2, sizeof(vp8_sub_mv_ref_prob2));
- vpx_memcpy(x->fc.mbsplit_prob, vp8_mbsplit_probs, sizeof(vp8_mbsplit_probs));
- vpx_memcpy(x->fc.switchable_interp_prob, vp8_switchable_interp_prob,
- sizeof(vp8_switchable_interp_prob));
+ vpx_memcpy(x->fc.sub_mv_ref_prob, vp9_sub_mv_ref_prob2, sizeof(vp9_sub_mv_ref_prob2));
+ vpx_memcpy(x->fc.mbsplit_prob, vp9_mbsplit_probs, sizeof(vp9_mbsplit_probs));
+ vpx_memcpy(x->fc.switchable_interp_prob, vp9_switchable_interp_prob,
+ sizeof(vp9_switchable_interp_prob));
}
vp8_prob p [VP8_BINTRAMODES - 1],
unsigned int branch_ct [VP8_BINTRAMODES - 1] [2],
const unsigned int events [VP8_BINTRAMODES]) {
- vp9_tree_probs_from_distribution(VP8_BINTRAMODES, vp8_bmode_encodings,
- vp8_bmode_tree, p, branch_ct, events, 256, 1);
+ vp9_tree_probs_from_distribution(VP8_BINTRAMODES, vp9_bmode_encodings,
+ vp9_bmode_tree, p, branch_ct, events, 256, 1);
}
void vp9_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES - 1]) {
do {
intra_bmode_probs_from_distribution(
- p[i][j], branch_ct, vp8_kf_default_bmode_counts[i][j]);
+ p[i][j], branch_ct, vp9_kf_default_bmode_counts[i][j]);
} while (++j < VP8_BINTRAMODES);
} while (++i < VP8_BINTRAMODES);
}
#if VP8_SWITCHABLE_FILTERS == 3
-const vp8_tree_index vp8_switchable_interp_tree[VP8_SWITCHABLE_FILTERS*2-2] = {
+const vp8_tree_index vp9_switchable_interp_tree[VP8_SWITCHABLE_FILTERS*2-2] = {
-0, 2,
-1, -2
};
-struct vp8_token_struct vp8_switchable_interp_encodings[VP8_SWITCHABLE_FILTERS];
-const INTERPOLATIONFILTERTYPE vp8_switchable_interp[VP8_SWITCHABLE_FILTERS] = {
+struct vp8_token_struct vp9_switchable_interp_encodings[VP8_SWITCHABLE_FILTERS];
+const INTERPOLATIONFILTERTYPE vp9_switchable_interp[VP8_SWITCHABLE_FILTERS] = {
EIGHTTAP, SIXTAP, EIGHTTAP_SHARP};
-const int vp8_switchable_interp_map[SWITCHABLE+1] = {1, -1, 0, 2, -1};
-const vp8_prob vp8_switchable_interp_prob [VP8_SWITCHABLE_FILTERS+1]
+const int vp9_switchable_interp_map[SWITCHABLE+1] = {1, -1, 0, 2, -1};
+const vp8_prob vp9_switchable_interp_prob [VP8_SWITCHABLE_FILTERS+1]
[VP8_SWITCHABLE_FILTERS-1] = {
{248, 192}, { 32, 248}, { 32, 32}, {192, 160}
};
#elif VP8_SWITCHABLE_FILTERS == 2
-const vp8_tree_index vp8_switchable_interp_tree[VP8_SWITCHABLE_FILTERS*2-2] = {
+const vp8_tree_index vp9_switchable_interp_tree[VP8_SWITCHABLE_FILTERS*2-2] = {
-0, -1,
};
-struct vp8_token_struct vp8_switchable_interp_encodings[VP8_SWITCHABLE_FILTERS];
-const vp8_prob vp8_switchable_interp_prob [VP8_SWITCHABLE_FILTERS+1]
+struct vp8_token_struct vp9_switchable_interp_encodings[VP8_SWITCHABLE_FILTERS];
+const vp8_prob vp9_switchable_interp_prob [VP8_SWITCHABLE_FILTERS+1]
[VP8_SWITCHABLE_FILTERS-1] = {
{248},
{ 64},
{192},
};
-const INTERPOLATIONFILTERTYPE vp8_switchable_interp[VP8_SWITCHABLE_FILTERS] = {
+const INTERPOLATIONFILTERTYPE vp9_switchable_interp[VP8_SWITCHABLE_FILTERS] = {
EIGHTTAP, EIGHTTAP_SHARP};
-const int vp8_switchable_interp_map[SWITCHABLE+1] = {-1, -1, 0, 1, -1}; //8, 8s
+const int vp9_switchable_interp_map[SWITCHABLE+1] = {-1, -1, 0, 1, -1}; //8, 8s
#endif
void vp9_entropy_mode_init() {
- vp9_tokens_from_tree(vp8_bmode_encodings, vp8_bmode_tree);
- vp9_tokens_from_tree(vp8_ymode_encodings, vp8_ymode_tree);
- vp9_tokens_from_tree(vp8_kf_ymode_encodings, vp8_kf_ymode_tree);
+ vp9_tokens_from_tree(vp9_bmode_encodings, vp9_bmode_tree);
+ vp9_tokens_from_tree(vp9_ymode_encodings, vp9_ymode_tree);
+ vp9_tokens_from_tree(vp9_kf_ymode_encodings, vp9_kf_ymode_tree);
#if CONFIG_SUPERBLOCKS
- vp9_tokens_from_tree(vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree);
+ vp9_tokens_from_tree(vp9_sb_kf_ymode_encodings, vp8_sb_ymode_tree);
#endif
- vp9_tokens_from_tree(vp8_uv_mode_encodings, vp8_uv_mode_tree);
- vp9_tokens_from_tree(vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree);
- vp9_tokens_from_tree(vp8_mbsplit_encodings, vp8_mbsplit_tree);
- vp9_tokens_from_tree(vp8_switchable_interp_encodings,
- vp8_switchable_interp_tree);
-
- vp9_tokens_from_tree_offset(vp8_mv_ref_encoding_array,
- vp8_mv_ref_tree, NEARESTMV);
+ vp9_tokens_from_tree(vp9_uv_mode_encodings, vp9_uv_mode_tree);
+ vp9_tokens_from_tree(vp9_i8x8_mode_encodings, vp9_i8x8_mode_tree);
+ vp9_tokens_from_tree(vp9_mbsplit_encodings, vp9_mbsplit_tree);
+ vp9_tokens_from_tree(vp9_switchable_interp_encodings,
+ vp9_switchable_interp_tree);
+
+ vp9_tokens_from_tree_offset(vp9_mv_ref_encoding_array,
+ vp9_mv_ref_tree, NEARESTMV);
#if CONFIG_SUPERBLOCKS
- vp9_tokens_from_tree_offset(vp8_sb_mv_ref_encoding_array,
- vp8_sb_mv_ref_tree, NEARESTMV);
+ vp9_tokens_from_tree_offset(vp9_sb_mv_ref_encoding_array,
+ vp9_sb_mv_ref_tree, NEARESTMV);
#endif
- vp9_tokens_from_tree_offset(vp8_sub_mv_ref_encoding_array,
- vp8_sub_mv_ref_tree, LEFT4X4);
+ vp9_tokens_from_tree_offset(vp9_sub_mv_ref_encoding_array,
+ vp9_sub_mv_ref_tree, LEFT4X4);
}
void vp9_init_mode_contexts(VP8_COMMON *pc) {
vpx_memset(pc->fc.mv_ref_ct_a, 0, sizeof(pc->fc.mv_ref_ct_a));
vpx_memcpy(pc->fc.mode_context,
- default_vp8_mode_contexts,
+ vp9_default_mode_contexts,
sizeof(pc->fc.mode_context));
vpx_memcpy(pc->fc.mode_context_a,
- default_vp8_mode_contexts,
+ vp9_default_mode_contexts_a,
sizeof(pc->fc.mode_context_a));
}
printf("};\n");
#endif
vp9_tree_probs_from_distribution(
- VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
+ VP8_YMODES, vp9_ymode_encodings, vp9_ymode_tree,
ymode_probs, branch_ct, cm->fc.ymode_counts,
256, 1);
for (t = 0; t < VP8_YMODES - 1; ++t) {
}
for (i = 0; i < VP8_YMODES; ++i) {
vp9_tree_probs_from_distribution(
- VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree,
+ VP8_UV_MODES, vp9_uv_mode_encodings, vp9_uv_mode_tree,
uvmode_probs, branch_ct, cm->fc.uv_mode_counts[i],
256, 1);
for (t = 0; t < VP8_UV_MODES - 1; ++t) {
}
}
vp9_tree_probs_from_distribution(
- VP8_BINTRAMODES, vp8_bmode_encodings, vp8_bmode_tree,
+ VP8_BINTRAMODES, vp9_bmode_encodings, vp9_bmode_tree,
bmode_probs, branch_ct, cm->fc.bmode_counts,
256, 1);
for (t = 0; t < VP8_BINTRAMODES - 1; ++t) {
else cm->fc.bmode_prob[t] = prob;
}
vp9_tree_probs_from_distribution(
- VP8_I8X8_MODES, vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree,
+ VP8_I8X8_MODES, vp9_i8x8_mode_encodings, vp9_i8x8_mode_tree,
i8x8_mode_probs, branch_ct, cm->fc.i8x8_mode_counts,
256, 1);
for (t = 0; t < VP8_I8X8_MODES - 1; ++t) {
}
for (i = 0; i < SUBMVREF_COUNT; ++i) {
vp9_tree_probs_from_distribution(
- VP8_SUBMVREFS, vp8_sub_mv_ref_encoding_array, vp8_sub_mv_ref_tree,
+ VP8_SUBMVREFS, vp9_sub_mv_ref_encoding_array, vp9_sub_mv_ref_tree,
sub_mv_ref_probs, branch_ct, cm->fc.sub_mv_ref_counts[i],
256, 1);
for (t = 0; t < VP8_SUBMVREFS - 1; ++t) {
}
}
vp9_tree_probs_from_distribution(
- VP8_NUMMBSPLITS, vp8_mbsplit_encodings, vp8_mbsplit_tree,
+ VP8_NUMMBSPLITS, vp9_mbsplit_encodings, vp9_mbsplit_tree,
mbsplit_probs, branch_ct, cm->fc.mbsplit_counts,
256, 1);
for (t = 0; t < VP8_NUMMBSPLITS - 1; ++t) {
#define SUBMVREF_COUNT 5
#define VP8_NUMMBSPLITS 4
-typedef const int vp8_mbsplit[16];
+typedef const int vp9_mbsplit[16];
-extern vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS];
+extern vp9_mbsplit vp9_mbsplits [VP8_NUMMBSPLITS];
-extern const int vp8_mbsplit_count [VP8_NUMMBSPLITS]; /* # of subsets */
+extern const int vp9_mbsplit_count [VP8_NUMMBSPLITS]; /* # of subsets */
-extern const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS - 1];
+extern const vp8_prob vp9_mbsplit_probs [VP8_NUMMBSPLITS - 1];
extern int vp9_mv_cont(const int_mv *l, const int_mv *a);
-extern const vp8_prob vp8_sub_mv_ref_prob [VP8_SUBMVREFS - 1];
-extern const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1];
+extern const vp8_prob vp9_sub_mv_ref_prob [VP8_SUBMVREFS - 1];
+extern const vp8_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS - 1];
-extern const unsigned int vp8_kf_default_bmode_counts [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES];
+extern const unsigned int vp9_kf_default_bmode_counts[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
-extern const vp8_tree_index vp8_bmode_tree[];
+extern const vp8_tree_index vp9_bmode_tree[];
-extern const vp8_tree_index vp8_ymode_tree[];
-extern const vp8_tree_index vp8_kf_ymode_tree[];
-extern const vp8_tree_index vp8_uv_mode_tree[];
-#define vp8_sb_ymode_tree vp8_uv_mode_tree
-extern const vp8_tree_index vp8_i8x8_mode_tree[];
-extern const vp8_tree_index vp8_mbsplit_tree[];
-extern const vp8_tree_index vp8_mv_ref_tree[];
-extern const vp8_tree_index vp8_sb_mv_ref_tree[];
-extern const vp8_tree_index vp8_sub_mv_ref_tree[];
+extern const vp8_tree_index vp9_ymode_tree[];
+extern const vp8_tree_index vp9_kf_ymode_tree[];
+extern const vp8_tree_index vp9_uv_mode_tree[];
+#define vp8_sb_ymode_tree vp9_uv_mode_tree
+extern const vp8_tree_index vp9_i8x8_mode_tree[];
+extern const vp8_tree_index vp9_mbsplit_tree[];
+extern const vp8_tree_index vp9_mv_ref_tree[];
+extern const vp8_tree_index vp9_sb_mv_ref_tree[];
+extern const vp8_tree_index vp9_sub_mv_ref_tree[];
-extern struct vp8_token_struct vp8_bmode_encodings [VP8_BINTRAMODES];
-extern struct vp8_token_struct vp8_ymode_encodings [VP8_YMODES];
-extern struct vp8_token_struct vp8_sb_kf_ymode_encodings [VP8_I32X32_MODES];
-extern struct vp8_token_struct vp8_kf_ymode_encodings [VP8_YMODES];
-extern struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_I8X8_MODES];
-extern struct vp8_token_struct vp8_uv_mode_encodings [VP8_UV_MODES];
-extern struct vp8_token_struct vp8_mbsplit_encodings [VP8_NUMMBSPLITS];
+extern struct vp8_token_struct vp9_bmode_encodings [VP8_BINTRAMODES];
+extern struct vp8_token_struct vp9_ymode_encodings [VP8_YMODES];
+extern struct vp8_token_struct vp9_sb_kf_ymode_encodings [VP8_I32X32_MODES];
+extern struct vp8_token_struct vp9_kf_ymode_encodings [VP8_YMODES];
+extern struct vp8_token_struct vp9_i8x8_mode_encodings [VP8_I8X8_MODES];
+extern struct vp8_token_struct vp9_uv_mode_encodings [VP8_UV_MODES];
+extern struct vp8_token_struct vp9_mbsplit_encodings [VP8_NUMMBSPLITS];
/* Inter mode values do not start at zero */
-extern struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS];
-extern struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS];
-extern struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
+extern struct vp8_token_struct vp9_mv_ref_encoding_array [VP8_MVREFS];
+extern struct vp8_token_struct vp9_sb_mv_ref_encoding_array [VP8_MVREFS];
+extern struct vp8_token_struct vp9_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
void vp9_entropy_mode_init(void);
void vp9_adapt_mode_probs(struct VP8Common *);
#define VP8_SWITCHABLE_FILTERS 2 /* number of switchable filters */
-extern const INTERPOLATIONFILTERTYPE vp8_switchable_interp
+extern const INTERPOLATIONFILTERTYPE vp9_switchable_interp
[VP8_SWITCHABLE_FILTERS];
-extern const int vp8_switchable_interp_map[SWITCHABLE + 1];
-extern const vp8_tree_index vp8_switchable_interp_tree
+extern const int vp9_switchable_interp_map[SWITCHABLE + 1];
+extern const vp8_tree_index vp9_switchable_interp_tree
[2*(VP8_SWITCHABLE_FILTERS - 1)];
-extern struct vp8_token_struct vp8_switchable_interp_encodings
+extern struct vp8_token_struct vp9_switchable_interp_encodings
[VP8_SWITCHABLE_FILTERS];
-extern const vp8_prob vp8_switchable_interp_prob
+extern const vp8_prob vp9_switchable_interp_prob
[VP8_SWITCHABLE_FILTERS + 1][VP8_SWITCHABLE_FILTERS - 1];
#endif
/* Smooth or bias the mv-counts before prob computation */
/* #define SMOOTH_MV_COUNTS */
-const vp8_tree_index vp8_mv_joint_tree[2 * MV_JOINTS - 2] = {
+const vp8_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2] = {
-MV_JOINT_ZERO, 2,
-MV_JOINT_HNZVZ, 4,
-MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
};
-struct vp8_token_struct vp8_mv_joint_encodings[MV_JOINTS];
+struct vp8_token_struct vp9_mv_joint_encodings[MV_JOINTS];
-const vp8_tree_index vp8_mv_class_tree[2 * MV_CLASSES - 2] = {
+const vp8_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2] = {
-MV_CLASS_0, 2,
-MV_CLASS_1, 4,
6, 8,
-MV_CLASS_4, -MV_CLASS_5,
-MV_CLASS_6, -MV_CLASS_7,
};
-struct vp8_token_struct vp8_mv_class_encodings[MV_CLASSES];
+struct vp8_token_struct vp9_mv_class_encodings[MV_CLASSES];
-const vp8_tree_index vp8_mv_class0_tree [2 * CLASS0_SIZE - 2] = {
+const vp8_tree_index vp9_mv_class0_tree [2 * CLASS0_SIZE - 2] = {
-0, -1,
};
-struct vp8_token_struct vp8_mv_class0_encodings[CLASS0_SIZE];
+struct vp8_token_struct vp9_mv_class0_encodings[CLASS0_SIZE];
-const vp8_tree_index vp8_mv_fp_tree [2 * 4 - 2] = {
+const vp8_tree_index vp9_mv_fp_tree [2 * 4 - 2] = {
-0, 2,
-1, 4,
-2, -3
};
-struct vp8_token_struct vp8_mv_fp_encodings[4];
+struct vp8_token_struct vp9_mv_fp_encodings[4];
-const nmv_context vp8_default_nmv_context = {
+const nmv_context vp9_default_nmv_context = {
{32, 64, 96},
{
{ /* vert component */
counts_to_context(&NMVcount->comps[0], usehp);
counts_to_context(&NMVcount->comps[1], usehp);
vp9_tree_probs_from_distribution(MV_JOINTS,
- vp8_mv_joint_encodings,
- vp8_mv_joint_tree,
+ vp9_mv_joint_encodings,
+ vp9_mv_joint_tree,
prob->joints,
branch_ct_joint,
NMVcount->joints,
branch_ct_sign[i][0] = NMVcount->comps[i].sign[0];
branch_ct_sign[i][1] = NMVcount->comps[i].sign[1];
vp9_tree_probs_from_distribution(MV_CLASSES,
- vp8_mv_class_encodings,
- vp8_mv_class_tree,
+ vp9_mv_class_encodings,
+ vp9_mv_class_tree,
prob->comps[i].classes,
branch_ct_classes[i],
NMVcount->comps[i].classes,
256, 1);
vp9_tree_probs_from_distribution(CLASS0_SIZE,
- vp8_mv_class0_encodings,
- vp8_mv_class0_tree,
+ vp9_mv_class0_encodings,
+ vp9_mv_class0_tree,
prob->comps[i].class0,
branch_ct_class0[i],
NMVcount->comps[i].class0,
for (i = 0; i < 2; ++i) {
for (k = 0; k < CLASS0_SIZE; ++k) {
vp9_tree_probs_from_distribution(4,
- vp8_mv_fp_encodings,
- vp8_mv_fp_tree,
+ vp9_mv_fp_encodings,
+ vp9_mv_fp_tree,
prob->comps[i].class0_fp[k],
branch_ct_class0_fp[i][k],
NMVcount->comps[i].class0_fp[k],
256, 1);
}
vp9_tree_probs_from_distribution(4,
- vp8_mv_fp_encodings,
- vp8_mv_fp_tree,
+ vp9_mv_fp_encodings,
+ vp9_mv_fp_tree,
prob->comps[i].fp,
branch_ct_fp[i],
NMVcount->comps[i].fp,
}
void vp9_entropy_mv_init() {
- vp9_tokens_from_tree(vp8_mv_joint_encodings, vp8_mv_joint_tree);
- vp9_tokens_from_tree(vp8_mv_class_encodings, vp8_mv_class_tree);
- vp9_tokens_from_tree(vp8_mv_class0_encodings, vp8_mv_class0_tree);
- vp9_tokens_from_tree(vp8_mv_fp_encodings, vp8_mv_fp_tree);
+ vp9_tokens_from_tree(vp9_mv_joint_encodings, vp9_mv_joint_tree);
+ vp9_tokens_from_tree(vp9_mv_class_encodings, vp9_mv_class_tree);
+ vp9_tokens_from_tree(vp9_mv_class0_encodings, vp9_mv_class0_tree);
+ vp9_tokens_from_tree(vp9_mv_fp_encodings, vp9_mv_fp_tree);
}
void vp9_init_mv_probs(VP8_COMMON *cm) {
- vpx_memcpy(&cm->fc.nmvc, &vp8_default_nmv_context, sizeof(nmv_context));
+ vpx_memcpy(&cm->fc.nmvc, &vp9_default_nmv_context, sizeof(nmv_context));
}
MV_JOINT_HNZVNZ = 3, /* Both components nonzero */
} MV_JOINT_TYPE;
-extern const vp8_tree_index vp8_mv_joint_tree[2 * MV_JOINTS - 2];
-extern struct vp8_token_struct vp8_mv_joint_encodings [MV_JOINTS];
+extern const vp8_tree_index vp9_mv_joint_tree[2 * MV_JOINTS - 2];
+extern struct vp8_token_struct vp9_mv_joint_encodings [MV_JOINTS];
/* Symbols for coding magnitude class of nonzero components */
#define MV_CLASSES 8
MV_CLASS_7 = 7, /* (128, 256] integer pel */
} MV_CLASS_TYPE;
-extern const vp8_tree_index vp8_mv_class_tree[2 * MV_CLASSES - 2];
-extern struct vp8_token_struct vp8_mv_class_encodings [MV_CLASSES];
+extern const vp8_tree_index vp9_mv_class_tree[2 * MV_CLASSES - 2];
+extern struct vp8_token_struct vp9_mv_class_encodings [MV_CLASSES];
#define CLASS0_BITS 1 /* bits at integer precision for class 0 */
#define CLASS0_SIZE (1 << CLASS0_BITS)
#define MV_MAX ((1 << MV_MAX_BITS) - 1)
#define MV_VALS ((MV_MAX << 1) + 1)
-extern const vp8_tree_index vp8_mv_class0_tree[2 * CLASS0_SIZE - 2];
-extern struct vp8_token_struct vp8_mv_class0_encodings[CLASS0_SIZE];
+extern const vp8_tree_index vp9_mv_class0_tree[2 * CLASS0_SIZE - 2];
+extern struct vp8_token_struct vp9_mv_class0_encodings[CLASS0_SIZE];
-extern const vp8_tree_index vp8_mv_fp_tree[2 * 4 - 2];
-extern struct vp8_token_struct vp8_mv_fp_encodings[4];
+extern const vp8_tree_index vp9_mv_fp_tree[2 * 4 - 2];
+extern struct vp8_token_struct vp9_mv_fp_encodings[4];
typedef struct {
vp8_prob sign;
void vp9_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
int usehp);
-extern const nmv_context vp8_default_nmv_context;
+extern const nmv_context vp9_default_nmv_context;
void vp9_counts_to_nmv_context(
nmv_context_counts *NMVcount,
nmv_context *prob,
#include "vpx_ports/mem.h"
#include "vpx_rtcd.h"
-DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[SUBPEL_SHIFTS][2]) = {
+DECLARE_ALIGNED(16, const short, vp9_bilinear_filters[SUBPEL_SHIFTS][2]) = {
{ 128, 0 },
{ 120, 8 },
{ 112, 16 },
#define FILTER_ALPHA 0
#define FILTER_ALPHA_SHARP 1
-DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
+DECLARE_ALIGNED(16, const short, vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
#if FILTER_ALPHA == 0
/* Lagrangian interpolation filter */
{ 0, 0, 0, 128, 0, 0, 0, 0},
#endif /* FILTER_ALPHA */
};
-DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = {
+DECLARE_ALIGNED(16, const short, vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = {
#if FILTER_ALPHA_SHARP == 1
/* dct based filter */
{0, 0, 0, 128, 0, 0, 0, 0},
#endif /* FILTER_ALPHA_SHARP */
};
-DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6]) = {
+DECLARE_ALIGNED(16, const short, vp9_sub_pel_filters_6[SUBPEL_SHIFTS][6]) = {
{0, 0, 128, 0, 0, 0},
{1, -5, 125, 8, -2, 1},
{1, -8, 122, 17, -5, 1},
const short *HFilter;
const short *VFilter;
- HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */
- VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */
+ HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */
+ VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */
filter_block2d_6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter);
}
const short *HFilter;
const short *VFilter;
- HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */
- VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */
+ HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */
+ VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */
filter_block2d_avg_6(src_ptr, dst_ptr, src_pixels_per_line,
dst_pitch, HFilter, VFilter);
// int FData[(7+Interp_Extend*2)*16]; /* Temp data buffer used in filtering */
int FData[(7 + Interp_Extend * 2) * 8]; /* Temp data buffer used in filtering */
- HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */
- VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */
+ HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */
+ VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */
/* First filter 1-D horizontally... */
filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, src_pixels_per_line, 1,
// int FData[(7+Interp_Extend*2)*16]; /* Temp data buffer used in filtering */
int FData[(7 + Interp_Extend * 2) * 8]; /* Temp data buffer used in filtering */
- HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */
- VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */
+ HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */
+ VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */
/* First filter 1-D horizontally... */
filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, src_pixels_per_line, 1,
// int FData[(7+Interp_Extend*2)*16]; /* Temp data buffer used in filtering */
int FData[(3 + Interp_Extend * 2) * 8]; /* Temp data buffer used in filtering */
- HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */
- VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */
+ HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */
+ VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */
/* First filter 1-D horizontally... */
filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, src_pixels_per_line, 1,
int FData[(15 + Interp_Extend * 2) * 16]; /* Temp data buffer used in filtering */
- HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */
- VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */
+ HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */
+ VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */
/* First filter 1-D horizontally... */
filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData, src_pixels_per_line, 1,
// int FData[(15+Interp_Extend*2)*24]; /* Temp data buffer used in filtering */
int FData[(15 + Interp_Extend * 2) * 16]; /* Temp data buffer used in filtering */
- HFilter = vp8_sub_pel_filters_6[xoffset]; /* 6 tap */
- VFilter = vp8_sub_pel_filters_6[yoffset]; /* 6 tap */
+ HFilter = vp9_sub_pel_filters_6[xoffset]; /* 6 tap */
+ VFilter = vp9_sub_pel_filters_6[yoffset]; /* 6 tap */
/* First filter 1-D horizontally... */
filter_block2d_first_pass_6(src_ptr - ((Interp_Extend - 1) * src_pixels_per_line), FData,
const short *HFilter;
const short *VFilter;
- HFilter = vp8_sub_pel_filters_8[xoffset];
- VFilter = vp8_sub_pel_filters_8[yoffset];
+ HFilter = vp9_sub_pel_filters_8[xoffset];
+ VFilter = vp9_sub_pel_filters_8[yoffset];
vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
unsigned char *dst_ptr,
int dst_pitch
) {
- const short *HFilter = vp8_sub_pel_filters_8[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8[yoffset];
unsigned char tmp[4 * 4];
vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
const short *HFilter;
const short *VFilter;
- HFilter = vp8_sub_pel_filters_8s[xoffset];
- VFilter = vp8_sub_pel_filters_8s[yoffset];
+ HFilter = vp9_sub_pel_filters_8s[xoffset];
+ VFilter = vp9_sub_pel_filters_8s[yoffset];
vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
unsigned char *dst_ptr,
int dst_pitch
) {
- const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8s[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8s[yoffset];
unsigned char tmp[4 * 4];
vp9_filter_block2d_4x4_8(src_ptr, src_pixels_per_line,
unsigned char *dst_ptr,
int dst_pitch
) {
- const short *HFilter = vp8_sub_pel_filters_8[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8[yoffset];
vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
unsigned char *dst_ptr,
int dst_pitch
) {
- const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8s[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8s[yoffset];
vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
int dst_pitch
) {
unsigned char tmp[8 * 8];
- const short *HFilter = vp8_sub_pel_filters_8[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8[yoffset];
vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
int dst_pitch
) {
unsigned char tmp[8 * 8];
- const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8s[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8s[yoffset];
vp9_filter_block2d_8x8_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
int dst_pitch
) {
- const short *HFilter = vp8_sub_pel_filters_8[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8[yoffset];
vp9_filter_block2d_8x4_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
unsigned char *dst_ptr,
int dst_pitch
) {
- const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8s[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8s[yoffset];
vp9_filter_block2d_8x4_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
unsigned char *dst_ptr,
int dst_pitch
) {
- const short *HFilter = vp8_sub_pel_filters_8[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8[yoffset];
vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
unsigned char *dst_ptr,
int dst_pitch
) {
- const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8s[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8s[yoffset];
vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
int dst_pitch
) {
DECLARE_ALIGNED_ARRAY(16, unsigned char, tmp, 16 * 16);
- const short *HFilter = vp8_sub_pel_filters_8[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8[yoffset];
vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
int dst_pitch
) {
DECLARE_ALIGNED_ARRAY(16, unsigned char, tmp, 16 * 16);
- const short *HFilter = vp8_sub_pel_filters_8s[xoffset];
- const short *VFilter = vp8_sub_pel_filters_8s[yoffset];
+ const short *HFilter = vp9_sub_pel_filters_8s[xoffset];
+ const short *VFilter = vp9_sub_pel_filters_8s[yoffset];
vp9_filter_block2d_16x16_8(src_ptr, src_pixels_per_line,
HFilter, VFilter,
const short *HFilter;
const short *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
#if 0
{
int i;
const short *HFilter;
const short *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
filter_block2d_bil_avg(src_ptr, dst_ptr, src_pixels_per_line,
dst_pitch, HFilter, VFilter, 4, 4);
const short *HFilter;
const short *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 8);
const short *HFilter;
const short *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
filter_block2d_bil_avg(src_ptr, dst_ptr, src_pixels_per_line,
dst_pitch, HFilter, VFilter, 8, 8);
const short *HFilter;
const short *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 4);
const short *HFilter;
const short *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 16, 16);
}
const short *HFilter;
const short *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
filter_block2d_bil_avg(src_ptr, dst_ptr, src_pixels_per_line,
dst_pitch, HFilter, VFilter, 16, 16);
#define SUBPEL_SHIFTS 16
-extern const short vp8_bilinear_filters[SUBPEL_SHIFTS][2];
-extern const short vp8_sub_pel_filters_6[SUBPEL_SHIFTS][6];
-extern const short vp8_sub_pel_filters_8[SUBPEL_SHIFTS][8];
-extern const short vp8_sub_pel_filters_8s[SUBPEL_SHIFTS][8];
+extern const short vp9_bilinear_filters[SUBPEL_SHIFTS][2];
+extern const short vp9_sub_pel_filters_6[SUBPEL_SHIFTS][6];
+extern const short vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8];
+extern const short vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8];
#endif // FILTER_H
#include "vp8/common/sadmxn.h"
#include <limits.h>
-const unsigned char vp8_mbsplit_offset[4][16] = {
+const unsigned char vp9_mbsplit_offset[4][16] = {
{ 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{ 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{ 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
vp8_prob p[VP8_MVREFS - 1], const int near_mv_ref_ct[4]
);
-extern const unsigned char vp8_mbsplit_offset[4][16];
+extern const unsigned char vp9_mbsplit_offset[4][16];
static int left_block_mv(const MODE_INFO *cur_mb, int b) {
// TODO: these transforms can be further converted into integer forms
// for complexity optimization
-float idct_4[16] = {
+static const float idct_4[16] = {
0.500000000000000, 0.653281482438188, 0.500000000000000, 0.270598050073099,
0.500000000000000, 0.270598050073099, -0.500000000000000, -0.653281482438188,
0.500000000000000, -0.270598050073099, -0.500000000000000, 0.653281482438188,
0.500000000000000, -0.653281482438188, 0.500000000000000, -0.270598050073099
};
-float iadst_4[16] = {
+static const float iadst_4[16] = {
0.228013428883779, 0.577350269189626, 0.656538502008139, 0.428525073124360,
0.428525073124360, 0.577350269189626, -0.228013428883779, -0.656538502008139,
0.577350269189626, 0, -0.577350269189626, 0.577350269189626,
0.656538502008139, -0.577350269189626, 0.428525073124359, -0.228013428883779
};
-float idct_8[64] = {
+static const float idct_8[64] = {
0.353553390593274, 0.490392640201615, 0.461939766255643, 0.415734806151273,
0.353553390593274, 0.277785116509801, 0.191341716182545, 0.097545161008064,
0.353553390593274, 0.415734806151273, 0.191341716182545, -0.097545161008064,
0.353553390593274, -0.277785116509801, 0.191341716182545, -0.097545161008064
};
-float iadst_8[64] = {
+static const float iadst_8[64] = {
0.089131608307533, 0.255357107325376, 0.387095214016349, 0.466553967085785,
0.483002021635509, 0.434217976756762, 0.326790388032145, 0.175227946595735,
0.175227946595735, 0.434217976756762, 0.466553967085785, 0.255357107325376,
0.326790388032145, -0.255357107325375, 0.175227946595736, -0.089131608307532
};
-const int16_t idct_i4[16] = {
+static const int16_t idct_i4[16] = {
8192, 10703, 8192, 4433,
8192, 4433, -8192, -10703,
8192, -4433, -8192, 10703,
8192, -10703, 8192, -4433
};
-const int16_t iadst_i4[16] = {
+static const int16_t iadst_i4[16] = {
3736, 9459, 10757, 7021,
7021, 9459, -3736, -10757,
9459, 0, -9459, 9459,
10757, -9459, 7021, -3736
};
-const int16_t idct_i8[64] = {
+static const int16_t idct_i8[64] = {
5793, 8035, 7568, 6811,
5793, 4551, 3135, 1598,
5793, 6811, 3135, -1598,
5793, -4551, 3135, -1598
};
-const int16_t iadst_i8[64] = {
+static const int16_t iadst_i8[64] = {
1460, 4184, 6342, 7644,
7914, 7114, 5354, 2871,
2871, 7114, 7644, 4184,
5354, -4184, 2871, -1460
};
-float idct_16[256] = {
+static const float idct_16[256] = {
0.250000, 0.351851, 0.346760, 0.338330, 0.326641, 0.311806, 0.293969, 0.273300,
0.250000, 0.224292, 0.196424, 0.166664, 0.135299, 0.102631, 0.068975, 0.034654,
0.250000, 0.338330, 0.293969, 0.224292, 0.135299, 0.034654, -0.068975, -0.166664,
0.250000, -0.224292, 0.196424, -0.166664, 0.135299, -0.102631, 0.068975, -0.034654
};
-float iadst_16[256] = {
+static const float iadst_16[256] = {
0.033094, 0.098087, 0.159534, 0.215215, 0.263118, 0.301511, 0.329007, 0.344612,
0.347761, 0.338341, 0.316693, 0.283599, 0.240255, 0.188227, 0.129396, 0.065889,
0.065889, 0.188227, 0.283599, 0.338341, 0.344612, 0.301511, 0.215215, 0.098087,
0.240255, -0.215215, 0.188227, -0.159534, 0.129396, -0.098087, 0.065889, -0.033094
};
-const int16_t idct_i16[256] = {
+static const int16_t idct_i16[256] = {
4096, 5765, 5681, 5543, 5352, 5109, 4816, 4478,
4096, 3675, 3218, 2731, 2217, 1682, 1130, 568,
4096, 5543, 4816, 3675, 2217, 568, -1130, -2731,
4096, -3675, 3218, -2731, 2217, -1682, 1130, -568
};
-const int16_t iadst_i16[256] = {
+static const int16_t iadst_i16[256] = {
542, 1607, 2614, 3526, 4311, 4940, 5390, 5646,
5698, 5543, 5189, 4646, 3936, 3084, 2120, 1080,
1080, 3084, 4646, 5543, 5646, 4940, 3526, 1607,
float *pfb = &bufb[0];
// pointers to vertical and horizontal transforms
- float *ptv, *pth;
+ const float *ptv, *pth;
assert(tx_type != DCT_DCT);
// load and convert residual array into floating-point
#include "entropy.h"
-const int default_vp8_mode_contexts[6][4] = {
+const int vp9_default_mode_contexts[6][4] = {
{
/* 0 */
7, 1, 1, 183
234, 188, 128, 28
},
};
-const int default_vp8_mode_contexts_a[6][4] = {
+const int vp9_default_mode_contexts_a[6][4] = {
{
/* 0 */
4, 1, 1, 143
#ifndef __INC_MODECONT_H
#define __INC_MODECONT_H
-extern const int default_vp8_mode_contexts[6][4];
-extern const int default_vp8_mode_contexts_a[6][4];
+extern const int vp9_default_mode_contexts[6][4];
+extern const int vp9_default_mode_contexts_a[6][4];
#endif
#include "entropymode.h"
-const unsigned int vp8_kf_default_bmode_counts [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] = {
+const unsigned int vp9_kf_default_bmode_counts[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES] = {
{
/*Above Mode : 0*/
{ 43438, 2195, 470, 316, 615, 171, 217, 412, 124, 160, }, /* left_mode 0 */
1, 1, 4, 1, 1
};
-const short vp8_rv[] = {
+const short vp9_rv[] = {
8, 5, 2, 2, 8, 12, 4, 9, 8, 3,
0, 3, 9, 0, 0, 0, 8, 3, 14, 4,
10, 1, 11, 14, 1, 14, 9, 6, 12, 11,
void vp9_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, int flimit) {
int r, c, i;
- const short *rv3 = &vp8_rv[63 & rand()];
+ const short *rv3 = &vp9_rv[63 & rand()];
for (c = 0; c < cols; c++) {
unsigned char *s = &dst[c];
int above_mode = (m - cm->mode_info_stride)->mbmi.mode;
int left_interp, above_interp;
if (left_in_image && left_mode >= NEARESTMV && left_mode <= SPLITMV)
- left_interp = vp8_switchable_interp_map[(m - 1)->mbmi.interp_filter];
+ left_interp = vp9_switchable_interp_map[(m - 1)->mbmi.interp_filter];
else
left_interp = VP8_SWITCHABLE_FILTERS;
if (above_in_image && above_mode >= NEARESTMV && above_mode <= SPLITMV)
- above_interp = vp8_switchable_interp_map[
+ above_interp = vp9_switchable_interp_map[
(m - cm->mode_info_stride)->mbmi.interp_filter];
else
above_interp = VP8_SWITCHABLE_FILTERS;
#include "vp8/common/seg_common.h"
-const int segfeaturedata_signed[SEG_LVL_MAX] = {1, 1, 0, 0, 0, 0};
-const int vp8_seg_feature_data_bits[SEG_LVL_MAX] =
-{QINDEX_BITS, 6, 4, 4, 6, 2};
+static const int segfeaturedata_signed[SEG_LVL_MAX] = { 1, 1, 0, 0, 0, 0 };
+static const int seg_feature_data_bits[SEG_LVL_MAX] = { QINDEX_BITS, 6, 4, 4, 6, 2 };
// These functions provide access to new segment level features.
// Eventually these function may be "optimized out" but for the moment,
}
int vp9_seg_feature_data_bits(SEG_LVL_FEATURES feature_id) {
- return vp8_seg_feature_data_bits[feature_id];
+ return seg_feature_data_bits[feature_id];
}
int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
;void vp9_mbpost_proc_down_mmx(unsigned char *dst,
; int pitch, int rows, int cols,int flimit)
-extern sym(vp8_rv)
+extern sym(vp9_rv)
global sym(vp9_mbpost_proc_down_mmx)
sym(vp9_mbpost_proc_down_mmx):
push rbp
%define flimit2 [rsp+128]
%if ABI_IS_32BIT=0
- lea r8, [GLOBAL(sym(vp8_rv))]
+ lea r8, [GLOBAL(sym(vp9_rv))]
%endif
;rows +=8;
and rcx, 127
%if ABI_IS_32BIT=1 && CONFIG_PIC=1
push rax
- lea rax, [GLOBAL(sym(vp8_rv))]
- movq mm4, [rax + rcx*2] ;vp8_rv[rcx*2]
+ lea rax, [GLOBAL(sym(vp9_rv))]
+ movq mm4, [rax + rcx*2] ;vp9_rv[rcx*2]
pop rax
%elif ABI_IS_32BIT=0
- movq mm4, [r8 + rcx*2] ;vp8_rv[rcx*2]
+ movq mm4, [r8 + rcx*2] ;vp9_rv[rcx*2]
%else
- movq mm4, [sym(vp8_rv) + rcx*2]
+ movq mm4, [sym(vp9_rv) + rcx*2]
%endif
paddw mm1, mm4
;paddw xmm1, eight8s
;void vp9_mbpost_proc_down_xmm(unsigned char *dst,
; int pitch, int rows, int cols,int flimit)
-extern sym(vp8_rv)
+extern sym(vp9_rv)
global sym(vp9_mbpost_proc_down_xmm)
sym(vp9_mbpost_proc_down_xmm):
push rbp
%define flimit4 [rsp+128]
%if ABI_IS_32BIT=0
- lea r8, [GLOBAL(sym(vp8_rv))]
+ lea r8, [GLOBAL(sym(vp9_rv))]
%endif
;rows +=8;
and rcx, 127
%if ABI_IS_32BIT=1 && CONFIG_PIC=1
push rax
- lea rax, [GLOBAL(sym(vp8_rv))]
- movdqu xmm4, [rax + rcx*2] ;vp8_rv[rcx*2]
+ lea rax, [GLOBAL(sym(vp9_rv))]
+ movdqu xmm4, [rax + rcx*2] ;vp9_rv[rcx*2]
pop rax
%elif ABI_IS_32BIT=0
- movdqu xmm4, [r8 + rcx*2] ;vp8_rv[rcx*2]
+ movdqu xmm4, [r8 + rcx*2] ;vp9_rv[rcx*2]
%else
- movdqu xmm4, [sym(vp8_rv) + rcx*2]
+ movdqu xmm4, [sym(vp9_rv) + rcx*2]
%endif
paddw xmm1, xmm4
mov rdi, arg(4) ;dst_ptr ;
shl rax, 5 ; offset * 32
- lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))]
+ lea rcx, [GLOBAL(sym(vp9_bilinear_filters_8x_mmx))]
add rax, rcx ; HFilter
mov rsi, arg(0) ;src_ptr ;
movsxd rax, dword ptr arg(2) ;xoffset
mov rdi, arg(4) ;dst_ptr ;
- lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))]
+ lea rcx, [GLOBAL(sym(vp9_bilinear_filters_8x_mmx))]
shl rax, 5
mov rsi, arg(0) ;src_ptr ;
movsxd rax, dword ptr arg(2) ;xoffset
mov rdi, arg(4) ;dst_ptr ;
- lea rcx, [GLOBAL(sym(vp9_bilinear_filters_mmx))]
+ lea rcx, [GLOBAL(sym(vp9_bilinear_filters_8x_mmx))]
shl rax, 5
add rax, rcx ; HFilter
align 16
-global HIDDEN_DATA(sym(vp9_bilinear_filters_mmx))
-sym(vp9_bilinear_filters_mmx):
+global HIDDEN_DATA(sym(vp9_bilinear_filters_8x_mmx))
+sym(vp9_bilinear_filters_8x_mmx):
times 8 dw 128
times 8 dw 0
#include "vp8/common/subpixel.h"
extern const short vp9_six_tap_mmx[16][6 * 8];
-extern const short vp9_bilinear_filters_mmx[16][2 * 8];
+extern const short vp9_bilinear_filters_8x_mmx[16][2 * 8];
extern void vp9_filter_block1d_h6_mmx
(
DEFINE(detok_scan, offsetof(DETOK, scan));
DEFINE(detok_ptr_block2leftabove, offsetof(DETOK, ptr_block2leftabove));
-DEFINE(detok_coef_tree_ptr, offsetof(DETOK, vp8_coef_tree_ptr));
+DEFINE(detok_coef_tree_ptr, offsetof(DETOK, vp9_coef_tree_ptr));
DEFINE(detok_norm_ptr, offsetof(DETOK, norm_ptr));
DEFINE(detok_ptr_coef_bands_x, offsetof(DETOK, ptr_coef_bands_x));
unsigned int range;
} BOOL_DECODER;
-DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
+DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]);
int vp9_start_decode(BOOL_DECODER *br,
const unsigned char *source,
}
{
- register unsigned int shift = vp8_norm[range];
+ register unsigned int shift = vp9_norm[range];
range <<= shift;
value <<= shift;
count -= shift;
#endif
static int vp8_read_bmode(vp8_reader *bc, const vp8_prob *p) {
- return vp8_treed_read(bc, vp8_bmode_tree, p);
+ return vp8_treed_read(bc, vp9_bmode_tree, p);
}
static int vp8_read_ymode(vp8_reader *bc, const vp8_prob *p) {
- return vp8_treed_read(bc, vp8_ymode_tree, p);
+ return vp8_treed_read(bc, vp9_ymode_tree, p);
}
#if CONFIG_SUPERBLOCKS
static int vp8_sb_kfread_ymode(vp8_reader *bc, const vp8_prob *p) {
- return vp8_treed_read(bc, vp8_uv_mode_tree, p);
+ return vp8_treed_read(bc, vp9_uv_mode_tree, p);
}
#endif
static int vp8_kfread_ymode(vp8_reader *bc, const vp8_prob *p) {
- return vp8_treed_read(bc, vp8_kf_ymode_tree, p);
+ return vp8_treed_read(bc, vp9_kf_ymode_tree, p);
}
static int vp8_read_i8x8_mode(vp8_reader *bc, const vp8_prob *p) {
- return vp8_treed_read(bc, vp8_i8x8_mode_tree, p);
+ return vp8_treed_read(bc, vp9_i8x8_mode_tree, p);
}
static int vp8_read_uv_mode(vp8_reader *bc, const vp8_prob *p) {
- return vp8_treed_read(bc, vp8_uv_mode_tree, p);
+ return vp8_treed_read(bc, vp9_uv_mode_tree, p);
}
// This function reads the current macro block's segnent id from the bitstream
}
#endif
-extern const int vp8_i8x8_block[4];
+extern const int vp9_i8x8_block[4];
static void kfread_modes(VP8D_COMP *pbi,
MODE_INFO *m,
int mb_row,
int i;
int mode8x8;
for (i = 0; i < 4; i++) {
- int ib = vp8_i8x8_block[i];
+ int ib = vp9_i8x8_block[i];
mode8x8 = vp8_read_i8x8_mode(bc, pbi->common.fc.i8x8_mode_prob);
m->bmi[ib + 0].as_mode.first = mode8x8;
m->bmi[ib + 1].as_mode.first = mode8x8;
const nmv_component *mvcomp) {
int v, s, z, c, o, d;
s = vp8_read(r, mvcomp->sign);
- c = vp8_treed_read(r, vp8_mv_class_tree, mvcomp->classes);
+ c = vp8_treed_read(r, vp9_mv_class_tree, mvcomp->classes);
if (c == MV_CLASS_0) {
- d = vp8_treed_read(r, vp8_mv_class0_tree, mvcomp->class0);
+ d = vp8_treed_read(r, vp9_mv_class0_tree, mvcomp->class0);
} else {
int i, b;
d = 0;
d = o >> 3;
if (c == MV_CLASS_0) {
- f = vp8_treed_read(r, vp8_mv_fp_tree, mvcomp->class0_fp[d]);
+ f = vp8_treed_read(r, vp9_mv_fp_tree, mvcomp->class0_fp[d]);
} else {
- f = vp8_treed_read(r, vp8_mv_fp_tree, mvcomp->fp);
+ f = vp8_treed_read(r, vp9_mv_fp_tree, mvcomp->fp);
}
o += (f << 1);
static void read_nmv(vp8_reader *r, MV *mv, const MV *ref,
const nmv_context *mvctx) {
- MV_JOINT_TYPE j = vp8_treed_read(r, vp8_mv_joint_tree, mvctx->joints);
+ MV_JOINT_TYPE j = vp8_treed_read(r, vp9_mv_joint_tree, mvctx->joints);
mv->row = mv-> col = 0;
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
mv->row = read_nmv_component(r, ref->row, &mvctx->comps[0]);
#if CONFIG_SUPERBLOCKS
static MB_PREDICTION_MODE read_sb_mv_ref(vp8_reader *bc, const vp8_prob *p) {
- return (MB_PREDICTION_MODE) vp8_treed_read(bc, vp8_sb_mv_ref_tree, p);
+ return (MB_PREDICTION_MODE) vp8_treed_read(bc, vp9_sb_mv_ref_tree, p);
}
#endif
static MB_PREDICTION_MODE read_mv_ref(vp8_reader *bc, const vp8_prob *p) {
- return (MB_PREDICTION_MODE) vp8_treed_read(bc, vp8_mv_ref_tree, p);
+ return (MB_PREDICTION_MODE) vp8_treed_read(bc, vp9_mv_ref_tree, p);
}
static B_PREDICTION_MODE sub_mv_ref(vp8_reader *bc, const vp8_prob *p) {
- return (B_PREDICTION_MODE) vp8_treed_read(bc, vp8_sub_mv_ref_tree, p);
+ return (B_PREDICTION_MODE) vp8_treed_read(bc, vp9_sub_mv_ref_tree, p);
}
#ifdef VPX_MODE_COUNT
if (mbmi->mode >= NEARESTMV && mbmi->mode <= SPLITMV)
{
if (cm->mcomp_filter_type == SWITCHABLE) {
- mbmi->interp_filter = vp8_switchable_interp[
- vp8_treed_read(bc, vp8_switchable_interp_tree,
+ mbmi->interp_filter = vp9_switchable_interp[
+ vp8_treed_read(bc, vp9_switchable_interp_tree,
vp9_get_pred_probs(cm, xd, PRED_SWITCHABLE_INTERP))];
} else {
mbmi->interp_filter = cm->mcomp_filter_type;
switch (mbmi->mode) {
case SPLITMV: {
const int s = mbmi->partitioning =
- vp8_treed_read(bc, vp8_mbsplit_tree, cm->fc.mbsplit_prob);
- const int num_p = vp8_mbsplit_count [s];
+ vp8_treed_read(bc, vp9_mbsplit_tree, cm->fc.mbsplit_prob);
+ const int num_p = vp9_mbsplit_count [s];
int j = 0;
cm->fc.mbsplit_counts[s]++;
int mv_contz;
int blockmode;
- k = vp8_mbsplit_offset[s][j];
+ k = vp9_mbsplit_offset[s][j];
leftmv.as_int = left_block_mv(mi, k);
abovemv.as_int = above_block_mv(mi, k, mis);
int i;
int mode8x8;
for (i = 0; i < 4; i++) {
- int ib = vp8_i8x8_block[i];
+ int ib = vp9_i8x8_block[i];
mode8x8 = vp8_read_i8x8_mode(bc, pbi->common.fc.i8x8_mode_prob);
mi->bmi[ib + 0].as_mode.first = mode8x8;
mi->bmi[ib + 1].as_mode.first = mode8x8;
/* all the ac values =; */
for (i = 1; i < 16; i++) {
- int rc = vp8_default_zig_zag1d[i];
+ int rc = vp9_default_zig_zag1d[i];
pc->Y1dequant[Q][rc] = (short)vp9_ac_yquant(Q);
pc->Y2dequant[Q][rc] = (short)vp9_ac2quant(Q, pc->y2ac_delta_q);
/* dequantization and idct */
if (mode == I8X8_PRED) {
for (i = 0; i < 4; i++) {
- int ib = vp8_i8x8_block[i];
+ int ib = vp9_i8x8_block[i];
const int iblock[4] = {0, 1, 4, 5};
int j;
int i8x8mode;
#define OCB_X PREV_COEF_CONTEXTS * ENTROPY_NODES
-DECLARE_ALIGNED(16, const int, coef_bands_x[16]) = {
+DECLARE_ALIGNED(16, static const int, coef_bands_x[16]) = {
0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X,
6 * OCB_X, 4 * OCB_X, 5 * OCB_X, 6 * OCB_X,
6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X,
6 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X
};
-DECLARE_ALIGNED(16, const int, coef_bands_x_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int, coef_bands_x_8x8[64]) = {
0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X,
5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X,
6 * OCB_X, 5 * OCB_X, 5 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X,
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
};
-DECLARE_ALIGNED(16, const int, coef_bands_x_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int, coef_bands_x_16x16[256]) = {
0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X, 5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X,
6 * OCB_X, 5 * OCB_X, 5 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X,
6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
}
}
-DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
+DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]);
// #define PREV_CONTEXT_INC(val) (2+((val)>2))
-// #define PREV_CONTEXT_INC(val) (vp8_prev_token_class[(val)])
-#define PREV_CONTEXT_INC(val) (vp8_prev_token_class[(val)>10?10:(val)])
+// #define PREV_CONTEXT_INC(val) (vp9_prev_token_class[(val)])
+#define PREV_CONTEXT_INC(val) (vp9_prev_token_class[(val)>10?10:(val)])
static int get_token(int v) {
if (v < 0) v = -v;
switch(tx_type) {
case ADST_DCT :
- scan = vp8_row_scan;
+ scan = vp9_row_scan;
break;
case DCT_ADST :
- scan = vp8_col_scan;
+ scan = vp9_col_scan;
break;
default :
- scan = vp8_default_zig_zag1d;
+ scan = vp9_default_zig_zag1d;
break;
}
for (c = !type; c < eob; ++c) {
int rc = scan[c];
int v = qcoeff_ptr[rc];
- band = vp8_coef_bands[c];
+ band = vp9_coef_bands[c];
token = get_token(v);
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts[type][band][pt][token]++;
else
fc->coef_counts[type][band][pt][token]++;
- pt = vp8_prev_token_class[token];
+ pt = vp9_prev_token_class[token];
}
if (eob < seg_eob) {
- band = vp8_coef_bands[c];
+ band = vp9_coef_bands[c];
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts[type][band][pt][DCT_EOB_TOKEN]++;
else
int c, pt, token, band;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
for (c = !type; c < eob; ++c) {
- int rc = vp8_default_zig_zag1d[c];
+ int rc = vp9_default_zig_zag1d[c];
int v = qcoeff_ptr[rc];
- band = vp8_coef_bands[c];
+ band = vp9_coef_bands[c];
token = get_token(v);
fc->coef_counts[type][band][pt][token]++;
- pt = vp8_prev_token_class[token];
+ pt = vp9_prev_token_class[token];
}
if (eob < seg_eob) {
- band = vp8_coef_bands[c];
+ band = vp9_coef_bands[c];
fc->coef_counts[type][band][pt][DCT_EOB_TOKEN]++;
}
}
int c, pt, token, band;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
for (c = !type; c < eob; ++c) {
- int rc = (type == 1 ? vp8_default_zig_zag1d[c] : vp8_default_zig_zag1d_8x8[c]);
+ int rc = (type == 1 ? vp9_default_zig_zag1d[c] : vp9_default_zig_zag1d_8x8[c]);
int v = qcoeff_ptr[rc];
- band = (type == 1 ? vp8_coef_bands[c] : vp8_coef_bands_8x8[c]);
+ band = (type == 1 ? vp9_coef_bands[c] : vp9_coef_bands_8x8[c]);
token = get_token(v);
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts_8x8[type][band][pt][token]++;
else
fc->coef_counts_8x8[type][band][pt][token]++;
- pt = vp8_prev_token_class[token];
+ pt = vp9_prev_token_class[token];
}
if (eob < seg_eob) {
- band = (type == 1 ? vp8_coef_bands[c] : vp8_coef_bands_8x8[c]);
+ band = (type == 1 ? vp9_coef_bands[c] : vp9_coef_bands_8x8[c]);
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN]++;
else
int c, pt, token;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
for (c = !type; c < eob; ++c) {
- int rc = vp8_default_zig_zag1d_16x16[c];
+ int rc = vp9_default_zig_zag1d_16x16[c];
int v = qcoeff_ptr[rc];
- int band = vp8_coef_bands_16x16[c];
+ int band = vp9_coef_bands_16x16[c];
token = get_token(v);
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts_16x16[type][band][pt][token]++;
else
fc->coef_counts_16x16[type][band][pt][token]++;
- pt = vp8_prev_token_class[token];
+ pt = vp9_prev_token_class[token];
}
if (eob < seg_eob) {
- int band = vp8_coef_bands_16x16[c];
+ int band = vp9_coef_bands_16x16[c];
if (tx_type != DCT_DCT)
fc->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN]++;
else
// Luma block
{
- const int* const scan = vp8_default_zig_zag1d_16x16;
+ const int* const scan = vp9_default_zig_zag1d_16x16;
c = decode_coefs(pbi, xd, bc, A, L, type,
tx_type,
seg_eob, qcoeff_ptr,
else
seg_eob = 64;
for (i = 16; i < 24; i += 4) {
- ENTROPY_CONTEXT* const a = A + vp8_block2above_8x8[i];
- ENTROPY_CONTEXT* const l = L + vp8_block2left_8x8[i];
- const int* const scan = vp8_default_zig_zag1d_8x8;
+ ENTROPY_CONTEXT* const a = A + vp9_block2above_8x8[i];
+ ENTROPY_CONTEXT* const l = L + vp9_block2left_8x8[i];
+ const int* const scan = vp9_default_zig_zag1d_8x8;
c = decode_coefs(pbi, xd, bc, a, l, type,
tx_type,
if (xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV &&
xd->mode_info_context->mbmi.mode != I8X8_PRED) {
- ENTROPY_CONTEXT *const a = A + vp8_block2above_8x8[24];
- ENTROPY_CONTEXT *const l = L + vp8_block2left_8x8[24];
- const int *const scan = vp8_default_zig_zag1d;
+ ENTROPY_CONTEXT *const a = A + vp9_block2above_8x8[24];
+ ENTROPY_CONTEXT *const l = L + vp9_block2left_8x8[24];
+ const int *const scan = vp9_default_zig_zag1d;
type = PLANE_TYPE_Y2;
if (seg_active)
seg_eob = 64;
for (i = 0; i < bufthred ; i += 4) {
- ENTROPY_CONTEXT *const a = A + vp8_block2above_8x8[i];
- ENTROPY_CONTEXT *const l = L + vp8_block2left_8x8[i];
- const int *const scan = vp8_default_zig_zag1d_8x8;
+ ENTROPY_CONTEXT *const a = A + vp9_block2above_8x8[i];
+ ENTROPY_CONTEXT *const l = L + vp9_block2left_8x8[i];
+ const int *const scan = vp9_default_zig_zag1d_8x8;
tx_type = DCT_DCT;
if (i == 16)
// use 4x4 transform for U, V components in I8X8 prediction mode
for (i = 16; i < 24; i++) {
- ENTROPY_CONTEXT *const a = A + vp8_block2above[i];
- ENTROPY_CONTEXT *const l = L + vp8_block2left[i];
- const int *scan = vp8_default_zig_zag1d;
+ ENTROPY_CONTEXT *const a = A + vp9_block2above[i];
+ ENTROPY_CONTEXT *const l = L + vp9_block2left[i];
+ const int *scan = vp9_default_zig_zag1d;
c = decode_coefs(pbi, xd, bc, a, l, type,
tx_type,
ENTROPY_CONTEXT *const L = (ENTROPY_CONTEXT *)xd->left_context;
char *const eobs = xd->eobs;
- const int *scan = vp8_default_zig_zag1d;
+ const int *scan = vp9_default_zig_zag1d;
PLANE_TYPE type;
int c, i, eobtotal = 0, seg_eob = 16;
INT16 *qcoeff_ptr = &xd->qcoeff[0];
if (xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV) {
- ENTROPY_CONTEXT *const a = A + vp8_block2above[24];
- ENTROPY_CONTEXT *const l = L + vp8_block2left[24];
+ ENTROPY_CONTEXT *const a = A + vp9_block2above[24];
+ ENTROPY_CONTEXT *const l = L + vp9_block2left[24];
type = PLANE_TYPE_Y2;
c = decode_coefs(dx, xd, bc, a, l, type,
}
for (i = 0; i < 24; ++i) {
- ENTROPY_CONTEXT *const a = A + vp8_block2above[i];
- ENTROPY_CONTEXT *const l = L + vp8_block2left[i];
+ ENTROPY_CONTEXT *const a = A + vp9_block2above[i];
+ ENTROPY_CONTEXT *const l = L + vp9_block2left[i];
TX_TYPE tx_type = DCT_DCT;
if (i == 16)
type = PLANE_TYPE_UV;
tx_type = get_tx_type(xd, &xd->block[i]);
switch(tx_type) {
case ADST_DCT :
- scan = vp8_row_scan;
+ scan = vp9_row_scan;
break;
case DCT_ADST :
- scan = vp8_col_scan;
+ scan = vp9_col_scan;
break;
default :
- scan = vp8_default_zig_zag1d;
+ scan = vp9_default_zig_zag1d;
break;
}
int const *scan;
int const *scan_8x8;
UINT8 const *ptr_block2leftabove;
- vp8_tree_index const *vp8_coef_tree_ptr;
+ vp8_tree_index const *vp9_coef_tree_ptr;
unsigned char *norm_ptr;
UINT8 *ptr_coef_bands_x;
UINT8 *ptr_coef_bands_x_8x8;
BEGIN
/* regular quantize */
-DEFINE(vp8_block_coeff, offsetof(BLOCK, coeff));
-DEFINE(vp8_block_zbin, offsetof(BLOCK, zbin));
-DEFINE(vp8_block_round, offsetof(BLOCK, round));
-DEFINE(vp8_block_quant, offsetof(BLOCK, quant));
-DEFINE(vp8_block_quant_fast, offsetof(BLOCK, quant_fast));
-DEFINE(vp8_block_zbin_extra, offsetof(BLOCK, zbin_extra));
-DEFINE(vp8_block_zrun_zbin_boost, offsetof(BLOCK, zrun_zbin_boost));
-DEFINE(vp8_block_quant_shift, offsetof(BLOCK, quant_shift));
-
-DEFINE(vp8_blockd_qcoeff, offsetof(BLOCKD, qcoeff));
-DEFINE(vp8_blockd_dequant, offsetof(BLOCKD, dequant));
-DEFINE(vp8_blockd_dqcoeff, offsetof(BLOCKD, dqcoeff));
-DEFINE(vp8_blockd_eob, offsetof(BLOCKD, eob));
+DEFINE(vp9_block_coeff, offsetof(BLOCK, coeff));
+DEFINE(vp9_block_zbin, offsetof(BLOCK, zbin));
+DEFINE(vp9_block_round, offsetof(BLOCK, round));
+DEFINE(vp9_block_quant, offsetof(BLOCK, quant));
+DEFINE(vp9_block_quant_fast, offsetof(BLOCK, quant_fast));
+DEFINE(vp9_block_zbin_extra, offsetof(BLOCK, zbin_extra));
+DEFINE(vp9_block_zrun_zbin_boost, offsetof(BLOCK, zrun_zbin_boost));
+DEFINE(vp9_block_quant_shift, offsetof(BLOCK, quant_shift));
+
+DEFINE(vp9_blockd_qcoeff, offsetof(BLOCKD, qcoeff));
+DEFINE(vp9_blockd_dequant, offsetof(BLOCKD, dequant));
+DEFINE(vp9_blockd_dqcoeff, offsetof(BLOCKD, dqcoeff));
+DEFINE(vp9_blockd_eob, offsetof(BLOCKD, eob));
/* subtract */
-DEFINE(vp8_block_base_src, offsetof(BLOCK, base_src));
-DEFINE(vp8_block_src, offsetof(BLOCK, src));
-DEFINE(vp8_block_src_diff, offsetof(BLOCK, src_diff));
-DEFINE(vp8_block_src_stride, offsetof(BLOCK, src_stride));
+DEFINE(vp9_block_base_src, offsetof(BLOCK, base_src));
+DEFINE(vp9_block_src, offsetof(BLOCK, src));
+DEFINE(vp9_block_src_diff, offsetof(BLOCK, src_diff));
+DEFINE(vp9_block_src_stride, offsetof(BLOCK, src_stride));
-DEFINE(vp8_blockd_predictor, offsetof(BLOCKD, predictor));
+DEFINE(vp9_blockd_predictor, offsetof(BLOCKD, predictor));
/* pack tokens */
-DEFINE(vp8_writer_lowvalue, offsetof(vp8_writer, lowvalue));
-DEFINE(vp8_writer_range, offsetof(vp8_writer, range));
-DEFINE(vp8_writer_value, offsetof(vp8_writer, value));
-DEFINE(vp8_writer_count, offsetof(vp8_writer, count));
-DEFINE(vp8_writer_pos, offsetof(vp8_writer, pos));
-DEFINE(vp8_writer_buffer, offsetof(vp8_writer, buffer));
+DEFINE(vp9_writer_lowvalue, offsetof(vp8_writer, lowvalue));
+DEFINE(vp9_writer_range, offsetof(vp8_writer, range));
+DEFINE(vp9_writer_value, offsetof(vp8_writer, value));
+DEFINE(vp9_writer_count, offsetof(vp8_writer, count));
+DEFINE(vp9_writer_pos, offsetof(vp8_writer, pos));
+DEFINE(vp9_writer_buffer, offsetof(vp8_writer, buffer));
DEFINE(tokenextra_token, offsetof(TOKENEXTRA, Token));
DEFINE(tokenextra_extra, offsetof(TOKENEXTRA, Extra));
DEFINE(tokenextra_skip_eob_node, offsetof(TOKENEXTRA, skip_eob_node));
DEFINE(TOKENEXTRA_SZ, sizeof(TOKENEXTRA));
-DEFINE(vp8_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct));
+DEFINE(vp9_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct));
-DEFINE(vp8_token_value, offsetof(vp8_token, value));
-DEFINE(vp8_token_len, offsetof(vp8_token, Len));
+DEFINE(vp9_token_value, offsetof(vp8_token, value));
+DEFINE(vp9_token_len, offsetof(vp8_token, Len));
-DEFINE(vp8_extra_bit_struct_tree, offsetof(vp8_extra_bit_struct, tree));
-DEFINE(vp8_extra_bit_struct_prob, offsetof(vp8_extra_bit_struct, prob));
-DEFINE(vp8_extra_bit_struct_len, offsetof(vp8_extra_bit_struct, Len));
-DEFINE(vp8_extra_bit_struct_base_val, offsetof(vp8_extra_bit_struct, base_val));
+DEFINE(vp9_extra_bit_struct_tree, offsetof(vp8_extra_bit_struct, tree));
+DEFINE(vp9_extra_bit_struct_prob, offsetof(vp8_extra_bit_struct, prob));
+DEFINE(vp9_extra_bit_struct_len, offsetof(vp8_extra_bit_struct, Len));
+DEFINE(vp9_extra_bit_struct_base_val, offsetof(vp8_extra_bit_struct, base_val));
-DEFINE(vp8_comp_tplist, offsetof(VP8_COMP, tplist));
-DEFINE(vp8_comp_common, offsetof(VP8_COMP, common));
+DEFINE(vp9_comp_tplist, offsetof(VP8_COMP, tplist));
+DEFINE(vp9_comp_common, offsetof(VP8_COMP, common));
DEFINE(tokenlist_start, offsetof(TOKENLIST, start));
DEFINE(tokenlist_stop, offsetof(TOKENLIST, stop));
DEFINE(TOKENLIST_SZ, sizeof(TOKENLIST));
-DEFINE(vp8_common_mb_rows, offsetof(VP8_COMMON, mb_rows));
+DEFINE(vp9_common_mb_rows, offsetof(VP8_COMMON, mb_rows));
END
#if HAVE_ARMV5TE
ct_assert(TOKENEXTRA_SZ, sizeof(TOKENEXTRA) == 8)
-ct_assert(vp8_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct) == 16)
+ct_assert(vp9_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct) == 16)
#endif
unsigned int bct [VP8_YMODES - 1] [2];
update_mode(
- bc, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
+ bc, VP8_YMODES, vp9_ymode_encodings, vp9_ymode_tree,
Pnew, cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
);
}
for (j = 0; j <= VP8_SWITCHABLE_FILTERS; ++j) {
vp9_tree_probs_from_distribution(
VP8_SWITCHABLE_FILTERS,
- vp8_switchable_interp_encodings, vp8_switchable_interp_tree,
+ vp9_switchable_interp_encodings, vp9_switchable_interp_tree,
pc->fc.switchable_interp_prob[j], branch_ct,
cpi->switchable_interp_count[j], 256, 1);
for (i = 0; i < VP8_SWITCHABLE_FILTERS - 1; ++i) {
}
static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
+ vp8_write_token(bc, vp9_ymode_tree, p, vp9_ymode_encodings + m);
}
static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m);
+ vp8_write_token(bc, vp9_kf_ymode_tree, p, vp9_kf_ymode_encodings + m);
}
#if CONFIG_SUPERBLOCKS
static void sb_kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_sb_kf_ymode_encodings + m);
+ vp8_write_token(bc, vp9_uv_mode_tree, p, vp9_sb_kf_ymode_encodings + m);
}
#endif
static void write_i8x8_mode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_i8x8_mode_tree, p, vp8_i8x8_mode_encodings + m);
+ vp8_write_token(bc, vp9_i8x8_mode_tree, p, vp9_i8x8_mode_encodings + m);
}
static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m);
+ vp8_write_token(bc, vp9_uv_mode_tree, p, vp9_uv_mode_encodings + m);
}
static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m);
+ vp8_write_token(bc, vp9_bmode_tree, p, vp9_bmode_encodings + m);
}
static void write_split(vp8_writer *bc, int x, const vp8_prob *p) {
vp8_write_token(
- bc, vp8_mbsplit_tree, p, vp8_mbsplit_encodings + x
+ bc, vp9_mbsplit_tree, p, vp9_mbsplit_encodings + x
);
}
while (p < stop) {
const int t = p->Token;
- vp8_token *const a = vp8_coef_encodings + t;
- const vp8_extra_bit_struct *const b = vp8_extra_bits + t;
+ vp8_token *const a = vp9_coef_encodings + t;
+ const vp8_extra_bit_struct *const b = vp9_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
int v = a->value;
do {
const int bb = (v >> --n) & 1;
split = 1 + (((range - 1) * pp[i >> 1]) >> 8);
- i = vp8_coef_tree[i + bb];
+ i = vp9_coef_tree[i + bb];
if (bb) {
lowvalue += split;
range = split;
}
- shift = vp8_norm[range];
+ shift = vp9_norm[range];
range <<= shift;
count += shift;
range = split;
}
- shift = vp8_norm[range];
+ shift = vp9_norm[range];
range <<= shift;
count += shift;
#if CONFIG_DEBUG
assert(NEARESTMV <= m && m <= SPLITMV);
#endif
- vp8_write_token(bc, vp8_mv_ref_tree, p,
- vp8_mv_ref_encoding_array - NEARESTMV + m);
+ vp8_write_token(bc, vp9_mv_ref_tree, p,
+ vp9_mv_ref_encoding_array - NEARESTMV + m);
}
#if CONFIG_SUPERBLOCKS
#if CONFIG_DEBUG
assert(NEARESTMV <= m && m < SPLITMV);
#endif
- vp8_write_token(bc, vp8_sb_mv_ref_tree, p,
- vp8_sb_mv_ref_encoding_array - NEARESTMV + m);
+ vp8_write_token(bc, vp9_sb_mv_ref_tree, p,
+ vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
}
#endif
#if CONFIG_DEBUG
assert(LEFT4X4 <= m && m <= NEW4X4);
#endif
- vp8_write_token(bc, vp8_sub_mv_ref_tree, p,
- vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
+ vp8_write_token(bc, vp9_sub_mv_ref_tree, p,
+ vp9_sub_mv_ref_encoding_array - LEFT4X4 + m);
}
static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
if (mode >= NEARESTMV && mode <= SPLITMV)
{
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
- vp8_write_token(bc, vp8_switchable_interp_tree,
+ vp8_write_token(bc, vp9_switchable_interp_tree,
vp9_get_pred_probs(&cpi->common, xd,
PRED_SWITCHABLE_INTERP),
- vp8_switchable_interp_encodings +
- vp8_switchable_interp_map[mi->interp_filter]);
+ vp9_switchable_interp_encodings +
+ vp9_switchable_interp_map[mi->interp_filter]);
} else {
assert (mi->interp_filter ==
cpi->common.mcomp_filter_type);
B_PREDICTION_MODE blockmode;
int_mv blockmv;
const int *const L =
- vp8_mbsplits [mi->partitioning];
+ vp9_mbsplits [mi->partitioning];
int k = -1; /* first block in subset j */
int mv_contz;
int_mv leftmv, abovemv;
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_coef_probs [i][j][k],
cpi->frame_branch_ct [i][j][k],
cpi->coef_counts [i][j][k],
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_hybrid_coef_probs [i][j][k],
cpi->frame_hybrid_branch_ct [i][j][k],
cpi->hybrid_coef_counts [i][j][k],
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_coef_probs_8x8 [i][j][k],
cpi->frame_branch_ct_8x8 [i][j][k],
cpi->coef_counts_8x8 [i][j][k],
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_hybrid_coef_probs_8x8 [i][j][k],
cpi->frame_hybrid_branch_ct_8x8 [i][j][k],
cpi->hybrid_coef_counts_8x8 [i][j][k],
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_coef_probs_16x16[i][j][k],
cpi->frame_branch_ct_16x16[i][j][k],
cpi->coef_counts_16x16[i][j][k], 256, 1);
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_hybrid_coef_probs_16x16[i][j][k],
cpi->frame_hybrid_branch_ct_16x16[i][j][k],
cpi->hybrid_coef_counts_16x16[i][j][k], 256, 1);
int i, j;
for (i = 0; i < 8; i++) {
- vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
+ vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp9_kf_ymode_tree);
cost = 0;
for (j = 0; j < VP8_YMODES; j++) {
cost += mode_cost[j] * cpi->ymode_count[j];
/* Only one filter is used. So set the filter at frame level */
for (i = 0; i < VP8_SWITCHABLE_FILTERS; ++i) {
if (count[i]) {
- pc->mcomp_filter_type = vp8_switchable_interp[i];
+ pc->mcomp_filter_type = vp9_switchable_interp[i];
break;
}
}
#ifndef __INC_BITSTREAM_H
#define __INC_BITSTREAM_H
-#if HAVE_ARMV5TE
-void vp8cx_pack_tokens_armv5(vp8_writer *w, const TOKENEXTRA *p, int xcount,
- vp8_token *,
- vp8_extra_bit_struct *,
- const vp8_tree_index *);
-# define pack_tokens(a,b,c) \
- vp8cx_pack_tokens_armv5(a,b,c,vp8_coef_encodings,vp8_extra_bits,vp8_coef_tree)
-#else
-# define pack_tokens(a,b,c) pack_tokens_c(a,b,c)
-#endif
-#endif
-
void vp9_update_skip_probs(VP8_COMP *cpi);
+
+#endif
unsigned int active_section = 0;
#endif
-const unsigned int vp8_prob_cost[256] = {
+const unsigned int vp9_prob_cost[256] = {
2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046,
1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778,
767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
extern void vp9_encode_value(BOOL_CODER *br, int data, int bits);
extern void vp9_stop_encode(BOOL_CODER *bc);
-extern const unsigned int vp8_prob_cost[256];
+extern const unsigned int vp9_prob_cost[256];
extern void vp9_encode_uniform(BOOL_CODER *bc, int v, int n);
extern void vp9_encode_term_subexp(BOOL_CODER *bc, int v, int k, int n);
extern int vp9_count_term_subexp(int v, int k, int n);
extern int vp9_recenter_nonneg(int v, int m);
-DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
+DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]);
static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) {
#if defined(SECTIONBITS_OUTPUT)
if (bit)
- Sectionbits[active_section] += vp8_prob_cost[255 - probability];
+ Sectionbits[active_section] += vp9_prob_cost[255 - probability];
else
- Sectionbits[active_section] += vp8_prob_cost[probability];
+ Sectionbits[active_section] += vp9_prob_cost[probability];
#endif
#endif
range = br->range - split;
}
- shift = vp8_norm[range];
+ shift = vp9_norm[range];
range <<= shift;
count += shift;
// TODO: these transforms can be converted into integer forms to reduce
// the complexity
-float dct_4[16] = {
+static const float dct_4[16] = {
0.500000000000000, 0.500000000000000, 0.500000000000000, 0.500000000000000,
0.653281482438188, 0.270598050073099, -0.270598050073099, -0.653281482438188,
0.500000000000000, -0.500000000000000, -0.500000000000000, 0.500000000000000,
0.270598050073099, -0.653281482438188, 0.653281482438188, -0.270598050073099
};
-float adst_4[16] = {
+static const float adst_4[16] = {
0.228013428883779, 0.428525073124360, 0.577350269189626, 0.656538502008139,
0.577350269189626, 0.577350269189626, 0.000000000000000, -0.577350269189626,
0.656538502008139, -0.228013428883779, -0.577350269189626, 0.428525073124359,
0.428525073124360, -0.656538502008139, 0.577350269189626, -0.228013428883779
};
-float dct_8[64] = {
+static const float dct_8[64] = {
0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274,
0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274,
0.490392640201615, 0.415734806151273, 0.277785116509801, 0.097545161008064,
0.490392640201615, -0.415734806151273, 0.277785116509801, -0.097545161008064
};
-float adst_8[64] = {
+static const float adst_8[64] = {
0.089131608307533, 0.175227946595735, 0.255357107325376, 0.326790388032145,
0.387095214016349, 0.434217976756762, 0.466553967085785, 0.483002021635509,
0.255357107325376, 0.434217976756762, 0.483002021635509, 0.387095214016349,
};
/* Converted the transforms to integers. */
-const int16_t dct_i4[16] = {
+static const int16_t dct_i4[16] = {
16384, 16384, 16384, 16384,
21407, 8867, -8867, -21407,
16384, -16384, -16384, 16384,
8867, -21407, 21407, -8867
};
-const int16_t adst_i4[16] = {
+static const int16_t adst_i4[16] = {
7472, 14042, 18919, 21513,
18919, 18919, 0, -18919,
21513, -7472, -18919, 14042,
14042, -21513, 18919, -7472
};
-const int16_t dct_i8[64] = {
+static const int16_t dct_i8[64] = {
11585, 11585, 11585, 11585,
11585, 11585, 11585, 11585,
16069, 13623, 9102, 3196,
16069, -13623, 9102, -3196
};
-const int16_t adst_i8[64] = {
+static const int16_t adst_i8[64] = {
2921, 5742, 8368, 10708,
12684, 14228, 15288, 15827,
8368, 14228, 15827, 12684,
15288, -12684, 8368, -2921
};
-float dct_16[256] = {
+static const float dct_16[256] = {
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
0.351851, 0.338330, 0.311806, 0.273300, 0.224292, 0.166664, 0.102631, 0.034654,
0.351851, -0.338330, 0.311806, -0.273300, 0.224292, -0.166664, 0.102631, -0.034654
};
-float adst_16[256] = {
+static const float adst_16[256] = {
0.033094, 0.065889, 0.098087, 0.129396, 0.159534, 0.188227, 0.215215, 0.240255,
0.263118, 0.283599, 0.301511, 0.316693, 0.329007, 0.338341, 0.344612, 0.347761,
0.098087, 0.188227, 0.263118, 0.316693, 0.344612, 0.344612, 0.316693, 0.263118,
};
/* Converted the transforms to integers. */
-const int16_t dct_i16[256] = {
+static const int16_t dct_i16[256] = {
8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
11529, 11086, 10217, 8955, 7350, 5461, 3363, 1136,
11529, -11086, 10217, -8955, 7350, -5461, 3363, -1136
};
-const int16_t adst_i16[256] = {
+static const int16_t adst_i16[256] = {
1084, 2159, 3214, 4240, 5228, 6168, 7052, 7873,
8622, 9293, 9880, 10377, 10781, 11087, 11292, 11395,
3214, 6168, 8622, 10377, 11292, 11292, 10377, 8622,
float *pfb = &bufb[0];
// pointers to vertical and horizontal transforms
- float *ptv, *pth;
+ const float *ptv, *pth;
assert(tx_type != DCT_DCT);
// load and convert residual array into floating-point
int i, ib;
for (i = 0; i < 4; i++) {
- ib = vp8_i8x8_block[i];
+ ib = vp9_i8x8_block[i];
vp9_encode_intra8x8(rtcd, x, ib);
}
}
BLOCKD *b;
for (i = 0; i < 4; i++) {
- ib = vp8_i8x8_block[i];
+ ib = vp9_i8x8_block[i];
b = &x->e_mbd.block[ib];
mode = b->bmi.as_mode.first;
#if CONFIG_COMP_INTRA_PRED
switch (tx_size) {
default:
case TX_4X4:
- scan = vp8_default_zig_zag1d;
- bands = vp8_coef_bands;
+ scan = vp9_default_zig_zag1d;
+ bands = vp9_coef_bands;
default_eob = 16;
// TODO: this isn't called (for intra4x4 modes), but will be left in
// since it could be used later
if (tx_type != DCT_DCT) {
switch (tx_type) {
case ADST_DCT:
- scan = vp8_row_scan;
+ scan = vp9_row_scan;
break;
case DCT_ADST:
- scan = vp8_col_scan;
+ scan = vp9_col_scan;
break;
default:
- scan = vp8_default_zig_zag1d;
+ scan = vp9_default_zig_zag1d;
break;
}
} else {
- scan = vp8_default_zig_zag1d;
+ scan = vp9_default_zig_zag1d;
}
}
break;
case TX_8X8:
- scan = vp8_default_zig_zag1d_8x8;
- bands = vp8_coef_bands_8x8;
+ scan = vp9_default_zig_zag1d_8x8;
+ bands = vp9_coef_bands_8x8;
default_eob = 64;
break;
}
/* Evaluate the first possibility for this state. */
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
- t0 = (vp8_dct_value_tokens_ptr + x)->Token;
+ t0 = (vp9_dct_value_tokens_ptr + x)->Token;
/* Consider both possible successor states. */
if (next < default_eob) {
band = bands[i + 1];
- pt = vp8_prev_token_class[t0];
+ pt = vp9_prev_token_class[t0];
rate0 +=
mb->token_costs[tx_size][type][band][pt][tokens[next][0].token];
rate1 +=
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
+ base_bits = *(vp9_dct_value_cost_ptr + x);
dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
d2 = dx * dx;
tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
DCT_EOB_TOKEN : ZERO_TOKEN;
} else {
- t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token;
+ t0 = t1 = (vp9_dct_value_tokens_ptr + x)->Token;
}
if (next < default_eob) {
band = bands[i + 1];
if (t0 != DCT_EOB_TOKEN) {
- pt = vp8_prev_token_class[t0];
+ pt = vp9_prev_token_class[t0];
rate0 += mb->token_costs[tx_size][type][band][pt][
tokens[next][0].token];
}
if (t1 != DCT_EOB_TOKEN) {
- pt = vp8_prev_token_class[t1];
+ pt = vp9_prev_token_class[t1];
rate1 += mb->token_costs[tx_size][type][band][pt][
tokens[next][1].token];
}
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
+ base_bits = *(vp9_dct_value_cost_ptr + x);
if (shortcut) {
dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
return;
for (i = 0; i < bd->eob; i++) {
- int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]];
+ int coef = bd->dqcoeff[vp9_default_zig_zag1d[i]];
sum += (coef >= 0) ? coef : -coef;
if (sum >= SUM_2ND_COEFF_THRESH)
return;
if (sum < SUM_2ND_COEFF_THRESH) {
for (i = 0; i < bd->eob; i++) {
- int rc = vp8_default_zig_zag1d[i];
+ int rc = vp9_default_zig_zag1d[i];
bd->qcoeff[rc] = 0;
bd->dqcoeff[rc] = 0;
}
for (b = 0; b < 16; b++) {
optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
+ ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4);
}
if (has_2nd_order) {
b = 24;
optimize_b(x, b, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
+ ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4);
check_reset_2nd_coeffs(&x->e_mbd,
- ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ ta + vp9_block2above[b], tl + vp9_block2left[b]);
}
}
for (b = 16; b < 24; b++) {
optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
+ ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4);
}
}
type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
for (b = 0; b < 16; b += 4) {
optimize_b(x, b, type,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b],
rtcd, TX_8X8);
- ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
- tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
+ ta[vp9_block2above_8x8[b] + 1] = ta[vp9_block2above_8x8[b]];
+ tl[vp9_block2left_8x8[b] + 1] = tl[vp9_block2left_8x8[b]];
}
// 8x8 always have 2nd roder haar block
if (has_2nd_order) {
check_reset_8x8_2nd_coeffs(&x->e_mbd,
- ta + vp8_block2above_8x8[24],
- tl + vp8_block2left_8x8[24]);
+ ta + vp9_block2above_8x8[24],
+ tl + vp9_block2left_8x8[24]);
}
}
for (b = 16; b < 24; b += 4) {
optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b],
rtcd, TX_8X8);
- ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
- tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
+ ta[vp9_block2above_8x8[b] + 1] = ta[vp9_block2above_8x8[b]];
+ tl[vp9_block2left_8x8[b] + 1] = tl[vp9_block2left_8x8[b]];
}
}
for (i = eob; i-- > 0;) {
int base_bits, d2, dx;
- rc = vp8_default_zig_zag1d_16x16[i];
+ rc = vp9_default_zig_zag1d_16x16[i];
x = qcoeff_ptr[rc];
/* Only add a trellis state for non-zero coefficients. */
if (x) {
/* Evaluate the first possibility for this state. */
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
- t0 = (vp8_dct_value_tokens_ptr + x)->Token;
+ t0 = (vp9_dct_value_tokens_ptr + x)->Token;
/* Consider both possible successor states. */
if (next < 256) {
- band = vp8_coef_bands_16x16[i + 1];
- pt = vp8_prev_token_class[t0];
+ band = vp9_coef_bands_16x16[i + 1];
+ pt = vp9_prev_token_class[t0];
rate0 += mb->token_costs[TX_16X16][type][band][pt][tokens[next][0].token];
rate1 += mb->token_costs[TX_16X16][type][band][pt][tokens[next][1].token];
}
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
+ base_bits = *(vp9_dct_value_cost_ptr + x);
dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
d2 = dx*dx;
tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
DCT_EOB_TOKEN : ZERO_TOKEN;
}
else
- t0=t1 = (vp8_dct_value_tokens_ptr + x)->Token;
+ t0=t1 = (vp9_dct_value_tokens_ptr + x)->Token;
if (next < 256) {
- band = vp8_coef_bands_16x16[i + 1];
+ band = vp9_coef_bands_16x16[i + 1];
if (t0 != DCT_EOB_TOKEN) {
- pt = vp8_prev_token_class[t0];
+ pt = vp9_prev_token_class[t0];
rate0 += mb->token_costs[TX_16X16][type][band][pt]
[tokens[next][0].token];
}
if (t1!=DCT_EOB_TOKEN) {
- pt = vp8_prev_token_class[t1];
+ pt = vp9_prev_token_class[t1];
rate1 += mb->token_costs[TX_16X16][type][band][pt]
[tokens[next][1].token];
}
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
+ base_bits = *(vp9_dct_value_cost_ptr + x);
if(shortcut) {
dx -= (dequant_ptr[rc!=0] + sz) ^ sz;
* add a new trellis node, but we do need to update the costs.
*/
else {
- band = vp8_coef_bands_16x16[i + 1];
+ band = vp9_coef_bands_16x16[i + 1];
t0 = tokens[next][0].token;
t1 = tokens[next][1].token;
/* Update the cost of each path if we're past the EOB token. */
}
/* Now pick the best path through the whole trellis. */
- band = vp8_coef_bands_16x16[i + 1];
+ band = vp9_coef_bands_16x16[i + 1];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
x = tokens[i][best].qc;
if (x)
final_eob = i;
- rc = vp8_default_zig_zag1d_16x16[i];
+ rc = vp9_default_zig_zag1d_16x16[i];
qcoeff_ptr[rc] = x;
dqcoeff_ptr[rc] = (x * dequant_ptr[rc!=0]);
c = vp9_get_mv_class(z, &o);
- vp8_write_token(bc, vp8_mv_class_tree, mvcomp->classes,
- vp8_mv_class_encodings + c);
+ vp8_write_token(bc, vp9_mv_class_tree, mvcomp->classes,
+ vp9_mv_class_encodings + c);
d = (o >> 3); /* int mv data */
if (c == MV_CLASS_0) {
- vp8_write_token(bc, vp8_mv_class0_tree, mvcomp->class0,
- vp8_mv_class0_encodings + d);
+ vp8_write_token(bc, vp9_mv_class0_tree, mvcomp->class0,
+ vp9_mv_class0_encodings + d);
} else {
int i, b;
b = c + CLASS0_BITS - 1; /* number of bits */
/* Code the fractional pel bits */
if (c == MV_CLASS_0) {
- vp8_write_token(bc, vp8_mv_fp_tree, mvcomp->class0_fp[d],
- vp8_mv_fp_encodings + f);
+ vp8_write_token(bc, vp9_mv_fp_tree, mvcomp->class0_fp[d],
+ vp9_mv_fp_encodings + f);
} else {
- vp8_write_token(bc, vp8_mv_fp_tree, mvcomp->fp,
- vp8_mv_fp_encodings + f);
+ vp8_write_token(bc, vp9_mv_fp_tree, mvcomp->fp,
+ vp9_mv_fp_encodings + f);
}
/* Code the high precision bit */
if (usehp) {
sign_cost[0] = vp8_cost_zero(mvcomp->sign);
sign_cost[1] = vp8_cost_one(mvcomp->sign);
- vp9_cost_tokens(class_cost, mvcomp->classes, vp8_mv_class_tree);
- vp9_cost_tokens(class0_cost, mvcomp->class0, vp8_mv_class0_tree);
+ vp9_cost_tokens(class_cost, mvcomp->classes, vp9_mv_class_tree);
+ vp9_cost_tokens(class0_cost, mvcomp->class0, vp9_mv_class0_tree);
for (i = 0; i < MV_OFFSET_BITS; ++i) {
bits_cost[i][0] = vp8_cost_zero(mvcomp->bits[i]);
bits_cost[i][1] = vp8_cost_one(mvcomp->bits[i]);
}
for (i = 0; i < CLASS0_SIZE; ++i)
- vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp8_mv_fp_tree);
- vp9_cost_tokens(fp_cost, mvcomp->fp, vp8_mv_fp_tree);
+ vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp9_mv_fp_tree);
+ vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree);
if (usehp) {
class0_hp_cost[0] = vp8_cost_zero(mvcomp->class0_hp);
void vp9_encode_nmv(vp8_writer* const bc, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx) {
MV_JOINT_TYPE j = vp9_get_mv_joint(*mv);
- vp8_write_token(bc, vp8_mv_joint_tree, mvctx->joints,
- vp8_mv_joint_encodings + j);
+ vp8_write_token(bc, vp9_mv_joint_tree, mvctx->joints,
+ vp9_mv_joint_encodings + j);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
encode_nmv_component(bc, mv->row, ref->col, &mvctx->comps[0]);
}
int mvc_flag_v,
int mvc_flag_h) {
vp8_clear_system_state();
- vp9_cost_tokens(mvjoint, mvctx->joints, vp8_mv_joint_tree);
+ vp9_cost_tokens(mvjoint, mvctx->joints, vp9_mv_joint_tree);
if (mvc_flag_v)
build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp);
if (mvc_flag_h)
void vp9_init_mode_costs(VP8_COMP *c) {
VP8_COMMON *x = &c->common;
- const vp8_tree_p T = vp8_bmode_tree;
+ const vp8_tree_p T = vp9_bmode_tree;
int i, j;
for (i = 0; i < VP8_BINTRAMODES; i++) {
vp9_cost_tokens((int *)c->mb.inter_bmode_costs, x->fc.bmode_prob, T);
vp9_cost_tokens((int *)c->mb.inter_bmode_costs,
- x->fc.sub_mv_ref_prob[0], vp8_sub_mv_ref_tree);
+ x->fc.sub_mv_ref_prob[0], vp9_sub_mv_ref_tree);
- vp9_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree);
+ vp9_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp9_ymode_tree);
vp9_cost_tokens(c->mb.mbmode_cost[0],
x->kf_ymode_prob[c->common.kf_ymode_probs_index],
- vp8_kf_ymode_tree);
+ vp9_kf_ymode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[1],
- x->fc.uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree);
+ x->fc.uv_mode_prob[VP8_YMODES - 1], vp9_uv_mode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[0],
- x->kf_uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree);
+ x->kf_uv_mode_prob[VP8_YMODES - 1], vp9_uv_mode_tree);
vp9_cost_tokens(c->mb.i8x8_mode_costs,
- x->fc.i8x8_mode_prob, vp8_i8x8_mode_tree);
+ x->fc.i8x8_mode_prob, vp9_i8x8_mode_tree);
for (i = 0; i <= VP8_SWITCHABLE_FILTERS; ++i)
vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i],
x->fc.switchable_interp_prob[i],
- vp8_switchable_interp_tree);
+ vp9_switchable_interp_tree);
}
extern void vp9_init_quantizer(VP8_COMP *cpi);
-int vp8cx_base_skip_false_prob[QINDEX_RANGE][3];
+static int base_skip_false_prob[QINDEX_RANGE][3];
// Tables relating active max Q to active min Q
static int kf_low_motion_minq[QINDEX_RANGE];
skip_prob = 1;
else if (skip_prob > 255)
skip_prob = 255;
- vp8cx_base_skip_false_prob[i][1] = skip_prob;
+ base_skip_false_prob[i][1] = skip_prob;
skip_prob = t * 0.75;
if (skip_prob < 1)
skip_prob = 1;
else if (skip_prob > 255)
skip_prob = 255;
- vp8cx_base_skip_false_prob[i][2] = skip_prob;
+ base_skip_false_prob[i][2] = skip_prob;
skip_prob = t * 1.25;
if (skip_prob < 1)
skip_prob = 1;
else if (skip_prob > 255)
skip_prob = 255;
- vp8cx_base_skip_false_prob[i][0] = skip_prob;
+ base_skip_false_prob[i][0] = skip_prob;
}
}
init_config((VP8_PTR)cpi, oxcf);
- memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
+ memcpy(cpi->base_skip_false_prob, base_skip_false_prob, sizeof(base_skip_false_prob));
cpi->common.current_video_frame = 0;
cpi->kf_overspend_bits = 0;
cpi->kf_bitrate_adjustment = 0;
/* Mostly one filter is used. So set the filter at frame level */
for (i = 0; i < VP8_SWITCHABLE_FILTERS; ++i) {
if (count[i]) {
- cm->mcomp_filter_type = vp8_switchable_interp[i];
+ cm->mcomp_filter_type = vp9_switchable_interp[i];
Loop = TRUE; /* Make sure to loop since the filter changed */
break;
}
switch (tx_type) {
case ADST_DCT :
- pt_scan = vp8_row_scan;
+ pt_scan = vp9_row_scan;
break;
case DCT_ADST :
- pt_scan = vp8_col_scan;
+ pt_scan = vp9_col_scan;
break;
default :
- pt_scan = vp8_default_zig_zag1d;
+ pt_scan = vp9_default_zig_zag1d;
break;
}
eob = -1;
for (i = 0; i < b->eob_max_offset; i++) {
- rc = vp8_default_zig_zag1d[i];
+ rc = vp9_default_zig_zag1d[i];
z = coeff_ptr[rc];
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
eob = -1;
for (i = 0; i < b->eob_max_offset_8x8; i++) {
- rc = vp8_default_zig_zag1d[i];
+ rc = vp9_default_zig_zag1d[i];
z = coeff_ptr[rc];
zbin_boost_ptr = &b->zrun_zbin_boost[zbin_zrun_index];
eob = -1;
for (i = 0; i < b->eob_max_offset_8x8; i++) {
- rc = vp8_default_zig_zag1d_8x8[i];
+ rc = vp9_default_zig_zag1d_8x8[i];
z = coeff_ptr[rc];
zbin = (zbin_ptr[rc != 0] + *zbin_boost_ptr + zbin_oq_value);
eob = -1;
for (i = 0; i < b->eob_max_offset_16x16; i++) {
- rc = vp8_default_zig_zag1d_16x16[i];
+ rc = vp9_default_zig_zag1d_16x16[i];
z = coeff_ptr[rc];
zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value);
// all the 4x4 ac values =;
for (i = 1; i < 16; i++) {
- int rc = vp8_default_zig_zag1d[i];
+ int rc = vp9_default_zig_zag1d[i];
quant_val = vp9_ac_yquant(Q);
invert_quant(cpi->Y1quant[Q] + rc,
// This needs cleaning up for 8x8 especially if we are to add
// support for non flat Q matices
for (i = 1; i < 64; i++) {
- int rc = vp8_default_zig_zag1d_8x8[i];
+ int rc = vp9_default_zig_zag1d_8x8[i];
quant_val = vp9_ac_yquant(Q);
cpi->Y1zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
// 16x16 structures. Same comment above applies.
for (i = 1; i < 256; i++) {
- int rc = vp8_default_zig_zag1d_16x16[i];
+ int rc = vp9_default_zig_zag1d_16x16[i];
quant_val = vp9_ac_yquant(Q);
cpi->Y1zbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
#define MIN_BPB_FACTOR 0.005
#define MAX_BPB_FACTOR 50
-extern const MODE_DEFINITION vp8_mode_order[MAX_MODES];
-
-
#ifdef MODE_STATS
extern unsigned int y_modes[VP8_YMODES];
extern unsigned int uv_modes[VP8_UV_MODES];
};
#if CONFIG_PRED_FILTER
-const MODE_DEFINITION vp8_mode_order[MAX_MODES] = {
+const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{ZEROMV, LAST_FRAME, 0, 0},
{ZEROMV, LAST_FRAME, 0, 1},
{DC_PRED, INTRA_FRAME, 0, 0},
{SPLITMV, GOLDEN_FRAME, ALTREF_FRAME, 0}
};
#else
-const MODE_DEFINITION vp8_mode_order[MAX_MODES] = {
+const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{ZEROMV, LAST_FRAME, 0},
{DC_PRED, INTRA_FRAME, 0},
if (k == 0 && ((j > 0 && i > 0) || (j > 1 && i == 0)))
vp9_cost_tokens_skip((int *)(c[i][j][k]),
p[i][j][k],
- vp8_coef_tree);
+ vp9_coef_tree);
else
vp9_cost_tokens((int *)(c[i][j][k]),
p[i][j][k],
- vp8_coef_tree);
+ vp9_coef_tree);
}
}
assert(eob <= 4);
for (; c < eob; c++) {
- int v = qcoeff_ptr[vp8_default_zig_zag1d[c]];
- int t = vp8_dct_value_tokens_ptr[v].Token;
- cost += mb->token_costs[TX_8X8][type][vp8_coef_bands[c]][pt][t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
+ int v = qcoeff_ptr[vp9_default_zig_zag1d[c]];
+ int t = vp9_dct_value_tokens_ptr[v].Token;
+ cost += mb->token_costs[TX_8X8][type][vp9_coef_bands[c]][pt][t];
+ cost += vp9_dct_value_cost_ptr[v];
+ pt = vp9_prev_token_class[t];
}
if (c < 4)
- cost += mb->token_costs[TX_8X8][type][vp8_coef_bands[c]]
+ cost += mb->token_costs[TX_8X8][type][vp9_coef_bands[c]]
[pt] [DCT_EOB_TOKEN];
pt = (c != !type); // is eob first coefficient;
switch (tx_size) {
case TX_4X4:
- scan = vp8_default_zig_zag1d;
- band = vp8_coef_bands;
+ scan = vp9_default_zig_zag1d;
+ band = vp9_coef_bands;
default_eob = 16;
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type_4x4(xd, b);
if (tx_type != DCT_DCT) {
switch (tx_type) {
case ADST_DCT:
- scan = vp8_row_scan;
+ scan = vp9_row_scan;
break;
case DCT_ADST:
- scan = vp8_col_scan;
+ scan = vp9_col_scan;
break;
default:
- scan = vp8_default_zig_zag1d;
+ scan = vp9_default_zig_zag1d;
break;
}
}
break;
case TX_8X8:
- scan = vp8_default_zig_zag1d_8x8;
- band = vp8_coef_bands_8x8;
+ scan = vp9_default_zig_zag1d_8x8;
+ band = vp9_coef_bands_8x8;
default_eob = 64;
if (type == PLANE_TYPE_Y_WITH_DC) {
BLOCKD *bb;
}
break;
case TX_16X16:
- scan = vp8_default_zig_zag1d_16x16;
- band = vp8_coef_bands_16x16;
+ scan = vp9_default_zig_zag1d_16x16;
+ band = vp9_coef_bands_16x16;
default_eob = 256;
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type_16x16(xd, b);
if (tx_type != DCT_DCT) {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
- int t = vp8_dct_value_tokens_ptr[v].Token;
+ int t = vp9_dct_value_tokens_ptr[v].Token;
cost += mb->hybrid_token_costs[tx_size][type][band[c]][pt][t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
+ cost += vp9_dct_value_cost_ptr[v];
+ pt = vp9_prev_token_class[t];
}
if (c < seg_eob)
cost += mb->hybrid_token_costs[tx_size][type][band[c]]
} else {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
- int t = vp8_dct_value_tokens_ptr[v].Token;
+ int t = vp9_dct_value_tokens_ptr[v].Token;
cost += mb->token_costs[tx_size][type][band[c]][pt][t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
+ cost += vp9_dct_value_cost_ptr[v];
+ pt = vp9_prev_token_class[t];
}
if (c < seg_eob)
cost += mb->token_costs[tx_size][type][band[c]]
for (b = 0; b < 16; b++)
cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_NO_DC,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
+ ta + vp9_block2above[b], tl + vp9_block2left[b],
TX_4X4);
cost += cost_coeffs(mb, xd->block + 24, PLANE_TYPE_Y2,
- ta + vp8_block2above[24], tl + vp8_block2left[24],
+ ta + vp9_block2above[24], tl + vp9_block2left[24],
TX_4X4);
return cost;
for (b = 0; b < 16; b += 4)
cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_NO_DC,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b],
TX_8X8);
cost += cost_coeffs_2x2(mb, xd->block + 24, PLANE_TYPE_Y2,
- ta + vp8_block2above[24], tl + vp8_block2left[24]);
+ ta + vp9_block2above[24], tl + vp9_block2left[24]);
return cost;
}
#if CONFIG_COMP_INTRA_PRED
& best_second_mode, allow_comp,
#endif
- bmode_costs, ta + vp8_block2above[i],
- tl + vp8_block2left[i], &r, &ry, &d);
+ bmode_costs, ta + vp9_block2above[i],
+ tl + vp9_block2left[i], &r, &ry, &d);
cost += r;
distortion += d;
// compute quantization mse of 8x8 block
distortion = vp9_block_error_c((x->block + idx)->coeff,
(xd->block + idx)->dqcoeff, 64);
- ta0 = a[vp8_block2above_8x8[idx]];
- tl0 = l[vp8_block2left_8x8[idx]];
+ ta0 = a[vp9_block2above_8x8[idx]];
+ tl0 = l[vp9_block2left_8x8[idx]];
rate_t = cost_coeffs(x, xd->block + idx, PLANE_TYPE_Y_WITH_DC,
&ta0, &tl0, TX_8X8);
distortion += vp9_block_error_c((x->block + ib + 5)->coeff,
(xd->block + ib + 5)->dqcoeff, 16);
- ta0 = a[vp8_block2above[ib]];
- ta1 = a[vp8_block2above[ib + 1]];
- tl0 = l[vp8_block2left[ib]];
- tl1 = l[vp8_block2left[ib + 4]];
+ ta0 = a[vp9_block2above[ib]];
+ ta1 = a[vp9_block2above[ib + 1]];
+ tl0 = l[vp9_block2left[ib]];
+ tl1 = l[vp9_block2left[ib + 4]];
rate_t = cost_coeffs(x, xd->block + ib, PLANE_TYPE_Y_WITH_DC,
&ta0, &tl0, TX_4X4);
rate_t += cost_coeffs(x, xd->block + ib + 1, PLANE_TYPE_Y_WITH_DC,
vp9_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
- a[vp8_block2above_8x8[idx]] = besta0;
- a[vp8_block2above_8x8[idx] + 1] = besta1;
- l[vp8_block2left_8x8[idx]] = bestl0;
- l[vp8_block2left_8x8[idx] + 1] = bestl1;
+ a[vp9_block2above_8x8[idx]] = besta0;
+ a[vp9_block2above_8x8[idx] + 1] = besta1;
+ l[vp9_block2left_8x8[idx]] = bestl0;
+ l[vp9_block2left_8x8[idx] + 1] = bestl1;
} else {
- a[vp8_block2above[ib]] = besta0;
- a[vp8_block2above[ib + 1]] = besta1;
- l[vp8_block2left[ib]] = bestl0;
- l[vp8_block2left[ib + 4]] = bestl1;
+ a[vp9_block2above[ib]] = besta0;
+ a[vp9_block2above[ib + 1]] = besta1;
+ l[vp9_block2left[ib]] = bestl0;
+ l[vp9_block2left[ib + 4]] = bestl1;
}
return best_rd;
#endif
int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);
- ib = vp8_i8x8_block[i];
+ ib = vp9_i8x8_block[i];
total_rd += rd_pick_intra8x8block(
cpi, mb, ib, &best_mode,
#if CONFIG_COMP_INTRA_PRED
for (b = 16; b < 24; b++)
cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
+ ta + vp9_block2above[b], tl + vp9_block2left[b],
TX_4X4);
return cost;
for (b = 16; b < 24; b += 4)
cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b],
- tl + vp8_block2left_8x8[b], TX_8X8);
+ ta + vp9_block2above_8x8[b],
+ tl + vp9_block2left_8x8[b], TX_8X8);
return cost;
}
vp8_prob p [VP8_MVREFS - 1];
assert(NEARESTMV <= m && m <= SPLITMV);
vp9_mv_ref_probs(pc, p, near_mv_ref_ct);
- return vp8_cost_token(vp8_mv_ref_tree, p,
- vp8_mv_ref_encoding_array - NEARESTMV + m);
+ return vp8_cost_token(vp9_mv_ref_tree, p,
+ vp9_mv_ref_encoding_array - NEARESTMV + m);
} else
return 0;
}
thisdistortion = vp9_block_error(be->coeff, bd->dqcoeff, 16);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[i],
- tl + vp8_block2left[i], TX_4X4);
+ ta + vp9_block2above[i],
+ tl + vp9_block2left[i], TX_4X4);
}
}
*distortion >>= 2;
*distortion = 0;
*labelyrate = 0;
for (i = 0; i < 4; i++) {
- int ib = vp8_i8x8_block[i];
+ int ib = vp9_i8x8_block[i];
if (labels[ib] == which_label) {
int idx = (ib & 8) + ((ib & 2) << 1);
thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
otherdist += thisdistortion;
othercost += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
- tacp + vp8_block2above_8x8[idx],
- tlcp + vp8_block2left_8x8[idx], TX_8X8);
+ tacp + vp9_block2above_8x8[idx],
+ tlcp + vp9_block2left_8x8[idx], TX_8X8);
}
for (j = 0; j < 4; j += 2) {
bd = &xd->block[ib + iblock[j]];
thisdistortion = vp9_block_error_c(be->coeff, bd->dqcoeff, 32);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[ib + iblock[j]],
- tl + vp8_block2left[ib + iblock[j]],
+ ta + vp9_block2above[ib + iblock[j]],
+ tl + vp9_block2left[ib + iblock[j]],
TX_4X4);
*labelyrate += cost_coeffs(x, bd + 1, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[ib + iblock[j] + 1],
- tl + vp8_block2left[ib + iblock[j]],
+ ta + vp9_block2above[ib + iblock[j] + 1],
+ tl + vp9_block2left[ib + iblock[j]],
TX_4X4);
}
} else /* 8x8 */ {
thisdistortion = vp9_block_error_c(be3->coeff, bd3->dqcoeff, 32);
otherdist += thisdistortion;
othercost += cost_coeffs(x, bd3, PLANE_TYPE_Y_WITH_DC,
- tacp + vp8_block2above[ib + iblock[j]],
- tlcp + vp8_block2left[ib + iblock[j]],
+ tacp + vp9_block2above[ib + iblock[j]],
+ tlcp + vp9_block2left[ib + iblock[j]],
TX_4X4);
othercost += cost_coeffs(x, bd3 + 1, PLANE_TYPE_Y_WITH_DC,
- tacp + vp8_block2above[ib + iblock[j] + 1],
- tlcp + vp8_block2left[ib + iblock[j]],
+ tacp + vp9_block2above[ib + iblock[j] + 1],
+ tlcp + vp9_block2left[ib + iblock[j]],
TX_4X4);
}
}
thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above_8x8[idx],
- tl + vp8_block2left_8x8[idx], TX_8X8);
+ ta + vp9_block2above_8x8[idx],
+ tl + vp9_block2left_8x8[idx], TX_8X8);
}
}
}
tl_b = (ENTROPY_CONTEXT *)&t_left_b;
v_fn_ptr = &cpi->fn_ptr[segmentation];
- labels = vp8_mbsplits[segmentation];
- label_count = vp8_mbsplit_count[segmentation];
+ labels = vp9_mbsplits[segmentation];
+ label_count = vp9_mbsplit_count[segmentation];
// 64 makes this threshold really big effectively
// making it so that we very rarely check mvs on
label_mv_thresh = 1 * bsi->mvthresh / label_count;
// Segmentation method overheads
- rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs,
- vp8_mbsplit_encodings + segmentation);
+ rate = vp8_cost_token(vp9_mbsplit_tree, vp9_mbsplit_probs,
+ vp9_mbsplit_encodings + segmentation);
rate += vp9_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts);
this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
br += rate;
mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
// find first label
- n = vp8_mbsplit_offset[segmentation][i];
+ n = vp9_mbsplit_offset[segmentation][i];
c = &x->block[n];
e = &x->e_mbd.block[n];
best_eobs[j] = x->e_mbd.block[j].eob;
} else {
for (j = 0; j < 4; j++) {
- int ib = vp8_i8x8_block[j], idx = j * 4;
+ int ib = vp9_i8x8_block[j], idx = j * 4;
if (labels[ib] == i)
best_eobs[idx] = x->e_mbd.block[idx].eob;
/* 16 = n_blocks */
int_mv seg_mvs[16][MAX_REF_FRAMES - 1],
int64_t txfm_cache[NB_TXFM_MODES]) {
- int i, n, c = vp8_mbsplit_count[segmentation];
+ int i, n, c = vp9_mbsplit_count[segmentation];
if (segmentation == PARTITIONING_4X4) {
int64_t rd[16];
/* save partitions */
mbmi->txfm_size = bsi.txfm_size;
mbmi->partitioning = bsi.segment_num;
- x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
+ x->partition_info->count = vp9_mbsplit_count[bsi.segment_num];
for (i = 0; i < x->partition_info->count; i++) {
int j;
- j = vp8_mbsplit_offset[bsi.segment_num][i];
+ j = vp9_mbsplit_offset[bsi.segment_num][i];
x->partition_info->bmi[i].mode = bsi.modes[j];
x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
int i;
MACROBLOCKD *xd = &x->e_mbd;
for (i = 0; i < 4; i++) {
- int ib = vp8_i8x8_block[i];
+ int ib = vp9_i8x8_block[i];
xd->mode_info_context->bmi[ib + 0].as_mode.first = modes[0][i];
xd->mode_info_context->bmi[ib + 1].as_mode.first = modes[0][i];
xd->mode_info_context->bmi[ib + 4].as_mode.first = modes[0][i];
#if CONFIG_PRED_FILTER
// Filtered prediction:
- mbmi->pred_filter_enabled = vp8_mode_order[mode_index].pred_filter_flag;
+ mbmi->pred_filter_enabled = vp9_mode_order[mode_index].pred_filter_flag;
*rate2 += vp8_cost_bit(cpi->common.prob_pred_filter_off,
mbmi->pred_filter_enabled);
#endif
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
const int c = vp9_get_pred_context(cm, xd, PRED_SWITCHABLE_INTERP);
- const int m = vp8_switchable_interp_map[mbmi->interp_filter];
+ const int m = vp9_switchable_interp_map[mbmi->interp_filter];
*rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m];
}
rate_y = 0;
rate_uv = 0;
- this_mode = vp8_mode_order[mode_index].mode;
+ this_mode = vp9_mode_order[mode_index].mode;
mbmi->mode = this_mode;
mbmi->uv_mode = DC_PRED;
- mbmi->ref_frame = vp8_mode_order[mode_index].ref_frame;
- mbmi->second_ref_frame = vp8_mode_order[mode_index].second_ref_frame;
+ mbmi->ref_frame = vp9_mode_order[mode_index].ref_frame;
+ mbmi->second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
#if CONFIG_PRED_FILTER
mbmi->pred_filter_enabled = 0;
#endif
if (cpi->common.mcomp_filter_type == SWITCHABLE &&
this_mode >= NEARESTMV && this_mode <= SPLITMV) {
mbmi->interp_filter =
- vp8_switchable_interp[switchable_filter_index++];
+ vp9_switchable_interp[switchable_filter_index++];
if (switchable_filter_index == VP8_SWITCHABLE_FILTERS)
switchable_filter_index = 0;
} else {
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
if (cpi->zbin_mode_boost_enabled) {
- if (vp8_mode_order[mode_index].ref_frame == INTRA_FRAME)
+ if (vp9_mode_order[mode_index].ref_frame == INTRA_FRAME)
cpi->zbin_mode_boost = 0;
else {
- if (vp8_mode_order[mode_index].mode == ZEROMV) {
- if (vp8_mode_order[mode_index].ref_frame != LAST_FRAME)
+ if (vp9_mode_order[mode_index].mode == ZEROMV) {
+ if (vp9_mode_order[mode_index].ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
- } else if (vp8_mode_order[mode_index].mode == SPLITMV)
+ } else if (vp9_mode_order[mode_index].mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
if (cpi->common.mcomp_filter_type == SWITCHABLE)
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
[vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
- [vp8_switchable_interp_map[mbmi->interp_filter]];
+ [vp9_switchable_interp_map[mbmi->interp_filter]];
// If even the 'Y' rd value of split is higher than best so far
// then dont bother looking at UV
if (tmp_rd < best_yrd) {
best_mbmode.mode <= SPLITMV) {
++cpi->switchable_interp_count
[vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
- [vp8_switchable_interp_map[best_mbmode.interp_filter]];
+ [vp9_switchable_interp_map[best_mbmode.interp_filter]];
}
// Reduce the activation RD thresholds for the best choice mode
continue;
}
- this_mode = vp8_mode_order[mode_index].mode;
- ref_frame = vp8_mode_order[mode_index].ref_frame;
+ this_mode = vp9_mode_order[mode_index].mode;
+ ref_frame = vp9_mode_order[mode_index].ref_frame;
mbmi->ref_frame = ref_frame;
- comp_pred = vp8_mode_order[mode_index].second_ref_frame != INTRA_FRAME;
+ comp_pred = vp9_mode_order[mode_index].second_ref_frame != INTRA_FRAME;
mbmi->mode = this_mode;
mbmi->uv_mode = DC_PRED;
#if CONFIG_COMP_INTRA_PRED
void vp9_fix_contexts(MACROBLOCKD *xd);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
-const TOKENVALUE *vp8_dct_value_tokens_ptr;
+const TOKENVALUE *vp9_dct_value_tokens_ptr;
static int dct_value_cost[DCT_MAX_VALUE * 2];
-const int *vp8_dct_value_cost_ptr;
+const int *vp9_dct_value_cost_ptr;
static void fill_value_tokens() {
TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE;
- vp8_extra_bit_struct *const e = vp8_extra_bits;
+ vp8_extra_bit_struct *const e = vp9_extra_bits;
int i = -DCT_MAX_VALUE;
int sign = 1;
// initialize the cost for extra bits for all possible coefficient value.
{
int cost = 0;
- vp8_extra_bit_struct *p = vp8_extra_bits + t[i].Token;
+ vp8_extra_bit_struct *p = vp9_extra_bits + t[i].Token;
if (p->base_val) {
const int extra = t[i].Extra;
} while (++i < DCT_MAX_VALUE);
- vp8_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
- vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
+ vp9_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
+ vp9_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
static void tokenize_b(VP8_COMP *cpi,
default:
case TX_4X4:
seg_eob = 16;
- bands = vp8_coef_bands;
- scan = vp8_default_zig_zag1d;
+ bands = vp9_coef_bands;
+ scan = vp9_default_zig_zag1d;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts;
probs = cpi->common.fc.hybrid_coef_probs;
if (tx_type == ADST_DCT) {
- scan = vp8_row_scan;
+ scan = vp9_row_scan;
} else if (tx_type == DCT_ADST) {
- scan = vp8_col_scan;
+ scan = vp9_col_scan;
}
} else {
counts = cpi->coef_counts;
case TX_8X8:
if (type == PLANE_TYPE_Y2) {
seg_eob = 4;
- bands = vp8_coef_bands;
- scan = vp8_default_zig_zag1d;
+ bands = vp9_coef_bands;
+ scan = vp9_default_zig_zag1d;
} else {
seg_eob = 64;
- bands = vp8_coef_bands_8x8;
- scan = vp8_default_zig_zag1d_8x8;
+ bands = vp9_coef_bands_8x8;
+ scan = vp9_default_zig_zag1d_8x8;
}
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts_8x8;
break;
case TX_16X16:
seg_eob = 256;
- bands = vp8_coef_bands_16x16;
- scan = vp8_default_zig_zag1d_16x16;
+ bands = vp9_coef_bands_16x16;
+ scan = vp9_default_zig_zag1d_16x16;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts_16x16;
probs = cpi->common.fc.hybrid_coef_probs_16x16;
assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE);
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Extra = vp9_dct_value_tokens_ptr[v].Extra;
+ token = vp9_dct_value_tokens_ptr[v].Token;
} else {
token = DCT_EOB_TOKEN;
}
t->context_tree = probs[type][band][pt];
t->skip_eob_node = (pt == 0) && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
(band > 1 && type == PLANE_TYPE_Y_NO_DC));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
+ assert(vp9_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
++counts[type][band][pt][token];
}
- pt = vp8_prev_token_class[token];
+ pt = vp9_prev_token_class[token];
++t;
} while (c < eob && ++c < seg_eob);
if (has_y2_block) {
if (tx_size == TX_8X8) {
tokenize_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2,
- A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24],
+ A + vp9_block2above_8x8[24], L + vp9_block2left_8x8[24],
TX_8X8, dry_run);
} else {
tokenize_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2,
- A + vp8_block2above[24], L + vp8_block2left[24],
+ A + vp9_block2above[24], L + vp9_block2left[24],
TX_4X4, dry_run);
}
for (b = 16; b < 24; b += 4) {
tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
} else if (tx_size == TX_8X8) {
for (b = 0; b < 16; b += 4) {
tokenize_b(cpi, xd, xd->block + b, t, plane_type,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV) {
for (b = 16; b < 24; b++) {
tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above[b], L + vp8_block2left[b],
+ A + vp9_block2above[b], L + vp9_block2left[b],
TX_4X4, dry_run);
}
} else {
for (b = 16; b < 24; b += 4) {
tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
}
} else {
for (b = 0; b < 16; b++) {
tokenize_b(cpi, xd, xd->block + b, t, plane_type,
- A + vp8_block2above[b], L + vp8_block2left[b],
+ A + vp9_block2above[b], L + vp9_block2left[b],
TX_4X4, dry_run);
}
for (b = 16; b < 24; b++) {
tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above[b], L + vp8_block2left[b],
+ A + vp9_block2above[b], L + vp9_block2left[b],
TX_4X4, dry_run);
}
}
for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
coef_counts[t] = context_counters [type] [band] [pt] [t];
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, coef_counts, 256, 1);
fprintf(f, "%s\n {", Comma(pt));
for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
coef_counts[t] = context_counters_8x8[type] [band] [pt] [t];
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, coef_counts, 256, 1);
fprintf(f, "%s\n {", Comma(pt));
for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
coef_counts[t] = context_counters_16x16[type] [band] [pt] [t];
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, coef_counts, 256, 1);
fprintf(f, "%s\n {", Comma(pt));
switch (tx_size) {
default:
case TX_4X4:
- bands = vp8_coef_bands;
+ bands = vp9_coef_bands;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts;
probs = cpi->common.fc.hybrid_coef_probs;
}
break;
case TX_8X8:
- bands = vp8_coef_bands_8x8;
+ bands = vp9_coef_bands_8x8;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts_8x8;
probs = cpi->common.fc.hybrid_coef_probs_8x8;
}
break;
case TX_16X16:
- bands = vp8_coef_bands_16x16;
+ bands = vp9_coef_bands_16x16;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts_16x16;
probs = cpi->common.fc.hybrid_coef_probs_16x16;
if (has_y2_block) {
stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2,
- A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24],
+ A + vp9_block2above_8x8[24], L + vp9_block2left_8x8[24],
TX_8X8, dry_run);
plane_type = PLANE_TYPE_Y_NO_DC;
} else {
}
for (b = 0; b < 16; b += 4) {
- stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b], TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp9_block2above_8x8[b],
+ L + vp9_block2left_8x8[b], TX_8X8, dry_run);
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
for (b = 16; b < 24; b += 4) {
stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
}
A[1] = A[2] = A[3] = A[0];
L[1] = L[2] = L[3] = L[0];
for (b = 16; b < 24; b += 4) {
- stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b],
- L + vp8_block2above_8x8[b], TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b],
+ L + vp9_block2above_8x8[b], TX_8X8, dry_run);
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
xd->mode_info_context->mbmi.mode != SPLITMV);
if (has_y2_block) {
- stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, A + vp8_block2above[24],
- L + vp8_block2left[24], TX_4X4, dry_run);
+ stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, A + vp9_block2above[24],
+ L + vp9_block2left[24], TX_4X4, dry_run);
plane_type = PLANE_TYPE_Y_NO_DC;
} else {
plane_type = PLANE_TYPE_Y_WITH_DC;
}
for (b = 0; b < 16; b++)
- stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp8_block2above[b],
- L + vp8_block2left[b], TX_4X4, dry_run);
+ stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp9_block2above[b],
+ L + vp9_block2left[b], TX_4X4, dry_run);
for (b = 16; b < 24; b++)
- stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b],
- L + vp8_block2left[b], TX_4X4, dry_run);
+ stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b],
+ L + vp9_block2left[b], TX_4X4, dry_run);
}
static void vp9_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
for (b = 0; b < 16; b += 4) {
stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_Y_WITH_DC,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
for (b = 16; b < 24; b++)
- stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b],
- L + vp8_block2left[b], TX_4X4, dry_run);
+ stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b],
+ L + vp9_block2left[b], TX_4X4, dry_run);
}
void vp9_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
#endif
-extern const int *vp8_dct_value_cost_ptr;
+extern const int *vp9_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
* improve cache locality, since it's needed for costing when the rest of the
* fields are not.
*/
-extern const TOKENVALUE *vp8_dct_value_tokens_ptr;
+extern const TOKENVALUE *vp9_dct_value_tokens_ptr;
#endif /* tokenize_h */
/* Approximate length of an encoded bool in 256ths of a bit at given prob */
-#define vp8_cost_zero( x) ( vp8_prob_cost[x])
+#define vp8_cost_zero( x) ( vp9_prob_cost[x])
#define vp8_cost_one( x) vp8_cost_zero( vp8_complement(x))
#define vp8_cost_bit( x, b) vp8_cost_zero( (b)? vp8_complement(x) : (x) )
const short *HFilter, *VFilter;
unsigned short FData3[5 * 4]; // Temp data bufffer used in filtering
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
// First filter 1d Horizontal
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
unsigned char temp2[36 * 32];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 33, 32, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 32, 32, 32, 32, VFilter);
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line,
1, 17, 8, HFilter);
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3,
src_pixels_per_line, 1, 3, 16, HFilter);
unsigned char temp2[2 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3,
src_pixels_per_line, 1, 17, 2, HFilter);
%endif
%endif
- mov rdx, [rdi + vp8_block_coeff] ; coeff_ptr
- mov rcx, [rdi + vp8_block_zbin] ; zbin_ptr
- movd xmm7, [rdi + vp8_block_zbin_extra] ; zbin_oq_value
+ mov rdx, [rdi + vp9_block_coeff] ; coeff_ptr
+ mov rcx, [rdi + vp9_block_zbin] ; zbin_ptr
+ movd xmm7, [rdi + vp9_block_zbin_extra] ; zbin_oq_value
; z
movdqa xmm0, [rdx]
movdqa xmm4, [rdx + 16]
- mov rdx, [rdi + vp8_block_round] ; round_ptr
+ mov rdx, [rdi + vp9_block_round] ; round_ptr
pshuflw xmm7, xmm7, 0
punpcklwd xmm7, xmm7 ; duplicated zbin_oq_value
movdqa xmm2, [rcx]
movdqa xmm3, [rcx + 16]
- mov rcx, [rdi + vp8_block_quant] ; quant_ptr
+ mov rcx, [rdi + vp9_block_quant] ; quant_ptr
; *zbin_ptr + zbin_oq_value
paddw xmm2, xmm7
movdqa [rsp + qcoeff], xmm6
movdqa [rsp + qcoeff + 16], xmm6
- mov rdx, [rdi + vp8_block_zrun_zbin_boost] ; zbin_boost_ptr
- mov rax, [rdi + vp8_block_quant_shift] ; quant_shift_ptr
+ mov rdx, [rdi + vp9_block_zrun_zbin_boost] ; zbin_boost_ptr
+ mov rax, [rdi + vp9_block_quant_shift] ; quant_shift_ptr
mov [rsp + zrun_zbin_boost], rdx
%macro ZIGZAG_LOOP 1
mov rdx, [rsp + zrun_zbin_boost] ; reset to b->zrun_zbin_boost
.rq_zigzag_loop_%1:
%endmacro
-; in vp8_default_zig_zag1d order: see vp8/common/entropy.c
+; in vp9_default_zig_zag1d order: see vp8/common/entropy.c
ZIGZAG_LOOP 0
ZIGZAG_LOOP 1
ZIGZAG_LOOP 4
movdqa xmm2, [rsp + qcoeff]
movdqa xmm3, [rsp + qcoeff + 16]
- mov rcx, [rsi + vp8_blockd_dequant] ; dequant_ptr
- mov rdi, [rsi + vp8_blockd_dqcoeff] ; dqcoeff_ptr
+ mov rcx, [rsi + vp9_blockd_dequant] ; dequant_ptr
+ mov rdi, [rsi + vp9_blockd_dqcoeff] ; dqcoeff_ptr
; y ^ sz
pxor xmm2, xmm0
movdqa xmm0, [rcx]
movdqa xmm1, [rcx + 16]
- mov rcx, [rsi + vp8_blockd_qcoeff] ; qcoeff_ptr
+ mov rcx, [rsi + vp9_blockd_qcoeff] ; qcoeff_ptr
pmullw xmm0, xmm2
pmullw xmm1, xmm3
pmaxsw xmm2, xmm3
movd eax, xmm2
and eax, 0xff
- mov [rsi + vp8_blockd_eob], eax
+ mov [rsi + vp9_blockd_eob], eax
; begin epilog
add rsp, stack_size
%endif
%endif
- mov rax, [rdi + vp8_block_coeff]
- mov rcx, [rdi + vp8_block_round]
- mov rdx, [rdi + vp8_block_quant_fast]
+ mov rax, [rdi + vp9_block_coeff]
+ mov rcx, [rdi + vp9_block_round]
+ mov rdx, [rdi + vp9_block_quant_fast]
; z = coeff
movdqa xmm0, [rax]
paddw xmm1, [rcx]
paddw xmm5, [rcx + 16]
- mov rax, [rsi + vp8_blockd_qcoeff]
- mov rcx, [rsi + vp8_blockd_dequant]
- mov rdi, [rsi + vp8_blockd_dqcoeff]
+ mov rax, [rsi + vp9_blockd_qcoeff]
+ mov rcx, [rsi + vp9_blockd_dequant]
+ mov rdi, [rsi + vp9_blockd_dqcoeff]
; y = x * quant >> 16
pmulhw xmm1, [rdx]
movd eax, xmm1
and eax, 0xff
- mov [rsi + vp8_blockd_eob], eax
+ mov [rsi + vp9_blockd_eob], eax
; begin epilog
%if ABI_IS_32BIT
%endif
%endif
- mov rax, [rdi + vp8_block_coeff]
- mov rcx, [rdi + vp8_block_zbin]
- mov rdx, [rdi + vp8_block_round]
- movd xmm7, [rdi + vp8_block_zbin_extra]
+ mov rax, [rdi + vp9_block_coeff]
+ mov rcx, [rdi + vp9_block_zbin]
+ mov rdx, [rdi + vp9_block_round]
+ movd xmm7, [rdi + vp9_block_zbin_extra]
; z
movdqa xmm0, [rax]
movdqa xmm4, [rdx]
movdqa xmm5, [rdx + 16]
- mov rax, [rdi + vp8_block_quant_shift]
- mov rcx, [rdi + vp8_block_quant]
- mov rdx, [rdi + vp8_block_zrun_zbin_boost]
+ mov rax, [rdi + vp9_block_quant_shift]
+ mov rcx, [rdi + vp9_block_quant]
+ mov rdx, [rdi + vp9_block_zrun_zbin_boost]
; x + round
paddw xmm2, xmm4
mov rdx, rax ; reset to b->zrun_zbin_boost
.rq_zigzag_loop_%1:
%endmacro
-; in vp8_default_zig_zag1d order: see vp8/common/entropy.c
+; in vp9_default_zig_zag1d order: see vp8/common/entropy.c
ZIGZAG_LOOP 0, 0, xmm2, xmm6, xmm4
ZIGZAG_LOOP 1, 1, xmm2, xmm6, xmm4
ZIGZAG_LOOP 4, 4, xmm2, xmm6, xmm4
ZIGZAG_LOOP 14, 6, xmm3, xmm7, xmm8
ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
- mov rcx, [rsi + vp8_blockd_dequant]
- mov rdi, [rsi + vp8_blockd_dqcoeff]
+ mov rcx, [rsi + vp9_blockd_dequant]
+ mov rdi, [rsi + vp9_blockd_dqcoeff]
%if ABI_IS_32BIT
movdqa xmm4, [rsp + qcoeff]
movdqa xmm0, [rcx]
movdqa xmm1, [rcx + 16]
- mov rcx, [rsi + vp8_blockd_qcoeff]
+ mov rcx, [rsi + vp9_blockd_qcoeff]
pmullw xmm0, xmm4
pmullw xmm1, xmm5
add eax, 1
and eax, edi
- mov [rsi + vp8_blockd_eob], eax
+ mov [rsi + vp9_blockd_eob], eax
; begin epilog
%if ABI_IS_32BIT
SECTION_RODATA
align 16
-; vp8/common/entropy.c: vp8_default_zig_zag1d
+; vp8/common/entropy.c: vp9_default_zig_zag1d
zig_zag1d:
db 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
%endif
%endif
- mov rax, [rdi + vp8_block_coeff]
- mov rcx, [rdi + vp8_block_round]
- mov rdx, [rdi + vp8_block_quant_fast]
+ mov rax, [rdi + vp9_block_coeff]
+ mov rcx, [rdi + vp9_block_round]
+ mov rdx, [rdi + vp9_block_quant_fast]
; coeff
movdqa xmm0, [rax]
pmulhw xmm1, [rdx]
pmulhw xmm5, [rdx + 16]
- mov rax, [rsi + vp8_blockd_qcoeff]
- mov rdi, [rsi + vp8_blockd_dequant]
- mov rcx, [rsi + vp8_blockd_dqcoeff]
+ mov rax, [rsi + vp9_blockd_qcoeff]
+ mov rdi, [rsi + vp9_blockd_dequant]
+ mov rcx, [rsi + vp9_blockd_dqcoeff]
pxor xmm1, xmm0
pxor xmm5, xmm4
add eax, 1
and eax, edi ;if the bit mask was all zero,
;then eob = 0
- mov [rsi + vp8_blockd_eob], eax
+ mov [rsi + vp9_blockd_eob], eax
; begin epilog
%if ABI_IS_32BIT
// the mmx function that does the bilinear filtering and var calculation //
// int one pass //
///////////////////////////////////////////////////////////////////////////
-DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) = {
+DECLARE_ALIGNED(16, const short, vp9_bilinear_filters_mmx[16][8]) = {
{ 128, 128, 128, 128, 0, 0, 0, 0 },
{ 120, 120, 120, 120, 8, 8, 8, 8 },
{ 112, 112, 112, 112, 16, 16, 16, 16 },
vp9_filter_block2d_bil4x4_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum, &xxsum
);
*sse = xxsum;
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum, &xxsum
);
*sse = xxsum;
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum0, &xxsum0
);
vp9_filter_block2d_bil_var_mmx(
src_ptr + 8, src_pixels_per_line,
dst_ptr + 8, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum1, &xxsum1
);
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum0, &xxsum0
);
vp9_filter_block2d_bil_var_mmx(
src_ptr + 8, src_pixels_per_line,
dst_ptr + 8, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum1, &xxsum1
);
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum, &xxsum
);
*sse = xxsum;
unsigned int *sumsquared
);
-DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[16][8]);
+DECLARE_ALIGNED(16, extern short, vp9_bilinear_filters_mmx[16][8]);
unsigned int vp9_variance4x4_wmt(
const unsigned char *src_ptr,
vp9_filter_block2d_bil4x4_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum, &xxsum
);
*sse = xxsum;
}
-vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] = {
+static vpx_codec_ctrl_fn_map_t ctf_maps[] = {
{VP8_SET_REFERENCE, vp9_set_reference},
{VP8_COPY_REFERENCE, vp9_get_reference},
{VP8_SET_POSTPROC, vp8_set_postproc},
/* vpx_codec_caps_t caps; */
vp8_init, /* vpx_codec_init_fn_t init; */
vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
- vp8_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
vp8_xma_get_mmap, /* vpx_codec_get_mmap_fn_t get_mmap; */
vp8_xma_set_mmap, /* vpx_codec_set_mmap_fn_t set_mmap; */
{
/* vpx_codec_caps_t caps; */
vp8_init, /* vpx_codec_init_fn_t init; */
vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
- vp8_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
+ ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */
vp8_xma_get_mmap, /* vpx_codec_get_mmap_fn_t get_mmap; */
vp8_xma_set_mmap, /* vpx_codec_set_mmap_fn_t set_mmap; */
{