palette
new_quant
newmvref
+ intrabc
"
CONFIG_LIST="
external_build
D207_PRED, // Directional 207 deg = 180 + 27
D63_PRED, // Directional 63 deg = round(arctan(2/1) * 180/pi)
TM_PRED, // True-motion
+#if CONFIG_INTRABC
+ NEWDV, // New displacement vector within the same frame buffer
+#endif // CONFIG_INTRABC
NEARESTMV,
NEARMV,
ZEROMV,
#endif // CONFIG_COMPOUND_MODES
}
-#define INTRA_MODES (TM_PRED + 1)
+#if CONFIG_INTRABC
+// Returns 1 iff |mode| is the intra block copy mode. NEWDV signals a new
+// displacement vector into previously reconstructed pixels of the same frame.
+static INLINE int is_intrabc_mode(PREDICTION_MODE mode) {
+ return mode == NEWDV;
+}
+// NEWDV is appended after TM_PRED in the mode enum, so it raises the intra
+// mode count by one.
+#define INTRA_MODES (NEWDV + 1) // XXX
+#else
+#define INTRA_MODES (TM_PRED + 1) // XXX
+#endif // CONFIG_INTRABC
#if CONFIG_NEWMVREF
#define INTER_MODES (1 + NEAR_FORNEWMV - NEARESTMV)
#if CONFIG_FILTERINTRA
static INLINE int is_filter_allowed(PREDICTION_MODE mode) {
+#if CONFIG_INTRABC
+ // Filter-intra is never combined with the intra block copy (NEWDV) mode.
+ return !is_intrabc_mode(mode);
+#else
  (void)mode;
  return 1;
+#endif // CONFIG_INTRABC
}
static INLINE int is_filter_enabled(TX_SIZE txsize) {
#endif // CONFIG_TX_SKIP
const vp9_prob vp9_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] = {
+#if CONFIG_INTRABC
+ { // above = dc
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91, 128 }, // left = dc
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100, 128 }, // left = v
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100, 128 }, // left = h
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94, 128 }, // left = d45
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105, 128 }, // left = d135
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151, 128 }, // left = d117
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171, 128 }, // left = d153
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53, 128 }, // left = d207
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102, 128 }, // left = d63
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119, 128 }, // left = tm
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91, 128 } // left = bc
+ }, { // above = v
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96, 128 }, // left = dc
+ { 43, 46, 168, 134, 107, 128, 69, 142, 92, 128 }, // left = v
+ { 44, 29, 68, 159, 201, 177, 50, 57, 77, 128 }, // left = h
+ { 58, 38, 76, 114, 97, 172, 78, 133, 92, 128 }, // left = d45
+ { 46, 41, 76, 140, 63, 184, 69, 112, 57, 128 }, // left = d135
+ { 38, 32, 85, 140, 46, 112, 54, 151, 133, 128 }, // left = d117
+ { 39, 27, 61, 131, 110, 175, 44, 75, 136, 128 }, // left = d153
+ { 52, 30, 74, 113, 130, 175, 51, 64, 58, 128 }, // left = d207
+ { 47, 35, 80, 100, 74, 143, 64, 163, 74, 128 }, // left = d63
+ { 36, 61, 116, 114, 128, 162, 80, 125, 82, 128 }, // left = tm
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96, 128 } // left = bc
+ }, { // above = h
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105, 128 }, // left = dc
+ { 55, 44, 68, 166, 179, 192, 57, 57, 108, 128 }, // left = v
+ { 42, 26, 11, 199, 241, 228, 23, 15, 85, 128 }, // left = h
+ { 68, 42, 19, 131, 160, 199, 55, 52, 83, 128 }, // left = d45
+ { 58, 50, 25, 139, 115, 232, 39, 52, 118, 128 }, // left = d135
+ { 50, 35, 33, 153, 104, 162, 64, 59, 131, 128 }, // left = d117
+ { 44, 24, 16, 150, 177, 202, 33, 19, 156, 128 }, // left = d153
+ { 55, 27, 12, 153, 203, 218, 26, 27, 49, 128 }, // left = d207
+ { 53, 49, 21, 110, 116, 168, 59, 80, 76, 128 }, // left = d63
+ { 38, 72, 19, 168, 203, 212, 50, 50, 107, 128 }, // left = tm
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105, 128 } // left = bc
+ }, { // above = d45
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93, 128 }, // left = dc
+ { 59, 38, 83, 112, 103, 162, 98, 136, 90, 128 }, // left = v
+ { 62, 30, 23, 158, 200, 207, 59, 57, 50, 128 }, // left = h
+ { 67, 30, 29, 84, 86, 191, 102, 91, 59, 128 }, // left = d45
+ { 60, 32, 33, 112, 71, 220, 64, 89, 104, 128 }, // left = d135
+ { 53, 26, 34, 130, 56, 149, 84, 120, 103, 128 }, // left = d117
+ { 53, 21, 23, 133, 109, 210, 56, 77, 172, 128 }, // left = d153
+ { 77, 19, 29, 112, 142, 228, 55, 66, 36, 128 }, // left = d207
+ { 61, 29, 29, 93, 97, 165, 83, 175, 162, 128 }, // left = d63
+ { 47, 47, 43, 114, 137, 181, 100, 99, 95, 128 }, // left = tm
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93, 128 } // left = bc
+ }, { // above = d135
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101, 128 }, // left = dc
+ { 53, 40, 55, 139, 69, 183, 61, 80, 110, 128 }, // left = v
+ { 40, 29, 19, 161, 180, 207, 43, 24, 91, 128 }, // left = h
+ { 60, 34, 19, 105, 61, 198, 53, 64, 89, 128 }, // left = d45
+ { 52, 31, 22, 158, 40, 209, 58, 62, 89, 128 }, // left = d135
+ { 44, 31, 29, 147, 46, 158, 56, 102, 198, 128 }, // left = d117
+ { 35, 19, 12, 135, 87, 209, 41, 45, 167, 128 }, // left = d153
+ { 55, 25, 21, 118, 95, 215, 38, 39, 66, 128 }, // left = d207
+ { 51, 38, 25, 113, 58, 164, 70, 93, 97, 128 }, // left = d63
+ { 47, 54, 34, 146, 108, 203, 72, 103, 151, 128 }, // left = tm
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101, 128 } // left = bc
+ }, { // above = d117
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133, 128 }, // left = dc
+ { 46, 27, 80, 150, 55, 124, 55, 121, 135, 128 }, // left = v
+ { 36, 23, 27, 165, 149, 166, 54, 64, 118, 128 }, // left = h
+ { 53, 21, 36, 131, 63, 163, 60, 109, 81, 128 }, // left = d45
+ { 40, 26, 35, 154, 40, 185, 51, 97, 123, 128 }, // left = d135
+ { 35, 19, 34, 179, 19, 97, 48, 129, 124, 128 }, // left = d117
+ { 36, 20, 26, 136, 62, 164, 33, 77, 154, 128 }, // left = d153
+ { 45, 18, 32, 130, 90, 157, 40, 79, 91, 128 }, // left = d207
+ { 45, 26, 28, 129, 45, 129, 49, 147, 123, 128 }, // left = d63
+ { 38, 44, 51, 136, 74, 162, 57, 97, 121, 128 }, // left = tm
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133, 128 } // left = bc
+ }, { // above = d153
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166, 128 }, // left = dc
+ { 56, 39, 58, 133, 117, 173, 48, 53, 187, 128 }, // left = v
+ { 35, 21, 12, 161, 212, 207, 20, 23, 145, 128 }, // left = h
+ { 56, 29, 19, 117, 109, 181, 55, 68, 112, 128 }, // left = d45
+ { 47, 29, 17, 153, 64, 220, 59, 51, 114, 128 }, // left = d135
+ { 46, 16, 24, 136, 76, 147, 41, 64, 172, 128 }, // left = d117
+ { 34, 17, 11, 108, 152, 187, 13, 15, 209, 128 }, // left = d153
+ { 51, 24, 14, 115, 133, 209, 32, 26, 104, 128 }, // left = d207
+ { 55, 30, 18, 122, 79, 179, 44, 88, 116, 128 }, // left = d63
+ { 37, 49, 25, 129, 168, 164, 41, 54, 148, 128 }, // left = tm
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166, 128 } // left = bc
+ }, { // above = d207
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70, 128 }, // left = dc
+ { 62, 44, 61, 123, 105, 189, 48, 57, 64, 128 }, // left = v
+ { 47, 25, 17, 175, 222, 220, 24, 30, 86, 128 }, // left = h
+ { 68, 36, 17, 106, 102, 206, 59, 74, 74, 128 }, // left = d45
+ { 57, 39, 23, 151, 68, 216, 55, 63, 58, 128 }, // left = d135
+ { 49, 30, 35, 141, 70, 168, 82, 40, 115, 128 }, // left = d117
+ { 51, 25, 15, 136, 129, 202, 38, 35, 139, 128 }, // left = d153
+ { 68, 26, 16, 111, 141, 215, 29, 28, 28, 128 }, // left = d207
+ { 59, 39, 19, 114, 75, 180, 77, 104, 42, 128 }, // left = d63
+ { 40, 61, 26, 126, 152, 206, 61, 59, 93, 128 }, // left = tm
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70, 128 } // left = bc
+ }, { // above = d63
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94, 128 }, // left = dc
+ { 48, 34, 86, 101, 92, 146, 78, 179, 134, 128 }, // left = v
+ { 47, 22, 24, 138, 187, 178, 68, 69, 59, 128 }, // left = h
+ { 56, 25, 33, 105, 112, 187, 95, 177, 129, 128 }, // left = d45
+ { 48, 31, 27, 114, 63, 183, 82, 116, 56, 128 }, // left = d135
+ { 43, 28, 37, 121, 63, 123, 61, 192, 169, 128 }, // left = d117
+ { 42, 17, 24, 109, 97, 177, 56, 76, 122, 128 }, // left = d153
+ { 58, 18, 28, 105, 139, 182, 70, 92, 63, 128 }, // left = d207
+ { 46, 23, 32, 74, 86, 150, 67, 183, 88, 128 }, // left = d63
+ { 36, 38, 48, 92, 122, 165, 88, 137, 91, 128 }, // left = tm
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94, 128 } // left = bc
+ }, { // above = tm
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81, 128 }, // left = dc
+ { 44, 78, 115, 132, 119, 173, 71, 112, 93, 128 }, // left = v
+ { 39, 38, 21, 184, 227, 206, 42, 32, 64, 128 }, // left = h
+ { 58, 47, 36, 124, 137, 193, 80, 82, 78, 128 }, // left = d45
+ { 49, 50, 35, 144, 95, 205, 63, 78, 59, 128 }, // left = d135
+ { 41, 53, 52, 148, 71, 142, 65, 128, 51, 128 }, // left = d117
+ { 40, 36, 28, 143, 143, 202, 40, 55, 137, 128 }, // left = d153
+ { 52, 34, 29, 129, 183, 227, 42, 35, 43, 128 }, // left = d207
+ { 42, 44, 44, 104, 105, 164, 64, 130, 80, 128 }, // left = d63
+ { 43, 81, 53, 140, 169, 204, 68, 84, 72, 128 }, // left = tm
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81, 128 } // left = bc
+ }, { // above = bc
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91, 128 }, // left = dc
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100, 128 }, // left = v
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100, 128 }, // left = h
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94, 128 }, // left = d45
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105, 128 }, // left = d135
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151, 128 }, // left = d117
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171, 128 }, // left = d153
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53, 128 }, // left = d207
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102, 128 }, // left = d63
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119, 128 }, // left = tm
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91, 128 } // left = bc
+ }
+#else
{ // above = dc
{ 137, 30, 42, 148, 151, 207, 70, 52, 91 }, // left = dc
{ 92, 45, 102, 136, 116, 180, 74, 90, 100 }, // left = v
{ 42, 44, 44, 104, 105, 164, 64, 130, 80 }, // left = d63
{ 43, 81, 53, 140, 169, 204, 68, 84, 72 } // left = tm
}
+#endif // CONFIG_INTRABC
};
+// FIXME(aconverse): INTRABC UV is always the same as INTRABC Y
const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = {
+#if CONFIG_INTRABC
+ { 144, 11, 54, 157, 195, 130, 46, 58, 108, 1 }, // y = dc
+ { 118, 15, 123, 148, 131, 101, 44, 93, 131, 1 }, // y = v
+ { 113, 12, 23, 188, 226, 142, 26, 32, 125, 1 }, // y = h
+ { 120, 11, 50, 123, 163, 135, 64, 77, 103, 1 }, // y = d45
+ { 113, 9, 36, 155, 111, 157, 32, 44, 161, 1 }, // y = d135
+ { 116, 9, 55, 176, 76, 96, 37, 61, 149, 1 }, // y = d117
+ { 115, 9, 28, 141, 161, 167, 21, 25, 193, 1 }, // y = d153
+ { 120, 12, 32, 145, 195, 142, 32, 38, 86, 1 }, // y = d207
+ { 116, 12, 64, 120, 140, 125, 49, 115, 121, 1 }, // y = d63
+ { 102, 19, 66, 162, 182, 122, 35, 59, 128, 1 }, // y = tm
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } // y = bc
+#else
{ 144, 11, 54, 157, 195, 130, 46, 58, 108 }, // y = dc
{ 118, 15, 123, 148, 131, 101, 44, 93, 131 }, // y = v
{ 113, 12, 23, 188, 226, 142, 26, 32, 125 }, // y = h
{ 120, 12, 32, 145, 195, 142, 32, 38, 86 }, // y = d207
{ 116, 12, 64, 120, 140, 125, 49, 115, 121 }, // y = d63
{ 102, 19, 66, 162, 182, 122, 35, 59, 128 } // y = tm
+#endif // CONFIG_INTRABC
};
static const vp9_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
+#if CONFIG_INTRABC
+ // Each row carries one extra probability (10 == INTRA_MODES - 1) for the
+ // tree node introduced by NEWDV.
+ { 65, 32, 18, 144, 162, 194, 41, 51, 98, 1 }, // block_size < 8x8
+ { 132, 68, 18, 165, 217, 196, 45, 40, 78, 1 }, // block_size < 16x16
+ { 173, 80, 19, 176, 240, 193, 64, 35, 46, 1 }, // block_size < 32x32
+ { 221, 135, 38, 194, 248, 121, 96, 85, 29, 1 } // block_size >= 32x32
+#else
  { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8
  { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16
  { 173, 80, 19, 176, 240, 193, 64, 35, 46 }, // block_size < 32x32
  { 221, 135, 38, 194, 248, 121, 96, 85, 29 } // block_size >= 32x32
+#endif // CONFIG_INTRABC
};
static const vp9_prob default_if_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
+#if CONFIG_INTRABC
+ { 120, 7, 76, 176, 208, 126, 28, 54, 103, 1 }, // y = dc
+ { 48, 12, 154, 155, 139, 90, 34, 117, 119, 1 }, // y = v
+ { 67, 6, 25, 204, 243, 158, 13, 21, 96, 1 }, // y = h
+ { 97, 5, 44, 131, 176, 139, 48, 68, 97, 1 }, // y = d45
+ { 83, 5, 42, 156, 111, 152, 26, 49, 152, 1 }, // y = d135
+ { 80, 5, 58, 178, 74, 83, 33, 62, 145, 1 }, // y = d117
+ { 86, 5, 32, 154, 192, 168, 14, 22, 163, 1 }, // y = d153
+ { 85, 5, 32, 156, 216, 148, 19, 29, 73, 1 }, // y = d207
+ { 77, 7, 64, 116, 132, 122, 37, 126, 120, 1 }, // y = d63
+ { 101, 21, 107, 181, 192, 103, 19, 67, 125, 1 }, // y = tm
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } // y = bc
+#else
{ 120, 7, 76, 176, 208, 126, 28, 54, 103 }, // y = dc
{ 48, 12, 154, 155, 139, 90, 34, 117, 119 }, // y = v
{ 67, 6, 25, 204, 243, 158, 13, 21, 96 }, // y = h
{ 85, 5, 32, 156, 216, 148, 19, 29, 73 }, // y = d207
{ 77, 7, 64, 116, 132, 122, 37, 126, 120 }, // y = d63
{ 101, 21, 107, 181, 192, 103, 19, 67, 125 } // y = tm
+#endif // CONFIG_INTRABC
};
#if CONFIG_FILTERINTRA
-D135_PRED, -D117_PRED, /* 5 = D135_NODE */
-D45_PRED, 14, /* 6 = D45_NODE */
-D63_PRED, 16, /* 7 = D63_NODE */
+#if CONFIG_INTRABC
+ -D153_PRED, 18, /* 8 = D153_NODE */
+ -D207_PRED, -NEWDV /* 9 = D207_NODE */
+#else
-D153_PRED, -D207_PRED /* 8 = D153_NODE */
+#endif // CONFIG_INTRABC
};
const vp9_tree_index vp9_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
struct tx_probs tx_probs;
vp9_prob skip_probs[SKIP_CONTEXTS];
nmv_context nmvc;
+#if CONFIG_INTRABC
+ nmv_context ndvc;
+#endif // CONFIG_INTRABC
#if CONFIG_FILTERINTRA
vp9_prob filterintra_prob[TX_SIZES][INTRA_MODES];
#endif // CONFIG_FILTERINTRA
struct tx_counts tx;
unsigned int skip[SKIP_CONTEXTS][2];
nmv_context_counts mv;
+#if CONFIG_INTRABC
+ nmv_context_counts dv;
+#endif // CONFIG_INTRABC
#if CONFIG_FILTERINTRA
unsigned int filterintra[TX_SIZES][INTRA_MODES][2];
#endif // CONFIG_FILTERINTRA
void vp9_init_mv_probs(VP9_COMMON *cm) {
cm->fc.nmvc = default_nmv_context;
+#if CONFIG_INTRABC
+ cm->fc.ndvc = default_nmv_context;
+#endif // CONFIG_INTRABC
}
static const int mode_lf_lut[MB_MODE_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // INTRA_MODES
+#if CONFIG_INTRABC
+ 0,
+#endif // CONFIG_INTRABC
1, 1, 0, 1, // INTER_MODES (ZEROMV == 0)
#if CONFIG_NEWMVREF
1, // NEAR_FORNEWMV mode
return ref_num;
}
#endif // CONFIG_COPY_MODE
+
+#if CONFIG_INTRABC
+// Derives the predictor ("reference") displacement vector used to code a
+// block's DV differentially. Blocks in the top rows of the frame
+// (mi_row < 8, i.e. within the first 64 luma pixels) predict from the left;
+// all other blocks predict from above.
+// NOTE(review): MV components are in 1/8-pel units, so -8 * 8 is an
+// 8-full-pixel step -- confirm this matches the intended DV precision.
+void vp9_find_ref_dv(int_mv *ref_dv, int mi_row, int mi_col) {
+ (void) mi_col;  // Column position does not influence the predictor yet.
+ if (mi_row < 8) {
+ ref_dv->as_mv.row = 0;
+ ref_dv->as_mv.col = -8 * 8;
+ } else {
+ ref_dv->as_mv.row = -8 * 8;
+ ref_dv->as_mv.col = 0;
+ }
+}
+#endif // CONFIG_INTRABC
MB_MODE_INFO *ref_list[18]);
#endif // CONFIG_COPY_MODE
+#if CONFIG_INTRABC
+// Derives the predictor displacement vector for an intra block copy block.
+void vp9_find_ref_dv(int_mv *ref_dv, int mi_row, int mi_col);
+#endif // CONFIG_INTRABC
#ifdef __cplusplus
} // extern "C"
#endif
struct macroblockd_plane *const pd = &xd->plane[plane];
const MODE_INFO *mi = xd->mi[0].src_mi;
const int is_compound = has_second_ref(&mi->mbmi);
+#if CONFIG_INTRABC
+ const int is_intrabc = is_intrabc_mode(mi->mbmi.mode);
+#endif // CONFIG_INTRABC
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
int ref;
+#if CONFIG_INTRABC
+ assert(!is_intrabc || mi->mbmi.interp_filter == BILINEAR);
+#endif // CONFIG_INTRABC
for (ref = 0; ref < 1 + is_compound; ++ref) {
const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
- struct buf_2d *const pre_buf = &pd->pre[ref];
struct buf_2d *const dst_buf = &pd->dst;
+ struct buf_2d *const pre_buf =
+#if CONFIG_INTRABC
+ is_intrabc ? dst_buf :
+#endif // CONFIG_INTRABC
+ &pd->pre[ref];
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
const MV mv = mi->mbmi.sb_type < BLOCK_8X8
? average_split_mvs(pd, mi, ref, block)
const int is_scaled = vp9_is_scaled(sf);
if (is_scaled) {
+#if CONFIG_INTRABC
+ assert(!is_intrabc);
+#endif // CONFIG_INTRABC
pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
xs = sf->x_step_q4;
build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
#if CONFIG_INTERINTRA
if (xd->mi[0].src_mi->mbmi.ref_frame[1] == INTRA_FRAME &&
+#if CONFIG_INTRABC
+ xd->mi[0].src_mi->mbmi.ref_frame[0] != INTRA_FRAME &&
+#endif // CONFIG_INTRABC
is_interintra_allowed(xd->mi[0].src_mi->mbmi.sb_type))
vp9_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
xd->plane[0].dst.stride, bsize);
MAX_MB_PLANE - 1);
#if CONFIG_INTERINTRA
if (xd->mi[0].src_mi->mbmi.ref_frame[1] == INTRA_FRAME &&
+#if CONFIG_INTRABC
+ xd->mi[0].src_mi->mbmi.ref_frame[0] != INTRA_FRAME &&
+#endif // CONFIG_INTRABC
is_interintra_allowed(xd->mi[0].src_mi->mbmi.sb_type))
vp9_build_interintra_predictors_sbuv(xd, xd->plane[1].dst.buf,
xd->plane[2].dst.buf,
MAX_MB_PLANE - 1);
#if CONFIG_INTERINTRA
if (xd->mi[0].src_mi->mbmi.ref_frame[1] == INTRA_FRAME &&
+#if CONFIG_INTRABC
+ xd->mi[0].src_mi->mbmi.ref_frame[0] != INTRA_FRAME &&
+#endif // CONFIG_INTRABC
is_interintra_allowed(xd->mi[0].src_mi->mbmi.sb_type))
vp9_build_interintra_predictors(xd, xd->plane[0].dst.buf,
xd->plane[1].dst.buf, xd->plane[2].dst.buf,
const int is_compound = has_second_ref(&mi->mbmi);
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
int ref;
+#if CONFIG_INTRABC
+ const int is_intrabc = is_intrabc_mode(mi->mbmi.mode);
+ struct scale_factors sf1;
+
+ vp9_setup_scale_factors_for_frame(&sf1, 64, 64, 64, 64);
+
+ assert(!is_intrabc || !is_compound);
+#endif // CONFIG_INTRABC
for (ref = 0; ref < 1 + is_compound; ++ref) {
+ struct buf_2d *const dst_buf = &pd->dst;
+#if CONFIG_INTRABC
+ const struct scale_factors *const sf =
+ is_intrabc ? &sf1 : &xd->block_refs[ref]->sf;
+ struct buf_2d *const pre_buf =
+ is_intrabc ? dst_buf : &pd->pre[ref];
+ const YV12_BUFFER_CONFIG *ref_buf =
+ is_intrabc ? xd->cur_buf : xd->block_refs[ref]->buf;
+#else
const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
struct buf_2d *const pre_buf = &pd->pre[ref];
- struct buf_2d *const dst_buf = &pd->dst;
+ const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf;
+#endif // CONFIG_INTRABC
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
const MV mv = mi->mbmi.sb_type < BLOCK_8X8
? average_split_mvs(pd, mi, ref, block)
int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
subpel_x, subpel_y;
uint8_t *ref_frame, *buf_ptr;
- const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf;
const int is_scaled = vp9_is_scaled(sf);
// Get reference frame pointer, width and height.
int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
+#if CONFIG_INTRABC
+ assert(!is_intrabc);
+#endif // CONFIG_INTRABC
+
// Co-ordinate of the block to 1/16th pixel precision.
x0_16 = (x_start + x) << SUBPEL_BITS;
y0_16 = (y_start + y) << SUBPEL_BITS;
int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
int x_pad = 0, y_pad = 0;
- if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
+ if (subpel_x || (sf && sf->x_step_q4 != SUBPEL_SHIFTS)) {
x0 -= VP9_INTERP_EXTEND - 1;
x1 += VP9_INTERP_EXTEND;
x_pad = 1;
}
- if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
+ if (subpel_y || (sf && sf->y_step_q4 != SUBPEL_SHIFTS)) {
y0 -= VP9_INTERP_EXTEND - 1;
y1 += VP9_INTERP_EXTEND;
y_pad = 1;
DCT_ADST, // D207
ADST_DCT, // D63
ADST_ADST, // TM
+#if CONFIG_INTRABC
+ DCT_DCT, // BC
+#endif // CONFIG_INTRABC
};
// This serves as a wrapper function, so that all the prediction functions
int x0, y0;
const struct macroblockd_plane *const pd = &xd->plane[plane];
+#if CONFIG_INTRABC
+ assert(!is_intrabc_mode(mode));
+#endif // CONFIG_INTRABC
// 127 127 127 .. 127 127 127 127 127 127
// 129 A B .. Y Z
// 129 C D .. W X
}
static INLINE int vp9_is_scaled(const struct scale_factors *sf) {
+ // sf may now be NULL (intra block copy passes no scale factors);
+ // a NULL sf is reported as "not scaled".
- return vp9_is_valid_scale(sf) &&
+ return sf && vp9_is_valid_scale(sf) &&
         (sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE);
}
cm->base_qindex));
}
}
- if (!is_inter_block(mbmi)) {
+
+ if (!is_inter_block(mbmi)
+#if CONFIG_INTRABC
+ && !is_intrabc_mode(mbmi->mode)
+#endif // CONFIG_INTRABC
+ ) {
struct intra_args arg = { cm, xd, r };
vp9_foreach_transformed_block(xd, bsize,
predict_and_reconstruct_intra_block, &arg);
}
pbi->mb.corrupted |= tile_data->xd.corrupted;
}
+#if !CONFIG_INTRABC
// Loopfilter one row.
if (cm->lf.filter_level && !pbi->mb.corrupted) {
const int lf_start = mi_row - MI_BLOCK_SIZE;
winterface->execute(&pbi->lf_worker);
}
}
+#endif // !CONFIG_INTRABC
}
}
}
}
+// True iff both MV components lie strictly inside the codable range
+// (MV_LOW, MV_UPP). Hoisted above assign_dv so the DV path can reuse it.
+static INLINE int is_mv_valid(const MV *mv) {
+ return mv->row > MV_LOW && mv->row < MV_UPP &&
+ mv->col > MV_LOW && mv->col < MV_UPP;
+}
+
+#if CONFIG_INTRABC
+// Forward declaration: read_mv is defined later in this file.
+static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref,
+ const nmv_context *ctx,
+ nmv_context_counts *counts,
+ int use_subpel,
+ int allow_hp);
+
+// Decodes the displacement vector for a NEWDV (intra block copy) block into
+// |mv|, predicted from |ref_mv|. Returns 1 on success, 0 if the decoded DV
+// is out of range or zero (caller marks the block corrupt). Non-NEWDV modes
+// decode nothing and succeed.
+static INLINE int assign_dv(VP9_COMMON *cm, PREDICTION_MODE mode,
+ int_mv *mv, const int_mv *ref_mv, vp9_reader *r) {
+ int ret = 1;
+
+ switch (mode) {
+ case NEWDV: {
+ nmv_context_counts *const mv_counts = cm->frame_parallel_decoding_mode ?
+ NULL : &cm->counts.dv;
+ // use_subpel = 0, allow_hp = 0: DVs carry no fractional or
+ // high-precision bits (see read_mv_component), i.e. full-pel only.
+ read_mv(r, &mv->as_mv, &ref_mv->as_mv, &cm->fc.ndvc, mv_counts,
+ 0, 0);
+ ret = ret && is_mv_valid(&mv->as_mv) && (mv->as_int != 0);
+ // TODO(aconverse): additional validation
+ break;
+ }
+ default: {
+ ret = 1;
+ }
+ }
+ return ret;
+}
+#endif // CONFIG_INTRABC
+
static void read_intra_frame_mode_info(VP9_COMMON *const cm,
MACROBLOCKD *const xd,
int mi_row, int mi_col, vp9_reader *r) {
const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
const BLOCK_SIZE bsize = mbmi->sb_type;
int i;
+#if CONFIG_INTRABC
+ int_mv dv_ref;
+ vp9_find_ref_dv(&dv_ref, mi_row, mi_col);
+#endif // CONFIG_INTRABC
mbmi->segment_id = read_intra_segment_id(cm, xd, mi_row, mi_col, r);
mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
}
mbmi->filterbit = mi->b_filter_info[3];
#endif
+#if CONFIG_INTRABC
+ xd->corrupted |= !assign_dv(cm, mbmi->mode, &mbmi->mv[0], &dv_ref, r);
+#endif // CONFIG_INTRABC
mbmi->mode = mi->bmi[3].as_mode;
break;
case BLOCK_4X8:
else
mi->b_filter_info[0] = mi->b_filter_info[2] = 0;
#endif
+#if CONFIG_INTRABC
+ xd->corrupted |= !assign_dv(cm, mbmi->mode, &mbmi->mv[0], &dv_ref, r);
+#endif // CONFIG_INTRABC
mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode =
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 1));
#if CONFIG_FILTERINTRA
else
mi->b_filter_info[1] = mi->b_filter_info[3] = mbmi->filterbit = 0;
#endif
+#if CONFIG_INTRABC
+ xd->corrupted |= !assign_dv(cm, mbmi->mode, &mbmi->mv[0], &dv_ref, r);
+#endif // CONFIG_INTRABC
break;
case BLOCK_8X4:
mi->bmi[0].as_mode = mi->bmi[1].as_mode =
else
mi->b_filter_info[0] = mi->b_filter_info[1] = 0;
#endif
+#if CONFIG_INTRABC
+ xd->corrupted |= !assign_dv(cm, mbmi->mode, &mbmi->mv[0], &dv_ref, r);
+#endif // CONFIG_INTRABC
mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode =
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 2));
#if CONFIG_FILTERINTRA
else
mi->b_filter_info[2] = mi->b_filter_info[3] = mbmi->filterbit = 0;
#endif
+#if CONFIG_INTRABC
+ xd->corrupted |= !assign_dv(cm, mbmi->mode, &mbmi->mv[0], &dv_ref, r);
+#endif // CONFIG_INTRABC
break;
default:
#if CONFIG_PALETTE
else
mbmi->filterbit = 0;
#endif // CONFIG_FILTERINTRA
+#if CONFIG_INTRABC
+ xd->corrupted |= !assign_dv(cm, mbmi->mode, &mbmi->mv[0], &dv_ref, r);
+#endif // CONFIG_INTRABC
}
+#if CONFIG_INTRABC
+ if (is_intrabc_mode(mbmi->mode)) {
+ mbmi->uv_mode = mbmi->mode;
+ mbmi->interp_filter = BILINEAR;
+ } else
+#endif // CONFIG_INTRABC
#if CONFIG_PALETTE
if (!mbmi->palette_enabled[1])
- mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]);
-#else
+#endif // CONFIG_PALETTE
mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]);
-#endif
+
#if CONFIG_FILTERINTRA
if (is_filter_enabled(get_uv_tx_size(mbmi, &xd->plane[1])) &&
is_filter_allowed(mbmi->uv_mode)
}
static int read_mv_component(vp9_reader *r,
- const nmv_component *mvcomp, int usehp) {
+ const nmv_component *mvcomp,
+#if CONFIG_INTRABC
+ int usesubpel,
+#endif // CONFIG_INTRABC
+ int usehp) {
int mag, d, fr, hp;
const int sign = vp9_read(r, mvcomp->sign);
const int mv_class = vp9_read_tree(r, vp9_mv_class_tree, mvcomp->classes);
d |= vp9_read(r, mvcomp->bits[i]) << i;
}
+#if CONFIG_INTRABC
+ if (usesubpel) {
+#endif // CONFIG_INTRABC
// Fractional part
fr = vp9_read_tree(r, vp9_mv_fp_tree, class0 ? mvcomp->class0_fp[d]
: mvcomp->fp);
// High precision part (if hp is not used, the default value of the hp is 1)
hp = usehp ? vp9_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
: 1;
+#if CONFIG_INTRABC
+ } else {
+ fr = 3;
+ hp = 1;
+ }
+#endif // CONFIG_INTRABC
// Result
mag = vp9_get_mv_mag(mv_class, (d << 3) | (fr << 1) | hp) + 1;
static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref,
const nmv_context *ctx,
- nmv_context_counts *counts, int allow_hp) {
+ nmv_context_counts *counts,
+#if CONFIG_INTRABC
+ int use_subpel,
+#endif // CONFIG_INTRABC
+ int allow_hp) {
const MV_JOINT_TYPE joint_type =
(MV_JOINT_TYPE)vp9_read_tree(r, vp9_mv_joint_tree, ctx->joints);
const int use_hp = allow_hp && vp9_use_mv_hp(ref);
MV diff = {0, 0};
if (mv_joint_vertical(joint_type))
- diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
+ diff.row = read_mv_component(r, &ctx->comps[0],
+#if CONFIG_INTRABC
+ use_subpel,
+#endif // CONFIG_INTRABC
+ use_hp);
if (mv_joint_horizontal(joint_type))
- diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
+ diff.col = read_mv_component(r, &ctx->comps[1],
+#if CONFIG_INTRABC
+ use_subpel,
+#endif // CONFIG_INTRABC
+ use_hp);
vp9_inc_mv(&diff, counts);
#endif // CONFIG_FILTERINTRA
}
-static INLINE int is_mv_valid(const MV *mv) {
- return mv->row > MV_LOW && mv->row < MV_UPP &&
- mv->col > MV_LOW && mv->col < MV_UPP;
-}
-
static INLINE int assign_mv(VP9_COMMON *cm, PREDICTION_MODE mode,
int_mv mv[2], int_mv ref_mv[2],
int_mv nearest_mv[2], int_mv near_mv[2],
NULL : &cm->counts.mv;
for (i = 0; i < 1 + is_compound; ++i) {
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc.nmvc, mv_counts,
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
allow_hp);
ret = ret && is_mv_valid(&mv[i].as_mv);
assert(ret);
assert(is_compound);
for (i = 0; i < 2; ++i) {
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc.nmvc, mv_counts,
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
allow_hp);
ret = ret && is_mv_valid(&mv[i].as_mv);
}
NULL : &cm->counts.mv;
assert(is_compound);
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, &cm->fc.nmvc, mv_counts,
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
allow_hp);
ret = ret && is_mv_valid(&mv[0].as_mv);
mv[1].as_int = nearest_mv[1].as_int;
assert(is_compound);
mv[0].as_int = nearest_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, &cm->fc.nmvc, mv_counts,
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
allow_hp);
ret = ret && is_mv_valid(&mv[1].as_mv);
break;
assert(is_compound);
mv[0].as_int = near_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, &cm->fc.nmvc, mv_counts,
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
allow_hp);
ret = ret && is_mv_valid(&mv[1].as_mv);
break;
NULL : &cm->counts.mv;
assert(is_compound);
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, &cm->fc.nmvc, mv_counts,
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
allow_hp);
ret = ret && is_mv_valid(&mv[0].as_mv);
mv[1].as_int = near_mv[1].as_int;
}
#if CONFIG_PALETTE
if (!mbmi->palette_enabled[1])
- write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
-#else
- write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
#endif // CONFIG_PALETTE
+#if CONFIG_INTRABC
+ if (!is_intrabc_mode(mode))
+#endif // CONFIG_INTRABC
+ write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
#if CONFIG_FILTERINTRA
if (is_filter_allowed(mbmi->uv_mode) &&
is_filter_enabled(get_uv_tx_size(mbmi, &xd->plane[1]))
#else
const MACROBLOCKD *xd,
#endif // CONFIG_PALETTE
+#if CONFIG_INTRABC
+ int mi_row, int mi_col,
+#endif // CONFIG_INTRABC
MODE_INFO *mi_8x8, vp9_writer *w) {
const struct segmentation *const seg = &cm->seg;
const MODE_INFO *const mi = mi_8x8;
xd->left_available ? mi_8x8[-1].src_mi : NULL;
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
+#if CONFIG_INTRABC
+ const nmv_context *ndvc = &cm->fc.ndvc;
+ int_mv dv_ref;
+ vp9_find_ref_dv(&dv_ref, mi_row, mi_col);
+#endif // CONFIG_INTRABC
if (seg->update_map)
write_segment_id(w, seg, mbmi->segment_id);
vp9_write(w, mbmi->filterbit,
cm->fc.filterintra_prob[mbmi->tx_size][mbmi->mode]);
#endif // CONFIG_FILTERINTRA
+#if CONFIG_INTRABC
+ if (mbmi->mode == NEWDV) {
+ vp9_encode_dv(w, &mbmi->mv[0].as_mv, &dv_ref.as_mv, ndvc);
+ }
+#endif // CONFIG_INTRABC
} else {
const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
#if CONFIG_PALETTE
if (!mbmi->palette_enabled[1])
- write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
-#else
- write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
#endif // CONFIG_PALETTE
+#if CONFIG_INTRABC
+ if (!is_intrabc_mode(mbmi->mode))
+#endif // CONFIG_INTRABC
+ write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
+
#if CONFIG_FILTERINTRA
if (is_filter_allowed(mbmi->uv_mode) &&
is_filter_enabled(get_uv_tx_size(mbmi, &xd->plane[1]))
mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
cm->mi_rows, cm->mi_cols);
if (frame_is_intra_only(cm)) {
- write_mb_modes_kf(cm, xd, xd->mi, w);
+ write_mb_modes_kf(cm, xd,
+#if CONFIG_INTRABC
+ mi_row, mi_col,
+#endif // CONFIG_INTRABC
+ xd->mi, w);
} else {
pack_inter_mode_mvs(cpi, m,
#if CONFIG_SUPERTX
int pred_mv_sad[MAX_REF_FRAMES];
int nmvjointcost[MV_JOINTS];
+ int *ndvcost[2];
int *nmvcost[2];
int *nmvcost_hp[2];
int **mvcost;
vpx_memcpy(cpi->common.current_palette_count, ctx->palette_count_buf,
ctx->palette_buf_size * sizeof(ctx->palette_count_buf[0]));
#endif
- vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
+ vp9_rd_pick_intra_mode_sb(cpi, x,
+#if CONFIG_INTRABC
+ mi_row, mi_col,
+#endif // CONFIG_INTRABC
+ rd_cost, bsize, ctx, best_rd);
#if CONFIG_PALETTE
cpi->common.current_palette_size = n;
vpx_memcpy(cpi->common.current_palette_colors,
set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
- if (!is_inter_block(mbmi)) {
+ if (!is_inter_block(mbmi)
+#if CONFIG_INTRABC
+ && !is_intrabc_mode(mbmi->mode)
+#endif // CONFIG_INTRABC
+ ) {
int plane;
mbmi->skip = 1;
for (plane = 0; plane < MAX_MB_PLANE; ++plane)
// TODO(jingning): per transformed block zero forcing only enabled for
// luma component. will integrate chroma components as well.
- if (x->zcoeff_blk[tx_size][block] && plane == 0) {
+ if (plane == 0 && x->zcoeff_blk[tx_size][block]) {
p->eobs[block] = 0;
*a = *l = 0;
return;
}
static void encode_mv_component(vp9_writer* w, int comp,
- const nmv_component* mvcomp, int usehp) {
+ const nmv_component* mvcomp,
+#if CONFIG_INTRABC
+ int usesubpel,
+#endif // CONFIG_INTRABC
+ int usehp) {
int offset;
const int sign = comp < 0;
const int mag = sign ? -comp : comp;
vp9_write(w, (d >> i) & 1, mvcomp->bits[i]);
}
+#if CONFIG_INTRABC
+ if (usesubpel) {
+#endif // CONFIG_INTRABC
// Fractional bits
vp9_write_token(w, vp9_mv_fp_tree,
mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
if (usehp)
vp9_write(w, hp,
mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+#if CONFIG_INTRABC
+ }
+#endif // CONFIG_INTRABC
}
static void build_nmv_component_cost_table(int *mvcost,
const nmv_component* const mvcomp,
+#if CONFIG_INTRABC
+ int usesubpel,
+#endif // CONFIG_INTRABC
int usehp) {
int i, v;
int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
for (i = 0; i < CLASS0_SIZE; ++i)
vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp9_mv_fp_tree);
- vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree);
+#if CONFIG_INTRABC
+ if (usesubpel) {
+#endif // CONFIG_INTRABC
+ vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree);
if (usehp) {
class0_hp_cost[0] = vp9_cost_zero(mvcomp->class0_hp);
class0_hp_cost[1] = vp9_cost_one(mvcomp->class0_hp);
hp_cost[0] = vp9_cost_zero(mvcomp->hp);
hp_cost[1] = vp9_cost_one(mvcomp->hp);
}
+#if CONFIG_INTRABC
+ }
+#endif // CONFIG_INTRABC
mvcost[0] = 0;
for (v = 1; v <= MV_MAX; ++v) {
int z, c, o, d, e, f, cost = 0;
for (i = 0; i < b; ++i)
cost += bits_cost[i][((d >> i) & 1)];
}
+#if CONFIG_INTRABC
+ if (usesubpel) {
+#endif // CONFIG_INTRABC
if (c == MV_CLASS_0) {
cost += class0_fp_cost[d][f];
} else {
cost += hp_cost[e];
}
}
+#if CONFIG_INTRABC
+ }
+#endif // CONFIG_INTRABC
mvcost[v] = cost + sign_cost[0];
mvcost[-v] = cost + sign_cost[1];
}
vp9_write_token(w, vp9_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
if (mv_joint_vertical(j))
- encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
+ encode_mv_component(w, diff.row, &mvctx->comps[0],
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
+ usehp);
if (mv_joint_horizontal(j))
- encode_mv_component(w, diff.col, &mvctx->comps[1], usehp);
+ encode_mv_component(w, diff.col, &mvctx->comps[1],
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
+ usehp);
// If auto_mv_step_size is enabled then keep track of the largest
// motion vector component used.
}
}
+#if CONFIG_INTRABC
+// Write a displacement vector (intra block copy) to the bitstream as the
+// difference from |ref|, using the joint/component MV trees. DVs are coded
+// whole-pel only, so both usesubpel and usehp are forced to 0 in
+// encode_mv_component.
+void vp9_encode_dv(vp9_writer* w,
+ const MV* mv, const MV* ref,
+ const nmv_context* mvctx) {
+ const MV diff = {mv->row - ref->row,
+ mv->col - ref->col};
+ const MV_JOINT_TYPE j = vp9_get_mv_joint(&diff);
+
+ vp9_write_token(w, vp9_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
+ if (mv_joint_vertical(j))
+ encode_mv_component(w, diff.row, &mvctx->comps[0], 0, 0);
+
+ if (mv_joint_horizontal(j))
+ encode_mv_component(w, diff.col, &mvctx->comps[1], 0, 0);
+}
+#endif // CONFIG_INTRABC
+
void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
-                              const nmv_context* ctx, int usehp) {
+ const nmv_context* ctx,
+#if CONFIG_INTRABC
+ int usesubpel,
+#endif // CONFIG_INTRABC
+ int usehp) {
+ // With CONFIG_INTRABC, usesubpel == 0 builds whole-pel-only cost tables
+ // (used for DV costs); usesubpel == 1 preserves the pre-existing behavior.
vp9_cost_tokens(mvjoint, ctx->joints, vp9_mv_joint_tree);
+#if CONFIG_INTRABC
+ build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usesubpel, usehp);
+ build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usesubpel, usehp);
+#else
build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
+#endif // CONFIG_INTRABC
}
static void inc_mvs(const MB_MODE_INFO *mbmi, const int_mv mv[2],
void vp9_encode_mv(VP9_COMP *cpi, vp9_writer* w, const MV* mv, const MV* ref,
const nmv_context* mvctx, int usehp);
+#if CONFIG_INTRABC
+void vp9_encode_dv(vp9_writer* w, const MV* mv, const MV* ref,
+ const nmv_context* mvctx);
+#endif // CONFIG_INTRABC
+
void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
- const nmv_context* mvctx, int usehp);
+ const nmv_context* mvctx,
+#if CONFIG_INTRABC
+ int usesubpel,
+#endif // CONFIG_INTRABC
+ int usehp);
void vp9_update_mv_count(VP9_COMMON *cm, const MACROBLOCKD *xd);
vpx_free(cpi->complexity_map);
cpi->complexity_map = NULL;
+#if CONFIG_INTRABC
+ vpx_free(cpi->ndvcosts[0]);
+ vpx_free(cpi->ndvcosts[1]);
+ cpi->ndvcosts[0] = NULL;
+ cpi->ndvcosts[1] = NULL;
+#endif // CONFIG_INTRABC
+
vpx_free(cpi->nmvcosts[0]);
vpx_free(cpi->nmvcosts[1]);
cpi->nmvcosts[0] = NULL;
// quantizer value is adjusted between loop iterations.
vp9_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
+#if CONFIG_INTRABC
+ vpx_memcpy(cc->ndvcosts[0], cpi->ndvcosts[0],
+ MV_VALS * sizeof(*cpi->ndvcosts[0]));
+ vpx_memcpy(cc->ndvcosts[1], cpi->ndvcosts[1],
+ MV_VALS * sizeof(*cpi->ndvcosts[1]));
+#endif // CONFIG_INTRABC
vpx_memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
MV_VALS * sizeof(*cpi->nmvcosts[0]));
vpx_memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
// previous call to vp9_save_coding_context.
vp9_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
+#if CONFIG_INTRABC
+ vpx_memcpy(cpi->ndvcosts[0], cc->ndvcosts[0],
+ MV_VALS * sizeof(*cc->ndvcosts[0]));
+ vpx_memcpy(cpi->ndvcosts[1], cc->ndvcosts[1],
+ MV_VALS * sizeof(*cc->ndvcosts[1]));
+#endif // CONFIG_INTRABC
vpx_memcpy(cpi->nmvcosts[0], cc->nmvcosts[0],
MV_VALS * sizeof(*cc->nmvcosts[0]));
vpx_memcpy(cpi->nmvcosts[1], cc->nmvcosts[1],
CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+#if CONFIG_INTRABC
+ CHECK_MEM_ERROR(cm, cpi->ndvcosts[0],
+ vpx_calloc(MV_VALS, sizeof(*cpi->ndvcosts[0])));
+ CHECK_MEM_ERROR(cm, cpi->ndvcosts[1],
+ vpx_calloc(MV_VALS, sizeof(*cpi->ndvcosts[1])));
+#endif // CONFIG_INTRABC
CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
CHECK_MEM_ERROR(cm, cpi->nmvcosts[1],
cpi->first_time_stamp_ever = INT64_MAX;
cal_nmvjointsadcost(cpi->mb.nmvjointsadcost);
+#if CONFIG_INTRABC
+ cpi->mb.ndvcost[0] = &cpi->ndvcosts[0][MV_MAX];
+ cpi->mb.ndvcost[1] = &cpi->ndvcosts[1][MV_MAX];
+#endif // CONFIG_INTRABC
cpi->mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX];
cpi->mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX];
cpi->mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX];
typedef struct {
int nmvjointcost[MV_JOINTS];
+#if CONFIG_INTRABC
+ int ndvcosts[2][MV_VALS];
+#endif // CONFIG_INTRABC
int nmvcosts[2][MV_VALS];
int nmvcosts_hp[2][MV_VALS];
CODING_CONTEXT coding_context;
+ int *ndvcosts[2];
int *nmvcosts[2];
int *nmvcosts_hp[2];
int *nmvsadcosts[2];
best_address = in_what;
// Check the starting position
+ assert(what != NULL);
+ assert(in_what != NULL);
bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride)
+ mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
vp9_build_nmv_cost_table(x->nmvjointcost,
cm->allow_high_precision_mv ? x->nmvcost_hp
: x->nmvcost,
- &cm->fc.nmvc, cm->allow_high_precision_mv);
+ &cm->fc.nmvc,
+#if CONFIG_INTRABC
+ 1,
+#endif // CONFIG_INTRABC
+ cm->allow_high_precision_mv);
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
vp9_cost_tokens((int *)cpi->inter_mode_cost[i],
cm->fc.inter_compound_mode_probs[i],
vp9_inter_compound_mode_tree);
#endif
+#if CONFIG_INTRABC
+ } else {
+ vp9_build_nmv_cost_table(x->nmvjointcost,
+ x->ndvcost,
+ &cm->fc.ndvc, 0, 0);
+#endif // CONFIG_INTRABC
}
}
{{INTRA_FRAME, NONE}},
};
+static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x,
+ int *rate, int64_t *distortion, int *skippable,
+ int64_t *sse, BLOCK_SIZE bsize,
+ int64_t ref_best_rd);
+
static int raster_block_offset(BLOCK_SIZE plane_bsize,
int raster_block, int stride) {
const int bw = b_width_log2_lookup[plane_bsize];
if (args->skip)
return;
- if (!is_inter_block(mbmi)) {
+ if (!is_inter_block(mbmi)
+#if CONFIG_INTRABC
+ && !is_intrabc_mode(mbmi->mode)
+#endif // CONFIG_INTRABC
+ ) {
vp9_encode_block_intra(x, plane, block, plane_bsize, tx_size, &mbmi->skip);
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
}
+// Return nonzero if the whole-pel position of |mv| (given in 1/8-pel units,
+// hence the >> 3) lies outside the current MV search bounds in |x|.
+// (Hoisted above the intrabc code so intrabc can use it; the original copy
+// further down is removed.)
+static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
+ return (mv->row >> 3) < x->mv_row_min ||
+ (mv->row >> 3) > x->mv_row_max ||
+ (mv->col >> 3) < x->mv_col_min ||
+ (mv->col >> 3) > x->mv_col_max;
+}
+
+#if CONFIG_INTRABC
+#define ODD_PEL_DV 0 // Allow odd pel displacement vectors
+// Full-pixel search for an intra block copy displacement vector (DV)
+// pointing at already-reconstructed pixels of the current frame. On return,
+// |tmp_mv| holds the chosen DV in 1/8-pel units (INVALID_MV if no valid
+// search region exists) and |rate_mv| its signaling cost.
+static void intrabc_search(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize,
+ int mi_row, int mi_col,
+ int_mv *tmp_mv, int *rate_mv) {
+ const VP9_COMMON *cm = &cpi->common;
+ int bestsme = INT_MAX;
+ int step_param;
+ int sadpb = x->sadperbit16;
+ MV mvp_full;
+ MV ref_mv;
+
+ int tmp_col_min = x->mv_col_min;
+ int tmp_col_max = x->mv_col_max;
+ int tmp_row_min = x->mv_row_min;
+ int tmp_row_max = x->mv_row_max;
+ int cost_list[5];
+ int corner_row = 0;
+ int corner_col = 0;
+ int sb_mi_row = mi_row & ~0x07; // mi coords of the enclosing 64x64 SB
+ int sb_mi_col = mi_col & ~0x07;
+ int w = num_4x4_blocks_wide_lookup[bsize] * 4; // block size in pixels
+ int h = num_4x4_blocks_high_lookup[bsize] * 4;
+
+ tmp_mv->as_int = INVALID_MV;
+ *rate_mv = 0; // Keep *rate_mv defined on the early-return/goto paths below.
+ vp9_find_ref_dv((int_mv*)&ref_mv, mi_row, mi_col);
+
+ vp9_set_mv_search_range(x, &ref_mv);
+
+ // Pick the corner of the valid (already coded) region based on whether the
+ // reference DV points above or to the left of the current superblock.
+ // TODO(aconverse): Allow copying from the border.
+ if (ref_mv.row < 0) {
+ corner_row = sb_mi_row * 8;
+ corner_col = cm->display_width;
+ } else {
+ corner_row = MIN((sb_mi_row + 8) * 8, cm->display_height);
+ corner_col = sb_mi_col * 8;
+ }
+
+ if (corner_row <= 0 || corner_col <= 0)
+ return;
+
+ // Clamp the MV search range to the valid portion of the image
+ x->mv_col_min = MAX(x->mv_col_min, -mi_col * 8);
+ x->mv_col_max = MIN(x->mv_col_max, corner_col - mi_col * 8 - w);
+ x->mv_row_min = MAX(x->mv_row_min, -mi_row * 8);
+ x->mv_row_max = MIN(x->mv_row_max, corner_row - mi_row * 8 - h);
+
+ if (x->mv_row_min > x->mv_row_max)
+ goto cleanup;
+ if (x->mv_col_min > x->mv_col_max)
+ goto cleanup;
+
+ step_param = cpi->mv_step_param;
+
+ mvp_full = ref_mv;
+
+ // Convert the 1/8-pel predictor to whole pels for the full-pixel search.
+ mvp_full.col >>= 3;
+ mvp_full.row >>= 3;
+
+ bestsme = vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
+ cond_cost_list(cpi, cost_list),
+ &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
+
+ // A zero DV would copy the block onto itself.
+ assert(tmp_mv->as_int != 0);
+
+ if (bestsme < INT_MAX) {
+#if ODD_PEL_DV
+ // Keep odd DVs except at the boundary rows, where they are rounded to
+ // even so the copy source stays inside the valid region.
+ if (tmp_mv->as_mv.col > -w) {
+ assert(tmp_mv->as_mv.row <= -h);
+ if (tmp_mv->as_mv.row == -h - 1) {
+ tmp_mv->as_mv.row = tmp_mv->as_mv.row / 2 * 2;
+ }
+ } else {
+ assert(tmp_mv->as_mv.row <= 8*8 - h);
+ if (tmp_mv->as_mv.row == 8*8 - h - 1) {
+ tmp_mv->as_mv.row = tmp_mv->as_mv.row / 2 * 2;
+ }
+ }
+ tmp_mv->as_mv.row *= 8;
+ tmp_mv->as_mv.col *= 8;
+#else
+ // Round the whole-pel DV toward zero to an even position, then convert
+ // to 1/8-pel units (/2 then *16 == even-pel * 8).
+ tmp_mv->as_mv.row /= 2;
+ tmp_mv->as_mv.col /= 2;
+ tmp_mv->as_mv.row *= 16;
+ tmp_mv->as_mv.col *= 16;
+#endif // ODD_PEL_DV
+ }
+ *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
+ x->nmvjointcost, x->ndvcost, MV_COST_WEIGHT);
+cleanup:
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+}
+
+// Compute the RD cost of the NEWDV (intra block copy) mode for a whole
+// block: runs the DV search, builds the prediction through the inter
+// predictor machinery (with BILINEAR filter), then accumulates Y and UV
+// rate/distortion. Returns INT64_MAX on early termination; otherwise 0 and
+// the caller recomputes the final RD cost from the accumulated outputs.
+static int64_t handle_intrabc_mode(VP9_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize,
+ int64_t txfm_cache[],
+ int *rate2, int64_t *distortion,
+ int *skippable,
+ int *rate_y, int *rate_uv,
+ int *disable_skip,
+ int_mv (*mode_mv)[MAX_REF_FRAMES],
+ int mi_row, int mi_col,
+ int64_t *psse,
+ const int64_t ref_best_rd) {
+ VP9_COMMON *cm = &cpi->common;
+ RD_OPT *rd_opt = &cpi->rd;
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ const int this_mode = mbmi->mode;
+ int_mv *frame_mv = mode_mv[this_mode];
+ int i;
+ int_mv cur_mv[2];
+ int64_t rd;
+ uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0};
+ int64_t bsse[MAX_MB_PLANE << 2] = {0};
+
+ int skip_txfm_sb = 0;
+ int64_t skip_sse_sb = INT64_MAX;
+ int64_t distortion_y = 0, distortion_uv = 0;
+
+ assert(mbmi->ref_frame[0] == INTRA_FRAME);
+ assert(this_mode == NEWDV);
+
+ if (this_mode == NEWDV) {
+ int rate_mv;
+ int_mv tmp_mv;
+ intrabc_search(cpi, x, bsize, mi_row, mi_col,
+ &tmp_mv, &rate_mv);
+ if (tmp_mv.as_int == INVALID_MV)
+ return INT64_MAX;
+ *rate2 += rate_mv;
+ frame_mv[INTRA_FRAME].as_int =
+ xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ }
+
+ cur_mv[0] = frame_mv[INTRA_FRAME];
+ // Clip "next_nearest" so that it does not extend too far out of image
+ if (this_mode != NEWDV)
+ clamp_mv2(&cur_mv[0].as_mv, xd);
+
+ if (mv_check_bounds(x, &cur_mv[0].as_mv))
+ return INT64_MAX;
+ mbmi->mv[0].as_int = cur_mv[0].as_int;
+
+ if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
+ mbmi->mode != NEARESTMV)
+ return INT64_MAX;
+
+ // Search for best switchable filter by checking the variance of
+ // pred error irrespective of whether the filter will be used
+ rd_opt->mask_filter = 0;
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
+ rd_opt->filter_cache[i] = INT64_MAX;
+
+ // Set the appropriate filter
+ mbmi->interp_filter = BILINEAR;
+
+ {
+ int tmp_rate;
+ int64_t tmp_dist;
+ // DVs reuse the inter prediction path to build the block-copy prediction.
+ vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
+ &skip_txfm_sb, &skip_sse_sb);
+ rd = RDCOST(x->rdmult, x->rddiv, tmp_rate, tmp_dist);
+ vpx_memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
+ vpx_memcpy(bsse, x->bsse, sizeof(bsse));
+ }
+
+ if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
+ // if current pred_error modeled rd is substantially more than the best
+ // so far, do not bother doing full rd
+ if (rd / 2 > ref_best_rd) {
+ return INT64_MAX;
+ }
+ }
+
+ vpx_memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
+ vpx_memcpy(x->bsse, bsse, sizeof(bsse));
+
+ if (!skip_txfm_sb) {
+ int skippable_y, skippable_uv;
+ int64_t sseuv = INT64_MAX;
+ int64_t rdcosty = INT64_MAX;
+#if CONFIG_TX_SKIP
+ int rate_s, skippable_s;
+ int64_t distortion_s, psse_s;
+ MB_MODE_INFO mbmi_temp;
+ int64_t tx_cache_s[TX_MODES];
+#endif // CONFIG_TX_SKIP
+
+ vp9_subtract_plane(x, bsize, 0);
+
+ // Y cost and distortion
+ super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
+ bsize, txfm_cache, ref_best_rd);
+#if CONFIG_TX_SKIP
+ // Try the transform-skip variant for Y and keep whichever is cheaper.
+ mbmi_temp = *(mbmi);
+ mbmi->tx_skip[0] = 1;
+ super_block_yrd(cpi, x, &rate_s, &distortion_s, &skippable_s, &psse_s,
+ bsize, tx_cache_s, ref_best_rd);
+
+ if (mbmi->tx_size < TX_32X32)
+ distortion_s = distortion_s << 2;
+
+ if (rate_s != INT_MAX) {
+ if (*rate_y == INT_MAX ||
+ RDCOST(x->rdmult, x->rddiv, *rate_y, distortion_y) >
+ RDCOST(x->rdmult, x->rddiv, rate_s, distortion_s)) {
+ *rate_y = rate_s;
+ distortion_y = distortion_s;
+ *skippable = skippable_s;
+ *psse = psse_s;
+ } else {
+ *(mbmi) = mbmi_temp;
+ mbmi->tx_skip[0] = 0;
+ }
+ } else {
+ *(mbmi) = mbmi_temp;
+ mbmi->tx_skip[0] = 0;
+ }
+#endif // CONFIG_TX_SKIP
+
+ if (*rate_y == INT_MAX) {
+ *rate2 = INT_MAX;
+ *distortion = INT64_MAX;
+ return INT64_MAX;
+ }
+
+ *rate2 += *rate_y;
+ *distortion += distortion_y;
+
+ rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+ rdcosty = MIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
+
+#if CONFIG_TX_SKIP
+ super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
+ &sseuv, bsize, ref_best_rd - rdcosty);
+ // NOTE(review): unlike the Y path, tx_skip[1] is set *before* mbmi_temp
+ // is saved; the explicit tx_skip[1] = 0 after each restore compensates,
+ // but confirm the ordering is intentional.
+ mbmi->tx_skip[1] = 1;
+ mbmi_temp = *(mbmi);
+ super_block_uvrd(cpi, x, &rate_s, &distortion_s, &skippable_s,
+ &psse_s, bsize, ref_best_rd - rdcosty);
+
+ if (rate_s != INT_MAX) {
+ if (get_uv_tx_size(mbmi, &xd->plane[1]) < TX_32X32)
+ distortion_s = distortion_s << 2;
+
+ if (*rate_uv == INT_MAX ||
+ RDCOST(x->rdmult, x->rddiv, *rate_uv, distortion_uv) >
+ RDCOST(x->rdmult, x->rddiv, rate_s, distortion_s)) {
+ *rate_uv = rate_s;
+ distortion_uv = distortion_s;
+ skippable_uv = skippable_s;
+ sseuv = psse_s;
+ } else {
+ *(mbmi) = mbmi_temp;
+ mbmi->tx_skip[1] = 0;
+ }
+ } else {
+ *(mbmi) = mbmi_temp;
+ mbmi->tx_skip[1] = 0;
+ }
+
+ if (*rate_uv == INT_MAX) {
+ *rate2 = INT_MAX;
+ *distortion = INT64_MAX;
+ return INT64_MAX;
+ }
+#else
+ if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
+ &sseuv, bsize, ref_best_rd - rdcosty)) {
+ *rate2 = INT_MAX;
+ *distortion = INT64_MAX;
+ return INT64_MAX;
+ }
+#endif // CONFIG_TX_SKIP
+
+ *psse += sseuv;
+ *rate2 += *rate_uv;
+ *distortion += distortion_uv;
+ *skippable = skippable_y && skippable_uv;
+ } else {
+ x->skip = 1;
+ *disable_skip = 1;
+
+ // The cost of skip bit needs to be added.
+ *rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
+
+ *distortion = skip_sse_sb;
+ }
+
+ return 0; // The rate-distortion cost will be re-calculated by caller.
+}
+#endif // CONFIG_INTRABC
+
// This function is used only for intra_only frames
static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
int *rate, int *rate_tokenonly,
return best_rd;
}
+#if CONFIG_INTRABC
+// This function is used only for intra_only frames.
+// Evaluates the intra block copy mode(s) for the whole superblock and, if
+// one beats |best_rd|, commits it to the mode info; otherwise restores the
+// previously selected mode. Returns the (possibly improved) best_rd.
+static int64_t rd_pick_intrabc_sb_mode(VP9_COMP *cpi, MACROBLOCK *x,
+ int mi_row, int mi_col,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize,
+ int64_t tx_cache[TX_MODES],
+ int64_t best_rd) {
+ PREDICTION_MODE mode;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MODE_INFO *const mic = xd->mi[0].src_mi;
+ // Snapshot the currently selected (non-DV) mode so it can be restored
+ // when no DV mode wins.
+ PREDICTION_MODE mode_selected = mic->mbmi.mode;
+ PREDICTION_MODE mode_selected_uv = mic->mbmi.uv_mode;
+ int filter_selected = mic->mbmi.interp_filter;
+ TX_SIZE best_tx = mic->mbmi.tx_size;
+ const MODE_INFO *above_mi = xd->mi[-xd->mi_stride].src_mi;
+ const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
+ const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
+ const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
+ const int *bmode_costs = cpi->y_mode_costs[A][L];
+ struct buf_2d yv12_mb[MAX_MB_PLANE];
+ int_mv frame_dv[MB_MODE_COUNT][MAX_REF_FRAMES];
+ int i;
+#if CONFIG_TX_SKIP
+ int q_idx = vp9_get_qindex(&cpi->common.seg, mic->mbmi.segment_id,
+ cpi->common.base_qindex);
+ int try_tx_skip = q_idx <= TX_SKIP_Q_THRESH_INTRA;
+ int tx_skipped_y = mic->mbmi.tx_skip[0];
+ int tx_skipped_uv = mic->mbmi.tx_skip[1];
+#endif // CONFIG_TX_SKIP
+#if CONFIG_FILTERINTRA
+ int filterbit = mic->mbmi.filterbit;
+ int uv_filterbit = mic->mbmi.uv_filterbit;
+#endif // CONFIG_FILTERINTRA
+#if CONFIG_PALETTE
+ int palette_enabled[2];
+ palette_enabled[0] = mic->mbmi.palette_enabled[0];
+ palette_enabled[1] = mic->mbmi.palette_enabled[1];
+#endif // CONFIG_PALETTE
+
+ if (cpi->sf.tx_size_search_method == USE_FULL_RD)
+ for (i = 0; i < TX_MODES; i++)
+ tx_cache[i] = INT64_MAX;
+
+ // Point the prediction buffers at the current (partially reconstructed)
+ // frame so the block-copy prediction reads from it.
+ vp9_setup_pred_block(xd, yv12_mb, xd->cur_buf, mi_row, mi_col,
+ NULL, NULL);
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].pre[0] = yv12_mb[i];
+ }
+
+ vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+
+ // Only NEWDV exists today; loop form presumably kept for future DV modes.
+ for (mode = NEWDV; mode <= NEWDV; ++mode) {
+ int this_skippable = 0;
+ int rate_y = 0;
+ int rate_uv = 0;
+ // NOTE(review): this local shadows the tx_cache parameter, so the
+ // caller's array never receives the DV-mode results — confirm intended.
+ int64_t tx_cache[TX_MODES];
+ int64_t total_sse;
+ int disable_skip = 0;
+ const int saved_interp_filter = cpi->common.interp_filter;
+ int this_rate;
+ int this_rate_tokenonly = 0;
+ int64_t this_distortion = 0;
+ int64_t this_rd;
+ mic->mbmi.mode = mode;
+ assert(mic->mbmi.sb_type >= BLOCK_8X8);
+ cpi->common.interp_filter = BILINEAR;
+ this_rd = handle_intrabc_mode(cpi, x, bsize,
+ tx_cache,
+ &this_rate_tokenonly, &this_distortion,
+ &this_skippable,
+ &rate_y, &rate_uv,
+ &disable_skip, frame_dv,
+ mi_row, mi_col,
+ &total_sse, best_rd);
+ cpi->common.interp_filter = saved_interp_filter;
+ if (this_rd == INT64_MAX)
+ continue;
+ this_rate = this_rate_tokenonly + bmode_costs[mode];
+#if CONFIG_TX_SKIP
+ if (try_tx_skip)
+ this_rate += vp9_cost_bit(cpi->common.fc.y_tx_skip_prob[0], 0) +
+ vp9_cost_bit(cpi->common.fc.uv_tx_skip_prob[0], 0);
+#endif // CONFIG_TX_SKIP
+ this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+ if (this_rd < best_rd) {
+ mode_selected = mode;
+ mode_selected_uv = mode;
+ filter_selected = mic->mbmi.interp_filter;
+ best_rd = this_rd;
+ best_tx = mic->mbmi.tx_size;
+ *rate = this_rate;
+ *rate_tokenonly = this_rate_tokenonly;
+ *distortion = this_distortion;
+ *skippable = this_skippable;
+#if CONFIG_TX_SKIP
+ tx_skipped_y = mic->mbmi.tx_skip[0];
+ tx_skipped_uv = mic->mbmi.tx_skip[1];
+#endif // CONFIG_TX_SKIP
+#if CONFIG_PALETTE
+ palette_enabled[0] = 0;
+ palette_enabled[1] = 0;
+#endif // CONFIG_PALETTE
+#if CONFIG_FILTERINTRA
+ filterbit = 0;
+ uv_filterbit = 0;
+#endif // CONFIG_FILTERINTRA
+ }
+ }
+
+ // Commit the winning mode (or restore the snapshot if no DV mode won).
+ mic->mbmi.mode = mode_selected;
+ mic->mbmi.uv_mode = mode_selected_uv;
+ mic->mbmi.tx_size = best_tx;
+ mic->mbmi.interp_filter = filter_selected;
+#if CONFIG_TX_SKIP
+ mic->mbmi.tx_skip[0] = tx_skipped_y;
+ mic->mbmi.tx_skip[1] = tx_skipped_uv;
+#endif // CONFIG_TX_SKIP
+#if CONFIG_PALETTE
+ mic->mbmi.palette_enabled[0] = palette_enabled[0];
+ mic->mbmi.palette_enabled[1] = palette_enabled[1];
+#endif // CONFIG_PALETTE
+#if CONFIG_FILTERINTRA
+ mic->mbmi.filterbit = filterbit;
+ mic->mbmi.uv_filterbit = uv_filterbit;
+#endif // CONFIG_FILTERINTRA
+
+ return best_rd;
+}
+#endif // CONFIG_INTRABC
+
// Return value 0: early termination triggered, no valid rd cost available;
// 1: rd cost values are valid.
static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x,
int mvthresh;
} BEST_SEG_INFO;
-static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
- return (mv->row >> 3) < x->mv_row_min ||
- (mv->row >> 3) > x->mv_row_max ||
- (mv->col >> 3) < x->mv_col_min ||
- (mv->col >> 3) > x->mv_col_max;
-}
-
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0].src_mi->mbmi;
struct macroblock_plane *const p = &x->plane[0];
}
void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
- RD_COST *rd_cost, BLOCK_SIZE bsize,
+#if CONFIG_INTRABC
+ int mi_row, int mi_col,
+#endif // CONFIG_INTRABC
+ RD_COST *rd_cost,
+ BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
xd->mi[0].src_mi->mbmi.ref_frame[0] = INTRA_FRAME;
if (bsize >= BLOCK_8X8) {
- if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
+ if (rd_pick_intra_sby_mode(cpi, x, &rate_y,
+ &rate_y_tokenonly,
&dist_y, &y_skip, bsize, tx_cache,
best_rd) >= best_rd) {
rd_cost->rate = INT_MAX;
ctx->mic = *xd->mi[0].src_mi;
}
}
+#endif // CONFIG_PALETTE
+#if CONFIG_INTRABC
+ rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
+
+ if (bsize >= BLOCK_8X8) {
+ best_rd = MIN(best_rd, rd_cost->rdcost);
+ if (rd_pick_intrabc_sb_mode(cpi, x, mi_row, mi_col, &rate_y,
+ &rate_y_tokenonly, &dist_y, &y_skip, bsize,
+ tx_cache, best_rd) < best_rd) {
+ if (y_skip && uv_skip) {
+ rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly +
+ vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
+ rd_cost->dist = dist_y + dist_uv;
+ vp9_zero(ctx->tx_rd_diff);
+ } else {
+ int i;
+ rd_cost->rate =
+ rate_y + rate_uv + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
+ rd_cost->dist = dist_y + dist_uv;
+ if (cpi->sf.tx_size_search_method == USE_FULL_RD)
+ for (i = 0; i < TX_MODES; i++) {
+ if (tx_cache[i] < INT64_MAX && tx_cache[cm->tx_mode] < INT64_MAX)
+ ctx->tx_rd_diff[i] = tx_cache[i] - tx_cache[cm->tx_mode];
+ else
+ ctx->tx_rd_diff[i] = 0;
+ }
+ }
+ }
+ ctx->mic = *xd->mi[0].src_mi;
+ rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
+ }
+#endif // CONFIG_INTRABC
+
+#if CONFIG_PALETTE
if (xd->mi[0].src_mi->mbmi.palette_enabled[0]) {
vp9_palette_color_insertion(ctx->palette_colors_buf,
&ctx->palette_buf_size,
struct RD_COST;
void vp9_rd_pick_intra_mode_sb(struct VP9_COMP *cpi, struct macroblock *x,
+#if CONFIG_INTRABC
+ int mi_row, int mi_col,
+#endif // CONFIG_INTRABC
struct RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx, int64_t best_rd);