fp_mb_stats
emulate_hardware
tx64x64
+ filterintra
"
CONFIG_LIST="
external_build
// Common for both INTER and INTRA blocks
BLOCK_SIZE sb_type;
PREDICTION_MODE mode;
+#if CONFIG_FILTERINTRA
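+  // Whether the 4-tap filter intra predictor is used for the luma and
+  // chroma blocks, respectively.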
+ int filterbit, uv_filterbit;
+#endif
TX_SIZE tx_size;
int8_t skip;
int8_t segment_id;
typedef struct MODE_INFO {
struct MODE_INFO *src_mi;
MB_MODE_INFO mbmi;
+#if CONFIG_FILTERINTRA
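+  // Per-4x4 filter bits for sub-8x8 luma partitions; b_filter_info[3] is
+  // mirrored into mbmi.filterbit.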
+ int b_filter_info[4];
+#endif
b_mode_info bmi[4];
} MODE_INFO;
: mi->mbmi.mode;
}
+#if CONFIG_FILTERINTRA
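+// The filter predictor may accompany any intra prediction mode; it is only
+// signalled (and applied) for transform sizes up to 32x32.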
+static INLINE int is_filter_allowed(PREDICTION_MODE mode) {
+ return 1;
+}
+
+static INLINE int is_filter_enabled(TX_SIZE txsize) {
+ return (txsize <= TX_32X32);
+}
+#endif
+
static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
return mbmi->ref_frame[0] > INTRA_FRAME;
}
{ 101, 21, 107, 181, 192, 103, 19, 67, 125 } // y = tm
};
+#if CONFIG_FILTERINTRA
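+// Default probability that the filter bit is set, per transform size and
+// intra prediction mode.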
+static const vp9_prob default_filterintra_prob[TX_SIZES][INTRA_MODES] = {
+ // DC V H D45 D135 D117 D153 D207 D63 TM
+ {153, 171, 147, 150, 129, 101, 100, 153, 132, 111},
+ {171, 173, 185, 131, 70, 53, 70, 148, 127, 114},
+ {175, 203, 213, 86, 45, 71, 41, 150, 125, 154},
+ {235, 230, 154, 202, 154, 205, 37, 128, 0, 202}
+};
+#endif
+
const vp9_prob vp9_kf_partition_probs[PARTITION_CONTEXTS]
[PARTITION_TYPES - 1] = {
// 8x8 -> 4x4
fc->tx_probs = default_tx_probs;
vp9_copy(fc->skip_probs, default_skip_probs);
vp9_copy(fc->inter_mode_probs, default_inter_mode_probs);
+#if CONFIG_FILTERINTRA
+ vp9_copy(fc->filterintra_prob, default_filterintra_prob);
+#endif
}
const vp9_tree_index vp9_switchable_interp_tree
}
}
+#if CONFIG_FILTERINTRA
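+  // Adapt the filter-intra probabilities from the accumulated counts, in the
+  // same way as the other mode probabilities.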
+ for (i = 0; i < TX_SIZES; ++i)
+ for (j = 0; j < INTRA_MODES; ++j)
+ fc->filterintra_prob[i][j] = adapt_prob(pre_fc->filterintra_prob[i][j],
+ counts->filterintra[i][j]);
+#endif
+
for (i = 0; i < SKIP_CONTEXTS; ++i)
fc->skip_probs[i] = adapt_prob(pre_fc->skip_probs[i], counts->skip[i]);
}
struct tx_probs tx_probs;
vp9_prob skip_probs[SKIP_CONTEXTS];
nmv_context nmvc;
+#if CONFIG_FILTERINTRA
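+  // Probability that the filter bit is set, per transform size and mode.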
+ vp9_prob filterintra_prob[TX_SIZES][INTRA_MODES];
+#endif
} FRAME_CONTEXT;
typedef struct {
struct tx_counts tx;
unsigned int skip[SKIP_CONTEXTS][2];
nmv_context_counts mv;
+#if CONFIG_FILTERINTRA
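+  // Counts of the filter bit (off/on), per transform size and mode.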
+ unsigned int filterintra[TX_SIZES][INTRA_MODES][2];
+#endif
} FRAME_COUNTS;
extern const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
pred[mode][tx_size](dst, dst_stride, const_above_row, left_col);
}
}
+#if CONFIG_FILTERINTRA
+static void filter_intra_predictors_4tap(uint8_t *ypred_ptr, int y_stride,
+ int bs,
+ const uint8_t *yabove_row,
+ const uint8_t *yleft_col,
+ int mode) {
+ static const int prec_bits = 10;
+ static const int round_val = 511;
+
+ int k, r, c;
+#if CONFIG_TX64X64
+ int pred[65][129];
+#else
+ int pred[33][65];
+#endif
+ int mean, ipred;
+
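+  // Mode-dependent 4-tap weights in Q10 fixed point, one table per block
+  // size. Each sample is predicted as
+  //   c1 * above + c2 * left + c3 * above-left + c4 * above-right
+  // on mean-removed pixels.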
+  static const int taps4_4[10][4] = {
+ {735, 881, -537, -54},
+ {1005, 519, -488, -11},
+ {383, 990, -343, -6},
+ {442, 805, -542, 319},
+ {658, 616, -133, -116},
+ {875, 442, -141, -151},
+ {386, 741, -23, -80},
+ {390, 1027, -446, 51},
+ {679, 606, -523, 262},
+ {903, 922, -778, -23}
+ };
+  static const int taps4_8[10][4] = {
+ {648, 803, -444, 16},
+ {972, 620, -576, 7},
+ {561, 967, -499, -5},
+ {585, 762, -468, 144},
+ {596, 619, -182, -9},
+ {895, 459, -176, -153},
+ {557, 722, -126, -129},
+ {601, 839, -523, 105},
+ {562, 709, -499, 251},
+ {803, 872, -695, 43}
+ };
+  static const int taps4_16[10][4] = {
+ {423, 728, -347, 111},
+ {963, 685, -665, 23},
+ {281, 1024, -480, 216},
+ {640, 596, -437, 78},
+ {429, 669, -259, 99},
+ {740, 646, -415, 23},
+ {568, 771, -346, 40},
+ {404, 833, -486, 209},
+ {398, 712, -423, 307},
+ {939, 935, -887, 17}
+ };
+  static const int taps4_32[10][4] = {
+ {477, 737, -393, 150},
+ {881, 630, -546, 67},
+ {506, 984, -443, -20},
+ {114, 459, -270, 528},
+ {433, 528, 14, 3},
+ {837, 470, -301, -30},
+ {181, 777, 89, -107},
+ {-29, 716, -232, 259},
+ {589, 646, -495, 255},
+ {740, 884, -728, 77}
+ };
+
+ const int c1 = (bs >= 32) ? taps4_32[mode][0] : ((bs >= 16) ?
+ taps4_16[mode][0] : ((bs >= 8) ? taps4_8[mode][0] : taps4_4[mode][0]));
+ const int c2 = (bs >= 32) ? taps4_32[mode][1] : ((bs >= 16) ?
+ taps4_16[mode][1] : ((bs >= 8) ? taps4_8[mode][1] : taps4_4[mode][1]));
+ const int c3 = (bs >= 32) ? taps4_32[mode][2] : ((bs >= 16) ?
+ taps4_16[mode][2] : ((bs >= 8) ? taps4_8[mode][2] : taps4_4[mode][2]));
+ const int c4 = (bs >= 32) ? taps4_32[mode][3] : ((bs >= 16) ?
+ taps4_16[mode][3] : ((bs >= 8) ? taps4_8[mode][3] : taps4_4[mode][3]));
+
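+  // Average the bs left and bs above neighbouring pixels; prediction runs on
+  // mean-removed samples and the mean is added back at the end.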
+ k = 0;
+ mean = 0;
+ while (k < bs) {
+ mean = mean + (int)yleft_col[k];
+ mean = mean + (int)yabove_row[k];
+ k++;
+ }
+ mean = (mean + bs) / (2 * bs);
+
+ for (r = 0; r < bs; r++)
+ pred[r + 1][0] = (int)yleft_col[r] - mean;
+
+ for (c = 0; c < 2 * bs + 1; c++)
+ pred[0][c] = (int)yabove_row[c - 1] - mean;
+
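+  // Extrapolate row by row from the boundary. The filled width shrinks by
+  // one sample per row because the above-right tap reads pred[r - 1][c + 1].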
+ for (r = 1; r < bs + 1; r++)
+ for (c = 1; c < 2 * bs + 1 - r; c++) {
+ ipred = c1 * pred[r - 1][c] + c2 * pred[r][c - 1]
+ + c3 * pred[r - 1][c - 1] + c4 * pred[r - 1][c + 1];
+ pred[r][c] = ipred < 0 ? -((-ipred + round_val) >> prec_bits) :
+ ((ipred + round_val) >> prec_bits);
+ }
+
+ for (r = 0; r < bs; r++) {
+ for (c = 0; c < bs; c++) {
+ ipred = pred[r + 1][c + 1] + mean;
+ ypred_ptr[c] = clip_pixel(ipred);
+ }
+ ypred_ptr += y_stride;
+ }
+}
+
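+// Counterpart of build_intra_predictors(): fetch and border-extend the above
+// row and left column for the current transform block, then apply the 4-tap
+// filter predictor instead of a conventional directional predictor.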
+static void build_filter_intra_predictors(const MACROBLOCKD *xd,
+ const uint8_t *ref, int ref_stride,
+ uint8_t *dst, int dst_stride,
+ PREDICTION_MODE mode, TX_SIZE tx_size,
+ int up_available, int left_available,
+ int right_available, int x, int y,
+ int plane) {
+ int i;
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16);
+ uint8_t *above_row = above_data + 16;
+ const uint8_t *const_above_row = above_row;
+ const int bs = 4 << tx_size;
+ int frame_width, frame_height;
+ int x0, y0;
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+
+ // Get current frame pointer, width and height.
+ if (plane == 0) {
+ frame_width = xd->cur_buf->y_width;
+ frame_height = xd->cur_buf->y_height;
+ } else {
+ frame_width = xd->cur_buf->uv_width;
+ frame_height = xd->cur_buf->uv_height;
+ }
+
+ // Get block position in current frame.
+ x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
+ y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
+
+ vpx_memset(left_col, 129, 64);
+
+ // left
+ if (left_available) {
+ if (xd->mb_to_bottom_edge < 0) {
+ /* slower path if the block needs border extension */
+ if (y0 + bs <= frame_height) {
+ for (i = 0; i < bs; ++i)
+ left_col[i] = ref[i * ref_stride - 1];
+ } else {
+ const int extend_bottom = frame_height - y0;
+ for (i = 0; i < extend_bottom; ++i)
+ left_col[i] = ref[i * ref_stride - 1];
+ for (; i < bs; ++i)
+ left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1];
+ }
+ } else {
+ /* faster path if the block does not need extension */
+ for (i = 0; i < bs; ++i)
+ left_col[i] = ref[i * ref_stride - 1];
+ }
+ }
+
+ // TODO(hkuang) do not extend 2*bs pixels for all modes.
+ // above
+ if (up_available) {
+ const uint8_t *above_ref = ref - ref_stride;
+ if (xd->mb_to_right_edge < 0) {
+ /* slower path if the block needs border extension */
+ if (x0 + 2 * bs <= frame_width) {
+ if (right_available && bs == 4) {
+ vpx_memcpy(above_row, above_ref, 2 * bs);
+ } else {
+ vpx_memcpy(above_row, above_ref, bs);
+ vpx_memset(above_row + bs, above_row[bs - 1], bs);
+ }
+ } else if (x0 + bs <= frame_width) {
+ const int r = frame_width - x0;
+ if (right_available && bs == 4) {
+ vpx_memcpy(above_row, above_ref, r);
+ vpx_memset(above_row + r, above_row[r - 1],
+ x0 + 2 * bs - frame_width);
+ } else {
+ vpx_memcpy(above_row, above_ref, bs);
+ vpx_memset(above_row + bs, above_row[bs - 1], bs);
+ }
+      } else if (x0 <= frame_width) {
+        const int r = frame_width - x0;
+        vpx_memcpy(above_row, above_ref, r);
+        vpx_memset(above_row + r, above_row[r - 1],
+                   x0 + 2 * bs - frame_width);
+ }
+ above_row[-1] = left_available ? above_ref[-1] : 129;
+ } else {
+ /* faster path if the block does not need extension */
+ if (bs == 4 && right_available && left_available) {
+ const_above_row = above_ref;
+ } else {
+ vpx_memcpy(above_row, above_ref, bs);
+ if (bs == 4 && right_available)
+ vpx_memcpy(above_row + bs, above_ref + bs, bs);
+ else
+ vpx_memset(above_row + bs, above_row[bs - 1], bs);
+ above_row[-1] = left_available ? above_ref[-1] : 129;
+ }
+ }
+ } else {
+ vpx_memset(above_row, 127, bs * 2);
+ above_row[-1] = 127;
+ }
+
+ // predict
+ filter_intra_predictors_4tap(dst, dst_stride, bs, const_above_row, left_col,
+ mode);
+}
+#endif
+
void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in,
TX_SIZE tx_size, PREDICTION_MODE mode,
- const uint8_t *ref, int ref_stride,
+#if CONFIG_FILTERINTRA
+ int filterbit,
+#endif
+ const uint8_t *ref, int ref_stride,
uint8_t *dst, int dst_stride,
int aoff, int loff, int plane) {
const int bwl = bwl_in - tx_size;
const int have_right = ((block_idx & wmask) != wmask);
const int x = aoff * 4;
const int y = loff * 4;
+#if CONFIG_FILTERINTRA
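+  // Use the filter predictor only if the mode and transform size allow it
+  // and the block actually signalled the bit.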
+  const int filterflag = is_filter_allowed(mode) &&
+                         is_filter_enabled(tx_size) && filterbit;
+#endif
assert(bwl >= 0);
#if CONFIG_VP9_HIGHBITDEPTH
x, y, plane, xd->bd);
return;
}
+#endif
+#if CONFIG_FILTERINTRA
+ if (!filterflag) {
#endif
build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
have_top, have_left, have_right, x, y, plane);
+#if CONFIG_FILTERINTRA
+ } else {
+ build_filter_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode,
+ tx_size, have_top, have_left, have_right, x, y, plane);
+ }
+#endif
}
void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in,
TX_SIZE tx_size, PREDICTION_MODE mode,
+#if CONFIG_FILTERINTRA
+ int filterbit,
+#endif
const uint8_t *ref, int ref_stride,
uint8_t *dst, int dst_stride,
int aoff, int loff, int plane);
: mi->mbmi.uv_mode;
int x, y;
uint8_t *dst;
+#if CONFIG_FILTERINTRA
+ int fbit;
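+  // Sub-8x8 luma blocks carry a per-4x4 filter bit; otherwise use the
+  // block-level luma or chroma bit, which is only valid when the transform
+  // size allows filtering.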
+  if (plane == 0) {
+    if (mi->mbmi.sb_type < BLOCK_8X8)
+      fbit = mi->b_filter_info[block];
+    else
+      fbit = is_filter_enabled(tx_size) ? mi->mbmi.filterbit : 0;
+  } else {
+    fbit = is_filter_enabled(tx_size) ? mi->mbmi.uv_filterbit : 0;
+  }
+#endif
txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];
vp9_predict_intra_block(xd, block >> (tx_size << 1),
b_width_log2_lookup[plane_bsize], tx_size, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
dst, pd->dst.stride, dst, pd->dst.stride,
x, y, plane);
if (!mi->mbmi.skip) {
switch (bsize) {
case BLOCK_4X4:
+#if !CONFIG_FILTERINTRA
for (i = 0; i < 4; ++i)
+#else
+ for (i = 0; i < 4; ++i) {
+#endif
mi->bmi[i].as_mode =
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, i));
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[i].as_mode))
+ mi->b_filter_info[i] =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[i].as_mode]);
+ else
+ mi->b_filter_info[i] = 0;
+ }
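+      // Mirror the last 4x4 block's filter bit at the block level, matching
+      // how mbmi->mode tracks mi->bmi[3].as_mode below.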
+ mbmi->filterbit = mi->b_filter_info[3];
+#endif
mbmi->mode = mi->bmi[3].as_mode;
break;
case BLOCK_4X8:
mi->bmi[0].as_mode = mi->bmi[2].as_mode =
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[0].as_mode))
+ mi->b_filter_info[0] = mi->b_filter_info[2] =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[0].as_mode]);
+ else
+ mi->b_filter_info[0] = mi->b_filter_info[2] = 0;
+#endif
mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode =
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 1));
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[1].as_mode))
+ mi->b_filter_info[1] = mi->b_filter_info[3] = mbmi->filterbit =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[1].as_mode]);
+ else
+ mi->b_filter_info[1] = mi->b_filter_info[3] = mbmi->filterbit = 0;
+#endif
break;
case BLOCK_8X4:
mi->bmi[0].as_mode = mi->bmi[1].as_mode =
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[0].as_mode))
+ mi->b_filter_info[0] = mi->b_filter_info[1] =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[0].as_mode]);
+ else
+ mi->b_filter_info[0] = mi->b_filter_info[1] = 0;
+#endif
mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode =
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 2));
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[2].as_mode))
+ mi->b_filter_info[2] = mi->b_filter_info[3] = mbmi->filterbit =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[2].as_mode]);
+ else
+ mi->b_filter_info[2] = mi->b_filter_info[3] = mbmi->filterbit = 0;
+#endif
break;
default:
mbmi->mode = read_intra_mode(r,
get_y_mode_probs(mi, above_mi, left_mi, 0));
+#if CONFIG_FILTERINTRA
+ if (is_filter_enabled(mbmi->tx_size) && is_filter_allowed(mbmi->mode))
+ mbmi->filterbit = vp9_read(r,
+ cm->fc.filterintra_prob[mbmi->tx_size][mbmi->mode]);
+ else
+ mbmi->filterbit = 0;
+#endif
}
mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]);
+#if CONFIG_FILTERINTRA
+ if (is_filter_enabled(get_uv_tx_size(mbmi, &xd->plane[1])) &&
+ is_filter_allowed(mbmi->uv_mode))
+    mbmi->uv_filterbit = vp9_read(r, cm->fc.filterintra_prob
+        [get_uv_tx_size(mbmi, &xd->plane[1])][mbmi->uv_mode]);
+ else
+ mbmi->uv_filterbit = 0;
+#endif
}
static int read_mv_component(vp9_reader *r,
}
static void read_intra_block_mode_info(VP9_COMMON *const cm, MODE_INFO *mi,
+#if CONFIG_FILTERINTRA
+ MACROBLOCKD *const xd,
+#endif
vp9_reader *r) {
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mi->mbmi.sb_type;
switch (bsize) {
case BLOCK_4X4:
+#if !CONFIG_FILTERINTRA
for (i = 0; i < 4; ++i)
+#else
+ for (i = 0; i < 4; ++i) {
+#endif
mi->bmi[i].as_mode = read_intra_mode_y(cm, r, 0);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[i].as_mode)) {
+ mi->b_filter_info[i] =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[i].as_mode]);
+ cm->counts.filterintra[0][mi->bmi[i].as_mode]
+ [mi->b_filter_info[i]]++;
+ } else {
+ mi->b_filter_info[i] = 0;
+ }
+ }
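+      // As in the key-frame path, mirror the last 4x4 block's filter bit.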
+ mbmi->filterbit = mi->b_filter_info[3];
+#endif
mbmi->mode = mi->bmi[3].as_mode;
break;
case BLOCK_4X8:
mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, r, 0);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[0].as_mode)) {
+ mi->b_filter_info[0] = mi->b_filter_info[2] =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[0].as_mode]);
+ cm->counts.filterintra[0][mi->bmi[0].as_mode][mi->b_filter_info[0]]++;
+ } else {
+ mi->b_filter_info[0] = mi->b_filter_info[2] = 0;
+ }
+#endif
mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode =
read_intra_mode_y(cm, r, 0);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[1].as_mode)) {
+ mi->b_filter_info[1] = mi->b_filter_info[3] = mbmi->filterbit =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[1].as_mode]);
+ cm->counts.filterintra[0][mi->bmi[1].as_mode][mi->b_filter_info[1]]++;
+ } else {
+ mi->b_filter_info[1] = mi->b_filter_info[3] = mbmi->filterbit = 0;
+ }
+#endif
break;
case BLOCK_8X4:
mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, r, 0);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[0].as_mode)) {
+ mi->b_filter_info[0] = mi->b_filter_info[1] =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[0].as_mode]);
+ cm->counts.filterintra[0][mi->bmi[0].as_mode][mi->b_filter_info[0]]++;
+ } else {
+ mi->b_filter_info[0] = mi->b_filter_info[1] = 0;
+ }
+#endif
mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode =
read_intra_mode_y(cm, r, 0);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[2].as_mode)) {
+ mi->b_filter_info[2] = mi->b_filter_info[3] = mbmi->filterbit =
+ vp9_read(r, cm->fc.filterintra_prob[0][mi->bmi[2].as_mode]);
+ cm->counts.filterintra[0][mi->bmi[2].as_mode][mi->b_filter_info[2]]++;
+ } else {
+ mi->b_filter_info[2] = mi->b_filter_info[3] = mbmi->filterbit = 0;
+ }
+#endif
break;
default:
mbmi->mode = read_intra_mode_y(cm, r, size_group_lookup[bsize]);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mbmi->mode) && is_filter_enabled(mbmi->tx_size)) {
+ mbmi->filterbit = vp9_read(r,
+ cm->fc.filterintra_prob[mbmi->tx_size][mbmi->mode]);
+ cm->counts.filterintra[mbmi->tx_size][mbmi->mode][mbmi->filterbit]++;
+ } else {
+ mbmi->filterbit = 0;
+ }
+#endif
}
mbmi->uv_mode = read_intra_mode_uv(cm, r, mbmi->mode);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mbmi->uv_mode) &&
+ is_filter_enabled(get_uv_tx_size(mbmi, &xd->plane[1]))) {
+    mbmi->uv_filterbit = vp9_read(r, cm->fc.filterintra_prob
+        [get_uv_tx_size(mbmi, &xd->plane[1])][mbmi->uv_mode]);
+ cm->counts.filterintra[get_uv_tx_size(mbmi, &xd->plane[1])]
+ [mbmi->uv_mode][mbmi->uv_filterbit]++;
+ } else {
+ mbmi->uv_filterbit = 0;
+ }
+#endif
}
static INLINE int is_mv_valid(const MV *mv) {
if (inter_block)
read_inter_block_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);
else
- read_intra_block_mode_info(cm, mi, r);
+ read_intra_block_mode_info(cm, mi,
+#if CONFIG_FILTERINTRA
+ xd,
+#endif
+ r);
}
void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
if (!is_inter) {
if (bsize >= BLOCK_8X8) {
write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
+#if CONFIG_FILTERINTRA
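+    // The filter bit is written only when both the mode and the chosen
+    // transform size permit filtering; the decoder infers 0 otherwise.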
+ if (is_filter_allowed(mode) && is_filter_enabled(mbmi->tx_size)) {
+ vp9_write(w, mbmi->filterbit,
+ cm->fc.filterintra_prob[mbmi->tx_size][mode]);
+ }
+#endif
} else {
int idx, idy;
const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
for (idx = 0; idx < 2; idx += num_4x4_w) {
const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(b_mode)) {
+ vp9_write(w, mi->b_filter_info[idy * 2 + idx],
+ cm->fc.filterintra_prob[0][b_mode]);
+ }
+#endif
}
}
}
write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mbmi->uv_mode) &&
+ is_filter_enabled(get_uv_tx_size(mbmi, &xd->plane[1]))) {
+    vp9_write(w, mbmi->uv_filterbit, cm->fc.filterintra_prob
+        [get_uv_tx_size(mbmi, &xd->plane[1])][mbmi->uv_mode]);
+ }
+#endif
} else {
const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx];
if (bsize >= BLOCK_8X8) {
write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mbmi->mode) && is_filter_enabled(mbmi->tx_size))
+ vp9_write(w, mbmi->filterbit,
+ cm->fc.filterintra_prob[mbmi->tx_size][mbmi->mode]);
+#endif
} else {
const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
const int block = idy * 2 + idx;
write_intra_mode(w, mi->bmi[block].as_mode,
get_y_mode_probs(mi, above_mi, left_mi, block));
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[block].as_mode))
+ vp9_write(w, mi->b_filter_info[block],
+ cm->fc.filterintra_prob[0][mi->bmi[block].as_mode]);
+#endif
}
}
}
write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mbmi->uv_mode) &&
+ is_filter_enabled(get_uv_tx_size(mbmi, &xd->plane[1])))
+    vp9_write(w, mbmi->uv_filterbit, cm->fc.filterintra_prob
+        [get_uv_tx_size(mbmi, &xd->plane[1])][mbmi->uv_mode]);
+#endif
}
static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
}
}
-static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
+static void sum_intra_stats(FRAME_COUNTS *counts,
+#if CONFIG_FILTERINTRA
+                            const MACROBLOCKD *xd,
+#endif
+ const MODE_INFO *mi) {
const PREDICTION_MODE y_mode = mi->mbmi.mode;
const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
const BLOCK_SIZE bsize = mi->mbmi.sb_type;
+#if CONFIG_FILTERINTRA
+ const int uv_fbit = mi->mbmi.uv_filterbit;
+ int fbit = mi->mbmi.filterbit;
+#endif
if (bsize < BLOCK_8X8) {
int idx, idy;
const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
for (idy = 0; idy < 2; idy += num_4x4_h)
for (idx = 0; idx < 2; idx += num_4x4_w)
+#if CONFIG_FILTERINTRA
+ {
+#endif
++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mi->bmi[idy * 2 + idx].as_mode)) {
+ fbit = mi->b_filter_info[idy * 2 + idx];
+ ++counts->filterintra[0][mi->bmi[idy * 2 + idx].as_mode][fbit];
+ }
+ }
+#endif
} else {
++counts->y_mode[size_group_lookup[bsize]][y_mode];
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(y_mode) && is_filter_enabled(mi->mbmi.tx_size))
+ ++counts->filterintra[mi->mbmi.tx_size][y_mode][fbit];
+#endif
}
++counts->uv_mode[y_mode][uv_mode];
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(uv_mode) &&
+ is_filter_enabled(get_uv_tx_size(&(mi->mbmi), &xd->plane[1])))
+    ++counts->filterintra[get_uv_tx_size(&(mi->mbmi), &xd->plane[1])]
+                         [uv_mode][uv_fbit];
+#endif
}
static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) {
for (plane = 0; plane < MAX_MB_PLANE; ++plane)
vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);
if (output_enabled)
- sum_intra_stats(&cm->counts, mi);
+ sum_intra_stats(&cm->counts,
+#if CONFIG_FILTERINTRA
+ xd,
+#endif
+ mi);
vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
} else {
int ref;
const scan_order *scan_order;
TX_TYPE tx_type;
PREDICTION_MODE mode;
+#if CONFIG_FILTERINTRA
+ int fbit = 0;
+#endif
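+  // Note: in the CONFIG_VP9_HIGHBITDEPTH branch below fbit stays 0;
+  // vp9_predict_intra_block() also returns from its high-bitdepth path
+  // before reaching the filter predictor, so filter intra is 8-bit only.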
const int bwl = b_width_log2_lookup[plane_bsize];
const int diff_stride = 4 * (1 << bwl);
uint8_t *src, *dst;
scan_order = &vp9_default_scan_orders[TX_64X64];
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
vp9_predict_intra_block(xd, block >> 8, bwl, TX_64X64, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
scan_order = &vp9_default_scan_orders[TX_32X32];
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
vp9_predict_intra_block(xd, block >> 6, bwl, TX_32X32, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
scan_order = &vp9_scan_orders[TX_16X16][tx_type];
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
vp9_predict_intra_block(xd, block >> 4, bwl, TX_16X16, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
scan_order = &vp9_scan_orders[TX_8X8][tx_type];
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
vp9_predict_intra_block(xd, block >> 2, bwl, TX_8X8, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
scan_order = &vp9_scan_orders[TX_4X4][tx_type];
mode = plane == 0 ? get_y_mode(xd->mi[0].src_mi, block) : mbmi->uv_mode;
vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
return;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-
+#if CONFIG_FILTERINTRA
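+  // Pick the signalled filter bit for this block: per-4x4 for sub-8x8 luma,
+  // otherwise the block-level luma or chroma bit.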
+ if (mbmi->sb_type < BLOCK_8X8 && plane == 0)
+ fbit = xd->mi[0].b_filter_info[block];
+ else
+ fbit = plane == 0 ? mbmi->filterbit : mbmi->uv_filterbit;
+#endif
switch (tx_size) {
#if CONFIG_TX64X64
case TX_64X64:
scan_order = &vp9_default_scan_orders[TX_64X64];
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
vp9_predict_intra_block(xd, block >> 8, bwl, TX_64X64, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
scan_order = &vp9_default_scan_orders[TX_32X32];
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
vp9_predict_intra_block(xd, block >> 6, bwl, TX_32X32, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
scan_order = &vp9_scan_orders[TX_16X16][tx_type];
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
vp9_predict_intra_block(xd, block >> 4, bwl, TX_16X16, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
scan_order = &vp9_scan_orders[TX_8X8][tx_type];
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
vp9_predict_intra_block(xd, block >> 2, bwl, TX_8X8, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
scan_order = &vp9_scan_orders[TX_4X4][tx_type];
mode = plane == 0 ? get_y_mode(xd->mi[0].src_mi, block) : mbmi->uv_mode;
vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, i, j, plane);
xd->mi[0].src_mi->mbmi.mode = mode;
vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode,
+#if CONFIG_FILTERINTRA
+ 0,
+#endif
x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
0, 0, 0);
vp9_predict_intra_block(xd, block >> (2 * tx_size),
b_width_log2_lookup[plane_bsize],
tx_size, args->mode,
+#if CONFIG_FILTERINTRA
+ 0,
+#endif
p->src.buf, src_stride,
pd->dst.buf, dst_stride,
i, j, 0);
static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
PREDICTION_MODE *best_mode,
+#if CONFIG_FILTERINTRA
+ int *best_fbit,
+#endif
const int *bmode_costs,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int *bestrate, int *bestratey,
#if CONFIG_VP9_HIGHBITDEPTH
uint16_t best_dst16[8 * 8];
#endif
+#if CONFIG_FILTERINTRA
+ int mode_ext, fbit;
+#endif
assert(ib < 4);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
+#if !CONFIG_FILTERINTRA
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
int64_t this_rd;
int ratey = 0;
if (conditional_skipintra(mode, *best_mode))
continue;
}
+#else
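+  // Bit 0 of mode_ext is the filter bit; the remaining bits are the
+  // prediction mode.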
+ for (mode_ext = 2 * DC_PRED; mode_ext <= 2 * TM_PRED + 1; ++mode_ext) {
+ int64_t this_rd;
+ int ratey = 0;
+ int64_t distortion = 0;
+ int rate;
+
+ fbit = mode_ext & 1;
+ mode = mode_ext >> 1;
+ if (fbit && !is_filter_allowed(mode))
+ continue;
+
+ rate = bmode_costs[mode];
+ if (is_filter_allowed(mode))
+ rate += vp9_cost_bit(cpi->common.fc.filterintra_prob[0][mode], fbit);
+#endif
vpx_memcpy(tempa, ta, sizeof(ta));
vpx_memcpy(templ, tl, sizeof(tl));
p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0].src_mi->bmi[block].as_mode = mode;
+#if CONFIG_FILTERINTRA
+  xd->mi[0].src_mi->b_filter_info[block] = fbit;
+#endif
vp9_predict_intra_block(xd, block, 1,
TX_4X4, mode,
+#if CONFIG_FILTERINTRA
+ fbit,
+#endif
x->skip_encode ? src : dst,
x->skip_encode ? src_stride : dst_stride,
dst, dst_stride, idx, idy, 0);
*bestdistortion = distortion;
best_rd = this_rd;
*best_mode = mode;
+#if CONFIG_FILTERINTRA
+ *best_fbit = fbit;
+#endif
vpx_memcpy(a, tempa, sizeof(tempa));
vpx_memcpy(l, templ, sizeof(templ));
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
PREDICTION_MODE best_mode = DC_PRED;
+#if CONFIG_FILTERINTRA
+ int best_fbit = 0;
+#endif
int r = INT_MAX, ry = INT_MAX;
int64_t d = INT64_MAX, this_rd = INT64_MAX;
i = idy * 2 + idx;
bmode_costs = cpi->y_mode_costs[A][L];
}
- this_rd = rd_pick_intra4x4block(cpi, mb, i, &best_mode, bmode_costs,
+ this_rd = rd_pick_intra4x4block(cpi, mb, i, &best_mode,
+#if CONFIG_FILTERINTRA
+ &best_fbit,
+#endif
+ bmode_costs,
t_above + idx, t_left + idy, &r, &ry, &d,
bsize, best_rd - total_rd);
if (this_rd >= best_rd - total_rd)
mic->bmi[i + j * 2].as_mode = best_mode;
for (j = 1; j < num_4x4_blocks_wide; ++j)
mic->bmi[i + j].as_mode = best_mode;
+#if CONFIG_FILTERINTRA
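+      // Replicate the winning filter bit across the 4x4 blocks covered by
+      // this partition, mirroring the as_mode replication above.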
+ mic->b_filter_info[i] = best_fbit;
+ for (j = 1; j < num_4x4_blocks_high; ++j)
+ mic->b_filter_info[i + j * 2] = best_fbit;
+ for (j = 1; j < num_4x4_blocks_wide; ++j)
+ mic->b_filter_info[i + j] = best_fbit;
+#endif
if (total_rd >= best_rd)
return INT64_MAX;
*rate_y = tot_rate_y;
*distortion = total_distortion;
mic->mbmi.mode = mic->bmi[3].as_mode;
+#if CONFIG_FILTERINTRA
+ mic->mbmi.filterbit = mic->b_filter_info[3];
+#endif
return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
}
TX_SIZE best_tx = TX_4X4;
int i;
int *bmode_costs;
+#if CONFIG_FILTERINTRA
+ int mode_ext, fbit, fbit_selected = 0;
+#endif
const MODE_INFO *above_mi = xd->mi[-xd->mi_stride].src_mi;
const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
/* Y Search for intra prediction mode */
+#if !CONFIG_FILTERINTRA
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
int64_t local_tx_cache[TX_MODES];
mic->mbmi.mode = mode;
+#else
+  // mode_ext packs (mode << 1) | filter bit, as in rd_pick_intra4x4block().
+  for (mode_ext = 2 * DC_PRED; mode_ext <= 2 * TM_PRED + 1; mode_ext++) {
+    int64_t local_tx_cache[TX_MODES];
+    fbit = mode_ext & 1;
+    mode = mode_ext >> 1;
+    if (fbit && !is_filter_allowed(mode))
+      continue;
+
+    mic->mbmi.mode = mode;
+    mic->mbmi.filterbit = fbit;
+#endif
super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
&s, NULL, bsize, local_tx_cache, best_rd);
continue;
this_rate = this_rate_tokenonly + bmode_costs[mode];
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mode) && is_filter_enabled(mic->mbmi.tx_size))
+ this_rate += vp9_cost_bit(cpi->common.fc.filterintra_prob
+ [mic->mbmi.tx_size][mode], fbit);
+#endif
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
if (this_rd < best_rd) {
mode_selected = mode;
+#if CONFIG_FILTERINTRA
+ fbit_selected = fbit;
+#endif
best_rd = this_rd;
best_tx = mic->mbmi.tx_size;
*rate = this_rate;
}
mic->mbmi.mode = mode_selected;
+#if CONFIG_FILTERINTRA
+ if (is_filter_enabled(best_tx))
+ mic->mbmi.filterbit = fbit_selected;
+ else
+ mic->mbmi.filterbit = 0;
+#endif
mic->mbmi.tx_size = best_tx;
return best_rd;
int64_t this_distortion, this_sse;
+#if CONFIG_FILTERINTRA
+  int mode_ext, fbit = 0, fbit_selected = 0;
+#endif
vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+#if CONFIG_FILTERINTRA
+ for (mode_ext = 2 * DC_PRED; mode_ext <= 2 * TM_PRED + 1; mode_ext++) {
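+    // mode_ext packs (mode << 1) | filter bit; the filter predictor is only
+    // tried when the chroma transform size allows filtering.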
+ mode = mode_ext >> 1;
+ fbit = mode_ext & 1;
+
+ if (fbit && !is_filter_allowed(mode))
+ continue;
+ if (fbit &&
+        !is_filter_enabled(get_uv_tx_size(&(x->e_mbd.mi[0].mbmi),
+                                          &xd->plane[1])))
+ continue;
+
+    xd->mi[0].src_mi->mbmi.uv_filterbit = fbit;
+#else
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
continue;
+#endif
xd->mi[0].src_mi->mbmi.uv_mode = mode;
continue;
this_rate = this_rate_tokenonly +
cpi->intra_uv_mode_cost[cpi->common.frame_type][mode];
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mode) &&
+        is_filter_enabled(get_uv_tx_size(&(x->e_mbd.mi[0].mbmi),
+                                         &xd->plane[1])))
+ this_rate += vp9_cost_bit(cpi->common.fc.filterintra_prob
+ [get_uv_tx_size(&(x->e_mbd.mi[0].mbmi), &xd->plane[1])][mode], fbit);
+#endif
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
if (this_rd < best_rd) {
mode_selected = mode;
+#if CONFIG_FILTERINTRA
+ fbit_selected = fbit;
+#endif
best_rd = this_rd;
*rate = this_rate;
*rate_tokenonly = this_rate_tokenonly;
}
xd->mi[0].src_mi->mbmi.uv_mode = mode_selected;
+#if CONFIG_FILTERINTRA
+  xd->mi[0].src_mi->mbmi.uv_filterbit = fbit_selected;
+#endif
return best_rd;
}
BLOCK_SIZE bsize, TX_SIZE max_tx_size,
int *rate_uv, int *rate_uv_tokenonly,
int64_t *dist_uv, int *skip_uv,
+#if CONFIG_FILTERINTRA
+ int *fbit_uv,
+#endif
PREDICTION_MODE *mode_uv) {
MACROBLOCK *const x = &cpi->mb;
bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);
}
*mode_uv = x->e_mbd.mi[0].src_mi->mbmi.uv_mode;
+#if CONFIG_FILTERINTRA
+ *fbit_uv = x->e_mbd.mi[0].src_mi->mbmi.uv_filterbit;
+#endif
}
static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
int64_t dist_uv[TX_SIZES];
int skip_uv[TX_SIZES];
PREDICTION_MODE mode_uv[TX_SIZES];
+#if CONFIG_FILTERINTRA
+ int fbit_uv[TX_SIZES];
+#endif
const int intra_cost_penalty = vp9_get_intra_cost_penalty(
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
int best_skip2 = 0;
if (ref_frame == INTRA_FRAME) {
TX_SIZE uv_tx;
struct macroblockd_plane *const pd = &xd->plane[1];
+#if CONFIG_FILTERINTRA
+      mbmi->filterbit = 0;
+#endif
vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
NULL, bsize, tx_cache, best_rd);
+#if CONFIG_FILTERINTRA
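+      // The luma RD pass above used the regular predictor (filterbit == 0).
+      // Re-run it with the filter predictor and keep whichever of the two
+      // gives the lower cost, including the cost of the signalled bit.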
+ if (is_filter_allowed(this_mode) && is_filter_enabled(mbmi->tx_size)) {
+ int rate_y_tmp, rate0, rate1, skippable_tmp;
+ int64_t distortion_y_tmp, tx_cache_tmp[TX_MODES];
+ int tx_size_tmp = mbmi->tx_size;
+        mbmi->filterbit = 1;
+
+ super_block_yrd(cpi, x, &rate_y_tmp, &distortion_y_tmp,
+ &skippable_tmp, NULL, bsize, tx_cache_tmp,
+ best_rd);
+
+ if (rate_y == INT_MAX && rate_y_tmp == INT_MAX)
+ continue;
+
+ rate0 = vp9_cost_bit(
+ cm->fc.filterintra_prob[tx_size_tmp][mbmi->mode], 0);
+ rate1 = vp9_cost_bit(
+ cm->fc.filterintra_prob[mbmi->tx_size][mbmi->mode], 1);
+
+        if (rate_y_tmp == INT_MAX ||
+            (rate_y != INT_MAX &&
+             RDCOST(x->rdmult, x->rddiv, rate_y + rate0, distortion_y) <=
+             RDCOST(x->rdmult, x->rddiv, rate_y_tmp + rate1,
+                    distortion_y_tmp))) {
+ mbmi->filterbit = 0;
+ mbmi->tx_size = tx_size_tmp;
+ } else {
+ rate_y = rate_y_tmp;
+ distortion_y = distortion_y_tmp;
+ skippable = skippable_tmp;
+ vpx_memcpy(tx_cache, tx_cache_tmp, TX_MODES * sizeof(int64_t));
+ }
+ }
+#endif
+
if (rate_y == INT_MAX)
continue;
if (rate_uv_intra[uv_tx] == INT_MAX) {
choose_intra_uv_mode(cpi, ctx, bsize, uv_tx,
&rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx],
- &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]);
+ &dist_uv[uv_tx], &skip_uv[uv_tx],
+#if CONFIG_FILTERINTRA
+ &fbit_uv[uv_tx],
+#endif
+ &mode_uv[uv_tx]);
}
rate_uv = rate_uv_tokenonly[uv_tx];
distortion_uv = dist_uv[uv_tx];
skippable = skippable && skip_uv[uv_tx];
mbmi->uv_mode = mode_uv[uv_tx];
+#if CONFIG_FILTERINTRA
+ mbmi->uv_filterbit = fbit_uv[uv_tx];
+#endif
rate2 = rate_y + cpi->mbmode_cost[mbmi->mode] + rate_uv_intra[uv_tx];
+#if CONFIG_FILTERINTRA
+ if (is_filter_allowed(mbmi->mode) && is_filter_enabled(mbmi->tx_size))
+ rate2 += vp9_cost_bit(
+            cm->fc.filterintra_prob[mbmi->tx_size][mbmi->mode],
+            mbmi->filterbit);
+#endif
if (this_mode != DC_PRED && this_mode != TM_PRED)
rate2 += intra_cost_penalty;
distortion2 = distortion_y + distortion_uv;
int64_t dist_uv;
int skip_uv;
PREDICTION_MODE mode_uv = DC_PRED;
+#if CONFIG_FILTERINTRA
+ int fbit_uv = 0;
+#endif
const int intra_cost_penalty = vp9_get_intra_cost_penalty(
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
int_mv seg_mvs[4][MAX_REF_FRAMES];
&rate_uv_intra,
&rate_uv_tokenonly,
&dist_uv, &skip_uv,
+#if CONFIG_FILTERINTRA
+ &fbit_uv,
+#endif
&mode_uv);
}
rate2 += rate_uv_intra;
distortion2 += dist_uv;
distortion_uv = dist_uv;
mbmi->uv_mode = mode_uv;
+#if CONFIG_FILTERINTRA
+ mbmi->uv_filterbit = fbit_uv;
+#endif
} else {
int rate;
int64_t distortion;