#include "vp10/common/loopfilter.h"
#include "vp10/common/onyxc_int.h"
#include "vp10/common/reconinter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
[mode_lf_lut[mbmi->mode]];
}
-void vp10_loop_filter_init(VP9_COMMON *cm) {
+void vp10_loop_filter_init(VP10_COMMON *cm) {
loop_filter_info_n *lfi = &cm->lf_info;
struct loopfilter *lf = &cm->lf;
int lvl;
memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
}
-void vp10_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) {
+void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
int seg_id;
// n_shift is the multiplier for lf_deltas
// the multiplier is 1 when filter_lvl is between 0 and 31;
// 2 when filter_lvl is between 32 and 63.
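// (e.g. default_filt_lvl = 40 falls in the 32..63 range, so each
// lf_delta is scaled by 2 before being added to the base level.)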
lfi0->hev_thr, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
} else if (mask_8x8_0 & 1) {
- vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr,
- 1);
+ vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
} else {
vpx_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
- lfi1->hev_thr, 1);
+ lfi1->hev_thr);
}
}
lfi0->hev_thr, lfi1->mblim, lfi1->lim,
lfi1->hev_thr);
} else if (mask_4x4_0 & 1) {
- vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr,
- 1);
+ vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
} else {
vpx_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
- lfi1->hev_thr, 1);
+ lfi1->hev_thr);
}
}
lfi1->hev_thr);
} else if (mask_4x4_int_0 & 1) {
vpx_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
- lfi0->hev_thr, 1);
+ lfi0->hev_thr);
} else {
vpx_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
- lfi1->hev_thr, 1);
+ lfi1->hev_thr);
}
}
}
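// Note on the signature change applied throughout this file: the
// single-edge 4- and 8-tap kernels drop their trailing count argument.
// A sketch of the assumed vpx_dsp prototypes (illustrative only):
//   before: void vpx_lpf_vertical_8(uint8_t *s, int pitch,
//                                   const uint8_t *blimit,
//                                   const uint8_t *limit,
//                                   const uint8_t *thresh, int count);
//   after:  void vpx_lpf_vertical_8(uint8_t *s, int pitch,
//                                   const uint8_t *blimit,
//                                   const uint8_t *limit,
//                                   const uint8_t *thresh);
// Each call now filters exactly one edge; adjacent edges that used to
// share a call go through the _dual variants instead.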
lfi1->hev_thr, bd);
} else if (mask_8x8_0 & 1) {
vpx_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
- lfi0->hev_thr, 1, bd);
+ lfi0->hev_thr, bd);
} else {
vpx_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
- lfi1->lim, lfi1->hev_thr, 1, bd);
+ lfi1->lim, lfi1->hev_thr, bd);
}
}
lfi1->hev_thr, bd);
} else if (mask_4x4_0 & 1) {
vpx_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
- lfi0->hev_thr, 1, bd);
+ lfi0->hev_thr, bd);
} else {
vpx_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
- lfi1->lim, lfi1->hev_thr, 1, bd);
+ lfi1->lim, lfi1->hev_thr, bd);
}
}
lfi1->hev_thr, bd);
} else if (mask_4x4_int_0 & 1) {
vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
- lfi0->hev_thr, 1, bd);
+ lfi0->hev_thr, bd);
} else {
vpx_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
- lfi1->lim, lfi1->hev_thr, 1, bd);
+ lfi1->lim, lfi1->hev_thr, bd);
}
}
}
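// The high-bitdepth paths mirror the same change: the count argument
// is dropped while the trailing bit-depth parameter bd is kept.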
} else {
if (mask_4x4_int & 1)
vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1);
+ lfi->hev_thr);
else if (mask_4x4_int & 2)
vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
- lfin->lim, lfin->hev_thr, 1);
+ lfin->lim, lfin->hev_thr);
}
count = 2;
} else {
- vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+ vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
if (mask_4x4_int & 1)
vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1);
+ lfi->hev_thr);
}
} else if (mask_4x4 & 1) {
if ((mask_4x4 & 3) == 3) {
} else {
if (mask_4x4_int & 1)
vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1);
+ lfi->hev_thr);
else if (mask_4x4_int & 2)
vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
- lfin->lim, lfin->hev_thr, 1);
+ lfin->lim, lfin->hev_thr);
}
count = 2;
} else {
- vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+ vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
if (mask_4x4_int & 1)
vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1);
+ lfi->hev_thr);
}
} else if (mask_4x4_int & 1) {
vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1);
+ lfi->hev_thr);
}
}
s += 8 * count;
} else {
if (mask_4x4_int & 1) {
vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
- lfi->lim, lfi->hev_thr, 1, bd);
+ lfi->lim, lfi->hev_thr, bd);
} else if (mask_4x4_int & 2) {
vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
- lfin->lim, lfin->hev_thr, 1, bd);
+ lfin->lim, lfin->hev_thr, bd);
}
}
count = 2;
} else {
vpx_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1, bd);
+ lfi->hev_thr, bd);
if (mask_4x4_int & 1) {
vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
- lfi->lim, lfi->hev_thr, 1, bd);
+ lfi->lim, lfi->hev_thr, bd);
}
}
} else if (mask_4x4 & 1) {
} else {
if (mask_4x4_int & 1) {
vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
- lfi->lim, lfi->hev_thr, 1, bd);
+ lfi->lim, lfi->hev_thr, bd);
} else if (mask_4x4_int & 2) {
vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
- lfin->lim, lfin->hev_thr, 1, bd);
+ lfin->lim, lfin->hev_thr, bd);
}
}
count = 2;
} else {
vpx_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1, bd);
+ lfi->hev_thr, bd);
if (mask_4x4_int & 1) {
vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
- lfi->lim, lfi->hev_thr, 1, bd);
+ lfi->lim, lfi->hev_thr, bd);
}
}
} else if (mask_4x4_int & 1) {
vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1, bd);
+ lfi->hev_thr, bd);
}
}
s += 8 * count;
uint64_t *const int_4x4_y = &lfm->int_4x4_y;
uint16_t *const left_uv = &lfm->left_uv[tx_size_uv];
uint16_t *const above_uv = &lfm->above_uv[tx_size_uv];
+#if CONFIG_MISC_FIXES
+ uint16_t *const int_4x4_uv = &lfm->left_int_4x4_uv;
+#else
uint16_t *const int_4x4_uv = &lfm->int_4x4_uv;
+#endif
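// (Under CONFIG_MISC_FIXES the single int_4x4_uv mask is split into
// left_int_4x4_uv and above_int_4x4_uv; this helper keeps writing
// through the left mask here.)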
int i;
// If the filter level is 0 we don't loop filter.
// If the block has no coefficients and is not intra, we skip applying
// the loop filter on block edges.
+#if CONFIG_MISC_FIXES
+ if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi))
+ return;
+#else
if (mbmi->skip && is_inter_block(mbmi))
return;
+#endif
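// (With CONFIG_MISC_FIXES, inter blocks signalled as having no
// coefficients are skipped just like blocks with the skip flag set.)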
// Here we are adding a mask for the transform size. The transform
// size mask is set to be correct for a 64x64 prediction block size. We
*above_y |= above_prediction_mask[block_size] << shift_y;
*left_y |= left_prediction_mask[block_size] << shift_y;
+#if CONFIG_MISC_FIXES
+ if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi))
+ return;
+#else
if (mbmi->skip && is_inter_block(mbmi))
return;
+#endif
*above_y |= (size_mask[block_size] &
above_64x64_txform_mask[tx_size_y]) << shift_y;
// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
// TODO(JBB): This function only works for yv12.
-void vp10_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
+void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
MODE_INFO **mi, const int mode_info_stride,
LOOP_FILTER_MASK *lfm) {
int idx_32, idx_16, idx_8;
lfm->above_uv[i] &= mask_uv;
}
lfm->int_4x4_y &= mask_y;
+#if CONFIG_MISC_FIXES
+ lfm->above_int_4x4_uv = lfm->left_int_4x4_uv & mask_uv;
+#else
lfm->int_4x4_uv &= mask_uv;
+#endif
// We don't apply a wide loop filter on the last uv block row. If set,
// apply the shorter one instead.
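// (i.e. an edge that would have used the wide uv filter in the last
// row is filtered with the next shorter transform filter instead.)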
lfm->above_uv[i] &= mask_uv;
}
lfm->int_4x4_y &= mask_y;
+#if CONFIG_MISC_FIXES
+ lfm->left_int_4x4_uv &= mask_uv_int;
+#else
lfm->int_4x4_uv &= mask_uv_int;
+#endif
// We don't apply a wide loop filter on the last uv column. If set,
// apply the shorter one instead.
assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_8X8]));
assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4]));
assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4]));
+#if CONFIG_MISC_FIXES
+ assert(!(lfm->left_int_4x4_uv & lfm->left_uv[TX_16X16]));
+#else
assert(!(lfm->int_4x4_uv & lfm->left_uv[TX_16X16]));
+#endif
assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8]));
assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4]));
assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4]));
assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8]));
assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4]));
assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4]));
+#if CONFIG_MISC_FIXES
+ assert(!(lfm->above_int_4x4_uv & lfm->above_uv[TX_16X16]));
+#else
assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16]));
+#endif
}
static void filter_selectively_vert(uint8_t *s, int pitch,
if (mask_16x16 & 1) {
vpx_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
} else if (mask_8x8 & 1) {
- vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+ vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
} else if (mask_4x4 & 1) {
- vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+ vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
}
}
if (mask_4x4_int & 1)
- vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+ vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
s += 8;
lfl += 1;
mask_16x16 >>= 1;
lfi->hev_thr, bd);
} else if (mask_8x8 & 1) {
vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1, bd);
+ lfi->hev_thr, bd);
} else if (mask_4x4 & 1) {
vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1, bd);
+ lfi->hev_thr, bd);
}
}
if (mask_4x4_int & 1)
vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
- lfi->hev_thr, 1, bd);
+ lfi->hev_thr, bd);
s += 8;
lfl += 1;
mask_16x16 >>= 1;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-void vp10_filter_block_plane_non420(VP9_COMMON *cm,
+void vp10_filter_block_plane_non420(VP10_COMMON *cm,
struct macroblockd_plane *plane,
MODE_INFO **mi_8x8,
int mi_row, int mi_col) {
}
}
-void vp10_filter_block_plane_ss00(VP9_COMMON *const cm,
+void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
struct macroblockd_plane *const plane,
int mi_row,
LOOP_FILTER_MASK *lfm) {
}
}
-void vp10_filter_block_plane_ss11(VP9_COMMON *const cm,
+void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
struct macroblockd_plane *const plane,
int mi_row,
LOOP_FILTER_MASK *lfm) {
uint16_t mask_16x16 = lfm->left_uv[TX_16X16];
uint16_t mask_8x8 = lfm->left_uv[TX_8X8];
uint16_t mask_4x4 = lfm->left_uv[TX_4X4];
+#if CONFIG_MISC_FIXES
+ uint16_t mask_4x4_int = lfm->left_int_4x4_uv;
+#else
uint16_t mask_4x4_int = lfm->int_4x4_uv;
+#endif
assert(plane->subsampling_x == 1 && plane->subsampling_y == 1);
mask_16x16 = lfm->above_uv[TX_16X16];
mask_8x8 = lfm->above_uv[TX_8X8];
mask_4x4 = lfm->above_uv[TX_4X4];
+#if CONFIG_MISC_FIXES
+ mask_4x4_int = lfm->above_int_4x4_uv;
+#else
mask_4x4_int = lfm->int_4x4_uv;
+#endif
for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) {
const int skip_border_4x4_r = mi_row + r == cm->mi_rows - 1;
}
void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
- VP9_COMMON *cm,
+ VP10_COMMON *cm,
struct macroblockd_plane planes[MAX_MB_PLANE],
int start, int stop, int y_only) {
const int num_planes = y_only ? 1 : MAX_MB_PLANE;
}
void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame,
- VP9_COMMON *cm, MACROBLOCKD *xd,
+ VP10_COMMON *cm, MACROBLOCKD *xd,
int frame_filter_level,
int y_only, int partial_frame) {
int start_mi_row, end_mi_row, mi_rows_to_filter;
if (partial_frame && cm->mi_rows > 8) {
start_mi_row = cm->mi_rows >> 1;
start_mi_row &= 0xfffffff8;
- mi_rows_to_filter = MAX(cm->mi_rows / 8, 8);
+ mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
}
end_mi_row = start_mi_row + mi_rows_to_filter;
vp10_loop_filter_frame_init(cm, frame_filter_level);
void vp10_loop_filter_data_reset(
LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
- struct VP9Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]) {
+ struct VP10Common *cm,
+ const struct macroblockd_plane planes[MAX_MB_PLANE]) {
lf_data->frame_buffer = frame_buffer;
lf_data->cm = cm;
lf_data->start = 0;