X-Git-Url: https://granicus.if.org/sourcecode?a=blobdiff_plain;f=vp10%2Fcommon%2Floopfilter.c;h=a659aaea416d4e40b2e543672b05c392114af647;hb=e7a23d703bc8f62fb387d71c0f70121253dede30;hp=f1083741c0a37c656ea03a67853e6439da1fee56;hpb=4dcbf0a809d63752e784abe19ac2c04c95561235;p=libvpx

diff --git a/vp10/common/loopfilter.c b/vp10/common/loopfilter.c
index f1083741c..a659aaea4 100644
--- a/vp10/common/loopfilter.c
+++ b/vp10/common/loopfilter.c
@@ -13,6 +13,7 @@
 #include "vp10/common/loopfilter.h"
 #include "vp10/common/onyxc_int.h"
 #include "vp10/common/reconinter.h"
+#include "vpx_dsp/vpx_dsp_common.h"
 #include "vpx_mem/vpx_mem.h"
 #include "vpx_ports/mem.h"
 
@@ -344,11 +345,10 @@ static void filter_selectively_vert_row2(int subsampling_factor,
                                 lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                 lfi1->hev_thr);
       } else if (mask_8x8_0 & 1) {
-        vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr,
-                           1);
+        vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
       } else {
         vpx_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
-                           lfi1->hev_thr, 1);
+                           lfi1->hev_thr);
       }
     }
 
@@ -358,11 +358,10 @@ static void filter_selectively_vert_row2(int subsampling_factor,
                                 lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                 lfi1->hev_thr);
       } else if (mask_4x4_0 & 1) {
-        vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr,
-                           1);
+        vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
       } else {
         vpx_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
-                           lfi1->hev_thr, 1);
+                           lfi1->hev_thr);
       }
     }
 
@@ -373,10 +372,10 @@ static void filter_selectively_vert_row2(int subsampling_factor,
                                 lfi1->hev_thr);
       } else if (mask_4x4_int_0 & 1) {
         vpx_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
-                           lfi0->hev_thr, 1);
+                           lfi0->hev_thr);
       } else {
         vpx_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
-                           lfi1->hev_thr, 1);
+                           lfi1->hev_thr);
       }
     }
   }
@@ -445,10 +444,10 @@ static void highbd_filter_selectively_vert_row2(int subsampling_factor,
                                        lfi1->hev_thr, bd);
       } else if (mask_8x8_0 & 1) {
         vpx_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
-                                  lfi0->hev_thr, 1, bd);
+                                  lfi0->hev_thr, bd);
       } else {
         vpx_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
-                                  lfi1->lim, lfi1->hev_thr, 1, bd);
+                                  lfi1->lim, lfi1->hev_thr, bd);
       }
     }
 
@@ -459,10 +458,10 @@ static void highbd_filter_selectively_vert_row2(int subsampling_factor,
                                        lfi1->hev_thr, bd);
       } else if (mask_4x4_0 & 1) {
         vpx_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
-                                  lfi0->hev_thr, 1, bd);
+                                  lfi0->hev_thr, bd);
       } else {
         vpx_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
-                                  lfi1->lim, lfi1->hev_thr, 1, bd);
+                                  lfi1->lim, lfi1->hev_thr, bd);
       }
     }
 
@@ -473,10 +472,10 @@ static void highbd_filter_selectively_vert_row2(int subsampling_factor,
                                        lfi1->hev_thr, bd);
       } else if (mask_4x4_int_0 & 1) {
         vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
-                                  lfi0->hev_thr, 1, bd);
+                                  lfi0->hev_thr, bd);
       } else {
         vpx_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
-                                  lfi1->lim, lfi1->hev_thr, 1, bd);
+                                  lfi1->lim, lfi1->hev_thr, bd);
       }
     }
   }
@@ -536,18 +535,18 @@ static void filter_selectively_horiz(uint8_t *s, int pitch,
           } else {
             if (mask_4x4_int & 1)
               vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
-                                   lfi->hev_thr, 1);
+                                   lfi->hev_thr);
             else if (mask_4x4_int & 2)
               vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
-                                   lfin->lim, lfin->hev_thr, 1);
+                                   lfin->lim, lfin->hev_thr);
           }
           count = 2;
         } else {
-          vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+          vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
 
           if (mask_4x4_int & 1)
             vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
-                                 lfi->hev_thr, 1);
+                                 lfi->hev_thr);
         }
       } else if (mask_4x4 & 1) {
         if ((mask_4x4 & 3) == 3) {
@@ -564,22 +563,22 @@ static void filter_selectively_horiz(uint8_t *s, int pitch,
           } else {
             if (mask_4x4_int & 1)
               vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
-                                   lfi->hev_thr, 1);
+                                   lfi->hev_thr);
            else if (mask_4x4_int & 2)
              vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
-                                  lfin->lim, lfin->hev_thr, 1);
+                                  lfin->lim, lfin->hev_thr);
           }
           count = 2;
         } else {
-          vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+          vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
 
           if (mask_4x4_int & 1)
             vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
-                                 lfi->hev_thr, 1);
+                                 lfi->hev_thr);
         }
       } else if (mask_4x4_int & 1) {
         vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
-                             lfi->hev_thr, 1);
+                             lfi->hev_thr);
       }
     }
     s += 8 * count;
@@ -634,20 +633,20 @@ static void highbd_filter_selectively_horiz(uint16_t *s, int pitch,
           } else {
             if (mask_4x4_int & 1) {
               vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
-                                          lfi->lim, lfi->hev_thr, 1, bd);
+                                          lfi->lim, lfi->hev_thr, bd);
             } else if (mask_4x4_int & 2) {
               vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
-                                          lfin->lim, lfin->hev_thr, 1, bd);
+                                          lfin->lim, lfin->hev_thr, bd);
             }
           }
           count = 2;
         } else {
           vpx_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
-                                      lfi->hev_thr, 1, bd);
+                                      lfi->hev_thr, bd);
 
           if (mask_4x4_int & 1) {
             vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
-                                        lfi->lim, lfi->hev_thr, 1, bd);
+                                        lfi->lim, lfi->hev_thr, bd);
           }
         }
       } else if (mask_4x4 & 1) {
@@ -666,25 +665,25 @@ static void highbd_filter_selectively_horiz(uint16_t *s, int pitch,
          } else {
            if (mask_4x4_int & 1) {
              vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
-                                          lfi->lim, lfi->hev_thr, 1, bd);
+                                          lfi->lim, lfi->hev_thr, bd);
            } else if (mask_4x4_int & 2) {
              vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
-                                          lfin->lim, lfin->hev_thr, 1, bd);
+                                          lfin->lim, lfin->hev_thr, bd);
            }
          }
          count = 2;
        } else {
          vpx_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
-                                     lfi->hev_thr, 1, bd);
+                                     lfi->hev_thr, bd);
 
          if (mask_4x4_int & 1) {
            vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
-                                       lfi->lim, lfi->hev_thr, 1, bd);
+                                       lfi->lim, lfi->hev_thr, bd);
          }
        }
      } else if (mask_4x4_int & 1) {
        vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
-                                   lfi->hev_thr, 1, bd);
+                                   lfi->hev_thr, bd);
      }
    }
    s += 8 * count;
@@ -718,7 +717,11 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
   uint64_t *const int_4x4_y = &lfm->int_4x4_y;
   uint16_t *const left_uv = &lfm->left_uv[tx_size_uv];
   uint16_t *const above_uv = &lfm->above_uv[tx_size_uv];
+#if CONFIG_MISC_FIXES
+  uint16_t *const int_4x4_uv = &lfm->left_int_4x4_uv;
+#else
   uint16_t *const int_4x4_uv = &lfm->int_4x4_uv;
+#endif
   int i;
 
   // If filter level is 0 we don't loop filter.
@@ -753,8 +756,13 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
 
   // If the block has no coefficients and is not intra we skip applying
   // the loop filter on block edges.
+#if CONFIG_MISC_FIXES
+  if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi))
+    return;
+#else
   if (mbmi->skip && is_inter_block(mbmi))
     return;
+#endif
 
   // Here we are adding a mask for the transform size. The transform
   // size mask is set to be correct for a 64x64 prediction block size. We
@@ -811,8 +819,13 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
   *above_y |= above_prediction_mask[block_size] << shift_y;
   *left_y |= left_prediction_mask[block_size] << shift_y;
 
+#if CONFIG_MISC_FIXES
+  if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi))
+    return;
+#else
   if (mbmi->skip && is_inter_block(mbmi))
     return;
+#endif
 
   *above_y |= (size_mask[block_size] &
                above_64x64_txform_mask[tx_size_y]) << shift_y;
@@ -1004,7 +1017,11 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
       lfm->above_uv[i] &= mask_uv;
     }
     lfm->int_4x4_y &= mask_y;
+#if CONFIG_MISC_FIXES
+    lfm->above_int_4x4_uv = lfm->left_int_4x4_uv & mask_uv;
+#else
     lfm->int_4x4_uv &= mask_uv;
+#endif
 
     // We don't apply a wide loop filter on the last uv block row. If set
    // apply the shorter one instead.
@@ -1038,7 +1055,11 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
       lfm->above_uv[i] &= mask_uv;
     }
     lfm->int_4x4_y &= mask_y;
+#if CONFIG_MISC_FIXES
+    lfm->left_int_4x4_uv &= mask_uv_int;
+#else
     lfm->int_4x4_uv &= mask_uv_int;
+#endif
 
     // We don't apply a wide loop filter on the last uv column. If set
     // apply the shorter one instead.
@@ -1068,7 +1089,11 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
   assert(!(lfm->left_uv[TX_16X16]&lfm->left_uv[TX_8X8]));
   assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4]));
   assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4]));
+#if CONFIG_MISC_FIXES
+  assert(!(lfm->left_int_4x4_uv & lfm->left_uv[TX_16X16]));
+#else
   assert(!(lfm->int_4x4_uv & lfm->left_uv[TX_16X16]));
+#endif
   assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8]));
   assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4]));
   assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4]));
@@ -1076,7 +1101,11 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
   assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8]));
   assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4]));
   assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4]));
+#if CONFIG_MISC_FIXES
+  assert(!(lfm->above_int_4x4_uv & lfm->above_uv[TX_16X16]));
+#else
   assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16]));
+#endif
 }
 
 static void filter_selectively_vert(uint8_t *s, int pitch,
@@ -1096,13 +1125,13 @@ static void filter_selectively_vert(uint8_t *s, int pitch,
       if (mask_16x16 & 1) {
         vpx_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       } else if (mask_8x8 & 1) {
-        vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+        vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       } else if (mask_4x4 & 1) {
-        vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+        vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
       }
     }
     if (mask_4x4_int & 1)
-      vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
+      vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
     s += 8;
     lfl += 1;
     mask_16x16 >>= 1;
@@ -1132,15 +1161,15 @@ static void highbd_filter_selectively_vert(uint16_t *s, int pitch,
                                    lfi->hev_thr, bd);
       } else if (mask_8x8 & 1) {
         vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim,
-                                  lfi->hev_thr, 1, bd);
+                                  lfi->hev_thr, bd);
       } else if (mask_4x4 & 1) {
         vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim,
-                                  lfi->hev_thr, 1, bd);
+                                  lfi->hev_thr, bd);
       }
     }
     if (mask_4x4_int & 1)
       vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
-                                lfi->hev_thr, 1, bd);
+                                lfi->hev_thr, bd);
     s += 8;
     lfl += 1;
     mask_16x16 >>= 1;
@@ -1431,7 +1460,11 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
   uint16_t mask_16x16 = lfm->left_uv[TX_16X16];
   uint16_t mask_8x8 = lfm->left_uv[TX_8X8];
   uint16_t mask_4x4 = lfm->left_uv[TX_4X4];
+#if CONFIG_MISC_FIXES
+  uint16_t mask_4x4_int = lfm->left_int_4x4_uv;
+#else
   uint16_t mask_4x4_int = lfm->int_4x4_uv;
+#endif
 
   assert(plane->subsampling_x == 1 && plane->subsampling_y == 1);
 
@@ -1483,7 +1516,11 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
   mask_16x16 = lfm->above_uv[TX_16X16];
   mask_8x8 = lfm->above_uv[TX_8X8];
   mask_4x4 = lfm->above_uv[TX_4X4];
+#if CONFIG_MISC_FIXES
+  mask_4x4_int = lfm->above_int_4x4_uv;
+#else
   mask_4x4_int = lfm->int_4x4_uv;
+#endif
 
   for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) {
     const int skip_border_4x4_r = mi_row + r == cm->mi_rows - 1;