/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VP10_COMMON_RECONINTER_H_
#define VP10_COMMON_RECONINTER_H_

#include <assert.h>

#include "vp10/common/filter.h"
#include "vp10/common/onyxc_int.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_filter.h"

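// Apply the sub-pixel interpolation filter for one prediction block. The
// predict function is chosen from sf->predict[][][] by whether the MV has a
// fractional x or y component; a nonzero ref selects the averaging variant
// used for the second reference of a compound prediction.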
static INLINE void inter_predictor(const uint8_t *src, int src_stride,
                                   uint8_t *dst, int dst_stride,
                                   const int subpel_x,
                                   const int subpel_y,
                                   const struct scale_factors *sf,
                                   int w, int h, int ref,
                                   const InterpKernel *kernel,
                                   int xs, int ys) {
  sf->predict[subpel_x != 0][subpel_y != 0][ref](
      src, src_stride, dst, dst_stride,
      kernel[subpel_x], xs, kernel[subpel_y], ys, w, h);
}

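// High bit-depth counterpart of inter_predictor(): dispatches through
// sf->highbd_predict[][][] and forwards the bit depth bd.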
#if CONFIG_VP9_HIGHBITDEPTH
static INLINE void high_inter_predictor(const uint8_t *src, int src_stride,
                                        uint8_t *dst, int dst_stride,
                                        const int subpel_x,
                                        const int subpel_y,
                                        const struct scale_factors *sf,
                                        int w, int h, int ref,
                                        const InterpKernel *kernel,
                                        int xs, int ys, int bd) {
  sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref](
      src, src_stride, dst, dst_stride,
      kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

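// Divide the sum of four MV components by 4, rounding to nearest with ties
// away from zero.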
static INLINE int round_mv_comp_q4(int value) {
  return (value < 0 ? value - 2 : value + 2) / 4;
}

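// Average the four sub-block motion vectors of a sub-8x8 coded block.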
static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
  MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
                              mi->bmi[1].as_mv[idx].as_mv.row +
                              mi->bmi[2].as_mv[idx].as_mv.row +
                              mi->bmi[3].as_mv[idx].as_mv.row),
             round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col +
                              mi->bmi[1].as_mv[idx].as_mv.col +
                              mi->bmi[2].as_mv[idx].as_mv.col +
                              mi->bmi[3].as_mv[idx].as_mv.col) };
  return res;
}

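// Divide the sum of two MV components by 2, rounding to nearest with ties
// away from zero.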
static INLINE int round_mv_comp_q2(int value) {
  return (value < 0 ? value - 1 : value + 1) / 2;
}

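// Average two of the sub-block motion vectors, selected by block0 and block1.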
static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) {
  MV res = { round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.row +
                              mi->bmi[block1].as_mv[idx].as_mv.row),
             round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.col +
                              mi->bmi[block1].as_mv[idx].as_mv.col) };
  return res;
}

// TODO(jkoleszar): yet another mv clamping function :-(
static INLINE MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd,
                                           const MV *src_mv,
                                           int bw, int bh, int ss_x, int ss_y) {
  // If the MV points so far into the UMV border that no visible pixels
  // are used for reconstruction, the subpel part of the MV can be
  // discarded and the MV limited to 16 pixels with equivalent results.
  const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS;
  const int spel_right = spel_left - SUBPEL_SHIFTS;
  const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS;
  const int spel_bottom = spel_top - SUBPEL_SHIFTS;
  MV clamped_mv = {
    src_mv->row * (1 << (1 - ss_y)),
    src_mv->col * (1 << (1 - ss_x))
  };
  assert(ss_x <= 1);
  assert(ss_y <= 1);

  clamp_mv(&clamped_mv,
           xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left,
           xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right,
           xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top,
           xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom);

  return clamped_mv;
}

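// Pick the MV for one plane of a sub-8x8 coded block: the sub-block MV itself
// when the plane is not subsampled, the average of two sub-block MVs when one
// dimension is subsampled, and the average of all four when both are.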
static INLINE MV average_split_mvs(const struct macroblockd_plane *pd,
                                   const MODE_INFO *mi, int ref, int block) {
  const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
  MV res = { 0, 0 };
  switch (ss_idx) {
    case 0: res = mi->bmi[block].as_mv[ref].as_mv; break;
    case 1: res = mi_mv_pred_q2(mi, ref, block, block + 2); break;
    case 2: res = mi_mv_pred_q2(mi, ref, block, block + 1); break;
    case 3: res = mi_mv_pred_q4(mi, ref); break;
    default: assert(ss_idx <= 3 && ss_idx >= 0);
  }
  return res;
}

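// Predictor builders: a per-block helper plus whole-superblock wrappers for
// luma only (sby), a single plane (sbp), chroma only (sbuv) and all planes
// (sb).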
void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                            int bw, int bh,
                            int x, int y, int w, int h,
                            int mi_x, int mi_y);

void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize);

void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize, int plane);

void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
                                      BLOCK_SIZE bsize);

void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
                                    BLOCK_SIZE bsize);

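// A minimal usage sketch (ref, ref_frame_buf, sf, mi_row, mi_col and bsize
// are assumed caller state, not defined in this header): point the pre[]
// planes at the reference frame, then build the whole-superblock prediction.
//
//   vp10_setup_pre_planes(xd, ref, ref_frame_buf, mi_row, mi_col, sf);
//   vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
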
void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const MV *mv_q3,
                                const struct scale_factors *sf,
                                int w, int h, int do_avg,
                                const InterpKernel *kernel,
                                enum mv_precision precision,
                                int x, int y);

#if CONFIG_VP9_HIGHBITDEPTH
void vp10_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
                                       uint8_t *dst, int dst_stride,
                                       const MV *mv_q3,
                                       const struct scale_factors *sf,
                                       int w, int h, int do_avg,
                                       const InterpKernel *kernel,
                                       enum mv_precision precision,
                                       int x, int y, int bd);
#endif  // CONFIG_VP9_HIGHBITDEPTH

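// Convert an (x_offset, y_offset) pixel position into a buffer offset,
// scaling the coordinates first when a scale_factors table is provided.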
static INLINE int scaled_buffer_offset(int x_offset, int y_offset, int stride,
                                       const struct scale_factors *sf) {
  const int x = sf ? sf->scale_value_x(x_offset, sf) : x_offset;
  const int y = sf ? sf->scale_value_y(y_offset, sf) : y_offset;
  return y * stride + x;
}

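// Point dst at the portion of src that covers the block at (mi_row, mi_col),
// accounting for chroma subsampling and any reference-frame scaling.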
static INLINE void setup_pred_plane(struct buf_2d *dst,
                                    uint8_t *src, int stride,
                                    int mi_row, int mi_col,
                                    const struct scale_factors *scale,
                                    int subsampling_x, int subsampling_y) {
  const int x = (MI_SIZE * mi_col) >> subsampling_x;
  const int y = (MI_SIZE * mi_row) >> subsampling_y;
  dst->buf = src + scaled_buffer_offset(x, y, stride, scale);
  dst->stride = stride;
}

void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
                           const YV12_BUFFER_CONFIG *src,
                           int mi_row, int mi_col);

void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
                           const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col,
                           const struct scale_factors *sf);

#endif  // VP10_COMMON_RECONINTER_H_