/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
11 #include "./vpx_config.h"
12 #include "./vpx_dsp_rtcd.h"
14 #if CONFIG_VP9_HIGHBITDEPTH
15 #include "vpx_dsp/vpx_dsp_common.h"
16 #endif // CONFIG_VP9_HIGHBITDEPTH
17 #include "vpx_mem/vpx_mem.h"
18 #include "vpx_ports/mem.h"
19 #include "vpx_ports/vpx_once.h"
21 #include "vp10/common/reconintra.h"
22 #include "vp10/common/onyxc_int.h"
27 NEED_ABOVERIGHT = 1 << 3,
30 static const uint8_t extend_modes[INTRA_MODES] = {
31 NEED_ABOVE | NEED_LEFT, // DC
34 NEED_ABOVERIGHT, // D45
35 NEED_LEFT | NEED_ABOVE, // D135
36 NEED_LEFT | NEED_ABOVE, // D117
37 NEED_LEFT | NEED_ABOVE, // D153
39 NEED_ABOVERIGHT, // D63
40 NEED_LEFT | NEED_ABOVE, // TM
43 typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
44 const uint8_t *above, const uint8_t *left);
46 static intra_pred_fn pred[INTRA_MODES][TX_SIZES];
47 static intra_pred_fn dc_pred[2][2][TX_SIZES];
49 #if CONFIG_VP9_HIGHBITDEPTH
50 typedef void (*intra_high_pred_fn)(uint16_t *dst, ptrdiff_t stride,
51 const uint16_t *above, const uint16_t *left,
53 static intra_high_pred_fn pred_high[INTRA_MODES][4];
54 static intra_high_pred_fn dc_pred_high[2][2][4];
55 #endif // CONFIG_VP9_HIGHBITDEPTH
57 static void vp10_init_intra_predictors_internal(void) {
58 #define INIT_ALL_SIZES(p, type) \
59 p[TX_4X4] = vpx_##type##_predictor_4x4; \
60 p[TX_8X8] = vpx_##type##_predictor_8x8; \
61 p[TX_16X16] = vpx_##type##_predictor_16x16; \
62 p[TX_32X32] = vpx_##type##_predictor_32x32
64 INIT_ALL_SIZES(pred[V_PRED], v);
65 INIT_ALL_SIZES(pred[H_PRED], h);
66 INIT_ALL_SIZES(pred[D207_PRED], d207);
67 INIT_ALL_SIZES(pred[D45_PRED], d45);
68 INIT_ALL_SIZES(pred[D63_PRED], d63);
69 INIT_ALL_SIZES(pred[D117_PRED], d117);
70 INIT_ALL_SIZES(pred[D135_PRED], d135);
71 INIT_ALL_SIZES(pred[D153_PRED], d153);
72 INIT_ALL_SIZES(pred[TM_PRED], tm);
74 INIT_ALL_SIZES(dc_pred[0][0], dc_128);
75 INIT_ALL_SIZES(dc_pred[0][1], dc_top);
76 INIT_ALL_SIZES(dc_pred[1][0], dc_left);
77 INIT_ALL_SIZES(dc_pred[1][1], dc);
79 #if CONFIG_VP9_HIGHBITDEPTH
80 INIT_ALL_SIZES(pred_high[V_PRED], highbd_v);
81 INIT_ALL_SIZES(pred_high[H_PRED], highbd_h);
82 INIT_ALL_SIZES(pred_high[D207_PRED], highbd_d207);
83 INIT_ALL_SIZES(pred_high[D45_PRED], highbd_d45);
84 INIT_ALL_SIZES(pred_high[D63_PRED], highbd_d63);
85 INIT_ALL_SIZES(pred_high[D117_PRED], highbd_d117);
86 INIT_ALL_SIZES(pred_high[D135_PRED], highbd_d135);
87 INIT_ALL_SIZES(pred_high[D153_PRED], highbd_d153);
88 INIT_ALL_SIZES(pred_high[TM_PRED], highbd_tm);
90 INIT_ALL_SIZES(dc_pred_high[0][0], highbd_dc_128);
91 INIT_ALL_SIZES(dc_pred_high[0][1], highbd_dc_top);
92 INIT_ALL_SIZES(dc_pred_high[1][0], highbd_dc_left);
93 INIT_ALL_SIZES(dc_pred_high[1][1], highbd_dc);
94 #endif // CONFIG_VP9_HIGHBITDEPTH
96 #undef intra_pred_allsizes
99 #if CONFIG_VP9_HIGHBITDEPTH
100 static void build_intra_predictors_high(const MACROBLOCKD *xd,
105 PREDICTION_MODE mode,
113 uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
114 uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
115 DECLARE_ALIGNED(16, uint16_t, left_col[32]);
116 DECLARE_ALIGNED(16, uint16_t, above_data[64 + 16]);
117 uint16_t *above_row = above_data + 16;
118 const uint16_t *const_above_row = above_row;
119 const int bs = 4 << tx_size;
120 int frame_width, frame_height;
122 const struct macroblockd_plane *const pd = &xd->plane[plane];
124 int base = 128 << (bd - 8);
125 // 127 127 127 .. 127 127 127 127 127 127
129 // 129 G H .. S T T T T T
131 // Get current frame pointer, width and height.
133 frame_width = xd->cur_buf->y_width;
134 frame_height = xd->cur_buf->y_height;
136 frame_width = xd->cur_buf->uv_width;
137 frame_height = xd->cur_buf->uv_height;
140 // Get block position in current frame.
141 x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
142 y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
145 if (left_available) {
146 if (xd->mb_to_bottom_edge < 0) {
147 /* slower path if the block needs border extension */
148 if (y0 + bs <= frame_height) {
149 for (i = 0; i < bs; ++i)
150 left_col[i] = ref[i * ref_stride - 1];
152 const int extend_bottom = frame_height - y0;
153 for (i = 0; i < extend_bottom; ++i)
154 left_col[i] = ref[i * ref_stride - 1];
156 left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1];
159 /* faster path if the block does not need extension */
160 for (i = 0; i < bs; ++i)
161 left_col[i] = ref[i * ref_stride - 1];
164 // TODO(Peter): this value should probably change for high bitdepth
165 vpx_memset16(left_col, base + 1, bs);
168 // TODO(hkuang) do not extend 2*bs pixels for all modes.
171 const uint16_t *above_ref = ref - ref_stride;
172 if (xd->mb_to_right_edge < 0) {
173 /* slower path if the block needs border extension */
174 if (x0 + 2 * bs <= frame_width) {
175 if (right_available && bs == 4) {
176 memcpy(above_row, above_ref, 2 * bs * sizeof(above_row[0]));
178 memcpy(above_row, above_ref, bs * sizeof(above_row[0]));
179 vpx_memset16(above_row + bs, above_row[bs - 1], bs);
181 } else if (x0 + bs <= frame_width) {
182 const int r = frame_width - x0;
183 if (right_available && bs == 4) {
184 memcpy(above_row, above_ref, r * sizeof(above_row[0]));
185 vpx_memset16(above_row + r, above_row[r - 1],
186 x0 + 2 * bs - frame_width);
188 memcpy(above_row, above_ref, bs * sizeof(above_row[0]));
189 vpx_memset16(above_row + bs, above_row[bs - 1], bs);
191 } else if (x0 <= frame_width) {
192 const int r = frame_width - x0;
193 memcpy(above_row, above_ref, r * sizeof(above_row[0]));
194 vpx_memset16(above_row + r, above_row[r - 1],
195 x0 + 2 * bs - frame_width);
197 // TODO(Peter) this value should probably change for high bitdepth
198 above_row[-1] = left_available ? above_ref[-1] : (base+1);
200 /* faster path if the block does not need extension */
201 if (bs == 4 && right_available && left_available) {
202 const_above_row = above_ref;
204 memcpy(above_row, above_ref, bs * sizeof(above_row[0]));
205 if (bs == 4 && right_available)
206 memcpy(above_row + bs, above_ref + bs, bs * sizeof(above_row[0]));
208 vpx_memset16(above_row + bs, above_row[bs - 1], bs);
209 // TODO(Peter): this value should probably change for high bitdepth
210 above_row[-1] = left_available ? above_ref[-1] : (base+1);
214 vpx_memset16(above_row, base - 1, bs * 2);
215 // TODO(Peter): this value should probably change for high bitdepth
216 above_row[-1] = base - 1;
220 if (mode == DC_PRED) {
221 dc_pred_high[left_available][up_available][tx_size](dst, dst_stride,
225 pred_high[mode][tx_size](dst, dst_stride, const_above_row, left_col,
229 #endif // CONFIG_VP9_HIGHBITDEPTH
231 static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
232 int ref_stride, uint8_t *dst, int dst_stride,
233 PREDICTION_MODE mode, TX_SIZE tx_size,
234 int up_available, int left_available,
235 int right_available, int x, int y,
238 DECLARE_ALIGNED(16, uint8_t, left_col[32]);
239 DECLARE_ALIGNED(16, uint8_t, above_data[64 + 16]);
240 uint8_t *above_row = above_data + 16;
241 const uint8_t *const_above_row = above_row;
242 const int bs = 4 << tx_size;
243 int frame_width, frame_height;
245 const struct macroblockd_plane *const pd = &xd->plane[plane];
247 // 127 127 127 .. 127 127 127 127 127 127
251 // 129 G H .. S T T T T T
254 // Get current frame pointer, width and height.
256 frame_width = xd->cur_buf->y_width;
257 frame_height = xd->cur_buf->y_height;
259 frame_width = xd->cur_buf->uv_width;
260 frame_height = xd->cur_buf->uv_height;
263 // Get block position in current frame.
264 x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
265 y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
268 if (extend_modes[mode] & NEED_LEFT) {
269 if (left_available) {
270 if (xd->mb_to_bottom_edge < 0) {
271 /* slower path if the block needs border extension */
272 if (y0 + bs <= frame_height) {
273 for (i = 0; i < bs; ++i)
274 left_col[i] = ref[i * ref_stride - 1];
276 const int extend_bottom = frame_height - y0;
277 for (i = 0; i < extend_bottom; ++i)
278 left_col[i] = ref[i * ref_stride - 1];
280 left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1];
283 /* faster path if the block does not need extension */
284 for (i = 0; i < bs; ++i)
285 left_col[i] = ref[i * ref_stride - 1];
288 memset(left_col, 129, bs);
293 if (extend_modes[mode] & NEED_ABOVE) {
295 const uint8_t *above_ref = ref - ref_stride;
296 if (xd->mb_to_right_edge < 0) {
297 /* slower path if the block needs border extension */
298 if (x0 + bs <= frame_width) {
299 memcpy(above_row, above_ref, bs);
300 } else if (x0 <= frame_width) {
301 const int r = frame_width - x0;
302 memcpy(above_row, above_ref, r);
303 memset(above_row + r, above_row[r - 1], x0 + bs - frame_width);
306 /* faster path if the block does not need extension */
307 if (bs == 4 && right_available && left_available) {
308 const_above_row = above_ref;
310 memcpy(above_row, above_ref, bs);
313 above_row[-1] = left_available ? above_ref[-1] : 129;
315 memset(above_row, 127, bs);
321 if (extend_modes[mode] & NEED_ABOVERIGHT) {
323 const uint8_t *above_ref = ref - ref_stride;
324 if (xd->mb_to_right_edge < 0) {
325 /* slower path if the block needs border extension */
326 if (x0 + 2 * bs <= frame_width) {
327 if (right_available && bs == 4) {
328 memcpy(above_row, above_ref, 2 * bs);
330 memcpy(above_row, above_ref, bs);
331 memset(above_row + bs, above_row[bs - 1], bs);
333 } else if (x0 + bs <= frame_width) {
334 const int r = frame_width - x0;
335 if (right_available && bs == 4) {
336 memcpy(above_row, above_ref, r);
337 memset(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width);
339 memcpy(above_row, above_ref, bs);
340 memset(above_row + bs, above_row[bs - 1], bs);
342 } else if (x0 <= frame_width) {
343 const int r = frame_width - x0;
344 memcpy(above_row, above_ref, r);
345 memset(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width);
348 /* faster path if the block does not need extension */
349 if (bs == 4 && right_available && left_available) {
350 const_above_row = above_ref;
352 memcpy(above_row, above_ref, bs);
353 if (bs == 4 && right_available)
354 memcpy(above_row + bs, above_ref + bs, bs);
356 memset(above_row + bs, above_row[bs - 1], bs);
359 above_row[-1] = left_available ? above_ref[-1] : 129;
361 memset(above_row, 127, bs * 2);
367 if (mode == DC_PRED) {
368 dc_pred[left_available][up_available][tx_size](dst, dst_stride,
369 const_above_row, left_col);
371 pred[mode][tx_size](dst, dst_stride, const_above_row, left_col);
375 void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in,
376 TX_SIZE tx_size, PREDICTION_MODE mode,
377 const uint8_t *ref, int ref_stride,
378 uint8_t *dst, int dst_stride,
379 int aoff, int loff, int plane) {
380 const int bw = (1 << bwl_in);
381 const int txw = (1 << tx_size);
382 const int have_top = loff || xd->up_available;
383 const int have_left = aoff || xd->left_available;
384 const int have_right = (aoff + txw) < bw;
385 const int x = aoff * 4;
386 const int y = loff * 4;
388 #if CONFIG_VP9_HIGHBITDEPTH
389 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
390 build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode,
391 tx_size, have_top, have_left, have_right,
392 x, y, plane, xd->bd);
396 build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
397 have_top, have_left, have_right, x, y, plane);
400 void vp10_init_intra_predictors(void) {
401 once(vp10_init_intra_predictors_internal);