/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vp10/common/loopfilter.h"
#include "vp10/common/onyxc_int.h"
#include "vp10/common/reconinter.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"

#include "vp10/common/seg_common.h"

// 64 bit masks for left transform size. Each 1 represents a position where
// we should apply a loop filter across the left border of an 8x8 block
// boundary.
//
// In the case of TX_16X16 (low order byte first) we end up with
// a mask that looks like this:
//
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//
// A loopfilter should be applied to every other 8x8 horizontally.
static const uint64_t left_64x64_txform_mask[TX_SIZES] = {
  0xffffffffffffffffULL,  // TX_4X4
  0xffffffffffffffffULL,  // TX_8x8
  0x5555555555555555ULL,  // TX_16x16
  0x1111111111111111ULL,  // TX_32x32
};

// 64 bit masks for above transform size. Each 1 represents a position where
// we should apply a loop filter across the top border of an 8x8 block
// boundary.
//
// In the case of TX_32x32 (low order byte first) we end up with
// a mask that looks like this:
//
//    11111111
//    00000000
//    00000000
//    00000000
//    11111111
//    00000000
//    00000000
//    00000000
//
// A loopfilter should be applied to every 4th row vertically.
static const uint64_t above_64x64_txform_mask[TX_SIZES] = {
  0xffffffffffffffffULL,  // TX_4X4
  0xffffffffffffffffULL,  // TX_8x8
  0x00ff00ff00ff00ffULL,  // TX_16x16
  0x000000ff000000ffULL,  // TX_32x32
};

// 64 bit masks for prediction sizes (left). Each 1 represents a position
// where the left border of an 8x8 block lies. These are aligned to the
// right-most appropriate bit, and then shifted into place.
//
// In the case of BLOCK_16X32 (low order byte first) we end up with
// a mask that looks like this:
//
//    10000000
//    10000000
//    10000000
//    10000000
//    00000000
//    00000000
//    00000000
//    00000000
static const uint64_t left_prediction_mask[BLOCK_SIZES] = {
  0x0000000000000001ULL,  // BLOCK_4X4,
  0x0000000000000001ULL,  // BLOCK_4X8,
  0x0000000000000001ULL,  // BLOCK_8X4,
  0x0000000000000001ULL,  // BLOCK_8X8,
  0x0000000000000101ULL,  // BLOCK_8X16,
  0x0000000000000001ULL,  // BLOCK_16X8,
  0x0000000000000101ULL,  // BLOCK_16X16,
  0x0000000001010101ULL,  // BLOCK_16X32,
  0x0000000000000101ULL,  // BLOCK_32X16,
  0x0000000001010101ULL,  // BLOCK_32X32,
  0x0101010101010101ULL,  // BLOCK_32X64,
  0x0000000001010101ULL,  // BLOCK_64X32,
  0x0101010101010101ULL,  // BLOCK_64X64
};

// 64 bit mask to shift and set for each prediction size.
static const uint64_t above_prediction_mask[BLOCK_SIZES] = {
  0x0000000000000001ULL,  // BLOCK_4X4
  0x0000000000000001ULL,  // BLOCK_4X8
  0x0000000000000001ULL,  // BLOCK_8X4
  0x0000000000000001ULL,  // BLOCK_8X8
  0x0000000000000001ULL,  // BLOCK_8X16,
  0x0000000000000003ULL,  // BLOCK_16X8
  0x0000000000000003ULL,  // BLOCK_16X16
  0x0000000000000003ULL,  // BLOCK_16X32,
  0x000000000000000fULL,  // BLOCK_32X16,
  0x000000000000000fULL,  // BLOCK_32X32,
  0x000000000000000fULL,  // BLOCK_32X64,
  0x00000000000000ffULL,  // BLOCK_64X32,
  0x00000000000000ffULL,  // BLOCK_64X64
};

// 64 bit mask to shift and set for each prediction size. A bit is set for
// each 8x8 block that would be in the left most block of the given block
// size in the 64x64 block.
static const uint64_t size_mask[BLOCK_SIZES] = {
  0x0000000000000001ULL,  // BLOCK_4X4
  0x0000000000000001ULL,  // BLOCK_4X8
  0x0000000000000001ULL,  // BLOCK_8X4
  0x0000000000000001ULL,  // BLOCK_8X8
  0x0000000000000101ULL,  // BLOCK_8X16,
  0x0000000000000003ULL,  // BLOCK_16X8
  0x0000000000000303ULL,  // BLOCK_16X16
  0x0000000003030303ULL,  // BLOCK_16X32,
  0x0000000000000f0fULL,  // BLOCK_32X16,
  0x000000000f0f0f0fULL,  // BLOCK_32X32,
  0x0f0f0f0f0f0f0f0fULL,  // BLOCK_32X64,
  0x00000000ffffffffULL,  // BLOCK_64X32,
  0xffffffffffffffffULL,  // BLOCK_64X64
};

// These are used for masking the left and above borders.
static const uint64_t left_border = 0x1111111111111111ULL;
static const uint64_t above_border = 0x000000ff000000ffULL;

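// Illustrative reading of the two constants above: left_border has a 1 in
// columns 0 and 4 of every 8x8 row (the left edges of the two 32x32 block
// columns), and above_border has rows 0 and 4 fully set (the top edges of
// the two 32x32 block rows).
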
// 16 bit masks for uv transform sizes.
static const uint16_t left_64x64_txform_mask_uv[TX_SIZES] = {
  0xffff,  // TX_4X4
  0xffff,  // TX_8x8
  0x5555,  // TX_16x16
  0x1111,  // TX_32x32
};

static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = {
  0xffff,  // TX_4X4
  0xffff,  // TX_8x8
  0x0f0f,  // TX_16x16
  0x000f,  // TX_32x32
};

// 16 bit left mask to shift and set for each uv prediction size.
static const uint16_t left_prediction_mask_uv[BLOCK_SIZES] = {
  0x0001,  // BLOCK_4X4,
  0x0001,  // BLOCK_4X8,
  0x0001,  // BLOCK_8X4,
  0x0001,  // BLOCK_8X8,
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8,
  0x0001,  // BLOCK_16X16,
  0x0011,  // BLOCK_16X32,
  0x0001,  // BLOCK_32X16,
  0x0011,  // BLOCK_32X32,
  0x1111,  // BLOCK_32X64
  0x0011,  // BLOCK_64X32,
  0x1111,  // BLOCK_64X64
};

// 16 bit above mask to shift and set for each uv prediction size.
static const uint16_t above_prediction_mask_uv[BLOCK_SIZES] = {
  0x0001,  // BLOCK_4X4
  0x0001,  // BLOCK_4X8
  0x0001,  // BLOCK_8X4
  0x0001,  // BLOCK_8X8
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8
  0x0001,  // BLOCK_16X16
  0x0001,  // BLOCK_16X32,
  0x0003,  // BLOCK_32X16,
  0x0003,  // BLOCK_32X32,
  0x0003,  // BLOCK_32X64,
  0x000f,  // BLOCK_64X32,
  0x000f,  // BLOCK_64X64
};

// 16 bit mask to shift and set for each uv prediction size.
static const uint16_t size_mask_uv[BLOCK_SIZES] = {
  0x0001,  // BLOCK_4X4
  0x0001,  // BLOCK_4X8
  0x0001,  // BLOCK_8X4
  0x0001,  // BLOCK_8X8
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8
  0x0001,  // BLOCK_16X16
  0x0011,  // BLOCK_16X32,
  0x0003,  // BLOCK_32X16,
  0x0033,  // BLOCK_32X32,
  0x3333,  // BLOCK_32X64,
  0x00ff,  // BLOCK_64X32,
  0xffff,  // BLOCK_64X64
};

static const uint16_t left_border_uv = 0x1111;
static const uint16_t above_border_uv = 0x000f;

static const int mode_lf_lut[MB_MODE_COUNT] = {
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  // INTRA_MODES
  1, 1, 0, 1                     // INTER_MODES (ZEROMV == 0)
};

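// Note: the LUT above collapses prediction modes into filter-delta indices,
// so a ZEROMV inter block maps to index 0 and shares its level with the
// intra modes, while the other inter modes use their own mode delta.
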
static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
  int lvl;

  // For each possible value for the loop filter fill out limits
  for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) {
    // Set loop filter parameters that control sharpness.
    int block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4));

    if (sharpness_lvl > 0) {
      if (block_inside_limit > 9 - sharpness_lvl)
        block_inside_limit = 9 - sharpness_lvl;
    }

    if (block_inside_limit < 1)
      block_inside_limit = 1;

    memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH);
    memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit),
           SIMD_WIDTH);
  }
}

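// Worked example (illustrative): with sharpness_lvl == 5 the shift above is
// (1 + 1) == 2, so lvl == 32 gives block_inside_limit == 32 >> 2 == 8, which
// the cap lowers to 9 - 5 == 4; mblim is then 2 * (32 + 2) + 4 == 72.
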
static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
                                const MB_MODE_INFO *mbmi) {
  return lfi_n->lvl[mbmi->segment_id][mbmi->ref_frame[0]]
                   [mode_lf_lut[mbmi->mode]];
}

void vp10_loop_filter_init(VP10_COMMON *cm) {
  loop_filter_info_n *lfi = &cm->lf_info;
  struct loopfilter *lf = &cm->lf;
  int lvl;

  // init limits for given sharpness
  update_sharpness(lfi, lf->sharpness_level);
  lf->last_sharpness_level = lf->sharpness_level;

  // init hev threshold const vectors
  for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++)
    memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
}

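// Note: lvl >> 4 quantizes the 0..63 filter level into four high edge
// variance thresholds: levels 0..15 get hev_thr 0, 16..31 get 1, 32..47
// get 2 and 48..63 get 3.
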
void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
  int seg_id;
  // scale is the multiplier for lf_deltas:
  // 1 when filter_lvl is between 0 and 31;
  // 2 when filter_lvl is between 32 and 63.
  const int scale = 1 << (default_filt_lvl >> 5);
  loop_filter_info_n *const lfi = &cm->lf_info;
  struct loopfilter *const lf = &cm->lf;
  const struct segmentation *const seg = &cm->seg;

  // update limits if sharpness has changed
  if (lf->last_sharpness_level != lf->sharpness_level) {
    update_sharpness(lfi, lf->sharpness_level);
    lf->last_sharpness_level = lf->sharpness_level;
  }

  for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {
    int lvl_seg = default_filt_lvl;
    if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
      const int data = get_segdata(seg, seg_id, SEG_LVL_ALT_LF);
      lvl_seg = clamp(seg->abs_delta == SEGMENT_ABSDATA ?
                      data : default_filt_lvl + data,
                      0, MAX_LOOP_FILTER);
    }

    if (!lf->mode_ref_delta_enabled) {
      // we could get rid of this if we assume that deltas are set to
      // zero when not in use; encoder always uses deltas
      memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id]));
    } else {
      int ref, mode;
      const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
      lfi->lvl[seg_id][INTRA_FRAME][0] = clamp(intra_lvl, 0, MAX_LOOP_FILTER);

      for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) {
        for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) {
          const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale
                                        + lf->mode_deltas[mode] * scale;
          lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER);
        }
      }
    }
  }
}

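// Worked example (illustrative): with default_filt_lvl == 40, scale ==
// 1 << (40 >> 5) == 2, so a ref_delta of -1 yields an intra level of
// 40 + (-1) * 2 == 38 before clamping.
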
static void filter_selectively_vert_row2(int subsampling_factor,
                                         uint8_t *s, int pitch,
                                         unsigned int mask_16x16_l,
                                         unsigned int mask_8x8_l,
                                         unsigned int mask_4x4_l,
                                         unsigned int mask_4x4_int_l,
                                         const loop_filter_info_n *lfi_n,
                                         const uint8_t *lfl) {
  const int mask_shift = subsampling_factor ? 4 : 8;
  const int mask_cutoff = subsampling_factor ? 0xf : 0xff;
  const int lfl_forward = subsampling_factor ? 4 : 8;

  unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff;
  unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff;
  unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff;
  unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff;
  unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff;
  unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff;
  unsigned int mask;

  for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 |
              mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl;
    const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward);

    // TODO(yunqingwang): count in loopfilter functions should be removed.
    if (mask & 1) {
      if ((mask_16x16_0 | mask_16x16_1) & 1) {
        if ((mask_16x16_0 & mask_16x16_1) & 1) {
          vpx_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr);
        } else if (mask_16x16_0 & 1) {
          vpx_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
                              lfi0->hev_thr);
        } else {
          vpx_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
                              lfi1->lim, lfi1->hev_thr);
        }
      }

      if ((mask_8x8_0 | mask_8x8_1) & 1) {
        if ((mask_8x8_0 & mask_8x8_1) & 1) {
          vpx_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                  lfi1->hev_thr);
        } else if (mask_8x8_0 & 1) {
          vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr,
                             1);
        } else {
          vpx_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                             lfi1->hev_thr, 1);
        }
      }

      if ((mask_4x4_0 | mask_4x4_1) & 1) {
        if ((mask_4x4_0 & mask_4x4_1) & 1) {
          vpx_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                  lfi1->hev_thr);
        } else if (mask_4x4_0 & 1) {
          vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr,
                             1);
        } else {
          vpx_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                             lfi1->hev_thr, 1);
        }
      }

      if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
        if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
          vpx_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                  lfi1->hev_thr);
        } else if (mask_4x4_int_0 & 1) {
          vpx_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                             lfi0->hev_thr, 1);
        } else {
          vpx_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
                             lfi1->hev_thr, 1);
        }
      }
    }

    s += 8;
    lfl += 1;
    mask_16x16_0 >>= 1;
    mask_8x8_0 >>= 1;
    mask_4x4_0 >>= 1;
    mask_4x4_int_0 >>= 1;
    mask_16x16_1 >>= 1;
    mask_8x8_1 >>= 1;
    mask_4x4_1 >>= 1;
    mask_4x4_int_1 >>= 1;
  }
}

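// Note on the mask packing consumed above: both rows are filtered in lock
// step. For luma (subsampling_factor == 0) bits 0..7 of each incoming mask
// cover the first row and bits 8..15 the second; for subsampled chroma the
// split is 4/4, which is why mask_shift, mask_cutoff and lfl_forward halve.
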
#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_filter_selectively_vert_row2(int subsampling_factor,
                                                uint16_t *s, int pitch,
                                                unsigned int mask_16x16_l,
                                                unsigned int mask_8x8_l,
                                                unsigned int mask_4x4_l,
                                                unsigned int mask_4x4_int_l,
                                                const loop_filter_info_n *lfi_n,
                                                const uint8_t *lfl, int bd) {
  const int mask_shift = subsampling_factor ? 4 : 8;
  const int mask_cutoff = subsampling_factor ? 0xf : 0xff;
  const int lfl_forward = subsampling_factor ? 4 : 8;

  unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff;
  unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff;
  unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff;
  unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff;
  unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff;
  unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff;
  unsigned int mask;

  for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 |
              mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl;
    const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward);

    // TODO(yunqingwang): count in loopfilter functions should be removed.
    if (mask & 1) {
      if ((mask_16x16_0 | mask_16x16_1) & 1) {
        if ((mask_16x16_0 & mask_16x16_1) & 1) {
          vpx_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                          lfi0->hev_thr, bd);
        } else if (mask_16x16_0 & 1) {
          vpx_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
                                     lfi0->hev_thr, bd);
        } else {
          vpx_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
                                     lfi1->lim, lfi1->hev_thr, bd);
        }
      }

      if ((mask_8x8_0 | mask_8x8_1) & 1) {
        if ((mask_8x8_0 & mask_8x8_1) & 1) {
          vpx_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                         lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                         lfi1->hev_thr, bd);
        } else if (mask_8x8_0 & 1) {
          vpx_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr, 1, bd);
        } else {
          vpx_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
                                    lfi1->lim, lfi1->hev_thr, 1, bd);
        }
      }

      if ((mask_4x4_0 | mask_4x4_1) & 1) {
        if ((mask_4x4_0 & mask_4x4_1) & 1) {
          vpx_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                         lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                         lfi1->hev_thr, bd);
        } else if (mask_4x4_0 & 1) {
          vpx_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr, 1, bd);
        } else {
          vpx_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
                                    lfi1->lim, lfi1->hev_thr, 1, bd);
        }
      }

      if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
        if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
          vpx_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                         lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                         lfi1->hev_thr, bd);
        } else if (mask_4x4_int_0 & 1) {
          vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr, 1, bd);
        } else {
          vpx_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
                                    lfi1->lim, lfi1->hev_thr, 1, bd);
        }
      }
    }

    s += 8;
    lfl += 1;
    mask_16x16_0 >>= 1;
    mask_8x8_0 >>= 1;
    mask_4x4_0 >>= 1;
    mask_4x4_int_0 >>= 1;
    mask_16x16_1 >>= 1;
    mask_8x8_1 >>= 1;
    mask_4x4_1 >>= 1;
    mask_4x4_int_1 >>= 1;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static void filter_selectively_horiz(uint8_t *s, int pitch,
                                     unsigned int mask_16x16,
                                     unsigned int mask_8x8,
                                     unsigned int mask_4x4,
                                     unsigned int mask_4x4_int,
                                     const loop_filter_info_n *lfi_n,
                                     const uint8_t *lfl) {
  unsigned int mask;
  int count;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
       mask; mask >>= count) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    count = 1;
    if (mask & 1) {
      if (mask_16x16 & 1) {
        if ((mask_16x16 & 3) == 3) {
          vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                lfi->hev_thr, 2);
          count = 2;
        } else {
          vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                lfi->hev_thr, 1);
        }
      } else if (mask_8x8 & 1) {
        if ((mask_8x8 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          vpx_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, lfin->mblim, lfin->lim,
                                    lfin->hev_thr);

          if ((mask_4x4_int & 3) == 3) {
            vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                      lfi->lim, lfi->hev_thr, lfin->mblim,
                                      lfin->lim, lfin->hev_thr);
          } else {
            if (mask_4x4_int & 1)
              vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                   lfi->hev_thr, 1);
            else if (mask_4x4_int & 2)
              vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                   lfin->lim, lfin->hev_thr, 1);
          }
          count = 2;
        } else {
          vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);

          if (mask_4x4_int & 1)
            vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                 lfi->hev_thr, 1);
        }
      } else if (mask_4x4 & 1) {
        if ((mask_4x4 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          vpx_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, lfin->mblim, lfin->lim,
                                    lfin->hev_thr);
          if ((mask_4x4_int & 3) == 3) {
            vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                      lfi->lim, lfi->hev_thr, lfin->mblim,
                                      lfin->lim, lfin->hev_thr);
          } else {
            if (mask_4x4_int & 1)
              vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                   lfi->hev_thr, 1);
            else if (mask_4x4_int & 2)
              vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                   lfin->lim, lfin->hev_thr, 1);
          }
          count = 2;
        } else {
          vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);

          if (mask_4x4_int & 1)
            vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                 lfi->hev_thr, 1);
        }
      } else if (mask_4x4_int & 1) {
        vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                             lfi->hev_thr, 1);
      }
    }
    s += 8 * count;
    lfl += count;

    mask_16x16 >>= count;
    mask_8x8 >>= count;
    mask_4x4 >>= count;
    mask_4x4_int >>= count;
  }
}

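// Note: count is the loop step above. When two adjacent 8x8 blocks both need
// the same size horizontal filter ((mask & 3) == 3), a single dual call (or
// a count of 2) covers 16 pixels and the loop advances two mask bits at once.
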
#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_filter_selectively_horiz(uint16_t *s, int pitch,
                                            unsigned int mask_16x16,
                                            unsigned int mask_8x8,
                                            unsigned int mask_4x4,
                                            unsigned int mask_4x4_int,
                                            const loop_filter_info_n *lfi_n,
                                            const uint8_t *lfl, int bd) {
  unsigned int mask;
  int count;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
       mask; mask >>= count) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    count = 1;
    if (mask & 1) {
      if (mask_16x16 & 1) {
        if ((mask_16x16 & 3) == 3) {
          vpx_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, 2, bd);
          count = 2;
        } else {
          vpx_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, 1, bd);
        }
      } else if (mask_8x8 & 1) {
        if ((mask_8x8 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          vpx_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                           lfi->hev_thr, lfin->mblim, lfin->lim,
                                           lfin->hev_thr, bd);

          if ((mask_4x4_int & 3) == 3) {
            vpx_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                             lfi->lim, lfi->hev_thr,
                                             lfin->mblim, lfin->lim,
                                             lfin->hev_thr, bd);
          } else {
            if (mask_4x4_int & 1) {
              vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                          lfi->lim, lfi->hev_thr, 1, bd);
            } else if (mask_4x4_int & 2) {
              vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                          lfin->lim, lfin->hev_thr, 1, bd);
            }
          }
          count = 2;
        } else {
          vpx_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
                                      lfi->hev_thr, 1, bd);

          if (mask_4x4_int & 1) {
            vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                        lfi->lim, lfi->hev_thr, 1, bd);
          }
        }
      } else if (mask_4x4 & 1) {
        if ((mask_4x4 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          vpx_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                           lfi->hev_thr, lfin->mblim, lfin->lim,
                                           lfin->hev_thr, bd);
          if ((mask_4x4_int & 3) == 3) {
            vpx_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                             lfi->lim, lfi->hev_thr,
                                             lfin->mblim, lfin->lim,
                                             lfin->hev_thr, bd);
          } else {
            if (mask_4x4_int & 1) {
              vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                          lfi->lim, lfi->hev_thr, 1, bd);
            } else if (mask_4x4_int & 2) {
              vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                          lfin->lim, lfin->hev_thr, 1, bd);
            }
          }
          count = 2;
        } else {
          vpx_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
                                      lfi->hev_thr, 1, bd);

          if (mask_4x4_int & 1) {
            vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                        lfi->lim, lfi->hev_thr, 1, bd);
          }
        }
      } else if (mask_4x4_int & 1) {
        vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, 1, bd);
      }
    }
    s += 8 * count;
    lfl += count;

    mask_16x16 >>= count;
    mask_8x8 >>= count;
    mask_4x4 >>= count;
    mask_4x4_int >>= count;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

// This function ORs into the current lfm structure where to do loop
// filters for the specific mi we are looking at. It uses information
// including the block_size_type (32x16, 32x32, etc.), the transform size,
// whether there were any coefficients encoded, and the loop filter strength
// block we are currently looking at. Shift is used to position the
// 1's we produce.
// TODO(JBB) Need another function for different resolution color..
static void build_masks(const loop_filter_info_n *const lfi_n,
                        const MODE_INFO *mi, const int shift_y,
                        const int shift_uv,
                        LOOP_FILTER_MASK *lfm) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  const BLOCK_SIZE block_size = mbmi->sb_type;
  const TX_SIZE tx_size_y = mbmi->tx_size;
  const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
  const int filter_level = get_filter_level(lfi_n, mbmi);
  uint64_t *const left_y = &lfm->left_y[tx_size_y];
  uint64_t *const above_y = &lfm->above_y[tx_size_y];
  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
  uint16_t *const left_uv = &lfm->left_uv[tx_size_uv];
  uint16_t *const above_uv = &lfm->above_uv[tx_size_uv];
  uint16_t *const int_4x4_uv = &lfm->int_4x4_uv;
  int i;

  // If filter level is 0 we don't loop filter.
  if (!filter_level) {
    return;
  } else {
    const int w = num_8x8_blocks_wide_lookup[block_size];
    const int h = num_8x8_blocks_high_lookup[block_size];
    int index = shift_y;
    for (i = 0; i < h; i++) {
      memset(&lfm->lfl_y[index], filter_level, w);
      index += 8;
    }
  }

  // These set 1 in the current block size for the block size edges.
  // For instance if the block size is 32x16, we'll set:
  //    above =   1111
  //              0000
  //    and
  //    left  =   1000
  //              1000
  //
  // NOTE : In this example the low bit is left most ( 1000 ) is stored as
  //        1, not 8...
  //
  // U and V set things on a 16 bit scale.
  //
  *above_y |= above_prediction_mask[block_size] << shift_y;
  *above_uv |= above_prediction_mask_uv[block_size] << shift_uv;
  *left_y |= left_prediction_mask[block_size] << shift_y;
  *left_uv |= left_prediction_mask_uv[block_size] << shift_uv;

  // If the block has no coefficients and is not intra we skip applying
  // the loop filter on block edges.
  if (mbmi->skip && is_inter_block(mbmi))
    return;

  // Here we are adding a mask for the transform size. The transform
  // size mask is set to be correct for a 64x64 prediction block size. We
  // mask to match the size of the block we are working on and then shift it
  // into place.
  *above_y |= (size_mask[block_size] &
               above_64x64_txform_mask[tx_size_y]) << shift_y;
  *above_uv |= (size_mask_uv[block_size] &
                above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;

  *left_y |= (size_mask[block_size] &
              left_64x64_txform_mask[tx_size_y]) << shift_y;
  *left_uv |= (size_mask_uv[block_size] &
               left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;

  // Here we are trying to determine what to do with the internal 4x4 block
  // boundaries. These differ from the 4x4 boundaries on the outside edge of
  // an 8x8 in that the internal ones can be skipped and don't depend on
  // the prediction block size.
  if (tx_size_y == TX_4X4)
    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y;

  if (tx_size_uv == TX_4X4)
    *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv;
}

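// Worked example (illustrative): a BLOCK_16X16 with TX_8X8 at shift_y == 0
// intersects size_mask[BLOCK_16X16] == 0x0303 with the all-ones TX_8X8
// transform masks, ORing bits {0, 1, 8, 9} into *left_y and *above_y: the
// four 8x8 positions covered by the 16x16 block.
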
// This function does the same thing as the one above with the exception that
// it only affects the y masks. It exists because for blocks < 16x16 in size,
// we only update u and v masks on the first block.
static void build_y_mask(const loop_filter_info_n *const lfi_n,
                         const MODE_INFO *mi, const int shift_y,
                         LOOP_FILTER_MASK *lfm) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  const BLOCK_SIZE block_size = mbmi->sb_type;
  const TX_SIZE tx_size_y = mbmi->tx_size;
  const int filter_level = get_filter_level(lfi_n, mbmi);
  uint64_t *const left_y = &lfm->left_y[tx_size_y];
  uint64_t *const above_y = &lfm->above_y[tx_size_y];
  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
  int i;

  if (!filter_level) {
    return;
  } else {
    const int w = num_8x8_blocks_wide_lookup[block_size];
    const int h = num_8x8_blocks_high_lookup[block_size];
    int index = shift_y;
    for (i = 0; i < h; i++) {
      memset(&lfm->lfl_y[index], filter_level, w);
      index += 8;
    }
  }

  *above_y |= above_prediction_mask[block_size] << shift_y;
  *left_y |= left_prediction_mask[block_size] << shift_y;

  if (mbmi->skip && is_inter_block(mbmi))
    return;

  *above_y |= (size_mask[block_size] &
               above_64x64_txform_mask[tx_size_y]) << shift_y;

  *left_y |= (size_mask[block_size] &
              left_64x64_txform_mask[tx_size_y]) << shift_y;

  if (tx_size_y == TX_4X4)
    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y;
}

// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
// TODO(JBB): This function only works for yv12.
void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
                     MODE_INFO **mi, const int mode_info_stride,
                     LOOP_FILTER_MASK *lfm) {
  int idx_32, idx_16, idx_8;
  int i;
  const loop_filter_info_n *const lfi_n = &cm->lf_info;
  MODE_INFO **mip = mi;
  MODE_INFO **mip2 = mi;

  // These are offsets to the next mi in the 64x64 block. It is what gets
  // added to the mi ptr as we go through each loop. It helps us to avoid
  // setting up special row and column counters for each index. The last step
  // brings us out back to the starting position.
  const int offset_32[] = {4, (mode_info_stride << 2) - 4, 4,
                           -(mode_info_stride << 2) - 4};
  const int offset_16[] = {2, (mode_info_stride << 1) - 2, 2,
                           -(mode_info_stride << 1) - 2};
  const int offset[] = {1, mode_info_stride - 1, 1, -mode_info_stride - 1};

  // Following variables represent shifts to position the current block
  // mask over the appropriate block. A shift of 36 to the left will move
  // the bits for the final 32 by 32 block in the 64x64 down 4 rows and
  // over 4 columns to the appropriate spot.
  const int shift_32_y[] = {0, 4, 32, 36};
  const int shift_16_y[] = {0, 2, 16, 18};
  const int shift_8_y[] = {0, 1, 8, 9};
  const int shift_32_uv[] = {0, 2, 8, 10};
  const int shift_16_uv[] = {0, 1, 4, 5};
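  // For example, idx_32 == 3 (the bottom-right 32x32) gives shift_32_y ==
  // 36, i.e. 4 rows down (4 * 8 == 32 bit positions) plus 4 columns over;
  // the matching uv shift on the 4x4-granular 16 bit masks is 10.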
  const int max_rows = (mi_row + MI_BLOCK_SIZE > cm->mi_rows ?
                        cm->mi_rows - mi_row : MI_BLOCK_SIZE);
  const int max_cols = (mi_col + MI_BLOCK_SIZE > cm->mi_cols ?
                        cm->mi_cols - mi_col : MI_BLOCK_SIZE);

  vp10_zero(*lfm);
  assert(mip[0] != NULL);

  // TODO(jimbankoski): Try moving most of the following code into decode
  // loop and storing lfm in the mbmi structure so that we don't have to go
  // through the recursive loop structure multiple times.
  switch (mip[0]->mbmi.sb_type) {
    case BLOCK_64X64:
      build_masks(lfi_n, mip[0], 0, 0, lfm);
      break;
    case BLOCK_64X32:
      build_masks(lfi_n, mip[0], 0, 0, lfm);
      mip2 = mip + mode_info_stride * 4;
      if (4 >= max_rows)
        break;
      build_masks(lfi_n, mip2[0], 32, 8, lfm);
      break;
    case BLOCK_32X64:
      build_masks(lfi_n, mip[0], 0, 0, lfm);
      mip2 = mip + 4;
      if (4 >= max_cols)
        break;
      build_masks(lfi_n, mip2[0], 4, 2, lfm);
      break;
    default:
      for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
        const int shift_y = shift_32_y[idx_32];
        const int shift_uv = shift_32_uv[idx_32];
        const int mi_32_col_offset = ((idx_32 & 1) << 2);
        const int mi_32_row_offset = ((idx_32 >> 1) << 2);
        if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
          continue;
        switch (mip[0]->mbmi.sb_type) {
          case BLOCK_32X32:
            build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
            break;
          case BLOCK_32X16:
            build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
            if (mi_32_row_offset + 2 >= max_rows)
              continue;
            mip2 = mip + mode_info_stride * 2;
            build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm);
            break;
          case BLOCK_16X32:
            build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
            if (mi_32_col_offset + 2 >= max_cols)
              continue;
            mip2 = mip + 2;
            build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm);
            break;
          default:
            for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
              const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16];
              const int shift_uv = shift_32_uv[idx_32] + shift_16_uv[idx_16];
              const int mi_16_col_offset = mi_32_col_offset +
                  ((idx_16 & 1) << 1);
              const int mi_16_row_offset = mi_32_row_offset +
                  ((idx_16 >> 1) << 1);

              if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
                continue;

              switch (mip[0]->mbmi.sb_type) {
                case BLOCK_16X16:
                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                  break;
                case BLOCK_16X8:
                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                  if (mi_16_row_offset + 1 >= max_rows)
                    continue;
                  mip2 = mip + mode_info_stride;
                  build_y_mask(lfi_n, mip2[0], shift_y + 8, lfm);
                  break;
                case BLOCK_8X16:
                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                  if (mi_16_col_offset + 1 >= max_cols)
                    continue;
                  mip2 = mip + 1;
                  build_y_mask(lfi_n, mip2[0], shift_y + 1, lfm);
                  break;
                default: {
                  const int shift_y = shift_32_y[idx_32] +
                                      shift_16_y[idx_16] +
                                      shift_8_y[0];
                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                  mip += offset[0];
                  for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
                    const int shift_y = shift_32_y[idx_32] +
                                        shift_16_y[idx_16] +
                                        shift_8_y[idx_8];
                    const int mi_8_col_offset = mi_16_col_offset +
                        ((idx_8 & 1));
                    const int mi_8_row_offset = mi_16_row_offset +
                        ((idx_8 >> 1));

                    if (mi_8_col_offset >= max_cols ||
                        mi_8_row_offset >= max_rows)
                      continue;
                    build_y_mask(lfi_n, mip[0], shift_y, lfm);
                  }
                  break;
                }
              }
            }
            break;
        }
      }
      break;
  }

  // The largest loopfilter we have is 16x16 so we use the 16x16 mask
  // for 32x32 transforms also.
  lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32];
  lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32];
  lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32];
  lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32];

  // We do at least 8 tap filter on every 32x32 even if the transform size
  // is 4x4. So if the 4x4 is set on a border pixel add it to the 8x8 and
  // remove it from the 4x4.
  lfm->left_y[TX_8X8] |= lfm->left_y[TX_4X4] & left_border;
  lfm->left_y[TX_4X4] &= ~left_border;
  lfm->above_y[TX_8X8] |= lfm->above_y[TX_4X4] & above_border;
  lfm->above_y[TX_4X4] &= ~above_border;
  lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_4X4] & left_border_uv;
  lfm->left_uv[TX_4X4] &= ~left_border_uv;
  lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_4X4] & above_border_uv;
  lfm->above_uv[TX_4X4] &= ~above_border_uv;

  // We do some special edge handling.
  if (mi_row + MI_BLOCK_SIZE > cm->mi_rows) {
    const uint64_t rows = cm->mi_rows - mi_row;

    // Each pixel inside the border gets a 1.
    const uint64_t mask_y = (((uint64_t) 1 << (rows << 3)) - 1);
    const uint16_t mask_uv = (((uint16_t) 1 << (((rows + 1) >> 1) << 2)) - 1);

    // Remove values completely outside our border.
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= mask_y;
      lfm->above_y[i] &= mask_y;
      lfm->left_uv[i] &= mask_uv;
      lfm->above_uv[i] &= mask_uv;
    }
    lfm->int_4x4_y &= mask_y;
    lfm->int_4x4_uv &= mask_uv;

    // We don't apply a wide loop filter on the last uv block row. If set
    // apply the shorter one instead.
    if (rows == 1) {
      lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16];
      lfm->above_uv[TX_16X16] = 0;
    }
    if (rows == 5) {
      lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16] & 0xff00;
      lfm->above_uv[TX_16X16] &= ~(lfm->above_uv[TX_16X16] & 0xff00);
    }
  }

  if (mi_col + MI_BLOCK_SIZE > cm->mi_cols) {
    const uint64_t columns = cm->mi_cols - mi_col;

    // Each pixel inside the border gets a 1, the multiply copies the border
    // to where we need it.
    const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101ULL;
    const uint16_t mask_uv = ((1 << ((columns + 1) >> 1)) - 1) * 0x1111;

    // Internal edges are not applied on the last column of the image so
    // we mask 1 more for the internal edges.
    const uint16_t mask_uv_int = ((1 << (columns >> 1)) - 1) * 0x1111;

    // Remove the bits outside the image edge.
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= mask_y;
      lfm->above_y[i] &= mask_y;
      lfm->left_uv[i] &= mask_uv;
      lfm->above_uv[i] &= mask_uv;
    }
    lfm->int_4x4_y &= mask_y;
    lfm->int_4x4_uv &= mask_uv_int;

    // We don't apply a wide loop filter on the last uv column. If set
    // apply the shorter one instead.
    if (columns == 1) {
      lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_16X16];
      lfm->left_uv[TX_16X16] = 0;
    }
    if (columns == 5) {
      lfm->left_uv[TX_8X8] |= (lfm->left_uv[TX_16X16] & 0xcccc);
      lfm->left_uv[TX_16X16] &= ~(lfm->left_uv[TX_16X16] & 0xcccc);
    }
  }

  // We don't apply a loop filter on the first column in the image, mask that
  // out of it.
  if (mi_col == 0) {
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= 0xfefefefefefefefeULL;
      lfm->left_uv[i] &= 0xeeee;
    }
  }

  // Assert if we try to apply 2 different loop filters at the same position.
  assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_8X8]));
  assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4]));
  assert(!(lfm->left_y[TX_8X8] & lfm->left_y[TX_4X4]));
  assert(!(lfm->int_4x4_y & lfm->left_y[TX_16X16]));
  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_8X8]));
  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4]));
  assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4]));
  assert(!(lfm->int_4x4_uv & lfm->left_uv[TX_16X16]));
  assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8]));
  assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4]));
  assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4]));
  assert(!(lfm->int_4x4_y & lfm->above_y[TX_16X16]));
  assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8]));
  assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4]));
  assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4]));
  assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16]));
}

static void filter_selectively_vert(uint8_t *s, int pitch,
                                    unsigned int mask_16x16,
                                    unsigned int mask_8x8,
                                    unsigned int mask_4x4,
                                    unsigned int mask_4x4_int,
                                    const loop_filter_info_n *lfi_n,
                                    const uint8_t *lfl) {
  unsigned int mask;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    if (mask & 1) {
      if (mask_16x16 & 1) {
        vpx_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
      } else if (mask_8x8 & 1) {
        vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
      } else if (mask_4x4 & 1) {
        vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
      }
    }
    if (mask_4x4_int & 1)
      vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
    s += 8;
    lfl += 1;
    mask_16x16 >>= 1;
    mask_8x8 >>= 1;
    mask_4x4 >>= 1;
    mask_4x4_int >>= 1;
  }
}

#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_filter_selectively_vert(uint16_t *s, int pitch,
                                           unsigned int mask_16x16,
                                           unsigned int mask_8x8,
                                           unsigned int mask_4x4,
                                           unsigned int mask_4x4_int,
                                           const loop_filter_info_n *lfi_n,
                                           const uint8_t *lfl, int bd) {
  unsigned int mask;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    if (mask & 1) {
      if (mask_16x16 & 1) {
        vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim,
                                   lfi->hev_thr, bd);
      } else if (mask_8x8 & 1) {
        vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim,
                                  lfi->hev_thr, 1, bd);
      } else if (mask_4x4 & 1) {
        vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim,
                                  lfi->hev_thr, 1, bd);
      }
    }
    if (mask_4x4_int & 1)
      vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
                                lfi->hev_thr, 1, bd);
    s += 8;
    lfl += 1;
    mask_16x16 >>= 1;
    mask_8x8 >>= 1;
    mask_4x4 >>= 1;
    mask_4x4_int >>= 1;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

void vp10_filter_block_plane_non420(VP10_COMMON *cm,
                                    struct macroblockd_plane *plane,
                                    MODE_INFO **mi_8x8,
                                    int mi_row, int mi_col) {
  const int ss_x = plane->subsampling_x;
  const int ss_y = plane->subsampling_y;
  const int row_step = 1 << ss_y;
  const int col_step = 1 << ss_x;
  const int row_step_stride = cm->mi_stride * row_step;
  struct buf_2d *const dst = &plane->dst;
  uint8_t *const dst0 = dst->buf;
  unsigned int mask_16x16[MI_BLOCK_SIZE] = {0};
  unsigned int mask_8x8[MI_BLOCK_SIZE] = {0};
  unsigned int mask_4x4[MI_BLOCK_SIZE] = {0};
  unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0};
  uint8_t lfl[MI_BLOCK_SIZE * MI_BLOCK_SIZE];
  int r, c;

  for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
    unsigned int mask_16x16_c = 0;
    unsigned int mask_8x8_c = 0;
    unsigned int mask_4x4_c = 0;
    unsigned int border_mask;

    // Determine the vertical edges that need filtering
    for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
      const MODE_INFO *mi = mi_8x8[c];
      const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
      const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi);
      // left edge of current unit is block/partition edge -> no skip
      const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ?
          !(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1;
      const int skip_this_c = skip_this && !block_edge_left;
      // top edge of current unit is block/partition edge -> no skip
      const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ?
          !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1;
      const int skip_this_r = skip_this && !block_edge_above;
      const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
                            ? get_uv_tx_size(&mi[0].mbmi, plane)
                            : mi[0].mbmi.tx_size;
      const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
      const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;

      // Filter level can vary per MI
      if (!(lfl[(r << 3) + (c >> ss_x)] =
            get_filter_level(&cm->lf_info, &mi[0].mbmi)))
        continue;

      // Build masks based on the transform size of each block
      if (tx_size == TX_32X32) {
        if (!skip_this_c && ((c >> ss_x) & 3) == 0) {
          if (!skip_border_4x4_c)
            mask_16x16_c |= 1 << (c >> ss_x);
          else
            mask_8x8_c |= 1 << (c >> ss_x);
        }
        if (!skip_this_r && ((r >> ss_y) & 3) == 0) {
          if (!skip_border_4x4_r)
            mask_16x16[r] |= 1 << (c >> ss_x);
          else
            mask_8x8[r] |= 1 << (c >> ss_x);
        }
      } else if (tx_size == TX_16X16) {
        if (!skip_this_c && ((c >> ss_x) & 1) == 0) {
          if (!skip_border_4x4_c)
            mask_16x16_c |= 1 << (c >> ss_x);
          else
            mask_8x8_c |= 1 << (c >> ss_x);
        }
        if (!skip_this_r && ((r >> ss_y) & 1) == 0) {
          if (!skip_border_4x4_r)
            mask_16x16[r] |= 1 << (c >> ss_x);
          else
            mask_8x8[r] |= 1 << (c >> ss_x);
        }
      } else {
        // force 8x8 filtering on 32x32 boundaries
        if (!skip_this_c) {
          if (tx_size == TX_8X8 || ((c >> ss_x) & 3) == 0)
            mask_8x8_c |= 1 << (c >> ss_x);
          else
            mask_4x4_c |= 1 << (c >> ss_x);
        }

        if (!skip_this_r) {
          if (tx_size == TX_8X8 || ((r >> ss_y) & 3) == 0)
            mask_8x8[r] |= 1 << (c >> ss_x);
          else
            mask_4x4[r] |= 1 << (c >> ss_x);
        }

        if (!skip_this && tx_size < TX_8X8 && !skip_border_4x4_c)
          mask_4x4_int[r] |= 1 << (c >> ss_x);
      }
    }

    // Disable filtering on the leftmost column
    border_mask = ~(mi_col == 0);
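    // Note: (mi_col == 0) evaluates to 0 or 1, so border_mask is either all
    // ones or all ones with bit 0 cleared, which drops the frame's leftmost
    // vertical edge from the masks passed below.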
#if CONFIG_VP9_HIGHBITDEPTH
    if (cm->use_highbitdepth) {
      highbd_filter_selectively_vert(CONVERT_TO_SHORTPTR(dst->buf),
                                     dst->stride,
                                     mask_16x16_c & border_mask,
                                     mask_8x8_c & border_mask,
                                     mask_4x4_c & border_mask,
                                     mask_4x4_int[r],
                                     &cm->lf_info, &lfl[r << 3],
                                     (int)cm->bit_depth);
    } else {
      filter_selectively_vert(dst->buf, dst->stride,
                              mask_16x16_c & border_mask,
                              mask_8x8_c & border_mask,
                              mask_4x4_c & border_mask,
                              mask_4x4_int[r],
                              &cm->lf_info, &lfl[r << 3]);
    }
#else
    filter_selectively_vert(dst->buf, dst->stride,
                            mask_16x16_c & border_mask,
                            mask_8x8_c & border_mask,
                            mask_4x4_c & border_mask,
                            mask_4x4_int[r],
                            &cm->lf_info, &lfl[r << 3]);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    dst->buf += 8 * dst->stride;
    mi_8x8 += row_step_stride;
  }

  // Now do horizontal pass
  dst->buf = dst0;
  for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
    const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
    const unsigned int mask_4x4_int_r = skip_border_4x4_r ? 0 : mask_4x4_int[r];

    unsigned int mask_16x16_r;
    unsigned int mask_8x8_r;
    unsigned int mask_4x4_r;

    if (mi_row + r == 0) {
      mask_16x16_r = 0;
      mask_8x8_r = 0;
      mask_4x4_r = 0;
    } else {
      mask_16x16_r = mask_16x16[r];
      mask_8x8_r = mask_8x8[r];
      mask_4x4_r = mask_4x4[r];
    }
#if CONFIG_VP9_HIGHBITDEPTH
    if (cm->use_highbitdepth) {
      highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                      dst->stride,
                                      mask_16x16_r,
                                      mask_8x8_r,
                                      mask_4x4_r,
                                      mask_4x4_int_r,
                                      &cm->lf_info, &lfl[r << 3],
                                      (int)cm->bit_depth);
    } else {
      filter_selectively_horiz(dst->buf, dst->stride,
                               mask_16x16_r,
                               mask_8x8_r,
                               mask_4x4_r,
                               mask_4x4_int_r,
                               &cm->lf_info, &lfl[r << 3]);
    }
#else
    filter_selectively_horiz(dst->buf, dst->stride,
                             mask_16x16_r,
                             mask_8x8_r,
                             mask_4x4_r,
                             mask_4x4_int_r,
                             &cm->lf_info, &lfl[r << 3]);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    dst->buf += 8 * dst->stride;
  }
}

void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
                                  struct macroblockd_plane *const plane,
                                  int mi_row,
                                  LOOP_FILTER_MASK *lfm) {
  struct buf_2d *const dst = &plane->dst;
  uint8_t *const dst0 = dst->buf;
  int r;
  uint64_t mask_16x16 = lfm->left_y[TX_16X16];
  uint64_t mask_8x8 = lfm->left_y[TX_8X8];
  uint64_t mask_4x4 = lfm->left_y[TX_4X4];
  uint64_t mask_4x4_int = lfm->int_4x4_y;

  assert(plane->subsampling_x == 0 && plane->subsampling_y == 0);

  // Vertical pass: do 2 rows at one time
  for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) {
    unsigned int mask_16x16_l = mask_16x16 & 0xffff;
    unsigned int mask_8x8_l = mask_8x8 & 0xffff;
    unsigned int mask_4x4_l = mask_4x4 & 0xffff;
    unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff;

    // Disable filtering on the leftmost column.
#if CONFIG_VP9_HIGHBITDEPTH
    if (cm->use_highbitdepth) {
      highbd_filter_selectively_vert_row2(
          plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
          mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info,
          &lfm->lfl_y[r << 3], (int)cm->bit_depth);
    } else {
      filter_selectively_vert_row2(
          plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
          mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r << 3]);
    }
#else
    filter_selectively_vert_row2(
        plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
        mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r << 3]);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    dst->buf += 16 * dst->stride;
    mask_16x16 >>= 16;
    mask_8x8 >>= 16;
    mask_4x4 >>= 16;
    mask_4x4_int >>= 16;
  }

  // Horizontal pass
  dst->buf = dst0;
  mask_16x16 = lfm->above_y[TX_16X16];
  mask_8x8 = lfm->above_y[TX_8X8];
  mask_4x4 = lfm->above_y[TX_4X4];
  mask_4x4_int = lfm->int_4x4_y;

  for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r++) {
    unsigned int mask_16x16_r;
    unsigned int mask_8x8_r;
    unsigned int mask_4x4_r;

    if (mi_row + r == 0) {
      mask_16x16_r = 0;
      mask_8x8_r = 0;
      mask_4x4_r = 0;
    } else {
      mask_16x16_r = mask_16x16 & 0xff;
      mask_8x8_r = mask_8x8 & 0xff;
      mask_4x4_r = mask_4x4 & 0xff;
    }

#if CONFIG_VP9_HIGHBITDEPTH
    if (cm->use_highbitdepth) {
      highbd_filter_selectively_horiz(
          CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
          mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info, &lfm->lfl_y[r << 3],
          (int)cm->bit_depth);
    } else {
      filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                               mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info,
                               &lfm->lfl_y[r << 3]);
    }
#else
    filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                             mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info,
                             &lfm->lfl_y[r << 3]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    dst->buf += 8 * dst->stride;
    mask_16x16 >>= 8;
    mask_8x8 >>= 8;
    mask_4x4 >>= 8;
    mask_4x4_int >>= 8;
  }
}

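// Note for the ss00 path above: with no subsampling the vertical pass
// consumes two 8-bit mask rows (16 bits) and 16 pixel rows per iteration,
// while the horizontal pass consumes 8 mask bits and 8 pixel rows per row.
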
void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
                                  struct macroblockd_plane *const plane,
                                  int mi_row,
                                  LOOP_FILTER_MASK *lfm) {
  struct buf_2d *const dst = &plane->dst;
  uint8_t *const dst0 = dst->buf;
  int r, c;

  uint16_t mask_16x16 = lfm->left_uv[TX_16X16];
  uint16_t mask_8x8 = lfm->left_uv[TX_8X8];
  uint16_t mask_4x4 = lfm->left_uv[TX_4X4];
  uint16_t mask_4x4_int = lfm->int_4x4_uv;

  assert(plane->subsampling_x == 1 && plane->subsampling_y == 1);

  // Vertical pass: do 2 rows at one time
  for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 4) {
    if (plane->plane_type == 1) {
      for (c = 0; c < (MI_BLOCK_SIZE >> 1); c++) {
        lfm->lfl_uv[(r << 1) + c] = lfm->lfl_y[(r << 3) + (c << 1)];
        lfm->lfl_uv[((r + 2) << 1) + c] = lfm->lfl_y[((r + 2) << 3) + (c << 1)];
      }
    }

    {
      unsigned int mask_16x16_l = mask_16x16 & 0xff;
      unsigned int mask_8x8_l = mask_8x8 & 0xff;
      unsigned int mask_4x4_l = mask_4x4 & 0xff;
      unsigned int mask_4x4_int_l = mask_4x4_int & 0xff;

      // Disable filtering on the leftmost column.
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        highbd_filter_selectively_vert_row2(
            plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
            mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info,
            &lfm->lfl_uv[r << 1], (int)cm->bit_depth);
      } else {
        filter_selectively_vert_row2(
            plane->subsampling_x, dst->buf, dst->stride,
            mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info,
            &lfm->lfl_uv[r << 1]);
      }
#else
      filter_selectively_vert_row2(
          plane->subsampling_x, dst->buf, dst->stride,
          mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info,
          &lfm->lfl_uv[r << 1]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

      dst->buf += 16 * dst->stride;
      mask_16x16 >>= 8;
      mask_8x8 >>= 8;
      mask_4x4 >>= 8;
      mask_4x4_int >>= 8;
    }
  }

  // Horizontal pass
  dst->buf = dst0;
  mask_16x16 = lfm->above_uv[TX_16X16];
  mask_8x8 = lfm->above_uv[TX_8X8];
  mask_4x4 = lfm->above_uv[TX_4X4];
  mask_4x4_int = lfm->int_4x4_uv;

  for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) {
    const int skip_border_4x4_r = mi_row + r == cm->mi_rows - 1;
    const unsigned int mask_4x4_int_r =
        skip_border_4x4_r ? 0 : (mask_4x4_int & 0xf);
    unsigned int mask_16x16_r;
    unsigned int mask_8x8_r;
    unsigned int mask_4x4_r;

    if (mi_row + r == 0) {
      mask_16x16_r = 0;
      mask_8x8_r = 0;
      mask_4x4_r = 0;
    } else {
      mask_16x16_r = mask_16x16 & 0xf;
      mask_8x8_r = mask_8x8 & 0xf;
      mask_4x4_r = mask_4x4 & 0xf;
    }

#if CONFIG_VP9_HIGHBITDEPTH
    if (cm->use_highbitdepth) {
      highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                      dst->stride, mask_16x16_r, mask_8x8_r,
                                      mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                                      &lfm->lfl_uv[r << 1], (int)cm->bit_depth);
    } else {
      filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                               mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                               &lfm->lfl_uv[r << 1]);
    }
#else
    filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                             mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                             &lfm->lfl_uv[r << 1]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    dst->buf += 8 * dst->stride;
    mask_16x16 >>= 4;
    mask_8x8 >>= 4;
    mask_4x4 >>= 4;
    mask_4x4_int >>= 4;
  }
}

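// Note for the ss11 path above: each mask bit spans an 8x8 chroma block, so
// the vertical pass consumes 8 mask bits (two chroma block rows) per
// iteration and the horizontal pass 4 bits per filtered row.
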
void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
                           VP10_COMMON *cm,
                           struct macroblockd_plane planes[MAX_MB_PLANE],
                           int start, int stop, int y_only) {
  const int num_planes = y_only ? 1 : MAX_MB_PLANE;
  enum lf_path path;
  LOOP_FILTER_MASK lfm;
  int mi_row, mi_col;

  if (y_only)
    path = LF_PATH_444;
  else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1)
    path = LF_PATH_420;
  else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0)
    path = LF_PATH_444;
  else
    path = LF_PATH_SLOW;

  for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) {
    MODE_INFO **mi = cm->mi_grid_visible + mi_row * cm->mi_stride;

    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
      int plane;

      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);

      // TODO(JBB): Make setup_mask work for non 420.
      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
                      &lfm);

      vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
      for (plane = 1; plane < num_planes; ++plane) {
        switch (path) {
          case LF_PATH_420:
            vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
            break;
          case LF_PATH_444:
            vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
            break;
          case LF_PATH_SLOW:
            vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
                                           mi_row, mi_col);
            break;
        }
      }
    }
  }
}

void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame,
                            VP10_COMMON *cm, MACROBLOCKD *xd,
                            int frame_filter_level,
                            int y_only, int partial_frame) {
  int start_mi_row, end_mi_row, mi_rows_to_filter;
  if (!frame_filter_level) return;
  start_mi_row = 0;
  mi_rows_to_filter = cm->mi_rows;
  if (partial_frame && cm->mi_rows > 8) {
    start_mi_row = cm->mi_rows >> 1;
    start_mi_row &= 0xfffffff8;
    mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
  }
  end_mi_row = start_mi_row + mi_rows_to_filter;
  vp10_loop_filter_frame_init(cm, frame_filter_level);
  vp10_loop_filter_rows(frame, cm, xd->plane,
                        start_mi_row, end_mi_row,
                        y_only);
}

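// Worked example (illustrative): for a partial-frame pass with cm->mi_rows ==
// 135, start_mi_row == (135 >> 1) & ~7 == 64 and mi_rows_to_filter ==
// VPXMAX(135 / 8, 8) == 16, so only mi rows [64, 80) get filtered.
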
void vp10_loop_filter_data_reset(
    LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
    struct VP10Common *cm,
    const struct macroblockd_plane planes[MAX_MB_PLANE]) {
  lf_data->frame_buffer = frame_buffer;
  lf_data->cm = cm;
  lf_data->start = 0;
  lf_data->stop = 0;
  lf_data->y_only = 0;
  memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
}

int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
  (void)unused;
  vp10_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
                        lf_data->start, lf_data->stop, lf_data->y_only);
  return 1;
}