/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
14 #include "vp9/common/vp9_alloccommon.h"
15 #include "vp9/common/vp9_onyxc_int.h"
16 #include "vp9/common/vp9_quant_common.h"
17 #include "vp9/common/vp9_reconinter.h"
18 #include "vp9/common/vp9_systemdependent.h"
19 #include "vp9/encoder/vp9_extend.h"
20 #include "vp9/encoder/vp9_firstpass.h"
21 #include "vp9/encoder/vp9_mcomp.h"
22 #include "vp9/encoder/vp9_encoder.h"
23 #include "vp9/encoder/vp9_quantize.h"
24 #include "vp9/encoder/vp9_ratectrl.h"
25 #include "vp9/encoder/vp9_segmentation.h"
26 #include "vp9/encoder/vp9_temporal_filter.h"
27 #include "vpx_mem/vpx_mem.h"
28 #include "vpx_ports/mem.h"
29 #include "vpx_ports/vpx_timer.h"
30 #include "vpx_scale/vpx_scale.h"
// Reciprocal table used to avoid per-pixel division when normalizing the
// temporal-filter accumulator: fixed_divide[d] == (1 << 19) / d, filled by
// vp9_temporal_filter_init() (index 0 is unused and stays zero).
static int fixed_divide[512];
// Builds the motion-compensated 16x16 Y predictor and the matching U/V
// predictors for one macroblock at (x, y) using the MV (mv_row, mv_col),
// writing Y to pred[0..255], U to pred[256..], V to pred[512..].
// NOTE(review): this chunk is a garbled extraction — each line carries a
// stray leading number and several lines (remaining parameters, else
// branches, closing braces) were dropped; restore from upstream before
// compiling.
34 static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
44 struct scale_factors *scale,
46 const int which_mv = 0;
47 const MV mv = { mv_row, mv_col };
48 const InterpKernel *const kernel =
49 vp9_get_interp_kernel(xd->mi[0]->mbmi.interp_filter);
51 enum mv_precision mv_precision_uv;
// A UV block width of 8 implies subsampled chroma: halve the stride
// (rounding up) and use Q4 MV precision so the half-resolution chroma MV
// keeps full accuracy; otherwise Q3 is used.
53 if (uv_block_width == 8) {
54 uv_stride = (stride + 1) >> 1;
55 mv_precision_uv = MV_PRECISION_Q4;
// NOTE(review): dropped else-branch here — presumably sets
// uv_stride = stride before this assignment; confirm against upstream.
58 mv_precision_uv = MV_PRECISION_Q3;
// High bit depth path: same three predictors, built with the
// bit-depth-aware helper (passes xd->bd).
61 #if CONFIG_VP9_HIGHBITDEPTH
62 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
63 vp9_highbd_build_inter_predictor(y_mb_ptr, stride,
69 kernel, MV_PRECISION_Q3, x, y, xd->bd);
71 vp9_highbd_build_inter_predictor(u_mb_ptr, uv_stride,
72 &pred[256], uv_block_width,
75 uv_block_width, uv_block_height,
77 kernel, mv_precision_uv, x, y, xd->bd);
79 vp9_highbd_build_inter_predictor(v_mb_ptr, uv_stride,
80 &pred[512], uv_block_width,
83 uv_block_width, uv_block_height,
85 kernel, mv_precision_uv, x, y, xd->bd);
88 #endif // CONFIG_VP9_HIGHBITDEPTH
// 8-bit path: Y predictor at Q3 precision, then U and V at the chroma
// precision chosen above.
89 vp9_build_inter_predictor(y_mb_ptr, stride,
95 kernel, MV_PRECISION_Q3, x, y);
97 vp9_build_inter_predictor(u_mb_ptr, uv_stride,
98 &pred[256], uv_block_width,
101 uv_block_width, uv_block_height,
103 kernel, mv_precision_uv, x, y);
105 vp9_build_inter_predictor(v_mb_ptr, uv_stride,
106 &pred[512], uv_block_width,
109 uv_block_width, uv_block_height,
111 kernel, mv_precision_uv, x, y);
114 void vp9_temporal_filter_init(void) {
118 for (i = 1; i < 512; ++i)
119 fixed_divide[i] = 0x80000 / i;
// Accumulates one temporally-filtered block into accumulator[]/count[].
//
// For each pixel, the squared difference between the source (frame1) and
// the motion-compensated predictor (frame2) is mapped to a weight:
//   coeff  = round((3 * diff^2) / 2^strength), clamped to [0, 16]
//   weight = (16 - coeff) * filter_weight
// count[k] accumulates the weights and accumulator[k] the weighted
// predictor pixels, so the caller can later normalize accumulator/count to
// produce the filtered output.
//
// frame1        - source pixels, row stride `stride`.
// frame2        - predictor pixels, packed (row stride == block_width).
// block_width   - block width in pixels.
// block_height  - block height in pixels.
// strength      - filter strength (right shift applied to 3 * diff^2).
// filter_weight - per-block weight (0, 1 or 2).
// accumulator   - per-pixel running weighted sums (length >= w * h).
// count         - per-pixel running weight sums (length >= w * h).
//
// (Reconstructed: the extraction had dropped the parameter lines, the
// `modifier *= 3;` step, the clamp to 16, `byte++` and the closing braces,
// and left stray line-number prefixes.)
void vp9_temporal_filter_apply_c(uint8_t *frame1,
                                 unsigned int stride,
                                 uint8_t *frame2,
                                 unsigned int block_width,
                                 unsigned int block_height,
                                 int strength,
                                 int filter_weight,
                                 unsigned int *accumulator,
                                 uint16_t *count) {
  unsigned int i, j, k;
  int modifier;
  int byte = 0;
  // Half-ulp rounding term for the >> strength below.
  const int rounding = strength > 0 ? 1 << (strength - 1) : 0;

  for (i = 0, k = 0; i < block_height; i++) {
    for (j = 0; j < block_width; j++, k++) {
      int src_byte = frame1[byte];
      int pixel_value = *frame2++;

      modifier = src_byte - pixel_value;
      // This is an integer approximation of:
      // float coeff = (3.0 * modifier * modifier) / pow(2, strength);
      // modifier = (int)roundf(coeff > 16 ? 0 : 16 - coeff);
      modifier *= modifier;
      modifier *= 3;
      modifier += rounding;
      modifier >>= strength;

      if (modifier > 16)
        modifier = 16;

      modifier = 16 - modifier;
      modifier *= filter_weight;

      count[k] += modifier;
      accumulator[k] += modifier * pixel_value;

      byte++;
    }

    byte += stride - block_width;
  }
}
166 #if CONFIG_VP9_HIGHBITDEPTH
// High bit depth counterpart of vp9_temporal_filter_apply_c(): identical
// weighting math, but the frame pointers are CONVERT_TO_SHORTPTR'd to
// uint16_t sample buffers. The caller passes a strength already adjusted
// for bit depth (see the adj_strength computation in the iterate loop).
//
// (Reconstructed: the extraction had dropped the parameter lines, the
// `modifier *= 3;` step, the clamp to 16, `byte++` and the closing braces,
// and left stray line-number prefixes.)
void vp9_highbd_temporal_filter_apply_c(uint8_t *frame1_8,
                                        unsigned int stride,
                                        uint8_t *frame2_8,
                                        unsigned int block_width,
                                        unsigned int block_height,
                                        int strength,
                                        int filter_weight,
                                        unsigned int *accumulator,
                                        uint16_t *count) {
  uint16_t *frame1 = CONVERT_TO_SHORTPTR(frame1_8);
  uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8);
  unsigned int i, j, k;
  int modifier;
  int byte = 0;
  // Half-ulp rounding term for the >> strength below.
  const int rounding = strength > 0 ? 1 << (strength - 1) : 0;

  for (i = 0, k = 0; i < block_height; i++) {
    for (j = 0; j < block_width; j++, k++) {
      int src_byte = frame1[byte];
      int pixel_value = *frame2++;

      modifier = src_byte - pixel_value;
      // This is an integer approximation of:
      // float coeff = (3.0 * modifier * modifier) / pow(2, strength);
      // modifier = (int)roundf(coeff > 16 ? 0 : 16 - coeff);
      modifier *= modifier;
      modifier *= 3;
      modifier += rounding;
      modifier >>= strength;

      if (modifier > 16)
        modifier = 16;

      modifier = 16 - modifier;
      modifier *= filter_weight;

      count[k] += modifier;
      accumulator[k] += modifier * pixel_value;

      byte++;
    }

    byte += stride - block_width;
  }
}
212 #endif // CONFIG_VP9_HIGHBITDEPTH
// Motion search: finds the best match for the ARF 16x16 block
// (arf_frame_buf) inside the candidate frame (frame_ptr_buf) — a hex
// full-pel search followed by sub-pel refinement — and leaves the winning
// MV in xd->mi[0]->bmi[0].as_mv[0] (via ref_mv). The error score
// (bestsme) is presumably returned by a dropped `return` line; the caller
// uses it as the match error. MV costing is disabled throughout.
// NOTE(review): garbled extraction — stray leading numbers and dropped
// lines (e.g. the cost_list/sse/distortion declarations and the final
// return/closing brace); restore from upstream before compiling.
214 static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
215 uint8_t *arf_frame_buf,
216 uint8_t *frame_ptr_buf,
218 MACROBLOCK *const x = &cpi->td.mb;
219 MACROBLOCKD *const xd = &x->e_mbd;
220 const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
222 int sadpb = x->sadperbit16;
223 int bestsme = INT_MAX;
228 MV best_ref_mv1 = {0, 0};
229 MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
230 MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv;
// Save src/pre buffer state so it can be restored after the search.
233 struct buf_2d src = x->plane[0].src;
234 struct buf_2d pre = xd->plane[0].pre[0];
// Q3 (1/8-pel) -> full-pel conversion of the zero starting MV.
236 best_ref_mv1_full.col = best_ref_mv1.col >> 3;
237 best_ref_mv1_full.row = best_ref_mv1.row >> 3;
239 // Setup frame pointers
240 x->plane[0].src.buf = arf_frame_buf;
241 x->plane[0].src.stride = stride;
242 xd->plane[0].pre[0].buf = frame_ptr_buf;
243 xd->plane[0].pre[0].stride = stride;
245 step_param = mv_sf->reduce_first_step_size;
246 step_param = MIN(step_param, MAX_MVSEARCH_STEPS - 2);
248 // Ignore mv costing by sending NULL pointer instead of cost arrays
249 vp9_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
250 cond_cost_list(cpi, cost_list),
251 &cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv);
253 // Ignore mv costing by sending NULL pointer instead of cost array
254 bestsme = cpi->find_fractional_mv_step(x, ref_mv,
256 cpi->common.allow_high_precision_mv,
258 &cpi->fn_ptr[BLOCK_16X16],
259 0, mv_sf->subpel_iters_per_step,
260 cond_cost_list(cpi, cost_list),
262 &distortion, &sse, NULL, 0, 0);
264 // Restore input state
265 x->plane[0].src = src;
266 xd->plane[0].pre[0] = pre;
// Core temporal-filter loop. For every 16x16 macroblock of the ARF frame:
// for each frame in the blur window, motion-search a matching block, build
// a motion-compensated predictor, and accumulate error-weighted pixels
// into accumulator[]/count[]; finally normalize (using the fixed_divide
// reciprocal table) into cpi->alt_ref_buffer for Y, U and V.
// NOTE(review): garbled extraction — stray leading numbers and many
// dropped lines (parameters, declarations, else branches, closing braces);
// restore from upstream before compiling.
271 static void temporal_filter_iterate_c(VP9_COMP *cpi,
272 YV12_BUFFER_CONFIG **frames,
276 struct scale_factors *scale) {
280 unsigned int filter_weight;
// Frame dimensions rounded up to whole 16x16 macroblocks.
281 int mb_cols = (frames[alt_ref_index]->y_crop_width + 15) >> 4;
282 int mb_rows = (frames[alt_ref_index]->y_crop_height + 15) >> 4;
284 int mb_uv_offset = 0;
// One 16x16 accumulator/count plane each for Y, U and V (3 * 256 entries).
285 DECLARE_ALIGNED(16, unsigned int, accumulator[16 * 16 * 3]);
286 DECLARE_ALIGNED(16, uint16_t, count[16 * 16 * 3]);
287 MACROBLOCKD *mbd = &cpi->td.mb.e_mbd;
288 YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
289 uint8_t *dst1, *dst2;
290 #if CONFIG_VP9_HIGHBITDEPTH
291 DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
292 DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
295 DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]);
// Chroma block size follows the subsampling of plane 1.
297 const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;
298 const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
301 uint8_t* input_buffer[MAX_MB_PLANE];
// Select the 8-bit or high-bit-depth predictor scratch buffer.
303 #if CONFIG_VP9_HIGHBITDEPTH
304 if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
305 predictor = CONVERT_TO_BYTEPTR(predictor16);
307 predictor = predictor8;
// Save pre-buffer pointers; they are clobbered by the MB search below and
// restored at the end of the function.
311 for (i = 0; i < MAX_MB_PLANE; i++)
312 input_buffer[i] = mbd->plane[i].pre[0].buf;
314 for (mb_row = 0; mb_row < mb_rows; mb_row++) {
315 // Source frames are extended to 16 pixels. This is different than
316 // L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS)
317 // A 6/8 tap filter is used for motion search. This requires 2 pixels
318 // before and 3 pixels after. So the largest Y mv on a border would
319 // then be 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the
320 // Y and therefore only extended by 8. The largest mv that a UV block
321 // can support is 8 - VP9_INTERP_EXTEND. A UV mv is half of a Y mv.
322 // (16 - VP9_INTERP_EXTEND) >> 1 which is greater than
323 // 8 - VP9_INTERP_EXTEND.
324 // To keep the mv in play for both Y and UV planes the max that it
325 // can be on a border is therefore 16 - (2*VP9_INTERP_EXTEND+1).
326 cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND));
327 cpi->td.mb.mv_row_max = ((mb_rows - 1 - mb_row) * 16)
328 + (17 - 2 * VP9_INTERP_EXTEND);
330 for (mb_col = 0; mb_col < mb_cols; mb_col++) {
// Fresh accumulators for this macroblock.
334 memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
335 memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
337 cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
338 cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16)
339 + (17 - 2 * VP9_INTERP_EXTEND);
341 for (frame = 0; frame < frame_count; frame++) {
342 const int thresh_low = 10000;
343 const int thresh_high = 20000;
345 if (frames[frame] == NULL)
// Start each search from the zero MV.
348 mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0;
349 mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0;
// The ARF itself always gets full weight (dropped assignment here —
// presumably filter_weight = 2; confirm against upstream).
351 if (frame == alt_ref_index) {
354 // Find best match in this frame by MC
355 int err = temporal_filter_find_matching_mb_c(cpi,
356 frames[alt_ref_index]->y_buffer + mb_y_offset,
357 frames[frame]->y_buffer + mb_y_offset,
358 frames[frame]->y_stride);
360 // Assign higher weight to matching MB if it's error
361 // score is lower. If not applying MC default behavior
362 // is to weight all MBs equal.
363 filter_weight = err < thresh_low
364 ? 2 : err < thresh_high ? 1 : 0;
367 if (filter_weight != 0) {
368 // Construct the predictors
369 temporal_filter_predictors_mb_c(mbd,
370 frames[frame]->y_buffer + mb_y_offset,
371 frames[frame]->u_buffer + mb_uv_offset,
372 frames[frame]->v_buffer + mb_uv_offset,
373 frames[frame]->y_stride,
374 mb_uv_width, mb_uv_height,
375 mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
376 mbd->mi[0]->bmi[0].as_mv[0].as_mv.col,
378 mb_col * 16, mb_row * 16);
380 #if CONFIG_VP9_HIGHBITDEPTH
381 if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
// High bit depth: strength scaled up by 2 per extra bit of depth.
382 int adj_strength = strength + 2 * (mbd->bd - 8);
383 // Apply the filter (YUV)
384 vp9_highbd_temporal_filter_apply(f->y_buffer + mb_y_offset,
386 predictor, 16, 16, adj_strength,
389 vp9_highbd_temporal_filter_apply(f->u_buffer + mb_uv_offset,
390 f->uv_stride, predictor + 256,
391 mb_uv_width, mb_uv_height,
393 filter_weight, accumulator + 256,
395 vp9_highbd_temporal_filter_apply(f->v_buffer + mb_uv_offset,
396 f->uv_stride, predictor + 512,
397 mb_uv_width, mb_uv_height,
398 adj_strength, filter_weight,
399 accumulator + 512, count + 512);
401 // Apply the filter (YUV)
402 vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
404 strength, filter_weight,
406 vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
408 mb_uv_width, mb_uv_height, strength,
409 filter_weight, accumulator + 256,
411 vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
413 mb_uv_width, mb_uv_height, strength,
414 filter_weight, accumulator + 512,
418 // Apply the filter (YUV)
419 vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
421 strength, filter_weight,
423 vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
425 mb_uv_width, mb_uv_height, strength,
426 filter_weight, accumulator + 256,
428 vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
430 mb_uv_width, mb_uv_height, strength,
431 filter_weight, accumulator + 512,
433 #endif // CONFIG_VP9_HIGHBITDEPTH
// Normalization: pval = (accumulator + count/2) * fixed_divide[count]
// then >> 19 (dropped shift lines) — i.e. rounded accumulator / count.
437 #if CONFIG_VP9_HIGHBITDEPTH
438 if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
441 // Normalize filter output to produce AltRef frame
442 dst1 = cpi->alt_ref_buffer.y_buffer;
443 dst1_16 = CONVERT_TO_SHORTPTR(dst1);
444 stride = cpi->alt_ref_buffer.y_stride;
446 for (i = 0, k = 0; i < 16; i++) {
447 for (j = 0; j < 16; j++, k++) {
448 unsigned int pval = accumulator[k] + (count[k] >> 1);
449 pval *= fixed_divide[count[k]];
452 dst1_16[byte] = (uint16_t)pval;
454 // move to next pixel
// Chroma planes: U and V normalized in one pass (m indexes V at k + 256).
461 dst1 = cpi->alt_ref_buffer.u_buffer;
462 dst2 = cpi->alt_ref_buffer.v_buffer;
463 dst1_16 = CONVERT_TO_SHORTPTR(dst1);
464 dst2_16 = CONVERT_TO_SHORTPTR(dst2);
465 stride = cpi->alt_ref_buffer.uv_stride;
467 for (i = 0, k = 256; i < mb_uv_height; i++) {
468 for (j = 0; j < mb_uv_width; j++, k++) {
472 unsigned int pval = accumulator[k] + (count[k] >> 1);
473 pval *= fixed_divide[count[k]];
475 dst1_16[byte] = (uint16_t)pval;
478 pval = accumulator[m] + (count[m] >> 1);
479 pval *= fixed_divide[count[m]];
481 dst2_16[byte] = (uint16_t)pval;
483 // move to next pixel
487 byte += stride - mb_uv_width;
490 // Normalize filter output to produce AltRef frame
491 dst1 = cpi->alt_ref_buffer.y_buffer;
492 stride = cpi->alt_ref_buffer.y_stride;
494 for (i = 0, k = 0; i < 16; i++) {
495 for (j = 0; j < 16; j++, k++) {
496 unsigned int pval = accumulator[k] + (count[k] >> 1);
497 pval *= fixed_divide[count[k]];
500 dst1[byte] = (uint8_t)pval;
502 // move to next pixel
508 dst1 = cpi->alt_ref_buffer.u_buffer;
509 dst2 = cpi->alt_ref_buffer.v_buffer;
510 stride = cpi->alt_ref_buffer.uv_stride;
512 for (i = 0, k = 256; i < mb_uv_height; i++) {
513 for (j = 0; j < mb_uv_width; j++, k++) {
517 unsigned int pval = accumulator[k] + (count[k] >> 1);
518 pval *= fixed_divide[count[k]];
520 dst1[byte] = (uint8_t)pval;
523 pval = accumulator[m] + (count[m] >> 1);
524 pval *= fixed_divide[count[m]];
526 dst2[byte] = (uint8_t)pval;
528 // move to next pixel
531 byte += stride - mb_uv_width;
// 8-bit-only build: same normalization as above without the 16-bit path.
535 // Normalize filter output to produce AltRef frame
536 dst1 = cpi->alt_ref_buffer.y_buffer;
537 stride = cpi->alt_ref_buffer.y_stride;
539 for (i = 0, k = 0; i < 16; i++) {
540 for (j = 0; j < 16; j++, k++) {
541 unsigned int pval = accumulator[k] + (count[k] >> 1);
542 pval *= fixed_divide[count[k]];
545 dst1[byte] = (uint8_t)pval;
547 // move to next pixel
553 dst1 = cpi->alt_ref_buffer.u_buffer;
554 dst2 = cpi->alt_ref_buffer.v_buffer;
555 stride = cpi->alt_ref_buffer.uv_stride;
557 for (i = 0, k = 256; i < mb_uv_height; i++) {
558 for (j = 0; j < mb_uv_width; j++, k++) {
562 unsigned int pval = accumulator[k] + (count[k] >> 1);
563 pval *= fixed_divide[count[k]];
565 dst1[byte] = (uint8_t)pval;
568 pval = accumulator[m] + (count[m] >> 1);
569 pval *= fixed_divide[count[m]];
571 dst2[byte] = (uint8_t)pval;
573 // move to next pixel
576 byte += stride - mb_uv_width;
578 #endif // CONFIG_VP9_HIGHBITDEPTH
// Advance to the next macroblock column / row.
580 mb_uv_offset += mb_uv_width;
582 mb_y_offset += 16 * (f->y_stride - mb_cols);
583 mb_uv_offset += mb_uv_height * f->uv_stride - mb_uv_width * mb_cols;
586 // Restore input state
587 for (i = 0; i < MAX_MB_PLANE; i++)
588 mbd->plane[i].pre[0].buf = input_buffer[i];
591 // Apply buffer limits and context specific adjustments to arnr filter.
// Chooses the ARNR temporal-filter window length (*arnr_frames) and
// strength (*arnr_strength) for the ARF at `distance` frames ahead:
// clamps the window to the lookahead depth and distance, weakens the
// filter at low Q, and caps both by the golden-frame boost level.
// NOTE(review): garbled extraction — stray leading numbers and dropped
// lines (e.g. the frames_bwd declaration, else keywords, clamp branches
// and closing braces); restore from upstream before compiling.
592 static void adjust_arnr_filter(VP9_COMP *cpi,
593 int distance, int group_boost,
594 int *arnr_frames, int *arnr_strength) {
595 const VP9EncoderConfig *const oxcf = &cpi->oxcf;
// Frames remaining in the lookahead after the ARF position.
596 const int frames_after_arf =
597 vp9_lookahead_depth(cpi->lookahead) - distance - 1;
598 int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
600 int q, frames, strength;
602 // Define the forward and backwards filter limits for this arnr group.
603 if (frames_fwd > frames_after_arf)
604 frames_fwd = frames_after_arf;
605 if (frames_fwd > distance)
606 frames_fwd = distance;
608 frames_bwd = frames_fwd;
610 // For even length filter there is one more frame backward
611 // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
612 if (frames_bwd < distance)
613 frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
615 // Set the baseline active filter size.
616 frames = frames_bwd + 1 + frames_fwd;
618 // Adjust the strength based on active max q.
// Use the inter-frame average Q after the first frame, otherwise the
// key-frame average Q.
619 if (cpi->common.current_video_frame > 1)
620 q = ((int)vp9_convert_qindex_to_q(
621 cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth));
623 q = ((int)vp9_convert_qindex_to_q(
624 cpi->rc.avg_frame_qindex[KEY_FRAME], cpi->common.bit_depth));
// Full configured strength at high Q; reduced as Q drops below 16
// (a negative-clamp branch appears to have been dropped here).
626 strength = oxcf->arnr_strength;
628 strength = oxcf->arnr_strength - ((16 - q) / 2);
633 // Adjust number of frames in filter and strength based on gf boost level.
634 if (frames > group_boost / 150) {
635 frames = group_boost / 150;
// Keep the frame count odd so the window stays centered on the ARF.
636 frames += !(frames & 1);
639 if (strength > group_boost / 300) {
640 strength = group_boost / 300;
643 // Adjustments for second level arf in multi arf case.
644 if (cpi->oxcf.pass == 2 && cpi->multi_arf_allowed) {
645 const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
646 if (gf_group->rf_level[gf_group->index] != GF_ARF_STD) {
651 *arnr_frames = frames;
652 *arnr_strength = strength;
655 void vp9_temporal_filter(VP9_COMP *cpi, int distance) {
656 VP9_COMMON *const cm = &cpi->common;
657 RATE_CONTROL *const rc = &cpi->rc;
658 MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
663 int frames_to_blur_backward;
664 int frames_to_blur_forward;
665 struct scale_factors sf;
666 YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = {NULL};
668 // Apply context specific adjustments to the arnr filter parameters.
669 adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength);
670 frames_to_blur_backward = (frames_to_blur / 2);
671 frames_to_blur_forward = ((frames_to_blur - 1) / 2);
672 start_frame = distance + frames_to_blur_forward;
674 // Setup frame pointers, NULL indicates frame not included in filter.
675 for (frame = 0; frame < frames_to_blur; ++frame) {
676 const int which_buffer = start_frame - frame;
677 struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead,
679 frames[frames_to_blur - 1 - frame] = &buf->img;
682 if (frames_to_blur > 0) {
683 // Setup scaling factors. Scaling on each of the arnr frames is not
685 if (is_two_pass_svc(cpi)) {
686 // In spatial svc the scaling factors might be less then 1/2.
687 // So we will use non-normative scaling.
689 #if CONFIG_VP9_HIGHBITDEPTH
690 vp9_setup_scale_factors_for_frame(
692 get_frame_new_buffer(cm)->y_crop_width,
693 get_frame_new_buffer(cm)->y_crop_height,
694 get_frame_new_buffer(cm)->y_crop_width,
695 get_frame_new_buffer(cm)->y_crop_height,
696 cm->use_highbitdepth);
698 vp9_setup_scale_factors_for_frame(
700 get_frame_new_buffer(cm)->y_crop_width,
701 get_frame_new_buffer(cm)->y_crop_height,
702 get_frame_new_buffer(cm)->y_crop_width,
703 get_frame_new_buffer(cm)->y_crop_height);
704 #endif // CONFIG_VP9_HIGHBITDEPTH
706 for (frame = 0; frame < frames_to_blur; ++frame) {
707 if (cm->mi_cols * MI_SIZE != frames[frame]->y_width ||
708 cm->mi_rows * MI_SIZE != frames[frame]->y_height) {
709 if (vp9_realloc_frame_buffer(&cpi->svc.scaled_frames[frame_used],
710 cm->width, cm->height,
711 cm->subsampling_x, cm->subsampling_y,
712 #if CONFIG_VP9_HIGHBITDEPTH
713 cm->use_highbitdepth,
715 VP9_ENC_BORDER_IN_PIXELS,
718 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
719 "Failed to reallocate alt_ref_buffer");
721 frames[frame] = vp9_scale_if_required(
722 cm, frames[frame], &cpi->svc.scaled_frames[frame_used]);
726 cm->mi = cm->mip + cm->mi_stride + 1;
727 xd->mi = cm->mi_grid_visible;
730 // ARF is produced at the native frame size and resized when coded.
731 #if CONFIG_VP9_HIGHBITDEPTH
732 vp9_setup_scale_factors_for_frame(&sf,
733 frames[0]->y_crop_width,
734 frames[0]->y_crop_height,
735 frames[0]->y_crop_width,
736 frames[0]->y_crop_height,
737 cm->use_highbitdepth);
739 vp9_setup_scale_factors_for_frame(&sf,
740 frames[0]->y_crop_width,
741 frames[0]->y_crop_height,
742 frames[0]->y_crop_width,
743 frames[0]->y_crop_height);
744 #endif // CONFIG_VP9_HIGHBITDEPTH
748 temporal_filter_iterate_c(cpi, frames, frames_to_blur,
749 frames_to_blur_backward, strength, &sf);