2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
14 #include "vp9/common/vp9_alloccommon.h"
15 #include "vp9/common/vp9_onyxc_int.h"
16 #include "vp9/common/vp9_quant_common.h"
17 #include "vp9/common/vp9_reconinter.h"
18 #include "vp9/common/vp9_systemdependent.h"
19 #include "vp9/encoder/vp9_extend.h"
20 #include "vp9/encoder/vp9_firstpass.h"
21 #include "vp9/encoder/vp9_mcomp.h"
22 #include "vp9/encoder/vp9_encoder.h"
23 #include "vp9/encoder/vp9_quantize.h"
24 #include "vp9/encoder/vp9_ratectrl.h"
25 #include "vp9/encoder/vp9_segmentation.h"
26 #include "vpx_mem/vpx_mem.h"
27 #include "vpx_ports/vpx_timer.h"
28 #include "vpx_scale/vpx_scale.h"
30 static int fixed_divide[512];
// Builds motion-compensated Y/U/V predictors for one 16x16 macroblock using a
// single motion vector (mv_row, mv_col). The prediction is written into a
// packed buffer: Y at pred[0], U at pred[256], V at pred[512].
// NOTE(review): several parameter lines of the signature are missing from this
// view of the file — the full argument list cannot be confirmed from here.
32 static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
42 struct scale_factors *scale,
// Only the first MV slot is used; no compound prediction here.
44 const int which_mv = 0;
45 const MV mv = { mv_row, mv_col };
// Interpolation kernel chosen from the block's configured interp filter.
46 const InterpKernel *const kernel =
47 vp9_get_interp_kernel(xd->mi[0].src_mi->mbmi.interp_filter);
49 enum mv_precision mv_precision_uv;
// For 4:2:0 (8-wide UV blocks) the chroma stride is half the luma stride and
// the MV is applied at Q4 precision so the half-resolution plane sees the
// same displacement. (The else-branch line is missing from this view;
// presumably it handles 16-wide UV with MV_PRECISION_Q3 — confirm.)
51 if (uv_block_width == 8) {
52 uv_stride = (stride + 1) >> 1;
53 mv_precision_uv = MV_PRECISION_Q4;
56 mv_precision_uv = MV_PRECISION_Q3;
// High-bitdepth path: identical structure, but uses the highbd predictor
// builders and passes the bit depth xd->bd.
59 #if CONFIG_VP9_HIGHBITDEPTH
60 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
61 vp9_highbd_build_inter_predictor(y_mb_ptr, stride,
67 kernel, MV_PRECISION_Q3, x, y, xd->bd);
69 vp9_highbd_build_inter_predictor(u_mb_ptr, uv_stride,
70 &pred[256], uv_block_width,
73 uv_block_width, uv_block_height,
75 kernel, mv_precision_uv, x, y, xd->bd);
77 vp9_highbd_build_inter_predictor(v_mb_ptr, uv_stride,
78 &pred[512], uv_block_width,
81 uv_block_width, uv_block_height,
83 kernel, mv_precision_uv, x, y, xd->bd);
86 #endif  // CONFIG_VP9_HIGHBITDEPTH
// 8-bit path: Y predictor at Q3 precision, then U and V at the chroma
// precision selected above.
87 vp9_build_inter_predictor(y_mb_ptr, stride,
93 kernel, MV_PRECISION_Q3, x, y);
95 vp9_build_inter_predictor(u_mb_ptr, uv_stride,
96 &pred[256], uv_block_width,
99 uv_block_width, uv_block_height,
101 kernel, mv_precision_uv, x, y);
103 vp9_build_inter_predictor(v_mb_ptr, uv_stride,
104 &pred[512], uv_block_width,
107 uv_block_width, uv_block_height,
109 kernel, mv_precision_uv, x, y);
// One-time initialization of the fixed-point reciprocal table used to
// normalize accumulated filter output: fixed_divide[i] == (1/i) in Q19
// (0x80000 == 1 << 19). Index 0 is left untouched (division by zero is
// never taken because count[] is only indexed when non-zero upstream —
// NOTE(review): confirm; the callers are partially outside this view).
112 void vp9_temporal_filter_init() {
116 for (i = 1; i < 512; ++i)
117 fixed_divide[i] = 0x80000 / i;
// Accumulates one predicted block (frame2) into accumulator[]/count[],
// weighting each pixel by how well it matches the source block (frame1):
// small differences get weight up to 16*filter_weight, large differences
// get 0. This is the scalar reference implementation (SIMD versions exist
// elsewhere in the project).
// NOTE(review): some parameter lines and the clamp of `modifier` to 16 are
// missing from this view — the visible math alone would allow a negative
// (16 - modifier); presumably a `if (modifier > 16) modifier = 16;` line
// exists in the full source. Confirm before relying on this documentation.
120 void vp9_temporal_filter_apply_c(uint8_t *frame1,
123 unsigned int block_width,
124 unsigned int block_height,
127 unsigned int *accumulator,
129 unsigned int i, j, k;
// Rounding term for the >> strength below (round-half-up).
132 const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
134 for (i = 0, k = 0; i < block_height; i++) {
135 for (j = 0; j < block_width; j++, k++) {
136 int src_byte = frame1[byte];
137 int pixel_value = *frame2++;
139 modifier = src_byte - pixel_value;
140 // This is an integer approximation of:
141 // float coeff = (3.0 * modifer * modifier) / pow(2, strength);
142 // modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
143 modifier *= modifier;
145 modifier += rounding;
146 modifier >>= strength;
151 modifier = 16 - modifier;
152 modifier *= filter_weight;
// Per-pixel weighted accumulation; normalized later via fixed_divide.
154 count[k] += modifier;
155 accumulator[k] += modifier * pixel_value;
// Advance to the start of the next row in the (strided) source plane.
160 byte += stride - block_width;
164 #if CONFIG_VP9_HIGHBITDEPTH
// High-bitdepth variant of vp9_temporal_filter_apply_c: identical weighting
// math, but pixels are 16-bit (CONVERT_TO_SHORTPTR) and callers pass an
// adjusted strength to compensate for the larger pixel range.
// NOTE(review): as with the 8-bit version, the clamp of `modifier` to 16 is
// not visible in this view — confirm against the full source.
165 void vp9_highbd_temporal_filter_apply_c(uint8_t *frame1_8,
168 unsigned int block_width,
169 unsigned int block_height,
172 unsigned int *accumulator,
174 uint16_t *frame1 = CONVERT_TO_SHORTPTR(frame1_8);
175 uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8);
176 unsigned int i, j, k;
// Rounding term for the >> strength below (round-half-up).
179 const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
181 for (i = 0, k = 0; i < block_height; i++) {
182 for (j = 0; j < block_width; j++, k++) {
183 int src_byte = frame1[byte];
184 int pixel_value = *frame2++;
186 modifier = src_byte - pixel_value;
187 // This is an integer approximation of:
188 // float coeff = (3.0 * modifer * modifier) / pow(2, strength);
189 // modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
190 modifier *= modifier;
192 modifier += rounding;
193 modifier >>= strength;
198 modifier = 16 - modifier;
199 modifier *= filter_weight;
// Per-pixel weighted accumulation; normalized later via fixed_divide.
201 count[k] += modifier;
202 accumulator[k] += modifier * pixel_value;
// Advance to the start of the next row in the (strided) source plane.
207 byte += stride - block_width;
210 #endif  // CONFIG_VP9_HIGHBITDEPTH
// Motion-searches one 16x16 block: finds the MV that best matches the ARF
// block (arf_frame_buf) in the candidate frame (frame_ptr_buf). Writes the
// winning MV into the macroblockd's bmi[0].as_mv[0] via `ref_mv` and returns
// the error score (the `return bestsme;` line is outside this view —
// NOTE(review): confirm). Source/pre buffer state is saved and restored so
// the caller's encode state is unaffected.
212 static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
213 uint8_t *arf_frame_buf,
214 uint8_t *frame_ptr_buf,
216 MACROBLOCK *const x = &cpi->td.mb;
217 MACROBLOCKD *const xd = &x->e_mbd;
218 const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
220 int sadpb = x->sadperbit16;
221 int bestsme = INT_MAX;
// Search is centered on the zero vector.
226 MV best_ref_mv1 = {0, 0};
227 MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
228 MV *ref_mv = &x->e_mbd.mi[0].src_mi->bmi[0].as_mv[0].as_mv;
// Save input state so it can be restored after the search.
231 struct buf_2d src = x->plane[0].src;
232 struct buf_2d pre = xd->plane[0].pre[0];
// Convert Q3 (1/8-pel) start MV to full-pel units for the integer search.
234 best_ref_mv1_full.col = best_ref_mv1.col >> 3;
235 best_ref_mv1_full.row = best_ref_mv1.row >> 3;
237 // Setup frame pointers
238 x->plane[0].src.buf = arf_frame_buf;
239 x->plane[0].src.stride = stride;
240 xd->plane[0].pre[0].buf = frame_ptr_buf;
241 xd->plane[0].pre[0].stride = stride;
243 step_param = mv_sf->reduce_first_step_size;
244 step_param = MIN(step_param, MAX_MVSEARCH_STEPS - 2);
246 // Ignore mv costing by sending NULL pointer instead of cost arrays
// Integer-pel hex pattern search for a 16x16 block.
247 vp9_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
248 cond_cost_list(cpi, cost_list),
249 &cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv);
251 // Ignore mv costing by sending NULL pointer instead of cost array
// Sub-pel refinement of the integer-pel result.
252 bestsme = cpi->find_fractional_mv_step(x, ref_mv,
254 cpi->common.allow_high_precision_mv,
256 &cpi->fn_ptr[BLOCK_16X16],
257 0, mv_sf->subpel_iters_per_step,
258 cond_cost_list(cpi, cost_list),
260 &distortion, &sse, NULL, 0, 0);
262 // Restore input state
263 x->plane[0].src = src;
264 xd->plane[0].pre[0] = pre;
// Core of the ARNR filter: for every 16x16 macroblock of the alt-ref frame,
// motion-compensates each contributing frame onto it, accumulates the
// match-weighted pixels (Y at accumulator[0..255], U at [256..], V at
// [512..]), then normalizes with the fixed_divide reciprocal table and
// writes the result into cpi->alt_ref_buffer.
// NOTE(review): this view of the file is heavily sampled — many declaration
// lines (i, j, k, mb_row, mb_col, byte, m, stride, dst1_16/dst2_16, frame,
// mb_y_offset, ...) and several braces are missing. Documentation below is
// limited to what the visible lines establish.
269 static void temporal_filter_iterate_c(VP9_COMP *cpi,
270 YV12_BUFFER_CONFIG **frames,
274 struct scale_factors *scale) {
278 unsigned int filter_weight;
// Frame size rounded up to whole 16x16 macroblocks.
279 int mb_cols = (frames[alt_ref_index]->y_crop_width + 15) >> 4;
280 int mb_rows = (frames[alt_ref_index]->y_crop_height + 15) >> 4;
282 int mb_uv_offset = 0;
// One 16x16 accumulator per plane (Y, U, V), packed back-to-back.
283 DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 * 3);
284 DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 * 3);
285 MACROBLOCKD *mbd = &cpi->td.mb.e_mbd;
286 YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
287 uint8_t *dst1, *dst2;
288 #if CONFIG_VP9_HIGHBITDEPTH
// In highbd builds the predictor buffer is selected at runtime below.
289 DECLARE_ALIGNED_ARRAY(16, uint16_t, predictor16, 16 * 16 * 3);
290 DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor8, 16 * 16 * 3);
293 DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 * 3);
// Chroma block dimensions derived from the subsampling of plane 1.
295 const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;
296 const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
299 uint8_t* input_buffer[MAX_MB_PLANE];
301 #if CONFIG_VP9_HIGHBITDEPTH
302 if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
303 predictor = CONVERT_TO_BYTEPTR(predictor16);
305 predictor = predictor8;
// Save the pre-buffer pointers so they can be restored at the end.
309 for (i = 0; i < MAX_MB_PLANE; i++)
310 input_buffer[i] = mbd->plane[i].pre[0].buf;
312 for (mb_row = 0; mb_row < mb_rows; mb_row++) {
313 // Source frames are extended to 16 pixels. This is different than
314 // L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS)
315 // A 6/8 tap filter is used for motion search. This requires 2 pixels
316 // before and 3 pixels after. So the largest Y mv on a border would
317 // then be 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the
318 // Y and therefore only extended by 8. The largest mv that a UV block
319 // can support is 8 - VP9_INTERP_EXTEND. A UV mv is half of a Y mv.
320 // (16 - VP9_INTERP_EXTEND) >> 1 which is greater than
321 // 8 - VP9_INTERP_EXTEND.
322 // To keep the mv in play for both Y and UV planes the max that it
323 // can be on a border is therefore 16 - (2*VP9_INTERP_EXTEND+1).
324 cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND));
325 cpi->td.mb.mv_row_max = ((mb_rows - 1 - mb_row) * 16)
326 + (17 - 2 * VP9_INTERP_EXTEND);
328 for (mb_col = 0; mb_col < mb_cols; mb_col++) {
// Reset the per-macroblock accumulators.
332 vpx_memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
333 vpx_memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
// Column MV limits, mirroring the row limits computed above.
335 cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
336 cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16)
337 + (17 - 2 * VP9_INTERP_EXTEND);
// Accumulate a contribution from every frame in the filter window.
339 for (frame = 0; frame < frame_count; frame++) {
340 const int thresh_low = 10000;
341 const int thresh_high = 20000;
343 if (frames[frame] == NULL)
// Start each frame's search from the zero MV.
346 mbd->mi[0].src_mi->bmi[0].as_mv[0].as_mv.row = 0;
347 mbd->mi[0].src_mi->bmi[0].as_mv[0].as_mv.col = 0;
// The alt-ref frame itself always contributes at full weight (the
// weight-assignment line is outside this view — confirm value 2).
349 if (frame == alt_ref_index) {
352 // Find best match in this frame by MC
353 int err = temporal_filter_find_matching_mb_c(cpi,
354 frames[alt_ref_index]->y_buffer + mb_y_offset,
355 frames[frame]->y_buffer + mb_y_offset,
356 frames[frame]->y_stride);
358 // Assign higher weight to matching MB if it's error
359 // score is lower. If not applying MC default behavior
360 // is to weight all MBs equal.
361 filter_weight = err < thresh_low
362 ? 2 : err < thresh_high ? 1 : 0;
365 if (filter_weight != 0) {
366 // Construct the predictors
367 temporal_filter_predictors_mb_c(mbd,
368 frames[frame]->y_buffer + mb_y_offset,
369 frames[frame]->u_buffer + mb_uv_offset,
370 frames[frame]->v_buffer + mb_uv_offset,
371 frames[frame]->y_stride,
372 mb_uv_width, mb_uv_height,
373 mbd->mi[0].src_mi->bmi[0].as_mv[0].as_mv.row,
374 mbd->mi[0].src_mi->bmi[0].as_mv[0].as_mv.col,
376 mb_col * 16, mb_row * 16);
378 #if CONFIG_VP9_HIGHBITDEPTH
379 if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
// Strength is raised by 2 per extra bit of depth so the diff^2
// scaling matches the wider pixel range.
380 int adj_strength = strength + 2 * (mbd->bd - 8);
381 // Apply the filter (YUV)
382 vp9_highbd_temporal_filter_apply(f->y_buffer + mb_y_offset,
384 predictor, 16, 16, adj_strength,
387 vp9_highbd_temporal_filter_apply(f->u_buffer + mb_uv_offset,
388 f->uv_stride, predictor + 256,
389 mb_uv_width, mb_uv_height,
391 filter_weight, accumulator + 256,
393 vp9_highbd_temporal_filter_apply(f->v_buffer + mb_uv_offset,
394 f->uv_stride, predictor + 512,
395 mb_uv_width, mb_uv_height,
396 adj_strength, filter_weight,
397 accumulator + 512, count + 512);
399 // Apply the filter (YUV)
400 vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
402 strength, filter_weight,
404 vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
406 mb_uv_width, mb_uv_height, strength,
407 filter_weight, accumulator + 256,
409 vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
411 mb_uv_width, mb_uv_height, strength,
412 filter_weight, accumulator + 512,
416 // Apply the filter (YUV)
417 vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
419 strength, filter_weight,
421 vp9_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
423 mb_uv_width, mb_uv_height, strength,
424 filter_weight, accumulator + 256,
426 vp9_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
428 mb_uv_width, mb_uv_height, strength,
429 filter_weight, accumulator + 512,
431 #endif  // CONFIG_VP9_HIGHBITDEPTH
435 #if CONFIG_VP9_HIGHBITDEPTH
436 if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
439 // Normalize filter output to produce AltRef frame
// pval = (accumulator + count/2) / count, via the Q19 reciprocal
// table (the `pval >>= 19` line is outside this view — confirm).
440 dst1 = cpi->alt_ref_buffer.y_buffer;
441 dst1_16 = CONVERT_TO_SHORTPTR(dst1);
442 stride = cpi->alt_ref_buffer.y_stride;
444 for (i = 0, k = 0; i < 16; i++) {
445 for (j = 0; j < 16; j++, k++) {
446 unsigned int pval = accumulator[k] + (count[k] >> 1);
447 pval *= fixed_divide[count[k]];
450 dst1_16[byte] = (uint16_t)pval;
452 // move to next pixel
// Normalize the two chroma planes in one pass (U via k, V via m).
459 dst1 = cpi->alt_ref_buffer.u_buffer;
460 dst2 = cpi->alt_ref_buffer.v_buffer;
461 dst1_16 = CONVERT_TO_SHORTPTR(dst1);
462 dst2_16 = CONVERT_TO_SHORTPTR(dst2);
463 stride = cpi->alt_ref_buffer.uv_stride;
465 for (i = 0, k = 256; i < mb_uv_height; i++) {
466 for (j = 0; j < mb_uv_width; j++, k++) {
470 unsigned int pval = accumulator[k] + (count[k] >> 1);
471 pval *= fixed_divide[count[k]];
473 dst1_16[byte] = (uint16_t)pval;
476 pval = accumulator[m] + (count[m] >> 1);
477 pval *= fixed_divide[count[m]];
479 dst2_16[byte] = (uint16_t)pval;
481 // move to next pixel
485 byte += stride - mb_uv_width;
// 8-bit normalization path (same math, uint8_t destinations).
488 // Normalize filter output to produce AltRef frame
489 dst1 = cpi->alt_ref_buffer.y_buffer;
490 stride = cpi->alt_ref_buffer.y_stride;
492 for (i = 0, k = 0; i < 16; i++) {
493 for (j = 0; j < 16; j++, k++) {
494 unsigned int pval = accumulator[k] + (count[k] >> 1);
495 pval *= fixed_divide[count[k]];
498 dst1[byte] = (uint8_t)pval;
500 // move to next pixel
506 dst1 = cpi->alt_ref_buffer.u_buffer;
507 dst2 = cpi->alt_ref_buffer.v_buffer;
508 stride = cpi->alt_ref_buffer.uv_stride;
510 for (i = 0, k = 256; i < mb_uv_height; i++) {
511 for (j = 0; j < mb_uv_width; j++, k++) {
515 unsigned int pval = accumulator[k] + (count[k] >> 1);
516 pval *= fixed_divide[count[k]];
518 dst1[byte] = (uint8_t)pval;
521 pval = accumulator[m] + (count[m] >> 1);
522 pval *= fixed_divide[count[m]];
524 dst2[byte] = (uint8_t)pval;
526 // move to next pixel
529 byte += stride - mb_uv_width;
// Non-highbd build: identical 8-bit normalization.
533 // Normalize filter output to produce AltRef frame
534 dst1 = cpi->alt_ref_buffer.y_buffer;
535 stride = cpi->alt_ref_buffer.y_stride;
537 for (i = 0, k = 0; i < 16; i++) {
538 for (j = 0; j < 16; j++, k++) {
539 unsigned int pval = accumulator[k] + (count[k] >> 1);
540 pval *= fixed_divide[count[k]];
543 dst1[byte] = (uint8_t)pval;
545 // move to next pixel
551 dst1 = cpi->alt_ref_buffer.u_buffer;
552 dst2 = cpi->alt_ref_buffer.v_buffer;
553 stride = cpi->alt_ref_buffer.uv_stride;
555 for (i = 0, k = 256; i < mb_uv_height; i++) {
556 for (j = 0; j < mb_uv_width; j++, k++) {
560 unsigned int pval = accumulator[k] + (count[k] >> 1);
561 pval *= fixed_divide[count[k]];
563 dst1[byte] = (uint8_t)pval;
566 pval = accumulator[m] + (count[m] >> 1);
567 pval *= fixed_divide[count[m]];
569 dst2[byte] = (uint8_t)pval;
571 // move to next pixel
574 byte += stride - mb_uv_width;
576 #endif  // CONFIG_VP9_HIGHBITDEPTH
// Advance plane offsets to the next macroblock column / row.
578 mb_uv_offset += mb_uv_width;
580 mb_y_offset += 16 * (f->y_stride - mb_cols);
581 mb_uv_offset += mb_uv_height * f->uv_stride - mb_uv_width * mb_cols;
584 // Restore input state
585 for (i = 0; i < MAX_MB_PLANE; i++)
586 mbd->plane[i].pre[0].buf = input_buffer[i];
589 // Apply buffer limits and context specific adjustments to arnr filter.
// Outputs: *arnr_frames = number of frames to blend, *arnr_strength = filter
// strength. Both are derived from the lookahead depth around `distance`,
// the recent quantizer, and the golden-frame boost.
// NOTE(review): several lines (frames_bwd declaration, else-branches of the
// q-based strength selection, strength clamps, closing braces) are missing
// from this view.
590 static void adjust_arnr_filter(VP9_COMP *cpi,
591 int distance, int group_boost,
592 int *arnr_frames, int *arnr_strength) {
593 const VP9EncoderConfig *const oxcf = &cpi->oxcf;
// Frames available in the lookahead after the ARF position.
594 const int frames_after_arf =
595 vp9_lookahead_depth(cpi->lookahead) - distance - 1;
596 int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
598 int q, frames, strength;
600 // Define the forward and backwards filter limits for this arnr group.
601 if (frames_fwd > frames_after_arf)
602 frames_fwd = frames_after_arf;
603 if (frames_fwd > distance)
604 frames_fwd = distance;
606 frames_bwd = frames_fwd;
608 // For even length filter there is one more frame backward
609 // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
610 if (frames_bwd < distance)
611 frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
613 // Set the baseline active filter size.
614 frames = frames_bwd + 1 + frames_fwd;
616 // Adjust the strength based on active max q.
// Use the inter-frame average q after the first frame, key-frame q before.
617 if (cpi->common.current_video_frame > 1)
618 q = ((int)vp9_convert_qindex_to_q(
619 cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth));
621 q = ((int)vp9_convert_qindex_to_q(
622 cpi->rc.avg_frame_qindex[KEY_FRAME], cpi->common.bit_depth));
624 strength = oxcf->arnr_strength;
// At low q, reduce strength so fine detail is not smoothed away.
626 strength = oxcf->arnr_strength - ((16 - q) / 2);
631 // Adjust number of frames in filter and strength based on gf boost level.
632 if (frames > group_boost / 150) {
633 frames = group_boost / 150;
// Keep the frame count odd so the window stays centered on the ARF.
634 frames += !(frames & 1);
637 if (strength > group_boost / 300) {
638 strength = group_boost / 300;
641 // Adjustments for second level arf in multi arf case.
642 if (cpi->oxcf.pass == 2 && cpi->multi_arf_allowed) {
643 const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
644 if (gf_group->rf_level[gf_group->index] != GF_ARF_STD) {
649 *arnr_frames = frames;
650 *arnr_strength = strength;
// Top-level entry: temporally filters the frames around the alt-ref position
// `distance` in the lookahead and produces the filtered alt-ref frame.
// Chooses the filter window and strength (adjust_arnr_filter), gathers frame
// pointers from the lookahead, sets up scale factors (with a special
// non-normative path for two-pass spatial SVC), then runs
// temporal_filter_iterate_c.
// NOTE(review): this view is truncated — several declarations (strength,
// frame, frames_to_blur, start_frame, frame_used) and closing braces are
// missing, and the function appears to end past the last visible line.
653 void vp9_temporal_filter(VP9_COMP *cpi, int distance) {
654 VP9_COMMON *const cm = &cpi->common;
655 RATE_CONTROL *const rc = &cpi->rc;
656 MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
661 int frames_to_blur_backward;
662 int frames_to_blur_forward;
663 struct scale_factors sf;
664 YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = {NULL};
666 // Apply context specific adjustments to the arnr filter parameters.
667 adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength);
// Split the window: for even lengths the extra frame goes backward.
668 frames_to_blur_backward = (frames_to_blur / 2);
669 frames_to_blur_forward = ((frames_to_blur - 1) / 2);
670 start_frame = distance + frames_to_blur_forward;
672 // Setup frame pointers, NULL indicates frame not included in filter.
// frames[] is ordered oldest-first; the ARF lands at index
// frames_to_blur_backward.
673 for (frame = 0; frame < frames_to_blur; ++frame) {
674 const int which_buffer = start_frame - frame;
675 struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead,
677 frames[frames_to_blur - 1 - frame] = &buf->img;
680 if (frames_to_blur > 0) {
681 // Setup scaling factors. Scaling on each of the arnr frames is not
683 if (is_two_pass_svc(cpi)) {
684 // In spatial svc the scaling factors might be less then 1/2.
685 // So we will use non-normative scaling.
687 #if CONFIG_VP9_HIGHBITDEPTH
688 vp9_setup_scale_factors_for_frame(
690 get_frame_new_buffer(cm)->y_crop_width,
691 get_frame_new_buffer(cm)->y_crop_height,
692 get_frame_new_buffer(cm)->y_crop_width,
693 get_frame_new_buffer(cm)->y_crop_height,
694 cm->use_highbitdepth);
696 vp9_setup_scale_factors_for_frame(
698 get_frame_new_buffer(cm)->y_crop_width,
699 get_frame_new_buffer(cm)->y_crop_height,
700 get_frame_new_buffer(cm)->y_crop_width,
701 get_frame_new_buffer(cm)->y_crop_height);
702 #endif  // CONFIG_VP9_HIGHBITDEPTH
// SVC path: rescale any lookahead frame whose size differs from the
// current coding resolution before filtering.
704 for (frame = 0; frame < frames_to_blur; ++frame) {
705 if (cm->mi_cols * MI_SIZE != frames[frame]->y_width ||
706 cm->mi_rows * MI_SIZE != frames[frame]->y_height) {
707 if (vp9_realloc_frame_buffer(&cpi->svc.scaled_frames[frame_used],
708 cm->width, cm->height,
709 cm->subsampling_x, cm->subsampling_y,
710 #if CONFIG_VP9_HIGHBITDEPTH
711 cm->use_highbitdepth,
713 VP9_ENC_BORDER_IN_PIXELS, NULL, NULL,
715 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
716 "Failed to reallocate alt_ref_buffer");
718 frames[frame] = vp9_scale_if_required(
719 cm, frames[frame], &cpi->svc.scaled_frames[frame_used]);
// Point mi at the first real (non-border) mode-info entry.
723 cm->mi = cm->mip + cm->mi_stride + 1;
725 xd->mi[0].src_mi = &xd->mi[0];
727 // ARF is produced at the native frame size and resized when coded.
// Non-SVC path: identity scaling at the first frame's native size.
728 #if CONFIG_VP9_HIGHBITDEPTH
729 vp9_setup_scale_factors_for_frame(&sf,
730 frames[0]->y_crop_width,
731 frames[0]->y_crop_height,
732 frames[0]->y_crop_width,
733 frames[0]->y_crop_height,
734 cm->use_highbitdepth);
736 vp9_setup_scale_factors_for_frame(&sf,
737 frames[0]->y_crop_width,
738 frames[0]->y_crop_height,
739 frames[0]->y_crop_width,
740 frames[0]->y_crop_height);
741 #endif  // CONFIG_VP9_HIGHBITDEPTH
745 temporal_filter_iterate_c(cpi, frames, frames_to_blur,
746 frames_to_blur_backward, strength, &sf);