From 749bc98618c8b75bc44dedecb5f18dd3b6c78bf1 Mon Sep 17 00:00:00 2001 From: Scott LaVarnway Date: Fri, 20 Jan 2012 13:52:16 -0500 Subject: [PATCH] BLOCKD structure cleanup Removed redundancies. All of the information can be found in the MACROBLOCKD structure. Change-Id: I7556392c6f67b43bef2a5e9932180a737466ef93 --- vp8/common/blockd.h | 10 +-- vp8/common/mbpitch.c | 39 ++-------- vp8/common/reconinter.c | 134 ++++++++++++++++++++-------------- vp8/common/reconinter.h | 2 + vp8/common/reconintra4x4.c | 9 ++- vp8/decoder/decodframe.c | 14 ++-- vp8/decoder/reconintra_mt.c | 24 +++--- vp8/decoder/threading.c | 12 +-- vp8/encoder/encodeintra.c | 15 ++-- vp8/encoder/firstpass.c | 4 +- vp8/encoder/mcomp.c | 98 +++++++++++++++---------- vp8/encoder/pickinter.c | 11 ++- vp8/encoder/rdopt.c | 19 +++-- vp8/encoder/temporal_filter.c | 18 ++--- 14 files changed, 219 insertions(+), 190 deletions(-) diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h index d8df42b48..87b83694f 100644 --- a/vp8/common/blockd.h +++ b/vp8/common/blockd.h @@ -185,15 +185,7 @@ typedef struct blockd unsigned char *predictor; short *dequant; - /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */ - unsigned char **base_pre; - int pre; - int pre_stride; - - unsigned char **base_dst; - int dst; - int dst_stride; - + int offset; char *eob; union b_mode_info bmi; diff --git a/vp8/common/mbpitch.c b/vp8/common/mbpitch.c index f8971d754..32025fa2b 100644 --- a/vp8/common/mbpitch.c +++ b/vp8/common/mbpitch.c @@ -17,33 +17,6 @@ typedef enum DEST = 1 } BLOCKSET; -static void setup_block -( - BLOCKD *b, - int mv_stride, - unsigned char **base, - int Stride, - int offset, - BLOCKSET bs -) -{ - - if (bs == DEST) - { - b->dst_stride = Stride; - b->dst = offset; - b->base_dst = base; - } - else - { - b->pre_stride = Stride; - b->pre = offset; - b->base_pre = base; - } - -} - - static void setup_macroblock(MACROBLOCKD *x, BLOCKSET bs) { int block; @@ -65,17 +38,15 @@ static void setup_macroblock(MACROBLOCKD *x, BLOCKSET bs) for (block = 0; block < 16; block++) /* y blocks */ { - setup_block(&x->block[block], x->dst.y_stride, y, x->dst.y_stride, - (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4, bs); + x->block[block].offset = + (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4; } for (block = 16; block < 20; block++) /* U and V blocks */ { - setup_block(&x->block[block], x->dst.uv_stride, u, x->dst.uv_stride, - ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs); - - setup_block(&x->block[block+4], x->dst.uv_stride, v, x->dst.uv_stride, - ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs); + x->block[block+4].offset = + x->block[block].offset = + ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4; } } diff --git a/vp8/common/reconinter.c b/vp8/common/reconinter.c index c4bf7d2bb..77d0dfb50 100644 --- a/vp8/common/reconinter.c +++ b/vp8/common/reconinter.c @@ -122,25 +122,19 @@ void vp8_copy_mem8x4_c( } -void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf) +void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf) { int r; - unsigned char *ptr_base; - unsigned char *ptr; unsigned char *pred_ptr = d->predictor; - - ptr_base = *(d->base_pre); + unsigned char *ptr; + ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3); if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) { - ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + 
(d->bmi.mv.as_mv.col >> 3); - sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch); + sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch); } else { - ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); - ptr = ptr_base; - for (r = 0; r < 4; r++) { #if !(CONFIG_FAST_UNALIGNED) @@ -152,65 +146,53 @@ void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf) *(uint32_t *)pred_ptr = *(uint32_t *)ptr ; #endif pred_ptr += pitch; - ptr += d->pre_stride; + ptr += pre_stride; } } } -static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride) +static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride) { - unsigned char *ptr_base; unsigned char *ptr; - - ptr_base = *(d->base_pre); - ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); + ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3); if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) { - x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); + x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); } else { - vp8_copy_mem8x8(ptr, d->pre_stride, dst, dst_stride); + vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride); } } -static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride) +static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride) { - unsigned char *ptr_base; unsigned char *ptr; - - ptr_base = *(d->base_pre); - ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); + ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3); if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) { - x->subpixel_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); + x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); } else { - vp8_copy_mem8x4(ptr, d->pre_stride, dst, dst_stride); + vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride); } } -static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, vp8_subpix_fn_t sppf) +static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf) { int r; - unsigned char *ptr_base; unsigned char *ptr; - - ptr_base = *(d->base_pre); + ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3); if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) { - ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); - sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); + sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); } else { - ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); - ptr = ptr_base; - for (r = 0; r < 4; r++) { #if !(CONFIG_FAST_UNALIGNED) @@ -222,7 +204,7 @@ static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stri *(uint32_t 
*)dst = *(uint32_t *)ptr ; #endif dst += dst_stride; - ptr += d->pre_stride; + ptr += pre_stride; } } } @@ -238,7 +220,7 @@ void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x) int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; int offset; - int pre_stride = x->block[16].pre_stride; + int pre_stride = x->pre.uv_stride; /* calc uv motion vectors */ if (mv_row < 0) @@ -277,6 +259,8 @@ void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x) void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) { int i, j; + int pre_stride = x->pre.uv_stride; + unsigned char *base_pre; /* build uv mvs */ for (i = 0; i < 2; i++) @@ -316,17 +300,33 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) } } - for (i = 16; i < 24; i += 2) + base_pre = x->pre.u_buffer; + for (i = 16; i < 20; i += 2) { BLOCKD *d0 = &x->block[i]; BLOCKD *d1 = &x->block[i+1]; if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) - build_inter_predictors2b(x, d0, d0->predictor, 8); + build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride); else { - vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict); - vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict); + vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict); + vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict); + } + } + + base_pre = x->pre.v_buffer; + for (i = 20; i < 24; i += 2) + { + BLOCKD *d0 = &x->block[i]; + BLOCKD *d1 = &x->block[i+1]; + + if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) + build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride); + else + { + vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict); + vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict); } } } @@ -341,7 +341,7 @@ void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *ptr; int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; - int pre_stride = x->block[0].pre_stride; + int pre_stride = x->pre.y_stride; ptr_base = x->pre.y_buffer; ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); @@ -408,7 +408,7 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, int_mv _16x16mv; unsigned char *ptr_base = x->pre.y_buffer; - int pre_stride = x->block[0].pre_stride; + int pre_stride = x->pre.y_stride; _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int; @@ -465,11 +465,13 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, static void build_inter4x4_predictors_mb(MACROBLOCKD *x) { int i; + unsigned char *base_dst = x->dst.y_buffer; + unsigned char *base_pre = x->pre.y_buffer; if (x->mode_info_context->mbmi.partitioning < 3) { BLOCKD *b; - int dst_stride = x->block[ 0].dst_stride; + int dst_stride = x->dst.y_stride; x->block[ 0].bmi = x->mode_info_context->bmi[ 0]; x->block[ 2].bmi = x->mode_info_context->bmi[ 2]; @@ -484,13 +486,13 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) } b = &x->block[ 0]; - build_inter_predictors4b(x, b, *(b->base_dst) + b->dst, dst_stride); + build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride); b = &x->block[ 2]; - build_inter_predictors4b(x, b, *(b->base_dst) + b->dst, dst_stride); + build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride); b = &x->block[ 8]; - build_inter_predictors4b(x, b, *(b->base_dst) + b->dst, dst_stride); + build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, 
dst_stride); b = &x->block[10]; - build_inter_predictors4b(x, b, *(b->base_dst) + b->dst, dst_stride); + build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride); } else { @@ -498,7 +500,7 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) { BLOCKD *d0 = &x->block[i]; BLOCKD *d1 = &x->block[i+1]; - int dst_stride = x->block[ 0].dst_stride; + int dst_stride = x->dst.y_stride; x->block[i+0].bmi = x->mode_info_context->bmi[i+0]; x->block[i+1].bmi = x->mode_info_context->bmi[i+1]; @@ -509,31 +511,51 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) } if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) - build_inter_predictors2b(x, d0, *(d0->base_dst) + d0->dst, dst_stride); + build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride); else { - build_inter_predictors_b(d0, *(d0->base_dst) + d0->dst, dst_stride, x->subpixel_predict); - build_inter_predictors_b(d1, *(d1->base_dst) + d1->dst, dst_stride, x->subpixel_predict); + build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); + build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); } } } + base_dst = x->dst.u_buffer; + base_pre = x->pre.u_buffer; + for (i = 16; i < 20; i += 2) + { + BLOCKD *d0 = &x->block[i]; + BLOCKD *d1 = &x->block[i+1]; + int dst_stride = x->dst.uv_stride; + + /* Note: uv mvs already clamped in build_4x4uvmvs() */ + + if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) + build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride); + else + { + build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); + build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); + } + } - for (i = 16; i < 24; i += 2) + base_dst = x->dst.v_buffer; + base_pre = x->pre.v_buffer; + for (i = 20; i < 24; i += 2) { BLOCKD *d0 = &x->block[i]; BLOCKD *d1 = &x->block[i+1]; - int dst_stride = x->block[ 16].dst_stride; + int dst_stride = x->dst.uv_stride; /* Note: uv mvs already clamped in build_4x4uvmvs() */ if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) - build_inter_predictors2b(x, d0, *(d0->base_dst) + d0->dst, dst_stride); + build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride); else { - build_inter_predictors_b(d0, *(d0->base_dst) + d0->dst, dst_stride, x->subpixel_predict); - build_inter_predictors_b(d1, *(d1->base_dst) + d1->dst, dst_stride, x->subpixel_predict); + build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); + build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); } } } diff --git a/vp8/common/reconinter.h b/vp8/common/reconinter.h index f57ff73c5..233c02e5b 100644 --- a/vp8/common/reconinter.h +++ b/vp8/common/reconinter.h @@ -25,6 +25,8 @@ extern void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *dst_y, int dst_ystride); extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, + unsigned char *base_pre, + int pre_stride, vp8_subpix_fn_t sppf); extern void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x); diff --git a/vp8/common/reconintra4x4.c b/vp8/common/reconintra4x4.c index 7c2b46d43..f4424ffdd 100644 --- a/vp8/common/reconintra4x4.c +++ b/vp8/common/reconintra4x4.c @@ -304,12 +304,13 @@ void vp8_intra4x4_predict_c(unsigned char *src, int src_stride, */ void 
vp8_intra_prediction_down_copy(MACROBLOCKD *x) { - unsigned char *above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16; + int dst_stride = x->dst.y_stride; + unsigned char *above_right = x->dst.y_buffer - dst_stride + 16; unsigned int *src_ptr = (unsigned int *)above_right; - unsigned int *dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride); - unsigned int *dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride); - unsigned int *dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride); + unsigned int *dst_ptr0 = (unsigned int *)(above_right + 4 * dst_stride); + unsigned int *dst_ptr1 = (unsigned int *)(above_right + 8 * dst_stride); + unsigned int *dst_ptr2 = (unsigned int *)(above_right + 12 * dst_stride); *dst_ptr0 = *src_ptr; *dst_ptr1 = *src_ptr; diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c index 7ba573123..ad86ea5d3 100644 --- a/vp8/decoder/decodframe.c +++ b/vp8/decoder/decodframe.c @@ -165,6 +165,8 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, else { short *DQC = xd->dequant_y1; + int dst_stride = xd->dst.y_stride; + unsigned char *base_dst = xd->dst.y_buffer; /* clear out residual eob info */ if(xd->mode_info_context->mbmi.mb_skip_coeff) @@ -177,9 +179,9 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, BLOCKD *b = &xd->block[i]; int b_mode = xd->mode_info_context->bmi[i].as_mode; - vp8_intra4x4_predict - ( *(b->base_dst) + b->dst, b->dst_stride, b_mode, - *(b->base_dst) + b->dst, b->dst_stride ); + + vp8_intra4x4_predict (base_dst + b->offset, dst_stride, b_mode, + base_dst + b->offset, dst_stride ); if (xd->eobs[i]) { @@ -187,14 +189,14 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, { vp8_dequant_idct_add (b->qcoeff, DQC, - *(b->base_dst) + b->dst, b->dst_stride); + base_dst + b->offset, dst_stride); } else { vp8_dc_only_idct_add (b->qcoeff[0] * DQC[0], - *(b->base_dst) + b->dst, b->dst_stride, - *(b->base_dst) + b->dst, b->dst_stride); + base_dst + b->offset, dst_stride, + base_dst + b->offset, dst_stride); ((int *)b->qcoeff)[0] = 0; } } diff --git a/vp8/decoder/reconintra_mt.c b/vp8/decoder/reconintra_mt.c index 31425a429..a8fedf48b 100644 --- a/vp8/decoder/reconintra_mt.c +++ b/vp8/decoder/reconintra_mt.c @@ -617,12 +617,15 @@ void vp8mt_predict_intra4x4(VP8D_COMP *pbi, unsigned char top_left; /* = Above[-1]; */ BLOCKD *x = &xd->block[num]; + int dst_stride = xd->dst.y_stride; + unsigned char *base_dst = xd->dst.y_buffer; + /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/ if (num < 4 && pbi->common.filter_level) Above = pbi->mt_yabove_row[mb_row] + mb_col*16 + num*4 + 32; else - Above = *(x->base_dst) + x->dst - x->dst_stride; + Above = base_dst + x->offset - dst_stride; if (num%4==0 && pbi->common.filter_level) { @@ -630,10 +633,10 @@ void vp8mt_predict_intra4x4(VP8D_COMP *pbi, Left[i] = pbi->mt_yleft_col[mb_row][num + i]; }else { - Left[0] = (*(x->base_dst))[x->dst - 1]; - Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride]; - Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride]; - Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride]; + Left[0] = (base_dst)[x->offset - 1]; + Left[1] = (base_dst)[x->offset - 1 + dst_stride]; + Left[2] = (base_dst)[x->offset - 1 + 2 * dst_stride]; + Left[3] = (base_dst)[x->offset - 1 + 3 * dst_stride]; } if ((num==4 || num==8 || num==12) && pbi->common.filter_level) @@ -918,19 +921,22 @@ void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD 
*x, int mb_row unsigned int *dst_ptr0; unsigned int *dst_ptr1; unsigned int *dst_ptr2; + int dst_stride = x->dst.y_stride; + unsigned char *base_dst = x->dst.y_buffer; + if (pbi->common.filter_level) above_right = pbi->mt_yabove_row[mb_row] + mb_col*16 + 32 +16; else - above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16; + above_right = base_dst + x->block[0].offset - dst_stride + 16; src_ptr = (unsigned int *)above_right; /*dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride); dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride); dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);*/ - dst_ptr0 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 3 * x->block[0].dst_stride); - dst_ptr1 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 7 * x->block[0].dst_stride); - dst_ptr2 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 11 * x->block[0].dst_stride); + dst_ptr0 = (unsigned int *)(base_dst + x->block[0].offset + 16 + 3 * dst_stride); + dst_ptr1 = (unsigned int *)(base_dst + x->block[0].offset + 16 + 7 * dst_stride); + dst_ptr2 = (unsigned int *)(base_dst + x->block[0].offset + 16 + 11 * dst_stride); *dst_ptr0 = *src_ptr; *dst_ptr1 = *src_ptr; *dst_ptr2 = *src_ptr; diff --git a/vp8/decoder/threading.c b/vp8/decoder/threading.c index c2b15cf18..30665fd29 100644 --- a/vp8/decoder/threading.c +++ b/vp8/decoder/threading.c @@ -171,14 +171,16 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m if (xd->mode_info_context->mbmi.mode == B_PRED) { short *DQC = xd->dequant_y1; + int dst_stride = xd->dst.y_stride; + unsigned char *base_dst = xd->dst.y_buffer; for (i = 0; i < 16; i++) { BLOCKD *b = &xd->block[i]; int b_mode = xd->mode_info_context->bmi[i].as_mode; - vp8mt_predict_intra4x4(pbi, xd, b_mode, *(b->base_dst) + b->dst, - b->dst_stride, mb_row, mb_col, i); + vp8mt_predict_intra4x4(pbi, xd, b_mode, base_dst + b->offset, + dst_stride, mb_row, mb_col, i); if (xd->eobs[i] ) { @@ -186,14 +188,14 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m { vp8_dequant_idct_add (b->qcoeff, DQC, - *(b->base_dst) + b->dst, b->dst_stride); + base_dst + b->offset, dst_stride); } else { vp8_dc_only_idct_add (b->qcoeff[0] * DQC[0], - *(b->base_dst) + b->dst, b->dst_stride, - *(b->base_dst) + b->dst, b->dst_stride); + base_dst + b->offset, dst_stride, + base_dst + b->offset, dst_stride); ((int *)b->qcoeff)[0] = 0; } } diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c index 1f83d2a49..d481c99d3 100644 --- a/vp8/encoder/encodeintra.c +++ b/vp8/encoder/encodeintra.c @@ -53,9 +53,10 @@ void vp8_encode_intra4x4block(MACROBLOCK *x, int ib) { BLOCKD *b = &x->e_mbd.block[ib]; BLOCK *be = &x->block[ib]; + int dst_stride = x->e_mbd.dst.y_stride; + unsigned char *base_dst = x->e_mbd.dst.y_buffer; - vp8_intra4x4_predict - (*(b->base_dst) + b->dst, b->dst_stride, + vp8_intra4x4_predict(base_dst + b->offset, dst_stride, b->bmi.as_mode, b->predictor, 16); vp8_subtract_b(be, b, 16); @@ -66,14 +67,14 @@ void vp8_encode_intra4x4block(MACROBLOCK *x, int ib) if (*b->eob > 1) { - vp8_short_idct4x4llm(b->dqcoeff, - b->predictor, 16, *(b->base_dst) + b->dst, b->dst_stride); + vp8_short_idct4x4llm(b->dqcoeff, + b->predictor, 16, base_dst + b->offset, dst_stride); } else { - vp8_dc_only_idct_add - (b->dqcoeff[0], b->predictor, 16, *(b->base_dst) + b->dst, - b->dst_stride); + vp8_dc_only_idct_add + (b->dqcoeff[0], 
b->predictor, 16, base_dst + b->offset, + dst_stride); } } diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c index 6e5532c01..b3b06ee70 100644 --- a/vp8/encoder/firstpass.c +++ b/vp8/encoder/firstpass.c @@ -396,12 +396,12 @@ static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG unsigned char *src_ptr = (*(b->base_src) + b->src); int src_stride = b->src_stride; unsigned char *ref_ptr; - int ref_stride=d->pre_stride; + int ref_stride = x->e_mbd.pre.y_stride; // Set up pointers for this macro block recon buffer xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset; - ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre ); + ref_ptr = (unsigned char *)(xd->pre.y_buffer + d->offset ); vp8_mse16x16 ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err)); } diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c index dad89c311..67e4f7ead 100644 --- a/vp8/encoder/mcomp.c +++ b/vp8/encoder/mcomp.c @@ -211,10 +211,13 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int y_stride; int offset; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + #if ARCH_X86 || ARCH_X86_64 MACROBLOCKD *xd = &x->e_mbd; - unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col; + unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; unsigned char *y; int buf_r1, buf_r2, buf_c1, buf_c2; @@ -226,11 +229,11 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, y_stride = 32; /* Copy to intermediate buffer before searching. */ - vfp->copymem(y0 - buf_c1 - d->pre_stride*buf_r1, d->pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2); + vfp->copymem(y0 - buf_c1 - pre_stride*buf_r1, pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2); y = xd->y_buf + y_stride*buf_r1 +buf_c1; #else - unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col; - y_stride = d->pre_stride; + unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; + y_stride = pre_stride; #endif offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col; @@ -347,19 +350,21 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int whichdir ; int thismse; int y_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; #if ARCH_X86 || ARCH_X86_64 MACROBLOCKD *xd = &x->e_mbd; - unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col; + unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; unsigned char *y; y_stride = 32; /* Copy 18 rows x 32 cols area to intermediate buffer before searching. 
*/ - vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18); + vfp->copymem(y0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18); y = xd->y_buf + y_stride + 1; #else - unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col; - y_stride = d->pre_stride; + unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; + y_stride = pre_stride; #endif // central mv @@ -662,19 +667,21 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int whichdir ; int thismse; int y_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; #if ARCH_X86 || ARCH_X86_64 MACROBLOCKD *xd = &x->e_mbd; - unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col; + unsigned char *y0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; unsigned char *y; y_stride = 32; /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */ - vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18); + vfp->copymem(y0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18); y = xd->y_buf + y_stride + 1; #else - unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col; - y_stride = d->pre_stride; + unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; + y_stride = pre_stride; #endif // central mv @@ -842,7 +849,10 @@ int vp8_hex_search unsigned char *what = (*(b->base_src) + b->src); int what_stride = b->src_stride; - int in_what_stride = d->pre_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + + int in_what_stride = pre_stride; int br, bc; int_mv this_mv; unsigned int bestsad = 0x7fffffff; @@ -865,8 +875,8 @@ int vp8_hex_search bc = ref_mv->as_mv.col; // Work out the start point for the search - base_offset = (unsigned char *)(*(d->base_pre) + d->pre); - this_offset = base_offset + (br * (d->pre_stride)) + bc; + base_offset = (unsigned char *)(base_pre + d->offset); + this_offset = base_offset + (br * (pre_stride)) + bc; this_mv.as_mv.row = br; this_mv.as_mv.col = bc; bestsad = vfp->sdf( what, what_stride, this_offset, @@ -1029,7 +1039,9 @@ int vp8_diamond_search_sad_c unsigned char *what = (*(b->base_src) + b->src); int what_stride = b->src_stride; unsigned char *in_what; - int in_what_stride = d->pre_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; unsigned char *best_address; int tot_steps; @@ -1061,7 +1073,7 @@ int vp8_diamond_search_sad_c best_mv->as_mv.col = ref_col; // Work out the start point for the search - in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col); + in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col); best_address = in_what; // Check the starting position @@ -1150,7 +1162,9 @@ int vp8_diamond_search_sadx4 unsigned char *what = (*(b->base_src) + b->src); int what_stride = b->src_stride; unsigned char *in_what; - int in_what_stride = d->pre_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; unsigned char *best_address; int tot_steps; @@ -1182,7 +1196,7 @@ int vp8_diamond_search_sadx4 best_mv->as_mv.col = ref_col; // Work out the start point for the search - in_what = 
(unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col); + in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col); best_address = in_what; // Check the starting position @@ -1300,8 +1314,10 @@ int vp8_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, unsigned char *what = (*(b->base_src) + b->src); int what_stride = b->src_stride; unsigned char *in_what; - int in_what_stride = d->pre_stride; - int mv_stride = d->pre_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; + int mv_stride = pre_stride; unsigned char *bestaddress; int_mv *best_mv = &d->bmi.mv; int_mv this_mv; @@ -1325,8 +1341,8 @@ int vp8_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; // Work out the mid point for the search - in_what = *(d->base_pre) + d->pre; - bestaddress = in_what + (ref_row * d->pre_stride) + ref_col; + in_what = base_pre + d->offset; + bestaddress = in_what + (ref_row * pre_stride) + ref_col; best_mv->as_mv.row = ref_row; best_mv->as_mv.col = ref_col; @@ -1392,8 +1408,10 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, unsigned char *what = (*(b->base_src) + b->src); int what_stride = b->src_stride; unsigned char *in_what; - int in_what_stride = d->pre_stride; - int mv_stride = d->pre_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; + int mv_stride = pre_stride; unsigned char *bestaddress; int_mv *best_mv = &d->bmi.mv; int_mv this_mv; @@ -1419,8 +1437,8 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; // Work out the mid point for the search - in_what = *(d->base_pre) + d->pre; - bestaddress = in_what + (ref_row * d->pre_stride) + ref_col; + in_what = base_pre + d->offset; + bestaddress = in_what + (ref_row * pre_stride) + ref_col; best_mv->as_mv.row = ref_row; best_mv->as_mv.col = ref_col; @@ -1521,9 +1539,11 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, { unsigned char *what = (*(b->base_src) + b->src); int what_stride = b->src_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; unsigned char *in_what; - int in_what_stride = d->pre_stride; - int mv_stride = d->pre_stride; + int in_what_stride = pre_stride; + int mv_stride = pre_stride; unsigned char *bestaddress; int_mv *best_mv = &d->bmi.mv; int_mv this_mv; @@ -1550,8 +1570,8 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; // Work out the mid point for the search - in_what = *(d->base_pre) + d->pre; - bestaddress = in_what + (ref_row * d->pre_stride) + ref_col; + in_what = base_pre + d->offset; + bestaddress = in_what + (ref_row * pre_stride) + ref_col; best_mv->as_mv.row = ref_row; best_mv->as_mv.col = ref_col; @@ -1684,10 +1704,12 @@ int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv short this_row_offset, this_col_offset; int what_stride = b->src_stride; - int in_what_stride = d->pre_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; unsigned char *what = (*(b->base_src) + b->src); - unsigned char *best_address = (unsigned char *)(*(d->base_pre) + 
d->pre + - (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col); + unsigned char *best_address = (unsigned char *)(base_pre + d->offset + + (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col); unsigned char *check_here; unsigned int thissad; int_mv this_mv; @@ -1761,10 +1783,12 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, short this_row_offset, this_col_offset; int what_stride = b->src_stride; - int in_what_stride = d->pre_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; unsigned char *what = (*(b->base_src) + b->src); - unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre + - (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col); + unsigned char *best_address = (unsigned char *)(base_pre + d->offset + + (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col); unsigned char *check_here; unsigned int thissad; int_mv this_mv; diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c index 732701126..1f9d05533 100644 --- a/vp8/encoder/pickinter.c +++ b/vp8/encoder/pickinter.c @@ -68,12 +68,13 @@ static int get_inter_mbpred_error(MACROBLOCK *mb, BLOCKD *d = &mb->e_mbd.block[0]; unsigned char *what = (*(b->base_src) + b->src); int what_stride = b->src_stride; - unsigned char *in_what = *(d->base_pre) + d->pre ; - int in_what_stride = d->pre_stride; + int pre_stride = mb->e_mbd.pre.y_stride; + unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset ; + int in_what_stride = pre_stride; int xoffset = this_mv.as_mv.col & 7; int yoffset = this_mv.as_mv.row & 7; - in_what += (this_mv.as_mv.row >> 3) * d->pre_stride + (this_mv.as_mv.col >> 3); + in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3); if (xoffset | yoffset) { @@ -136,6 +137,8 @@ static int pick_intra4x4block( BLOCKD *b = &x->e_mbd.block[ib]; BLOCK *be = &x->block[ib]; + int dst_stride = x->e_mbd.dst.y_stride; + unsigned char *base_dst = x->e_mbd.dst.y_buffer; B_PREDICTION_MODE mode; int best_rd = INT_MAX; // 1<<30 int rate; @@ -147,7 +150,7 @@ static int pick_intra4x4block( rate = mode_costs[mode]; vp8_intra4x4_predict - (*(b->base_dst) + b->dst, b->dst_stride, + (base_dst + b->offset, dst_stride, mode, b->predictor, 16); distortion = get_prediction_error(be, b); this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c index 76c764844..3362285c2 100644 --- a/vp8/encoder/rdopt.c +++ b/vp8/encoder/rdopt.c @@ -457,7 +457,7 @@ int VP8_UVSSE(MACROBLOCK *x) int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row; int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col; int offset; - int pre_stride = x->e_mbd.block[16].pre_stride; + int pre_stride = x->e_mbd.pre.uv_stride; if (mv_row < 0) mv_row -= 1; @@ -635,6 +635,8 @@ static int rd_pick_intra4x4block( * */ DECLARE_ALIGNED_ARRAY(16, unsigned char, best_predictor, 16*4); DECLARE_ALIGNED_ARRAY(16, short, best_dqcoeff, 16); + int dst_stride = x->e_mbd.dst.y_stride; + unsigned char *base_dst = x->e_mbd.dst.y_buffer; for (mode = B_DC_PRED; mode <= B_HU_PRED; mode++) { @@ -643,9 +645,8 @@ static int rd_pick_intra4x4block( rate = bmode_costs[mode]; - vp8_intra4x4_predict - (*(b->base_dst) + b->dst, b->dst_stride, - mode, b->predictor, 16); + vp8_intra4x4_predict(base_dst + b->offset, dst_stride, mode, + b->predictor, 16); vp8_subtract_b(be, b, 16); x->short_fdct4x4(be->src_diff, be->coeff, 32); x->quantize_b(be, b); @@ -674,8 +675,8 @@ static int rd_pick_intra4x4block( 
} b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode); - vp8_short_idct4x4llm(best_dqcoeff, - best_predictor, 16, *(b->base_dst) + b->dst, b->dst_stride); + vp8_short_idct4x4llm(best_dqcoeff, best_predictor, 16, base_dst + b->offset, + dst_stride); return best_rd; } @@ -1008,6 +1009,9 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, int const *labels { int i; unsigned int distortion = 0; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + for (i = 0; i < 16; i++) { @@ -1016,8 +1020,7 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, int const *labels BLOCKD *bd = &x->e_mbd.block[i]; BLOCK *be = &x->block[i]; - - vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict); + vp8_build_inter_predictors_b(bd, 16, base_pre, pre_stride, x->e_mbd.subpixel_predict); vp8_subtract_b(be, bd, 16); x->short_fdct4x4(be->src_diff, be->coeff, 32); diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c index 57caccf24..709f6e2b4 100644 --- a/vp8/encoder/temporal_filter.c +++ b/vp8/encoder/temporal_filter.c @@ -164,9 +164,9 @@ static int vp8_temporal_filter_find_matching_mb_c unsigned char **base_src = b->base_src; int src = b->src; int src_stride = b->src_stride; - unsigned char **base_pre = d->base_pre; - int pre = d->pre; - int pre_stride = d->pre_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int pre = d->offset; + int pre_stride = x->e_mbd.pre.y_stride; best_ref_mv1.as_int = 0; best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >>3; @@ -177,9 +177,9 @@ static int vp8_temporal_filter_find_matching_mb_c b->src_stride = arf_frame->y_stride; b->src = mb_offset; - d->base_pre = &frame_ptr->y_buffer; - d->pre_stride = frame_ptr->y_stride; - d->pre = mb_offset; + x->e_mbd.pre.y_buffer = frame_ptr->y_buffer; + x->e_mbd.pre.y_stride = frame_ptr->y_stride; + d->offset = mb_offset; // Further step/diamond searches as necessary if (cpi->Speed < 8) @@ -221,9 +221,9 @@ static int vp8_temporal_filter_find_matching_mb_c b->base_src = base_src; b->src = src; b->src_stride = src_stride; - d->base_pre = base_pre; - d->pre = pre; - d->pre_stride = pre_stride; + x->e_mbd.pre.y_buffer = base_pre; + d->offset = pre; + x->e_mbd.pre.y_stride = pre_stride; return bestsme; } -- 2.40.0
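For readers following the cleanup: the pointer arithmetic that the removed BLOCKD fields (base_pre/pre/pre_stride and base_dst/dst/dst_stride) used to carry is now derived from the MACROBLOCKD frame buffers plus the single per-block offset added in blockd.h. A minimal sketch of that pattern, not part of the patch, is shown below; it assumes the MACROBLOCKD/BLOCKD definitions from vp8/common/blockd.h, and the helper names block_pre_ptr()/block_dst_ptr() are illustrative only.

    #include "vp8/common/blockd.h"

    /* Luma reference pointer for a 4x4 block: frame buffer plus per-block
     * offset, where the old code read *(d->base_pre) + d->pre. */
    static unsigned char *block_pre_ptr(const MACROBLOCKD *xd, const BLOCKD *d)
    {
        return xd->pre.y_buffer + d->offset;
    }

    /* Luma reconstruction pointer, where the old code read
     * *(d->base_dst) + d->dst. */
    static unsigned char *block_dst_ptr(const MACROBLOCKD *xd, const BLOCKD *d)
    {
        return xd->dst.y_buffer + d->offset;
    }

Strides follow the same rule: callers read x->pre.y_stride / x->dst.y_stride (or uv_stride for chroma) directly and, where a callee needs them, pass the base pointer and stride explicitly, as in the new vp8_build_inter_predictors_b() signature used throughout the patch:

    vp8_build_inter_predictors_b(d, 16, xd->pre.y_buffer, xd->pre.y_stride,
                                 xd->subpixel_predict);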