}
}
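+/* Byte offset of the 4x4 block with raster index 'block' within a plane of
+ * the given block size: row (block >> bw) and column (block & ((1 << bw) - 1)),
+ * each scaled by 4 samples, using the caller-supplied stride. */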
static int raster_block_offset(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
- int plane, int block) {
+ int plane, int block, int stride) {
const int bw = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
- const int stride = 4 << bw;
const int y = 4 * (block >> bw), x = 4 * (block & ((1 << bw) - 1));
return y * stride + x;
}
static int16_t* raster_block_offset_int16(MACROBLOCKD *xd,
BLOCK_SIZE_TYPE bsize,
int plane, int block, int16_t *base) {
- return base + raster_block_offset(xd, bsize, plane, block);
+ const int bw = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int stride = 4 << bw;
+ return base + raster_block_offset(xd, bsize, plane, block, stride);
+}
+static uint8_t* raster_block_offset_uint8(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE bsize,
+ int plane, int block,
+ uint8_t *base, int stride) {
+ return base + raster_block_offset(xd, bsize, plane, block, stride);
}
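+/* Illustrative use: for the 16x16 luma plane bw == 2, so the source pixels
+ * of 4x4 block 5 start at
+ *   raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, 5,
+ *                             x->plane[0].src.buf, x->plane[0].src.stride),
+ * i.e. src.buf + 4 * src.stride + 4. */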
#if CONFIG_CODE_ZEROGROUP
struct loop_filter_info;
/* Encoder forward decls */
-struct block;
struct macroblock;
struct vp9_variance_vtable;
#
# Motion search
#
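+# Note: the search prototypes below no longer take a "struct block *b";
+# source pixels are read from the macroblock's per-plane src buffers.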
-prototype int vp9_full_search_sad "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
+prototype int vp9_full_search_sad "struct macroblock *x, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
specialize vp9_full_search_sad sse3 sse4_1
vp9_full_search_sad_sse3=vp9_full_search_sadx3
vp9_full_search_sad_sse4_1=vp9_full_search_sadx8
-prototype int vp9_refining_search_sad "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
+prototype int vp9_refining_search_sad "struct macroblock *x, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
specialize vp9_refining_search_sad sse3
vp9_refining_search_sad_sse3=vp9_refining_search_sadx4
-prototype int vp9_diamond_search_sad "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
+prototype int vp9_diamond_search_sad "struct macroblock *x, struct blockd *d, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
specialize vp9_diamond_search_sad sse3
vp9_diamond_search_sad_sse3=vp9_diamond_search_sadx4
int offset;
} search_site;
-typedef struct block {
- uint8_t **base_src;
- uint8_t **base_second_src;
- int src;
- int src_stride;
-} BLOCK;
-
typedef struct {
int count;
struct {
struct macroblock_plane {
DECLARE_ALIGNED(16, int16_t, src_diff[64*64]);
DECLARE_ALIGNED(16, int16_t, coeff[64*64]);
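+  // Source pixels for this plane (pointer + stride); replaces the removed
+  // per-block BLOCK array and the YV12 source copy on MACROBLOCK.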
+ struct buf_2d src;
// Quantizer settings
int16_t *quant;
typedef struct macroblock MACROBLOCK;
struct macroblock {
struct macroblock_plane plane[MAX_MB_PLANE];
- int skip_block;
- // 16 Y blocks, 4 U blocks, 4 V blocks,
- BLOCK block[24];
-
- YV12_BUFFER_CONFIG src;
MACROBLOCKD e_mbd;
+ int skip_block;
PARTITION_INFO *partition_info; /* work pointer */
PARTITION_INFO *pi; /* Corresponds to upper left visible macroblock */
PARTITION_INFO *pip; /* Base of allocated array */
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
- act = vp9_variance16x16(x->src.y_buffer, x->src.y_stride, VP9_VAR_OFFS, 0,
- &sse);
+ act = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+ VP9_VAR_OFFS, 0, &sse);
act <<= 4;
/* If the region is flat, lower the activity some more. */
x->mb_activity_ptr++;
// adjust to the next column of source macroblocks
- x->src.y_buffer += 16;
+ x->plane[0].src.buf += 16;
}
// adjust to the next row of mbs
- x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
+ x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
#if ALT_ACT_MEASURE
// extend the recon for intra prediction
return seg_id;
}
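+/* Point each plane's src buf_2d at the macroblock at (mb_row, mb_col) in
+ * the source frame, honoring per-plane subsampling. The NULL argument is
+ * presumably setup_pred_plane's optional scale-factors pointer, which the
+ * source buffers do not need. */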
+void vp9_setup_src_planes(MACROBLOCK *x,
+ const YV12_BUFFER_CONFIG *src,
+ int mb_row, int mb_col) {
+ setup_pred_plane(&x->plane[0].src,
+ src->y_buffer, src->y_stride,
+ mb_row, mb_col, NULL,
+ x->e_mbd.plane[0].subsampling_x,
+ x->e_mbd.plane[0].subsampling_y);
+ setup_pred_plane(&x->plane[1].src,
+ src->u_buffer, src->uv_stride,
+ mb_row, mb_col, NULL,
+ x->e_mbd.plane[1].subsampling_x,
+ x->e_mbd.plane[1].subsampling_y);
+ setup_pred_plane(&x->plane[2].src,
+ src->v_buffer, src->uv_stride,
+ mb_row, mb_col, NULL,
+ x->e_mbd.plane[2].subsampling_x,
+ x->e_mbd.plane[2].subsampling_y);
+}
+
static void set_offsets(VP9_COMP *cpi,
int mb_row, int mb_col, BLOCK_SIZE_TYPE bsize) {
MACROBLOCK *const x = &cpi->mb;
set_mb_row_col(cm, xd, mb_row, bh, mb_col, bw);
/* set up source buffers */
- setup_pred_block(&x->src, cpi->Source, mb_row, mb_col, NULL, NULL);
+ vp9_setup_src_planes(x, cpi->Source, mb_row, mb_col);
/* R/D setup */
x->rddiv = cpi->RDDIV;
vp9_init_mbmode_probs(cm);
// Copy data over into macro block data structures.
- x->src = *cpi->Source;
+ vp9_setup_src_planes(x, cpi->Source, 0, 0);
// TODO(jkoleszar): are these initializations required?
setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
}
void vp9_build_block_offsets(MACROBLOCK *x) {
- int block = 0;
- int br, bc;
-
vp9_build_block_doffsets(&x->e_mbd);
-
- for (br = 0; br < 4; br++) {
- for (bc = 0; bc < 4; bc++) {
- BLOCK *this_block = &x->block[block];
- // this_block->base_src = &x->src.y_buffer;
- // this_block->src_stride = x->src.y_stride;
- // this_block->src = 4 * br * this_block->src_stride + 4 * bc;
- this_block->base_src = &x->src.y_buffer;
- this_block->src_stride = x->src.y_stride;
- this_block->src = 4 * br * this_block->src_stride + 4 * bc;
- ++block;
- }
- }
-
- // u blocks
- for (br = 0; br < 2; br++) {
- for (bc = 0; bc < 2; bc++) {
- BLOCK *this_block = &x->block[block];
- this_block->base_src = &x->src.u_buffer;
- this_block->src_stride = x->src.uv_stride;
- this_block->src = 4 * br * this_block->src_stride + 4 * bc;
- ++block;
- }
- }
-
- // v blocks
- for (br = 0; br < 2; br++) {
- for (bc = 0; bc < 2; bc++) {
- BLOCK *this_block = &x->block[block];
- this_block->base_src = &x->src.v_buffer;
- this_block->src_stride = x->src.uv_stride;
- this_block->src = 4 * br * this_block->src_stride + 4 * bc;
- ++block;
- }
- }
}
static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
#define VP9_ENCODER_VP9_ENCODEFRAME_H_
struct macroblock;
+struct yv12_buffer_config;
void vp9_build_block_offsets(struct macroblock *x);
+void vp9_setup_src_planes(struct macroblock *x,
+ const struct yv12_buffer_config *src,
+ int mb_row, int mb_col);
+
#endif // VP9_ENCODER_VP9_ENCODEFRAME_H_
static void encode_intra4x4block(MACROBLOCK *x, int ib) {
BLOCKD *b = &x->e_mbd.block[ib];
- BLOCK *be = &x->block[ib];
MACROBLOCKD * const xd = &x->e_mbd;
TX_TYPE tx_type;
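+  // Per-block source pointer derived from the plane buffer; this pattern
+  // replaces "*(be->base_src) + be->src" throughout the encoder.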
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib,
+ x->plane[0].src.buf, x->plane[0].src.stride);
int16_t* const src_diff =
raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib,
x->plane[0].src_diff);
vp9_intra4x4_predict(&x->e_mbd, b, b->bmi.as_mode.first,
*(b->base_dst) + b->dst, b->dst_stride);
vp9_subtract_block(4, 4, src_diff, 16,
- *(be->base_src) + be->src, be->src_stride,
+ src, x->plane[0].src.stride,
*(b->base_dst) + b->dst, b->dst_stride);
tx_type = get_tx_type_4x4(&x->e_mbd, ib);
void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
MACROBLOCKD *xd = &x->e_mbd;
BLOCKD *b = &xd->block[ib];
- BLOCK *be = &x->block[ib];
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib,
+ x->plane[0].src.buf, x->plane[0].src.stride);
int16_t* const src_diff =
raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib,
x->plane[0].src_diff);
*(b->base_dst) + b->dst, b->dst_stride);
// generate residual blocks
vp9_subtract_block(8, 8, src_diff, 16,
- *(be->base_src) + be->src, be->src_stride,
+ src, x->plane[0].src.stride,
*(b->base_dst) + b->dst, b->dst_stride);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
assert(idx < 16);
b = &xd->block[ib + iblock[i]];
- be = &x->block[ib + iblock[i]];
tx_type = get_tx_type_4x4(xd, ib + iblock[i]);
if (tx_type != DCT_DCT) {
vp9_short_fht4x4(src_diff, coeff, 16, tx_type);
static void encode_intra_uv4x4(MACROBLOCK *x, int ib, int mode) {
MACROBLOCKD * const xd = &x->e_mbd;
BLOCKD *b = &x->e_mbd.block[ib];
- BLOCK *be = &x->block[ib];
int16_t * const dqcoeff = MB_SUBBLOCK_FIELD(xd, dqcoeff, ib);
int16_t* const coeff = MB_SUBBLOCK_FIELD(x, coeff, ib);
const int plane = ib < 20 ? 1 : 2;
const int block = ib < 20 ? ib - 16 : ib - 20;
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, plane, block,
+ x->plane[plane].src.buf,
+ x->plane[plane].src.stride);
int16_t* const src_diff =
raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, plane, block,
x->plane[plane].src_diff);
assert(xd->plane[1].subsampling_x == 1);
vp9_subtract_block(4, 4, src_diff, 8,
- *(be->base_src) + be->src, be->src_stride,
+ src, x->plane[plane].src.stride,
*(b->base_dst) + b->dst, b->dst_stride);
x->fwd_txm4x4(src_diff, coeff, 16);
const MACROBLOCKD * const xd = &x->e_mbd;
const int bw = 4 << (b_width_log2(bsize) - xd->plane[plane].subsampling_x);
const int bh = 4 << (b_height_log2(bsize) - xd->plane[plane].subsampling_y);
- const uint8_t *src = plane == 0 ? x->src.y_buffer :
- plane == 1 ? x->src.u_buffer : x->src.v_buffer;
- const int src_stride = plane == 0 ? x->src.y_stride : x->src.uv_stride;
+ const uint8_t *src = x->plane[plane].src.buf;
+ const int src_stride = x->plane[plane].src.stride;
assert(plane < 3);
vp9_subtract_block(bh, bw,
static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                             YV12_BUFFER_CONFIG *recon_buffer,
                             int *best_motion_err, int recon_yoffset) {
MACROBLOCKD *const xd = &x->e_mbd;
- BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
- uint8_t *src_ptr = (*(b->base_src) + b->src);
- int src_stride = b->src_stride;
+ uint8_t *src_ptr = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
uint8_t *ref_ptr;
int ref_stride = d->pre_stride;
YV12_BUFFER_CONFIG *recon_buffer,
int *best_motion_err, int recon_yoffset) {
MACROBLOCKD *const xd = &x->e_mbd;
- BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
int num00;
tmp_mv.as_int = 0;
ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3;
ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3;
- tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param,
+ tmp_err = cpi->diamond_search_sad(x, d, &ref_mv_full, &tmp_mv, step_param,
x->sadperbit16, &num00, &v_fn_ptr,
x->nmvjointcost,
x->mvcost, ref_mv);
if (num00)
num00--;
else {
- tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv,
+ tmp_err = cpi->diamond_search_sad(x, d, &ref_mv_full, &tmp_mv,
step_param + n, x->sadperbit16,
&num00, &v_fn_ptr,
x->nmvjointcost,
vp9_clear_system_state(); // __asm emms;
- x->src = * cpi->Source;
+ vp9_setup_src_planes(x, cpi->Source, 0, 0);
setup_pre_planes(xd, lst_yv12, NULL, 0, 0, NULL, NULL);
setup_dst_planes(xd, new_yv12, 0, 0);
coded_error += (int64_t)this_error;
// adjust to the next column of macroblocks
- x->src.y_buffer += 16;
- x->src.u_buffer += 8;
- x->src.v_buffer += 8;
+ x->plane[0].src.buf += 16;
+ x->plane[1].src.buf += 8;
+ x->plane[2].src.buf += 8;
recon_yoffset += 16;
recon_uvoffset += 8;
}
// adjust to the next row of mbs
- x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
- x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
- x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+ x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
+ x->plane[1].src.buf += 8 * x->plane[1].src.stride - 8 * cm->mb_cols;
+  x->plane[2].src.buf += 8 * x->plane[2].src.stride - 8 * cm->mb_cols;
// extend the recon for intra prediction
vp9_extend_mb_row(new_yv12, xd->plane[0].dst.buf + 16,
int mb_col) {
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- BLOCK *b = &x->block[0];
BLOCKD *d = &xd->block[0];
vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
unsigned int best_err;
/*cpi->sf.search_method == HEX*/
best_err = vp9_hex_search(
- x, b, d,
+ x, d,
&ref_full, dst_mv,
step_param,
x->errorperbit,
int distortion;
unsigned int sse;
best_err = cpi->find_fractional_mv_step(
- x, b, d,
+ x, d,
dst_mv, ref_mv,
x->errorperbit, &v_fn_ptr,
NULL, NULL,
vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
- best_err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
+ best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
INT_MAX);
for (n = 0; n < 16; n++) {
BLOCKD *d = &xd->block[n];
- BLOCK *b = &x->block[n];
-
- b->base_src = &x->src.y_buffer;
- b->src_stride = x->src.y_stride;
- b->src = x->src.y_stride * (n & 12) + (n & 3) * 4;
d->base_pre = &xd->plane[0].pre[0].buf;
d->pre_stride = xd->plane[0].pre[0].stride;
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
- err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
INT_MAX);
dst_mv->as_int = 0;
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
- err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
INT_MAX);
const int bhl = b_height_log2(BLOCK_SIZE_MB16X16), bh = 4 << bhl;
xd->mode_info_context->mbmi.mode = mode;
- vp9_build_intra_predictors(x->src.y_buffer, x->src.y_stride,
+ vp9_build_intra_predictors(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
xd->mode_info_context->mbmi.mode,
bw, bh,
xd->up_available, xd->left_available,
xd->right_available);
- err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
+ err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err);
// find best
VP9_COMMON *cm = &cpi->common;
// FIXME in practice we're completely ignoring chroma here
- x->src.y_buffer = buf->y_buffer + mb_y_offset;
- x->src.y_stride = buf->y_stride;
+ x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
+ x->plane[0].src.stride = buf->y_stride;
xd->plane[0].dst.buf = cm->yv12_fb[cm->new_fb_idx].y_buffer + mb_y_offset;
xd->plane[0].dst.stride = cm->yv12_fb[cm->new_fb_idx].y_stride;
/* returns the subpel variance error at (r, c) */
#define DIST(r, c) \
- vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, b->src_stride, &sse)
+ vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, src_stride, &sse)
/* checks if (r, c) has better score than previous best */
#define CHECK_BETTER(v, r, c) \
}, \
v = INT_MAX;)
-int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp9_variance_fn_ptr_t *vfp,
int *mvjcost, int *mvcost[2],
int *distortion,
unsigned int *sse1) {
- uint8_t *z = (*(b->base_src) + b->src);
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
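+  // All vf/svf calls in this function now take the plane-0 source stride
+  // in place of the removed BLOCK::src_stride.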
MACROBLOCKD *xd = &x->e_mbd;
int rr, rc, br, bc, hstep;
bestmv->as_mv.col <<= 3;
// calculate central point error
- besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ besterr = vfp->vf(y, y_stride, z, src_stride, sse1);
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
#undef MIN
#undef MAX
-int vp9_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+int vp9_find_best_sub_pixel_step(MACROBLOCK *x, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp9_variance_fn_ptr_t *vfp,
int_mv this_mv;
int_mv orig_mv;
int yrow_movedback = 0, ycol_movedback = 0;
- uint8_t *z = (*(b->base_src) + b->src);
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
int left, right, up, down, diag;
unsigned int sse;
int whichdir;
orig_mv = *bestmv;
// calculate central point error
- bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ bestmse = vfp->vf(y, y_stride, z, src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
- thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
}
this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
// go up then down and check error
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
- thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
}
this_mv.as_mv.row += 8;
- thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
case 0:
this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, src_stride,
+ &sse);
break;
case 1:
this_mv.as_mv.col += 4;
this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, src_stride,
+ &sse);
break;
case 2:
this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, src_stride, &sse);
break;
case 3:
default:
this_mv.as_mv.col += 4;
this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, src_stride, &sse);
break;
}
this_mv.as_mv.col = startmv.as_mv.col - 2;
thismse = vfp->svf(y, y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z,
- b->src_stride, &sse);
+ src_stride, &sse);
}
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
this_mv.as_mv.col += 4;
thismse = vfp->svf(y, y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
this_mv.as_mv.row = startmv.as_mv.row - 2;
thismse = vfp->svf(y, y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
} else {
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
}
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
this_mv.as_mv.row += 4;
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);;
+ thismse = vfp->svf(y - 1, y_stride,
+ SP(6), SP(this_mv.as_mv.row), z, src_stride, &sse);
}
} else {
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(6), z, src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
- thismse = vfp->svf(y - y_stride - 1, y_stride, SP(6), SP(6), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride - 1, y_stride,
+ SP(6), SP(6), z, src_stride, &sse);
}
}
if (startmv.as_mv.row & 7) {
this_mv.as_mv.row -= 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
} else {
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(6), z, src_stride, &sse);
}
break;
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col -= 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z,
- b->src_stride, &sse);
+ src_stride, &sse);
}
break;
this_mv.as_mv.row += 2;
thismse = vfp->svf(y, y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
break;
}
this_mv.as_mv.col = startmv.as_mv.col - 1;
thismse = vfp->svf(y, y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
}
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
this_mv.as_mv.col += 2;
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
- z, b->src_stride, &sse);
+ z, src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
if (startmv.as_mv.row & 7) {
this_mv.as_mv.row = startmv.as_mv.row - 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
} else {
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(7), z, src_stride, &sse);
}
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
}
this_mv.as_mv.row += 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col -= 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
- thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);;
+ thismse = vfp->svf(y - 1, y_stride,
+ SP(7), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
}
} else {
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col -= 1;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(7), z, src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
- thismse = vfp->svf(y - y_stride - 1, y_stride, SP(7), SP(7), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride - 1, y_stride,
+ SP(7), SP(7), z, src_stride, &sse);
}
}
if (startmv.as_mv.row & 7) {
this_mv.as_mv.row -= 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
} else {
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 7;
- thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - y_stride, y_stride,
+ SP(this_mv.as_mv.col), SP(7), z, src_stride, &sse);
}
break;
if (startmv.as_mv.col & 7) {
this_mv.as_mv.col -= 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
- thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y - 1, y_stride,
+ SP(7), SP(this_mv.as_mv.row), z, src_stride, &sse);
}
break;
case 3:
this_mv.as_mv.col += 1;
this_mv.as_mv.row += 1;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
+ thismse = vfp->svf(y, y_stride,
+ SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, src_stride, &sse);
break;
}
#undef SP
-int vp9_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+int vp9_find_best_half_pixel_step(MACROBLOCK *x, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp9_variance_fn_ptr_t *vfp,
int bestmse = INT_MAX;
int_mv startmv;
int_mv this_mv;
- uint8_t *z = (*(b->base_src) + b->src);
+ uint8_t *z = x->plane[0].src.buf;
+ int src_stride = x->plane[0].src.stride;
int left, right, up, down, diag;
unsigned int sse;
int whichdir;
startmv = *bestmv;
// calculate central point error
- bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
+ bestmse = vfp->vf(y, y_stride, z, src_stride, sse1);
*distortion = bestmse;
bestmse += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
- thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, src_stride, &sse);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
}
this_mv.as_mv.col += 8;
- thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_h(y, y_stride, z, src_stride, &sse);
right = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
error_per_bit, xd->allow_high_precision_mv);
// go up then down and check error
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
- thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, src_stride, &sse);
up = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
}
this_mv.as_mv.row += 8;
- thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_v(y, y_stride, z, src_stride, &sse);
down = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit,
xd->allow_high_precision_mv);
case 0:
this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride,
+ z, src_stride, &sse);
break;
case 1:
this_mv.as_mv.col += 4;
this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
- thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride,
+ z, src_stride, &sse);
break;
case 2:
this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, src_stride, &sse);
break;
case 3:
default:
this_mv.as_mv.col += 4;
this_mv.as_mv.row += 4;
- thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
+ thismse = vfp->svf_halfpix_hv(y, y_stride, z, src_stride, &sse);
break;
}
int vp9_hex_search
(
MACROBLOCK *x,
- BLOCK *b,
BLOCKD *d,
int_mv *ref_mv,
int_mv *best_mv,
MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}};
int i, j;
- uint8_t *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
int in_what_stride = d->pre_stride;
int br, bc;
int_mv this_mv;
#undef CHECK_POINT
#undef CHECK_BETTER
-int vp9_diamond_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+int vp9_diamond_search_sad_c(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int_mv *best_mv,
int search_param, int sad_per_bit, int *num00,
vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
int *mvcost[2], int_mv *center_mv) {
int i, j, step;
- uint8_t *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
uint8_t *in_what;
int in_what_stride = d->pre_stride;
uint8_t *best_address;
xd->allow_high_precision_mv);
}
-int vp9_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+int vp9_diamond_search_sadx4(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int_mv *best_mv, int search_param,
int sad_per_bit, int *num00,
vp9_variance_fn_ptr_t *fn_ptr,
int *mvjcost, int *mvcost[2], int_mv *center_mv) {
int i, j, step;
- uint8_t *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
uint8_t *in_what;
int in_what_stride = d->pre_stride;
uint8_t *best_address;
/* do_refine: If last step (1-away) of n-step search doesn't pick the center
point as the best match, we will do a final 1-away diamond
refining search */
-int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x, BLOCK *b,
+int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x,
BLOCKD *d, int_mv *mvp_full, int step_param,
int sadpb, int further_steps,
int do_refine, vp9_variance_fn_ptr_t *fn_ptr,
int_mv *ref_mv, int_mv *dst_mv) {
int_mv temp_mv;
int thissme, n, num00;
- int bestsme = cpi->diamond_search_sad(x, b, d, mvp_full, &temp_mv,
+ int bestsme = cpi->diamond_search_sad(x, d, mvp_full, &temp_mv,
step_param, sadpb, &num00,
fn_ptr, x->nmvjointcost,
x->mvcost, ref_mv);
if (num00)
num00--;
else {
- thissme = cpi->diamond_search_sad(x, b, d, mvp_full, &temp_mv,
+ thissme = cpi->diamond_search_sad(x, d, mvp_full, &temp_mv,
step_param + n, sadpb, &num00,
fn_ptr, x->nmvjointcost, x->mvcost,
ref_mv);
int search_range = 8;
int_mv best_mv;
best_mv.as_int = dst_mv->as_int;
- thissme = cpi->refining_search_sad(x, b, d, &best_mv, sadpb, search_range,
+ thissme = cpi->refining_search_sad(x, d, &best_mv, sadpb, search_range,
fn_ptr, x->nmvjointcost, x->mvcost,
ref_mv);
return bestsme;
}
-int vp9_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+int vp9_full_search_sad_c(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
int *mvcost[2],
int_mv *center_mv) {
- uint8_t *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
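+  // 'what' is the source block being matched; 'in_what' below walks the
+  // reference area it is searched in.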
uint8_t *in_what;
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
return INT_MAX;
}
-int vp9_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+int vp9_full_search_sadx3(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
int *mvcost[2], int_mv *center_mv) {
- uint8_t *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
uint8_t *in_what;
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
return INT_MAX;
}
-int vp9_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
+int vp9_full_search_sadx8(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp9_variance_fn_ptr_t *fn_ptr,
int *mvjcost, int *mvcost[2],
int_mv *center_mv) {
- uint8_t *what = (*(b->base_src) + b->src);
- int what_stride = b->src_stride;
+ uint8_t *what = x->plane[0].src.buf;
+ int what_stride = x->plane[0].src.stride;
uint8_t *in_what;
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
else
return INT_MAX;
}
-int vp9_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+int vp9_refining_search_sad_c(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int error_per_bit,
int search_range, vp9_variance_fn_ptr_t *fn_ptr,
int *mvjcost, int *mvcost[2], int_mv *center_mv) {
int i, j;
int this_row_offset, this_col_offset;
- int what_stride = b->src_stride;
+ int what_stride = x->plane[0].src.stride;
int in_what_stride = d->pre_stride;
- uint8_t *what = (*(b->base_src) + b->src);
+ uint8_t *what = x->plane[0].src.buf;
uint8_t *best_address = (uint8_t *)(*(d->base_pre) + d->pre +
(ref_mv->as_mv.row * (d->pre_stride)) +
ref_mv->as_mv.col);
return INT_MAX;
}
-int vp9_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+int vp9_refining_search_sadx4(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int error_per_bit,
int search_range, vp9_variance_fn_ptr_t *fn_ptr,
int *mvjcost, int *mvcost[2], int_mv *center_mv) {
int i, j;
int this_row_offset, this_col_offset;
- int what_stride = b->src_stride;
+ int what_stride = x->plane[0].src.stride;
int in_what_stride = d->pre_stride;
- uint8_t *what = (*(b->base_src) + b->src);
+ uint8_t *what = x->plane[0].src.buf;
uint8_t *best_address = (uint8_t *)(*(d->base_pre) + d->pre +
(ref_mv->as_mv.row * (d->pre_stride)) +
ref_mv->as_mv.col);
// Runs sequence of diamond searches in smaller steps for RD
struct VP9_COMP;
-int vp9_full_pixel_diamond(struct VP9_COMP *cpi, MACROBLOCK *x, BLOCK *b,
+int vp9_full_pixel_diamond(struct VP9_COMP *cpi, MACROBLOCK *x,
BLOCKD *d, int_mv *mvp_full, int step_param,
int sadpb, int further_steps, int do_refine,
vp9_variance_fn_ptr_t *fn_ptr,
int_mv *ref_mv, int_mv *dst_mv);
-int vp9_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+int vp9_hex_search(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int_mv *best_mv,
int search_param, int error_per_bit,
const vp9_variance_fn_ptr_t *vf,
int *mvjcost, int *mvcost[2],
int_mv *center_mv);
-typedef int (fractional_mv_step_fp) (MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv
+typedef int (fractional_mv_step_fp) (MACROBLOCK *x, BLOCKD *d, int_mv
*bestmv, int_mv *ref_mv, int error_per_bit, const vp9_variance_fn_ptr_t *vfp,
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse);
extern fractional_mv_step_fp vp9_find_best_sub_pixel_step_iteratively;
extern fractional_mv_step_fp vp9_find_best_sub_pixel_step;
extern fractional_mv_step_fp vp9_find_best_half_pixel_step;
-typedef int (*vp9_full_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+typedef int (*vp9_full_search_fn_t)(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int sad_per_bit,
int distance, vp9_variance_fn_ptr_t *fn_ptr,
int *mvjcost, int *mvcost[2],
int_mv *center_mv);
-typedef int (*vp9_refining_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+typedef int (*vp9_refining_search_fn_t)(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int sad_per_bit,
int distance,
vp9_variance_fn_ptr_t *fn_ptr,
int *mvjcost, int *mvcost[2],
int_mv *center_mv);
-typedef int (*vp9_diamond_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
+typedef int (*vp9_diamond_search_fn_t)(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int_mv *best_mv,
int search_param, int sad_per_bit,
int *num00,
int rate = 0;
int distortion;
VP9_COMMON *const cm = &cpi->common;
- BLOCK *be = x->block + ib;
BLOCKD *b = xd->block + ib;
+ const int src_stride = x->plane[0].src.stride;
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib,
+ x->plane[0].src.buf, src_stride);
int16_t* const src_diff =
raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib,
x->plane[0].src_diff);
vp9_intra4x4_predict(xd, b, mode, *(b->base_dst) + b->dst, b->dst_stride);
vp9_subtract_block(4, 4, src_diff, 16,
- *(be->base_src) + be->src, be->src_stride,
+ src, src_stride,
*(b->base_dst) + b->dst, b->dst_stride);
b->bmi.as_mode.first = mode;
- tx_type = get_tx_type_4x4(xd, be - x->block);
+ tx_type = get_tx_type_4x4(xd, ib);
if (tx_type != DCT_DCT) {
vp9_short_fht4x4(src_diff, coeff, 16, tx_type);
- vp9_ht_quantize_b_4x4(x, be - x->block, tx_type);
+ vp9_ht_quantize_b_4x4(x, ib, tx_type);
} else {
x->fwd_txm4x4(src_diff, coeff, 32);
- x->quantize_b_4x4(x, be - x->block, 16);
+ x->quantize_b_4x4(x, ib, 16);
}
tempa = ta;
MACROBLOCKD *xd = &x->e_mbd;
int64_t best_rd = INT64_MAX;
int distortion = 0, rate = 0;
- BLOCK *be = x->block + ib;
BLOCKD *b = xd->block + ib;
ENTROPY_CONTEXT_PLANES ta, tl;
ENTROPY_CONTEXT *ta0, *ta1, besta0 = 0, besta1 = 0;
// perform transformation of dimension 8x8
// note the input and output index mapping
int idx = (ib & 0x02) ? (ib + 2) : ib;
+ const int src_stride = x->plane[0].src.stride;
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib,
+ x->plane[0].src.buf, src_stride);
int16_t* const src_diff =
raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib,
x->plane[0].src_diff);
vp9_intra8x8_predict(xd, b, mode, *(b->base_dst) + b->dst, b->dst_stride);
vp9_subtract_block(8, 8, src_diff, 16,
- *(be->base_src) + be->src, be->src_stride,
+ src, src_stride,
*(b->base_dst) + b->dst, b->dst_stride);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
ib + iblock[i], 16);
int do_two = 0;
b = &xd->block[ib + iblock[i]];
- be = &x->block[ib + iblock[i]];
tx_type = get_tx_type_4x4(xd, ib + iblock[i]);
if (tx_type != DCT_DCT) {
vp9_short_fht4x4(src_diff, coeff, 16, tx_type);
}
}
b = &xd->block[ib];
- be = &x->block[ib];
rate += rate_t;
}
for (i = 0; i < 16; i++) {
if (labels[i] == which_label) {
BLOCKD *bd = &x->e_mbd.block[i];
- BLOCK *be = &x->block[i];
+ const int src_stride = x->plane[0].src.stride;
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, i,
+ x->plane[0].src.buf, src_stride);
int16_t* const src_diff =
raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, i,
x->plane[0].src_diff);
}
vp9_subtract_block(4, 4, src_diff, 16,
- *(be->base_src) + be->src, be->src_stride,
+ src, src_stride,
*(bd->base_dst) + bd->dst, bd->dst_stride);
x->fwd_txm4x4(src_diff, coeff, 32);
x->quantize_b_4x4(x, i, 16);
int which_mv;
const int idx = (ib & 8) + ((ib & 2) << 1);
BLOCKD *bd = &xd->block[ib];
- BLOCK *be = &x->block[ib];
+ const int src_stride = x->plane[0].src.stride;
+ uint8_t* const src =
+ raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib,
+ x->plane[0].src.buf, src_stride);
int16_t* const src_diff =
raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib,
x->plane[0].src_diff);
}
vp9_subtract_block(8, 8, src_diff, 16,
- *(be->base_src) + be->src, be->src_stride,
+ src, src_stride,
*(bd->base_dst) + bd->dst, bd->dst_stride);
if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
int16_t* const coeff = BLOCK_OFFSET(x->plane[0].coeff,
ib + iblock[j], 16);
bd = &xd->block[ib + iblock[j]];
- be = &x->block[ib + iblock[j]];
x->fwd_txm8x4(src_diff, coeff, 32);
x->quantize_b_4x4_pair(x, ib + iblock[j], ib + iblock[j] + 1, 16);
thisdistortion = vp9_block_error_c(coeff,
int step_param = 0;
int further_steps;
int thissme, bestsme = INT_MAX;
- BLOCK *c;
BLOCKD *e;
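+  // Save the plane-0 source view; it is re-pointed at this segment's first
+  // block below and restored after the label's motion search completes.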
+ const struct buf_2d orig_src = x->plane[0].src;
/* Is the best so far sufficiently good that we can't justify doing
 * a new motion search? */
// find first label
n = vp9_mbsplit_offset[segmentation][i];
- c = &x->block[n];
+ // adjust src pointer for this segment
+ x->plane[0].src.buf =
+ raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_MB16X16, 0, n,
+ x->plane[0].src.buf,
+ x->plane[0].src.stride);
e = &x->e_mbd.block[n];
- bestsme = vp9_full_pixel_diamond(cpi, x, c, e, &mvp_full, step_param,
+ bestsme = vp9_full_pixel_diamond(cpi, x, e, &mvp_full, step_param,
sadpb, further_steps, 0, v_fn_ptr,
bsi->ref_mv, &mode_mv[NEW4X4]);
clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
x->mv_row_min, x->mv_row_max);
- thissme = cpi->full_search_sad(x, c, e, &mvp_full,
+ thissme = cpi->full_search_sad(x, e, &mvp_full,
sadpb, 16, v_fn_ptr,
x->nmvjointcost, x->mvcost,
bsi->ref_mv);
if (bestsme < INT_MAX) {
int distortion;
unsigned int sse;
- cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
+ cpi->find_fractional_mv_step(x, e, &mode_mv[NEW4X4],
bsi->ref_mv, x->errorperbit, v_fn_ptr,
x->nmvjointcost, x->mvcost,
&distortion, &sse);
// save motion search result for use in compound prediction
seg_mvs[i][mbmi->ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
}
+
+ // restore src pointer
+ x->plane[0].src = orig_src;
} else if (mbmi->second_ref_frame > 0 && this_mode == NEW4X4) {
/* NEW4X4 */
/* motion search not completed? Then skip newmv for this block with
int best_sad = INT_MAX;
int this_sad = INT_MAX;
- BLOCK *b = &x->block[0];
- uint8_t *src_y_ptr = *(b->base_src);
+ uint8_t *src_y_ptr = x->plane[0].src.buf;
uint8_t *ref_y_ptr;
int row_offset, col_offset;
ref_y_ptr = ref_y_buffer + (ref_y_stride * row_offset) + col_offset;
// Find sad for current vector.
- this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, b->src_stride,
+ this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride,
ref_y_ptr, ref_y_stride,
0x7fffffff);
VP9_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
- BLOCK *b = &x->block[0];
BLOCKD *d = &xd->block[0];
const int is_comp_pred = (mbmi->second_ref_frame > 0);
#if CONFIG_COMP_INTERINTRA_PRED
// Further step/diamond searches as necessary
further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
- bestsme = vp9_full_pixel_diamond(cpi, x, b, d, &mvp_full, step_param,
+ bestsme = vp9_full_pixel_diamond(cpi, x, d, &mvp_full, step_param,
sadpb, further_steps, 1,
&cpi->fn_ptr[block_size],
&ref_mv[0], &tmp_mv);
if (bestsme < INT_MAX) {
int dis; /* TODO: use dis in distortion calculation later. */
unsigned int sse;
- cpi->find_fractional_mv_step(x, b, d, &tmp_mv,
+ cpi->find_fractional_mv_step(x, d, &tmp_mv,
&ref_mv[0],
x->errorperbit,
&cpi->fn_ptr[block_size],
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
- var = cpi->fn_ptr[block_size].vf(*(b->base_src), b->src_stride,
+ var = cpi->fn_ptr[block_size].vf(x->plane[0].src.buf,
+ x->plane[0].src.stride,
xd->plane[0].dst.buf,
xd->plane[0].dst.stride,
&sse);
model_rd_from_var_lapndz(var, 16 * bw * 16 * bh,
xd->block[0].dequant[1] >> 3,
&tmp_rate_y, &tmp_dist_y);
- var = cpi->fn_ptr[uv_block_size].vf(x->src.u_buffer, x->src.uv_stride,
+ var = cpi->fn_ptr[uv_block_size].vf(x->plane[1].src.buf,
+ x->plane[1].src.stride,
xd->plane[1].dst.buf,
xd->plane[1].dst.stride,
&sse);
model_rd_from_var_lapndz(var, 8 * bw * 8 * bh,
xd->block[16].dequant[1] >> 3,
&tmp_rate_u, &tmp_dist_u);
- var = cpi->fn_ptr[uv_block_size].vf(x->src.v_buffer, x->src.uv_stride,
+ var = cpi->fn_ptr[uv_block_size].vf(x->plane[2].src.buf,
+                                      x->plane[2].src.stride,
xd->plane[2].dst.buf,
xd->plane[1].dst.stride,
&sse);
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
- var = vp9_variance16x16(*(b->base_src), b->src_stride,
+ var = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
&sse);
// Note our transform coeffs are 8 times an orthogonal transform.
// we need to divide by 8 before sending to modeling function.
model_rd_from_var_lapndz(var, 16 * 16, xd->block[0].dequant[1] >> 3,
&tmp_rate_y, &tmp_dist_y);
- var = vp9_variance8x8(x->src.u_buffer, x->src.uv_stride,
+ var = vp9_variance8x8(x->plane[1].src.buf, x->plane[1].src.stride,
xd->plane[1].dst.buf, xd->plane[1].dst.stride,
&sse);
model_rd_from_var_lapndz(var, 8 * 8, xd->block[16].dequant[1] >> 3,
&tmp_rate_u, &tmp_dist_u);
- var = vp9_variance8x8(x->src.v_buffer, x->src.uv_stride,
+  var = vp9_variance8x8(x->plane[2].src.buf, x->plane[2].src.stride,
xd->plane[2].dst.buf, xd->plane[1].dst.stride,
&sse);
model_rd_from_var_lapndz(var, 8 * 8, xd->block[20].dequant[1] >> 3,
threshold = x->encode_breakout;
if (bsize != BLOCK_SIZE_MB16X16) {
- var = cpi->fn_ptr[block_size].vf(*(b->base_src), b->src_stride,
+ var = cpi->fn_ptr[block_size].vf(x->plane[0].src.buf,
+ x->plane[0].src.stride,
xd->plane[0].dst.buf,
xd->plane[0].dst.stride,
&sse);
} else {
- var = vp9_variance16x16(*(b->base_src), b->src_stride,
+ var = vp9_variance16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
&sse);
}
if (bsize != BLOCK_SIZE_MB16X16) {
unsigned int sse2u, sse2v;
// FIXME(rbultje): mb predictors predict into xd->predictor
- var = cpi->fn_ptr[uv_block_size].vf(x->src.u_buffer, x->src.uv_stride,
+ var = cpi->fn_ptr[uv_block_size].vf(x->plane[1].src.buf,
+ x->plane[1].src.stride,
xd->plane[1].dst.buf,
xd->plane[1].dst.stride, &sse2u);
- var = cpi->fn_ptr[uv_block_size].vf(x->src.v_buffer, x->src.uv_stride,
+ var = cpi->fn_ptr[uv_block_size].vf(x->plane[2].src.buf,
+                                        x->plane[2].src.stride,
xd->plane[2].dst.buf,
xd->plane[1].dst.stride, &sse2v);
sse2 = sse2u + sse2v;
} else {
unsigned int sse2u, sse2v;
- var = vp9_variance8x8(x->src.u_buffer, x->src.uv_stride,
+ var = vp9_variance8x8(x->plane[1].src.buf, x->plane[1].src.stride,
xd->plane[1].dst.buf, xd->plane[1].dst.stride,
&sse2u);
- var = vp9_variance8x8(x->src.v_buffer, x->src.uv_stride,
+      var = vp9_variance8x8(x->plane[2].src.buf, x->plane[2].src.stride,
xd->plane[2].dst.buf, xd->plane[1].dst.stride,
&sse2v);
sse2 = sse2u + sse2v;
int sadpb = x->sadperbit16;
int bestsme = INT_MAX;
- BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
int_mv best_ref_mv1;
int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
// Save input state
- uint8_t **base_src = b->base_src;
- int src = b->src;
- int src_stride = b->src_stride;
+ struct buf_2d src = x->plane[0].src;
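+  // A struct copy saves both the source pointer and its stride.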
uint8_t **base_pre = d->base_pre;
int pre = d->pre;
int pre_stride = d->pre_stride;
best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >> 3;
// Setup frame pointers
- b->base_src = &arf_frame->y_buffer;
- b->src_stride = arf_frame->y_stride;
- b->src = mb_offset;
+ x->plane[0].src.buf = arf_frame->y_buffer + mb_offset;
+ x->plane[0].src.stride = arf_frame->y_stride;
d->base_pre = &frame_ptr->y_buffer;
d->pre_stride = frame_ptr->y_stride;
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
// Ignore mv costing by sending NULL pointer instead of cost arrays
- bestsme = vp9_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv[0],
+ bestsme = vp9_hex_search(x, d, &best_ref_mv1_full, &d->bmi.as_mv[0],
step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
NULL, NULL, NULL, NULL,
&best_ref_mv1);
int distortion;
unsigned int sse;
// Ignore mv costing by sending NULL pointer instead of cost array
- bestsme = cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv[0],
+ bestsme = cpi->find_fractional_mv_step(x, d, &d->bmi.as_mv[0],
&best_ref_mv1,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
#endif
  // Restore input state
- b->base_src = base_src;
- b->src = src;
- b->src_stride = src_stride;
+ x->plane[0].src = src;
d->base_pre = base_pre;
d->pre = pre;
d->pre_stride = pre_stride;