int dst_fb_idx = cm->new_fb_idx;
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
- int seg_map_index = (mb_row * cpi->common.mb_cols);
+ int map_index = (mb_row * cpi->common.mb_cols);
-
+#if CONFIG_SEGMENTATION
+ int left_id, above_id;
+ int sum;
+#endif
#if CONFIG_MULTITHREAD
const int nsync = cpi->mt_sync_range;
const int rightmost_col = cm->mb_cols - 1;
cpi->tplist[mb_row].stop = *tp;
- x->gf_active_ptr++; // Increment pointer into gf useage flags structure for next mb
+ // Increment pointer into gf usage flags structure.
+ x->gf_active_ptr++;
+
+ // Increment the activity mask pointers.
+ x->mb_activity_ptr++;
+ x->mb_norm_activity_ptr++;
+ if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
+ xd->mode_info_context->mbmi.segment_id = 0;
+ else
+ xd->mode_info_context->mbmi.segment_id = 1;
+
for (i = 0; i < 16; i++)
vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));
// this is to account for the border
xd->mode_info_context++;
x->partition_info++;
- x->activity_sum += activity_sum;
-
#if CONFIG_MULTITHREAD
if ((cpi->b_multi_threaded != 0) && (mb_row == cm->mb_rows - 1))
{