if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) {
cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
}
+ pbi->common.error.setjmp = 0;
goto decode_exit;
}
}
pbi->common.error.error_code = VPX_CODEC_ERROR;
+ // Propagate the error info.
+ if (pbi->mb.error_info.error_code != 0) {
+ pbi->common.error.error_code = pbi->mb.error_info.error_code;
+ memcpy(pbi->common.error.detail, pbi->mb.error_info.detail,
+ sizeof(pbi->mb.error_info.detail));
+ }
goto decode_exit;
}
pbi->last_time_stamp = time_stamp;
decode_exit:
- pbi->common.error.setjmp = 0;
vpx_clear_system_state();
return retcode;
}
xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;
+ /* propagate errors from reference frames */
+ xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];
+
+ if (xd->corrupted) {
+ // Move the current decoding macroblock to the end of the row for all rows
+ // assigned to this thread, so that other threads won't be left waiting.
+ for (; mb_row < pc->mb_rows;
+ mb_row += (pbi->decoding_thread_count + 1)) {
+ current_mb_col = &pbi->mt_current_mb_col[mb_row];
+ vpx_atomic_store_release(current_mb_col, pc->mb_cols + nsync);
+ }
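+ // vpx_internal_error() longjmps back to the handler this thread installed
+ // with setjmp() (in thread_decoding_proc() or vp8mt_decode_mb_rows()).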
+ vpx_internal_error(&xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ "Corrupted reference frame");
+ }
+
xd->pre.y_buffer =
ref_buffer[xd->mode_info_context->mbmi.ref_frame][0] + recon_yoffset;
xd->pre.u_buffer =
ref_buffer[xd->mode_info_context->mbmi.ref_frame][1] + recon_uvoffset;
xd->pre.v_buffer =
ref_buffer[xd->mode_info_context->mbmi.ref_frame][2] + recon_uvoffset;
- /* propagate errors from reference frames */
- xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];
-
mt_decode_macroblock(pbi, xd, 0);
xd->left_available = 1;
xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count;
}
- /* signal end of frame decoding if this thread processed the last mb_row */
- if (last_mb_row == (pc->mb_rows - 1)) sem_post(&pbi->h_event_end_decoding);
+ /* signal that this thread has finished decoding the current frame */
+ if (last_mb_row + (int)pbi->decoding_thread_count + 1 >= pc->mb_rows)
+ sem_post(&pbi->h_event_end_decoding);
}
static THREAD_FUNCTION thread_decoding_proc(void *p_data) {
} else {
MACROBLOCKD *xd = &mbrd->mbd;
xd->left_context = &mb_row_left_context;
-
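+ // Install a per-thread error handler; a corrupted frame longjmps here.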
+ if (setjmp(xd->error_info.jmp)) {
+ xd->error_info.setjmp = 0;
+ // Signal the end of decoding for the current thread.
+ sem_post(&pbi->h_event_end_decoding);
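+ // Skip the rest of this frame and go back to waiting for the next
+ // start-of-decoding signal.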
+ continue;
+ }
+ xd->error_info.setjmp = 1;
mt_decode_mb_rows(pbi, xd, ithread + 1);
}
}
}
}
-void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
+int vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
VP8_COMMON *pc = &pbi->common;
unsigned int i;
int j;
sem_post(&pbi->h_event_start_decoding[i]);
}
+ if (setjmp(xd->error_info.jmp)) {
+ xd->error_info.setjmp = 0;
+ xd->corrupted = 1;
+ // Wait for other threads to finish. This prevents other threads from
+ // decoding the current frame while the main thread starts decoding the
+ // next frame, which would cause a data race.
+ for (i = 0; i < pbi->decoding_thread_count; ++i)
+ sem_wait(&pbi->h_event_end_decoding);
+ return -1;
+ }
+
+ xd->error_info.setjmp = 1;
mt_decode_mb_rows(pbi, xd, 0);
- sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */
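+ // Each worker thread and the main thread post h_event_end_decoding once
+ // per frame (at the end of their rows or on the error path), so wait for
+ // a post from every thread.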
+ for (i = 0; i < pbi->decoding_thread_count + 1; ++i)
+ sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */
+
+ return 0;
}