/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_config.h"
#include "vpx_mem/vpx_mem.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_thread_common.h"
#include "vp9/common/vp9_reconinter.h"

#if CONFIG_MULTITHREAD
static INLINE void mutex_lock(pthread_mutex_t *const mutex) {
  const int kMaxTryLocks = 4000;
  int locked = 0;
  int i;

  // Spin on trylock a bounded number of times before falling back to a
  // blocking lock; the critical sections guarded here are short.
  for (i = 0; i < kMaxTryLocks; ++i) {
    if (!pthread_mutex_trylock(mutex)) {
      locked = 1;
      break;
    }
  }

  if (!locked)
    pthread_mutex_lock(mutex);
}
#endif  // CONFIG_MULTITHREAD
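
// Row synchronization overview: each superblock row publishes its filtering
// progress in cur_sb_col[]. Before filtering superblock (r, c), sync_read()
// below waits until row r - 1 has advanced at least sync_range superblocks
// past column c; sync_write() publishes progress and signals the waiting row.
// Together they keep the rows advancing in a wavefront pattern.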
static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) {
#if CONFIG_MULTITHREAD
  const int nsync = lf_sync->sync_range;

  if (r && !(c & (nsync - 1))) {
    pthread_mutex_t *const mutex = &lf_sync->mutex_[r - 1];
    mutex_lock(mutex);

    while (c > lf_sync->cur_sb_col[r - 1] - nsync) {
      pthread_cond_wait(&lf_sync->cond_[r - 1], mutex);
    }
    pthread_mutex_unlock(mutex);
  }
#else
  (void)lf_sync;
  (void)r;
  (void)c;
#endif  // CONFIG_MULTITHREAD
}

static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c,
                              const int sb_cols) {
#if CONFIG_MULTITHREAD
  const int nsync = lf_sync->sync_range;
  int cur;
  // Only signal when there are enough filtered SBs for the next row to run.
  int sig = 1;

  if (c < sb_cols - 1) {
    cur = c;
    if (c % nsync)
      sig = 0;
  } else {
    // Last superblock in the row: publish a column beyond the row end so the
    // next row can run to completion.
    cur = sb_cols + nsync;
  }

  if (sig) {
    mutex_lock(&lf_sync->mutex_[r]);

    lf_sync->cur_sb_col[r] = cur;

    pthread_cond_signal(&lf_sync->cond_[r]);
    pthread_mutex_unlock(&lf_sync->mutex_[r]);
  }
#else
  (void)lf_sync;
  (void)r;
  (void)c;
  (void)sb_cols;
#endif  // CONFIG_MULTITHREAD
}
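
// Rows are interleaved across workers: worker i handles superblock rows
// start + i * MI_BLOCK_SIZE, stepping by num_workers * MI_BLOCK_SIZE, so
// adjacent rows always belong to different workers and the synchronization
// above enforces the top-to-bottom dependency between them.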
// Implement row loopfiltering for each thread.
static
void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer,
                             VP9_COMMON *const cm,
                             struct macroblockd_plane planes[MAX_MB_PLANE],
                             int start, int stop, int y_only,
                             VP9LfSync *const lf_sync) {
  const int num_planes = y_only ? 1 : MAX_MB_PLANE;
  const int use_420 = y_only || (planes[1].subsampling_y == 1 &&
                                 planes[1].subsampling_x == 1);
  const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
  int mi_row, mi_col;

  for (mi_row = start; mi_row < stop;
       mi_row += lf_sync->num_workers * MI_BLOCK_SIZE) {
    MODE_INFO *const mi = cm->mi + mi_row * cm->mi_stride;

    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
      const int r = mi_row >> MI_BLOCK_SIZE_LOG2;
      const int c = mi_col >> MI_BLOCK_SIZE_LOG2;
      LOOP_FILTER_MASK lfm;
      int plane;

      sync_read(lf_sync, r, c);

      vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);

      // TODO(JBB): Make setup_mask work for non 420.
      if (use_420)
        vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
                       &lfm);

      for (plane = 0; plane < num_planes; ++plane) {
        if (use_420)
          vp9_filter_block_plane(cm, &planes[plane], mi_row, &lfm);
        else
          vp9_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
                                        mi_row, mi_col);
      }

      sync_write(lf_sync, r, c, sb_cols);
    }
  }
}

// Row-based multi-threaded loopfilter hook
static int loop_filter_row_worker(VP9LfSync *const lf_sync,
                                  LFWorkerData *const lf_data) {
  thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
                          lf_data->start, lf_data->stop, lf_data->y_only,
                          lf_sync);
  return 1;
}

static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame,
                                VP9_COMMON *cm,
                                struct macroblockd_plane planes[MAX_MB_PLANE],
                                int start, int stop, int y_only,
                                VP9Worker *workers, int nworkers,
                                VP9LfSync *lf_sync) {
  const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
  // Number of superblock rows
  const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
  // Decoder may allocate more threads than number of tiles based on user's
  // input.
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int num_workers = MIN(nworkers, tile_cols);
  int i;

  if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
      num_workers > lf_sync->num_workers) {
    vp9_loop_filter_dealloc(lf_sync);
    vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
  }

  // Initialize cur_sb_col to -1 for all SB rows.
  vpx_memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);

  // Set up loopfilter thread data.
  // The decoder is capping num_workers because it has been observed that using
  // more threads on the loopfilter than there are cores will hurt performance
  // on Android. This is because the system will only schedule the tile decode
  // workers on cores equal to the number of tile columns. Then if the decoder
  // tries to use more threads for the loopfilter, it will hurt performance
  // because of contention. If the multithreading code changes in the future
  // then the number of workers used by the loopfilter should be revisited.
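  // (Illustrative example: with two tile columns on a four-core device, the
  // tile decode workers occupy two cores; uncapped, a four-worker loopfilter
  // would contend for those same two cores, hence the MIN(nworkers, tile_cols)
  // clamp above.)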
  for (i = 0; i < num_workers; ++i) {
    VP9Worker *const worker = &workers[i];
    LFWorkerData *const lf_data = &lf_sync->lfdata[i];

    worker->hook = (VP9WorkerHook)loop_filter_row_worker;
    worker->data1 = lf_sync;
    worker->data2 = lf_data;

    // Loopfilter data
    vp9_loop_filter_data_reset(lf_data, frame, cm, planes);
    lf_data->start = start + i * MI_BLOCK_SIZE;
    lf_data->stop = stop;
    lf_data->y_only = y_only;

    // Start loopfiltering. The last worker runs its hook directly on the
    // calling thread; all others are launched on their own threads.
    if (i == num_workers - 1) {
      winterface->execute(worker);
    } else {
      winterface->launch(worker);
    }
  }

  // Wait until all rows are finished.
  for (i = 0; i < num_workers; ++i) {
    winterface->sync(&workers[i]);
  }
}

void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
                              VP9_COMMON *cm,
                              struct macroblockd_plane planes[MAX_MB_PLANE],
                              int frame_filter_level,
                              int y_only, int partial_frame,
                              VP9Worker *workers, int num_workers,
                              VP9LfSync *lf_sync) {
  int start_mi_row, end_mi_row, mi_rows_to_filter;

  if (!frame_filter_level) return;

  start_mi_row = 0;
  mi_rows_to_filter = cm->mi_rows;
  if (partial_frame && cm->mi_rows > 8) {
    // Partial frame: filter roughly one-eighth of the frame, starting at the
    // middle row aligned down to a multiple of 8 MI units (a 64-pixel
    // superblock boundary).
    start_mi_row = cm->mi_rows >> 1;
    start_mi_row &= 0xfffffff8;
    mi_rows_to_filter = MAX(cm->mi_rows / 8, 8);
  }
  end_mi_row = start_mi_row + mi_rows_to_filter;
  vp9_loop_filter_frame_init(cm, frame_filter_level);

  loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row,
                      y_only, workers, num_workers, lf_sync);
}
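
// Usage sketch (illustrative; the caller-side names below are assumptions,
// not taken from this file). A decoder holding a VP9Worker pool might run the
// filter for a freshly decoded frame roughly as:
//
//   vp9_loop_filter_frame_mt(frame_buffer, cm, xd->plane,
//                            cm->lf.filter_level, /*y_only=*/0,
//                            /*partial_frame=*/0, tile_workers,
//                            num_tile_workers, &lf_row_sync);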

// Set up nsync by width.
static INLINE int get_sync_range(int width) {
  // nsync numbers are picked by testing. For example, for 4k
  // video, using 4 gives best performance.
  if (width < 640)
    return 1;
  else if (width <= 1280)
    return 2;
  else if (width <= 4096)
    return 4;
  else
    return 8;
}
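
// Note: every value returned above is a power of two; sync_read() relies on
// this when it tests !(c & (nsync - 1)), which isolates multiples of nsync
// only for power-of-two values.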

// Allocate memory for lf row synchronization
void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
                           int width, int num_workers) {
  lf_sync->rows = rows;
#if CONFIG_MULTITHREAD
  {
    int i;

    CHECK_MEM_ERROR(cm, lf_sync->mutex_,
                    vpx_malloc(sizeof(*lf_sync->mutex_) * rows));
    if (lf_sync->mutex_) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&lf_sync->mutex_[i], NULL);
      }
    }

    CHECK_MEM_ERROR(cm, lf_sync->cond_,
                    vpx_malloc(sizeof(*lf_sync->cond_) * rows));
    if (lf_sync->cond_) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&lf_sync->cond_[i], NULL);
      }
    }
  }
#endif  // CONFIG_MULTITHREAD

  CHECK_MEM_ERROR(cm, lf_sync->lfdata,
                  vpx_malloc(num_workers * sizeof(*lf_sync->lfdata)));
  lf_sync->num_workers = num_workers;

  CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col,
                  vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));

  // Set up nsync.
  lf_sync->sync_range = get_sync_range(width);
}

// Deallocate lf synchronization related mutex and data
void vp9_loop_filter_dealloc(VP9LfSync *lf_sync) {
  if (lf_sync != NULL) {
#if CONFIG_MULTITHREAD
    int i;

    if (lf_sync->mutex_ != NULL) {
      for (i = 0; i < lf_sync->rows; ++i) {
        pthread_mutex_destroy(&lf_sync->mutex_[i]);
      }
      vpx_free(lf_sync->mutex_);
    }
    if (lf_sync->cond_ != NULL) {
      for (i = 0; i < lf_sync->rows; ++i) {
        pthread_cond_destroy(&lf_sync->cond_[i]);
      }
      vpx_free(lf_sync->cond_);
    }
#endif  // CONFIG_MULTITHREAD
    vpx_free(lf_sync->lfdata);
    vpx_free(lf_sync->cur_sb_col);
    // Clear the structure as the source of this call may be a resize, in
    // which case this call will be followed by an _alloc() which may fail.
    vp9_zero(*lf_sync);
  }
}

// Accumulate frame counts.
void vp9_accumulate_frame_counts(VP9_COMMON *cm, FRAME_COUNTS *counts,
                                 int is_dec) {
  int i, j, k, l, m;

  for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
    for (j = 0; j < INTRA_MODES; j++)
      cm->counts.y_mode[i][j] += counts->y_mode[i][j];

  for (i = 0; i < INTRA_MODES; i++)
    for (j = 0; j < INTRA_MODES; j++)
      cm->counts.uv_mode[i][j] += counts->uv_mode[i][j];

  for (i = 0; i < PARTITION_CONTEXTS; i++)
    for (j = 0; j < PARTITION_TYPES; j++)
      cm->counts.partition[i][j] += counts->partition[i][j];

  if (is_dec) {
    int n;
    for (i = 0; i < TX_SIZES; i++)
      for (j = 0; j < PLANE_TYPES; j++)
        for (k = 0; k < REF_TYPES; k++)
          for (l = 0; l < COEF_BANDS; l++)
            for (m = 0; m < COEFF_CONTEXTS; m++) {
              cm->counts.eob_branch[i][j][k][l][m] +=
                  counts->eob_branch[i][j][k][l][m];
              for (n = 0; n < UNCONSTRAINED_NODES + 1; n++)
                cm->counts.coef[i][j][k][l][m][n] +=
                    counts->coef[i][j][k][l][m][n];
            }
  } else {
    for (i = 0; i < TX_SIZES; i++)
      for (j = 0; j < PLANE_TYPES; j++)
        for (k = 0; k < REF_TYPES; k++)
          for (l = 0; l < COEF_BANDS; l++)
            for (m = 0; m < COEFF_CONTEXTS; m++)
              cm->counts.eob_branch[i][j][k][l][m] +=
                  counts->eob_branch[i][j][k][l][m];
    // In the encoder, cm->counts.coef is only updated at frame
    // level, so there is no need to accumulate it here.
    // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++)
    //   cm->counts.coef[i][j][k][l][m][n] +=
    //       counts->coef[i][j][k][l][m][n];
  }

  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    for (j = 0; j < SWITCHABLE_FILTERS; j++)
      cm->counts.switchable_interp[i][j] += counts->switchable_interp[i][j];

  for (i = 0; i < INTER_MODE_CONTEXTS; i++)
    for (j = 0; j < INTER_MODES; j++)
      cm->counts.inter_mode[i][j] += counts->inter_mode[i][j];

  for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
    for (j = 0; j < 2; j++)
      cm->counts.intra_inter[i][j] += counts->intra_inter[i][j];

  for (i = 0; i < COMP_INTER_CONTEXTS; i++)
    for (j = 0; j < 2; j++)
      cm->counts.comp_inter[i][j] += counts->comp_inter[i][j];

  for (i = 0; i < REF_CONTEXTS; i++)
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cm->counts.single_ref[i][j][k] += counts->single_ref[i][j][k];

  for (i = 0; i < REF_CONTEXTS; i++)
    for (j = 0; j < 2; j++)
      cm->counts.comp_ref[i][j] += counts->comp_ref[i][j];

  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES; j++)
      cm->counts.tx.p32x32[i][j] += counts->tx.p32x32[i][j];

    for (j = 0; j < TX_SIZES - 1; j++)
      cm->counts.tx.p16x16[i][j] += counts->tx.p16x16[i][j];

    for (j = 0; j < TX_SIZES - 2; j++)
      cm->counts.tx.p8x8[i][j] += counts->tx.p8x8[i][j];
  }

  for (i = 0; i < SKIP_CONTEXTS; i++)
    for (j = 0; j < 2; j++)
      cm->counts.skip[i][j] += counts->skip[i][j];

  for (i = 0; i < MV_JOINTS; i++)
    cm->counts.mv.joints[i] += counts->mv.joints[i];

  for (k = 0; k < 2; k++) {
    nmv_component_counts *comps = &cm->counts.mv.comps[k];
    nmv_component_counts *comps_t = &counts->mv.comps[k];

    for (i = 0; i < 2; i++) {
      comps->sign[i] += comps_t->sign[i];
      comps->class0_hp[i] += comps_t->class0_hp[i];
      comps->hp[i] += comps_t->hp[i];
    }

    for (i = 0; i < MV_CLASSES; i++)
      comps->classes[i] += comps_t->classes[i];

    for (i = 0; i < CLASS0_SIZE; i++) {
      comps->class0[i] += comps_t->class0[i];
      for (j = 0; j < MV_FP_SIZE; j++)
        comps->class0_fp[i][j] += comps_t->class0_fp[i][j];
    }

    for (i = 0; i < MV_OFFSET_BITS; i++)
      for (j = 0; j < 2; j++)
        comps->bits[i][j] += comps_t->bits[i][j];

    for (i = 0; i < MV_FP_SIZE; i++)
      comps->fp[i] += comps_t->fp[i];
  }
}