libhb: tasksets API provided by scsiguy
author    jstebbins <jstebbins.hb@gmail.com>
          Thu, 17 May 2012 19:51:35 +0000 (19:51 +0000)
committer jstebbins <jstebbins.hb@gmail.com>
          Thu, 17 May 2012 19:51:35 +0000 (19:51 +0000)
This is an easier-to-use API for launching multithreaded tasks. It is also more
portable, since it does not rely on undefined/implementation-specific behavior
of POSIX mutexes, namely the ability of one thread to unlock a mutex locked by
another thread.

git-svn-id: svn://svn.handbrake.fr/HandBrake/trunk@4685 b64f7644-9d1e-0410-96f1-a4d463321fa5
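
For reference, here is a minimal sketch of the taskset usage pattern, built only
from the calls that appear in this commit (taskset_init, taskset_thread_args,
taskset_thread_spawn, taskset_thread_wait4start, taskset_thread_stop,
taskset_thread_complete, taskset_cycle, taskset_fini). The my_work_t /
my_filter_t names are illustrative placeholders, not part of libhb.

    #include "hb.h"
    #include "taskset.h"

    typedef struct my_filter_s my_filter_t;

    typedef struct
    {
        my_filter_t *ctx;
        int          segment;   /* which slice of the frame this thread owns */
    } my_work_t;

    struct my_filter_s
    {
        int       cpu_count;
        taskset_t taskset;
    };

    static void my_worker( void *args_v )
    {
        my_work_t   *args = args_v;
        my_filter_t *ctx  = args->ctx;
        int          run  = 1;

        while( run )
        {
            /* Block until the driver calls taskset_cycle(). */
            taskset_thread_wait4start( &ctx->taskset, args->segment );

            if( taskset_thread_stop( &ctx->taskset, args->segment ) )
            {
                /* taskset_fini() asked this thread to exit. */
                run = 0;
            }
            else
            {
                /* ... process this thread's segment here ... */
            }

            /* Always report completion, even on the shutdown pass. */
            taskset_thread_complete( &ctx->taskset, args->segment );
        }
    }

    static int my_filter_init( my_filter_t *ctx )
    {
        int i;

        ctx->cpu_count = hb_get_cpu_count();
        if( taskset_init( &ctx->taskset, /*thread_count*/ctx->cpu_count,
                          sizeof( my_work_t ) ) == 0 )
            return -1;

        for( i = 0; i < ctx->cpu_count; i++ )
        {
            my_work_t *args = taskset_thread_args( &ctx->taskset, i );
            args->ctx     = ctx;
            args->segment = i;

            if( taskset_thread_spawn( &ctx->taskset, i, "my_filter_segment",
                                      my_worker, HB_NORMAL_PRIORITY ) == 0 )
                return -1;
        }
        return 0;
    }

    static void my_filter_work( my_filter_t *ctx )
    {
        /* Run every worker over its segment once and wait for all of them. */
        taskset_cycle( &ctx->taskset );
    }

    static void my_filter_close( my_filter_t *ctx )
    {
        /* Stops the workers, joins them, and releases the per-thread args. */
        taskset_fini( &ctx->taskset );
    }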

libhb/decomb.c
libhb/deinterlace.c
libhb/ports.c
libhb/ports.h
libhb/rotate.c

diff --git a/libhb/decomb.c b/libhb/decomb.c
index 50773b772a58632e8636873402f3230f916f6e7e..20041459d6743732270413b43974455309ccd638 100644
@@ -80,6 +80,7 @@ which will feed EEDI2 interpolations to yadif.
 #include "mpeg2dec/mpeg2.h"
 #include "eedi2.h"
 #include "mcdeint.h"
+#include "taskset.h"
 
 #define PARITY_DEFAULT   -1
 
@@ -107,21 +108,10 @@ struct yadif_arguments_s {
     uint8_t **dst;
     int parity;
     int tff;
-    int stop;
     int is_combed;
 };
 
-struct decomb_arguments_s {
-    int stop;
-};
-
-struct eedi2_arguments_s {
-    int stop;
-};
-
 typedef struct yadif_arguments_s yadif_arguments_t;
-typedef struct decomb_arguments_s decomb_arguments_t;
-typedef struct eedi2_arguments_s eedi2_arguments_t;
 
 typedef struct eedi2_thread_arg_s {
     hb_filter_private_t *pv;
@@ -203,20 +193,12 @@ struct hb_filter_private_s
 
     int              cpu_count;
 
-    hb_thread_t    ** yadif_threads;         // Threads for Yadif - one per CPU
-    hb_lock_t      ** yadif_begin_lock;      // Thread has work
-    hb_lock_t      ** yadif_complete_lock;   // Thread has completed work
-    yadif_arguments_t *yadif_arguments;      // Arguments to thread for work
+    taskset_t        yadif_taskset;       // Threads for Yadif - one per CPU
+    yadif_arguments_t *yadif_arguments;   // Arguments to thread for work
 
-    hb_thread_t    ** decomb_threads;        // Threads for comb detection - one per CPU
-    hb_lock_t      ** decomb_begin_lock;     // Thread has work
-    hb_lock_t      ** decomb_complete_lock;  // Thread has completed work
-    decomb_arguments_t *decomb_arguments;    // Arguments to thread for work
+    taskset_t        decomb_taskset;      // Threads for comb detection - one per CPU
 
-    hb_thread_t    ** eedi2_threads;        // Threads for eedi2 - one per plane
-    hb_lock_t      ** eedi2_begin_lock;     // Thread has work
-    hb_lock_t      ** eedi2_complete_lock;  // Thread has completed work
-    eedi2_arguments_t *eedi2_arguments;    // Arguments to thread for work
+    taskset_t        eedi2_taskset;       // Threads for eedi2 - one per plane
 };
 
 static int hb_decomb_init( hb_filter_object_t * filter,
@@ -1314,7 +1296,6 @@ void eedi2_interpolate_plane( hb_filter_private_t * pv, int k )
  */
 void eedi2_filter_thread( void *thread_args_v )
 {
-    eedi2_arguments_t *eedi2_work = NULL;
     hb_filter_private_t * pv;
     int run = 1;
     int plane;
@@ -1328,31 +1309,29 @@ void eedi2_filter_thread( void *thread_args_v )
     while( run )
     {
         /*
-         * Wait here until there is work to do. hb_lock() blocks until
-         * render releases it to say that there is more work to do.
+         * Wait here until there is work to do.
          */
-        hb_lock( pv->eedi2_begin_lock[plane] );
+        taskset_thread_wait4start( &pv->eedi2_taskset, plane );
 
-        eedi2_work = &pv->eedi2_arguments[plane];
-
-        if( eedi2_work->stop )
+        if( taskset_thread_stop( &pv->eedi2_taskset, plane ) )
         {
             /*
              * No more work to do, exit this thread.
              */
             run = 0;
-            continue;
         }
-
-        /*
-         * Process plane
-         */
+        else
+        { 
+            /*
+             * Process plane
+             */
             eedi2_interpolate_plane( pv, plane );
-
+        }
+        
         /*
          * Finished this segment, let everyone know.
          */
-        hb_unlock( pv->eedi2_complete_lock[plane] );
+        taskset_thread_complete( &pv->eedi2_taskset, plane );
     }
     free( thread_args_v );
 }
@@ -1370,30 +1349,11 @@ void eedi2_planer( hb_filter_private_t * pv )
         eedi2_fill_half_height_buffer_plane( &pv->ref[1][i][pitch*start_line], pv->eedi_half[SRCPF][i], pitch, pv->height[i] );
     }
 
-    int plane;
-    for( plane = 0; plane < 3; plane++ )
-    {
-        /*
-         * Let the thread for this plane know that we've setup work
-         * for it by releasing the begin lock (ensuring that the
-         * complete lock is already locked so that we block when
-         * we try to lock it again below).
-         */
-        hb_lock( pv->eedi2_complete_lock[plane] );
-        hb_unlock( pv->eedi2_begin_lock[plane] );
-    }
-
     /*
-     * Wait until all three threads have completed by trying to get
-     * the complete lock that we locked earlier for each thread, which
-     * will block until that thread has completed the work on that
-     * plane.
+     * Now that all data is ready for our threads, fire them off
+     * and wait for their completion.
      */
-    for( plane = 0; plane < 3; plane++ )
-    {
-        hb_lock( pv->eedi2_complete_lock[plane] );
-        hb_unlock( pv->eedi2_complete_lock[plane] );
-    }
+    taskset_cycle( &pv->eedi2_taskset );
 }
 
 
@@ -1402,7 +1362,6 @@ void eedi2_planer( hb_filter_private_t * pv )
  */
 void decomb_filter_thread( void *thread_args_v )
 {
-    decomb_arguments_t *decomb_work = NULL;
     hb_filter_private_t * pv;
     int run = 1;
     int segment, segment_start, segment_stop, plane;
@@ -1416,21 +1375,18 @@ void decomb_filter_thread( void *thread_args_v )
     while( run )
     {
         /*
-         * Wait here until there is work to do. hb_lock() blocks until
-         * render releases it to say that there is more work to do.
+         * Wait here until there is work to do.
          */
-        hb_lock( pv->decomb_begin_lock[segment] );
+        taskset_thread_wait4start( &pv->decomb_taskset, segment );
 
-        decomb_work = &pv->decomb_arguments[segment];
-
-        if( decomb_work->stop )
+        if( taskset_thread_stop( &pv->decomb_taskset, segment ) )
         {
             /*
              * No more work to do, exit this thread.
              */
             run = 0;
-            continue;
-        }
+            goto report_completion;
+        } 
 
         /*
          * Process segment (for now just from luma)
@@ -1459,42 +1415,23 @@ void decomb_filter_thread( void *thread_args_v )
                 detect_combed_segment( pv, segment_start, segment_stop );
             }
         }
+
+report_completion:
         /*
          * Finished this segment, let everyone know.
          */
-        hb_unlock( pv->decomb_complete_lock[segment] );
+        taskset_thread_complete( &pv->decomb_taskset, segment );
     }
-    free( thread_args_v );
 }
 
 int comb_segmenter( hb_filter_private_t * pv )
 {
-    int segment;
-
-    for( segment = 0; segment < pv->cpu_count; segment++ )
-    {
-        /*
-         * Let the thread for this plane know that we've setup work
-         * for it by releasing the begin lock (ensuring that the
-         * complete lock is already locked so that we block when
-         * we try to lock it again below).
-         */
-        hb_lock( pv->decomb_complete_lock[segment] );
-        hb_unlock( pv->decomb_begin_lock[segment] );
-    }
-
     /*
-     * Wait until all three threads have completed by trying to get
-     * the complete lock that we locked earlier for each thread, which
-     * will block until that thread has completed the work on that
-     * plane.
+     * Now that all data for decomb detection is ready for
+     * our threads, fire them off and wait for their completion.
      */
-    for( segment = 0; segment < pv->cpu_count; segment++ )
-    {
-        hb_lock( pv->decomb_complete_lock[segment] );
-        hb_unlock( pv->decomb_complete_lock[segment] );
-    }
-
+    taskset_cycle( &pv->decomb_taskset );
+    
     if( pv->mode & MODE_FILTER )
     {
         filter_combing_mask( pv );
@@ -1689,27 +1626,26 @@ void yadif_decomb_filter_thread( void *thread_args_v )
     while( run )
     {
         /*
-         * Wait here until there is work to do. hb_lock() blocks until
-         * render releases it to say that there is more work to do.
+         * Wait here until there is work to do.
          */
-        hb_lock( pv->yadif_begin_lock[segment] );
-
-        yadif_work = &pv->yadif_arguments[segment];
-
-        if( yadif_work->stop )
+        taskset_thread_wait4start( &pv->yadif_taskset, segment );
+        
+        if( taskset_thread_stop( &pv->yadif_taskset, segment ) )
         {
             /*
              * No more work to do, exit this thread.
              */
             run = 0;
-            continue;
-        }
+            goto report_completion;
+        } 
+
+        yadif_work = &pv->yadif_arguments[segment];
 
         if( yadif_work->dst == NULL )
         {
             hb_error( "thread started when no work available" );
             hb_snooze(500);
-            continue;
+            goto report_completion;
         }
 
         is_combed = pv->yadif_arguments[segment].is_combed;
@@ -1822,12 +1758,13 @@ void yadif_decomb_filter_thread( void *thread_args_v )
                 }
             }
         }
+
+report_completion:
         /*
          * Finished this segment, let everyone know.
          */
-        hb_unlock( pv->yadif_complete_lock[segment] );
+        taskset_thread_complete( &pv->yadif_taskset, segment );
     }
-    free( thread_args_v );
 }
 
 static void yadif_filter( uint8_t ** dst,
@@ -1923,28 +1860,12 @@ static void yadif_filter( uint8_t ** dst,
                 pv->yadif_arguments[segment].tff = tff;
                 pv->yadif_arguments[segment].dst = dst;
                 pv->yadif_arguments[segment].is_combed = is_combed;
-
-                /*
-                 * Let the thread for this plane know that we've setup work
-                 * for it by releasing the begin lock (ensuring that the
-                 * complete lock is already locked so that we block when
-                 * we try to lock it again below).
-                 */
-                hb_lock( pv->yadif_complete_lock[segment] );
-                hb_unlock( pv->yadif_begin_lock[segment] );
             }
 
             /*
-             * Wait until all three threads have completed by trying to get
-             * the complete lock that we locked earlier for each thread, which
-             * will block until that thread has completed the work on that
-             * plane.
+             * Allow the taskset threads to make one pass over the data.
              */
-            for( segment = 0; segment < pv->cpu_count; segment++ )
-            {
-                hb_lock( pv->yadif_complete_lock[segment] );
-                hb_unlock( pv->yadif_complete_lock[segment] );
-            }
+            taskset_cycle( &pv->yadif_taskset );
 
             /*
              * Entire frame is now deinterlaced.
@@ -2120,99 +2041,71 @@ static int hb_decomb_init( hb_filter_object_t * filter,
             }
         }
     }
+    
+    /*
+     * Setup yadif taskset.
+     */
+    pv->yadif_arguments = malloc( sizeof( yadif_arguments_t ) * pv->cpu_count );
+    if( pv->yadif_arguments == NULL ||
+        taskset_init( &pv->yadif_taskset, /*thread_count*/pv->cpu_count,
+                      sizeof( yadif_thread_arg_t ) ) == 0 )
+    {
+        hb_error( "yadif could not initialize taskset" );
+    }
 
-     /*
-      * Create yadif threads and locks.
-      */
-     pv->yadif_threads = malloc( sizeof( hb_thread_t* ) * pv->cpu_count );
-     pv->yadif_begin_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
-     pv->yadif_complete_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
-     pv->yadif_arguments = malloc( sizeof( yadif_arguments_t ) * pv->cpu_count );
-
-     for( i = 0; i < pv->cpu_count; i++ )
-     {
-         yadif_thread_arg_t *thread_args;
-
-         thread_args = malloc( sizeof( yadif_thread_arg_t ) );
-
-         if( thread_args )
-         {
-             thread_args->pv = pv;
-             thread_args->segment = i;
-
-             pv->yadif_begin_lock[i] = hb_lock_init();
-             pv->yadif_complete_lock[i] = hb_lock_init();
-
-             /*
-              * Important to start off with the threads locked waiting
-              * on input.
-              */
-             hb_lock( pv->yadif_begin_lock[i] );
-
-             pv->yadif_arguments[i].stop = 0;
-             pv->yadif_arguments[i].dst = NULL;
+    for( i = 0; i < pv->cpu_count; i++ )
+    {
+        yadif_thread_arg_t *thread_args;
 
-             pv->yadif_threads[i] = hb_thread_init( "yadif_filter_segment",
-                                                    yadif_decomb_filter_thread,
-                                                    thread_args,
-                                                    HB_NORMAL_PRIORITY );
-         }
-         else
-         {
-             hb_error( "yadif could not create threads" );
-         }
+        thread_args = taskset_thread_args( &pv->yadif_taskset, i );
+        thread_args->pv = pv;
+        thread_args->segment = i;
+        pv->yadif_arguments[i].dst = NULL;
+        if( taskset_thread_spawn( &pv->yadif_taskset, i,
+                                 "yadif_filter_segment",
+                                 yadif_decomb_filter_thread,
+                                 HB_NORMAL_PRIORITY ) == 0 )
+        {
+            hb_error( "yadif could not spawn thread" );
+        }
     }
 
     /*
-     * Create decomb threads and locks.
+     * Create decomb taskset.
      */
-    pv->decomb_threads = malloc( sizeof( hb_thread_t* ) * pv->cpu_count );
-    pv->decomb_begin_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
-    pv->decomb_complete_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
-    pv->decomb_arguments = malloc( sizeof( decomb_arguments_t ) * pv->cpu_count );
-
+    if( taskset_init( &pv->decomb_taskset, /*thread_count*/pv->cpu_count,
+                      sizeof( decomb_thread_arg_t ) ) == 0 )
+    {
+        hb_error( "decomb could not initialize taskset" );
+    }
     for( i = 0; i < pv->cpu_count; i++ )
     {
         decomb_thread_arg_t *decomb_thread_args;
-
-        decomb_thread_args = malloc( sizeof( decomb_thread_arg_t ) );
-
-        if( decomb_thread_args )
-        {
-            decomb_thread_args->pv = pv;
-            decomb_thread_args->segment = i;
-
-            pv->decomb_begin_lock[i] = hb_lock_init();
-            pv->decomb_complete_lock[i] = hb_lock_init();
-
-            /*
-             * Important to start off with the threads locked waiting
-             * on input.
-             */
-            hb_lock( pv->decomb_begin_lock[i] );
-
-            pv->decomb_arguments[i].stop = 0;
-
-            pv->decomb_threads[i] = hb_thread_init( "decomb_filter_segment",
-                                                   decomb_filter_thread,
-                                                   decomb_thread_args,
-                                                   HB_NORMAL_PRIORITY );
-        }
-        else
+    
+        decomb_thread_args = taskset_thread_args( &pv->decomb_taskset, i );
+        decomb_thread_args->pv = pv;
+        decomb_thread_args->segment = i;
+
+        if( taskset_thread_spawn( &pv->decomb_taskset, i,
+                                 "decomb_filter_segment",
+                                 decomb_filter_thread,
+                                 HB_NORMAL_PRIORITY ) == 0 )
         {
-            hb_error( "decomb could not create threads" );
+            hb_error( "decomb could not spawn thread" );
         }
     }
 
     if( pv->mode & MODE_EEDI2 )
     {
+
         /*
-         * Create eedi2 threads and locks.
+         * Create eedi2 taskset.
          */
-        pv->eedi2_threads = malloc( sizeof( hb_thread_t* ) * 3 );
-        pv->eedi2_begin_lock = malloc( sizeof( hb_lock_t * ) * 3 );
-        pv->eedi2_complete_lock = malloc( sizeof( hb_lock_t * ) * 3 );
-        pv->eedi2_arguments = malloc( sizeof( eedi2_arguments_t ) * 3 );
+        if( taskset_init( &pv->eedi2_taskset, /*thread_count*/3,
+                          sizeof( eedi2_thread_arg_t ) ) == 0 )
+        {
+            hb_error( "eedi2 could not initialize taskset" );
+        }
 
         if( pv->post_processing > 1 )
         {
@@ -2230,32 +2123,17 @@ static int hb_decomb_init( hb_filter_object_t * filter,
         {
             eedi2_thread_arg_t *eedi2_thread_args;
 
-            eedi2_thread_args = malloc( sizeof( eedi2_thread_arg_t ) );
-
-            if( eedi2_thread_args )
-            {
-                eedi2_thread_args->pv = pv;
-                eedi2_thread_args->plane = i;
-
-                pv->eedi2_begin_lock[i] = hb_lock_init();
-                pv->eedi2_complete_lock[i] = hb_lock_init();
-
-                /*
-                 * Important to start off with the threads locked waiting
-                 * on input.
-                 */
-                hb_lock( pv->eedi2_begin_lock[i] );
+            eedi2_thread_args = taskset_thread_args( &pv->eedi2_taskset, i );
 
-                pv->eedi2_arguments[i].stop = 0;
+            eedi2_thread_args->pv = pv;
+            eedi2_thread_args->plane = i;
 
-                pv->eedi2_threads[i] = hb_thread_init( "eedi2_filter_segment",
-                                                       eedi2_filter_thread,
-                                                       eedi2_thread_args,
-                                                       HB_NORMAL_PRIORITY );
-            }
-            else
+            if( taskset_thread_spawn( &pv->eedi2_taskset, i,
+                                      "eedi2_filter_segment",
+                                      eedi2_filter_thread,
+                                      HB_NORMAL_PRIORITY ) == 0 )
             {
-                hb_error( "eedi2 could not create threads" );
+                hb_error( "eedi2 could not spawn thread" );
             }
         }
     }
@@ -2366,71 +2244,18 @@ static void hb_decomb_close( hb_filter_object_t * filter )
         if (pv->cxy) eedi2_aligned_free(pv->cxy);
         if (pv->tmpc) eedi2_aligned_free(pv->tmpc);
     }
-
-    for( i = 0; i < pv->cpu_count; i++)
-    {
-        /*
-         * Tell each yadif thread to stop, and then cleanup.
-         */
-        pv->yadif_arguments[i].stop = 1;
-        hb_unlock(  pv->yadif_begin_lock[i] );
-
-        hb_thread_close( &pv->yadif_threads[i] );
-        hb_lock_close( &pv->yadif_begin_lock[i] );
-        hb_lock_close( &pv->yadif_complete_lock[i] );
-    }
-
+    
+    taskset_fini( &pv->yadif_taskset );
+    taskset_fini( &pv->decomb_taskset );
+    
     /*
      * free memory for yadif structs
      */
-    free( pv->yadif_threads );
-    free( pv->yadif_begin_lock );
-    free( pv->yadif_complete_lock );
     free( pv->yadif_arguments );
-
-    for( i = 0; i < pv->cpu_count; i++)
-    {
-        /*
-         * Tell each decomb thread to stop, and then cleanup.
-         */
-        pv->decomb_arguments[i].stop = 1;
-        hb_unlock(  pv->decomb_begin_lock[i] );
-
-        hb_thread_close( &pv->decomb_threads[i] );
-        hb_lock_close( &pv->decomb_begin_lock[i] );
-        hb_lock_close( &pv->decomb_complete_lock[i] );
-    }
-
-    /*
-     * free memory for decomb structs
-     */
-    free( pv->decomb_threads );
-    free( pv->decomb_begin_lock );
-    free( pv->decomb_complete_lock );
-    free( pv->decomb_arguments );
-
+    
     if( pv->mode & MODE_EEDI2 )
     {
-        for( i = 0; i < 3; i++)
-        {
-            /*
-             * Tell each eedi2 thread to stop, and then cleanup.
-             */
-            pv->eedi2_arguments[i].stop = 1;
-            hb_unlock(  pv->eedi2_begin_lock[i] );
-
-            hb_thread_close( &pv->eedi2_threads[i] );
-            hb_lock_close( &pv->eedi2_begin_lock[i] );
-            hb_lock_close( &pv->eedi2_complete_lock[i] );
-        }
-
-        /*
-         * free memory for eedi2 structs
-         */
-        free( pv->eedi2_threads );
-        free( pv->eedi2_begin_lock );
-        free( pv->eedi2_complete_lock );
-        free( pv->eedi2_arguments );
+        taskset_fini( &pv->eedi2_taskset );
     }
 
     /* Cleanup mcdeint specific buffers */
diff --git a/libhb/deinterlace.c b/libhb/deinterlace.c
index 49681512c29424f9d9b501c386a35527b928e1e2..e654817c3664b227dea93dba335c1a142e20d396 100644
@@ -20,6 +20,7 @@
 #include "hbffmpeg.h"
 #include "mpeg2dec/mpeg2.h"
 #include "mcdeint.h"
+#include "taskset.h"
 
 // yadif_mode is a bit vector with the following flags
 // Note that 2PASS should be enabled when using MCDEINT
@@ -42,7 +43,6 @@ typedef struct yadif_arguments_s {
     uint8_t **dst;
     int parity;
     int tff;
-    int stop;
 } yadif_arguments_t;
 
 struct hb_filter_private_s
@@ -59,9 +59,8 @@ struct hb_filter_private_s
 
     int              cpu_count;
 
-    hb_thread_t    ** yadif_threads;        // Threads for Yadif - one per CPU
-    hb_lock_t      ** yadif_begin_lock;     // Thread has work
-    hb_lock_t      ** yadif_complete_lock;  // Thread has completed work
+    taskset_t        yadif_taskset;         // Threads for Yadif - one per CPU
+
     yadif_arguments_t *yadif_arguments;     // Arguments to thread for work
 
     int              mcdeint_mode;
@@ -220,27 +219,27 @@ void yadif_filter_thread( void *thread_args_v )
     while( run )
     {
         /*
-         * Wait here until there is work to do. hb_lock() blocks until
-         * render releases it to say that there is more work to do.
+         * Wait here until there is work to do.
          */
-        hb_lock( pv->yadif_begin_lock[segment] );
+        taskset_thread_wait4start( &pv->yadif_taskset, segment );
 
-        yadif_work = &pv->yadif_arguments[segment];
 
-        if( yadif_work->stop )
+        if( taskset_thread_stop( &pv->yadif_taskset, segment ) )
         {
             /*
              * No more work to do, exit this thread.
              */
             run = 0;
-            continue;
+            goto report_completion;
         } 
 
+        yadif_work = &pv->yadif_arguments[segment];
+
         if( yadif_work->dst == NULL )
         {
             hb_error( "Thread started when no work available" );
             hb_snooze(500);
-            continue;
+            goto report_completion;
         }
         
         /*
@@ -332,12 +331,13 @@ void yadif_filter_thread( void *thread_args_v )
                 }
             }
         }
+
+report_completion:
         /*
          * Finished this segment, let everyone know.
          */
-        hb_unlock( pv->yadif_complete_lock[segment] );
+        taskset_thread_complete( &pv->yadif_taskset, segment );
     }
-    free( thread_args_v );
 }
 
 
@@ -364,28 +364,10 @@ static void yadif_filter( uint8_t ** dst,
         pv->yadif_arguments[segment].parity = parity;
         pv->yadif_arguments[segment].tff = tff;
         pv->yadif_arguments[segment].dst = dst;
-
-        /*
-         * Let the thread for this plane know that we've setup work 
-         * for it by releasing the begin lock (ensuring that the
-         * complete lock is already locked so that we block when
-         * we try to lock it again below).
-         */
-        hb_lock( pv->yadif_complete_lock[segment] );
-        hb_unlock( pv->yadif_begin_lock[segment] );
     }
 
-    /*
-     * Wait until all three threads have completed by trying to get
-     * the complete lock that we locked earlier for each thread, which
-     * will block until that thread has completed the work on that
-     * plane.
-     */
-    for( segment = 0; segment < pv->cpu_count; segment++ )
-    {
-        hb_lock( pv->yadif_complete_lock[segment] );
-        hb_unlock( pv->yadif_complete_lock[segment] );
-    }
+    /* Allow the taskset threads to make one pass over the data. */
+    taskset_cycle( &pv->yadif_taskset );
 
     /*
      * Entire frame is now deinterlaced.
@@ -444,41 +426,32 @@ static int hb_deinterlace_init( hb_filter_object_t * filter,
         }
 
         /*
-         * Create yadif threads and locks.
+         * Setup yadif taskset.
          */
-        pv->yadif_threads = malloc( sizeof( hb_thread_t* ) * pv->cpu_count );
-        pv->yadif_begin_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
-        pv->yadif_complete_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
         pv->yadif_arguments = malloc( sizeof( yadif_arguments_t ) * pv->cpu_count );
+        if( pv->yadif_arguments == NULL ||
+            taskset_init( &pv->yadif_taskset, /*thread_count*/pv->cpu_count,
+                          sizeof( yadif_arguments_t ) ) == 0 )
+        {
+            hb_error( "yadif could not initialize taskset" );
+        }
 
         for( i = 0; i < pv->cpu_count; i++ )
         {
             yadif_thread_arg_t *thread_args;
 
-            thread_args = malloc( sizeof( yadif_thread_arg_t ) );
-
-            if( thread_args ) {
-                thread_args->pv = pv;
-                thread_args->segment = i;
+            thread_args = taskset_thread_args( &pv->yadif_taskset, i );
 
-                pv->yadif_begin_lock[i] = hb_lock_init();
-                pv->yadif_complete_lock[i] = hb_lock_init();
+            thread_args->pv = pv;
+            thread_args->segment = i;
+            pv->yadif_arguments[i].dst = NULL;
 
-                /*
-                 * Important to start off with the threads locked waiting
-                 * on input.
-                 */
-                hb_lock( pv->yadif_begin_lock[i] );
-
-                pv->yadif_arguments[i].stop = 0;
-                pv->yadif_arguments[i].dst = NULL;
-                
-                pv->yadif_threads[i] = hb_thread_init( "yadif_filter_segment",
-                                                       yadif_filter_thread,
-                                                       thread_args,
-                                                       HB_NORMAL_PRIORITY );
-            } else {
-                hb_error( "Yadif could not create threads" );
+            if( taskset_thread_spawn( &pv->yadif_taskset, i,
+                                      "yadif_filter_segment",
+                                      yadif_filter_thread,
+                                      HB_NORMAL_PRIORITY ) == 0 )
+            {
+                hb_error( "yadif could not spawn thread" );
             }
         }
     }
@@ -526,25 +499,7 @@ static void hb_deinterlace_close( hb_filter_object_t * filter )
             }
         }
 
-        for( i = 0; i < pv->cpu_count; i++)
-        {
-            /*
-             * Tell each yadif thread to stop, and then cleanup.
-             */
-            pv->yadif_arguments[i].stop = 1;
-            hb_unlock(  pv->yadif_begin_lock[i] );
-
-            hb_thread_close( &pv->yadif_threads[i] );
-            hb_lock_close( &pv->yadif_begin_lock[i] );
-            hb_lock_close( &pv->yadif_complete_lock[i] );
-        }
-        
-        /*
-         * free memory for yadif structs
-         */
-        free( pv->yadif_threads );
-        free( pv->yadif_begin_lock );
-        free( pv->yadif_complete_lock );
+        taskset_fini( &pv->yadif_taskset );
         free( pv->yadif_arguments );
     }
 
diff --git a/libhb/ports.c b/libhb/ports.c
index cec4efd963cfd5d81fe27b1302e694c00473b1cf..6f3a48240bd4180bb8b9cd90f74b9e31d999d6bd 100644
@@ -294,20 +294,20 @@ void hb_mkdir( char * name )
  ***********************************************************************/
 struct hb_thread_s
 {
-    char       * name;
-    int          priority;
-    void      (* function) ( void * );
-    void       * arg;
+    char          * name;
+    int             priority;
+    thread_func_t * function;
+    void          * arg;
 
-    hb_lock_t  * lock;
-    int          exited;
+    hb_lock_t     * lock;
+    int             exited;
 
 #if defined( SYS_BEOS )
-    thread_id    thread;
+    thread_id       thread;
 #elif USE_PTHREAD
-    pthread_t    thread;
+    pthread_t       thread;
 //#elif defined( SYS_CYGWIN )
-//    HANDLE       thread;
+//    HANDLE          thread;
 #endif
 };
 
@@ -346,7 +346,7 @@ static void attribute_align_thread hb_thread_func( void * _t )
 {
     hb_thread_t * t = (hb_thread_t *) _t;
 
-#if defined( SYS_DARWIN )
+#if defined( SYS_DARWIN ) || defined( SYS_FREEBSD )
     /* Set the thread priority */
     struct sched_param param;
     memset( &param, 0, sizeof( struct sched_param ) );
@@ -376,7 +376,7 @@ static void attribute_align_thread hb_thread_func( void * _t )
  * arg:      argument of the routine
  * priority: HB_LOW_PRIORITY or HB_NORMAL_PRIORITY
  ***********************************************************************/
-hb_thread_t * hb_thread_init( char * name, void (* function)(void *),
+hb_thread_t * hb_thread_init( const char * name, void (* function)(void *),
                               void * arg, int priority )
 {
     hb_thread_t * t = calloc( sizeof( hb_thread_t ), 1 );
@@ -489,7 +489,7 @@ hb_lock_t * hb_lock_init()
 
     pthread_mutexattr_init(&mta);
 
-#if defined( SYS_CYGWIN )
+#if defined( SYS_CYGWIN ) || defined( SYS_FREEBSD )
     pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_NORMAL);
 #endif
 
@@ -566,6 +566,9 @@ hb_cond_t * hb_cond_init()
 {
     hb_cond_t * c = calloc( sizeof( hb_cond_t ), 1 );
 
+    if( c == NULL )
+        return NULL;
+
 #if defined( SYS_BEOS )
     c->thread = -1;
 #elif USE_PTHREAD
diff --git a/libhb/ports.h b/libhb/ports.h
index 7b743c437eed8d9bff3b3a9ab35a746da76c3484..c1cadf78513cac6f53978188237743ba7c783ae1 100644
@@ -62,7 +62,8 @@ typedef struct hb_thread_s hb_thread_t;
 #  define HB_NORMAL_PRIORITY 0
 #endif
 
-hb_thread_t * hb_thread_init( char * name, void (* function)(void *),
+typedef void (thread_func_t)(void *);
+hb_thread_t * hb_thread_init( const char * name, thread_func_t *function,
                               void * arg, int priority );
 void          hb_thread_close( hb_thread_t ** );
 int           hb_thread_has_exited( hb_thread_t * );
diff --git a/libhb/rotate.c b/libhb/rotate.c
index 5f9558764e09b981cf39b400892caa745c8930f0..a60f92ce5ccb93e58ef73669f5fe19a4db085253 100644
@@ -2,6 +2,7 @@
 #include "hb.h"
 #include "hbffmpeg.h"
 //#include "mpeg2dec/mpeg2.h"
+#include "taskset.h"
 
 #define MODE_DEFAULT     3
 // Mode 1: Flip vertically (y0 becomes yN and yN becomes y0)
@@ -11,7 +12,6 @@
 typedef struct rotate_arguments_s {
     hb_buffer_t *dst;
     hb_buffer_t *src;
-    int stop;
 } rotate_arguments_t;
 
 struct hb_filter_private_s
@@ -24,9 +24,7 @@ struct hb_filter_private_s
 
     int              cpu_count;
 
-    hb_thread_t    ** rotate_threads;        // Threads for Rotate - one per CPU
-    hb_lock_t      ** rotate_begin_lock;     // Thread has work
-    hb_lock_t      ** rotate_complete_lock;  // Thread has completed work
+    taskset_t         rotate_taskset;        // Threads for Rotate - one per CPU
     rotate_arguments_t *rotate_arguments;     // Arguments to thread for work
 };
 
@@ -85,27 +83,25 @@ void rotate_filter_thread( void *thread_args_v )
     while( run )
     {
         /*
-         * Wait here until there is work to do. hb_lock() blocks until
-         * render releases it to say that there is more work to do.
+         * Wait here until there is work to do.
          */
-        hb_lock( pv->rotate_begin_lock[segment] );
+        taskset_thread_wait4start( &pv->rotate_taskset, segment );
 
-        rotate_work = &pv->rotate_arguments[segment];
-
-        if( rotate_work->stop )
+        if( taskset_thread_stop( &pv->rotate_taskset, segment ) )
         {
             /*
              * No more work to do, exit this thread.
              */
             run = 0;
-            continue;
+            goto report_completion;
         } 
 
+        rotate_work = &pv->rotate_arguments[segment];
         if( rotate_work->dst == NULL )
         {
             hb_error( "Thread started when no work available" );
             hb_snooze(500);
-            continue;
+            goto report_completion;
         }
         
         /*
@@ -168,12 +164,13 @@ void rotate_filter_thread( void *thread_args_v )
                 }
             }
         }
+
+report_completion:
         /*
          * Finished this segment, let everyone know.
          */
-        hb_unlock( pv->rotate_complete_lock[segment] );
+        taskset_thread_complete( &pv->rotate_taskset, segment );
     }
-    free( thread_args_v );
 }
 
 
@@ -199,28 +196,12 @@ static void rotate_filter(
          */
         pv->rotate_arguments[segment].dst = out;
         pv->rotate_arguments[segment].src = in;
-
-        /*
-         * Let the thread for this plane know that we've setup work 
-         * for it by releasing the begin lock (ensuring that the
-         * complete lock is already locked so that we block when
-         * we try to lock it again below).
-         */
-        hb_lock( pv->rotate_complete_lock[segment] );
-        hb_unlock( pv->rotate_begin_lock[segment] );
     }
 
     /*
-     * Wait until all three threads have completed by trying to get
-     * the complete lock that we locked earlier for each thread, which
-     * will block until that thread has completed the work on that
-     * plane.
+     * Allow the taskset threads to make one pass over the data.
      */
-    for( segment = 0; segment < pv->cpu_count; segment++ )
-    {
-        hb_lock( pv->rotate_complete_lock[segment] );
-        hb_unlock( pv->rotate_complete_lock[segment] );
-    }
+    taskset_cycle( &pv->rotate_taskset );
 
     /*
      * Entire frame is now rotated.
@@ -244,44 +225,34 @@ static int hb_rotate_init( hb_filter_object_t * filter,
 
     pv->cpu_count = hb_get_cpu_count();
 
-
     /*
-     * Create threads and locks.
+     * Create rotate taskset.
      */
-    pv->rotate_threads = malloc( sizeof( hb_thread_t* ) * pv->cpu_count );
-    pv->rotate_begin_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
-    pv->rotate_complete_lock = malloc( sizeof( hb_lock_t * ) * pv->cpu_count );
     pv->rotate_arguments = malloc( sizeof( rotate_arguments_t ) * pv->cpu_count );
+    if( pv->rotate_arguments == NULL ||
+        taskset_init( &pv->rotate_taskset, /*thread_count*/pv->cpu_count,
+                      sizeof( rotate_thread_arg_t ) ) == 0 )
+    {
+            hb_error( "rotate could not initialize taskset" );
+    }
 
     int i;
     for( i = 0; i < pv->cpu_count; i++ )
     {
         rotate_thread_arg_t *thread_args;
     
-        thread_args = malloc( sizeof( rotate_thread_arg_t ) );
-    
-        if( thread_args ) {
-            thread_args->pv = pv;
-            thread_args->segment = i;
-    
-            pv->rotate_begin_lock[i] = hb_lock_init();
-            pv->rotate_complete_lock[i] = hb_lock_init();
+        thread_args = taskset_thread_args( &pv->rotate_taskset, i );
     
-            /*
-             * Important to start off with the threads locked waiting
-             * on input.
-             */
-            hb_lock( pv->rotate_begin_lock[i] );
+        thread_args->pv = pv;
+        thread_args->segment = i;
+        pv->rotate_arguments[i].dst = NULL;
     
-            pv->rotate_arguments[i].stop = 0;
-            pv->rotate_arguments[i].dst = NULL;
-            
-            pv->rotate_threads[i] = hb_thread_init( "rotate_filter_segment",
-                                                   rotate_filter_thread,
-                                                   thread_args,
-                                                   HB_NORMAL_PRIORITY );
-        } else {
-            hb_error( "rotate could not create threads" );
+        if( taskset_thread_spawn( &pv->rotate_taskset, i,
+                                  "rotate_filter_segment",
+                                  rotate_filter_thread,
+                                  HB_NORMAL_PRIORITY ) == 0 )
+        {
+            hb_error( "rotate could not spawn thread" );
         }
     }
     // Set init width/height so the next stage in the pipline
@@ -344,26 +315,11 @@ static void hb_rotate_close( hb_filter_object_t * filter )
         return;
     }
 
-    int i;
-    for( i = 0; i < pv->cpu_count; i++)
-    {
-        /*
-         * Tell each rotate thread to stop, and then cleanup.
-         */
-        pv->rotate_arguments[i].stop = 1;
-        hb_unlock(  pv->rotate_begin_lock[i] );
-    
-        hb_thread_close( &pv->rotate_threads[i] );
-        hb_lock_close( &pv->rotate_begin_lock[i] );
-        hb_lock_close( &pv->rotate_complete_lock[i] );
-    }
+    taskset_fini( &pv->rotate_taskset );
     
     /*
      * free memory for rotate structs
      */
-    free( pv->rotate_threads );
-    free( pv->rotate_begin_lock );
-    free( pv->rotate_complete_lock );
     free( pv->rotate_arguments );
 
     free( pv );