QSV: default MSDK VPP filter now runs AFTER any other filters in use, plus some cosmetics
author    handbrake <no-reply@handbrake.fr>
Fri, 26 Jul 2013 14:41:49 +0000 (14:41 +0000)
committer handbrake <no-reply@handbrake.fr>
Fri, 26 Jul 2013 14:41:49 +0000 (14:41 +0000)
git-svn-id: svn://svn.handbrake.fr/HandBrake/branches/qsv@5662 b64f7644-9d1e-0410-96f1-a4d463321fa5

libhb/common.h
libhb/enc_qsv.c
libhb/qsv_filter.c
libhb/qsv_filter_pp.c
libhb/qsv_memory.c

diff --git a/libhb/common.h b/libhb/common.h
index bbe42a60b337a251fe65db9888f5106027978907..5b364b6a79c1cc9d9537fe9f940c048bb51086a1 100644
--- a/libhb/common.h
+++ b/libhb/common.h
@@ -1091,10 +1091,8 @@ struct hb_filter_object_s
 
 enum
 {
-    // default MSDK VPP filters 
-    HB_FILTER_QSV = 1,
     // for QSV - important to have before other filters
-    HB_FILTER_QSV_PRE,
+    HB_FILTER_QSV_PRE = 1,
 
     // First, filters that may change the framerate (drop or dup frames)
     HB_FILTER_DETELECINE,
@@ -1112,6 +1110,8 @@ enum
 
     // for QSV - important to have as a last one
     HB_FILTER_QSV_POST,
+    // default MSDK VPP filter
+    HB_FILTER_QSV,
 };
 
 hb_filter_object_t * hb_filter_init( int filter_id );
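The enum comments already spell out the intent: filters positioned earlier in this enum run earlier in the pipeline, so moving HB_FILTER_QSV below HB_FILTER_QSV_POST makes the default MSDK VPP filter the last stage instead of the first. The minimal standalone sketch below illustrates that ordering idea; the reduced enum and the qsort-based chain are purely illustrative and are not HandBrake's actual filter-insertion code.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative subset of the reordered enum: lower values run earlier. */
enum
{
    HB_FILTER_QSV_PRE = 1,   /* QSV pre-processing, ahead of everything else */
    HB_FILTER_DETELECINE,    /* regular CPU filters sit in between           */
    HB_FILTER_QSV_POST,      /* QSV post-processing                          */
    HB_FILTER_QSV,           /* default MSDK VPP, now last in the chain      */
};

/* Hypothetical comparator: a chain sorted by id applies the default VPP last. */
static int cmp_filter_id(const void *a, const void *b)
{
    return *(const int *)a - *(const int *)b;
}

int main(void)
{
    int chain[] = { HB_FILTER_QSV, HB_FILTER_DETELECINE, HB_FILTER_QSV_PRE };

    qsort(chain, sizeof(chain) / sizeof(chain[0]), sizeof(chain[0]), cmp_filter_id);

    for (int i = 0; i < 3; i++)
        printf("filter id %d\n", chain[i]);   /* QSV_PRE, DETELECINE, QSV */
    return 0;
}
```

With the old value of 1, the same ordering would have placed the default VPP ahead of every CPU filter.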
diff --git a/libhb/enc_qsv.c b/libhb/enc_qsv.c
index 8f2843a161b40424dd624022882db2176031c6b2..2189821f246520f1885eb85020ed2b953b2469aa 100644
--- a/libhb/enc_qsv.c
+++ b/libhb/enc_qsv.c
@@ -1015,7 +1015,11 @@ int encqsvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
             received_item = NULL;
         }
         int sync_idx = av_qsv_get_free_sync( qsv_encode, qsv );
-
+        if (sync_idx == -1)
+        {
+            hb_error("qsv: Not enough resources allocated for QSV encode");
+            return 0;
+        }
         av_qsv_task *task = av_qsv_list_item( qsv_encode->tasks, pv->async_depth );
 
         for (;;)
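The new guard turns an exhausted sync-point pool into a hard error instead of letting -1 be used as an array index further down. Below is a hedged sketch of the kind of scan a helper like av_qsv_get_free_sync presumably performs over a fixed pool; the slot type, the in_use flag, and SYNC_POOL_SIZE are illustrative stand-ins, not the real av_qsv_* layout.

```c
#include <stddef.h>

#define SYNC_POOL_SIZE 16   /* illustrative; the real pool is bounded by AV_QSV_SYNC_NUM */

typedef struct
{
    int in_use;             /* illustrative flag; the real sync bookkeeping differs */
} sync_slot_t;

/* Returns a free slot index, or -1 when the pool is exhausted -- the case
 * the new hb_error() guard in encqsvWork() now catches explicitly. */
static int get_free_sync(sync_slot_t *pool, size_t count)
{
    for (size_t i = 0; i < count; i++)
    {
        if (!pool[i].in_use)
        {
            pool[i].in_use = 1;
            return (int)i;
        }
    }
    return -1;
}
```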
diff --git a/libhb/qsv_filter.c b/libhb/qsv_filter.c
index 59fbd31494d31bc21737fb9121fd2c992c28d42e..49755e63e323ff4035e70c1acf3a397e02443194 100644
--- a/libhb/qsv_filter.c
+++ b/libhb/qsv_filter.c
@@ -198,7 +198,7 @@ static int filter_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
             memcpy(&(qsv_vpp->p_surfaces[i]->Info), &(qsv_vpp->m_mfxVideoParam.vpp.Out), sizeof(mfxFrameInfo));
         }
 
-        qsv_vpp->sync_num = FFMIN( qsv_vpp->surface_num, AV_QSV_SYNC_NUM ); // AV_QSV_SYNC_NUM;
+        qsv_vpp->sync_num = FFMIN( qsv_vpp->surface_num, AV_QSV_SYNC_NUM );
 
         for (i = 0; i < qsv_vpp->sync_num; i++){
             qsv_vpp->p_syncp[i] = av_mallocz(sizeof(av_qsv_sync));
@@ -454,12 +454,18 @@ int process_frame(av_qsv_list* received_item, av_qsv_context* qsv, hb_filter_pri
     int sync_idx = av_qsv_get_free_sync(qsv_vpp, qsv);
     int surface_idx = -1;
 
-    for(;;){
-
+    for(;;)
+    {
+            if (sync_idx == -1)
+            {
+                hb_error("qsv: Not enough resources allocated for QSV filter");
+                ret = 0;
+                break;
+            }
             if( sts == MFX_ERR_MORE_SURFACE || sts == MFX_ERR_NONE )
                surface_idx = av_qsv_get_free_surface(qsv_vpp, qsv,  &(qsv_vpp->m_mfxVideoParam.vpp.Out), QSV_PART_ANY);
             if (surface_idx == -1) {
-                hb_log("qsv: Not enough resources allocated for the filter");
+                hb_error("qsv: Not enough resources allocated for QSV filter");
                 ret = 0;
                 break;
             }
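Both the encoder and the filter now follow the same pattern: check the sync index, then the surface index, and bail out with hb_error() as soon as either pool runs dry. The standalone demo below reproduces that control flow with stubbed pools; the demo_* names and pool sizes are invented for illustration and stand in for the real av_qsv_get_free_sync()/av_qsv_get_free_surface() helpers.

```c
#include <stdio.h>

#define DEMO_SYNC_NUM    4   /* illustrative pool sizes, not the real        */
#define DEMO_SURFACE_NUM 8   /* AV_QSV_SYNC_NUM / AV_QSV_SURFACE_NUM values  */

/* Stand-ins for the pool helpers: both return -1 once the pool is exhausted. */
static int next_sync    = 0;
static int next_surface = 0;

static int demo_get_free_sync(void)    { return next_sync    < DEMO_SYNC_NUM    ? next_sync++    : -1; }
static int demo_get_free_surface(void) { return next_surface < DEMO_SURFACE_NUM ? next_surface++ : -1; }

/* Shape of the guarded submission path introduced here: report the failure
 * instead of indexing a pool array with -1. */
static int demo_process_frame(void)
{
    int sync_idx = demo_get_free_sync();
    if (sync_idx == -1)
    {
        fprintf(stderr, "qsv: Not enough resources allocated for QSV filter\n");
        return 0;
    }

    int surface_idx = demo_get_free_surface();
    if (surface_idx == -1)
    {
        fprintf(stderr, "qsv: Not enough resources allocated for QSV filter\n");
        return 0;
    }

    /* ... submit sync_idx/surface_idx to the asynchronous MSDK call here ... */
    return 1;
}

int main(void)
{
    while (demo_process_frame())
        ;   /* stops cleanly once either pool runs dry */
    return 0;
}
```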
diff --git a/libhb/qsv_filter_pp.c b/libhb/qsv_filter_pp.c
index 9d145ce34368d8876b6ffa1d85a17fcb086507c3..84006a15edfdba14909e1a12bbbd42e35e9d529a 100644
--- a/libhb/qsv_filter_pp.c
+++ b/libhb/qsv_filter_pp.c
@@ -148,9 +148,12 @@ static int filter_pre_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
     AV_QSV_ZERO_MEMORY(qsv_vpp->m_mfxVideoParam);
 
 
-    if(prev_vpp)
+    if (prev_vpp)
+    {
         memcpy( &qsv_vpp->m_mfxVideoParam.vpp,  &prev_vpp->m_mfxVideoParam.vpp, sizeof(prev_vpp->m_mfxVideoParam.vpp));
-    else{
+    }
+    else
+    {
         AV_QSV_ZERO_MEMORY(qsv_vpp->m_mfxVideoParam);
 
         // FrameRate is important for VPP to start with
@@ -195,7 +198,7 @@ static int filter_pre_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
 
     qsv_vpp->m_mfxVideoParam.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
 
-    qsv_vpp->surface_num = FFMIN(prev_vpp? prev_vpp->surface_num : qsv->dec_space->surface_num/2, AV_QSV_SURFACE_NUM );
+    qsv_vpp->surface_num = FFMIN(prev_vpp ? prev_vpp->surface_num : qsv->dec_space->surface_num/2, AV_QSV_SURFACE_NUM);
 
     for(i = 0; i < qsv_vpp->surface_num; i++){
         qsv_vpp->p_surfaces[i] = av_mallocz( sizeof(mfxFrameSurface1) );
@@ -203,7 +206,7 @@ static int filter_pre_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
         memcpy(&(qsv_vpp->p_surfaces[i]->Info), &(qsv_vpp->m_mfxVideoParam.vpp.Out), sizeof(mfxFrameInfo));
     }
 
-    qsv_vpp->sync_num = FFMIN(prev_vpp? prev_vpp->sync_num : qsv->dec_space->sync_num/2, AV_QSV_SYNC_NUM );
+    qsv_vpp->sync_num = FFMIN(prev_vpp ? prev_vpp->sync_num : qsv->dec_space->sync_num, AV_QSV_SYNC_NUM);
     for (i = 0; i < qsv_vpp->sync_num; i++){
         qsv_vpp->p_syncp[i] = av_mallocz(sizeof(av_qsv_sync));
         AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i], MFX_ERR_MEMORY_ALLOC);
@@ -297,14 +300,13 @@ static int hb_qsv_filter_pre_init( hb_filter_object_t * filter,
     // PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) , 3 planes: Y, U, V
     // PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
     pv->sws_context_from_nv12 = hb_sws_get_context(
-                        pv->job->width, pv->job->height, AV_PIX_FMT_NV12,
-                        pv->job->width, pv->job->height, AV_PIX_FMT_YUV420P,
+                        pv->job->title->width, pv->job->title->height, AV_PIX_FMT_NV12,
+                        pv->job->title->width, pv->job->title->height, AV_PIX_FMT_YUV420P,
                         SWS_LANCZOS|SWS_ACCURATE_RND);
     pv->sws_context_to_nv12 = hb_sws_get_context(
-                        pv->job->width, pv->job->height, AV_PIX_FMT_YUV420P,
-                        pv->job->width, pv->job->height, AV_PIX_FMT_NV12,
+                        pv->job->title->width, pv->job->title->height, AV_PIX_FMT_YUV420P,
+                        pv->job->title->width, pv->job->title->height, AV_PIX_FMT_NV12,
                         SWS_LANCZOS|SWS_ACCURATE_RND);
-
     return 0;
 }
 int pre_process_frame(hb_buffer_t *in, av_qsv_context* qsv, hb_filter_private_t * pv ){
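Switching from pv->job->width/height to pv->job->title->width/height matters because this pre-filter sits ahead of any crop or scale stage, so the frames it converts are still at the source (title) resolution. Below is a sketch of the equivalent conversion-context pair built directly on libswscale; hb_sws_get_context is HandBrake's wrapper over this API, and the title_width/title_height parameters stand in for the job->title fields.

```c
#include <libswscale/swscale.h>
#include <libavutil/pixfmt.h>

/* NV12 -> planar YUV 4:2:0 at the source (title) resolution. */
static struct SwsContext *make_nv12_to_yuv420(int title_width, int title_height)
{
    return sws_getContext(title_width, title_height, AV_PIX_FMT_NV12,
                          title_width, title_height, AV_PIX_FMT_YUV420P,
                          SWS_LANCZOS | SWS_ACCURATE_RND,
                          NULL, NULL, NULL);
}

/* Planar YUV 4:2:0 -> NV12, the reverse path back into the MSDK surface. */
static struct SwsContext *make_yuv420_to_nv12(int title_width, int title_height)
{
    return sws_getContext(title_width, title_height, AV_PIX_FMT_YUV420P,
                          title_width, title_height, AV_PIX_FMT_NV12,
                          SWS_LANCZOS | SWS_ACCURATE_RND,
                          NULL, NULL, NULL);
}
```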
@@ -320,7 +322,8 @@ int pre_process_frame(hb_buffer_t *in, av_qsv_context* qsv, hb_filter_private_t
 
     av_qsv_space *qsv_vpp = pv->vpp_space;
 
-    if(received_item){
+    if (received_item)
+    {
         stage = av_qsv_get_last_stage( received_item );
         work_surface = stage->out.p_surface;
     }
@@ -330,18 +333,27 @@ int pre_process_frame(hb_buffer_t *in, av_qsv_context* qsv, hb_filter_private_t
 
     for (;;)
     {
-            if( sts == MFX_ERR_MORE_SURFACE || sts == MFX_ERR_NONE )
+            if (sync_idx == -1)
+            {
+                hb_error("qsv: Not enough resources allocated for the preprocessing filter");
+                ret = 0;
+                break;
+            }
+
+            if (sts == MFX_ERR_MORE_SURFACE || sts == MFX_ERR_NONE)
                surface_idx = av_qsv_get_free_surface(qsv_vpp, qsv,  &(qsv_vpp->m_mfxVideoParam.vpp.Out), QSV_PART_ANY);
             if (surface_idx == -1) {
-                hb_log("qsv: Not enough resources allocated for the filter");
+                hb_error("qsv: Not enough resources allocated for the preprocessing filter");
                 ret = 0;
                 break;
             }
 
             sts = MFXVideoUSER_ProcessFrameAsync(qsv->mfx_session, &work_surface, 1, &qsv_vpp->p_surfaces[surface_idx] , 1, qsv_vpp->p_syncp[sync_idx]->p_sync);
 
-            if( MFX_ERR_MORE_DATA == sts ){
-                if(!qsv_vpp->pending){
+            if (MFX_ERR_MORE_DATA == sts)
+            {
+                if (!qsv_vpp->pending)
+                {
                     qsv_vpp->pending = av_qsv_list_init(0);
                 }
 
diff --git a/libhb/qsv_memory.c b/libhb/qsv_memory.c
index 72a18d2781eb8891793c913f61370217c6be1428..f04c77a7660c7e5b9e568475bef326fe5ff65e1f 100644
--- a/libhb/qsv_memory.c
+++ b/libhb/qsv_memory.c
@@ -30,16 +30,15 @@ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "hbffmpeg.h"
 #include "qsv_memory.h"
 
-
 int qsv_nv12_to_yuv420(struct SwsContext* sws_context,hb_buffer_t* dst, mfxFrameSurface1* src, mfxCoreInterface *core){
     int ret = 0;
     int i,j;
 
-    int in_pitch = src->Data.Pitch;
-    int h = src->Info.CropH;
-    int w = src->Info.CropW;
-    uint8_t *in_luma = 0;
-    uint8_t *in_chroma = 0;
+    int in_pitch        = src->Data.Pitch;
+    int w               = AV_QSV_ALIGN16(src->Info.Width);
+    int h               = (MFX_PICSTRUCT_PROGRESSIVE == src->Info.PicStruct) ? AV_QSV_ALIGN16(src->Info.Height) : AV_QSV_ALIGN32(src->Info.Height);
+    uint8_t *in_luma    = 0;
+    uint8_t *in_chroma  = 0;
     static int copyframe_in_use = 1;
 
 
@@ -48,18 +47,19 @@ int qsv_nv12_to_yuv420(struct SwsContext* sws_context,hb_buffer_t* dst, mfxFrame
 
     if (copyframe_in_use)
     {
-    accel_dst.Info.FourCC = src->Info.FourCC;
-    accel_dst.Info.CropH = src->Info.CropH;
-    accel_dst.Info.CropW = src->Info.CropW;
-    accel_dst.Info.CropY = src->Info.CropY;
-    accel_dst.Info.CropX = src->Info.CropX;
-    accel_dst.Info.Width = src->Info.Width;
-    accel_dst.Info.Height = src->Info.Height;
-    accel_dst.Data.Pitch = src->Data.Pitch;
-    accel_dst.Data.Y = calloc( 1, in_pitch*h );
-    accel_dst.Data.VU = calloc( 1, in_pitch*h/2 );
-
-    sts = core->CopyFrame(core->pthis, &accel_dst, src );
+        accel_dst.Info.FourCC   = src->Info.FourCC;
+        accel_dst.Info.CropH    = src->Info.CropH;
+        accel_dst.Info.CropW    = src->Info.CropW;
+        accel_dst.Info.CropY    = src->Info.CropY;
+        accel_dst.Info.CropX    = src->Info.CropX;
+        accel_dst.Info.Width    = w;
+        accel_dst.Info.Height   = h;
+        accel_dst.Data.Pitch    = src->Data.Pitch;
+        accel_dst.Data.Y        = calloc( 1, in_pitch*h );
+        accel_dst.Data.VU       = calloc( 1, in_pitch*h/2 );
+
+        sts = core->CopyFrame(core->pthis, &accel_dst, src);
+
         if (sts < MFX_ERR_NONE)
         {
             free(accel_dst.Data.Y);
@@ -81,18 +81,18 @@ int qsv_nv12_to_yuv420(struct SwsContext* sws_context,hb_buffer_t* dst, mfxFrame
 
     hb_video_buffer_realloc( dst, w, h );
 
-    uint8_t *srcs[] = { in_luma, in_chroma };
+    uint8_t *srcs[]   = { in_luma, in_chroma };
     int srcs_stride[] = { in_pitch, in_pitch };
 
-    uint8_t *dsts[] = { dst->plane[0].data, dst->plane[1].data, dst->plane[2].data };
+    uint8_t *dsts[]   = { dst->plane[0].data, dst->plane[1].data, dst->plane[2].data };
     int dsts_stride[] = { dst->plane[0].stride, dst->plane[1].stride, dst->plane[2].stride };
 
     ret = sws_scale(sws_context, srcs, srcs_stride, 0, h, dsts, dsts_stride );
 
     if (copyframe_in_use)
     {
-    free(accel_dst.Data.Y);
-    free(accel_dst.Data.VU);
+        free(accel_dst.Data.Y);
+        free(accel_dst.Data.VU);
     }
 
     return ret;
@@ -104,15 +104,15 @@ int qsv_yuv420_to_nv12(struct SwsContext* sws_context,mfxFrameSurface1* dst, hb_
     int w = src->plane[0].width;
     int h = src->plane[0].height;
 
-    int out_pitch = dst->Data.Pitch;
-    uint8_t *out_luma = dst->Data.Y;
+    int out_pitch       = dst->Data.Pitch;
+    uint8_t *out_luma   = dst->Data.Y;
     uint8_t *out_chroma = dst->Data.VU;
 
-    uint8_t *srcs[] = { src->plane[0].data, src->plane[1].data, src->plane[2].data };
-    int srcs_stride[] = { src->plane[0].stride, src->plane[1].stride, src->plane[2].stride };
+    uint8_t *srcs[]     = { src->plane[0].data, src->plane[1].data, src->plane[2].data };
+    int srcs_stride[]   = { src->plane[0].stride, src->plane[1].stride, src->plane[2].stride };
 
-    uint8_t *dsts[] = { out_luma, out_chroma };
-    int dsts_stride[] = { out_pitch, out_pitch };
+    uint8_t *dsts[]     = { out_luma, out_chroma };
+    int dsts_stride[]   = { out_pitch, out_pitch };
 
     ret = sws_scale(sws_context, srcs, srcs_stride, 0, h, dsts, dsts_stride );
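The staging buffer handed to CopyFrame is now sized from the aligned surface dimensions rather than the crop rectangle, matching how the diff derives them: width rounded up to 16, height to 16 for progressive content or 32 otherwise, with Data.Pitch spanning the full aligned width. The sketch below shows that sizing arithmetic; the ALIGN16/ALIGN32 macros are assumed to be the usual round-up helpers and stand in for libhb's AV_QSV_ALIGN16/AV_QSV_ALIGN32.

```c
#include <stdint.h>
#include <stdlib.h>

/* Assumed round-up-to-multiple helpers standing in for AV_QSV_ALIGN16/32,
 * which are defined in libhb's QSV headers. */
#define ALIGN16(x) (((x) + 15) & ~15)
#define ALIGN32(x) (((x) + 31) & ~31)

/* Size an NV12 staging buffer the way the updated qsv_nv12_to_yuv420() does:
 * use the full aligned surface height, not CropH, so pitch * h covers every
 * row the hardware copy may touch. */
static void alloc_nv12_staging(int pitch, int info_height, int progressive,
                               uint8_t **luma, uint8_t **chroma)
{
    int h = progressive ? ALIGN16(info_height) : ALIGN32(info_height);

    *luma   = calloc(1, (size_t)pitch * h);       /* Y plane: pitch * h bytes      */
    *chroma = calloc(1, (size_t)pitch * h / 2);   /* interleaved UV plane: half it */
}
```

Sizing from CropW x CropH, as before, could under-allocate whenever the crop rectangle is smaller than the aligned surface the hardware copy operates on.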