HB_GID_VCODEC_MPEG2,
HB_GID_VCODEC_MPEG4,
HB_GID_VCODEC_THEORA,
+ HB_GID_VCODEC_VP8,
HB_GID_ACODEC_AAC,
HB_GID_ACODEC_AAC_HE,
HB_GID_ACODEC_AAC_PASS,
{ { "H.265 (x265)", "x265", "H.265 (libx265)", HB_VCODEC_X265, HB_MUX_AV_MP4|HB_MUX_AV_MKV, }, NULL, 1, HB_GID_VCODEC_H265, },
{ { "MPEG-4", "mpeg4", "MPEG-4 (libavcodec)", HB_VCODEC_FFMPEG_MPEG4, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_MPEG4, },
{ { "MPEG-2", "mpeg2", "MPEG-2 (libavcodec)", HB_VCODEC_FFMPEG_MPEG2, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_MPEG2, },
+ { { "VP8", "VP8", "VP8 (libvpx)", HB_VCODEC_FFMPEG_VP8, HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_VP8, },
{ { "Theora", "theora", "Theora (libtheora)", HB_VCODEC_THEORA, HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_THEORA, },
};
int hb_video_encoders_count = sizeof(hb_video_encoders) / sizeof(hb_video_encoders[0]);
case HB_VCODEC_THEORA:
case HB_VCODEC_FFMPEG_MPEG4:
case HB_VCODEC_FFMPEG_MPEG2:
+ case HB_VCODEC_FFMPEG_VP8:
#ifdef USE_X265
case HB_VCODEC_X265:
#endif
*high = 63.;
break;
+ case HB_VCODEC_FFMPEG_VP8:
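+ // libvpx constant-quality (CQ) levels run 0-63, with lower values
+ // giving higher quality.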
+ *direction = 1;
+ *granularity = 1.;
+ *low = 0.;
+ *high = 63.;
+ break;
+
case HB_VCODEC_FFMPEG_MPEG2:
case HB_VCODEC_FFMPEG_MPEG4:
default:
#endif
return "RF";
+ case HB_VCODEC_FFMPEG_VP8:
+ return "CQ";
+
default:
return "QP";
}
{
hb_log("encavcodecInit: MPEG-2 encoder");
} break;
+ case AV_CODEC_ID_VP8:
+ {
+ hb_log("encavcodecInit: VP8 encoder");
+ } break;
default:
{
hb_error("encavcodecInit: unsupported encoder!");
{
hb_log( "encavcodecInit: avcodec_find_encoder "
"failed" );
+ return 1;
}
context = avcodec_alloc_context3( codec );
// what was previously used
context->flags |= CODEC_FLAG_QSCALE;
context->global_quality = FF_QP2LAMBDA * job->vquality + 0.5;
- hb_log( "encavcodec: encoding at constant quantizer %d",
- context->global_quality );
+ // Set constant quality for libvpx
+ if ( w->codec_param == AV_CODEC_ID_VP8 )
+ {
+ char quality[7];
+ snprintf(quality, 7, "%.2f", job->vquality);
+ av_dict_set( &av_opts, "crf", quality, 0 );
+ // Setting the deadline to "good" and cpu-used to 0
+ // causes the encoder to balance video quality and
+ // encode time, with a bias toward video quality.
+ av_dict_set( &av_opts, "deadline", "good", 0);
+ av_dict_set( &av_opts, "cpu-used", "0", 0);
+ // This value was chosen to make the bitrate high enough
+ // for libvpx to "turn off" the maximum bitrate feature
+ // that is normally applied to constant quality.
+ context->bit_rate = job->width*job->height*( (double)fps.num / (double)fps.den );
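+ // (At 1920x1080 and 30 fps this works out to roughly 62 Mbit/s,
+ // far above typical CQ output, so the cap should never kick in.)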
+ hb_log( "encavcodec: encoding at CQ %.2f", job->vquality );
+ }
+ else
+ {
+ hb_log( "encavcodec: encoding at constant quantizer %d",
+ context->global_quality );
+ }
}
context->width = job->width;
context->height = job->height;
}
}
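+// Map a libavcodec picture type onto the corresponding HandBrake frame
+// type; pkt_flag_key is non-zero when the packet was flagged as a
+// keyframe, and HB_FRAME_REF is set in *sflags for intra pictures and
+// other keyframe packets.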
+static uint8_t convert_pict_type( int pict_type, char pkt_flag_key, uint16_t* sflags )
+{
+ uint8_t retval = 0;
+ switch ( pict_type )
+ {
+ case AV_PICTURE_TYPE_P:
+ {
+ retval = HB_FRAME_P;
+ } break;
+
+ case AV_PICTURE_TYPE_B:
+ {
+ retval = HB_FRAME_B;
+ } break;
+
+ case AV_PICTURE_TYPE_S:
+ {
+ retval = HB_FRAME_P;
+ } break;
+
+ case AV_PICTURE_TYPE_SP:
+ {
+ retval = HB_FRAME_P;
+ } break;
+
+ case AV_PICTURE_TYPE_BI:
+ case AV_PICTURE_TYPE_SI:
+ case AV_PICTURE_TYPE_I:
+ {
+ *sflags |= HB_FRAME_REF;
+ if ( pkt_flag_key )
+ {
+ retval = HB_FRAME_IDR;
+ }
+ else
+ {
+ retval = HB_FRAME_I;
+ }
+ } break;
+
+ default:
+ {
+ if ( pkt_flag_key )
+ {
+ *sflags |= HB_FRAME_REF;
+ retval = HB_FRAME_KEY;
+ }
+ else
+ {
+ retval = HB_FRAME_REF;
+ }
+ } break;
+ }
+ return retval;
+}
+
// Generate DTS by rearranging PTS in this sequence:
// pts0 - delay, pts1 - delay, pts2 - delay, pts1, pts2, pts3...
//
hb_job_t * job = pv->job;
AVFrame * frame;
hb_buffer_t * in = *buf_in, * buf;
-
- if ( in->size <= 0 )
+ char final_flushing_call = (in->size <= 0);
+ if ( final_flushing_call )
{
+ // Make a flushing call to the encoder for codecs that encode out of order
/* EOF on input - send it downstream & say we're done */
- *buf_out = in;
*buf_in = NULL;
- return HB_WORK_DONE;
+ frame = NULL;
+ }
+ else
+ {
+ frame = av_frame_alloc();
+ frame->data[0] = in->plane[0].data;
+ frame->data[1] = in->plane[1].data;
+ frame->data[2] = in->plane[2].data;
+ frame->linesize[0] = in->plane[0].stride;
+ frame->linesize[1] = in->plane[1].stride;
+ frame->linesize[2] = in->plane[2].stride;
+
+ // For constant quality, setting the quality in AVCodecContext
+ // doesn't do the trick. It must be set in the AVFrame.
+ frame->quality = pv->context->global_quality;
+
+ // Remember info about this frame that we need to pass across
+ // the avcodec_encode_video call (since it reorders frames).
+ save_frame_info( pv, in );
+ compute_dts_offset( pv, in );
+
+ // Bizarro ffmpeg appears to require the input AVFrame.pts to be
+ // set to a frame number. Setting it to an actual pts causes
+ // jerky video.
+ // frame->pts = in->s.start;
+ frame->pts = pv->frameno_in++;
}
-
- frame = av_frame_alloc();
- frame->data[0] = in->plane[0].data;
- frame->data[1] = in->plane[1].data;
- frame->data[2] = in->plane[2].data;
- frame->linesize[0] = in->plane[0].stride;
- frame->linesize[1] = in->plane[1].stride;
- frame->linesize[2] = in->plane[2].stride;
-
- // For constant quality, setting the quality in AVCodecContext
- // doesn't do the trick. It must be set in the AVFrame.
- frame->quality = pv->context->global_quality;
-
- // Remember info about this frame that we need to pass across
- // the avcodec_encode_video call (since it reorders frames).
- save_frame_info( pv, in );
- compute_dts_offset( pv, in );
-
- // Bizarro ffmpeg appears to require the input AVFrame.pts to be
- // set to a frame number. Setting it to an actual pts causes
- // jerky video.
- // frame->pts = in->s.start;
- frame->pts = pv->frameno_in++;
if ( pv->context->codec )
{
int ret;
AVPacket pkt;
int got_packet;
+ char still_flushing = final_flushing_call;
+ hb_buffer_t* buf_head = NULL;
+ hb_buffer_t* buf_last = NULL;
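+ // On the flushing call, keep asking the encoder for packets until it
+ // runs dry, chaining the resulting output buffers into a list.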
- av_init_packet(&pkt);
- /* Should be way too large */
- buf = hb_video_buffer_init( job->width, job->height );
- pkt.data = buf->data;
- pkt.size = buf->alloc;
-
- ret = avcodec_encode_video2( pv->context, &pkt, frame, &got_packet );
- if ( ret < 0 || pkt.size <= 0 || !got_packet )
- {
- hb_buffer_close( &buf );
- }
- else
+ do
{
- int64_t frameno = pkt.pts;
- buf->size = pkt.size;
- buf->s.start = get_frame_start( pv, frameno );
- buf->s.duration = get_frame_duration( pv, frameno );
- buf->s.stop = buf->s.start + buf->s.duration;
- buf->s.flags &= ~HB_FRAME_REF;
- switch ( pv->context->coded_frame->pict_type )
+ av_init_packet(&pkt);
+ /* Should be way too large */
+ buf = hb_video_buffer_init( job->width, job->height );
+ pkt.data = buf->data;
+ pkt.size = buf->alloc;
+
+ ret = avcodec_encode_video2( pv->context, &pkt, frame, &got_packet );
+ if ( ret < 0 || pkt.size <= 0 || !got_packet )
{
- case AV_PICTURE_TYPE_P:
- {
- buf->s.frametype = HB_FRAME_P;
- } break;
-
- case AV_PICTURE_TYPE_B:
- {
- buf->s.frametype = HB_FRAME_B;
- } break;
-
- case AV_PICTURE_TYPE_S:
- {
- buf->s.frametype = HB_FRAME_P;
- } break;
-
- case AV_PICTURE_TYPE_SP:
- {
- buf->s.frametype = HB_FRAME_P;
- } break;
-
- case AV_PICTURE_TYPE_BI:
- case AV_PICTURE_TYPE_SI:
- case AV_PICTURE_TYPE_I:
+ hb_buffer_close( &buf );
+ still_flushing = 0;
+ }
+ else
+ {
+ int64_t frameno = pkt.pts;
+ buf->size = pkt.size;
+ buf->s.start = get_frame_start( pv, frameno );
+ buf->s.duration = get_frame_duration( pv, frameno );
+ buf->s.stop = buf->s.start + buf->s.duration;
+ buf->s.flags &= ~HB_FRAME_REF;
+ buf->s.frametype = convert_pict_type( pv->context->coded_frame->pict_type, pkt.flags & AV_PKT_FLAG_KEY, &buf->s.flags );
+ buf = process_delay_list( pv, buf );
+
+ if (buf_head == NULL)
{
- buf->s.flags |= HB_FRAME_REF;
- if ( pkt.flags & AV_PKT_FLAG_KEY )
- {
- buf->s.frametype = HB_FRAME_IDR;
- }
- else
- {
- buf->s.frametype = HB_FRAME_I;
- }
- } break;
-
- default:
+ buf_head = buf;
+ }
+ else
{
- if ( pkt.flags & AV_PKT_FLAG_KEY )
- {
- buf->s.flags |= HB_FRAME_REF;
- buf->s.frametype = HB_FRAME_KEY;
- }
- else
- {
- buf->s.frametype = HB_FRAME_REF;
- }
- } break;
+ buf_last->next = buf;
+ }
+ buf_last = buf;
+ }
+ /* Write stats */
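+ /* stats_out may be NULL when the encoder has produced no two-pass stats */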
+ if (job->pass == 1 && pv->context->stats_out != NULL)
+ {
+ fprintf( pv->file, "%s", pv->context->stats_out );
}
- buf = process_delay_list( pv, buf );
+ } while (still_flushing);
+ if (buf_last != NULL && final_flushing_call)
+ {
+ buf_last->next = in;
+ buf = buf_head;
}
-
- if( job->pass == 1 )
+ else if (final_flushing_call)
{
- /* Write stats */
- fprintf( pv->file, "%s", pv->context->stats_out );
+ buf = in;
}
}
else
hb_error( "encavcodec: codec context has uninitialized codec; skipping frame" );
}
- av_frame_free(&frame);
+ av_frame_free( &frame );
*buf_out = buf;
- return HB_WORK_OK;
+ return final_flushing_call ? HB_WORK_DONE : HB_WORK_OK;
}
/* Begin PBXBuildFile section */
226268E01572CC7300477B4E /* libavresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 226268DF1572CC7300477B4E /* libavresample.a */; };
226268E11572CC7300477B4E /* libavresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 226268DF1572CC7300477B4E /* libavresample.a */; };
+ 22DD2C4A177B94DB00EF50D3 /* libvpx.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 22DD2C49177B94DB00EF50D3 /* libvpx.a */; };
+ 22DD2C4B177B95DA00EF50D3 /* libvpx.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 22DD2C49177B94DB00EF50D3 /* libvpx.a */; };
273F202314ADB8650021BE6D /* IOKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 273F202214ADB8650021BE6D /* IOKit.framework */; };
273F202614ADB8A40021BE6D /* libz.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 273F202514ADB8A40021BE6D /* libz.dylib */; };
273F202814ADB8BE0021BE6D /* libbz2.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 273F202714ADB8BE0021BE6D /* libbz2.dylib */; };
/* Begin PBXFileReference section */
226268DF1572CC7300477B4E /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavresample.a; path = external/contrib/lib/libavresample.a; sourceTree = BUILT_PRODUCTS_DIR; };
+ 22DD2C49177B94DB00EF50D3 /* libvpx.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libvpx.a; path = external/contrib/lib/libvpx.a; sourceTree = BUILT_PRODUCTS_DIR; };
271BA4C014B119F800BC1D2C /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist; name = Info.plist; path = external/macosx/Info.plist; sourceTree = BUILT_PRODUCTS_DIR; };
271E74EF182F260C0077C311 /* osx109.i386.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = osx109.i386.xcconfig; sourceTree = "<group>"; };
271E74F0182F260C0077C311 /* osx109.x86_64.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = osx109.x86_64.xcconfig; sourceTree = "<group>"; };
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
+ 22DD2C4B177B95DA00EF50D3 /* libvpx.a in Frameworks */,
273F203014ADB9790021BE6D /* AudioToolbox.framework in Frameworks */,
273F202314ADB8650021BE6D /* IOKit.framework in Frameworks */,
273F203314ADB9F00021BE6D /* CoreServices.framework in Frameworks */,
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
+ 22DD2C4A177B94DB00EF50D3 /* libvpx.a in Frameworks */,
A9E1467B16BC2ABD00C307BC /* QuartzCore.framework in Frameworks */,
273F21C114ADE7A20021BE6D /* Growl.framework in Frameworks */,
273F21C214ADE7BC0021BE6D /* Sparkle.framework in Frameworks */,
273F1FDE14AD9DA40021BE6D = {
isa = PBXGroup;
children = (
+ 22DD2C49177B94DB00EF50D3 /* libvpx.a */,
273F204114ADBC210021BE6D /* HandBrake */,
273F200214ADAE950021BE6D /* HandBrakeCLI */,
273F200014ADAE950021BE6D /* Products */,