*y = den;
}
+/**********************************************************************
+ * hb_buffer_list implementation
+ *********************************************************************/
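+/*
+ * Illustrative usage only (not part of the build): this mirrors the pattern
+ * used by the work functions and filters converted in this patch.
+ * 'produce_next' and 'buf_out' are placeholders.
+ *
+ *     hb_buffer_list_t list;
+ *     hb_buffer_t *buf;
+ *
+ *     hb_buffer_list_clear(&list);             // start with an empty list
+ *     while ((buf = produce_next()) != NULL)
+ *     {
+ *         hb_buffer_list_append(&list, buf);
+ *     }
+ *     *buf_out = hb_buffer_list_clear(&list);  // detach the chain, hand it off
+ */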
+void hb_buffer_list_append(hb_buffer_list_t *list, hb_buffer_t *buf)
+{
+ int count = 1;
+ hb_buffer_t *end = buf;
+
+ if (buf == NULL)
+ {
+ return;
+ }
+
+ // Input buffer may be a list of buffers, find the end.
+ while (end != NULL && end->next != NULL)
+ {
+ end = end->next;
+ count++;
+ }
+ if (list->tail == NULL)
+ {
+ list->head = buf;
+ list->tail = end;
+ }
+ else
+ {
+ list->tail->next = buf;
+ list->tail = end;
+ }
+ list->count += count;
+}
+
+void hb_buffer_list_prepend(hb_buffer_list_t *list, hb_buffer_t *buf)
+{
+ int count = 1;
+ hb_buffer_t *end = buf;
+
+ if (buf == NULL)
+ {
+ return;
+ }
+
+ // Input buffer may be a list of buffers, find the end.
+ while (end != NULL && end->next != NULL)
+ {
+ end = end->next;
+ count++;
+ }
+ if (list->tail == NULL)
+ {
+ list->head = buf;
+ list->tail = end;
+ }
+ else
+ {
+ end->next = list->head;
+ list->head = buf;
+ }
+ list->count += count;
+}
+
+hb_buffer_t* hb_buffer_list_rem_head(hb_buffer_list_t *list)
+{
+ if (list == NULL)
+ {
+ return NULL;
+ }
+ hb_buffer_t *head = list->head;
+ if (list->head != NULL)
+ {
+ if (list->head == list->tail)
+ {
+ list->tail = NULL;
+ }
+ list->head = list->head->next;
+ list->count--;
+ }
+ if (head != NULL)
+ {
+ head->next = NULL;
+ }
+ return head;
+}
+
+hb_buffer_t* hb_buffer_list_rem_tail(hb_buffer_list_t *list)
+{
+ if (list == NULL)
+ {
+ return NULL;
+ }
+ hb_buffer_t *tail = list->tail;
+
+ if (list->head == list->tail)
+ {
+ list->head = list->tail = NULL;
+ list->count = 0;
+ }
+ else if (list->tail != NULL)
+ {
+ hb_buffer_t *end = list->head;
+ while (end != NULL && end->next != list->tail)
+ {
+ end = end->next;
+ }
+ end->next = NULL;
+ list->tail = end;
+ list->count--;
+ }
+ if (tail != NULL)
+ {
+ tail->next = NULL;
+ }
+ return tail;
+}
+
+hb_buffer_t* hb_buffer_list_head(hb_buffer_list_t *list)
+{
+ if (list == NULL)
+ {
+ return NULL;
+ }
+ return list->head;
+}
+
+hb_buffer_t* hb_buffer_list_tail(hb_buffer_list_t *list)
+{
+ if (list == NULL)
+ {
+ return NULL;
+ }
+ return list->tail;
+}
+
+hb_buffer_t* hb_buffer_list_set(hb_buffer_list_t *list, hb_buffer_t *buf)
+{
+ int count = 0;
+
+ if (list == NULL)
+ {
+ return NULL;
+ }
+
+ hb_buffer_t *head = list->head;
+ hb_buffer_t *end = buf;
+ if (end != NULL)
+ {
+ count++;
+ while (end->next != NULL)
+ {
+ end = end->next;
+ count++;
+ }
+ }
+ list->head = buf;
+ list->tail = end;
+ list->count = count;
+ return head;
+}
+
+hb_buffer_t* hb_buffer_list_clear(hb_buffer_list_t *list)
+{
+ if (list == NULL)
+ {
+ return NULL;
+ }
+ hb_buffer_t *head = list->head;
+ list->head = list->tail = NULL;
+ list->count = 0;
+ return head;
+}
+
+void hb_buffer_list_close(hb_buffer_list_t *list)
+{
+ hb_buffer_t *buf = hb_buffer_list_clear(list);
+ hb_buffer_close(&buf);
+}
+
+int hb_buffer_list_count(hb_buffer_list_t *list)
+{
+ if (list == NULL)
+ {
+ return 0;
+ }
+ return list->count;
+}
+
/**********************************************************************
* hb_list implementation
**********************************************************************
typedef struct hb_handle_s hb_handle_t;
typedef struct hb_hwd_s hb_hwd_t;
typedef struct hb_list_s hb_list_t;
+typedef struct hb_buffer_list_s hb_buffer_list_t;
typedef struct hb_rate_s hb_rate_t;
typedef struct hb_dither_s hb_dither_t;
typedef struct hb_mixdown_s hb_mixdown_t;
#include "libavcodec/qsv.h"
#endif
+struct hb_buffer_list_s
+{
+ hb_buffer_t *head;
+ hb_buffer_t *tail;
+ int count;
+};
+
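+/*
+ * hb_buffer_list_t is a lightweight head/tail/count wrapper around chains of
+ * hb_buffer_t linked through their 'next' pointers:
+ * - append/prepend accept a single buffer or an already linked chain;
+ *   a NULL buffer is ignored.
+ * - rem_head/rem_tail detach and return one buffer with 'next' cleared.
+ * - set replaces the list contents with the given chain and returns the
+ *   previous head.
+ * - clear empties the list and returns the detached chain; it also serves
+ *   as the initializer for the empty state throughout this patch.
+ * - close empties the list and frees the buffers.
+ */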
+void hb_buffer_list_append(hb_buffer_list_t *list, hb_buffer_t *buf);
+void hb_buffer_list_prepend(hb_buffer_list_t *list, hb_buffer_t *buf);
+hb_buffer_t* hb_buffer_list_head(hb_buffer_list_t *list);
+hb_buffer_t* hb_buffer_list_rem_head(hb_buffer_list_t *list);
+hb_buffer_t* hb_buffer_list_tail(hb_buffer_list_t *list);
+hb_buffer_t* hb_buffer_list_rem_tail(hb_buffer_list_t *list);
+hb_buffer_t* hb_buffer_list_clear(hb_buffer_list_t *list);
+hb_buffer_t* hb_buffer_list_set(hb_buffer_list_t *list, hb_buffer_t *buf);
+void hb_buffer_list_close(hb_buffer_list_t *list);
+int hb_buffer_list_count(hb_buffer_list_t *list);
+
hb_list_t * hb_list_init();
int hb_list_count( const hb_list_t * );
void hb_list_add( hb_list_t *, void * );
struct hb_work_private_s
{
- hb_job_t *job;
- hb_title_t *title;
- AVCodecContext *context;
- AVCodecParserContext *parser;
- AVFrame *frame;
- hb_buffer_t *palette;
- int threads;
- int video_codec_opened;
- hb_list_t *list;
- double duration; // frame duration (for video)
- double field_duration; // field duration (for video)
- int frame_duration_set; // Indicates valid timing was found in stream
- double pts_next; // next pts we expect to generate
- int64_t chap_time; // time of next chap mark (if new_chap != 0)
- int new_chap; // output chapter mark pending
- uint32_t nframes;
- uint32_t ndrops;
- uint32_t decode_errors;
- int64_t prev_pts;
- int brokenTS; // video stream may contain packed b-frames
- hb_buffer_t* delayq[HEAP_SIZE];
- int queue_primed;
- pts_heap_t pts_heap;
- void* buffer;
- struct SwsContext *sws_context; // if we have to rescale or convert color space
- int sws_width;
- int sws_height;
- int sws_pix_fmt;
- int cadence[12];
- int wait_for_keyframe;
+ hb_job_t * job;
+ hb_title_t * title;
+ AVCodecContext * context;
+ AVCodecParserContext * parser;
+ AVFrame * frame;
+ hb_buffer_t * palette;
+ int threads;
+ int video_codec_opened;
+ hb_buffer_list_t list;
+ double duration; // frame duration (for video)
+ double field_duration; // field duration (for video)
+ int frame_duration_set; // Indicates valid timing was found in stream
+ double pts_next; // next pts we expect to generate
+ int64_t chap_time; // time of next chap mark (if new_chap != 0)
+ int new_chap; // output chapter mark pending
+ uint32_t nframes;
+ uint32_t ndrops;
+ uint32_t decode_errors;
+ int64_t prev_pts;
+ int brokenTS; // video stream may contain packed b-frames
+ hb_buffer_t* delayq[HEAP_SIZE];
+ int queue_primed;
+ pts_heap_t pts_heap;
+ void* buffer;
+ struct SwsContext * sws_context; // if we have to rescale or convert color space
+ int sws_width;
+ int sws_height;
+ int sws_pix_fmt;
+ int cadence[12];
+ int wait_for_keyframe;
#ifdef USE_HWD
- hb_va_dxva2_t *dxva2;
- uint8_t *dst_frame;
- hb_oclscale_t *opencl_scale;
+ hb_va_dxva2_t * dxva2;
+ uint8_t * dst_frame;
+ hb_oclscale_t * opencl_scale;
#endif
- hb_audio_resample_t *resample;
+ hb_audio_resample_t * resample;
#ifdef USE_QSV
// QSV-specific settings
struct
{
- int decode;
- av_qsv_config config;
- const char *codec_name;
+ int decode;
+ av_qsv_config config;
+ const char * codec_name;
#define USE_QSV_PTS_WORKAROUND // work around out-of-order output timestamps
#ifdef USE_QSV_PTS_WORKAROUND
- hb_list_t *pts_list;
+ hb_list_t * pts_list;
#endif
} qsv;
#endif
- hb_list_t * list_subtitle;
+ hb_list_t * list_subtitle;
};
#ifdef USE_QSV_PTS_WORKAROUND
#endif
static void decodeAudio( hb_audio_t * audio, hb_work_private_t *pv, uint8_t *data, int size, int64_t pts );
-static hb_buffer_t *link_buf_list( hb_work_private_t *pv );
static int64_t heap_pop( pts_heap_t *heap )
pv->title = job->title;
else
pv->title = w->title;
- pv->list = hb_list_init();
+ hb_buffer_list_clear(&pv->list);
codec = avcodec_find_decoder(w->codec_param);
pv->context = avcodec_alloc_context3(codec);
if ( pv )
{
flushDelayQueue( pv );
- hb_buffer_t *buf = link_buf_list( pv );
- hb_buffer_close( &buf );
+ hb_buffer_list_close(&pv->list);
if ( pv->job && pv->context && pv->context->codec )
{
av_freep( &pv->context->extradata );
av_freep( &pv->context );
}
- if ( pv->list )
- {
- hb_list_empty( &pv->list );
- }
hb_audio_resample_free(pv->resample);
#ifdef USE_HWD
decodeAudio( w->audio, pv, pout, pout_len, cur );
}
}
- *buf_out = link_buf_list( pv );
+ *buf_out = hb_buffer_list_clear(&pv->list);
return HB_WORK_OK;
}
int slot = pv->queue_primed ? pv->nframes & (HEAP_SIZE-1) : 0;
// flush all the video packets left on our timestamp-reordering delay q
- while ( ( buf = pv->delayq[slot] ) != NULL )
+ while ((buf = pv->delayq[slot]) != NULL)
{
- buf->s.start = heap_pop( &pv->pts_heap );
- hb_list_add( pv->list, buf );
+ buf->s.start = heap_pop(&pv->pts_heap);
+ hb_buffer_list_append(&pv->list, buf);
pv->delayq[slot] = NULL;
slot = ( slot + 1 ) & (HEAP_SIZE-1);
}
log_chapter( pv, pv->job->chapter_start, buf->s.start );
}
checkCadence( pv->cadence, flags, buf->s.start );
- hb_list_add( pv->list, buf );
+ hb_buffer_list_append(&pv->list, buf);
++pv->nframes;
return got_picture;
}
log_chapter( pv, pv->job->chapter_start, buf->s.start );
}
checkCadence( pv->cadence, buf->s.flags, buf->s.start );
- hb_list_add( pv->list, buf );
+ hb_buffer_list_append(&pv->list, buf);
}
// add the new frame to the delayq & push its timestamp on the heap
}
}
-/*
- * Removes all packets from 'pv->list', links them together into
- * a linked-list, and returns the first packet in the list.
- */
-static hb_buffer_t *link_buf_list( hb_work_private_t *pv )
-{
- hb_buffer_t *head = hb_list_item( pv->list, 0 );
-
- if ( head )
- {
- hb_list_rem( pv->list, head );
-
- hb_buffer_t *last = head, *buf;
-
- while ( ( buf = hb_list_item( pv->list, 0 ) ) != NULL )
- {
- hb_list_rem( pv->list, buf );
- last->next = buf;
- last = buf;
- }
- }
- return head;
-}
-
static int decavcodecvInit( hb_work_object_t * w, hb_job_t * job )
{
pv->title = job->title;
else
pv->title = w->title;
- pv->list = hb_list_init();
+ hb_buffer_list_clear(&pv->list);
#ifdef USE_QSV
if (hb_qsv_decode_is_enabled(job))
{
decodeVideo(w, in->data, 0, 0, pts, dts, 0);
}
- hb_list_add( pv->list, in );
- *buf_out = link_buf_list( pv );
+ hb_buffer_list_append(&pv->list, in);
+ *buf_out = hb_buffer_list_clear(&pv->list);
return HB_WORK_DONE;
}
}
decodeVideo( w, in->data, in->size, in->sequence, pts, dts, in->s.frametype );
hb_buffer_close( &in );
- *buf_out = link_buf_list( pv );
+ *buf_out = hb_buffer_list_clear(&pv->list);
return HB_WORK_OK;
}
if (pv->context != NULL && pv->context->codec != NULL)
{
flushDelayQueue( pv );
- hb_buffer_t *buf = link_buf_list( pv );
- hb_buffer_close( &buf );
+ hb_buffer_list_close(&pv->list);
if ( pv->title->opaque_priv == NULL )
{
pv->video_codec_opened = 0;
out->s.duration = duration;
out->s.stop = duration + pv->pts_next;
pv->pts_next = duration + pv->pts_next;
- hb_list_add(pv->list, out);
+ hb_buffer_list_append(&pv->list, out);
}
}
}
wb->new_channel = 1;
wb->in_xds_mode = 0;
- wb->hb_buffer = NULL;
- wb->hb_last_buffer = NULL;
+ hb_buffer_list_clear(&wb->list);
wb->last_pts = 0;
return 0;
}
if( wb->subline ) {
free(wb->subline);
}
-
- if( wb->hb_buffer ) {
- hb_buffer_close( &wb->hb_buffer );
- }
+ hb_buffer_list_close(&wb->list);
}
sprintf((char*)buffer->data, "%d,,Default,,0,0,0,,", ++wb->line);
len = strlen((char*)buffer->data);
memcpy(buffer->data + len, wb->enc_buffer, wb->enc_buffer_used);
- if (wb->hb_last_buffer)
- {
- wb->hb_last_buffer->next = buffer;
- }
- else
- {
- wb->hb_buffer = buffer;
- }
- wb->hb_last_buffer = buffer;
+ hb_buffer_list_append(&wb->list, buffer);
wrote_something=1;
wb->clear_sub_needed = 1;
}
buffer->s.start = ms_start;
buffer->s.stop = ms_start;
buffer->data[0] = 0;
- if (wb->hb_last_buffer != NULL)
- {
- wb->hb_last_buffer->next = buffer;
- }
- else
- {
- wb->hb_buffer = buffer;
- }
- wb->hb_last_buffer = buffer;
+ hb_buffer_list_append(&wb->list, buffer);
wb->clear_sub_needed = 0;
}
if (debug_608)
/*
* Grab any pending buffer and output them with the EOF on the end
*/
- if (pv->cc608->hb_last_buffer) {
- pv->cc608->hb_last_buffer->next = in;
- *buf_out = pv->cc608->hb_buffer;
- *buf_in = NULL;
- pv->cc608->hb_buffer = NULL;
- pv->cc608->hb_last_buffer = NULL;
- } else {
- *buf_out = in;
- *buf_in = NULL;
- }
+ *buf_in = NULL;
+ hb_buffer_list_append(&pv->cc608->list, in);
+ *buf_out = hb_buffer_list_clear(&pv->cc608->list);
return HB_WORK_DONE;
}
/*
* If there is one waiting then pass it on
*/
- *buf_out = pv->cc608->hb_buffer;
-
- pv->cc608->hb_buffer = NULL;
- pv->cc608->hb_last_buffer = NULL;
-
+ *buf_out = hb_buffer_list_clear(&pv->cc608->list);
return HB_WORK_OK;
}
int new_sentence;
int new_channel;
int in_xds_mode;
+ hb_buffer_list_t list;
hb_buffer_t *hb_buffer;
hb_buffer_t *hb_last_buffer;
uint64_t last_pts;
hb_work_private_t * pv = w->private_data;
hb_buffer_t *in = *buf_in;
hb_buffer_t *buf = NULL;
+ hb_buffer_list_t list;
+ hb_buffer_list_clear(&list);
if (in->s.flags & HB_BUF_FLAG_EOF)
{
/* EOF on input stream - send it downstream & say that we're done */
pv->sequence = in->sequence;
- /* if we have a frame to finish, add enough data from this buf to finish it */
- if ( pv->size )
+ // if we have a frame to finish, add enough data from this buf
+ // to finish it
+ if (pv->size)
{
- memcpy( pv->frame + pv->pos, in->data + 6, pv->size - pv->pos );
+ memcpy(pv->frame + pv->pos, in->data + 6, pv->size - pv->pos);
buf = Decode( w );
+ hb_buffer_list_append(&list, buf);
}
- *buf_out = buf;
/* save the (rest of) data from this buf in our frame buffer */
lpcmInfo( w, in );
int amt = in->size - off;
pv->pos = amt;
memcpy( pv->frame, in->data + off, amt );
- if ( amt >= pv->size )
+ if (amt >= pv->size)
{
- if ( buf )
- {
- buf->next = Decode( w );
- }
- else
- {
- *buf_out = Decode( w );
- }
+ buf = Decode( w );
+ hb_buffer_list_append(&list, buf);
pv->size = 0;
}
+
+ *buf_out = hb_buffer_list_clear(&list);
return HB_WORK_OK;
}
{
hb_filter_private_t * pv = filter->private_data;
hb_buffer_t * in = *buf_in;
- hb_buffer_t * last = NULL, * out = NULL;
+ hb_buffer_list_t list;
+ hb_buffer_list_clear(&list);
if (in->s.flags & HB_BUF_FLAG_EOF)
{
*buf_out = in;
pv->is_combed == 0 ||
frame == num_frames - 1)
{
- if ( out == NULL )
- {
- last = out = o_buf[idx];
- }
- else
- {
- last->next = o_buf[idx];
- last = last->next;
- }
- last->next = NULL;
+ /* Copy buffered settings to output buffer settings */
+ o_buf[idx]->s = pv->ref[1]->s;
+
+ o_buf[idx]->next = NULL;
+ hb_buffer_list_append(&list, o_buf[idx]);
// Indicate that buffer was consumed
o_buf[idx] = NULL;
- /* Copy buffered settings to output buffer settings */
- last->s = pv->ref[1]->s;
idx ^= 1;
if ((pv->mode & MODE_MASK) && pv->spatial_metric >= 0 )
((pv->mode & MODE_MASK) && (pv->mode & MODE_GAMMA)) ||
pv->is_combed)
{
- apply_mask(pv, last);
+ apply_mask(pv, hb_buffer_list_tail(&list));
}
}
}
the duration of the saved timestamps. */
if ((pv->mode & MODE_BOB) && pv->is_combed)
{
- out->s.stop -= (out->s.stop - out->s.start) / 2LL;
- last->s.start = out->s.stop;
- last->s.new_chap = 0;
+ hb_buffer_t *first = hb_buffer_list_head(&list);
+ hb_buffer_t *second = hb_buffer_list_tail(&list);
+ first->s.stop -= (first->s.stop - first->s.start) / 2LL;
+ second->s.start = first->s.stop;
+ second->s.new_chap = 0;
}
- *buf_out = out;
-
+ *buf_out = hb_buffer_list_clear(&list);
return HB_FILTER_OK;
}
// packet until we have processed several packets. So we cache
// all the packets we see until libav returns a subtitle with
// the information we need.
- hb_buffer_t * list_pass_buffer;
- hb_buffer_t * last_pass_buffer;
+ hb_buffer_list_t list_pass;
// It is possible for multiple subtitles to be enncapsulated in
// one packet. This won't happen for PGS subs, but may for other
// types of subtitles. Since I plan to generalize this code to handle
// other than PGS, we will need to keep a list of all subtitles seen
// while parsing an input packet.
- hb_buffer_t * list_buffer;
- hb_buffer_t * last_buffer;
+ hb_buffer_list_t list;
// XXX: we may occasionally see subtitles with broken timestamps
// while this should really get fixed elsewhere,
// dropping subtitles should be avoided as much as possible
pv = calloc( 1, sizeof( hb_work_private_t ) );
w->private_data = pv;
+ hb_buffer_list_clear(&pv->list);
+ hb_buffer_list_clear(&pv->list_pass);
pv->discard_subtitle = 1;
pv->seen_forced_sub = 0;
pv->last_pts = 0;
if (in->s.flags & HB_BUF_FLAG_EOF)
{
/* EOF on input stream - send it downstream & say that we're done */
- if ( pv->list_buffer == NULL )
- {
- pv->list_buffer = pv->last_buffer = in;
- }
- else
- {
- pv->last_buffer->next = in;
- }
*buf_in = NULL;
- *buf_out = pv->list_buffer;
- pv->list_buffer = NULL;
+ hb_buffer_list_append(&pv->list, in);
+ *buf_out = hb_buffer_list_clear(&pv->list);
return HB_WORK_DONE;
}
{
// Append to buffer list. It will be sent to fifo after we determine
// if this is a packet we need.
- if ( pv->list_pass_buffer == NULL )
- {
- pv->list_pass_buffer = pv->last_pass_buffer = in;
- }
- else
- {
- pv->last_pass_buffer->next = in;
- pv->last_pass_buffer = in;
- }
+ hb_buffer_list_append(&pv->list_pass, in);
+
// We are keeping the buffer, so prevent the filter loop from
// deleting it.
*buf_in = NULL;
//
// If passthru, create an empty subtitle.
// Also, flag an empty subtitle for subtitle RENDER.
- make_empty_pgs(pv->list_pass_buffer);
+ make_empty_pgs(hb_buffer_list_head(&pv->list_pass));
clear_subtitle = 1;
}
// is the subtitle forced?
if ( w->subtitle->config.dest == PASSTHRUSUB &&
hb_subtitle_can_pass( PGSSUB, pv->job->mux ) )
{
- /* PGS subtitles are spread across multiple packets (1 per segment).
- * In the MKV container, all segments are found in the same packet
- * (this is expected by some devices, such as the WD TV Live).
- * So if there are multiple packets, merge them. */
- if (pv->list_pass_buffer->next == NULL)
+ /* PGS subtitles are spread across multiple packets,
+ * 1 per segment.
+ *
+ * In the MKV container, all segments are found in the same
+ * packet (this is expected by some devices, such as the
+ * WD TV Live). So if there are multiple packets,
+ * merge them. */
+ if (hb_buffer_list_count(&pv->list_pass) == 1)
{
// packets already merged (e.g. MKV sources)
- out = pv->list_pass_buffer;
- pv->list_pass_buffer = NULL;
+ out = hb_buffer_list_clear(&pv->list_pass);
}
else
{
uint8_t * data;
hb_buffer_t * b;
- b = pv->list_pass_buffer;
+ b = hb_buffer_list_head(&pv->list_pass);
while (b != NULL)
{
size += b->size;
out = hb_buffer_init( size );
data = out->data;
- b = pv->list_pass_buffer;
+ b = hb_buffer_list_head(&pv->list_pass);
while (b != NULL)
{
- memcpy( data, b->data, b->size );
+ memcpy(data, b->data, b->size);
data += b->size;
b = b->next;
}
- hb_buffer_close( &pv->list_pass_buffer );
+ hb_buffer_list_close(&pv->list_pass);
out->s = in->s;
out->sequence = in->sequence;
alpha += out->plane[3].stride;
}
}
- if ( pv->list_buffer == NULL )
- {
- pv->list_buffer = pv->last_buffer = out;
- }
- else
- {
- pv->last_buffer->next = out;
- pv->last_buffer = out;
- }
+ hb_buffer_list_append(&pv->list, out);
out = NULL;
}
else
out->f.height = 0;
}
}
- if ( pv->list_buffer == NULL )
- {
- pv->list_buffer = pv->last_buffer = out;
- }
- else
- {
- pv->last_buffer->next = out;
- }
- while (pv->last_buffer && pv->last_buffer->next)
- {
- pv->last_buffer = pv->last_buffer->next;
- }
+ hb_buffer_list_append(&pv->list, out);
}
- else if ( has_subtitle )
+ else if (has_subtitle)
{
- hb_buffer_close( &pv->list_pass_buffer );
- pv->list_pass_buffer = NULL;
+ hb_buffer_list_close(&pv->list_pass);
}
- if ( has_subtitle )
+ if (has_subtitle)
{
avsubtitle_free(&subtitle);
}
} while (avp.size > 0);
- *buf_out = pv->list_buffer;
- pv->list_buffer = NULL;
+ *buf_out = hb_buffer_list_clear(&pv->list);
return HB_WORK_OK;
}
hb_buffer_realloc(in, ++in->size);
in->data[in->size - 1] = '\0';
- hb_buffer_t *out_list = NULL;
- hb_buffer_t **nextPtr = &out_list;
+ hb_buffer_list_t list;
+ hb_buffer_t *buf;
+ hb_buffer_list_clear(&list);
const char *EOL = "\r\n";
char *curLine, *curLine_parserData;
for ( curLine = strtok_r( (char *) in->data, EOL, &curLine_parserData );
continue;
// Decode an individual SSA line
- hb_buffer_t *out;
- out = ssa_decode_line_to_mkv_ssa(w, (uint8_t *)curLine, strlen(curLine), in->sequence);
- if ( out == NULL )
- continue;
-
- // Append 'out' to 'out_list'
- *nextPtr = out;
- nextPtr = &out->next;
+ buf = ssa_decode_line_to_mkv_ssa(w, (uint8_t *)curLine,
+ strlen(curLine), in->sequence);
+ hb_buffer_list_append(&list, buf);
}
// For point-to-point encoding, when the start time of the stream
// such that first output packet's display time aligns with the
// input packet's display time. This should give the correct time
// when point-to-point encoding is in effect.
- if (out_list && out_list->s.start > in->s.start)
+ buf = hb_buffer_list_head(&list);
+ if (buf && buf->s.start > in->s.start)
{
- int64_t slip = out_list->s.start - in->s.start;
- hb_buffer_t *out;
-
- out = out_list;
- while (out)
+ int64_t slip = buf->s.start - in->s.start;
+ while (buf != NULL)
{
- out->s.start -= slip;
- out->s.stop -= slip;
- out = out->next;
+ buf->s.start -= slip;
+ buf->s.stop -= slip;
+ buf = buf->next;
}
}
- return out_list;
+ return hb_buffer_list_clear(&list);
}
/*
int ii;
hb_buffer_t *dst, *src;
+ hb_buffer_list_t list;
+ hb_buffer_list_clear(&list);
if (in != NULL)
{
dst = hb_frame_buffer_init(in->f.fmt, in->f.width, in->f.height);
taskset_cycle( &pv->deint_taskset );
}
- hb_buffer_t *first = NULL, *last = NULL;
for (ii = 0; ii < pv->deint_nsegs; ii++)
{
src = pv->deint_arguments[ii].src;
dst = pv->deint_arguments[ii].dst;
pv->deint_arguments[ii].src = NULL;
pv->deint_arguments[ii].dst = NULL;
- if (first == NULL)
- {
- first = dst;
- }
- if (last != NULL)
- {
- last->next = dst;
- }
- last = dst;
+ hb_buffer_list_append(&list, dst);
dst->s = src->s;
hb_buffer_close(&src);
}
- if (in == NULL)
- {
- // Flushing final buffers. Append EOS marker buffer.
- dst = hb_buffer_eof_init();
- if (first == NULL)
- {
- first = dst;
- }
- else
- {
- last->next = dst;
- }
- }
pv->deint_nsegs = 0;
- return first;
+ return hb_buffer_list_clear(&list);
}
static int hb_deinterlace_init( hb_filter_object_t * filter,
{
hb_filter_private_t * pv = filter->private_data;
hb_buffer_t * in = *buf_in;
- hb_buffer_t * last = NULL, * out = NULL;
+ hb_buffer_list_t list;
+ *buf_in = NULL;
+ hb_buffer_list_clear(&list);
if (in->s.flags & HB_BUF_FLAG_EOF)
{
- *buf_out = in;
- *buf_in = NULL;
if( !( pv->yadif_mode & MODE_YADIF_ENABLE ) )
{
// Flush final frames
- *buf_out = deint_fast(pv, NULL);
+ hb_buffer_list_append(&list, deint_fast(pv, NULL));
}
+ hb_buffer_list_append(&list, in);
+
+ *buf_out = hb_buffer_list_clear(&list);
return HB_FILTER_DONE;
}
- /* Use libavcodec deinterlace if yadif_mode < 0 */
+ /* Use fast deinterlace if yadif_mode < 0 */
if( !( pv->yadif_mode & MODE_YADIF_ENABLE ) )
{
- *buf_in = NULL;
*buf_out = deint_fast(pv, in);
return HB_FILTER_OK;
}
/* Store current frame in yadif cache */
- *buf_in = NULL;
yadif_store_ref(pv, in);
// yadif requires 3 buffers, prev, cur, and next. For the first
if (o_buf[idx] == NULL)
{
- o_buf[idx] = hb_frame_buffer_init(in->f.fmt, in->f.width, in->f.height);
+ o_buf[idx] = hb_frame_buffer_init(in->f.fmt,
+ in->f.width, in->f.height);
}
yadif_filter(pv, o_buf[idx], parity, tff);
// else, add only final frame
if (( pv->yadif_mode & MODE_YADIF_BOB ) || frame == num_frames - 1)
{
- if ( out == NULL )
- {
- last = out = o_buf[idx];
- }
- else
- {
- last->next = o_buf[idx];
- last = last->next;
- }
- last->next = NULL;
+ /* Copy buffered settings to output buffer settings */
+ o_buf[idx]->s = pv->yadif_ref[1]->s;
+ o_buf[idx]->next = NULL;
+ hb_buffer_list_append(&list, o_buf[idx]);
// Indicate that buffer was consumed
o_buf[idx] = NULL;
-
- /* Copy buffered settings to output buffer settings */
- last->s = pv->yadif_ref[1]->s;
idx ^= 1;
}
}
* timestamps. */
if (pv->yadif_mode & MODE_YADIF_BOB)
{
- out->s.stop -= (out->s.stop - out->s.start) / 2LL;
- last->s.start = out->s.stop;
- last->s.new_chap = 0;
+ hb_buffer_t *first = hb_buffer_list_head(&list);
+ hb_buffer_t *second = hb_buffer_list_tail(&list);
+ first->s.stop -= (first->s.stop - first->s.start) / 2LL;
+ second->s.start = first->s.stop;
+ second->s.new_chap = 0;
}
- *buf_out = out;
-
+ *buf_out = hb_buffer_list_clear(&list);
return HB_FILTER_OK;
}
/* Basic MPEG demuxer */
-void hb_demux_dvd_ps( hb_buffer_t * buf, hb_list_t * list_es, hb_psdemux_t* state )
+void hb_demux_dvd_ps( hb_buffer_t * buf, hb_buffer_list_t * list_es, hb_psdemux_t* state )
{
hb_buffer_t * buf_es;
int pos = 0;
}
memcpy( buf_es->data, d + pos, pes_packet_end - pos );
- hb_list_add( list_es, buf_es );
+ hb_buffer_list_append(list_es, buf_es);
pos = pes_packet_end;
}
// stripped off and buf has all the info gleaned from them: id is set,
// start contains the pts (if any), renderOffset contains the dts (if any)
// and stop contains the pcr (if it changed).
-void hb_demux_mpeg(hb_buffer_t *buf, hb_list_t *list_es,
+void hb_demux_mpeg(hb_buffer_t *buf, hb_buffer_list_t *list_es,
hb_psdemux_t *state, int tolerance)
{
while ( buf )
hb_buffer_t *tmp = buf->next;
buf->next = NULL;
- hb_list_add( list_es, buf );
+ hb_buffer_list_append(list_es, buf);
buf = tmp;
}
}
-void hb_demux_ts(hb_buffer_t *buf, hb_list_t *list_es, hb_psdemux_t *state)
+void hb_demux_ts(hb_buffer_t *buf, hb_buffer_list_t *list_es, hb_psdemux_t *state)
{
// Distance between PCRs in TS is up to 100ms, but we have seen
// streams that exceed this, so allow up to 300ms.
hb_demux_mpeg(buf, list_es, state, 300);
}
-void hb_demux_ps(hb_buffer_t *buf, hb_list_t *list_es, hb_psdemux_t *state)
+void hb_demux_ps(hb_buffer_t *buf, hb_buffer_list_t *list_es, hb_psdemux_t *state)
{
// Distance between SCRs in PS is up to 700ms
hb_demux_mpeg(buf, list_es, state, 700);
// "null" demuxer (makes a copy of input buf & returns it in list)
// used when the reader for some format includes its own demuxer.
// for example, ffmpeg.
-void hb_demux_null( hb_buffer_t * buf, hb_list_t * list_es, hb_psdemux_t* state )
+void hb_demux_null( hb_buffer_t * buf, hb_buffer_list_t * list_es, hb_psdemux_t* state )
{
while ( buf )
{
hb_buffer_t *tmp = buf->next;
buf->next = NULL;
- hb_list_add( list_es, buf );
+ hb_buffer_list_append(list_es, buf);
buf = tmp;
}
}
struct hb_work_private_s
{
- hb_job_t *job;
- uint32_t frames_in;
- uint32_t frames_out;
- int64_t last_start;
+ hb_job_t * job;
+ uint32_t frames_in;
+ uint32_t frames_out;
+ int64_t last_start;
- hb_qsv_param_t param;
- av_qsv_space enc_space;
- hb_qsv_info_t *qsv_info;
+ hb_qsv_param_t param;
+ av_qsv_space enc_space;
+ hb_qsv_info_t * qsv_info;
- hb_list_t *delayed_chapters;
- int64_t next_chapter_pts;
+ hb_list_t * delayed_chapters;
+ int64_t next_chapter_pts;
#define BFRM_DELAY_MAX 16
- uint32_t *init_delay;
- int bfrm_delay;
- int64_t init_pts[BFRM_DELAY_MAX + 1];
- hb_list_t *list_dts;
+ int * init_delay;
+ int bfrm_delay;
+ int64_t init_pts[BFRM_DELAY_MAX + 1];
+ hb_list_t * list_dts;
- int64_t frame_duration[FRAME_INFO_SIZE];
+ int64_t frame_duration[FRAME_INFO_SIZE];
- int async_depth;
- int max_async_depth;
+ int async_depth;
+ int max_async_depth;
// if encode-only, system memory used
- int is_sys_mem;
- mfxSession mfx_session;
- struct SwsContext *sws_context_to_nv12;
+ int is_sys_mem;
+ mfxSession mfx_session;
+ struct SwsContext * sws_context_to_nv12;
// whether to expect input from VPP or from QSV decode
- int is_vpp_present;
+ int is_vpp_present;
// whether the encoder is initialized
- int init_done;
+ int init_done;
- hb_list_t *delayed_processing;
- hb_list_t *encoded_frames;
+ hb_list_t * delayed_processing;
+ hb_buffer_list_t encoded_frames;
- hb_list_t *loaded_plugins;
+ hb_list_t * loaded_plugins;
};
// used in delayed_chapters list
pv->is_sys_mem = hb_qsv_decode_is_enabled(job) == 0;
pv->qsv_info = hb_qsv_info_get(job->vcodec);
pv->delayed_processing = hb_list_init();
- pv->encoded_frames = hb_list_init();
pv->last_start = INT64_MIN;
+ hb_buffer_list_clear(&pv->encoded_frames);
pv->next_chapter_pts = AV_NOPTS_VALUE;
pv->delayed_chapters = hb_list_init();
}
else
{
- hb_error("encqsvInit: invalid rate control (%d, %d)",
+ hb_error("encqsvInit: invalid rate control (%f, %d)",
job->vquality, job->vbitrate);
return -1;
}
}
hb_list_close(&pv->delayed_chapters);
}
- if (pv->encoded_frames != NULL)
- {
- hb_buffer_t *item;
- while ((item = hb_list_item(pv->encoded_frames, 0)) != NULL)
- {
- hb_list_rem(pv->encoded_frames, item);
- hb_buffer_close(&item);
- }
- hb_list_close(&pv->encoded_frames);
- }
+ hb_buffer_list_close(&pv->encoded_frames);
}
free(pv);
}
/* This can come in handy */
- hb_deep_log(2, "compute_init_delay: %"PRId64" (%d frames)", pv->init_delay[0], pv->bfrm_delay);
+ hb_deep_log(2, "compute_init_delay: %d (%d frames)", pv->init_delay[0], pv->bfrm_delay);
/* The delay only needs to be set once. */
pv->init_delay = NULL;
restore_chapter(pv, buf);
}
- hb_list_add(pv->encoded_frames, buf);
+ hb_buffer_list_append(&pv->encoded_frames, buf);
pv->frames_out++;
return;
return 0;
}
-static hb_buffer_t* link_buffer_list(hb_list_t *list)
-{
- hb_buffer_t *buf, *prev = NULL, *out = NULL;
-
- while ((buf = hb_list_item(list, 0)) != NULL)
- {
- hb_list_rem(list, buf);
-
- if (prev == NULL)
- {
- prev = out = buf;
- }
- else
- {
- prev->next = buf;
- prev = buf;
- }
- }
-
- return out;
-}
-
int encqsvWork(hb_work_object_t *w, hb_buffer_t **buf_in, hb_buffer_t **buf_out)
{
hb_work_private_t *pv = w->private_data;
if (in->s.flags & HB_BUF_FLAG_EOF)
{
qsv_enc_work(pv, NULL, NULL);
- hb_list_add(pv->encoded_frames, in);
- *buf_out = link_buffer_list(pv->encoded_frames);
+ hb_buffer_list_append(&pv->encoded_frames, in);
+ *buf_out = hb_buffer_list_clear(&pv->encoded_frames);
*buf_in = NULL; // don't let 'work_loop' close this buffer
return HB_WORK_DONE;
}
goto fail;
}
- *buf_out = link_buffer_list(pv->encoded_frames);
+ *buf_out = hb_buffer_list_clear(&pv->encoded_frames);
return HB_WORK_OK;
fail:
int frameno_in;
int frameno_out;
- hb_buffer_t * delay_head;
- hb_buffer_t * delay_tail;
+ hb_buffer_list_t delay_list;
int64_t dts_delay;
w->private_data = pv;
pv->job = job;
+ hb_buffer_list_clear(&pv->delay_list);
+
int clock_min, clock_max, clock;
hb_video_framerate_get_limits(&clock_min, &clock_max, &clock);
// This is similar to how x264 generates DTS
static hb_buffer_t * process_delay_list( hb_work_private_t * pv, hb_buffer_t * buf )
{
- if ( pv->job->areBframes )
+ if (pv->job->areBframes)
{
// Has dts_delay been set yet?
- if ( pv->frameno_in <= pv->job->areBframes )
+ hb_buffer_list_append(&pv->delay_list, buf);
+ if (pv->frameno_in <= pv->job->areBframes)
{
// dts_delay not yet set. queue up buffers till it is set.
- if ( pv->delay_tail == NULL )
- {
- pv->delay_head = pv->delay_tail = buf;
- }
- else
- {
- pv->delay_tail->next = buf;
- pv->delay_tail = buf;
- }
return NULL;
}
// We have dts_delay. Apply it to any queued buffers renderOffset
// and return all queued buffers.
- if ( pv->delay_tail == NULL && buf != NULL )
+ buf = hb_buffer_list_head(&pv->delay_list);
+ while (buf != NULL)
{
// Use the cached frame info to get the start time of Nth frame
// Note that start Nth frame != start time this buffer since the
}
else
{
- buf->s.renderOffset =
- get_frame_start(pv, pv->frameno_out - pv->job->areBframes);
- }
- pv->frameno_out++;
- return buf;
- }
- else
- {
- pv->delay_tail->next = buf;
- buf = pv->delay_head;
- while ( buf )
- {
- // Use the cached frame info to get the start time of Nth frame
- // Note that start Nth frame != start time this buffer since the
- // output buffers have rearranged start times.
- if (pv->frameno_out < pv->job->areBframes)
- {
- int64_t start = get_frame_start( pv, pv->frameno_out );
- buf->s.renderOffset = start - pv->dts_delay;
- }
- else
- {
- buf->s.renderOffset = get_frame_start(pv,
+ buf->s.renderOffset = get_frame_start(pv,
pv->frameno_out - pv->job->areBframes);
- }
- buf = buf->next;
- pv->frameno_out++;
}
- buf = pv->delay_head;
- pv->delay_head = pv->delay_tail = NULL;
- return buf;
+ buf = buf->next;
+ pv->frameno_out++;
}
+ buf = hb_buffer_list_clear(&pv->delay_list);
+ return buf;
}
- else if ( buf )
+ else if (buf != NULL)
{
buf->s.renderOffset = buf->s.start;
return buf;
hb_job_t * job = pv->job;
AVFrame * frame;
hb_buffer_t * in = *buf_in, * buf;
+ hb_buffer_list_t list;
char final_flushing_call = !!(in->s.flags & HB_BUF_FLAG_EOF);
- if ( final_flushing_call )
+
+ hb_buffer_list_clear(&list);
+ if (final_flushing_call)
{
- //make a flushing call to encode for codecs that can encode out of order
/* EOF on input - send it downstream & say we're done */
- *buf_in = NULL;
+ // make a flushing call to encode for codecs that can encode
+ // out of order
frame = NULL;
}
else
AVPacket pkt;
int got_packet;
char still_flushing = final_flushing_call;
- hb_buffer_t* buf_head = NULL;
- hb_buffer_t* buf_last = NULL;
-
do
{
av_init_packet(&pkt);
buf->s.frametype = convert_pict_type( pv->context->coded_frame->pict_type, pkt.flags & AV_PKT_FLAG_KEY, &buf->s.flags );
buf = process_delay_list( pv, buf );
- if (buf_head == NULL)
- {
- buf_head = buf;
- }
- else
- {
- buf_last->next = buf;
- }
- buf_last = buf;
+ hb_buffer_list_append(&list, buf);
}
/* Write stats */
if (job->pass_id == HB_PASS_ENCODE_1ST &&
fprintf( pv->file, "%s", pv->context->stats_out );
}
} while (still_flushing);
- if (buf_last != NULL && final_flushing_call)
- {
- buf_last->next = in;
- buf = buf_head;
- }
- else if (final_flushing_call)
+
+ if (final_flushing_call)
{
- buf = in;
+ *buf_in = NULL;
+ hb_buffer_list_append(&list, in);
}
}
else
{
- buf = NULL;
-
hb_error( "encavcodec: codec context has uninitialized codec; skipping frame" );
}
av_frame_free( &frame );
- *buf_out = buf;
-
+ *buf_out = hb_buffer_list_clear(&list);
return final_flushing_call? HB_WORK_DONE : HB_WORK_OK;
}
static hb_buffer_t * Flush( hb_work_object_t * w )
{
- hb_buffer_t *first, *buf, *last;
+ hb_buffer_list_t list;
+ hb_buffer_t *buf;
- first = last = buf = Encode( w );
- while( buf )
+ hb_buffer_list_clear(&list);
+ buf = Encode( w );
+ while (buf != NULL)
{
- last = buf;
- buf->next = Encode( w );
- buf = buf->next;
+ hb_buffer_list_append(&list, buf);
+ buf = Encode( w );
}
- if( last )
- {
- last->next = hb_buffer_eof_init();
- }
- else
- {
- first = hb_buffer_eof_init();
- }
-
- return first;
+ hb_buffer_list_append(&list, hb_buffer_eof_init());
+ return hb_buffer_list_clear(&list);
}
/***********************************************************************
{
hb_work_private_t * pv = w->private_data;
hb_buffer_t * in = *buf_in, * buf;
+ hb_buffer_list_t list;
if (in->s.flags & HB_BUF_FLAG_EOF)
{
hb_list_add( pv->list, in );
*buf_in = NULL;
- *buf_out = buf = Encode( w );
-
- while ( buf )
+ hb_buffer_list_clear(&list);
+ buf = Encode( w );
+ while (buf != NULL)
{
- buf->next = Encode( w );
- buf = buf->next;
+ hb_buffer_list_append(&list, buf);
+ buf = Encode( w );
}
+ *buf_out = hb_buffer_list_clear(&list);
return HB_WORK_OK;
}
hb_audio_t * audio = w->audio;
hb_buffer_t * in = *buf_in;
hb_buffer_t * buf;
+ hb_buffer_list_t list;
+ *buf_in = NULL;
+ hb_buffer_list_clear(&list);
if (in->s.flags & HB_BUF_FLAG_EOF)
{
/* EOF on input - send it downstream & say we're done */
{
hb_buffer_close( &buf );
}
-
- // Add the flushed data
- *buf_out = buf;
-
+ hb_buffer_list_append(&list, buf);
// Add the eof
- if ( buf )
- {
- buf->next = in;
- }
- else
- {
- *buf_out = in;
- }
+ hb_buffer_list_append(&list, in);
- *buf_in = NULL;
+ *buf_out = hb_buffer_list_clear(&list);
return HB_WORK_DONE;
}
- hb_list_add( pv->list, *buf_in );
- *buf_in = NULL;
-
- *buf_out = buf = Encode( w );
+ hb_list_add(pv->list, in);
- while( buf )
+ buf = Encode( w );
+ while (buf)
{
- buf->next = Encode( w );
- buf = buf->next;
+ hb_buffer_list_append(&list, buf);
+ buf = Encode( w );
}
+ *buf_out = hb_buffer_list_clear(&list);
return HB_WORK_OK;
}
hb_work_private_t * pv = w->private_data;
hb_buffer_t * in = *buf_in;
hb_buffer_t * buf;
+ hb_buffer_list_t list;
*buf_in = NULL;
+ hb_buffer_list_clear(&list);
if (in->s.flags & HB_BUF_FLAG_EOF)
{
/* EOF on input - send it downstream & say we're done */
*buf_out = in;
- return HB_WORK_DONE;
+ return HB_WORK_DONE;
}
- hb_list_add( pv->list, in );
+ hb_list_add(pv->list, in);
- *buf_out = buf = Encode( w );
- while( buf )
+ buf = Encode( w );
+ while (buf)
{
- buf->next = Encode( w );
- buf = buf->next;
+ hb_buffer_list_append(&list, buf);
+ buf = Encode( w );
}
+ *buf_out = hb_buffer_list_clear(&list);
return HB_WORK_OK;
}
x264_picture_t pic_in;
uint8_t * grey_data;
- uint32_t frames_in;
- uint32_t frames_out;
int64_t last_stop; // Debugging - stop time of previous input frame
hb_list_t *delayed_chapters;
x264_picture_t pic_out;
int i_nal;
x264_nal_t *nal;
- hb_buffer_t *last_buf = NULL;
+ hb_buffer_list_t list;
+ hb_buffer_list_clear(&list);
+
+ // flush delayed frames
while ( x264_encoder_delayed_frames( pv->x264 ) )
{
x264_encoder_encode( pv->x264, &nal, &i_nal, NULL, &pic_out );
break;
hb_buffer_t *buf = nal_encode( w, &pic_out, i_nal, nal );
- if ( buf )
- {
- ++pv->frames_out;
- if ( last_buf == NULL )
- *buf_out = buf;
- else
- last_buf->next = buf;
- last_buf = buf;
- }
+ hb_buffer_list_append(&list, buf);
}
- // Flushed everything - add the eof to the end of the chain.
- if ( last_buf == NULL )
- *buf_out = in;
- else
- last_buf->next = in;
+ // add the EOF to the end of the chain
+ hb_buffer_list_append(&list, in);
+ *buf_out = hb_buffer_list_clear(&list);
*buf_in = NULL;
return HB_WORK_DONE;
}
// Not EOF - encode the packet & wrap it in a NAL
- ++pv->frames_in;
- ++pv->frames_out;
*buf_out = x264_encode( w, in );
return HB_WORK_OK;
}
uint32_t nnal;
x265_nal *nal;
x265_picture pic_out;
- hb_buffer_t *last_buf = NULL;
+ hb_buffer_list_t list;
+
+ hb_buffer_list_clear(&list);
// flush delayed frames
while (x265_encoder_encode(pv->x265, &nal, &nnal, NULL, &pic_out) > 0)
{
hb_buffer_t *buf = nal_encode(w, &pic_out, nal, nnal);
- if (buf != NULL)
- {
- if (last_buf == NULL)
- {
- *buf_out = buf;
- }
- else
- {
- last_buf->next = buf;
- }
- last_buf = buf;
- }
+ hb_buffer_list_append(&list, buf);
}
-
// add the EOF to the end of the chain
- if (last_buf == NULL)
- {
- *buf_out = in;
- }
- else
- {
- last_buf->next = in;
- }
+ hb_buffer_list_append(&list, in);
+ *buf_out = hb_buffer_list_clear(&list);
*buf_in = NULL;
return HB_WORK_DONE;
}
int new_chap;
} hb_psdemux_t;
-typedef void (*hb_muxer_t)(hb_buffer_t *, hb_list_t *, hb_psdemux_t*);
+typedef void (*hb_muxer_t)(hb_buffer_t *, hb_buffer_list_t *, hb_psdemux_t*);
-void hb_demux_ps( hb_buffer_t * ps_buf, hb_list_t * es_list, hb_psdemux_t * );
-void hb_demux_ts( hb_buffer_t * ps_buf, hb_list_t * es_list, hb_psdemux_t * );
-void hb_demux_null( hb_buffer_t * ps_buf, hb_list_t * es_list, hb_psdemux_t * );
+void hb_demux_ps(hb_buffer_t * ps_buf, hb_buffer_list_t * es_list, hb_psdemux_t *);
+void hb_demux_ts(hb_buffer_t * ps_buf, hb_buffer_list_t * es_list, hb_psdemux_t *);
+void hb_demux_null(hb_buffer_t * ps_buf, hb_buffer_list_t * es_list, hb_psdemux_t *);
extern const hb_muxer_t hb_demux[];
pv->next_frame -= pv->thread_count;
// Collect results from taskset
- hb_buffer_t *last = NULL, *out = NULL;
+ hb_buffer_list_t list;
+ hb_buffer_list_clear(&list);
for (int t = 0; t < pv->thread_count; t++)
{
- if (out == NULL)
- {
- out = last = pv->thread_data[t]->out;
- }
- else
- {
- last->next = pv->thread_data[t]->out;
- last = pv->thread_data[t]->out;
- }
+ hb_buffer_list_append(&list, pv->thread_data[t]->out);
}
- return out;
+ return hb_buffer_list_clear(&list);
}
static hb_buffer_t * nlmeans_filter_flush(hb_filter_private_t *pv)
{
- hb_buffer_t *out = NULL, *last = NULL;
+ hb_buffer_list_t list;
+ hb_buffer_list_clear(&list);
for (int f = 0; f < pv->next_frame; f++)
{
Frame *frame = &pv->frame[f];
pv->diff_max[c]);
}
buf->s = frame->s;
- if (out == NULL)
- {
- out = last = buf;
- }
- else
- {
- last->next = buf;
- last = buf;
- }
+ hb_buffer_list_append(&list, buf);
}
- return out;
+ return hb_buffer_list_clear(&list);
}
static int nlmeans_work(hb_filter_object_t *filter,
if (in->s.flags & HB_BUF_FLAG_EOF)
{
- hb_buffer_t *last;
+ hb_buffer_list_t list;
+ hb_buffer_t *buf;
+
// Flush buffered frames
- last = *buf_out = nlmeans_filter_flush(pv);
+ buf = nlmeans_filter_flush(pv);
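+ // hb_buffer_list_set() replaces the whole list state (head, tail, count)
+ // with the flushed chain, so no prior hb_buffer_list_clear() is needed.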
+ hb_buffer_list_set(&list, buf);
+
+ // And terminate the buffer list with an EOF buffer
+ hb_buffer_list_append(&list, in);
+ *buf_out = hb_buffer_list_clear(&list);
- // And terminate the buffer list with a null buffer
- if (last != NULL)
- {
- while (last->next != NULL)
- last = last->next;
- last->next = in;
- }
- else
- {
- *buf_out = in;
- }
*buf_in = NULL;
return HB_FILTER_DONE;
}
struct hb_filter_private_s
{
- hb_job_t *job;
- hb_list_t *list;
-
- int width_in;
- int height_in;
- int pix_fmt;
- int pix_fmt_out;
- int width_out;
- int height_out;
- int crop[4];
- int deinterlace;
- int is_frc_used;
+ hb_job_t * job;
+ hb_buffer_list_t list;
+
+ int width_in;
+ int height_in;
+ int pix_fmt;
+ int pix_fmt_out;
+ int width_out;
+ int height_out;
+ int crop[4];
+ int deinterlace;
+ int is_frc_used;
// set during init, used to configure input surfaces' "area of interest"
- mfxU16 CropX;
- mfxU16 CropY;
- mfxU16 CropH;
- mfxU16 CropW;
+ mfxU16 CropX;
+ mfxU16 CropY;
+ mfxU16 CropH;
+ mfxU16 CropW;
- av_qsv_space *vpp_space;
+ av_qsv_space * vpp_space;
// FRC param(s)
- mfxExtVPPFrameRateConversion frc_config;
+ mfxExtVPPFrameRateConversion frc_config;
};
static int hb_qsv_filter_init( hb_filter_object_t * filter,
filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
hb_filter_private_t * pv = filter->private_data;
- pv->list = hb_list_init();
+ hb_buffer_list_clear(&pv->list);
// list of init params provided at work.c:~700
pv->width_in = init->geometry.width;
pv->height_in = init->geometry.height;
// closing the commong stuff
av_qsv_context_clean(qsv);
}
- hb_list_close(&pv->list);
+ hb_buffer_list_close(&pv->list);
free( pv );
filter->private_data = NULL;
}
return HB_FILTER_OK;
}
- while(1){
+ while(1)
+ {
int ret = filter_init(qsv,pv);
if(ret >= 2)
av_qsv_sleep(1);
if (in->s.flags & HB_BUF_FLAG_EOF)
{
- while(1){
+ while(1)
+ {
sts = process_frame(in->qsv_details.qsv_atom, qsv, pv);
if(sts)
- hb_list_add(pv->list,in);
+ hb_buffer_list_append(&pv->list, in);
else
break;
}
- hb_list_add( pv->list, in );
- *buf_out = link_buf_list( pv );
+ hb_buffer_list_append(&pv->list, in);
+ *buf_out = hb_buffer_list_clear(&pv->list);
return HB_FILTER_DONE;
}
sts = process_frame(in->qsv_details.qsv_atom, qsv, pv);
- if(sts){
- hb_list_add(pv->list,in);
+ if(sts)
+ {
+ hb_buffer_list_append(&pv->list, in);
}
- if( hb_list_count(pv->list) ){
- *buf_out = hb_list_item(pv->list,0);
- out = *buf_out;
- if(pv->is_frc_used && out)
+ out = *buf_out = hb_buffer_list_rem_head(&pv->list);
+ if (pv->is_frc_used && out != NULL)
+ {
+ if (out->qsv_details.qsv_atom)
{
- if(out->qsv_details.qsv_atom){
- av_qsv_stage* stage = av_qsv_get_last_stage( out->qsv_details.qsv_atom );
- mfxFrameSurface1 *work_surface = stage->out.p_surface;
-
- av_qsv_wait_on_sync( qsv,stage );
-
- av_qsv_space *qsv_vpp = pv->vpp_space;
- int64_t duration = ((double)qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD/(double)qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN ) * 90000.;
- out->s.start = work_surface->Data.TimeStamp;
- out->s.stop = work_surface->Data.TimeStamp + duration;
- }
+ av_qsv_stage* stage;
+ mfxFrameSurface1 *work_surface;
+ int64_t duration;
+ av_qsv_space *qsv_vpp;
+
+ stage = av_qsv_get_last_stage(out->qsv_details.qsv_atom);
+ work_surface = stage->out.p_surface;
+
+ av_qsv_wait_on_sync( qsv,stage );
+
+ qsv_vpp = pv->vpp_space;
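+ // output frame duration in 90 kHz clock ticks
+ // (FrameRateExtD / FrameRateExtN is seconds per frame)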
+ duration =
+ ((double)qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD /
+ (double)qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN ) *
+ 90000.;
+ out->s.start = work_surface->Data.TimeStamp;
+ out->s.stop = work_surface->Data.TimeStamp + duration;
}
- hb_list_rem(pv->list,*buf_out);
}
- else
- *buf_out = NULL;
return HB_FILTER_OK;
}
-// see devavcode.c
-hb_buffer_t *link_buf_list( hb_filter_private_t *pv )
-{
- hb_buffer_t *head = hb_list_item( pv->list, 0 );
-
- if ( head )
- {
- hb_list_rem( pv->list, head );
- hb_buffer_t *last = head, *buf;
- while ( ( buf = hb_list_item( pv->list, 0 ) ) != NULL )
- {
- hb_list_rem( pv->list, buf );
- last->next = buf;
- last = buf;
- }
- }
- return head;
-}
-
#endif // USE_QSV
#ifndef QSV_FILTER_H
#define QSV_FILTER_H
-hb_buffer_t *link_buf_list( hb_filter_private_t *pv );
void qsv_filter_close( av_qsv_context* qsv, AV_QSV_STAGE_TYPE vpp_type );
#endif // QSV_FILTER_H
#include "msdk/mfxplugin.h"
-extern hb_buffer_t *link_buf_list( hb_filter_private_t *pv );
-
struct qsv_filter_task_s;
typedef struct{
**********************************************************************/
void ReadLoop( void * _w )
{
- hb_work_object_t * w = _w;
+ hb_work_object_t * w = _w;
hb_work_private_t * r = w->private_data;
- hb_fifo_t ** fifos;
- hb_buffer_t * buf = NULL;
- hb_list_t * list;
- int n;
- int chapter = -1;
- int chapter_end = r->job->chapter_end;
- uint8_t done = 0;
+ hb_fifo_t ** fifos;
+ hb_buffer_t * buf = NULL;
+ hb_buffer_list_t list;
+ int n;
+ int chapter = -1;
+ int chapter_end = r->job->chapter_end;
+ uint8_t done = 0;
if (r->bd)
{
hb_stream_seek_chapter( r->stream, start );
}
- list = hb_list_init();
+ hb_buffer_list_clear(&list);
while(!*r->die && !r->job->done && !done)
{
}
}
- (hb_demux[r->title->demuxer])( buf, list, &r->demux );
+ (hb_demux[r->title->demuxer])(buf, &list, &r->demux);
- while( ( buf = hb_list_item( list, 0 ) ) )
+ while ((buf = hb_buffer_list_rem_head(&list)) != NULL)
{
- hb_list_rem( list, buf );
fifos = GetFifoForId( r, buf->s.id );
if (fifos && r->stream && r->start_found == 2 )
}
}
- hb_list_empty( &list );
+ hb_buffer_list_close(&list);
hb_log( "reader: done. %d scr changes", r->demux.scr_changes );
}
**********************************************************************/
static int DecodePreviews( hb_scan_t * data, hb_title_t * title, int flush )
{
- int i, npreviews = 0, abort = 0;
- hb_buffer_t * buf, * buf_es;
- hb_list_t * list_es;
- int progressive_count = 0;
- int pulldown_count = 0;
- int doubled_frame_count = 0;
- int interlaced_preview_count = 0;
- int frame_wait = 0;
- int cc_wait = 10;
- int frames;
- hb_stream_t * stream = NULL;
- info_list_t * info_list = calloc( data->preview_count+1, sizeof(*info_list) );
+ int i, npreviews = 0, abort = 0;
+ hb_buffer_t * buf, * buf_es;
+ hb_buffer_list_t list_es;
+ int progressive_count = 0;
+ int pulldown_count = 0;
+ int doubled_frame_count = 0;
+ int interlaced_preview_count = 0;
+ int frame_wait = 0;
+ int cc_wait = 10;
+ int frames;
+ hb_stream_t * stream = NULL;
+ info_list_t * info_list;
+
+ info_list = calloc(data->preview_count+1, sizeof(*info_list));
crop_record_t *crops = crop_record_init( data->preview_count );
- list_es = hb_list_init();
+ hb_buffer_list_clear(&list_es);
if( data->batch )
{
total_read += buf->size;
packets++;
- (hb_demux[title->demuxer])(buf, list_es, 0 );
+ (hb_demux[title->demuxer])(buf, &list_es, 0 );
- while( ( buf_es = hb_list_item( list_es, 0 ) ) )
+ while ((buf_es = hb_buffer_list_rem_head(&list_es)) != NULL)
{
- hb_list_rem( list_es, buf_es );
if( buf_es->s.id == title->video_id && vid_buf == NULL )
{
vid_decoder->work( vid_decoder, &buf_es, &vid_buf );
progressive_count++;
}
- while( ( buf_es = hb_list_item( list_es, 0 ) ) )
- {
- hb_list_rem( list_es, buf_es );
- hb_buffer_close( &buf_es );
- }
+ hb_buffer_list_close(&list_es);
/* Check preview for interlacing artifacts */
if( hb_detect_comb( vid_buf, 10, 30, 9, 10, 30, 9 ) )
crop_record_free( crops );
free( info_list );
- while( ( buf_es = hb_list_item( list_es, 0 ) ) )
- {
- hb_list_rem( list_es, buf_es );
- hb_buffer_close( &buf_es );
- }
- hb_list_close( &list_es );
+ hb_buffer_list_close(&list_es);
+
if (data->bd)
hb_bd_stop( data->bd );
if (data->dvd)
static hb_buffer_t * generate_output_data(hb_stream_t *stream, int curstream)
{
- hb_buffer_t *buf = NULL, *first = NULL;
+ hb_buffer_list_t list;
+ hb_buffer_t *buf = NULL;
+ hb_buffer_list_clear(&list);
hb_ts_stream_t * ts_stream = &stream->ts.list[curstream];
hb_buffer_t * b = ts_stream->buf;
if (!ts_stream->pes_info_valid)
// we want the whole TS stream including all substreams.
// DTS-HD is an example of this.
- if (first == NULL)
- {
- first = buf = hb_buffer_init(es_size);
- }
- else
- {
- hb_buffer_t *tmp = hb_buffer_init(es_size);
- buf->next = tmp;
- buf = tmp;
- }
+ buf = hb_buffer_init(es_size);
+ hb_buffer_list_append(&list, buf);
buf->s.id = get_id(pes_stream);
buf->s.type = stream_kind_to_buf_type(pes_stream->stream_kind);
}
b->size = 0;
ts_stream->packet_offset = 0;
- return first;
+ return hb_buffer_list_clear(&list);
}
static void hb_ts_stream_append_pkt(hb_stream_t *stream, int idx,
static hb_buffer_t * flush_ts_streams( hb_stream_t *stream )
{
- hb_buffer_t *out, **last;
+ hb_buffer_list_t list;
+ hb_buffer_t *buf;
int ii;
- last = &out;
+ hb_buffer_list_clear(&list);
for (ii = 0; ii < stream->ts.count; ii++)
{
- *last = generate_output_data(stream, ii);
- // generate_output_data can generate 0 or multiple output buffers
- while (*last != NULL)
- last = &(*last)->next;
+ buf = generate_output_data(stream, ii);
+ hb_buffer_list_append(&list, buf);
}
- return out;
+ return hb_buffer_list_clear(&list);
}
/***********************************************************************
*/
int video_index = ts_index_of_video(stream);
int curstream;
- hb_buffer_t *out = NULL;
- hb_buffer_t **last;
+ hb_buffer_t *buf = NULL;
+ hb_buffer_list_t list;
- last = &out;
+ hb_buffer_list_clear(&list);
if (chapter > 0)
{
if (discontinuity)
{
// If there is a discontinuity, flush all data
- *last = flush_ts_streams(stream);
- // flush_ts_streams can generate 0 or multiple output buffers
- while (*last != NULL)
- last = &(*last)->next;
+ buf = flush_ts_streams(stream);
+ hb_buffer_list_append(&list, buf);
}
if (adapt_len > 0)
{
// When we get a new pcr, we flush all data that was
// referenced to the last pcr. This makes it easier
// for reader to resolve pcr discontinuities.
- *last = flush_ts_streams(stream);
- // flush_ts_streams can generate 0 or multiple output buffers
- while (*last != NULL)
- last = &(*last)->next;
+ buf = flush_ts_streams(stream);
+ hb_buffer_list_append(&list, buf);
int64_t pcr;
pcr = ((uint64_t)pkt[ 6] << (33 - 8) ) |
// the video stream DTS for the PCR.
if (!stream->ts.found_pcr && (stream->ts_flags & TS_HAS_PCR))
{
- return out;
+ return hb_buffer_list_clear(&list);
}
// Get continuity
// a PCR when one is needed). The only thing that can
// change in the dup is the PCR which we grabbed above
// so ignore the rest.
- return out;
+ return hb_buffer_list_clear(&list);
}
}
if ( !start && (ts_stream->continuity != -1) &&
(int)continuity,
(ts_stream->continuity + 1) & 0xf );
ts_stream->continuity = continuity;
- return out;
+ return hb_buffer_list_clear(&list);
}
ts_stream->continuity = continuity;
// I ran across a poorly mastered BD that does not properly pad
// the adaptation field and causes parsing errors below if we
// do not exit early here.
- return out;
+ return hb_buffer_list_clear(&list);
}
/* If we get here the packet is valid - process its data */
// we have to ship the old packet before updating the pcr
// since the packet we've been accumulating is referenced
// to the old pcr.
- *last = generate_output_data(stream, curstream);
- // generate_output_data can generate 0 or multiple output buffers
- while (*last != NULL)
- last = &(*last)->next;
+ buf = generate_output_data(stream, curstream);
+ hb_buffer_list_append(&list, buf);
ts_stream->pes_info_valid = 0;
ts_stream->packet_len = 0;
}
{
ts_err( stream, curstream, "missing start code" );
ts_stream->skipbad = 1;
- return out;
+ return hb_buffer_list_clear(&list);
}
// If we were skipping a bad packet, start fresh on this new PES packet
// a DTS or PTS.
if (stream->ts.last_timestamp < 0 && (pes[7] >> 6) == 0)
{
- return out;
+ return hb_buffer_list_clear(&list);
}
if ((pes[7] >> 6) != 0)
{
ts_stream->pes_info.packet_len > 0 &&
ts_stream->packet_len >= ts_stream->pes_info.packet_len + 6)
{
- // generate_output_data can generate 0 or multiple output buffers
- *last = generate_output_data(stream, curstream);
- while (*last != NULL)
- last = &(*last)->next;
+ buf = generate_output_data(stream, curstream);
+ hb_buffer_list_append(&list, buf);
}
- return out;
+ return hb_buffer_list_clear(&list);
}
static hb_buffer_t * hb_ts_stream_decode( hb_stream_t *stream )
typedef struct
{
- int link;
- int merge;
- hb_buffer_t * list_current;
- hb_buffer_t * last;
+ int link;
+ int merge;
+ hb_buffer_list_t list_current;
} subtitle_sanitizer_t;
typedef struct
return ret;
}
-static void InitSubtitle( hb_job_t * job, hb_sync_video_t * sync, int i )
+static void InitSubtitle( hb_job_t * job, hb_sync_video_t * sync, int ii )
{
hb_subtitle_t * subtitle;
- subtitle = hb_list_item( job->list_subtitle, i );
+ subtitle = hb_list_item( job->list_subtitle, ii );
if (subtitle->format == TEXTSUB &&
subtitle->config.dest == PASSTHRUSUB &&
(job->mux & HB_MUX_MASK_MP4))
{
// Merge overlapping subtitles since mpv tx3g does not support them
- sync->subtitle_sanitizer[i].merge = 1;
+ sync->subtitle_sanitizer[ii].merge = 1;
}
// PGS subtitles don't need to be linked because there are explicit
// "clear" subtitle packets that indicate the end time of the
subtitle->source != PGSSUB)
{
// Fill in stop time when it is missing
- sync->subtitle_sanitizer[i].link = 1;
+ sync->subtitle_sanitizer[ii].link = 1;
}
+ hb_buffer_list_clear(&sync->subtitle_sanitizer[ii].list_current);
}
static void CloseSubtitle(hb_sync_video_t * sync, int ii)
{
- hb_buffer_close(&sync->subtitle_sanitizer[ii].list_current);
+ hb_buffer_list_close(&sync->subtitle_sanitizer[ii].list_current);
}
/***********************************************************************
static hb_buffer_t * mergeSubtitles(subtitle_sanitizer_t *sanitizer, int end)
{
- hb_buffer_t *a, *b, *buf, *out = NULL, *last = NULL;
+ hb_buffer_t *a, *b, *buf;
+ hb_buffer_list_t list;
+
+ hb_buffer_list_clear(&list);
do
{
- a = sanitizer->list_current;
- b = a != NULL ? a->next : NULL;
+ a = hb_buffer_list_head(&sanitizer->list_current);
+ if (a == NULL)
+ {
+ break;
+ }
+ b = a->next;
buf = NULL;
- if (a != NULL && b == NULL && end)
+ if (b == NULL && end)
{
- sanitizer->list_current = a->next;
- if (sanitizer->list_current == NULL)
- sanitizer->last = NULL;
- a->next = NULL;
- buf = a;
+ buf = hb_buffer_list_rem_head(&sanitizer->list_current);
}
- else if (a != NULL && a->s.stop != AV_NOPTS_VALUE)
+ else if (a->s.stop != AV_NOPTS_VALUE)
{
if (!sanitizer->merge)
{
- sanitizer->list_current = a->next;
- if (sanitizer->list_current == NULL)
- sanitizer->last = NULL;
- a->next = NULL;
- buf = a;
+ buf = hb_buffer_list_rem_head(&sanitizer->list_current);
}
else if (b != NULL && a->s.stop > b->s.start)
{
// Overlap
if (ABS(a->s.start - b->s.start) <= 18000)
{
+ if (b->s.stop == AV_NOPTS_VALUE && !end)
+ {
+ // To evaluate overlaps, we need the stop times
+ // for a and b
+ break;
+ }
+ a = hb_buffer_list_rem_head(&sanitizer->list_current);
+
// subtitles start within 1/5 second of eachother, merge
- if (a->s.stop > b->s.stop)
+ if (a->s.stop > b->s.stop && b->s.stop != AV_NOPTS_VALUE)
{
// a continues after b, reorder the list and swap
- hb_buffer_t *tmp = a;
- a->next = b->next;
- b->next = a;
- if (sanitizer->last == b)
- {
- sanitizer->last = a;
- }
- a = b;
- b = tmp;
- sanitizer->list_current = a;
+ b = a;
+ a = hb_buffer_list_rem_head(&sanitizer->list_current);
+ hb_buffer_list_prepend(&sanitizer->list_current, b);
}
- a->next = NULL;
b->s.start = a->s.stop;
buf = merge_ssa(a, b);
hb_buffer_close(&a);
a = buf;
- buf = NULL;
- sanitizer->list_current = a;
if (b->s.stop != AV_NOPTS_VALUE &&
ABS(b->s.stop - b->s.start) <= 18000)
{
// b and a completely overlap, remove b
- a->next = b->next;
- b->next = NULL;
- if (sanitizer->last == b)
- {
- sanitizer->last = a;
- }
+ b = hb_buffer_list_rem_head(&sanitizer->list_current);
hb_buffer_close(&b);
}
- else
- {
- a->next = b;
- }
}
else
{
- // a starts before b, output copy of a and
+ // a starts before b, output a copy of a and update a's start
buf = hb_buffer_dup(a);
buf->s.stop = b->s.start;
a->s.start = b->s.start;
}
else if (b != NULL && a->s.stop <= b->s.start)
{
- sanitizer->list_current = a->next;
- if (sanitizer->list_current == NULL)
- sanitizer->last = NULL;
- a->next = NULL;
- buf = a;
+ // a starts and ends before b
+ buf = hb_buffer_list_rem_head(&sanitizer->list_current);
}
}
buf->s.duration = buf->s.stop - buf->s.start;
else
buf->s.duration = AV_NOPTS_VALUE;
- if (last == NULL)
- {
- out = last = buf;
- }
- else
- {
- last->next = buf;
- last = buf;
- }
+ hb_buffer_list_append(&list, buf);
}
- } while (buf != NULL);
+ } while (hb_buffer_list_count(&sanitizer->list_current) >= 2 || end);
- return out;
+ return hb_buffer_list_clear(&list);
}
static hb_buffer_t * sanitizeSubtitle(
if (sub->s.renderOffset != AV_NOPTS_VALUE)
sub->s.renderOffset -= pv->common->video_pts_slip;
- if (sanitizer->last != NULL && sanitizer->last->s.stop == AV_NOPTS_VALUE)
+ hb_buffer_t *last = hb_buffer_list_tail(&sanitizer->list_current);
+ if (last != NULL && last->s.stop == AV_NOPTS_VALUE)
{
- sanitizer->last->s.stop = sub->s.start;
+ last->s.stop = sub->s.start;
}
if (sub->s.start == sub->s.stop)
// of subtitles is not encoded in the stream
hb_buffer_close(&sub);
}
- if (sub != NULL)
- {
- if (sanitizer->last == NULL)
- {
- sanitizer->list_current = sanitizer->last = sub;
- }
- else
- {
- sanitizer->last->next = sub;
- sanitizer->last = sub;
- }
- }
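+ // hb_buffer_list_append() ignores a NULL buffer, so the explicit NULL check is
+ // no longer needed here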
+ hb_buffer_list_append(&sanitizer->list_current, sub);
return mergeSubtitles(sanitizer, 0);
}
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
-
+
#include "hb.h"
struct hb_filter_private_s
}
}
-// insert buffer 'succ' after buffer chain element 'pred'.
-// caller must guarantee that 'pred' and 'succ' are non-null.
-static hb_buffer_t *insert_buffer_in_chain(
- hb_buffer_t *pred,
- hb_buffer_t *succ )
-{
- succ->next = pred->next;
- pred->next = succ;
- return succ;
-}
-
#define DUP_THRESH_SSE 5.0
// Compute the sum of squared errors for a 16x16 block
// times are left alone.
//
-static void adjust_frame_rate( hb_filter_private_t *pv, hb_buffer_t **buf_out )
+static void adjust_frame_rate( hb_filter_private_t *pv, hb_buffer_list_t *list )
{
- hb_buffer_t *out = *buf_out;
+ hb_buffer_t *out = hb_buffer_list_tail(list);
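+ // the caller has already appended the candidate frame to 'list'; we either keep it
+ // (adjusting its timestamps and possibly appending CFR duplicates after it) or pop
+ // it back off the tail and drop it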
- if ( out && out->size > 0 )
+ if (out == NULL || out->size <= 0 )
{
- if ( pv->cfr == 0 )
- {
- ++pv->count_frames;
- pv->out_last_stop = out->s.stop;
- return;
- }
+ return;
+ }
+
+ if ( pv->cfr == 0 )
+ {
+ ++pv->count_frames;
+ pv->out_last_stop = out->s.stop;
+ return;
+ }
+
+ // compute where this frame would stop if the frame rate were constant
+ // (this is our target stopping time for CFR and earliest possible
+ // stopping time for PFR).
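+ // (pv->frame_rate is the duration of one output frame, in the same units as the
+ // buffer timestamps, so this is count_frames + 1 frame durations from time zero.)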
+ double cfr_stop = pv->frame_rate * ( pv->count_frames + 1 );
- // compute where this frame would stop if the frame rate were constant
- // (this is our target stopping time for CFR and earliest possible
- // stopping time for PFR).
- double cfr_stop = pv->frame_rate * ( pv->count_frames + 1 );
+ hb_buffer_t * next = hb_fifo_see( pv->delay_queue );
- hb_buffer_t * next = hb_fifo_see( pv->delay_queue );
+ float next_metric = 0;
+ if( next )
+ next_metric = motion_metric( pv, out, next );
+
+ if( pv->out_last_stop >= out->s.stop )
+ {
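+ // this frame ends at or before the time we have already emitted, so it adds no new
+ // display time; drop it (the metric bookkeeping below keeps cadence detection working)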
+ ++pv->drops;
+ hb_buffer_list_rem_tail(list);
+ hb_buffer_close(&out);
- float next_metric = 0;
- if( next )
- next_metric = motion_metric( pv, out, next );
+ pv->frame_metric = next_metric;
+ if( next_metric > pv->max_metric )
+ pv->max_metric = next_metric;
- if( pv->out_last_stop >= out->s.stop )
+ return;
+ }
+
+ if( out->s.start <= pv->out_last_stop &&
+ out->s.stop > pv->out_last_stop &&
+ next && next->s.stop < cfr_stop )
+ {
+ // This frame starts before the end of the last output
+ // frame and ends after the end of the last output
+ // frame (i.e. it straddles it). Also the next frame
+ // ends before the end of the next output frame. If the
+ // next frame is not a duplicate, and we haven't seen
+ // a changed frame since the last output frame,
+ // then drop this frame.
+ //
+ // This causes us to sync to the pattern of progressive
+ // 23.976 fps content that has been upsampled to
+ // progressive 59.94 fps.
+ if( pv->out_metric > pv->max_metric &&
+ next_metric > pv->max_metric )
{
+ // Pattern: N R R N
+ // o c n
+ // N == new frame
+ // R == repeat frame
+ // o == last output frame
+ // c == current frame
+ // n == next frame
+ // We haven't seen a frame change since the last output
+ // frame and the next frame changes. Use the next frame,
+ // drop this one.
++pv->drops;
- hb_buffer_close( buf_out );
-
pv->frame_metric = next_metric;
- if( next_metric > pv->max_metric )
- pv->max_metric = next_metric;
-
+ pv->max_metric = next_metric;
+ pv->sync_parity = 1;
+ hb_buffer_list_rem_tail(list);
+ hb_buffer_close(&out);
return;
}
-
- if( out->s.start <= pv->out_last_stop &&
- out->s.stop > pv->out_last_stop &&
- next && next->s.stop < cfr_stop )
+ else if( pv->sync_parity &&
+ pv->out_metric < pv->max_metric &&
+ pv->max_metric > pv->frame_metric &&
+ pv->frame_metric < next_metric )
{
- // This frame starts before the end of the last output
- // frame and ends after the end of the last output
- // frame (i.e. it straddles it). Also the next frame
- // ends before the end of the next output frame. If the
- // next frame is not a duplicate, and we haven't seen
- // a changed frame since the last output frame,
- // then drop this frame.
- //
- // This causes us to sync to the pattern of progressive
- // 23.976 fps content that has been upsampled to
- // progressive 59.94 fps.
- if( pv->out_metric > pv->max_metric &&
- next_metric > pv->max_metric )
- {
- // Pattern: N R R N
- // o c n
- // N == new frame
- // R == repeat frame
- // o == last output frame
- // c == current frame
- // n == next frame
- // We haven't seen a frame change since the last output
- // frame and the next frame changes. Use the next frame,
- // drop this one.
- ++pv->drops;
- pv->frame_metric = next_metric;
- pv->max_metric = next_metric;
- pv->sync_parity = 1;
- hb_buffer_close( buf_out );
- return;
- }
- else if( pv->sync_parity &&
- pv->out_metric < pv->max_metric &&
- pv->max_metric > pv->frame_metric &&
- pv->frame_metric < next_metric )
- {
- // Pattern: R N R N
- // o c n
- // N == new frame
- // R == repeat frame
- // o == last output frame
- // c == current frame
- // n == next frame
- // If we see this pattern, we must not use the next
- // frame when straddling the current frame.
- pv->sync_parity = 0;
- }
- else if( pv->sync_parity )
- {
- // The pattern is indeterminate. Continue dropping
- // frames on the same schedule
- ++pv->drops;
- pv->frame_metric = next_metric;
- pv->max_metric = next_metric;
- pv->sync_parity = 1;
- hb_buffer_close( buf_out );
- return;
- }
-
+ // Pattern: R N R N
+ // o c n
+ // N == new frame
+ // R == repeat frame
+ // o == last output frame
+ // c == current frame
+ // n == next frame
+ // If we see this pattern, we must not use the next
+ // frame when straddling the current frame.
+ pv->sync_parity = 0;
+ }
+ else if( pv->sync_parity )
+ {
+ // The pattern is indeterminate. Continue dropping
+ // frames on the same schedule
+ ++pv->drops;
+ pv->frame_metric = next_metric;
+ pv->max_metric = next_metric;
+ pv->sync_parity = 1;
+ hb_buffer_list_rem_tail(list);
+ hb_buffer_close(&out);
+ return;
}
- // this frame has to start where the last one stopped.
- out->s.start = pv->out_last_stop;
+ }
- pv->out_metric = pv->frame_metric;
- pv->frame_metric = next_metric;
- pv->max_metric = next_metric;
+ // this frame has to start where the last one stopped.
+ out->s.start = pv->out_last_stop;
- // at this point we know that this frame doesn't push the average
- // rate over the limit so we just pass it on for PFR. For CFR we're
- // going to return it (with its start & stop times modified) and
- // we may have to dup it.
- ++pv->count_frames;
- if ( pv->cfr > 1 )
+ pv->out_metric = pv->frame_metric;
+ pv->frame_metric = next_metric;
+ pv->max_metric = next_metric;
+
+ // at this point we know that this frame doesn't push the average
+ // rate over the limit so we just pass it on for PFR. For CFR we're
+ // going to return it (with its start & stop times modified) and
+ // we may have to dup it.
+ ++pv->count_frames;
+ if ( pv->cfr > 1 )
+ {
+ // PFR - we're going to keep the frame but may need to
+ // adjust its stop time to meet the average rate constraint.
+ if ( out->s.stop <= cfr_stop )
{
- // PFR - we're going to keep the frame but may need to
- // adjust it's stop time to meet the average rate constraint.
- if ( out->s.stop <= cfr_stop )
- {
- out->s.stop = cfr_stop;
- }
- pv->out_last_stop = out->s.stop;
+ out->s.stop = cfr_stop;
}
- else
+ pv->out_last_stop = out->s.stop;
+ }
+ else
+ {
+ // we're doing CFR so we have to either trim some time from a
+ // buffer that ends too far in the future or, if the buffer is
+ // two or more frame times long, split it into multiple pieces,
+ // each of which is a frame time long.
+ double excess_dur = (double)out->s.stop - cfr_stop;
+ out->s.stop = cfr_stop;
+ pv->out_last_stop = out->s.stop;
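+ // e.g. a buffer that runs 2.5 frame durations past cfr_stop gets two duplicates
+ // appended below, and the remaining half frame of excess is simply trimmed away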
+ for ( ; excess_dur >= pv->frame_rate; excess_dur -= pv->frame_rate )
{
- // we're doing CFR so we have to either trim some time from a
- // buffer that ends too far in the future or, if the buffer is
- // two or more frame times long, split it into multiple pieces,
- // each of which is a frame time long.
- double excess_dur = (double)out->s.stop - cfr_stop;
- out->s.stop = cfr_stop;
- pv->out_last_stop = out->s.stop;
- for ( ; excess_dur >= pv->frame_rate; excess_dur -= pv->frame_rate )
- {
- /* next frame too far ahead - dup current frame */
- hb_buffer_t *dup = hb_buffer_dup( out );
- dup->s.new_chap = 0;
- dup->s.start = cfr_stop;
- cfr_stop += pv->frame_rate;
- dup->s.stop = cfr_stop;
- pv->out_last_stop = dup->s.stop;
- out = insert_buffer_in_chain( out, dup );
- ++pv->dups;
- ++pv->count_frames;
- }
+ /* next frame too far ahead - dup current frame */
+ hb_buffer_t *dup = hb_buffer_dup( out );
+ dup->s.new_chap = 0;
+ dup->s.start = cfr_stop;
+ cfr_stop += pv->frame_rate;
+ dup->s.stop = cfr_stop;
+ pv->out_last_stop = dup->s.stop;
+ hb_buffer_list_append(list, dup);
+ ++pv->dups;
+ ++pv->count_frames;
}
}
}
if ( pv->cfr == 0 )
{
/* Ensure we're using "Same as source" FPS */
- sprintf( info->human_readable_desc,
+ sprintf( info->human_readable_desc,
"frame rate: same as source (around %.3f fps)",
(float)pv->vrate.num / pv->vrate.den );
}
else if ( pv->cfr == 2 )
{
- // For PFR, we want the framerate based on the source's actual
- // framerate, unless it's higher than the specified peak framerate.
+ // For PFR, we want the framerate based on the source's actual
+ // framerate, unless it's higher than the specified peak framerate.
double source_fps = (double)pv->input_vrate.num / pv->input_vrate.den;
double peak_fps = (double)pv->vrate.num / pv->vrate.den;
- sprintf( info->human_readable_desc,
+ sprintf( info->human_readable_desc,
"frame rate: %.3f fps -> peak rate limited to %.3f fps",
source_fps , peak_fps );
}
// Constant framerate. Signal the framerate we are using.
double source_fps = (double)pv->input_vrate.num / pv->input_vrate.den;
double constant_fps = (double)pv->vrate.num / pv->vrate.den;
- sprintf( info->human_readable_desc,
+ sprintf( info->human_readable_desc,
"frame rate: %.3f fps -> constant %.3f fps",
source_fps , constant_fps );
}
if( pv->job )
{
hb_interjob_t * interjob = hb_interjob_get( pv->job->h );
-
- /* Preserve dropped frame count for more accurate
- * framerates in 2nd passes.
+
+ /* Preserve dropped frame count for more accurate
+ * framerates in 2nd passes.
*/
interjob->out_frame_count = pv->count_frames;
interjob->total_time = pv->out_last_stop;
}
- hb_log("render: lost time: %"PRId64" (%i frames)",
+ hb_log("render: lost time: %"PRId64" (%i frames)",
pv->total_lost_time, pv->dropped_frames);
- hb_log("render: gained time: %"PRId64" (%i frames) (%"PRId64" not accounted for)",
- pv->total_gained_time, pv->extended_frames,
+ hb_log("render: gained time: %"PRId64" (%i frames) (%"PRId64" not accounted for)",
+ pv->total_gained_time, pv->extended_frames,
pv->total_lost_time - pv->total_gained_time);
if (pv->dropped_frames)
{
- hb_log("render: average dropped frame duration: %"PRId64,
+ hb_log("render: average dropped frame duration: %"PRId64,
(pv->total_lost_time / pv->dropped_frames) );
}
hb_buffer_t ** buf_out )
{
hb_filter_private_t * pv = filter->private_data;
- hb_buffer_t * in = *buf_in;
- hb_buffer_t * out = NULL;
+ hb_buffer_list_t list;
+ hb_buffer_t * in = *buf_in;
+ hb_buffer_t * out = NULL;
*buf_in = NULL;
*buf_out = NULL;
+ hb_buffer_list_clear(&list);
+
if (in->s.flags & HB_BUF_FLAG_EOF)
{
- hb_buffer_t *head = NULL, *tail = NULL, *next;
- int counter = 2;
+ hb_buffer_t * next;
+ int counter = 2;
/* If the input buffer is end of stream, send out an empty one
* to the next stage as well. To avoid losing the contents of
- * the delay queue connect the buffers in the delay queue in
+ * the delay queue, connect the buffers in the delay queue in
* the correct order, and add the end of stream buffer to the
* end.
- */
- while( ( next = hb_fifo_get( pv->delay_queue ) ) != NULL )
+ */
+ while ((next = hb_fifo_get(pv->delay_queue)) != NULL)
{
-
+
/* We can't use the given time stamps. Previous frames
might already have been extended, throwing off the
raw values fed to render.c. Instead, their
If it needed its duration extended to make up
lost time, it will have happened above. */
next->s.start = pv->last_start[counter];
- next->s.stop = pv->last_stop[counter--];
-
- adjust_frame_rate( pv, &next );
-
- if( next )
- {
- if( !head && !tail )
- {
- head = next;
- } else {
- tail->next = next;
- }
- // Move tail to the end of the list that
- // adjust_frame_rate could return
- while (next)
- {
- tail = next;
- next = next->next;
- }
- }
+ next->s.stop = pv->last_stop[counter--];
+
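+ // adjust_frame_rate() works on the tail of 'list', so it can drop the frame we just
+ // appended or add CFR duplicates after it without any manual chain surgery here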
+ hb_buffer_list_append(&list, next);
+ adjust_frame_rate(pv, &list);
}
- if( tail )
- {
- tail->next = in;
- *buf_out = head;
- } else {
- *buf_out = in;
- }
+ hb_buffer_list_append(&list, in);
+ *buf_out = hb_buffer_list_clear(&list);
return HB_FILTER_DONE;
}
{
/* We need to compensate for the time lost by dropping frame(s).
Spread its duration out in quarters, because usually dropped frames
- maintain a 1-out-of-5 pattern and this spreads it out amongst
+ maintain a 1-out-of-5 pattern and this spreads it out amongst
the remaining ones. Store these in the lost_time array, which
- has 4 slots in it. Because not every frame duration divides
- evenly by 4, and we can't lose the remainder, we have to go
- through an awkward process to preserve it in the 4th array index.
+ has 4 slots in it. Because not every frame duration divides
+ evenly by 4, and we can't lose the remainder, we have to go
+ through an awkward process to preserve it in the 4th array index.
*/
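+ // e.g. a 1001-tick gap adds 250 ticks to each of the first three slots and, per the
+ // comment above, the remaining 251 ticks end up in lost_time[3]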
uint64_t temp_duration = in->s.start - pv->last_stop[0];
pv->lost_time[0] += (temp_duration / 4);
hb_fifo_push( pv->delay_queue, in );
/*
- * Keep the last three frames in our queue, this ensures that we have
- * the last two always in there should we need to rewrite the
+ * Keep the last three frames in our queue, this ensures that we have
+ * the last two always in there should we need to rewrite the
* durations on them.
*/
- if( hb_fifo_size( pv->delay_queue ) >= 4 )
+ if (hb_fifo_size(pv->delay_queue) < 4)
{
- out = hb_fifo_get( pv->delay_queue );
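+ // not enough frames buffered yet; hold output until the delay queue has four entries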
+ *buf_out = NULL;
+ return HB_FILTER_OK;
}
- if( out )
+ out = hb_fifo_get(pv->delay_queue);
+ /* The current frame exists. That means it hasn't been dropped by a
+ * filter. We may edit its duration if needed.
+ */
+ if( pv->lost_time[3] > 0 )
{
- /* The current frame exists. That means it hasn't been dropped by a
- * filter. We may edit its duration if needed.
- */
- if( pv->lost_time[3] > 0 )
+ int time_shift = 0;
+
+ for( i = 3; i >= 0; i-- )
{
- int time_shift = 0;
-
- for( i = 3; i >= 0; i-- )
- {
- /*
- * A frame's been dropped earlier by VFR detelecine.
- * Gotta make up the lost time. This will also
- * slow down the video.
- * The dropped frame's has to be accounted for, so
- * divvy it up amongst the 4 frames left behind.
- * This is what the delay_queue is for;
- * telecined sequences start 2 frames before
- * the dropped frame, so to slow down the right
- * ones you need a 2 frame delay between
- * reading input and writing output.
- */
-
- /* We want to extend the outputted frame's duration by the value
- stored in the 4th slot of the lost_time array. Because we need
- to adjust all the values in the array so they're contiguous,
- extend the duration inside the array first, before applying
- it to the current frame buffer. */
- pv->last_start[i] += time_shift;
- pv->last_stop[i] += pv->lost_time[i] + time_shift;
-
- /* Log how much time has been added back in to the video. */
- pv->total_gained_time += pv->lost_time[i];
- time_shift += pv->lost_time[i];
-
- pv->lost_time[i] = 0;
-
- /* Log how many frames have had their durations extended. */
- pv->extended_frames++;
- }
+ /*
+ * A frame's been dropped earlier by VFR detelecine.
+ * Gotta make up the lost time. This will also
+ * slow down the video.
+ * The dropped frame's duration has to be accounted for, so
+ * divvy it up amongst the 4 frames left behind.
+ * This is what the delay_queue is for;
+ * telecined sequences start 2 frames before
+ * the dropped frame, so to slow down the right
+ * ones you need a 2 frame delay between
+ * reading input and writing output.
+ */
+
+ /* We want to extend the outputted frame's duration by the value
+ stored in the 4th slot of the lost_time array. Because we need
+ to adjust all the values in the array so they're contiguous,
+ extend the duration inside the array first, before applying
+ it to the current frame buffer. */
+ pv->last_start[i] += time_shift;
+ pv->last_stop[i] += pv->lost_time[i] + time_shift;
+
+ /* Log how much time has been added back in to the video. */
+ pv->total_gained_time += pv->lost_time[i];
+ time_shift += pv->lost_time[i];
+
+ pv->lost_time[i] = 0;
+
+ /* Log how many frames have had their durations extended. */
+ pv->extended_frames++;
}
-
- /* We can't use the given time stamps. Previous frames
- might already have been extended, throwing off the
- raw values fed to render.c. Instead, their
- stop and start times are stored in arrays.
- The 4th cached frame will be the to use.
- If it needed its duration extended to make up
- lost time, it will have happened above. */
- out->s.start = pv->last_start[3];
- out->s.stop = pv->last_stop[3];
-
- adjust_frame_rate( pv, &out );
}
- *buf_out = out;
+ /* We can't use the given time stamps. Previous frames
+ might already have been extended, throwing off the
+ raw values fed to render.c. Instead, their
+ stop and start times are stored in arrays.
+ The 4th cached frame will be the one to use.
+ If it needed its duration extended to make up
+ lost time, it will have happened above. */
+ out->s.start = pv->last_start[3];
+ out->s.stop = pv->last_stop[3];
+
+ hb_buffer_list_append(&list, out);
+ adjust_frame_rate(pv, &list);
+
+ *buf_out = hb_buffer_list_clear(&list);
return HB_FILTER_OK;
}