Allocate AVFifoBuffer through the fifo API to reduce future API/ABI issues.

Yes, this breaks the ABI/API, but I've already broken it and will bump the avutil major version soon.

Originally committed as revision 17869 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 48d58e592a
commit 41dd680dd8

ffmpeg.c (25 changed lines)
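
Before the per-file diffs, here is the shape of the conversion every caller undergoes: the FIFO member shrinks from an embedded AVFifoBuffer set up with av_fifo_init() to a pointer obtained from the new av_fifo_alloc(). A minimal sketch of that pattern; MyMuxContext and the two functions are invented for illustration, only the av_fifo_*() calls come from this patch:

    #include "libavutil/fifo.h"   /* in-tree code includes "fifo.h" directly */

    /* Hypothetical context struct, standing in for AVOutputStream, StreamInfo, SWFContext, ... */
    typedef struct MyMuxContext {
        AVFifoBuffer *fifo;       /* was: AVFifoBuffer fifo;  (embedded by value) */
    } MyMuxContext;

    static int my_mux_init(MyMuxContext *ctx)
    {
        /* old API: if (av_fifo_init(&ctx->fifo, 1024) < 0) return -1; */
        ctx->fifo = av_fifo_alloc(1024);
        if (!ctx->fifo)           /* av_fifo_alloc() returns NULL on allocation failure */
            return -1;
        return 0;
    }

    static void my_mux_close(MyMuxContext *ctx)
    {
        /* old API: av_fifo_free(&ctx->fifo); */
        av_fifo_free(ctx->fifo);  /* NULL-safe after this commit */
        ctx->fifo = NULL;
    }

Keeping the allocation inside libavutil is what allows the fields of AVFifoBuffer to change later without breaking callers again.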

ffmpeg.c
@@ -263,7 +263,7 @@ typedef struct AVOutputStream {
     ReSampleContext *resample; /* for audio resampling */
     int reformat_pair;
     AVAudioConvert *reformat_ctx;
-    AVFifoBuffer fifo;     /* for compression: one audio fifo per codec */
+    AVFifoBuffer *fifo;     /* for compression: one audio fifo per codec */
     FILE *logfile;
 } AVOutputStream;

@@ -585,7 +585,7 @@ static void do_audio_out(AVFormatContext *s,

     if(audio_sync_method){
         double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
-                - av_fifo_size(&ost->fifo)/(ost->st->codec->channels * 2);
+                - av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2);
         double idelta= delta*ist->st->codec->sample_rate / enc->sample_rate;
         int byte_delta= ((int)idelta)*2*ist->st->codec->channels;

@@ -622,13 +622,13 @@ static void do_audio_out(AVFormatContext *s,
                 assert(ost->audio_resample);
                 if(verbose > 2)
                     fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
-//                fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(&ost->fifo)/(ost->st->codec->channels * 2));
+//                fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
                 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
             }
         }
     }else
         ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
-                - av_fifo_size(&ost->fifo)/(ost->st->codec->channels * 2); //FIXME wrong
+                - av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2); //FIXME wrong

     if (ost->audio_resample) {
         buftmp = audio_buf;
@@ -660,19 +660,19 @@ static void do_audio_out(AVFormatContext *s,
     /* now encode as many frames as possible */
     if (enc->frame_size > 1) {
         /* output resampled raw samples */
-        if (av_fifo_realloc2(&ost->fifo, av_fifo_size(&ost->fifo) + size_out) < 0) {
+        if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
             fprintf(stderr, "av_fifo_realloc2() failed\n");
             av_exit(1);
         }
-        av_fifo_generic_write(&ost->fifo, buftmp, size_out, NULL);
+        av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);

         frame_bytes = enc->frame_size * osize * enc->channels;

-        while (av_fifo_size(&ost->fifo) >= frame_bytes) {
+        while (av_fifo_size(ost->fifo) >= frame_bytes) {
             AVPacket pkt;
             av_init_packet(&pkt);

-            av_fifo_read(&ost->fifo, audio_buf, frame_bytes);
+            av_fifo_read(ost->fifo, audio_buf, frame_bytes);

             //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()

@@ -1446,13 +1446,13 @@ static int output_packet(AVInputStream *ist, int ist_index,

                     switch(ost->st->codec->codec_type) {
                     case CODEC_TYPE_AUDIO:
-                        fifo_bytes = av_fifo_size(&ost->fifo);
+                        fifo_bytes = av_fifo_size(ost->fifo);
                         ret = 0;
                         /* encode any samples remaining in fifo */
                         if(fifo_bytes > 0 && enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
                             int fs_tmp = enc->frame_size;
                             enc->frame_size = fifo_bytes / (2 * enc->channels);
-                            av_fifo_read(&ost->fifo, (uint8_t *)samples, fifo_bytes);
+                            av_fifo_read(ost->fifo, (uint8_t *)samples, fifo_bytes);
                             ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, samples);
                             enc->frame_size = fs_tmp;
                         }
@@ -1783,7 +1783,8 @@ static int av_encode(AVFormatContext **output_files,
         } else {
             switch(codec->codec_type) {
             case CODEC_TYPE_AUDIO:
-                if (av_fifo_init(&ost->fifo, 1024))
+                ost->fifo= av_fifo_alloc(1024);
+                if(!ost->fifo)
                     goto fail;
                 ost->reformat_pair = MAKE_SFMT_PAIR(SAMPLE_FMT_NONE,SAMPLE_FMT_NONE);
                 ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1;
@@ -2235,7 +2236,7 @@ static int av_encode(AVFormatContext **output_files,
             fclose(ost->logfile);
             ost->logfile = NULL;
         }
-        av_fifo_free(&ost->fifo); /* works even if fifo is not
+        av_fifo_free(ost->fifo); /* works even if fifo is not
                                      initialized but set to zero */
         av_free(ost->pict_tmp.data[0]);
         if (ost->video_resample)

libavformat/audiointerleave.c
@@ -33,7 +33,7 @@ void ff_audio_interleave_close(AVFormatContext *s)
         AudioInterleaveContext *aic = st->priv_data;

         if (st->codec->codec_type == CODEC_TYPE_AUDIO)
-            av_fifo_free(&aic->fifo);
+            av_fifo_free(aic->fifo);
     }
 }

@@ -62,7 +62,7 @@ int ff_audio_interleave_init(AVFormatContext *s,
             aic->time_base = time_base;

             aic->fifo_size = 100* *aic->samples;
-            av_fifo_init(&aic->fifo, 100 * *aic->samples);
+            aic->fifo= av_fifo_alloc(100 * *aic->samples);
         }
     }

@@ -75,12 +75,12 @@ static int ff_interleave_new_audio_packet(AVFormatContext *s, AVPacket *pkt,
     AVStream *st = s->streams[stream_index];
     AudioInterleaveContext *aic = st->priv_data;

-    int size = FFMIN(av_fifo_size(&aic->fifo), *aic->samples * aic->sample_size);
-    if (!size || (!flush && size == av_fifo_size(&aic->fifo)))
+    int size = FFMIN(av_fifo_size(aic->fifo), *aic->samples * aic->sample_size);
+    if (!size || (!flush && size == av_fifo_size(aic->fifo)))
         return 0;

     av_new_packet(pkt, size);
-    av_fifo_read(&aic->fifo, pkt->data, size);
+    av_fifo_read(aic->fifo, pkt->data, size);

     pkt->dts = pkt->pts = aic->dts;
     pkt->duration = av_rescale_q(*aic->samples, st->time_base, aic->time_base);
@@ -104,13 +104,13 @@ int ff_audio_rechunk_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt
     AVStream *st = s->streams[pkt->stream_index];
     AudioInterleaveContext *aic = st->priv_data;
     if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
-        unsigned new_size = av_fifo_size(&aic->fifo) + pkt->size;
+        unsigned new_size = av_fifo_size(aic->fifo) + pkt->size;
         if (new_size > aic->fifo_size) {
-            if (av_fifo_realloc2(&aic->fifo, new_size) < 0)
+            if (av_fifo_realloc2(aic->fifo, new_size) < 0)
                 return -1;
             aic->fifo_size = new_size;
         }
-        av_fifo_generic_write(&aic->fifo, pkt->data, pkt->size, NULL);
+        av_fifo_generic_write(aic->fifo, pkt->data, pkt->size, NULL);
     } else {
         // rewrite pts and dts to be decoded time line position
         pkt->pts = pkt->dts = aic->dts;

libavformat/audiointerleave.h
@@ -27,7 +27,7 @@
 #include "avformat.h"

 typedef struct {
-    AVFifoBuffer fifo;
+    AVFifoBuffer *fifo;
     unsigned fifo_size;           ///< size of currently allocated FIFO
     uint64_t dts;                 ///< current dts
     int sample_size;              ///< size of one sample all channels included

libavformat/dvenc.c
@@ -38,7 +38,7 @@ struct DVMuxContext {
     const DVprofile* sys;          /* current DV profile, e.g.: 525/60, 625/50 */
     int              n_ast;        /* number of stereo audio streams (up to 2) */
     AVStream        *ast[2];       /* stereo audio streams */
-    AVFifoBuffer     audio_data[2]; /* FIFO for storing excessive amounts of PCM */
+    AVFifoBuffer    *audio_data[2]; /* FIFO for storing excessive amounts of PCM */
     int              frames;       /* current frame number */
     time_t           start_time;   /* recording start time */
     int              has_audio;    /* frame under contruction has audio */
@@ -189,8 +189,8 @@ static void dv_inject_audio(DVMuxContext *c, int channel, uint8_t* frame_ptr)
                 if (of*2 >= size)
                     continue;

-                frame_ptr[d]   = av_fifo_peek(&c->audio_data[channel], of*2+1); // FIXME: maybe we have to admit
-                frame_ptr[d+1] = av_fifo_peek(&c->audio_data[channel], of*2);   //        that DV is a big-endian PCM
+                frame_ptr[d]   = av_fifo_peek(c->audio_data[channel], of*2+1); // FIXME: maybe we have to admit
+                frame_ptr[d+1] = av_fifo_peek(c->audio_data[channel], of*2);   //        that DV is a big-endian PCM
             }
             frame_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
         }
@@ -251,12 +251,12 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
         for (i = 0; i < c->n_ast && st != c->ast[i]; i++);

         /* FIXME: we have to have more sensible approach than this one */
-        if (av_fifo_size(&c->audio_data[i]) + data_size >= 100*AVCODEC_MAX_AUDIO_FRAME_SIZE)
+        if (av_fifo_size(c->audio_data[i]) + data_size >= 100*AVCODEC_MAX_AUDIO_FRAME_SIZE)
             av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames);
-        av_fifo_generic_write(&c->audio_data[i], data, data_size, NULL);
+        av_fifo_generic_write(c->audio_data[i], data, data_size, NULL);

         /* Let us see if we've got enough audio for one DV frame. */
-        c->has_audio |= ((reqasize <= av_fifo_size(&c->audio_data[i])) << i);
+        c->has_audio |= ((reqasize <= av_fifo_size(c->audio_data[i])) << i);

         break;
     default:
@@ -269,8 +269,8 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
         c->has_audio = 0;
         for (i=0; i < c->n_ast; i++) {
             dv_inject_audio(c, i, *frame);
-            av_fifo_drain(&c->audio_data[i], reqasize);
-            c->has_audio |= ((reqasize <= av_fifo_size(&c->audio_data[i])) << i);
+            av_fifo_drain(c->audio_data[i], reqasize);
+            c->has_audio |= ((reqasize <= av_fifo_size(c->audio_data[i])) << i);
         }

         c->has_video = 0;
@@ -337,10 +337,10 @@ DVMuxContext* dv_init_mux(AVFormatContext* s)
     c->start_time = (time_t)s->timestamp;

     for (i=0; i < c->n_ast; i++) {
-        if (c->ast[i] && av_fifo_init(&c->audio_data[i], 100*AVCODEC_MAX_AUDIO_FRAME_SIZE) < 0) {
+        if (c->ast[i] && !(c->audio_data[i]=av_fifo_alloc(100*AVCODEC_MAX_AUDIO_FRAME_SIZE))) {
             while (i > 0) {
                 i--;
-                av_fifo_free(&c->audio_data[i]);
+                av_fifo_free(c->audio_data[i]);
             }
             goto bail_out;
         }
@@ -356,7 +356,7 @@ void dv_delete_mux(DVMuxContext *c)
 {
     int i;
     for (i=0; i < c->n_ast; i++)
-        av_fifo_free(&c->audio_data[i]);
+        av_fifo_free(c->audio_data[i]);
 }

 #if CONFIG_DV_MUXER

libavformat/mpegenc.c
@@ -40,7 +40,7 @@ typedef struct PacketDesc {
 } PacketDesc;

 typedef struct {
-    AVFifoBuffer fifo;
+    AVFifoBuffer *fifo;
     uint8_t id;
     int max_buffer_size; /* in bytes */
     int buffer_index;
@@ -381,7 +381,7 @@ static int mpeg_mux_init(AVFormatContext *ctx)
         default:
             return -1;
         }
-        av_fifo_init(&stream->fifo, 16);
+        stream->fifo= av_fifo_alloc(16);
     }
     bitrate = 0;
     audio_bitrate = 0;
@@ -786,7 +786,7 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
             startcode = 0x100 + id;
         }

-        stuffing_size = payload_size - av_fifo_size(&stream->fifo);
+        stuffing_size = payload_size - av_fifo_size(stream->fifo);

         // first byte does not fit -> reset pts/dts + stuffing
         if(payload_size <= trailer_size && pts != AV_NOPTS_VALUE){
@@ -913,8 +913,8 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
         }

         /* output data */
-        assert(payload_size - stuffing_size <= av_fifo_size(&stream->fifo));
-        av_fifo_generic_read(&stream->fifo, payload_size - stuffing_size, &put_buffer, ctx->pb);
+        assert(payload_size - stuffing_size <= av_fifo_size(stream->fifo));
+        av_fifo_generic_read(stream->fifo, payload_size - stuffing_size, &put_buffer, ctx->pb);
         stream->bytes_to_iframe -= payload_size - stuffing_size;
     }else{
         payload_size=
@@ -1031,7 +1031,7 @@ retry:
     for(i=0; i<ctx->nb_streams; i++){
         AVStream *st = ctx->streams[i];
         StreamInfo *stream = st->priv_data;
-        const int avail_data= av_fifo_size(&stream->fifo);
+        const int avail_data= av_fifo_size(stream->fifo);
         const int space= stream->max_buffer_size - stream->buffer_index;
         int rel_space= 1024*space / stream->max_buffer_size;
         PacketDesc *next_pkt= stream->premux_packet;
@@ -1091,7 +1091,7 @@ retry:
     st = ctx->streams[best_i];
     stream = st->priv_data;

-    assert(av_fifo_size(&stream->fifo) > 0);
+    assert(av_fifo_size(stream->fifo) > 0);

     assert(avail_space >= s->packet_size || ignore_constraints);

@@ -1107,7 +1107,7 @@ retry:
 //av_log(ctx, AV_LOG_DEBUG, "dts:%f pts:%f scr:%f stream:%d\n", timestamp_packet->dts/90000.0, timestamp_packet->pts/90000.0, scr/90000.0, best_i);
         es_size= flush_packet(ctx, best_i, timestamp_packet->pts, timestamp_packet->dts, scr, trailer_size);
     }else{
-        assert(av_fifo_size(&stream->fifo) == trailer_size);
+        assert(av_fifo_size(stream->fifo) == trailer_size);
         es_size= flush_packet(ctx, best_i, AV_NOPTS_VALUE, AV_NOPTS_VALUE, scr, trailer_size);
     }

@@ -1170,18 +1170,18 @@ static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
     stream->predecode_packet= pkt_desc;
     stream->next_packet= &pkt_desc->next;

-    if (av_fifo_realloc2(&stream->fifo, av_fifo_size(&stream->fifo) + size) < 0)
+    if (av_fifo_realloc2(stream->fifo, av_fifo_size(stream->fifo) + size) < 0)
         return -1;

     if (s->is_dvd){
         if (is_iframe && (s->packet_number == 0 || (pts - stream->vobu_start_pts >= 36000))) { // min VOBU length 0.4 seconds (mpucoder)
-            stream->bytes_to_iframe = av_fifo_size(&stream->fifo);
+            stream->bytes_to_iframe = av_fifo_size(stream->fifo);
             stream->align_iframe = 1;
             stream->vobu_start_pts = pts;
         }
     }

-    av_fifo_generic_write(&stream->fifo, buf, size, NULL);
+    av_fifo_generic_write(stream->fifo, buf, size, NULL);

     for(;;){
         int ret= output_packet(ctx, 0);
@@ -1213,8 +1213,8 @@ static int mpeg_mux_end(AVFormatContext *ctx)
     for(i=0;i<ctx->nb_streams;i++) {
         stream = ctx->streams[i]->priv_data;

-        assert(av_fifo_size(&stream->fifo) == 0);
-        av_fifo_free(&stream->fifo);
+        assert(av_fifo_size(stream->fifo) == 0);
+        av_fifo_free(stream->fifo);
     }
     return 0;
 }

libavformat/swfenc.c
@@ -75,7 +75,7 @@ typedef struct {
     int video_frame_number;
     int frame_rate;
     int tag;
-    AVFifoBuffer audio_fifo;
+    AVFifoBuffer *audio_fifo;
     AVCodecContext *audio_enc, *video_enc;
 } SWFContext;

@@ -192,7 +192,7 @@ static int swf_write_header(AVFormatContext *s)
                 return -1;
             }
             swf->audio_enc = enc;
-            av_fifo_init(&swf->audio_fifo, AUDIO_FIFO_SIZE);
+            swf->audio_fifo= av_fifo_alloc(AUDIO_FIFO_SIZE);
         } else {
             av_log(s, AV_LOG_ERROR, "SWF muxer only supports MP3\n");
             return -1;
@@ -414,12 +414,12 @@ static int swf_write_video(AVFormatContext *s,
     swf->swf_frame_number++;

     /* streaming sound always should be placed just before showframe tags */
-    if (swf->audio_enc && av_fifo_size(&swf->audio_fifo)) {
-        int frame_size = av_fifo_size(&swf->audio_fifo);
+    if (swf->audio_enc && av_fifo_size(swf->audio_fifo)) {
+        int frame_size = av_fifo_size(swf->audio_fifo);
         put_swf_tag(s, TAG_STREAMBLOCK | TAG_LONG);
         put_le16(pb, swf->sound_samples);
         put_le16(pb, 0); // seek samples
-        av_fifo_generic_read(&swf->audio_fifo, frame_size, &put_buffer, pb);
+        av_fifo_generic_read(swf->audio_fifo, frame_size, &put_buffer, pb);
         put_swf_end_tag(s);

         /* update FIFO */
@@ -444,12 +444,12 @@ static int swf_write_audio(AVFormatContext *s,
     if (swf->swf_frame_number == 16000)
         av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");

-    if (av_fifo_size(&swf->audio_fifo) + size > AUDIO_FIFO_SIZE) {
+    if (av_fifo_size(swf->audio_fifo) + size > AUDIO_FIFO_SIZE) {
         av_log(s, AV_LOG_ERROR, "audio fifo too small to mux audio essence\n");
         return -1;
     }

-    av_fifo_generic_write(&swf->audio_fifo, buf, size, NULL);
+    av_fifo_generic_write(swf->audio_fifo, buf, size, NULL);
     swf->sound_samples += enc->frame_size;

     /* if audio only stream make sure we add swf frames */
@@ -481,7 +481,7 @@ static int swf_write_trailer(AVFormatContext *s)
         if (enc->codec_type == CODEC_TYPE_VIDEO)
             video_enc = enc;
         else
-            av_fifo_free(&swf->audio_fifo);
+            av_fifo_free(swf->audio_fifo);
     }

     put_swf_tag(s, TAG_END);

libavutil/fifo.c
@@ -22,20 +22,25 @@
 #include "common.h"
 #include "fifo.h"

-int av_fifo_init(AVFifoBuffer *f, unsigned int size)
+AVFifoBuffer *av_fifo_alloc(unsigned int size)
 {
+    AVFifoBuffer *f= av_mallocz(sizeof(AVFifoBuffer));
+    if(!f)
+        return NULL;
     f->wptr = f->rptr =
     f->buffer = av_malloc(size);
     f->end = f->buffer + size;
-    f->rndx = f->wndx = 0;
     if (!f->buffer)
-        return -1;
-    return 0;
+        av_freep(&f);
+    return f;
 }

 void av_fifo_free(AVFifoBuffer *f)
 {
+    if(f){
         av_free(f->buffer);
+        av_free(f);
+    }
 }

 int av_fifo_size(AVFifoBuffer *f)
@@ -59,15 +64,16 @@ int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size) {

     if(old_size < new_size){
         int len= av_fifo_size(f);
-        AVFifoBuffer f2;
+        AVFifoBuffer *f2= av_fifo_alloc(new_size);

-        if (av_fifo_init(&f2, new_size) < 0)
+        if (!f2)
             return -1;
-        av_fifo_read(f, f2.buffer, len);
-        f2.wptr += len;
-        f2.wndx += len;
+        av_fifo_read(f, f2->buffer, len);
+        f2->wptr += len;
+        f2->wndx += len;
         av_free(f->buffer);
-        *f= f2;
+        *f= *f2;
+        av_free(f2);
     }
     return 0;
 }
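
For context, here is a small self-contained sketch of how the reworked functions fit together, mirroring the buffer-then-drain pattern do_audio_out() uses in ffmpeg.c above; the function name and sizes are invented, only the av_fifo_*() calls and their signatures come from this patch:

    #include <stdint.h>
    #include "libavutil/fifo.h"

    /* Append `size` bytes, then drain complete frames of `frame_bytes` each
     * into `out` (which must hold at least frame_bytes).  Returns the number
     * of frames drained, or -1 on allocation failure. */
    static int buffer_and_drain(uint8_t *data, int size, int frame_bytes, uint8_t *out)
    {
        AVFifoBuffer *fifo = av_fifo_alloc(1024);          /* new allocator */
        int frames = 0;

        if (!fifo)
            return -1;

        /* grow the FIFO on demand before writing, as do_audio_out() does */
        if (av_fifo_realloc2(fifo, av_fifo_size(fifo) + size) < 0) {
            av_fifo_free(fifo);
            return -1;
        }
        av_fifo_generic_write(fifo, data, size, NULL);

        while (av_fifo_size(fifo) >= frame_bytes) {        /* drain whole frames */
            av_fifo_read(fifo, out, frame_bytes);
            frames++;
        }

        av_fifo_free(fifo);
        return frames;
    }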

libavutil/fifo.h
@@ -36,11 +36,10 @@ typedef struct AVFifoBuffer {

 /**
  * Initializes an AVFifoBuffer.
- * @param *f AVFifoBuffer to initialize
  * @param size of FIFO
- * @return <0 for failure >=0 otherwise
+ * @return AVFifoBuffer or NULL if mem allocation failure
  */
-int av_fifo_init(AVFifoBuffer *f, unsigned int size);
+AVFifoBuffer *av_fifo_alloc(unsigned int size);

 /**
  * Frees an AVFifoBuffer.
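
As the commit message says, this is a deliberate ABI/API break that will be accompanied by an avutil major bump: any structure that embedded AVFifoBuffer by value changes size and field offsets once the member becomes a pointer, and every av_fifo_*() call site switches from passing &ctx->fifo to ctx->fifo. A toy illustration of the layout change; both structs are invented, only AVFifoBuffer is real:

    #include <stdio.h>
    #include "libavutil/fifo.h"

    /* Pre- and post-commit shapes of a typical context struct. */
    struct old_shape { AVFifoBuffer  fifo; int other; };  /* FIFO embedded by value    */
    struct new_shape { AVFifoBuffer *fifo; int other; };  /* FIFO reduced to a pointer */

    int main(void)
    {
        /* Different sizes (and offsets of `other`): objects compiled against the
         * old header cannot be mixed with a libavutil built from the new one. */
        printf("embedded: %zu bytes, pointer: %zu bytes\n",
               sizeof(struct old_shape), sizeof(struct new_shape));
        return 0;
    }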