avfilter/af_afade: stop using ff_outlink_get_status on inputs

This commit is contained in:
Paul B Mahol 2023-06-17 22:32:11 +02:00
parent a756ae41ed
commit c44fe10160

View File

@@ -42,8 +42,8 @@ typedef struct AudioFadeContext {
double silence; double silence;
double unity; double unity;
int overlap; int overlap;
int cf0_eof; int status[2];
int crossfade_is_over; int passthrough;
int64_t pts; int64_t pts;
void (*fade_samples)(uint8_t **dst, uint8_t * const *src, void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
@@ -521,6 +521,13 @@ CROSSFADE(flt, float)
CROSSFADE(s16, int16_t) CROSSFADE(s16, int16_t)
CROSSFADE(s32, int32_t) CROSSFADE(s32, int32_t)
/* Probe an input link for EOF without consuming anything.
 * Asks whether queued_samples + 1 samples are obtainable — one more than is
 * currently queued — so a return of 1 means no further samples can arrive.
 * NOTE(review): relies on the exact return-value semantics of
 * ff_inlink_check_available_samples(), whose implementation is not visible
 * here; confirm against the avfilter internal API before relying on this. */
static int check_input(AVFilterLink *inlink)
{
const int queued_samples = ff_inlink_queued_samples(inlink);
return ff_inlink_check_available_samples(inlink, queued_samples + 1) == 1;
}
static int activate(AVFilterContext *ctx) static int activate(AVFilterContext *ctx)
{ {
AudioFadeContext *s = ctx->priv; AudioFadeContext *s = ctx->priv;
@@ -531,7 +538,7 @@ static int activate(AVFilterContext *ctx)
FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx); FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
if (s->crossfade_is_over) { if (s->passthrough && s->status[0]) {
ret = ff_inlink_consume_frame(ctx->inputs[1], &in); ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
if (ret > 0) { if (ret > 0) {
in->pts = s->pts; in->pts = s->pts;
@@ -541,10 +548,10 @@ static int activate(AVFilterContext *ctx)
} else if (ret < 0) { } else if (ret < 0) {
return ret; return ret;
} else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) { } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
ff_outlink_set_status(ctx->outputs[0], status, pts); ff_outlink_set_status(outlink, status, pts);
return 0; return 0;
} else if (!ret) { } else if (!ret) {
if (ff_outlink_frame_wanted(ctx->outputs[0])) { if (ff_outlink_frame_wanted(outlink)) {
ff_inlink_request_frame(ctx->inputs[1]); ff_inlink_request_frame(ctx->inputs[1]);
return 0; return 0;
} }
@@ -554,6 +561,7 @@ static int activate(AVFilterContext *ctx)
nb_samples = ff_inlink_queued_samples(ctx->inputs[0]); nb_samples = ff_inlink_queued_samples(ctx->inputs[0]);
if (nb_samples > s->nb_samples) { if (nb_samples > s->nb_samples) {
nb_samples -= s->nb_samples; nb_samples -= s->nb_samples;
s->passthrough = 1;
ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in); ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in);
if (ret < 0) if (ret < 0)
return ret; return ret;
@@ -561,7 +569,7 @@ static int activate(AVFilterContext *ctx)
s->pts += av_rescale_q(in->nb_samples, s->pts += av_rescale_q(in->nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base); (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
return ff_filter_frame(outlink, in); return ff_filter_frame(outlink, in);
} else if (s->cf0_eof && nb_samples >= s->nb_samples && } else if (s->status[0] && nb_samples >= s->nb_samples &&
ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples) { ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples) {
if (s->overlap) { if (s->overlap) {
out = ff_get_audio_buffer(outlink, s->nb_samples); out = ff_get_audio_buffer(outlink, s->nb_samples);
@@ -587,7 +595,7 @@ static int activate(AVFilterContext *ctx)
out->pts = s->pts; out->pts = s->pts;
s->pts += av_rescale_q(s->nb_samples, s->pts += av_rescale_q(s->nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base); (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
s->crossfade_is_over = 1; s->passthrough = 1;
av_frame_free(&cf[0]); av_frame_free(&cf[0]);
av_frame_free(&cf[1]); av_frame_free(&cf[1]);
return ff_filter_frame(outlink, out); return ff_filter_frame(outlink, out);
@@ -627,19 +635,20 @@ static int activate(AVFilterContext *ctx)
out->pts = s->pts; out->pts = s->pts;
s->pts += av_rescale_q(s->nb_samples, s->pts += av_rescale_q(s->nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base); (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
s->crossfade_is_over = 1; s->passthrough = 1;
av_frame_free(&cf[1]); av_frame_free(&cf[1]);
return ff_filter_frame(outlink, out); return ff_filter_frame(outlink, out);
} }
} else if (ff_outlink_frame_wanted(ctx->outputs[0])) { } else if (ff_outlink_frame_wanted(outlink)) {
if (!s->cf0_eof && ff_outlink_get_status(ctx->inputs[0])) { if (!s->status[0] && check_input(ctx->inputs[0]))
s->cf0_eof = 1; s->status[0] = AVERROR_EOF;
} s->passthrough = !s->status[0];
if (ff_outlink_get_status(ctx->inputs[1])) { if (check_input(ctx->inputs[1])) {
ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE); s->status[1] = AVERROR_EOF;
ff_outlink_set_status(outlink, AVERROR_EOF, AV_NOPTS_VALUE);
return 0; return 0;
} }
if (!s->cf0_eof) if (!s->status[0])
ff_inlink_request_frame(ctx->inputs[0]); ff_inlink_request_frame(ctx->inputs[0]);
else else
ff_inlink_request_frame(ctx->inputs[1]); ff_inlink_request_frame(ctx->inputs[1]);
@@ -677,8 +686,7 @@ static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
AudioFadeContext *s = ctx->priv; AudioFadeContext *s = ctx->priv;
return (s->crossfade_is_over || return s->passthrough ?
(ff_inlink_queued_samples(inlink) > s->nb_samples)) ?
ff_null_get_audio_buffer (inlink, nb_samples) : ff_null_get_audio_buffer (inlink, nb_samples) :
ff_default_get_audio_buffer(inlink, nb_samples); ff_default_get_audio_buffer(inlink, nb_samples);
} }