examples/muxing: add support for audio resampling
Allows encoding to the output in case the destination sample format is different from AV_SAMPLE_FMT_S16.
parent 561e05136f
commit 2aa2b4ac65
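For context, the heart of the patch is the libswresample setup that open_audio() gains below: allocate a SwrContext, describe the input (interleaved S16 from the tone generator) and the output (the encoder's sample format) with av_opt_set_*() using the same option names the patch sets, call swr_init(), and later push buffers through swr_convert(). The following is a condensed, standalone sketch of that path, not part of the patch: the FLTP destination format, the 440 Hz tone, the buffer size and the build line in the comment are illustrative assumptions.

/*
 * Condensed, standalone sketch (not part of the patch) of the conversion path
 * the commit adds: generate interleaved S16 samples and convert them to a
 * planar-float destination format with libswresample.  The destination format,
 * tone frequency, buffer size and build line are illustrative assumptions,
 * e.g.: gcc resample_sketch.c -lswresample -lavutil -lm
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

int main(void)
{
    const int sample_rate = 44100, channels = 2, nb_samples = 1024;
    uint8_t **src_data = NULL, **dst_data = NULL;
    int src_linesize, dst_linesize, i, ret;

    struct SwrContext *swr_ctx = swr_alloc();
    if (!swr_ctx)
        return 1;

    /* same option names the patch sets in open_audio() */
    av_opt_set_int       (swr_ctx, "in_channel_count",  channels,           0);
    av_opt_set_int       (swr_ctx, "in_sample_rate",    sample_rate,        0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16,  0);
    av_opt_set_int       (swr_ctx, "out_channel_count", channels,           0);
    av_opt_set_int       (swr_ctx, "out_sample_rate",   sample_rate,        0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt",    AV_SAMPLE_FMT_FLTP, 0);
    if (swr_init(swr_ctx) < 0)
        return 1;

    /* source buffer: interleaved S16; destination buffer: planar float */
    if (av_samples_alloc_array_and_samples(&src_data, &src_linesize, channels,
                                           nb_samples, AV_SAMPLE_FMT_S16, 0) < 0 ||
        av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, channels,
                                           nb_samples, AV_SAMPLE_FMT_FLTP, 0) < 0)
        return 1;

    /* fill the source with a 440 Hz tone, in the spirit of get_audio_frame() */
    for (i = 0; i < nb_samples; i++) {
        int16_t v = (int16_t)(sin(2 * M_PI * 440.0 * i / sample_rate) * 10000);
        ((int16_t *)src_data[0])[2 * i]     = v;
        ((int16_t *)src_data[0])[2 * i + 1] = v;
    }

    /* convert; with equal in/out rates every input sample is produced directly */
    ret = swr_convert(swr_ctx, dst_data, nb_samples,
                      (const uint8_t **)src_data, nb_samples);
    printf("converted %d samples per channel\n", ret);

    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    av_freep(&src_data);
    av_freep(&dst_data);
    swr_free(&swr_ctx);
    return ret < 0;
}

In the patch itself the destination is the encoder's c->sample_fmt (requested as AV_SAMPLE_FMT_FLTP in add_stream()), and the resampler is only created when that format differs from AV_SAMPLE_FMT_S16.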
					
@@ -34,9 +34,11 @@
#include <string.h>
#include <math.h>

#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

/* 5 seconds stream duration */
#define STREAM_DURATION   200.0
@@ -46,13 +48,6 @@

static int sws_flags = SWS_BICUBIC;

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
@@ -78,7 +73,7 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = AV_SAMPLE_FMT_S16;
        c->sample_fmt  = AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        c->channels    = 2;
@@ -126,8 +121,17 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;

static uint8_t **src_samples_data;
static int       src_samples_linesize;
static int       src_nb_samples;

static int max_dst_nb_samples;
uint8_t **dst_samples_data;
int       dst_samples_linesize;
int       dst_samples_size;

struct SwrContext *swr_ctx = NULL;

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
@@ -149,17 +153,51 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;
    samples = av_malloc(audio_input_frame_size *
                        av_get_bytes_per_sample(c->sample_fmt) *
                        c->channels);
    if (!samples) {
        fprintf(stderr, "Could not allocate audio samples buffer\n");
    src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
        10000 : c->frame_size;

    ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
                                             src_nb_samples, c->sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        exit(1);
    }

    /* create resampler context */
    if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
        swr_ctx = swr_alloc();
        if (!swr_ctx) {
            fprintf(stderr, "Could not allocate resampler context\n");
            exit(1);
        }

        /* set options */
        av_opt_set_int       (swr_ctx, "in_channel_count",   c->channels,       0);
        av_opt_set_int       (swr_ctx, "in_sample_rate",     c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int       (swr_ctx, "out_channel_count",  c->channels,       0);
        av_opt_set_int       (swr_ctx, "out_sample_rate",    c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

        /* initialize the resampling context */
        if ((ret = swr_init(swr_ctx)) < 0) {
            fprintf(stderr, "Failed to initialize the resampling context\n");
            exit(1);
        }
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = src_nb_samples;
    ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
                                             max_dst_nb_samples, c->sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate destination samples\n");
        exit(1);
    }
    dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
                                                  c->sample_fmt, 0);
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
@@ -184,18 +222,45 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet, ret;
    int got_packet, ret, dst_nb_samples;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);
    frame->nb_samples = audio_input_frame_size;
    get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

    /* convert samples from native format to destination codec format, using the resampler */
    if (swr_ctx) {
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_free(dst_samples_data[0]);
            ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
                                   dst_nb_samples, c->sample_fmt, 0);
            if (ret < 0)
                exit(1);
            max_dst_nb_samples = dst_nb_samples;
            dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                          c->sample_fmt, 0);
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx,
                          dst_samples_data, dst_nb_samples,
                          (const uint8_t **)src_samples_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
    } else {
        dst_samples_data[0] = src_samples_data[0];
        dst_nb_samples = src_nb_samples;
    }

    frame->nb_samples = dst_nb_samples;
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size *
                             av_get_bytes_per_sample(c->sample_fmt) *
                             c->channels, 1);
                             dst_samples_data[0], dst_samples_size, 0);

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
@@ -221,8 +286,8 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
    av_free(src_samples_data[0]);
    av_free(dst_samples_data[0]);
}

/**************************************************************/
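A note on the destination-buffer sizing in write_audio_frame() above: av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples, c->sample_rate, c->sample_rate, AV_ROUND_UP) computes ceil((delay + src_nb_samples) * out_rate / in_rate). In this example both rates are c->sample_rate, so it reduces to delay + src_nb_samples, and rounding up guarantees the output buffer can hold every converted sample, which is how the patch avoids leaving samples buffered in the resampler. Below is a tiny check of the arithmetic with made-up numbers (1152 source samples, 44100 Hz in, 48000 Hz out, zero buffered delay; none of these values come from the patch).

/* Hypothetical numbers, not taken from the patch: 1152 source samples,
 * 44100 Hz input rate, 48000 Hz output rate, zero buffered delay. */
#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t dst_nb_samples = av_rescale_rnd(0 + 1152, 48000, 44100, AV_ROUND_UP);
    /* 1152 * 48000 / 44100 = 1253.96..., rounded up to 1254 */
    printf("dst_nb_samples = %" PRId64 "\n", dst_nb_samples);
    return 0;
}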