output-example: K&R formatting cosmetics, comment spelling fixes
parent 93f6d0475f
commit afd8a3957b
@@ -44,7 +44,7 @@
 #define STREAM_DURATION   5.0
 #define STREAM_FRAME_RATE 25 /* 25 images/s */
 #define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
-#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
+#define STREAM_PIX_FMT    PIX_FMT_YUV420P /* default pix_fmt */
 
 static int sws_flags = SWS_BICUBIC;
 
@@ -80,13 +80,13 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
     c = st->codec;
 
     /* put sample parameters */
-    c->sample_fmt = AV_SAMPLE_FMT_S16;
-    c->bit_rate = 64000;
+    c->sample_fmt  = AV_SAMPLE_FMT_S16;
+    c->bit_rate    = 64000;
     c->sample_rate = 44100;
-    c->channels = 2;
+    c->channels    = 2;
 
     // some formats want stream headers to be separate
-    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
+    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
         c->flags |= CODEC_FLAG_GLOBAL_HEADER;
 
     return st;
@@ -105,7 +105,7 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
     }
 
     /* init signal generator */
-    t = 0;
+    t     = 0;
     tincr = 2 * M_PI * 110.0 / c->sample_rate;
     /* increment frequency by 110 Hz per second */
     tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
@@ -114,23 +114,24 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
         audio_input_frame_size = 10000;
     else
         audio_input_frame_size = c->frame_size;
-    samples = av_malloc(audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
-                        * c->channels);
+    samples = av_malloc(audio_input_frame_size *
+                        av_get_bytes_per_sample(c->sample_fmt) *
+                        c->channels);
 }
 
-/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
-   'nb_channels' channels */
+/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
+ * 'nb_channels' channels. */
 static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
 {
     int j, i, v;
     int16_t *q;
 
     q = samples;
-    for(j=0;j<frame_size;j++) {
+    for (j = 0; j < frame_size; j++) {
         v = (int)(sin(t) * 10000);
-        for(i = 0; i < nb_channels; i++)
+        for (i = 0; i < nb_channels; i++)
             *q++ = v;
-        t += tincr;
+        t     += tincr;
         tincr += tincr2;
     }
 }
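A note on the generator reindented above: tincr is the per-sample phase step of a 110 Hz tone, and tincr2 grows that step so the pitch rises by 110 Hz every second. A standalone sketch of the same arithmetic (the printed values are illustrative, not part of the commit):

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double sample_rate = 44100.0;
    /* Same constants as the example: phase step for a 110 Hz tone, and
     * the per-sample growth that adds 110 Hz of frequency per second. */
    double tincr  = 2 * M_PI * 110.0 / sample_rate;
    double tincr2 = 2 * M_PI * 110.0 / sample_rate / sample_rate;

    for (int sec = 0; sec <= 3; sec++) {
        long n = (long)(sec * sample_rate);
        /* After n samples the step is tincr + n * tincr2, so the
         * instantaneous frequency is step * sample_rate / (2 * M_PI). */
        double freq = (tincr + n * tincr2) * sample_rate / (2 * M_PI);
        printf("t = %d s: %.0f Hz\n", sec, freq); /* 110, 220, 330, 440 */
    }
    return 0;
}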
@@ -147,17 +148,19 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
 
     get_audio_frame(samples, audio_input_frame_size, c->channels);
     frame->nb_samples = audio_input_frame_size;
-    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, (uint8_t *)samples,
-                             audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
-                             * c->channels, 1);
+    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
+                             (uint8_t *)samples,
+                             audio_input_frame_size *
+                             av_get_bytes_per_sample(c->sample_fmt) *
+                             c->channels, 1);
 
     avcodec_encode_audio2(c, &pkt, frame, &got_packet);
     if (!got_packet)
         return;
 
-    pkt.stream_index= st->index;
+    pkt.stream_index = st->index;
 
-    /* write the compressed frame in the media file */
+    /* Write the compressed frame to the media file. */
     if (av_interleaved_write_frame(oc, &pkt) != 0) {
         fprintf(stderr, "Error while writing audio frame\n");
         exit(1);
@@ -178,7 +181,7 @@ static AVFrame *picture, *tmp_picture;
 static uint8_t *video_outbuf;
 static int frame_count, video_outbuf_size;
 
-/* add a video output stream */
+/* Add a video output stream. */
 static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
 {
     AVCodecContext *c;
@@ -200,31 +203,31 @@ static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
 
     c = st->codec;
 
-    /* put sample parameters */
+    /* Put sample parameters. */
     c->bit_rate = 400000;
-    /* resolution must be a multiple of two */
-    c->width = 352;
-    c->height = 288;
-    /* time base: this is the fundamental unit of time (in seconds) in terms
-       of which frame timestamps are represented. for fixed-fps content,
-       timebase should be 1/framerate and timestamp increments should be
-       identically 1. */
+    /* Resolution must be a multiple of two. */
+    c->width    = 352;
+    c->height   = 288;
+    /* timebase: This is the fundamental unit of time (in seconds) in terms
+     * of which frame timestamps are represented. For fixed-fps content,
+     * timebase should be 1/framerate and timestamp increments should be
+     * identical to 1. */
     c->time_base.den = STREAM_FRAME_RATE;
     c->time_base.num = 1;
-    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
-    c->pix_fmt = STREAM_PIX_FMT;
+    c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
+    c->pix_fmt       = STREAM_PIX_FMT;
     if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
         /* just for testing, we also add B frames */
         c->max_b_frames = 2;
     }
-    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
+    if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
         /* Needed to avoid using macroblocks in which some coeffs overflow.
-           This does not happen with normal video, it just happens here as
-           the motion of the chroma plane does not match the luma plane. */
-        c->mb_decision=2;
+         * This does not happen with normal video, it just happens here as
+         * the motion of the chroma plane does not match the luma plane. */
+        c->mb_decision = 2;
     }
-    // some formats want stream headers to be separate
-    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
+    /* Some formats want stream headers to be separate. */
+    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
         c->flags |= CODEC_FLAG_GLOBAL_HEADER;
 
     return st;
@@ -239,7 +242,7 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
     picture = avcodec_alloc_frame();
     if (!picture)
         return NULL;
-    size = avpicture_get_size(pix_fmt, width, height);
+    size        = avpicture_get_size(pix_fmt, width, height);
     picture_buf = av_malloc(size);
     if (!picture_buf) {
         av_free(picture);
@@ -264,26 +267,26 @@ static void open_video(AVFormatContext *oc, AVStream *st)
 
     video_outbuf = NULL;
     if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
-        /* allocate output buffer */
-        /* XXX: API change will be done */
-        /* buffers passed into lav* can be allocated any way you prefer,
-           as long as they're aligned enough for the architecture, and
-           they're freed appropriately (such as using av_free for buffers
-           allocated with av_malloc) */
+        /* Allocate output buffer. */
+        /* XXX: API change will be done. */
+        /* Buffers passed into lav* can be allocated any way you prefer,
+         * as long as they're aligned enough for the architecture, and
+         * they're freed appropriately (such as using av_free for buffers
+         * allocated with av_malloc). */
         video_outbuf_size = 200000;
-        video_outbuf = av_malloc(video_outbuf_size);
+        video_outbuf      = av_malloc(video_outbuf_size);
     }
 
-    /* allocate the encoded raw picture */
+    /* Allocate the encoded raw picture. */
     picture = alloc_picture(c->pix_fmt, c->width, c->height);
     if (!picture) {
         fprintf(stderr, "Could not allocate picture\n");
         exit(1);
     }
 
-    /* if the output format is not YUV420P, then a temporary YUV420P
-       picture is needed too. It is then converted to the required
-       output format */
+    /* If the output format is not YUV420P, then a temporary YUV420P
+     * picture is needed too. It is then converted to the required
+     * output format. */
     tmp_picture = NULL;
     if (c->pix_fmt != PIX_FMT_YUV420P) {
         tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
@@ -294,23 +297,22 @@ static void open_video(AVFormatContext *oc, AVStream *st)
     }
 }
 
-/* prepare a dummy image */
-static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
+/* Prepare a dummy image. */
+static void fill_yuv_image(AVFrame *pict, int frame_index,
+                           int width, int height)
 {
     int x, y, i;
 
     i = frame_index;
 
     /* Y */
-    for(y=0;y<height;y++) {
-        for(x=0;x<width;x++) {
+    for (y = 0; y < height; y++)
+        for (x = 0; x < width; x++)
             pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
-        }
-    }
 
     /* Cb and Cr */
-    for(y=0;y<height/2;y++) {
-        for(x=0;x<width/2;x++) {
+    for (y = 0; y < height / 2; y++) {
+        for (x = 0; x < width / 2; x++) {
             pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
             pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
         }
@@ -326,13 +328,13 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
     c = st->codec;
 
     if (frame_count >= STREAM_NB_FRAMES) {
-        /* no more frame to compress. The codec has a latency of a few
-           frames if using B frames, so we get the last frames by
-           passing the same picture again */
+        /* No more frames to compress. The codec has a latency of a few
+         * frames if using B-frames, so we get the last frames by
+         * passing the same picture again. */
     } else {
         if (c->pix_fmt != PIX_FMT_YUV420P) {
             /* as we only generate a YUV420P picture, we must convert it
-               to the codec pixel format if needed */
+             * to the codec pixel format if needed */
             if (img_convert_ctx == NULL) {
                 img_convert_ctx = sws_getContext(c->width, c->height,
                                                  PIX_FMT_YUV420P,
@@ -340,7 +342,8 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
                                                  c->pix_fmt,
                                                  sws_flags, NULL, NULL, NULL);
                 if (img_convert_ctx == NULL) {
-                    fprintf(stderr, "Cannot initialize the conversion context\n");
+                    fprintf(stderr,
+                            "Cannot initialize the conversion context\n");
                     exit(1);
                 }
             }
@@ -352,36 +355,37 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
         }
     }
 
-
     if (oc->oformat->flags & AVFMT_RAWPICTURE) {
-        /* raw video case. The API will change slightly in the near
-           futur for that */
+        /* Raw video case - the API will change slightly in the near
+         * future for that. */
         AVPacket pkt;
         av_init_packet(&pkt);
 
-        pkt.flags |= AV_PKT_FLAG_KEY;
-        pkt.stream_index= st->index;
-        pkt.data= (uint8_t *)picture;
-        pkt.size= sizeof(AVPicture);
+        pkt.flags        |= AV_PKT_FLAG_KEY;
+        pkt.stream_index  = st->index;
+        pkt.data          = (uint8_t *)picture;
+        pkt.size          = sizeof(AVPicture);
 
         ret = av_interleaved_write_frame(oc, &pkt);
     } else {
         /* encode the image */
-        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
-        /* if zero size, it means the image was buffered */
+        out_size = avcodec_encode_video(c, video_outbuf,
+                                        video_outbuf_size, picture);
+        /* If size is zero, it means the image was buffered. */
        if (out_size > 0) {
             AVPacket pkt;
             av_init_packet(&pkt);
 
             if (c->coded_frame->pts != AV_NOPTS_VALUE)
-                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
-            if(c->coded_frame->key_frame)
+                pkt.pts = av_rescale_q(c->coded_frame->pts,
+                                       c->time_base, st->time_base);
+            if (c->coded_frame->key_frame)
                 pkt.flags |= AV_PKT_FLAG_KEY;
-            pkt.stream_index= st->index;
-            pkt.data= video_outbuf;
-            pkt.size= out_size;
+            pkt.stream_index = st->index;
+            pkt.data         = video_outbuf;
+            pkt.size         = out_size;
 
-            /* write the compressed frame in the media file */
+            /* Write the compressed frame to the media file. */
             ret = av_interleaved_write_frame(oc, &pkt);
         } else {
             ret = 0;
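For reference, the pkt.pts line rewrapped above rescales a timestamp from the codec timebase (1/STREAM_FRAME_RATE here) into the stream timebase chosen by the muxer (1/90000 for MPEG, for instance). A hedged sketch of the underlying integer arithmetic; the helper below is illustrative and ignores the rounding and overflow handling that libavutil's av_rescale_q() performs:

#include <stdint.h>
#include <stdio.h>

typedef struct { int num, den; } Rational; /* stand-in for AVRational */

/* ticks * (src.num/src.den) seconds == result * (dst.num/dst.den) seconds */
static int64_t rescale_ticks(int64_t ticks, Rational src, Rational dst)
{
    return ticks * src.num * dst.den / ((int64_t)src.den * dst.num);
}

int main(void)
{
    Rational codec_tb  = { 1, 25 };    /* c->time_base */
    Rational stream_tb = { 1, 90000 }; /* assumed muxer timebase */

    /* Frame 50, i.e. 2 s in, lands at tick 180000 of the stream timebase. */
    printf("%lld\n", (long long)rescale_ticks(50, codec_tb, stream_tb));
    return 0;
}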
@@ -418,7 +422,7 @@ int main(int argc, char **argv)
     double audio_pts, video_pts;
     int i;
 
-    /* initialize libavcodec, and register all codecs and formats */
+    /* Initialize libavcodec, and register all codecs and formats. */
     av_register_all();
 
     if (argc != 2) {
@@ -432,8 +436,7 @@ int main(int argc, char **argv)
 
     filename = argv[1];
 
-    /* auto detect the output format from the name. default is
-       mpeg. */
+    /* Autodetect the output format from the name. default is MPEG. */
     fmt = av_guess_format(NULL, filename, NULL);
     if (!fmt) {
         printf("Could not deduce output format from file extension: using MPEG.\n");
@@ -444,7 +447,7 @@ int main(int argc, char **argv)
         return 1;
     }
 
-    /* allocate the output media context */
+    /* Allocate the output media context. */
     oc = avformat_alloc_context();
     if (!oc) {
         fprintf(stderr, "Memory error\n");
@@ -453,8 +456,8 @@ int main(int argc, char **argv)
     oc->oformat = fmt;
     snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
 
-    /* add the audio and video streams using the default format codecs
-       and initialize the codecs */
+    /* Add the audio and video streams using the default format codecs
+     * and initialize the codecs. */
     video_st = NULL;
     audio_st = NULL;
     if (fmt->video_codec != CODEC_ID_NONE) {
@@ -464,8 +467,8 @@ int main(int argc, char **argv)
         audio_st = add_audio_stream(oc, fmt->audio_codec);
     }
 
-    /* now that all the parameters are set, we can open the audio and
-       video codecs and allocate the necessary encode buffers */
+    /* Now that all the parameters are set, we can open the audio and
+     * video codecs and allocate the necessary encode buffers. */
     if (video_st)
         open_video(oc, video_st);
     if (audio_st)
@@ -481,18 +484,19 @@ int main(int argc, char **argv)
         }
     }
 
-    /* write the stream header, if any */
+    /* Write the stream header, if any. */
     avformat_write_header(oc, NULL);
 
-    for(;;) {
-        /* compute current audio and video time */
+    for (;;) {
+        /* Compute current audio and video time. */
         if (audio_st)
             audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
         else
             audio_pts = 0.0;
 
         if (video_st)
-            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
+            video_pts = (double)video_st->pts.val * video_st->time_base.num /
+                        video_st->time_base.den;
         else
             video_pts = 0.0;
 
@@ -508,28 +512,27 @@ int main(int argc, char **argv)
         }
     }
 
-    /* write the trailer, if any.  the trailer must be written
-     * before you close the CodecContexts open when you wrote the
-     * header; otherwise write_trailer may try to use memory that
-     * was freed on av_codec_close() */
+    /* Write the trailer, if any. The trailer must be written before you
+     * close the CodecContexts open when you wrote the header; otherwise
+     * av_write_trailer() may try to use memory that was freed on
+     * av_codec_close(). */
     av_write_trailer(oc);
 
-    /* close each codec */
+    /* Close each codec. */
     if (video_st)
         close_video(oc, video_st);
     if (audio_st)
         close_audio(oc, audio_st);
 
-    /* free the streams */
-    for(i = 0; i < oc->nb_streams; i++) {
+    /* Free the streams. */
+    for (i = 0; i < oc->nb_streams; i++) {
         av_freep(&oc->streams[i]->codec);
         av_freep(&oc->streams[i]);
     }
 
-    if (!(fmt->flags & AVFMT_NOFILE)) {
-        /* close the output file */
+    if (!(fmt->flags & AVFMT_NOFILE))
+        /* Close the output file. */
         avio_close(oc->pb);
-    }
 
     /* free the stream */
     av_free(oc);
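Finally, the audio_pts/video_pts computation cleaned up in main() is what drives interleaving: each stream's pts.val counts ticks of that stream's own time_base, so both are converted to seconds before being compared. A minimal standalone sketch of that decision, with made-up tick counts:

#include <stdio.h>

typedef struct { int num, den; } Rational; /* stand-in for AVRational */

/* Mirror of the (double)st->pts.val * num / den expressions in main(). */
static double pts_seconds(long ticks, Rational tb)
{
    return (double)ticks * tb.num / tb.den;
}

int main(void)
{
    Rational audio_tb = { 1, 44100 }; /* audio ticks are samples */
    Rational video_tb = { 1, 25 };    /* video ticks are frames */
    long audio_ticks  = 88200;        /* 2.00 s of audio written so far */
    long video_ticks  = 40;           /* 1.60 s of video written so far */

    double audio_pts = pts_seconds(audio_ticks, audio_tb);
    double video_pts = pts_seconds(video_ticks, video_tb);

    /* Same rule as the example's loop: advance whichever stream is
     * behind, so the muxer receives the packets roughly interleaved. */
    printf("write next: %s (audio %.2f s, video %.2f s)\n",
           video_pts < audio_pts ? "video" : "audio",
           audio_pts, video_pts);
    return 0;
}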