Merge remote-tracking branch 'qatar/master'

* qatar/master: (40 commits)
  swf: check return values for av_get/new_packet().
  wavpack: Don't shift minclip/maxclip
  rtpenc: Expose the max packet size via an avoption
  rtpenc: Move max_packet_size to a context variable
  rtpenc: Add an option for not sending RTCP packets
  lavc: drop encode() support for video.
  snowenc: switch to encode2().
  snowenc: don't abuse input picture for storing information.
  a64multienc: switch to encode2().
  a64multienc: don't write into output buffer when there's no output.
  libxvid: switch to encode2().
  tiffenc: switch to encode2().
  tiffenc: properly forward error codes in encode_frame().
  lavc: drop libdirac encoder.
  gifenc: switch to encode2().
  libvpxenc: switch to encode2().
  flashsvenc: switch to encode2().
  Remove libpostproc.
  lcl: don't overwrite input memory.
  swscale: take first/lastline over/underflows into account for MMX.
  ...

Conflicts:
	.gitignore
	Makefile
	cmdutils.c
	configure
	doc/APIchanges
	libavcodec/Makefile
	libavcodec/allcodecs.c
	libavcodec/libdiracenc.c
	libavcodec/libxvidff.c
	libavcodec/qtrleenc.c
	libavcodec/tiffenc.c
	libavcodec/utils.c
	libavformat/mov.c
	libavformat/movenc.c
	libpostproc/Makefile
	libpostproc/postprocess.c
	libpostproc/postprocess.h
	libpostproc/postprocess_altivec_template.c
	libpostproc/postprocess_internal.h
	libpostproc/postprocess_template.c
	libswscale/swscale.c
	libswscale/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
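The bulk of this merge is the mechanical switch of encoders from the old encode() callback (caller-provided buf/buf_size) to encode2(), where the encoder fills an AVPacket and reports output via *got_packet. Below is a minimal sketch of the new callback shape, distilled from the per-codec diffs that follow; the foo_* names and the size bound are hypothetical, while ff_alloc_packet(), AV_PKT_FLAG_KEY and the negative-AVERROR convention are what the converted encoders actually use:

    #include "avcodec.h"
    #include "internal.h"   /* ff_alloc_packet() */

    /* hypothetical encoder: everything named foo_* is illustrative only */
    static int foo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *pict, int *got_packet)
    {
        /* placeholder worst-case bound; each real encoder computes its own */
        int max_size = avctx->width * avctx->height * 3 + 1024;
        int bytes_written = 0;
        int ret;

        /* the encoder now obtains the output buffer itself instead of
         * receiving a caller-provided buf/buf_size pair */
        if ((ret = ff_alloc_packet(pkt, max_size)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
            return ret;
        }

        /* ... write the bitstream into pkt->data, count bytes_written ... */

        pkt->size   = bytes_written;    /* shrink to what was produced   */
        pkt->pts    = pict->pts;        /* timing now travels in the pkt */
        pkt->flags |= AV_PKT_FLAG_KEY;  /* key frames are flagged here   */
        *got_packet = 1;                /* 0 would mean "no output yet"  */
        return 0;                       /* negative AVERROR on failure   */
    }

The AVCodec entry then points .encode2 (instead of .encode) at the function, which is the one-line change repeated in every per-codec hunk below.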
commit e2cc39b609
Michael Niedermayer, 2012-02-24 02:57:18 +01:00
44 changed files with 721 additions and 748 deletions

configure

@@ -1545,7 +1545,6 @@ h264_parser_select="golomb h264chroma h264dsp h264pred"
 libaacplus_encoder_deps="libaacplus"
 libcelt_decoder_deps="libcelt"
 libdirac_decoder_deps="libdirac !libschroedinger"
-libdirac_encoder_deps="libdirac"
 libfaac_encoder_deps="libfaac"
 libgsm_decoder_deps="libgsm"
 libgsm_encoder_deps="libgsm"
@@ -3145,8 +3144,8 @@ enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
                                { check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
                                  die "ERROR: libcelt version must be >= 0.11.0."; }
 enabled libdirac && require_pkg_config dirac \
-    "libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
-    "dirac_decoder_init dirac_encoder_init"
+    "libdirac_decoder/dirac_parser.h" \
+    "dirac_decoder_init"
 enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaac
 enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
 enabled libgsm && require libgsm gsm/gsm.h gsm_create -lgsm

libavcodec/Makefile

@@ -603,7 +603,7 @@ OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
                                  flacdec.o flacdata.o flac.o \
                                  mpegaudiodata.o vorbis_data.o
 OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
-OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o timecode.o
+OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o timecode.o
 OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
 OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o
 OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
@@ -624,7 +624,6 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
 OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
 OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
 OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
-OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o
 OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
 OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
 OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o

libavcodec/a64enc.h

@@ -50,6 +50,9 @@ typedef struct A64Context {
     uint8_t *mc_colram;
     uint8_t *mc_palette;
     int mc_pal_size;
+
+    /* pts of the next packet that will be output */
+    int64_t next_pts;
 } A64Context;
 
 #endif /* AVCODEC_A64ENC_H */

libavcodec/a64multienc.c

@@ -28,6 +28,7 @@
 #include "a64colors.h"
 #include "a64tables.h"
 #include "elbg.h"
+#include "internal.h"
 #include "libavutil/intreadwrite.h"
 
 #define DITHERSTEPS 8
@@ -221,6 +222,8 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
     if (!avctx->codec_tag)
         avctx->codec_tag = AV_RL32("a64m");
 
+    c->next_pts = AV_NOPTS_VALUE;
+
     return 0;
 }
@@ -239,11 +242,10 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
     }
 }
 
-static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
-                                 int buf_size, void *data)
+static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                                 const AVFrame *pict, int *got_packet)
 {
     A64Context *c = avctx->priv_data;
-    AVFrame *pict = data;
     AVFrame *const p = (AVFrame *) & c->picture;
 
     int frame;
@@ -251,7 +253,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
     int b_height;
     int b_width;
 
-    int req_size;
+    int req_size, ret;
+    uint8_t *buf;
 
     int *charmap = c->mc_charmap;
     uint8_t *colram = c->mc_colram;
@@ -274,7 +277,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
     }
 
     /* no data, means end encoding asap */
-    if (!data) {
+    if (!pict) {
         /* all done, end encoding */
         if (!c->mc_lifetime) return 0;
         /* no more frames in queue, prepare to flush remaining frames */
@@ -292,6 +295,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
         p->key_frame = 1;
         to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
         c->mc_frame_counter++;
+        if (c->next_pts == AV_NOPTS_VALUE)
+            c->next_pts = pict->pts;
         /* lifetime is not reached so wait for next frame first */
         return 0;
     }
@@ -302,6 +307,13 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
         req_size = 0;
         /* any frames to encode? */
         if (c->mc_lifetime) {
+            req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
+            if ((ret = ff_alloc_packet(pkt, req_size)) < 0) {
+                av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", req_size);
+                return ret;
+            }
+            buf = pkt->data;
+
             /* calc optimal new charset + charmaps */
             ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
             ff_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
@@ -310,15 +322,12 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
             render_charset(avctx, charset, colram);
 
             /* copy charset to buf */
-            memcpy(buf,charset, charset_size);
+            memcpy(buf, charset, charset_size);
 
             /* advance pointers */
             buf += charset_size;
             charset += charset_size;
-            req_size += charset_size;
         }
-        /* no charset so clean buf */
-        else memset(buf, 0, charset_size);
 
         /* write x frames to buf */
         for (frame = 0; frame < c->mc_lifetime; frame++) {
@@ -351,11 +360,12 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
         /* reset counter */
         c->mc_frame_counter = 0;
 
-        if (req_size > buf_size) {
-            av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", req_size, buf_size);
-            return -1;
-        }
-        return req_size;
+        pkt->pts = pkt->dts = c->next_pts;
+        c->next_pts = AV_NOPTS_VALUE;
+
+        pkt->size = req_size;
+        pkt->flags |= AV_PKT_FLAG_KEY;
+        *got_packet = !!req_size;
     }
     return 0;
 }
@@ -366,7 +376,7 @@ AVCodec ff_a64multi_encoder = {
     .id = CODEC_ID_A64_MULTI,
     .priv_data_size = sizeof(A64Context),
     .init = a64multi_init_encoder,
-    .encode = a64multi_encode_frame,
+    .encode2 = a64multi_encode_frame,
     .close = a64multi_close_encoder,
     .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
@@ -379,7 +389,7 @@ AVCodec ff_a64multi5_encoder = {
     .id = CODEC_ID_A64_MULTI5,
     .priv_data_size = sizeof(A64Context),
     .init = a64multi_init_encoder,
-    .encode = a64multi_encode_frame,
+    .encode2 = a64multi_encode_frame,
     .close = a64multi_close_encoder,
     .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),

libavcodec/ac3_parser.c

@@ -134,7 +134,7 @@ int avpriv_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr)
                 (hdr->num_blocks * 256.0));
         hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on;
     }
-    hdr->channel_layout = ff_ac3_channel_layout_tab[hdr->channel_mode];
+    hdr->channel_layout = avpriv_ac3_channel_layout_tab[hdr->channel_mode];
     if (hdr->lfe_on)
         hdr->channel_layout |= AV_CH_LOW_FREQUENCY;

libavcodec/ac3dec.c

@@ -1378,7 +1378,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
         avctx->request_channels < s->channels) {
         s->out_channels = avctx->request_channels;
         s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
-        s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode];
+        s->channel_layout = avpriv_ac3_channel_layout_tab[s->output_mode];
     }
     avctx->channels = s->out_channels;
     avctx->channel_layout = s->channel_layout;

libavcodec/ac3tab.c

@@ -84,7 +84,7 @@ const uint8_t ff_ac3_channels_tab[8] = {
 /**
  * Map audio coding mode (acmod) to channel layout mask.
  */
-const uint16_t ff_ac3_channel_layout_tab[8] = {
+const uint16_t avpriv_ac3_channel_layout_tab[8] = {
     AV_CH_LAYOUT_STEREO,
     AV_CH_LAYOUT_MONO,
     AV_CH_LAYOUT_STEREO,

libavcodec/ac3tab.h

@@ -33,7 +33,7 @@
 extern const uint16_t ff_ac3_frame_size_tab[38][3];
 extern const uint8_t ff_ac3_channels_tab[8];
-extern const uint16_t ff_ac3_channel_layout_tab[8];
+extern const uint16_t avpriv_ac3_channel_layout_tab[8];
 extern const uint8_t ff_ac3_enc_channel_map[8][2][6];
 extern const uint8_t ff_ac3_dec_channel_map[8][2][6];
 extern const uint16_t ff_ac3_sample_rate_tab[3];

libavcodec/allcodecs.c

@@ -400,7 +400,7 @@ void avcodec_register_all(void)
     /* external libraries */
     REGISTER_ENCODER (LIBAACPLUS, libaacplus);
     REGISTER_DECODER (LIBCELT, libcelt);
-    REGISTER_ENCDEC  (LIBDIRAC, libdirac);
+    REGISTER_DECODER (LIBDIRAC, libdirac);
     REGISTER_ENCODER (LIBFAAC, libfaac);
     REGISTER_ENCDEC  (LIBGSM, libgsm);
     REGISTER_ENCDEC  (LIBGSM_MS, libgsm_ms);

libavcodec/asv1.c

@@ -325,10 +325,12 @@ static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64]){
     return 0;
 }
 
+#define MAX_MB_SIZE (30*16*16*3/2/8)
+
 static inline int encode_mb(ASV1Context *a, DCTELEM block[6][64]){
     int i;
 
-    if(a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < 30*16*16*3/2/8){
+    if (a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < MAX_MB_SIZE) {
         av_log(a->avctx, AV_LOG_ERROR, "encoded frame too large\n");
         return -1;
     }
@@ -461,14 +463,22 @@ static int decode_frame(AVCodecContext *avctx,
 }
 
 #if CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER
-static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *pict, int *got_packet)
+{
     ASV1Context * const a = avctx->priv_data;
-    AVFrame *pict = data;
     AVFrame * const p= &a->picture;
-    int size;
+    int size, ret;
     int mb_x, mb_y;
 
-    init_put_bits(&a->pb, buf, buf_size);
+    if (!pkt->data &&
+        (ret = av_new_packet(pkt, a->mb_height*a->mb_width*MAX_MB_SIZE +
+                                  FF_MIN_BUFFER_SIZE)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
+        return ret;
+    }
+
+    init_put_bits(&a->pb, pkt->data, pkt->size);
 
     *p = *pict;
     p->pict_type= AV_PICTURE_TYPE_I;
@@ -505,14 +515,18 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
     size= put_bits_count(&a->pb)/32;
 
     if(avctx->codec_id == CODEC_ID_ASV1)
-        a->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
+        a->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
     else{
         int i;
         for(i=0; i<4*size; i++)
-            buf[i]= av_reverse[ buf[i] ];
+            pkt->data[i] = av_reverse[pkt->data[i]];
     }
 
-    return size*4;
+    pkt->size = size*4;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 #endif /* CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER */
@@ -634,7 +648,7 @@ AVCodec ff_asv1_encoder = {
     .id = CODEC_ID_ASV1,
     .priv_data_size = sizeof(ASV1Context),
     .init = encode_init,
-    .encode = encode_frame,
+    .encode2 = encode_frame,
     //encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("ASUS V1"),
@@ -648,7 +662,7 @@ AVCodec ff_asv2_encoder = {
     .id = CODEC_ID_ASV2,
     .priv_data_size = sizeof(ASV1Context),
     .init = encode_init,
-    .encode = encode_frame,
+    .encode2 = encode_frame,
     //encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("ASUS V2"),

libavcodec/ffv1.c

@@ -1135,17 +1135,25 @@ static int encode_slice(AVCodecContext *c, void *arg){
     return 0;
 }
 
-static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *pict, int *got_packet)
+{
     FFV1Context *f = avctx->priv_data;
     RangeCoder * const c= &f->slice_context[0]->c;
-    AVFrame *pict = data;
    AVFrame * const p= &f->picture;
     int used_count= 0;
     uint8_t keystate=128;
     uint8_t *buf_p;
-    int i;
+    int i, ret;
 
-    ff_init_range_encoder(c, buf, buf_size);
+    if (!pkt->data &&
+        (ret = av_new_packet(pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
+                                  + FF_MIN_BUFFER_SIZE)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
+        return ret;
+    }
+
+    ff_init_range_encoder(c, pkt->data, pkt->size);
     ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
 
     *p = *pict;
@@ -1165,7 +1173,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
     if(!f->ac){
         used_count += ff_rac_terminate(c);
         //printf("pos=%d\n", used_count);
-        init_put_bits(&f->slice_context[0]->pb, buf + used_count, buf_size - used_count);
+        init_put_bits(&f->slice_context[0]->pb, pkt->data + used_count, pkt->size - used_count);
     }else if (f->ac>1){
         int i;
         for(i=1; i<256; i++){
@@ -1176,8 +1184,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
     for(i=1; i<f->slice_count; i++){
         FFV1Context *fs= f->slice_context[i];
-        uint8_t *start= buf + (buf_size-used_count)*i/f->slice_count;
-        int len= buf_size/f->slice_count;
+        uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count;
+        int len = pkt->size/f->slice_count;
 
         if(fs->ac){
             ff_init_range_encoder(&fs->c, start, len);
@@ -1187,7 +1195,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
     }
     avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
 
-    buf_p=buf;
+    buf_p = pkt->data;
     for(i=0; i<f->slice_count; i++){
         FFV1Context *fs= f->slice_context[i];
         int bytes;
@@ -1202,7 +1210,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
             used_count= 0;
         }
         if(i>0){
-            av_assert0(bytes < buf_size/f->slice_count);
+            av_assert0(bytes < pkt->size/f->slice_count);
             memmove(buf_p, fs->ac ? fs->c.bytestream_start : fs->pb.buf, bytes);
             av_assert0(bytes < (1<<24));
             AV_WB24(buf_p+bytes, bytes);
@@ -1255,7 +1263,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
         avctx->stats_out[0] = '\0';
 
     f->picture_number++;
-    return buf_p-buf;
+    pkt->size = buf_p - pkt->data;
+    pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
+    *got_packet = 1;
+
+    return 0;
 }
 #endif /* CONFIG_FFV1_ENCODER */
@@ -1843,7 +1855,7 @@ AVCodec ff_ffv1_encoder = {
     .id = CODEC_ID_FFV1,
     .priv_data_size = sizeof(FFV1Context),
     .init = encode_init,
-    .encode = encode_frame,
+    .encode2 = encode_frame,
     .close = common_end,
     .capabilities = CODEC_CAP_SLICE_THREADS,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_NONE},

libavcodec/flacdec.c

@@ -33,6 +33,7 @@
 #include <limits.h>
 
+#include "libavutil/audioconvert.h"
 #include "libavutil/crc.h"
 #include "avcodec.h"
 #include "internal.h"
@@ -62,6 +63,15 @@ typedef struct FLACContext {
     int32_t *decoded[FLAC_MAX_CHANNELS];    ///< decoded samples
 } FLACContext;
 
+static const int64_t flac_channel_layouts[6] = {
+    AV_CH_LAYOUT_MONO,
+    AV_CH_LAYOUT_STEREO,
+    AV_CH_LAYOUT_SURROUND,
+    AV_CH_LAYOUT_QUAD,
+    AV_CH_LAYOUT_5POINT0,
+    AV_CH_LAYOUT_5POINT1
+};
+
 static void allocate_buffers(FLACContext *s);
 
 int avpriv_flac_is_extradata_valid(AVCodecContext *avctx,
@@ -120,6 +130,9 @@ static av_cold int flac_decode_init(AVCodecContext *avctx)
     avcodec_get_frame_defaults(&s->frame);
     avctx->coded_frame = &s->frame;
 
+    if (avctx->channels <= FF_ARRAY_ELEMS(flac_channel_layouts))
+        avctx->channel_layout = flac_channel_layouts[avctx->channels - 1];
+
     return 0;
 }

libavcodec/flashsvenc.c

@@ -49,6 +49,7 @@
 #include <zlib.h>
 
 #include "avcodec.h"
+#include "internal.h"
 #include "put_bits.h"
 #include "bytestream.h"
@@ -194,11 +195,10 @@ static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf,
 }
 
-static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf,
-                                int buf_size, void *data)
+static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                                const AVFrame *pict, int *got_packet)
 {
     FlashSVContext * const s = avctx->priv_data;
-    AVFrame *pict = data;
     AVFrame * const p = &s->frame;
     uint8_t *pfptr;
     int res;
@@ -228,14 +228,14 @@ static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf,
         I_frame = 1;
     }
 
-    if (buf_size < s->image_width * s->image_height * 3) {
+    if ((res = ff_alloc_packet(pkt, s->image_width * s->image_height * 3)) < 0) {
         //Conservative upper bound check for compressed data
-        av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n",
-               buf_size, s->image_width * s->image_height * 3);
-        return -1;
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n",
+               s->image_width * s->image_height * 3);
+        return res;
     }
 
-    res = encode_bitstream(s, p, buf, buf_size, opt_w * 16, opt_h * 16,
-                           pfptr, &I_frame);
+    pkt->size = encode_bitstream(s, p, pkt->data, pkt->size, opt_w * 16, opt_h * 16,
+                                 pfptr, &I_frame);
 
     //save the current frame
@@ -259,7 +259,11 @@ static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf,
     avctx->coded_frame = p;
 
-    return res;
+    if (p->key_frame)
+        pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 static av_cold int flashsv_encode_end(AVCodecContext *avctx)
@@ -281,7 +285,7 @@ AVCodec ff_flashsv_encoder = {
     .id = CODEC_ID_FLASHSV,
     .priv_data_size = sizeof(FlashSVContext),
     .init = flashsv_encode_init,
-    .encode = flashsv_encode_frame,
+    .encode2 = flashsv_encode_frame,
     .close = flashsv_encode_end,
     .pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video"),

libavcodec/gif.c

@@ -43,6 +43,7 @@
 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"
 #include "lzw.h"
 
 /* The GIF format uses reversed order for bitstreams... */
@@ -155,20 +156,32 @@ static av_cold int gif_encode_init(AVCodecContext *avctx)
 }
 
 /* better than nothing gif encoder */
-static int gif_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data)
+static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                            const AVFrame *pict, int *got_packet)
 {
     GIFContext *s = avctx->priv_data;
-    AVFrame *pict = data;
     AVFrame *const p = (AVFrame *)&s->picture;
-    uint8_t *outbuf_ptr = outbuf;
-    uint8_t *end = outbuf + buf_size;
+    uint8_t *outbuf_ptr, *end;
+    int ret;
+
+    if ((ret = ff_alloc_packet(pkt, avctx->width*avctx->height*7/5 + FF_MIN_BUFFER_SIZE)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
+        return ret;
+    }
+    outbuf_ptr = pkt->data;
+    end        = pkt->data + pkt->size;
 
     *p = *pict;
     p->pict_type = AV_PICTURE_TYPE_I;
     p->key_frame = 1;
     gif_image_write_header(avctx, &outbuf_ptr, (uint32_t *)pict->data[1]);
     gif_image_write_image(avctx, &outbuf_ptr, end, pict->data[0], pict->linesize[0]);
-    return outbuf_ptr - outbuf;
+
+    pkt->size = outbuf_ptr - pkt->data;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 static int gif_encode_close(AVCodecContext *avctx)
@@ -186,7 +199,7 @@ AVCodec ff_gif_encoder = {
     .id = CODEC_ID_GIF,
     .priv_data_size = sizeof(GIFContext),
     .init = gif_encode_init,
-    .encode = gif_encode_frame,
+    .encode2 = gif_encode_frame,
     .close = gif_encode_close,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("GIF (Graphics Interchange Format)"),

libavcodec/jpeglsenc.c

@@ -28,6 +28,7 @@
 #include "avcodec.h"
 #include "get_bits.h"
 #include "golomb.h"
+#include "internal.h"
 #include "mathops.h"
 #include "dsputil.h"
 #include "mjpeg.h"
@@ -227,23 +228,19 @@ static void ls_store_lse(JLSState *state, PutBitContext *pb){
     put_bits(pb, 16, state->reset);
 }
 
-static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
+static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
+                             const AVFrame *pict, int *got_packet)
+{
     JpeglsContext * const s = avctx->priv_data;
-    AVFrame *pict = data;
     AVFrame * const p= (AVFrame*)&s->picture;
     const int near = avctx->prediction_method;
     PutBitContext pb, pb2;
     GetBitContext gb;
     uint8_t *buf2, *zero, *cur, *last;
     JLSState *state;
-    int i, size;
+    int i, size, ret;
     int comps;
 
-    buf2 = av_malloc(buf_size);
-
-    init_put_bits(&pb, buf, buf_size);
-    init_put_bits(&pb2, buf2, buf_size);
-
     *p = *pict;
     p->pict_type= AV_PICTURE_TYPE_I;
     p->key_frame= 1;
@@ -253,6 +250,17 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
     else
         comps = 3;
 
+    if ((ret = ff_alloc_packet(pkt, avctx->width*avctx->height*comps*4 +
+                               FF_MIN_BUFFER_SIZE)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
+        return ret;
+    }
+
+    buf2 = av_malloc(pkt->size);
+
+    init_put_bits(&pb, pkt->data, pkt->size);
+    init_put_bits(&pb2, buf2, pkt->size);
+
     /* write our own JPEG header, can't use mjpeg_picture_header */
     put_marker(&pb, SOI);
     put_marker(&pb, SOF48);
@@ -366,7 +374,10 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
     emms_c();
 
-    return put_bits_count(&pb) >> 3;
+    pkt->size = put_bits_count(&pb) >> 3;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+    return 0;
 }
 
 static av_cold int encode_init_ls(AVCodecContext *ctx) {
@@ -388,7 +399,7 @@ AVCodec ff_jpegls_encoder = { //FIXME avoid MPV_* lossless JPEG should not need
     .id = CODEC_ID_JPEGLS,
     .priv_data_size = sizeof(JpeglsContext),
     .init = encode_init_ls,
-    .encode = encode_picture_ls,
+    .encode2 = encode_picture_ls,
     .pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, PIX_FMT_GRAY16, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("JPEG-LS"),
 };

libavcodec/lcldec.c

@@ -242,9 +242,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
          * gives a file with ZLIB fourcc, but frame is really uncompressed.
          * To be sure that's true check also frame size */
         if (c->compression == COMP_ZLIB_NORMAL && c->imgtype == IMGTYPE_RGB24 &&
-            len == width * height * 3)
-            break;
-        if (c->flags & FLAG_MULTITHREAD) {
+            len == width * height * 3) {
+            if (c->flags & FLAG_PNGFILTER) {
+                memcpy(c->decomp_buf, encoded, len);
+                encoded = c->decomp_buf;
+            } else {
+                break;
+            }
+        } else if (c->flags & FLAG_MULTITHREAD) {
             int ret;
             mthread_inlen = AV_RL32(encoded);
             mthread_inlen = FFMIN(mthread_inlen, len - 8);

libavcodec/lclenc.c

@@ -68,12 +68,20 @@ typedef struct LclEncContext {
  * Encode a frame
  *
  */
-static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *pict, int *got_packet)
+{
     LclEncContext *c = avctx->priv_data;
-    AVFrame *pict = data;
     AVFrame * const p = &c->pic;
-    int i;
+    int i, ret;
     int zret; // Zlib return code
+    int max_size = deflateBound(&c->zstream, avctx->width * avctx->height * 3);
+
+    if (!pkt->data &&
+        (ret = av_new_packet(pkt, max_size)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error allocating packet of size %d.\n", max_size);
+        return ret;
+    }
 
     *p = *pict;
     p->pict_type= AV_PICTURE_TYPE_I;
@@ -89,8 +97,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
         av_log(avctx, AV_LOG_ERROR, "Deflate reset error: %d\n", zret);
         return -1;
     }
-    c->zstream.next_out = buf;
-    c->zstream.avail_out = buf_size;
+    c->zstream.next_out = pkt->data;
+    c->zstream.avail_out = pkt->size;
 
     for(i = avctx->height - 1; i >= 0; i--) {
         c->zstream.next_in = p->data[0]+p->linesize[0]*i;
@@ -107,7 +115,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
         return -1;
     }
 
-    return c->zstream.total_out;
+    pkt->size = c->zstream.total_out;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
 }
 
 /*
@@ -176,7 +188,7 @@ AVCodec ff_zlib_encoder = {
     .id = CODEC_ID_ZLIB,
     .priv_data_size = sizeof(LclEncContext),
     .init = encode_init,
-    .encode = encode_frame,
+    .encode2 = encode_frame,
     .close = encode_end,
     .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_BGR24, PIX_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec Library) ZLIB"),

libavcodec/libdiracenc.c (deleted)

@@ -1,405 +0,0 @@
/*
* Dirac encoding support via libdirac library
* Copyright (c) 2005 BBC, Andrew Kennedy <dirac at rd dot bbc dot co dot uk>
* Copyright (c) 2006-2008 BBC, Anuradha Suraparaju <asuraparaju at gmail dot com >
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Dirac encoding support via libdirac library; more details about the
* Dirac project can be found at http://dirac.sourceforge.net/.
* The libdirac_encoder library implements Dirac specification version 2.2
* (http://dirac.sourceforge.net/specification.html).
*/
#include "libdirac_libschro.h"
#include "libdirac.h"
#undef NDEBUG
#include <assert.h>
#include <libdirac_encoder/dirac_encoder.h>
/** Dirac encoder private data */
typedef struct DiracEncoderParams {
/** Dirac encoder context */
dirac_encoder_context_t enc_ctx;
/** frame being encoded */
AVFrame picture;
/** frame size */
int frame_size;
/** Dirac encoder handle */
dirac_encoder_t* p_encoder;
/** input frame buffer */
unsigned char *p_in_frame_buf;
/** buffer to store encoder output before writing it to the frame queue */
unsigned char *enc_buf;
/** size of encoder buffer */
int enc_buf_size;
/** queue storing encoded frames */
DiracSchroQueue enc_frame_queue;
/** end of sequence signalled by user, 0 - false, 1 - true */
int eos_signalled;
/** end of sequence returned by encoder, 0 - false, 1 - true */
int eos_pulled;
} DiracEncoderParams;
/**
* Works out Dirac-compatible chroma format.
*/
static dirac_chroma_t GetDiracChromaFormat(enum PixelFormat ff_pix_fmt)
{
int num_formats = sizeof(dirac_pixel_format_map) /
sizeof(dirac_pixel_format_map[0]);
int idx;
for (idx = 0; idx < num_formats; ++idx)
if (dirac_pixel_format_map[idx].ff_pix_fmt == ff_pix_fmt)
return dirac_pixel_format_map[idx].dirac_pix_fmt;
return formatNK;
}
/**
* Dirac video preset table. Ensure that this tables matches up correctly
* with the ff_dirac_schro_video_format_info table in libdirac_libschro.c.
*/
static const VideoFormat ff_dirac_video_formats[]={
VIDEO_FORMAT_CUSTOM ,
VIDEO_FORMAT_QSIF525 ,
VIDEO_FORMAT_QCIF ,
VIDEO_FORMAT_SIF525 ,
VIDEO_FORMAT_CIF ,
VIDEO_FORMAT_4SIF525 ,
VIDEO_FORMAT_4CIF ,
VIDEO_FORMAT_SD_480I60 ,
VIDEO_FORMAT_SD_576I50 ,
VIDEO_FORMAT_HD_720P60 ,
VIDEO_FORMAT_HD_720P50 ,
VIDEO_FORMAT_HD_1080I60 ,
VIDEO_FORMAT_HD_1080I50 ,
VIDEO_FORMAT_HD_1080P60 ,
VIDEO_FORMAT_HD_1080P50 ,
VIDEO_FORMAT_DIGI_CINEMA_2K24 ,
VIDEO_FORMAT_DIGI_CINEMA_4K24 ,
};
/**
* Returns the video format preset matching the input video dimensions and
* time base.
*/
static VideoFormat GetDiracVideoFormatPreset(AVCodecContext *avccontext)
{
unsigned int num_formats = sizeof(ff_dirac_video_formats) /
sizeof(ff_dirac_video_formats[0]);
unsigned int idx = ff_dirac_schro_get_video_format_idx(avccontext);
return (idx < num_formats) ?
ff_dirac_video_formats[idx] : VIDEO_FORMAT_CUSTOM;
}
static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
{
DiracEncoderParams* p_dirac_params = avccontext->priv_data;
int no_local = 1;
int verbose = avccontext->debug;
VideoFormat preset;
/* get Dirac preset */
preset = GetDiracVideoFormatPreset(avccontext);
/* initialize the encoder context */
dirac_encoder_context_init(&p_dirac_params->enc_ctx, preset);
p_dirac_params->enc_ctx.src_params.chroma = GetDiracChromaFormat(avccontext->pix_fmt);
if (p_dirac_params->enc_ctx.src_params.chroma == formatNK) {
av_log(avccontext, AV_LOG_ERROR,
"Unsupported pixel format %d. This codec supports only "
"Planar YUV formats (yuv420p, yuv422p, yuv444p\n",
avccontext->pix_fmt);
return -1;
}
p_dirac_params->enc_ctx.src_params.frame_rate.numerator = avccontext->time_base.den;
p_dirac_params->enc_ctx.src_params.frame_rate.denominator = avccontext->time_base.num;
p_dirac_params->enc_ctx.src_params.width = avccontext->width;
p_dirac_params->enc_ctx.src_params.height = avccontext->height;
p_dirac_params->frame_size = avpicture_get_size(avccontext->pix_fmt,
avccontext->width,
avccontext->height);
avccontext->coded_frame = &p_dirac_params->picture;
if (no_local) {
p_dirac_params->enc_ctx.decode_flag = 0;
p_dirac_params->enc_ctx.instr_flag = 0;
} else {
p_dirac_params->enc_ctx.decode_flag = 1;
p_dirac_params->enc_ctx.instr_flag = 1;
}
/* Intra-only sequence */
if (!avccontext->gop_size) {
p_dirac_params->enc_ctx.enc_params.num_L1 = 0;
if (avccontext->coder_type == FF_CODER_TYPE_VLC)
p_dirac_params->enc_ctx.enc_params.using_ac = 0;
} else
avccontext->has_b_frames = 1;
if (avccontext->flags & CODEC_FLAG_QSCALE) {
if (avccontext->global_quality) {
p_dirac_params->enc_ctx.enc_params.qf = avccontext->global_quality
/ (FF_QP2LAMBDA * 10.0);
/* if it is not default bitrate then send target rate. */
if (avccontext->bit_rate >= 1000 &&
avccontext->bit_rate != 200000)
p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate
/ 1000;
} else
p_dirac_params->enc_ctx.enc_params.lossless = 1;
} else if (avccontext->bit_rate >= 1000)
p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate / 1000;
if ((preset > VIDEO_FORMAT_QCIF || preset < VIDEO_FORMAT_QSIF525) &&
avccontext->bit_rate == 200000)
p_dirac_params->enc_ctx.enc_params.trate = 0;
if (avccontext->flags & CODEC_FLAG_INTERLACED_ME)
/* all material can be coded as interlaced or progressive
* irrespective of the type of source material */
p_dirac_params->enc_ctx.enc_params.picture_coding_mode = 1;
p_dirac_params->p_encoder = dirac_encoder_init(&p_dirac_params->enc_ctx,
verbose);
if (!p_dirac_params->p_encoder) {
av_log(avccontext, AV_LOG_ERROR,
"Unrecoverable Error: dirac_encoder_init failed. ");
return EXIT_FAILURE;
}
/* allocate enough memory for the incoming data */
p_dirac_params->p_in_frame_buf = av_malloc(p_dirac_params->frame_size);
/* initialize the encoded frame queue */
ff_dirac_schro_queue_init(&p_dirac_params->enc_frame_queue);
return 0;
}
static void DiracFreeFrame(void *data)
{
DiracSchroEncodedFrame *enc_frame = data;
av_freep(&enc_frame->p_encbuf);
av_free(enc_frame);
}
static int libdirac_encode_frame(AVCodecContext *avccontext,
unsigned char *frame,
int buf_size, void *data)
{
int enc_size = 0;
dirac_encoder_state_t state;
DiracEncoderParams *p_dirac_params = avccontext->priv_data;
DiracSchroEncodedFrame *p_frame_output = NULL;
DiracSchroEncodedFrame *p_next_output_frame = NULL;
int go = 1;
int last_frame_in_sequence = 0;
if (!data) {
/* push end of sequence if not already signalled */
if (!p_dirac_params->eos_signalled) {
dirac_encoder_end_sequence(p_dirac_params->p_encoder);
p_dirac_params->eos_signalled = 1;
}
} else {
/* Allocate frame data to Dirac input buffer.
* Input line size may differ from what the codec supports,
* especially when transcoding from one format to another.
* So use avpicture_layout to copy the frame. */
avpicture_layout((AVPicture *)data, avccontext->pix_fmt,
avccontext->width, avccontext->height,
p_dirac_params->p_in_frame_buf,
p_dirac_params->frame_size);
/* load next frame */
if (dirac_encoder_load(p_dirac_params->p_encoder,
p_dirac_params->p_in_frame_buf,
p_dirac_params->frame_size) < 0) {
av_log(avccontext, AV_LOG_ERROR, "Unrecoverable Encoder Error."
" dirac_encoder_load failed...\n");
return -1;
}
}
if (p_dirac_params->eos_pulled)
go = 0;
while (go) {
p_dirac_params->p_encoder->enc_buf.buffer = frame;
p_dirac_params->p_encoder->enc_buf.size = buf_size;
/* process frame */
state = dirac_encoder_output(p_dirac_params->p_encoder);
switch (state) {
case ENC_STATE_AVAIL:
case ENC_STATE_EOS:
assert(p_dirac_params->p_encoder->enc_buf.size > 0);
/* All non-frame data is prepended to actual frame data to
* be able to set the pts correctly. So we don't write data
* to the frame output queue until we actually have a frame
*/
p_dirac_params->enc_buf = av_realloc(p_dirac_params->enc_buf,
p_dirac_params->enc_buf_size +
p_dirac_params->p_encoder->enc_buf.size);
memcpy(p_dirac_params->enc_buf + p_dirac_params->enc_buf_size,
p_dirac_params->p_encoder->enc_buf.buffer,
p_dirac_params->p_encoder->enc_buf.size);
p_dirac_params->enc_buf_size += p_dirac_params->p_encoder->enc_buf.size;
if (state == ENC_STATE_EOS) {
p_dirac_params->eos_pulled = 1;
go = 0;
}
/* If non-frame data, don't output it until it we get an
* encoded frame back from the encoder. */
if (p_dirac_params->p_encoder->enc_pparams.pnum == -1)
break;
/* create output frame */
p_frame_output = av_mallocz(sizeof(DiracSchroEncodedFrame));
/* set output data */
p_frame_output->size = p_dirac_params->enc_buf_size;
p_frame_output->p_encbuf = p_dirac_params->enc_buf;
p_frame_output->frame_num = p_dirac_params->p_encoder->enc_pparams.pnum;
if (p_dirac_params->p_encoder->enc_pparams.ptype == INTRA_PICTURE &&
p_dirac_params->p_encoder->enc_pparams.rtype == REFERENCE_PICTURE)
p_frame_output->key_frame = 1;
ff_dirac_schro_queue_push_back(&p_dirac_params->enc_frame_queue,
p_frame_output);
p_dirac_params->enc_buf_size = 0;
p_dirac_params->enc_buf = NULL;
break;
case ENC_STATE_BUFFER:
go = 0;
break;
case ENC_STATE_INVALID:
av_log(avccontext, AV_LOG_ERROR,
"Unrecoverable Dirac Encoder Error. Quitting...\n");
return -1;
default:
av_log(avccontext, AV_LOG_ERROR, "Unknown Dirac Encoder state\n");
return -1;
}
}
/* copy 'next' frame in queue */
if (p_dirac_params->enc_frame_queue.size == 1 && p_dirac_params->eos_pulled)
last_frame_in_sequence = 1;
p_next_output_frame = ff_dirac_schro_queue_pop(&p_dirac_params->enc_frame_queue);
if (!p_next_output_frame)
return 0;
memcpy(frame, p_next_output_frame->p_encbuf, p_next_output_frame->size);
avccontext->coded_frame->key_frame = p_next_output_frame->key_frame;
/* Use the frame number of the encoded frame as the pts. It is OK to do
* so since Dirac is a constant framerate codec. It expects input to be
* of constant framerate. */
avccontext->coded_frame->pts = p_next_output_frame->frame_num;
enc_size = p_next_output_frame->size;
/* Append the end of sequence information to the last frame in the
* sequence. */
if (last_frame_in_sequence && p_dirac_params->enc_buf_size > 0) {
memcpy(frame + enc_size, p_dirac_params->enc_buf,
p_dirac_params->enc_buf_size);
enc_size += p_dirac_params->enc_buf_size;
av_freep(&p_dirac_params->enc_buf);
p_dirac_params->enc_buf_size = 0;
}
/* free frame */
DiracFreeFrame(p_next_output_frame);
return enc_size;
}
static av_cold int libdirac_encode_close(AVCodecContext *avccontext)
{
DiracEncoderParams *p_dirac_params = avccontext->priv_data;
/* close the encoder */
dirac_encoder_close(p_dirac_params->p_encoder);
/* free data in the output frame queue */
ff_dirac_schro_queue_free(&p_dirac_params->enc_frame_queue,
DiracFreeFrame);
/* free the encoder buffer */
if (p_dirac_params->enc_buf_size)
av_freep(&p_dirac_params->enc_buf);
/* free the input frame buffer */
av_freep(&p_dirac_params->p_in_frame_buf);
return 0;
}
AVCodec ff_libdirac_encoder = {
.name = "libdirac",
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_DIRAC,
.priv_data_size = sizeof(DiracEncoderParams),
.init = libdirac_encode_init,
.encode = libdirac_encode_frame,
.close = libdirac_encode_close,
.capabilities = CODEC_CAP_DELAY,
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"),
};

libavcodec/libschroedingerenc.c

@@ -35,6 +35,7 @@
 #include <schroedinger/schrovideoformat.h>
 
 #include "avcodec.h"
+#include "internal.h"
 #include "libdirac_libschro.h"
 #include "libschroedinger.h"
 #include "bytestream.h"
@@ -71,6 +72,9 @@ typedef struct SchroEncoderParams {
     /** end of sequence pulled */
     int eos_pulled;
+
+    /* counter for frames submitted to encoder, used as dts */
+    int64_t dts;
 } SchroEncoderParams;
 
 /**
@@ -175,6 +179,7 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
         schro_encoder_setting_set_double(p_schro_params->encoder,
                                          "au_distance", avccontext->gop_size);
         avccontext->has_b_frames = 1;
+        p_schro_params->dts = -1;
     }
 
     /* FIXME - Need to handle SCHRO_ENCODER_RATE_CONTROL_LOW_DELAY. */
@@ -236,7 +241,7 @@
 }
 
 static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avccontext,
-                                                   void *in_data)
+                                                   const AVFrame *frame)
 {
     SchroEncoderParams *p_schro_params = avccontext->priv_data;
     SchroFrame *in_frame;
@@ -246,7 +251,7 @@ static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avccontext,
     in_frame = ff_create_schro_frame(avccontext, p_schro_params->frame_format);
 
     if (in_frame)
-        avpicture_layout((AVPicture *)in_data, avccontext->pix_fmt,
+        avpicture_layout((const AVPicture *)frame, avccontext->pix_fmt,
                          avccontext->width, avccontext->height,
                          in_frame->components[0].data,
                          p_schro_params->frame_size);
@@ -262,9 +267,8 @@ static void SchroedingerFreeFrame(void *data)
     av_free(enc_frame);
 }
 
-static int libschroedinger_encode_frame(AVCodecContext *avccontext,
-                                        unsigned char *frame,
-                                        int buf_size, void *data)
+static int libschroedinger_encode_frame(AVCodecContext *avccontext, AVPacket *pkt,
+                                        const AVFrame *frame, int *got_packet)
 {
     int enc_size = 0;
     SchroEncoderParams *p_schro_params = avccontext->priv_data;
@@ -275,8 +279,9 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
     int presentation_frame;
     int parse_code;
     int last_frame_in_sequence = 0;
+    int pkt_size, ret;
 
-    if (!data) {
+    if (!frame) {
         /* Push end of sequence if not already signalled. */
         if (!p_schro_params->eos_signalled) {
             schro_encoder_end_of_stream(encoder);
@@ -285,7 +290,7 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
     } else {
         /* Allocate frame data to schro input buffer. */
         SchroFrame *in_frame = libschroedinger_frame_from_data(avccontext,
-                                                               data);
+                                                               frame);
         /* Load next frame. */
         schro_encoder_push_frame(encoder, in_frame);
     }
@@ -373,28 +378,42 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
     if (!p_frame_output)
         return 0;
 
-    memcpy(frame, p_frame_output->p_encbuf, p_frame_output->size);
+    pkt_size = p_frame_output->size;
+    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0)
+        pkt_size += p_schro_params->enc_buf_size;
+    if ((ret = ff_alloc_packet(pkt, pkt_size)) < 0) {
+        av_log(avccontext, AV_LOG_ERROR, "Error getting output packet of size %d.\n", pkt_size);
+        goto error;
+    }
+
+    memcpy(pkt->data, p_frame_output->p_encbuf, p_frame_output->size);
     avccontext->coded_frame->key_frame = p_frame_output->key_frame;
 
     /* Use the frame number of the encoded frame as the pts. It is OK to
      * do so since Dirac is a constant frame rate codec. It expects input
      * to be of constant frame rate. */
+    pkt->pts =
     avccontext->coded_frame->pts = p_frame_output->frame_num;
+    pkt->dts = p_schro_params->dts++;
     enc_size = p_frame_output->size;
 
     /* Append the end of sequence information to the last frame in the
     * sequence. */
     if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
-        memcpy(frame + enc_size, p_schro_params->enc_buf,
+        memcpy(pkt->data + enc_size, p_schro_params->enc_buf,
                p_schro_params->enc_buf_size);
         enc_size += p_schro_params->enc_buf_size;
         av_freep(&p_schro_params->enc_buf);
        p_schro_params->enc_buf_size = 0;
     }
 
+    if (p_frame_output->key_frame)
+        pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+error:
     /* free frame */
     SchroedingerFreeFrame(p_frame_output);
 
-    return enc_size;
+    return ret;
 }
@@ -427,7 +446,7 @@ AVCodec ff_libschroedinger_encoder = {
     .id = CODEC_ID_DIRAC,
     .priv_data_size = sizeof(SchroEncoderParams),
     .init = libschroedinger_encode_init,
-    .encode = libschroedinger_encode_frame,
+    .encode2 = libschroedinger_encode_frame,
     .close = libschroedinger_encode_close,
     .capabilities = CODEC_CAP_DELAY,
     .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},

libavcodec/libtheoraenc.c

@@ -35,6 +35,7 @@
 #include "libavutil/log.h"
 #include "libavutil/base64.h"
 #include "avcodec.h"
+#include "internal.h"
 
 /* libtheora includes */
 #include <theora/theoraenc.h>
@@ -260,14 +261,13 @@ static av_cold int encode_init(AVCodecContext* avc_context)
     return 0;
 }
 
-static int encode_frame(AVCodecContext* avc_context, uint8_t *outbuf,
-                        int buf_size, void *data)
+static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt,
+                        const AVFrame *frame, int *got_packet)
 {
     th_ycbcr_buffer t_yuv_buffer;
     TheoraContext *h = avc_context->priv_data;
-    AVFrame *frame = data;
     ogg_packet o_packet;
-    int result, i;
+    int result, i, ret;
 
     // EOS, finish and get 1st pass stats if applicable
     if (!frame) {
@@ -328,18 +328,21 @@ static int encode_frame(AVCodecContext* avc_context, uint8_t *outbuf,
     }
 
     /* Copy ogg_packet content out to buffer */
-    if (buf_size < o_packet.bytes) {
-        av_log(avc_context, AV_LOG_ERROR, "encoded frame too large\n");
-        return -1;
+    if ((ret = ff_alloc_packet(pkt, o_packet.bytes)) < 0) {
+        av_log(avc_context, AV_LOG_ERROR, "Error getting output packet of size %ld.\n", o_packet.bytes);
+        return ret;
     }
-    memcpy(outbuf, o_packet.packet, o_packet.bytes);
+    memcpy(pkt->data, o_packet.packet, o_packet.bytes);
 
     // HACK: assumes no encoder delay, this is true until libtheora becomes
     // multithreaded (which will be disabled unless explictly requested)
-    avc_context->coded_frame->pts = frame->pts;
+    pkt->pts = pkt->dts = frame->pts;
     avc_context->coded_frame->key_frame = !(o_packet.granulepos & h->keyframe_mask);
+    if (avc_context->coded_frame->key_frame)
+        pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
 
-    return o_packet.bytes;
+    return 0;
 }
 
 static av_cold int encode_close(AVCodecContext* avc_context)
@@ -364,7 +367,7 @@ AVCodec ff_libtheora_encoder = {
     .priv_data_size = sizeof(TheoraContext),
     .init = encode_init,
     .close = encode_close,
-    .encode = encode_frame,
+    .encode2 = encode_frame,
     .capabilities = CODEC_CAP_DELAY, // needed to get the statsfile summary
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("libtheora Theora"),

libavcodec/libvpxenc.c

@@ -380,33 +380,33 @@ static inline void cx_pktcpy(struct FrameListData *dst,
 }
 
 /**
- * Store coded frame information in format suitable for return from encode().
+ * Store coded frame information in format suitable for return from encode2().
  *
- * Write buffer information from @a cx_frame to @a buf & @a buf_size.
- * Timing/frame details to @a coded_frame.
- * @return Frame size written to @a buf on success
- * @return AVERROR(EINVAL) on error
+ * Write information from @a cx_frame to @a pkt
+ * @return packet data size on success
+ * @return a negative AVERROR on error
  */
 static int storeframe(AVCodecContext *avctx, struct FrameListData *cx_frame,
-                      uint8_t *buf, int buf_size, AVFrame *coded_frame)
+                      AVPacket *pkt, AVFrame *coded_frame)
 {
-    if ((int) cx_frame->sz <= buf_size) {
-        buf_size = cx_frame->sz;
-        memcpy(buf, cx_frame->buf, buf_size);
+    int ret = ff_alloc_packet(pkt, cx_frame->sz);
+    if (ret >= 0) {
+        memcpy(pkt->data, cx_frame->buf, pkt->size);
+        pkt->pts = pkt->dts = cx_frame->pts;
         coded_frame->pts = cx_frame->pts;
         coded_frame->key_frame = !!(cx_frame->flags & VPX_FRAME_IS_KEY);
 
-        if (coded_frame->key_frame)
+        if (coded_frame->key_frame) {
             coded_frame->pict_type = AV_PICTURE_TYPE_I;
-        else
+            pkt->flags |= AV_PKT_FLAG_KEY;
+        } else
             coded_frame->pict_type = AV_PICTURE_TYPE_P;
     } else {
         av_log(avctx, AV_LOG_ERROR,
-               "Compressed frame larger than storage provided! (%zu/%d)\n",
-               cx_frame->sz, buf_size);
-        return AVERROR(EINVAL);
+               "Error getting output packet of size %zu.\n", cx_frame->sz);
+        return ret;
     }
-    return buf_size;
+    return pkt->size;
 }
 
 /**
@@ -417,7 +417,7 @@ static int storeframe(AVCodecContext *avctx, struct FrameListData *cx_frame,
  * @return AVERROR(EINVAL) on output size error
  * @return AVERROR(ENOMEM) on coded frame queue data allocation error
  */
-static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+static int queue_frames(AVCodecContext *avctx, AVPacket *pkt_out,
                         AVFrame *coded_frame)
 {
     VP8Context *ctx = avctx->priv_data;
@@ -428,9 +428,9 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
     if (ctx->coded_frame_list) {
         struct FrameListData *cx_frame = ctx->coded_frame_list;
         /* return the leading frame if we've already begun queueing */
-        size = storeframe(avctx, cx_frame, buf, buf_size, coded_frame);
+        size = storeframe(avctx, cx_frame, pkt_out, coded_frame);
         if (size < 0)
-            return AVERROR(EINVAL);
+            return size;
         ctx->coded_frame_list = cx_frame->next;
         free_coded_frame(cx_frame);
     }
@@ -447,9 +447,9 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
            provided a frame for output */
         assert(!ctx->coded_frame_list);
         cx_pktcpy(&cx_frame, pkt);
-        size = storeframe(avctx, &cx_frame, buf, buf_size, coded_frame);
+        size = storeframe(avctx, &cx_frame, pkt_out, coded_frame);
         if (size < 0)
-            return AVERROR(EINVAL);
+            return size;
     } else {
         struct FrameListData *cx_frame =
             av_malloc(sizeof(struct FrameListData));
@@ -495,11 +495,10 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
     return size;
 }
 
-static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size,
-                      void *data)
+static int vp8_encode(AVCodecContext *avctx, AVPacket *pkt,
+                      const AVFrame *frame, int *got_packet)
 {
     VP8Context *ctx = avctx->priv_data;
-    AVFrame *frame = data;
     struct vpx_image *rawimg = NULL;
     int64_t timestamp = 0;
     int res, coded_size;
@@ -521,7 +520,7 @@ static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size,
         log_encoder_error(avctx, "Error encoding frame");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
coded_size = queue_frames(avctx, buf, buf_size, avctx->coded_frame); coded_size = queue_frames(avctx, pkt, avctx->coded_frame);
if (!frame && avctx->flags & CODEC_FLAG_PASS1) { if (!frame && avctx->flags & CODEC_FLAG_PASS1) {
unsigned int b64_size = AV_BASE64_SIZE(ctx->twopass_stats.sz); unsigned int b64_size = AV_BASE64_SIZE(ctx->twopass_stats.sz);
@ -535,7 +534,9 @@ static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size,
av_base64_encode(avctx->stats_out, b64_size, ctx->twopass_stats.buf, av_base64_encode(avctx->stats_out, b64_size, ctx->twopass_stats.buf,
ctx->twopass_stats.sz); ctx->twopass_stats.sz);
} }
return coded_size;
*got_packet = !!coded_size;
return 0;
} }
#define OFFSET(x) offsetof(VP8Context, x) #define OFFSET(x) offsetof(VP8Context, x)
@ -598,7 +599,7 @@ AVCodec ff_libvpx_encoder = {
.id = CODEC_ID_VP8, .id = CODEC_ID_VP8,
.priv_data_size = sizeof(VP8Context), .priv_data_size = sizeof(VP8Context),
.init = vp8_init, .init = vp8_init,
.encode = vp8_encode, .encode2 = vp8_encode,
.close = vp8_free, .close = vp8_free,
.capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
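
Because libvpx may emit zero or several compressed frames per call, the wrapper parks surplus output on a singly-linked list and storeframe() later returns one entry per encode2() call; that is what lets CODEC_CAP_DELAY flushing drain the queue. The toy below reproduces the FIFO discipline with invented names (CodedFrame, fifo_push, fifo_pop); it sketches the queueing idea, not the wrapper itself.

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    /* Toy equivalent of struct FrameListData: one coded frame per node. */
    struct CodedFrame {
        void   *buf;
        size_t  sz;
        long    pts;
        struct CodedFrame *next;
    };

    static int fifo_push(struct CodedFrame **head, const void *buf,
                         size_t sz, long pts)
    {
        struct CodedFrame **p = head, *node = malloc(sizeof(*node));
        if (!node)
            return -1;
        node->buf = malloc(sz);
        if (!node->buf) { free(node); return -1; }
        memcpy(node->buf, buf, sz);
        node->sz   = sz;
        node->pts  = pts;
        node->next = NULL;
        while (*p)                    /* append at the tail, keep coded order */
            p = &(*p)->next;
        *p = node;
        return 0;
    }

    /* Pop the leading frame, as the encode path does one packet at a time. */
    static struct CodedFrame *fifo_pop(struct CodedFrame **head)
    {
        struct CodedFrame *node = *head;
        if (node)
            *head = node->next;
        return node;
    }

    int main(void)
    {
        struct CodedFrame *q = NULL, *f;
        fifo_push(&q, "I", 1, 0);
        fifo_push(&q, "P", 1, 1);
        while ((f = fifo_pop(&q))) {
            printf("pts %ld, %zu bytes\n", f->pts, f->sz);
            free(f->buf);
            free(f);
        }
        return 0;
    }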

View File

@ -37,6 +37,7 @@
#define XAVS_PART_B8X8 0x100 /* Analyze b16x8, b*/ #define XAVS_PART_B8X8 0x100 /* Analyze b16x8, b*/
typedef struct XavsContext { typedef struct XavsContext {
AVClass *class;
xavs_param_t params; xavs_param_t params;
xavs_t *enc; xavs_t *enc;
xavs_picture_t pic; xavs_picture_t pic;
@ -53,6 +54,9 @@ typedef struct XavsContext {
int fast_pskip; int fast_pskip;
int mbtree; int mbtree;
int mixed_refs; int mixed_refs;
int64_t *pts_buffer;
int out_frame_count;
} XavsContext; } XavsContext;
static void XAVS_log(void *p, int level, const char *fmt, va_list args) static void XAVS_log(void *p, int level, const char *fmt, va_list args)
@ -70,13 +74,24 @@ static void XAVS_log(void *p, int level, const char *fmt, va_list args)
av_vlog(p, level_map[level], fmt, args); av_vlog(p, level_map[level], fmt, args);
} }
static int encode_nals(AVCodecContext *ctx, uint8_t *buf, static int encode_nals(AVCodecContext *ctx, AVPacket *pkt,
int size, xavs_nal_t *nals, xavs_nal_t *nals, int nnal)
int nnal, int skip_sei)
{ {
XavsContext *x4 = ctx->priv_data; XavsContext *x4 = ctx->priv_data;
uint8_t *p = buf; uint8_t *p;
int i, s; int i, s, ret, size = x4->sei_size + FF_MIN_BUFFER_SIZE;
if (!nnal)
return 0;
for (i = 0; i < nnal; i++)
size += nals[i].i_payload;
if ((ret = ff_alloc_packet(pkt, size)) < 0) {
av_log(ctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", size);
return ret;
}
p = pkt->data;
/* Write the SEI as part of the first frame. */ /* Write the SEI as part of the first frame. */
if (x4->sei_size > 0 && nnal > 0) { if (x4->sei_size > 0 && nnal > 0) {
@ -86,30 +101,22 @@ static int encode_nals(AVCodecContext *ctx, uint8_t *buf,
} }
for (i = 0; i < nnal; i++) { for (i = 0; i < nnal; i++) {
/* Don't put the SEI in extradata. */
if (skip_sei && nals[i].i_type == NAL_SEI) {
x4->sei = av_malloc( 5 + nals[i].i_payload * 4 / 3 );
if (xavs_nal_encode(x4->sei, &x4->sei_size, 1, nals + i) < 0)
return -1;
continue;
}
s = xavs_nal_encode(p, &size, 1, nals + i); s = xavs_nal_encode(p, &size, 1, nals + i);
if (s < 0) if (s < 0)
return -1; return -1;
p += s; p += s;
} }
pkt->size = p - pkt->data;
return p - buf; return 1;
} }
static int XAVS_frame(AVCodecContext *ctx, uint8_t *buf, static int XAVS_frame(AVCodecContext *ctx, AVPacket *pkt,
int bufsize, void *data) const AVFrame *frame, int *got_packet)
{ {
XavsContext *x4 = ctx->priv_data; XavsContext *x4 = ctx->priv_data;
AVFrame *frame = data;
xavs_nal_t *nal; xavs_nal_t *nal;
int nnal, i; int nnal, i, ret;
xavs_picture_t pic_out; xavs_picture_t pic_out;
x4->pic.img.i_csp = XAVS_CSP_I420; x4->pic.img.i_csp = XAVS_CSP_I420;
@ -123,29 +130,44 @@ static int XAVS_frame(AVCodecContext *ctx, uint8_t *buf,
x4->pic.i_pts = frame->pts; x4->pic.i_pts = frame->pts;
x4->pic.i_type = XAVS_TYPE_AUTO; x4->pic.i_type = XAVS_TYPE_AUTO;
x4->pts_buffer[ctx->frame_number % (ctx->max_b_frames+1)] = frame->pts;
} }
if (xavs_encoder_encode(x4->enc, &nal, &nnal, if (xavs_encoder_encode(x4->enc, &nal, &nnal,
frame? &x4->pic: NULL, &pic_out) < 0) frame? &x4->pic: NULL, &pic_out) < 0)
return -1; return -1;
bufsize = encode_nals(ctx, buf, bufsize, nal, nnal, 0); ret = encode_nals(ctx, pkt, nal, nnal);
if (bufsize < 0) if (ret < 0)
return -1; return -1;
if (!bufsize && !frame && !(x4->end_of_stream)){ if (!ret) {
buf[bufsize] = 0x0; if (!frame && !(x4->end_of_stream)) {
buf[bufsize+1] = 0x0; if ((ret = ff_alloc_packet(pkt, 4)) < 0)
buf[bufsize+2] = 0x01; return ret;
buf[bufsize+3] = 0xb1;
bufsize += 4; pkt->data[0] = 0x0;
pkt->data[1] = 0x0;
pkt->data[2] = 0x01;
pkt->data[3] = 0xb1;
pkt->dts = 2*x4->pts_buffer[(x4->out_frame_count-1)%(ctx->max_b_frames+1)] -
x4->pts_buffer[(x4->out_frame_count-2)%(ctx->max_b_frames+1)];
x4->end_of_stream = END_OF_STREAM; x4->end_of_stream = END_OF_STREAM;
return bufsize; *got_packet = 1;
} }
/* FIXME: libxavs now provides DTS */ return 0;
/* but AVFrame doesn't have a field for it. */ }
x4->out_pic.pts = pic_out.i_pts; x4->out_pic.pts = pic_out.i_pts;
pkt->pts = pic_out.i_pts;
if (ctx->has_b_frames) {
if (!x4->out_frame_count)
pkt->dts = pkt->pts - (x4->pts_buffer[1] - x4->pts_buffer[0]);
else
pkt->dts = x4->pts_buffer[(x4->out_frame_count-1)%(ctx->max_b_frames+1)];
} else
pkt->dts = pkt->pts;
switch (pic_out.i_type) { switch (pic_out.i_type) {
case XAVS_TYPE_IDR: case XAVS_TYPE_IDR:
@ -163,11 +185,16 @@ static int XAVS_frame(AVCodecContext *ctx, uint8_t *buf,
/* There is no IDR frame in AVS JiZhun */ /* There is no IDR frame in AVS JiZhun */
/* Sequence header is used as a flag */ /* Sequence header is used as a flag */
x4->out_pic.key_frame = pic_out.i_type == XAVS_TYPE_I; if (pic_out.i_type == XAVS_TYPE_I) {
x4->out_pic.key_frame = 1;
pkt->flags |= AV_PKT_FLAG_KEY;
}
x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA; x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
return bufsize; x4->out_frame_count++;
*got_packet = ret;
return 0;
} }
static av_cold int XAVS_close(AVCodecContext *avctx) static av_cold int XAVS_close(AVCodecContext *avctx)
@ -176,6 +203,7 @@ static av_cold int XAVS_close(AVCodecContext *avctx)
av_freep(&avctx->extradata); av_freep(&avctx->extradata);
av_free(x4->sei); av_free(x4->sei);
av_freep(&x4->pts_buffer);
if (x4->enc) if (x4->enc)
xavs_encoder_close(x4->enc); xavs_encoder_close(x4->enc);
@ -324,17 +352,35 @@ static av_cold int XAVS_init(AVCodecContext *avctx)
if (!x4->enc) if (!x4->enc)
return -1; return -1;
if (!(x4->pts_buffer = av_mallocz((avctx->max_b_frames+1) * sizeof(*x4->pts_buffer))))
return AVERROR(ENOMEM);
avctx->coded_frame = &x4->out_pic; avctx->coded_frame = &x4->out_pic;
/* TAG: Do we have GLOBAL HEADER in AVS */ /* TAG: Do we have GLOBAL HEADER in AVS */
/* We Have PPS and SPS in AVS */ /* We Have PPS and SPS in AVS */
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) { if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
xavs_nal_t *nal; xavs_nal_t *nal;
int nnal, s; int nnal, s, i, size;
uint8_t *p;
s = xavs_encoder_headers(x4->enc, &nal, &nnal); s = xavs_encoder_headers(x4->enc, &nal, &nnal);
avctx->extradata = av_malloc(s); avctx->extradata = p = av_malloc(s);
avctx->extradata_size = encode_nals(avctx, avctx->extradata, s, nal, nnal, 1); for (i = 0; i < nnal; i++) {
/* Don't put the SEI in extradata. */
if (nal[i].i_type == NAL_SEI) {
x4->sei = av_malloc( 5 + nal[i].i_payload * 4 / 3 );
if (xavs_nal_encode(x4->sei, &x4->sei_size, 1, nal + i) < 0)
return -1;
continue;
}
size = xavs_nal_encode(p, &s, 1, nal + i);
if (size < 0)
return -1;
p += size;
}
avctx->extradata_size = p - avctx->extradata;
} }
return 0; return 0;
} }
@ -376,7 +422,7 @@ AVCodec ff_libxavs_encoder = {
.id = CODEC_ID_CAVS, .id = CODEC_ID_CAVS,
.priv_data_size = sizeof(XavsContext), .priv_data_size = sizeof(XavsContext),
.init = XAVS_init, .init = XAVS_init,
.encode = XAVS_frame, .encode2 = XAVS_frame,
.close = XAVS_close, .close = XAVS_close,
.capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
.pix_fmts = (const enum PixelFormat[]) { PIX_FMT_YUV420P, PIX_FMT_NONE }, .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_YUV420P, PIX_FMT_NONE },
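
encode_nals() no longer takes a skip_sei flag; XAVS_init() now walks the header NALs itself, diverting the SEI into x4->sei (to be prepended to the first coded frame) and concatenating the rest into extradata. A self-contained model of that split, with invented names (struct Nal, TOY_NAL_SEI, build_extradata):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy NAL record; stands in for xavs_nal_t. */
    struct Nal {
        int type;                 /* TOY_NAL_SEI marks an SEI in this toy */
        const uint8_t *payload;
        int len;
    };

    #define TOY_NAL_SEI 1

    /* Concatenate all non-SEI NALs into one extradata blob; SEI payloads
     * would instead be kept aside and sent with the first coded frame. */
    static uint8_t *build_extradata(const struct Nal *nals, int nnal,
                                    int *out_len)
    {
        int i, total = 0;
        uint8_t *buf, *p;
        for (i = 0; i < nnal; i++)
            if (nals[i].type != TOY_NAL_SEI)
                total += nals[i].len;
        if (!(buf = malloc(total ? total : 1)))
            return NULL;
        p = buf;
        for (i = 0; i < nnal; i++) {
            if (nals[i].type == TOY_NAL_SEI)
                continue;         /* don't put the SEI in extradata */
            memcpy(p, nals[i].payload, nals[i].len);
            p += nals[i].len;
        }
        *out_len = p - buf;
        return buf;
    }

    int main(void)
    {
        const uint8_t hdr[] = { 0, 0, 1, 0xb0 }, sei[] = { 0, 0, 1, 0xb2 };
        struct Nal nals[] = { { 0, hdr, 4 }, { TOY_NAL_SEI, sei, 4 },
                              { 0, hdr, 4 } };
        int len;
        uint8_t *extradata = build_extradata(nals, 3, &len);
        printf("extradata: %d bytes\n", len);   /* 8: the SEI was skipped */
        free(extradata);
        return 0;
    }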

View File

@ -33,6 +33,7 @@
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
#include "libxvid_internal.h" #include "libxvid_internal.h"
#include "mpegvideo.h"
/** /**
* Buffer management macros. * Buffer management macros.
@ -71,7 +72,7 @@ struct xvid_ff_pass1 {
}; };
/* Prototypes - See function implementation for details */ /* Prototypes - See function implementation for details */
int xvid_strip_vol_header(AVCodecContext *avctx, unsigned char *frame, unsigned int header_len, unsigned int frame_len); int xvid_strip_vol_header(AVCodecContext *avctx, AVPacket *pkt, unsigned int header_len, unsigned int frame_len);
int xvid_ff_2pass(void *ref, int opt, void *p1, void *p2); int xvid_ff_2pass(void *ref, int opt, void *p1, void *p2);
void xvid_correct_framerate(AVCodecContext *avctx); void xvid_correct_framerate(AVCodecContext *avctx);
@ -370,17 +371,25 @@ static av_cold int xvid_encode_init(AVCodecContext *avctx) {
* @param data Pointer to AVFrame of unencoded frame * @param data Pointer to AVFrame of unencoded frame
* @return Returns 0 on success, -1 on failure * @return Returns 0 on success, -1 on failure
*/ */
static int xvid_encode_frame(AVCodecContext *avctx, static int xvid_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
unsigned char *frame, int buf_size, void *data) { const AVFrame *picture, int *got_packet)
int xerr, i; {
int xerr, i, ret, user_packet = !!pkt->data;
char *tmp; char *tmp;
struct xvid_context *x = avctx->priv_data; struct xvid_context *x = avctx->priv_data;
AVFrame *picture = data;
AVFrame *p = &x->encoded_picture; AVFrame *p = &x->encoded_picture;
int mb_width = (avctx->width + 15) / 16;
int mb_height = (avctx->height + 15) / 16;
xvid_enc_frame_t xvid_enc_frame; xvid_enc_frame_t xvid_enc_frame;
xvid_enc_stats_t xvid_enc_stats; xvid_enc_stats_t xvid_enc_stats;
if (!user_packet &&
(ret = av_new_packet(pkt, mb_width*mb_height*MAX_MB_BYTES + FF_MIN_BUFFER_SIZE)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
return ret;
}
/* Start setting up the frame */ /* Start setting up the frame */
memset(&xvid_enc_frame, 0, sizeof(xvid_enc_frame)); memset(&xvid_enc_frame, 0, sizeof(xvid_enc_frame));
xvid_enc_frame.version = XVID_VERSION; xvid_enc_frame.version = XVID_VERSION;
@ -389,8 +398,8 @@ static int xvid_encode_frame(AVCodecContext *avctx,
*p = *picture; *p = *picture;
/* Let Xvid know where to put the frame. */ /* Let Xvid know where to put the frame. */
xvid_enc_frame.bitstream = frame; xvid_enc_frame.bitstream = pkt->data;
xvid_enc_frame.length = buf_size; xvid_enc_frame.length = pkt->size;
/* Initialize input image fields */ /* Initialize input image fields */
if( avctx->pix_fmt != PIX_FMT_YUV420P ) { if( avctx->pix_fmt != PIX_FMT_YUV420P ) {
@ -450,7 +459,9 @@ static int xvid_encode_frame(AVCodecContext *avctx,
} }
} }
if( 0 <= xerr ) { if (xerr > 0) {
*got_packet = 1;
p->quality = xvid_enc_stats.quant * FF_QP2LAMBDA; p->quality = xvid_enc_stats.quant * FF_QP2LAMBDA;
if( xvid_enc_stats.type == XVID_TYPE_PVOP ) if( xvid_enc_stats.type == XVID_TYPE_PVOP )
p->pict_type = AV_PICTURE_TYPE_P; p->pict_type = AV_PICTURE_TYPE_P;
@ -462,14 +473,21 @@ static int xvid_encode_frame(AVCodecContext *avctx,
p->pict_type = AV_PICTURE_TYPE_I; p->pict_type = AV_PICTURE_TYPE_I;
if( xvid_enc_frame.out_flags & XVID_KEYFRAME ) { if( xvid_enc_frame.out_flags & XVID_KEYFRAME ) {
p->key_frame = 1; p->key_frame = 1;
pkt->flags |= AV_PKT_FLAG_KEY;
if( x->quicktime_format ) if( x->quicktime_format )
return xvid_strip_vol_header(avctx, frame, return xvid_strip_vol_header(avctx, pkt,
xvid_enc_stats.hlength, xerr); xvid_enc_stats.hlength, xerr);
} else } else
p->key_frame = 0; p->key_frame = 0;
return xerr; pkt->size = xerr;
return 0;
} else { } else {
if (!user_packet)
av_free_packet(pkt);
if (!xerr)
return 0;
av_log(avctx, AV_LOG_ERROR, "Xvid: Encoding Error Occurred: %i\n", xerr); av_log(avctx, AV_LOG_ERROR, "Xvid: Encoding Error Occurred: %i\n", xerr);
return -1; return -1;
} }
@ -514,16 +532,16 @@ static av_cold int xvid_encode_close(AVCodecContext *avctx) {
* @return Returns new length of frame data * @return Returns new length of frame data
*/ */
int xvid_strip_vol_header(AVCodecContext *avctx, int xvid_strip_vol_header(AVCodecContext *avctx,
unsigned char *frame, AVPacket *pkt,
unsigned int header_len, unsigned int header_len,
unsigned int frame_len) { unsigned int frame_len) {
int vo_len = 0, i; int vo_len = 0, i;
for( i = 0; i < header_len - 3; i++ ) { for( i = 0; i < header_len - 3; i++ ) {
if( frame[i] == 0x00 && if( pkt->data[i] == 0x00 &&
frame[i+1] == 0x00 && pkt->data[i+1] == 0x00 &&
frame[i+2] == 0x01 && pkt->data[i+2] == 0x01 &&
frame[i+3] == 0xB6 ) { pkt->data[i+3] == 0xB6 ) {
vo_len = i; vo_len = i;
break; break;
} }
@ -533,15 +551,15 @@ int xvid_strip_vol_header(AVCodecContext *avctx,
/* We need to store the header, so extract it */ /* We need to store the header, so extract it */
if( avctx->extradata == NULL ) { if( avctx->extradata == NULL ) {
avctx->extradata = av_malloc(vo_len); avctx->extradata = av_malloc(vo_len);
memcpy(avctx->extradata, frame, vo_len); memcpy(avctx->extradata, pkt->data, vo_len);
avctx->extradata_size = vo_len; avctx->extradata_size = vo_len;
} }
/* Less dangerous now, memmove properly copies the two /* Less dangerous now, memmove properly copies the two
chunks of overlapping data */ chunks of overlapping data */
memmove(frame, &frame[vo_len], frame_len - vo_len); memmove(pkt->data, &pkt->data[vo_len], frame_len - vo_len);
return frame_len - vo_len; pkt->size = frame_len - vo_len;
} else }
return frame_len; return 0;
} }
/** /**
@ -777,7 +795,7 @@ AVCodec ff_libxvid_encoder = {
.id = CODEC_ID_MPEG4, .id = CODEC_ID_MPEG4,
.priv_data_size = sizeof(struct xvid_context), .priv_data_size = sizeof(struct xvid_context),
.init = xvid_encode_init, .init = xvid_encode_init,
.encode = xvid_encode_frame, .encode2 = xvid_encode_frame,
.close = xvid_encode_close, .close = xvid_encode_close,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("libxvidcore MPEG-4 part 2"), .long_name= NULL_IF_CONFIG_SMALL("libxvidcore MPEG-4 part 2"),

View File

@ -32,21 +32,37 @@
#include "avcodec.h" #include "avcodec.h"
#include "dsputil.h" #include "dsputil.h"
#include "internal.h"
#include "mpegvideo.h" #include "mpegvideo.h"
#include "mjpeg.h" #include "mjpeg.h"
#include "mjpegenc.h" #include "mjpegenc.h"
static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
MpegEncContext * const s = avctx->priv_data; MpegEncContext * const s = avctx->priv_data;
MJpegContext * const m = s->mjpeg_ctx; MJpegContext * const m = s->mjpeg_ctx;
AVFrame *pict = data;
const int width= s->width; const int width= s->width;
const int height= s->height; const int height= s->height;
AVFrame * const p= (AVFrame*)&s->current_picture; AVFrame * const p= (AVFrame*)&s->current_picture;
const int predictor= avctx->prediction_method+1; const int predictor= avctx->prediction_method+1;
const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];
int ret, max_pkt_size = FF_MIN_BUFFER_SIZE;
init_put_bits(&s->pb, buf, buf_size); if (avctx->pix_fmt == PIX_FMT_BGRA)
max_pkt_size += width * height * 3 * 4;
else {
max_pkt_size += mb_width * mb_height * 3 * 4
* s->mjpeg_hsample[0] * s->mjpeg_vsample[0];
}
if ((ret = ff_alloc_packet(pkt, max_pkt_size)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", max_pkt_size);
return ret;
}
init_put_bits(&s->pb, pkt->data, pkt->size);
*p = *pict; *p = *pict;
p->pict_type= AV_PICTURE_TYPE_I; p->pict_type= AV_PICTURE_TYPE_I;
@ -112,8 +128,6 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in
} }
}else{ }else{
int mb_x, mb_y, i; int mb_x, mb_y, i;
const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];
for(mb_y = 0; mb_y < mb_height; mb_y++) { for(mb_y = 0; mb_y < mb_height; mb_y++) {
if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){ if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){
@ -189,7 +203,11 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in
s->picture_number++; s->picture_number++;
flush_put_bits(&s->pb); flush_put_bits(&s->pb);
return put_bits_ptr(&s->pb) - s->pb.buf; pkt->size = put_bits_ptr(&s->pb) - s->pb.buf;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
// return (put_bits_count(&f->pb)+7)/8; // return (put_bits_count(&f->pb)+7)/8;
} }
@ -200,7 +218,7 @@ AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need t
.id = CODEC_ID_LJPEG, .id = CODEC_ID_LJPEG,
.priv_data_size = sizeof(MpegEncContext), .priv_data_size = sizeof(MpegEncContext),
.init = ff_MPV_encode_init, .init = ff_MPV_encode_init,
.encode = encode_picture_lossless, .encode2 = encode_picture_lossless,
.close = ff_MPV_encode_end, .close = ff_MPV_encode_end,
.long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"), .long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
}; };
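
Lossless JPEG can expand data rather than compress it, so the packet is now sized to a worst case up front: roughly 4 bytes per component sample, i.e. width*height*3*4 for BGRA, or the per-macroblock equivalent scaled by the subsampling factors otherwise, plus FF_MIN_BUFFER_SIZE. A quick sanity check of that bound, assuming 1920x1080 4:2:0 with both luma subsampling factors equal to 2 and FF_MIN_BUFFER_SIZE at its usual 16384:

    #include <stdio.h>

    int main(void)
    {
        const int  w = 1920, h = 1080;
        const int  hs = 2, vs = 2;       /* assumed mjpeg_hsample[0]/vsample[0] */
        const long min_buf = 16384;      /* assumed FF_MIN_BUFFER_SIZE */
        long mb_w = (w + hs - 1) / hs;
        long mb_h = (h + vs - 1) / vs;
        long bound_420  = mb_w * mb_h * 3 * 4 * hs * vs + min_buf;
        long bound_bgra = (long)w * h * 3 * 4 + min_buf;
        printf("4:2:0 bound: %ld bytes (~%.1f MiB)\n",
               bound_420, bound_420 / 1048576.0);
        printf("BGRA  bound: %ld bytes (~%.1f MiB)\n",
               bound_bgra, bound_bgra / 1048576.0);
        return 0;
    }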

View File

@ -25,6 +25,7 @@
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
#include "avcodec.h" #include "avcodec.h"
#include "bytestream.h" #include "bytestream.h"
#include "internal.h"
/** Maximum RLE code for bulk copy */ /** Maximum RLE code for bulk copy */
#define MAX_RLE_BULK 127 #define MAX_RLE_BULK 127
@ -102,7 +103,7 @@ static av_cold int qtrle_encode_init(AVCodecContext *avctx)
return -1; return -1;
} }
s->max_buf_size = s->logical_width*s->avctx->height*s->pixel_size /* image base material */ s->max_buf_size = s->logical_width*s->avctx->height*s->pixel_size*2 /* image base material */
+ 15 /* header + footer */ + 15 /* header + footer */
+ s->avctx->height*2 /* skip code+rle end */ + s->avctx->height*2 /* skip code+rle end */
+ s->logical_width/MAX_RLE_BULK + 1 /* rle codes */; + s->logical_width/MAX_RLE_BULK + 1 /* rle codes */;
@ -113,7 +114,7 @@ static av_cold int qtrle_encode_init(AVCodecContext *avctx)
/** /**
* Compute the best RLE sequence for a line * Compute the best RLE sequence for a line
*/ */
static void qtrle_encode_line(QtrleEncContext *s, AVFrame *p, int line, uint8_t **buf) static void qtrle_encode_line(QtrleEncContext *s, const AVFrame *p, int line, uint8_t **buf)
{ {
int width=s->logical_width; int width=s->logical_width;
int i; int i;
@ -259,7 +260,7 @@ static void qtrle_encode_line(QtrleEncContext *s, AVFrame *p, int line, uint8_t
} }
/** Encode frame including header */ /** Encode frame including header */
static int encode_frame(QtrleEncContext *s, AVFrame *p, uint8_t *buf) static int encode_frame(QtrleEncContext *s, const AVFrame *p, uint8_t *buf)
{ {
int i; int i;
int start_line = 0; int start_line = 0;
@ -300,19 +301,19 @@ static int encode_frame(QtrleEncContext *s, AVFrame *p, uint8_t *buf)
return buf - orig_buf; return buf - orig_buf;
} }
static int qtrle_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data) static int qtrle_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{ {
QtrleEncContext * const s = avctx->priv_data; QtrleEncContext * const s = avctx->priv_data;
AVFrame *pict = data;
AVFrame * const p = &s->frame; AVFrame * const p = &s->frame;
int chunksize; int ret;
*p = *pict; *p = *pict;
if (buf_size < s->max_buf_size) { if ((ret = ff_alloc_packet(pkt, s->max_buf_size)) < 0) {
/* Upper bound check for compressed data */ /* Upper bound check for compressed data */
av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n", buf_size, s->max_buf_size); av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", s->max_buf_size);
return -1; return ret;
} }
if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) { if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
@ -325,11 +326,16 @@ static int qtrle_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size,
p->key_frame = 0; p->key_frame = 0;
} }
chunksize = encode_frame(s, pict, buf); pkt->size = encode_frame(s, pict, pkt->data);
/* save the current frame */ /* save the current frame */
av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height); av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height);
return chunksize;
if (p->key_frame)
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
} }
static av_cold int qtrle_encode_end(AVCodecContext *avctx) static av_cold int qtrle_encode_end(AVCodecContext *avctx)
@ -349,7 +355,7 @@ AVCodec ff_qtrle_encoder = {
.id = CODEC_ID_QTRLE, .id = CODEC_ID_QTRLE,
.priv_data_size = sizeof(QtrleEncContext), .priv_data_size = sizeof(QtrleEncContext),
.init = qtrle_encode_init, .init = qtrle_encode_init,
.encode = qtrle_encode_frame, .encode2 = qtrle_encode_frame,
.close = qtrle_encode_end, .close = qtrle_encode_end,
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"), .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"),

View File

@ -1600,17 +1600,25 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){
} }
} }
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
SnowContext *s = avctx->priv_data; SnowContext *s = avctx->priv_data;
RangeCoder * const c= &s->c; RangeCoder * const c= &s->c;
AVFrame *pict = data; AVFrame *pic = &s->new_picture;
const int width= s->avctx->width; const int width= s->avctx->width;
const int height= s->avctx->height; const int height= s->avctx->height;
int level, orientation, plane_index, i, y; int level, orientation, plane_index, i, y, ret;
uint8_t rc_header_bak[sizeof(s->header_state)]; uint8_t rc_header_bak[sizeof(s->header_state)];
uint8_t rc_block_bak[sizeof(s->block_state)]; uint8_t rc_block_bak[sizeof(s->block_state)];
ff_init_range_encoder(c, buf, buf_size); if (!pkt->data &&
(ret = av_new_packet(pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_MIN_BUFFER_SIZE)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
return ret;
}
ff_init_range_encoder(c, pkt->data, pkt->size);
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
for(i=0; i<3; i++){ for(i=0; i<3; i++){
@ -1624,27 +1632,25 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
s->m.picture_number= avctx->frame_number; s->m.picture_number= avctx->frame_number;
if(avctx->flags&CODEC_FLAG_PASS2){ if(avctx->flags&CODEC_FLAG_PASS2){
s->m.pict_type = s->m.pict_type = pic->pict_type = s->m.rc_context.entry[avctx->frame_number].new_pict_type;
pict->pict_type= s->m.rc_context.entry[avctx->frame_number].new_pict_type; s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
s->keyframe= pict->pict_type==AV_PICTURE_TYPE_I;
if(!(avctx->flags&CODEC_FLAG_QSCALE)) { if(!(avctx->flags&CODEC_FLAG_QSCALE)) {
pict->quality= ff_rate_estimate_qscale(&s->m, 0); pic->quality = ff_rate_estimate_qscale(&s->m, 0);
if (pict->quality < 0) if (pic->quality < 0)
return -1; return -1;
} }
}else{ }else{
s->keyframe= avctx->gop_size==0 || avctx->frame_number % avctx->gop_size == 0; s->keyframe= avctx->gop_size==0 || avctx->frame_number % avctx->gop_size == 0;
s->m.pict_type= s->m.pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
pict->pict_type= s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
} }
if(s->pass1_rc && avctx->frame_number == 0) if(s->pass1_rc && avctx->frame_number == 0)
pict->quality= 2*FF_QP2LAMBDA; pic->quality = 2*FF_QP2LAMBDA;
if(pict->quality){ if (pic->quality) {
s->qlog= qscale2qlog(pict->quality); s->qlog = qscale2qlog(pic->quality);
s->lambda = pict->quality * 3/2; s->lambda = pic->quality * 3/2;
} }
if(s->qlog < 0 || (!pict->quality && (avctx->flags & CODEC_FLAG_QSCALE))){ if (s->qlog < 0 || (!pic->quality && (avctx->flags & CODEC_FLAG_QSCALE))) {
s->qlog= LOSSLESS_QLOG; s->qlog= LOSSLESS_QLOG;
s->lambda = 0; s->lambda = 0;
}//else keep previous frame's qlog until after motion estimation }//else keep previous frame's qlog until after motion estimation
@ -1654,7 +1660,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
s->m.current_picture_ptr= &s->m.current_picture; s->m.current_picture_ptr= &s->m.current_picture;
s->m.last_picture.f.pts = s->m.current_picture.f.pts; s->m.last_picture.f.pts = s->m.current_picture.f.pts;
s->m.current_picture.f.pts = pict->pts; s->m.current_picture.f.pts = pict->pts;
if(pict->pict_type == AV_PICTURE_TYPE_P){ if(pic->pict_type == AV_PICTURE_TYPE_P){
int block_width = (width +15)>>4; int block_width = (width +15)>>4;
int block_height= (height+15)>>4; int block_height= (height+15)>>4;
int stride= s->current_picture.linesize[0]; int stride= s->current_picture.linesize[0];
@ -1679,7 +1685,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
s->m.mb_stride= s->m.mb_width+1; s->m.mb_stride= s->m.mb_width+1;
s->m.b8_stride= 2*s->m.mb_width+1; s->m.b8_stride= 2*s->m.mb_width+1;
s->m.f_code=1; s->m.f_code=1;
s->m.pict_type= pict->pict_type; s->m.pict_type = pic->pict_type;
s->m.me_method= s->avctx->me_method; s->m.me_method= s->avctx->me_method;
s->m.me.scene_change_score=0; s->m.me.scene_change_score=0;
s->m.flags= s->avctx->flags; s->m.flags= s->avctx->flags;
@ -1703,13 +1709,13 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
redo_frame: redo_frame:
if(pict->pict_type == AV_PICTURE_TYPE_I) if (pic->pict_type == AV_PICTURE_TYPE_I)
s->spatial_decomposition_count= 5; s->spatial_decomposition_count= 5;
else else
s->spatial_decomposition_count= 5; s->spatial_decomposition_count= 5;
s->m.pict_type = pict->pict_type; s->m.pict_type = pic->pict_type;
s->qbias= pict->pict_type == AV_PICTURE_TYPE_P ? 2 : 0; s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
ff_snow_common_init_after_header(avctx); ff_snow_common_init_after_header(avctx);
@ -1742,12 +1748,12 @@ redo_frame:
predict_plane(s, s->spatial_idwt_buffer, plane_index, 0); predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
if( plane_index==0 if( plane_index==0
&& pict->pict_type == AV_PICTURE_TYPE_P && pic->pict_type == AV_PICTURE_TYPE_P
&& !(avctx->flags&CODEC_FLAG_PASS2) && !(avctx->flags&CODEC_FLAG_PASS2)
&& s->m.me.scene_change_score > s->avctx->scenechange_threshold){ && s->m.me.scene_change_score > s->avctx->scenechange_threshold){
ff_init_range_encoder(c, buf, buf_size); ff_init_range_encoder(c, pkt->data, pkt->size);
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
pict->pict_type= AV_PICTURE_TYPE_I; pic->pict_type= AV_PICTURE_TYPE_I;
s->keyframe=1; s->keyframe=1;
s->current_picture.key_frame=1; s->current_picture.key_frame=1;
goto redo_frame; goto redo_frame;
@ -1773,12 +1779,12 @@ redo_frame:
ff_spatial_dwt(s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count); ff_spatial_dwt(s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
if(s->pass1_rc && plane_index==0){ if(s->pass1_rc && plane_index==0){
int delta_qlog = ratecontrol_1pass(s, pict); int delta_qlog = ratecontrol_1pass(s, pic);
if (delta_qlog <= INT_MIN) if (delta_qlog <= INT_MIN)
return -1; return -1;
if(delta_qlog){ if(delta_qlog){
//reordering qlog in the bitstream would eliminate this reset //reordering qlog in the bitstream would eliminate this reset
ff_init_range_encoder(c, buf, buf_size); ff_init_range_encoder(c, pkt->data, pkt->size);
memcpy(s->header_state, rc_header_bak, sizeof(s->header_state)); memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
memcpy(s->block_state, rc_block_bak, sizeof(s->block_state)); memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
encode_header(s); encode_header(s);
@ -1793,7 +1799,7 @@ redo_frame:
if(!QUANTIZE2) if(!QUANTIZE2)
quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias); quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
if(orientation==0) if(orientation==0)
decorrelate(s, b, b->ibuf, b->stride, pict->pict_type == AV_PICTURE_TYPE_P, 0); decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation); encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
assert(b->parent==NULL || b->parent->stride == b->stride*2); assert(b->parent==NULL || b->parent->stride == b->stride*2);
if(orientation==0) if(orientation==0)
@ -1820,7 +1826,7 @@ redo_frame:
predict_plane(s, s->spatial_idwt_buffer, plane_index, 1); predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
}else{ }else{
//ME/MC only //ME/MC only
if(pict->pict_type == AV_PICTURE_TYPE_I){ if(pic->pict_type == AV_PICTURE_TYPE_I){
for(y=0; y<h; y++){ for(y=0; y<h; y++){
for(x=0; x<w; x++){ for(x=0; x<w; x++){
s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x]= s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x]=
@ -1859,7 +1865,7 @@ redo_frame:
s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits; s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits;
s->m.current_picture.f.display_picture_number = s->m.current_picture.f.display_picture_number =
s->m.current_picture.f.coded_picture_number = avctx->frame_number; s->m.current_picture.f.coded_picture_number = avctx->frame_number;
s->m.current_picture.f.quality = pict->quality; s->m.current_picture.f.quality = pic->quality;
s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start); s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
if(s->pass1_rc) if(s->pass1_rc)
if (ff_rate_estimate_qscale(&s->m, 0) < 0) if (ff_rate_estimate_qscale(&s->m, 0) < 0)
@ -1874,7 +1880,12 @@ redo_frame:
emms_c(); emms_c();
return ff_rac_terminate(c); pkt->size = ff_rac_terminate(c);
if (avctx->coded_frame->key_frame)
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
} }
static av_cold int encode_end(AVCodecContext *avctx) static av_cold int encode_end(AVCodecContext *avctx)
@ -1909,7 +1920,7 @@ AVCodec ff_snow_encoder = {
.id = CODEC_ID_SNOW, .id = CODEC_ID_SNOW,
.priv_data_size = sizeof(SnowContext), .priv_data_size = sizeof(SnowContext),
.init = encode_init, .init = encode_init,
.encode = encode_frame, .encode2 = encode_frame,
.close = encode_end, .close = encode_end,
.long_name = NULL_IF_CONFIG_SMALL("Snow"), .long_name = NULL_IF_CONFIG_SMALL("Snow"),
.priv_class = &snowenc_class, .priv_class = &snowenc_class,

View File

@ -202,15 +202,13 @@ static void pack_yuv(TiffEncoderContext * s, uint8_t * dst, int lnum)
} }
} }
static int encode_frame(AVCodecContext * avctx, unsigned char *buf, static int encode_frame(AVCodecContext * avctx, AVPacket *pkt,
int buf_size, void *data) const AVFrame *pict, int *got_packet)
{ {
TiffEncoderContext *s = avctx->priv_data; TiffEncoderContext *s = avctx->priv_data;
AVFrame *pict = data;
AVFrame *const p = (AVFrame *) & s->picture; AVFrame *const p = (AVFrame *) & s->picture;
int i; int i;
int n; uint8_t *ptr;
uint8_t *ptr = buf;
uint8_t *offset; uint8_t *offset;
uint32_t strips; uint32_t strips;
uint32_t *strip_sizes = NULL; uint32_t *strip_sizes = NULL;
@ -224,9 +222,6 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
int shift_h, shift_v; int shift_h, shift_v;
s->avctx = avctx; s->avctx = avctx;
s->buf_start = buf;
s->buf = &ptr;
s->buf_size = buf_size;
*p = *pict; *p = *pict;
p->pict_type = AV_PICTURE_TYPE_I; p->pict_type = AV_PICTURE_TYPE_I;
@ -308,6 +303,17 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
strips = (s->height - 1) / s->rps + 1; strips = (s->height - 1) / s->rps + 1;
if (!pkt->data &&
(ret = av_new_packet(pkt, avctx->width * avctx->height * s->bpp * 2 +
avctx->height * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
return ret;
}
ptr = pkt->data;
s->buf_start = pkt->data;
s->buf = &ptr;
s->buf_size = pkt->size;
if (check_size(s, 8)) if (check_size(s, 8))
goto fail; goto fail;
@ -339,7 +345,7 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
zlen = bytes_per_row * s->rps; zlen = bytes_per_row * s->rps;
zbuf = av_malloc(zlen); zbuf = av_malloc(zlen);
strip_offsets[0] = ptr - buf; strip_offsets[0] = ptr - pkt->data;
zn = 0; zn = 0;
for (j = 0; j < s->rps; j++) { for (j = 0; j < s->rps; j++) {
if (is_yuv){ if (is_yuv){
@ -352,14 +358,14 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
p->data[0] + j * p->linesize[0], bytes_per_row); p->data[0] + j * p->linesize[0], bytes_per_row);
zn += bytes_per_row; zn += bytes_per_row;
} }
n = encode_strip(s, zbuf, ptr, zn, s->compr); ret = encode_strip(s, zbuf, ptr, zn, s->compr);
av_free(zbuf); av_free(zbuf);
if (n<0) { if (ret < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n"); av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
goto fail; goto fail;
} }
ptr += n; ptr += ret;
strip_sizes[0] = ptr - buf - strip_offsets[0]; strip_sizes[0] = ptr - pkt->data - strip_offsets[0];
} else } else
#endif #endif
{ {
@ -371,24 +377,23 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
ff_lzw_encode_init(s->lzws, ptr, s->buf_size - (*s->buf - s->buf_start), ff_lzw_encode_init(s->lzws, ptr, s->buf_size - (*s->buf - s->buf_start),
12, FF_LZW_TIFF, put_bits); 12, FF_LZW_TIFF, put_bits);
} }
strip_offsets[i / s->rps] = ptr - buf; strip_offsets[i / s->rps] = ptr - pkt->data;
} }
if (is_yuv){ if (is_yuv){
pack_yuv(s, yuv_line, i); pack_yuv(s, yuv_line, i);
n = encode_strip(s, yuv_line, ptr, bytes_per_row, s->compr); ret = encode_strip(s, yuv_line, ptr, bytes_per_row, s->compr);
i += s->subsampling[1] - 1; i += s->subsampling[1] - 1;
} }
else else
n = encode_strip(s, p->data[0] + i * p->linesize[0], ret = encode_strip(s, p->data[0] + i * p->linesize[0],
ptr, bytes_per_row, s->compr); ptr, bytes_per_row, s->compr);
if (n < 0) { if (ret < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n"); av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
goto fail; goto fail;
} }
strip_sizes[i / s->rps] += n; strip_sizes[i / s->rps] += ret;
ptr += n; ptr += ret;
if(s->compr == TIFF_LZW && (i==s->height-1 || i%s->rps == s->rps-1)){ if(s->compr == TIFF_LZW && (i==s->height-1 || i%s->rps == s->rps-1)){
int ret;
ret = ff_lzw_encode_flush(s->lzws, flush_put_bits); ret = ff_lzw_encode_flush(s->lzws, flush_put_bits);
strip_sizes[(i / s->rps )] += ret ; strip_sizes[(i / s->rps )] += ret ;
ptr += ret; ptr += ret;
@ -440,15 +445,19 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling); add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling);
add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw); add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw);
} }
bytestream_put_le32(&offset, ptr - buf); // write offset to dir bytestream_put_le32(&offset, ptr - pkt->data); // write offset to dir
if (check_size(s, 6 + s->num_entries * 12)) if (check_size(s, 6 + s->num_entries * 12)) {
ret = AVERROR(EINVAL);
goto fail; goto fail;
}
bytestream_put_le16(&ptr, s->num_entries); // write tag count bytestream_put_le16(&ptr, s->num_entries); // write tag count
bytestream_put_buffer(&ptr, s->entries, s->num_entries * 12); bytestream_put_buffer(&ptr, s->entries, s->num_entries * 12);
bytestream_put_le32(&ptr, 0); bytestream_put_le32(&ptr, 0);
ret = ptr - buf; pkt->size = ptr - pkt->data;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
fail: fail:
av_free(strip_sizes); av_free(strip_sizes);
@ -483,7 +492,7 @@ AVCodec ff_tiff_encoder = {
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_TIFF, .id = CODEC_ID_TIFF,
.priv_data_size = sizeof(TiffEncoderContext), .priv_data_size = sizeof(TiffEncoderContext),
.encode = encode_frame, .encode2 = encode_frame,
.pix_fmts = .pix_fmts =
(const enum PixelFormat[]) {PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8, (const enum PixelFormat[]) {PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8,
PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE, PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE,

View File

@ -135,7 +135,7 @@ static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
huff.val_bits, huff.max_bits); huff.val_bits, huff.max_bits);
return -1; return -1;
} }
if((huff.nodes < 0) || (huff.nodes > 0x10000)) { if((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree nodes: %i\n", huff.nodes); av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree nodes: %i\n", huff.nodes);
return -1; return -1;
} }

View File

@ -1188,9 +1188,6 @@ int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf
} }
#endif #endif
#define MAX_CODED_FRAME_SIZE(width, height)\
(9*(width)*(height) + FF_MIN_BUFFER_SIZE)
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
AVPacket *avpkt, AVPacket *avpkt,
const AVFrame *frame, const AVFrame *frame,
@ -1209,7 +1206,8 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
if (av_image_check_size(avctx->width, avctx->height, 0, avctx)) if (av_image_check_size(avctx->width, avctx->height, 0, avctx))
return AVERROR(EINVAL); return AVERROR(EINVAL);
if (avctx->codec->encode2) { av_assert0(avctx->codec->encode2);
*got_packet_ptr = 0; *got_packet_ptr = 0;
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret) { if (!ret) {
@ -1218,36 +1216,6 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts; avpkt->pts = avpkt->dts = frame->pts;
} }
} else {
/* for compatibility with encoders not supporting encode2(), we need to
allocate a packet buffer if the user has not provided one or check
the size otherwise */
int buf_size = avpkt->size;
if (!user_packet)
buf_size = MAX_CODED_FRAME_SIZE(avctx->width, avctx->height);
if ((ret = ff_alloc_packet(avpkt, buf_size)))
return ret;
/* encode the frame */
ret = avctx->codec->encode(avctx, avpkt->data, avpkt->size, frame);
if (ret >= 0) {
if (!ret) {
/* no output. if the packet data was allocated by libavcodec,
free it */
if (!user_packet)
av_freep(&avpkt->data);
} else if (avctx->coded_frame) {
avpkt->pts = avctx->coded_frame->pts;
avpkt->flags |= AV_PKT_FLAG_KEY*!!avctx->coded_frame->key_frame;
}
avpkt->size = ret;
*got_packet_ptr = (ret > 0);
ret = 0;
}
}
if (!ret) if (!ret)
avctx->frame_number++; avctx->frame_number++;
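
On the caller side, this path is reached through avcodec_encode_video2(), whose signature appears in the hunk above. A minimal usage sketch, assuming an opened encoder context and a filled frame: passing an empty packet lets the encoder allocate it, and flushing a CODEC_CAP_DELAY encoder means repeating the call with frame == NULL until got_packet stays 0.

    #include "libavcodec/avcodec.h"

    /* Caller-side sketch of the encode2()-era API; error handling is
     * reduced to pass/fail, avctx and frame are assumed initialized. */
    static int encode_one(AVCodecContext *avctx, const AVFrame *frame)
    {
        AVPacket pkt;
        int got_packet = 0, ret;

        av_init_packet(&pkt);
        pkt.data = NULL;        /* let the encoder allocate the packet */
        pkt.size = 0;

        ret = avcodec_encode_video2(avctx, &pkt, frame, &got_packet);
        if (ret < 0)
            return ret;
        if (got_packet) {
            /* ... write pkt.data / pkt.size, use pkt.pts and pkt.dts ... */
            av_free_packet(&pkt);
        }
        return 0;
    }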

View File

@ -813,8 +813,8 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
s->hybrid = s->frame_flags & WV_HYBRID_MODE; s->hybrid = s->frame_flags & WV_HYBRID_MODE;
s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE; s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE;
s->post_shift = bpp * 8 - orig_bpp + ((s->frame_flags >> 13) & 0x1f); s->post_shift = bpp * 8 - orig_bpp + ((s->frame_flags >> 13) & 0x1f);
s->hybrid_maxclip = (( 1LL << (orig_bpp - 1)) - 1) >> s->post_shift; s->hybrid_maxclip = (( 1LL << (orig_bpp - 1)) - 1);
s->hybrid_minclip = ((-1LL << (orig_bpp - 1))) >> s->post_shift; s->hybrid_minclip = ((-1LL << (orig_bpp - 1)));
s->CRC = AV_RL32(buf); buf += 4; s->CRC = AV_RL32(buf); buf += 4;
if (wc->mkv_mode) if (wc->mkv_mode)
buf += 4; //skip block size; buf += 4; //skip block size;

View File

@ -258,6 +258,7 @@ OBJS-$(CONFIG_RTP_MUXER) += rtp.o \
rtpenc_latm.o \ rtpenc_latm.o \
rtpenc_amr.o \ rtpenc_amr.o \
rtpenc_h263.o \ rtpenc_h263.o \
rtpenc_h263_rfc2190.o \
rtpenc_mpv.o \ rtpenc_mpv.o \
rtpenc.o \ rtpenc.o \
rtpenc_h264.o \ rtpenc_h264.o \

View File

@ -25,6 +25,7 @@
//#define DEBUG //#define DEBUG
//#define MOV_EXPORT_ALL_METADATA //#define MOV_EXPORT_ALL_METADATA
#include "libavutil/audioconvert.h"
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h" #include "libavutil/intfloat.h"
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
@ -32,6 +33,7 @@
#include "libavutil/dict.h" #include "libavutil/dict.h"
#include "libavutil/opt.h" #include "libavutil/opt.h"
#include "libavutil/timecode.h" #include "libavutil/timecode.h"
#include "libavcodec/ac3tab.h"
#include "avformat.h" #include "avformat.h"
#include "internal.h" #include "internal.h"
#include "avio_internal.h" #include "avio_internal.h"
@ -570,6 +572,9 @@ static int mov_read_dac3(MOVContext *c, AVIOContext *pb, MOVAtom atom)
acmod = (ac3info >> 11) & 0x7; acmod = (ac3info >> 11) & 0x7;
lfeon = (ac3info >> 10) & 0x1; lfeon = (ac3info >> 10) & 0x1;
st->codec->channels = ((int[]){2,1,2,3,3,4,4,5})[acmod] + lfeon; st->codec->channels = ((int[]){2,1,2,3,3,4,4,5})[acmod] + lfeon;
st->codec->channel_layout = avpriv_ac3_channel_layout_tab[acmod];
if (lfeon)
st->codec->channel_layout |= AV_CH_LOW_FREQUENCY;
st->codec->audio_service_type = bsmod; st->codec->audio_service_type = bsmod;
if (st->codec->channels > 1 && bsmod == 0x7) if (st->codec->channels > 1 && bsmod == 0x7)
st->codec->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE; st->codec->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
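
The dac3 parsing above derives both a channel count and, newly, a channel layout from the AC-3 acmod field, adding LFE when lfeon is set. The mapping is small enough to verify by hand; the table below restates it with standard AC-3 mode names (a self-contained check, not libavutil code):

    #include <stdio.h>

    int main(void)
    {
        /* channel count per AC-3 acmod (audio coding mode), plus lfeon */
        static const int nch[8] = { 2, 1, 2, 3, 3, 4, 4, 5 };
        static const char *desc[8] = {
            "1+1 (dual mono)", "1/0 (C)", "2/0 (L R)", "3/0 (L C R)",
            "2/1 (L R S)", "3/1 (L C R S)", "2/2 (L R SL SR)",
            "3/2 (L C R SL SR)"
        };
        int acmod, lfeon;
        for (acmod = 0; acmod < 8; acmod++)
            for (lfeon = 0; lfeon <= 1; lfeon++)
                printf("acmod %d lfeon %d -> %d channels, %s%s\n",
                       acmod, lfeon, nch[acmod] + lfeon, desc[acmod],
                       lfeon ? " + LFE" : "");
        return 0;
    }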

View File

@ -52,7 +52,7 @@ static const AVOption options[] = {
{ "separate_moof", "Write separate moof/mdat atoms for each track", 0, AV_OPT_TYPE_CONST, {.dbl = FF_MOV_FLAG_SEPARATE_MOOF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, { "separate_moof", "Write separate moof/mdat atoms for each track", 0, AV_OPT_TYPE_CONST, {.dbl = FF_MOV_FLAG_SEPARATE_MOOF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
{ "frag_custom", "Flush fragments on caller requests", 0, AV_OPT_TYPE_CONST, {.dbl = FF_MOV_FLAG_FRAG_CUSTOM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, { "frag_custom", "Flush fragments on caller requests", 0, AV_OPT_TYPE_CONST, {.dbl = FF_MOV_FLAG_FRAG_CUSTOM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
{ "isml", "Create a live smooth streaming feed (for pushing to a publishing point)", 0, AV_OPT_TYPE_CONST, {.dbl = FF_MOV_FLAG_ISML}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, { "isml", "Create a live smooth streaming feed (for pushing to a publishing point)", 0, AV_OPT_TYPE_CONST, {.dbl = FF_MOV_FLAG_ISML}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
FF_RTP_FLAG_OPTS(MOVMuxContext, rtp_flags), FF_RTP_FLAG_OPTS(MOVMuxContext, rtp_flags)
{ "skip_iods", "Skip writing iods atom.", offsetof(MOVMuxContext, iods_skip), AV_OPT_TYPE_INT, {.dbl = 1}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM}, { "skip_iods", "Skip writing iods atom.", offsetof(MOVMuxContext, iods_skip), AV_OPT_TYPE_INT, {.dbl = 1}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM},
{ "iods_audio_profile", "iods audio profile atom.", offsetof(MOVMuxContext, iods_audio_profile), AV_OPT_TYPE_INT, {.dbl = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM}, { "iods_audio_profile", "iods audio profile atom.", offsetof(MOVMuxContext, iods_audio_profile), AV_OPT_TYPE_INT, {.dbl = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM},
{ "iods_video_profile", "iods video profile atom.", offsetof(MOVMuxContext, iods_video_profile), AV_OPT_TYPE_INT, {.dbl = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM}, { "iods_video_profile", "iods video profile atom.", offsetof(MOVMuxContext, iods_video_profile), AV_OPT_TYPE_INT, {.dbl = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM},

View File

@ -106,7 +106,9 @@ int ff_rtp_get_payload_type(AVFormatContext *fmt, AVCodecContext *codec)
/* static payload type */ /* static payload type */
for (i = 0; AVRtpPayloadTypes[i].pt >= 0; ++i) for (i = 0; AVRtpPayloadTypes[i].pt >= 0; ++i)
if (AVRtpPayloadTypes[i].codec_id == codec->codec_id) { if (AVRtpPayloadTypes[i].codec_id == codec->codec_id) {
if (codec->codec_id == CODEC_ID_H263) if (codec->codec_id == CODEC_ID_H263 && (!fmt ||
!fmt->oformat->priv_class ||
!av_opt_flag_is_set(fmt->priv_data, "rtpflags", "rfc2190")))
continue; continue;
if (codec->codec_id == CODEC_ID_PCM_S16BE) if (codec->codec_id == CODEC_ID_PCM_S16BE)
if (codec->channels != AVRtpPayloadTypes[i].audio_channels) if (codec->channels != AVRtpPayloadTypes[i].audio_channels)

View File

@ -31,8 +31,9 @@
//#define DEBUG //#define DEBUG
static const AVOption options[] = { static const AVOption options[] = {
FF_RTP_FLAG_OPTS(RTPMuxContext, flags), FF_RTP_FLAG_OPTS(RTPMuxContext, flags)
{ "payload_type", "Specify RTP payload type", offsetof(RTPMuxContext, payload_type), AV_OPT_TYPE_INT, {.dbl = -1 }, -1, 127, AV_OPT_FLAG_ENCODING_PARAM }, { "payload_type", "Specify RTP payload type", offsetof(RTPMuxContext, payload_type), AV_OPT_TYPE_INT, {.dbl = -1 }, -1, 127, AV_OPT_FLAG_ENCODING_PARAM },
{ "max_packet_size", "Max packet size", offsetof(RTPMuxContext, max_packet_size), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
{ NULL }, { NULL },
}; };
@ -82,11 +83,13 @@ static int is_supported(enum CodecID id)
static int rtp_write_header(AVFormatContext *s1) static int rtp_write_header(AVFormatContext *s1)
{ {
RTPMuxContext *s = s1->priv_data; RTPMuxContext *s = s1->priv_data;
int max_packet_size, n; int n;
AVStream *st; AVStream *st;
if (s1->nb_streams != 1) if (s1->nb_streams != 1) {
return -1; av_log(s1, AV_LOG_ERROR, "Only one stream supported in the RTP muxer\n");
return AVERROR(EINVAL);
}
st = s1->streams[0]; st = s1->streams[0];
if (!is_supported(st->codec->codec_id)) { if (!is_supported(st->codec->codec_id)) {
av_log(s1, AV_LOG_ERROR, "Unsupported codec %s\n", avcodec_get_name(st->codec->codec_id)); av_log(s1, AV_LOG_ERROR, "Unsupported codec %s\n", avcodec_get_name(st->codec->codec_id));
@ -107,16 +110,21 @@ static int rtp_write_header(AVFormatContext *s1)
s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 + s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 +
NTP_OFFSET_US; NTP_OFFSET_US;
max_packet_size = s1->pb->max_packet_size; if (s->max_packet_size) {
if (max_packet_size <= 12) { if (s1->pb->max_packet_size)
av_log(s1, AV_LOG_ERROR, "Max packet size %d too low\n", max_packet_size); s->max_packet_size = FFMIN(s->max_payload_size,
s1->pb->max_packet_size);
} else
s->max_packet_size = s1->pb->max_packet_size;
if (s->max_packet_size <= 12) {
av_log(s1, AV_LOG_ERROR, "Max packet size %d too low\n", s->max_packet_size);
return AVERROR(EIO); return AVERROR(EIO);
} }
s->buf = av_malloc(max_packet_size); s->buf = av_malloc(s->max_packet_size);
if (s->buf == NULL) { if (s->buf == NULL) {
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
s->max_payload_size = max_packet_size - 12; s->max_payload_size = s->max_packet_size - 12;
s->max_frames_per_packet = 0; s->max_frames_per_packet = 0;
if (s1->max_delay) { if (s1->max_delay) {
@ -386,8 +394,9 @@ static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt)
rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) / rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
RTCP_TX_RATIO_DEN; RTCP_TX_RATIO_DEN;
if (s->first_packet || ((rtcp_bytes >= RTCP_SR_SIZE) && if ((s->first_packet || ((rtcp_bytes >= RTCP_SR_SIZE) &&
(ff_ntp_time() - s->last_rtcp_ntp_time > 5000000))) { (ff_ntp_time() - s->last_rtcp_ntp_time > 5000000))) &&
!(s->flags & FF_RTP_FLAG_SKIP_RTCP)) {
rtcp_send_sr(s1, ff_ntp_time()); rtcp_send_sr(s1, ff_ntp_time());
s->last_octet_count = s->octet_count; s->last_octet_count = s->octet_count;
s->first_packet = 0; s->first_packet = 0;
@ -443,6 +452,11 @@ static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt)
ff_rtp_send_h264(s1, pkt->data, size); ff_rtp_send_h264(s1, pkt->data, size);
break; break;
case CODEC_ID_H263: case CODEC_ID_H263:
if (s->flags & FF_RTP_FLAG_RFC2190) {
ff_rtp_send_h263_rfc2190(s1, pkt->data, size);
break;
}
/* Fallthrough */
case CODEC_ID_H263P: case CODEC_ID_H263P:
ff_rtp_send_h263(s1, pkt->data, size); ff_rtp_send_h263(s1, pkt->data, size);
break; break;

View File

@ -34,6 +34,7 @@ struct RTPMuxContext {
uint32_t timestamp; uint32_t timestamp;
uint32_t base_timestamp; uint32_t base_timestamp;
uint32_t cur_timestamp; uint32_t cur_timestamp;
int max_packet_size;
int max_payload_size; int max_payload_size;
int num_frames; int num_frames;
@ -64,15 +65,20 @@ struct RTPMuxContext {
typedef struct RTPMuxContext RTPMuxContext; typedef struct RTPMuxContext RTPMuxContext;
#define FF_RTP_FLAG_MP4A_LATM 1 #define FF_RTP_FLAG_MP4A_LATM 1
#define FF_RTP_FLAG_RFC2190 2
#define FF_RTP_FLAG_SKIP_RTCP 4
#define FF_RTP_FLAG_OPTS(ctx, fieldname) \ #define FF_RTP_FLAG_OPTS(ctx, fieldname) \
{ "rtpflags", "RTP muxer flags", offsetof(ctx, fieldname), AV_OPT_TYPE_FLAGS, {.dbl = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \ { "rtpflags", "RTP muxer flags", offsetof(ctx, fieldname), AV_OPT_TYPE_FLAGS, {.dbl = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "latm", "Use MP4A-LATM packetization instead of MPEG4-GENERIC for AAC", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_MP4A_LATM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" } \ { "latm", "Use MP4A-LATM packetization instead of MPEG4-GENERIC for AAC", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_MP4A_LATM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "rfc2190", "Use RFC 2190 packetization instead of RFC 4629 for H.263", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_RFC2190}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
{ "skip_rtcp", "Don't send RTCP sender reports", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_SKIP_RTCP}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
void ff_rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m); void ff_rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m);
void ff_rtp_send_h264(AVFormatContext *s1, const uint8_t *buf1, int size); void ff_rtp_send_h264(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size); void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_h263_rfc2190(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size); void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_latm(AVFormatContext *s1, const uint8_t *buff, int size); void ff_rtp_send_latm(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_amr(AVFormatContext *s1, const uint8_t *buff, int size); void ff_rtp_send_amr(AVFormatContext *s1, const uint8_t *buff, int size);
@ -80,4 +86,7 @@ void ff_rtp_send_mpegvideo(AVFormatContext *s1, const uint8_t *buf1, int size);
void ff_rtp_send_xiph(AVFormatContext *s1, const uint8_t *buff, int size); void ff_rtp_send_xiph(AVFormatContext *s1, const uint8_t *buff, int size);
void ff_rtp_send_vp8(AVFormatContext *s1, const uint8_t *buff, int size); void ff_rtp_send_vp8(AVFormatContext *s1, const uint8_t *buff, int size);
const uint8_t *ff_h263_find_resync_marker_reverse(const uint8_t *restrict start,
const uint8_t *restrict end);
#endif /* AVFORMAT_RTPENC_H */ #endif /* AVFORMAT_RTPENC_H */

View File

@@ -23,7 +23,7 @@
 #include "avformat.h"
 #include "rtpenc.h"
 
-static const uint8_t *find_resync_marker_reverse(const uint8_t *restrict start,
-                                                 const uint8_t *restrict end)
+const uint8_t *ff_h263_find_resync_marker_reverse(const uint8_t *restrict start,
+                                                  const uint8_t *restrict end)
 {
     const uint8_t *p = end - 1;
@@ -63,7 +63,8 @@ void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size)
 
     /* Look for a better place to split the frame into packets. */
     if (len < size) {
-        const uint8_t *end = find_resync_marker_reverse(buf1, buf1 + len);
+        const uint8_t *end = ff_h263_find_resync_marker_reverse(buf1,
+                                                                buf1 + len);
         len = end - buf1;
     }
@@ -0,0 +1,104 @@
+/*
+ * RTP packetization for H.263 video
+ * Copyright (c) 2012 Martin Storsjo
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "rtpenc.h"
+#include "libavcodec/put_bits.h"
+#include "libavcodec/get_bits.h"
+
+struct H263Info {
+    int src;
+    int i;
+    int u;
+    int s;
+    int a;
+    int pb;
+    int tr;
+};
+
+static void send_mode_a(AVFormatContext *s1, const struct H263Info *info,
+                        const uint8_t *buf, int len, int m)
+{
+    RTPMuxContext *s = s1->priv_data;
+    PutBitContext pb;
+
+    init_put_bits(&pb, s->buf, 32);
+    put_bits(&pb, 1, 0);         /* F - 0, mode A */
+    put_bits(&pb, 1, 0);         /* P - 0, normal I/P */
+    put_bits(&pb, 3, 0);         /* SBIT - 0 bits */
+    put_bits(&pb, 3, 0);         /* EBIT - 0 bits */
+    put_bits(&pb, 3, info->src); /* SRC - source format */
+    put_bits(&pb, 1, info->i);   /* I - inter/intra */
+    put_bits(&pb, 1, info->u);   /* U - unrestricted motion vector */
+    put_bits(&pb, 1, info->s);   /* S - syntax-based arithmetic coding */
+    put_bits(&pb, 1, info->a);   /* A - advanced prediction */
+    put_bits(&pb, 4, 0);         /* R - reserved */
+    put_bits(&pb, 2, 0);         /* DBQ - 0 */
+    put_bits(&pb, 3, 0);         /* TRB - 0 */
+    put_bits(&pb, 8, info->tr);  /* TR */
+    flush_put_bits(&pb);
+
+    memcpy(s->buf + 4, buf, len);
+    ff_rtp_send_data(s1, s->buf, len + 4, m);
+}
+
+void ff_rtp_send_h263_rfc2190(AVFormatContext *s1, const uint8_t *buf, int size)
+{
+    RTPMuxContext *s = s1->priv_data;
+    int len;
+    GetBitContext gb;
+    struct H263Info info = { 0 };
+
+    s->timestamp = s->cur_timestamp;
+
+    init_get_bits(&gb, buf, size*8);
+    if (get_bits(&gb, 22) == 0x20) { /* Picture Start Code */
+        info.tr  = get_bits(&gb, 8);
+        skip_bits(&gb, 2); /* PTYPE start, H261 disambiguation */
+        skip_bits(&gb, 3); /* Split screen, document camera, freeze picture release */
+        info.src = get_bits(&gb, 3);
+        info.i   = get_bits(&gb, 1);
+        info.u   = get_bits(&gb, 1);
+        info.s   = get_bits(&gb, 1);
+        info.a   = get_bits(&gb, 1);
+        info.pb  = get_bits(&gb, 1);
+    }
+
+    while (size > 0) {
+        len = FFMIN(s->max_payload_size - 4, size);
+
+        /* Look for a better place to split the frame into packets. */
+        if (len < size) {
+            const uint8_t *end = ff_h263_find_resync_marker_reverse(buf,
+                                                                    buf + len);
+            len = end - buf;
+            if (len == s->max_payload_size - 4)
+                av_log(s1, AV_LOG_WARNING,
+                       "No GOB boundary found within MTU size, splitting at "
+                       "a random boundary\n");
+        }
+
+        send_mode_a(s1, &info, buf, len, len == size);
+
+        buf  += len;
+        size -= len;
+    }
+}
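The call site of this new packetizer is outside this excerpt; rtpenc.c presumably routes H.263 payloads through it when the flag is set, roughly as in this sketch (assuming the option bits land in RTPMuxContext's flags field, as the FF_RTP_FLAG_OPTS offsetof suggests):

    /* Sketch only; the real dispatch lives in rtpenc.c, which is not shown here. */
    static void rtp_send_h263_any(AVFormatContext *s1, const uint8_t *buf, int size)
    {
        RTPMuxContext *s = s1->priv_data;
        if (s->flags & FF_RTP_FLAG_RFC2190)
            ff_rtp_send_h263_rfc2190(s1, buf, size); /* RFC 2190 mode A headers */
        else
            ff_rtp_send_h263(s1, buf, size);         /* RFC 4629 */
    }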
@@ -73,7 +73,7 @@
 
 const AVOption ff_rtsp_options[] = {
     { "initial_pause", "Don't start playing the stream immediately", OFFSET(initial_pause), AV_OPT_TYPE_INT, {0}, 0, 1, DEC },
-    FF_RTP_FLAG_OPTS(RTSPState, rtp_muxer_flags),
+    FF_RTP_FLAG_OPTS(RTSPState, rtp_muxer_flags)
     { "rtsp_transport", "RTSP transport protocols", OFFSET(lower_transport_mask), AV_OPT_TYPE_FLAGS, {0}, INT_MIN, INT_MAX, DEC|ENC, "rtsp_transport" }, \
     { "udp", "UDP", 0, AV_OPT_TYPE_CONST, {1 << RTSP_LOWER_TRANSPORT_UDP}, 0, 0, DEC|ENC, "rtsp_transport" }, \
     { "tcp", "TCP", 0, AV_OPT_TYPE_CONST, {1 << RTSP_LOWER_TRANSPORT_TCP}, 0, 0, DEC|ENC, "rtsp_transport" }, \
@@ -404,6 +404,9 @@ static char *sdp_write_media_attributes(char *buff, int size, AVCodecContext *c,
          * actually specifies the maximum video size, but we only know
          * the current size. This is required for playback on Android
          * stagefright and on Samsung bada. */
+        if (!fmt || !fmt->oformat->priv_class ||
+            !av_opt_flag_is_set(fmt->priv_data, "rtpflags", "rfc2190") ||
+            c->codec_id == CODEC_ID_H263P)
         av_strlcatf(buff, size, "a=rtpmap:%d H263-2000/90000\r\n"
                                 "a=framesize:%d %d-%d\r\n",
                                 payload_type,
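For reference, with a hypothetical dynamic payload type of 96 and a 352x288 stream, the format string above expands to:

    a=rtpmap:96 H263-2000/90000
    a=framesize:96 352-288

With the new guard, these lines are omitted when the muxer's rfc2190 flag is set and the codec is plain H.263 (H263P always keeps them).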
@@ -84,7 +84,7 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
     SWFContext *swf = s->priv_data;
     AVIOContext *pb = s->pb;
     AVStream *vst = NULL, *ast = NULL, *st = 0;
-    int tag, len, i, frame, v;
+    int tag, len, i, frame, v, res;
 
     for(;;) {
         uint64_t pos = avio_tell(pb);
@@ -147,7 +147,8 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 st = s->streams[i];
                 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && st->id == ch_id) {
                     frame = avio_rl16(pb);
-                    av_get_packet(pb, pkt, len-2);
+                    if ((res = av_get_packet(pb, pkt, len-2)) < 0)
+                        return res;
                     pkt->pos = pos;
                     pkt->pts = frame;
                     pkt->stream_index = st->index;
@@ -160,9 +161,11 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->id == -1) {
                     if (st->codec->codec_id == CODEC_ID_MP3) {
                         avio_skip(pb, 4);
-                        av_get_packet(pb, pkt, len-4);
+                        if ((res = av_get_packet(pb, pkt, len-4)) < 0)
+                            return res;
                     } else { // ADPCM, PCM
-                        av_get_packet(pb, pkt, len);
+                        if ((res = av_get_packet(pb, pkt, len)) < 0)
+                            return res;
                     }
                     pkt->pos = pos;
                     pkt->stream_index = st->index;
@@ -187,7 +190,8 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 st = vst;
             }
             avio_rl16(pb); /* BITMAP_ID */
-            av_new_packet(pkt, len-2);
+            if ((res = av_new_packet(pkt, len-2)) < 0)
+                return res;
             avio_read(pb, pkt->data, 4);
             if (AV_RB32(pkt->data) == 0xffd8ffd9 ||
                 AV_RB32(pkt->data) == 0xffd9ffd8) {
@@ -433,12 +433,14 @@ static int swScale(SwsContext *c, const uint8_t* src[],
         };
         int use_mmx_vfilter= c->use_mmx_vfilter;
 
-        const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
-        const int firstLumSrcY2= vLumFilterPos[FFMIN(dstY | ((1<<c->chrDstVSubSample) - 1), dstH-1)];
-        const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
+        const int firstLumSrcY= FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]); //First line needed as input
+        const int firstLumSrcY2= FFMAX(1 - vLumFilterSize, vLumFilterPos[FFMIN(dstY | ((1<<c->chrDstVSubSample) - 1), dstH-1)]);
+        const int firstChrSrcY= FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]); //First line needed as input
 
-        int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
-        int lastLumSrcY2=firstLumSrcY2+ vLumFilterSize -1; // Last line needed as input
-        int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
+        // Last line needed as input
+        int lastLumSrcY  = FFMIN(c->srcH,    firstLumSrcY  + vLumFilterSize) - 1;
+        int lastLumSrcY2 = FFMIN(c->srcH,    firstLumSrcY2 + vLumFilterSize) - 1;
+        int lastChrSrcY  = FFMIN(c->chrSrcH, firstChrSrcY  + vChrFilterSize) - 1;
         int enough_lines;
 
         //handle holes (FAST_BILINEAR & weird filters)
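To see what the new clamping does at the image borders, a self-contained sketch with made-up numbers (filter size 4, a filter position starting two lines above a 288-line source):

    #include <stdio.h>

    #define FFMAX(a, b) ((a) > (b) ? (a) : (b))
    #define FFMIN(a, b) ((a) > (b) ? (b) : (a))

    int main(void)
    {
        int vLumFilterSize = 4, srcH = 288;
        int pos = -2; /* hypothetical vLumFilterPos[dstY] near the top edge */

        int firstLumSrcY = FFMAX(1 - vLumFilterSize, pos);                 /* -2 */
        int lastLumSrcY  = FFMIN(srcH, firstLumSrcY + vLumFilterSize) - 1; /*  1 */

        /* At most vLumFilterSize - 1 lines outside the image are now
         * requested; those get filled with replicated edge lines. */
        printf("first=%d last=%d\n", firstLumSrcY, lastLumSrcY);
        return 0;
    }

The matching edge replication for the MMX code path is the updateMMXDitherTables hunk further down.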
@@ -275,7 +275,8 @@ static int initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSi
         if (xInc <= 1<<16)  filterSize= 1 + sizeFactor; // upscale
         else                filterSize= 1 + (sizeFactor*srcW + dstW - 1)/ dstW;
 
-        filterSize = av_clip(filterSize, 1, srcW - 2);
+        filterSize = FFMIN(filterSize, srcW - 2);
+        filterSize = FFMAX(filterSize, 1);
 
         FF_ALLOC_OR_GOTO(NULL, filter, dstW*sizeof(*filter)*filterSize, fail);
@@ -841,8 +842,8 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter)
     if (!dstFilter) dstFilter= &dummyFilter;
     if (!srcFilter) srcFilter= &dummyFilter;
 
-    c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW;
-    c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH;
+    c->lumXInc= (((int64_t)srcW<<16) + (dstW>>1))/dstW;
+    c->lumYInc= (((int64_t)srcH<<16) + (dstH>>1))/dstH;
     c->dstFormatBpp = av_get_bits_per_pixel(&av_pix_fmt_descriptors[dstFormat]);
     c->srcFormatBpp = av_get_bits_per_pixel(&av_pix_fmt_descriptors[srcFormat]);
     c->vRounder= 4* 0x0001000100010001ULL;
@@ -921,8 +922,8 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter)
     else
         c->canMMX2BeUsed=0;
 
-    c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW;
-    c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH;
+    c->chrXInc= (((int64_t)c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW;
+    c->chrYInc= (((int64_t)c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH;
 
     // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
     // but only for the FAST_BILINEAR mode otherwise do correct scaling
@@ -937,8 +938,8 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter)
         }
         //we don't use the x86 asm scaler if MMX is available
         else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX && c->dstBpc <= 10) {
-            c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
-            c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
+            c->lumXInc = ((int64_t)(srcW-2)<<16)/(dstW-2) - 20;
+            c->chrXInc = ((int64_t)(c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
         }
     }
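The int64_t casts matter because srcW << 16 overflows a 32-bit int once the width exceeds 32767. A standalone illustration with a hypothetical 40000-pixel-wide source:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int srcW = 40000, dstW = 1000; /* hypothetical large downscale */

        /* 40000 << 16 = 2621440000 > INT_MAX, so the uncast 32-bit shift is
         * undefined behaviour; the cast performs the arithmetic in 64 bits. */
        int64_t lumXInc = (((int64_t)srcW << 16) + (dstW >> 1)) / dstW;

        printf("lumXInc = %" PRId64 "\n", lumXInc); /* 2621440, i.e. 40.0 in 16.16 */
        return 0;
    }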
@@ -118,6 +118,44 @@ void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufI
     const int16_t **chrUSrcPtr= (const int16_t **)(void*) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
     const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)(void*) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
     int i;
+
+    if (firstLumSrcY < 0 || firstLumSrcY + vLumFilterSize > c->srcH) {
+        const int16_t **tmpY = (const int16_t **) lumPixBuf + 2 * vLumBufSize;
+        int neg = -firstLumSrcY, i, end = FFMIN(c->srcH - firstLumSrcY, vLumFilterSize);
+        for (i = 0; i < neg;            i++)
+            tmpY[i] = lumSrcPtr[neg];
+        for (     ; i < end;            i++)
+            tmpY[i] = lumSrcPtr[i];
+        for (     ; i < vLumFilterSize; i++)
+            tmpY[i] = tmpY[i-1];
+        lumSrcPtr = tmpY;
+
+        if (alpSrcPtr) {
+            const int16_t **tmpA = (const int16_t **) alpPixBuf + 2 * vLumBufSize;
+            for (i = 0; i < neg;            i++)
+                tmpA[i] = alpSrcPtr[neg];
+            for (     ; i < end;            i++)
+                tmpA[i] = alpSrcPtr[i];
+            for (     ; i < vLumFilterSize; i++)
+                tmpA[i] = tmpA[i - 1];
+            alpSrcPtr = tmpA;
+        }
+    }
+    if (firstChrSrcY < 0 || firstChrSrcY + vChrFilterSize > c->chrSrcH) {
+        const int16_t **tmpU = (const int16_t **) chrUPixBuf + 2 * vChrBufSize;
+        int neg = -firstChrSrcY, i, end = FFMIN(c->chrSrcH - firstChrSrcY, vChrFilterSize);
+        for (i = 0; i < neg; i++) {
+            tmpU[i] = chrUSrcPtr[neg];
+        }
+        for (; i < end; i++) {
+            tmpU[i] = chrUSrcPtr[i];
+        }
+        for (; i < vChrFilterSize; i++) {
+            tmpU[i] = tmpU[i - 1];
+        }
+        chrUSrcPtr = tmpU;
+    }
+
     if (flags & SWS_ACCURATE_RND) {
         int s= APCK_SIZE / 8;
         for (i=0; i<vLumFilterSize; i+=2) {