mpeg12: cosmetics: reformat as K&R
commit 6192b6f3e7
parent ae264bb29b
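The reformatting below follows the usual K&R conventions: the opening brace of a function body moves to its own line, parameter lists are joined onto one line where they fit, spaces are added around operators and after keywords such as `if`, and single-statement bodies go on their own indented line. A minimal before/after sketch with a made-up function (ExampleContext and example_decode are hypothetical, not taken from mpeg12.c):

    typedef struct ExampleContext { int count; } ExampleContext;

    /* Before the reformat, a definition might have looked like:
     *
     * static int example_decode(ExampleContext *c,
     *                           int *dst,
     *                           int n){
     *     if(n <= 0) return -1;
     *     c->count= n;
     *     return 0;
     * }
     */

    /* After, in K&R style: */
    static int example_decode(ExampleContext *c, int *dst, int n)
    {
        (void)dst;              /* unused in this sketch */
        if (n <= 0)
            return -1;
        c->count = n;
        return 0;
    }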
@@ -49,19 +49,11 @@
 #define MB_PTYPE_VLC_BITS 6
 #define MB_BTYPE_VLC_BITS 6
 
-static inline int mpeg1_decode_block_intra(MpegEncContext *s,
-                                           DCTELEM *block,
-                                           int n);
-static inline int mpeg1_decode_block_inter(MpegEncContext *s,
-                                           DCTELEM *block,
-                                           int n);
+static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n);
+static inline int mpeg1_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n);
 static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n);
-static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
-                                               DCTELEM *block,
-                                               int n);
-static inline int mpeg2_decode_block_intra(MpegEncContext *s,
-                                           DCTELEM *block,
-                                           int n);
+static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n);
+static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n);
 static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n);
 static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n);
 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred);
@@ -127,7 +119,8 @@ void ff_mpeg12_common_init(MpegEncContext *s)
 
 }
 
-void ff_mpeg1_clean_buffers(MpegEncContext *s){
+void ff_mpeg1_clean_buffers(MpegEncContext *s)
+{
     s->last_dc[0] = 1 << (7 + s->intra_dc_precision);
     s->last_dc[1] = s->last_dc[0];
     s->last_dc[2] = s->last_dc[0];
@@ -208,8 +201,7 @@ static inline int get_qscale(MpegEncContext *s)
 #define MT_16X8 2
 #define MT_DMV 3
 
-static int mpeg_decode_mb(MpegEncContext *s,
-                          DCTELEM block[12][64])
+static int mpeg_decode_mb(MpegEncContext *s, DCTELEM block[12][64])
 {
     int i, j, k, cbp, val, mb_type, motion_type;
     const int mb_block_count = 4 + (1 << s->chroma_format);
@@ -613,9 +605,7 @@ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
     return val;
 }
 
-static inline int mpeg1_decode_block_intra(MpegEncContext *s,
-                                           DCTELEM *block,
-                                           int n)
+static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n)
 {
     int level, dc, diff, i, j, run;
     int component;
@@ -686,16 +676,12 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s,
     return 0;
 }
 
-int ff_mpeg1_decode_block_intra(MpegEncContext *s,
-                                DCTELEM *block,
-                                int n)
+int ff_mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n)
 {
     return mpeg1_decode_block_intra(s, block, n);
 }
 
-static inline int mpeg1_decode_block_inter(MpegEncContext *s,
-                                           DCTELEM *block,
-                                           int n)
+static inline int mpeg1_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n)
 {
     int level, i, j, run;
     RLTable *rl = &ff_rl_mpeg1;
@@ -842,9 +828,7 @@ end:
 }
 
 
-static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
-                                               DCTELEM *block,
-                                               int n)
+static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n)
 {
     int level, i, j, run;
     RLTable *rl = &ff_rl_mpeg1;
@@ -924,8 +908,7 @@ end:
 }
 
 static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
-                                                    DCTELEM *block,
-                                                    int n)
+                                                    DCTELEM *block, int n)
 {
     int level, i, j, run;
     RLTable *rl = &ff_rl_mpeg1;
@@ -986,9 +969,7 @@ end:
 }
 
 
-static inline int mpeg2_decode_block_intra(MpegEncContext *s,
-                                           DCTELEM *block,
-                                           int n)
+static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n)
 {
     int level, dc, diff, i, j, run;
     int component;
@@ -1066,9 +1047,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s,
     return 0;
 }
 
-static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
-                                                DCTELEM *block,
-                                                int n)
+static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n)
 {
     int level, dc, diff, j, run;
     int component;
@@ -1202,7 +1181,8 @@ static int mpeg_decode_update_thread_context(AVCodecContext *avctx, const AVCode
 }
 
 static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
-                                 const uint8_t *new_perm){
+                                 const uint8_t *new_perm)
+{
     uint16_t temp_matrix[64];
     int i;
 
@@ -1213,7 +1193,8 @@ static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
     }
 }
 
-static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx){
+static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
+{
     Mpeg1Context *s1 = avctx->priv_data;
     MpegEncContext *s = &s1->mpeg_enc_ctx;
 
@@ -1236,13 +1217,13 @@ static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx){
 
 /* Call this function when we know all parameters.
  * It may be called in different places for MPEG-1 and MPEG-2. */
-static int mpeg_decode_postinit(AVCodecContext *avctx){
+static int mpeg_decode_postinit(AVCodecContext *avctx)
+{
     Mpeg1Context *s1 = avctx->priv_data;
     MpegEncContext *s = &s1->mpeg_enc_ctx;
     uint8_t old_permutation[64];
 
-    if (
-        (s1->mpeg_enc_ctx_allocated == 0)||
+    if ((s1->mpeg_enc_ctx_allocated == 0) ||
         avctx->coded_width != s->width ||
         avctx->coded_height != s->height ||
         s1->save_width != s->width ||
@@ -1279,13 +1260,11 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
             avctx->time_base.den = ff_frame_rate_tab[s->frame_rate_index].num;
             avctx->time_base.num = ff_frame_rate_tab[s->frame_rate_index].den;
             //MPEG-1 aspect
-            avctx->sample_aspect_ratio= av_d2q(
-                1.0/ff_mpeg1_aspect[s->aspect_ratio_info], 255);
+            avctx->sample_aspect_ratio = av_d2q(1.0/ff_mpeg1_aspect[s->aspect_ratio_info], 255);
             avctx->ticks_per_frame=1;
         } else {//MPEG-2
             //MPEG-2 fps
-            av_reduce(
-                &s->avctx->time_base.den,
+            av_reduce(&s->avctx->time_base.den,
                       &s->avctx->time_base.num,
                       ff_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num*2,
                       ff_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den,
@@ -1294,8 +1273,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
             //MPEG-2 aspect
             if (s->aspect_ratio_info > 1) {
                 AVRational dar =
-                    av_mul_q(
-                        av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
+                    av_mul_q(av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
                                       (AVRational) {s1->pan_scan.width, s1->pan_scan.height}),
                              (AVRational) {s->width, s->height});
 
@@ -1305,16 +1283,12 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
                 if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
                     (av_cmp_q(dar, (AVRational) {4, 3}) && av_cmp_q(dar, (AVRational) {16, 9}))) {
                     s->avctx->sample_aspect_ratio =
-                        av_div_q(
-                            ff_mpeg2_aspect[s->aspect_ratio_info],
-                            (AVRational){s->width, s->height}
-                        );
+                        av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
+                                 (AVRational) {s->width, s->height});
                 } else {
                     s->avctx->sample_aspect_ratio =
-                        av_div_q(
-                            ff_mpeg2_aspect[s->aspect_ratio_info],
-                            (AVRational){s1->pan_scan.width, s1->pan_scan.height}
-                        );
+                        av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
+                                 (AVRational) {s1->pan_scan.width, s1->pan_scan.height});
                     //issue1613 4/3 16/9 -> 16/9
                     //res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
                     //widescreen-issue562.mpg 4/3 16/9 -> 16/9
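For context on the arithmetic in the hunk above: the MPEG-2 path derives the sample aspect ratio (SAR) by dividing a display aspect ratio (DAR) taken from ff_mpeg2_aspect by the frame's width/height ratio, i.e. SAR = DAR / (W/H). A standalone sketch of that relation using libavutil's AVRational helpers, with made-up numbers (720x576 coded size and a 4:3 display aspect; illustrative values only, not taken from the decoder):

    #include <stdio.h>
    #include "libavutil/rational.h"

    int main(void)
    {
        /* hypothetical example values, not from mpeg12.c */
        AVRational dar   = { 4, 3 };     /* display aspect ratio from the table */
        AVRational coded = { 720, 576 }; /* coded width : height as a ratio     */

        /* SAR = DAR / (W/H); av_div_q() returns the reduced fraction */
        AVRational sar = av_div_q(dar, coded);

        printf("sample aspect ratio = %d:%d\n", sar.num, sar.den); /* 16:15 */
        return 0;
    }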
@@ -1417,7 +1391,8 @@ static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
     s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
 
     s->low_delay = get_bits1(&s->gb);
-    if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;
+    if (s->flags & CODEC_FLAG_LOW_DELAY)
+        s->low_delay = 1;
 
     s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
     s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
@@ -1486,11 +1461,11 @@ static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
         av_log(s->avctx, AV_LOG_DEBUG, "pde (%d,%d) (%d,%d) (%d,%d)\n",
                s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
                s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
-               s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]
-        );
+               s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
 }
 
-static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra){
+static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
+{
     int i;
 
     for (i = 0; i < 64; i++) {
@@ -1598,7 +1573,8 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
     av_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
 }
 
-static void exchange_uv(MpegEncContext *s){
+static void exchange_uv(MpegEncContext *s)
+{
     DCTELEM (*tmp)[64];
 
     tmp = s->pblocks[4];
@@ -1606,7 +1582,8 @@ static void exchange_uv(MpegEncContext *s){
     s->pblocks[5] = tmp;
 }
 
-static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size){
+static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
+{
     AVCodecContext *avctx = s->avctx;
     Mpeg1Context *s1 = (Mpeg1Context*)s;
 
@@ -1677,8 +1654,8 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
 {
     MpegEncContext *s = &s1->mpeg_enc_ctx;
     AVCodecContext *avctx = s->avctx;
-    const int field_pic= s->picture_structure != PICT_FRAME;
     const int lowres = s->avctx->lowres;
+    const int field_pic = s->picture_structure != PICT_FRAME;
 
     s->resync_mb_x =
     s->resync_mb_y = -1;
@@ -1894,7 +1871,8 @@ eos: // end of slice
     return 0;
 }
 
-static int slice_decode_thread(AVCodecContext *c, void *arg){
+static int slice_decode_thread(AVCodecContext *c, void *arg)
+{
     MpegEncContext *s = *(void**)arg;
     const uint8_t *buf = s->gb.buffer;
     int mb_y = s->start_mb_y;
@@ -2051,7 +2029,8 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
     avctx->sub_id = 1; /* indicates MPEG-1 */
     s->out_format = FMT_MPEG1;
     s->swap_uv = 0; // AFAIK VCR2 does not have SEQ_HEADER
-    if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;
+    if (s->flags & CODEC_FLAG_LOW_DELAY)
+        s->low_delay = 1;
 
     if (s->avctx->debug & FF_DEBUG_PICT_INFO)
         av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n",
@@ -2138,7 +2117,8 @@ static void mpeg_decode_user_data(AVCodecContext *avctx,
 }
 
 static void mpeg_decode_gop(AVCodecContext *avctx,
-                            const uint8_t *buf, int buf_size){
+                            const uint8_t *buf, int buf_size)
+{
     Mpeg1Context *s1 = avctx->priv_data;
     MpegEncContext *s = &s1->mpeg_enc_ctx;
 
@@ -2194,8 +2174,10 @@ int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size,
             if (state == EXT_START_CODE && (buf[i] & 0xF0) != 0x80)
                 pc->frame_start_found--;
             else if (state == EXT_START_CODE + 2) {
-                if((buf[i]&3) == 3) pc->frame_start_found= 0;
-                else pc->frame_start_found= (pc->frame_start_found+1)&3;
+                if ((buf[i] & 3) == 3)
+                    pc->frame_start_found = 0;
+                else
+                    pc->frame_start_found = (pc->frame_start_found + 1) & 3;
             }
             state++;
         } else {
@@ -2323,8 +2305,7 @@ static int decode_chunks(AVCodecContext *avctx,
         switch (start_code) {
         case SEQ_START_CODE:
             if (last_code == 0) {
-                mpeg1_decode_sequence(avctx, buf_ptr,
-                                      input_size);
+                mpeg1_decode_sequence(avctx, buf_ptr, input_size);
                 s->sync=1;
             } else {
                 av_log(avctx, AV_LOG_ERROR, "ignoring SEQ_START_CODE after %X\n", last_code);
@@ -2396,8 +2377,7 @@ static int decode_chunks(AVCodecContext *avctx,
             }
             break;
         case USER_START_CODE:
-            mpeg_decode_user_data(avctx,
-                                  buf_ptr, input_size);
+            mpeg_decode_user_data(avctx, buf_ptr, input_size);
             break;
         case GOP_START_CODE:
             if (last_code == 0) {
@@ -2438,12 +2418,13 @@ static int decode_chunks(AVCodecContext *avctx,
                 /* Skip P-frames if we do not have a reference frame or we have an invalid header. */
                 if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) break;
             }
-            if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==AV_PICTURE_TYPE_B)
-              ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=AV_PICTURE_TYPE_I)
-              || avctx->skip_frame >= AVDISCARD_ALL)
+            if ((avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type == AV_PICTURE_TYPE_B) ||
+                (avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type != AV_PICTURE_TYPE_I) ||
+                avctx->skip_frame >= AVDISCARD_ALL)
                 break;
 
-            if (!s->mpeg_enc_ctx_allocated) break;
+            if (!s->mpeg_enc_ctx_allocated)
+                break;
 
             if (s2->codec_id == CODEC_ID_MPEG2VIDEO) {
                 if (mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom)
@@ -2506,7 +2487,8 @@ static int decode_chunks(AVCodecContext *avctx,
     }
 }
 
-static void flush(AVCodecContext *avctx){
+static void flush(AVCodecContext *avctx)
+{
     Mpeg1Context *s = avctx->priv_data;
 
     s->sync=0;
@@ -2582,7 +2564,8 @@ AVCodec ff_mpegvideo_decoder = {
 };
 
 #if CONFIG_MPEG_XVMC_DECODER
-static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx){
+static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx)
+{
     if (avctx->active_thread_type & FF_THREAD_SLICE)
         return -1;
     if (!(avctx->slice_flags & SLICE_FLAG_CODED_ORDER))