avcodec/mpegvideo: Shorten variable names
current_picture->cur_pic, last_picture->last_pic, similarly for new_picture and next_picture. Also rename the corresponding *_ptr fields.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
parent 3a4e7694a1
commit ec1eba792a
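For quick reference, the rename applied throughout this commit follows a single pattern on the MpegEncContext fields. The mapping below is a summary derived from the hunks that follow, not an exhaustive list; the corresponding *_ptr fields are renamed the same way:

    current_picture  -> cur_pic    (current_picture_ptr -> cur_pic_ptr)
    last_picture     -> last_pic   (last_picture_ptr    -> last_pic_ptr)
    next_picture     -> next_pic   (next_picture_ptr    -> next_pic_ptr)
    new_picture      -> new_pic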
@@ -44,7 +44,7 @@ static int d3d12va_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint
{
const MpegEncContext *s = avctx->priv_data;
D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx);
-D3D12DecodePictureContext *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
+D3D12DecodePictureContext *ctx_pic = s->cur_pic_ptr->hwaccel_picture_private;

if (!ctx)
return -1;
@@ -69,7 +69,7 @@ static int d3d12va_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint
static int d3d12va_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
const MpegEncContext *s = avctx->priv_data;
-D3D12DecodePictureContext *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
+D3D12DecodePictureContext *ctx_pic = s->cur_pic_ptr->hwaccel_picture_private;

if (ctx_pic->slice_count >= MAX_SLICES) {
return AVERROR(ERANGE);
@@ -88,7 +88,7 @@ static int d3d12va_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buff
static int update_input_arguments(AVCodecContext *avctx, D3D12_VIDEO_DECODE_INPUT_STREAM_ARGUMENTS *input_args, ID3D12Resource *buffer)
{
const MpegEncContext *s = avctx->priv_data;
-D3D12DecodePictureContext *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
+D3D12DecodePictureContext *ctx_pic = s->cur_pic_ptr->hwaccel_picture_private;

const int is_field = s->picture_structure != PICT_FRAME;
const unsigned mb_count = s->mb_width * (s->mb_height >> is_field);
@@ -137,12 +137,12 @@ static int d3d12va_mpeg2_end_frame(AVCodecContext *avctx)
{
int ret;
MpegEncContext *s = avctx->priv_data;
-D3D12DecodePictureContext *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
+D3D12DecodePictureContext *ctx_pic = s->cur_pic_ptr->hwaccel_picture_private;

if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
return -1;

-ret = ff_d3d12va_common_end_frame(avctx, s->current_picture_ptr->f, &ctx_pic->pp, sizeof(ctx_pic->pp),
+ret = ff_d3d12va_common_end_frame(avctx, s->cur_pic_ptr->f, &ctx_pic->pp, sizeof(ctx_pic->pp),
&ctx_pic->qm, sizeof(ctx_pic->qm), update_input_arguments);
if (!ret)
ff_mpeg_draw_horiz_band(s, 0, avctx->height);
@@ -45,7 +45,7 @@ static int d3d12va_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_
{
const VC1Context *v = avctx->priv_data;
D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx);
-D3D12DecodePictureContext *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
+D3D12DecodePictureContext *ctx_pic = v->s.cur_pic_ptr->hwaccel_picture_private;

if (!ctx)
return -1;
@@ -67,7 +67,7 @@ static int d3d12va_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_
static int d3d12va_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
const VC1Context *v = avctx->priv_data;
-D3D12DecodePictureContext *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
+D3D12DecodePictureContext *ctx_pic = v->s.cur_pic_ptr->hwaccel_picture_private;

if (ctx_pic->slice_count >= MAX_SLICES) {
return AVERROR(ERANGE);
@@ -93,7 +93,7 @@ static int update_input_arguments(AVCodecContext *avctx, D3D12_VIDEO_DECODE_INPU
{
const VC1Context *v = avctx->priv_data;
const MpegEncContext *s = &v->s;
-D3D12DecodePictureContext *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
+D3D12DecodePictureContext *ctx_pic = s->cur_pic_ptr->hwaccel_picture_private;
D3D12_VIDEO_DECODE_FRAME_ARGUMENT *args = &input_args->FrameArguments[input_args->NumFrameArguments++];

const unsigned mb_count = s->mb_width * (s->mb_height >> v->field_mode);
@@ -151,12 +151,12 @@ static int update_input_arguments(AVCodecContext *avctx, D3D12_VIDEO_DECODE_INPU
static int d3d12va_vc1_end_frame(AVCodecContext *avctx)
{
const VC1Context *v = avctx->priv_data;
-D3D12DecodePictureContext *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
+D3D12DecodePictureContext *ctx_pic = v->s.cur_pic_ptr->hwaccel_picture_private;

if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
return -1;

-return ff_d3d12va_common_end_frame(avctx, v->s.current_picture_ptr->f,
+return ff_d3d12va_common_end_frame(avctx, v->s.cur_pic_ptr->f,
&ctx_pic->pp, sizeof(ctx_pic->pp),
NULL, 0,
update_input_arguments);
@@ -45,17 +45,17 @@ void ff_dxva2_mpeg2_fill_picture_parameters(AVCodecContext *avctx,
DXVA_PictureParameters *pp)
{
const struct MpegEncContext *s = avctx->priv_data;
-const Picture *current_picture = s->current_picture_ptr;
+const Picture *current_picture = s->cur_pic_ptr;
int is_field = s->picture_structure != PICT_FRAME;

memset(pp, 0, sizeof(*pp));
pp->wDeblockedPictureIndex = 0;
if (s->pict_type != AV_PICTURE_TYPE_I)
-pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->last_picture.f, 0);
+pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->last_pic.f, 0);
else
pp->wForwardRefPictureIndex = 0xffff;
if (s->pict_type == AV_PICTURE_TYPE_B)
-pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->next_picture.f, 0);
+pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->next_pic.f, 0);
else
pp->wBackwardRefPictureIndex = 0xffff;
pp->wDecodedPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, current_picture->f, 1);
@@ -157,7 +157,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
const struct MpegEncContext *s = avctx->priv_data;
AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
struct dxva2_picture_context *ctx_pic =
-s->current_picture_ptr->hwaccel_picture_private;
+s->cur_pic_ptr->hwaccel_picture_private;
const int is_field = s->picture_structure != PICT_FRAME;
const unsigned mb_count = s->mb_width * (s->mb_height >> is_field);
void *dxva_data_ptr;
@@ -260,7 +260,7 @@ static int dxva2_mpeg2_start_frame(AVCodecContext *avctx,
const struct MpegEncContext *s = avctx->priv_data;
AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
struct dxva2_picture_context *ctx_pic =
-s->current_picture_ptr->hwaccel_picture_private;
+s->cur_pic_ptr->hwaccel_picture_private;

if (!DXVA_CONTEXT_VALID(avctx, ctx))
return -1;
@@ -280,7 +280,7 @@ static int dxva2_mpeg2_decode_slice(AVCodecContext *avctx,
{
const struct MpegEncContext *s = avctx->priv_data;
struct dxva2_picture_context *ctx_pic =
-s->current_picture_ptr->hwaccel_picture_private;
+s->cur_pic_ptr->hwaccel_picture_private;
unsigned position;

if (ctx_pic->slice_count >= MAX_SLICES) {
@@ -302,12 +302,12 @@ static int dxva2_mpeg2_end_frame(AVCodecContext *avctx)
{
struct MpegEncContext *s = avctx->priv_data;
struct dxva2_picture_context *ctx_pic =
-s->current_picture_ptr->hwaccel_picture_private;
+s->cur_pic_ptr->hwaccel_picture_private;
int ret;

if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
return -1;
-ret = ff_dxva2_common_end_frame(avctx, s->current_picture_ptr->f,
+ret = ff_dxva2_common_end_frame(avctx, s->cur_pic_ptr->f,
&ctx_pic->pp, sizeof(ctx_pic->pp),
&ctx_pic->qm, sizeof(ctx_pic->qm),
commit_bitstream_and_slice_buffer);
@@ -46,7 +46,7 @@ void ff_dxva2_vc1_fill_picture_parameters(AVCodecContext *avctx,
{
const VC1Context *v = avctx->priv_data;
const MpegEncContext *s = &v->s;
-const Picture *current_picture = s->current_picture_ptr;
+const Picture *current_picture = s->cur_pic_ptr;
int intcomp = 0;

// determine if intensity compensation is needed
@@ -59,11 +59,11 @@ void ff_dxva2_vc1_fill_picture_parameters(AVCodecContext *avctx,

memset(pp, 0, sizeof(*pp));
if (s->pict_type != AV_PICTURE_TYPE_I && !v->bi_type)
-pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->last_picture.f, 0);
+pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->last_pic.f, 0);
else
pp->wForwardRefPictureIndex = 0xffff;
if (s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type)
-pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->next_picture.f, 0);
+pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->next_pic.f, 0);
else
pp->wBackwardRefPictureIndex = 0xffff;
pp->wDecodedPictureIndex =
@@ -191,7 +191,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
const VC1Context *v = avctx->priv_data;
AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
const MpegEncContext *s = &v->s;
-struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
+struct dxva2_picture_context *ctx_pic = s->cur_pic_ptr->hwaccel_picture_private;

static const uint8_t start_code[] = { 0, 0, 1, 0x0d };
const unsigned start_code_size = avctx->codec_id == AV_CODEC_ID_VC1 ? sizeof(start_code) : 0;
@@ -317,7 +317,7 @@ static int dxva2_vc1_start_frame(AVCodecContext *avctx,
{
const VC1Context *v = avctx->priv_data;
AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
-struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
+struct dxva2_picture_context *ctx_pic = v->s.cur_pic_ptr->hwaccel_picture_private;

if (!DXVA_CONTEXT_VALID(avctx, ctx))
return -1;
@@ -336,7 +336,7 @@ static int dxva2_vc1_decode_slice(AVCodecContext *avctx,
uint32_t size)
{
const VC1Context *v = avctx->priv_data;
-const Picture *current_picture = v->s.current_picture_ptr;
+const Picture *current_picture = v->s.cur_pic_ptr;
struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
unsigned position;

@@ -364,13 +364,13 @@ static int dxva2_vc1_decode_slice(AVCodecContext *avctx,
static int dxva2_vc1_end_frame(AVCodecContext *avctx)
{
VC1Context *v = avctx->priv_data;
-struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
+struct dxva2_picture_context *ctx_pic = v->s.cur_pic_ptr->hwaccel_picture_private;
int ret;

if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
return -1;

-ret = ff_dxva2_common_end_frame(avctx, v->s.current_picture_ptr->f,
+ret = ff_dxva2_common_end_frame(avctx, v->s.cur_pic_ptr->f,
&ctx_pic->pp, sizeof(ctx_pic->pp),
NULL, 0,
commit_bitstream_and_slice_buffer);
@@ -228,17 +228,17 @@ static int h261_decode_mb_skipped(H261DecContext *h, int mba1, int mba2)

s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
-s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
h->common.mtype &= ~MB_TYPE_H261_FIL;

-if (s->current_picture.motion_val[0]) {
+if (s->cur_pic.motion_val[0]) {
int b_stride = 2*s->mb_width + 1;
int b_xy = 2 * s->mb_x + (2 * s->mb_y) * b_stride;
-s->current_picture.motion_val[0][b_xy][0] = s->mv[0][0][0];
-s->current_picture.motion_val[0][b_xy][1] = s->mv[0][0][1];
+s->cur_pic.motion_val[0][b_xy][0] = s->mv[0][0][0];
+s->cur_pic.motion_val[0][b_xy][1] = s->mv[0][0][1];
}

ff_mpv_reconstruct_mb(s, s->block);
@@ -452,22 +452,22 @@ static int h261_decode_mb(H261DecContext *h)
cbp = get_vlc2(&s->gb, h261_cbp_vlc, H261_CBP_VLC_BITS, 1) + 1;

if (s->mb_intra) {
-s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
+s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
goto intra;
}

//set motion vectors
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
-s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = h->current_mv_x * 2; // gets divided by 2 in motion compensation
s->mv[0][0][1] = h->current_mv_y * 2;

-if (s->current_picture.motion_val[0]) {
+if (s->cur_pic.motion_val[0]) {
int b_stride = 2*s->mb_width + 1;
int b_xy = 2 * s->mb_x + (2 * s->mb_y) * b_stride;
-s->current_picture.motion_val[0][b_xy][0] = s->mv[0][0][0];
-s->current_picture.motion_val[0][b_xy][1] = s->mv[0][0][1];
+s->cur_pic.motion_val[0][b_xy][0] = s->mv[0][0][0];
+s->cur_pic.motion_val[0][b_xy][1] = s->mv[0][0][1];
}

intra:
@@ -649,12 +649,12 @@ static int h261_decode_frame(AVCodecContext *avctx, AVFrame *pict,
}
ff_mpv_frame_end(s);

-av_assert0(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
-av_assert0(s->current_picture.f->pict_type == s->pict_type);
+av_assert0(s->cur_pic.f->pict_type == s->cur_pic_ptr->f->pict_type);
+av_assert0(s->cur_pic.f->pict_type == s->pict_type);

-if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
+if ((ret = av_frame_ref(pict, s->cur_pic_ptr->f)) < 0)
return ret;
-ff_print_debug_info(s, s->current_picture_ptr, pict);
+ff_print_debug_info(s, s->cur_pic_ptr, pict);

*got_frame = 1;
@@ -73,21 +73,21 @@ void ff_h263_update_motion_val(MpegEncContext * s){
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
}
-s->current_picture.ref_index[0][4*mb_xy ] =
-s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
-s->current_picture.ref_index[0][4*mb_xy + 2] =
-s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
+s->cur_pic.ref_index[0][4*mb_xy ] =
+s->cur_pic.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+s->cur_pic.ref_index[0][4*mb_xy + 2] =
+s->cur_pic.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
}

/* no update if 8X8 because it has been done during parsing */
-s->current_picture.motion_val[0][xy][0] = motion_x;
-s->current_picture.motion_val[0][xy][1] = motion_y;
-s->current_picture.motion_val[0][xy + 1][0] = motion_x;
-s->current_picture.motion_val[0][xy + 1][1] = motion_y;
-s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
-s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
-s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
-s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
+s->cur_pic.motion_val[0][xy][0] = motion_x;
+s->cur_pic.motion_val[0][xy][1] = motion_y;
+s->cur_pic.motion_val[0][xy + 1][0] = motion_x;
+s->cur_pic.motion_val[0][xy + 1][1] = motion_y;
+s->cur_pic.motion_val[0][xy + wrap][0] = motion_x;
+s->cur_pic.motion_val[0][xy + wrap][1] = motion_y;
+s->cur_pic.motion_val[0][xy + 1 + wrap][0] = motion_x;
+s->cur_pic.motion_val[0][xy + 1 + wrap][1] = motion_y;
}
}

@@ -104,7 +104,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
Diag Top
Left Center
*/
-if (!IS_SKIP(s->current_picture.mb_type[xy])) {
+if (!IS_SKIP(s->cur_pic.mb_type[xy])) {
qp_c= s->qscale;
s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize, linesize, qp_c);
s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
@@ -114,10 +114,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_y){
int qp_dt, qp_tt, qp_tc;

-if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
+if (IS_SKIP(s->cur_pic.mb_type[xy - s->mb_stride]))
qp_tt=0;
else
-qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];
+qp_tt = s->cur_pic.qscale_table[xy - s->mb_stride];

if(qp_c)
qp_tc= qp_c;
@@ -137,10 +137,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8, linesize, qp_tt);

if(s->mb_x){
-if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
+if (qp_tt || IS_SKIP(s->cur_pic.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt;
else
-qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];
+qp_dt = s->cur_pic.qscale_table[xy - 1 - s->mb_stride];

if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt];
@@ -159,10 +159,10 @@ void ff_h263_loop_filter(MpegEncContext * s){

if(s->mb_x){
int qp_lc;
-if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
+if (qp_c || IS_SKIP(s->cur_pic.mb_type[xy - 1]))
qp_lc= qp_c;
else
-qp_lc = s->current_picture.qscale_table[xy - 1];
+qp_lc = s->cur_pic.qscale_table[xy - 1];

if(qp_lc){
s->h263dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
@@ -184,7 +184,7 @@ int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
static const int off[4]= {2, 1, 1, -1};

wrap = s->b8_stride;
-mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
+mot_val = s->cur_pic.motion_val[dir] + s->block_index[block];

A = mot_val[ - 1];
/* special case for first (slice) line */
@@ -432,22 +432,22 @@ int ff_h263_decode_frame(AVCodecContext *avctx, AVFrame *pict,
/* no supplementary picture */
if (buf_size == 0) {
/* special case for last picture */
-if (s->low_delay == 0 && s->next_picture_ptr) {
-if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
+if (s->low_delay == 0 && s->next_pic_ptr) {
+if ((ret = av_frame_ref(pict, s->next_pic_ptr->f)) < 0)
return ret;
-s->next_picture_ptr = NULL;
+s->next_pic_ptr = NULL;

*got_frame = 1;
-} else if (s->skipped_last_frame && s->current_picture_ptr) {
+} else if (s->skipped_last_frame && s->cur_pic_ptr) {
/* Output the last picture we decoded again if the stream ended with
* an NVOP */
-if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
+if ((ret = av_frame_ref(pict, s->cur_pic_ptr->f)) < 0)
return ret;
/* Copy props from the last input packet. Otherwise, props from the last
* returned picture would be reused */
if ((ret = ff_decode_frame_props(avctx, pict)) < 0)
return ret;
-s->current_picture_ptr = NULL;
+s->cur_pic_ptr = NULL;

*got_frame = 1;
}
@@ -561,7 +561,7 @@ retry:
s->gob_index = H263_GOB_HEIGHT(s->height);

/* skip B-frames if we don't have reference frames */
-if (!s->last_picture_ptr &&
+if (!s->last_pic_ptr &&
(s->pict_type == AV_PICTURE_TYPE_B || s->droppable))
return get_consumed_bytes(s, buf_size);
if ((avctx->skip_frame >= AVDISCARD_NONREF &&
@@ -647,21 +647,21 @@ frame_end:
if (!s->divx_packed && avctx->hwaccel)
ff_thread_finish_setup(avctx);

-av_assert1(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
-av_assert1(s->current_picture.f->pict_type == s->pict_type);
+av_assert1(s->cur_pic.f->pict_type == s->cur_pic_ptr->f->pict_type);
+av_assert1(s->cur_pic.f->pict_type == s->pict_type);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
+if ((ret = av_frame_ref(pict, s->cur_pic_ptr->f)) < 0)
return ret;
-ff_print_debug_info(s, s->current_picture_ptr, pict);
-ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
-} else if (s->last_picture_ptr) {
-if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
+ff_print_debug_info(s, s->cur_pic_ptr, pict);
+ff_mpv_export_qp_table(s, pict, s->cur_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
+} else if (s->last_pic_ptr) {
+if ((ret = av_frame_ref(pict, s->last_pic_ptr->f)) < 0)
return ret;
-ff_print_debug_info(s, s->last_picture_ptr, pict);
-ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
+ff_print_debug_info(s, s->last_pic_ptr, pict);
+ff_mpv_export_qp_table(s, pict, s->last_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
}

-if (s->last_picture_ptr || s->low_delay) {
+if (s->last_pic_ptr || s->low_delay) {
if ( pict->format == AV_PIX_FMT_YUV420P
&& (s->codec_tag == AV_RL32("GEOV") || s->codec_tag == AV_RL32("GEOX"))) {
for (int p = 0; p < 3; p++) {
@@ -357,20 +357,20 @@ static void preview_obmc(MpegEncContext *s){
do{
if (get_bits1(&s->gb)) {
/* skip mb */
-mot_val = s->current_picture.motion_val[0][s->block_index[0]];
+mot_val = s->cur_pic.motion_val[0][s->block_index[0]];
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0;
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= 0;

-s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end;
}
cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc, INTER_MCBPC_VLC_BITS, 2);
}while(cbpc == 20);

if(cbpc & 4){
-s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
+s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
}else{
get_vlc2(&s->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
if (cbpc & 8) {
@@ -382,7 +382,7 @@ static void preview_obmc(MpegEncContext *s){
}

if ((cbpc & 16) == 0) {
-s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
@@ -400,7 +400,7 @@ static void preview_obmc(MpegEncContext *s){
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
-s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
+s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
@@ -750,12 +750,12 @@ static inline void set_one_direct_mv(MpegEncContext *s, Picture *p, int i)
static int set_direct_mv(MpegEncContext *s)
{
const int mb_index = s->mb_x + s->mb_y * s->mb_stride;
-Picture *p = &s->next_picture;
+Picture *p = &s->next_pic;
int colocated_mb_type = p->mb_type[mb_index];
int i;

if (s->codec_tag == AV_RL32("U263") && p->f->pict_type == AV_PICTURE_TYPE_I) {
-p = &s->last_picture;
+p = &s->last_pic;
colocated_mb_type = p->mb_type[mb_index];
}

@@ -803,7 +803,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
-s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = !(s->obmc | s->loop_filter);
@@ -841,7 +841,7 @@ int ff_h263_decode_mb(MpegEncContext *s,

s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
-s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
@@ -866,7 +866,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
} else {
-s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
+s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@@ -894,8 +894,8 @@ int ff_h263_decode_mb(MpegEncContext *s,
} else if(s->pict_type==AV_PICTURE_TYPE_B) {
int mb_type;
const int stride= s->b8_stride;
-int16_t *mot_val0 = s->current_picture.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
-int16_t *mot_val1 = s->current_picture.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
+int16_t *mot_val0 = s->cur_pic.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
+int16_t *mot_val1 = s->cur_pic.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;

//FIXME ugly
@@ -1007,7 +1007,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
}

-s->current_picture.mb_type[xy] = mb_type;
+s->cur_pic.mb_type[xy] = mb_type;
} else { /* I-Frame */
do{
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc, INTRA_MCBPC_VLC_BITS, 2);
@@ -1022,11 +1022,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
dquant = cbpc & 4;
s->mb_intra = 1;
intra:
-s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
+s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
if (s->h263_aic) {
s->ac_pred = get_bits1(&s->gb);
if(s->ac_pred){
-s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
+s->cur_pic.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;

s->h263_aic_dir = get_bits1(&s->gb);
}
@@ -271,7 +271,7 @@ void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
*/
void ff_clean_h263_qscales(MpegEncContext *s){
int i;
-int8_t * const qscale_table = s->current_picture.qscale_table;
+int8_t * const qscale_table = s->cur_pic.qscale_table;

ff_init_qscale_tab(s);

@@ -565,8 +565,8 @@ void ff_h263_encode_mb(MpegEncContext * s,
/* motion vectors: 8x8 mode*/
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

-motion_x = s->current_picture.motion_val[0][s->block_index[i]][0];
-motion_y = s->current_picture.motion_val[0][s->block_index[i]][1];
+motion_x = s->cur_pic.motion_val[0][s->block_index[i]][0];
+motion_y = s->cur_pic.motion_val[0][s->block_index[i]][1];
if (!s->umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, 1);
@@ -692,15 +692,15 @@ void ff_h263_update_mb(MpegEncContext *s)
{
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

-if (s->current_picture.mbskip_table)
-s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;
+if (s->cur_pic.mbskip_table)
+s->cur_pic.mbskip_table[mb_xy] = s->mb_skipped;

if (s->mv_type == MV_TYPE_8X8)
-s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
+s->cur_pic.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra)
-s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
+s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA;
else
-s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
+s->cur_pic.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;

ff_h263_update_motion_val(s);
}
@@ -510,16 +510,16 @@ static inline void set_p_mv_tables(MpegEncContext * s, int mx, int my, int mv4)
if(mv4){
int mot_xy= s->block_index[0];

-s->current_picture.motion_val[0][mot_xy ][0] = mx;
-s->current_picture.motion_val[0][mot_xy ][1] = my;
-s->current_picture.motion_val[0][mot_xy + 1][0] = mx;
-s->current_picture.motion_val[0][mot_xy + 1][1] = my;
+s->cur_pic.motion_val[0][mot_xy ][0] = mx;
+s->cur_pic.motion_val[0][mot_xy ][1] = my;
+s->cur_pic.motion_val[0][mot_xy + 1][0] = mx;
+s->cur_pic.motion_val[0][mot_xy + 1][1] = my;

mot_xy += s->b8_stride;
-s->current_picture.motion_val[0][mot_xy ][0] = mx;
-s->current_picture.motion_val[0][mot_xy ][1] = my;
-s->current_picture.motion_val[0][mot_xy + 1][0] = mx;
-s->current_picture.motion_val[0][mot_xy + 1][1] = my;
+s->cur_pic.motion_val[0][mot_xy ][0] = mx;
+s->cur_pic.motion_val[0][mot_xy ][1] = my;
+s->cur_pic.motion_val[0][mot_xy + 1][0] = mx;
+s->cur_pic.motion_val[0][mot_xy + 1][1] = my;
}
}

@@ -601,8 +601,8 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
c->ymax = - 16*s->mb_y + s->height - 8*(block>>1);
}

-P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
-P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
+P_LEFT[0] = s->cur_pic.motion_val[0][mot_xy - 1][0];
+P_LEFT[1] = s->cur_pic.motion_val[0][mot_xy - 1][1];

if (P_LEFT[0] > c->xmax * (1 << shift)) P_LEFT[0] = c->xmax * (1 << shift);

@@ -611,10 +611,10 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
c->pred_x= pred_x4= P_LEFT[0];
c->pred_y= pred_y4= P_LEFT[1];
} else {
-P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0];
-P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1];
-P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][0];
-P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][1];
+P_TOP[0] = s->cur_pic.motion_val[0][mot_xy - mot_stride ][0];
+P_TOP[1] = s->cur_pic.motion_val[0][mot_xy - mot_stride ][1];
+P_TOPRIGHT[0] = s->cur_pic.motion_val[0][mot_xy - mot_stride + off[block]][0];
+P_TOPRIGHT[1] = s->cur_pic.motion_val[0][mot_xy - mot_stride + off[block]][1];
if (P_TOP[1] > c->ymax * (1 << shift)) P_TOP[1] = c->ymax * (1 << shift);
if (P_TOPRIGHT[0] < c->xmin * (1 << shift)) P_TOPRIGHT[0] = c->xmin * (1 << shift);
if (P_TOPRIGHT[0] > c->xmax * (1 << shift)) P_TOPRIGHT[0] = c->xmax * (1 << shift);
@@ -675,8 +675,8 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
my4_sum+= my4;
}

-s->current_picture.motion_val[0][s->block_index[block]][0] = mx4;
-s->current_picture.motion_val[0][s->block_index[block]][1] = my4;
+s->cur_pic.motion_val[0][s->block_index[block]][0] = mx4;
+s->cur_pic.motion_val[0][s->block_index[block]][1] = my4;

if(mx4 != mx || my4 != my) same=0;
}
@@ -686,7 +686,7 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)

if (s->mecc.me_sub_cmp[0] != s->mecc.mb_cmp[0]) {
dmin_sum += s->mecc.mb_cmp[0](s,
-s->new_picture->data[0] +
+s->new_pic->data[0] +
s->mb_x * 16 + s->mb_y * 16 * stride,
c->scratchpad, stride, 16);
}
@@ -703,15 +703,15 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
offset= (s->mb_x*8 + (mx>>1)) + (s->mb_y*8 + (my>>1))*s->uvlinesize;

if(s->no_rounding){
-s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.f->data[1] + offset, s->uvlinesize, 8);
-s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f->data[2] + offset, s->uvlinesize, 8);
+s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_pic.f->data[1] + offset, s->uvlinesize, 8);
+s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_pic.f->data[2] + offset, s->uvlinesize, 8);
}else{
-s->hdsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.f->data[1] + offset, s->uvlinesize, 8);
-s->hdsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f->data[2] + offset, s->uvlinesize, 8);
+s->hdsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_pic.f->data[1] + offset, s->uvlinesize, 8);
+s->hdsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_pic.f->data[2] + offset, s->uvlinesize, 8);
}

-dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture->data[1] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad, s->uvlinesize, 8);
-dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture->data[2] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad + 8, s->uvlinesize, 8);
+dmin_sum += s->mecc.mb_cmp[1](s, s->new_pic->data[1] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad, s->uvlinesize, 8);
+dmin_sum += s->mecc.mb_cmp[1](s, s->new_pic->data[2] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad + 8, s->uvlinesize, 8);
}

c->pred_x= mx;
@@ -899,7 +899,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
const int shift= 1+s->quarter_sample;
int mb_type=0;

-init_ref(c, s->new_picture->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
+init_ref(c, s->new_pic->data, s->last_pic.f->data, NULL, 16*mb_x, 16*mb_y, 0);

av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
av_assert0(s->linesize == c->stride);
@@ -927,17 +927,17 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
const int mot_stride = s->b8_stride;
const int mot_xy = s->block_index[0];

-P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
-P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
+P_LEFT[0] = s->cur_pic.motion_val[0][mot_xy - 1][0];
+P_LEFT[1] = s->cur_pic.motion_val[0][mot_xy - 1][1];

if (P_LEFT[0] > (c->xmax << shift))
P_LEFT[0] = c->xmax << shift;

if (!s->first_slice_line) {
-P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0];
-P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1];
-P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][0];
-P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][1];
+P_TOP[0] = s->cur_pic.motion_val[0][mot_xy - mot_stride ][0];
+P_TOP[1] = s->cur_pic.motion_val[0][mot_xy - mot_stride ][1];
+P_TOPRIGHT[0] = s->cur_pic.motion_val[0][mot_xy - mot_stride + 2][0];
+P_TOPRIGHT[1] = s->cur_pic.motion_val[0][mot_xy - mot_stride + 2][1];
if (P_TOP[1] > (c->ymax << shift))
P_TOP[1] = c->ymax << shift;
if (P_TOPRIGHT[0] < (c->xmin * (1 << shift)))
@@ -1048,9 +1048,9 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,

if(intra_score < dmin){
mb_type= CANDIDATE_MB_TYPE_INTRA;
-s->current_picture.mb_type[mb_y*s->mb_stride + mb_x] = CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup
+s->cur_pic.mb_type[mb_y*s->mb_stride + mb_x] = CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup
}else
-s->current_picture.mb_type[mb_y*s->mb_stride + mb_x] = 0;
+s->cur_pic.mb_type[mb_y*s->mb_stride + mb_x] = 0;

{
int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
@@ -1070,7 +1070,7 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
int P[10][2];
const int shift= 1+s->quarter_sample;
const int xy= mb_x + mb_y*s->mb_stride;
-init_ref(c, s->new_picture->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
+init_ref(c, s->new_pic->data, s->last_pic.f->data, NULL, 16*mb_x, 16*mb_y, 0);

av_assert0(s->quarter_sample==0 || s->quarter_sample==1);

@@ -1403,7 +1403,7 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
ymin= xmin=(-32)>>shift;
ymax= xmax= 31>>shift;

-if (IS_8X8(s->next_picture.mb_type[mot_xy])) {
+if (IS_8X8(s->next_pic.mb_type[mot_xy])) {
s->mv_type= MV_TYPE_8X8;
}else{
s->mv_type= MV_TYPE_16X16;
@@ -1413,8 +1413,8 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
int index= s->block_index[i];
int min, max;

-c->co_located_mv[i][0] = s->next_picture.motion_val[0][index][0];
-c->co_located_mv[i][1] = s->next_picture.motion_val[0][index][1];
+c->co_located_mv[i][0] = s->next_pic.motion_val[0][index][0];
+c->co_located_mv[i][1] = s->next_pic.motion_val[0][index][1];
c->direct_basis_mv[i][0]= c->co_located_mv[i][0]*time_pb/time_pp + ((i& 1)<<(shift+3));
c->direct_basis_mv[i][1]= c->co_located_mv[i][1]*time_pb/time_pp + ((i>>1)<<(shift+3));
// c->direct_basis_mv[1][i][0]= c->co_located_mv[i][0]*(time_pb - time_pp)/time_pp + ((i &1)<<(shift+3);
@@ -1495,14 +1495,14 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
int fmin, bmin, dmin, fbmin, bimin, fimin;
int type=0;
const int xy = mb_y*s->mb_stride + mb_x;
-init_ref(c, s->new_picture->data, s->last_picture.f->data,
-s->next_picture.f->data, 16 * mb_x, 16 * mb_y, 2);
+init_ref(c, s->new_pic->data, s->last_pic.f->data,
+s->next_pic.f->data, 16 * mb_x, 16 * mb_y, 2);

get_limits(s, 16*mb_x, 16*mb_y);

c->skip=0;

-if (s->codec_id == AV_CODEC_ID_MPEG4 && s->next_picture.mbskip_table[xy]) {
+if (s->codec_id == AV_CODEC_ID_MPEG4 && s->next_pic.mbskip_table[xy]) {
int score= direct_search(s, mb_x, mb_y); //FIXME just check 0,0

score= ((unsigned)(score*score + 128*256))>>16;
@@ -1681,14 +1681,14 @@ void ff_fix_long_p_mvs(MpegEncContext * s, int type)
int block;
for(block=0; block<4; block++){
int off= (block& 1) + (block>>1)*wrap;
-int mx = s->current_picture.motion_val[0][ xy + off ][0];
-int my = s->current_picture.motion_val[0][ xy + off ][1];
+int mx = s->cur_pic.motion_val[0][ xy + off ][0];
+int my = s->cur_pic.motion_val[0][ xy + off ][1];

if( mx >=range || mx <-range
|| my >=range || my <-range){
s->mb_type[i] &= ~CANDIDATE_MB_TYPE_INTER4V;
s->mb_type[i] |= type;
-s->current_picture.mb_type[i] = type;
+s->cur_pic.mb_type[i] = type;
}
}
}
@@ -437,21 +437,21 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
if (s->mb_skip_run-- != 0) {
if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1;
-s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
+s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else {
int mb_type;

if (s->mb_x)
-mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
+mb_type = s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
else
// FIXME not sure if this is allowed in MPEG at all
-mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
+mb_type = s->cur_pic.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
if (IS_INTRA(mb_type)) {
av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
return AVERROR_INVALIDDATA;
}
-s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
+s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
mb_type | MB_TYPE_SKIP;

if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
@@ -784,7 +784,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
}
}

-s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
+s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;

return 0;
}
@@ -1292,36 +1292,36 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
return ret;

if (s->picture_structure != PICT_FRAME) {
-s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
-(s->picture_structure == PICT_TOP_FIELD);
+s->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
+(s->picture_structure == PICT_TOP_FIELD);

for (int i = 0; i < 3; i++) {
if (s->picture_structure == PICT_BOTTOM_FIELD) {
-s->current_picture.f->data[i] = FF_PTR_ADD(s->current_picture.f->data[i],
-s->current_picture.f->linesize[i]);
+s->cur_pic.f->data[i] = FF_PTR_ADD(s->cur_pic.f->data[i],
+s->cur_pic.f->linesize[i]);
}
-s->current_picture.f->linesize[i] *= 2;
-s->last_picture.f->linesize[i] *= 2;
-s->next_picture.f->linesize[i] *= 2;
+s->cur_pic.f->linesize[i] *= 2;
+s->last_pic.f->linesize[i] *= 2;
+s->next_pic.f->linesize[i] *= 2;
}
}

ff_mpeg_er_frame_start(s);

/* first check if we must repeat the frame */
-s->current_picture_ptr->f->repeat_pict = 0;
+s->cur_pic_ptr->f->repeat_pict = 0;
if (s->repeat_first_field) {
if (s->progressive_sequence) {
if (s->top_field_first)
-s->current_picture_ptr->f->repeat_pict = 4;
+s->cur_pic_ptr->f->repeat_pict = 4;
else
-s->current_picture_ptr->f->repeat_pict = 2;
+s->cur_pic_ptr->f->repeat_pict = 2;
} else if (s->progressive_frame) {
-s->current_picture_ptr->f->repeat_pict = 1;
+s->cur_pic_ptr->f->repeat_pict = 1;
}
}

-ret = ff_frame_new_side_data(s->avctx, s->current_picture_ptr->f,
+ret = ff_frame_new_side_data(s->avctx, s->cur_pic_ptr->f,
AV_FRAME_DATA_PANSCAN, sizeof(s1->pan_scan),
&pan_scan);
if (ret < 0)
@@ -1331,14 +1331,14 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)

if (s1->a53_buf_ref) {
ret = ff_frame_new_side_data_from_buf(
-s->avctx, s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
+s->avctx, s->cur_pic_ptr->f, AV_FRAME_DATA_A53_CC,
&s1->a53_buf_ref, NULL);
if (ret < 0)
return ret;
}

if (s1->has_stereo3d) {
-AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
+AVStereo3D *stereo = av_stereo3d_create_side_data(s->cur_pic_ptr->f);
if (!stereo)
return AVERROR(ENOMEM);

@@ -1348,7 +1348,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)

if (s1->has_afd) {
AVFrameSideData *sd;
-ret = ff_frame_new_side_data(s->avctx, s->current_picture_ptr->f,
+ret = ff_frame_new_side_data(s->avctx, s->cur_pic_ptr->f,
AV_FRAME_DATA_AFD, 1, &sd);
if (ret < 0)
return ret;
@@ -1360,7 +1360,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_finish_setup(avctx);
} else { // second field
-if (!s->current_picture_ptr) {
+if (!s->cur_pic_ptr) {
av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
return AVERROR_INVALIDDATA;
}
@@ -1377,10 +1377,10 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
return ret;

for (int i = 0; i < 3; i++) {
-s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
+s->cur_pic.f->data[i] = s->cur_pic_ptr->f->data[i];
if (s->picture_structure == PICT_BOTTOM_FIELD)
-s->current_picture.f->data[i] +=
-s->current_picture_ptr->f->linesize[i];
+s->cur_pic.f->data[i] +=
+s->cur_pic_ptr->f->linesize[i];
}
}

@@ -1507,7 +1507,7 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
return ret;

// Note motion_val is normally NULL unless we want to extract the MVs.
-if (s->current_picture.motion_val[0]) {
+if (s->cur_pic.motion_val[0]) {
const int wrap = s->b8_stride;
int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
@@ -1527,12 +1527,12 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
motion_y = s->mv[dir][i][1];
}

-s->current_picture.motion_val[dir][xy][0] = motion_x;
-s->current_picture.motion_val[dir][xy][1] = motion_y;
-s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
-s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
-s->current_picture.ref_index [dir][b8_xy] =
-s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
+s->cur_pic.motion_val[dir][xy][0] = motion_x;
+s->cur_pic.motion_val[dir][xy][1] = motion_y;
+s->cur_pic.motion_val[dir][xy + 1][0] = motion_x;
+s->cur_pic.motion_val[dir][xy + 1][1] = motion_y;
+s->cur_pic.ref_index [dir][b8_xy] =
+s->cur_pic.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
av_assert2(s->field_select[dir][i] == 0 ||
s->field_select[dir][i] == 1);
}
@@ -1735,7 +1735,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;

-if (!s->context_initialized || !s->current_picture_ptr)
+if (!s->context_initialized || !s->cur_pic_ptr)
return 0;

if (s->avctx->hwaccel) {
@@ -1756,20 +1756,20 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
ff_mpv_frame_end(s);

if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-int ret = av_frame_ref(pict, s->current_picture_ptr->f);
+int ret = av_frame_ref(pict, s->cur_pic_ptr->f);
if (ret < 0)
return ret;
-ff_print_debug_info(s, s->current_picture_ptr, pict);
-ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
+ff_print_debug_info(s, s->cur_pic_ptr, pict);
+ff_mpv_export_qp_table(s, pict, s->cur_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
*got_output = 1;
} else {
/* latency of 1 frame for I- and P-frames */
-if (s->last_picture_ptr && !s->last_picture_ptr->dummy) {
-int ret = av_frame_ref(pict, s->last_picture_ptr->f);
+if (s->last_pic_ptr && !s->last_pic_ptr->dummy) {
+int ret = av_frame_ref(pict, s->last_pic_ptr->f);
if (ret < 0)
return ret;
-ff_print_debug_info(s, s->last_picture_ptr, pict);
-ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
+ff_print_debug_info(s, s->last_pic_ptr, pict);
+ff_mpv_export_qp_table(s, pict, s->last_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
*got_output = 1;
}
}
@@ -2405,7 +2405,7 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
return AVERROR_INVALIDDATA;
}

-if (!s2->last_picture_ptr) {
+if (!s2->last_pic_ptr) {
/* Skip B-frames if we do not have reference frames and
* GOP is not closed. */
if (s2->pict_type == AV_PICTURE_TYPE_B) {
@@ -2419,7 +2419,7 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
}
if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
s->sync = 1;
-if (!s2->next_picture_ptr) {
+if (!s2->next_pic_ptr) {
/* Skip P-frames if we do not have a reference frame or
* we have an invalid header. */
if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
@@ -2460,7 +2460,7 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
return ret;
}
-if (!s2->current_picture_ptr) {
+if (!s2->cur_pic_ptr) {
av_log(avctx, AV_LOG_ERROR,
"current_picture not initialized\n");
return AVERROR_INVALIDDATA;
@@ -2524,12 +2524,12 @@ static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,

if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
/* special case for last picture */
-if (s2->low_delay == 0 && s2->next_picture_ptr) {
-int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
+if (s2->low_delay == 0 && s2->next_pic_ptr) {
+int ret = av_frame_ref(picture, s2->next_pic_ptr->f);
if (ret < 0)
return ret;

-s2->next_picture_ptr = NULL;
+s2->next_pic_ptr = NULL;

*got_output = 1;
}
@@ -2552,14 +2552,14 @@ static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
}
s->extradata_decoded = 1;
if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
-s2->current_picture_ptr = NULL;
+s2->cur_pic_ptr = NULL;
return ret;
}
}

ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
if (ret<0 || *got_output) {
-s2->current_picture_ptr = NULL;
+s2->cur_pic_ptr = NULL;

if (s->timecode_frame_start != -1 && *got_output) {
char tcbuf[AV_TIMECODE_STR_SIZE];
@@ -290,7 +290,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
AVRational aspect_ratio = s->avctx->sample_aspect_ratio;
int aspect_ratio_info;

-if (!(s->current_picture.f->flags & AV_FRAME_FLAG_KEY))
+if (!(s->cur_pic.f->flags & AV_FRAME_FLAG_KEY))
return;

if (aspect_ratio.num == 0 || aspect_ratio.den == 0)
@@ -382,7 +382,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
put_bits(&s->pb, 2, mpeg12->frame_rate_ext.num-1); // frame_rate_ext_n
put_bits(&s->pb, 5, mpeg12->frame_rate_ext.den-1); // frame_rate_ext_d

-side_data = av_frame_get_side_data(s->current_picture_ptr->f, AV_FRAME_DATA_PANSCAN);
+side_data = av_frame_get_side_data(s->cur_pic_ptr->f, AV_FRAME_DATA_PANSCAN);
if (side_data) {
const AVPanScan *pan_scan = (AVPanScan *)side_data->data;
if (pan_scan->width && pan_scan->height) {
@@ -419,10 +419,10 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
/* time code: we must convert from the real frame rate to a
* fake MPEG frame rate in case of low frame rate */
fps = (framerate.num + framerate.den / 2) / framerate.den;
-time_code = s->current_picture_ptr->coded_picture_number +
+time_code = s->cur_pic_ptr->coded_picture_number +
mpeg12->timecode_frame_start;

-mpeg12->gop_picture_number = s->current_picture_ptr->coded_picture_number;
+mpeg12->gop_picture_number = s->cur_pic_ptr->coded_picture_number;

av_assert0(mpeg12->drop_frame_timecode == !!(mpeg12->tc.flags & AV_TIMECODE_FLAG_DROPFRAME));
if (mpeg12->drop_frame_timecode)
@@ -530,7 +530,7 @@ void ff_mpeg1_encode_picture_header(MpegEncContext *s)
if (s->progressive_sequence)
put_bits(&s->pb, 1, 0); /* no repeat */
else
-put_bits(&s->pb, 1, !!(s->current_picture_ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST));
+put_bits(&s->pb, 1, !!(s->cur_pic_ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST));
/* XXX: optimize the generation of this flag with entropy measures */
s->frame_pred_frame_dct = s->progressive_sequence;

@@ -554,7 +554,7 @@ void ff_mpeg1_encode_picture_header(MpegEncContext *s)
for (i = 0; i < sizeof(svcd_scan_offset_placeholder); i++)
put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]);
}
-side_data = av_frame_get_side_data(s->current_picture_ptr->f,
+side_data = av_frame_get_side_data(s->cur_pic_ptr->f,
AV_FRAME_DATA_STEREO3D);
if (side_data) {
AVStereo3D *stereo = (AVStereo3D *)side_data->data;
@@ -594,7 +594,7 @@ void ff_mpeg1_encode_picture_header(MpegEncContext *s)
}

if (CONFIG_MPEG2VIDEO_ENCODER && mpeg12->a53_cc) {
-side_data = av_frame_get_side_data(s->current_picture_ptr->f,
+side_data = av_frame_get_side_data(s->cur_pic_ptr->f,
AV_FRAME_DATA_A53_CC);
if (side_data) {
if (side_data->size <= A53_MAX_CC_COUNT * 3 && side_data->size % 3 == 0) {
@@ -98,7 +98,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx,
uint16_t time_pb = s->pb_time;
int p_mx, p_my;

-p_mx = s->next_picture.motion_val[0][xy][0];
+p_mx = s->next_pic.motion_val[0][xy][0];
if ((unsigned)(p_mx + tab_bias) < tab_size) {
s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
@@ -108,7 +108,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx,
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
: p_mx * (time_pb - time_pp) / time_pp;
}
-p_my = s->next_picture.motion_val[0][xy][1];
+p_my = s->next_pic.motion_val[0][xy][1];
if ((unsigned)(p_my + tab_bias) < tab_size) {
s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
@@ -129,7 +129,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx,
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
{
const int mb_index = s->mb_x + s->mb_y * s->mb_stride;
-const int colocated_mb_type = s->next_picture.mb_type[mb_index];
+const int colocated_mb_type = s->next_pic.mb_type[mb_index];
uint16_t time_pp;
uint16_t time_pb;
int i;
@@ -145,7 +145,7 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
} else if (IS_INTERLACED(colocated_mb_type)) {
s->mv_type = MV_TYPE_FIELD;
for (i = 0; i < 2; i++) {
-int field_select = s->next_picture.ref_index[0][4 * mb_index + 2 * i];
+int field_select = s->next_pic.ref_index[0][4 * mb_index + 2 * i];
s->field_select[0][i] = field_select;
s->field_select[1][i] = i;
if (s->top_field_first) {
@@ -316,7 +316,7 @@ void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir)
{
int i;
int16_t *ac_val, *ac_val1;
-int8_t *const qscale_table = s->current_picture.qscale_table;
+int8_t *const qscale_table = s->cur_pic.qscale_table;

/* find prediction */
ac_val = &s->ac_val[0][0][0] + s->block_index[n] * 16;
@@ -970,13 +970,13 @@ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
} while (cbpc == 8);

s->cbp_table[xy] = cbpc & 3;
-s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
+s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
s->mb_intra = 1;

if (cbpc & 4)
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);

-s->current_picture.qscale_table[xy] = s->qscale;
+s->cur_pic.qscale_table[xy] = s->qscale;

s->mbintra_table[xy] = 1;
for (i = 0; i < 6; i++) {
@@ -994,7 +994,7 @@ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
s->pred_dir_table[xy] = dir;
} else { /* P/S_TYPE */
int mx, my, pred_x, pred_y, bits;
-int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]];
+int16_t *const mot_val = s->cur_pic.motion_val[0][s->block_index[0]];
const int stride = s->b8_stride * 2;

try_again:
@@ -1007,14 +1007,14 @@ try_again:
/* skip mb */
if (s->pict_type == AV_PICTURE_TYPE_S &&
ctx->vol_sprite_usage == GMC_SPRITE) {
-s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
+s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_GMC |
MB_TYPE_L0;
mx = get_amv(ctx, 0);
my = get_amv(ctx, 1);
} else {
-s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
+s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_L0;
mx = my = 0;
@@ -1047,7 +1047,7 @@ try_again:
s->mb_intra = ((cbpc & 4) != 0);

if (s->mb_intra) {
-s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
+s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
s->mbintra_table[xy] = 1;
mot_val[0] =
mot_val[2] =
@@ -1080,12 +1080,12 @@ try_again:
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return AVERROR_INVALIDDATA;
-s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
+s->cur_pic.mb_type[xy] = MB_TYPE_16x16 |
MB_TYPE_L0;
} else {
mx = get_amv(ctx, 0);
my = get_amv(ctx, 1);
-s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
+s->cur_pic.mb_type[xy] = MB_TYPE_16x16 |
MB_TYPE_GMC |
MB_TYPE_L0;
}
@@ -1100,7 +1100,7 @@ try_again:
mot_val[3 + stride] = my;
} else {
int i;
-s->current_picture.mb_type[xy] = MB_TYPE_8x8 |
+s->cur_pic.mb_type[xy] = MB_TYPE_8x8 |
MB_TYPE_L0;
for (i = 0; i < 4; i++) {
int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@@ -1156,9 +1156,9 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
}

s->cbp_table[xy] |= cbpy << 2;
-s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
+s->cur_pic.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
} else { /* P || S_TYPE */
-if (IS_INTRA(s->current_picture.mb_type[xy])) {
+if (IS_INTRA(s->cur_pic.mb_type[xy])) {
int i;
int dir = 0;
int ac_pred = get_bits1(&s->gb);
@@ -1172,7 +1172,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)

if (s->cbp_table[xy] & 8)
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
-s->current_picture.qscale_table[xy] = s->qscale;
+s->cur_pic.qscale_table[xy] = s->qscale;

for (i = 0; i < 6; i++) {
int dc_pred_dir;
@@ -1188,10 +1188,10 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
}
s->cbp_table[xy] &= 3; // remove dquant
s->cbp_table[xy] |= cbpy << 2;
-s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
+s->cur_pic.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
s->pred_dir_table[xy] = dir;
-} else if (IS_SKIP(s->current_picture.mb_type[xy])) {
-s->current_picture.qscale_table[xy] = s->qscale;
+} else if (IS_SKIP(s->cur_pic.mb_type[xy])) {
+s->cur_pic.qscale_table[xy] = s->qscale;
s->cbp_table[xy] = 0;
} else {
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
@@ -1204,7 +1204,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)

if (s->cbp_table[xy] & 8)
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
-s->current_picture.qscale_table[xy] = s->qscale;
+s->cur_pic.qscale_table[xy] = s->qscale;

s->cbp_table[xy] &= 3; // remove dquant
s->cbp_table[xy] |= (cbpy ^ 0xf) << 2;
@@ -1567,20 +1567,20 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
av_assert2(s == (void*)ctx);
|
||||
|
||||
mb_type = s->current_picture.mb_type[xy];
|
||||
mb_type = s->cur_pic.mb_type[xy];
|
||||
cbp = s->cbp_table[xy];
|
||||
|
||||
use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;
|
||||
|
||||
if (s->current_picture.qscale_table[xy] != s->qscale)
|
||||
ff_set_qscale(s, s->current_picture.qscale_table[xy]);
|
||||
if (s->cur_pic.qscale_table[xy] != s->qscale)
|
||||
ff_set_qscale(s, s->cur_pic.qscale_table[xy]);
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_P ||
|
||||
s->pict_type == AV_PICTURE_TYPE_S) {
|
||||
int i;
|
||||
for (i = 0; i < 4; i++) {
|
||||
s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
|
||||
s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
|
||||
s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
|
||||
s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
|
||||
}
|
||||
s->mb_intra = IS_INTRA(mb_type);
|
||||
|
||||
@ -1594,14 +1594,14 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
&& ctx->vol_sprite_usage == GMC_SPRITE) {
|
||||
s->mcsel = 1;
|
||||
s->mb_skipped = 0;
|
||||
s->current_picture.mbskip_table[xy] = 0;
|
||||
s->cur_pic.mbskip_table[xy] = 0;
|
||||
} else {
|
||||
s->mcsel = 0;
|
||||
s->mb_skipped = 1;
|
||||
s->current_picture.mbskip_table[xy] = 1;
|
||||
s->cur_pic.mbskip_table[xy] = 1;
|
||||
}
|
||||
} else if (s->mb_intra) {
|
||||
s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
|
||||
s->ac_pred = IS_ACPRED(s->cur_pic.mb_type[xy]);
|
||||
} else if (!s->mb_intra) {
|
||||
// s->mcsel = 0; // FIXME do we need to init that?
|
||||
|
||||
@ -1614,7 +1614,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
}
|
||||
} else { /* I-Frame */
|
||||
s->mb_intra = 1;
|
||||
s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
|
||||
s->ac_pred = IS_ACPRED(s->cur_pic.mb_type[xy]);
|
||||
}
|
||||
|
||||
if (!IS_SKIP(mb_type)) {
|
||||
@ -1673,23 +1673,23 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
if (s->pict_type == AV_PICTURE_TYPE_S &&
|
||||
ctx->vol_sprite_usage == GMC_SPRITE) {
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
|
||||
MB_TYPE_GMC |
|
||||
MB_TYPE_16x16 |
|
||||
MB_TYPE_L0;
|
||||
s->mcsel = 1;
|
||||
s->mv[0][0][0] = get_amv(ctx, 0);
|
||||
s->mv[0][0][1] = get_amv(ctx, 1);
|
||||
s->current_picture.mbskip_table[xy] = 0;
|
||||
s->cur_pic.mbskip_table[xy] = 0;
|
||||
s->mb_skipped = 0;
|
||||
} else {
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
|
||||
MB_TYPE_16x16 |
|
||||
MB_TYPE_L0;
|
||||
s->mcsel = 0;
|
||||
s->mv[0][0][0] = 0;
|
||||
s->mv[0][0][1] = 0;
|
||||
s->current_picture.mbskip_table[xy] = 1;
|
||||
s->cur_pic.mbskip_table[xy] = 1;
|
||||
s->mb_skipped = 1;
|
||||
}
|
||||
goto end;
|
||||
@ -1730,7 +1730,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
if ((cbpc & 16) == 0) {
|
||||
if (s->mcsel) {
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_GMC |
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_GMC |
|
||||
MB_TYPE_16x16 |
|
||||
MB_TYPE_L0;
|
||||
/* 16x16 global motion prediction */
|
||||
@ -1740,7 +1740,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->mv[0][0][0] = mx;
|
||||
s->mv[0][0][1] = my;
|
||||
} else if ((!s->progressive_sequence) && get_bits1(&s->gb)) {
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_16x8 |
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_16x8 |
|
||||
MB_TYPE_L0 |
|
||||
MB_TYPE_INTERLACED;
|
||||
/* 16x8 field motion prediction */
|
||||
@ -1764,7 +1764,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->mv[0][i][1] = my;
|
||||
}
|
||||
} else {
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
@ -1781,7 +1781,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->mv[0][0][1] = my;
|
||||
}
|
||||
} else {
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
s->mv_type = MV_TYPE_8X8;
|
||||
for (i = 0; i < 4; i++) {
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
@ -1814,11 +1814,11 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->last_mv[i][1][1] = 0;
|
||||
}
|
||||
|
||||
ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0);
|
||||
ff_thread_await_progress(&s->next_pic_ptr->tf, s->mb_y, 0);
|
||||
}
|
||||
|
||||
/* if we skipped it in the future P-frame than skip it now too */
|
||||
s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
|
||||
s->mb_skipped = s->next_pic.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
|
||||
|
||||
if (s->mb_skipped) {
|
||||
/* skip mb */
|
||||
@ -1831,7 +1831,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->mv[0][0][1] =
|
||||
s->mv[1][0][0] =
|
||||
s->mv[1][0][1] = 0;
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
|
||||
MB_TYPE_16x16 |
|
||||
MB_TYPE_L0;
|
||||
goto end;
|
||||
@ -1951,7 +1951,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
|
||||
mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
|
||||
}
|
||||
s->current_picture.mb_type[xy] = mb_type;
|
||||
s->cur_pic.mb_type[xy] = mb_type;
|
||||
} else { /* I-Frame */
|
||||
int use_intra_dc_vlc;
|
||||
|
||||
@ -1970,9 +1970,9 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
intra:
|
||||
s->ac_pred = get_bits1(&s->gb);
|
||||
if (s->ac_pred)
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
|
||||
else
|
||||
s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
|
||||
s->cur_pic.mb_type[xy] = MB_TYPE_INTRA;
|
||||
|
||||
cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc, CBPY_VLC_BITS, 1);
|
||||
if (cbpy < 0) {
|
||||
@ -2019,11 +2019,11 @@ end:
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_B) {
|
||||
const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
|
||||
ff_thread_await_progress(&s->next_picture_ptr->tf,
|
||||
ff_thread_await_progress(&s->next_pic_ptr->tf,
|
||||
(s->mb_x + delta >= s->mb_width)
|
||||
? FFMIN(s->mb_y + 1, s->mb_height - 1)
|
||||
: s->mb_y, 0);
|
||||
if (s->next_picture.mbskip_table[xy + delta])
|
||||
if (s->next_pic.mbskip_table[xy + delta])
|
||||
return SLICE_OK;
|
||||
}
|
||||
|
||||
|
@ -142,7 +142,7 @@ static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
{
int score = 0;
int i, n;
int8_t *const qscale_table = s->current_picture.qscale_table;
int8_t *const qscale_table = s->cur_pic.qscale_table;

memcpy(zigzag_last_index, s->block_last_index, sizeof(int) * 6);

@ -222,7 +222,7 @@ static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
void ff_clean_mpeg4_qscales(MpegEncContext *s)
{
int i;
int8_t *const qscale_table = s->current_picture.qscale_table;
int8_t *const qscale_table = s->cur_pic.qscale_table;

ff_clean_h263_qscales(s);

@ -511,7 +511,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
av_assert2(mb_type >= 0);

/* nothing to do if this MB was skipped in the next P-frame */
if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { // FIXME avoid DCT & ...
if (s->next_pic.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { // FIXME avoid DCT & ...
s->mv[0][0][0] =
s->mv[0][0][1] =
s->mv[1][0][0] =

@ -644,7 +644,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
y = s->mb_y * 16;

offset = x + y * s->linesize;
p_pic = s->new_picture->data[0] + offset;
p_pic = s->new_pic->data[0] + offset;

s->mb_skipped = 1;
for (i = 0; i < s->max_b_frames; i++) {

@ -777,8 +777,8 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

ff_h263_encode_motion_vector(s,
s->current_picture.motion_val[0][s->block_index[i]][0] - pred_x,
s->current_picture.motion_val[0][s->block_index[i]][1] - pred_y,
s->cur_pic.motion_val[0][s->block_index[i]][0] - pred_x,
s->cur_pic.motion_val[0][s->block_index[i]][1] - pred_y,
s->f_code);
}
}

@ -886,7 +886,7 @@ static void mpeg4_encode_gop_header(MpegEncContext *s)
put_bits(&s->pb, 16, 0);
put_bits(&s->pb, 16, GOP_STARTCODE);

time = s->current_picture_ptr->f->pts;
time = s->cur_pic_ptr->f->pts;
if (s->reordered_input_picture[1])
time = FFMIN(time, s->reordered_input_picture[1]->f->pts);
time = time * s->avctx->time_base.num;

@ -1098,7 +1098,7 @@ int ff_mpeg4_encode_picture_header(MpegEncContext *s)
}
put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
if (!s->progressive_sequence) {
put_bits(&s->pb, 1, !!(s->current_picture_ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST));
put_bits(&s->pb, 1, !!(s->cur_pic_ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST));
put_bits(&s->pb, 1, s->alternate_scan);
}
// FIXME sprite stuff
@ -49,9 +49,9 @@ void ff_mpeg_er_frame_start(MpegEncContext *s)
{
ERContext *er = &s->er;

set_erpic(&er->cur_pic, s->current_picture_ptr);
set_erpic(&er->next_pic, s->next_picture_ptr);
set_erpic(&er->last_pic, s->last_picture_ptr);
set_erpic(&er->cur_pic, s->cur_pic_ptr);
set_erpic(&er->next_pic, s->next_pic_ptr);
set_erpic(&er->last_pic, s->last_pic_ptr);

er->pp_time = s->pp_time;
er->pb_time = s->pb_time;

@ -84,13 +84,13 @@ static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
if (!s->chroma_y_shift)
s->bdsp.clear_blocks(s->block[6]);

s->dest[0] = s->current_picture.f->data[0] +
s->dest[0] = s->cur_pic.f->data[0] +
s->mb_y * 16 * s->linesize +
s->mb_x * 16;
s->dest[1] = s->current_picture.f->data[1] +
s->dest[1] = s->cur_pic.f->data[1] +
s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize +
s->mb_x * (16 >> s->chroma_x_shift);
s->dest[2] = s->current_picture.f->data[2] +
s->dest[2] = s->cur_pic.f->data[2] +
s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize +
s->mb_x * (16 >> s->chroma_x_shift);
@ -678,9 +678,9 @@ int ff_mpv_init_context_frame(MpegEncContext *s)
static void clear_context(MpegEncContext *s)
{
memset(&s->buffer_pools, 0, sizeof(s->buffer_pools));
memset(&s->next_picture, 0, sizeof(s->next_picture));
memset(&s->last_picture, 0, sizeof(s->last_picture));
memset(&s->current_picture, 0, sizeof(s->current_picture));
memset(&s->next_pic, 0, sizeof(s->next_pic));
memset(&s->last_pic, 0, sizeof(s->last_pic));
memset(&s->cur_pic, 0, sizeof(s->cur_pic));

memset(s->thread_context, 0, sizeof(s->thread_context));

@ -763,9 +763,9 @@ av_cold int ff_mpv_common_init(MpegEncContext *s)
goto fail_nomem;
}

if (!(s->next_picture.f = av_frame_alloc()) ||
!(s->last_picture.f = av_frame_alloc()) ||
!(s->current_picture.f = av_frame_alloc()))
if (!(s->next_pic.f = av_frame_alloc()) ||
!(s->last_pic.f = av_frame_alloc()) ||
!(s->cur_pic.f = av_frame_alloc()))
goto fail_nomem;

if ((ret = ff_mpv_init_context_frame(s)))

@ -840,15 +840,15 @@ void ff_mpv_common_end(MpegEncContext *s)
ff_mpv_picture_free(&s->picture[i]);
}
av_freep(&s->picture);
ff_mpv_picture_free(&s->last_picture);
ff_mpv_picture_free(&s->current_picture);
ff_mpv_picture_free(&s->next_picture);
ff_mpv_picture_free(&s->last_pic);
ff_mpv_picture_free(&s->cur_pic);
ff_mpv_picture_free(&s->next_pic);

s->context_initialized = 0;
s->context_reinit = 0;
s->last_picture_ptr =
s->next_picture_ptr =
s->current_picture_ptr = NULL;
s->last_pic_ptr =
s->next_pic_ptr =
s->cur_pic_ptr = NULL;
s->linesize = s->uvlinesize = 0;
}

@ -881,8 +881,8 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
}

void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize = s->current_picture.f->linesize[1];
const int linesize = s->cur_pic.f->linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize = s->cur_pic.f->linesize[1];
const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
const int height_of_mb = 4 - s->avctx->lowres;

@ -894,9 +894,9 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
//block_index is not used by mpeg2, so it is not affected by chroma_format

s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
s->dest[0] = s->cur_pic.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
s->dest[1] = s->cur_pic.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
s->dest[2] = s->cur_pic.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

if (s->picture_structure == PICT_FRAME) {
s->dest[0] += s->mb_y * linesize << height_of_mb;
@ -156,29 +156,29 @@ typedef struct MpegEncContext {
* copy of the previous picture structure.
* note, linesize & data, might not match the previous picture (for field pictures)
*/
Picture last_picture;
Picture last_pic;

/**
* copy of the next picture structure.
* note, linesize & data, might not match the next picture (for field pictures)
*/
Picture next_picture;
Picture next_pic;

/**
* Reference to the source picture for encoding.
* note, linesize & data, might not match the source picture (for field pictures)
*/
AVFrame *new_picture;
AVFrame *new_pic;

/**
* copy of the current picture structure.
* note, linesize & data, might not match the current picture (for field pictures)
*/
Picture current_picture; ///< buffer to store the decompressed current picture
Picture cur_pic; ///< buffer to store the decompressed current picture

Picture *last_picture_ptr; ///< pointer to the previous picture.
Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred)
Picture *current_picture_ptr; ///< pointer to the current picture
Picture *last_pic_ptr; ///< pointer to the previous picture.
Picture *next_pic_ptr; ///< pointer to the next picture (for bidir pred)
Picture *cur_pic_ptr; ///< pointer to the current picture
int skipped_last_frame;
int last_dc[3]; ///< last DC values for MPEG-1
int16_t *dc_val_base;
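To see how the renamed fields relate across frames, here is a minimal stand-alone sketch of the reference rotation that ff_mpv_frame_start() performs with these pointers (the Picture type is stubbed out; this is an illustration, not the real implementation):

typedef struct Picture { int reserved; } Picture;

typedef struct RefState {
    Picture *last_pic_ptr; /* previous reference, was last_picture_ptr */
    Picture *next_pic_ptr; /* future reference,   was next_picture_ptr */
    Picture *cur_pic_ptr;  /* picture being (de)coded, was current_picture_ptr */
} RefState;

/* After a non-B picture, the old future reference becomes the past reference
 * and the just-(de)coded picture becomes the new future reference. */
static void rotate_references(RefState *s, int is_b_picture, int droppable)
{
    if (!is_b_picture) {
        s->last_pic_ptr = s->next_pic_ptr;
        if (!droppable)
            s->next_pic_ptr = s->cur_pic_ptr;
    }
}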
@ -122,9 +122,9 @@ do {\
}\
} while (0)

UPDATE_PICTURE(current_picture);
UPDATE_PICTURE(last_picture);
UPDATE_PICTURE(next_picture);
UPDATE_PICTURE(cur_pic);
UPDATE_PICTURE(last_pic);
UPDATE_PICTURE(next_pic);

s->linesize = s1->linesize;
s->uvlinesize = s1->uvlinesize;

@ -134,9 +134,9 @@ do {\
pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
&new_ctx->picture[pic - old_ctx->picture] : NULL)

s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
s->last_pic_ptr = REBASE_PICTURE(s1->last_pic_ptr, s, s1);
s->cur_pic_ptr = REBASE_PICTURE(s1->cur_pic_ptr, s, s1);
s->next_pic_ptr = REBASE_PICTURE(s1->next_pic_ptr, s, s1);

// Error/bug resilience
s->workaround_bugs = s1->workaround_bugs;

@ -193,9 +193,9 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s)

ff_mpv_free_context_frame(s);

s->last_picture_ptr =
s->next_picture_ptr =
s->current_picture_ptr = NULL;
s->last_pic_ptr =
s->next_pic_ptr =
s->cur_pic_ptr = NULL;

if ((s->width || s->height) &&
(err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)

@ -326,9 +326,9 @@ int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
AVCodecContext *avctx = s->avctx;
int ret;

if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
if ((!s->last_pic_ptr || !s->last_pic_ptr->f->buf[0]) &&
(s->pict_type != AV_PICTURE_TYPE_I)) {
if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
if (s->pict_type == AV_PICTURE_TYPE_B && s->next_pic_ptr && s->next_pic_ptr->f->buf[0])
av_log(avctx, AV_LOG_DEBUG,
"allocating dummy last picture for B frame\n");
else if (s->codec_id != AV_CODEC_ID_H261 /* H.261 has no keyframes */ &&

@ -337,25 +337,25 @@ int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
"warning: first frame is no keyframe\n");

/* Allocate a dummy frame */
ret = alloc_dummy_frame(s, &s->last_picture_ptr, &s->last_picture);
ret = alloc_dummy_frame(s, &s->last_pic_ptr, &s->last_pic);
if (ret < 0)
return ret;

if (!avctx->hwaccel) {
int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
color_frame(s->last_picture_ptr->f, luma_val);
color_frame(s->last_pic_ptr->f, luma_val);
}
}
if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
if ((!s->next_pic_ptr || !s->next_pic_ptr->f->buf[0]) &&
s->pict_type == AV_PICTURE_TYPE_B) {
/* Allocate a dummy frame */
ret = alloc_dummy_frame(s, &s->next_picture_ptr, &s->next_picture);
ret = alloc_dummy_frame(s, &s->next_pic_ptr, &s->next_pic);
if (ret < 0)
return ret;
}

av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
s->last_picture_ptr->f->buf[0]));
av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_pic_ptr &&
s->last_pic_ptr->f->buf[0]));

return 0;
}
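As a side note, the dummy-frame path above paints the missing reference a flat gray (luma 16 for FLV1/H.263, 0x80 otherwise). A rough, self-contained sketch of that idea — not the actual color_frame() implementation — for an 8-bit planar YUV 4:2:0 image:

#include <stdint.h>
#include <string.h>

/* Illustrative: fill a planar YUV420 picture with constant luma and neutral chroma. */
static void fill_gray(uint8_t *y, uint8_t *u, uint8_t *v,
                      int linesize, int chroma_linesize,
                      int width, int height, uint8_t luma_val)
{
    for (int row = 0; row < height; row++)
        memset(y + row * linesize, luma_val, width);
    for (int row = 0; row < height / 2; row++) {
        memset(u + row * chroma_linesize, 0x80, width / 2);
        memset(v + row * chroma_linesize, 0x80, width / 2);
    }
}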
@ -376,67 +376,65 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
}

/* mark & release old frames */
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
s->last_picture_ptr != s->next_picture_ptr &&
s->last_picture_ptr->f->buf[0]) {
ff_mpeg_unref_picture(s->last_picture_ptr);
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_pic_ptr &&
s->last_pic_ptr != s->next_pic_ptr &&
s->last_pic_ptr->f->buf[0]) {
ff_mpeg_unref_picture(s->last_pic_ptr);
}

/* release non reference/forgotten frames */
for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
if (!s->picture[i].reference ||
(&s->picture[i] != s->last_picture_ptr &&
&s->picture[i] != s->next_picture_ptr)) {
(&s->picture[i] != s->last_pic_ptr &&
&s->picture[i] != s->next_pic_ptr)) {
ff_mpeg_unref_picture(&s->picture[i]);
}
}

ff_mpeg_unref_picture(&s->current_picture);
ff_mpeg_unref_picture(&s->last_picture);
ff_mpeg_unref_picture(&s->next_picture);
ff_mpeg_unref_picture(&s->cur_pic);
ff_mpeg_unref_picture(&s->last_pic);
ff_mpeg_unref_picture(&s->next_pic);

ret = alloc_picture(s, &s->current_picture_ptr,
ret = alloc_picture(s, &s->cur_pic_ptr,
s->pict_type != AV_PICTURE_TYPE_B && !s->droppable);
if (ret < 0)
return ret;

s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED * (!s->progressive_frame &&
!s->progressive_sequence);
s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
s->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
s->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED *
(!s->progressive_frame && !s->progressive_sequence);
s->cur_pic_ptr->field_picture = s->picture_structure != PICT_FRAME;

s->current_picture_ptr->f->pict_type = s->pict_type;
s->cur_pic_ptr->f->pict_type = s->pict_type;
if (s->pict_type == AV_PICTURE_TYPE_I)
s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_KEY;
s->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_KEY;
else
s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
s->cur_pic_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;

if ((ret = ff_mpeg_ref_picture(&s->current_picture,
s->current_picture_ptr)) < 0)
if ((ret = ff_mpeg_ref_picture(&s->cur_pic, s->cur_pic_ptr)) < 0)
return ret;

if (s->pict_type != AV_PICTURE_TYPE_B) {
s->last_picture_ptr = s->next_picture_ptr;
s->last_pic_ptr = s->next_pic_ptr;
if (!s->droppable)
s->next_picture_ptr = s->current_picture_ptr;
s->next_pic_ptr = s->cur_pic_ptr;
}
ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
s->last_pic_ptr, s->next_pic_ptr, s->cur_pic_ptr,
s->last_pic_ptr ? s->last_pic_ptr->f->data[0] : NULL,
s->next_pic_ptr ? s->next_pic_ptr->f->data[0] : NULL,
s->cur_pic_ptr ? s->cur_pic_ptr->f->data[0] : NULL,
s->pict_type, s->droppable);

if (s->last_picture_ptr) {
if (s->last_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(&s->last_picture,
s->last_picture_ptr)) < 0)
if (s->last_pic_ptr) {
if (s->last_pic_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(&s->last_pic,
s->last_pic_ptr)) < 0)
return ret;
}
if (s->next_picture_ptr) {
if (s->next_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(&s->next_picture,
s->next_picture_ptr)) < 0)
if (s->next_pic_ptr) {
if (s->next_pic_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(&s->next_pic, s->next_pic_ptr)) < 0)
return ret;
}
@ -459,7 +457,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
}

if (s->avctx->debug & FF_DEBUG_NOMC)
color_frame(s->current_picture_ptr->f, 0x80);
color_frame(s->cur_pic_ptr->f, 0x80);

return 0;
}

@ -469,8 +467,8 @@ void ff_mpv_frame_end(MpegEncContext *s)
{
emms_c();

if (s->current_picture.reference)
ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
if (s->cur_pic.reference)
ff_thread_report_progress(&s->cur_pic_ptr->tf, INT_MAX, 0);
}

void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)

@ -512,8 +510,8 @@ int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p

void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
{
ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
ff_draw_horiz_band(s->avctx, s->cur_pic_ptr->f,
s->last_pic_ptr ? s->last_pic_ptr->f : NULL,
y, h, s->picture_structure,
s->first_field, s->low_delay);
}

@ -527,11 +525,11 @@ void ff_mpeg_flush(AVCodecContext *avctx)

for (int i = 0; i < MAX_PICTURE_COUNT; i++)
ff_mpeg_unref_picture(&s->picture[i]);
s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
s->cur_pic_ptr = s->last_pic_ptr = s->next_pic_ptr = NULL;

ff_mpeg_unref_picture(&s->current_picture);
ff_mpeg_unref_picture(&s->last_picture);
ff_mpeg_unref_picture(&s->next_picture);
ff_mpeg_unref_picture(&s->cur_pic);
ff_mpeg_unref_picture(&s->last_pic);
ff_mpeg_unref_picture(&s->next_pic);

s->mb_x = s->mb_y = 0;

@ -542,7 +540,7 @@ void ff_mpeg_flush(AVCodecContext *avctx)
void ff_mpv_report_decode_progress(MpegEncContext *s)
{
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
ff_thread_report_progress(&s->cur_pic_ptr->tf, s->mb_y, 0);
}

@ -615,8 +613,8 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
const int h_edge_pos = s->h_edge_pos >> lowres;
const int v_edge_pos = s->v_edge_pos >> lowres;
int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
linesize = s->current_picture.f->linesize[0] << field_based;
uvlinesize = s->current_picture.f->linesize[1] << field_based;
linesize = s->cur_pic.f->linesize[0] << field_based;
uvlinesize = s->cur_pic.f->linesize[1] << field_based;

// FIXME obviously not perfect but qpel will not work in lowres anyway
if (s->quarter_sample) {

@ -861,7 +859,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
} else {
if (s->picture_structure != s->field_select[dir][0] + 1 &&
s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
ref_picture = s->current_picture_ptr->f->data;
ref_picture = s->cur_pic_ptr->f->data;
}
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->field_select[dir][0],

@ -878,7 +876,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
ref2picture = ref_picture;
} else {
ref2picture = s->current_picture_ptr->f->data;
ref2picture = s->cur_pic_ptr->f->data;
}

mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,

@ -919,7 +917,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
// opposite parity is always in the same
// frame if this is second field
if (!s->first_field) {
ref_picture = s->current_picture_ptr->f->data;
ref_picture = s->cur_pic_ptr->f->data;
}
}
}
@ -231,11 +231,11 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
}

/**
* init s->current_picture.qscale_table from s->lambda_table
* init s->cur_pic.qscale_table from s->lambda_table
*/
void ff_init_qscale_tab(MpegEncContext *s)
{
int8_t * const qscale_table = s->current_picture.qscale_table;
int8_t * const qscale_table = s->cur_pic.qscale_table;
int i;

for (i = 0; i < s->mb_num; i++) {

@ -821,7 +821,7 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
!FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
!FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
!FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
!(s->new_picture = av_frame_alloc()))
!(s->new_pic = av_frame_alloc()))
return AVERROR(ENOMEM);

/* Allocate MV tables; the MV and MB tables will be copied

@ -996,7 +996,7 @@ av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
av_frame_free(&s->tmp_frames[i]);

av_frame_free(&s->new_picture);
av_frame_free(&s->new_pic);

av_freep(&avctx->stats_out);

@ -1340,7 +1340,6 @@ static int estimate_best_b_count(MpegEncContext *s)
return AVERROR(ENOMEM);

//emms_c();
//s->next_picture_ptr->quality;
p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
//p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];

@ -1351,7 +1350,7 @@ static int estimate_best_b_count(MpegEncContext *s)

for (i = 0; i < s->max_b_frames + 2; i++) {
const Picture *pre_input_ptr = i ? s->input_picture[i - 1] :
s->next_picture_ptr;
s->next_pic_ptr;

if (pre_input_ptr) {
const uint8_t *data[4];

@ -1479,8 +1478,8 @@ static int select_input_picture(MpegEncContext *s)
if (!s->reordered_input_picture[0] && s->input_picture[0]) {
if (s->frame_skip_threshold || s->frame_skip_factor) {
if (s->picture_in_gop_number < s->gop_size &&
s->next_picture_ptr &&
skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
s->next_pic_ptr &&
skip_check(s, s->input_picture[0], s->next_pic_ptr)) {
// FIXME check that the gop check above is +-1 correct
ff_mpeg_unref_picture(s->input_picture[0]);

@ -1491,7 +1490,7 @@ static int select_input_picture(MpegEncContext *s)
}

if (/*s->picture_in_gop_number >= s->gop_size ||*/
!s->next_picture_ptr || s->intra_only) {
!s->next_pic_ptr || s->intra_only) {
s->reordered_input_picture[0] = s->input_picture[0];
s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
s->reordered_input_picture[0]->coded_picture_number =

@ -1594,14 +1593,14 @@ static int select_input_picture(MpegEncContext *s)
}
}
no_output_pic:
av_frame_unref(s->new_picture);
av_frame_unref(s->new_pic);

if (s->reordered_input_picture[0]) {
s->reordered_input_picture[0]->reference =
s->reordered_input_picture[0]->f->pict_type !=
AV_PICTURE_TYPE_B ? 3 : 0;

if ((ret = av_frame_ref(s->new_picture,
if ((ret = av_frame_ref(s->new_pic,
s->reordered_input_picture[0]->f)))
goto fail;

@ -1631,16 +1630,16 @@ no_output_pic:
/* mark us unused / free shared pic */
ff_mpeg_unref_picture(s->reordered_input_picture[0]);

s->current_picture_ptr = pic;
s->cur_pic_ptr = pic;
} else {
// input is not a shared pix -> reuse buffer for current_pix
s->current_picture_ptr = s->reordered_input_picture[0];
s->cur_pic_ptr = s->reordered_input_picture[0];
for (i = 0; i < 4; i++) {
if (s->new_picture->data[i])
s->new_picture->data[i] += INPLACE_OFFSET;
if (s->new_pic->data[i])
s->new_pic->data[i] += INPLACE_OFFSET;
}
}
s->picture_number = s->current_picture_ptr->display_picture_number;
s->picture_number = s->cur_pic_ptr->display_picture_number;

}
return 0;

@ -1652,24 +1651,24 @@ fail:
static void frame_end(MpegEncContext *s)
{
if (s->unrestricted_mv &&
s->current_picture.reference &&
s->cur_pic.reference &&
!s->intra_only) {
int hshift = s->chroma_x_shift;
int vshift = s->chroma_y_shift;
s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
s->current_picture.f->linesize[0],
s->mpvencdsp.draw_edges(s->cur_pic.f->data[0],
s->cur_pic.f->linesize[0],
s->h_edge_pos, s->v_edge_pos,
EDGE_WIDTH, EDGE_WIDTH,
EDGE_TOP | EDGE_BOTTOM);
s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
s->current_picture.f->linesize[1],
s->mpvencdsp.draw_edges(s->cur_pic.f->data[1],
s->cur_pic.f->linesize[1],
s->h_edge_pos >> hshift,
s->v_edge_pos >> vshift,
EDGE_WIDTH >> hshift,
EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
s->current_picture.f->linesize[2],
s->mpvencdsp.draw_edges(s->cur_pic.f->data[2],
s->cur_pic.f->linesize[2],
s->h_edge_pos >> hshift,
s->v_edge_pos >> vshift,
EDGE_WIDTH >> hshift,

@ -1680,7 +1679,7 @@ static void frame_end(MpegEncContext *s)
emms_c();

s->last_pict_type = s->pict_type;
s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
s->last_lambda_for [s->pict_type] = s->cur_pic_ptr->f->quality;
if (s->pict_type!= AV_PICTURE_TYPE_B)
s->last_non_b_pict_type = s->pict_type;
}
@ -1711,36 +1710,33 @@ static int frame_start(MpegEncContext *s)
int ret;

/* mark & release old frames */
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
s->last_picture_ptr != s->next_picture_ptr &&
s->last_picture_ptr->f->buf[0]) {
ff_mpeg_unref_picture(s->last_picture_ptr);
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_pic_ptr &&
s->last_pic_ptr != s->next_pic_ptr &&
s->last_pic_ptr->f->buf[0]) {
ff_mpeg_unref_picture(s->last_pic_ptr);
}

s->current_picture_ptr->f->pict_type = s->pict_type;
s->cur_pic_ptr->f->pict_type = s->pict_type;

ff_mpeg_unref_picture(&s->current_picture);
if ((ret = ff_mpeg_ref_picture(&s->current_picture,
s->current_picture_ptr)) < 0)
ff_mpeg_unref_picture(&s->cur_pic);
if ((ret = ff_mpeg_ref_picture(&s->cur_pic, s->cur_pic_ptr)) < 0)
return ret;

if (s->pict_type != AV_PICTURE_TYPE_B) {
s->last_picture_ptr = s->next_picture_ptr;
s->next_picture_ptr = s->current_picture_ptr;
s->last_pic_ptr = s->next_pic_ptr;
s->next_pic_ptr = s->cur_pic_ptr;
}

if (s->last_picture_ptr) {
ff_mpeg_unref_picture(&s->last_picture);
if (s->last_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(&s->last_picture,
s->last_picture_ptr)) < 0)
if (s->last_pic_ptr) {
ff_mpeg_unref_picture(&s->last_pic);
if (s->last_pic_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(&s->last_pic, s->last_pic_ptr)) < 0)
return ret;
}
if (s->next_picture_ptr) {
ff_mpeg_unref_picture(&s->next_picture);
if (s->next_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(&s->next_picture,
s->next_picture_ptr)) < 0)
if (s->next_pic_ptr) {
ff_mpeg_unref_picture(&s->next_pic);
if (s->next_pic_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(&s->next_pic, s->next_pic_ptr)) < 0)
return ret;
}

@ -1771,12 +1767,12 @@ int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
}

/* output? */
if (s->new_picture->data[0]) {
if (s->new_pic->data[0]) {
int growing_buffer = context_count == 1 && !s->data_partitioning;
size_t pkt_size = 10000 + s->mb_width * s->mb_height *
(growing_buffer ? 64 : (MAX_MB_BYTES + 100));
if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_picture, &pkt_size);
ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
if (ret < 0)
return ret;
}

@ -1800,7 +1796,7 @@ int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
init_put_bits(&s->thread_context[i]->pb, start, end - start);
}

s->pict_type = s->new_picture->pict_type;
s->pict_type = s->new_pic->pict_type;
//emms_c();
ret = frame_start(s);
if (ret < 0)

@ -1868,7 +1864,7 @@ vbv_retry:
for (i = 0; i < 4; i++) {
avctx->error[i] += s->encoding_error[i];
}
ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
ff_side_data_set_encoder_stats(pkt, s->cur_pic.f->quality,
s->encoding_error,
(avctx->flags&AV_CODEC_FLAG_PSNR) ? MPEGVIDEO_MAX_PLANES : 0,
s->pict_type);

@ -1962,10 +1958,10 @@ vbv_retry:
}
s->total_bits += s->frame_bits;

pkt->pts = s->current_picture.f->pts;
pkt->duration = s->current_picture.f->duration;
pkt->pts = s->cur_pic.f->pts;
pkt->duration = s->cur_pic.f->duration;
if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
if (!s->current_picture.coded_picture_number)
if (!s->cur_pic.coded_picture_number)
pkt->dts = pkt->pts - s->dts_delta;
else
pkt->dts = s->reordered_pts;

@ -1975,12 +1971,12 @@ vbv_retry:

// the no-delay case is handled in generic code
if (avctx->codec->capabilities & AV_CODEC_CAP_DELAY) {
ret = ff_encode_reordered_opaque(avctx, pkt, s->current_picture.f);
ret = ff_encode_reordered_opaque(avctx, pkt, s->cur_pic.f);
if (ret < 0)
return ret;
}

if (s->current_picture.f->flags & AV_FRAME_FLAG_KEY)
if (s->cur_pic.f->flags & AV_FRAME_FLAG_KEY)
pkt->flags |= AV_PKT_FLAG_KEY;
if (s->mb_info)
av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);

@ -2150,7 +2146,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
update_qscale(s);

if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
s->qscale = s->cur_pic_ptr->qscale_table[mb_xy];
s->dquant = s->qscale - last_qp;

if (s->out_format == FMT_H263) {

@ -2174,11 +2170,11 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,

wrap_y = s->linesize;
wrap_c = s->uvlinesize;
ptr_y = s->new_picture->data[0] +
ptr_y = s->new_pic->data[0] +
(mb_y * 16 * wrap_y) + mb_x * 16;
ptr_cb = s->new_picture->data[1] +
ptr_cb = s->new_pic->data[1] +
(mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
ptr_cr = s->new_picture->data[2] +
ptr_cr = s->new_pic->data[2] +
(mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;

if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){

@ -2273,14 +2269,14 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,

if (s->mv_dir & MV_DIR_FORWARD) {
ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
s->last_picture.f->data,
s->last_pic.f->data,
op_pix, op_qpix);
op_pix = s->hdsp.avg_pixels_tab;
op_qpix = s->qdsp.avg_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
s->next_picture.f->data,
s->next_pic.f->data,
op_pix, op_qpix);
}
@ -2664,26 +2660,26 @@ static int sse_mb(MpegEncContext *s){

if(w==16 && h==16)
if(s->avctx->mb_cmp == FF_CMP_NSSE){
return s->mecc.nsse[0](s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
return s->mecc.nsse[0](s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
s->dest[0], s->linesize, 16) +
s->mecc.nsse[1](s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->mecc.nsse[1](s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->dest[1], s->uvlinesize, chroma_mb_h) +
s->mecc.nsse[1](s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->mecc.nsse[1](s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->dest[2], s->uvlinesize, chroma_mb_h);
}else{
return s->mecc.sse[0](NULL, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
return s->mecc.sse[0](NULL, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
s->dest[0], s->linesize, 16) +
s->mecc.sse[1](NULL, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->mecc.sse[1](NULL, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->dest[1], s->uvlinesize, chroma_mb_h) +
s->mecc.sse[1](NULL, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->mecc.sse[1](NULL, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->dest[2], s->uvlinesize, chroma_mb_h);
}
else
return sse(s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
return sse(s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
s->dest[0], w, h, s->linesize) +
sse(s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
sse(s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
sse(s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
sse(s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
}

@ -2739,7 +2735,7 @@ static int mb_var_thread(AVCodecContext *c, void *arg){
for(mb_x=0; mb_x < s->mb_width; mb_x++) {
int xx = mb_x * 16;
int yy = mb_y * 16;
const uint8_t *pix = s->new_picture->data[0] + (yy * s->linesize) + xx;
const uint8_t *pix = s->new_pic->data[0] + (yy * s->linesize) + xx;
int varc;
int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

@ -3102,8 +3098,8 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->mv_type = MV_TYPE_8X8;
s->mb_intra= 0;
for(i=0; i<4; i++){
s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
}
encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
&dmin, &next_block, 0, 0);

@ -3290,7 +3286,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
}
}

s->current_picture.qscale_table[xy] = best_s.qscale;
s->cur_pic.qscale_table[xy] = best_s.qscale;

copy_context_after_encode(s, &best_s);

@ -3357,8 +3353,8 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->mv_type = MV_TYPE_8X8;
s->mb_intra= 0;
for(i=0; i<4; i++){
s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
}
break;
case CANDIDATE_MB_TYPE_DIRECT:

@ -3459,13 +3455,13 @@ static int encode_thread(AVCodecContext *c, void *arg){
if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;

s->encoding_error[0] += sse(
s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
s, s->new_pic->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
s->dest[0], w, h, s->linesize);
s->encoding_error[1] += sse(
s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
s, s->new_pic->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
s->encoding_error[2] += sse(
s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
s, s->new_pic->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
}
if(s->loop_filter){

@ -3522,14 +3518,14 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)

static int estimate_qp(MpegEncContext *s, int dry_run){
if (s->next_lambda){
s->current_picture_ptr->f->quality =
s->current_picture.f->quality = s->next_lambda;
s->cur_pic_ptr->f->quality =
s->cur_pic.f->quality = s->next_lambda;
if(!dry_run) s->next_lambda= 0;
} else if (!s->fixed_qscale) {
int quality = ff_rate_estimate_qscale(s, dry_run);
s->current_picture_ptr->f->quality =
s->current_picture.f->quality = quality;
if (s->current_picture.f->quality < 0)
s->cur_pic_ptr->f->quality =
s->cur_pic.f->quality = quality;
if (s->cur_pic.f->quality < 0)
return -1;
}

@ -3552,15 +3548,15 @@ static int estimate_qp(MpegEncContext *s, int dry_run){
s->lambda= s->lambda_table[0];
//FIXME broken
}else
s->lambda = s->current_picture.f->quality;
s->lambda = s->cur_pic.f->quality;
update_qscale(s);
return 0;
}

/* must be called before writing the header */
static void set_frame_distances(MpegEncContext * s){
av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
av_assert1(s->cur_pic_ptr->f->pts != AV_NOPTS_VALUE);
s->time = s->cur_pic_ptr->f->pts * s->avctx->time_base.num;

if(s->pict_type==AV_PICTURE_TYPE_B){
s->pb_time= s->pp_time - (s->last_non_b_time - s->time);

@ -3591,7 +3587,7 @@ static int encode_picture(MpegEncContext *s)

s->me.scene_change_score=0;

// s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
// s->lambda= s->cur_pic_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion

if(s->pict_type==AV_PICTURE_TYPE_I){
if(s->msmpeg4_version >= 3) s->no_rounding=1;

@ -3781,16 +3777,16 @@ static int encode_picture(MpegEncContext *s)

//FIXME var duplication
if (s->pict_type == AV_PICTURE_TYPE_I) {
s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_KEY; //FIXME pic_ptr
s->current_picture.f->flags |= AV_FRAME_FLAG_KEY;
s->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_KEY; //FIXME pic_ptr
s->cur_pic.f->flags |= AV_FRAME_FLAG_KEY;
} else {
s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY; //FIXME pic_ptr
s->current_picture.f->flags &= ~AV_FRAME_FLAG_KEY;
s->cur_pic_ptr->f->flags &= ~AV_FRAME_FLAG_KEY; //FIXME pic_ptr
s->cur_pic.f->flags &= ~AV_FRAME_FLAG_KEY;
}
s->current_picture_ptr->f->pict_type =
s->current_picture.f->pict_type = s->pict_type;
s->cur_pic_ptr->f->pict_type =
s->cur_pic.f->pict_type = s->pict_type;

if (s->current_picture.f->flags & AV_FRAME_FLAG_KEY)
if (s->cur_pic.f->flags & AV_FRAME_FLAG_KEY)
s->picture_in_gop_number=0;

s->mb_x = s->mb_y = 0;
@ -93,8 +93,8 @@ void mpeg_motion_internal(MpegEncContext *s,
ptrdiff_t uvlinesize, linesize;

v_edge_pos = s->v_edge_pos >> field_based;
linesize = s->current_picture.f->linesize[0] << field_based;
uvlinesize = s->current_picture.f->linesize[1] << field_based;
linesize = s->cur_pic.f->linesize[0] << field_based;
uvlinesize = s->cur_pic.f->linesize[1] << field_based;
block_y_half = (field_based | is_16x8);

dxy = ((motion_y & 1) << 1) | (motion_x & 1);

@ -514,7 +514,7 @@ static inline void apply_obmc(MpegEncContext *s,
op_pixels_func (*pix_op)[4])
{
LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
const Picture *cur_frame = &s->current_picture;
const Picture *cur_frame = &s->cur_pic;
int mb_x = s->mb_x;
int mb_y = s->mb_y;
const int xy = mb_x + mb_y * s->mb_stride;

@ -749,7 +749,7 @@ static av_always_inline void mpv_motion_internal(MpegEncContext *s,
av_assert2(s->out_format == FMT_MPEG1);
if (s->picture_structure != s->field_select[dir][0] + 1 &&
s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
ref_picture = s->current_picture_ptr->f->data;
ref_picture = s->cur_pic_ptr->f->data;
}

mpeg_motion(s, dest_y, dest_cb, dest_cr,

@ -767,7 +767,7 @@ static av_always_inline void mpv_motion_internal(MpegEncContext *s,
s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
ref2picture = ref_picture;
} else {
ref2picture = s->current_picture_ptr->f->data;
ref2picture = s->cur_pic_ptr->f->data;
}

mpeg_motion(s, dest_y, dest_cb, dest_cr,

@ -807,7 +807,7 @@ static av_always_inline void mpv_motion_internal(MpegEncContext *s,
/* opposite parity is always in the same frame if this is
* second field */
if (!s->first_field)
ref_picture = s->current_picture_ptr->f->data;
ref_picture = s->cur_pic_ptr->f->data;
}
}
break;
@ -59,7 +59,7 @@ void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
#define IS_MPEG12(s) (is_mpeg12 == MAY_BE_MPEG12 ? ((s)->out_format == FMT_MPEG1) : is_mpeg12)
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

s->current_picture.qscale_table[mb_xy] = s->qscale;
s->cur_pic.qscale_table[mb_xy] = s->qscale;

/* update DC predictors for P macroblocks */
if (!s->mb_intra) {

@ -82,8 +82,8 @@ void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
{
uint8_t *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset;
const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize = s->current_picture.f->linesize[1];
const int linesize = s->cur_pic.f->linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize = s->cur_pic.f->linesize[1];
const int readable = IS_ENCODER || lowres_flag || s->pict_type != AV_PICTURE_TYPE_B;
const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;

@ -96,7 +96,7 @@ void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
s->mb_skipped = 0;
av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
*mbskip_ptr = 1;
} else if(!s->current_picture.reference) {
} else if (!s->cur_pic.reference) {
*mbskip_ptr = 1;
} else{
*mbskip_ptr = 0; /* not skipped */

@ -124,11 +124,11 @@ void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
if (HAVE_THREADS && is_mpeg12 != DEFINITELY_MPEG12 &&
s->avctx->active_thread_type & FF_THREAD_FRAME) {
if (s->mv_dir & MV_DIR_FORWARD) {
ff_thread_await_progress(&s->last_picture_ptr->tf,
ff_thread_await_progress(&s->last_pic_ptr->tf,
lowest_referenced_row(s, 0), 0);
}
if (s->mv_dir & MV_DIR_BACKWARD) {
ff_thread_await_progress(&s->next_picture_ptr->tf,
ff_thread_await_progress(&s->next_pic_ptr->tf,
lowest_referenced_row(s, 1), 0);
}
}

@ -137,11 +137,11 @@ void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
const h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;

if (s->mv_dir & MV_DIR_FORWARD) {
MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.f->data, op_pix);
op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.f->data, op_pix);
}
} else {
op_pixels_func (*op_pix)[4];

@ -155,12 +155,12 @@ void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_FORWARD) {
ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.f->data, op_pix, op_qpix);
op_pix = s->hdsp.avg_pixels_tab;
op_qpix = s->qdsp.avg_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.f->data, op_pix, op_qpix);
}
}
@ -282,10 +282,10 @@ int ff_msmpeg4_pred_dc(MpegEncContext *s, int n,
int bs = 8 >> s->avctx->lowres;
if(n<4){
wrap= s->linesize;
dest= s->current_picture.f->data[0] + (((n >> 1) + 2*s->mb_y) * bs* wrap ) + ((n & 1) + 2*s->mb_x) * bs;
dest = s->cur_pic.f->data[0] + (((n >> 1) + 2*s->mb_y) * bs* wrap ) + ((n & 1) + 2*s->mb_x) * bs;
}else{
wrap= s->uvlinesize;
dest= s->current_picture.f->data[n - 3] + (s->mb_y * bs * wrap) + s->mb_x * bs;
dest = s->cur_pic.f->data[n - 3] + (s->mb_y * bs * wrap) + s->mb_x * bs;
}
if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
else a= get_dc(dest-bs, wrap, scale*8>>(2*s->avctx->lowres), bs);

@ -105,7 +105,7 @@ static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
static int msmpeg4v12_decode_mb(MpegEncContext *s, int16_t block[6][64])
{
int cbp, code, i;
uint32_t * const mb_type_ptr = &s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride];
uint32_t * const mb_type_ptr = &s->cur_pic.mb_type[s->mb_x + s->mb_y*s->mb_stride];

if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) {
@ -207,7 +207,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, int16_t block[6][64])
{
int cbp, code, i;
uint8_t *coded_val;
uint32_t * const mb_type_ptr = &s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride];
uint32_t * const mb_type_ptr = &s->cur_pic.mb_type[s->mb_x + s->mb_y*s->mb_stride];

if (get_bits_left(&s->gb) <= 0)
return AVERROR_INVALIDDATA;

@ -431,7 +431,7 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,

ff_mpv_frame_end(s);

f = s->current_picture.f;
f = s->cur_pic.f;

if (v->respic == 3) {
ctx->dsp.upsample_plane(f->data[0], f->linesize[0], w, h);

@ -39,7 +39,7 @@ static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer
CUVIDMPEG2PICPARAMS *ppc = &pp->CodecSpecific.mpeg2;
FrameDecodeData *fdd;
NVDECFrame *cf;
AVFrame *cur_frame = s->current_picture.f;
AVFrame *cur_frame = s->cur_pic.f;

int ret, i;

@ -64,8 +64,8 @@ static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer
s->pict_type == AV_PICTURE_TYPE_P,

.CodecSpecific.mpeg2 = {
.ForwardRefIdx = ff_nvdec_get_ref_idx(s->last_picture.f),
.BackwardRefIdx = ff_nvdec_get_ref_idx(s->next_picture.f),
.ForwardRefIdx = ff_nvdec_get_ref_idx(s->last_pic.f),
.BackwardRefIdx = ff_nvdec_get_ref_idx(s->next_pic.f),

.picture_coding_type = s->pict_type,
.full_pel_forward_vector = s->full_pel[0],

@ -38,7 +38,7 @@ static int nvdec_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer,
CUVIDMPEG4PICPARAMS *ppc = &pp->CodecSpecific.mpeg4;
FrameDecodeData *fdd;
NVDECFrame *cf;
AVFrame *cur_frame = s->current_picture.f;
AVFrame *cur_frame = s->cur_pic.f;

int ret, i;

@ -60,8 +60,8 @@ static int nvdec_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer,
s->pict_type == AV_PICTURE_TYPE_S,

.CodecSpecific.mpeg4 = {
.ForwardRefIdx = ff_nvdec_get_ref_idx(s->last_picture.f),
.BackwardRefIdx = ff_nvdec_get_ref_idx(s->next_picture.f),
.ForwardRefIdx = ff_nvdec_get_ref_idx(s->last_pic.f),
.BackwardRefIdx = ff_nvdec_get_ref_idx(s->next_pic.f),

.video_object_layer_width = s->width,
.video_object_layer_height = s->height,

@ -38,7 +38,7 @@ static int nvdec_vc1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
CUVIDPICPARAMS *pp = &ctx->pic_params;
FrameDecodeData *fdd;
NVDECFrame *cf;
AVFrame *cur_frame = s->current_picture.f;
AVFrame *cur_frame = s->cur_pic.f;

int ret;

@ -63,8 +63,8 @@ static int nvdec_vc1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
s->pict_type == AV_PICTURE_TYPE_P,

.CodecSpecific.vc1 = {
.ForwardRefIdx = ff_nvdec_get_ref_idx(s->last_picture.f),
.BackwardRefIdx = ff_nvdec_get_ref_idx(s->next_picture.f),
.ForwardRefIdx = ff_nvdec_get_ref_idx(s->last_pic.f),
.BackwardRefIdx = ff_nvdec_get_ref_idx(s->next_pic.f),
.FrameWidth = cur_frame->width,
.FrameHeight = cur_frame->height,

@ -40,10 +40,10 @@ void ff_write_pass1_stats(MpegEncContext *s)
snprintf(s->avctx->stats_out, 256,
"in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
"fcode:%d bcode:%d mc-var:%"PRId64" var:%"PRId64" icount:%d hbits:%d;\n",
s->current_picture_ptr->display_picture_number,
s->current_picture_ptr->coded_picture_number,
s->cur_pic_ptr->display_picture_number,
s->cur_pic_ptr->coded_picture_number,
s->pict_type,
s->current_picture.f->quality,
s->cur_pic.f->quality,
s->i_tex_bits,
s->p_tex_bits,
s->mv_bits,
@ -936,9 +936,9 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
* here instead of reordering but the reordering is simpler for now
* until H.264 B-pyramid must be handled. */
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
dts_pic = s->current_picture_ptr;
dts_pic = s->cur_pic_ptr;
else
dts_pic = s->last_picture_ptr;
dts_pic = s->last_pic_ptr;

if (!dts_pic || dts_pic->f->pts == AV_NOPTS_VALUE)
wanted_bits = (uint64_t)(s->bit_rate * (double)picture_number / fps);

@ -170,7 +170,7 @@ static int rv20_decode_picture_header(RVDecContext *rv, int whole_size)
av_log(s->avctx, AV_LOG_ERROR, "low delay B\n");
return -1;
}
if (!s->last_picture_ptr && s->pict_type == AV_PICTURE_TYPE_B) {
if (!s->last_pic_ptr && s->pict_type == AV_PICTURE_TYPE_B) {
av_log(s->avctx, AV_LOG_ERROR, "early B-frame\n");
return AVERROR_INVALIDDATA;
}
@ -458,9 +458,9 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
if (whole_size < s->mb_width * s->mb_height / 8)
return AVERROR_INVALIDDATA;

if ((s->mb_x == 0 && s->mb_y == 0) || !s->current_picture_ptr) {
if ((s->mb_x == 0 && s->mb_y == 0) || !s->cur_pic_ptr) {
// FIXME write parser so we always have complete frames?
if (s->current_picture_ptr) {
if (s->cur_pic_ptr) {
ff_er_frame_end(&s->er, NULL);
ff_mpv_frame_end(s);
s->mb_x = s->mb_y = s->resync_mb_x = s->resync_mb_y = 0;
@ -469,7 +469,7 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
return ret;
ff_mpeg_er_frame_start(s);
} else {
if (s->current_picture_ptr->f->pict_type != s->pict_type) {
if (s->cur_pic_ptr->f->pict_type != s->pict_type) {
av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
return AVERROR_INVALIDDATA;
}
@ -632,28 +632,28 @@ static int rv10_decode_frame(AVCodecContext *avctx, AVFrame *pict,
i++;
}

if (s->current_picture_ptr && s->mb_y >= s->mb_height) {
if (s->cur_pic_ptr && s->mb_y >= s->mb_height) {
ff_er_frame_end(&s->er, NULL);
ff_mpv_frame_end(s);

if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
if ((ret = av_frame_ref(pict, s->cur_pic_ptr->f)) < 0)
return ret;
ff_print_debug_info(s, s->current_picture_ptr, pict);
ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
} else if (s->last_picture_ptr) {
if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
ff_print_debug_info(s, s->cur_pic_ptr, pict);
ff_mpv_export_qp_table(s, pict, s->cur_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
} else if (s->last_pic_ptr) {
if ((ret = av_frame_ref(pict, s->last_pic_ptr->f)) < 0)
return ret;
ff_print_debug_info(s, s->last_picture_ptr, pict);
ff_mpv_export_qp_table(s, pict,s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
ff_print_debug_info(s, s->last_pic_ptr, pict);
ff_mpv_export_qp_table(s, pict,s->last_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
}

if (s->last_picture_ptr || s->low_delay) {
if (s->last_pic_ptr || s->low_delay) {
*got_frame = 1;
}

// so we can detect if frame_end was not called (find some nicer solution...)
s->current_picture_ptr = NULL;
s->cur_pic_ptr = NULL;
}

return avpkt->size;

@ -160,7 +160,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)

mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
int mbtype = s->current_picture_ptr->mb_type[mb_pos];
int mbtype = s->cur_pic_ptr->mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
@ -172,11 +172,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
*/
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
cur_lim = rv30_loop_filt_lim[s->cur_pic_ptr->qscale_table[mb_pos]];
if(mb_x)
left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
left_lim = rv30_loop_filt_lim[s->cur_pic_ptr->qscale_table[mb_pos - 1]];
for(j = 0; j < 16; j += 4){
Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
Y = s->cur_pic_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
for(i = !mb_x; i < 4; i++, Y += 4){
int ij = i + j;
loc_lim = 0;
@ -196,7 +196,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
if(mb_x)
left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF;
for(j = 0; j < 8; j += 4){
C = s->current_picture_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
C = s->cur_pic_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
for(i = !mb_x; i < 2; i++, C += 4){
int ij = i + (j >> 1);
loc_lim = 0;
@ -214,11 +214,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
}
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
cur_lim = rv30_loop_filt_lim[s->cur_pic_ptr->qscale_table[mb_pos]];
if(row)
top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
top_lim = rv30_loop_filt_lim[s->cur_pic_ptr->qscale_table[mb_pos - s->mb_stride]];
for(j = 4*!row; j < 16; j += 4){
Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
Y = s->cur_pic_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
int ij = i + j;
loc_lim = 0;
@ -238,7 +238,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
if(row)
top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF;
for(j = 4*!row; j < 8; j += 4){
C = s->current_picture_ptr->f->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
C = s->cur_pic_ptr->f->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
for(i = 0; i < 2; i++, C += 4){
int ij = i + (j >> 1);
loc_lim = 0;

@ -369,7 +369,7 @@ static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)

r->is16 = get_bits1(gb);
if(r->is16){
s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
s->cur_pic_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
r->block_type = RV34_MB_TYPE_INTRA16x16;
t = get_bits(gb, 2);
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
@ -379,7 +379,7 @@ static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
if(!get_bits1(gb))
av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
}
s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
s->cur_pic_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
r->block_type = RV34_MB_TYPE_INTRA;
if(r->decode_intra_types(r, gb, intra_types) < 0)
return -1;
@ -405,7 +405,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
r->block_type = r->decode_mb_info(r);
if(r->block_type == -1)
return -1;
s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
s->cur_pic_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
r->mb_type[mb_pos] = r->block_type;
if(r->block_type == RV34_MB_SKIP){
if(s->pict_type == AV_PICTURE_TYPE_P)
@ -413,7 +413,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
if(s->pict_type == AV_PICTURE_TYPE_B)
r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
}
r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
r->is16 = !!IS_INTRA16x16(s->cur_pic_ptr->mb_type[mb_pos]);
if (rv34_decode_mv(r, r->block_type) < 0)
return -1;
if(r->block_type == RV34_MB_SKIP){
@ -423,7 +423,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
r->chroma_vlc = 1;
r->luma_vlc = 0;

if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
if(IS_INTRA(s->cur_pic_ptr->mb_type[mb_pos])){
if(r->is16){
t = get_bits(gb, 2);
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
@ -488,27 +488,27 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int
c_off = -1;

if(avail[-1]){
A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
A[0] = s->cur_pic_ptr->motion_val[0][mv_pos-1][0];
A[1] = s->cur_pic_ptr->motion_val[0][mv_pos-1][1];
}
if(avail[-4]){
B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
B[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride][0];
B[1] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride][1];
}else{
B[0] = A[0];
B[1] = A[1];
}
if(!avail[c_off-4]){
if(avail[-4] && (avail[-1] || r->rv30)){
C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
C[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
C[1] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
}else{
C[0] = A[0];
C[1] = A[1];
}
}else{
C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
C[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
C[1] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
}
mx = mid_pred(A[0], B[0], C[0]);
my = mid_pred(A[1], B[1], C[1]);
@ -516,8 +516,8 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int
my += r->dmv[dmv_no][1];
for(j = 0; j < part_sizes_h[block_type]; j++){
for(i = 0; i < part_sizes_w[block_type]; i++){
s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
s->cur_pic_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
s->cur_pic_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
}
}
}
@ -566,7 +566,7 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
int has_A = 0, has_B = 0, has_C = 0;
int mx, my;
int i, j;
Picture *cur_pic = s->current_picture_ptr;
Picture *cur_pic = s->cur_pic_ptr;
const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
int type = cur_pic->mb_type[mb_pos];

@ -619,27 +619,27 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
int* avail = r->avail_cache + avail_indexes[0];

if(avail[-1]){
A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
A[0] = s->cur_pic_ptr->motion_val[0][mv_pos - 1][0];
A[1] = s->cur_pic_ptr->motion_val[0][mv_pos - 1][1];
}
if(avail[-4]){
B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
B[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride][0];
B[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride][1];
}else{
B[0] = A[0];
B[1] = A[1];
}
if(!avail[-4 + 2]){
if(avail[-4] && (avail[-1])){
C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
C[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
C[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
}else{
C[0] = A[0];
C[1] = A[1];
}
}else{
C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
C[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
C[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
}
mx = mid_pred(A[0], B[0], C[0]);
my = mid_pred(A[1], B[1], C[1]);
@ -648,8 +648,8 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
for(j = 0; j < 2; j++){
for(i = 0; i < 2; i++){
for(k = 0; k < 2; k++){
s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
s->cur_pic_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
s->cur_pic_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
}
}
}
@ -688,24 +688,24 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,

if(thirdpel){
int chroma_mx, chroma_my;
mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
mx = (s->cur_pic_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
my = (s->cur_pic_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
lx = (s->cur_pic_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
ly = (s->cur_pic_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
chroma_mx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] / 2;
chroma_my = s->cur_pic_ptr->motion_val[dir][mv_pos][1] / 2;
umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
}else{
int cx, cy;
mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
mx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] >> 2;
my = s->cur_pic_ptr->motion_val[dir][mv_pos][1] >> 2;
lx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] & 3;
ly = s->cur_pic_ptr->motion_val[dir][mv_pos][1] & 3;
cx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] / 2;
cy = s->cur_pic_ptr->motion_val[dir][mv_pos][1] / 2;
umx = cx >> 2;
umy = cy >> 2;
uvmx = (cx & 3) << 1;
@ -718,14 +718,14 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
/* wait for the referenced mb row to be finished */
int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
const ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
const ThreadFrame *f = dir ? &s->next_pic_ptr->tf : &s->last_pic_ptr->tf;
ff_thread_await_progress(f, mb_row, 0);
}

dxy = ly*4 + lx;
srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
srcY = dir ? s->next_pic_ptr->f->data[0] : s->last_pic_ptr->f->data[0];
srcU = dir ? s->next_pic_ptr->f->data[1] : s->last_pic_ptr->f->data[1];
srcV = dir ? s->next_pic_ptr->f->data[2] : s->last_pic_ptr->f->data[2];
src_x = s->mb_x * 16 + xoff + mx;
src_y = s->mb_y * 16 + yoff + my;
uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
@ -886,11 +886,11 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
switch(block_type){
case RV34_MB_TYPE_INTRA:
case RV34_MB_TYPE_INTRA16x16:
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
return 0;
case RV34_MB_SKIP:
if(s->pict_type == AV_PICTURE_TYPE_P){
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
}
@ -898,23 +898,23 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
//surprisingly, it uses motion scheme from next reference frame
/* wait for the current mb row to be finished */
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);
ff_thread_await_progress(&s->next_pic_ptr->tf, FFMAX(0, s->mb_y-1), 0);

next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
next_bt = s->next_pic_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
ZERO8x2(s->cur_pic_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
}else
for(j = 0; j < 2; j++)
for(i = 0; i < 2; i++)
for(k = 0; k < 2; k++)
for(l = 0; l < 2; l++)
s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
s->cur_pic_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_pic_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
rv34_mc_2mv(r, block_type);
else
rv34_mc_2mv_skip(r);
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
break;
case RV34_MB_P_16x16:
case RV34_MB_P_MIX16x16:
@ -1182,7 +1182,7 @@ static int rv34_set_deblock_coef(RV34DecContext *r)
MpegEncContext *s = &r->s;
int hmvmask = 0, vmvmask = 0, i, j;
int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
int16_t (*motion_val)[2] = &s->cur_pic_ptr->motion_val[0][midx];
for(j = 0; j < 16; j += 8){
for(i = 0; i < 2; i++){
if(is_mv_diff_gt_3(motion_val + i, 1))
@ -1225,26 +1225,26 @@ static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
if(s->mb_x && dist)
r->avail_cache[5] =
r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
r->avail_cache[9] = s->cur_pic_ptr->mb_type[mb_pos - 1];
if(dist >= s->mb_width)
r->avail_cache[2] =
r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
r->avail_cache[3] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride];
if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
r->avail_cache[4] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride + 1];
if(s->mb_x && dist > s->mb_width)
r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
r->avail_cache[1] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride - 1];

s->qscale = r->si.quant;
cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
s->cur_pic_ptr->qscale_table[mb_pos] = s->qscale;

if(cbp == -1)
return -1;

if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
if (IS_INTRA(s->cur_pic_ptr->mb_type[mb_pos])){
if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
else rv34_output_intra(r, intra_types, cbp);
return 0;
@ -1327,21 +1327,21 @@ static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
if(s->mb_x && dist)
r->avail_cache[5] =
r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
r->avail_cache[9] = s->cur_pic_ptr->mb_type[mb_pos - 1];
if(dist >= s->mb_width)
r->avail_cache[2] =
r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
r->avail_cache[3] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride];
if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
r->avail_cache[4] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride + 1];
if(s->mb_x && dist > s->mb_width)
r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
r->avail_cache[1] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride - 1];

s->qscale = r->si.quant;
cbp = rv34_decode_intra_mb_header(r, intra_types);
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
r->deblock_coefs[mb_pos] = 0xFFFF;
s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
s->cur_pic_ptr->qscale_table[mb_pos] = s->qscale;

if(cbp == -1)
return -1;
@ -1482,7 +1482,7 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
r->loop_filter(r, s->mb_y - 2);

if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_report_progress(&s->current_picture_ptr->tf,
ff_thread_report_progress(&s->cur_pic_ptr->tf,
s->mb_y - 2, 0);

}
@ -1580,19 +1580,19 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
s->mb_num_left = 0;

if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&s->cur_pic_ptr->tf, INT_MAX, 0);

if (s->pict_type == AV_PICTURE_TYPE_B) {
if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
if ((ret = av_frame_ref(pict, s->cur_pic_ptr->f)) < 0)
return ret;
ff_print_debug_info(s, s->current_picture_ptr, pict);
ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
ff_print_debug_info(s, s->cur_pic_ptr, pict);
ff_mpv_export_qp_table(s, pict, s->cur_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
got_picture = 1;
} else if (s->last_picture_ptr) {
if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
} else if (s->last_pic_ptr) {
if ((ret = av_frame_ref(pict, s->last_pic_ptr->f)) < 0)
return ret;
ff_print_debug_info(s, s->last_picture_ptr, pict);
ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
ff_print_debug_info(s, s->last_pic_ptr, pict);
ff_mpv_export_qp_table(s, pict, s->last_pic_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
got_picture = 1;
}

@ -1627,10 +1627,10 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
/* no supplementary picture */
if (buf_size == 0) {
/* special case for last picture */
if (s->next_picture_ptr) {
if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
if (s->next_pic_ptr) {
if ((ret = av_frame_ref(pict, s->next_pic_ptr->f)) < 0)
return ret;
s->next_picture_ptr = NULL;
s->next_pic_ptr = NULL;

*got_picture_ptr = 1;
}
@ -1653,7 +1653,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
return AVERROR_INVALIDDATA;
}
if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
if ((!s->last_pic_ptr || !s->last_pic_ptr->f->data[0]) &&
si.type == AV_PICTURE_TYPE_B) {
av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
"reference data.\n");
@ -1666,7 +1666,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,

/* first slice */
if (si.start == 0) {
if (s->mb_num_left > 0 && s->current_picture_ptr) {
if (s->mb_num_left > 0 && s->cur_pic_ptr) {
av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
s->mb_num_left);
if (!s->context_reinit)
@ -1791,7 +1791,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
break;
}

if (s->current_picture_ptr) {
if (s->cur_pic_ptr) {
if (last) {
if(r->loop_filter)
r->loop_filter(r, s->mb_height - 1);
@ -1808,7 +1808,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
ff_er_frame_end(&s->er, NULL);
ff_mpv_frame_end(s);
s->mb_num_left = 0;
ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&s->cur_pic_ptr->tf, INT_MAX, 0);
return AVERROR_INVALIDDATA;
}
}

@ -371,7 +371,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)

mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
int mbtype = s->current_picture_ptr->mb_type[mb_pos];
int mbtype = s->cur_pic_ptr->mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
@ -386,7 +386,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
unsigned y_to_deblock;
int c_to_deblock[2];

q = s->current_picture_ptr->qscale_table[mb_pos];
q = s->cur_pic_ptr->qscale_table[mb_pos];
alpha = rv40_alpha_tab[q];
beta = rv40_beta_tab [q];
betaY = betaC = beta * 3;
@ -401,7 +401,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
if(avail[i]){
int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
mvmasks[i] = r->deblock_coefs[pos];
mbtype [i] = s->current_picture_ptr->mb_type[pos];
mbtype [i] = s->cur_pic_ptr->mb_type[pos];
cbp [i] = r->cbp_luma[pos];
uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
@ -460,7 +460,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
}

for(j = 0; j < 16; j += 4){
Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
Y = s->cur_pic_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
int ij = i + j;
int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
@ -505,7 +505,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
}
for(k = 0; k < 2; k++){
for(j = 0; j < 2; j++){
C = s->current_picture_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
C = s->cur_pic_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
for(i = 0; i < 2; i++, C += 4){
int ij = i + j*2;
int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;

@ -1834,9 +1834,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if (ret < 0)
return ret;

mpv->current_picture_ptr = &mpv->current_picture;
mpv->current_picture.f = s->current_picture;
mpv->current_picture.f->pts = pict->pts;
mpv->cur_pic_ptr = &mpv->cur_pic;
mpv->cur_pic.f = s->current_picture;
mpv->cur_pic.f->pts = pict->pts;
if(pic->pict_type == AV_PICTURE_TYPE_P){
int block_width = (width +15)>>4;
int block_height= (height+15)>>4;
@ -1846,9 +1846,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
av_assert0(s->last_picture[0]->data[0]);

mpv->avctx = s->avctx;
mpv->last_picture.f = s->last_picture[0];
mpv-> new_picture = s->input_picture;
mpv->last_picture_ptr = &mpv->last_picture;
mpv->last_pic.f = s->last_picture[0];
mpv-> new_pic = s->input_picture;
mpv->last_pic_ptr = &mpv->last_pic;
mpv->linesize = stride;
mpv->uvlinesize = s->current_picture->linesize[1];
mpv->width = width;
@ -2043,9 +2043,9 @@ redo_frame:
mpv->frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
mpv->p_tex_bits = mpv->frame_bits - mpv->misc_bits - mpv->mv_bits;
mpv->total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
mpv->current_picture.display_picture_number =
mpv->current_picture.coded_picture_number = avctx->frame_num;
mpv->current_picture.f->quality = pic->quality;
mpv->cur_pic.display_picture_number =
mpv->cur_pic.coded_picture_number = avctx->frame_num;
mpv->cur_pic.f->quality = pic->quality;
if (enc->pass1_rc)
if (ff_rate_estimate_qscale(mpv, 0) < 0)
return -1;

@ -326,13 +326,13 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,

if (s->pict_type == AV_PICTURE_TYPE_P) {
s->m.avctx = s->avctx;
s->m.current_picture_ptr = &s->m.current_picture;
s->m.last_picture_ptr = &s->m.last_picture;
s->m.last_picture.f->data[0] = ref_plane;
s->m.cur_pic_ptr = &s->m.cur_pic;
s->m.last_pic_ptr = &s->m.last_pic;
s->m.last_pic.f->data[0] = ref_plane;
s->m.linesize =
s->m.last_picture.f->linesize[0] =
s->m.new_picture->linesize[0] =
s->m.current_picture.f->linesize[0] = stride;
s->m.last_pic.f->linesize[0] =
s->m.new_pic->linesize[0] =
s->m.cur_pic.f->linesize[0] = stride;
s->m.width = width;
s->m.height = height;
s->m.mb_width = block_width;
@ -370,9 +370,9 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
s->m.mb_mean = (uint8_t *)s->dummy;
s->m.mb_var = (uint16_t *)s->dummy;
s->m.mc_mb_var = (uint16_t *)s->dummy;
s->m.current_picture.mb_type = s->dummy;
s->m.cur_pic.mb_type = s->dummy;

s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
s->m.cur_pic.motion_val[0] = s->motion_val8[plane] + 2;
s->m.p_mv_table = s->motion_val16[plane] +
s->m.mb_stride + 1;
s->m.mecc = s->mecc; // move
@ -381,7 +381,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
s->m.me.dia_size = s->avctx->dia_size;
s->m.first_slice_line = 1;
for (y = 0; y < block_height; y++) {
s->m.new_picture->data[0] = src - y * 16 * stride; // ugly
s->m.new_pic->data[0] = src - y * 16 * stride; // ugly
s->m.mb_y = y;

for (i = 0; i < 16 && i + 16 * y < height; i++) {
@ -561,7 +561,7 @@ static av_cold int svq1_encode_end(AVCodecContext *avctx)

av_frame_free(&s->current_picture);
av_frame_free(&s->last_picture);
av_frame_free(&s->m.new_picture);
av_frame_free(&s->m.new_pic);

return 0;
}
@ -624,10 +624,10 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
s->dummy = av_mallocz((s->y_block_width + 1) *
s->y_block_height * sizeof(int32_t));
s->m.me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*s->m.me.map));
s->m.new_picture = av_frame_alloc();
s->m.new_pic = av_frame_alloc();

if (!s->m.me.scratchpad || !s->m.me.map ||
!s->mb_type || !s->dummy || !s->m.new_picture)
!s->mb_type || !s->dummy || !s->m.new_pic)
return AVERROR(ENOMEM);
s->m.me.score_map = s->m.me.map + ME_MAP_SIZE;

@ -42,12 +42,12 @@ static inline int mpeg2_get_is_frame_start(const MpegEncContext *s)
static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
const MpegEncContext *s = avctx->priv_data;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
VAPictureParameterBufferMPEG2 pic_param;
VAIQMatrixBufferMPEG2 iq_matrix;
int i, err;

pic->output_surface = ff_vaapi_get_surface_id(s->current_picture_ptr->f);
pic->output_surface = ff_vaapi_get_surface_id(s->cur_pic_ptr->f);

pic_param = (VAPictureParameterBufferMPEG2) {
.horizontal_size = s->width,
@ -73,10 +73,10 @@ static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_

switch (s->pict_type) {
case AV_PICTURE_TYPE_B:
pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_pic.f);
// fall-through
case AV_PICTURE_TYPE_P:
pic_param.forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
pic_param.forward_reference_picture = ff_vaapi_get_surface_id(s->last_pic.f);
break;
}

@ -115,7 +115,7 @@ fail:
static int vaapi_mpeg2_end_frame(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
int ret;

ret = ff_vaapi_decode_issue(avctx, pic);
@ -131,7 +131,7 @@ fail:
static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
const MpegEncContext *s = avctx->priv_data;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
VASliceParameterBufferMPEG2 slice_param;
GetBitContext gb;
uint32_t quantiser_scale_code, intra_slice_flag, macroblock_offset;

@ -49,11 +49,11 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_
{
Mpeg4DecContext *ctx = avctx->priv_data;
MpegEncContext *s = &ctx->m;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
VAPictureParameterBufferMPEG4 pic_param;
int i, err;

pic->output_surface = ff_vaapi_get_surface_id(s->current_picture_ptr->f);
pic->output_surface = ff_vaapi_get_surface_id(s->cur_pic_ptr->f);

pic_param = (VAPictureParameterBufferMPEG4) {
.vop_width = s->width,
@ -78,7 +78,7 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_
.vop_fields.bits = {
.vop_coding_type = s->pict_type - AV_PICTURE_TYPE_I,
.backward_reference_vop_coding_type =
s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f->pict_type - AV_PICTURE_TYPE_I : 0,
s->pict_type == AV_PICTURE_TYPE_B ? s->next_pic.f->pict_type - AV_PICTURE_TYPE_I : 0,
.vop_rounding_type = s->no_rounding,
.intra_dc_vlc_thr = mpeg4_get_intra_dc_vlc_thr(ctx),
.top_field_first = s->top_field_first,
@ -100,9 +100,9 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_
}

if (s->pict_type == AV_PICTURE_TYPE_B)
pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_pic.f);
if (s->pict_type != AV_PICTURE_TYPE_I)
pic_param.forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
pic_param.forward_reference_picture = ff_vaapi_get_surface_id(s->last_pic.f);

err = ff_vaapi_decode_make_param_buffer(avctx, pic,
VAPictureParameterBufferType,
@ -139,7 +139,7 @@ fail:
static int vaapi_mpeg4_end_frame(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
int ret;

ret = ff_vaapi_decode_issue(avctx, pic);
@ -155,7 +155,7 @@ fail:
static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
MpegEncContext *s = avctx->priv_data;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
VASliceParameterBufferMPEG4 slice_param;
int err;

@ -253,11 +253,11 @@ static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t
{
const VC1Context *v = avctx->priv_data;
const MpegEncContext *s = &v->s;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
VAPictureParameterBufferVC1 pic_param;
int err;

pic->output_surface = ff_vaapi_get_surface_id(s->current_picture_ptr->f);
pic->output_surface = ff_vaapi_get_surface_id(s->cur_pic_ptr->f);

pic_param = (VAPictureParameterBufferVC1) {
.forward_reference_picture = VA_INVALID_ID,
@ -374,10 +374,10 @@ static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t

switch (s->pict_type) {
case AV_PICTURE_TYPE_B:
pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_pic.f);
// fall-through
case AV_PICTURE_TYPE_P:
pic_param.forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
pic_param.forward_reference_picture = ff_vaapi_get_surface_id(s->last_pic.f);
break;
}

@ -450,7 +450,7 @@ static int vaapi_vc1_end_frame(AVCodecContext *avctx)
{
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
int ret;

ret = ff_vaapi_decode_issue(avctx, pic);
@ -465,7 +465,7 @@ static int vaapi_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer,
{
const VC1Context *v = avctx->priv_data;
const MpegEncContext *s = &v->s;
VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
VAAPIDecodePicture *pic = s->cur_pic_ptr->hwaccel_picture_private;
VASliceParameterBufferVC1 slice_param;
int mb_height;
int err;

@ -856,7 +856,7 @@ int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
v->s.pict_type = (v->fptype & 1) ? AV_PICTURE_TYPE_BI : AV_PICTURE_TYPE_B;
else
v->s.pict_type = (v->fptype & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
v->s.cur_pic_ptr->f->pict_type = v->s.pict_type;
if (!v->pic_header_flag)
goto parse_common_info;
}

@ -59,9 +59,9 @@ static inline void init_block_index(VC1Context *v)
MpegEncContext *s = &v->s;
ff_init_block_index(s);
if (v->field_mode && !(v->second_field ^ v->tff)) {
s->dest[0] += s->current_picture_ptr->f->linesize[0];
s->dest[1] += s->current_picture_ptr->f->linesize[1];
s->dest[2] += s->current_picture_ptr->f->linesize[2];
s->dest[0] += s->cur_pic_ptr->f->linesize[0];
s->dest[1] += s->cur_pic_ptr->f->linesize[1];
s->dest[2] += s->cur_pic_ptr->f->linesize[2];
}
}

@ -417,7 +417,7 @@ static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
int dqscale_index;

/* scale predictors if needed */
q1 = FFABS(s->current_picture.qscale_table[mb_pos]);
q1 = FFABS(s->cur_pic.qscale_table[mb_pos]);
dqscale_index = s->y_dc_scale_table[q1] - 1;
if (dqscale_index < 0)
return 0;
@ -433,12 +433,12 @@ static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
a = dc_val[ - wrap];

if (c_avail && (n != 1 && n != 3)) {
q2 = FFABS(s->current_picture.qscale_table[mb_pos - 1]);
q2 = FFABS(s->cur_pic.qscale_table[mb_pos - 1]);
if (q2 && q2 != q1)
c = (int)((unsigned)c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && (n != 2 && n != 3)) {
q2 = FFABS(s->current_picture.qscale_table[mb_pos - s->mb_stride]);
q2 = FFABS(s->cur_pic.qscale_table[mb_pos - s->mb_stride]);
if (q2 && q2 != q1)
a = (int)((unsigned)a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
@ -448,7 +448,7 @@ static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
off--;
if (n != 2)
off -= s->mb_stride;
q2 = FFABS(s->current_picture.qscale_table[off]);
q2 = FFABS(s->cur_pic.qscale_table[off]);
if (q2 && q2 != q1)
b = (int)((unsigned)b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
@ -771,19 +771,19 @@ static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
else // top
ac_val -= 16 * s->block_wrap[n];

q1 = s->current_picture.qscale_table[mb_pos];
q1 = s->cur_pic.qscale_table[mb_pos];
if (n == 3)
q2 = q1;
else if (dc_pred_dir) {
if (n == 1)
q2 = q1;
else if (c_avail && mb_pos)
q2 = s->current_picture.qscale_table[mb_pos - 1];
q2 = s->cur_pic.qscale_table[mb_pos - 1];
} else {
if (n == 2)
q2 = q1;
else if (a_avail && mb_pos >= s->mb_stride)
q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
q2 = s->cur_pic.qscale_table[mb_pos - s->mb_stride];
}

//AC Decoding
@ -973,11 +973,11 @@ static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
else //top
ac_val -= 16 * s->block_wrap[n];

q1 = s->current_picture.qscale_table[mb_pos];
q1 = s->cur_pic.qscale_table[mb_pos];
if (dc_pred_dir && c_avail && mb_pos)
q2 = s->current_picture.qscale_table[mb_pos - 1];
q2 = s->cur_pic.qscale_table[mb_pos - 1];
if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
q2 = s->cur_pic.qscale_table[mb_pos - s->mb_stride];
if (dc_pred_dir && n == 1)
q2 = q1;
if (!dc_pred_dir && n == 2)
@ -1314,10 +1314,10 @@ static int vc1_decode_p_mb(VC1Context *v)
GET_MVDATA(dmv_x, dmv_y);

if (s->mb_intra) {
s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
s->cur_pic.motion_val[1][s->block_index[0]][0] = 0;
s->cur_pic.motion_val[1][s->block_index[0]][1] = 0;
}
s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
s->cur_pic.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);

/* FIXME Set DC val for inter block ? */
@ -1334,7 +1334,7 @@ static int vc1_decode_p_mb(VC1Context *v)
mquant = v->pq;
cbp = 0;
}
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;

if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index],
@ -1383,8 +1383,8 @@ static int vc1_decode_p_mb(VC1Context *v)
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
s->current_picture.qscale_table[mb_pos] = 0;
s->cur_pic.mb_type[mb_pos] = MB_TYPE_SKIP;
s->cur_pic.qscale_table[mb_pos] = 0;
ff_vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
ff_vc1_mc_1mv(v, 0);
}
@ -1427,7 +1427,7 @@ static int vc1_decode_p_mb(VC1Context *v)
if (!intra_count && !coded_inter)
goto end;
GET_MQUANT();
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
/* test if block is intra and has pred */
{
int intrapred = 0;
@ -1484,7 +1484,7 @@ static int vc1_decode_p_mb(VC1Context *v)
}
} else { // skipped MB
s->mb_intra = 0;
s->current_picture.qscale_table[mb_pos] = 0;
s->cur_pic.qscale_table[mb_pos] = 0;
for (i = 0; i < 6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
@ -1494,7 +1494,7 @@ static int vc1_decode_p_mb(VC1Context *v)
ff_vc1_mc_4mv_luma(v, i, 0, 0);
}
ff_vc1_mc_4mv_chroma(v, 0);
s->current_picture.qscale_table[mb_pos] = 0;
s->cur_pic.qscale_table[mb_pos] = 0;
}
}
end:
@ -1574,19 +1574,19 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
}
if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
for (i = 0; i < 4; i++) {
s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
s->cur_pic.motion_val[1][s->block_index[i]][0] = 0;
s->cur_pic.motion_val[1][s->block_index[i]][1] = 0;
}
v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
s->mb_intra = 1;
s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA;
fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
mb_has_coeffs = get_bits1(gb);
if (mb_has_coeffs)
cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc, VC1_CBPCY_P_VLC_BITS, 2);
v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
GET_MQUANT();
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
@ -1666,7 +1666,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
}
if (cbp)
GET_MQUANT(); // p. 227
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index], VC1_TTMB_VLC_BITS, 2);
for (i = 0; i < 6; i++) {
@ -1697,8 +1697,8 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
s->current_picture.qscale_table[mb_pos] = 0;
s->cur_pic.mb_type[mb_pos] = MB_TYPE_SKIP;
s->cur_pic.qscale_table[mb_pos] = 0;
v->blk_mv_type[s->block_index[0]] = 0;
v->blk_mv_type[s->block_index[1]] = 0;
v->blk_mv_type[s->block_index[2]] = 0;
@ -1742,11 +1742,11 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
if (idx_mbmode <= 1) { // intra MB
v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
s->mb_intra = 1;
s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
s->cur_pic.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
s->cur_pic.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
s->cur_pic.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
GET_MQUANT();
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
@ -1775,7 +1775,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
}
} else {
s->mb_intra = v->is_intra[s->mb_x] = 0;
s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
s->cur_pic.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
for (i = 0; i < 6; i++)
v->mb_type[0][s->block_index[i]] = 0;
if (idx_mbmode <= 5) { // 1-MV
@ -1803,7 +1803,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
if (cbp) {
GET_MQUANT();
}
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp) {
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index], VC1_TTMB_VLC_BITS, 2);
}
@ -1875,7 +1875,7 @@ static int vc1_decode_b_mb(VC1Context *v)
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
s->current_picture.qscale_table[mb_pos] = 0;
s->cur_pic.qscale_table[mb_pos] = 0;

if (!direct) {
if (!skipped) {
@ -1912,7 +1912,7 @@ static int vc1_decode_b_mb(VC1Context *v)
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
s->mb_intra = 0;
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
if (!v->ttmbf)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index], VC1_TTMB_VLC_BITS, 2);
dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
@ -1927,7 +1927,7 @@ static int vc1_decode_b_mb(VC1Context *v)
}
if (s->mb_intra && !mb_has_coeffs) {
GET_MQUANT();
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
s->ac_pred = get_bits1(gb);
cbp = 0;
ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
@ -1949,7 +1949,7 @@ static int vc1_decode_b_mb(VC1Context *v)
s->ac_pred = get_bits1(gb);
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index], VC1_TTMB_VLC_BITS, 2);
}
@ -2024,11 +2024,11 @@ static int vc1_decode_b_mb_intfi(VC1Context *v)
if (idx_mbmode <= 1) { // intra MB
v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
s->mb_intra = 1;
s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
s->cur_pic.motion_val[1][s->block_index[0]][0] = 0;
s->cur_pic.motion_val[1][s->block_index[0]][1] = 0;
s->cur_pic.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
GET_MQUANT();
s->current_picture.qscale_table[mb_pos] = mquant;
s->cur_pic.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
|
||||
s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
|
||||
@ -2064,7 +2064,7 @@ static int vc1_decode_b_mb_intfi(VC1Context *v)
|
||||
}
|
||||
} else {
|
||||
s->mb_intra = v->is_intra[s->mb_x] = 0;
|
||||
s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
|
||||
s->cur_pic.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
|
||||
for (i = 0; i < 6; i++)
|
||||
v->mb_type[0][s->block_index[i]] = 0;
|
||||
if (v->fmb_is_raw)
|
||||
@ -2101,7 +2101,7 @@ static int vc1_decode_b_mb_intfi(VC1Context *v)
|
||||
if (bmvtype == BMV_TYPE_DIRECT) {
|
||||
dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
|
||||
dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
|
||||
if (!s->next_picture_ptr->field_picture) {
|
||||
if (!s->next_pic_ptr->field_picture) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
@ -2133,7 +2133,7 @@ static int vc1_decode_b_mb_intfi(VC1Context *v)
|
||||
if (cbp) {
|
||||
GET_MQUANT();
|
||||
}
|
||||
s->current_picture.qscale_table[mb_pos] = mquant;
|
||||
s->cur_pic.qscale_table[mb_pos] = mquant;
|
||||
if (!v->ttmbf && cbp) {
|
||||
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index], VC1_TTMB_VLC_BITS, 2);
|
||||
}
|
||||
@ -2212,21 +2212,21 @@ static int vc1_decode_b_mb_intfr(VC1Context *v)
|
||||
|
||||
if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
|
||||
for (i = 0; i < 4; i++) {
|
||||
s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
|
||||
s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
|
||||
s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
|
||||
s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
|
||||
s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0] = 0;
|
||||
s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1] = 0;
|
||||
s->mv[1][i][0] = s->cur_pic.motion_val[1][s->block_index[i]][0] = 0;
|
||||
s->mv[1][i][1] = s->cur_pic.motion_val[1][s->block_index[i]][1] = 0;
|
||||
}
|
||||
v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
|
||||
s->mb_intra = 1;
|
||||
s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
|
||||
s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA;
|
||||
fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
|
||||
mb_has_coeffs = get_bits1(gb);
|
||||
if (mb_has_coeffs)
|
||||
cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc, VC1_CBPCY_P_VLC_BITS, 2);
|
||||
v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
|
||||
GET_MQUANT();
|
||||
s->current_picture.qscale_table[mb_pos] = mquant;
|
||||
s->cur_pic.qscale_table[mb_pos] = mquant;
|
||||
/* Set DC scale - y and c use the same (not sure if necessary here) */
|
||||
s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
|
||||
s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
|
||||
@ -2267,31 +2267,31 @@ static int vc1_decode_b_mb_intfr(VC1Context *v)
|
||||
direct = v->direct_mb_plane[mb_pos];
|
||||
|
||||
if (direct) {
|
||||
if (s->next_picture_ptr->field_picture)
|
||||
if (s->next_pic_ptr->field_picture)
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
|
||||
s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
|
||||
s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
|
||||
s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
|
||||
s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
|
||||
s->mv[0][0][0] = s->cur_pic.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
|
||||
s->mv[0][0][1] = s->cur_pic.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
|
||||
s->mv[1][0][0] = s->cur_pic.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
|
||||
s->mv[1][0][1] = s->cur_pic.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
|
||||
|
||||
if (twomv) {
|
||||
s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
|
||||
s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
|
||||
s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
|
||||
s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
|
||||
s->mv[0][2][0] = s->cur_pic.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
|
||||
s->mv[0][2][1] = s->cur_pic.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
|
||||
s->mv[1][2][0] = s->cur_pic.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
|
||||
s->mv[1][2][1] = s->cur_pic.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
|
||||
|
||||
for (i = 1; i < 4; i += 2) {
|
||||
s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
|
||||
s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
|
||||
s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
|
||||
s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
|
||||
s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
|
||||
s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
|
||||
s->mv[1][i][0] = s->cur_pic.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
|
||||
s->mv[1][i][1] = s->cur_pic.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
|
||||
}
|
||||
} else {
|
||||
for (i = 1; i < 4; i++) {
|
||||
s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
|
||||
s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
|
||||
s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
|
||||
s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
|
||||
s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
|
||||
s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
|
||||
s->mv[1][i][0] = s->cur_pic.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
|
||||
s->mv[1][i][1] = s->cur_pic.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2393,10 +2393,10 @@ static int vc1_decode_b_mb_intfr(VC1Context *v)
|
||||
|
||||
if (mvsw) {
|
||||
for (i = 0; i < 2; i++) {
|
||||
s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
|
||||
s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
|
||||
s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
|
||||
s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
|
||||
s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->cur_pic.motion_val[dir][s->block_index[i+2]][0] = s->cur_pic.motion_val[dir][s->block_index[i]][0];
|
||||
s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->cur_pic.motion_val[dir][s->block_index[i+2]][1] = s->cur_pic.motion_val[dir][s->block_index[i]][1];
|
||||
s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->cur_pic.motion_val[dir2][s->block_index[i]][0] = s->cur_pic.motion_val[dir2][s->block_index[i+2]][0];
|
||||
s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->cur_pic.motion_val[dir2][s->block_index[i]][1] = s->cur_pic.motion_val[dir2][s->block_index[i+2]][1];
|
||||
}
|
||||
} else {
|
||||
ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, !dir);
|
||||
@ -2423,15 +2423,15 @@ static int vc1_decode_b_mb_intfr(VC1Context *v)
|
||||
v->blk_mv_type[s->block_index[3]] = 1;
|
||||
ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, !dir);
|
||||
for (i = 0; i < 2; i++) {
|
||||
s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
|
||||
s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
|
||||
s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->cur_pic.motion_val[!dir][s->block_index[i+2]][0] = s->cur_pic.motion_val[!dir][s->block_index[i]][0];
|
||||
s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->cur_pic.motion_val[!dir][s->block_index[i+2]][1] = s->cur_pic.motion_val[!dir][s->block_index[i]][1];
|
||||
}
|
||||
ff_vc1_mc_1mv(v, dir);
|
||||
}
|
||||
|
||||
if (cbp)
|
||||
GET_MQUANT(); // p. 227
|
||||
s->current_picture.qscale_table[mb_pos] = mquant;
|
||||
s->cur_pic.qscale_table[mb_pos] = mquant;
|
||||
if (!v->ttmbf && cbp)
|
||||
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index], VC1_TTMB_VLC_BITS, 2);
|
||||
for (i = 0; i < 6; i++) {
|
||||
@ -2462,8 +2462,8 @@ static int vc1_decode_b_mb_intfr(VC1Context *v)
|
||||
v->mb_type[0][s->block_index[i]] = 0;
|
||||
s->dc_val[0][s->block_index[i]] = 0;
|
||||
}
|
||||
s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
|
||||
s->current_picture.qscale_table[mb_pos] = 0;
|
||||
s->cur_pic.mb_type[mb_pos] = MB_TYPE_SKIP;
|
||||
s->cur_pic.qscale_table[mb_pos] = 0;
|
||||
v->blk_mv_type[s->block_index[0]] = 0;
|
||||
v->blk_mv_type[s->block_index[1]] = 0;
|
||||
v->blk_mv_type[s->block_index[2]] = 0;
|
||||
@ -2481,10 +2481,10 @@ static int vc1_decode_b_mb_intfr(VC1Context *v)
|
||||
if (mvsw)
|
||||
dir2 = !dir;
|
||||
for (i = 0; i < 2; i++) {
|
||||
s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
|
||||
s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
|
||||
s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
|
||||
s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
|
||||
s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->cur_pic.motion_val[dir][s->block_index[i+2]][0] = s->cur_pic.motion_val[dir][s->block_index[i]][0];
|
||||
s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->cur_pic.motion_val[dir][s->block_index[i+2]][1] = s->cur_pic.motion_val[dir][s->block_index[i]][1];
|
||||
s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->cur_pic.motion_val[dir2][s->block_index[i]][0] = s->cur_pic.motion_val[dir2][s->block_index[i+2]][0];
|
||||
s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->cur_pic.motion_val[dir2][s->block_index[i]][1] = s->cur_pic.motion_val[dir2][s->block_index[i+2]][1];
|
||||
}
|
||||
} else {
|
||||
v->blk_mv_type[s->block_index[0]] = 1;
|
||||
@ -2493,8 +2493,8 @@ static int vc1_decode_b_mb_intfr(VC1Context *v)
|
||||
v->blk_mv_type[s->block_index[3]] = 1;
|
||||
ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, !dir);
|
||||
for (i = 0; i < 2; i++) {
|
||||
s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
|
||||
s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
|
||||
s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->cur_pic.motion_val[!dir][s->block_index[i+2]][0] = s->cur_pic.motion_val[!dir][s->block_index[i]][0];
|
||||
s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->cur_pic.motion_val[!dir][s->block_index[i+2]][1] = s->cur_pic.motion_val[!dir][s->block_index[i]][1];
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2563,11 +2563,11 @@ static void vc1_decode_i_blocks(VC1Context *v)
|
||||
update_block_index(s);
|
||||
s->bdsp.clear_blocks(v->block[v->cur_blk_idx][0]);
|
||||
mb_pos = s->mb_x + s->mb_y * s->mb_width;
|
||||
s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
|
||||
s->current_picture.qscale_table[mb_pos] = v->pq;
|
||||
s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA;
|
||||
s->cur_pic.qscale_table[mb_pos] = v->pq;
|
||||
for (int i = 0; i < 4; i++) {
|
||||
s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
|
||||
s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
|
||||
s->cur_pic.motion_val[1][s->block_index[i]][0] = 0;
|
||||
s->cur_pic.motion_val[1][s->block_index[i]][1] = 0;
|
||||
}
|
||||
|
||||
// do actual MB decoding and displaying
|
||||
@ -2693,10 +2693,10 @@ static int vc1_decode_i_blocks_adv(VC1Context *v)
|
||||
update_block_index(s);
|
||||
s->bdsp.clear_blocks(v->block[v->cur_blk_idx][0]);
|
||||
mb_pos = s->mb_x + s->mb_y * s->mb_stride;
|
||||
s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
|
||||
s->cur_pic.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
|
||||
for (int i = 0; i < 4; i++) {
|
||||
s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[1][s->block_index[i] + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[1][s->block_index[i] + v->blocks_off][1] = 0;
|
||||
}
|
||||
|
||||
// do actual MB decoding and displaying
|
||||
@ -2719,7 +2719,7 @@ static int vc1_decode_i_blocks_adv(VC1Context *v)
|
||||
|
||||
GET_MQUANT();
|
||||
|
||||
s->current_picture.qscale_table[mb_pos] = mquant;
|
||||
s->cur_pic.qscale_table[mb_pos] = mquant;
|
||||
/* Set DC scale - y and c use the same */
|
||||
s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
|
||||
s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
|
||||
@ -2943,7 +2943,7 @@ static void vc1_decode_skip_blocks(VC1Context *v)
|
||||
{
|
||||
MpegEncContext *s = &v->s;
|
||||
|
||||
if (!v->s.last_picture.f->data[0])
|
||||
if (!v->s.last_pic.f->data[0])
|
||||
return;
|
||||
|
||||
ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
|
||||
@ -2952,9 +2952,9 @@ static void vc1_decode_skip_blocks(VC1Context *v)
|
||||
s->mb_x = 0;
|
||||
init_block_index(v);
|
||||
update_block_index(s);
|
||||
memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
|
||||
memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
|
||||
memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
|
||||
memcpy(s->dest[0], s->last_pic.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
|
||||
memcpy(s->dest[1], s->last_pic.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
|
||||
memcpy(s->dest[2], s->last_pic.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
|
||||
s->first_slice_line = 0;
|
||||
}
|
||||
}
|
||||
@ -2964,7 +2964,7 @@ void ff_vc1_decode_blocks(VC1Context *v)
|
||||
|
||||
v->s.esc3_level_length = 0;
|
||||
if (v->x8_type) {
|
||||
ff_intrax8_decode_picture(&v->x8, &v->s.current_picture,
|
||||
ff_intrax8_decode_picture(&v->x8, &v->s.cur_pic,
|
||||
&v->s.gb, &v->s.mb_x, &v->s.mb_y,
|
||||
2 * v->pq + v->halfpq, v->pq * !v->pquantizer,
|
||||
v->s.loop_filter, v->s.low_delay);
|
||||
|
@ -500,7 +500,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 2 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 2 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - 2 * s->mb_stride - 1 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 4 * s->b8_stride - 2 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -520,7 +520,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 4 * s->b8_stride + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - 2 * s->mb_stride + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 4 * s->b8_stride + v->blocks_off],
|
||||
ttblk,
|
||||
@ -543,7 +543,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 2 * s->b8_stride - 2 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 2 * s->b8_stride - 2 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - s->mb_stride - 1 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 2 * s->b8_stride - 2 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -562,7 +562,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 2 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 2 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - 1 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 2 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -583,7 +583,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 2 * s->b8_stride + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 2 * s->b8_stride + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - s->mb_stride + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 2 * s->b8_stride + v->blocks_off],
|
||||
ttblk,
|
||||
@ -602,7 +602,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] + v->blocks_off],
|
||||
ttblk,
|
||||
@ -625,7 +625,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 4 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 4 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - 2 * s->mb_stride - 2 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 4 * s->b8_stride - 4 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -646,7 +646,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 2 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 4 * s->b8_stride - 2 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - 2 * s->mb_stride - 1 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 4 * s->b8_stride - 2 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -665,7 +665,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 4 * s->b8_stride + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 4 * s->b8_stride + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - 2 * s->mb_stride + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 4 * s->b8_stride + v->blocks_off],
|
||||
ttblk,
|
||||
@ -688,7 +688,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 2 * s->b8_stride - 4 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 2 * s->b8_stride - 4 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - s->mb_stride - 2 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 2 * s->b8_stride - 4 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -709,7 +709,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 2 * s->b8_stride - 2 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 2 * s->b8_stride - 2 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - s->mb_stride - 1 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 2 * s->b8_stride - 2 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -728,7 +728,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 2 * s->b8_stride + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 2 * s->b8_stride + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - s->mb_stride + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 2 * s->b8_stride + v->blocks_off],
|
||||
ttblk,
|
||||
@ -749,7 +749,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 4 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 4 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - 2 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 4 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -770,7 +770,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] - 2 + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] - 2 + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] - 1 + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] - 2 + v->blocks_off],
|
||||
ttblk,
|
||||
@ -789,7 +789,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
|
||||
cbp,
|
||||
is_intra,
|
||||
i > 3 ? uvmv :
|
||||
&s->current_picture.motion_val[0][s->block_index[i] + v->blocks_off],
|
||||
&s->cur_pic.motion_val[0][s->block_index[i] + v->blocks_off],
|
||||
i > 3 ? &v->mv_f[0][s->block_index[i] + v->mb_off] :
|
||||
&v->mv_f[0][s->block_index[i] + v->blocks_off],
|
||||
ttblk,
|
||||
|
@ -184,11 +184,11 @@ void ff_vc1_mc_1mv(VC1Context *v, int dir)
|
||||
|
||||
if ((!v->field_mode ||
|
||||
(v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
|
||||
!v->s.last_picture.f->data[0])
|
||||
!v->s.last_pic.f->data[0])
|
||||
return;
|
||||
|
||||
linesize = s->current_picture_ptr->f->linesize[0];
|
||||
uvlinesize = s->current_picture_ptr->f->linesize[1];
|
||||
linesize = s->cur_pic_ptr->f->linesize[0];
|
||||
uvlinesize = s->cur_pic_ptr->f->linesize[1];
|
||||
|
||||
mx = s->mv[dir][0][0];
|
||||
my = s->mv[dir][0][1];
|
||||
@ -196,8 +196,8 @@ void ff_vc1_mc_1mv(VC1Context *v, int dir)
|
||||
// store motion vectors for further use in B-frames
|
||||
if (s->pict_type == AV_PICTURE_TYPE_P) {
|
||||
for (i = 0; i < 4; i++) {
|
||||
s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
|
||||
s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
|
||||
s->cur_pic.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
|
||||
s->cur_pic.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
|
||||
}
|
||||
}
|
||||
|
||||
@ -219,30 +219,30 @@ void ff_vc1_mc_1mv(VC1Context *v, int dir)
|
||||
}
|
||||
if (!dir) {
|
||||
if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
|
||||
srcY = s->current_picture.f->data[0];
|
||||
srcU = s->current_picture.f->data[1];
|
||||
srcV = s->current_picture.f->data[2];
|
||||
srcY = s->cur_pic.f->data[0];
|
||||
srcU = s->cur_pic.f->data[1];
|
||||
srcV = s->cur_pic.f->data[2];
|
||||
luty = v->curr_luty;
|
||||
lutuv = v->curr_lutuv;
|
||||
use_ic = *v->curr_use_ic;
|
||||
interlace = 1;
|
||||
} else {
|
||||
srcY = s->last_picture.f->data[0];
|
||||
srcU = s->last_picture.f->data[1];
|
||||
srcV = s->last_picture.f->data[2];
|
||||
srcY = s->last_pic.f->data[0];
|
||||
srcU = s->last_pic.f->data[1];
|
||||
srcV = s->last_pic.f->data[2];
|
||||
luty = v->last_luty;
|
||||
lutuv = v->last_lutuv;
|
||||
use_ic = v->last_use_ic;
|
||||
interlace = !!(s->last_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->last_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
}
|
||||
} else {
|
||||
srcY = s->next_picture.f->data[0];
|
||||
srcU = s->next_picture.f->data[1];
|
||||
srcV = s->next_picture.f->data[2];
|
||||
srcY = s->next_pic.f->data[0];
|
||||
srcU = s->next_pic.f->data[1];
|
||||
srcV = s->next_pic.f->data[2];
|
||||
luty = v->next_luty;
|
||||
lutuv = v->next_lutuv;
|
||||
use_ic = v->next_use_ic;
|
||||
interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->next_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
}
|
||||
|
||||
if (!srcY || !srcU) {
|
||||
@ -464,31 +464,31 @@ void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
|
||||
|
||||
if ((!v->field_mode ||
|
||||
(v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
|
||||
!v->s.last_picture.f->data[0])
|
||||
!v->s.last_pic.f->data[0])
|
||||
return;
|
||||
|
||||
linesize = s->current_picture_ptr->f->linesize[0];
|
||||
linesize = s->cur_pic_ptr->f->linesize[0];
|
||||
|
||||
mx = s->mv[dir][n][0];
|
||||
my = s->mv[dir][n][1];
|
||||
|
||||
if (!dir) {
|
||||
if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
|
||||
srcY = s->current_picture.f->data[0];
|
||||
srcY = s->cur_pic.f->data[0];
|
||||
luty = v->curr_luty;
|
||||
use_ic = *v->curr_use_ic;
|
||||
interlace = 1;
|
||||
} else {
|
||||
srcY = s->last_picture.f->data[0];
|
||||
srcY = s->last_pic.f->data[0];
|
||||
luty = v->last_luty;
|
||||
use_ic = v->last_use_ic;
|
||||
interlace = !!(s->last_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->last_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
}
|
||||
} else {
|
||||
srcY = s->next_picture.f->data[0];
|
||||
srcY = s->next_pic.f->data[0];
|
||||
luty = v->next_luty;
|
||||
use_ic = v->next_use_ic;
|
||||
interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->next_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
}
|
||||
|
||||
if (!srcY) {
|
||||
@ -503,8 +503,8 @@ void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
|
||||
int opp_count = get_luma_mv(v, 0,
|
||||
&s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
|
||||
&s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1]);
|
||||
&s->cur_pic.motion_val[1][s->block_index[0] + v->blocks_off][0],
|
||||
&s->cur_pic.motion_val[1][s->block_index[0] + v->blocks_off][1]);
|
||||
int k, f = opp_count > 2;
|
||||
for (k = 0; k < 4; k++)
|
||||
v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
|
||||
@ -515,8 +515,8 @@ void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
|
||||
int width = s->avctx->coded_width;
|
||||
int height = s->avctx->coded_height >> 1;
|
||||
if (s->pict_type == AV_PICTURE_TYPE_P) {
|
||||
s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
|
||||
s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
|
||||
s->cur_pic.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
|
||||
s->cur_pic.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
|
||||
}
|
||||
qx = (s->mb_x * 16) + (mx >> 2);
|
||||
qy = (s->mb_y * 8) + (my >> 3);
|
||||
@ -645,7 +645,7 @@ void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
|
||||
int interlace;
|
||||
int uvlinesize;
|
||||
|
||||
if (!v->field_mode && !v->s.last_picture.f->data[0])
|
||||
if (!v->field_mode && !v->s.last_pic.f->data[0])
|
||||
return;
|
||||
if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
|
||||
return;
|
||||
@ -654,8 +654,8 @@ void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
|
||||
if (!v->field_mode || !v->numref) {
|
||||
int valid_count = get_chroma_mv(v, dir, &tx, &ty);
|
||||
if (!valid_count) {
|
||||
s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
|
||||
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
|
||||
return; //no need to do MC for intra blocks
|
||||
}
|
||||
@ -664,12 +664,12 @@ void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
|
||||
int opp_count = get_luma_mv(v, dir, &tx, &ty);
|
||||
chroma_ref_type = v->cur_field_type ^ (opp_count > 2);
|
||||
}
|
||||
if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
|
||||
if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_pic.f->data[0])
|
||||
return;
|
||||
s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
|
||||
s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
|
||||
s->cur_pic.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
|
||||
s->cur_pic.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
|
||||
|
||||
uvlinesize = s->current_picture_ptr->f->linesize[1];
|
||||
uvlinesize = s->cur_pic_ptr->f->linesize[1];
|
||||
|
||||
uvmx = (tx + ((tx & 3) == 3)) >> 1;
|
||||
uvmy = (ty + ((ty & 3) == 3)) >> 1;
|
||||
@ -698,24 +698,24 @@ void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
|
||||
|
||||
if (!dir) {
|
||||
if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
|
||||
srcU = s->current_picture.f->data[1];
|
||||
srcV = s->current_picture.f->data[2];
|
||||
srcU = s->cur_pic.f->data[1];
|
||||
srcV = s->cur_pic.f->data[2];
|
||||
lutuv = v->curr_lutuv;
|
||||
use_ic = *v->curr_use_ic;
|
||||
interlace = 1;
|
||||
} else {
|
||||
srcU = s->last_picture.f->data[1];
|
||||
srcV = s->last_picture.f->data[2];
|
||||
srcU = s->last_pic.f->data[1];
|
||||
srcV = s->last_pic.f->data[2];
|
||||
lutuv = v->last_lutuv;
|
||||
use_ic = v->last_use_ic;
|
||||
interlace = !!(s->last_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->last_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
}
|
||||
} else {
|
||||
srcU = s->next_picture.f->data[1];
|
||||
srcV = s->next_picture.f->data[2];
|
||||
srcU = s->next_pic.f->data[1];
|
||||
srcV = s->next_pic.f->data[2];
|
||||
lutuv = v->next_lutuv;
|
||||
use_ic = v->next_use_ic;
|
||||
interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->next_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
}
|
||||
|
||||
if (!srcU) {
|
||||
@ -856,7 +856,7 @@ void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
|
||||
if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
|
||||
return;
|
||||
|
||||
uvlinesize = s->current_picture_ptr->f->linesize[1];
|
||||
uvlinesize = s->cur_pic_ptr->f->linesize[1];
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
int d = i < 2 ? dir: dir2;
|
||||
@ -880,17 +880,17 @@ void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
|
||||
else
|
||||
uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
|
||||
if (i < 2 ? dir : dir2) {
|
||||
srcU = s->next_picture.f->data[1];
|
||||
srcV = s->next_picture.f->data[2];
|
||||
srcU = s->next_pic.f->data[1];
|
||||
srcV = s->next_pic.f->data[2];
|
||||
lutuv = v->next_lutuv;
|
||||
use_ic = v->next_use_ic;
|
||||
interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->next_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
} else {
|
||||
srcU = s->last_picture.f->data[1];
|
||||
srcV = s->last_picture.f->data[2];
|
||||
srcU = s->last_pic.f->data[1];
|
||||
srcV = s->last_pic.f->data[2];
|
||||
lutuv = v->last_lutuv;
|
||||
use_ic = v->last_use_ic;
|
||||
interlace = !!(s->last_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->last_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
}
|
||||
if (!srcU)
|
||||
return;
|
||||
@ -1012,11 +1012,11 @@ void ff_vc1_interp_mc(VC1Context *v)
|
||||
int interlace;
|
||||
int linesize, uvlinesize;
|
||||
|
||||
if (!v->field_mode && !v->s.next_picture.f->data[0])
|
||||
if (!v->field_mode && !v->s.next_pic.f->data[0])
|
||||
return;
|
||||
|
||||
linesize = s->current_picture_ptr->f->linesize[0];
|
||||
uvlinesize = s->current_picture_ptr->f->linesize[1];
|
||||
linesize = s->cur_pic_ptr->f->linesize[0];
|
||||
uvlinesize = s->cur_pic_ptr->f->linesize[1];
|
||||
|
||||
mx = s->mv[1][0][0];
|
||||
my = s->mv[1][0][1];
|
||||
@ -1030,11 +1030,11 @@ void ff_vc1_interp_mc(VC1Context *v)
|
||||
uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
|
||||
uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
|
||||
}
|
||||
srcY = s->next_picture.f->data[0];
|
||||
srcU = s->next_picture.f->data[1];
|
||||
srcV = s->next_picture.f->data[2];
|
||||
srcY = s->next_pic.f->data[0];
|
||||
srcU = s->next_pic.f->data[1];
|
||||
srcV = s->next_pic.f->data[2];
|
||||
|
||||
interlace = !!(s->next_picture.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
interlace = !!(s->next_pic.f->flags & AV_FRAME_FLAG_INTERLACED);
|
||||
|
||||
src_x = s->mb_x * 16 + (mx >> 2);
|
||||
src_y = s->mb_y * 16 + (my >> 2);
|
||||
|
@ -241,24 +241,24 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
xy = s->block_index[n];
|
||||
|
||||
if (s->mb_intra) {
|
||||
s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
|
||||
s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
|
||||
s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
|
||||
s->mv[0][n][0] = s->cur_pic.motion_val[0][xy + v->blocks_off][0] = 0;
|
||||
s->mv[0][n][1] = s->cur_pic.motion_val[0][xy + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[1][xy + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[1][xy + v->blocks_off][1] = 0;
|
||||
if (mv1) { /* duplicate motion data for 1-MV block */
|
||||
s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
|
||||
s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
|
||||
s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
|
||||
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
|
||||
s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
|
||||
s->current_picture.motion_val[1][xy + wrap + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
|
||||
s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
|
||||
s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[1][xy + wrap + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
|
||||
s->cur_pic.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
|
||||
s->cur_pic.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -301,7 +301,7 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
}
|
||||
|
||||
if (a_valid) {
|
||||
A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
|
||||
A = s->cur_pic.motion_val[dir][xy - wrap + v->blocks_off];
|
||||
a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
|
||||
num_oppfield += a_f;
|
||||
num_samefield += 1 - a_f;
|
||||
@ -312,7 +312,7 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
a_f = 0;
|
||||
}
|
||||
if (b_valid) {
|
||||
B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
|
||||
B = s->cur_pic.motion_val[dir][xy - wrap + off + v->blocks_off];
|
||||
b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
|
||||
num_oppfield += b_f;
|
||||
num_samefield += 1 - b_f;
|
||||
@ -323,7 +323,7 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
b_f = 0;
|
||||
}
|
||||
if (c_valid) {
|
||||
C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
|
||||
C = s->cur_pic.motion_val[dir][xy - 1 + v->blocks_off];
|
||||
c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
|
||||
num_oppfield += c_f;
|
||||
num_samefield += 1 - c_f;
|
||||
@ -451,15 +451,15 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
|
||||
y_bias = 1;
|
||||
/* store MV using signed modulus of MV range defined in 4.11 */
|
||||
s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
|
||||
s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
|
||||
s->mv[dir][n][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
|
||||
s->mv[dir][n][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
|
||||
if (mv1) { /* duplicate motion data for 1-MV block */
|
||||
s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
|
||||
s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
|
||||
s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
|
||||
s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
|
||||
s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
|
||||
s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
|
||||
s->cur_pic.motion_val[dir][xy + 1 + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0];
|
||||
s->cur_pic.motion_val[dir][xy + 1 + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1];
|
||||
s->cur_pic.motion_val[dir][xy + wrap + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0];
|
||||
s->cur_pic.motion_val[dir][xy + wrap + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1];
|
||||
s->cur_pic.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0];
|
||||
s->cur_pic.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1];
|
||||
v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
|
||||
v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
|
||||
}
|
||||
@ -483,24 +483,24 @@ void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
xy = s->block_index[n];
|
||||
|
||||
if (s->mb_intra) {
|
||||
s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
|
||||
s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
|
||||
s->current_picture.motion_val[1][xy][0] = 0;
|
||||
s->current_picture.motion_val[1][xy][1] = 0;
|
||||
s->mv[0][n][0] = s->cur_pic.motion_val[0][xy][0] = 0;
|
||||
s->mv[0][n][1] = s->cur_pic.motion_val[0][xy][1] = 0;
|
||||
s->cur_pic.motion_val[1][xy][0] = 0;
|
||||
s->cur_pic.motion_val[1][xy][1] = 0;
|
||||
if (mvn == 1) { /* duplicate motion data for 1-MV block */
|
||||
s->current_picture.motion_val[0][xy + 1][0] = 0;
|
||||
s->current_picture.motion_val[0][xy + 1][1] = 0;
|
||||
s->current_picture.motion_val[0][xy + wrap][0] = 0;
|
||||
s->current_picture.motion_val[0][xy + wrap][1] = 0;
|
||||
s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
|
||||
s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
|
||||
s->cur_pic.motion_val[0][xy + 1][0] = 0;
|
||||
s->cur_pic.motion_val[0][xy + 1][1] = 0;
|
||||
s->cur_pic.motion_val[0][xy + wrap][0] = 0;
|
||||
s->cur_pic.motion_val[0][xy + wrap][1] = 0;
|
||||
s->cur_pic.motion_val[0][xy + wrap + 1][0] = 0;
|
||||
s->cur_pic.motion_val[0][xy + wrap + 1][1] = 0;
|
||||
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
|
||||
s->current_picture.motion_val[1][xy + 1][0] = 0;
|
||||
s->current_picture.motion_val[1][xy + 1][1] = 0;
|
||||
s->current_picture.motion_val[1][xy + wrap][0] = 0;
|
||||
s->current_picture.motion_val[1][xy + wrap][1] = 0;
|
||||
s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
|
||||
s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
|
||||
s->cur_pic.motion_val[1][xy + 1][0] = 0;
|
||||
s->cur_pic.motion_val[1][xy + 1][1] = 0;
|
||||
s->cur_pic.motion_val[1][xy + wrap][0] = 0;
|
||||
s->cur_pic.motion_val[1][xy + wrap][1] = 0;
|
||||
s->cur_pic.motion_val[1][xy + wrap + 1][0] = 0;
|
||||
s->cur_pic.motion_val[1][xy + wrap + 1][1] = 0;
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -510,14 +510,14 @@ void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
if (s->mb_x || (n == 1) || (n == 3)) {
|
||||
if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
|
||||
|| (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
|
||||
A[0] = s->current_picture.motion_val[dir][xy - 1][0];
|
||||
A[1] = s->current_picture.motion_val[dir][xy - 1][1];
|
||||
A[0] = s->cur_pic.motion_val[dir][xy - 1][0];
|
||||
A[1] = s->cur_pic.motion_val[dir][xy - 1][1];
|
||||
a_valid = 1;
|
||||
} else { // current block has frame mv and cand. has field MV (so average)
|
||||
A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
|
||||
+ s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
|
||||
A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
|
||||
+ s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
|
||||
A[0] = (s->cur_pic.motion_val[dir][xy - 1][0]
|
||||
+ s->cur_pic.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
|
||||
A[1] = (s->cur_pic.motion_val[dir][xy - 1][1]
|
||||
+ s->cur_pic.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
|
||||
a_valid = 1;
|
||||
}
|
||||
if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
|
||||
@ -537,11 +537,11 @@ void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
|
||||
n_adj = (n & 2) | (n & 1);
|
||||
}
|
||||
B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
|
||||
B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
|
||||
B[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
|
||||
B[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
|
||||
if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
|
||||
B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
|
||||
B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
|
||||
B[0] = (B[0] + s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
|
||||
B[1] = (B[1] + s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
|
||||
}
|
||||
}
|
||||
if (s->mb_width > 1) {
|
||||
@ -552,11 +552,11 @@ void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
|
||||
n_adj = n & 2;
|
||||
}
|
||||
C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
|
||||
C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
|
||||
C[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
|
||||
C[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
|
||||
if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
|
||||
C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
|
||||
C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
|
||||
C[0] = (1 + C[0] + (s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
|
||||
C[1] = (1 + C[1] + (s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
|
||||
}
|
||||
if (s->mb_x == s->mb_width - 1) {
|
||||
if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
|
||||
@ -566,11 +566,11 @@ void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
|
||||
n_adj = n | 1;
|
||||
}
|
||||
C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
|
||||
C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
|
||||
C[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
|
||||
C[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
|
||||
if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
|
||||
C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
|
||||
C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
|
||||
C[0] = (1 + C[0] + s->cur_pic.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
|
||||
C[1] = (1 + C[1] + s->cur_pic.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
|
||||
}
|
||||
} else
|
||||
c_valid = 0;
|
||||
@ -581,12 +581,12 @@ void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
} else {
|
||||
pos_b = s->block_index[1];
|
||||
b_valid = 1;
|
||||
B[0] = s->current_picture.motion_val[dir][pos_b][0];
|
||||
B[1] = s->current_picture.motion_val[dir][pos_b][1];
|
||||
B[0] = s->cur_pic.motion_val[dir][pos_b][0];
|
||||
B[1] = s->cur_pic.motion_val[dir][pos_b][1];
|
||||
pos_c = s->block_index[0];
|
||||
c_valid = 1;
|
||||
C[0] = s->current_picture.motion_val[dir][pos_c][0];
|
||||
C[1] = s->current_picture.motion_val[dir][pos_c][1];
|
||||
C[0] = s->cur_pic.motion_val[dir][pos_c][0];
|
||||
C[1] = s->cur_pic.motion_val[dir][pos_c][1];
|
||||
}
|
||||
|
||||
total_valid = a_valid + b_valid + c_valid;
|
||||
@ -671,18 +671,18 @@ void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
|
||||
}
|
||||
|
||||
/* store MV using signed modulus of MV range defined in 4.11 */
|
||||
s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
|
||||
s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
|
||||
s->mv[dir][n][0] = s->cur_pic.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
|
||||
s->mv[dir][n][1] = s->cur_pic.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
|
||||
if (mvn == 1) { /* duplicate motion data for 1-MV block */
|
||||
s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
|
||||
s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
|
||||
s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
|
||||
s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
|
||||
s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
|
||||
s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
|
||||
s->cur_pic.motion_val[dir][xy + 1 ][0] = s->cur_pic.motion_val[dir][xy][0];
s->cur_pic.motion_val[dir][xy + 1 ][1] = s->cur_pic.motion_val[dir][xy][1];
s->cur_pic.motion_val[dir][xy + wrap ][0] = s->cur_pic.motion_val[dir][xy][0];
s->cur_pic.motion_val[dir][xy + wrap ][1] = s->cur_pic.motion_val[dir][xy][1];
s->cur_pic.motion_val[dir][xy + wrap + 1][0] = s->cur_pic.motion_val[dir][xy][0];
s->cur_pic.motion_val[dir][xy + wrap + 1][1] = s->cur_pic.motion_val[dir][xy][1];
} else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
s->cur_pic.motion_val[dir][xy + 1][0] = s->cur_pic.motion_val[dir][xy][0];
s->cur_pic.motion_val[dir][xy + 1][1] = s->cur_pic.motion_val[dir][xy][1];
s->mv[dir][n + 1][0] = s->mv[dir][n][0];
s->mv[dir][n + 1][1] = s->mv[dir][n][1];
}
@ -715,19 +715,19 @@ void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
xy = s->block_index[0];

if (s->mb_intra) {
s->current_picture.motion_val[0][xy][0] =
s->current_picture.motion_val[0][xy][1] =
s->current_picture.motion_val[1][xy][0] =
s->current_picture.motion_val[1][xy][1] = 0;
s->cur_pic.motion_val[0][xy][0] =
s->cur_pic.motion_val[0][xy][1] =
s->cur_pic.motion_val[1][xy][0] =
s->cur_pic.motion_val[1][xy][1] = 0;
return;
}
if (direct && s->next_picture_ptr->field_picture)
if (direct && s->next_pic_ptr->field_picture)
av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");

s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
s->mv[0][0][0] = scale_mv(s->next_pic.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
s->mv[0][0][1] = scale_mv(s->next_pic.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
s->mv[1][0][0] = scale_mv(s->next_pic.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
s->mv[1][0][1] = scale_mv(s->next_pic.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

/* Pullback predicted motion vectors as specified in 8.4.5.4 */
s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
@ -735,18 +735,18 @@ void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
if (direct) {
s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
s->cur_pic.motion_val[0][xy][0] = s->mv[0][0][0];
s->cur_pic.motion_val[0][xy][1] = s->mv[0][0][1];
s->cur_pic.motion_val[1][xy][0] = s->mv[1][0][0];
s->cur_pic.motion_val[1][xy][1] = s->mv[1][0][1];
return;
}

if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
C = s->current_picture.motion_val[0][xy - 2];
A = s->current_picture.motion_val[0][xy - wrap * 2];
C = s->cur_pic.motion_val[0][xy - 2];
A = s->cur_pic.motion_val[0][xy - wrap * 2];
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
B = s->cur_pic.motion_val[0][xy - wrap * 2 + off];

if (!s->mb_x) C[0] = C[1] = 0;
if (!s->first_slice_line) { // predictor A is not out of bounds
@ -812,10 +812,10 @@ void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
}
if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
C = s->current_picture.motion_val[1][xy - 2];
A = s->current_picture.motion_val[1][xy - wrap * 2];
C = s->cur_pic.motion_val[1][xy - 2];
A = s->cur_pic.motion_val[1][xy - wrap * 2];
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
B = s->cur_pic.motion_val[1][xy - wrap * 2 + off];

if (!s->mb_x)
C[0] = C[1] = 0;
@ -882,10 +882,10 @@ void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
}
s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
s->cur_pic.motion_val[0][xy][0] = s->mv[0][0][0];
s->cur_pic.motion_val[0][xy][1] = s->mv[0][0][1];
s->cur_pic.motion_val[1][xy][0] = s->mv[1][0][0];
s->cur_pic.motion_val[1][xy][1] = s->mv[1][0][1];
}

void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y,
@ -897,14 +897,14 @@ void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y,

if (v->bmvtype == BMV_TYPE_DIRECT) {
int total_opp, k, f;
if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
if (s->next_pic.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
s->mv[0][0][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][0],
v->bfraction, 0, s->quarter_sample);
s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
s->mv[0][0][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][1],
v->bfraction, 0, s->quarter_sample);
s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
s->mv[1][0][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][0],
v->bfraction, 1, s->quarter_sample);
s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
s->mv[1][0][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][1],
v->bfraction, 1, s->quarter_sample);

total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
@ -919,10 +919,10 @@ void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y,
}
v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
for (k = 0; k < 4; k++) {
s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
s->cur_pic.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
s->cur_pic.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
s->cur_pic.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
s->cur_pic.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
}

@ -235,15 +235,15 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
v->sprite_output_frame->linesize[plane] * row;

for (sprite = 0; sprite <= v->two_sprites; sprite++) {
uint8_t *iplane = s->current_picture.f->data[plane];
int iline = s->current_picture.f->linesize[plane];
uint8_t *iplane = s->cur_pic.f->data[plane];
int iline = s->cur_pic.f->linesize[plane];
int ycoord = yoff[sprite] + yadv[sprite] * row;
int yline = ycoord >> 16;
int next_line;
ysub[sprite] = ycoord & 0xFFFF;
if (sprite) {
iplane = s->last_picture.f->data[plane];
iline = s->last_picture.f->linesize[plane];
iplane = s->last_pic.f->data[plane];
iline = s->last_pic.f->linesize[plane];
}
next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
@ -317,12 +317,12 @@ static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
if (ret < 0)
return ret;

if (!s->current_picture.f || !s->current_picture.f->data[0]) {
if (!s->cur_pic.f || !s->cur_pic.f->data[0]) {
av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
return AVERROR_UNKNOWN;
}

if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
if (v->two_sprites && (!s->last_pic_ptr || !s->last_pic.f->data[0])) {
av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
v->two_sprites = 0;
}
@ -340,7 +340,7 @@ static void vc1_sprite_flush(AVCodecContext *avctx)
{
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
AVFrame *f = s->current_picture.f;
AVFrame *f = s->cur_pic.f;
int plane, i;

/* Windows Media Image codecs have a convergence interval of two keyframes.
@ -837,10 +837,10 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
/* no supplementary picture */
if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
/* special case for last picture */
if (s->low_delay == 0 && s->next_picture_ptr) {
if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
if (s->low_delay == 0 && s->next_pic_ptr) {
if ((ret = av_frame_ref(pict, s->next_pic_ptr->f)) < 0)
return ret;
s->next_picture_ptr = NULL;
s->next_pic_ptr = NULL;

*got_frame = 1;
}
@ -1047,7 +1047,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
}

/* skip B-frames if we don't have reference frames */
if (!s->last_picture_ptr && s->pict_type == AV_PICTURE_TYPE_B) {
if (!s->last_pic_ptr && s->pict_type == AV_PICTURE_TYPE_B) {
av_log(v->s.avctx, AV_LOG_DEBUG, "Skipping B frame without reference frames\n");
goto end;
}
@ -1061,19 +1061,19 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
goto err;
}

v->s.current_picture_ptr->field_picture = v->field_mode;
v->s.current_picture_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED * (v->fcm != PROGRESSIVE);
v->s.current_picture_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!v->tff;
v->s.cur_pic_ptr->field_picture = v->field_mode;
v->s.cur_pic_ptr->f->flags |= AV_FRAME_FLAG_INTERLACED * (v->fcm != PROGRESSIVE);
v->s.cur_pic_ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!v->tff;

// process pulldown flags
s->current_picture_ptr->f->repeat_pict = 0;
s->cur_pic_ptr->f->repeat_pict = 0;
// Pulldown flags are only valid when 'broadcast' has been set.
if (v->rff) {
// repeat field
s->current_picture_ptr->f->repeat_pict = 1;
s->cur_pic_ptr->f->repeat_pict = 1;
} else if (v->rptfrm) {
// repeat frames
s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
s->cur_pic_ptr->f->repeat_pict = v->rptfrm * 2;
}

if (avctx->hwaccel) {
@ -1135,7 +1135,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
ret = AVERROR_INVALIDDATA;
goto err;
}
v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
v->s.cur_pic_ptr->f->pict_type = v->s.pict_type;

ret = hwaccel->start_frame(avctx, buf_start_second_field,
(buf + buf_size) - buf_start_second_field);
@ -1230,9 +1230,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,

v->end_mb_x = s->mb_width;
if (v->field_mode) {
s->current_picture.f->linesize[0] <<= 1;
s->current_picture.f->linesize[1] <<= 1;
s->current_picture.f->linesize[2] <<= 1;
s->cur_pic.f->linesize[0] <<= 1;
s->cur_pic.f->linesize[1] <<= 1;
s->cur_pic.f->linesize[2] <<= 1;
s->linesize <<= 1;
s->uvlinesize <<= 1;
}
@ -1307,9 +1307,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
}
if (v->field_mode) {
v->second_field = 0;
s->current_picture.f->linesize[0] >>= 1;
s->current_picture.f->linesize[1] >>= 1;
s->current_picture.f->linesize[2] >>= 1;
s->cur_pic.f->linesize[0] >>= 1;
s->cur_pic.f->linesize[1] >>= 1;
s->cur_pic.f->linesize[2] >>= 1;
s->linesize >>= 1;
s->uvlinesize >>= 1;
if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
@ -1353,16 +1353,16 @@ image:
*got_frame = 1;
} else {
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
if ((ret = av_frame_ref(pict, s->cur_pic_ptr->f)) < 0)
goto err;
if (!v->field_mode)
ff_print_debug_info(s, s->current_picture_ptr, pict);
ff_print_debug_info(s, s->cur_pic_ptr, pict);
*got_frame = 1;
} else if (s->last_picture_ptr) {
if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
} else if (s->last_pic_ptr) {
if ((ret = av_frame_ref(pict, s->last_pic_ptr->f)) < 0)
goto err;
if (!v->field_mode)
ff_print_debug_info(s, s->last_picture_ptr, pict);
ff_print_debug_info(s, s->last_pic_ptr, pict);
*got_frame = 1;
}
}

@ -370,7 +370,7 @@ int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame,
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
Picture *pic = s->current_picture_ptr;
Picture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
int val;

@ -35,7 +35,7 @@ static int vdpau_mpeg_start_frame(AVCodecContext *avctx,
const uint8_t *buffer, uint32_t size)
{
MpegEncContext * const s = avctx->priv_data;
Picture *pic = s->current_picture_ptr;
Picture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
VdpPictureInfoMPEG1Or2 *info = &pic_ctx->info.mpeg;
VdpVideoSurface ref;
@ -47,12 +47,12 @@ static int vdpau_mpeg_start_frame(AVCodecContext *avctx,

switch (s->pict_type) {
case AV_PICTURE_TYPE_B:
ref = ff_vdpau_get_surface_id(s->next_picture.f);
ref = ff_vdpau_get_surface_id(s->next_pic.f);
assert(ref != VDP_INVALID_HANDLE);
info->backward_reference = ref;
/* fall through to forward prediction */
case AV_PICTURE_TYPE_P:
ref = ff_vdpau_get_surface_id(s->last_picture.f);
ref = ff_vdpau_get_surface_id(s->last_pic.f);
info->forward_reference = ref;
}

@ -87,7 +87,7 @@ static int vdpau_mpeg_decode_slice(AVCodecContext *avctx,
const uint8_t *buffer, uint32_t size)
{
MpegEncContext * const s = avctx->priv_data;
Picture *pic = s->current_picture_ptr;
Picture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
int val;

@ -34,7 +34,7 @@ static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,
{
Mpeg4DecContext *ctx = avctx->priv_data;
MpegEncContext * const s = &ctx->m;
Picture *pic = s->current_picture_ptr;
Picture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
VdpPictureInfoMPEG4Part2 *info = &pic_ctx->info.mpeg4;
VdpVideoSurface ref;
@ -47,13 +47,13 @@ static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,

switch (s->pict_type) {
case AV_PICTURE_TYPE_B:
ref = ff_vdpau_get_surface_id(s->next_picture.f);
ref = ff_vdpau_get_surface_id(s->next_pic.f);
assert(ref != VDP_INVALID_HANDLE);
info->backward_reference = ref;
info->vop_coding_type = 2;
/* fall-through */
case AV_PICTURE_TYPE_P:
ref = ff_vdpau_get_surface_id(s->last_picture.f);
ref = ff_vdpau_get_surface_id(s->last_pic.f);
assert(ref != VDP_INVALID_HANDLE);
info->forward_reference = ref;
}

@ -36,7 +36,7 @@ static int vdpau_vc1_start_frame(AVCodecContext *avctx,
{
VC1Context * const v = avctx->priv_data;
MpegEncContext * const s = &v->s;
Picture *pic = s->current_picture_ptr;
Picture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
VdpPictureInfoVC1 *info = &pic_ctx->info.vc1;
VdpVideoSurface ref;
@ -47,15 +47,15 @@ static int vdpau_vc1_start_frame(AVCodecContext *avctx,

switch (s->pict_type) {
case AV_PICTURE_TYPE_B:
if (s->next_picture_ptr) {
ref = ff_vdpau_get_surface_id(s->next_picture.f);
if (s->next_pic_ptr) {
ref = ff_vdpau_get_surface_id(s->next_pic.f);
assert(ref != VDP_INVALID_HANDLE);
info->backward_reference = ref;
}
/* fall-through */
case AV_PICTURE_TYPE_P:
if (s->last_picture_ptr) {
ref = ff_vdpau_get_surface_id(s->last_picture.f);
if (s->last_pic_ptr) {
ref = ff_vdpau_get_surface_id(s->last_pic.f);
assert(ref != VDP_INVALID_HANDLE);
info->forward_reference = ref;
}
@ -104,7 +104,7 @@ static int vdpau_vc1_decode_slice(AVCodecContext *avctx,
{
VC1Context * const v = avctx->priv_data;
MpegEncContext * const s = &v->s;
Picture *pic = s->current_picture_ptr;
Picture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
int val;

@ -1108,7 +1108,7 @@ static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
AVFrame *frame = s->current_picture_ptr->f;
AVFrame *frame = s->cur_pic_ptr->f;

return ff_videotoolbox_common_end_frame(avctx, frame);
}

@ -103,7 +103,7 @@ static int parse_mb_skip(WMV2DecContext *w)
int mb_x, mb_y;
int coded_mb_count = 0;
MpegEncContext *const s = &w->s;
uint32_t *const mb_type = s->current_picture_ptr->mb_type;
uint32_t *const mb_type = s->cur_pic_ptr->mb_type;

w->skip_type = get_bits(&s->gb, 2);
switch (w->skip_type) {
@ -238,8 +238,8 @@ int ff_wmv2_decode_secondary_picture_header(MpegEncContext *s)

if (s->pict_type == AV_PICTURE_TYPE_I) {
/* Is filling with zeroes really the right thing to do? */
memset(s->current_picture_ptr->mb_type, 0,
sizeof(*s->current_picture_ptr->mb_type) *
memset(s->cur_pic_ptr->mb_type, 0,
sizeof(*s->cur_pic_ptr->mb_type) *
s->mb_height * s->mb_stride);
if (w->j_type_bit)
w->j_type = get_bits1(&s->gb);
@ -331,7 +331,7 @@ int ff_wmv2_decode_secondary_picture_header(MpegEncContext *s)
s->esc3_run_length = 0;

if (w->j_type) {
ff_intrax8_decode_picture(&w->x8, &s->current_picture,
ff_intrax8_decode_picture(&w->x8, &s->cur_pic,
&s->gb, &s->mb_x, &s->mb_y,
2 * s->qscale, (s->qscale - 1) | 1,
s->loop_filter, s->low_delay);
@ -366,11 +366,11 @@ static int16_t *wmv2_pred_motion(WMV2DecContext *w, int *px, int *py)
wrap = s->b8_stride;
xy = s->block_index[0];

mot_val = s->current_picture.motion_val[0][xy];
mot_val = s->cur_pic.motion_val[0][xy];

A = s->current_picture.motion_val[0][xy - 1];
B = s->current_picture.motion_val[0][xy - wrap];
C = s->current_picture.motion_val[0][xy + 2 - wrap];
A = s->cur_pic.motion_val[0][xy - 1];
B = s->cur_pic.motion_val[0][xy - wrap];
C = s->cur_pic.motion_val[0][xy + 2 - wrap];

if (s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
diff = FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
@ -452,7 +452,7 @@ static int wmv2_decode_mb(MpegEncContext *s, int16_t block[6][64])
return 0;

if (s->pict_type == AV_PICTURE_TYPE_P) {
if (IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])) {
if (IS_SKIP(s->cur_pic.mb_type[s->mb_y * s->mb_stride + s->mb_x])) {
/* skip mb */
s->mb_intra = 0;
for (i = 0; i < 6; i++)