Merge remote-tracking branch 'qatar/master'
* qatar/master:
  lavr: add x86-optimized functions for mixing 1-to-2 s16p with flt coeffs
  lavr: add x86-optimized functions for mixing 1-to-2 fltp with flt coeffs
  Add Dolby/DPLII downmix support to libavresample
  vorbisdec: replace div/mod in loop with a counter
  fate: vorbis: add 5.1 surround test
  rtpenc: Allow requesting H264 RTP packetization mode 0
  configure: Sort the library listings in the help text alphabetically
  dwt: remove variable-length arrays
  RTMPT protocol support
  http: Properly handle chunked transfer-encoding for replies to post data
  http: Fail reading if the connection has gone away
  amr: Mark an array const
  amr: More space cleanup
  rtpenc: Fix memory leaks in the muxer open function

Conflicts:
  Changelog
  configure
  doc/APIchanges
  libavformat/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 82edf6727f
Changelog
@@ -9,6 +9,7 @@ version next:
 - setnsamples audio filter
 - atempo filter
 - ffprobe -show_data option
+- RTMPT protocol support


 version 0.11:
configure (10 changed lines)
@@ -174,9 +174,6 @@ External library support:
   --enable-libass          enable libass subtitles rendering [no]
   --enable-libbluray       enable BluRay reading using libbluray [no]
   --enable-libcelt         enable CELT decoding via libcelt [no]
-  --enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no]
-  --enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no]
-  --enable-libopencv       enable video filtering via libopencv [no]
   --enable-libcdio         enable audio CD grabbing with libcdio
   --enable-libdc1394       enable IIDC-1394 grabbing using libdc1394
                            and libraw1394 [no]
@@ -187,6 +184,9 @@ External library support:
   --enable-libmp3lame      enable MP3 encoding via libmp3lame [no]
   --enable-libnut          enable NUT (de)muxing via libnut,
                            native (de)muxer exists [no]
+  --enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no]
+  --enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no]
+  --enable-libopencv       enable video filtering via libopencv [no]
   --enable-libopenjpeg     enable JPEG 2000 encoding/decoding via OpenJPEG [no]
   --enable-libpulse        enable Pulseaudio input via libpulse [no]
   --enable-librtmp         enable RTMP[E] support via librtmp [no]
@@ -1689,6 +1689,10 @@ mmsh_protocol_select="http_protocol"
 mmst_protocol_deps="network"
 rtmp_protocol_deps="!librtmp_protocol"
 rtmp_protocol_select="tcp_protocol"
+rtmphttp_protocol_deps="!librtmp_protocol"
+rtmphttp_protocol_select="http_protocol"
+rtmpt_protocol_deps="!librtmp_protocol"
+rtmpt_protocol_select="rtmphttp_protocol"
 rtp_protocol_select="udp_protocol"
 sctp_protocol_deps="network netinet_sctp_h"
 tcp_protocol_deps="network"
doc/APIchanges
@@ -38,6 +38,9 @@ API changes, most recent first:
 2012-03-26 - a67d9cf - lavfi 2.66.100
   Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.

+2012-xx-xx - xxxxxxx - lavr 0.0.3
+  Add a parameter to avresample_build_matrix() for Dolby/DPLII downmixing.
+
 2012-xx-xx - xxxxxxx - lavfi 2.23.0 - avfilter.h
   Add AVFilterContext.nb_inputs/outputs. Deprecate
   AVFilterContext.input/output_count.
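As an editorial aside, the new avresample_build_matrix() signature documented above can be seen in the libavresample hunks further down. A minimal usage sketch, assuming the 2012-era header names and illustrative mix levels (none of this is part of the diff itself):

#include <math.h>
#include "libavresample/avresample.h"
#include "libavutil/audioconvert.h"

/* Sketch: build a 5.1 -> stereo downmix matrix with Dolby Pro Logic II
 * encoding, exercising the matrix_encoding argument added by this merge. */
int main(void)
{
    double matrix[2 * 6];   /* out_channels x in_channels, stride = 6 */
    int ret = avresample_build_matrix(AV_CH_LAYOUT_5POINT1,   /* input layout   */
                                      AV_CH_LAYOUT_STEREO,    /* output layout  */
                                      M_SQRT1_2,              /* center mix     */
                                      M_SQRT1_2,              /* surround mix   */
                                      0.0,                    /* LFE mix        */
                                      1,                      /* normalize      */
                                      matrix,
                                      6,                      /* row stride     */
                                      AV_MATRIX_ENCODING_DPLII);
    return ret < 0;
}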
doc/protocols.texi
@@ -273,6 +273,14 @@ For example to read with @command{ffplay} a multimedia resource named
 ffplay rtmp://myserver/vod/sample
 @end example

+@section rtmpt
+
+Real-Time Messaging Protocol tunneled through HTTP.
+
+The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
+for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
 @section rtmp, rtmpe, rtmps, rtmpt, rtmpte

 Real-Time Messaging Protocol and its variants supported through
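For orientation (not part of the diff): once the rtmpt protocol added below is registered, a tunneled stream can be opened the same way as the plain rtmp example above, e.g.

@example
ffplay rtmpt://myserver/vod/sample
@end example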
libavcodec/dwt.c (109 changed lines)
@@ -245,9 +245,8 @@ static av_always_inline void inv_liftS(IDWTELEM *dst, IDWTELEM *src,
 }
 #endif /* ! liftS */

-static void horizontal_decompose53i(DWTELEM *b, int width)
+static void horizontal_decompose53i(DWTELEM *b, DWTELEM *temp, int width)
 {
-    DWTELEM temp[width];
     const int width2 = width >> 1;
     int x;
     const int w2 = (width + 1) >> 1;
@@ -313,8 +312,8 @@ static void vertical_decompose53iL0(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
         b1[i] += (b0[i] + b2[i] + 2) >> 2;
 }

-static void spatial_decompose53i(DWTELEM *buffer, int width, int height,
-                                 int stride)
+static void spatial_decompose53i(DWTELEM *buffer, DWTELEM *temp,
+                                 int width, int height, int stride)
 {
     int y;
     DWTELEM *b0 = buffer + mirror(-2 - 1, height - 1) * stride;
@@ -325,9 +324,9 @@ static void spatial_decompose53i(DWTELEM *buffer, int width, int height,
         DWTELEM *b3 = buffer + mirror(y + 2, height - 1) * stride;

         if (y + 1 < (unsigned)height)
-            horizontal_decompose53i(b2, width);
+            horizontal_decompose53i(b2, temp, width);
         if (y + 2 < (unsigned)height)
-            horizontal_decompose53i(b3, width);
+            horizontal_decompose53i(b3, temp, width);

         if (y + 1 < (unsigned)height)
             vertical_decompose53iH0(b1, b2, b3, width);
@@ -339,9 +338,8 @@ static void spatial_decompose53i(DWTELEM *buffer, int width, int height,
     }
 }

-static void horizontal_decompose97i(DWTELEM *b, int width)
+static void horizontal_decompose97i(DWTELEM *b, DWTELEM *temp, int width)
 {
-    DWTELEM temp[width];
     const int w2 = (width + 1) >> 1;

     lift(temp + w2, b + 1, b, 1, 2, 2, width, W_AM, W_AO, W_AS, 1, 1);
@@ -391,8 +389,8 @@ static void vertical_decompose97iL1(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
         b1[i] += (W_DM * (b0[i] + b2[i]) + W_DO) >> W_DS;
 }

-static void spatial_decompose97i(DWTELEM *buffer, int width, int height,
-                                 int stride)
+static void spatial_decompose97i(DWTELEM *buffer, DWTELEM *temp,
+                                 int width, int height, int stride)
 {
     int y;
     DWTELEM *b0 = buffer + mirror(-4 - 1, height - 1) * stride;
@@ -405,9 +403,9 @@ static void spatial_decompose97i(DWTELEM *buffer, int width, int height,
         DWTELEM *b5 = buffer + mirror(y + 4, height - 1) * stride;

         if (y + 3 < (unsigned)height)
-            horizontal_decompose97i(b4, width);
+            horizontal_decompose97i(b4, temp, width);
         if (y + 4 < (unsigned)height)
-            horizontal_decompose97i(b5, width);
+            horizontal_decompose97i(b5, temp, width);

         if (y + 3 < (unsigned)height)
             vertical_decompose97iH0(b3, b4, b5, width);
@@ -425,20 +423,20 @@ static void spatial_decompose97i(DWTELEM *buffer, int width, int height,
     }
 }

-void ff_spatial_dwt(DWTELEM *buffer, int width, int height, int stride,
-                    int type, int decomposition_count)
+void ff_spatial_dwt(DWTELEM *buffer, DWTELEM *temp, int width, int height,
+                    int stride, int type, int decomposition_count)
 {
     int level;

     for (level = 0; level < decomposition_count; level++) {
         switch (type) {
         case DWT_97:
-            spatial_decompose97i(buffer,
+            spatial_decompose97i(buffer, temp,
                                  width >> level, height >> level,
                                  stride << level);
             break;
         case DWT_53:
-            spatial_decompose53i(buffer,
+            spatial_decompose53i(buffer, temp,
                                  width >> level, height >> level,
                                  stride << level);
             break;
@@ -446,9 +444,8 @@ void ff_spatial_dwt(DWTELEM *buffer, int width, int height, int stride,
         }
     }
 }

-static void horizontal_compose53i(IDWTELEM *b, int width)
+static void horizontal_compose53i(IDWTELEM *b, IDWTELEM *temp, int width)
 {
-    IDWTELEM temp[width];
     const int width2 = width >> 1;
     const int w2 = (width + 1) >> 1;
     int x;
@@ -508,6 +505,7 @@ static void spatial_compose53i_init(DWTCompose *cs, IDWTELEM *buffer,
 }

 static void spatial_compose53i_dy_buffered(DWTCompose *cs, slice_buffer *sb,
+                                           IDWTELEM *temp,
                                            int width, int height,
                                            int stride_line)
 {
@@ -537,17 +535,18 @@ static void spatial_compose53i_dy_buffered(DWTCompose *cs, slice_buffer *sb,
     }

     if (y - 1 < (unsigned)height)
-        horizontal_compose53i(b0, width);
+        horizontal_compose53i(b0, temp, width);
     if (y + 0 < (unsigned)height)
-        horizontal_compose53i(b1, width);
+        horizontal_compose53i(b1, temp, width);

     cs->b0  = b2;
     cs->b1  = b3;
     cs->y  += 2;
 }

-static void spatial_compose53i_dy(DWTCompose *cs, IDWTELEM *buffer, int width,
-                                  int height, int stride)
+static void spatial_compose53i_dy(DWTCompose *cs, IDWTELEM *buffer,
+                                  IDWTELEM *temp, int width, int height,
+                                  int stride)
 {
     int y = cs->y;
     IDWTELEM *b0 = cs->b0;
@@ -561,27 +560,26 @@ static void spatial_compose53i_dy(DWTCompose *cs, IDWTELEM *buffer, int width,
         vertical_compose53iH0(b0, b1, b2, width);

     if (y - 1 < (unsigned)height)
-        horizontal_compose53i(b0, width);
+        horizontal_compose53i(b0, temp, width);
     if (y + 0 < (unsigned)height)
-        horizontal_compose53i(b1, width);
+        horizontal_compose53i(b1, temp, width);

     cs->b0  = b2;
     cs->b1  = b3;
     cs->y  += 2;
 }

-static void av_unused spatial_compose53i(IDWTELEM *buffer, int width,
-                                         int height, int stride)
+static void av_unused spatial_compose53i(IDWTELEM *buffer, IDWTELEM *temp,
+                                         int width, int height, int stride)
 {
     DWTCompose cs;
     spatial_compose53i_init(&cs, buffer, height, stride);
     while (cs.y <= height)
-        spatial_compose53i_dy(&cs, buffer, width, height, stride);
+        spatial_compose53i_dy(&cs, buffer, temp, width, height, stride);
 }

-void ff_snow_horizontal_compose97i(IDWTELEM *b, int width)
+void ff_snow_horizontal_compose97i(IDWTELEM *b, IDWTELEM *temp, int width)
 {
-    IDWTELEM temp[width];
     const int w2 = (width + 1) >> 1;

 #if 0 //maybe more understadable but slower
@@ -695,8 +693,9 @@ static void spatial_compose97i_init(DWTCompose *cs, IDWTELEM *buffer, int height
 }

 static void spatial_compose97i_dy_buffered(DWTContext *dsp, DWTCompose *cs,
-                                           slice_buffer *sb, int width,
-                                           int height, int stride_line)
+                                           slice_buffer * sb, IDWTELEM *temp,
+                                           int width, int height,
+                                           int stride_line)
 {
     int y = cs->y;

@@ -725,9 +724,9 @@ static void spatial_compose97i_dy_buffered(DWTContext *dsp, DWTCompose *cs,
     }

     if (y - 1 < (unsigned)height)
-        dsp->horizontal_compose97i(b0, width);
+        dsp->horizontal_compose97i(b0, temp, width);
     if (y + 0 < (unsigned)height)
-        dsp->horizontal_compose97i(b1, width);
+        dsp->horizontal_compose97i(b1, temp, width);

     cs->b0 = b2;
     cs->b1 = b3;
@@ -736,8 +735,9 @@ static void spatial_compose97i_dy_buffered(DWTContext *dsp, DWTCompose *cs,
     cs->y += 2;
 }

-static void spatial_compose97i_dy(DWTCompose *cs, IDWTELEM *buffer, int width,
-                                  int height, int stride)
+static void spatial_compose97i_dy(DWTCompose *cs, IDWTELEM *buffer,
+                                  IDWTELEM *temp, int width, int height,
+                                  int stride)
 {
     int y = cs->y;
     IDWTELEM *b0 = cs->b0;
@@ -757,9 +757,9 @@ static void spatial_compose97i_dy(DWTCompose *cs, IDWTELEM *buffer, int width,
         vertical_compose97iH0(b0, b1, b2, width);

     if (y - 1 < (unsigned)height)
-        ff_snow_horizontal_compose97i(b0, width);
+        ff_snow_horizontal_compose97i(b0, temp, width);
     if (y + 0 < (unsigned)height)
-        ff_snow_horizontal_compose97i(b1, width);
+        ff_snow_horizontal_compose97i(b1, temp, width);

     cs->b0 = b2;
     cs->b1 = b3;
@@ -768,13 +768,13 @@ static void spatial_compose97i_dy(DWTCompose *cs, IDWTELEM *buffer, int width,
     cs->y += 2;
 }

-static void av_unused spatial_compose97i(IDWTELEM *buffer, int width,
-                                         int height, int stride)
+static void av_unused spatial_compose97i(IDWTELEM *buffer, IDWTELEM *temp,
+                                         int width, int height, int stride)
 {
     DWTCompose cs;
     spatial_compose97i_init(&cs, buffer, height, stride);
     while (cs.y <= height)
-        spatial_compose97i_dy(&cs, buffer, width, height, stride);
+        spatial_compose97i_dy(&cs, buffer, temp, width, height, stride);
 }

 void ff_spatial_idwt_buffered_init(DWTCompose *cs, slice_buffer *sb, int width,
@@ -797,9 +797,9 @@ void ff_spatial_idwt_buffered_init(DWTCompose *cs, slice_buffer *sb, int width,
 }

 void ff_spatial_idwt_buffered_slice(DWTContext *dsp, DWTCompose *cs,
-                                    slice_buffer *slice_buf, int width,
-                                    int height, int stride_line, int type,
-                                    int decomposition_count, int y)
+                                    slice_buffer *slice_buf, IDWTELEM *temp,
+                                    int width, int height, int stride_line,
+                                    int type, int decomposition_count, int y)
 {
     const int support = type == 1 ? 3 : 5;
     int level;
@@ -810,13 +810,13 @@ void ff_spatial_idwt_buffered_slice(DWTContext *dsp, DWTCompose *cs,
         while (cs[level].y <= FFMIN((y >> level) + support, height >> level)) {
             switch (type) {
             case DWT_97:
-                spatial_compose97i_dy_buffered(dsp, cs + level, slice_buf,
+                spatial_compose97i_dy_buffered(dsp, cs + level, slice_buf, temp,
                                                width >> level,
                                                height >> level,
                                                stride_line << level);
                 break;
             case DWT_53:
-                spatial_compose53i_dy_buffered(cs + level, slice_buf,
+                spatial_compose53i_dy_buffered(cs + level, slice_buf, temp,
                                                width >> level,
                                                height >> level,
                                                stride_line << level);
@@ -844,8 +844,9 @@ static void ff_spatial_idwt_init(DWTCompose *cs, IDWTELEM *buffer, int width,
     }
 }

-static void ff_spatial_idwt_slice(DWTCompose *cs, IDWTELEM *buffer, int width,
-                                  int height, int stride, int type,
+static void ff_spatial_idwt_slice(DWTCompose *cs, IDWTELEM *buffer,
+                                  IDWTELEM *temp, int width, int height,
+                                  int stride, int type,
                                   int decomposition_count, int y)
 {
     const int support = type == 1 ? 3 : 5;
@@ -857,26 +858,26 @@ static void ff_spatial_idwt_slice(DWTCompose *cs, IDWTELEM *buffer, int width,
         while (cs[level].y <= FFMIN((y >> level) + support, height >> level)) {
             switch (type) {
             case DWT_97:
-                spatial_compose97i_dy(cs + level, buffer, width >> level,
+                spatial_compose97i_dy(cs + level, buffer, temp, width >> level,
                                       height >> level, stride << level);
                 break;
             case DWT_53:
-                spatial_compose53i_dy(cs + level, buffer, width >> level,
+                spatial_compose53i_dy(cs + level, buffer, temp, width >> level,
                                       height >> level, stride << level);
                 break;
             }
         }
 }

-void ff_spatial_idwt(IDWTELEM *buffer, int width, int height, int stride,
-                     int type, int decomposition_count)
+void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
+                     int stride, int type, int decomposition_count)
 {
     DWTCompose cs[MAX_DECOMPOSITIONS];
     int y;
     ff_spatial_idwt_init(cs, buffer, width, height, stride, type,
                          decomposition_count);
     for (y = 0; y < height; y += 4)
-        ff_spatial_idwt_slice(cs, buffer, width, height, stride, type,
+        ff_spatial_idwt_slice(cs, buffer, temp, width, height, stride, type,
                               decomposition_count, y);
 }

@@ -885,7 +886,7 @@ static inline int w_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size,
 {
     int s, i, j;
     const int dec_count = w == 8 ? 3 : 4;
-    int tmp[32 * 32];
+    int tmp[32 * 32], tmp2[32];
     int level, ori;
     static const int scale[2][2][4][4] = {
         {
@@ -927,7 +928,7 @@ static inline int w_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size,
         pix2 += line_size;
     }

-    ff_spatial_dwt(tmp, w, h, 32, type, dec_count);
+    ff_spatial_dwt(tmp, tmp2, w, h, 32, type, dec_count);

     s = 0;
     assert(w == h);
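The pattern applied throughout dwt.c above ("dwt: remove variable-length arrays") replaces a C99 VLA inside each row routine with a scratch buffer that the caller allocates once and passes down. A small standalone sketch of the idea, with hypothetical names that are not FFmpeg API:

#include <stdlib.h>

/* Before: void transform_row(int *row, int width) { int temp[width]; ... }
 * After: the caller owns one scratch buffer sized for the widest row and
 * passes it in, so there is no unbounded stack allocation per call. */
static void transform_row(int *row, int *temp, int width)
{
    for (int x = 0; x < width; x++)
        temp[x] = row[width - 1 - x];   /* placeholder work using the scratch area */
    for (int x = 0; x < width; x++)
        row[x] = temp[x];
}

static int transform_image(int *image, int width, int height)
{
    int *temp = malloc(width * sizeof(*temp));  /* allocated once, reused per row */
    if (!temp)
        return -1;
    for (int y = 0; y < height; y++)
        transform_row(image + y * width, temp, width);
    free(temp);
    return 0;
}

This mirrors how SnowContext gains temp_dwt_buffer/temp_idwt_buffer in the snow.c and snow.h hunks below.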
libavcodec/dwt.h
@@ -79,7 +79,7 @@ typedef struct DWTContext {
     void (*vertical_compose97i)(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
                                 IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5,
                                 int width);
-    void (*horizontal_compose97i)(IDWTELEM *b, int width);
+    void (*horizontal_compose97i)(IDWTELEM *b, IDWTELEM *temp, int width);
     void (*inner_add_yblock)(const uint8_t *obmc, const int obmc_stride,
                              uint8_t **block, int b_w, int b_h, int src_x,
                              int src_y, int src_stride, slice_buffer *sb,
@@ -239,7 +239,7 @@ IDWTELEM *ff_slice_buffer_load_line(slice_buffer *buf, int line);
 void ff_snow_vertical_compose97i(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
                                  IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5,
                                  int width);
-void ff_snow_horizontal_compose97i(IDWTELEM *b, int width);
+void ff_snow_horizontal_compose97i(IDWTELEM *b, IDWTELEM *temp, int width);
 void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
                               uint8_t **block, int b_w, int b_h, int src_x,
                               int src_y, int src_stride, slice_buffer *sb,
@@ -248,18 +248,18 @@ void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
 int ff_w53_32_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
 int ff_w97_32_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);

-void ff_spatial_dwt(int *buffer, int width, int height, int stride, int type,
-                    int decomposition_count);
+void ff_spatial_dwt(int *buffer, int *temp, int width, int height, int stride,
+                    int type, int decomposition_count);

 void ff_spatial_idwt_buffered_init(DWTCompose *cs, slice_buffer *sb, int width,
                                    int height, int stride_line, int type,
                                    int decomposition_count);
 void ff_spatial_idwt_buffered_slice(DWTContext *dsp, DWTCompose *cs,
-                                    slice_buffer *slice_buf, int width,
-                                    int height, int stride_line, int type,
-                                    int decomposition_count, int y);
-void ff_spatial_idwt(IDWTELEM *buffer, int width, int height, int stride,
-                     int type, int decomposition_count);
+                                    slice_buffer *slice_buf, IDWTELEM *temp,
+                                    int width, int height, int stride_line,
+                                    int type, int decomposition_count, int y);
+void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
+                     int stride, int type, int decomposition_count);

 void ff_dwt_init(DWTContext *c);
 void ff_dwt_init_x86(DWTContext *c);
libavcodec/snow.c
@@ -440,6 +440,8 @@ av_cold int ff_snow_common_init(AVCodecContext *avctx){

     s->spatial_idwt_buffer= av_mallocz(width*height*sizeof(IDWTELEM));
     s->spatial_dwt_buffer= av_mallocz(width*height*sizeof(DWTELEM)); //FIXME this does not belong here
+    s->temp_dwt_buffer   = av_mallocz(width * sizeof(DWTELEM));
+    s->temp_idwt_buffer  = av_mallocz(width * sizeof(IDWTELEM));

     for(i=0; i<MAX_REF_FRAMES; i++)
         for(j=0; j<MAX_REF_FRAMES; j++)
@@ -618,7 +620,9 @@ av_cold void ff_snow_common_end(SnowContext *s)
     int plane_index, level, orientation, i;

     av_freep(&s->spatial_dwt_buffer);
+    av_freep(&s->temp_dwt_buffer);
     av_freep(&s->spatial_idwt_buffer);
+    av_freep(&s->temp_idwt_buffer);

     s->m.me.temp= NULL;
     av_freep(&s->m.me.scratchpad);
libavcodec/snow.h
@@ -132,7 +132,9 @@ typedef struct SnowContext{
     int16_t (*ref_mvs[MAX_REF_FRAMES])[2];
     uint32_t *ref_scores[MAX_REF_FRAMES];
     DWTELEM *spatial_dwt_buffer;
+    DWTELEM *temp_dwt_buffer;
     IDWTELEM *spatial_idwt_buffer;
+    IDWTELEM *temp_idwt_buffer;
     int colorspace_type;
     int chroma_h_shift;
     int chroma_v_shift;
libavcodec/snowdec.c
@@ -502,7 +502,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
         }

         for(; yd<slice_h; yd+=4){
-            ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
+            ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
         }

         if(s->qlog == LOSSLESS_QLOG){
libavcodec/snowenc.c
@@ -94,7 +94,7 @@ static void dwt_quantize(SnowContext *s, Plane *p, DWTELEM *buffer, int width, i
     //FIXME pass the copy cleanly ?

 //    memcpy(dwt_buffer, buffer, height * stride * sizeof(DWTELEM));
-    ff_spatial_dwt(buffer, width, height, stride, type, s->spatial_decomposition_count);
+    ff_spatial_dwt(buffer, s->temp_dwt_buffer, width, height, stride, type, s->spatial_decomposition_count);

     for(level=0; level<s->spatial_decomposition_count; level++){
         for(orientation=level ? 1 : 0; orientation<4; orientation++){
@@ -119,7 +119,7 @@ static void dwt_quantize(SnowContext *s, Plane *p, DWTELEM *buffer, int width, i
         for(xs= 0; xs<Q2_STEP; xs++){
             memcpy(idwt2_buffer, best_dequant, height * stride * sizeof(IDWTELEM));
             dequantize_all(s, p, idwt2_buffer, width, height);
-            ff_spatial_idwt(idwt2_buffer, width, height, stride, type, s->spatial_decomposition_count);
+            ff_spatial_idwt(idwt2_buffer, s->temp_idwt_buffer, width, height, stride, type, s->spatial_decomposition_count);
             find_sse(s, p, best_score, score_stride, idwt2_buffer, s->spatial_idwt_buffer, level, orientation);
             memcpy(idwt2_buffer, best_dequant, height * stride * sizeof(IDWTELEM));
             for(y=ys; y<b->height; y+= Q2_STEP){
@@ -130,7 +130,7 @@ static void dwt_quantize(SnowContext *s, Plane *p, DWTELEM *buffer, int width, i
                 }
             }
             dequantize_all(s, p, idwt2_buffer, width, height);
-            ff_spatial_idwt(idwt2_buffer, width, height, stride, type, s->spatial_decomposition_count);
+            ff_spatial_idwt(idwt2_buffer, s->temp_idwt_buffer, width, height, stride, type, s->spatial_decomposition_count);
             find_sse(s, p, score, score_stride, idwt2_buffer, s->spatial_idwt_buffer, level, orientation);
             for(y=ys; y<b->height; y+= Q2_STEP){
                 for(x=xs; x<b->width; x+= Q2_STEP){
@@ -1588,7 +1588,7 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){

             memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
             ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
-            ff_spatial_idwt(s->spatial_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
+            ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
             for(y=0; y<height; y++){
                 for(x=0; x<width; x++){
                     int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
@@ -1778,7 +1778,7 @@ redo_frame:
 /*        if(QUANTIZE2)
             dwt_quantize(s, p, s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type);
         else*/
-            ff_spatial_dwt(s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
+            ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);

         if(s->pass1_rc && plane_index==0){
             int delta_qlog = ratecontrol_1pass(s, pic);
@@ -1818,7 +1818,7 @@ redo_frame:
             }
         }

-        ff_spatial_idwt(s->spatial_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
+        ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
         if(s->qlog == LOSSLESS_QLOG){
             for(y=0; y<h; y++){
                 for(x=0; x<w; x++){
libavcodec/vorbisdec.c
@@ -1413,17 +1413,24 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
             }

         } else if (vr_type == 2) {
-            voffs = voffset;
+            unsigned voffs_div = FASTDIV(voffset, ch);
+            unsigned voffs_mod = voffset - voffs_div * ch;

             for (k = 0; k < step; ++k) {
                 coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
-                for (l = 0; l < dim; ++l, ++voffs) {
-                    vec[voffs / ch + (voffs % ch) * vlen] += codebook.codevectors[coffs + l]; // FPMATH FIXME use if and counter instead of / and %
+                for (l = 0; l < dim; ++l) {
+                    vec[voffs_div + voffs_mod * vlen] +=
+                        codebook.codevectors[coffs + l];

                     av_dlog(NULL, " pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n",
-                            pass, voffset / ch + (voffs % ch) * vlen,
-                            vec[voffset / ch + (voffs % ch) * vlen],
+                            pass, voffs_div + voffs_mod * vlen,
+                            vec[voffs_div + voffs_mod * vlen],
                             codebook.codevectors[coffs + l], coffs, l);

+                    if (++voffs_mod == ch) {
+                        voffs_div++;
+                        voffs_mod = 0;
+                    }
                 }
             }
         }
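The vorbisdec hunk above ("replace div/mod in loop with a counter") is an instance of a general optimization: when a quotient and remainder advance by exactly one per iteration, the per-iteration division can be replaced by two incrementally maintained counters. A standalone sketch in plain C (not FFmpeg code):

#include <stdio.h>

/* Walk indices 0..n-1 laid out across `ch` interleaved channels.
 * The naive version recomputes i / ch and i % ch every iteration. */
static void walk_divmod(int n, int ch)
{
    for (int i = 0; i < n; i++)
        printf("%d -> (%d, %d)\n", i, i / ch, i % ch);
}

/* Counter version: keep quotient and remainder incrementally,
 * avoiding the per-iteration division and modulo. */
static void walk_counters(int n, int ch)
{
    int div = 0, mod = 0;
    for (int i = 0; i < n; i++) {
        printf("%d -> (%d, %d)\n", i, div, mod);
        if (++mod == ch) {   /* remainder wrapped around: bump the quotient */
            mod = 0;
            div++;
        }
    }
}

int main(void)
{
    walk_divmod(8, 3);
    walk_counters(8, 3);   /* prints the same (quotient, remainder) pairs */
    return 0;
}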
libavcodec/x86 (Snow DSP):
@@ -26,9 +26,8 @@
 #include "libavcodec/dwt.h"
 #include "dsputil_mmx.h"

-static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
+static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, IDWTELEM *temp, int width){
     const int w2= (width+1)>>1;
-    DECLARE_ALIGNED(16, IDWTELEM, temp)[width>>1];
     const int w_l= (width>>1);
     const int w_r= w2 - 1;
     int i;
@@ -215,9 +214,8 @@ static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
     }
 }

-static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width){
+static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, IDWTELEM *temp, int width){
     const int w2= (width+1)>>1;
-    IDWTELEM temp[width >> 1];
     const int w_l= (width>>1);
     const int w_r= w2 - 1;
     int i;
libavformat/Makefile
@@ -373,6 +373,8 @@ OBJS-$(CONFIG_MMST_PROTOCOL)             += mmst.o mms.o asf.o
 OBJS-$(CONFIG_MD5_PROTOCOL)              += md5proto.o
 OBJS-$(CONFIG_PIPE_PROTOCOL)             += file.o
 OBJS-$(CONFIG_RTMP_PROTOCOL)             += rtmpproto.o rtmppkt.o
+OBJS-$(CONFIG_RTMPHTTP_PROTOCOL)         += rtmphttp.o
+OBJS-$(CONFIG_RTMPT_PROTOCOL)            += rtmpproto.o rtmppkt.o
 OBJS-$(CONFIG_RTP_PROTOCOL)              += rtpproto.o
 OBJS-$(CONFIG_SCTP_PROTOCOL)             += sctp.o
 OBJS-$(CONFIG_TCP_PROTOCOL)              += tcp.o
libavformat/allformats.c
@@ -277,6 +277,8 @@ void av_register_all(void)
     REGISTER_PROTOCOL (MD5, md5);
     REGISTER_PROTOCOL (PIPE, pipe);
     REGISTER_PROTOCOL (RTMP, rtmp);
+    REGISTER_PROTOCOL (RTMPHTTP, rtmphttp);
+    REGISTER_PROTOCOL (RTMPT, rtmpt);
     REGISTER_PROTOCOL (RTP, rtp);
     REGISTER_PROTOCOL (SCTP, sctp);
     REGISTER_PROTOCOL (TCP, tcp);
libavformat/amr.c
@@ -125,7 +125,7 @@ static int amr_read_packet(AVFormatContext *s, AVPacket *pkt)

         size = packed_size[mode] + 1;
     } else if (enc->codec_id == CODEC_ID_AMR_WB) {
-        static uint8_t packed_size[16] = {
+        static const uint8_t packed_size[16] = {
             18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1
         };

libavformat/http.c
@@ -352,6 +352,8 @@ static int http_read_header(URLContext *h, int *new_location)
     char line[1024];
     int err = 0;

+    s->chunksize = -1;
+
     for (;;) {
         if ((err = http_get_line(s, line, sizeof(line))) < 0)
             return err;
@@ -470,7 +472,6 @@ static int http_connect(URLContext *h, const char *path, const char *local_path,
         s->http_code = 200;
         return 0;
     }
-    s->chunksize = -1;

     /* wait for header */
     err = http_read_header(h, new_location);
@@ -510,16 +511,15 @@ static int http_read(URLContext *h, uint8_t *buf, int size)
     HTTPContext *s = h->priv_data;
     int err, new_location;

-    if (s->end_chunked_post) {
-        if (!s->end_header) {
-            err = http_read_header(h, &new_location);
-            if (err < 0)
-                return err;
-        }
-
-        return http_buf_read(h, buf, size);
-    }
+    if (!s->hd)
+        return AVERROR_EOF;
+
+    if (s->end_chunked_post && !s->end_header) {
+        err = http_read_header(h, &new_location);
+        if (err < 0)
+            return err;
+    }

     if (s->chunksize >= 0) {
         if (!s->chunksize) {
             char line[32];
libavformat/rtmphttp.c (new file, 239 lines)
@@ -0,0 +1,239 @@
/*
 * RTMP HTTP network protocol
 * Copyright (c) 2012 Samuel Pitoiset
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RTMP HTTP protocol
 */

#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "http.h"

#define RTMPT_DEFAULT_PORT 80

/* protocol handler context */
typedef struct RTMP_HTTPContext {
    URLContext *stream;        ///< HTTP stream
    char       host[256];      ///< hostname of the server
    int        port;           ///< port to connect (default is 80)
    char       client_id[64];  ///< client ID used for all requests except the first one
    int        seq;            ///< sequence ID used for all requests
    uint8_t    *out_data;      ///< output buffer
    int        out_size;       ///< current output buffer size
    int        out_capacity;   ///< current output buffer capacity
    int        initialized;    ///< flag indicating when the http context is initialized
    int        finishing;      ///< flag indicating when the client closes the connection
} RTMP_HTTPContext;

static int rtmp_http_send_cmd(URLContext *h, const char *cmd)
{
    RTMP_HTTPContext *rt = h->priv_data;
    char uri[2048];
    uint8_t c;
    int ret;

    ff_url_join(uri, sizeof(uri), "http", NULL, rt->host, rt->port,
                "/%s/%s/%d", cmd, rt->client_id, rt->seq++);

    av_opt_set_bin(rt->stream->priv_data, "post_data", rt->out_data,
                   rt->out_size, 0);

    /* send a new request to the server */
    if ((ret = ff_http_do_new_request(rt->stream, uri)) < 0)
        return ret;

    /* re-init output buffer */
    rt->out_size = 0;

    /* read the first byte which contains the polling interval */
    if ((ret = ffurl_read(rt->stream, &c, 1)) < 0)
        return ret;

    return ret;
}

static int rtmp_http_write(URLContext *h, const uint8_t *buf, int size)
{
    RTMP_HTTPContext *rt = h->priv_data;
    void *ptr;

    if (rt->out_size + size > rt->out_capacity) {
        rt->out_capacity = (rt->out_size + size) * 2;
        ptr = av_realloc(rt->out_data, rt->out_capacity);
        if (!ptr)
            return AVERROR(ENOMEM);
        rt->out_data = ptr;
    }

    memcpy(rt->out_data + rt->out_size, buf, size);
    rt->out_size += size;

    return size;
}

static int rtmp_http_read(URLContext *h, uint8_t *buf, int size)
{
    RTMP_HTTPContext *rt = h->priv_data;
    int ret, off = 0;

    /* try to read at least 1 byte of data */
    do {
        ret = ffurl_read(rt->stream, buf + off, size);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;

        if (ret == AVERROR_EOF) {
            if (rt->finishing) {
                /* Do not send new requests when the client wants to
                 * close the connection. */
                return AVERROR(EAGAIN);
            }

            /* When the client has reached end of file for the last request,
             * we have to send a new request if we have buffered data.
             * Otherwise, we have to send an idle POST. */
            if (rt->out_size > 0) {
                if ((ret = rtmp_http_send_cmd(h, "send")) < 0)
                    return ret;
            } else {
                if ((ret = rtmp_http_write(h, "", 1)) < 0)
                    return ret;

                if ((ret = rtmp_http_send_cmd(h, "idle")) < 0)
                    return ret;
            }

            if (h->flags & AVIO_FLAG_NONBLOCK) {
                /* no incoming data to handle in nonblocking mode */
                return AVERROR(EAGAIN);
            }
        } else {
            off  += ret;
            size -= ret;
        }
    } while (off <= 0);

    return off;
}

static int rtmp_http_close(URLContext *h)
{
    RTMP_HTTPContext *rt = h->priv_data;
    uint8_t tmp_buf[2048];
    int ret = 0;

    if (rt->initialized) {
        /* client wants to close the connection */
        rt->finishing = 1;

        do {
            ret = rtmp_http_read(h, tmp_buf, sizeof(tmp_buf));
        } while (ret > 0);

        /* re-init output buffer before sending the close command */
        rt->out_size = 0;

        if ((ret = rtmp_http_write(h, "", 1)) == 1)
            ret = rtmp_http_send_cmd(h, "close");
    }

    av_freep(&rt->out_data);
    ffurl_close(rt->stream);

    return ret;
}

static int rtmp_http_open(URLContext *h, const char *uri, int flags)
{
    RTMP_HTTPContext *rt = h->priv_data;
    char headers[1024], url[1024];
    int ret, off = 0;

    av_url_split(NULL, 0, NULL, 0, rt->host, sizeof(rt->host), &rt->port,
                 NULL, 0, uri);

    if (rt->port < 0)
        rt->port = RTMPT_DEFAULT_PORT;

    /* This is the first request that is sent to the server in order to
     * register a client on the server and start a new session. The server
     * replies with a unique id (usually a number) that is used by the client
     * for all future requests.
     * Note: the reply doesn't contain a value for the polling interval.
     * A successful connect resets the consecutive index that is used
     * in the URLs. */
    ff_url_join(url, sizeof(url), "http", NULL, rt->host, rt->port, "/open/1");

    /* alloc the http context */
    if ((ret = ffurl_alloc(&rt->stream, url, AVIO_FLAG_READ_WRITE, NULL)) < 0)
        goto fail;

    /* set options */
    snprintf(headers, sizeof(headers),
             "Cache-Control: no-cache\r\n"
             "Content-type: application/x-fcs\r\n"
             "User-Agent: Shockwave Flash\r\n");
    av_opt_set(rt->stream->priv_data, "headers", headers, 0);
    av_opt_set(rt->stream->priv_data, "multiple_requests", "1", 0);
    av_opt_set_bin(rt->stream->priv_data, "post_data", "", 1, 0);

    /* open the http context */
    if ((ret = ffurl_connect(rt->stream, NULL)) < 0)
        goto fail;

    /* read the server reply which contains a unique ID */
    for (;;) {
        ret = ffurl_read(rt->stream, rt->client_id + off, sizeof(rt->client_id) - off);
        if (ret == AVERROR_EOF)
            break;
        if (ret < 0)
            goto fail;
        off += ret;
        if (off == sizeof(rt->client_id)) {
            ret = AVERROR(EIO);
            goto fail;
        }
    }
    while (off > 0 && isspace(rt->client_id[off - 1]))
        off--;
    rt->client_id[off] = '\0';

    /* http context is now initialized */
    rt->initialized = 1;
    return 0;

fail:
    rtmp_http_close(h);
    return ret;
}

URLProtocol ff_rtmphttp_protocol = {
    .name            = "rtmphttp",
    .url_open        = rtmp_http_open,
    .url_read        = rtmp_http_read,
    .url_write       = rtmp_http_write,
    .url_close       = rtmp_http_close,
    .priv_data_size  = sizeof(RTMP_HTTPContext),
    .flags           = URL_PROTOCOL_FLAG_NETWORK,
};
libavformat/rtmpproto.c
@@ -1112,9 +1112,15 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
     av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
                  path, sizeof(path), s->filename);

+    if (!strcmp(proto, "rtmpt")) {
+        /* open the http tunneling connection */
+        ff_url_join(buf, sizeof(buf), "rtmphttp", NULL, hostname, port, NULL);
+    } else {
+        /* open the tcp connection */
         if (port < 0)
             port = RTMP_DEFAULT_PORT;
         ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
+    }

     if ((ret = ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
                           &s->interrupt_callback, NULL)) < 0) {
@@ -1425,3 +1431,21 @@ URLProtocol ff_rtmp_protocol = {
     .flags          = URL_PROTOCOL_FLAG_NETWORK,
     .priv_data_class= &rtmp_class,
 };
+
+static const AVClass rtmpt_class = {
+    .class_name = "rtmpt",
+    .item_name  = av_default_item_name,
+    .option     = rtmp_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+URLProtocol ff_rtmpt_protocol = {
+    .name            = "rtmpt",
+    .url_open        = rtmp_open,
+    .url_read        = rtmp_read,
+    .url_write       = rtmp_write,
+    .url_close       = rtmp_close,
+    .priv_data_size  = sizeof(RTMPContext),
+    .flags           = URL_PROTOCOL_FLAG_NETWORK,
+    .priv_data_class = &rtmpt_class,
+};
libavformat/rtpenc.c
@@ -198,11 +198,11 @@ static int rtp_write_header(AVFormatContext *s1)
         /* max_header_toc_size + the largest AMR payload must fit */
         if (1 + s->max_frames_per_packet + n > s->max_payload_size) {
             av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n");
-            return -1;
+            goto fail;
         }
         if (st->codec->channels != 1) {
             av_log(s1, AV_LOG_ERROR, "Only mono is supported\n");
-            return -1;
+            goto fail;
         }
     case CODEC_ID_AAC:
         s->num_frames = 0;
@@ -216,6 +216,10 @@ defaultcase:
     }

     return 0;
+
+fail:
+    av_freep(&s->buf);
+    return AVERROR(EINVAL);
 }

 /* send an rtcp sender report packet */
libavformat/rtpenc.h
@@ -66,12 +66,14 @@ typedef struct RTPMuxContext RTPMuxContext;
 #define FF_RTP_FLAG_MP4A_LATM  1
 #define FF_RTP_FLAG_RFC2190    2
 #define FF_RTP_FLAG_SKIP_RTCP  4
+#define FF_RTP_FLAG_H264_MODE0 8

 #define FF_RTP_FLAG_OPTS(ctx, fieldname) \
     { "rtpflags", "RTP muxer flags", offsetof(ctx, fieldname), AV_OPT_TYPE_FLAGS, {.dbl = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
     { "latm", "Use MP4A-LATM packetization instead of MPEG4-GENERIC for AAC", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_MP4A_LATM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
     { "rfc2190", "Use RFC 2190 packetization instead of RFC 4629 for H.263", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_RFC2190}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
     { "skip_rtcp", "Don't send RTCP sender reports", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_SKIP_RTCP}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \
+    { "h264_mode0", "Use mode 0 for H264 in RTP", 0, AV_OPT_TYPE_CONST, {.dbl = FF_RTP_FLAG_H264_MODE0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "rtpflags" }, \

 void ff_rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m);

libavformat/rtpenc_h264.c
@@ -55,6 +55,12 @@ static void nal_send(AVFormatContext *s1, const uint8_t *buf, int size, int last
         uint8_t type = buf[0] & 0x1F;
         uint8_t nri = buf[0] & 0x60;

+        if (s->flags & FF_RTP_FLAG_H264_MODE0) {
+            av_log(s1, AV_LOG_ERROR,
+                   "NAL size %d > %d, try -slice-max-size %d\n", size,
+                   s->max_payload_size, s->max_payload_size);
+            return;
+        }
         av_log(s1, AV_LOG_DEBUG, "NAL size %d > %d\n", size, s->max_payload_size);
         s->buf[0] = 28; /* FU Indicator; Type = 28 ---> FU-A */
         s->buf[0] |= nri;
libavformat/sdp.c
@@ -388,15 +388,20 @@ static char *sdp_write_media_attributes(char *buff, int size, AVCodecContext *c,
     char *config = NULL;

     switch (c->codec_id) {
-        case CODEC_ID_H264:
+        case CODEC_ID_H264: {
+            int mode = 1;
+            if (fmt && fmt->oformat->priv_class &&
+                av_opt_flag_is_set(fmt->priv_data, "rtpflags", "h264_mode0"))
+                mode = 0;
             if (c->extradata_size) {
                 config = extradata2psets(c);
             }
             av_strlcatf(buff, size, "a=rtpmap:%d H264/90000\r\n"
-                                    "a=fmtp:%d packetization-mode=1%s\r\n",
+                                    "a=fmtp:%d packetization-mode=%d%s\r\n",
                                      payload_type,
-                                     payload_type, config ? config : "");
+                                     payload_type, mode, config ? config : "");
             break;
+        }
         case CODEC_ID_H263:
         case CODEC_ID_H263P:
             /* a=framesize is required by 3GPP TS 26.234 (PSS). It
libavformat/version.h
@@ -30,7 +30,7 @@
 #include "libavutil/avutil.h"

 #define LIBAVFORMAT_VERSION_MAJOR 54
-#define LIBAVFORMAT_VERSION_MINOR 7
+#define LIBAVFORMAT_VERSION_MINOR 8
 #define LIBAVFORMAT_VERSION_MICRO 100

 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
libavresample/audio_mix.c
@@ -320,7 +320,8 @@ int ff_audio_mix_init(AVAudioResampleContext *avr)
                                        avr->center_mix_level,
                                        avr->surround_mix_level,
                                        avr->lfe_mix_level, 1, matrix_dbl,
-                                       avr->in_channels);
+                                       avr->in_channels,
+                                       avr->matrix_encoding);
         if (ret < 0) {
             av_free(matrix_dbl);
             return ret;
@@ -54,6 +54,8 @@
 #define SURROUND_DIRECT_LEFT 33
 #define SURROUND_DIRECT_RIGHT 34
 
+#define SQRT3_2 1.22474487139158904909  /* sqrt(3/2) */
+
 static av_always_inline int even(uint64_t layout)
 {
     return (!layout || (layout & (layout - 1)));
@@ -83,14 +85,21 @@ static int sane_layout(uint64_t layout)
 int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
                             double center_mix_level, double surround_mix_level,
                             double lfe_mix_level, int normalize,
-                            double *matrix_out, int stride)
+                            double *matrix_out, int stride,
+                            enum AVMatrixEncoding matrix_encoding)
 {
     int i, j, out_i, out_j;
     double matrix[64][64] = {{0}};
-    int64_t unaccounted = in_layout & ~out_layout;
+    int64_t unaccounted;
     double maxcoef = 0;
     int in_channels, out_channels;
 
+    if ((out_layout & AV_CH_LAYOUT_STEREO_DOWNMIX) == AV_CH_LAYOUT_STEREO_DOWNMIX) {
+        out_layout = AV_CH_LAYOUT_STEREO;
+    }
+
+    unaccounted = in_layout & ~out_layout;
+
     in_channels  = av_get_channel_layout_nb_channels( in_layout);
     out_channels = av_get_channel_layout_nb_channels(out_layout);
 
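The early remap above matters because AV_CH_LAYOUT_STEREO_DOWNMIX uses its own channel bits; without it, every input channel of a 5.1 source would count as unaccounted when a Lt/Rt output is requested. A small check of that mask arithmetic (a sketch, not part of the patch):

    /* Sketch: show the unaccounted-channel mask before and after treating the
     * stereo-downmix layout as plain stereo, as the hunk above now does. */
    #include <stdint.h>
    #include <stdio.h>
    #include <libavutil/audioconvert.h>

    int main(void)
    {
        uint64_t in  = AV_CH_LAYOUT_5POINT1;
        uint64_t out = AV_CH_LAYOUT_STEREO_DOWNMIX;
        printf("before remap: 0x%llx\n", (unsigned long long)(in & ~out));
        out = AV_CH_LAYOUT_STEREO;
        printf("after remap:  0x%llx\n", (unsigned long long)(in & ~out));
        return 0;
    }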
@@ -140,8 +149,19 @@ int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
             matrix[SIDE_LEFT ][BACK_CENTER] += M_SQRT1_2;
             matrix[SIDE_RIGHT][BACK_CENTER] += M_SQRT1_2;
         } else if (out_layout & AV_CH_FRONT_LEFT) {
+            if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY ||
+                matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
+                if (unaccounted & (AV_CH_BACK_LEFT | AV_CH_SIDE_LEFT)) {
+                    matrix[FRONT_LEFT ][BACK_CENTER] -= surround_mix_level * M_SQRT1_2;
+                    matrix[FRONT_RIGHT][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
+                } else {
+                    matrix[FRONT_LEFT ][BACK_CENTER] -= surround_mix_level;
+                    matrix[FRONT_RIGHT][BACK_CENTER] += surround_mix_level;
+                }
+            } else {
             matrix[FRONT_LEFT ][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
             matrix[FRONT_RIGHT][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
+            }
         } else if (out_layout & AV_CH_FRONT_CENTER) {
             matrix[FRONT_CENTER][BACK_CENTER] += surround_mix_level * M_SQRT1_2;
         } else
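In the hunk above, a Dolby/DPLII request feeds the back-center channel into the two front channels with opposite signs, so a matrix decoder can steer it back to the surround output; without matrixing it keeps the in-phase -3 dB mix. A scalar sketch of the two cases (arbitrary sample value, full-level branch of the new code):

    /* Sketch: back-center sample bc mixed into Lt/Rt (matrixed, anti-phase)
     * versus L/R (plain, in phase at -3 dB); slev plays the role of
     * surround_mix_level. Not part of the patch. */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double bc = 0.5, slev = M_SQRT1_2;
        double lt = -slev * bc,            rt = +slev * bc;             /* matrixed */
        double l  = slev * M_SQRT1_2 * bc, r  = slev * M_SQRT1_2 * bc;  /* plain    */
        printf("matrixed Lt=%+.3f Rt=%+.3f  plain L=%+.3f R=%+.3f\n", lt, rt, l, r);
        return 0;
    }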
@@ -163,8 +183,20 @@ int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
                 matrix[SIDE_RIGHT][BACK_RIGHT] += 1.0;
             }
         } else if (out_layout & AV_CH_FRONT_LEFT) {
+            if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY) {
+                matrix[FRONT_LEFT ][BACK_LEFT ] -= surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_LEFT ][BACK_RIGHT] -= surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_RIGHT][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level * M_SQRT1_2;
+            } else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
+                matrix[FRONT_LEFT ][BACK_LEFT ] -= surround_mix_level * SQRT3_2;
+                matrix[FRONT_LEFT ][BACK_RIGHT] -= surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_RIGHT][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level * SQRT3_2;
+            } else {
             matrix[FRONT_LEFT ][BACK_LEFT ] += surround_mix_level;
             matrix[FRONT_RIGHT][BACK_RIGHT] += surround_mix_level;
+            }
         } else if (out_layout & AV_CH_FRONT_CENTER) {
             matrix[FRONT_CENTER][BACK_LEFT ] += surround_mix_level * M_SQRT1_2;
             matrix[FRONT_CENTER][BACK_RIGHT] += surround_mix_level * M_SQRT1_2;
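For discrete back left/right the new DPLII branch is asymmetric: the same-side rear is weighted by SQRT3_2 and the opposite-side rear by M_SQRT1_2, with the left-output contributions negated so the rears end up out of phase between Lt and Rt. The rear columns of the resulting 2x2 submatrix look like this (sketch; slev stands for surround_mix_level):

    /* Sketch of the BL/BR columns of a DPLII downmix matrix as built above. */
    #include <stdio.h>

    #define SQRT3_2_C  1.22474487139158904909  /* sqrt(3/2), same value as SQRT3_2   */
    #define SQRT1_2_C  0.70710678118654752440  /* 1/sqrt(2), same value as M_SQRT1_2 */

    int main(void)
    {
        double slev = 0.5;
        double m[2][2] = {           /* rows: Lt, Rt; columns: BL, BR */
            { -slev * SQRT3_2_C, -slev * SQRT1_2_C },
            { +slev * SQRT1_2_C, +slev * SQRT3_2_C },
        };
        printf("Lt: %+f*BL %+f*BR\nRt: %+f*BL %+f*BR\n",
               m[0][0], m[0][1], m[1][0], m[1][1]);
        return 0;
    }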
@@ -187,8 +219,20 @@ int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
             matrix[BACK_CENTER][SIDE_LEFT ] += M_SQRT1_2;
             matrix[BACK_CENTER][SIDE_RIGHT] += M_SQRT1_2;
         } else if (out_layout & AV_CH_FRONT_LEFT) {
+            if (matrix_encoding == AV_MATRIX_ENCODING_DOLBY) {
+                matrix[FRONT_LEFT ][SIDE_LEFT ] -= surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_LEFT ][SIDE_RIGHT] -= surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_RIGHT][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level * M_SQRT1_2;
+            } else if (matrix_encoding == AV_MATRIX_ENCODING_DPLII) {
+                matrix[FRONT_LEFT ][SIDE_LEFT ] -= surround_mix_level * SQRT3_2;
+                matrix[FRONT_LEFT ][SIDE_RIGHT] -= surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_RIGHT][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
+                matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level * SQRT3_2;
+            } else {
             matrix[FRONT_LEFT ][SIDE_LEFT ] += surround_mix_level;
             matrix[FRONT_RIGHT][SIDE_RIGHT] += surround_mix_level;
+            }
         } else if (out_layout & AV_CH_FRONT_CENTER) {
             matrix[FRONT_CENTER][SIDE_LEFT ] += surround_mix_level * M_SQRT1_2;
             matrix[FRONT_CENTER][SIDE_RIGHT] += surround_mix_level * M_SQRT1_2;
@@ -131,12 +131,13 @@ void avresample_free(AVAudioResampleContext **avr);
  *                        the weight of input channel i in output channel o.
  * @param stride          distance between adjacent input channels in the
  *                        matrix array
+ * @param matrix_encoding matrixed stereo downmix mode (e.g. dplii)
  * @return                0 on success, negative AVERROR code on failure
  */
 int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
                             double center_mix_level, double surround_mix_level,
                             double lfe_mix_level, int normalize, double *matrix,
-                            int stride);
+                            int stride, enum AVMatrixEncoding matrix_encoding);
 
 /**
  * Get the current channel mixing matrix.
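With the extra parameter the public builder can be called directly for a matrixed downmix. A minimal sketch for a 5.1-to-stereo Pro Logic II matrix applied to an already-opened context (the 0.707 mix levels are illustrative defaults, not values taken from this patch):

    /* Sketch: build and install a 5.1 -> stereo DPLII matrix; assumes avr is
     * an AVAudioResampleContext that has already been configured and opened. */
    #include <math.h>
    #include <libavresample/avresample.h>
    #include <libavutil/audioconvert.h>

    static int set_dplii_matrix(AVAudioResampleContext *avr)
    {
        double matrix[2 * 6];  /* matrix[i + stride * o], stride = in_channels */
        int ret = avresample_build_matrix(AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_STEREO,
                                          M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, 1,
                                          matrix, 6, AV_MATRIX_ENCODING_DPLII);
        if (ret < 0)
            return ret;
        return avresample_set_matrix(avr, matrix, 6);
    }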
@@ -70,6 +70,7 @@ struct AVAudioResampleContext {
     AudioConvert *ac_out;       /**< output sample format conversion context */
     ResampleContext *resample;  /**< resampling context */
     AudioMix *am;               /**< channel mixing context */
+    enum AVMatrixEncoding matrix_encoding;  /**< matrixed stereo encoding */
 };
 
 #endif /* AVRESAMPLE_INTERNAL_H */
@@ -52,6 +52,10 @@ static const AVOption options[] = {
     { "phase_shift", "Resampling Phase Shift", OFFSET(phase_shift), AV_OPT_TYPE_INT, { 10 }, 0, 30, /* ??? */ PARAM },
     { "linear_interp", "Use Linear Interpolation", OFFSET(linear_interp), AV_OPT_TYPE_INT, { 0 }, 0, 1, PARAM },
     { "cutoff", "Cutoff Frequency Ratio", OFFSET(cutoff), AV_OPT_TYPE_DOUBLE, { 0.8 }, 0.0, 1.0, PARAM },
+    { "matrix_encoding", "Matrixed Stereo Encoding", OFFSET(matrix_encoding), AV_OPT_TYPE_INT, { AV_MATRIX_ENCODING_NONE}, AV_MATRIX_ENCODING_NONE, AV_MATRIX_ENCODING_NB-1, PARAM, "matrix_encoding" },
+        { "none", "None", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_NONE }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
+        { "dolby", "Dolby", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_DOLBY }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
+        { "dplii", "Dolby Pro Logic II", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_DPLII }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
     { NULL },
 };
 
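The new AVOption exposes the same choice without touching the matrix by hand: set "matrix_encoding" on the context before opening it and the default matrix built in ff_audio_mix_init picks it up. A sketch; the rest of the context setup (layouts, rates, sample formats) is assumed:

    /* Sketch: ask for a Dolby Pro Logic II downmix via the option added above,
     * then open the context. */
    #include <libavresample/avresample.h>
    #include <libavutil/opt.h>

    static int open_with_dplii(AVAudioResampleContext *avr)
    {
        int ret = av_opt_set(avr, "matrix_encoding", "dplii", 0);
        if (ret < 0)
            return ret;
        return avresample_open(avr);
    }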
@@ -21,7 +21,7 @@
 
 #define LIBAVRESAMPLE_VERSION_MAJOR 0
 #define LIBAVRESAMPLE_VERSION_MINOR 0
-#define LIBAVRESAMPLE_VERSION_MICRO 2
+#define LIBAVRESAMPLE_VERSION_MICRO 3
 
 #define LIBAVRESAMPLE_VERSION_INT AV_VERSION_INT(LIBAVRESAMPLE_VERSION_MAJOR, \
                                                  LIBAVRESAMPLE_VERSION_MINOR, \
@@ -150,3 +150,84 @@ cglobal mix_2_to_1_s16p_q8, 3,4,6, src, matrix, len, src1
     sub         lend, mmsize/2
     jg .loop
     REP_RET
+
+;-----------------------------------------------------------------------------
+; void ff_mix_1_to_2_fltp_flt(float **src, float **matrix, int len,
+;                             int out_ch, int in_ch);
+;-----------------------------------------------------------------------------
+
+%macro MIX_1_TO_2_FLTP_FLT 0
+cglobal mix_1_to_2_fltp_flt, 3,5,4, src0, matrix0, len, src1, matrix1
+    mov       src1q, [src0q+gprsize]
+    mov       src0q, [src0q]
+    sub       src1q, src0q
+    mov    matrix1q, [matrix0q+gprsize]
+    mov    matrix0q, [matrix0q]
+    VBROADCASTSS m2, [matrix0q]
+    VBROADCASTSS m3, [matrix1q]
+    ALIGN 16
+.loop:
+    mova         m0, [src0q]
+    mulps        m1, m0, m3
+    mulps        m0, m0, m2
+    mova  [src0q      ], m0
+    mova  [src0q+src1q], m1
+    add       src0q, mmsize
+    sub        lend, mmsize/4
+    jg .loop
+    REP_RET
+%endmacro
+
+INIT_XMM sse
+MIX_1_TO_2_FLTP_FLT
+%if HAVE_AVX
+INIT_YMM avx
+MIX_1_TO_2_FLTP_FLT
+%endif
+
+;-----------------------------------------------------------------------------
+; void ff_mix_1_to_2_s16p_flt(int16_t **src, float **matrix, int len,
+;                             int out_ch, int in_ch);
+;-----------------------------------------------------------------------------
+
+%macro MIX_1_TO_2_S16P_FLT 0
+cglobal mix_1_to_2_s16p_flt, 3,5,6, src0, matrix0, len, src1, matrix1
+    mov       src1q, [src0q+gprsize]
+    mov       src0q, [src0q]
+    sub       src1q, src0q
+    mov    matrix1q, [matrix0q+gprsize]
+    mov    matrix0q, [matrix0q]
+    VBROADCASTSS m4, [matrix0q]
+    VBROADCASTSS m5, [matrix1q]
+    ALIGN 16
+.loop:
+    mova         m0, [src0q]
+    S16_TO_S32_SX 0, 2
+    cvtdq2ps     m0, m0
+    cvtdq2ps     m2, m2
+    mulps        m1, m0, m5
+    mulps        m0, m0, m4
+    mulps        m3, m2, m5
+    mulps        m2, m2, m4
+    cvtps2dq     m0, m0
+    cvtps2dq     m1, m1
+    cvtps2dq     m2, m2
+    cvtps2dq     m3, m3
+    packssdw     m0, m2
+    packssdw     m1, m3
+    mova  [src0q      ], m0
+    mova  [src0q+src1q], m1
+    add       src0q, mmsize
+    sub        lend, mmsize/2
+    jg .loop
+    REP_RET
+%endmacro
+
+INIT_XMM sse2
+MIX_1_TO_2_S16P_FLT
+INIT_XMM sse4
+MIX_1_TO_2_S16P_FLT
+%if HAVE_AVX
+INIT_XMM avx
+MIX_1_TO_2_S16P_FLT
+%endif
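The new assembly covers the 1-in/2-out case: one planar input channel is duplicated into two output planes with independent gains taken from matrix[0][0] and matrix[1][0], working in place over the input plane. A scalar C equivalent of the fltp loop (a reference sketch, not part of the patch):

    /* Scalar reference for ff_mix_1_to_2_fltp_flt above: src[0] is the single
     * input plane (overwritten with output channel 0), src[1] the second
     * output plane; out_ch/in_ch are fixed at 2/1 by the caller. */
    static void mix_1_to_2_fltp_flt_c(float **src, float **matrix, int len,
                                      int out_ch, int in_ch)
    {
        float m0 = matrix[0][0];
        float m1 = matrix[1][0];
        (void)out_ch; (void)in_ch;  /* always 2 and 1 for this function */
        for (int i = 0; i < len; i++) {
            float v   = src[0][i];
            src[0][i] = v * m0;
            src[1][i] = v * m1;
        }
    }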
@@ -35,6 +35,18 @@ extern void ff_mix_2_to_1_s16p_flt_sse4(int16_t **src, float **matrix, int len,
 extern void ff_mix_2_to_1_s16p_q8_sse2(int16_t **src, int16_t **matrix,
                                        int len, int out_ch, int in_ch);
 
+extern void ff_mix_1_to_2_fltp_flt_sse(float **src, float **matrix, int len,
+                                       int out_ch, int in_ch);
+extern void ff_mix_1_to_2_fltp_flt_avx(float **src, float **matrix, int len,
+                                       int out_ch, int in_ch);
+
+extern void ff_mix_1_to_2_s16p_flt_sse2(int16_t **src, float **matrix, int len,
+                                        int out_ch, int in_ch);
+extern void ff_mix_1_to_2_s16p_flt_sse4(int16_t **src, float **matrix, int len,
+                                        int out_ch, int in_ch);
+extern void ff_mix_1_to_2_s16p_flt_avx (int16_t **src, float **matrix, int len,
+                                        int out_ch, int in_ch);
+
 av_cold void ff_audio_mix_init_x86(AudioMix *am)
 {
 #if HAVE_YASM
@@ -43,20 +55,30 @@ av_cold void ff_audio_mix_init_x86(AudioMix *am)
     if (mm_flags & AV_CPU_FLAG_SSE && HAVE_SSE) {
         ff_audio_mix_set_func(am, AV_SAMPLE_FMT_FLTP, AV_MIX_COEFF_TYPE_FLT,
                               2, 1, 16, 8, "SSE", ff_mix_2_to_1_fltp_flt_sse);
+        ff_audio_mix_set_func(am, AV_SAMPLE_FMT_FLTP, AV_MIX_COEFF_TYPE_FLT,
+                              1, 2, 16, 4, "SSE", ff_mix_1_to_2_fltp_flt_sse);
     }
     if (mm_flags & AV_CPU_FLAG_SSE2 && HAVE_SSE) {
         ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
                               2, 1, 16, 8, "SSE2", ff_mix_2_to_1_s16p_flt_sse2);
         ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_Q8,
                               2, 1, 16, 8, "SSE2", ff_mix_2_to_1_s16p_q8_sse2);
+        ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
+                              1, 2, 16, 8, "SSE2", ff_mix_1_to_2_s16p_flt_sse2);
     }
     if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE) {
         ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
                               2, 1, 16, 8, "SSE4", ff_mix_2_to_1_s16p_flt_sse4);
+        ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
+                              1, 2, 16, 8, "SSE4", ff_mix_1_to_2_s16p_flt_sse4);
     }
     if (mm_flags & AV_CPU_FLAG_AVX && HAVE_AVX) {
         ff_audio_mix_set_func(am, AV_SAMPLE_FMT_FLTP, AV_MIX_COEFF_TYPE_FLT,
                               2, 1, 32, 16, "AVX", ff_mix_2_to_1_fltp_flt_avx);
+        ff_audio_mix_set_func(am, AV_SAMPLE_FMT_FLTP, AV_MIX_COEFF_TYPE_FLT,
+                              1, 2, 32, 8, "AVX", ff_mix_1_to_2_fltp_flt_avx);
+        ff_audio_mix_set_func(am, AV_SAMPLE_FMT_S16P, AV_MIX_COEFF_TYPE_FLT,
+                              1, 2, 16, 8, "AVX", ff_mix_1_to_2_s16p_flt_avx);
     }
 #endif
 }
@@ -101,6 +101,13 @@
 #define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
 #define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
 
+enum AVMatrixEncoding {
+    AV_MATRIX_ENCODING_NONE,
+    AV_MATRIX_ENCODING_DOLBY,
+    AV_MATRIX_ENCODING_DPLII,
+    AV_MATRIX_ENCODING_NB
+};
+
 /**
  * @}
  */
@@ -76,6 +76,10 @@ FATE_VORBIS += fate-vorbis-19
 fate-vorbis-19: CMD = pcm -i $(SAMPLES)/vorbis/test-short2_small.ogg
 fate-vorbis-19: REF = $(SAMPLES)/vorbis/test-short2_small.pcm
 
+FATE_VORBIS += fate-vorbis-20
+fate-vorbis-20: CMD = pcm -i $(SAMPLES)/vorbis/6.ogg
+fate-vorbis-20: REF = $(SAMPLES)/vorbis/6.pcm
+
 FATE_SAMPLES_AVCONV += $(FATE_VORBIS)
 fate-vorbis: $(FATE_VORBIS)
 $(FATE_VORBIS): CMP = oneoff