Modify the asm accordingly. GLOBAL is now no longer necessary for PIC-compliant loads.

Originally committed as revision 23739 to svn://svn.ffmpeg.org/ffmpeg/trunk
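(Illustrative sketch, not part of the commit: under the old x86inc convention a
PIC-safe RODATA load carried an explicit GLOBAL suffix, roughly
    mova    m5, [pb_7 GLOBAL]
whereas with the updated headers a plain symbol reference suffices and
RIP-relative addressing is handled internally:
    mova    m5, [pb_7]
)
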
;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"

SECTION_RODATA
pb_f: times 16 db 15
pb_zzzzzzzz77777777: times 8 db -1
pb_7: times 8 db 7
pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
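; pshufb mask naming: one character per byte, 'z' for a zeroed lane (pshufb
; zeroes any byte whose mask entry has the high bit set, hence the -1s) and a
; hex digit for a copy of that source byte. pb_zzzzzzzz77777777 deliberately
; falls through into pb_7, which supplies its upper 8 bytes.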

section .text align=16

%macro PSWAPD_SSE 2
    pshufw %1, %2, 0x4e
%endmacro
%macro PSWAPD_3DN1 2
    movq  %1, %2
    psrlq %1, 32
    punpckldq %1, %2
%endmacro
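; PSWAPD swaps the two 32-bit halves of an MMX register: the SSE variant does
; it in one pshufw (imm 0x4e selects words 2,3,0,1), the plain-3DNow! variant
; emulates it with a shift and an unpack.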

%macro FLOAT_TO_INT16_INTERLEAVE6 1
; void float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len)
cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5
%ifdef ARCH_X86_64
    %define lend r10d
    mov     lend, r2d
%else
    %define lend dword r2m
%endif
    mov src1q, [srcq+1*gprsize]
    mov src2q, [srcq+2*gprsize]
    mov src3q, [srcq+3*gprsize]
    mov src4q, [srcq+4*gprsize]
    mov src5q, [srcq+5*gprsize]
    mov srcq,  [srcq]
    sub src1q, srcq
    sub src2q, srcq
    sub src3q, srcq
    sub src4q, srcq
    sub src5q, srcq
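; Each iteration converts two floats from each of the six channels, then
; transposes the six mm registers (2 samples each) into three quadwords of
; channel-interleaved int16 output: 24 bytes per pair of sample frames.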
.loop:
    cvtps2pi   mm0, [srcq]
    cvtps2pi   mm1, [srcq+src1q]
    cvtps2pi   mm2, [srcq+src2q]
    cvtps2pi   mm3, [srcq+src3q]
    cvtps2pi   mm4, [srcq+src4q]
    cvtps2pi   mm5, [srcq+src5q]
    packssdw   mm0, mm3
    packssdw   mm1, mm4
    packssdw   mm2, mm5
    pswapd     mm3, mm0
    punpcklwd  mm0, mm1
    punpckhwd  mm1, mm2
    punpcklwd  mm2, mm3
    pswapd     mm3, mm0
    punpckldq  mm0, mm2
    punpckhdq  mm2, mm1
    punpckldq  mm1, mm3
    movq [dstq   ], mm0
    movq [dstq+16], mm2
    movq [dstq+ 8], mm1
    add srcq, 8
    add dstq, 24
    sub lend, 2
    jg .loop
    emms
    RET
%endmacro ; FLOAT_TO_INT16_INTERLEAVE6
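; Instantiate the macro three times: %define retargets cvtps2pi to pf2id for
; the 3DNow! builds, and %undef pswapd lets the 3dn2 build use the native
; pswapd instruction instead of an emulation macro.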
%define pswapd PSWAPD_SSE
FLOAT_TO_INT16_INTERLEAVE6 sse
%define cvtps2pi pf2id
%define pswapd PSWAPD_3DN1
FLOAT_TO_INT16_INTERLEAVE6 3dnow
%undef pswapd
FLOAT_TO_INT16_INTERLEAVE6 3dn2
%undef cvtps2pi




%macro SCALARPRODUCT 1
; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order, int shift)
cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift
    shl orderq, 1
    add v1q, orderq
    add v2q, orderq
    neg orderq
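; Standard negative-offset idiom: order is converted from elements to bytes,
; both pointers are advanced to the end of the arrays, and the negated offset
; counts up toward zero so the loop condition is a plain sign check.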
    movd    m3, shiftm
    pxor    m2, m2
.loop:
    movu    m0, [v1q + orderq]
    movu    m1, [v1q + orderq + mmsize]
    pmaddwd m0, [v2q + orderq]
    pmaddwd m1, [v2q + orderq + mmsize]
    paddd   m2, m0
    paddd   m2, m1
    add     orderq, mmsize*2
    jl .loop
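; Horizontal reduction: fold the dword partial sums in m2 down to a single
; dword, applying the requested arithmetic shift along the way.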
%if mmsize == 16
    movhlps m0, m2
    paddd   m2, m0
    psrad   m2, m3
    pshuflw m0, m2, 0x4e
%else
    psrad   m2, m3
    pshufw  m0, m2, 0x4e
%endif
    paddd   m2, m0
    movd   eax, m2
    RET

; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
cglobal scalarproduct_and_madd_int16_%1, 4,4,8, v1, v2, v3, order, mul
    shl orderq, 1
    movd    m7, mulm
%if mmsize == 16
    pshuflw m7, m7, 0
    punpcklqdq m7, m7
%else
    pshufw  m7, m7, 0
%endif
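; m7 now holds the 16-bit multiplier broadcast to every word lane.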
    pxor    m6, m6
    add v1q, orderq
    add v2q, orderq
    add v3q, orderq
    neg orderq
.loop:
    movu    m0, [v2q + orderq]
    movu    m1, [v2q + orderq + mmsize]
    mova    m4, [v1q + orderq]
    mova    m5, [v1q + orderq + mmsize]
    movu    m2, [v3q + orderq]
    movu    m3, [v3q + orderq + mmsize]
    pmaddwd m0, m4
    pmaddwd m1, m5
    pmullw  m2, m7
    pmullw  m3, m7
    paddd   m6, m0
    paddd   m6, m1
    paddw   m2, m4
    paddw   m3, m5
    mova    [v1q + orderq], m2
    mova    [v1q + orderq + mmsize], m3
    add     orderq, mmsize*2
    jl .loop
%if mmsize == 16
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
%else
    pshufw  m0, m6, 0x4e
%endif
    paddd   m6, m0
    movd   eax, m6
    RET
%endmacro

INIT_MMX
SCALARPRODUCT mmx2
INIT_XMM
SCALARPRODUCT sse2

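; Loop body for the SSSE3 scalarproduct_and_madd_int16 below, specialized on
; the byte misalignment %1 of v2/v3: each unaligned vector is stitched out of
; two aligned loads with palignr instead of using unaligned loads.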
%macro SCALARPRODUCT_LOOP 1
align 16
.loop%1:
    sub     orderq, mmsize*2
%if %1
    mova    m1, m4
    mova    m4, [v2q + orderq]
    mova    m0, [v2q + orderq + mmsize]
    palignr m1, m0, %1
    palignr m0, m4, %1
    mova    m3, m5
    mova    m5, [v3q + orderq]
    mova    m2, [v3q + orderq + mmsize]
    palignr m3, m2, %1
    palignr m2, m5, %1
%else
    mova    m0, [v2q + orderq]
    mova    m1, [v2q + orderq + mmsize]
    mova    m2, [v3q + orderq]
    mova    m3, [v3q + orderq + mmsize]
%endif
    %define t0  [v1q + orderq]
    %define t1  [v1q + orderq + mmsize]
%ifdef ARCH_X86_64
    mova    m8, t0
    mova    m9, t1
    %define t0  m8
    %define t1  m9
%endif
    pmaddwd m0, t0
    pmaddwd m1, t1
    pmullw  m2, m7
    pmullw  m3, m7
    paddw   m2, t0
    paddw   m3, t1
    paddd   m6, m0
    paddd   m6, m1
    mova    [v1q + orderq], m2
    mova    [v1q + orderq + mmsize], m3
    jg .loop%1
%if %1
    jmp .end
%endif
%endmacro

; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
cglobal scalarproduct_and_madd_int16_ssse3, 4,5,10, v1, v2, v3, order, mul
    shl orderq, 1
    movd    m7, mulm
    pshuflw m7, m7, 0
    punpcklqdq m7, m7
    pxor    m6, m6
    mov    r4d, v2d
    and    r4d, 15
    and    v2q, ~15
    and    v3q, ~15
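; r4d = v2's misalignment in bytes (always even, since the data is int16);
; both pointers are rounded down to 16-byte alignment, and the code relies on
; v3 having the same misalignment as v2, compensated by the matching .loopN.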
    mova    m4, [v2q + orderq]
    mova    m5, [v3q + orderq]
    ; linear is faster than branch tree or jump table, because the branches taken are cyclic (i.e. predictable)
    cmp    r4d, 0
    je .loop0
    cmp    r4d, 2
    je .loop2
    cmp    r4d, 4
    je .loop4
    cmp    r4d, 6
    je .loop6
    cmp    r4d, 8
    je .loop8
    cmp    r4d, 10
    je .loop10
    cmp    r4d, 12
    je .loop12
SCALARPRODUCT_LOOP 14
SCALARPRODUCT_LOOP 12
SCALARPRODUCT_LOOP 10
SCALARPRODUCT_LOOP 8
SCALARPRODUCT_LOOP 6
SCALARPRODUCT_LOOP 4
SCALARPRODUCT_LOOP 2
SCALARPRODUCT_LOOP 0
.end:
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
    paddd   m6, m0
    movd   eax, m6
    RET




; void add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
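; Per byte: pred = median(left, top, left + top - topleft), the classic
; HuffYUV median predictor; dst = pred + diff, and each output byte becomes
; the 'left' for the next one.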
cglobal add_hfyu_median_prediction_mmx2, 6,6,0, dst, top, diff, w, left, left_top
    movq    mm0, [topq]
    movq    mm2, mm0
    movd    mm4, [left_topq]
    psllq   mm2, 8
    movq    mm1, mm0
    por     mm4, mm2
    movd    mm3, [leftq]
    psubb   mm0, mm4 ; t-tl
    add    dstq, wq
    add    topq, wq
    add   diffq, wq
    neg      wq
    jmp .skip
.loop:
    movq    mm4, [topq+wq]
    movq    mm0, mm4
    psllq   mm4, 8
    por     mm4, mm1
    movq    mm1, mm0 ; t
    psubb   mm0, mm4 ; t-tl
.skip:
    movq    mm2, [diffq+wq]
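; The prediction is inherently serial (each byte's 'left' is the previous
; output byte), so the quadword is processed one byte at a time, with the
; results shifted into mm7.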
%assign i 0
%rep 8
    movq    mm4, mm0
    paddb   mm4, mm3 ; t-tl+l
    movq    mm5, mm3
    pmaxub  mm3, mm1
    pminub  mm5, mm1
    pminub  mm3, mm4
    pmaxub  mm3, mm5 ; median
    paddb   mm3, mm2 ; +residual
%if i==0
    movq    mm7, mm3
    psllq   mm7, 56
%else
    movq    mm6, mm3
    psrlq   mm7, 8
    psllq   mm6, 56
    por     mm7, mm6
%endif
%if i<7
    psrlq   mm0, 8
    psrlq   mm1, 8
    psrlq   mm2, 8
%endif
%assign i i+1
%endrep
    movq [dstq+wq], mm7
    add      wq, 8
    jl .loop
    movzx   r2d, byte [dstq-1]
    mov [leftq], r2d
    movzx   r2d, byte [topq-1]
    mov [left_topq], r2d
    RET



%macro ADD_HFYU_LEFT_LOOP 1 ; %1 = is_aligned
    add     srcq, wq
    add     dstq, wq
    neg     wq
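; Byte-wise prefix sum in log2(mmsize) steps: each round adds a copy of the
; partial sums shifted up by 1, 2, 4 (and 8 for xmm) bytes, with the pshufb
; masks broadcasting the needed lanes and zeroing the rest. m0 carries the
; running 'left' between iterations: pshufb with m5 (pb_7 / pb_f) broadcasts
; the top byte of the previous result.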
%%.loop:
    mova    m1, [srcq+wq]
    mova    m2, m1
    psllw   m1, 8
    paddb   m1, m2
    mova    m2, m1
    pshufb  m1, m3
    paddb   m1, m2
    pshufb  m0, m5
    mova    m2, m1
    pshufb  m1, m4
    paddb   m1, m2
%if mmsize == 16
    mova    m2, m1
    pshufb  m1, m6
    paddb   m1, m2
%endif
    paddb   m0, m1
%if %1
    mova    [dstq+wq], m0
%else
    movq    [dstq+wq], m0
    movhps  [dstq+wq+8], m0
%endif
    add     wq, mmsize
    jl %%.loop
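; Return the last valid output byte (the new 'left'): index mmsize-1 minus
; however far w overshot zero, extracted via pshufb.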
 | |
|     mov     eax, mmsize-1
 | |
|     sub     eax, wd
 | |
|     movd    m1, eax
 | |
|     pshufb  m0, m1
 | |
|     movd    eax, m0
 | |
|     RET
 | |
| %endmacro
 | |
| 
 | |
| ; int add_hfyu_left_prediction(uint8_t *dst, const uint8_t *src, int w, int left)
 | |
| INIT_MMX
 | |
| cglobal add_hfyu_left_prediction_ssse3, 3,3,7, dst, src, w, left
 | |
| .skip_prologue:
 | |
|     mova    m5, [pb_7]
 | |
|     mova    m4, [pb_zzzz3333zzzzbbbb]
 | |
|     mova    m3, [pb_zz11zz55zz99zzdd]
 | |
|     movd    m0, leftm
 | |
|     psllq   m0, 56
 | |
|     ADD_HFYU_LEFT_LOOP 1
 | |
| 
 | |
| INIT_XMM
 | |
| cglobal add_hfyu_left_prediction_sse4, 3,3,7, dst, src, w, left
 | |
|     mova    m5, [pb_f]
 | |
|     mova    m6, [pb_zzzzzzzz77777777]
 | |
|     mova    m4, [pb_zzzz3333zzzzbbbb]
 | |
|     mova    m3, [pb_zz11zz55zz99zzdd]
 | |
|     movd    m0, leftm
 | |
|     pslldq  m0, 15
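; Dispatch on alignment: an unaligned source falls back to the MMX/SSSE3
; version (the loop's 16-byte mova loads need an aligned source), and an
; unaligned destination takes the unaligned-store variant of the loop.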
    test    srcq, 15
    jnz add_hfyu_left_prediction_ssse3.skip_prologue
    test    dstq, 15
    jnz .unaligned
    ADD_HFYU_LEFT_LOOP 1
.unaligned:
    ADD_HFYU_LEFT_LOOP 0


; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset
    neg offsetq
    shl offsetq, 2
    sub v1q, offsetq
    sub v2q, offsetq
    xorps xmm0, xmm0
    .loop:
        movaps   xmm1, [v1q+offsetq]
        mulps    xmm1, [v2q+offsetq]
        addps    xmm0, xmm1
        add      offsetq, 16
        js       .loop
    movhlps xmm1, xmm0
    addps   xmm0, xmm1
    movss   xmm1, xmm0
    shufps  xmm0, xmm0, 1
    addss   xmm0, xmm1
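; The scalar sum is now in xmm0. On x86-32 the ABI returns floats in st0, so
; bounce the value through the stack onto the x87 stack.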
%ifndef ARCH_X86_64
    movd    r0m,  xmm0
    fld     dword r0m
%endif
    RET