--- mem_transfer_mmx.asm 2004/04/12 15:49:56 1.11 +++ mem_transfer_mmx.asm 2008/11/11 20:46:24 1.19 @@ -6,7 +6,6 @@ ; * Copyright (C) 2001 Peter Ross ; * 2001 Michael Militzer ; * 2002 Pascal Massimino -; * 2004 Jean-Marc Bastide ; * ; * This program is free software ; you can redistribute it and/or modify ; * it under the terms of the GNU General Public License as published by @@ -22,7 +21,7 @@ ; * along with this program ; if not, write to the Free Software ; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ; * -; * $Id: mem_transfer_mmx.asm,v 1.11 2004/04/12 15:49:56 edgomez Exp $ +; * $Id: mem_transfer_mmx.asm,v 1.19 2008/11/11 20:46:24 Isibaar Exp $ ; * ; ***************************************************************************/ @@ -30,10 +29,23 @@ %macro cglobal 1 %ifdef PREFIX - global _%1 - %define %1 _%1 + %ifdef MARK_FUNCS + global _%1:function %1.endfunc-%1 + %define %1 _%1:function %1.endfunc-%1 + %define ENDFUNC .endfunc + %else + global _%1 + %define %1 _%1 + %define ENDFUNC + %endif %else - global %1 + %ifdef MARK_FUNCS + global %1:function %1.endfunc-%1 + %define ENDFUNC .endfunc + %else + global %1 + %define ENDFUNC + %endif %endif %endmacro @@ -42,9 +54,9 @@ ;============================================================================= %ifdef FORMAT_COFF -SECTION .rodata data +SECTION .rodata %else -SECTION .rodata data align=16 +SECTION .rodata align=16 %endif ALIGN 16 @@ -63,8 +75,10 @@ cglobal transfer_8to16subro_mmx cglobal transfer_8to16sub2_mmx cglobal transfer_8to16sub2_xmm +cglobal transfer_8to16sub2ro_xmm cglobal transfer_16to8add_mmx cglobal transfer8x8_copy_mmx +cglobal transfer8x4_copy_mmx ;----------------------------------------------------------------------------- ; @@ -103,6 +117,7 @@ COPY_8_TO_16 2 COPY_8_TO_16 3 ret +ENDFUNC ;----------------------------------------------------------------------------- ; @@ -138,6 +153,7 @@ lea ecx,[ecx+2*edx] COPY_16_TO_8 3 ret +ENDFUNC ;----------------------------------------------------------------------------- ; @@ -152,27 +168,32 @@ %macro COPY_8_TO_16_SUB 2 movq mm0, [eax] ; cur movq mm2, [eax+edx] + movq mm1, mm0 + movq mm3, mm2 + punpcklbw mm0, mm7 + punpcklbw mm2, mm7 movq mm4, [ebx] ; ref + punpckhbw mm1, mm7 + punpckhbw mm3, mm7 movq mm5, [ebx+edx] ; ref + movq mm6, mm4 %if %2 == 1 movq [eax], mm4 movq [eax+edx], mm5 %endif - + punpcklbw mm4, mm7 + punpckhbw mm6, mm7 + psubsw mm0, mm4 + psubsw mm1, mm6 + movq mm6, mm5 + punpcklbw mm5, mm7 + punpckhbw mm6, mm7 + psubsw mm2, mm5 lea eax, [eax+2*edx] - - psubsb mm0,mm4 - psubsb mm2,mm5 + psubsw mm3, mm6 lea ebx,[ebx+2*edx] - - movq mm1,mm0 - movq mm3,mm2 - punpcklbw mm0,mm7 - punpckhbw mm1,mm7 - punpcklbw mm2,mm7 - punpckhbw mm3,mm7 movq [ecx+%1*32+ 0], mm0 ; dst movq [ecx+%1*32+ 8], mm1 @@ -196,6 +217,7 @@ pop ebx ret +ENDFUNC ALIGN 16 @@ -214,6 +236,7 @@ pop ebx ret +ENDFUNC ;----------------------------------------------------------------------------- @@ -229,42 +252,64 @@ %macro COPY_8_TO_16_SUB2_MMX 1 movq mm0, [eax] ; cur movq mm2, [eax+edx] - + ; mm4 <- (ref1+ref2+1) / 2 - ;(a+b+1)/2 = (a|b)-((a^b)>>1) movq mm4, [ebx] ; ref1 movq mm1, [esi] ; ref2 - movq mm3, mm4 - pxor mm3,mm1 - por mm4,mm1 - pandn mm3,mm6 - psrlq mm3,1 - psubb mm4,mm3 - movq [eax],mm4 - - ; mm5 <- (ref1+ref2+1) / 2 + movq mm6, mm4 + movq mm3, mm1 + punpcklbw mm4, mm7 + punpcklbw mm1, mm7 + punpckhbw mm6, mm7 + punpckhbw mm3, mm7 + paddusw mm4, mm1 + paddusw mm6, mm3 + paddusw mm4, [mmx_one] + paddusw mm6, [mmx_one] + psrlw mm4, 1 + psrlw mm6, 1 + packuswb mm4, 
mm6 + movq [eax], mm4 + + ; mm5 <- (ref1+ref2+1) / 2 movq mm5, [ebx+edx] ; ref1 movq mm1, [esi+edx] ; ref2 - movq mm3, mm5 - pxor mm3,mm1 - por mm5,mm1 - pandn mm3,mm6 - psrlq mm3,1 - psubb mm5,mm3 - movq [eax+edx],mm5 - - psubsb mm0,mm4 - psubsb mm2,mm5 + movq mm6, mm5 + movq mm3, mm1 + punpcklbw mm5, mm7 + punpcklbw mm1, mm7 + punpckhbw mm6, mm7 + punpckhbw mm3, mm7 + paddusw mm5, mm1 + paddusw mm6, mm3 + paddusw mm5, [mmx_one] + paddusw mm6, [mmx_one] lea esi, [esi+2*edx] - movq mm1,mm0 - movq mm3,mm2 + psrlw mm5, 1 + psrlw mm6, 1 + packuswb mm5, mm6 + movq [eax+edx], mm5 + + movq mm1, mm0 + movq mm3, mm2 + punpcklbw mm0, mm7 + punpcklbw mm2, mm7 + punpckhbw mm1, mm7 + punpckhbw mm3, mm7 + + movq mm6, mm4 + punpcklbw mm4, mm7 + punpckhbw mm6, mm7 + psubsw mm0, mm4 + psubsw mm1, mm6 + movq mm6, mm5 + punpcklbw mm5, mm7 + punpckhbw mm6, mm7 + psubsw mm2, mm5 lea eax, [eax+2*edx] - punpcklbw mm0,mm7 - punpcklbw mm2,mm7 + psubsw mm3, mm6 lea ebx, [ebx+2*edx] - punpckhbw mm1,mm7 - punpckhbw mm3,mm7 - + movq [ecx+%1*32+ 0], mm0 ; dst movq [ecx+%1*32+ 8], mm1 movq [ecx+%1*32+16], mm2 @@ -280,10 +325,7 @@ push esi mov esi, [esp+8+16] ; Ref2 mov edx, [esp+8+20] ; Stride - pxor mm6,mm6 - pcmpeqb mm5,mm5 pxor mm7, mm7 - psubb mm6, mm5; mm6=1 COPY_8_TO_16_SUB2_MMX 0 COPY_8_TO_16_SUB2_MMX 1 @@ -293,6 +335,7 @@ pop esi pop ebx ret +ENDFUNC ;----------------------------------------------------------------------------- ; @@ -307,28 +350,34 @@ %macro COPY_8_TO_16_SUB2_SSE 1 movq mm0, [eax] ; cur movq mm2, [eax+edx] + movq mm1, mm0 + movq mm3, mm2 + punpcklbw mm0, mm7 + punpcklbw mm2, mm7 movq mm4, [ebx] ; ref1 pavgb mm4, [esi] ; ref2 - + movq [eax], mm4 + punpckhbw mm1, mm7 + punpckhbw mm3, mm7 movq mm5, [ebx+edx] ; ref pavgb mm5, [esi+edx] ; ref2 - - movq [eax], mm4 - movq [eax+edx], mm5 + movq [eax+edx], mm5 - psubsb mm0,mm4 - psubsb mm2,mm5 + movq mm6, mm4 + punpcklbw mm4, mm7 + punpckhbw mm6, mm7 + psubsw mm0, mm4 + psubsw mm1, mm6 lea esi, [esi+2*edx] - movq mm1,mm0 - movq mm3,mm2 + movq mm6, mm5 + punpcklbw mm5, mm7 + punpckhbw mm6, mm7 + psubsw mm2, mm5 lea eax, [eax+2*edx] - punpcklbw mm0,mm7 - punpcklbw mm2,mm7 + psubsw mm3, mm6 lea ebx, [ebx+2*edx] - punpckhbw mm1,mm7 - punpckhbw mm3,mm7 - + movq [ecx+%1*32+ 0], mm0 ; dst movq [ecx+%1*32+ 8], mm1 movq [ecx+%1*32+16], mm2 @@ -354,6 +403,75 @@ pop esi pop ebx ret +ENDFUNC + + +;----------------------------------------------------------------------------- +; +; void transfer_8to16sub2ro_xmm(int16_t * const dct, +; const uint8_t * const cur, +; const uint8_t * ref1, +; const uint8_t * ref2, +; const uint32_t stride) +; +;----------------------------------------------------------------------------- + +%macro COPY_8_TO_16_SUB2RO_SSE 1 + movq mm0, [eax] ; cur + movq mm2, [eax+edx] + movq mm1, mm0 + movq mm3, mm2 + + punpcklbw mm0, mm7 + punpcklbw mm2, mm7 + movq mm4, [ebx] ; ref1 + pavgb mm4, [esi] ; ref2 + punpckhbw mm1, mm7 + punpckhbw mm3, mm7 + movq mm5, [ebx+edx] ; ref + pavgb mm5, [esi+edx] ; ref2 + + movq mm6, mm4 + punpcklbw mm4, mm7 + punpckhbw mm6, mm7 + psubsw mm0, mm4 + psubsw mm1, mm6 + lea esi, [esi+2*edx] + movq mm6, mm5 + punpcklbw mm5, mm7 + punpckhbw mm6, mm7 + psubsw mm2, mm5 + lea eax, [eax+2*edx] + psubsw mm3, mm6 + lea ebx, [ebx+2*edx] + + movq [ecx+%1*32+ 0], mm0 ; dst + movq [ecx+%1*32+ 8], mm1 + movq [ecx+%1*32+16], mm2 + movq [ecx+%1*32+24], mm3 +%endmacro + +ALIGN 16 +transfer_8to16sub2ro_xmm: + pxor mm7, mm7 + mov ecx, [esp + 4] ; Dst + mov eax, [esp + 8] ; Cur + push ebx + mov ebx, [esp+4+12] ; Ref1 + push esi + mov esi, 
[esp+8+16] ; Ref2 + mov edx, [esp+8+20] ; Stride + + COPY_8_TO_16_SUB2RO_SSE 0 + COPY_8_TO_16_SUB2RO_SSE 1 + COPY_8_TO_16_SUB2RO_SSE 2 + COPY_8_TO_16_SUB2RO_SSE 3 + + pop esi + pop ebx + ret +ENDFUNC + ;----------------------------------------------------------------------------- ; @@ -364,16 +482,22 @@ ;----------------------------------------------------------------------------- %macro COPY_16_TO_8_ADD 1 - movq mm0, [eax+%1*32+ 0] ;src - packuswb mm0,[eax+%1*32+8] - movq mm1, [eax+%1*32+ 16] - packuswb mm1,[eax+%1*32+24] - - paddusb mm0,[ecx] - paddusb mm1,[ecx+edx] - movq [ecx],mm0 - movq [ecx+edx],mm1 - + movq mm0, [ecx] + movq mm2, [ecx+edx] + movq mm1, mm0 + movq mm3, mm2 + punpcklbw mm0, mm7 + punpcklbw mm2, mm7 + punpckhbw mm1, mm7 + punpckhbw mm3, mm7 + paddsw mm0, [eax+%1*32+ 0] + paddsw mm1, [eax+%1*32+ 8] + paddsw mm2, [eax+%1*32+16] + paddsw mm3, [eax+%1*32+24] + packuswb mm0, mm1 + movq [ecx], mm0 + packuswb mm2, mm3 + movq [ecx+edx], mm2 %endmacro @@ -382,7 +506,7 @@ mov ecx, [esp+ 4] ; Dst mov eax, [esp+ 8] ; Src mov edx, [esp+12] ; Stride - ; pxor mm7, mm7 + pxor mm7, mm7 COPY_16_TO_8_ADD 0 lea ecx,[ecx+2*edx] @@ -392,6 +516,7 @@ lea ecx,[ecx+2*edx] COPY_16_TO_8_ADD 3 ret +ENDFUNC ;----------------------------------------------------------------------------- ; @@ -402,43 +527,53 @@ ; ;----------------------------------------------------------------------------- +%macro COPY_8_TO_8 0 + movq mm0, [eax] + movq mm1, [eax+edx] + movq [ecx], mm0 + lea eax, [eax+2*edx] + movq [ecx+edx], mm1 +%endmacro + ALIGN 16 transfer8x8_copy_mmx: - + mov ecx, [esp+ 4] ; Dst mov eax, [esp+ 8] ; Src + mov edx, [esp+12] ; Stride + + COPY_8_TO_8 + lea ecx,[ecx+2*edx] + COPY_8_TO_8 + lea ecx,[ecx+2*edx] + COPY_8_TO_8 + lea ecx,[ecx+2*edx] + COPY_8_TO_8 + ret +ENDFUNC + +;----------------------------------------------------------------------------- +; +; void transfer8x4_copy_mmx(uint8_t * const dst, +; const uint8_t * const src, +; const uint32_t stride); +; +; +;----------------------------------------------------------------------------- + +ALIGN 16 +transfer8x4_copy_mmx: mov ecx, [esp+ 4] ; Dst + mov eax, [esp+ 8] ; Src mov edx, [esp+12] ; Stride - - movq mm0,[eax] - lea eax,[eax+edx] - movq mm1,[eax] - lea eax,[eax+edx] - movq mm2,[eax] - lea eax,[eax+edx] - movq mm3,[eax] - lea eax,[eax+edx] - movq mm4,[eax] - lea eax,[eax+edx] - movq mm5,[eax] - lea eax,[eax+edx] - movq mm6,[eax] - lea eax,[eax+edx] - movq mm7,[eax] - - movq [ecx],mm0 - lea ecx,[ecx+edx] - movq [ecx],mm1 - lea ecx,[ecx+edx] - movq [ecx],mm2 - lea ecx,[ecx+edx] - movq [ecx],mm3 - lea ecx,[ecx+edx] - movq [ecx],mm4 - lea ecx,[ecx+edx] - movq [ecx],mm5 - lea ecx,[ecx+edx] - movq [ecx],mm6 - lea ecx,[ecx+edx] - movq [ecx],mm7 - -ret + + COPY_8_TO_8 + lea ecx,[ecx+2*edx] + COPY_8_TO_8 + ret +ENDFUNC + + +%ifidn __OUTPUT_FORMAT__,elf +section ".note.GNU-stack" noalloc noexec nowrite progbits +%endif +
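
For review purposes, a plain-C sketch of the arithmetic the reworked transfer_8to16sub2_mmx/_xmm paths compute is given below; the helper name transfer_8to16sub2_ref is illustrative only and not part of the patch. Both the widened-word MMX path and the pavgb XMM path produce the same rounded average (ref1 + ref2 + 1) >> 1; the non-ro variants write that average back into cur before the subtraction, while transfer_8to16sub2ro_xmm leaves cur untouched.

#include <stdint.h>

/* Illustrative scalar reference, assuming an 8x8 block and a byte stride,
 * matching the interface shown in the prototype comments above. */
static void transfer_8to16sub2_ref(int16_t *dct, uint8_t *cur,
                                   const uint8_t *ref1, const uint8_t *ref2,
                                   uint32_t stride)
{
    for (int j = 0; j < 8; j++) {
        for (int i = 0; i < 8; i++) {
            /* rounded average of the two references (what pavgb computes) */
            uint8_t avg = (uint8_t)((ref1[i] + ref2[i] + 1) >> 1);
            /* residual, widened to 16 bits, stored row-major as in the asm */
            dct[j * 8 + i] = (int16_t)(cur[i] - avg);
            cur[i] = avg;   /* write-back skipped by the read-only (ro) variant */
        }
        cur  += stride;
        ref1 += stride;
        ref2 += stride;
    }
}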