--- interpolate8x8_xmm.asm	2002/11/17 00:20:30	1.3
+++ interpolate8x8_xmm.asm	2004/07/24 11:38:12	1.5.2.1
@@ -1,80 +1,53 @@
 ;/*****************************************************************************
 ; *
 ; *  XVID MPEG-4 VIDEO CODEC
-; *  xmm 8x8 block-based halfpel interpolation
+; *  - mmx 8x8 block-based halfpel interpolation -
 ; *
-; *  Copyright(C) 2002 Michael Militzer
-; *  Copyright(C) 2002 -Skal-
+; *  Copyright(C) 2002 Michael Militzer
+; *               2002 Pascal Massimino
 ; *
-; *  This file is part of XviD, a free MPEG-4 video encoder/decoder
-; *
-; *  XviD is free software; you can redistribute it and/or modify it
-; *  under the terms of the GNU General Public License as published by
-; *  the Free Software Foundation; either version 2 of the License, or
+; *  This program is free software ; you can redistribute it and/or modify
+; *  it under the terms of the GNU General Public License as published by
+; *  the Free Software Foundation ; either version 2 of the License, or
 ; *  (at your option) any later version.
 ; *
 ; *  This program is distributed in the hope that it will be useful,
-; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+; *  but WITHOUT ANY WARRANTY ; without even the implied warranty of
 ; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 ; *  GNU General Public License for more details.
 ; *
 ; *  You should have received a copy of the GNU General Public License
-; *  along with this program; if not, write to the Free Software
+; *  along with this program ; if not, write to the Free Software
 ; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ; *
-; *  Under section 8 of the GNU General Public License, the copyright
-; *  holders of XVID explicitly forbid distribution in the following
-; *  countries:
-; *
-; *    - Japan
-; *    - United States of America
-; *
-; *  Linking XviD statically or dynamically with other modules is making a
-; *  combined work based on XviD. Thus, the terms and conditions of the
-; *  GNU General Public License cover the whole combination.
-; *
-; *  As a special exception, the copyright holders of XviD give you
-; *  permission to link XviD with independent modules that communicate with
-; *  XviD solely through the VFW1.1 and DShow interfaces, regardless of the
-; *  license terms of these independent modules, and to copy and distribute
-; *  the resulting combined work under terms of your choice, provided that
-; *  every copy of the combined work is accompanied by a complete copy of
-; *  the source code of XviD (the version of XviD used to produce the
-; *  combined work), being distributed under the terms of the GNU General
-; *  Public License plus this exception. An independent module is a module
-; *  which is not derived from or based on XviD.
-; *
-; *  Note that people who make modified versions of XviD are not obligated
-; *  to grant this special exception for their modified versions; it is
-; *  their choice whether to do so. The GNU General Public License gives
-; *  permission to release a modified version without this exception; this
-; *  exception also makes it possible to release a modified version which
-; *  carries forward this exception.
-; *
-; * $Id: interpolate8x8_xmm.asm,v 1.3 2002/11/17 00:20:30 edgomez Exp $
-; *
 ; ****************************************************************************/

-bits 32
+BITS 32

-%macro cglobal 1
+%macro cglobal 1
 %ifdef PREFIX
-	global _%1
+	global _%1
 	%define %1 _%1
 %else
 	global %1
 %endif
 %endmacro

-section .data
-
-
-align 16
+;=============================================================================
+; Read only data
+;=============================================================================
+
+%ifdef FORMAT_COFF
+SECTION .rodata
+%else
+SECTION .rodata align=16
+%endif
+
+ALIGN 16
+mmx_one:
+	times 8 db 1

-mmx_one
-times 8 db 1
-
-section .text
+SECTION .text

 cglobal interpolate8x8_halfpel_h_xmm
 cglobal interpolate8x8_halfpel_v_xmm
@@ -104,29 +77,29 @@
 	movq mm1, [eax+edx]
 	movq mm4, mm0
 	movq mm5, mm1
-	movq mm2, [eax+1]
+	movq mm2, [eax+1]
 	movq mm3, [eax+edx+1]
 	pavgb mm0, mm2
 	pxor mm2, mm4
 	pavgb mm1, mm3
-	lea eax,[eax+2*edx]
+	lea eax, [eax+2*edx]
 	pxor mm3, mm5
 	pand mm2, mm7
 	pand mm3, mm7
 	psubb mm0, mm2
 	movq [ecx], mm0
 	psubb mm1, mm3
-	movq [ecx+edx], mm1
+	movq [ecx+edx], mm1
 %endmacro

-align 16
+ALIGN 16
 interpolate8x8_halfpel_h_xmm:
-	mov eax, [esp+16]; rounding
-	mov ecx, [esp+ 4] ; Dst
+	mov eax, [esp+16] ; rounding
+	mov ecx, [esp+ 4] ; Dst
 	test eax,eax
-	mov eax, [esp+ 8] ; Src
-	mov edx, [esp+12] ; stride
+	mov eax, [esp+ 8] ; Src
+	mov edx, [esp+12] ; stride

 	jnz near .rounding1

@@ -140,7 +113,7 @@
 	ret

 .rounding1
-	; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
+	; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
 	movq mm7, [mmx_one]
 	COPY_H_SSE_RND1
 	lea ecx, [ecx+2*edx]
@@ -154,19 +127,19 @@
 ;===========================================================================
 ;
 ; void interpolate8x8_halfpel_v_xmm(uint8_t * const dst,
-;                                   const uint8_t * const src,
-;                                   const uint32_t stride,
-;                                   const uint32_t rounding);
+;                                   const uint8_t * const src,
+;                                   const uint32_t stride,
+;                                   const uint32_t rounding);
 ;
 ;===========================================================================

 %macro COPY_V_SSE_RND0 0
-	movq mm0, [eax]
-	movq mm1, [eax+edx]
+	movq mm0, [eax]
+	movq mm1, [eax+edx]
 	pavgb mm0, mm1
 	pavgb mm1, [eax+2*edx]
-	lea eax,[eax+2*edx]
-	movq [ecx],mm0
+	lea eax, [eax+2*edx]
+	movq [ecx], mm0
 	movq [ecx+edx],mm1
 %endmacro

@@ -178,28 +151,27 @@
 	movq mm4, mm0
 	movq mm5, mm1
 	pavgb mm0, mm1
-	pxor mm4, mm1
+	pxor mm4, mm1
 	pavgb mm1, mm2
 	pxor mm5, mm2
-	pand mm4, mm7 ; lsb's of (i^j)...
-	pand mm5, mm7 ; lsb's of (i^j)...
-	psubb mm0, mm4 ; ...are subtracted from result of pavgb
+	pand mm4, mm7 ; lsb's of (i^j)...
+	pand mm5, mm7 ; lsb's of (i^j)...
+	psubb mm0, mm4 ; ...are subtracted from result of pavgb
 	movq [ecx], mm0
-	psubb mm1, mm5 ; ...are subtracted from result of pavgb
+	psubb mm1, mm5 ; ...are subtracted from result of pavgb
 	movq [ecx+edx], mm1
 %endmacro

-align 16
+ALIGN 16
 interpolate8x8_halfpel_v_xmm:
 	mov eax, [esp+16]; rounding
-	mov ecx, [esp+ 4] ; Dst
+	mov ecx, [esp+ 4] ; Dst
 	test eax,eax
-	mov eax, [esp+ 8] ; Src
-	mov edx, [esp+12] ; stride
-
-	; we process 2 lines at a time
+	mov eax, [esp+ 8] ; Src
+	mov edx, [esp+12] ; stride
+	; we process 2 lines at a time

 	jnz near .rounding1

 	COPY_V_SSE_RND0
@@ -212,9 +184,9 @@
 	ret

 .rounding1
-	; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
+	; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
 	movq mm7, [mmx_one]
-	movq mm2, [eax] ; loop invariant
+	movq mm2, [eax] ; loop invariant
 	add eax, edx

 	COPY_V_SSE_RND1
@@ -229,9 +201,9 @@
 ;===========================================================================
 ;
 ; void interpolate8x8_halfpel_hv_xmm(uint8_t * const dst,
-;                                    const uint8_t * const src,
-;                                    const uint32_t stride,
-;                                    const uint32_t rounding);
+;                                    const uint8_t * const src,
+;                                    const uint32_t stride,
+;                                    const uint32_t rounding);
 ;
 ;
 ;===========================================================================
@@ -239,7 +211,7 @@
 ; The trick is to correct the result of 'pavgb' with some combination of the
 ; lsb's of the 4 input values i,j,k,l, and their intermediate 'pavgb' (s and t).
 ; The boolean relations are:
-;   (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st
+;   (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st
 ;   (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st
 ;   (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st
 ;   (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st
@@ -248,91 +220,91 @@
 ; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).

 %macro COPY_HV_SSE_RND0 0
-	lea eax,[eax+edx]
+	lea eax, [eax+edx]
+
+	movq mm0, [eax]
+	movq mm1, [eax+1]
+
+	movq mm6, mm0
+	pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
+	lea eax, [eax+edx]
+	pxor mm1, mm6  ; mm1=(j^k). preserved for next step
+
+	por mm3, mm1   ; ij |= jk
+	movq mm6, mm2
+	pxor mm6, mm0  ; mm6 = s^t
+	pand mm3, mm6  ; (ij|jk) &= st
+	pavgb mm2, mm0 ; mm2 = (s+t+1)/2
+	pand mm3, mm7  ; mask lsb
+	psubb mm2, mm3 ; apply.

-	movq mm0, [eax]
-	movq mm1, [eax+1]
+	movq [ecx], mm2

-	movq mm6, mm0
-	pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
-	lea eax,[eax+edx]
-	pxor mm1, mm6  ; mm1=(j^k). preserved for next step
-
-	por mm3, mm1   ; ij |= jk
-	movq mm6, mm2
-	pxor mm6, mm0  ; mm6 = s^t
-	pand mm3, mm6  ; (ij|jk) &= st
-	pavgb mm2, mm0 ; mm2 = (s+t+1)/2
-	pand mm3, mm7  ; mask lsb
-	psubb mm2, mm3 ; apply.
-
-	movq [ecx], mm2
-
-	movq mm2, [eax]
-	movq mm3, [eax+1]
-	movq mm6, mm2
-	pavgb mm2, mm3 ; preserved for next iteration
-	lea ecx,[ecx+edx]
-	pxor mm3, mm6  ; preserved for next iteration
-
-	por mm1, mm3
-	movq mm6, mm0
-	pxor mm6, mm2
-	pand mm1, mm6
-	pavgb mm0, mm2
+	movq mm2, [eax]
+	movq mm3, [eax+1]
+	movq mm6, mm2
+	pavgb mm2, mm3 ; preserved for next iteration
+	lea ecx,[ecx+edx]
+	pxor mm3, mm6  ; preserved for next iteration
+
+	por mm1, mm3
+	movq mm6, mm0
+	pxor mm6, mm2
+	pand mm1, mm6
+	pavgb mm0, mm2

-	pand mm1, mm7
-	psubb mm0, mm1
+	pand mm1, mm7
+	psubb mm0, mm1

-	movq [ecx], mm0
+	movq [ecx], mm0
 %endmacro

 %macro COPY_HV_SSE_RND1 0
-	lea eax,[eax+edx]
+	lea eax, [eax+edx]
+
+	movq mm0, [eax]
+	movq mm1, [eax+1]
+
+	movq mm6, mm0
+	pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
+	lea eax, [eax+edx]
+	pxor mm1, mm6  ; mm1=(j^k). preserved for next step
+
+	pand mm3, mm1
+	movq mm6, mm2
+	pxor mm6, mm0
+	por mm3, mm6
+	pavgb mm2, mm0
+	pand mm3, mm7
+	psubb mm2, mm3

-	movq mm0, [eax]
-	movq mm1, [eax+1]
+	movq [ecx], mm2

-	movq mm6, mm0
-	pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
-	lea eax,[eax+edx]
-	pxor mm1, mm6  ; mm1=(j^k). preserved for next step
-
-	pand mm3, mm1
-	movq mm6, mm2
-	pxor mm6, mm0
-	por mm3, mm6
-	pavgb mm2, mm0
-	pand mm3, mm7
-	psubb mm2, mm3
-
-	movq [ecx], mm2
-
-	movq mm2, [eax]
-	movq mm3, [eax+1]
-	movq mm6, mm2
-	pavgb mm2, mm3 ; preserved for next iteration
-	lea ecx,[ecx+edx]
-	pxor mm3, mm6  ; preserved for next iteration
-
-	pand mm1, mm3
-	movq mm6, mm0
-	pxor mm6, mm2
-	por mm1, mm6
-	pavgb mm0, mm2
-	pand mm1, mm7
-	psubb mm0, mm1
+	movq mm2, [eax]
+	movq mm3, [eax+1]
+	movq mm6, mm2
+	pavgb mm2, mm3 ; preserved for next iteration
+	lea ecx,[ecx+edx]
+	pxor mm3, mm6  ; preserved for next iteration
+
+	pand mm1, mm3
+	movq mm6, mm0
+	pxor mm6, mm2
+	por mm1, mm6
+	pavgb mm0, mm2
+	pand mm1, mm7
+	psubb mm0, mm1

-	movq [ecx], mm0
+	movq [ecx], mm0
 %endmacro

-align 16
+ALIGN 16
 interpolate8x8_halfpel_hv_xmm:
-	mov eax, [esp+16] ; rounding
-	mov ecx, [esp+ 4] ; Dst
-	test eax,eax
-	mov eax, [esp+ 8] ; Src
-	mov edx, [esp+12] ; stride
+	mov eax, [esp+16] ; rounding
+	mov ecx, [esp+ 4] ; Dst
+	test eax, eax
+	mov eax, [esp+ 8] ; Src
+	mov edx, [esp+12] ; stride

 	movq mm7, [mmx_one]

@@ -341,7 +313,7 @@
 	movq mm3, [eax+1]
 	movq mm6, mm2
 	pavgb mm2, mm3
-	pxor mm3, mm6 ; mm2/mm3 ready
+	pxor mm3, mm6 ; mm2/mm3 ready

 	jnz near .rounding1

@@ -362,4 +334,4 @@
 	COPY_HV_SSE_RND1
 	add ecx, edx
 	COPY_HV_SSE_RND1
-	ret
+	ret
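
The comments in this file lean on two rounding identities: (i+j)/2 = (i+j+1)/2 - (i^j)&1 for the h/v paths, and the four (i+j+k+l+R)/4 relations quoted before the hv macros. They can be checked exhaustively for 8-bit inputs with a short standalone C program. The sketch below is illustrative only and is not part of this patch or of the XviD sources; the names i, j, k, l, s, t, ij, kl, st simply mirror the ones used in the comments.

/* Exhaustive check of the pavgb-correction identities quoted in the
 * interpolate8x8_xmm.asm comments (illustrative, not part of XviD). */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* 1D halfpel: pavgb yields (i+j+1)/2; subtracting the lsb of i^j
     * turns it into (i+j)/2, which is what the .rounding1 paths rely on. */
    for (int i = 0; i < 256; i++)
        for (int j = 0; j < 256; j++) {
            int avg_up = (i + j + 1) >> 1;              /* result of pavgb */
            assert(avg_up - ((i ^ j) & 1) == (i + j) / 2);
        }

    /* 2D halfpel: s and t are the two intermediate pavgb results;
     * ij, kl, st are the lsb's of i^j, k^l and s^t, as in the comments. */
    for (int i = 0; i < 256; i++)
        for (int j = 0; j < 256; j++)
            for (int k = 0; k < 256; k += 7)            /* coarser grid keeps runtime low */
                for (int l = 0; l < 256; l += 7) {
                    int s  = (i + j + 1) >> 1;
                    int t  = (k + l + 1) >> 1;
                    int ij = (i ^ j) & 1, kl = (k ^ l) & 1, st = (s ^ t) & 1;
                    int up = (s + t + 1) >> 1;          /* pavgb of the two averages */
                    assert(up - ((ij & kl) & st) == (i + j + k + l + 3) / 4);
                    assert(up - ((ij | kl) & st) == (i + j + k + l + 2) / 4);
                    assert(up - ((ij & kl) | st) == (i + j + k + l + 1) / 4);
                    assert(up - ((ij | kl) | st) == (i + j + k + l + 0) / 4);
                }

    puts("all pavgb correction identities hold");
    return 0;
}

Read against these relations, COPY_HV_SSE_RND0 pairs por with pand (the (i+j+k+l+2)/4 form) while COPY_HV_SSE_RND1 pairs pand with por (the (i+j+k+l+1)/4 form), which lines up with the rounding==0 / rounding==1 split made by the callers.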