--- quantize_mpeg_mmx.asm	2003/10/07 13:02:35	1.1.2.1
+++ quantize_mpeg_mmx.asm	2004/08/22 11:46:10	1.4
@@ -1,681 +1,511 @@
-;/******************************************************************************
-; *                                                                            *
-; *  This file is part of XviD, a free MPEG-4 video encoder/decoder            *
-; *                                                                            *
-; *  XviD is an implementation of a part of one or more MPEG-4 Video tools     *
-; *  as specified in ISO/IEC 14496-2 standard. Those intending to use this     *
-; *  software module in hardware or software products are advised that its     *
-; *  use may infringe existing patents or copyrights, and any such use         *
-; *  would be at such party's own risk. The original developer of this         *
-; *  software module and his/her company, and subsequent editors and their     *
-; *  companies, will have no liability for use of this software or             *
-; *  modifications or derivatives thereof.                                     *
-; *                                                                            *
-; *  XviD is free software; you can redistribute it and/or modify it           *
-; *  under the terms of the GNU General Public License as published by         *
-; *  the Free Software Foundation; either version 2 of the License, or         *
-; *  (at your option) any later version.                                       *
-; *                                                                            *
-; *  XviD is distributed in the hope that it will be useful, but               *
-; *  WITHOUT ANY WARRANTY; without even the implied warranty of                *
-; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the              *
-; *  GNU General Public License for more details.                              *
-; *                                                                            *
-; *  You should have received a copy of the GNU General Public License         *
-; *  along with this program; if not, write to the Free Software               *
-; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA   *
-; *                                                                            *
-; ******************************************************************************/
-;
-;/******************************************************************************
-; *                                                                            *
-; *  quantize4.asm, MMX optimized MPEG quantization/dequantization             *
-; *                                                                            *
-; *  Copyright (C) 2002 - Peter Ross                                           *
-; *  Copyright (C) 2002 - Michael Militzer                                     *
-; *                                                                            *
-; *  For more information visit the XviD homepage: http://www.xvid.org         *
-; *                                                                            *
-; ******************************************************************************/
-;
-;/******************************************************************************
-; *                                                                            *
-; *  Revision history:                                                         *
-; *                                                                            *
-; *  14.06.2002  mmx dequant4_* funcs revamped  -Skal-                         *
-; *  22.01.2002  initial version                                               *
-; *                                                                            *
-; ******************************************************************************/
-
-; data/text alignment
-%define ALIGN 8
+;/**************************************************************************
+; *
+; *  XVID MPEG-4 VIDEO CODEC
+; *  - 3dne Quantization/Dequantization -
+; *
+; *  Copyright (C) 2002-2003 Peter Ross
+; *                2002-2003 Michael Militzer
+; *                2002-2003 Pascal Massimino
+; *
+; *  This program is free software ; you can redistribute it and/or modify
+; *  it under the terms of the GNU General Public License as published by
+; *  the Free Software Foundation ; either version 2 of the License, or
+; *  (at your option) any later version.
+; *
+; *  This program is distributed in the hope that it will be useful,
+; *  but WITHOUT ANY WARRANTY ; without even the implied warranty of
+; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; *  GNU General Public License for more details.
+; *
+; *  You should have received a copy of the GNU General Public License
+; *  along with this program ; if not, write to the Free Software
+; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+; *
+; * $Id: quantize_mpeg_mmx.asm,v 1.4 2004/08/22 11:46:10 edgomez Exp $
+; *
+; *************************************************************************/

 %define SATURATE

-bits 32
+BITS 32

-%ifdef FORMAT_COFF
-SECTION .data data
-%else
-SECTION .data data align=8
-%endif
-
-
-%macro cglobal 1
+%macro cglobal 1
 %ifdef PREFIX
-  global _%1
-  %define %1 _%1
+  %ifdef MARK_FUNCS
+    global _%1:function
+    %define %1 _%1:function
+  %else
+    global _%1
+    %define %1 _%1
+  %endif
 %else
-  global %1
+  %ifdef MARK_FUNCS
+    global %1:function
+  %else
+    global %1
+  %endif
 %endif
 %endmacro

-%macro cextern 1
+%macro cextern 1
 %ifdef PREFIX
-  extern _%1
+  extern _%1
   %define %1 _%1
 %else
   extern %1
 %endif
 %endmacro

-mmx_one times 4 dw 1
+;=============================================================================
+; Local data (Read Only)
+;=============================================================================

-;===========================================================================
-;
-; divide by 2Q table
-;
-;===========================================================================
-
-%macro MMX_DIV 1
-times 4 dw (1 << 17) / (%1 * 2) + 1
-%endmacro
+%ifdef FORMAT_COFF
+SECTION .rodata
+%else
+SECTION .rodata align=16
+%endif

-align ALIGN
-mmx_div
-  MMX_DIV 1
-  MMX_DIV 2
-  MMX_DIV 3
-  MMX_DIV 4
-  MMX_DIV 5
-  MMX_DIV 6
-  MMX_DIV 7
-  MMX_DIV 8
-  MMX_DIV 9
-  MMX_DIV 10
-  MMX_DIV 11
-  MMX_DIV 12
-  MMX_DIV 13
-  MMX_DIV 14
-  MMX_DIV 15
-  MMX_DIV 16
-  MMX_DIV 17
-  MMX_DIV 18
-  MMX_DIV 19
-  MMX_DIV 20
-  MMX_DIV 21
-  MMX_DIV 22
-  MMX_DIV 23
-  MMX_DIV 24
-  MMX_DIV 25
-  MMX_DIV 26
-  MMX_DIV 27
-  MMX_DIV 28
-  MMX_DIV 29
-  MMX_DIV 30
-  MMX_DIV 31
+mmx_one:
+  times 4 dw 1

+;-----------------------------------------------------------------------------
+; divide by 2Q table
+;-----------------------------------------------------------------------------
+
+ALIGN 16
+mmx_div:
+  times 4 dw 65535 ; the div by 2 formula will overflow for the case
+                   ; quant=1 but we don't care much because quant=1
+                   ; is handled by a different piece of code that
+                   ; doesn't use this table.
+%assign quant 2
+%rep 30
+  times 4 dw  (1<<17) / (quant*2) + 1
+  %assign quant quant+1
+%endrep

-;===========================================================================
-;
-; intra matrix
-;
-;===========================================================================
+%define VM18P 3
+%define VM18Q 4

-cextern intra_matrix
-cextern intra_matrix_fix

-;===========================================================================
-;
-; inter matrix
-;
-;===========================================================================
+;-----------------------------------------------------------------------------
+; quantd table
+;-----------------------------------------------------------------------------
+
+quantd:
+%assign quant 1
+%rep 31
+  times 4 dw  ((VM18P*quant) + (VM18Q/2)) / VM18Q
+  %assign quant quant+1
+%endrep

-cextern inter_matrix
-cextern inter_matrix_fix
+;-----------------------------------------------------------------------------
+; multiply by 2Q table
+;-----------------------------------------------------------------------------
+
+mmx_mul_quant:
+%assign quant 1
+%rep 31
+  times 4 dw  quant
+  %assign quant quant+1
+%endrep
+
+;-----------------------------------------------------------------------------
+; saturation limits
+;-----------------------------------------------------------------------------
+
+ALIGN 16
+
+mmx_32767_minus_2047:
+  times 4 dw (32767-2047)
+mmx_32768_minus_2048:
+  times 4 dw (32768-2048)
+mmx_2047:
+  times 4 dw 2047
+mmx_minus_2048:
+  times 4 dw (-2048)
+zero:
+  times 4 dw 0
+
+;=============================================================================
+; Code
+;=============================================================================

-%define VM18P 3
-%define VM18Q 4
+SECTION .text

+cglobal quant_mpeg_intra_mmx
+cglobal quant_mpeg_inter_mmx
+cglobal dequant_mpeg_intra_mmx
+cglobal dequant_mpeg_inter_mmx

-;===========================================================================
+;-----------------------------------------------------------------------------
 ;
-; quantd table
+; uint32_t quant_mpeg_intra_mmx(int16_t * coeff,
+;                               const int16_t const * data,
+;                               const uint32_t quant,
+;                               const uint32_t dcscalar,
+;                               const uint16_t *mpeg_matrices);
 ;
-;===========================================================================
-
-%macro MMX_QUANTD 1
-times 4 dw ((VM18P*%1) + (VM18Q/2)) / VM18Q
-%endmacro
-
-quantd
-  MMX_QUANTD 1
-  MMX_QUANTD 2
-  MMX_QUANTD 3
-  MMX_QUANTD 4
-  MMX_QUANTD 5
-  MMX_QUANTD 6
-  MMX_QUANTD 7
-  MMX_QUANTD 8
-  MMX_QUANTD 9
-  MMX_QUANTD 10
-  MMX_QUANTD 11
-  MMX_QUANTD 12
-  MMX_QUANTD 13
-  MMX_QUANTD 14
-  MMX_QUANTD 15
-  MMX_QUANTD 16
-  MMX_QUANTD 17
-  MMX_QUANTD 18
-  MMX_QUANTD 19
-  MMX_QUANTD 20
-  MMX_QUANTD 21
-  MMX_QUANTD 22
-  MMX_QUANTD 23
-  MMX_QUANTD 24
-  MMX_QUANTD 25
-  MMX_QUANTD 26
-  MMX_QUANTD 27
-  MMX_QUANTD 28
-  MMX_QUANTD 29
-  MMX_QUANTD 30
-  MMX_QUANTD 31
+;-----------------------------------------------------------------------------

+ALIGN 16
+quant_mpeg_intra_mmx:

-;===========================================================================
-;
-; multiple by 2Q table
-;
-;===========================================================================
+  push ecx
+  push esi
+  push edi
+  push ebx

-%macro MMX_MUL_QUANT 1
-times 4 dw %1
-%endmacro
+  mov edi, [esp + 16 + 4]   ; coeff
+  mov esi, [esp + 16 + 8]   ; data
+  mov eax, [esp + 16 + 12]  ; quant
+  mov ebx, [esp + 16 + 20]  ; mpeg_quant_matrices

-mmx_mul_quant
-  MMX_MUL_QUANT 1
-  MMX_MUL_QUANT 2
-  MMX_MUL_QUANT 3
-  MMX_MUL_QUANT 4
-  MMX_MUL_QUANT 5
-  MMX_MUL_QUANT 6
-  MMX_MUL_QUANT 7
-  MMX_MUL_QUANT 8
-  MMX_MUL_QUANT 9
-  MMX_MUL_QUANT 10
-  MMX_MUL_QUANT 11
-  MMX_MUL_QUANT 12
-  MMX_MUL_QUANT 13
-  MMX_MUL_QUANT 14
-  MMX_MUL_QUANT 15
-  MMX_MUL_QUANT 16
-  MMX_MUL_QUANT 17
-  MMX_MUL_QUANT 18
-  MMX_MUL_QUANT 19
-  MMX_MUL_QUANT 20
-  MMX_MUL_QUANT 21
-  MMX_MUL_QUANT 22
-  MMX_MUL_QUANT 23
-  MMX_MUL_QUANT 24
-  MMX_MUL_QUANT 25
-  MMX_MUL_QUANT 26
-  MMX_MUL_QUANT 27
-  MMX_MUL_QUANT 28
-  MMX_MUL_QUANT 29
-  MMX_MUL_QUANT 30
-  MMX_MUL_QUANT 31
+  movq mm5, [quantd + eax * 8 - 8] ; quantd -> mm5

-;===========================================================================
-;
-; saturation limits
-;
-;===========================================================================
+  xor ecx, ecx
+  cmp al, 1
+  jz near .q1loop

-align 16
+  cmp al, 2
+  jz near .q2loop

-mmx_32767_minus_2047 times 4 dw (32767-2047)
-mmx_32768_minus_2048 times 4 dw (32768-2048)
-mmx_2047 times 4 dw 2047
-mmx_minus_2048 times 4 dw (-2048)
-zero times 4 dw 0
+  movq mm7, [mmx_div + eax * 8 - 8] ; multipliers[quant] -> mm7

-section .text
+ALIGN 16
+.loop
+  movq mm0, [esi + 8*ecx]     ; mm0 = [1st]
+  movq mm3, [esi + 8*ecx + 8] ;
+  pxor mm1, mm1               ; mm1 = 0
+  pxor mm4, mm4
+  pcmpgtw mm1, mm0            ; mm1 = (0 > mm0)
+  pcmpgtw mm4, mm3
+  pxor mm0, mm1               ; mm0 = |mm0|
+  pxor mm3, mm4               ;
+  psubw mm0, mm1              ; displace
+  psubw mm3, mm4              ;
+  psllw mm0, 4                ; level << 4
+  psllw mm3, 4
+  movq mm2, [ebx + 8*ecx]
+  psrlw mm2, 1                ; intra_matrix[i]>>1
+  paddw mm0, mm2
+  movq mm2, [ebx + 256 + ecx*8]
+  pmulhw mm0, mm2             ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
+  movq mm2, [ebx + 8*ecx + 8]
+  psrlw mm2, 1
+  paddw mm3, mm2
+  movq mm2, [ebx + 256 + ecx*8 + 8]
+  pmulhw mm3, mm2
+  paddw mm0, mm5              ; + quantd
+  paddw mm3, mm5
+  pmulhw mm0, mm7             ; mm0 = (mm0 / 2Q) >> 16
+  pmulhw mm3, mm7             ;
+  psrlw mm0, 1                ; additional shift by 1 => 16 + 1 = 17
+  psrlw mm3, 1
+  pxor mm0, mm1               ; mm0 *= sign(mm0)
+  pxor mm3, mm4               ;
+  psubw mm0, mm1              ; undisplace
+  psubw mm3, mm4              ;

-;===========================================================================
-;
-; void quant_intra4_mmx(int16_t * coeff,
-;                       const int16_t const * data,
-;                       const uint32_t quant,
-;                       const uint32_t dcscalar);
-;
-;===========================================================================
+  movq [edi + 8*ecx], mm0
+  movq [edi + 8*ecx + 8], mm3

-align ALIGN
-cglobal quant_mpeg_intra_mmx
-quant_mpeg_intra_mmx:
+  add ecx,2
+  cmp ecx,16
+  jnz near .loop

-  push ecx
-  push esi
-  push edi
-
-  mov edi, [esp + 12 + 4]  ; coeff
-  mov esi, [esp + 12 + 8]  ; data
-  mov eax, [esp + 12 + 12] ; quant
-
-  movq mm5, [quantd + eax * 8 - 8] ; quantd -> mm5
-
-  xor ecx, ecx
-  cmp al, 1
-  jz near .q1loop
-
-  cmp al, 2
-  jz near .q2loop
-
-  movq mm7, [mmx_div + eax * 8 - 8] ; multipliers[quant] -> mm7
-
-align ALIGN
-.loop
-  movq mm0, [esi + 8*ecx]     ; mm0 = [1st]
-  movq mm3, [esi + 8*ecx + 8] ;
-
-  pxor mm1, mm1 ; mm1 = 0
-  pxor mm4, mm4
-
-  pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
-  pcmpgtw mm4, mm3
-
-  pxor mm0, mm1 ; mm0 = |mm0|
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; displace
-  psubw mm3, mm4 ;
-
-  psllw mm0, 4 ; level << 4
-  psllw mm3, 4 ;
-
-  movq mm2, [intra_matrix + 8*ecx]
-  psrlw mm2, 1 ; intra_matrix[i]>>1
-  paddw mm0, mm2
-
-  movq mm2, [intra_matrix_fix + ecx*8]
-  pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
-
-  movq mm2, [intra_matrix + 8*ecx + 8]
-  psrlw mm2, 1
-  paddw mm3, mm2
-
-  movq mm2, [intra_matrix_fix + ecx*8 + 8]
-  pmulhw mm3, mm2
-
-  paddw mm0, mm5 ; + quantd
-  paddw mm3, mm5
-
-  pmulhw mm0, mm7 ; mm0 = (mm0 / 2Q) >> 16
-  pmulhw mm3, mm7 ;
-  psrlw mm0, 1 ; additional shift by 1 => 16 + 1 = 17
-  psrlw mm3, 1
-
-  pxor mm0, mm1 ; mm0 *= sign(mm0)
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; undisplace
-  psubw mm3, mm4 ;
-
-  movq [edi + 8*ecx], mm0
-  movq [edi + 8*ecx + 8], mm3
-
-  add ecx,2
-  cmp ecx,16
-  jnz near .loop
-
-.done
-  ; caclulate  data[0] // (int32_t)dcscalar)
-
-  mov ecx, [esp + 12 + 16] ; dcscalar
-  mov edx, ecx
-  movsx eax, word [esi] ; data[0]
-  shr edx, 1 ; edx = dcscalar /2
-  cmp eax, 0
-  jg .gtzero
+.done
+  ; calculate data[0] / (int32_t)dcscalar
+  mov ecx, [esp + 16 + 16]  ; dcscalar
+  mov edx, ecx
+  movsx eax, word [esi]     ; data[0]
+  shr edx, 1                ; edx = dcscalar /2
+  cmp eax, 0
+  jg .gtzero

-  sub eax, edx
-  jmp short .mul
+  sub eax, edx
+  jmp short .mul

 .gtzero
-  add eax, edx
+  add eax, edx
 .mul
-  cdq ; expand eax -> edx:eax
-  idiv ecx ; eax = edx:eax / dcscalar
-
-  mov [edi], ax ; coeff[0] = ax
-
-  pop edi
-  pop esi
-  pop ecx
+  cdq                       ; expand eax -> edx:eax
+  idiv ecx                  ; eax = edx:eax / dcscalar

-  ret
+  mov [edi], ax             ; coeff[0] = ax

-align ALIGN
-.q1loop
-  movq mm0, [esi + 8*ecx]     ; mm0 = [1st]
-  movq mm3, [esi + 8*ecx + 8] ;
+  pop ebx
+  pop edi
+  pop esi
+  pop ecx

-  pxor mm1, mm1 ; mm1 = 0
-  pxor mm4, mm4 ;
+  xor eax, eax              ; return(0);
+  ret

-  pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
-  pcmpgtw mm4, mm3 ;
-
-  pxor mm0, mm1 ; mm0 = |mm0|
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; displace
-  psubw mm3, mm4 ;
-
-  psllw mm0, 4
-  psllw mm3, 4
-
-  movq mm2, [intra_matrix + 8*ecx]
-  psrlw mm2, 1
-  paddw mm0, mm2
-
-  movq mm2, [intra_matrix_fix + ecx*8]
-  pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
-
-  movq mm2, [intra_matrix + 8*ecx + 8]
-  psrlw mm2, 1
-  paddw mm3, mm2
-
-  movq mm2, [intra_matrix_fix + ecx*8 + 8]
-  pmulhw mm3, mm2
-
-  paddw mm0, mm5
-  paddw mm3, mm5
-
-  psrlw mm0, 1 ; mm0 >>= 1 (/2)
-  psrlw mm3, 1 ;
-
-  pxor mm0, mm1 ; mm0 *= sign(mm0)
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; undisplace
-  psubw mm3, mm4 ;
-
-  movq [edi + 8*ecx], mm0
-  movq [edi + 8*ecx + 8], mm3
-
-  add ecx,2
-  cmp ecx,16
-  jnz near .q1loop
-  jmp near .done
+ALIGN 16
+.q1loop
+  movq mm0, [esi + 8*ecx]     ; mm0 = [1st]
+  movq mm3, [esi + 8*ecx + 8] ;
+  pxor mm1, mm1               ; mm1 = 0
+  pxor mm4, mm4               ;
+  pcmpgtw mm1, mm0            ; mm1 = (0 > mm0)
+  pcmpgtw mm4, mm3            ;
+  pxor mm0, mm1               ; mm0 = |mm0|
+  pxor mm3, mm4               ;
+  psubw mm0, mm1              ; displace
+  psubw mm3, mm4              ;
+  psllw mm0, 4
+  psllw mm3, 4
+  movq mm2, [ebx + 8*ecx]
+  psrlw mm2, 1
+  paddw mm0, mm2
+  movq mm2, [ebx + 256 + ecx*8]
+  pmulhw mm0, mm2             ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
+  movq mm2, [ebx + 8*ecx + 8]
+  psrlw mm2, 1
+  paddw mm3, mm2
+  movq mm2, [ebx + 256 + ecx*8 + 8]
+  pmulhw mm3, mm2
+  paddw mm0, mm5
+  paddw mm3, mm5
+  psrlw mm0, 1                ; mm0 >>= 1 (/2)
+  psrlw mm3, 1                ;
+  pxor mm0, mm1               ; mm0 *= sign(mm0)
+  pxor mm3, mm4               ;
+  psubw mm0, mm1              ; undisplace
+  psubw mm3, mm4              ;
+  movq [edi + 8*ecx], mm0
+  movq [edi + 8*ecx + 8], mm3
+
+  add ecx, 2
+  cmp ecx, 16
+  jnz near .q1loop
+  jmp near .done

-align ALIGN
+ALIGN 16
 .q2loop
-  movq mm0, [esi + 8*ecx]     ; mm0 = [1st]
-  movq mm3, [esi + 8*ecx + 8] ;
-
-  pxor mm1, mm1 ; mm1 = 0
-  pxor mm4, mm4 ;
-
-  pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
-  pcmpgtw mm4, mm3 ;
-
-  pxor mm0, mm1 ; mm0 = |mm0|
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; displace
-  psubw mm3, mm4 ;
-
-  psllw mm0, 4
-  psllw mm3, 4
-
-  movq mm2, [intra_matrix + 8*ecx]
-  psrlw mm2, 1
-  paddw mm0, mm2
-
-  movq mm2, [intra_matrix_fix + ecx*8]
-  pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
-
-  movq mm2, [intra_matrix + 8*ecx + 8]
-  psrlw mm2, 1
-  paddw mm3, mm2
-
-  movq mm2, [intra_matrix_fix + ecx*8 + 8]
-  pmulhw mm3, mm2
-
-  paddw mm0, mm5
-  paddw mm3, mm5
-
-  psrlw mm0, 2 ; mm0 >>= 1 (/4)
-  psrlw mm3, 2 ;
-
-  pxor mm0, mm1 ; mm0 *= sign(mm0)
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; undisplace
-  psubw mm3, mm4 ;
-
-  movq [edi + 8*ecx], mm0
-  movq [edi + 8*ecx + 8], mm3
-
-  add ecx,2
-  cmp ecx,16
-  jnz near .q2loop
-  jmp near .done
-
-
-;===========================================================================
-;
-; uint32_t quant4_inter_mmx(int16_t * coeff,
-;                           const int16_t const * data,
-;                           const uint32_t quant);
-;
-;===========================================================================
+  movq mm0, [esi + 8*ecx]     ; mm0 = [1st]
+  movq mm3, [esi + 8*ecx + 8] ;
+  pxor mm1, mm1               ; mm1 = 0
+  pxor mm4, mm4               ;
+  pcmpgtw mm1, mm0            ; mm1 = (0 > mm0)
+  pcmpgtw mm4, mm3            ;
+  pxor mm0, mm1               ; mm0 = |mm0|
+  pxor mm3, mm4               ;
+  psubw mm0, mm1              ; displace
+  psubw mm3, mm4              ;
+  psllw mm0, 4
+  psllw mm3, 4
+  movq mm2, [ebx + 8*ecx]
+  psrlw mm2, 1
+  paddw mm0, mm2
+  movq mm2, [ebx + 256 + ecx*8]
+  pmulhw mm0, mm2             ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
+  movq mm2, [ebx + 8*ecx + 8]
+  psrlw mm2, 1
+  paddw mm3, mm2
+  movq mm2, [ebx + 256 + ecx*8 + 8]
+  pmulhw mm3, mm2
+  paddw mm0, mm5
+  paddw mm3, mm5
+  psrlw mm0, 2                ; mm0 >>= 2 (/4)
+  psrlw mm3, 2                ;
+  pxor mm0, mm1               ; mm0 *= sign(mm0)
+  pxor mm3, mm4               ;
+  psubw mm0, mm1              ; undisplace
+  psubw mm3, mm4              ;
+  movq [edi + 8*ecx], mm0
+  movq [edi + 8*ecx + 8], mm3
+
+  add ecx,2
+  cmp ecx,16
+  jnz near .q2loop
+  jmp near .done
+
+
+;-----------------------------------------------------------------------------
+;
+; uint32_t quant_mpeg_inter_mmx(int16_t * coeff,
+;                               const int16_t const * data,
+;                               const uint32_t quant,
+;                               const uint16_t *mpeg_matrices);
+;
+;-----------------------------------------------------------------------------

-align ALIGN
-cglobal quant_mpeg_inter_mmx
+ALIGN 16
 quant_mpeg_inter_mmx:

-  push ecx
-  push esi
-  push edi
+  push ecx
+  push esi
+  push edi
+  push ebx

-  mov edi, [esp + 12 + 4]   ; coeff
-  mov esi, [esp + 12 + 8]   ; data
-  mov eax, [esp + 12 + 12]  ; quant
+  mov edi, [esp + 16 + 4]   ; coeff
+  mov esi, [esp + 16 + 8]   ; data
+  mov eax, [esp + 16 + 12]  ; quant
+  mov ebx, [esp + 16 + 16]  ; mpeg_quant_matrices

-  xor ecx, ecx
+  xor ecx, ecx

-  pxor mm5, mm5 ; sum
+  pxor mm5, mm5 ; sum

-  cmp al, 1
-  jz near .q1loop
+  cmp al, 1
+  jz near .q1loop

-  cmp al, 2
-  jz near .q2loop
+  cmp al, 2
+  jz near .q2loop

-  movq mm7, [mmx_div + eax * 8 - 8] ; divider
+  movq mm7, [mmx_div + eax * 8 - 8] ; divider

-align ALIGN
+ALIGN 16
 .loop
-  movq mm0, [esi + 8*ecx] ; mm0 = [1st]
-  movq mm3, [esi + 8*ecx + 8] ;
-  pxor mm1, mm1 ; mm1 = 0
-  pxor mm4, mm4 ;
-  pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
-  pcmpgtw mm4, mm3 ;
-  pxor mm0, mm1 ; mm0 = |mm0|
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; displace
-  psubw mm3, mm4 ;
-
-  psllw mm0, 4
-  psllw mm3, 4
-
-  movq mm2, [inter_matrix + 8*ecx]
-  psrlw mm2, 1
-  paddw mm0, mm2
-
-  movq mm2, [inter_matrix_fix + ecx*8]
-  pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
-
-  movq mm2, [inter_matrix + 8*ecx + 8]
-  psrlw mm2, 1
-  paddw mm3, mm2
-
-  movq mm2, [inter_matrix_fix + ecx*8 + 8]
-  pmulhw mm3, mm2
-
-  pmulhw mm0, mm7 ; mm0 = (mm0 / 2Q) >> 16
-  pmulhw mm3, mm7 ;
-  psrlw mm0, 1 ; additional shift by 1 => 16 + 1 = 17
-  psrlw mm3, 1
-
-  paddw mm5, mm0 ; sum += mm0
-  pxor mm0, mm1 ; mm0 *= sign(mm0)
-  paddw mm5, mm3 ;
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; undisplace
-  psubw mm3, mm4
-  movq [edi + 8*ecx], mm0
-  movq [edi + 8*ecx + 8], mm3
-
-  add ecx, 2
-  cmp ecx, 16
-  jnz near .loop
+  movq mm0, [esi + 8*ecx]     ; mm0 = [1st]
+  movq mm3, [esi + 8*ecx + 8] ;
+  pxor mm1, mm1               ; mm1 = 0
+  pxor mm4, mm4               ;
+  pcmpgtw mm1, mm0            ; mm1 = (0 > mm0)
+  pcmpgtw mm4, mm3            ;
+  pxor mm0, mm1               ; mm0 = |mm0|
+  pxor mm3, mm4               ;
+  psubw mm0, mm1              ; displace
+  psubw mm3, mm4              ;
+  psllw mm0, 4
+  psllw mm3, 4
+  movq mm2, [ebx + 512 + 8*ecx]
+  psrlw mm2, 1
+  paddw mm0, mm2
+  movq mm2, [ebx + 768 + ecx*8]
+  pmulhw mm0, mm2             ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
+  movq mm2, [ebx + 512 + 8*ecx + 8]
+  psrlw mm2, 1
+  paddw mm3, mm2
+  movq mm2, [ebx + 768 + ecx*8 + 8]
+  pmulhw mm3, mm2
+  pmulhw mm0, mm7             ; mm0 = (mm0 / 2Q) >> 16
+  pmulhw mm3, mm7             ;
+  psrlw mm0, 1                ; additional shift by 1 => 16 + 1 = 17
+  psrlw mm3, 1
+  paddw mm5, mm0              ; sum += mm0
+  pxor mm0, mm1               ; mm0 *= sign(mm0)
+  paddw mm5, mm3              ;
+  pxor mm3, mm4               ;
+  psubw mm0, mm1              ; undisplace
+  psubw mm3, mm4
+  movq [edi + 8*ecx], mm0
+  movq [edi + 8*ecx + 8], mm3
+
+  add ecx, 2
+  cmp ecx, 16
+  jnz near .loop

 .done
-  pmaddwd mm5, [mmx_one]
-  movq mm0, mm5
-  psrlq mm5, 32
-  paddd mm0, mm5
-  movd eax, mm0 ; return sum
-
-  pop edi
-  pop esi
-  pop ecx
+  pmaddwd mm5, [mmx_one]
+  movq mm0, mm5
+  psrlq mm5, 32
+  paddd mm0, mm5
+  movd eax, mm0   ; return sum
+
+  pop ebx
+  pop edi
+  pop esi
+  pop ecx

-  ret
+  ret

-align ALIGN
+ALIGN 16
 .q1loop
-  movq mm0, [esi + 8*ecx] ; mm0 = [1st]
-  movq mm3, [esi + 8*ecx+ 8]
-  ;
-  pxor mm1, mm1 ; mm1 = 0
-  pxor mm4, mm4 ;
-
-  pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
-  pcmpgtw mm4, mm3 ;
-
-  pxor mm0, mm1 ; mm0 = |mm0|
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; displace
-  psubw mm3, mm4 ;
-
-  psllw mm0, 4
-  psllw mm3, 4
-
-  movq mm2, [inter_matrix + 8*ecx]
-  psrlw mm2, 1
-  paddw mm0, mm2
-
-  movq mm2, [inter_matrix_fix + ecx*8]
-  pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
-
-  movq mm2, [inter_matrix + 8*ecx + 8]
-  psrlw mm2, 1
-  paddw mm3, mm2
-
-  movq mm2, [inter_matrix_fix + ecx*8 + 8]
-  pmulhw mm3, mm2
-
-  psrlw mm0, 1 ; mm0 >>= 1 (/2)
-  psrlw mm3, 1 ;
-
-  paddw mm5, mm0 ; sum += mm0
-  pxor mm0, mm1 ; mm0 *= sign(mm0)
-  paddw mm5, mm3 ;
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; undisplace
-  psubw mm3, mm4
-
-  movq [edi + 8*ecx], mm0
-  movq [edi + 8*ecx + 8], mm3
-
-  add ecx,2
-  cmp ecx,16
-  jnz near .q1loop
+  movq mm0, [esi + 8*ecx]   ; mm0 = [1st]
+  movq mm3, [esi + 8*ecx+ 8]
+  pxor mm1, mm1             ; mm1 = 0
+  pxor mm4, mm4             ;
+  pcmpgtw mm1, mm0          ; mm1 = (0 > mm0)
+  pcmpgtw mm4, mm3          ;
+  pxor mm0, mm1             ; mm0 = |mm0|
+  pxor mm3, mm4             ;
+  psubw mm0, mm1            ; displace
+  psubw mm3, mm4            ;
+  psllw mm0, 4
+  psllw mm3, 4
+  movq mm2, [ebx + 512 + 8*ecx]
+  psrlw mm2, 1
+  paddw mm0, mm2
+  movq mm2, [ebx + 768 + ecx*8]
+  pmulhw mm0, mm2           ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
+  movq mm2, [ebx + 512 + 8*ecx + 8]
+  psrlw mm2, 1
+  paddw mm3, mm2
+  movq mm2, [ebx + 768 + ecx*8 + 8]
+  pmulhw mm3, mm2
+  psrlw mm0, 1              ; mm0 >>= 1 (/2)
+  psrlw mm3, 1              ;
+  paddw mm5, mm0            ; sum += mm0
+  pxor mm0, mm1             ; mm0 *= sign(mm0)
+  paddw mm5, mm3            ;
+  pxor mm3, mm4             ;
+  psubw mm0, mm1            ; undisplace
+  psubw mm3, mm4
+  movq [edi + 8*ecx], mm0
+  movq [edi + 8*ecx + 8], mm3
+
+  add ecx, 2
+  cmp ecx, 16
+  jnz near .q1loop

-  jmp .done
+  jmp .done

-align ALIGN
+ALIGN 16
 .q2loop
-  movq mm0, [esi + 8*ecx] ; mm0 = [1st]
-  movq mm3, [esi + 8*ecx+ 8]
-  ;
-  pxor mm1, mm1 ; mm1 = 0
-  pxor mm4, mm4 ;
-
-  pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
-  pcmpgtw mm4, mm3 ;
-
-  pxor mm0, mm1 ; mm0 = |mm0|
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; displace
-  psubw mm3, mm4 ;
-
-  psllw mm0, 4
-  psllw mm3, 4
-
-  movq mm2, [inter_matrix + 8*ecx]
-  psrlw mm2, 1
-  paddw mm0, mm2
-
-  movq mm2, [inter_matrix_fix + ecx*8]
-  pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
-
-  movq mm2, [inter_matrix + 8*ecx + 8]
-  psrlw mm2, 1
-  paddw mm3, mm2
-
-  movq mm2, [inter_matrix_fix + ecx*8 + 8]
-  pmulhw mm3, mm2
-
-  psrlw mm0, 2 ; mm0 >>= 1 (/2)
-  psrlw mm3, 2 ;
-
-  paddw mm5, mm0 ; sum += mm0
-  pxor mm0, mm1 ; mm0 *= sign(mm0)
-  paddw mm5, mm3 ;
-  pxor mm3, mm4 ;
-  psubw mm0, mm1 ; undisplace
-  psubw mm3, mm4
-
-  movq [edi + 8*ecx], mm0
-  movq [edi + 8*ecx + 8], mm3
-
-  add ecx,2
-  cmp ecx,16
-  jnz near .q2loop
+  movq mm0, [esi + 8*ecx]   ; mm0 = [1st]
+  movq mm3, [esi + 8*ecx+ 8]
+  pxor mm1, mm1             ; mm1 = 0
+  pxor mm4, mm4             ;
+  pcmpgtw mm1, mm0          ; mm1 = (0 > mm0)
+  pcmpgtw mm4, mm3          ;
+  pxor mm0, mm1             ; mm0 = |mm0|
+  pxor mm3, mm4             ;
+  psubw mm0, mm1            ; displace
+  psubw mm3, mm4            ;
+  psllw mm0, 4
+  psllw mm3, 4
+  movq mm2, [ebx + 512 + 8*ecx]
+  psrlw mm2, 1
+  paddw mm0, mm2
+  movq mm2, [ebx + 768 + ecx*8]
+  pmulhw mm0, mm2           ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
+  movq mm2, [ebx + 512 + 8*ecx + 8]
+  psrlw mm2, 1
+  paddw mm3, mm2
+  movq mm2, [ebx + 768 + ecx*8 + 8]
+  pmulhw mm3, mm2
+  psrlw mm0, 2              ; mm0 >>= 2 (/4)
+  psrlw mm3, 2              ;
+  paddw mm5, mm0            ; sum += mm0
+  pxor mm0, mm1             ; mm0 *= sign(mm0)
+  paddw mm5, mm3            ;
+  pxor mm3, mm4             ;
+  psubw mm0, mm1            ; undisplace
+  psubw mm3, mm4
+  movq [edi + 8*ecx], mm0
+  movq [edi + 8*ecx + 8], mm3
+
+  add ecx, 2
+  cmp ecx, 16
+  jnz near .q2loop

-  jmp .done
+  jmp .done

-;===========================================================================
+;-----------------------------------------------------------------------------
 ;
-; void dequant4_intra_mmx(int16_t *data,
-;                         const int16_t const *coeff,
-;                         const uint32_t quant,
-;                         const uint32_t dcscalar);
+; uint32_t dequant_mpeg_intra_mmx(int16_t *data,
+;                                 const int16_t const *coeff,
+;                                 const uint32_t quant,
+;                                 const uint32_t dcscalar,
+;                                 const uint16_t *mpeg_matrices);
 ;
-;===========================================================================
+;-----------------------------------------------------------------------------

 ; Note: in order to saturate 'easily', we pre-shift the quantizer
 ; by 4. Then, the high-word of (coeff[]*matrix[i]*quant) are used to
@@ -684,7 +514,7 @@
 ; Moreover, we perform the mult (matrix[i]*quant) first, instead of, e.g.,
 ; (coeff[i]*matrix[i]). This is less prone to overflow if coeff[] are not
 ; checked. Input ranges are: coeff in [-127,127], inter_matrix in [1..255],
-; and quant in [1..31].
+; and quant in [1..31].
 ;
 ; The original loop is:
 ;
@@ -696,9 +526,9 @@
   psubw mm0, mm1    ; -> mm0 = abs(coeff[i]), mm1 = sign of coeff[i]
   movq mm2, mm7     ; mm2 = quant
-  pmullw mm2, [intra_matrix + 8*eax + 8*16 ]  ; matrix[i]*quant.
+  pmullw mm2, [ebx + 8*eax + 8*16 ]  ; matrix[i]*quant.
-  movq mm6, mm2
+  movq mm6, mm2
   pmulhw mm2, mm0   ; high of coeff*(matrix*quant) (should be 0 if no overflow)
   pmullw mm0, mm6   ; low of coeff*(matrix*quant)
@@ -716,20 +546,22 @@
 ;********************************************************************

-align 16
-cglobal dequant_mpeg_intra_mmx
+ALIGN 16
 dequant_mpeg_intra_mmx:
-  mov edx, [esp+4]  ; data
-  mov ecx, [esp+8]  ; coeff
-  mov eax, [esp+12] ; quant
+  push ebx
+
+  mov edx, [esp + 4 + 4]  ; data
+  mov ecx, [esp + 4 + 8]  ; coeff
+  mov eax, [esp + 4 + 12] ; quant
+  mov ebx, [esp + 4 + 20] ; mpeg_quant_matrices

   movq mm7, [mmx_mul_quant + eax*8 - 8]
-  mov eax, -16    ; to keep aligned, we regularly process coeff[0]
-  psllw mm7, 2    ; << 2. See comment.
-  pxor mm6, mm6   ; this is a NOP
+  mov eax, -16    ; to keep aligned, we regularly process coeff[0]
+  psllw mm7, 2    ; << 2. See comment.
+  pxor mm6, mm6   ; this is a NOP

-align 16
+ALIGN 16
 .loop
   movq mm0, [ecx+8*eax + 8*16]   ; mm0 = c  = coeff[i]
   movq mm3, [ecx+8*eax + 8*16 +8]; mm3 = c' = coeff[i+1]
@@ -737,25 +569,25 @@
   pxor mm4, mm4
   pcmpgtw mm1, mm0  ; mm1 = sgn(c)
   movq mm2, mm7     ; mm2 = quant
-
+
   pcmpgtw mm4, mm3  ; mm4 = sgn(c')
-  pmullw mm2, [intra_matrix + 8*eax + 8*16 ]  ; matrix[i]*quant
+  pmullw mm2, [ebx + 8*eax + 8*16 ]  ; matrix[i]*quant

   pxor mm0, mm1     ; negate if negative
   pxor mm3, mm4     ; negate if negative
-
+
   psubw mm0, mm1
   psubw mm3, mm4
-
-  ; we're short on register, here. Poor pairing...
-  movq mm5, mm2
+  ; we're short on register, here. Poor pairing...
+
+  movq mm5, mm2
   pmullw mm2, mm0   ; low of coeff*(matrix*quant)
   pmulhw mm0, mm5   ; high of coeff*(matrix*quant)
   movq mm5, mm7     ; mm5 = quant
-  pmullw mm5, [intra_matrix + 8*eax + 8*16 +8] ; matrix[i+1]*quant
+  pmullw mm5, [ebx + 8*eax + 8*16 +8] ; matrix[i+1]*quant

   movq mm6, mm5
   add eax,2         ; z-flag will be tested later
@@ -777,18 +609,17 @@
   psubusw mm1, mm0
   psubusw mm4, mm3
-  psubw mm2, mm1    ; finish negating back
-  psubw mm6, mm4    ; finish negating back
+  psubw mm2, mm1    ; finish negating back
+  psubw mm6, mm4    ; finish negating back

   movq [edx + 8*eax + 8*16 -2*8   ], mm2 ; data[i]
   movq [edx + 8*eax + 8*16 -2*8 +8], mm6 ; data[i+1]
-  jnz near .loop
+  jnz near .loop

   ; deal with DC
-
   movd mm0, [ecx]
-  pmullw mm0, [esp+16]       ; dcscalar
+  pmullw mm0, [esp + 4 + 16] ; dcscalar
   movq mm2, [mmx_32767_minus_2047]
   paddsw mm0, mm2
   psubsw mm0, mm2
@@ -798,38 +629,46 @@
   movd eax, mm0
   mov [edx], ax

+  xor eax, eax
+
+  pop ebx
+
   ret

-;===========================================================================
+;-----------------------------------------------------------------------------
 ;
-; void dequant4_inter_mmx(int16_t * data,
-;                         const int16_t * const coeff,
-;                         const uint32_t quant);
+; uint32_t dequant_mpeg_inter_mmx(int16_t * data,
+;                                 const int16_t * const coeff,
+;                                 const uint32_t quant,
+;                                 const uint16_t *mpeg_matrices);
 ;
-;===========================================================================
+;-----------------------------------------------------------------------------

 ; Note: We use (2*c + sgn(c) - sgn(-c)) as multiplier
 ; so we handle the 3 cases: c<0, c==0, and c>0 in one shot.
 ; sgn(x) is the result of 'pcmpgtw 0,x': 0 if x>=0, -1 if x<0.
 ; It's mixed with the extraction of the absolute value.

-align 16
-cglobal dequant_mpeg_inter_mmx
+ALIGN 16
 dequant_mpeg_inter_mmx:
-  mov edx, [esp+ 4] ; data
-  mov ecx, [esp+ 8] ; coeff
-  mov eax, [esp+12] ; quant
+  push ebx
+
+  mov edx, [esp + 4 + 4]  ; data
+  mov ecx, [esp + 4 + 8]  ; coeff
+  mov eax, [esp + 4 + 12] ; quant
+  mov ebx, [esp + 4 + 16] ; mpeg_quant_matrices
+
   movq mm7, [mmx_mul_quant + eax*8 - 8]
   mov eax, -16
   paddw mm7, mm7  ; << 1
-  pxor mm6, mm6 ; mismatch sum
+  pxor mm6, mm6   ; mismatch sum

-align 16
+ALIGN 16
 .loop
   movq mm0, [ecx+8*eax + 8*16   ] ; mm0 = coeff[i]
   movq mm2, [ecx+8*eax + 8*16 +8] ; mm2 = coeff[i+1]
-  add eax,2
+  add eax, 2

   pxor mm1, mm1
   pxor mm3, mm3
@@ -854,13 +693,13 @@
   ; we're short on register, here. Poor pairing...
   movq mm4, mm7  ; (matrix*quant)
-  pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8]
+  pmullw mm4, [ebx + 512 + 8*eax + 8*16 -2*8]
   movq mm5, mm4
   pmulhw mm5, mm0  ; high of c*(matrix*quant)
   pmullw mm0, mm4  ; low of c*(matrix*quant)

   movq mm4, mm7  ; (matrix*quant)
-  pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8 + 8]
+  pmullw mm4, [ebx + 512 + 8*eax + 8*16 -2*8 + 8]

   pcmpgtw mm5, [zero]
   paddusw mm0, mm5
@@ -876,9 +715,9 @@
   pcmpgtw mm5, [zero]
   paddusw mm2, mm5
   psrlw mm2, 5
-  pxor mm2, mm3    ; start restoring sign
+  pxor mm2, mm3    ; start restoring sign
   psubusw mm3, mm5
-  psubw mm2, mm3   ; finish restoring sign
+  psubw mm2, mm3   ; finish restoring sign

   pxor mm6, mm0    ; mismatch control
   movq [edx + 8*eax + 8*16 -2*8   ], mm0 ; data[i]
@@ -903,5 +742,8 @@
   xor eax, 1
   xor word [edx + 2*63], ax
-  ret
+  xor eax, eax
+
+  pop ebx
+  ret
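
The patched file keeps the old trick of replacing division by 2*quant with a fixed-point multiply: each mmx_div entry holds (1<<17)/(quant*2) + 1, pmulhw keeps the upper 16 bits of the product, and the following psrlw by 1 completes the >>17. A minimal C sketch of that arithmetic, under the assumption spelled out in the table's own comment (quant 1 never reaches the table, and quant 2 is routed to the dedicated .q2loop, so entries are only consumed for quant >= 3, where the multiplier also fits a signed 16-bit word); the helper name is illustrative, not part of XviD:

    #include <stdint.h>

    /* level / (2*quant) via the mmx_div multiplier; valid for quant in [3..31] */
    static int16_t div_by_2q(int16_t level, uint32_t quant)
    {
        int16_t mult = (int16_t)((1 << 17) / (quant * 2) + 1); /* mmx_div entry */
        int32_t hi = ((int32_t)level * mult) >> 16;            /* pmulhw        */
        return (int16_t)(hi >> 1);                             /* psrlw xx, 1   */
    }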
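
Per coefficient, the new .loop of quant_mpeg_intra_mmx computes the MPEG intra formula in two fixed-point steps: divide level<<4 by the matrix entry via a precomputed reciprocal, add the quantd rounding bias ((3*quant + 2)/4, from the quantd table), then divide by 2*quant. A hedged C sketch of one coefficient; the function and parameter names are hypothetical, and the matrix layout (matrix entries at offset 0 of mpeg_quant_matrices, reciprocals m_fix[i] ~ (1<<16)/m[i] at byte offset 256) is inferred from the [ebx] and [ebx + 256] addressing in the loop:

    #include <stdint.h>

    static int16_t quant_intra_coeff(int16_t data_i, uint16_t m, uint16_t m_fix,
                                     uint32_t quant, uint16_t quantd_q)
    {
        int neg = data_i < 0;
        uint32_t level = (uint32_t)(neg ? -data_i : data_i) << 4; /* level << 4 */
        level = ((level + (m >> 1)) * m_fix) >> 16;   /* ~ (16*|c| + m/2) / m   */
        level += quantd_q;                            /* + quantd[quant]        */
        level = (level * ((1u << 17) / (quant * 2) + 1)) >> 17; /* / (2*quant)  */
        return neg ? -(int16_t)level : (int16_t)level;
    }

The DC coefficient bypasses this path entirely: the .done block computes coeff[0] = (data[0] +/- dcscalar/2) / dcscalar with a signed idiv.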
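
quant_mpeg_inter_mmx follows the same pattern, but with the inter matrix and its reciprocals (byte offsets 512 and 768 of the matrix block), no quantd bias, and a running sum of the quantized magnitudes that is reduced with pmaddwd/psrlq and returned in eax. A C sketch of the whole block under the same assumptions as above (all names are illustrative):

    #include <stdint.h>

    static uint32_t quant_inter_block(int16_t *coeff, const int16_t *data,
                                      const uint16_t *m, const uint16_t *m_fix,
                                      uint32_t quant)
    {
        uint32_t sum = 0;
        for (int i = 0; i < 64; i++) {
            int neg = data[i] < 0;
            uint32_t level = (uint32_t)(neg ? -data[i] : data[i]) << 4;
            level = ((level + (m[i] >> 1)) * m_fix[i]) >> 16;
            level = (level * ((1u << 17) / (quant * 2) + 1)) >> 17;
            sum += level;             /* paddw mm5, reduced by pmaddwd at .done */
            coeff[i] = neg ? -(int16_t)level : (int16_t)level;
        }
        return sum;  /* callers can use this e.g. to detect all-zero blocks */
    }

As in the intra routine, quant 1 and 2 take the .q1loop/.q2loop paths, which replace the multiply by plain shifts (>>1 and >>2).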
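
The comment block before dequant_mpeg_intra_mmx explains the saturation trick: quant is pre-shifted left by 2 and the product later shifted right by 5, so the high word of |coeff| * (matrix*quant << 2) is non-zero exactly when the result would leave the 12-bit range, letting unsigned saturating adds and subtracts clamp it without branches. Functionally the loop computes the following, sketched in C with hypothetical names:

    #include <stdint.h>

    static int16_t dequant_intra_coeff(int16_t c, uint16_t m, uint32_t quant)
    {
        int neg = c < 0;
        uint32_t v = (uint32_t)(neg ? -c : c) * m * quant;  /* |c|*matrix*quant */
        v >>= 3;                 /* asm: low word of (...<<2), then >> 5        */
        if (v > 2047) v = 2047;  /* saturate (the SATURATE build option)        */
        return neg ? -(int16_t)v : (int16_t)v;
    }

The DC value is handled after the loop: coeff[0]*dcscalar, clamped with the mmx_32767_minus_2047 / mmx_32768_minus_2048 constants.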
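
dequant_mpeg_inter_mmx applies the (2*c + sgn(c) - sgn(-c)) trick described in its Note — the multiplier becomes 2c+1 for c>0, 2c-1 for c<0, and 0 for c==0 — and XORs every output word into mm6 so the final instructions can apply MPEG mismatch control: if the combined LSB parity is even, the LSB of the last coefficient is flipped. A C sketch of the whole routine; names are illustrative and the clamp is simplified to the positive bound:

    #include <stdint.h>

    static void dequant_inter_block(int16_t *data, const int16_t *coeff,
                                    const uint16_t *m, uint32_t quant)
    {
        uint16_t mismatch = 0;
        for (int i = 0; i < 64; i++) {
            int c = coeff[i];
            if (c == 0) {
                data[i] = 0;
            } else {
                int neg = c < 0;
                uint32_t v = ((2u * (neg ? -c : c) + 1) * m[i] * quant) >> 4;
                if (v > 2047) v = 2047;          /* saturate as in the asm */
                data[i] = neg ? -(int16_t)v : (int16_t)v;
            }
            mismatch ^= (uint16_t)data[i];       /* pxor mm6, ...          */
        }
        if (!(mismatch & 1))   /* even parity: xor word [edx + 2*63], 1    */
            data[63] ^= 1;
    }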