;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - SSE2 optimized SAD operators -
; *
; *  Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
; *
; *
; *  This program is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; ***************************************************************************/

BITS 32

%macro cglobal 1
	%ifdef PREFIX
		global _%1
		%define %1 _%1
	%else
		global %1
	%endif
%endmacro
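
; (When PREFIX is defined -- targets whose C symbols carry a leading
; underscore, e.g. Win32 -- cglobal exports _name and maps the bare name
; onto it, so the code below can keep using the unprefixed label.)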

;=============================================================================
; Read only data
;=============================================================================

SECTION .rodata

ALIGN 64
zero    times 4   dd 0

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal  sad16_sse2
cglobal  dev16_sse2

;-----------------------------------------------------------------------------
; uint32_t sad16_sse2 (const uint8_t * const cur, <- assumed aligned!
;                      const uint8_t * const ref,
;                      const uint32_t stride,
;                      const uint32_t /*ignored*/);
;-----------------------------------------------------------------------------
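
; For reference, the operation computed here, as plain C (an illustrative
; sketch; sad16_ref is not part of XviD's API):
;
;   uint32_t sad16_ref(const uint8_t *cur, const uint8_t *ref,
;                      uint32_t stride)
;   {
;     uint32_t sad = 0;
;     for (int j = 0; j < 16; j++, cur += stride, ref += stride)
;       for (int i = 0; i < 16; i++)
;         sad += (cur[i] > ref[i]) ? cur[i] - ref[i] : ref[i] - cur[i];
;     return sad;
;   }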
|
%macro SAD_16x16_SSE2 0
  movdqu  xmm0, [edx]      ; two rows of ref (may be unaligned)
  movdqu  xmm1, [edx+ecx]
  lea     edx, [edx+2*ecx]
  movdqa  xmm2, [eax]      ; two rows of cur (16-byte aligned)
  movdqa  xmm3, [eax+ecx]
  lea     eax, [eax+2*ecx]
  psadbw  xmm0, xmm2       ; row SADs land in words 0 and 4
  paddusw xmm6, xmm0
  psadbw  xmm1, xmm3
  paddusw xmm6, xmm1
%endmacro
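
; Each PSADBW leaves one 16-bit sum per 64-bit half (words 0 and 4), so
; xmm6 carries two running subtotals. The worst-case 16x16 SAD is
; 16*16*255 = 65280, which fits in 16 bits, so the saturating PADDUSW
; adds cannot clip a valid result.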

ALIGN 16
sad16_sse2:
  mov eax, [esp+ 4]       ; cur (assumed aligned)
  mov edx, [esp+ 8]       ; ref
  mov ecx, [esp+12]       ; stride

  pxor xmm6, xmm6         ; accum

  SAD_16x16_SSE2
  SAD_16x16_SSE2
  SAD_16x16_SSE2
  SAD_16x16_SSE2

  SAD_16x16_SSE2
  SAD_16x16_SSE2
  SAD_16x16_SSE2
  SAD_16x16_SSE2

  pshufd  xmm5, xmm6, 00000010b ; bring the high-half subtotal down to word 0
  paddusw xmm6, xmm5
  pextrw  eax, xmm6, 0          ; total SAD -> eax
  ret
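
; Hypothetical call site, assuming the cdecl prototype documented above
; (the fourth argument is accepted but ignored):
;
;   uint32_t sad = sad16_sse2(cur, ref, stride, 0);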

;-----------------------------------------------------------------------------
; uint32_t dev16_sse2(const uint8_t * const cur, const uint32_t stride);
;-----------------------------------------------------------------------------
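
; For reference, the deviation in plain C (an illustrative sketch;
; dev16_ref is not part of XviD's API): the SAD of the block against its
; own truncated mean.
;
;   uint32_t dev16_ref(const uint8_t *cur, uint32_t stride)
;   {
;     uint32_t sum = 0, dev = 0;
;     const uint8_t *p = cur;
;     for (int j = 0; j < 16; j++, p += stride)
;       for (int i = 0; i < 16; i++)
;         sum += p[i];
;     uint8_t mean = (uint8_t)(sum / 256);  /* cf. the psrlw by 8 below */
;     for (int j = 0; j < 16; j++, cur += stride)
;       for (int i = 0; i < 16; i++)
;         dev += (cur[i] > mean) ? cur[i] - mean : mean - cur[i];
;     return dev;
;   }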

%macro MEAN_16x16_SSE2 0 ; eax: src, ecx: stride, xmm7: zero or mean => xmm6: result
  movdqu  xmm0, [eax]
  movdqu  xmm1, [eax+ecx]
  lea     eax, [eax+2*ecx]  ; + 2*stride
  psadbw  xmm0, xmm7        ; SAD against zero (pass 1) or the mean (pass 2)
  paddusw xmm6, xmm0
  psadbw  xmm1, xmm7
  paddusw xmm6, xmm1
%endmacro
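
; dev16_sse2 runs this macro twice over the same 16x16 block: pass 1 with
; xmm7 = 0, where each PSADBW simply sums pixels, and pass 2 with xmm7
; holding the mean replicated across all 16 bytes. The pass-1 sum of 256
; pixels, shifted right by 8, is the truncated mean.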

ALIGN 16
dev16_sse2:
  mov eax, [esp+ 4]       ; src
  mov ecx, [esp+ 8]       ; stride

  pxor xmm6, xmm6         ; accum
  pxor xmm7, xmm7         ; zero (pass 1 sums the pixels)

  MEAN_16x16_SSE2
  MEAN_16x16_SSE2
  MEAN_16x16_SSE2
  MEAN_16x16_SSE2

  MEAN_16x16_SSE2
  MEAN_16x16_SSE2
  MEAN_16x16_SSE2
  MEAN_16x16_SSE2

  mov eax, [esp+ 4]       ; src again

  pshufd   xmm7, xmm6, 0010b    ; bring the high-half subtotal down
  paddusw  xmm7, xmm6           ; word 0 = sum of all 256 pixels
  pxor     xmm6, xmm6           ; zero accum
  psrlw    xmm7, 8              ; => Mean
  pshuflw  xmm7, xmm7, 0        ; replicate Mean in the low words
  packuswb xmm7, xmm7           ; pack Mean to bytes
  pshufd   xmm7, xmm7, 0        ; broadcast the 4 Mean bytes to all 16 lanes

  MEAN_16x16_SSE2
  MEAN_16x16_SSE2
  MEAN_16x16_SSE2
  MEAN_16x16_SSE2

  MEAN_16x16_SSE2
  MEAN_16x16_SSE2
  MEAN_16x16_SSE2
  MEAN_16x16_SSE2

  pshufd  xmm5, xmm6, 0010b     ; fold the two halves once more
  paddusw xmm6, xmm5
  pextrw  eax, xmm6, 0          ; deviation -> eax
  ret