;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - K7 optimized SAD operators -
; *
; *  Copyright(C) 2002 Jaan Kalda
; *
; *  This program is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; ***************************************************************************/

;------------------------------------------------------------------------------
; 09.12.2002  Athlon optimizations contributed by Jaan Kalda
;------------------------------------------------------------------------------

; these 3dne functions are compatible with iSSE, but are optimized specifically
; for K7 pipelines

BITS 32
%macro cglobal 1
  %ifdef PREFIX
    global _%1
    %define %1 _%1
  %else
    global %1
  %endif
%endmacro
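
; cglobal declares a function symbol as global, prepending an underscore when
; PREFIX is defined (the C name-mangling convention on a.out/Win32 targets).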
|
|
;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata data
%else
SECTION .rodata data align=16
%endif

ALIGN 16
mmx_one:
  times 4 dw 1

;=============================================================================
; Helper macros
;=============================================================================
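
; All of these operators lean on psadbw: it takes the absolute difference of
; the 8 packed bytes in each operand, sums the eight results, and leaves the
; total zero-extended in the destination mmx register, so a single instruction
; folds eight |a-b| terms into a partial SAD.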
|
|
;; %1 block number (0..4)
%macro SAD_16x16_SSE 1
  movq mm7, [eax]
  movq mm6, [eax+8]
  paddd mm1, mm3
%endmacro
|
|
%macro SADBI_16x16_SSE0 0
  movq mm2, [edx]
  movq mm3, [edx+8]

  movq mm5, [byte eax]
  movq mm6, [eax+8]
  pavgb mm2, [byte ebx]
  pavgb mm3, [ebx+8]

  add edx, ecx
  psadbw mm5, mm2
  psadbw mm6, mm3

  add eax, ecx
  add ebx, ecx
  movq mm2, [byte edx]

  movq mm3, [edx+8]
  movq mm0, [byte eax]

  movq mm1, [eax+8]
  pavgb mm2, [byte ebx]

  pavgb mm3, [ebx+8]
  add edx, ecx
  add eax, ecx

  add ebx, ecx
  psadbw mm0, mm2
  psadbw mm1, mm3
%endmacro
|
|
%macro SADBI_16x16_SSE 0
  movq mm2, [byte edx]
  movq mm3, [edx+8]
  paddusw mm5, mm0
  paddusw mm6, mm1
  movq mm0, [eax]
  movq mm1, [eax+8]
  pavgb mm2, [ebx]
  pavgb mm3, [ebx+8]
  add edx, ecx
  add eax, ecx
  add ebx, ecx
  psadbw mm0, mm2
  psadbw mm1, mm3
%endmacro
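
; SADBI_8x8_3dne covers two rows per invocation: it averages a row pair from
; [edx] and [eax], SADs the result against the rows at [ebx], and folds the
; previous iteration's psadbw results into the mm5/mm6 accumulators.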
|
|
%macro SADBI_8x8_3dne 0
  movq mm2, [edx]
  movq mm3, [edx+ecx]
  pavgb mm2, [eax]
  pavgb mm3, [eax+ecx]
  lea edx, [edx+2*ecx]
  lea eax, [eax+2*ecx]
  paddusw mm5, mm0
  paddusw mm6, mm1
  movq mm0, [ebx]
  movq mm1, [ebx+ecx]
  lea ebx, [ebx+2*ecx]
  psadbw mm0, mm2
  psadbw mm1, mm3
%endmacro
|
|
;; accumulate psadbw results against mm4 (zero on the first pass over the
;; block, the broadcast mean on the second); %1 selects the prologue (0),
;; the repeated body (1) or the epilogue (2)
%macro ABS_16x16_SSE 1
%if (%1 == 0)
  movq mm7, [eax]
  psadbw mm7, mm4
  mov esi, esi          ; 2-byte no-op (pipeline filler)
  movq mm6, [eax+8]
  movq mm5, [eax+ecx]
  movq mm3, [eax+ecx+8]
  psadbw mm6, mm4

  movq mm2, [byte eax+2*ecx]
  psadbw mm5, mm4
  movq mm1, [eax+2*ecx+8]
  psadbw mm3, mm4

  movq mm0, [dword eax+edx]
  psadbw mm2, mm4
  add eax, edx
  psadbw mm1, mm4
%endif
%if (%1 == 1)
  psadbw mm0, mm4
  paddd mm7, mm0
  movq mm0, [eax+8]
  psadbw mm0, mm4
  paddd mm6, mm0

  movq mm0, [byte eax+ecx]
  psadbw mm0, mm4

  paddd mm5, mm0
  movq mm0, [eax+ecx+8]

  psadbw mm0, mm4
  paddd mm3, mm0
  movq mm0, [eax+2*ecx]
  psadbw mm0, mm4
  paddd mm2, mm0

  movq mm0, [eax+2*ecx+8]
  add eax, edx
  psadbw mm0, mm4
  paddd mm1, mm0
  movq mm0, [eax]
%endif
%if (%1 == 2)
  psadbw mm0, mm4
  paddd mm7, mm0
  movq mm0, [eax+8]
  psadbw mm0, mm4
  paddd mm6, mm0
%endif
%endmacro
|
|
;=============================================================================
; Code
;=============================================================================

SECTION .text
|
cglobal sad16_3dne
cglobal sad8_3dne
cglobal sad16bi_3dne
cglobal sad8bi_3dne
cglobal dev16_3dne
|
;-----------------------------------------------------------------------------
;
; uint32_t sad16_3dne(const uint8_t * const cur,
;                     const uint8_t * const ref,
;                     const uint32_t stride,
;                     const uint32_t best_sad);
;
;-----------------------------------------------------------------------------

; optimization: 21% faster
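
; Reference sketch (comments only, not part of the build): the plain-C
; semantics of a 16x16 SAD; names below are illustrative.
;
;   uint32_t sad16_ref(const uint8_t *cur, const uint8_t *ref, uint32_t stride)
;   {
;       uint32_t sad = 0;
;       for (int y = 0; y < 16; y++, cur += stride, ref += stride)
;           for (int x = 0; x < 16; x++)
;               sad += (cur[x] > ref[x]) ? cur[x] - ref[x] : ref[x] - cur[x];
;       return sad;
;   }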

ALIGN 16
sad16_3dne:
  mov eax, [esp+ 4] ; Src1
  mov edx, [esp+ 8] ; Src2
  mov ecx, [esp+12] ; Stride
  push ebx
  lea ebx, [2*ecx+ecx]

  SAD_16x16_SSE 0
  SAD_16x16_SSE 1
  SAD_16x16_SSE 2
  SAD_16x16_SSE 3

  mov ecx, [esp]      ; collect the partial sums left on the stack
  add ecx, [esp+4]
  add ecx, [esp+8]
  mov ebx, [esp+12]   ; restore ebx
  add esp, byte 4+12
  movd eax, mm1
  add eax, ecx

  ret
|
|
;-----------------------------------------------------------------------------
;
; uint32_t sad8_3dne(const uint8_t * const cur,
;                    const uint8_t * const ref,
;                    const uint32_t stride);
;
;-----------------------------------------------------------------------------

ALIGN 16
sad8_3dne:

  mov eax, [esp+ 4] ; Src1
  ret

;-----------------------------------------------------------------------------
;
; uint32_t sad16bi_3dne(const uint8_t * const cur,
;                       const uint8_t * const ref1,
;                       const uint8_t * const ref2,
;                       const uint32_t stride);
;
;-----------------------------------------------------------------------------

; optimization: 14% faster
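
; Reference sketch (comments only): sad16bi rounds-and-averages the two
; references, as pavgb does, then takes the SAD against the current block;
; names below are illustrative.
;
;   uint32_t sad16bi_ref(const uint8_t *cur, const uint8_t *ref1,
;                        const uint8_t *ref2, uint32_t stride)
;   {
;       uint32_t sad = 0;
;       for (int y = 0; y < 16; y++, cur += stride, ref1 += stride, ref2 += stride)
;           for (int x = 0; x < 16; x++) {
;               int avg = (ref1[x] + ref2[x] + 1) >> 1;  /* pavgb rounding */
;               int d   = cur[x] - avg;
;               sad += (d < 0) ? -d : d;
;           }
;       return sad;
;   }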
|
ALIGN 16
sad16bi_3dne:
  mov eax, [esp+ 4] ; Src
  mov edx, [esp+ 8] ; Ref1
  pop ebx
  paddusw mm6, mm5
  movd eax, mm6

  ret

;-----------------------------------------------------------------------------
;
; uint32_t sad8bi_3dne(const uint8_t * const cur,
;                      const uint8_t * const ref1,
;                      const uint8_t * const ref2,
;                      const uint32_t stride);
;
;-----------------------------------------------------------------------------

ALIGN 16
sad8bi_3dne:
  mov eax, [esp+12] ; Ref2
  mov edx, [esp+ 8] ; Ref1
  mov ebx, [esp]
  add esp, byte 4
  movd eax, mm6

  ret

;-----------------------------------------------------------------------------
;
; uint32_t dev16_3dne(const uint8_t * const cur,
;                     const uint32_t stride);
;
;-----------------------------------------------------------------------------

; optimization: 25% faster
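
; Reference sketch (comments only): dev16 measures the deviation of a 16x16
; block from its own mean; the code below is illustrative. The asm derives the
; mean from the 16-bit pixel sum (byte 1 of the sum is sum/256).
;
;   uint32_t dev16_ref(const uint8_t *cur, uint32_t stride)
;   {
;       const uint8_t *p = cur;
;       uint32_t sum = 0, dev = 0;
;       for (int y = 0; y < 16; y++, p += stride)
;           for (int x = 0; x < 16; x++)
;               sum += p[x];
;       uint8_t mean = (uint8_t)(sum / 256);  /* 16*16 pixels */
;       for (int y = 0; y < 16; y++, cur += stride)
;           for (int x = 0; x < 16; x++)
;               dev += (cur[x] > mean) ? cur[x] - mean : mean - cur[x];
;       return dev;
;   }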
|
ALIGN 16
dev16_3dne:

  mov eax, [esp+ 4] ; Src
  mov ecx, [esp+ 8] ; Stride
  lea edx, [ecx+2*ecx]

  pxor mm4, mm4     ; mm4 = 0 for the first pass (plain pixel sum)

ALIGN 8
  ABS_16x16_SSE 0
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1

  paddd mm1, mm2
  paddd mm3, mm5

  ABS_16x16_SSE 2

  paddd mm7, mm6
  paddd mm1, mm3
  mov eax, [esp+ 4]     ; Src
  paddd mm7, mm1        ; mm7 = 16-bit sum of all 256 pixels
  punpcklbw mm7, mm7    ; bytes become s0 s0 s1 s1 ... (xxyyaazz)
  pshufw mm4, mm7, 055h ; broadcast byte 1 (= sum/256): mm4 contains the mean

  pxor mm1, mm1

  ABS_16x16_SSE 0
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1

  paddd mm1, mm2
  paddd mm3, mm5

  ABS_16x16_SSE 2

  paddd mm7, mm6
  paddd mm1, mm3
  paddd mm7, mm1
  movd eax, mm7

  ret