;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - K7 optimized SAD operators -
; *
; *  Copyright(C) 2002 Jaan Kalda
; *
; *  This program is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; ***************************************************************************/

; these 3dne functions are compatible with iSSE, but are optimized specifically
; for K7 pipelines

BITS 32

%macro cglobal 1
  %ifdef PREFIX
    global _%1
    %define %1 _%1
  %else
    global %1
  %endif
%endmacro
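
; note: with PREFIX defined, exported symbols get a leading underscore (the
; usual convention for C symbols in COFF/Win32 object formats); otherwise the
; bare name is exported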

;=============================================================================
; Read only data
;=============================================================================

SECTION .rodata

ALIGN 16
mmx_one:
  times 4 dw 1

;=============================================================================
; Helper macros
;=============================================================================

;; %1 block number (0..4)
%macro SAD_16x16_SSE 1
  movq mm7, [eax]
  movq mm6, [eax+8]
  paddd mm1, mm3
%endmacro
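
; note: psadbw sums the absolute differences of eight packed bytes in a
; single instruction; each SAD_16x16_SSE invocation is expected to cover one
; 4-row slice of the 16x16 block (%1 selects the slice), with sad16_3dne
; below combining the partial sums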

%macro SADBI_16x16_SSE0 0
  movq mm2, [edx]
  movq mm3, [edx+8]

  movq mm5, [byte eax]
  movq mm6, [eax+8]
  pavgb mm2, [byte ebx]
  pavgb mm3, [ebx+8]

  add edx, ecx
  psadbw mm5, mm2
  psadbw mm6, mm3

  add eax, ecx
  add ebx, ecx
  movq mm2, [byte edx]

  movq mm3, [edx+8]
  movq mm0, [byte eax]

  movq mm1, [eax+8]
  pavgb mm2, [byte ebx]

  pavgb mm3, [ebx+8]
  add edx, ecx
  add eax, ecx

  add ebx, ecx
  psadbw mm0, mm2
  psadbw mm1, mm3
%endmacro
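
; note: pavgb forms the rounded average of the two references,
; (ref1[i]+ref2[i]+1)>>1, and psadbw then accumulates |cur[i]-avg| eight
; bytes at a time; SADBI_16x16_SSE0 primes mm5/mm6 with the first two rows,
; and SADBI_16x16_SSE below adds one further 16-byte row per invocation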

%macro SADBI_16x16_SSE 0
  movq mm2, [byte edx]
  movq mm3, [edx+8]
  paddusw mm5, mm0
  paddusw mm6, mm1
  movq mm0, [eax]
  movq mm1, [eax+8]
  pavgb mm2, [ebx]
  pavgb mm3, [ebx+8]
  add edx, ecx
  add eax, ecx
  add ebx, ecx
  psadbw mm0, mm2
  psadbw mm1, mm3
%endmacro

%macro SADBI_8x8_3dne 0
  movq mm2, [edx]
  movq mm3, [edx+ecx]
  pavgb mm2, [eax]
  pavgb mm3, [eax+ecx]
  lea edx, [edx+2*ecx]
  lea eax, [eax+2*ecx]
  paddusw mm5, mm0
  paddusw mm6, mm1
  movq mm0, [ebx]
  movq mm1, [ebx+ecx]
  lea ebx, [ebx+2*ecx]
  psadbw mm0, mm2
  psadbw mm1, mm3
%endmacro

%macro ABS_16x16_SSE 1
%if (%1 == 0)
  movq mm7, [eax]
  psadbw mm7, mm4
  mov esi, esi
  movq mm6, [eax+8]
  movq mm5, [eax+ecx]
  movq mm3, [eax+ecx+8]
  psadbw mm6, mm4

  movq mm2, [byte eax+2*ecx]
  psadbw mm5, mm4
  movq mm1, [eax+2*ecx+8]
  psadbw mm3, mm4

  movq mm0, [dword eax+edx]
  psadbw mm2, mm4
  add eax, edx
  psadbw mm1, mm4
%endif
%if (%1 == 1)
  psadbw mm0, mm4
  paddd mm7, mm0
  movq mm0, [eax+8]
  psadbw mm0, mm4
  paddd mm6, mm0

  movq mm0, [byte eax+ecx]
  psadbw mm0, mm4

  paddd mm5, mm0
  movq mm0, [eax+ecx+8]

  psadbw mm0, mm4
  paddd mm3, mm0
  movq mm0, [eax+2*ecx]
  psadbw mm0, mm4
  paddd mm2, mm0

  movq mm0, [eax+2*ecx+8]
  add eax, edx
  psadbw mm0, mm4
  paddd mm1, mm0
  movq mm0, [eax]
%endif
%if (%1 == 2)
  psadbw mm0, mm4
  paddd mm7, mm0
  movq mm0, [eax+8]
  psadbw mm0, mm4
  paddd mm6, mm0
%endif
%endmacro
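
; note: mm4 holds the byte to compare against (zero on the first dev16 pass,
; the broadcast mean on the second), so psadbw against mm4 accumulates
; absolute deviations eight bytes at a time; "mov esi, esi" is a 2-byte
; no-op, present only as instruction-alignment filler for the K7 front end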

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal sad16_3dne
cglobal sad8_3dne
cglobal sad16bi_3dne
cglobal sad8bi_3dne
cglobal dev16_3dne

;-----------------------------------------------------------------------------
;
; uint32_t sad16_3dne(const uint8_t * const cur,
;                     const uint8_t * const ref,
;                     const uint32_t stride,
;                     const uint32_t best_sad);
;
;-----------------------------------------------------------------------------

; optimization: 21% faster
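
; reference semantics as a C sketch (illustrative only; best_sad is an
; early-termination hint that this sketch ignores):
;
;   uint32_t sad16(const uint8_t *cur, const uint8_t *ref, uint32_t stride)
;   {
;     uint32_t sad = 0;
;     for (int y = 0; y < 16; y++, cur += stride, ref += stride)
;       for (int x = 0; x < 16; x++)
;         sad += abs(cur[x] - ref[x]);
;     return sad;
;   }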

ALIGN 16
sad16_3dne:
  mov eax, [esp+ 4] ; Src1
  mov edx, [esp+ 8] ; Src2
  mov ecx, [esp+12] ; Stride
  push ebx
  lea ebx, [2*ecx+ecx]

  SAD_16x16_SSE 0
  SAD_16x16_SSE 1
  SAD_16x16_SSE 2
  SAD_16x16_SSE 3

  mov ecx, [esp]
  add ecx, [esp+4]
  add ecx, [esp+8]
  add esp, byte 4+12
  movd eax, mm1
  add eax, ecx

  ret

;-----------------------------------------------------------------------------
;
; uint32_t sad8_3dne(const uint8_t * const cur,
;                    const uint8_t * const ref,
;                    const uint32_t stride);
;
;-----------------------------------------------------------------------------
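
; note: same computation as sad16 above, restricted to a single 8x8 block
; (one quadword per row, eight rows)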

ALIGN 16
sad8_3dne:

  mov eax, [esp+ 4] ; Src1
  ret

;-----------------------------------------------------------------------------
;
; uint32_t sad16bi_3dne(const uint8_t * const cur,
;                       const uint8_t * const ref1,
;                       const uint8_t * const ref2,
;                       const uint32_t stride);
;
;-----------------------------------------------------------------------------
; optimization: 14% faster
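
; reference semantics as a C sketch (illustrative only):
;
;   uint32_t sad16bi(const uint8_t *cur, const uint8_t *ref1,
;                    const uint8_t *ref2, uint32_t stride)
;   {
;     uint32_t sad = 0;
;     for (int y = 0; y < 16; y++,
;          cur += stride, ref1 += stride, ref2 += stride)
;       for (int x = 0; x < 16; x++) {
;         int r = (ref1[x] + ref2[x] + 1) >> 1;   /* pavgb rounding */
;         sad += abs(cur[x] - r);
;       }
;     return sad;
;   }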
|
ALIGN 16
sad16bi_3dne:
  mov eax, [esp+ 4] ; Src
  mov edx, [esp+ 8] ; Ref1
  pop ebx
  paddusw mm6, mm5
  movd eax, mm6

  ret

;-----------------------------------------------------------------------------
;
; uint32_t sad8bi_3dne(const uint8_t * const cur,
;                      const uint8_t * const ref1,
;                      const uint8_t * const ref2,
;                      const uint32_t stride);
;
;-----------------------------------------------------------------------------
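
; note: same bilinear SAD as sad16bi above, on an 8x8 block; SADBI_8x8_3dne
; processes two rows per invocation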
|
|
|
|
ALIGN 16
sad8bi_3dne:
  mov eax, [esp+12] ; Ref2
  mov edx, [esp+ 8] ; Ref1
  mov ebx, [esp]
  add esp, byte 4
  movd eax, mm6

  ret

;-----------------------------------------------------------------------------
;
; uint32_t dev16_3dne(const uint8_t * const cur,
;                     const uint32_t stride);
;
;-----------------------------------------------------------------------------
; optimization: 25 % faster
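
; reference semantics as a C sketch (illustrative only): the mean is the
; truncated average of the 256 pixels, matching the punpcklbw/pshufw
; broadcast of (sum >> 8) below
;
;   uint32_t dev16(const uint8_t *cur, uint32_t stride)
;   {
;     uint32_t mean = 0, dev = 0;
;     for (int y = 0; y < 16; y++)
;       for (int x = 0; x < 16; x++)
;         mean += cur[y*stride + x];
;     mean /= 256;
;     for (int y = 0; y < 16; y++)
;       for (int x = 0; x < 16; x++)
;         dev += abs(cur[y*stride + x] - (int)mean);
;     return dev;
;   }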
|
ALIGN 16
dev16_3dne:

  mov eax, [esp+ 4] ; Src
  mov ecx, [esp+ 8] ; Stride
  lea edx, [ecx+2*ecx]

  pxor mm4, mm4

ALIGN 8
  ABS_16x16_SSE 0
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1

  paddd mm1, mm2
  paddd mm3, mm5

  ABS_16x16_SSE 2

  paddd mm7, mm6
  paddd mm1, mm3
  mov eax, [esp+ 4] ; Src
  paddd mm7, mm1
  punpcklbw mm7, mm7    ; xxyyaazz
  pshufw mm4, mm7, 055h ; mm4 contains the mean

  pxor mm1, mm1

  ABS_16x16_SSE 0
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1
  ABS_16x16_SSE 1

  paddd mm1, mm2
  paddd mm3, mm5

  ABS_16x16_SSE 2

  paddd mm7, mm6
  paddd mm1, mm3
  paddd mm7, mm1
  movd eax, mm7

  ret