;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - K7 optimized SAD operators -
; *
; *  Copyright(C) 2001 Peter Ross <pross@xvid.org>
; *             2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This program is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
; *
; * $Id$
; *
; ***************************************************************************/

BITS 32

%macro cglobal 1
  %ifdef PREFIX
    %ifdef MARK_FUNCS
      global _%1:function %1.endfunc-%1
      %define %1 _%1:function %1.endfunc-%1
    %else
      global _%1
      %define %1 _%1
    %endif
  %else
    %ifdef MARK_FUNCS
      global %1:function %1.endfunc-%1
    %else
      global %1
    %endif
  %endif
%endmacro
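
; cglobal exports a symbol, prepending an underscore when PREFIX is defined
; (for targets that mangle C names with a leading '_'); with MARK_FUNCS it
; additionally marks the symbol as an ELF function whose size runs from the
; label to the matching local .endfunc label.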

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

ALIGN 16
mmx_one:
  times 4 dw 1

;=============================================================================
; Helper macros
;=============================================================================

%macro SAD_16x16_MMX 0
  movq mm0, [eax]
  movq mm1, [edx]

  movq mm2, [eax+8]
  movq mm3, [edx+8]

  movq mm4, mm0
  psubusb mm0, mm1
  lea eax, [eax+ecx]
  movq mm5, mm2
  psubusb mm2, mm3

  psubusb mm1, mm4
  psubusb mm3, mm5
  por mm0, mm1
  por mm2, mm3

  movq mm1, mm0
  punpcklbw mm0, mm7
  movq mm3, mm2
  punpckhbw mm1, mm7
  lea edx, [edx+ecx]
  punpcklbw mm2, mm7
  paddusw mm0, mm1
  punpckhbw mm3, mm7
  paddusw mm6, mm0
  paddusw mm2, mm3
  paddusw mm6, mm2

%endmacro

%macro SAD_8x8_MMX 0
  movq mm0, [eax]
  movq mm1, [edx]

  movq mm2, [eax+ecx]
  movq mm3, [edx+ecx]

  lea eax, [eax+2*ecx]
  lea edx, [edx+2*ecx]

  movq mm4, mm0
  movq mm5, mm2
  psubusb mm0, mm1
  psubusb mm2, mm3

  psubusb mm1, mm4
  psubusb mm3, mm5
  por mm0, mm1
  por mm2, mm3

  movq mm1, mm0
  punpcklbw mm0, mm7
  movq mm3, mm2
  punpckhbw mm1, mm7
  punpcklbw mm2, mm7
  paddusw mm0, mm1
  punpckhbw mm3, mm7
  paddusw mm6, mm0
  paddusw mm2, mm3
  paddusw mm6, mm2
%endmacro

%macro SADV_16x16_MMX 0
  movq mm0, [eax]
  movq mm1, [edx]

  movq mm2, [eax+8]
  movq mm4, mm0
  movq mm3, [edx+8]
  psubusb mm0, mm1

  psubusb mm1, mm4
  lea eax, [eax+ecx]
  por mm0, mm1

  movq mm4, mm2
  psubusb mm2, mm3

  psubusb mm3, mm4
  por mm2, mm3

  movq mm1, mm0
  punpcklbw mm0, mm7
  movq mm3, mm2
  punpckhbw mm1, mm7
  punpcklbw mm2, mm7
  paddusw mm0, mm1
  punpckhbw mm3, mm7
  paddusw mm5, mm0
  paddusw mm2, mm3
  lea edx, [edx+ecx]
  paddusw mm6, mm2
%endmacro

%macro SADBI_16x16_MMX 2    ; SADBI_16x16_MMX( int_ptr_offset, bool_increment_ptr );

  movq mm0, [edx+%1]
  movq mm2, [ebx+%1]
  movq mm1, mm0
  movq mm3, mm2

%if %2 != 0
  add edx, ecx
%endif

  punpcklbw mm0, mm7
  punpckhbw mm1, mm7
  punpcklbw mm2, mm7
  punpckhbw mm3, mm7

%if %2 != 0
  add ebx, ecx
%endif

  paddusw mm0, mm2              ; mm01 = ref1 + ref2
  paddusw mm1, mm3
  paddusw mm0, [mmx_one]        ; mm01 += 1
  paddusw mm1, [mmx_one]
  psrlw mm0, 1                  ; mm01 >>= 1
  psrlw mm1, 1

  movq mm2, [eax+%1]
  movq mm3, mm2
  punpcklbw mm2, mm7            ; mm23 = src
  punpckhbw mm3, mm7

%if %2 != 0
  add eax, ecx
%endif

  movq mm4, mm0
  movq mm5, mm1
  psubusw mm0, mm2
  psubusw mm1, mm3
  psubusw mm2, mm4
  psubusw mm3, mm5
  por mm0, mm2                  ; mm01 = ABS(mm01 - mm23)
  por mm1, mm3

  paddusw mm6, mm0              ; mm6 += mm01
  paddusw mm6, mm1

%endmacro

%macro MEAN_16x16_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+8]
  lea eax, [eax+ecx]
  movq mm1, mm0
  punpcklbw mm0, mm7
  movq mm3, mm2
  punpckhbw mm1, mm7
  paddw mm5, mm0
  punpcklbw mm2, mm7
  paddw mm6, mm1
  punpckhbw mm3, mm7
  paddw mm5, mm2
  paddw mm6, mm3
%endmacro

  paddw mm5, mm2
%endmacro

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal sad16_mmx
cglobal sad16v_mmx
cglobal sad8_mmx
cglobal sad16bi_mmx
cglobal sad8bi_mmx
cglobal dev16_mmx
cglobal sse8_16bit_mmx
cglobal sse8_8bit_mmx

;-----------------------------------------------------------------------------
;
; uint32_t sad16_mmx(const uint8_t * const cur,
;                    const uint8_t * const ref,
;                    const uint32_t stride,
;                    const uint32_t best_sad);
;
; (the early termination parameter is ignored; using it slows this down)
;
;-----------------------------------------------------------------------------
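
; A plain-C sketch of the value computed here (not part of the original file;
; sad16_c is an illustrative name): the sum of absolute differences over a
; 16x16 block. The MMX body below obtains |a-b| for unsigned bytes by OR'ing
; the two saturated subtractions a-b and b-a.
;
;   #include <stdint.h>
;
;   uint32_t sad16_c(const uint8_t *cur, const uint8_t *ref, uint32_t stride)
;   {
;     uint32_t sad = 0;
;     for (int j = 0; j < 16; j++) {            /* 16 rows */
;       for (int i = 0; i < 16; i++) {          /* 16 pixels per row */
;         int d = (int)cur[i] - (int)ref[i];
;         sad += (d < 0) ? -d : d;              /* absolute difference */
;       }
;       cur += stride;
;       ref += stride;
;     }
;     return sad;
;   }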

ALIGN 16
sad16_mmx:

  mov eax, [esp+ 4] ; Src1
  mov edx, [esp+ 8] ; Src2
  mov ecx, [esp+12] ; Stride

  pxor mm6, mm6 ; accum
  pxor mm7, mm7 ; zero

  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX

  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX
  SAD_16x16_MMX

  pmaddwd mm6, [mmx_one] ; collapse
  movq mm7, mm6
  psrlq mm7, 32
  paddd mm6, mm7

  movd eax, mm6

  ret
.endfunc

;-----------------------------------------------------------------------------
;
; uint32_t sad8_mmx(const uint8_t * const cur,
;                   const uint8_t * const ref,
;                   const uint32_t stride);
;
;-----------------------------------------------------------------------------

ALIGN 16
sad8_mmx:

  mov eax, [esp+ 4] ; Src1
  mov edx, [esp+ 8] ; Src2
  mov ecx, [esp+12] ; Stride

  pxor mm6, mm6 ; accum
  pxor mm7, mm7 ; zero

  SAD_8x8_MMX
  SAD_8x8_MMX
  SAD_8x8_MMX
  SAD_8x8_MMX

  pmaddwd mm6, [mmx_one] ; collapse
  movq mm7, mm6
  psrlq mm7, 32
  paddd mm6, mm7

  movd eax, mm6

  ret
.endfunc

;-----------------------------------------------------------------------------
;
; uint32_t sad16v_mmx(const uint8_t * const cur,
;                     const uint8_t * const ref,
;                     const uint32_t stride,
;                     int32_t *sad);
;
;-----------------------------------------------------------------------------
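
; A plain-C sketch of the contract suggested by the code below (illustrative
; names, not part of the original file): the four partial SADs written to
; sad[0..3] appear to be the 8x8 quadrants (top-left, top-right, bottom-left,
; bottom-right), and the return value is their sum.
;
;   #include <stdint.h>
;
;   static uint32_t sad8_block(const uint8_t *c, const uint8_t *r, uint32_t stride)
;   {
;     uint32_t s = 0;
;     for (int j = 0; j < 8; j++, c += stride, r += stride)
;       for (int i = 0; i < 8; i++) {
;         int d = (int)c[i] - (int)r[i];
;         s += (d < 0) ? -d : d;
;       }
;     return s;
;   }
;
;   uint32_t sad16v_c(const uint8_t *cur, const uint8_t *ref, uint32_t stride, int32_t *sad)
;   {
;     sad[0] = sad8_block(cur,                ref,                stride);
;     sad[1] = sad8_block(cur + 8,            ref + 8,            stride);
;     sad[2] = sad8_block(cur + 8*stride,     ref + 8*stride,     stride);
;     sad[3] = sad8_block(cur + 8*stride + 8, ref + 8*stride + 8, stride);
;     return sad[0] + sad[1] + sad[2] + sad[3];
;   }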

ALIGN 16
sad16v_mmx:

  push ebx
  push edi

  mov eax, [esp + 8 + 4] ; Src1
  mov edx, [esp + 8 + 8] ; Src2
  mov ecx, [esp + 8 + 12] ; Stride
  mov ebx, [esp + 8 + 16] ; sad ptr

  pxor mm5, mm5 ; accum
  pxor mm6, mm6 ; accum
  pxor mm7, mm7 ; zero

  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX

  pmaddwd mm5, [mmx_one] ; collapse
  pmaddwd mm6, [mmx_one] ; collapse

  movq mm2, mm5
  movq mm3, mm6

  psrlq mm2, 32
  psrlq mm3, 32

  paddd mm5, mm2
  paddd mm6, mm3

  movd [ebx], mm5
  movd [ebx + 4], mm6

  paddd mm5, mm6

  movd edi, mm5

  pxor mm5, mm5
  pxor mm6, mm6

  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX
  SADV_16x16_MMX

  pmaddwd mm5, [mmx_one] ; collapse
  pmaddwd mm6, [mmx_one] ; collapse

  movq mm2, mm5
  movq mm3, mm6

  psrlq mm2, 32
  psrlq mm3, 32

  paddd mm5, mm2
  paddd mm6, mm3

  movd [ebx + 8], mm5
  movd [ebx + 12], mm6

  paddd mm5, mm6

  movd eax, mm5

  add eax, edi

  pop edi
  pop ebx

  ret
.endfunc

;-----------------------------------------------------------------------------
;
; uint32_t sad16bi_mmx(const uint8_t * const cur,
;                      const uint8_t * const ref1,
;                      const uint8_t * const ref2,
;                      const uint32_t stride);
;
;-----------------------------------------------------------------------------
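
; A plain-C sketch of the bi-directional SAD computed here (sad16bi_c is an
; illustrative name, not part of the original file): cur is compared against
; the rounded average of ref1 and ref2, matching the (ref1+ref2+1)>>1 step in
; SADBI_16x16_MMX above.
;
;   #include <stdint.h>
;
;   uint32_t sad16bi_c(const uint8_t *cur, const uint8_t *ref1,
;                      const uint8_t *ref2, uint32_t stride)
;   {
;     uint32_t sad = 0;
;     for (int j = 0; j < 16; j++) {
;       for (int i = 0; i < 16; i++) {
;         int r = (ref1[i] + ref2[i] + 1) >> 1;   /* rounded average */
;         int d = (int)cur[i] - r;
;         sad += (d < 0) ? -d : d;
;       }
;       cur += stride; ref1 += stride; ref2 += stride;
;     }
;     return sad;
;   }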

ALIGN 16
sad16bi_mmx:
  push ebx
  mov eax, [esp+4+ 4] ; Src
  mov edx, [esp+4+ 8] ; Ref1
  mov ebx, [esp+4+12] ; Ref2
  mov ecx, [esp+4+16] ; Stride

  pxor mm6, mm6 ; accum2
  pxor mm7, mm7
.Loop
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1

  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1

  pmaddwd mm6, [mmx_one] ; collapse
  movq mm7, mm6
  psrlq mm7, 32
  paddd mm6, mm7

  movd eax, mm6
  pop ebx

  ret
.endfunc

;-----------------------------------------------------------------------------
;
; uint32_t sad8bi_mmx(const uint8_t * const cur,
;                     const uint8_t * const ref1,
;                     const uint8_t * const ref2,
;                     const uint32_t stride);
;
;-----------------------------------------------------------------------------

ALIGN 16
sad8bi_mmx:
  push ebx
  mov eax, [esp+4+ 4] ; Src
  mov edx, [esp+4+ 8] ; Ref1
  mov ebx, [esp+4+12] ; Ref2
  mov ecx, [esp+4+16] ; Stride

  pxor mm6, mm6 ; accum2
  pxor mm7, mm7
.Loop
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1

  pmaddwd mm6, [mmx_one] ; collapse
  movq mm7, mm6
  psrlq mm7, 32
  paddd mm6, mm7

  movd eax, mm6
  pop ebx
  ret
.endfunc

;-----------------------------------------------------------------------------
;
; uint32_t dev16_mmx(const uint8_t * const cur,
;                    const uint32_t stride);
;
;-----------------------------------------------------------------------------
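
; A plain-C sketch of the deviation measure this routine is understood to
; compute (dev16_c is an illustrative name, and the truncated mean is an
; assumption): the sum of absolute differences between each pixel of the
; 16x16 block and the block mean.
;
;   #include <stdint.h>
;
;   uint32_t dev16_c(const uint8_t *cur, uint32_t stride)
;   {
;     uint32_t sum = 0, dev = 0, mean;
;     const uint8_t *p = cur;
;     for (int j = 0; j < 16; j++, p += stride)     /* block mean */
;       for (int i = 0; i < 16; i++)
;         sum += p[i];
;     mean = sum / (16 * 16);                       /* truncated average (assumption) */
;     for (int j = 0; j < 16; j++, cur += stride)   /* sum of |pixel - mean| */
;       for (int i = 0; i < 16; i++) {
;         int d = (int)cur[i] - (int)mean;
;         dev += (d < 0) ? -d : d;
;       }
;     return dev;
;   }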

ALIGN 16
dev16_mmx:
  mov eax, [esp+ 4] ; Src
  mov ecx, [esp+ 8] ; Stride

  paddd mm6, mm5

  movd eax, mm6

  ret
.endfunc

;-----------------------------------------------------------------------------
;
; uint32_t sse8_16bit_mmx(const int16_t *b1,
;                         const int16_t *b2,
;                         const uint32_t stride);
;
;-----------------------------------------------------------------------------
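
; A plain-C sketch of the sum of squared errors computed here (sse8_16bit_c is
; an illustrative name; treating stride as a byte offset is an assumption based
; on the 16-byte row loads in ROW_SSE_16bit_MMX below).
;
;   #include <stdint.h>
;
;   uint32_t sse8_16bit_c(const int16_t *b1, const int16_t *b2, uint32_t stride)
;   {
;     uint32_t sse = 0;
;     for (int j = 0; j < 8; j++) {                 /* 8 rows of 8 coefficients */
;       for (int i = 0; i < 8; i++) {
;         int32_t d = (int32_t)b1[i] - (int32_t)b2[i];
;         sse += (uint32_t)((int64_t)d * d);        /* squared difference */
;       }
;       b1 = (const int16_t *)((const uint8_t *)b1 + stride);
;       b2 = (const int16_t *)((const uint8_t *)b2 + stride);
;     }
;     return sse;
;   }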

%macro ROW_SSE_16bit_MMX 2
  movq mm0, [%1]
  movq mm1, [%1+8]
  psubw mm0, [%2]
  psubw mm1, [%2+8]
  pmaddwd mm0, mm0
  pmaddwd mm1, mm1
  paddd mm2, mm0
  paddd mm2, mm1
%endmacro

sse8_16bit_mmx:
  push esi
  push edi

  ;; Load the function params
  mov esi, [esp+8+4]
  mov edi, [esp+8+8]
  mov edx, [esp+8+12]

  ;; Reset the sse accumulator
  pxor mm2, mm2

  ;; Let's go
%rep 8
  ROW_SSE_16bit_MMX esi, edi
  lea esi, [esi+edx]
  lea edi, [edi+edx]
%endrep

  ;; Finish adding each dword of the accumulator
  movq mm3, mm2
  psrlq mm2, 32
  paddd mm2, mm3
  movd eax, mm2

  ;; All done
  pop edi
  pop esi
  ret
.endfunc

;-----------------------------------------------------------------------------
;
; uint32_t sse8_8bit_mmx(const int8_t *b1,
;                        const int8_t *b2,
;                        const uint32_t stride);
;
;-----------------------------------------------------------------------------

%macro ROW_SSE_8bit_MMX 2
  movq mm0, [%1]      ; load a row
  movq mm2, [%2]      ; load a row

  movq mm1, mm0       ; copy row
  movq mm3, mm2       ; copy row

  punpcklbw mm0, mm7  ; turn the 4 low elements into 16 bit
  punpckhbw mm1, mm7  ; turn the 4 high elements into 16 bit

  punpcklbw mm2, mm7  ; turn the 4 low elements into 16 bit
  punpckhbw mm3, mm7  ; turn the 4 high elements into 16 bit

  psubw mm0, mm2      ; low part of src-dst
  psubw mm1, mm3      ; high part of src-dst

  pmaddwd mm0, mm0    ; square the differences and sum pairs
  pmaddwd mm1, mm1    ; square the differences and sum pairs

  paddd mm6, mm0      ; add to the accumulator
  paddd mm6, mm1      ; add to the accumulator
%endmacro

sse8_8bit_mmx:
  push esi
  push edi

  ;; Load the function params
  mov esi, [esp+8+4]
  mov edi, [esp+8+8]
  mov edx, [esp+8+12]

  ;; Reset the sse accumulator
  pxor mm6, mm6

  ;; Used to interleave 8bit data with 0x00 values
  pxor mm7, mm7

  ;; Let's go
%rep 8
  ROW_SSE_8bit_MMX esi, edi
  lea esi, [esi+edx]
  lea edi, [edi+edx]
%endrep

  ;; Finish adding each dword of the accumulator
  movq mm7, mm6
  psrlq mm6, 32
  paddd mm6, mm7
  movd eax, mm6

  ;; All done
  pop edi
  pop esi
  ret
.endfunc