; * Copyright (C) 2001 Peter Ross <pross@xvid.org>
; *               2001 Michael Militzer <isibaar@xvid.org>
; *               2002 Pascal Massimino <skal@planet-d.net>
-; *              2004 Jean-Marc Bastide <jmtest@voila.fr>
; *
; * This program is free software ; you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
[...]

%macro cglobal 1
  %ifdef PREFIX
+    %ifdef MARK_FUNCS
+      global _%1:function %1.endfunc-%1
+      %define %1 _%1:function %1.endfunc-%1
+    %else
      global _%1
      %define %1 _%1
+    %endif
+  %else
+    %ifdef MARK_FUNCS
+      global %1:function %1.endfunc-%1
  %else
    global %1
  %endif
+  %endif
%endmacro
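A note on the new `MARK_FUNCS` branches: `global symbol:function symbol.endfunc-symbol` is NASM's ELF-specific syntax for exporting a symbol with an explicit type and size, which is why this patch also appends a `.endfunc` local label after every `ret` below. A minimal usage sketch, with a hypothetical function name:

    ; assuming MARK_FUNCS (and optionally PREFIX) is defined and the
    ; output format is ELF:
    cglobal my_func
    my_func:              ; exported as _my_func when PREFIX is set
      xor eax, eax
      ret
    .endfunc              ; local label; my_func.endfunc - my_func = size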

;=============================================================================
[...]
;=============================================================================

%ifdef FORMAT_COFF
-SECTION .rodata data
+SECTION .rodata
%else
-SECTION .rodata data align=16
+SECTION .rodata align=16
%endif

ALIGN 16
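Only the non-COFF branch asks for section alignment, presumably because NASM's COFF output format does not accept the `align=` qualifier; the `data` keyword dropped from both lines is apparently redundant on a `SECTION` directive. The constants themselves fall in the lines elided here; the `mmx_one` vector referenced later is presumed to have its usual shape, inferred from the word-wise `paddusw`/`psrlw` that consume it:

    ; presumed definition (elided from this excerpt): a 1 in each of the
    ; four 16-bit lanes of an MMX register
    mmx_one:
      times 4 dw 1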
[...]

cglobal transfer_8to16subro_mmx
cglobal transfer_8to16sub2_mmx
cglobal transfer_8to16sub2_xmm
+cglobal transfer_8to16sub2ro_xmm
cglobal transfer_16to8add_mmx
cglobal transfer8x8_copy_mmx
+cglobal transfer8x4_copy_mmx

;-----------------------------------------------------------------------------
;
[...]

  COPY_8_TO_16 2
  COPY_8_TO_16 3
  ret
+.endfunc

;-----------------------------------------------------------------------------
;
[...]

  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 3
  ret
+.endfunc

;-----------------------------------------------------------------------------
;
[...]

%macro COPY_8_TO_16_SUB 2
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
+  movq mm1, mm0
+  movq mm3, mm2

+  punpcklbw mm0, mm7
+  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref
+  punpckhbw mm1, mm7
+  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref

+  movq mm6, mm4
%if %2 == 1
  movq [eax], mm4
  movq [eax+edx], mm5
%endif
+  punpcklbw mm4, mm7
+  punpckhbw mm6, mm7
+  psubsw mm0, mm4
+  psubsw mm1, mm6
+  movq mm6, mm5
+  punpcklbw mm5, mm7
+  punpckhbw mm6, mm7
+  psubsw mm2, mm5
  lea eax, [eax+2*edx]
+  psubsw mm3, mm6
-  psubsb mm0, mm4
-  psubsb mm2, mm5
  lea ebx, [ebx+2*edx]

-  movq mm1, mm0
-  movq mm3, mm2
-  punpcklbw mm0, mm7
-  punpckhbw mm1, mm7
-  punpcklbw mm2, mm7
-  punpckhbw mm3, mm7
  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2

[...]

  pop ebx
  ret
+.endfunc
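The reverted `psubsb` lines subtracted the prediction in the packed-byte domain, but a pixel difference spans [-255, 255] while a saturating signed-byte subtract clamps to [-128, 127]; the restored code therefore widens both operands against the zeroed mm7 before subtracting as words. A worked failure case, values illustrative only:

    ; cur = 200, ref = 100  ->  true residue = +100
    ; psubsb reads the bytes as signed: (-56) - 100 = -156, saturates to -128
    ; word domain instead (mm0 = cur bytes, mm4 = ref bytes):
    pxor      mm7, mm7       ; zero, for unpacking
    movq      mm1, mm0
    punpcklbw mm0, mm7       ; low four cur bytes  -> words (200 -> 200)
    punpckhbw mm1, mm7       ; high four cur bytes -> words
    movq      mm6, mm4
    punpcklbw mm4, mm7       ; ref bytes -> words (100 -> 100)
    punpckhbw mm6, mm7
    psubsw    mm0, mm4       ; 200 - 100 = +100, exact in 16 bits
    psubsw    mm1, mm6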


ALIGN 16
[...]

  pop ebx
  ret
+.endfunc


;-----------------------------------------------------------------------------
[...]

  movq mm2, [eax+edx]

  ; mm4 <- (ref1+ref2+1) / 2
-  ; (a+b+1)/2 = (a|b)-((a^b)>>1)
  movq mm4, [ebx]      ; ref1
  movq mm1, [esi]      ; ref2
-  movq mm3, mm4
-  pxor mm3, mm1
-  por mm4, mm1
-  pandn mm3, mm6
-  psrlq mm3, 1
-  psubb mm4, mm3
+  movq mm6, mm4
+  movq mm3, mm1
+  punpcklbw mm4, mm7
+  punpcklbw mm1, mm7
+  punpckhbw mm6, mm7
+  punpckhbw mm3, mm7
+  paddusw mm4, mm1
+  paddusw mm6, mm3
+  paddusw mm4, [mmx_one]
+  paddusw mm6, [mmx_one]
+  psrlw mm4, 1
+  psrlw mm6, 1
+  packuswb mm4, mm6
  movq [eax], mm4

  ; mm5 <- (ref1+ref2+1) / 2
  movq mm5, [ebx+edx]  ; ref1
  movq mm1, [esi+edx]  ; ref2
-  movq mm3, mm5
-  pxor mm3, mm1
-  por mm5, mm1
-  pandn mm3, mm6
-  psrlq mm3, 1
-  psubb mm5, mm3
+  movq mm6, mm5
+  movq mm3, mm1
+  punpcklbw mm5, mm7
+  punpcklbw mm1, mm7
+  punpckhbw mm6, mm7
+  punpckhbw mm3, mm7
+  paddusw mm5, mm1
+  paddusw mm6, mm3
+  paddusw mm5, [mmx_one]
+  paddusw mm6, [mmx_one]
+  lea esi, [esi+2*edx]
+  psrlw mm5, 1
+  psrlw mm6, 1
+  packuswb mm5, mm6
  movq [eax+edx], mm5

-  psubsb mm0, mm4
-  psubsb mm2, mm5
-  lea esi, [esi+2*edx]
  movq mm1, mm0
  movq mm3, mm2
-  lea eax, [eax+2*edx]
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
-  lea ebx, [ebx+2*edx]
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

+  movq mm6, mm4
+  punpcklbw mm4, mm7
+  punpckhbw mm6, mm7
+  psubsw mm0, mm4
+  psubsw mm1, mm6
+  movq mm6, mm5
+  punpcklbw mm5, mm7
+  punpckhbw mm6, mm7
+  psubsw mm2, mm5
+  lea eax, [eax+2*edx]
+  psubsw mm3, mm6
+  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2

[...]

  push esi
  mov esi, [esp+8+16]  ; Ref2
  mov edx, [esp+8+20]  ; Stride
-  pxor mm6, mm6
-  pcmpeqb mm5, mm5
  pxor mm7, mm7
-  psubb mm6, mm5       ; mm6 = 1

  COPY_8_TO_16_SUB2_MMX 0
  COPY_8_TO_16_SUB2_MMX 1

[...]

  pop esi
  pop ebx
  ret
+.endfunc
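The deleted comment cited the identity (a+b+1)>>1 = (a|b) - ((a^b)>>1), which is correct for plain integers (since a+b = 2(a|b) - (a^b)); per packed byte, though, `psrlq` shifts bits across lane boundaries, so the restored code instead computes the rounded average exactly in the word domain. One lane of the arithmetic as actually performed, with `mmx_one` assumed to hold a 1 in each word:

    ; ref1 = 3, ref2 = 6  ->  (3 + 6 + 1) >> 1 = 5
    punpcklbw mm4, mm7        ; ref1 bytes -> words
    punpcklbw mm1, mm7        ; ref2 bytes -> words
    paddusw   mm4, mm1        ; 3 + 6 = 9   (max 255+255 = 510, fits a word)
    paddusw   mm4, [mmx_one]  ; 9 + 1 = 10
    psrlw     mm4, 1          ; 10 >> 1 = 5, no carry into the next lane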

;-----------------------------------------------------------------------------
;
[...]

%macro COPY_8_TO_16_SUB2_SSE 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
+  movq mm1, mm0
+  movq mm3, mm2

+  punpcklbw mm0, mm7
+  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref1
  pavgb mm4, [esi]     ; ref2
+  movq [eax], mm4
+  punpckhbw mm1, mm7
+  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref
  pavgb mm5, [esi+edx] ; ref2
-  movq [eax], mm4
  movq [eax+edx], mm5

-  psubsb mm0, mm4
-  psubsb mm2, mm5
+  movq mm6, mm4
+  punpcklbw mm4, mm7
+  punpckhbw mm6, mm7
+  psubsw mm0, mm4
+  psubsw mm1, mm6
  lea esi, [esi+2*edx]
-  movq mm1, mm0
-  movq mm3, mm2
+  movq mm6, mm5
+  punpcklbw mm5, mm7
+  punpckhbw mm6, mm7
+  psubsw mm2, mm5
  lea eax, [eax+2*edx]
-  punpcklbw mm0, mm7
-  punpcklbw mm2, mm7
+  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]
-  punpckhbw mm1, mm7
-  punpckhbw mm3, mm7

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1

[...]

  pop esi
  pop ebx
  ret
+.endfunc
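`pavgb` computes exactly the per-byte rounded average (a+b+1)>>1 that the MMX path above assembles by hand, so the xmm variant replaces roughly a dozen instructions per row with one and can take the second operand straight from memory:

    ; one row of the interpolated prediction
    movq  mm4, [ebx]          ; ref1
    pavgb mm4, [esi]          ; mm4 = (ref1 + ref2 + 1) >> 1, per byte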


+;-----------------------------------------------------------------------------
+;
+; void transfer_8to16sub2ro_xmm(int16_t * const dct,
+;                               const uint8_t * const cur,
+;                               const uint8_t * ref1,
+;                               const uint8_t * ref2,
+;                               const uint32_t stride)
+;
+;-----------------------------------------------------------------------------
+
+%macro COPY_8_TO_16_SUB2RO_SSE 1
+  movq mm0, [eax]      ; cur
+  movq mm2, [eax+edx]
+  movq mm1, mm0
+  movq mm3, mm2
+
+  punpcklbw mm0, mm7
+  punpcklbw mm2, mm7
+  movq mm4, [ebx]      ; ref1
+  pavgb mm4, [esi]     ; ref2
+  punpckhbw mm1, mm7
+  punpckhbw mm3, mm7
+  movq mm5, [ebx+edx]  ; ref
+  pavgb mm5, [esi+edx] ; ref2
+
+  movq mm6, mm4
+  punpcklbw mm4, mm7
+  punpckhbw mm6, mm7
+  psubsw mm0, mm4
+  psubsw mm1, mm6
+  lea esi, [esi+2*edx]
+  movq mm6, mm5
+  punpcklbw mm5, mm7
+  punpckhbw mm6, mm7
+  psubsw mm2, mm5
+  lea eax, [eax+2*edx]
+  psubsw mm3, mm6
+  lea ebx, [ebx+2*edx]
+
+  movq [ecx+%1*32+ 0], mm0 ; dst
+  movq [ecx+%1*32+ 8], mm1
+  movq [ecx+%1*32+16], mm2
+  movq [ecx+%1*32+24], mm3
+%endmacro
+
+ALIGN 16
+transfer_8to16sub2ro_xmm:
+  pxor mm7, mm7
+  mov ecx, [esp + 4]   ; Dst
+  mov eax, [esp + 8]   ; Cur
+  push ebx
+  mov ebx, [esp+4+12]  ; Ref1
+  push esi
+  mov esi, [esp+8+16]  ; Ref2
+  mov edx, [esp+8+20]  ; Stride
+
+  COPY_8_TO_16_SUB2RO_SSE 0
+  COPY_8_TO_16_SUB2RO_SSE 1
+  COPY_8_TO_16_SUB2RO_SSE 2
+  COPY_8_TO_16_SUB2RO_SSE 3
+
+  pop esi
+  pop ebx
+  ret
+.endfunc
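The new routine matches transfer_8to16sub2_xmm except that the averaged prediction is never stored back over `cur` (the `ro` suffix follows transfer_8to16subro_mmx above: read-only). The staggered stack offsets merely track the two register pushes under cdecl:

    ; argument offsets inside transfer_8to16sub2ro_xmm:
    ;   [esp+ 4]    Dst     (nothing pushed yet)
    ;   [esp+ 8]    Cur
    ;   [esp+4+12]  Ref1    (after push ebx:  +4)
    ;   [esp+8+16]  Ref2    (after push esi:  +8)
    ;   [esp+8+20]  Stride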


;-----------------------------------------------------------------------------
;
[...]
;-----------------------------------------------------------------------------

%macro COPY_16_TO_8_ADD 1
-  movq mm0, [eax+%1*32+ 0] ; src
-  packuswb mm0, [eax+%1*32+ 8]
-  movq mm1, [eax+%1*32+16]
-  packuswb mm1, [eax+%1*32+24]
-  paddusb mm0, [ecx]
-  paddusb mm1, [ecx+edx]
+  movq mm0, [ecx]
+  movq mm2, [ecx+edx]
+  movq mm1, mm0
+  movq mm3, mm2
+  punpcklbw mm0, mm7
+  punpcklbw mm2, mm7
+  punpckhbw mm1, mm7
+  punpckhbw mm3, mm7
+  paddsw mm0, [eax+%1*32+ 0]
+  paddsw mm1, [eax+%1*32+ 8]
+  paddsw mm2, [eax+%1*32+16]
+  paddsw mm3, [eax+%1*32+24]
+  packuswb mm0, mm1
  movq [ecx], mm0
-  movq [ecx+edx], mm1
+  packuswb mm2, mm3
+  movq [ecx+edx], mm2
%endmacro


[...]

  mov ecx, [esp+ 4]  ; Dst
  mov eax, [esp+ 8]  ; Src
  mov edx, [esp+12]  ; Stride
-  ; pxor mm7, mm7
+  pxor mm7, mm7

  COPY_16_TO_8_ADD 0
  lea ecx, [ecx+2*edx]
[...]
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 3
  ret
+.endfunc
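The reverted macro packed the 16-bit residue to bytes first and then added with `paddusb`; `packuswb` clamps every negative word to 0 and `paddusb` cannot subtract, so negative corrections were lost. The restored macro widens the destination pixels, adds the signed words with `paddsw`, and packs last. One pixel through both paths, values illustrative:

    ; dst = 10, residue = -5  ->  expected 5
    ; old: packuswb(-5) = 0; paddusb(10, 0) = 10          (wrong)
    ; new:
    punpcklbw mm0, mm7            ; dst byte 10 -> word 10
    paddsw    mm0, [eax]          ; 10 + (-5) = 5, signed words
    packuswb  mm0, mm1            ; clamp to [0,255], repack -> 5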

;-----------------------------------------------------------------------------
;
[...]
;
;-----------------------------------------------------------------------------

+%macro COPY_8_TO_8 0
+  movq mm0, [eax]
+  movq mm1, [eax+edx]
+  movq [ecx], mm0
+  lea eax, [eax+2*edx]
+  movq [ecx+edx], mm1
+%endmacro

ALIGN 16
transfer8x8_copy_mmx:
-  mov eax, [esp+ 8]  ; Src
  mov ecx, [esp+ 4]  ; Dst
+  mov eax, [esp+ 8]  ; Src
  mov edx, [esp+12]  ; Stride

-  movq mm0, [eax]
-  lea eax, [eax+edx]
-  movq mm1, [eax]
-  lea eax, [eax+edx]
-  movq mm2, [eax]
-  lea eax, [eax+edx]
-  movq mm3, [eax]
-  lea eax, [eax+edx]
-  movq mm4, [eax]
-  lea eax, [eax+edx]
-  movq mm5, [eax]
-  lea eax, [eax+edx]
-  movq mm6, [eax]
-  lea eax, [eax+edx]
-  movq mm7, [eax]
+  COPY_8_TO_8
+  lea ecx, [ecx+2*edx]
+  COPY_8_TO_8
+  lea ecx, [ecx+2*edx]
+  COPY_8_TO_8
+  lea ecx, [ecx+2*edx]
+  COPY_8_TO_8
+  ret
+.endfunc

-  movq [ecx], mm0
-  lea ecx, [ecx+edx]
-  movq [ecx], mm1
-  lea ecx, [ecx+edx]
-  movq [ecx], mm2
-  lea ecx, [ecx+edx]
-  movq [ecx], mm3
-  lea ecx, [ecx+edx]
-  movq [ecx], mm4
-  lea ecx, [ecx+edx]
-  movq [ecx], mm5
-  lea ecx, [ecx+edx]
-  movq [ecx], mm6
-  lea ecx, [ecx+edx]
-  movq [ecx], mm7
+;-----------------------------------------------------------------------------
+;
+; void transfer8x4_copy_mmx(uint8_t * const dst,
+;                           const uint8_t * const src,
+;                           const uint32_t stride);
+;
+;-----------------------------------------------------------------------------
+
+ALIGN 16
+transfer8x4_copy_mmx:
+  mov ecx, [esp+ 4]  ; Dst
+  mov eax, [esp+ 8]  ; Src
+  mov edx, [esp+12]  ; Stride

+  COPY_8_TO_8
+  lea ecx, [ecx+2*edx]
+  COPY_8_TO_8
  ret
+.endfunc
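Note the deliberate asymmetry in the new helper: `COPY_8_TO_8` advances the source pointer itself (`lea eax, [eax+2*edx]`) but leaves the destination pointer to the caller, so the final invocation in each routine skips a pointless `lea ecx`. The call pattern both copy routines share:

    COPY_8_TO_8               ; rows 0-1; eax += 2*stride inside the macro
    lea ecx, [ecx+2*edx]      ; caller advances dst between invocations
    COPY_8_TO_8               ; rows 2-3; no trailing lea needed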