;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - mmx 8x8 block-based halfpel interpolation -
; *
; *  Copyright(C) 2002 Michael Militzer <isibaar@xvid.org>
; *               2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This program is free software ; you can redistribute it and/or modify
; *  it under the terms of the GNU General Public License as published by
; *  the Free Software Foundation ; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY ; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program ; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; *  Under section 8 of the GNU General Public License, the copyright
; *  holders of XVID explicitly forbid distribution in the following
; *  countries:
; *
; *    - Japan
; *    - United States of America
; *
; *  Linking XviD statically or dynamically with other modules is making a
; *  combined work based on XviD.  Thus, the terms and conditions of the
; *  GNU General Public License cover the whole combination.
; *
; *  As a special exception, the copyright holders of XviD give you
; *  permission to link XviD with independent modules that communicate with
; *  XviD solely through the VFW1.1 and DShow interfaces, regardless of the
; *  license terms of these independent modules, and to copy and distribute
; *  the resulting combined work under terms of your choice, provided that
; *  every copy of the combined work is accompanied by a complete copy of
; *  the source code of XviD (the version of XviD used to produce the
; *  combined work), being distributed under the terms of the GNU General
; *  Public License plus this exception.  An independent module is a module
; *  which is not derived from or based on XviD.
; *
; *  Note that people who make modified versions of XviD are not obligated
; *  to grant this special exception for their modified versions; it is
; *  their choice whether to do so.  The GNU General Public License gives
; *  permission to release a modified version without this exception; this
; *  exception also makes it possible to release a modified version which
; *  carries forward this exception.
; *
; * $Id$
; *
; ****************************************************************************/

BITS 32

%macro cglobal 1
  %ifdef PREFIX
    global _%1
    %define %1 _%1
  %else
    global %1
  %endif
%endmacro
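
; cglobal exports a function label under both symbol-naming schemes: when
; PREFIX is defined (targets whose C compiler prepends an underscore) the
; symbol is exported and referenced as _%1, otherwise it is exported as-is.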

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

ALIGN 16
mmx_one:
  times 8 db 1
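; (a vector of byte 1's; pand-ing with it isolates the per-byte lsb in the
; rounding corrections used throughout this file)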

SECTION .text

cglobal interpolate8x8_halfpel_h_xmm
cglobal interpolate8x8_halfpel_v_xmm
cglobal interpolate8x8_halfpel_hv_xmm
cglobal interpolate8x8_halfpel_add_xmm
cglobal interpolate8x8_halfpel_h_add_xmm
cglobal interpolate8x8_halfpel_v_add_xmm
cglobal interpolate8x8_halfpel_hv_add_xmm

;===========================================================================
;
; void interpolate8x8_halfpel_h_xmm(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;===========================================================================

  movq [ecx+edx], mm1
%endmacro

ALIGN 16
interpolate8x8_halfpel_h_xmm:

  mov eax, [esp+16] ; rounding
  movq [ecx+edx], mm1
%endmacro

ALIGN 16
interpolate8x8_halfpel_v_xmm:

  mov eax, [esp+16] ; rounding
  mov ecx, [esp+ 4] ; Dst
  test eax, eax
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; stride

  ; we process two lines at a time
  jnz near .rounding1

  COPY_V_SSE_RND0
  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_halfpel_hv_xmm:
  mov eax, [esp+16] ; rounding
  mov ecx, [esp+ 4] ; Dst

  add ecx, edx
  COPY_HV_SSE_RND1
  ret


;===========================================================================
;
; The next functions combine both the source halfpel interpolation step and
; the averaging (with rounding) step, to avoid wasting memory bandwidth on
; computing intermediate halfpel images and then averaging them.
;
;===========================================================================
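
; As a rough per-byte reference for what the fused h/v variants compute (a
; sketch in C, not the shipped code; 'd' stands for 1 in the horizontal case
; and stride in the vertical case):
;
;   interp = (src[o] + src[o+d] + 1 - rounding) >> 1;
;   dst[o] = (dst[o] + interp + 1) >> 1;
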
%macro PROLOG0 0
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; BpS
%endmacro

%macro PROLOG1 0
  PROLOG0
  test dword [esp+16], 1 ; Rounding?
%endmacro

%macro EPILOG 0
  ret
%endmacro
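
; (after PROLOG1, ZF is clear iff the rounding argument is odd, so the
; functions below can jump straight to their rounding-1 path with 'jnz')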

;===========================================================================
;
; void interpolate8x8_halfpel_add_xmm(uint8_t * const dst,
;                                     const uint8_t * const src,
;                                     const uint32_t stride,
;                                     const uint32_t rounding);
;
;===========================================================================

%macro ADD_FF 2
  movq mm0,  [eax+%1]
  movq mm1,  [eax+%2]
  ;;---
  ;; movq mm2, mm0
  ;; movq mm3, mm1
  ;;---
  pavgb mm0, [ecx+%1]
  pavgb mm1, [ecx+%2]
  ;;--
  ;; por mm2, [ecx+%1]
  ;; por mm3, [ecx+%2]
  ;; pand mm2, [mmx_one]
  ;; pand mm3, [mmx_one]
  ;; psubsb mm0, mm2
  ;; psubsb mm1, mm3
  ;;--
  movq [ecx+%1], mm0
  movq [ecx+%2], mm1
%endmacro
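
; (the ;;-commented lines in ADD_FF above appear to be a disabled experiment
; at a rounding correction via the lsb of src|dst; they are kept as-is, for
; reference only)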

ALIGN 16
interpolate8x8_halfpel_add_xmm:  ; 23c
  PROLOG1
  ADD_FF 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FF 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FF 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FF 0, edx
  EPILOG

;===========================================================================
;
; void interpolate8x8_halfpel_h_add_xmm(uint8_t * const dst,
;                                       const uint8_t * const src,
;                                       const uint32_t stride,
;                                       const uint32_t rounding);
;
;===========================================================================

%macro ADD_FH_RND0 2
  movq mm0,  [eax+%1]
  movq mm1,  [eax+%2]
  pavgb mm0, [eax+%1+1]
  pavgb mm1, [eax+%2+1]
  pavgb mm0, [ecx+%1]
  pavgb mm1, [ecx+%2]
  movq [ecx+%1], mm0
  movq [ecx+%2], mm1
%endmacro

%macro ADD_FH_RND1 2
  movq mm0,  [eax+%1]
  movq mm1,  [eax+%2]
  movq mm4, mm0
  movq mm5, mm1
  movq mm2, [eax+%1+1]
  movq mm3, [eax+%2+1]
  pavgb mm0, mm2
  ; lea ??
  pxor mm2, mm4
  pavgb mm1, mm3
  pxor mm3, mm5
  pand mm2, [mmx_one]
  pand mm3, [mmx_one]
  psubb mm0, mm2
  psubb mm1, mm3
  pavgb mm0, [ecx+%1]
  pavgb mm1, [ecx+%2]
  movq [ecx+%1], mm0
  movq [ecx+%2], mm1
%endmacro

ALIGN 16
interpolate8x8_halfpel_h_add_xmm:  ; 32c
  PROLOG1
  jnz near .Loop1

  ADD_FH_RND0 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FH_RND0 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FH_RND0 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FH_RND0 0, edx
  EPILOG

.Loop1:
  ; we use: (i+j)/2 = (i+j+1)/2 - ((i^j)&1)
  ; movq mm7, [mmx_one]
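  ; worked example (illustration only): with i=3, j=4, pavgb gives
  ; (3+4+1)>>1 = 4; (3^4)&1 = 1; and 4-1 = 3 = (3+4)>>1, as required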
  ADD_FH_RND1 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FH_RND1 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FH_RND1 0, edx
  lea eax, [eax+2*edx]
  lea ecx, [ecx+2*edx]
  ADD_FH_RND1 0, edx
  EPILOG

;===========================================================================
;
; void interpolate8x8_halfpel_v_add_xmm(uint8_t * const dst,
;                                       const uint8_t * const src,
;                                       const uint32_t stride,
;                                       const uint32_t rounding);
;
;===========================================================================

%macro ADD_8_HF_RND0 0
  movq mm0,  [eax]
  movq mm1,  [eax+edx]
  pavgb mm0, mm1
  pavgb mm1, [eax+2*edx]
  lea eax, [eax+2*edx]
  pavgb mm0, [ecx]
  pavgb mm1, [ecx+edx]
  movq [ecx], mm0
  movq [ecx+edx], mm1
%endmacro

%macro ADD_8_HF_RND1 0
  movq mm1,  [eax+edx]
  movq mm2,  [eax+2*edx]
  lea eax, [eax+2*edx]
  movq mm4, mm0
  movq mm5, mm1
  pavgb mm0, mm1
  pxor mm4, mm1
  pavgb mm1, mm2
  pxor mm5, mm2
  pand mm4, mm7    ; lsb's of (i^j)...
  pand mm5, mm7    ; lsb's of (i^j)...
  psubb mm0, mm4   ; ...are subtracted from the result of pavgb
  pavgb mm0, [ecx]
  movq [ecx], mm0
  psubb mm1, mm5   ; ...are subtracted from the result of pavgb
  pavgb mm1, [ecx+edx]
  movq [ecx+edx], mm1
%endmacro

ALIGN 16
interpolate8x8_halfpel_v_add_xmm:
  PROLOG1

  jnz near .Loop1
  pxor mm7, mm7    ; this is a NOP

  ADD_8_HF_RND0
  lea ecx, [ecx+2*edx]
  ADD_8_HF_RND0
  lea ecx, [ecx+2*edx]
  ADD_8_HF_RND0
  lea ecx, [ecx+2*edx]
  ADD_8_HF_RND0
  EPILOG

.Loop1:
  movq mm0, [eax]  ; loop invariant
  movq mm7, [mmx_one]

  ADD_8_HF_RND1
  movq mm0, mm2
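  ; (mm2 still holds the bottom source row just read by ADD_8_HF_RND1,
  ; so it becomes the top row of the next line pair without a reload)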
  lea ecx, [ecx+2*edx]
  ADD_8_HF_RND1
  movq mm0, mm2
  lea ecx, [ecx+2*edx]
  ADD_8_HF_RND1
  movq mm0, mm2
  lea ecx, [ecx+2*edx]
  ADD_8_HF_RND1
  EPILOG

; The trick is to correct the result of 'pavgb' with some combination of the
; lsb's of the 4 input values i,j,k,l and of their intermediate 'pavgb's s and t.
; Only the lsb of each correction term matters (it is masked with mmx_one).
; The boolean relations are:
;   (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st
;   (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st
;   (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st
;   (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st
; with s=(i+j+1)/2, t=(k+l+1)/2, ij=i^j, kl=k^l, st=s^t.
;
; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).
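;
; Worked example (illustration only): i=1, j=2, k=3, l=4.
;   s = (1+2+1)/2 = 2,  t = (3+4+1)/2 = 4,  (s+t+1)/2 = 3
;   ij = 1^2 = 3,  kl = 3^4 = 7,  st = 2^4 = 6
;   lsb of (ij&kl)&st = lsb(2) = 0,  and 3 - 0 = 3 = (1+2+3+4+3)/4
;   lsb of (ij|kl)|st = lsb(7) = 1,  and 3 - 1 = 2 = (1+2+3+4+0)/4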

;===========================================================================
;
; void interpolate8x8_halfpel_hv_add_xmm(uint8_t * const dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;===========================================================================

%macro ADD_HH_RND0 0
  lea eax, [eax+edx]

  movq mm0, [eax]
  movq mm1, [eax+1]

  movq mm6, mm0
  pavgb mm0, mm1   ; mm0=(j+k+1)/2. preserved for next step
  lea eax, [eax+edx]
  pxor mm1, mm6    ; mm1=(j^k). preserved for next step

  por mm3, mm1     ; ij |= jk
  movq mm6, mm2
  pxor mm6, mm0    ; mm6 = s^t
  pand mm3, mm6    ; (ij|jk) &= st
  pavgb mm2, mm0   ; mm2 = (s+t+1)/2
  pand mm3, mm7    ; mask lsb
  psubb mm2, mm3   ; apply.

  pavgb mm2, [ecx]
  movq [ecx], mm2

  movq mm2, [eax]
  movq mm3, [eax+1]
  movq mm6, mm2
  pavgb mm2, mm3   ; preserved for next iteration
  lea ecx, [ecx+edx]
  pxor mm3, mm6    ; preserved for next iteration

  por mm1, mm3
  movq mm6, mm0
  pxor mm6, mm2
  pand mm1, mm6
  pavgb mm0, mm2

  pand mm1, mm7
  psubb mm0, mm1

  pavgb mm0, [ecx]
  movq [ecx], mm0
%endmacro

%macro ADD_HH_RND1 0
  lea eax, [eax+edx]

  movq mm0, [eax]
  movq mm1, [eax+1]

  movq mm6, mm0
  pavgb mm0, mm1   ; mm0=(j+k+1)/2. preserved for next step
  lea eax, [eax+edx]
  pxor mm1, mm6    ; mm1=(j^k). preserved for next step

  pand mm3, mm1
  movq mm6, mm2
  pxor mm6, mm0
  por mm3, mm6
  pavgb mm2, mm0
  pand mm3, mm7
  psubb mm2, mm3

  pavgb mm2, [ecx]
  movq [ecx], mm2

  movq mm2, [eax]
  movq mm3, [eax+1]
  movq mm6, mm2
  pavgb mm2, mm3   ; preserved for next iteration
  lea ecx, [ecx+edx]
  pxor mm3, mm6    ; preserved for next iteration

  pand mm1, mm3
  movq mm6, mm0
  pxor mm6, mm2
  por mm1, mm6
  pavgb mm0, mm2
  pand mm1, mm7
  psubb mm0, mm1

  pavgb mm0, [ecx]
  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_halfpel_hv_add_xmm:
  PROLOG1

  movq mm7, [mmx_one]

  ; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
  movq mm2, [eax]
  movq mm3, [eax+1]
  movq mm6, mm2
  pavgb mm2, mm3
  pxor mm3, mm6    ; mm2/mm3 ready

  jnz near .Loop1
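
  ; (each ADD_HH_RND* call below consumes the mm2/mm3 pair prepared for
  ; the current source row and leaves the pair for the next row, so each
  ; row's horizontal average and xor are computed exactly once)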

  ADD_HH_RND0
  add ecx, edx
  ADD_HH_RND0
  add ecx, edx
  ADD_HH_RND0
  add ecx, edx
  ADD_HH_RND0
  EPILOG

.Loop1:
  ADD_HH_RND1
  add ecx, edx
  ADD_HH_RND1
  add ecx, edx
  ADD_HH_RND1
  add ecx, edx
  ADD_HH_RND1

  EPILOG