;/*****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - mmx 8x8 block-based halfpel interpolation -
; *
; *  Copyright(C) 2001 Peter Ross <pross@xvid.org>
; *               2002 Michael Militzer <isibaar@xvid.org>
; *
; *  This program is free software; you can redistribute it and/or modify
; *  it under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; ****************************************************************************/

;/*****************************************************************************
; *
; *  History:
; *
; *  05.10.2002  added some qpel mmx code - Isibaar
; *  06.07.2002  mmx cleanup - Isibaar
; *  22.12.2001  initial version; (c)2001 peter ross <pross@cs.rmit.edu.au>
; *
; ****************************************************************************/

BITS 32

%macro cglobal 1
	%ifdef PREFIX
		global _%1
		%define %1 _%1
	%else
		global %1
	%endif
%endmacro

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

;-----------------------------------------------------------------------------
; (16 - r) rounding table
;-----------------------------------------------------------------------------

ALIGN 16
rounding_lowpass_mmx:
times 4 dw 16
times 4 dw 15

;-----------------------------------------------------------------------------
; (1 - r) rounding table
;-----------------------------------------------------------------------------

rounding1_mmx:
times 4 dw 1
times 4 dw 0

;-----------------------------------------------------------------------------
; (2 - r) rounding table
;-----------------------------------------------------------------------------

rounding2_mmx:
times 4 dw 2
times 4 dw 1

mmx_one:
times 8 db 1

mmx_two:
times 8 db 2

mmx_three:
times 8 db 3

mmx_five:
times 4 dw 5

mmx_mask:
times 8 db 254

mmx_mask2:
times 8 db 252

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal interpolate8x8_halfpel_h_mmx
cglobal interpolate8x8_halfpel_v_mmx
cglobal interpolate8x8_halfpel_hv_mmx

cglobal interpolate8x8_avg4_mmx
cglobal interpolate8x8_avg2_mmx

cglobal interpolate8x8_6tap_lowpass_h_mmx
cglobal interpolate8x8_6tap_lowpass_v_mmx

cglobal interpolate8x8_halfpel_add_mmx
cglobal interpolate8x8_halfpel_h_add_mmx
cglobal interpolate8x8_halfpel_v_add_mmx
cglobal interpolate8x8_halfpel_hv_add_mmx

%macro CALC_AVG 6
  punpcklbw %3, %6
  punpckhbw %4, %6

  paddusw %1, %3                ; mm01 += mm23
  paddusw %2, %4
  paddusw %1, %5                ; mm01 += rounding
  paddusw %2, %5

  psrlw %1, 1                   ; mm01 >>= 1
  psrlw %2, 1
%endmacro

;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_h_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------
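
; Reference sketch in C of what this routine computes (illustrative only, not
; part of the build; it assumes the usual MPEG-4 halfpel-h definition, which
; matches the (1 - r) rounder and the shift by 1 in CALC_AVG):
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++)
;       dst[j*stride + i] = (src[j*stride + i] + src[j*stride + i + 1]
;                            + 1 - rounding) >> 1;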
|
|
%macro COPY_H_MMX 0
  movq mm0, [esi]               ; mm01 = [src]
  movq mm2, [esi + 1]           ; mm23 = [src + 1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6            ; unpack [src] to words
  punpckhbw mm1, mm6

  CALC_AVG mm0, mm1, mm2, mm3, mm7, mm6

  packuswb mm0, mm1
  movq [edi], mm0               ; [dst] = mm01

  add esi, edx                  ; src += stride
  add edi, edx                  ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_h_mmx:

  push esi
  push edi
  mov eax, [esp + 8 + 16]       ; rounding

interpolate8x8_halfpel_h_mmx.start:
  movq mm7, [rounding1_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src
  mov edx, [esp + 8 + 12]       ; stride

  pxor mm6, mm6                 ; zero

  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX

  pop edi
  pop esi

  ret


;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_v_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------
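
; Reference sketch in C (illustrative only, not part of the build): same as
; the horizontal case, but averaging with the pixel one row below:
;
;   dst[j*stride + i] = (src[j*stride + i] + src[(j+1)*stride + i]
;                        + 1 - rounding) >> 1;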
|
|
%macro COPY_V_MMX 0
  movq mm0, [esi]               ; mm01 = [src]
  movq mm2, [esi + edx]         ; mm23 = [src + stride]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6            ; unpack [src] to words
  punpckhbw mm1, mm6

  CALC_AVG mm0, mm1, mm2, mm3, mm7, mm6

  packuswb mm0, mm1
  movq [edi], mm0               ; [dst] = mm01

  add esi, edx                  ; src += stride
  add edi, edx                  ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_v_mmx:

  push esi
  push edi
  mov eax, [esp + 8 + 16]       ; rounding

interpolate8x8_halfpel_v_mmx.start:
  movq mm7, [rounding1_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src
  mov edx, [esp + 8 + 12]       ; stride

  pxor mm6, mm6                 ; zero

  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX

  pop edi
  pop esi

  ret


;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_hv_mmx(uint8_t * const dst,
;                                    const uint8_t * const src,
;                                    const uint32_t stride,
;                                    const uint32_t rounding);
;
;
;-----------------------------------------------------------------------------
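
; Reference sketch in C (illustrative only, not part of the build), matching
; the (2 - r) rounder and the shift by 2 in COPY_HV_MMX:
;
;   dst[j*stride + i] = (src[j*stride + i]       + src[j*stride + i + 1]
;                        + src[(j+1)*stride + i] + src[(j+1)*stride + i + 1]
;                        + 2 - rounding) >> 2;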
|
|
%macro COPY_HV_MMX 0
  ; current row
  movq mm0, [esi]
  movq mm2, [esi + 1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6            ; mm01 = [src]
  punpcklbw mm2, mm6            ; mm23 = [src + 1]
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6

  paddusw mm0, mm2              ; mm01 += mm23
  paddusw mm1, mm3

  ; next row
  movq mm4, [esi + edx]
  movq mm2, [esi + edx + 1]
  movq mm5, mm4
  movq mm3, mm2

  punpcklbw mm4, mm6            ; mm45 = [src + stride]
  punpcklbw mm2, mm6            ; mm23 = [src + stride + 1]
  punpckhbw mm5, mm6
  punpckhbw mm3, mm6

  paddusw mm4, mm2              ; mm45 += mm23
  paddusw mm5, mm3

  ; add current + next row
  paddusw mm0, mm4              ; mm01 += mm45
  paddusw mm1, mm5
  paddusw mm0, mm7              ; mm01 += rounding2
  paddusw mm1, mm7

  psrlw mm0, 2                  ; mm01 >>= 2
  psrlw mm1, 2

  packuswb mm0, mm1
  movq [edi], mm0               ; [dst] = mm01

  add esi, edx                  ; src += stride
  add edi, edx                  ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_hv_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16]       ; rounding

interpolate8x8_halfpel_hv_mmx.start:
  movq mm7, [rounding2_mmx + eax * 8]

  mov edi, [esp + 8 + 4]        ; dst
  mov esi, [esp + 8 + 8]        ; src
  mov edx, [esp + 8 + 12]       ; stride

  pxor mm6, mm6                 ; zero

  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX

  pop edi
  pop esi

  ret


;-----------------------------------------------------------------------------
;
; void interpolate8x8_avg2_mmx(uint8_t const *dst,
;                              const uint8_t * const src1,
;                              const uint8_t * const src2,
;                              const uint32_t stride,
;                              const uint32_t rounding,
;                              const uint32_t height);
;
;-----------------------------------------------------------------------------
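
; Reference sketch in C (illustrative only, not part of the build; src2 is
; the second source block named in the prototype above):
;
;   for (j = 0; j < height; j++)
;     for (i = 0; i < 8; i++)
;       dst[j*stride + i] = (src1[j*stride + i] + src2[j*stride + i]
;                            + 1 - rounding) >> 1;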
|
|
%macro AVG2_MMX_RND0 0
  movq mm0, [eax]               ; src1 -> mm0

  movq [ecx+edx], mm4
%endmacro

ALIGN 16
interpolate8x8_avg2_mmx:

  push ebx

  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_avg4_mmx(uint8_t const *dst,
;                              const uint8_t * const src1,
;                              const uint8_t * const src2,
;                              const uint8_t * const src3,
;                              const uint8_t * const src4,
;                              const uint32_t stride,
;                              const uint32_t rounding);
;
;-----------------------------------------------------------------------------
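
; Reference sketch in C (illustrative only, not part of the build), per the
; "(src1 + src2 + src3 + src4 + 2) / 4 -> dst" comment in the macro below:
;
;   dst[j*stride + i] = (src1[j*stride + i] + src2[j*stride + i]
;                        + src3[j*stride + i] + src4[j*stride + i]
;                        + 2 - rounding) >> 2;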
|
|
%macro AVG4_MMX_RND0 0
  movq mm0, [eax]               ; src1 -> mm0

  movq [ecx], mm0               ; (src1 + src2 + src3 + src4 + 2) / 4 -> dst
%endmacro

ALIGN 16
interpolate8x8_avg4_mmx:

  push ebx
  push edi

  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_6tap_lowpass_h_mmx(uint8_t const *dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;-----------------------------------------------------------------------------
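
; Reference sketch in C (illustrative only, not part of the build). Judging
; by the (16 - r) rounder, the mmx_five constant and a final shift by 5, the
; filter looks like the 6-tap lowpass [1, -5, 20, 20, -5, 1]/32; treat the
; exact taps and the edge handling as assumptions:
;
;   t = src[i-2] - 5*src[i-1] + 20*src[i] + 20*src[i+1] - 5*src[i+2]
;     + src[i+3];
;   dst[i] = clamp255((t + 16 - rounding) >> 5);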
|
|
%macro LOWPASS_6TAP_H_MMX 0
  movq mm0, [eax]

  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_6tap_lowpass_h_mmx:

  mov eax, [esp + 16]           ; rounding
|
|

  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_6tap_lowpass_v_mmx(uint8_t const *dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;-----------------------------------------------------------------------------
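
; Same assumed 6-tap lowpass as the horizontal routine above, applied down a
; column (illustrative only, not part of the build):
;
;   t = src[i-2*stride] - 5*src[i-stride] + 20*src[i] + 20*src[i+stride]
;     - 5*src[i+2*stride] + src[i+3*stride];
;   dst[i] = clamp255((t + 16 - rounding) >> 5);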
|
|
%macro LOWPASS_6TAP_V_MMX 0
  movq mm0, [eax]

  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_6tap_lowpass_v_mmx:

  push ebx

  pop ebx
  ret

;===========================================================================
;
; The next functions combine both the source halfpel interpolation step and
; the averaging (with rounding) step, to avoid wasting memory bandwidth on
; computing intermediate halfpel images and then averaging them.
;
;===========================================================================
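
; In C terms, the fused operation is roughly (illustrative only; note the
; TODO in PROLOG below - the rounding argument is not actually consulted, so
; the rounding==0 table entries are always used):
;
;   dst[i] = (dst[i] + halfpel_interpolate(src)[i] + 1) >> 1;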
|
|
%macro PROLOG0 0
  mov ecx, [esp+ 4]             ; Dst
  mov eax, [esp+ 8]             ; Src
  mov edx, [esp+12]             ; BpS
%endmacro

%macro PROLOG 2                 ; %1: Rounder, %2: load Dst-Rounder
  pxor mm6, mm6
  movq mm7, [%1]                ; TODO: dangerous! (eax isn't checked)
%if %2
  movq mm5, [rounding1_mmx]
%endif

  PROLOG0
%endmacro

; performs: mm0 == (mm0+mm2)  mm1 == (mm1+mm3)
%macro MIX 0
  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm0, mm2
  paddusw mm1, mm3
%endmacro

%macro MIX_DST 0
  movq mm3, mm2
  paddusw mm0, mm7              ; rounder
  paddusw mm1, mm7              ; rounder
  punpcklbw mm2, mm6
  punpckhbw mm3, mm6
  psrlw mm0, 1
  psrlw mm1, 1

  paddusw mm0, mm2              ; mix Src(mm0/mm1) with Dst(mm2/mm3)
  paddusw mm1, mm3
  paddusw mm0, mm5
  paddusw mm1, mm5
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
%endmacro

%macro MIX2 0
  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  paddusw mm0, mm2
  paddusw mm0, mm7
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm1, mm7
  paddusw mm1, mm3
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
%endmacro


;===========================================================================
;
; void interpolate8x8_halfpel_add_mmx(uint8_t * const dst,
;                                     const uint8_t * const src,
;                                     const uint32_t stride,
;                                     const uint32_t rounding);
;
;
;===========================================================================
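
; Row-level sketch in C (illustrative only, not part of the build; the 0/0
; "interpolation" is just the source pixel itself):
;
;   dst[i] = (dst[i] + src[i] + 1) >> 1;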
|
|
%macro ADD_FF_MMX 1
  movq mm0, [eax]
  movq mm2, [ecx]
  movq mm1, mm0
  movq mm3, mm2
%if (%1!=0)
  lea eax, [eax+%1*edx]
%endif
  MIX
  paddusw mm0, mm5              ; rounder
  paddusw mm1, mm5              ; rounder
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
  movq [ecx], mm0
%if (%1!=0)
  lea ecx, [ecx+%1*edx]
%endif
%endmacro

ALIGN 16
interpolate8x8_halfpel_add_mmx:
  PROLOG rounding1_mmx, 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 0
  ret


;===========================================================================
;
; void interpolate8x8_halfpel_h_add_mmx(uint8_t * const dst,
;                                       const uint8_t * const src,
;                                       const uint32_t stride,
;                                       const uint32_t rounding);
;
;
;===========================================================================
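
; Row-level sketch in C (illustrative only, not part of the build): the
; horizontal halfpel value first, then the average into dst:
;
;   h = (src[i] + src[i+1] + 1) >> 1;
;   dst[i] = (dst[i] + h + 1) >> 1;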
|
|
%macro ADD_FH_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+1]
  movq mm1, mm0
  movq mm3, mm2

  lea eax, [eax+edx]

  MIX
  movq mm2, [ecx]               ; prepare mix with Dst[0]
  MIX_DST
  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_halfpel_h_add_mmx:
  PROLOG rounding1_mmx, 1

  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  lea ecx, [ecx+edx]
  ADD_FH_MMX
  ret


;===========================================================================
;
; void interpolate8x8_halfpel_v_add_mmx(uint8_t * const dst,
;                                       const uint8_t * const src,
;                                       const uint32_t stride,
;                                       const uint32_t rounding);
;
;
;===========================================================================
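
; Row-level sketch in C (illustrative only, not part of the build): the
; vertical halfpel value first, then the average into dst:
;
;   v = (src[i] + src[i+stride] + 1) >> 1;
;   dst[i] = (dst[i] + v + 1) >> 1;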
|
|
%macro ADD_HF_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  lea eax, [eax+edx]

  MIX
  movq mm2, [ecx]               ; prepare mix with Dst[0]
  MIX_DST
  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_halfpel_v_add_mmx:
  PROLOG rounding1_mmx, 1

  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  lea ecx, [ecx+edx]
  ADD_HF_MMX
  ret

; The trick is to correct the result of 'pavgb' with some combination of the
; lsb's of the 4 input values i,j,k,l, and their intermediate 'pavgb' (s and t).
; The boolean relations are:
;   (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st
;   (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st
;   (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st
;   (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st
; with  s=(i+j+1)/2, t=(k+l+1)/2, ij = i^j, kl = k^l, st = s^t.
;
; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).
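
; Worked check of the last relation, taking the low bit of the correction
; term (added example, not from the original comment):
;   i=0, j=1, k=0, l=0  gives  s = (0+1+1)/2 = 1,  t = (0+0+1)/2 = 0,
;   so (s+t+1)/2 = 1;  ij = 1, kl = 0, st = 1, hence ((ij|kl)|st) & 1 = 1
;   and (i+j+k+l+0)/4 = 1 - 1 = 0, matching the exact average 1/4 = 0.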
|
;===========================================================================
;
; void interpolate8x8_halfpel_hv_add_mmx(uint8_t * const dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;
;===========================================================================
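
; Row-level sketch in C (illustrative only, not part of the build): the 2D
; halfpel value uses the (2 - r) rounder's rounding==0 entry, then the usual
; average into dst:
;
;   hv = (src[i] + src[i+1] + src[i+stride] + src[i+stride+1] + 2) >> 2;
;   dst[i] = (dst[i] + hv + 1) >> 1;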
|
|
%macro ADD_HH_MMX 0
  lea eax, [eax+edx]

  ; transfer prev line to mm0/mm1
  movq mm0, mm2
  movq mm1, mm3

  ; load new line in mm2/mm3
  movq mm2, [eax]
  movq mm4, [eax+1]
  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm6
  punpcklbw mm4, mm6
  paddusw mm2, mm4
  punpckhbw mm3, mm6
  punpckhbw mm5, mm6
  paddusw mm3, mm5

  ; mix current line (mm2/mm3) with previous (mm0,mm1);
  ; we'll preserve mm2/mm3 for next line...

  paddusw mm0, mm2
  paddusw mm1, mm3

  movq mm4, [ecx]               ; prepare mix with Dst[0]
  movq mm5, mm4

  paddusw mm0, mm7              ; finish mixing current line
  paddusw mm1, mm7

  punpcklbw mm4, mm6
  punpckhbw mm5, mm6

  psrlw mm0, 2
  psrlw mm1, 2

  paddusw mm0, mm4              ; mix Src(mm0/mm1) with Dst(mm4/mm5)
  paddusw mm1, mm5

  paddusw mm0, [rounding1_mmx]
  paddusw mm1, [rounding1_mmx]

  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1

  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_halfpel_hv_add_mmx:
  PROLOG rounding2_mmx, 0       ; mm5 is busy. Don't load dst-rounder

  ; preprocess first line
  movq mm0, [eax]
  movq mm2, [eax+1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm2, mm0
  paddusw mm3, mm1

  ; Input: mm2/mm3 contain the value (Src[0]+Src[1]) of the previous line

  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX
  lea ecx, [ecx+edx]
  ADD_HH_MMX

  ret