@@ line 3 @@
 ; * XVID MPEG-4 VIDEO CODEC
 ; * - mmx 8x8 block-based halfpel interpolation -
 ; *
-; * Copyright(C) 2002 Michael Militzer <isibaar@xvid.org>
+; * Copyright(C) 2002-2008 Michael Militzer <michael@xvid.org>
 ; *               2002 Pascal Massimino <skal@planet-d.net>
 ; *
 ; * This program is free software ; you can redistribute it and/or modify
@@ line 22 @@
 ; *
 ; ****************************************************************************/

-BITS 32
-
-%macro cglobal 1
-  %ifdef PREFIX
-    %ifdef MARK_FUNCS
-      global _%1:function %1.endfunc-%1
-      %define %1 _%1:function %1.endfunc-%1
-      %define ENDFUNC .endfunc
-    %else
-      global _%1
-      %define %1 _%1
-      %define ENDFUNC
-    %endif
-  %else
-    %ifdef MARK_FUNCS
-      global %1:function %1.endfunc-%1
-      %define ENDFUNC .endfunc
-    %else
-      global %1
-      %define ENDFUNC
-    %endif
-  %endif
-%endmacro
+%include "nasm.inc"

 ;=============================================================================
 ; Read only data
 ;=============================================================================

-%ifdef FORMAT_COFF
-SECTION .rodata
-%else
-SECTION .rodata align=16
-%endif
+DATA

-ALIGN 16
+ALIGN SECTION_ALIGN
 mmx_one:
   times 8 db 1

-SECTION .text
+SECTION .rotext align=SECTION_ALIGN

 cglobal interpolate8x8_halfpel_h_xmm
 cglobal interpolate8x8_halfpel_v_xmm
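The two deletions above are the per-file boilerplate this change centralizes: the `cglobal` macro (symbol prefixing plus ELF function-size marking) and the output-format-dependent section directives presumably both move into the shared `nasm.inc`, which then also has to supply the `_EAX`/`TMP0`/`TMP1` register aliases, the `prm1`..`prm4` parameter operands, and `SECTION_ALIGN` used throughout the rest of the patch, so one source assembles for both 32-bit and 64-bit targets. A hypothetical C-preprocessor analogue of the symbol-prefix half of the removed macro:

    /* Hypothetical analogue of the removed cglobal macro: export the
     * symbol with or without a leading underscore, per the target ABI. */
    #ifdef PREFIX
    #  define CGLOBAL(name) _##name
    #else
    #  define CGLOBAL(name) name
    #endif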
@@ line 59 @@
 ;===========================================================================

 %macro COPY_H_SSE_RND0 0
-  movq mm0, [eax]
-  pavgb mm0, [eax+1]
-  movq mm1, [eax+edx]
-  pavgb mm1, [eax+edx+1]
-  lea eax,[eax+2*edx]
-  movq [ecx],mm0
-  movq [ecx+edx],mm1
+  movq mm0, [_EAX]
+  pavgb mm0, [_EAX+1]
+  movq mm1, [_EAX+TMP1]
+  pavgb mm1, [_EAX+TMP1+1]
+  lea _EAX,[_EAX+2*TMP1]
+  movq [TMP0],mm0
+  movq [TMP0+TMP1],mm1
 %endmacro

 %macro COPY_H_SSE_RND1 0
-  movq mm0, [eax]
-  movq mm1, [eax+edx]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+TMP1]
   movq mm4, mm0
   movq mm5, mm1
-  movq mm2, [eax+1]
-  movq mm3, [eax+edx+1]
+  movq mm2, [_EAX+1]
+  movq mm3, [_EAX+TMP1+1]
   pavgb mm0, mm2
   pxor mm2, mm4
   pavgb mm1, mm3
-  lea eax, [eax+2*edx]
+  lea _EAX, [_EAX+2*TMP1]
   pxor mm3, mm5
   pand mm2, mm7
   pand mm3, mm7
   psubb mm0, mm2
-  movq [ecx], mm0
+  movq [TMP0], mm0
   psubb mm1, mm3
-  movq [ecx+edx], mm1
+  movq [TMP0+TMP1], mm1
 %endmacro

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x8_halfpel_h_xmm:

-  mov eax, [esp+16] ; rounding
-  mov ecx, [esp+ 4] ; Dst
-  test eax,eax
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; stride
+  mov _EAX, prm4 ; rounding
+  mov TMP0, prm1 ; Dst
+  test _EAX,_EAX
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; stride

   jnz near .rounding1

   COPY_H_SSE_RND0
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_H_SSE_RND0
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_H_SSE_RND0
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_H_SSE_RND0
   ret

 .rounding1:
 ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
   movq mm7, [mmx_one]
   COPY_H_SSE_RND1
-  lea ecx, [ecx+2*edx]
+  lea TMP0, [TMP0+2*TMP1]
   COPY_H_SSE_RND1
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_H_SSE_RND1
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_H_SSE_RND1
   ret
 ENDFUNC
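The `.rounding1` path rests on the identity quoted in the comment: `pavgb` always computes the rounded-up average (i+j+1)/2, and since i+j and i^j have the same parity, subtracting the low bit of i^j yields the rounded-down average exactly. A minimal C check over all byte pairs (hypothetical test code, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        for (int i = 0; i < 256; i++)
            for (int j = 0; j < 256; j++) {
                uint8_t up = (uint8_t)((i + j + 1) >> 1);  /* what pavgb returns */
                /* subtracting (i^j)&1 turns round-up into round-down */
                assert((uint8_t)(up - ((i ^ j) & 1)) == (uint8_t)((i + j) >> 1));
            }
        return 0;
    }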
@@ line 131 @@
 ;===========================================================================

 %macro COPY_V_SSE_RND0 0
-  movq mm0, [eax]
-  movq mm1, [eax+edx]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+TMP1]
   pavgb mm0, mm1
-  pavgb mm1, [eax+2*edx]
-  lea eax, [eax+2*edx]
-  movq [ecx], mm0
-  movq [ecx+edx],mm1
+  pavgb mm1, [_EAX+2*TMP1]
+  lea _EAX, [_EAX+2*TMP1]
+  movq [TMP0], mm0
+  movq [TMP0+TMP1],mm1
 %endmacro

 %macro COPY_V_SSE_RND1 0
   movq mm0, mm2
-  movq mm1, [eax]
-  movq mm2, [eax+edx]
-  lea eax,[eax+2*edx]
+  movq mm1, [_EAX]
+  movq mm2, [_EAX+TMP1]
+  lea _EAX,[_EAX+2*TMP1]
   movq mm4, mm0
   movq mm5, mm1
   pavgb mm0, mm1
@@ line 154 @@
   pand mm4, mm7 ; lsb's of (i^j)...
   pand mm5, mm7 ; lsb's of (i^j)...
   psubb mm0, mm4 ; ...are subtracted from result of pavgb
-  movq [ecx], mm0
+  movq [TMP0], mm0
   psubb mm1, mm5 ; ...are subtracted from result of pavgb
-  movq [ecx+edx], mm1
+  movq [TMP0+TMP1], mm1
 %endmacro

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x8_halfpel_v_xmm:

-  mov eax, [esp+16]; rounding
-  mov ecx, [esp+ 4] ; Dst
-  test eax,eax
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; stride
+  mov _EAX, prm4 ; rounding
+  mov TMP0, prm1 ; Dst
+  test _EAX,_EAX
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; stride

 ; we process 2 lines at a time
   jnz near .rounding1

   COPY_V_SSE_RND0
-  lea ecx, [ecx+2*edx]
+  lea TMP0, [TMP0+2*TMP1]
   COPY_V_SSE_RND0
-  lea ecx, [ecx+2*edx]
+  lea TMP0, [TMP0+2*TMP1]
   COPY_V_SSE_RND0
-  lea ecx, [ecx+2*edx]
+  lea TMP0, [TMP0+2*TMP1]
   COPY_V_SSE_RND0
   ret

 .rounding1:
 ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
   movq mm7, [mmx_one]
-  movq mm2, [eax] ; loop invariant
-  add eax, edx
+  movq mm2, [_EAX] ; loop invariant
+  add _EAX, TMP1

   COPY_V_SSE_RND1
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_V_SSE_RND1
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_V_SSE_RND1
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_V_SSE_RND1
   ret
 ENDFUNC
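COPY_V_SSE_RND1 emits two output rows per invocation and hands the bottom source row of one step to the next through mm2 (the "loop invariant" comment), so each of the nine source rows is loaded only once. A plain C model of that schedule, with the dst/src/stride/rounding argument roles taken from the prologue comments and the same stride used for source and destination, as in the asm (the helper name is illustrative):

    #include <stdint.h>

    /* C model of the rounding==1 vertical path: two rows per step,
     * the bottom row of one step becomes the top row of the next. */
    static void halfpel_v_round1(uint8_t *dst, const uint8_t *src, int stride)
    {
        const uint8_t *top = src;              /* carried like mm2 */
        for (int y = 0; y < 8; y += 2) {
            const uint8_t *mid = top + stride;
            const uint8_t *bot = mid + stride;
            for (int x = 0; x < 8; x++) {
                dst[x]          = (uint8_t)((top[x] + mid[x]) >> 1); /* round down */
                dst[x + stride] = (uint8_t)((mid[x] + bot[x]) >> 1);
            }
            top = bot;
            dst += 2 * stride;
        }
    }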
@@ line 218 @@
 ; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).

 %macro COPY_HV_SSE_RND0 0
-  lea eax, [eax+edx]
+  lea _EAX, [_EAX+TMP1]

-  movq mm0, [eax]
-  movq mm1, [eax+1]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+1]

   movq mm6, mm0
   pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
-  lea eax, [eax+edx]
+  lea _EAX, [_EAX+TMP1]
   pxor mm1, mm6 ; mm1=(j^k). preserved for next step

   por mm3, mm1 ; ij |= jk
@@ line 236 @@
   pand mm3, mm7 ; mask lsb
   psubb mm2, mm3 ; apply.

-  movq [ecx], mm2
+  movq [TMP0], mm2

-  movq mm2, [eax]
-  movq mm3, [eax+1]
+  movq mm2, [_EAX]
+  movq mm3, [_EAX+1]
   movq mm6, mm2
   pavgb mm2, mm3 ; preserved for next iteration
-  lea ecx,[ecx+edx]
+  lea TMP0,[TMP0+TMP1]
   pxor mm3, mm6 ; preserved for next iteration

   por mm1, mm3
@@ line 254 @@
   pand mm1, mm7
   psubb mm0, mm1

-  movq [ecx], mm0
+  movq [TMP0], mm0
 %endmacro

 %macro COPY_HV_SSE_RND1 0
-  lea eax, [eax+edx]
+  lea _EAX, [_EAX+TMP1]

-  movq mm0, [eax]
-  movq mm1, [eax+1]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+1]

   movq mm6, mm0
   pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
-  lea eax, [eax+edx]
+  lea _EAX, [_EAX+TMP1]
   pxor mm1, mm6 ; mm1=(j^k). preserved for next step

   pand mm3, mm1
@@ line 276 @@
   pand mm3, mm7
   psubb mm2, mm3

-  movq [ecx], mm2
+  movq [TMP0], mm2

-  movq mm2, [eax]
-  movq mm3, [eax+1]
+  movq mm2, [_EAX]
+  movq mm3, [_EAX+1]
   movq mm6, mm2
   pavgb mm2, mm3 ; preserved for next iteration
-  lea ecx,[ecx+edx]
+  lea TMP0,[TMP0+TMP1]
   pxor mm3, mm6 ; preserved for next iteration

   pand mm1, mm3
@@ line 293 @@
   pand mm1, mm7
   psubb mm0, mm1

-  movq [ecx], mm0
+  movq [TMP0], mm0
 %endmacro

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x8_halfpel_hv_xmm:
-  mov eax, [esp+16] ; rounding
-  mov ecx, [esp+ 4] ; Dst
-  test eax, eax
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; stride
+  mov _EAX, prm4 ; rounding
+  mov TMP0, prm1 ; Dst
+  test _EAX, _EAX
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; stride

   movq mm7, [mmx_one]

 ; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
-  movq mm2, [eax]
-  movq mm3, [eax+1]
+  movq mm2, [_EAX]
+  movq mm3, [_EAX+1]
   movq mm6, mm2
   pavgb mm2, mm3
   pxor mm3, mm6 ; mm2/mm3 ready

   jnz near .rounding1

   COPY_HV_SSE_RND0
-  add ecx, edx
+  add TMP0, TMP1
   COPY_HV_SSE_RND0
-  add ecx, edx
+  add TMP0, TMP1
   COPY_HV_SSE_RND0
-  add ecx, edx
+  add TMP0, TMP1
   COPY_HV_SSE_RND0
   ret

 .rounding1:
   COPY_HV_SSE_RND1
-  add ecx, edx
+  add TMP0, TMP1
   COPY_HV_SSE_RND1
-  add ecx, edx
+  add TMP0, TMP1
   COPY_HV_SSE_RND1
-  add ecx, edx
+  add TMP0, TMP1
   COPY_HV_SSE_RND1
   ret
 ENDFUNC
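The diagonal case averages four neighbours per output pixel; the macros get there with two `pavgb`s (one per source row, carried across steps in mm2/mm0) plus the `por`/`pand` bookkeeping on the xor masks that reconstructs the correct final rounding. As a reference point, here is a C version of what the function is expected to produce, assuming the usual MPEG-4 half-pel rule dst = (A+B+C+D+2-rounding)/4 (names illustrative, not the codec's API):

    #include <stdint.h>

    /* Reference for 2D (hv) halfpel interpolation of an 8x8 block,
     * assuming dst = (A + B + C + D + 2 - rounding) >> 2. */
    static void halfpel_hv_ref(uint8_t *dst, const uint8_t *src,
                               int stride, int rounding)
    {
        for (int y = 0; y < 8; y++)
            for (int x = 0; x < 8; x++) {
                int a = src[y * stride + x];
                int b = src[y * stride + x + 1];
                int c = src[(y + 1) * stride + x];
                int d = src[(y + 1) * stride + x + 1];
                dst[y * stride + x] =
                    (uint8_t)((a + b + c + d + 2 - rounding) >> 2);
            }
    }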
@@ line 344 @@
 ;
 ;===========================================================================

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x4_halfpel_h_xmm:

-  mov eax, [esp+16] ; rounding
-  mov ecx, [esp+ 4] ; Dst
-  test eax,eax
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; stride
+  mov _EAX, prm4 ; rounding
+  mov TMP0, prm1 ; Dst
+  test _EAX,_EAX
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; stride

   jnz near .rounding1

   COPY_H_SSE_RND0
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_H_SSE_RND0
   ret

 .rounding1:
 ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
   movq mm7, [mmx_one]
   COPY_H_SSE_RND1
-  lea ecx, [ecx+2*edx]
+  lea TMP0, [TMP0+2*TMP1]
   COPY_H_SSE_RND1
   ret
 ENDFUNC
@@ line 378 @@
 ;
 ;===========================================================================

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x4_halfpel_v_xmm:

-  mov eax, [esp+16]; rounding
-  mov ecx, [esp+ 4] ; Dst
-  test eax,eax
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; stride
+  mov _EAX, prm4 ; rounding
+  mov TMP0, prm1 ; Dst
+  test _EAX,_EAX
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; stride

 ; we process 2 lines at a time
   jnz near .rounding1

   COPY_V_SSE_RND0
-  lea ecx, [ecx+2*edx]
+  lea TMP0, [TMP0+2*TMP1]
   COPY_V_SSE_RND0
   ret

 .rounding1:
 ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
   movq mm7, [mmx_one]
-  movq mm2, [eax] ; loop invariant
-  add eax, edx
+  movq mm2, [_EAX] ; loop invariant
+  add _EAX, TMP1

   COPY_V_SSE_RND1
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   COPY_V_SSE_RND1
   ret
 ENDFUNC
@@ line 428 @@

 ; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x4_halfpel_hv_xmm:
-  mov eax, [esp+16] ; rounding
-  mov ecx, [esp+ 4] ; Dst
-  test eax, eax
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; stride
+  mov _EAX, prm4 ; rounding
+  mov TMP0, prm1 ; Dst
+  test _EAX, _EAX
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; stride

   movq mm7, [mmx_one]

 ; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
-  movq mm2, [eax]
-  movq mm3, [eax+1]
+  movq mm2, [_EAX]
+  movq mm3, [_EAX+1]
   movq mm6, mm2
   pavgb mm2, mm3
   pxor mm3, mm6 ; mm2/mm3 ready

   jnz near .rounding1

   COPY_HV_SSE_RND0
-  add ecx, edx
+  add TMP0, TMP1
   COPY_HV_SSE_RND0
   ret

 .rounding1:
   COPY_HV_SSE_RND1
-  add ecx, edx
+  add TMP0, TMP1
   COPY_HV_SSE_RND1
   ret
 ENDFUNC
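The three 8x4 functions above add no new arithmetic: they invoke the same COPY_* macros as their 8x8 counterparts, unrolled for four rows instead of eight. In C the two block heights collapse into one helper (illustrative sketch, consistent with the rounding behaviour shown earlier):

    #include <stdint.h>

    /* 8x8 and 8x4 horizontal halfpel differ only in the row count. */
    static void halfpel_h_ref(uint8_t *dst, const uint8_t *src,
                              int stride, int rounding, int height)
    {
        for (int y = 0; y < height; y++)       /* 8 for 8x8, 4 for 8x4 */
            for (int x = 0; x < 8; x++)
                dst[y * stride + x] = (uint8_t)((src[y * stride + x]
                    + src[y * stride + x + 1] + 1 - rounding) >> 1);
    }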
@@ line 468 @@
 ;===========================================================================

 %macro PROLOG0 0
-  mov ecx, [esp+ 4] ; Dst
-  mov eax, [esp+ 8] ; Src
-  mov edx, [esp+12] ; BpS
+  mov TMP0, prm1 ; Dst
+  mov _EAX, prm2 ; Src
+  mov TMP1, prm3 ; BpS
 %endmacro

 %macro PROLOG1 0
   PROLOG0
-  test dword [esp+16], 1; Rounding?
+  test prm4d, 1; Rounding?
 %endmacro

 %macro EPILOG 0
   ret
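PROLOG0/PROLOG1/EPILOG factor out the hand-written argument loading seen in every function above; `prm1`..`prm4` hide whether parameters arrive on the stack (x86-32) or in registers (x86-64), and `prm4d` is presumably the 32-bit view of the fourth parameter, replacing the old `dword [esp+16]`. Reading the Dst/Src/BpS/rounding comments back into C, the exported routines share a signature along these lines (presumed, with types guessed from usage):

    #include <stdint.h>

    /* Presumed C prototype shared by the interpolation routines here;
     * prm1..prm4 correspond to these four parameters in order. */
    void interpolate8x8_halfpel_add_xmm(uint8_t *dst, const uint8_t *src,
                                        uint32_t stride, uint32_t rounding);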
@@ line 491 @@
 ;===========================================================================

 %macro ADD_FF 2
-  movq mm0, [eax+%1]
-  movq mm1, [eax+%2]
+  movq mm0, [_EAX+%1]
+  movq mm1, [_EAX+%2]
 ;;---
 ;; movq mm2, mm0
 ;; movq mm3, mm1
 ;;---
-  pavgb mm0, [ecx+%1]
-  pavgb mm1, [ecx+%2]
+  pavgb mm0, [TMP0+%1]
+  pavgb mm1, [TMP0+%2]
 ;;--
-;; por mm2, [ecx+%1]
-;; por mm3, [ecx+%2]
+;; por mm2, [TMP0+%1]
+;; por mm3, [TMP0+%2]
 ;; pand mm2, [mmx_one]
 ;; pand mm3, [mmx_one]
 ;; psubsb mm0, mm2
 ;; psubsb mm1, mm3
 ;;--
-  movq [ecx+%1], mm0
-  movq [ecx+%2], mm1
+  movq [TMP0+%1], mm0
+  movq [TMP0+%2], mm1
 %endmacro

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x8_halfpel_add_xmm: ; 23c
   PROLOG1
-  ADD_FF 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FF 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FF 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FF 0, edx
+  ADD_FF 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FF 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FF 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FF 0, TMP1
   EPILOG
 ENDFUNC
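ADD_FF blends the prediction into the destination rather than overwriting it: each row is `pavgb`ed against the bytes already at `[TMP0+...]` before the store, two rows per macro invocation. The commented-out `por`/`pand`/`psubsb` sequence looks like a disabled exact-rounding correction for that blend, so the live path accepts pavgb's round-up bias. A C sketch of the live path (illustrative):

    #include <stdint.h>

    /* Model of ADD_FF's live path: round-up average of the halfpel
     * prediction with the bytes already in dst. */
    static void halfpel_add_ref(uint8_t *dst, const uint8_t *src, int stride)
    {
        for (int y = 0; y < 8; y++)
            for (int x = 0; x < 8; x++)
                dst[y * stride + x] = (uint8_t)((dst[y * stride + x]
                    + src[y * stride + x] + 1) >> 1);
    }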
|
|
@@ line 539 @@


 %macro ADD_FH_RND0 2
-  movq mm0, [eax+%1]
-  movq mm1, [eax+%2]
-  pavgb mm0, [eax+%1+1]
-  pavgb mm1, [eax+%2+1]
-  pavgb mm0, [ecx+%1]
-  pavgb mm1, [ecx+%2]
-  movq [ecx+%1],mm0
-  movq [ecx+%2],mm1
+  movq mm0, [_EAX+%1]
+  movq mm1, [_EAX+%2]
+  pavgb mm0, [_EAX+%1+1]
+  pavgb mm1, [_EAX+%2+1]
+  pavgb mm0, [TMP0+%1]
+  pavgb mm1, [TMP0+%2]
+  movq [TMP0+%1],mm0
+  movq [TMP0+%2],mm1
 %endmacro

 %macro ADD_FH_RND1 2
-  movq mm0, [eax+%1]
-  movq mm1, [eax+%2]
+  movq mm0, [_EAX+%1]
+  movq mm1, [_EAX+%2]
   movq mm4, mm0
   movq mm5, mm1
-  movq mm2, [eax+%1+1]
-  movq mm3, [eax+%2+1]
+  movq mm2, [_EAX+%1+1]
+  movq mm3, [_EAX+%2+1]
   pavgb mm0, mm2
 ; lea ??
   pxor mm2, mm4
@@ line 565 @@
   pand mm3, [mmx_one]
   psubb mm0, mm2
   psubb mm1, mm3
-  pavgb mm0, [ecx+%1]
-  pavgb mm1, [ecx+%2]
-  movq [ecx+%1],mm0
-  movq [ecx+%2],mm1
+  pavgb mm0, [TMP0+%1]
+  pavgb mm1, [TMP0+%2]
+  movq [TMP0+%1],mm0
+  movq [TMP0+%2],mm1
 %endmacro

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x8_halfpel_h_add_xmm: ; 32c
   PROLOG1
   jnz near .Loop1
-  ADD_FH_RND0 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FH_RND0 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FH_RND0 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FH_RND0 0, edx
+  ADD_FH_RND0 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FH_RND0 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FH_RND0 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FH_RND0 0, TMP1
   EPILOG

 .Loop1:
 ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
 ; movq mm7, [mmx_one]
-  ADD_FH_RND1 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FH_RND1 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FH_RND1 0, edx
-  lea eax,[eax+2*edx]
-  lea ecx,[ecx+2*edx]
-  ADD_FH_RND1 0, edx
+  ADD_FH_RND1 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FH_RND1 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FH_RND1 0, TMP1
+  lea _EAX,[_EAX+2*TMP1]
+  lea TMP0,[TMP0+2*TMP1]
+  ADD_FH_RND1 0, TMP1
   EPILOG
 ENDFUNC
|
|
@@ line 615 @@
 ;===========================================================================

 %macro ADD_8_HF_RND0 0
-  movq mm0, [eax]
-  movq mm1, [eax+edx]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+TMP1]
   pavgb mm0, mm1
-  pavgb mm1, [eax+2*edx]
-  lea eax,[eax+2*edx]
-  pavgb mm0, [ecx]
-  pavgb mm1, [ecx+edx]
-  movq [ecx],mm0
-  movq [ecx+edx],mm1
+  pavgb mm1, [_EAX+2*TMP1]
+  lea _EAX,[_EAX+2*TMP1]
+  pavgb mm0, [TMP0]
+  pavgb mm1, [TMP0+TMP1]
+  movq [TMP0],mm0
+  movq [TMP0+TMP1],mm1
 %endmacro

 %macro ADD_8_HF_RND1 0
-  movq mm1, [eax+edx]
-  movq mm2, [eax+2*edx]
-  lea eax,[eax+2*edx]
+  movq mm1, [_EAX+TMP1]
+  movq mm2, [_EAX+2*TMP1]
+  lea _EAX,[_EAX+2*TMP1]
   movq mm4, mm0
   movq mm5, mm1
   pavgb mm0, mm1
@@ line 639 @@
   pand mm4, mm7 ; lsb's of (i^j)...
   pand mm5, mm7 ; lsb's of (i^j)...
   psubb mm0, mm4 ; ...are subtracted from result of pavgb
-  pavgb mm0, [ecx]
-  movq [ecx], mm0
+  pavgb mm0, [TMP0]
+  movq [TMP0], mm0
   psubb mm1, mm5 ; ...are subtracted from result of pavgb
-  pavgb mm1, [ecx+edx]
-  movq [ecx+edx], mm1
+  pavgb mm1, [TMP0+TMP1]
+  movq [TMP0+TMP1], mm1
 %endmacro

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x8_halfpel_v_add_xmm:
   PROLOG1

   jnz near .Loop1
   pxor mm7, mm7 ; this is a NOP

   ADD_8_HF_RND0
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   ADD_8_HF_RND0
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   ADD_8_HF_RND0
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   ADD_8_HF_RND0
   EPILOG

 .Loop1:
-  movq mm0, [eax] ; loop invariant
+  movq mm0, [_EAX] ; loop invariant
   movq mm7, [mmx_one]

   ADD_8_HF_RND1
   movq mm0, mm2
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   ADD_8_HF_RND1
   movq mm0, mm2
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   ADD_8_HF_RND1
   movq mm0, mm2
-  lea ecx,[ecx+2*edx]
+  lea TMP0,[TMP0+2*TMP1]
   ADD_8_HF_RND1
   EPILOG
 ENDFUNC
@@ line 701 @@
 ;===========================================================================

 %macro ADD_HH_RND0 0
-  lea eax,[eax+edx]
+  lea _EAX,[_EAX+TMP1]

-  movq mm0, [eax]
-  movq mm1, [eax+1]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+1]

   movq mm6, mm0
   pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
-  lea eax,[eax+edx]
+  lea _EAX,[_EAX+TMP1]
   pxor mm1, mm6 ; mm1=(j^k). preserved for next step

   por mm3, mm1 ; ij |= jk
@@ line 719 @@
   pand mm3, mm7 ; mask lsb
   psubb mm2, mm3 ; apply.

-  pavgb mm2, [ecx]
-  movq [ecx], mm2
+  pavgb mm2, [TMP0]
+  movq [TMP0], mm2

-  movq mm2, [eax]
-  movq mm3, [eax+1]
+  movq mm2, [_EAX]
+  movq mm3, [_EAX+1]
   movq mm6, mm2
   pavgb mm2, mm3 ; preserved for next iteration
-  lea ecx,[ecx+edx]
+  lea TMP0,[TMP0+TMP1]
   pxor mm3, mm6 ; preserved for next iteration

   por mm1, mm3
@@ line 738 @@
   pand mm1, mm7
   psubb mm0, mm1

-  pavgb mm0, [ecx]
-  movq [ecx], mm0
+  pavgb mm0, [TMP0]
+  movq [TMP0], mm0
 %endmacro

 %macro ADD_HH_RND1 0
-  lea eax,[eax+edx]
+  lea _EAX,[_EAX+TMP1]

-  movq mm0, [eax]
-  movq mm1, [eax+1]
+  movq mm0, [_EAX]
+  movq mm1, [_EAX+1]

   movq mm6, mm0
   pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
-  lea eax,[eax+edx]
+  lea _EAX,[_EAX+TMP1]
   pxor mm1, mm6 ; mm1=(j^k). preserved for next step

   pand mm3, mm1
@@ line 761 @@
   pand mm3, mm7
   psubb mm2, mm3

-  pavgb mm2, [ecx]
-  movq [ecx], mm2
+  pavgb mm2, [TMP0]
+  movq [TMP0], mm2

-  movq mm2, [eax]
-  movq mm3, [eax+1]
+  movq mm2, [_EAX]
+  movq mm3, [_EAX+1]
   movq mm6, mm2
   pavgb mm2, mm3 ; preserved for next iteration
-  lea ecx,[ecx+edx]
+  lea TMP0,[TMP0+TMP1]
   pxor mm3, mm6 ; preserved for next iteration

   pand mm1, mm3
@@ line 779 @@
   pand mm1, mm7
   psubb mm0, mm1

-  pavgb mm0, [ecx]
-  movq [ecx], mm0
+  pavgb mm0, [TMP0]
+  movq [TMP0], mm0
 %endmacro

-ALIGN 16
+ALIGN SECTION_ALIGN
 interpolate8x8_halfpel_hv_add_xmm:
   PROLOG1

   movq mm7, [mmx_one]

 ; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
-  movq mm2, [eax]
-  movq mm3, [eax+1]
+  movq mm2, [_EAX]
+  movq mm3, [_EAX+1]
   movq mm6, mm2
   pavgb mm2, mm3
   pxor mm3, mm6 ; mm2/mm3 ready

   jnz near .Loop1

   ADD_HH_RND0
-  add ecx, edx
+  add TMP0, TMP1
   ADD_HH_RND0
-  add ecx, edx
+  add TMP0, TMP1
   ADD_HH_RND0
-  add ecx, edx
+  add TMP0, TMP1
   ADD_HH_RND0
   EPILOG

 .Loop1:
   ADD_HH_RND1
-  add ecx, edx
+  add TMP0, TMP1
   ADD_HH_RND1
-  add ecx, edx
+  add TMP0, TMP1
   ADD_HH_RND1
-  add ecx, edx
+  add TMP0, TMP1
   ADD_HH_RND1

   EPILOG