; * - mmx 8x8 block-based halfpel interpolation -
; *
; * Copyright(C) 2001 Peter Ross <pross@xvid.org>
; *              2002-2008 Michael Militzer <michael@xvid.org>
; *
; * This program is free software ; you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
; *
; ****************************************************************************/

%include "nasm.inc"

;=============================================================================
; Read only data
;=============================================================================

DATA

;-----------------------------------------------------------------------------
; (16 - r) rounding table
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
rounding_lowpass_mmx:
  times 4 dw 16
  times 4 dw 15
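; The rounders are stored as two 8-byte rows so the code can index them
; directly with the rounding flag: [rounding_lowpass_mmx + rounding * 8]
; yields four words of (16 - rounding). Equivalent C lookup (illustrative
; only, not part of the build):
;
;   static const uint16_t rounding_lowpass[2][4] =
;       { {16, 16, 16, 16}, {15, 15, 15, 15} };
;   const uint16_t *rounder = rounding_lowpass[rounding]; /* 16 - rounding */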
;=============================================================================
; Code
;=============================================================================

SECTION .rotext align=SECTION_ALIGN

cglobal interpolate8x8_halfpel_h_mmx
cglobal interpolate8x8_halfpel_v_mmx
cglobal interpolate8x8_halfpel_hv_mmx

cglobal interpolate8x4_halfpel_h_mmx
cglobal interpolate8x4_halfpel_v_mmx
cglobal interpolate8x4_halfpel_hv_mmx

cglobal interpolate8x8_avg4_mmx
cglobal interpolate8x8_avg2_mmx

cglobal interpolate8x8_6tap_lowpass_h_mmx
cglobal interpolate8x8_6tap_lowpass_v_mmx

cglobal interpolate8x8_halfpel_add_mmx
cglobal interpolate8x8_halfpel_h_add_mmx
cglobal interpolate8x8_halfpel_v_add_mmx
cglobal interpolate8x8_halfpel_hv_add_mmx

%macro CALC_AVG 6
  punpcklbw %3, %6
  punpckhbw %4, %6
;-----------------------------------------------------------------------------

%macro COPY_H_MMX 0
  movq mm0, [TMP0]
  movq mm2, [TMP0 + 1]
  movq mm1, mm0
  movq mm3, mm2

  CALC_AVG mm0, mm1, mm2, mm3, mm7, mm6

  packuswb mm0, mm1
  movq [_EAX], mm0             ; [dst] = mm01

  add TMP0, TMP1               ; src += stride
  add _EAX, TMP1               ; dst += stride
%endmacro
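; COPY_H_MMX produces one row of the horizontal halfpel average of src[x]
; and src[x+1], using mm7 as the rounder (loaded from rounding1_mmx, which
; is defined elsewhere and presumably holds 1 - rounding as four words) and
; mm6 as zero. One row as a C sketch (illustrative only, not part of the
; build):
;
;   for (int x = 0; x < 8; x++)
;     dst[x] = (uint8_t)((src[x] + src[x + 1] + 1 - rounding) >> 1);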
ALIGN SECTION_ALIGN
interpolate8x8_halfpel_h_mmx:

  mov _EAX, prm4               ; rounding
  lea TMP0, [rounding1_mmx]
  movq mm7, [TMP0 + _EAX * 8]

  mov _EAX, prm1               ; dst
  mov TMP0, prm2               ; src
  mov TMP1, prm3               ; stride

  pxor mm6, mm6                ; zero

  COPY_H_MMX
  COPY_H_MMX

  ret
ENDFUNC
;-----------------------------------------------------------------------------
;-----------------------------------------------------------------------------

%macro COPY_V_MMX 0
  movq mm0, [TMP0]
  movq mm2, [TMP0 + TMP1]
  movq mm1, mm0
  movq mm3, mm2

  CALC_AVG mm0, mm1, mm2, mm3, mm7, mm6

  packuswb mm0, mm1
  movq [_EAX], mm0             ; [dst] = mm01

  add TMP0, TMP1               ; src += stride
  add _EAX, TMP1               ; dst += stride
%endmacro
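; COPY_V_MMX is the same computation with the second tap taken from the row
; below ([TMP0 + TMP1]) instead of the next column; per row (sketch):
;
;   for (int x = 0; x < 8; x++)
;     dst[x] = (uint8_t)((src[x] + src[x + stride] + 1 - rounding) >> 1);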
ALIGN SECTION_ALIGN
interpolate8x8_halfpel_v_mmx:

  mov _EAX, prm4               ; rounding
  lea TMP0, [rounding1_mmx]
  movq mm7, [TMP0 + _EAX * 8]

  mov _EAX, prm1               ; dst
  mov TMP0, prm2               ; src
  mov TMP1, prm3               ; stride

  pxor mm6, mm6                ; zero

  COPY_V_MMX
  COPY_V_MMX

  ret
ENDFUNC
;-----------------------------------------------------------------------------

%macro COPY_HV_MMX 0
  ; current row
  movq mm0, [TMP0]
  movq mm2, [TMP0 + 1]

  movq mm1, mm0
  movq mm3, mm2

  paddusw mm1, mm3

  ; next row
  movq mm4, [TMP0 + TMP1]
  movq mm2, [TMP0 + TMP1 + 1]

  movq mm5, mm4
  movq mm3, mm2

  psrlw mm1, 2

  packuswb mm0, mm1
  movq [_EAX], mm0             ; [dst] = mm01

  add TMP0, TMP1               ; src += stride
  add _EAX, TMP1               ; dst += stride
%endmacro
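; COPY_HV_MMX averages the 2x2 neighbourhood of each pixel; mm7 is loaded
; from rounding2_mmx (defined elsewhere, presumably 2 - rounding as words)
; to match the final psrlw by 2. One row as a C sketch (illustrative only):
;
;   for (int x = 0; x < 8; x++)
;     dst[x] = (uint8_t)((src[x] + src[x + 1] +
;                         src[x + stride] + src[x + stride + 1] +
;                         2 - rounding) >> 2);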
ALIGN SECTION_ALIGN
interpolate8x8_halfpel_hv_mmx:

  mov _EAX, prm4               ; rounding
  lea TMP0, [rounding2_mmx]
  movq mm7, [TMP0 + _EAX * 8]

  mov _EAX, prm1               ; dst
  mov TMP0, prm2               ; src

  pxor mm6, mm6                ; zero

  mov TMP1, prm3               ; stride

  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX

  ret
ENDFUNC
;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_h_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
interpolate8x4_halfpel_h_mmx:

  mov _EAX, prm4               ; rounding
  lea TMP0, [rounding1_mmx]
  movq mm7, [TMP0 + _EAX * 8]

  mov _EAX, prm1               ; dst
  mov TMP0, prm2               ; src
  mov TMP1, prm3               ; stride

  pxor mm6, mm6                ; zero

  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX
  COPY_H_MMX

  ret
ENDFUNC
;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_v_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
interpolate8x4_halfpel_v_mmx:

  mov _EAX, prm4               ; rounding
  lea TMP0, [rounding1_mmx]
  movq mm7, [TMP0 + _EAX * 8]

  mov _EAX, prm1               ; dst
  mov TMP0, prm2               ; src
  mov TMP1, prm3               ; stride

  pxor mm6, mm6                ; zero

  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX
  COPY_V_MMX

  ret
ENDFUNC
;-----------------------------------------------------------------------------
;
; void interpolate8x4_halfpel_hv_mmx(uint8_t * const dst,
;                                    const uint8_t * const src,
;                                    const uint32_t stride,
;                                    const uint32_t rounding);
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
interpolate8x4_halfpel_hv_mmx:

  mov _EAX, prm4               ; rounding
  lea TMP0, [rounding2_mmx]
  movq mm7, [TMP0 + _EAX * 8]

  mov _EAX, prm1               ; dst
  mov TMP0, prm2               ; src

  pxor mm6, mm6                ; zero

  mov TMP1, prm3               ; stride

  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX
  COPY_HV_MMX

  ret
ENDFUNC
;-----------------------------------------------------------------------------
;
;-----------------------------------------------------------------------------

%macro AVG2_MMX_RND0 0
  movq mm0, [_EAX]             ; src1 -> mm0
  movq mm1, [_EBX]             ; src2 -> mm1

  movq mm4, [_EAX+TMP1]
  movq mm5, [_EBX+TMP1]

  movq mm2, mm0                ; src1 -> mm2
  movq mm3, mm1                ; src2 -> mm3

  paddb mm4, mm5
  paddb mm4, mm3

  lea _EAX, [_EAX+2*TMP1]
  lea _EBX, [_EBX+2*TMP1]

  movq [TMP0], mm0             ; (src1 + src2 + 1) / 2 -> dst
  movq [TMP0+TMP1], mm4
%endmacro
%macro AVG2_MMX_RND1 0
  movq mm0, [_EAX]             ; src1 -> mm0
  movq mm1, [_EBX]             ; src2 -> mm1

  movq mm4, [_EAX+TMP1]
  movq mm5, [_EBX+TMP1]

  movq mm2, mm0                ; src1 -> mm2
  movq mm3, mm1                ; src2 -> mm3

  paddb mm4, mm5
  paddb mm4, mm3

  lea _EAX, [_EAX+2*TMP1]
  lea _EBX, [_EBX+2*TMP1]

  movq [TMP0], mm0             ; (src1 + src2) / 2 -> dst
  movq [TMP0+TMP1], mm4
%endmacro
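; Each AVG2 invocation writes two rows of (src1[x] + src2[x] + r) >> 1:
; RND0 rounds up (r = 1), RND1 truncates (r = 0), as MPEG-4 rounding
; control requires. The elided lines do this byte-wise, presumably using
; the mmx_one constant to handle the low bits without unpacking to words.
; C sketch of one row (illustrative only):
;
;   for (int x = 0; x < 8; x++)
;     dst[x] = (uint8_t)((src1[x] + src2[x] + 1 - rounding) >> 1);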
ALIGN SECTION_ALIGN
interpolate8x8_avg2_mmx:

  mov eax, prm5d               ; rounding
  test _EAX, _EAX

  jnz near .rounding1

  mov eax, prm6d               ; height
  sub _EAX, 8
  test _EAX, _EAX

  mov TMP0, prm1               ; dst
  mov _EAX, prm2               ; src1
  mov TMP1, prm4               ; stride

  push _EBX
%ifdef ARCH_IS_X86_64
  mov _EBX, prm3               ; src2
%else
  mov _EBX, [esp + 4 + 12]     ; src2
%endif

  movq mm7, [mmx_one]

  jz near .start0

  AVG2_MMX_RND0
  lea TMP0, [TMP0+2*TMP1]

.start0:

  AVG2_MMX_RND0
  lea TMP0, [TMP0+2*TMP1]
  AVG2_MMX_RND0
  lea TMP0, [TMP0+2*TMP1]
  AVG2_MMX_RND0
  lea TMP0, [TMP0+2*TMP1]
  AVG2_MMX_RND0

  pop _EBX
  ret

.rounding1:
  mov eax, prm6d               ; height
  sub _EAX, 8
  test _EAX, _EAX

  mov TMP0, prm1               ; dst
  mov _EAX, prm2               ; src1
  mov TMP1, prm4               ; stride

  push _EBX
%ifdef ARCH_IS_X86_64
  mov _EBX, prm3               ; src2
%else
  mov _EBX, [esp + 4 + 12]     ; src2
%endif

  movq mm7, [mmx_one]

  jz near .start1

  AVG2_MMX_RND1
  lea TMP0, [TMP0+2*TMP1]

.start1:

  AVG2_MMX_RND1
  lea TMP0, [TMP0+2*TMP1]
  AVG2_MMX_RND1
  lea TMP0, [TMP0+2*TMP1]
  AVG2_MMX_RND1
  lea TMP0, [TMP0+2*TMP1]
  AVG2_MMX_RND1

  pop _EBX
  ret
ENDFUNC
;-----------------------------------------------------------------------------
;-----------------------------------------------------------------------------

%macro AVG4_MMX_RND0 0
  movq mm0, [_EAX]             ; src1 -> mm0
  movq mm1, [_EBX]             ; src2 -> mm1

  movq mm2, mm0
  movq mm3, mm1

  psrlq mm0, 2
  psrlq mm1, 2

  lea _EAX, [_EAX+TMP1]
  lea _EBX, [_EBX+TMP1]

  paddb mm0, mm1
  paddb mm2, mm3

  movq mm4, [_ESI]             ; src3 -> mm4
  movq mm5, [_EDI]             ; src4 -> mm5

  movq mm1, mm4
  movq mm3, mm5

  psrlq mm2, 2
  paddb mm0, mm2

  lea _ESI, [_ESI+TMP1]
  lea _EDI, [_EDI+TMP1]

  movq [TMP0], mm0             ; (src1 + src2 + src3 + src4 + 2) / 4 -> dst
%endmacro
%macro AVG4_MMX_RND1 0
  movq mm0, [_EAX]             ; src1 -> mm0
  movq mm1, [_EBX]             ; src2 -> mm1

  movq mm2, mm0
  movq mm3, mm1

  psrlq mm0, 2
  psrlq mm1, 2

  lea _EAX, [_EAX+TMP1]
  lea _EBX, [_EBX+TMP1]

  paddb mm0, mm1
  paddb mm2, mm3

  movq mm4, [_ESI]             ; src3 -> mm4
  movq mm5, [_EDI]             ; src4 -> mm5

  movq mm1, mm4
  movq mm3, mm5

  psrlq mm2, 2
  paddb mm0, mm2

  lea _ESI, [_ESI+TMP1]
  lea _EDI, [_EDI+TMP1]

  movq [TMP0], mm0             ; (src1 + src2 + src3 + src4 + 1) / 4 -> dst
%endmacro
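; The AVG4 macros write one row of the four-source average; RND0 adds 2
; before the shift, RND1 adds 1 (MPEG-4 rounding control). C sketch of the
; intended result (illustrative only):
;
;   for (int x = 0; x < 8; x++)
;     dst[x] = (uint8_t)((src1[x] + src2[x] + src3[x] + src4[x] +
;                         2 - rounding) >> 2);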
ALIGN SECTION_ALIGN
interpolate8x8_avg4_mmx:

  mov eax, prm7d               ; rounding
  test _EAX, _EAX

  mov TMP0, prm1               ; dst
  mov _EAX, prm5               ; src4
  mov TMP1d, prm6d             ; stride

  push _EBX
  push _EDI
  push _ESI

  mov _EDI, _EAX               ; src4 -> _EDI

%ifdef ARCH_IS_X86_64
  mov _EAX, prm2               ; src1
  mov _EBX, prm3               ; src2
  mov _ESI, prm4               ; src3
%else
  mov _EAX, [esp + 12 + 8]     ; src1
  mov _EBX, [esp + 12 + 12]    ; src2
  mov _ESI, [esp + 12 + 16]    ; src3
%endif

  movq mm7, [mmx_one]

  jnz near .rounding1

  AVG4_MMX_RND0
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND0
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND0
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND0
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND0
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND0
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND0
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND0

  pop _ESI
  pop _EDI
  pop _EBX
  ret

.rounding1:
  AVG4_MMX_RND1
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND1
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND1
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND1
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND1
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND1
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND1
  lea TMP0, [TMP0+TMP1]
  AVG4_MMX_RND1

  pop _ESI
  pop _EDI
  pop _EBX
  ret
ENDFUNC
;-----------------------------------------------------------------------------
;-----------------------------------------------------------------------------

%macro LOWPASS_6TAP_H_MMX 0
  movq mm0, [_EAX]
  movq mm2, [_EAX+1]

  movq mm1, mm0
  movq mm3, mm2

  psllw mm0, 2
  psllw mm1, 2

  movq mm2, [_EAX-1]
  movq mm4, [_EAX+2]

  movq mm3, mm2
  movq mm5, mm4

  pmullw mm0, [mmx_five]
  pmullw mm1, [mmx_five]

  movq mm2, [_EAX-2]
  movq mm4, [_EAX+3]

  movq mm3, mm2
  movq mm5, mm4

  psraw mm0, 5
  psraw mm1, 5

  lea _EAX, [_EAX+TMP1]
  packuswb mm0, mm1
  movq [TMP0], mm0
%endmacro
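; Together with the elided add/sub steps, each invocation filters one row
; with the 6-tap kernel (1, -5, 20, 20, -5, 1): the psllw-by-2 followed by
; pmullw-by-five computes 20*(s[0]+s[1]) - 5*(s[-1]+s[2]) as
; ((s[0]+s[1])*4 - (s[-1]+s[2])) * 5. One output pixel as a C sketch, with
; the (16 - rounding) rounder in mm6 (illustrative only; packuswb supplies
; the clamp to 0..255):
;
;   int t = (src[x-2] + src[x+3])
;         - 5  * (src[x-1] + src[x+2])
;         + 20 * (src[x]   + src[x+1]);
;   dst[x] = clamp255((t + 16 - rounding) >> 5);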
ALIGN SECTION_ALIGN
interpolate8x8_6tap_lowpass_h_mmx:

  mov _EAX, prm4               ; rounding

  lea TMP0, [rounding_lowpass_mmx]
  movq mm6, [TMP0 + _EAX * 8]

  mov TMP0, prm1               ; dst
  mov _EAX, prm2               ; src
  mov TMP1, prm3               ; stride

  pxor mm7, mm7

  LOWPASS_6TAP_H_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_H_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_H_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_H_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_H_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_H_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_H_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_H_MMX

  ret
ENDFUNC
;-----------------------------------------------------------------------------
;
;-----------------------------------------------------------------------------

%macro LOWPASS_6TAP_V_MMX 0
  movq mm0, [_EAX]
  movq mm2, [_EAX+TMP1]

  movq mm1, mm0
  movq mm3, mm2

  psllw mm0, 2
  psllw mm1, 2

  movq mm4, [_EAX+2*TMP1]
  sub _EAX, _EBX               ; step back 3 rows (_EBX = 3*stride)
  movq mm2, [_EAX+2*TMP1]

  movq mm3, mm2
  movq mm5, mm4

  pmullw mm0, [mmx_five]
  pmullw mm1, [mmx_five]

  movq mm2, [_EAX+TMP1]
  movq mm4, [_EAX+2*_EBX]

  movq mm3, mm2
  movq mm5, mm4

  psraw mm0, 5
  psraw mm1, 5

  lea _EAX, [_EAX+4*TMP1]
  packuswb mm0, mm1
  movq [TMP0], mm0
%endmacro
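; Vertical counterpart of LOWPASS_6TAP_H_MMX: the same (1, -5, 20, 20, -5, 1)
; kernel applied along a column, with the six taps at rows -2..+3 addressed
; through TMP1 (stride) and _EBX (3*stride, set up by the caller).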
ALIGN SECTION_ALIGN
interpolate8x8_6tap_lowpass_v_mmx:

  mov _EAX, prm4               ; rounding

  lea TMP0, [rounding_lowpass_mmx]
  movq mm6, [TMP0 + _EAX * 8]

  mov TMP0, prm1               ; dst
  mov _EAX, prm2               ; src
  mov TMP1, prm3               ; stride

  push _EBX

  mov _EBX, TMP1
  shl _EBX, 1
  add _EBX, TMP1               ; _EBX = 3*stride

  pxor mm7, mm7

  LOWPASS_6TAP_V_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_V_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_V_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_V_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_V_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_V_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_V_MMX
  lea TMP0, [TMP0+TMP1]
  LOWPASS_6TAP_V_MMX

  pop _EBX
  ret
ENDFUNC
;===========================================================================
;
; The next functions combine both the source halfpel interpolation step and
; the averaging (with rounding) step, to avoid wasting memory bandwidth on
; computing intermediate halfpel images and then averaging them.
;
;===========================================================================
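;
; The pattern, in C (illustrative only): form the halfpel value exactly as
; in the plain functions above, then average it into the existing dst with
; +1 rounding. For the horizontal case:
;
;   int s = (src[x] + src[x + 1] + 1 - rounding) >> 1;
;   dst[x] = (uint8_t)((dst[x] + s + 1) >> 1);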
%macro PROLOG0 0
  mov TMP0, prm1               ; Dst
  mov _EAX, prm2               ; Src
  mov TMP1, prm3               ; BpS
%endmacro

%macro PROLOG 2                ; %1: Rounder, %2 load Dst-Rounder
  pxor mm6, mm6
  movq mm7, [%1]               ; TODO: dangerous! (eax isn't checked)
%if %2
  movq mm5, [rounding1_mmx]
%endif

  PROLOG0
%endmacro

; performs: mm0 = (mm0+mm2), mm1 = (mm1+mm3)
%macro MIX 0
  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm0, mm2
  paddusw mm1, mm3
%endmacro
%macro MIX_DST 0
  movq mm3, mm2
  paddusw mm0, mm7             ; rounder
  paddusw mm1, mm7             ; rounder
  punpcklbw mm2, mm6
  punpckhbw mm3, mm6
  psrlw mm0, 1
  psrlw mm1, 1

  paddusw mm0, mm2             ; mix Src(mm0/mm1) with Dst(mm2/mm3)
  paddusw mm1, mm3
  paddusw mm0, mm5
  paddusw mm1, mm5
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
%endmacro

%macro MIX2 0
  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  paddusw mm0, mm2
  paddusw mm0, mm7
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm1, mm7
  paddusw mm1, mm3
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
%endmacro
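; MIX_DST completes the fused average: mm0/mm1 (the Src sums) are halved
; with the mm7 rounder, the unpacked Dst bytes and the mm5 dst-rounder are
; added, and the result is halved again and repacked. MIX2 is the variant
; without the Dst pass, for a plain halfpel average.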
;===========================================================================
;
; void interpolate8x8_halfpel_add_mmx(uint8_t * const dst,
;                                     const uint8_t * const src,
;                                     const uint32_t stride,
;                                     const uint32_t rounding);
;
;===========================================================================

%macro ADD_FF_MMX 1
  movq mm0, [_EAX]
  movq mm2, [TMP0]
  movq mm1, mm0
  movq mm3, mm2
%if (%1!=0)
  lea _EAX, [_EAX+%1*TMP1]
%endif
  MIX
  paddusw mm0, mm5             ; rounder
  paddusw mm1, mm5             ; rounder
  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1
  movq [TMP0], mm0
%if (%1!=0)
  lea TMP0, [TMP0+%1*TMP1]
%endif
%endmacro

ALIGN SECTION_ALIGN
interpolate8x8_halfpel_add_mmx:
  PROLOG rounding1_mmx, 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 1
  ADD_FF_MMX 0
  ret
ENDFUNC
;===========================================================================
;
; void interpolate8x8_halfpel_h_add_mmx(uint8_t * const dst,
;                                       const uint8_t * const src,
;                                       const uint32_t stride,
;                                       const uint32_t rounding);
;
;===========================================================================

%macro ADD_FH_MMX 0
  movq mm0, [_EAX]
  movq mm2, [_EAX+1]
  movq mm1, mm0
  movq mm3, mm2

  lea _EAX, [_EAX+TMP1]

  MIX
  movq mm2, [TMP0]             ; prepare mix with Dst[0]
  MIX_DST
  movq [TMP0], mm0
%endmacro

ALIGN SECTION_ALIGN
interpolate8x8_halfpel_h_add_mmx:
  PROLOG rounding1_mmx, 1

  ADD_FH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_FH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_FH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_FH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_FH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_FH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_FH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_FH_MMX

  ret
ENDFUNC
;===========================================================================
;
; void interpolate8x8_halfpel_v_add_mmx(uint8_t * const dst,
;                                       const uint8_t * const src,
;                                       const uint32_t stride,
;                                       const uint32_t rounding);
;
;===========================================================================

%macro ADD_HF_MMX 0
  movq mm0, [_EAX]
  movq mm2, [_EAX+TMP1]
  movq mm1, mm0
  movq mm3, mm2

  lea _EAX, [_EAX+TMP1]

  MIX
  movq mm2, [TMP0]             ; prepare mix with Dst[0]
  MIX_DST
  movq [TMP0], mm0
%endmacro

ALIGN SECTION_ALIGN
interpolate8x8_halfpel_v_add_mmx:
  PROLOG rounding1_mmx, 1

  ADD_HF_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HF_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HF_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HF_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HF_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HF_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HF_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HF_MMX

  ret
ENDFUNC
; The trick is to correct the result of 'pavgb' with some combination of the
; lsb's of the 4 input values i,j,k,l, and their intermediate 'pavgb' (s and t).
; The boolean relations are:
;   (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st
;   (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st
;   (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st
;   (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st
; with s=(i+j+1)/2, t=(k+l+1)/2, ij = i^j, kl = k^l, st = s^t.

; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).
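;
; Here ij, kl and st denote the low bits of i^j, k^l and s^t. The first
; relation can be checked exhaustively in C (illustrative only; the full
; 2^32 loop is slow but finite):
;
;   #include <assert.h>
;   for (int i = 0; i < 256; i++)
;    for (int j = 0; j < 256; j++)
;     for (int k = 0; k < 256; k++)
;      for (int l = 0; l < 256; l++) {
;        int s = (i + j + 1) >> 1, t = (k + l + 1) >> 1;
;        int c = (i ^ j) & (k ^ l) & (s ^ t) & 1;
;        assert((i + j + k + l + 3) / 4 == ((s + t + 1) >> 1) - c);
;      }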
;===========================================================================
;
; void interpolate8x8_halfpel_hv_add_mmx(uint8_t * const dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;===========================================================================

%macro ADD_HH_MMX 0
  lea _EAX, [_EAX+TMP1]

  ; transfer prev line to mm0/mm1
  movq mm0, mm2
  movq mm1, mm3

  ; load new line in mm2/mm3
  movq mm2, [_EAX]
  movq mm4, [_EAX+1]
  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm6
  punpcklbw mm4, mm6
  paddusw mm2, mm4
  punpckhbw mm3, mm6
  punpckhbw mm5, mm6
  paddusw mm3, mm5

  ; mix current line (mm2/mm3) with previous (mm0/mm1);
  ; we'll preserve mm2/mm3 for the next line...

  paddusw mm0, mm2
  paddusw mm1, mm3

  movq mm4, [TMP0]             ; prepare mix with Dst[0]
  movq mm5, mm4

  paddusw mm0, mm7             ; finish mixing current line
  paddusw mm1, mm7

  punpcklbw mm4, mm6
  punpckhbw mm5, mm6

  psrlw mm0, 2
  psrlw mm1, 2

  paddusw mm0, mm4             ; mix Src(mm0/mm1) with Dst(mm4/mm5)
  paddusw mm1, mm5

  paddusw mm0, [rounding1_mmx]
  paddusw mm1, [rounding1_mmx]

  psrlw mm0, 1
  psrlw mm1, 1

  packuswb mm0, mm1

  movq [TMP0], mm0
%endmacro

ALIGN SECTION_ALIGN
interpolate8x8_halfpel_hv_add_mmx:
  PROLOG rounding2_mmx, 0      ; mm5 is busy. Don't load the dst-rounder

  ; preprocess the first line
  movq mm0, [_EAX]
  movq mm2, [_EAX+1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm6
  punpcklbw mm2, mm6
  punpckhbw mm1, mm6
  punpckhbw mm3, mm6
  paddusw mm2, mm0
  paddusw mm3, mm1

  ; Input: mm2/mm3 contain the value (Src[0]+Src[1]) of the previous line

  ADD_HH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HH_MMX
  lea TMP0, [TMP0+TMP1]
  ADD_HH_MMX

  ret
ENDFUNC


%ifidn __OUTPUT_FORMAT__,elf
section ".note.GNU-stack" noalloc noexec nowrite progbits
%endif