;/*****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - mmx 8x8 block-based halfpel interpolation -
; *
; *  Copyright(C) 2001 Peter Ross <pross@xvid.org>
; *               2002 Michael Militzer <isibaar@xvid.org>
; *
; *  This program is free software ; you can redistribute it and/or modify
; *  it under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * Under section 8 of the GNU General Public License, the copyright
; * holders of XVID explicitly forbid distribution in the following
; * countries:
; *
; * - Japan
; * - United States of America
; *
; * Linking XviD statically or dynamically with other modules is making a
; * combined work based on XviD. Thus, the terms and conditions of the
; * GNU General Public License cover the whole combination.
; *
; * As a special exception, the copyright holders of XviD give you
; * permission to link XviD with independent modules that communicate with
; * XviD solely through the VFW1.1 and DShow interfaces, regardless of the
; * license terms of these independent modules, and to copy and distribute
; * the resulting combined work under terms of your choice, provided that
; * every copy of the combined work is accompanied by a complete copy of
; * the source code of XviD (the version of XviD used to produce the
; * combined work), being distributed under the terms of the GNU General
; * Public License plus this exception. An independent module is a module
; * which is not derived from or based on XviD.
; *
; * Note that people who make modified versions of XviD are not obligated
; * to grant this special exception for their modified versions; it is
; * their choice whether to do so. The GNU General Public License gives
; * permission to release a modified version without this exception; this
; * exception also makes it possible to release a modified version which
; * carries forward this exception.
; *
; * $Id$
; *
; ****************************************************************************/

BITS 32

%macro cglobal 1
	%ifdef PREFIX
		global _%1
		%define %1 _%1
	%else
		global %1
	%endif
%endmacro
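
; PREFIX is defined on targets whose C symbols carry a leading underscore
; (e.g. Win32); cglobal then exports _name and aliases the plain name to it,
; so the rest of this file can use undecorated names either way.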

;=============================================================================
; Read only data
;=============================================================================

SECTION .rodata

;-----------------------------------------------------------------------------
; (16 - r) rounding table
;-----------------------------------------------------------------------------

ALIGN 16
rounding_lowpass_mmx:
  times 4 dw 16
  times 4 dw 15

;-----------------------------------------------------------------------------
; (1 - r) rounding table
;-----------------------------------------------------------------------------

rounding1_mmx:
  times 4 dw 1
  times 4 dw 0

;-----------------------------------------------------------------------------
; (2 - r) rounding table
;-----------------------------------------------------------------------------

rounding2_mmx:
  times 4 dw 2
  times 4 dw 1
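
; Each rounding table holds two quadword rows: row 0 is used when the
; function's rounding argument is 0, row 1 when it is 1. The code selects a
; row with [table + rounding * 8] (8 bytes per row), so e.g. rounding1_mmx
; yields a per-word bias of (1 - rounding).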

mmx_one:
  times 8 db 1

mmx_two:
  times 8 db 2

mmx_three:
  times 8 db 3

mmx_five:
  times 4 dw 5

mmx_mask:
  times 8 db 254

mmx_mask2:
  times 8 db 252

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal interpolate8x8_halfpel_h_mmx
cglobal interpolate8x8_halfpel_v_mmx
cglobal interpolate8x8_halfpel_hv_mmx
cglobal interpolate8x8_avg4_mmx
cglobal interpolate8x8_avg2_mmx
cglobal interpolate8x8_6tap_lowpass_h_mmx
cglobal interpolate8x8_6tap_lowpass_v_mmx

%macro CALC_AVG 6
  punpcklbw %3, %6
  punpckhbw %4, %6

  paddusw %1, %3 ; mm01 += mm23
  paddusw %2, %4
  paddusw %1, %5 ; mm01 += rounding
  paddusw %2, %5

  psrlw %1, 1 ; mm01 >>= 1
  psrlw %2, 1
%endmacro

;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_h_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------
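
; Reference behaviour (C pseudocode sketch): for each of the 8 rows,
;
;   dst[i] = (src[i] + src[i+1] + 1 - rounding) >> 1;   /* i = 0..7 */
;
; the rounding flag selects the (1 - r) row loaded into mm7 below.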

%macro COPY_H_MMX 0
  movq mm0, [esi]
  add edi, edx ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_h_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16] ; rounding

interpolate8x8_halfpel_h_mmx.start
  movq mm7, [rounding1_mmx + eax * 8]

  mov edi, [esp + 8 + 4] ; dst
  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_v_mmx(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;-----------------------------------------------------------------------------
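
; Reference behaviour (C pseudocode sketch): vertical halfpel, per row
;
;   dst[i] = (src[i] + src[i+stride] + 1 - rounding) >> 1;   /* i = 0..7 */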

%macro COPY_V_MMX 0
  movq mm0, [esi]
  add edi, edx ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_v_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16] ; rounding

interpolate8x8_halfpel_v_mmx.start
  movq mm7, [rounding1_mmx + eax * 8]

  mov edi, [esp + 8 + 4] ; dst
  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_halfpel_hv_mmx(uint8_t * const dst,
;                                    const uint8_t * const src,
;                                    const uint32_t stride,
;                                    const uint32_t rounding);
;
;
;-----------------------------------------------------------------------------
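
; Reference behaviour (C pseudocode sketch): centre halfpel, the average of
; four neighbours, per row
;
;   dst[i] = (src[i] + src[i+1] + src[i+stride] + src[i+stride+1]
;             + 2 - rounding) >> 2;
;
; hence the (2 - r) row of rounding2_mmx loaded into mm7.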

%macro COPY_HV_MMX 0
  ; current row
  movq mm0, [esi]
  movq mm2, [esi + 1]

  paddusw mm1, mm3

  ; next row
  movq mm4, [esi + edx]
  movq mm2, [esi + edx + 1]

  paddusw mm5, mm3

  ; add current + next row
  paddusw mm0, mm4 ; mm01 += mm45
  paddusw mm1, mm5
  paddusw mm0, mm7 ; mm01 += rounding2

  add edi, edx ; dst += stride
%endmacro

ALIGN 16
interpolate8x8_halfpel_hv_mmx:

  push esi
  push edi

  mov eax, [esp + 8 + 16] ; rounding
interpolate8x8_halfpel_hv_mmx.start

  movq mm7, [rounding2_mmx + eax * 8]

  pop esi

  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_avg2_mmx(uint8_t * const dst,
;                              const uint8_t * const src1,
;                              const uint8_t * const src2,
;                              const uint32_t stride,
;                              const uint32_t rounding,
;                              const uint32_t height);
;
;-----------------------------------------------------------------------------
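
; Reference behaviour (C pseudocode sketch): averages two predictions over
; "height" rows,
;
;   dst[i] = (src1[i] + src2[i] + 1 - rounding) >> 1;
;
; computed bytewise without unpacking to words: clearing each byte's lsb
; (mmx_mask = 0xFE) makes (a >> 1) + (b >> 1) safe within a quadword, and
; the dropped bit is re-added as (a | b) & 1 when rounding up (RND0) or
; (a & b) & 1 when rounding down (RND1).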

%macro AVG2_MMX_RND0 0
  movq mm0, [eax] ; src1 -> mm0
  movq mm1, [ebx] ; src2 -> mm1

  movq mm4, [eax+edx]
  movq mm5, [ebx+edx]

  movq mm2, mm0 ; src1 -> mm2
  movq mm3, mm1 ; src2 -> mm3

  pand mm2, mm7 ; isolate the lsb
  pand mm3, mm7 ; isolate the lsb

  por mm2, mm3 ; ODD(src1) OR ODD(src2) -> mm2

  movq mm3, mm4
  movq mm6, mm5

  pand mm3, mm7
  pand mm6, mm7

  por mm3, mm6

  pand mm0, [mmx_mask]
  pand mm1, [mmx_mask]
  pand mm4, [mmx_mask]
  pand mm5, [mmx_mask]

  psrlq mm0, 1 ; src1 / 2
  psrlq mm1, 1 ; src2 / 2

  psrlq mm4, 1
  psrlq mm5, 1

  paddb mm0, mm1 ; src1/2 + src2/2 -> mm0
  paddb mm0, mm2 ; correct rounding error

  paddb mm4, mm5
  paddb mm4, mm3

  lea eax, [eax+2*edx]
  lea ebx, [ebx+2*edx]

  movq [ecx], mm0 ; (src1 + src2 + 1) / 2 -> dst
  movq [ecx+edx], mm4
%endmacro

%macro AVG2_MMX_RND1 0
  movq mm0, [eax] ; src1 -> mm0
  movq mm1, [ebx] ; src2 -> mm1

  movq mm4, [eax+edx]
  movq mm5, [ebx+edx]

  movq mm2, mm0 ; src1 -> mm2
  movq mm3, mm1 ; src2 -> mm3

  pand mm2, mm7 ; isolate the lsb
  pand mm3, mm7 ; isolate the lsb

  pand mm2, mm3 ; ODD(src1) AND ODD(src2) -> mm2

  movq mm3, mm4
  movq mm6, mm5

  pand mm3, mm7
  pand mm6, mm7

  pand mm3, mm6

  pand mm0, [mmx_mask]
  pand mm1, [mmx_mask]
  pand mm4, [mmx_mask]
  pand mm5, [mmx_mask]

  psrlq mm0, 1 ; src1 / 2
  psrlq mm1, 1 ; src2 / 2

  psrlq mm4, 1
  psrlq mm5, 1

  paddb mm0, mm1 ; src1/2 + src2/2 -> mm0
  paddb mm0, mm2 ; correct rounding error

  paddb mm4, mm5
  paddb mm4, mm3

  lea eax, [eax+2*edx]
  lea ebx, [ebx+2*edx]

  movq [ecx], mm0 ; (src1 + src2) / 2 -> dst
  movq [ecx+edx], mm4
%endmacro

ALIGN 16
interpolate8x8_avg2_mmx:

  push ebx

  mov eax, [esp + 4 + 20] ; rounding
  test eax, eax

  jnz near .rounding1

  mov eax, [esp + 4 + 24] ; height -> eax
  sub eax, 8
  test eax, eax ; zero flag survives the movs below

  mov ecx, [esp + 4 + 4] ; dst -> ecx
  mov eax, [esp + 4 + 8] ; src1 -> eax
  mov ebx, [esp + 4 + 12] ; src2 -> ebx
  mov edx, [esp + 4 + 16] ; stride -> edx

  movq mm7, [mmx_one]

  jz near .start0

  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]

.start0

  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND0

  pop ebx
  ret

.rounding1
  mov eax, [esp + 4 + 24] ; height -> eax
  sub eax, 8
  test eax, eax ; zero flag survives the movs below

  mov ecx, [esp + 4 + 4] ; dst -> ecx
  mov eax, [esp + 4 + 8] ; src1 -> eax
  mov ebx, [esp + 4 + 12] ; src2 -> ebx
  mov edx, [esp + 4 + 16] ; stride -> edx

  movq mm7, [mmx_one]

  jz near .start1

  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]

.start1

  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1
  lea ecx, [ecx+2*edx]
  AVG2_MMX_RND1

  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_avg4_mmx(uint8_t * const dst,
;                              const uint8_t * const src1,
;                              const uint8_t * const src2,
;                              const uint8_t * const src3,
;                              const uint8_t * const src4,
;                              const uint32_t stride,
;                              const uint32_t rounding);
;
;-----------------------------------------------------------------------------
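
; Reference behaviour (C pseudocode sketch): averages four predictions,
;
;   dst[i] = (src1[i] + src2[i] + src3[i] + src4[i] + 2 - rounding) >> 2;
;
; again bytewise: each input is split into (x & 0xFC) >> 2 (mmx_mask2) plus
; its two low bits (mmx_three); the high parts are summed directly, while
; the four 2-bit remainders plus the rounding constant (mmx_two or mmx_one)
; are summed, divided by 4 the same way, and added back in.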

%macro AVG4_MMX_RND0 0
  movq mm0, [eax] ; src1 -> mm0
  movq mm1, [ebx] ; src2 -> mm1

  movq mm2, mm0
  movq mm3, mm1

  pand mm2, [mmx_three]
  pand mm3, [mmx_three]

  pand mm0, [mmx_mask2]
  pand mm1, [mmx_mask2]

  psrlq mm0, 2
  psrlq mm1, 2

  lea eax, [eax+edx]
  lea ebx, [ebx+edx]

  paddb mm0, mm1
  paddb mm2, mm3

  movq mm4, [esi] ; src3 -> mm4
  movq mm5, [edi] ; src4 -> mm5

  movq mm1, mm4
  movq mm3, mm5

  pand mm1, [mmx_three]
  pand mm3, [mmx_three]

  pand mm4, [mmx_mask2]
  pand mm5, [mmx_mask2]

  psrlq mm4, 2
  psrlq mm5, 2

  paddb mm4, mm5
  paddb mm0, mm4

  paddb mm1, mm3
  paddb mm2, mm1

  paddb mm2, [mmx_two]
  pand mm2, [mmx_mask2]

  psrlq mm2, 2
  paddb mm0, mm2

  lea esi, [esi+edx]
  lea edi, [edi+edx]

  movq [ecx], mm0 ; (src1 + src2 + src3 + src4 + 2) / 4 -> dst
%endmacro

%macro AVG4_MMX_RND1 0
  movq mm0, [eax] ; src1 -> mm0
  movq mm1, [ebx] ; src2 -> mm1

  movq mm2, mm0
  movq mm3, mm1

  pand mm2, [mmx_three]
  pand mm3, [mmx_three]

  pand mm0, [mmx_mask2]
  pand mm1, [mmx_mask2]

  psrlq mm0, 2
  psrlq mm1, 2

  lea eax, [eax+edx]
  lea ebx, [ebx+edx]

  paddb mm0, mm1
  paddb mm2, mm3

  movq mm4, [esi] ; src3 -> mm4
  movq mm5, [edi] ; src4 -> mm5

  movq mm1, mm4
  movq mm3, mm5

  pand mm1, [mmx_three]
  pand mm3, [mmx_three]

  pand mm4, [mmx_mask2]
  pand mm5, [mmx_mask2]

  psrlq mm4, 2
  psrlq mm5, 2

  paddb mm4, mm5
  paddb mm0, mm4

  paddb mm1, mm3
  paddb mm2, mm1

  paddb mm2, [mmx_one]
  pand mm2, [mmx_mask2]

  psrlq mm2, 2
  paddb mm0, mm2

  lea esi, [esi+edx]
  lea edi, [edi+edx]

  movq [ecx], mm0 ; (src1 + src2 + src3 + src4 + 1) / 4 -> dst
%endmacro

ALIGN 16
interpolate8x8_avg4_mmx:

  push ebx
  push edi
  push esi

  mov eax, [esp + 12 + 28] ; rounding

  test eax, eax

  mov ecx, [esp + 12 + 4] ; dst -> ecx
  mov eax, [esp + 12 + 8] ; src1 -> eax
  mov ebx, [esp + 12 + 12] ; src2 -> ebx
  mov esi, [esp + 12 + 16] ; src3 -> esi
  mov edi, [esp + 12 + 20] ; src4 -> edi
  mov edx, [esp + 12 + 24] ; stride -> edx

  movq mm7, [mmx_one]

  jnz near .rounding1

  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0
  lea ecx, [ecx+edx]
  AVG4_MMX_RND0

  pop esi
  pop edi
  pop ebx
  ret

.rounding1
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1
  lea ecx, [ecx+edx]
  AVG4_MMX_RND1

  pop esi
  pop edi
  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_6tap_lowpass_h_mmx(uint8_t * const dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;-----------------------------------------------------------------------------
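
; Reference behaviour (C pseudocode sketch): the (1, -5, 20, 20, -5, 1) / 32
; 6-tap lowpass filter, applied horizontally,
;
;   dst[i] = clamp((    src[i-2] -  5*src[i-1] + 20*src[i] + 20*src[i+1]
;                    -  5*src[i+2] +   src[i+3] + 16 - rounding) >> 5,
;                  0, 255);
;
; The macro factors this as ((src[i] + src[i+1]) * 4 - (src[i-1] + src[i+2]))
; * 5 + (src[i-2] + src[i+3]), with rounding_lowpass_mmx supplying the
; 16 - rounding term and packuswb doing the clamp.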

%macro LOWPASS_6TAP_H_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+1]

  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7

  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  paddw mm0, mm2
  paddw mm1, mm3

  psllw mm0, 2
  psllw mm1, 2

  movq mm2, [eax-1]
  movq mm4, [eax+2]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4
  paddw mm3, mm5

  psubsw mm0, mm2
  psubsw mm1, mm3

  pmullw mm0, [mmx_five]
  pmullw mm1, [mmx_five]

  movq mm2, [eax-2]
  movq mm4, [eax+3]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4
  paddw mm3, mm5

  paddsw mm0, mm2
  paddsw mm1, mm3

  paddsw mm0, mm6
  paddsw mm1, mm6

  psraw mm0, 5
  psraw mm1, 5

  lea eax, [eax+edx]
  packuswb mm0, mm1
  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_6tap_lowpass_h_mmx:

  mov eax, [esp + 16] ; rounding

  movq mm6, [rounding_lowpass_mmx + eax * 8]

  mov ecx, [esp + 4] ; dst -> ecx
  mov eax, [esp + 8] ; src -> eax
  mov edx, [esp + 12] ; stride -> edx

  pxor mm7, mm7

  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_H_MMX

  ret

;-----------------------------------------------------------------------------
;
; void interpolate8x8_6tap_lowpass_v_mmx(uint8_t * const dst,
;                                        const uint8_t * const src,
;                                        const uint32_t stride,
;                                        const uint32_t rounding);
;
;-----------------------------------------------------------------------------
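
; Reference behaviour (C pseudocode sketch): the same 6-tap filter applied
; vertically,
;
;   dst[i] = clamp((    src[i-2*stride] -  5*src[i-stride] + 20*src[i]
;                    + 20*src[i+stride] -  5*src[i+2*stride] + src[i+3*stride]
;                    + 16 - rounding) >> 5, 0, 255);
;
; ebx is set to 3 * stride below so the outermost taps stay addressable from
; a single base register.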

%macro LOWPASS_6TAP_V_MMX 0
  movq mm0, [eax]
  movq mm2, [eax+edx]

  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7

  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  paddw mm0, mm2
  paddw mm1, mm3

  psllw mm0, 2
  psllw mm1, 2

  movq mm4, [eax+2*edx]
  sub eax, ebx
  movq mm2, [eax+2*edx]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4
  paddw mm3, mm5

  psubsw mm0, mm2
  psubsw mm1, mm3

  pmullw mm0, [mmx_five]
  pmullw mm1, [mmx_five]

  movq mm2, [eax+edx]
  movq mm4, [eax+2*ebx]

  movq mm3, mm2
  movq mm5, mm4

  punpcklbw mm2, mm7
  punpcklbw mm4, mm7

  punpckhbw mm3, mm7
  punpckhbw mm5, mm7

  paddw mm2, mm4
  paddw mm3, mm5

  paddsw mm0, mm2
  paddsw mm1, mm3

  paddsw mm0, mm6
  paddsw mm1, mm6

  psraw mm0, 5
  psraw mm1, 5

  lea eax, [eax+4*edx]
  packuswb mm0, mm1
  movq [ecx], mm0
%endmacro

ALIGN 16
interpolate8x8_6tap_lowpass_v_mmx:

  push ebx

  mov eax, [esp + 4 + 16] ; rounding

  movq mm6, [rounding_lowpass_mmx + eax * 8]

  mov ecx, [esp + 4 + 4] ; dst -> ecx
  mov eax, [esp + 4 + 8] ; src -> eax
  mov edx, [esp + 4 + 12] ; stride -> edx

  mov ebx, edx
  shl ebx, 1
  add ebx, edx ; ebx = 3 * stride

  pxor mm7, mm7

  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX
  lea ecx, [ecx+edx]
  LOWPASS_6TAP_V_MMX

  pop ebx
  ret