;/****************************************************************************
; *
; * XVID MPEG-4 VIDEO CODEC
; * - 8<->16 bit transfer functions -
; *
; * Copyright (C) 2001 Peter Ross <pross@xvid.org>
; *               2001 Michael Militzer <isibaar@xvid.org>
; *               2002 Pascal Massimino <skal@planet-d.net>
; *
; * This program is free software; you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
; * the Free Software Foundation; either version 2 of the License, or
; * (at your option) any later version.
; *
; * This program is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with this program; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; ***************************************************************************/

BITS 32

%macro cglobal 1
  %ifdef PREFIX
    %ifdef MARK_FUNCS
      global _%1:function %1.endfunc-%1
      %define %1 _%1:function %1.endfunc-%1
      %define ENDFUNC .endfunc
    %else
      global _%1
      %define %1 _%1
      %define ENDFUNC
    %endif
  %else
    %ifdef MARK_FUNCS
      global %1:function %1.endfunc-%1
      %define ENDFUNC .endfunc
    %else
      global %1
      %define ENDFUNC
    %endif
  %endif
%endmacro
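
; Example: with PREFIX and MARK_FUNCS both defined, "cglobal foo" expands to
;   global _foo:function foo.endfunc-foo
;   %define foo _foo:function foo.endfunc-foo
;   %define ENDFUNC .endfunc
; i.e. the symbol gains a leading underscore and an ELF function-size
; annotation covering everything up to its .endfunc label.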

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

ALIGN 16
mmx_one:
  dw 1, 1, 1, 1

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal transfer_8to16copy_mmx
cglobal transfer_16to8copy_mmx
cglobal transfer_8to16sub_mmx
cglobal transfer_8to16subro_mmx
cglobal transfer_8to16sub2_mmx
cglobal transfer_8to16sub2_xmm
cglobal transfer_8to16sub2ro_xmm
cglobal transfer_16to8add_mmx
cglobal transfer8x8_copy_mmx
cglobal transfer8x4_copy_mmx

;-----------------------------------------------------------------------------
;
; void transfer_8to16copy_mmx(int16_t * const dst,
;                             const uint8_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
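
; Illustrative C equivalent (a sketch only; "transfer_8to16copy_ref" is a
; hypothetical name): widen an 8x8 block of bytes into 8x8 words.
;
;   void transfer_8to16copy_ref(int16_t *dst, const uint8_t *src,
;                               uint32_t stride) {
;     for (int j = 0; j < 8; j++)
;       for (int i = 0; i < 8; i++)
;         dst[j*8 + i] = src[j*stride + i];
;   }
;
; The MMX macro below produces the same result two rows at a time.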
|
|
%macro COPY_8_TO_16 1
  movq mm0, [eax]           ; first row of 8 source bytes
  movq mm1, [eax+edx]       ; second row
  movq mm2, mm0
  movq mm3, mm1
  punpcklbw mm0, mm7        ; zero-extend bytes to words (mm7 == 0)
  punpcklbw mm1, mm7
  punpckhbw mm2, mm7
  punpckhbw mm3, mm7
  lea eax, [eax+2*edx]
  movq [ecx+%1*32+ 0], mm0
  movq [ecx+%1*32+ 8], mm2
  movq [ecx+%1*32+16], mm1
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16copy_mmx:

  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride
  pxor mm7, mm7     ; zero, for byte->word unpacking

  COPY_8_TO_16 0
  COPY_8_TO_16 1
  COPY_8_TO_16 2
  COPY_8_TO_16 3
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_16to8copy_mmx(uint8_t * const dst,
;                             const int16_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
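
; Illustrative C equivalent (a sketch only; packuswb saturates each word to
; the 0..255 range):
;
;   void transfer_16to8copy_ref(uint8_t *dst, const int16_t *src,
;                               uint32_t stride) {
;     for (int j = 0; j < 8; j++)
;       for (int i = 0; i < 8; i++) {
;         int16_t v = src[j*8 + i];
;         dst[j*stride + i] = (v < 0) ? 0 : (v > 255) ? 255 : (uint8_t)v;
;       }
;   }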
|
|
%macro COPY_16_TO_8 1
  movq mm0, [eax+%1*32]
  movq mm1, [eax+%1*32+8]
  packuswb mm0, mm1         ; pack 8 words into 8 bytes, unsigned saturation
  movq [ecx], mm0
  movq mm2, [eax+%1*32+16]
  movq mm3, [eax+%1*32+24]
  packuswb mm2, mm3
  movq [ecx+edx], mm2
%endmacro

ALIGN 16
transfer_16to8copy_mmx:

  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride

  COPY_16_TO_8 0
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 1
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 2
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 3
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub_mmx(int16_t * const dct,
;                            uint8_t * const cur,
;                            const uint8_t * const ref,
;                            const uint32_t stride);
;
;-----------------------------------------------------------------------------
|
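; Illustrative C equivalent (a sketch only): dct receives cur minus ref and,
; in the non-"ro" variant, cur is overwritten with ref:
;
;   void transfer_8to16sub_ref(int16_t *dct, uint8_t *cur, const uint8_t *ref,
;                              uint32_t stride, int write_back) {
;     for (int j = 0; j < 8; j++)
;       for (int i = 0; i < 8; i++) {
;         uint8_t c = cur[j*stride + i], r = ref[j*stride + i];
;         dct[j*8 + i] = (int16_t)c - (int16_t)r;
;         if (write_back)            // 0 for transfer_8to16subro_mmx
;           cur[j*stride + i] = r;
;       }
;   }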
|
|
; When the second macro argument == 1, the reference block (ebx) is also
; copied into the current block (eax).
%macro COPY_8_TO_16_SUB 2
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref

  movq mm6, mm4
%if %2 == 1
  movq [eax], mm4      ; write the reference rows back into cur
  movq [eax+edx], mm5
%endif
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub_mmx:
  mov ecx, [esp + 4] ; Dst
  mov eax, [esp + 8] ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref
  mov edx, [esp+4+16] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB 0, 1
  COPY_8_TO_16_SUB 1, 1
  COPY_8_TO_16_SUB 2, 1
  COPY_8_TO_16_SUB 3, 1

  pop ebx
  ret
ENDFUNC

ALIGN 16
transfer_8to16subro_mmx:
  mov ecx, [esp + 4] ; Dst
  mov eax, [esp + 8] ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref
  mov edx, [esp+4+16] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB 0, 0
  COPY_8_TO_16_SUB 1, 0
  COPY_8_TO_16_SUB 2, 0
  COPY_8_TO_16_SUB 3, 0

  pop ebx
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_mmx(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;-----------------------------------------------------------------------------
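
; Illustrative C equivalent (a sketch only): cur is replaced by the rounded
; average of the two references, and dct receives cur minus that average:
;
;   void transfer_8to16sub2_ref(int16_t *dct, uint8_t *cur,
;                               const uint8_t *ref1, const uint8_t *ref2,
;                               uint32_t stride) {
;     for (int j = 0; j < 8; j++)
;       for (int i = 0; i < 8; i++) {
;         uint8_t r = (ref1[j*stride + i] + ref2[j*stride + i] + 1) >> 1;
;         dct[j*8 + i] = (int16_t)cur[j*stride + i] - r;
;         cur[j*stride + i] = r;
;       }
;   }
;
; The "+1" rounding bias is what the mmx_one constant provides below.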
|
|
%macro COPY_8_TO_16_SUB2_MMX 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]

  ; mm4 <- (ref1+ref2+1) / 2
  movq mm4, [ebx]      ; ref1
  movq mm1, [esi]      ; ref2
  movq mm6, mm4
  movq mm3, mm1
  punpcklbw mm4, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm4, mm1
  paddusw mm6, mm3
  paddusw mm4, [mmx_one]  ; +1 for rounding
  paddusw mm6, [mmx_one]
  psrlw mm4, 1
  psrlw mm6, 1
  packuswb mm4, mm6
  movq [eax], mm4         ; cur <- average

  ; mm5 <- (ref1+ref2+1) / 2
  movq mm5, [ebx+edx]  ; ref1
  movq mm1, [esi+edx]  ; ref2
  movq mm6, mm5
  movq mm3, mm1
  punpcklbw mm5, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm5, mm1
  paddusw mm6, mm3
  paddusw mm5, [mmx_one]
  paddusw mm6, [mmx_one]
  lea esi, [esi+2*edx]
  psrlw mm5, 1
  psrlw mm6, 1
  packuswb mm5, mm6
  movq [eax+edx], mm5     ; cur <- average

  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub2_mmx:
  mov ecx, [esp + 4] ; Dst
  mov eax, [esp + 8] ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref1
  push esi
  mov esi, [esp+8+16] ; Ref2
  mov edx, [esp+8+20] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB2_MMX 0
  COPY_8_TO_16_SUB2_MMX 1
  COPY_8_TO_16_SUB2_MMX 2
  COPY_8_TO_16_SUB2_MMX 3

  pop esi
  pop ebx
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_xmm(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;-----------------------------------------------------------------------------
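
; Same operation as transfer_8to16sub2_mmx, but the rounded average comes from
; the SSE integer instruction pavgb, which computes (a + b + 1) >> 1 per byte;
; e.g. pavgb of 3 and 4 gives 4. This replaces the whole unpack/paddusw/psrlw
; sequence of the MMX version.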
|
|
%macro COPY_8_TO_16_SUB2_SSE 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref1
  pavgb mm4, [esi]     ; ref2
  movq [eax], mm4      ; cur <- average
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref1
  pavgb mm5, [esi+edx] ; ref2
  movq [eax+edx], mm5

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  lea esi, [esi+2*edx]
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub2_xmm:
  mov ecx, [esp + 4] ; Dst
  mov eax, [esp + 8] ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref1
  push esi
  mov esi, [esp+8+16] ; Ref2
  mov edx, [esp+8+20] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB2_SSE 0
  COPY_8_TO_16_SUB2_SSE 1
  COPY_8_TO_16_SUB2_SSE 2
  COPY_8_TO_16_SUB2_SSE 3

  pop esi
  pop ebx
  ret
ENDFUNC

408 |
|
|
409 |
;=========================================================================== |
;----------------------------------------------------------------------------- |
410 |
|
; |
411 |
|
; void transfer_8to16sub2ro_xmm(int16_t * const dct, |
412 |
|
; const uint8_t * const cur, |
413 |
|
; const uint8_t * ref1, |
414 |
|
; const uint8_t * ref2, |
415 |
|
; const uint32_t stride) |
416 |
|
; |
417 |
|
;----------------------------------------------------------------------------- |
418 |
|
|
419 |
|
%macro COPY_8_TO_16_SUB2RO_SSE 1 |
420 |
|
movq mm0, [eax] ; cur |
421 |
|
movq mm2, [eax+edx] |
422 |
|
movq mm1, mm0 |
423 |
|
movq mm3, mm2 |
424 |
|
|
425 |
|
punpcklbw mm0, mm7 |
426 |
|
punpcklbw mm2, mm7 |
427 |
|
movq mm4, [ebx] ; ref1 |
428 |
|
pavgb mm4, [esi] ; ref2 |
429 |
|
punpckhbw mm1, mm7 |
430 |
|
punpckhbw mm3, mm7 |
431 |
|
movq mm5, [ebx+edx] ; ref |
432 |
|
pavgb mm5, [esi+edx] ; ref2 |
433 |
|
|
434 |
|
movq mm6, mm4 |
435 |
|
punpcklbw mm4, mm7 |
436 |
|
punpckhbw mm6, mm7 |
437 |
|
psubsw mm0, mm4 |
438 |
|
psubsw mm1, mm6 |
439 |
|
lea esi, [esi+2*edx] |
440 |
|
movq mm6, mm5 |
441 |
|
punpcklbw mm5, mm7 |
442 |
|
punpckhbw mm6, mm7 |
443 |
|
psubsw mm2, mm5 |
444 |
|
lea eax, [eax+2*edx] |
445 |
|
psubsw mm3, mm6 |
446 |
|
lea ebx, [ebx+2*edx] |
447 |
|
|
448 |
|
movq [ecx+%1*32+ 0], mm0 ; dst |
449 |
|
movq [ecx+%1*32+ 8], mm1 |
450 |
|
movq [ecx+%1*32+16], mm2 |
451 |
|
movq [ecx+%1*32+24], mm3 |
452 |
|
%endmacro |
453 |
|
|
454 |
|
ALIGN 16 |
455 |
|
transfer_8to16sub2ro_xmm: |
456 |
|
pxor mm7, mm7 |
457 |
|
mov ecx, [esp + 4] ; Dst |
458 |
|
mov eax, [esp + 8] ; Cur |
459 |
|
push ebx |
460 |
|
mov ebx, [esp+4+12] ; Ref1 |
461 |
|
push esi |
462 |
|
mov esi, [esp+8+16] ; Ref2 |
463 |
|
mov edx, [esp+8+20] ; Stride |
464 |
|
|
465 |
|
COPY_8_TO_16_SUB2RO_SSE 0 |
466 |
|
COPY_8_TO_16_SUB2RO_SSE 1 |
467 |
|
COPY_8_TO_16_SUB2RO_SSE 2 |
468 |
|
COPY_8_TO_16_SUB2RO_SSE 3 |
469 |
|
|
470 |
|
pop esi |
471 |
|
pop ebx |
472 |
|
ret |
473 |
|
ENDFUNC |
474 |
|
|
475 |
|
|
;-----------------------------------------------------------------------------
;
; void transfer_16to8add_mmx(uint8_t * const dst,
;                            const int16_t * const src,
;                            uint32_t stride);
;
;-----------------------------------------------------------------------------
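
; Illustrative C equivalent (a sketch only): add the 16-bit residual to the
; 8-bit prediction, clamping the result to 0..255 (paddsw + packuswb):
;
;   void transfer_16to8add_ref(uint8_t *dst, const int16_t *src,
;                              uint32_t stride) {
;     for (int j = 0; j < 8; j++)
;       for (int i = 0; i < 8; i++) {
;         int v = dst[j*stride + i] + src[j*8 + i];
;         dst[j*stride + i] = (v < 0) ? 0 : (v > 255) ? 255 : (uint8_t)v;
;       }
;   }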
|
|
%macro COPY_16_TO_8_ADD 1
  movq mm0, [ecx]           ; prediction bytes
  movq mm2, [ecx+edx]
  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  paddsw mm0, [eax+%1*32+ 0] ; add residual words
  paddsw mm1, [eax+%1*32+ 8]
  paddsw mm2, [eax+%1*32+16]
  paddsw mm3, [eax+%1*32+24]
  packuswb mm0, mm1          ; clamp to 0..255
  movq [ecx], mm0
  packuswb mm2, mm3
  movq [ecx+edx], mm2
%endmacro

ALIGN 16
transfer_16to8add_mmx:
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride
  pxor mm7, mm7

  COPY_16_TO_8_ADD 0
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 1
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 2
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 3
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer8x8_copy_mmx(uint8_t * const dst,
;                           const uint8_t * const src,
;                           const uint32_t stride);
;
;-----------------------------------------------------------------------------
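
; Illustrative C equivalent (a sketch only; transfer8x4_copy_mmx further down
; is the same loop over 4 rows instead of 8):
;
;   void transfer8x8_copy_ref(uint8_t *dst, const uint8_t *src,
;                             uint32_t stride) {
;     for (int j = 0; j < 8; j++)
;       for (int i = 0; i < 8; i++)
;         dst[j*stride + i] = src[j*stride + i];
;   }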
|
|
%macro COPY_8_TO_8 0
  movq mm0, [eax]
  movq mm1, [eax+edx]
  movq [ecx], mm0
  lea eax, [eax+2*edx]
  movq [ecx+edx], mm1
%endmacro

ALIGN 16
transfer8x8_copy_mmx:
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride

  COPY_8_TO_8
  lea ecx, [ecx+2*edx]
  COPY_8_TO_8
  lea ecx, [ecx+2*edx]
  COPY_8_TO_8
  lea ecx, [ecx+2*edx]
  COPY_8_TO_8
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer8x4_copy_mmx(uint8_t * const dst,
;                           const uint8_t * const src,
;                           const uint32_t stride);
;
;-----------------------------------------------------------------------------

ALIGN 16
transfer8x4_copy_mmx:
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride

  COPY_8_TO_8
  lea ecx, [ecx+2*edx]
  COPY_8_TO_8
  ret
ENDFUNC

%ifidn __OUTPUT_FORMAT__,elf
section ".note.GNU-stack" noalloc noexec nowrite progbits
%endif