;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - 8<->16 bit transfer functions -
; *
; *  Copyright (C) 2001 Peter Ross <pross@xvid.org>
; *                2001 Michael Militzer <isibaar@xvid.org>
; *                2002 Pascal Massimino <skal@planet-d.net>
; *                2004 Jean-Marc Bastide <jmtest@voila.fr>
; *
; *  This program is free software; you can redistribute it and/or modify
; *  it under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; ***************************************************************************/

BITS 32

%macro cglobal 1
  %ifdef PREFIX
    global _%1
    %define %1 _%1
  %else
    global %1
  %endif
%endmacro
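
; Usage sketch (assumption: PREFIX is defined on targets whose C symbols
; carry a leading underscore, e.g. Win32 COFF builds):
;
;   cglobal transfer_8to16copy_mmx   ; exports _transfer_8to16copy_mmx and
;                                    ; makes the plain name an alias for it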

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata data
%else
SECTION .rodata data align=16
%endif

ALIGN 16
mmx_one: ; rounding constant used by transfer_8to16sub2_mmx
  dw 1, 1, 1, 1

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal transfer_8to16copy_mmx
cglobal transfer_16to8copy_mmx
cglobal transfer_8to16sub_mmx
cglobal transfer_8to16subro_mmx
cglobal transfer_8to16sub2_mmx
cglobal transfer_8to16sub2_xmm
cglobal transfer_16to8add_mmx
cglobal transfer8x8_copy_mmx

;-----------------------------------------------------------------------------
;
; void transfer_8to16copy_mmx(int16_t * const dst,
;                             const uint8_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
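
; In C terms, the routine below widens one 8x8 block of pixels to 16-bit
; coefficients (a reference sketch; the name transfer_8to16copy_c is
; illustrative, not part of this file):
;
;   void transfer_8to16copy_c(int16_t *dst, const uint8_t *src,
;                             uint32_t stride)
;   {
;     int i, j;
;     for (j = 0; j < 8; j++)
;       for (i = 0; i < 8; i++)
;         dst[j * 8 + i] = (int16_t) src[j * stride + i];
;   }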

%macro COPY_8_TO_16 1
  movq mm0, [eax]      ; first row of pixels
  movq mm1, [eax+edx]  ; second row
  movq mm2, mm0
  movq mm3, mm1
  punpcklbw mm0, mm7   ; zero-extend low 4 pixels (mm7 = 0)
  movq [ecx+%1*32], mm0
  punpcklbw mm1, mm7
  movq [ecx+%1*32+16], mm1
  punpckhbw mm2, mm7   ; zero-extend high 4 pixels
  punpckhbw mm3, mm7
  lea eax, [eax+2*edx] ; advance src by two rows
  movq [ecx+%1*32+ 8], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16copy_mmx:

  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16 0
  COPY_8_TO_16 1
  COPY_8_TO_16 2
  COPY_8_TO_16 3
  ret

;-----------------------------------------------------------------------------
;
; void transfer_16to8copy_mmx(uint8_t * const dst,
;                             const int16_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
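
; C-level behaviour of the routine below: saturate each 16-bit coefficient
; back to the 0..255 pixel range (sketch; transfer_16to8copy_c is an
; illustrative name):
;
;   void transfer_16to8copy_c(uint8_t *dst, const int16_t *src,
;                             uint32_t stride)
;   {
;     int i, j, v;
;     for (j = 0; j < 8; j++)
;       for (i = 0; i < 8; i++) {
;         v = src[j * 8 + i];
;         dst[j * stride + i] = (uint8_t) (v < 0 ? 0 : (v > 255 ? 255 : v));
;       }
;   }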

%macro COPY_16_TO_8 1
  movq mm0, [eax+%1*32]
  movq mm1, [eax+%1*32+8]
  packuswb mm0, mm1    ; pack 8 words to 8 bytes with unsigned saturation
  movq [ecx], mm0
  movq mm2, [eax+%1*32+16]
  movq mm3, [eax+%1*32+24]
  packuswb mm2, mm3
  movq [ecx+edx], mm2
%endmacro

ALIGN 16
transfer_16to8copy_mmx:

  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride

  COPY_16_TO_8 0
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 1
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 2
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 3
  ret

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub_mmx(int16_t * const dct,
;                            uint8_t * const cur,
;                            const uint8_t * const ref,
;                            const uint32_t stride);
;
;-----------------------------------------------------------------------------
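
; Both entry points below compute dct = cur - ref; transfer_8to16sub_mmx
; additionally copies ref into cur, while transfer_8to16subro_mmx leaves
; cur untouched ("ro" = read only). C sketch (illustrative name):
;
;   void transfer_8to16sub_c(int16_t *dct, uint8_t *cur,
;                            const uint8_t *ref, uint32_t stride)
;   {
;     int i, j;
;     for (j = 0; j < 8; j++)
;       for (i = 0; i < 8; i++) {
;         dct[j * 8 + i] = (int16_t) cur[j * stride + i]
;                        - (int16_t) ref[j * stride + i];
;         cur[j * stride + i] = ref[j * stride + i]; /* sub only, not subro */
;       }
;   }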
|

; when the second argument is 1, the reference (ebx) block is copied
; into the current (eax) block as a side effect
%macro COPY_8_TO_16_SUB 2
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref

  movq mm6, mm4
%if %2 == 1
  movq [eax], mm4
  movq [eax+edx], mm5
%endif
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4      ; cur - ref, low quad
  psubsw mm1, mm6      ; cur - ref, high quad
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub_mmx:
  mov ecx, [esp + 4] ; Dst
  mov eax, [esp + 8] ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref
  mov edx, [esp+4+16] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB 0, 1
  COPY_8_TO_16_SUB 1, 1
  COPY_8_TO_16_SUB 2, 1
  COPY_8_TO_16_SUB 3, 1

  pop ebx
  ret


ALIGN 16
transfer_8to16subro_mmx:
  mov ecx, [esp + 4] ; Dst
  mov eax, [esp + 8] ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref
  mov edx, [esp+4+16] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB 0, 0
  COPY_8_TO_16_SUB 1, 0
  COPY_8_TO_16_SUB 2, 0
  COPY_8_TO_16_SUB 3, 0

  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_mmx(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;-----------------------------------------------------------------------------
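
; The two sub2 routines below compute dct = cur - (ref1+ref2+1)/2, i.e. the
; residual against an averaged (e.g. half-pel interpolated) prediction.
; C sketch (illustrative name; rounding matches pavgb):
;
;   void transfer_8to16sub2_c(int16_t *dct, uint8_t *cur,
;                             const uint8_t *ref1, const uint8_t *ref2,
;                             uint32_t stride)
;   {
;     int i, j, r;
;     for (j = 0; j < 8; j++)
;       for (i = 0; i < 8; i++) {
;         r = (ref1[j * stride + i] + ref2[j * stride + i] + 1) / 2;
;         dct[j * 8 + i] = (int16_t) (cur[j * stride + i] - r);
;       }
;   }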

%macro COPY_8_TO_16_SUB2_MMX 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]

  ; mm4 <- (ref1+ref2+1) / 2
  movq mm4, [ebx]      ; ref1
  movq mm1, [esi]      ; ref2
  movq mm6, mm4
  movq mm3, mm1
  punpcklbw mm4, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm4, mm1
  paddusw mm6, mm3
  paddusw mm4, [mmx_one] ; +1 so the average rounds up
  paddusw mm6, [mmx_one]
  psrlw mm4, 1
  psrlw mm6, 1
  packuswb mm4, mm6

  ; mm5 <- (ref1+ref2+1) / 2
  movq mm5, [ebx+edx]  ; ref1
  movq mm1, [esi+edx]  ; ref2
  movq mm6, mm5
  movq mm3, mm1
  punpcklbw mm5, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm5, mm1
  paddusw mm6, mm3
  paddusw mm5, [mmx_one]
  paddusw mm6, [mmx_one]
  lea esi, [esi+2*edx]
  psrlw mm5, 1
  psrlw mm6, 1
  packuswb mm5, mm6

  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4      ; cur - average
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub2_mmx:
  mov ecx, [esp + 4] ; Dst
  mov eax, [esp + 8] ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref1
  push esi
  mov esi, [esp+8+16] ; Ref2
  mov edx, [esp+8+20] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB2_MMX 0
  COPY_8_TO_16_SUB2_MMX 1
  COPY_8_TO_16_SUB2_MMX 2
  COPY_8_TO_16_SUB2_MMX 3

  pop esi
  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_xmm(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;-----------------------------------------------------------------------------
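
; Same operation as transfer_8to16sub2_mmx, but the averaging is done with
; the SSE integer instruction pavgb, which computes exactly
;
;   avg = (a + b + 1) >> 1   /* per unsigned byte */
;
; so the explicit widen/add/shift sequence used above is not needed here.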

%macro COPY_8_TO_16_SUB2_SSE 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref1
  pavgb mm4, [esi]     ; (ref1+ref2+1) / 2
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref1
  pavgb mm5, [esi+edx] ; ref2

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4      ; cur - average
  psubsw mm1, mm6
  lea esi, [esi+2*edx]
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub2_xmm:
  mov ecx, [esp + 4] ; Dst
  mov eax, [esp + 8] ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref1
  push esi
  mov esi, [esp+8+16] ; Ref2
  mov edx, [esp+8+20] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB2_SSE 0
  COPY_8_TO_16_SUB2_SSE 1
  COPY_8_TO_16_SUB2_SSE 2
  COPY_8_TO_16_SUB2_SSE 3

  pop esi
  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; void transfer_16to8add_mmx(uint8_t * const dst,
;                            const int16_t * const src,
;                            uint32_t stride);
;
;-----------------------------------------------------------------------------
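
; C-level behaviour of the routine below: add the 16-bit residual to the
; prediction already in dst and clamp to 0..255 (sketch; illustrative name):
;
;   void transfer_16to8add_c(uint8_t *dst, const int16_t *src,
;                            uint32_t stride)
;   {
;     int i, j, v;
;     for (j = 0; j < 8; j++)
;       for (i = 0; i < 8; i++) {
;         v = dst[j * stride + i] + src[j * 8 + i];
;         dst[j * stride + i] = (uint8_t) (v < 0 ? 0 : (v > 255 ? 255 : v));
;       }
;   }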

%macro COPY_16_TO_8_ADD 1
  movq mm0, [ecx]        ; current prediction
  movq mm2, [ecx+edx]
  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  paddsw mm0, [eax+%1*32+ 0] ; add residual
  paddsw mm1, [eax+%1*32+ 8]
  paddsw mm2, [eax+%1*32+16]
  paddsw mm3, [eax+%1*32+24]
  packuswb mm0, mm1      ; clamp to 0..255
  movq [ecx], mm0
  packuswb mm2, mm3
  movq [ecx+edx], mm2
%endmacro

ALIGN 16
transfer_16to8add_mmx:
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride
  pxor mm7, mm7

  COPY_16_TO_8_ADD 0
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 1
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 2
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 3
  ret

;-----------------------------------------------------------------------------
;
; void transfer8x8_copy_mmx(uint8_t * const dst,
;                           const uint8_t * const src,
;                           const uint32_t stride);
;
;-----------------------------------------------------------------------------
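
; Equivalent C: copy one 8x8 block row by row (sketch; illustrative name):
;
;   void transfer8x8_copy_c(uint8_t *dst, const uint8_t *src,
;                           uint32_t stride)
;   {
;     int i, j;
;     for (j = 0; j < 8; j++)
;       for (i = 0; i < 8; i++)
;         dst[j * stride + i] = src[j * stride + i];
;   }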

ALIGN 16
transfer8x8_copy_mmx:
  mov eax, [esp+ 8] ; Src
  mov ecx, [esp+ 4] ; Dst
  mov edx, [esp+12] ; Stride

  ; load all eight source rows into mm0-mm7 ...
  movq mm0, [eax]
  lea eax, [eax+edx]
  movq mm1, [eax]
  lea eax, [eax+edx]
  movq mm2, [eax]
  lea eax, [eax+edx]
  movq mm3, [eax]
  lea eax, [eax+edx]
  movq mm4, [eax]
  lea eax, [eax+edx]
  movq mm5, [eax]
  lea eax, [eax+edx]
  movq mm6, [eax]
  lea eax, [eax+edx]
  movq mm7, [eax]

  ; ... then store them to the destination
  movq [ecx], mm0
  lea ecx, [ecx+edx]
  movq [ecx], mm1
  lea ecx, [ecx+edx]
  movq [ecx], mm2
  lea ecx, [ecx+edx]
  movq [ecx], mm3
  lea ecx, [ecx+edx]
  movq [ecx], mm4
  lea ecx, [ecx+edx]
  movq [ecx], mm5
  lea ecx, [ecx+edx]
  movq [ecx], mm6
  lea ecx, [ecx+edx]
  movq [ecx], mm7

  ret