;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - 8<->16 bit transfer functions -
; *
; *  Copyright (C) 2001 Peter Ross <pross@xvid.org>
; *                2001 Michael Militzer <isibaar@xvid.org>
; *                2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This program is free software; you can redistribute it and/or modify
; *  it under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; ***************************************************************************/

BITS 32

; Helper: declare a global symbol, adding the underscore prefix that some
; object formats expect (PREFIX) and optionally marking the symbol as an
; ELF function (MARK_FUNCS)
%macro cglobal 1
  %ifdef PREFIX
    %ifdef MARK_FUNCS
      global _%1:function
      %define %1 _%1:function
    %else
      global _%1
      %define %1 _%1
    %endif
  %else
    %ifdef MARK_FUNCS
      global %1:function
    %else
      global %1
    %endif
  %endif
%endmacro

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata
%else
SECTION .rodata align=16
%endif

ALIGN 16
mmx_one: ; four 1-words: the rounding term for (a+b+1)>>1 averaging
  dw 1, 1, 1, 1

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal transfer_8to16copy_mmx
cglobal transfer_16to8copy_mmx
cglobal transfer_8to16sub_mmx
cglobal transfer_8to16subro_mmx
cglobal transfer_8to16sub2_mmx
cglobal transfer_8to16sub2_xmm
cglobal transfer_16to8add_mmx
cglobal transfer8x8_copy_mmx

;-----------------------------------------------------------------------------
;
; void transfer_8to16copy_mmx(int16_t * const dst,
;                             const uint8_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
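
; What this computes, roughly, in C (an illustrative sketch, not part of the
; original source): widen one stride-spaced 8x8 block of bytes into 64
; contiguous 16-bit coefficients.
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++)
;       dst[j*8 + i] = (int16_t) src[j*stride + i];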

%macro COPY_8_TO_16 1
  movq mm0, [eax]      ; two 8-pixel source rows
  movq mm1, [eax+edx]
  movq mm2, mm0
  movq mm3, mm1
  punpcklbw mm0, mm7   ; widen against zeroed mm7
  movq [ecx+%1*32], mm0
  punpcklbw mm1, mm7
  movq [ecx+%1*32+16], mm1
  punpckhbw mm2, mm7
  punpckhbw mm3, mm7
  lea eax, [eax+2*edx] ; advance src by two rows
  movq [ecx+%1*32+8], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16copy_mmx:

  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride
  pxor mm7, mm7     ; zero, used by the widening unpacks

  COPY_8_TO_16 0
  COPY_8_TO_16 1
  COPY_8_TO_16 2
  COPY_8_TO_16 3
  ret

;-----------------------------------------------------------------------------
;
; void transfer_16to8copy_mmx(uint8_t * const dst,
;                             const int16_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
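
; Roughly, in C (illustrative sketch only): pack 64 contiguous coefficients
; back into a stride-spaced 8x8 byte block; packuswb saturates each value
; to 0..255.
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++) {
;       int16_t v = src[j*8 + i];
;       dst[j*stride + i] = (v < 0) ? 0 : (v > 255) ? 255 : (uint8_t) v;
;     }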

%macro COPY_16_TO_8 1
  movq mm0, [eax+%1*32]
  movq mm1, [eax+%1*32+8]
  movq mm2, [eax+%1*32+16]
  movq mm3, [eax+%1*32+24]
  packuswb mm0, mm1    ; saturate one row of words down to bytes
  movq [ecx], mm0
  packuswb mm2, mm3
  movq [ecx+edx], mm2
%endmacro

ALIGN 16
transfer_16to8copy_mmx:

  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride

  COPY_16_TO_8 0
  lea ecx, [ecx+2*edx]  ; advance dst by two rows between calls
  COPY_16_TO_8 1
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 2
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 3
  ret

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub_mmx(int16_t * const dct,
;                            uint8_t * const cur,
;                            const uint8_t * const ref,
;                            const uint32_t stride);
;
;-----------------------------------------------------------------------------
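
; Computes the prediction residual dct = cur - ref; this variant also
; overwrites cur with ref, while transfer_8to16subro_mmx below is the
; read-only variant that leaves cur untouched. Roughly, in C (illustrative
; sketch only):
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++) {
;       dct[j*8 + i] = (int16_t) cur[j*stride + i]
;                    - (int16_t) ref[j*stride + i];
;       cur[j*stride + i] = ref[j*stride + i];   /* skipped in subro */
;     }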
|
; when the second argument is 1, the reference block (ebx) is also copied
; into the current block (eax)
%macro COPY_8_TO_16_SUB 2
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref

  movq mm6, mm4
%if %2 == 1
  movq [eax], mm4      ; overwrite cur with ref
  movq [eax+edx], mm5
%endif
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub_mmx:
  mov ecx, [esp + 4]  ; Dst
  mov eax, [esp + 8]  ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref
  mov edx, [esp+4+16] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB 0, 1
  COPY_8_TO_16_SUB 1, 1
  COPY_8_TO_16_SUB 2, 1
  COPY_8_TO_16_SUB 3, 1

  pop ebx
  ret

ALIGN 16
transfer_8to16subro_mmx:
  mov ecx, [esp + 4]  ; Dst
  mov eax, [esp + 8]  ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref
  mov edx, [esp+4+16] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB 0, 0
  COPY_8_TO_16_SUB 1, 0
  COPY_8_TO_16_SUB 2, 0
  COPY_8_TO_16_SUB 3, 0

  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_mmx(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;-----------------------------------------------------------------------------
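
; Like transfer_8to16sub, but the prediction is the rounded average of two
; reference blocks (halfpel interpolation), and cur is overwritten with that
; average. Roughly, in C (illustrative sketch only):
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++) {
;       uint8_t r = (ref1[j*stride+i] + ref2[j*stride+i] + 1) >> 1;
;       dct[j*8 + i] = (int16_t) cur[j*stride + i] - r;
;       cur[j*stride + i] = r;
;     }
;
; Plain MMX has no byte-average instruction, so the macro below unpacks to
; words and computes (a + b + 1) >> 1 with paddusw/[mmx_one]/psrlw.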

%macro COPY_8_TO_16_SUB2_MMX 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]

  ; mm4 <- (ref1+ref2+1) / 2
  movq mm4, [ebx]      ; ref1
  movq mm1, [esi]      ; ref2
  movq mm6, mm4
  movq mm3, mm1
  punpcklbw mm4, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm4, mm1
  paddusw mm6, mm3
  paddusw mm4, [mmx_one] ; +1 rounding term
  paddusw mm6, [mmx_one]
  psrlw mm4, 1
  psrlw mm6, 1
  packuswb mm4, mm6
  movq [eax], mm4      ; write the average back to cur

  ; mm5 <- (ref1+ref2+1) / 2
  movq mm5, [ebx+edx]  ; ref1
  movq mm1, [esi+edx]  ; ref2
  movq mm6, mm5
  movq mm3, mm1
  punpcklbw mm5, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm5, mm1
  paddusw mm6, mm3
  paddusw mm5, [mmx_one]
  paddusw mm6, [mmx_one]
  lea esi, [esi+2*edx]
  psrlw mm5, 1
  psrlw mm6, 1
  packuswb mm5, mm6
  movq [eax+edx], mm5

  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub2_mmx:
  mov ecx, [esp + 4]  ; Dst
  mov eax, [esp + 8]  ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref1
  push esi
  mov esi, [esp+8+16] ; Ref2
  mov edx, [esp+8+20] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB2_MMX 0
  COPY_8_TO_16_SUB2_MMX 1
  COPY_8_TO_16_SUB2_MMX 2
  COPY_8_TO_16_SUB2_MMX 3

  pop esi
  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_xmm(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;-----------------------------------------------------------------------------
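
; Same operation as transfer_8to16sub2_mmx, but the rounded byte average
; (ref1[i] + ref2[i] + 1) >> 1 is a single pavgb from the SSE integer
; extensions, which removes the whole unpack/add/shift/repack sequence.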

%macro COPY_8_TO_16_SUB2_SSE 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref1
  pavgb mm4, [esi]     ; rounded average with ref2
  movq [eax], mm4      ; write the average back to cur
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref1
  pavgb mm5, [esi+edx] ; ref2
  movq [eax+edx], mm5

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea esi, [esi+2*edx]
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

ALIGN 16
transfer_8to16sub2_xmm:
  mov ecx, [esp + 4]  ; Dst
  mov eax, [esp + 8]  ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref1
  push esi
  mov esi, [esp+8+16] ; Ref2
  mov edx, [esp+8+20] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB2_SSE 0
  COPY_8_TO_16_SUB2_SSE 1
  COPY_8_TO_16_SUB2_SSE 2
  COPY_8_TO_16_SUB2_SSE 3

  pop esi
  pop ebx
  ret

;-----------------------------------------------------------------------------
;
; void transfer_16to8add_mmx(uint8_t * const dst,
;                            const int16_t * const src,
;                            uint32_t stride);
;
;-----------------------------------------------------------------------------
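
; The reconstruction step dst = clamp(dst + src): add 64 contiguous residual
; coefficients onto a stride-spaced 8x8 byte block with saturation. Roughly,
; in C (illustrative sketch only):
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++) {
;       int v = dst[j*stride + i] + src[j*8 + i];
;       dst[j*stride + i] = (v < 0) ? 0 : (v > 255) ? 255 : (uint8_t) v;
;     }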

%macro COPY_16_TO_8_ADD 1
  movq mm0, [ecx]      ; current dst rows
  movq mm2, [ecx+edx]
  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  paddsw mm0, [eax+%1*32+ 0] ; add the residual words
  paddsw mm1, [eax+%1*32+ 8]
  paddsw mm2, [eax+%1*32+16]
  paddsw mm3, [eax+%1*32+24]
  packuswb mm0, mm1    ; saturate back to bytes
  movq [ecx], mm0
  packuswb mm2, mm3
  movq [ecx+edx], mm2
%endmacro

ALIGN 16
transfer_16to8add_mmx:
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride
  pxor mm7, mm7

  COPY_16_TO_8_ADD 0
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 1
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 2
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 3
  ret

;-----------------------------------------------------------------------------
;
; void transfer8x8_copy_mmx(uint8_t * const dst,
;                           const uint8_t * const src,
;                           const uint32_t stride);
;
;-----------------------------------------------------------------------------
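
; A plain stride-spaced 8x8 byte-block copy, two rows per COPY_8_TO_8
; invocation. Roughly, in C (illustrative sketch only):
;
;   for (j = 0; j < 8; j++)
;     memcpy(dst + j*stride, src + j*stride, 8);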

%macro COPY_8_TO_8 0
  movq mm0, [eax]
  movq mm1, [eax+edx]
  movq [ecx], mm0
  lea eax, [eax+2*edx] ; advance src by two rows
  movq [ecx+edx], mm1
%endmacro

ALIGN 16
transfer8x8_copy_mmx:
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src