;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - 8<->16 bit transfer functions -
; *
; *  Copyright (C) 2001 Peter Ross <pross@xvid.org>
; *                2001-2008 Michael Militzer <michael@xvid.org>
; *                2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This program is free software; you can redistribute it and/or modify
; *  it under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; ***************************************************************************/

%include "nasm.inc"

;=============================================================================
; Read only data
;=============================================================================

DATA

ALIGN SECTION_ALIGN
mmx_one:
  dw 1, 1, 1, 1
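; four words of 1: the "+1" rounding term for the (ref1+ref2+1)/2
; averaging in COPY_8_TO_16_SUB2_MMX below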

;=============================================================================
; Code
;=============================================================================

SECTION .rotext align=SECTION_ALIGN

cglobal transfer_8to16copy_mmx
cglobal transfer_16to8copy_mmx
cglobal transfer_8to16sub_mmx
cglobal transfer_8to16subro_mmx
cglobal transfer_8to16sub2_mmx
cglobal transfer_8to16sub2_xmm
cglobal transfer_8to16sub2ro_xmm
cglobal transfer_16to8add_mmx
cglobal transfer8x8_copy_mmx
cglobal transfer8x4_copy_mmx

;-----------------------------------------------------------------------------
;
; void transfer_8to16copy_mmx(int16_t * const dst,
;                             const uint8_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
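;
; widens an 8x8 block of unsigned bytes to signed 16-bit words; roughly
; equivalent C (illustrative sketch only, not part of the build):
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++)
;       dst[j*8 + i] = (int16_t) src[j*stride + i];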

%macro COPY_8_TO_16 1
  movq mm0, [_EAX]
  movq mm1, [_EAX+TMP1]
  movq mm2, mm0
  movq mm3, mm1
  punpcklbw mm0, mm7
  movq [TMP0+%1*32], mm0
  punpcklbw mm1, mm7
  movq [TMP0+%1*32+16], mm1
  punpckhbw mm2, mm7
  punpckhbw mm3, mm7
  lea _EAX, [_EAX+2*TMP1]
  movq [TMP0+%1*32+8], mm2
  movq [TMP0+%1*32+24], mm3
%endmacro

ALIGN SECTION_ALIGN
transfer_8to16copy_mmx:

  mov TMP0, prm1 ; Dst
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; Stride
  pxor mm7, mm7

  COPY_8_TO_16 0
  COPY_8_TO_16 1
  COPY_8_TO_16 2
  COPY_8_TO_16 3
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_16to8copy_mmx(uint8_t * const dst,
;                             const int16_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
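;
; narrows 16-bit values back to bytes; packuswb saturates each word to
; 0..255, so out-of-range values are clamped rather than wrapped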

%macro COPY_16_TO_8 1
  movq mm0, [_EAX+%1*32]
  movq mm1, [_EAX+%1*32+8]
  packuswb mm0, mm1
  movq [TMP0], mm0
  movq mm2, [_EAX+%1*32+16]
  movq mm3, [_EAX+%1*32+24]
  packuswb mm2, mm3
  movq [TMP0+TMP1], mm2
%endmacro

ALIGN SECTION_ALIGN
transfer_16to8copy_mmx:

  mov TMP0, prm1 ; Dst
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; Stride

  COPY_16_TO_8 0
  lea TMP0, [TMP0+2*TMP1]
  COPY_16_TO_8 1
  lea TMP0, [TMP0+2*TMP1]
  COPY_16_TO_8 2
  lea TMP0, [TMP0+2*TMP1]
  COPY_16_TO_8 3
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub_mmx(int16_t * const dct,
;                            uint8_t * const cur,
;                            const uint8_t * const ref,
;                            const uint32_t stride);
;
;-----------------------------------------------------------------------------
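;
; dct <- cur - ref with signed word saturation (psubsw); this read/write
; variant also copies ref into cur, roughly (illustrative sketch only):
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++) {
;       dct[j*8 + i] = (int16_t) cur[j*stride + i] - ref[j*stride + i];
;       cur[j*stride + i] = ref[j*stride + i];
;     }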
|
; when the second macro argument == 1, the reference block ([_EBX]) is
; also copied to the current block ([_EAX])
%macro COPY_8_TO_16_SUB 2
  movq mm0, [_EAX]      ; cur
  movq mm2, [_EAX+TMP1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [_EBX]      ; ref
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [_EBX+TMP1] ; ref

  movq mm6, mm4
%if %2 == 1
  movq [_EAX], mm4
  movq [_EAX+TMP1], mm5
%endif
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea _EAX, [_EAX+2*TMP1]
  psubsw mm3, mm6
  lea _EBX, [_EBX+2*TMP1]

  movq [TMP0+%1*32+ 0], mm0 ; dst
  movq [TMP0+%1*32+ 8], mm1
  movq [TMP0+%1*32+16], mm2
  movq [TMP0+%1*32+24], mm3
%endmacro
177 |
|
|
178 |
align 16 |
ALIGN SECTION_ALIGN |
179 |
transfer_8to16sub_mmx: |
transfer_8to16sub_mmx: |
180 |
mov ecx, [esp + 4] ; Dst |
mov TMP0, prm1 ; Dst |
181 |
mov eax, [esp + 8] ; Cur |
mov _EAX, prm2 ; Cur |
182 |
push ebx |
mov TMP1, prm4 ; Stride |
183 |
mov ebx, [esp+4+12] ; Ref |
|
184 |
mov edx, [esp+4+16] ; Stride |
push _EBX |
185 |
|
%ifdef ARCH_IS_X86_64 |
186 |
|
mov _EBX, prm3 |
187 |
|
%else |
188 |
|
mov _EBX, [_ESP+4+12] ; Ref |
189 |
|
%endif |
190 |
pxor mm7, mm7 |
pxor mm7, mm7 |
191 |
|
|
192 |
COPY_8_TO_16_SUB 0 |
COPY_8_TO_16_SUB 0, 1 |
193 |
COPY_8_TO_16_SUB 1 |
COPY_8_TO_16_SUB 1, 1 |
194 |
COPY_8_TO_16_SUB 2 |
COPY_8_TO_16_SUB 2, 1 |
195 |
COPY_8_TO_16_SUB 3 |
COPY_8_TO_16_SUB 3, 1 |
196 |
|
|
197 |
pop ebx |
pop _EBX |
198 |
ret |
ret |
199 |
|
ENDFUNC |
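
;-----------------------------------------------------------------------------
;
; void transfer_8to16subro_mmx(int16_t * const dct,
;                              const uint8_t * const cur,
;                              const uint8_t * const ref,
;                              const uint32_t stride);
;
; read-only variant: same as transfer_8to16sub_mmx, but cur is left
; untouched (COPY_8_TO_16_SUB is invoked with its write flag set to 0)
;
;-----------------------------------------------------------------------------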

ALIGN SECTION_ALIGN
transfer_8to16subro_mmx:
  mov TMP0, prm1 ; Dst
  mov _EAX, prm2 ; Cur
  mov TMP1, prm4 ; Stride

  push _EBX
%ifdef ARCH_IS_X86_64
  mov _EBX, prm3
%else
  mov _EBX, [_ESP+4+12] ; Ref
%endif
  pxor mm7, mm7

  COPY_8_TO_16_SUB 0, 0
  COPY_8_TO_16_SUB 1, 0
  COPY_8_TO_16_SUB 2, 0
  COPY_8_TO_16_SUB 3, 0

  pop _EBX
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_mmx(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;-----------------------------------------------------------------------------
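;
; prediction p = (ref1 + ref2 + 1) >> 1 per byte, computed in 16-bit
; precision with mmx_one as the rounding term; p is stored back to cur
; and dct receives cur - p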

%macro COPY_8_TO_16_SUB2_MMX 1
  movq mm0, [_EAX]      ; cur
  movq mm2, [_EAX+TMP1]

  ; mm4 <- (ref1+ref2+1) / 2
  movq mm4, [_EBX]      ; ref1
  movq mm1, [_ESI]      ; ref2
  movq mm6, mm4
  movq mm3, mm1
  punpcklbw mm4, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm4, mm1
  paddusw mm6, mm3
  paddusw mm4, [mmx_one]
  paddusw mm6, [mmx_one]
  psrlw mm4, 1
  psrlw mm6, 1
  packuswb mm4, mm6
  movq [_EAX], mm4

  ; mm5 <- (ref1+ref2+1) / 2
  movq mm5, [_EBX+TMP1] ; ref1
  movq mm1, [_ESI+TMP1] ; ref2
  movq mm6, mm5
  movq mm3, mm1
  punpcklbw mm5, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm5, mm1
  paddusw mm6, mm3
  paddusw mm5, [mmx_one]
  paddusw mm6, [mmx_one]
  lea _ESI, [_ESI+2*TMP1]
  psrlw mm5, 1
  psrlw mm6, 1
  packuswb mm5, mm6
  movq [_EAX+TMP1], mm5

  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea _EAX, [_EAX+2*TMP1]
  psubsw mm3, mm6
  lea _EBX, [_EBX+2*TMP1]

  movq [TMP0+%1*32+ 0], mm0 ; dst
  movq [TMP0+%1*32+ 8], mm1
  movq [TMP0+%1*32+16], mm2
  movq [TMP0+%1*32+24], mm3
%endmacro

ALIGN SECTION_ALIGN
transfer_8to16sub2_mmx:
  mov TMP0, prm1    ; Dst
  mov TMP1d, prm5d  ; Stride
  mov _EAX, prm2    ; Cur

  push _EBX
%ifdef ARCH_IS_X86_64
  mov _EBX, prm3
%else
  mov _EBX, [_ESP+4+12] ; Ref1
%endif

  push _ESI
%ifdef ARCH_IS_X86_64
  mov _ESI, prm4
%else
  mov _ESI, [_ESP+8+16] ; Ref2
%endif

  pxor mm7, mm7

  COPY_8_TO_16_SUB2_MMX 0
  COPY_8_TO_16_SUB2_MMX 1
  COPY_8_TO_16_SUB2_MMX 2
  COPY_8_TO_16_SUB2_MMX 3

  pop _ESI
  pop _EBX
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_xmm(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;-----------------------------------------------------------------------------
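;
; same operation as transfer_8to16sub2_mmx, but a single pavgb computes
; the rounded byte average (a+b+1)>>1 directly, removing the whole
; unpack/add/shift sequence for the prediction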

%macro COPY_8_TO_16_SUB2_SSE 1
  movq mm0, [_EAX]      ; cur
  movq mm2, [_EAX+TMP1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [_EBX]      ; ref1
  pavgb mm4, [_ESI]     ; ref2
  movq [_EAX], mm4
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [_EBX+TMP1] ; ref1
  pavgb mm5, [_ESI+TMP1]; ref2
  movq [_EAX+TMP1], mm5

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  lea _ESI, [_ESI+2*TMP1]
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea _EAX, [_EAX+2*TMP1]
  psubsw mm3, mm6
  lea _EBX, [_EBX+2*TMP1]

  movq [TMP0+%1*32+ 0], mm0 ; dst
  movq [TMP0+%1*32+ 8], mm1
  movq [TMP0+%1*32+16], mm2
  movq [TMP0+%1*32+24], mm3
%endmacro

ALIGN SECTION_ALIGN
transfer_8to16sub2_xmm:
  mov TMP0, prm1    ; Dst
  mov _EAX, prm2    ; Cur
  mov TMP1d, prm5d  ; Stride

  push _EBX
%ifdef ARCH_IS_X86_64
  mov _EBX, prm3 ; Ref1
%else
  mov _EBX, [_ESP+4+12] ; Ref1
%endif

  push _ESI
%ifdef ARCH_IS_X86_64
  mov _ESI, prm4 ; Ref2
%else
  mov _ESI, [_ESP+8+16] ; Ref2
%endif

  pxor mm7, mm7

  COPY_8_TO_16_SUB2_SSE 0
  COPY_8_TO_16_SUB2_SSE 1
  COPY_8_TO_16_SUB2_SSE 2
  COPY_8_TO_16_SUB2_SSE 3

  pop _ESI
  pop _EBX
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2ro_xmm(int16_t * const dct,
;                               const uint8_t * const cur,
;                               const uint8_t * ref1,
;                               const uint8_t * ref2,
;                               const uint32_t stride)
;
;-----------------------------------------------------------------------------
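;
; read-only variant of transfer_8to16sub2_xmm: the averaged prediction is
; only subtracted from cur, never written back, so cur may be const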

%macro COPY_8_TO_16_SUB2RO_SSE 1
  movq mm0, [_EAX]      ; cur
  movq mm2, [_EAX+TMP1]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [_EBX]      ; ref1
  pavgb mm4, [_ESI]     ; ref2
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [_EBX+TMP1] ; ref1
  pavgb mm5, [_ESI+TMP1]; ref2

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  lea _ESI, [_ESI+2*TMP1]
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea _EAX, [_EAX+2*TMP1]
  psubsw mm3, mm6
  lea _EBX, [_EBX+2*TMP1]

  movq [TMP0+%1*32+ 0], mm0 ; dst
  movq [TMP0+%1*32+ 8], mm1
  movq [TMP0+%1*32+16], mm2
  movq [TMP0+%1*32+24], mm3
%endmacro

ALIGN SECTION_ALIGN
transfer_8to16sub2ro_xmm:
  pxor mm7, mm7
  mov TMP0, prm1    ; Dst
  mov _EAX, prm2    ; Cur
  mov TMP1d, prm5d  ; Stride

  push _EBX
%ifdef ARCH_IS_X86_64
  mov _EBX, prm3
%else
  mov _EBX, [_ESP+4+12] ; Ref1
%endif

  push _ESI
%ifdef ARCH_IS_X86_64
  mov _ESI, prm4
%else
  mov _ESI, [_ESP+8+16] ; Ref2
%endif

  COPY_8_TO_16_SUB2RO_SSE 0
  COPY_8_TO_16_SUB2RO_SSE 1
  COPY_8_TO_16_SUB2RO_SSE 2
  COPY_8_TO_16_SUB2RO_SSE 3

  pop _ESI
  pop _EBX
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_16to8add_mmx(uint8_t * const dst,
;                            const int16_t * const src,
;                            uint32_t stride);
;
;-----------------------------------------------------------------------------
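;
; reconstruction step: the 16-bit residual is added to the 8-bit
; prediction with signed saturation (paddsw), then repacked with unsigned
; saturation (packuswb), clamping the result to 0..255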

%macro COPY_16_TO_8_ADD 1
  movq mm0, [TMP0]
  movq mm2, [TMP0+TMP1]
  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  paddsw mm0, [_EAX+%1*32+ 0]
  paddsw mm1, [_EAX+%1*32+ 8]
  paddsw mm2, [_EAX+%1*32+16]
  paddsw mm3, [_EAX+%1*32+24]
  packuswb mm0, mm1
  movq [TMP0], mm0
  packuswb mm2, mm3
  movq [TMP0+TMP1], mm2
%endmacro

ALIGN SECTION_ALIGN
transfer_16to8add_mmx:
  mov TMP0, prm1 ; Dst
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; Stride
  pxor mm7, mm7

  COPY_16_TO_8_ADD 0
  lea TMP0, [TMP0+2*TMP1]
  COPY_16_TO_8_ADD 1
  lea TMP0, [TMP0+2*TMP1]
  COPY_16_TO_8_ADD 2
  lea TMP0, [TMP0+2*TMP1]
  COPY_16_TO_8_ADD 3
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer8x8_copy_mmx(uint8_t * const dst,
;                           const uint8_t * const src,
;                           const uint32_t stride);
;
;-----------------------------------------------------------------------------
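;
; straight 8x8 byte copy; each COPY_8_TO_8 moves two rows, one movq
; (8 bytes) load/store pair per row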

%macro COPY_8_TO_8 0
  movq mm0, [_EAX]
  movq mm1, [_EAX+TMP1]
  movq [TMP0], mm0
  lea _EAX, [_EAX+2*TMP1]
  movq [TMP0+TMP1], mm1
%endmacro

ALIGN SECTION_ALIGN
transfer8x8_copy_mmx:
  mov TMP0, prm1 ; Dst
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; Stride

  COPY_8_TO_8
  lea TMP0, [TMP0+2*TMP1]
  COPY_8_TO_8
  lea TMP0, [TMP0+2*TMP1]
  COPY_8_TO_8
  lea TMP0, [TMP0+2*TMP1]
  COPY_8_TO_8
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer8x4_copy_mmx(uint8_t * const dst,
;                           const uint8_t * const src,
;                           const uint32_t stride);
;
;-----------------------------------------------------------------------------
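;
; 8x4 variant of the copy above: two COPY_8_TO_8 invocations cover four rows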

ALIGN SECTION_ALIGN
transfer8x4_copy_mmx:
  mov TMP0, prm1 ; Dst
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; Stride

  COPY_8_TO_8
  lea TMP0, [TMP0+2*TMP1]
  COPY_8_TO_8
  ret
ENDFUNC

%ifidn __OUTPUT_FORMAT__,elf
section ".note.GNU-stack" noalloc noexec nowrite progbits
%endif