; * XVID MPEG-4 VIDEO CODEC
; * mmx 8bit<->16bit transfers
; *
; * This file is part of XviD, a free MPEG-4 video encoder/decoder
; *
; * XviD is free software; you can redistribute it and/or modify it
; * under the terms of the GNU General Public License as published by
; * the Free Software Foundation; either version 2 of the License, or
; * (at your option) any later version.
; *
; * This program is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with this program; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * Under section 8 of the GNU General Public License, the copyright
; * holders of XVID explicitly forbid distribution in the following
; * countries:
; *
; *   - Japan
; *   - United States of America
; *
; * Linking XviD statically or dynamically with other modules is making a
; * combined work based on XviD. Thus, the terms and conditions of the
; * GNU General Public License cover the whole combination.
; *
; * As a special exception, the copyright holders of XviD give you
; * permission to link XviD with independent modules that communicate with
; * XviD solely through the VFW1.1 and DShow interfaces, regardless of the
; * license terms of these independent modules, and to copy and distribute
; * the resulting combined work under terms of your choice, provided that
; * every copy of the combined work is accompanied by a complete copy of
; * the source code of XviD (the version of XviD used to produce the
; * combined work), being distributed under the terms of the GNU General
; * Public License plus this exception. An independent module is a module
; * which is not derived from or based on XviD.
; *
; * Note that people who make modified versions of XviD are not obligated
; * to grant this special exception for their modified versions; it is
; * their choice whether to do so. The GNU General Public License gives
; * permission to release a modified version without this exception; this
; * exception also makes it possible to release a modified version which
; * carries forward this exception.
; *
; * $Id$
; *
; *************************************************************************/

;/**************************************************************************
; *
; * History:
; *
; * 04.06.2002  speed enhancement (unroll+overlap). -Skal-
; *             + added transfer_8to16sub2_mmx/xmm
; * 07.01.2002  merge functions from compensate_mmx; rename functions
; * 07.11.2001  initial version; (c)2001 peter ross <pross@xvid.org>
; *
; *************************************************************************/

section .text

cglobal transfer_8to16copy_mmx
cglobal transfer_16to8copy_mmx
cglobal transfer_8to16sub_mmx
cglobal transfer_8to16sub2_mmx
cglobal transfer_8to16sub2_xmm
cglobal transfer_16to8add_mmx
cglobal transfer8x8_copy_mmx
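
; Note on the calling convention used throughout this file: all routines
; are cdecl, so arguments are picked up from the stack ([esp+4], [esp+8],
; ...); ecx/eax/edx serve as scratch registers, ebx/esi are pushed and
; popped where they are needed, and mm7 is zeroed (pxor mm7, mm7) in the
; routines that unpack bytes to words.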

;===========================================================================
;
; void transfer_8to16copy_mmx(int16_t * const dst,
;                             const uint8_t * const src,
;                             uint32_t stride);
;
;===========================================================================
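;
; As a C sketch (an illustration, assuming the usual XviD layout of dst
; as an 8x8 block of 64 contiguous int16_t):
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++)
;       dst[j*8 + i] = (int16_t) src[j*stride + i];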

%macro COPY_8_TO_16 1
  movq mm0, [eax]
  movq mm1, [eax+edx]
  movq mm2, mm0
  movq mm3, mm1
  punpcklbw mm0, mm7
  movq [ecx+%1*32], mm0
  punpcklbw mm1, mm7
  movq [ecx+%1*32+16], mm1
  punpckhbw mm2, mm7
  punpckhbw mm3, mm7
  lea eax, [eax+2*edx]
  movq [ecx+%1*32+8], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

align 16
transfer_8to16copy_mmx:

  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16 0
  COPY_8_TO_16 1
  COPY_8_TO_16 2
  COPY_8_TO_16 3
  ret

;===========================================================================
;
; void transfer_16to8copy_mmx(uint8_t * const dst,
;                             const int16_t * const src,
;                             uint32_t stride);
;
;===========================================================================
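;
; C sketch (packuswb saturates each 16-bit coefficient to 0..255 on the
; way back to bytes; CLAMP_TO_UINT8 below is only an illustrative macro):
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++)
;       dst[j*stride + i] = CLAMP_TO_UINT8(src[j*8 + i]);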

%macro COPY_16_TO_8 1
  movq mm0, [eax+%1*32]
  movq mm1, [eax+%1*32+8]
  packuswb mm0, mm1
  movq [ecx], mm0
  movq mm2, [eax+%1*32+16]
  movq mm3, [eax+%1*32+24]
  packuswb mm2, mm3
  movq [ecx+edx], mm2
%endmacro

align 16
transfer_16to8copy_mmx:

  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride

  COPY_16_TO_8 0
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 1
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 2
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8 3
  ret

;===========================================================================
;
; void transfer_8to16sub_mmx(int16_t * const dct,
;                            uint8_t * const cur,
;                            const uint8_t * const ref,
;                            const uint32_t stride)
;
;===========================================================================
;/**************************************************************************
; *
; * History:
; *
; * 02.12.2001  loop unrolled, code runs 10% faster now (Isibaar)
; * 30.11.2001  16 pixels are processed per iteration (Isibaar)
; * 30.11.2001  .text missing
; * 06.11.2001  initial version; (c)2001 peter ross <pross@xvid.org>
; *
; *************************************************************************/
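;
; What the routine computes, as a C sketch; note the write-back, cur is
; overwritten with ref:
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++) {
;       dct[j*8 + i] = (int16_t) cur[j*stride + i] - (int16_t) ref[j*stride + i];
;       cur[j*stride + i] = ref[j*stride + i];
;     }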

%macro COPY_8_TO_16_SUB 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref

  movq mm6, mm4
  movq [eax], mm4
  movq [eax+edx], mm5
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

align 16
transfer_8to16sub_mmx:
  mov ecx, [esp + 4]  ; Dst
  mov eax, [esp + 8]  ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref
  mov edx, [esp+4+16] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB 0
  COPY_8_TO_16_SUB 1
  COPY_8_TO_16_SUB 2
  COPY_8_TO_16_SUB 3

  pop ebx
  ret

;===========================================================================
;
; void transfer_8to16sub2_mmx(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;===========================================================================
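;
; C sketch; the macro below averages the two references and subtracts the
; result from cur. As written, the paddusw/psrlw pair truncates, i.e. it
; computes (ref1+ref2)>>1, while the _xmm variant's pavgb rounds up:
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++)
;       dct[j*8 + i] = cur[j*stride + i]
;                    - ((ref1[j*stride + i] + ref2[j*stride + i]) >> 1);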

%macro COPY_8_TO_16_SUB2_MMX 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]

  ; mm4 <- (ref1+ref2+1) / 2
  movq mm4, [ebx]      ; ref1
  movq mm1, [esi]      ; ref2
  movq mm6, mm4
  movq mm3, mm1
  punpcklbw mm4, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm4, mm1
  paddusw mm6, mm3
  psrlw mm4, 1
  psrlw mm6, 1
  packuswb mm4, mm6

  ; mm5 <- (ref1+ref2+1) / 2
  movq mm5, [ebx+edx]  ; ref1
  movq mm1, [esi+edx]  ; ref2
  movq mm6, mm5
  movq mm3, mm1
  punpcklbw mm5, mm7
  punpcklbw mm1, mm7
  punpckhbw mm6, mm7
  punpckhbw mm3, mm7
  paddusw mm5, mm1
  paddusw mm6, mm3
  lea esi, [esi+2*edx]
  psrlw mm5, 1
  psrlw mm6, 1
  packuswb mm5, mm6

  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

align 16
transfer_8to16sub2_mmx:
  mov ecx, [esp + 4]  ; Dst
  mov eax, [esp + 8]  ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref1
  push esi
  mov esi, [esp+8+16] ; Ref2
  mov edx, [esp+8+20] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB2_MMX 0
  COPY_8_TO_16_SUB2_MMX 1
  COPY_8_TO_16_SUB2_MMX 2
  COPY_8_TO_16_SUB2_MMX 3

  pop esi
  pop ebx
  ret

;===========================================================================
;
; void transfer_8to16sub2_xmm(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * ref1,
;                             const uint8_t * ref2,
;                             const uint32_t stride)
;
;===========================================================================
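;
; Same operation as transfer_8to16sub2_mmx, except that the reference
; average is computed in one step with pavgb, which yields the rounded
; value (ref1+ref2+1)>>1 per byte; this variant therefore requires the
; extended integer MMX/SSE instruction set.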

%macro COPY_8_TO_16_SUB2_SSE 1
  movq mm0, [eax]      ; cur
  movq mm2, [eax+edx]
  movq mm1, mm0
  movq mm3, mm2

  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  movq mm4, [ebx]      ; ref1
  pavgb mm4, [esi]     ; ref2
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  movq mm5, [ebx+edx]  ; ref
  pavgb mm5, [esi+edx] ; ref2

  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
  psubsw mm0, mm4
  psubsw mm1, mm6
  lea esi, [esi+2*edx]
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  lea eax, [eax+2*edx]
  psubsw mm3, mm6
  lea ebx, [ebx+2*edx]

  movq [ecx+%1*32+ 0], mm0 ; dst
  movq [ecx+%1*32+ 8], mm1
  movq [ecx+%1*32+16], mm2
  movq [ecx+%1*32+24], mm3
%endmacro

align 16
transfer_8to16sub2_xmm:
  mov ecx, [esp + 4]  ; Dst
  mov eax, [esp + 8]  ; Cur
  push ebx
  mov ebx, [esp+4+12] ; Ref1
  push esi
  mov esi, [esp+8+16] ; Ref2
  mov edx, [esp+8+20] ; Stride
  pxor mm7, mm7

  COPY_8_TO_16_SUB2_SSE 0
  COPY_8_TO_16_SUB2_SSE 1
  COPY_8_TO_16_SUB2_SSE 2
  COPY_8_TO_16_SUB2_SSE 3

  pop esi
  pop ebx
  ret

;===========================================================================
;
; void transfer_16to8add_mmx(uint8_t * const dst,
;                            const int16_t * const src,
;                            uint32_t stride);
;
;===========================================================================
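;
; C sketch (paddsw adds with signed saturation and packuswb clamps the
; sum back to 0..255; CLAMP_TO_UINT8 is only an illustrative macro):
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++)
;       dst[j*stride + i] = CLAMP_TO_UINT8(dst[j*stride + i] + src[j*8 + i]);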

%macro COPY_16_TO_8_ADD 1
  movq mm0, [ecx]
  movq mm2, [ecx+edx]
  movq mm1, mm0
  movq mm3, mm2
  punpcklbw mm0, mm7
  punpcklbw mm2, mm7
  punpckhbw mm1, mm7
  punpckhbw mm3, mm7
  paddsw mm0, [eax+%1*32+ 0]
  paddsw mm1, [eax+%1*32+ 8]
  paddsw mm2, [eax+%1*32+16]
  paddsw mm3, [eax+%1*32+24]
  packuswb mm0, mm1
  movq [ecx], mm0
  packuswb mm2, mm3
  movq [ecx+edx], mm2
%endmacro

align 16
transfer_16to8add_mmx:
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride
  pxor mm7, mm7

  COPY_16_TO_8_ADD 0
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 1
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 2
  lea ecx, [ecx+2*edx]
  COPY_16_TO_8_ADD 3
  ret

;===========================================================================
;
; void transfer8x8_copy_mmx(uint8_t * const dst,
;                           const uint8_t * const src,
;                           const uint32_t stride);
;
;===========================================================================
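;
; Plain 8x8 byte copy, two rows per macro invocation; as a C sketch:
;
;   for (j = 0; j < 8; j++)
;     for (i = 0; i < 8; i++)
;       dst[j*stride + i] = src[j*stride + i];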

%macro COPY_8_TO_8 0
  movq mm0, [eax]
  movq mm1, [eax+edx]
  movq [ecx], mm0
  lea eax, [eax+2*edx]
  movq [ecx+edx], mm1
%endmacro

align 16
transfer8x8_copy_mmx:
  mov ecx, [esp+ 4] ; Dst
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; Stride

  COPY_8_TO_8
  lea ecx, [ecx+2*edx]
  COPY_8_TO_8
  lea ecx, [ecx+2*edx]
  COPY_8_TO_8
  lea ecx, [ecx+2*edx]
  COPY_8_TO_8
  ret