1 |
|
;/***************************************************************************** |
2 |
|
; * |
3 |
|
; * XVID MPEG-4 VIDEO CODEC |
4 |
|
; * - MMX/SSE forward discrete cosine transform - |
5 |
|
; * Copyright(C) 2002 Pascal Massimino <skal@planet-d.net> |
6 |
|
; * |
7 |
|
; * This file is part of XviD, a free MPEG-4 video encoder/decoder |
8 |
|
; * |
9 |
|
; * XviD is free software; you can redistribute it and/or modify it |
10 |
|
; * under the terms of the GNU General Public License as published by |
11 |
|
; * the Free Software Foundation; either version 2 of the License, or |
12 |
|
; * (at your option) any later version. |
13 |
|
; * |
14 |
|
; * This program is distributed in the hope that it will be useful, |
15 |
|
; * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 |
|
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 |
|
; * GNU General Public License for more details. |
18 |
|
; * |
19 |
|
; * You should have received a copy of the GNU General Public License |
20 |
|
; * along with this program; if not, write to the Free Software |
21 |
|
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
22 |
|
; * |
23 |
|
; * Under section 8 of the GNU General Public License, the copyright |
24 |
|
; * holders of XVID explicitly forbid distribution in the following |
25 |
|
; * countries: |
26 |
|
; * |
27 |
|
; * - Japan |
28 |
|
; * - United States of America |
29 |
|
; * |
30 |
|
; * Linking XviD statically or dynamically with other modules is making a |
31 |
|
; * combined work based on XviD. Thus, the terms and conditions of the |
32 |
|
; * GNU General Public License cover the whole combination. |
33 |
|
; * |
34 |
|
; * As a special exception, the copyright holders of XviD give you |
35 |
|
; * permission to link XviD with independent modules that communicate with |
36 |
|
; * XviD solely through the VFW1.1 and DShow interfaces, regardless of the |
37 |
|
; * license terms of these independent modules, and to copy and distribute |
38 |
|
; * the resulting combined work under terms of your choice, provided that |
39 |
|
; * every copy of the combined work is accompanied by a complete copy of |
40 |
|
; * the source code of XviD (the version of XviD used to produce the |
41 |
|
; * combined work), being distributed under the terms of the GNU General |
42 |
|
; * Public License plus this exception. An independent module is a module |
43 |
|
; * which is not derived from or based on XviD. |
44 |
|
; * |
45 |
|
; * Note that people who make modified versions of XviD are not obligated |
46 |
|
; * to grant this special exception for their modified versions; it is |
47 |
|
; * their choice whether to do so. The GNU General Public License gives |
48 |
|
; * permission to release a modified version without this exception; this |
49 |
|
; * exception also makes it possible to release a modified version which |
50 |
|
; * carries forward this exception. |
51 |
|
; * |
52 |
|
; * $Id$ |
53 |
|
; * |
54 |
|
; *************************************************************************/ |
55 |
|
|
56 |
|
;/************************************************************************** |
57 |
|
; * |
58 |
|
; * History: |
59 |
|
; * |
60 |
|
; * 01.10.2002 creation - Skal - |
61 |
|
; * |
62 |
|
; *************************************************************************/ |
63 |
|
|
64 |
|
; NASM syntax, 32-bit x86 (IA-32). cdecl calling convention:
; single argument (pointer to the 8x8 int16 coefficient block) at [esp+4].
bits 32

; cglobal: declare %1 as a global symbol, prepending a leading underscore
; when PREFIX is defined (object formats with C-style name mangling,
; e.g. win32 COFF / Mach-O); the %define redirects all later uses of the
; bare name to the mangled one.
%macro cglobal 1
  %ifdef PREFIX
    global _%1
    %define %1 _%1
  %else
    global %1
  %endif
%endmacro

cglobal xvid_fdct_sse
cglobal xvid_fdct_mmx
77 |
|
|
78 |
|
;////////////////////////////////////////////////////////////////////// |
79 |
|
; |
80 |
|
; Vertical pass is an implementation of the scheme: |
81 |
|
; Loeffler C., Ligtenberg A., and Moschytz C.S.: |
82 |
|
; Practical Fast 1D DCT Algorithm with Eleven Multiplications, |
83 |
|
; Proc. ICASSP 1989, 988-991. |
84 |
|
; |
85 |
|
; Horizontal pass is a double 4x4 vector/matrix multiplication, |
86 |
|
; (see also Intel's Application Note 922: |
87 |
|
; http://developer.intel.com/vtune/cbts/strmsimd/922down.htm |
88 |
|
; Copyright (C) 1999 Intel Corporation) |
89 |
|
; |
90 |
|
; Notes: |
91 |
|
; * tan(3pi/16) is greater than 0.5, and would use the |
92 |
|
; sign bit when turned into 16b fixed-point precision. So, |
93 |
|
; we use the trick: x*tan3 = x*(tan3-1)+x |
94 |
|
; |
95 |
|
; * There's only one SSE-specific instruction (pshufw). |
96 |
|
; Porting to SSE2 also seems straightforward. |
97 |
|
; |
98 |
|
; * There's still 1 or 2 ticks to save in fLLM_PASS, but |
99 |
|
; I prefer having a readable code, instead of a tightly |
100 |
|
; scheduled one... |
101 |
|
; |
102 |
|
; * Quantization stage (as well as pre-transposition for the |
103 |
|
; idct way back) can be included in the fTab* constants |
104 |
|
; (with induced loss of precision, somehow) |
105 |
|
; |
106 |
|
; * Some more details at: http://skal.planet-d.net/coding/dct.html |
107 |
|
; |
108 |
|
;////////////////////////////////////////////////////////////////////// |
109 |
|
; |
110 |
|
; idct-like IEEE errors: |
111 |
|
; |
112 |
|
; ========================= |
113 |
|
; Peak error: 1.0000 |
114 |
|
; Peak MSE: 0.0365 |
115 |
|
; Overall MSE: 0.0201 |
116 |
|
; Peak ME: 0.0265 |
117 |
|
; Overall ME: 0.0006 |
118 |
|
; |
119 |
|
; == Mean square errors == |
120 |
|
; 0.000 0.001 0.001 0.002 0.000 0.002 0.001 0.000 [0.001] |
121 |
|
; 0.035 0.029 0.032 0.032 0.031 0.032 0.034 0.035 [0.032] |
122 |
|
; 0.026 0.028 0.027 0.027 0.025 0.028 0.028 0.025 [0.027] |
123 |
|
; 0.037 0.032 0.031 0.030 0.028 0.029 0.026 0.031 [0.030] |
124 |
|
; 0.000 0.001 0.001 0.002 0.000 0.002 0.001 0.001 [0.001] |
125 |
|
; 0.025 0.024 0.022 0.022 0.022 0.022 0.023 0.023 [0.023] |
126 |
|
; 0.026 0.028 0.025 0.028 0.030 0.025 0.026 0.027 [0.027] |
127 |
|
; 0.021 0.020 0.020 0.022 0.020 0.022 0.017 0.019 [0.020] |
128 |
|
; |
129 |
|
; == Abs Mean errors == |
130 |
|
; 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 [0.000] |
131 |
|
; 0.020 0.001 0.003 0.003 0.000 0.004 0.002 0.003 [0.002] |
132 |
|
; 0.000 0.001 0.001 0.001 0.001 0.004 0.000 0.000 [0.000] |
133 |
|
; 0.027 0.001 0.000 0.002 0.002 0.002 0.001 0.000 [0.003] |
134 |
|
; 0.000 0.000 0.000 0.000 0.000 0.001 0.000 0.001 [-0.000] |
135 |
|
; 0.001 0.003 0.001 0.001 0.002 0.001 0.000 0.000 [-0.000] |
136 |
|
; 0.000 0.002 0.002 0.001 0.001 0.002 0.001 0.000 [-0.000] |
137 |
|
; 0.000 0.002 0.001 0.002 0.001 0.002 0.001 0.001 [-0.000] |
138 |
|
; |
139 |
|
;////////////////////////////////////////////////////////////////////// |
140 |
|
|
141 |
|
section .data

; 16-bit fixed-point constants (value * 2^16 truncated to 16 bits);
; they are applied with pmulhw, which keeps the high 16 bits of the
; 16x16 product, i.e. an implicit >>16.
align 16
tan1:  dw 0x32ec,0x32ec,0x32ec,0x32ec ; tan( pi/16)
tan2:  dw 0x6a0a,0x6a0a,0x6a0a,0x6a0a ; tan(2pi/16) (=sqrt(2)-1)
tan3:  dw 0xab0e,0xab0e,0xab0e,0xab0e ; tan(3pi/16)-1  (negative: tan3>0.5 won't fit, see header note)
sqrt2: dw 0x5a82,0x5a82,0x5a82,0x5a82 ; 0.5/sqrt(2)

;//////////////////////////////////////////////////////////////////////

; Horizontal-pass coefficient tables, one per row "shape" (rows sharing
; the same vertical scaling reuse a table — see the fMTX_MULT call sites).
; Each table is an 8x4 layout of words consumed as four qwords per
; half-row by pmaddwd; negative cosines are stored as two's-complement.
align 16
fTab1:
  dw 0x4000, 0x4000, 0x58c5, 0x4b42,
  dw 0x4000, 0x4000, 0x3249, 0x11a8,
  dw 0x539f, 0x22a3, 0x4b42, 0xee58,
  dw 0xdd5d, 0xac61, 0xa73b, 0xcdb7,
  dw 0x4000, 0xc000, 0x3249, 0xa73b,
  dw 0xc000, 0x4000, 0x11a8, 0x4b42,
  dw 0x22a3, 0xac61, 0x11a8, 0xcdb7,
  dw 0x539f, 0xdd5d, 0x4b42, 0xa73b

fTab2:
  dw 0x58c5, 0x58c5, 0x7b21, 0x6862,
  dw 0x58c5, 0x58c5, 0x45bf, 0x187e,
  dw 0x73fc, 0x300b, 0x6862, 0xe782,
  dw 0xcff5, 0x8c04, 0x84df, 0xba41,
  dw 0x58c5, 0xa73b, 0x45bf, 0x84df,
  dw 0xa73b, 0x58c5, 0x187e, 0x6862,
  dw 0x300b, 0x8c04, 0x187e, 0xba41,
  dw 0x73fc, 0xcff5, 0x6862, 0x84df

fTab3:
  dw 0x539f, 0x539f, 0x73fc, 0x6254,
  dw 0x539f, 0x539f, 0x41b3, 0x1712,
  dw 0x6d41, 0x2d41, 0x6254, 0xe8ee,
  dw 0xd2bf, 0x92bf, 0x8c04, 0xbe4d,
  dw 0x539f, 0xac61, 0x41b3, 0x8c04,
  dw 0xac61, 0x539f, 0x1712, 0x6254,
  dw 0x2d41, 0x92bf, 0x1712, 0xbe4d,
  dw 0x6d41, 0xd2bf, 0x6254, 0x8c04

fTab4:
  dw 0x4b42, 0x4b42, 0x6862, 0x587e,
  dw 0x4b42, 0x4b42, 0x3b21, 0x14c3,
  dw 0x6254, 0x28ba, 0x587e, 0xeb3d,
  dw 0xd746, 0x9dac, 0x979e, 0xc4df,
  dw 0x4b42, 0xb4be, 0x3b21, 0x979e,
  dw 0xb4be, 0x4b42, 0x14c3, 0x587e,
  dw 0x28ba, 0x9dac, 0x14c3, 0xc4df,
  dw 0x6254, 0xd746, 0x587e, 0x979e

; Per-row rounding constants added before the final >>4 in fMTX_MULT.
; Only the first word differs (DC-position bias); remaining words are 8.
align 16
Fdct_Rnd0: dw  6,8,8,8
Fdct_Rnd1: dw  8,8,8,8
Fdct_Rnd2: dw 10,8,8,8
MMX_One:   dw  1,1,1,1
197 |
|
|
198 |
|
;////////////////////////////////////////////////////////////////////// |
199 |
|
|
200 |
|
section .text |
201 |
|
|
202 |
|
;////////////////////////////////////////////////////////////////////// |
203 |
|
;// FDCT LLM vertical pass (~39c) |
204 |
|
;////////////////////////////////////////////////////////////////////// |
205 |
|
|
206 |
|
;-----------------------------------------------------------------------
; fLLM_PASS: Loeffler/Ligtenberg/Moschytz vertical DCT pass over 4
; columns of the 8x8 int16 block (rows are 16 bytes apart; one movq
; covers 4 columns, so the caller invokes this twice: +0 and +8).
; %1: src/dst base address expression, %2: up-shift applied to keep
;     precision for the horizontal pass.
; Clobbers mm0-mm7 and reads tan1/tan2/tan3/sqrt2/MMX_One.
; All adds/subs are saturating (paddsw/psubsw); statement order matters.
;-----------------------------------------------------------------------
%macro fLLM_PASS 2  ; %1: src/dst, %2: Shift

  ; Butterfly stage 1: sums/differences of mirrored rows.
  movq   mm0, [%1+0*16]   ; In0
  movq   mm2, [%1+2*16]   ; In2
  movq   mm3, mm0
  movq   mm4, mm2
  movq   mm7, [%1+7*16]   ; In7
  movq   mm5, [%1+5*16]   ; In5

  psubsw mm0, mm7         ; t7 = In0-In7
  paddsw mm7, mm3         ; t0 = In0+In7
  psubsw mm2, mm5         ; t5 = In2-In5
  paddsw mm5, mm4         ; t2 = In2+In5

  movq   mm3, [%1+3*16]   ; In3
  movq   mm4, [%1+4*16]   ; In4
  movq   mm1, mm3
  psubsw mm3, mm4         ; t4 = In3-In4
  paddsw mm4, mm1         ; t3 = In3+In4
  movq   mm6, [%1+6*16]   ; In6
  movq   mm1, [%1+1*16]   ; In1
  psubsw mm1, mm6         ; t6 = In1-In6
  paddsw mm6, [%1+1*16]   ; t1 = In1+In6 (re-read In1; mm1 now holds t6)

  ; Butterfly stage 2 on the even part.
  psubsw mm7, mm4         ; tm03 = t0-t3
  psubsw mm6, mm5         ; tm12 = t1-t2
  paddsw mm4, mm4         ; 2.t3
  paddsw mm5, mm5         ; 2.t2
  paddsw mm4, mm7         ; tp03 = t0+t3 (tm03 + 2.t3)
  paddsw mm5, mm6         ; tp12 = t1+t2 (tm12 + 2.t2)

  ; Pre-scale everything by %2; t5/t6 get one extra bit to fold the
  ; /2 of the cos4 constant (sqrt2 stores 0.5/sqrt(2)).
  psllw  mm2, %2+1        ; shift t5 (shift +1 to..
  psllw  mm1, %2+1        ; shift t6  ..compensate cos4/2)
  psllw  mm4, %2          ; shift t3
  psllw  mm5, %2          ; shift t2
  psllw  mm7, %2          ; shift t0
  psllw  mm6, %2          ; shift t1
  psllw  mm3, %2          ; shift t4
  psllw  mm0, %2          ; shift t7

  ; Even outputs 0 and 4.
  psubsw mm4, mm5         ; out4 = tp03-tp12
  psubsw mm1, mm2         ; mm1: t6-t5
  paddsw mm5, mm5
  paddsw mm2, mm2
  paddsw mm5, mm4         ; out0 = tp03+tp12
  movq   [%1+4*16], mm4   ; => out4
  paddsw mm2, mm1         ; mm2: t6+t5
  movq   [%1+0*16], mm5   ; => out0

  ; Even outputs 2 and 6 via tan2 rotation.
  movq   mm4, [tan2]      ; mm4 <= tan2
  pmulhw mm4, mm7         ; tm03*tan2
  movq   mm5, [tan2]      ; mm5 <= tan2
  psubsw mm4, mm6         ; out6 = tm03*tan2 - tm12
  pmulhw mm5, mm6         ; tm12*tan2
  paddsw mm5, mm7         ; out2 = tm12*tan2 + tm03

  movq   mm6, [sqrt2]
  movq   mm7, [MMX_One]

  ; Odd-part cos4 rotation; the por-with-1 "corrections" force the LSB,
  ; compensating pmulhw's truncation (per the original 'correct' notes).
  pmulhw mm2, mm6         ; mm2: tp65 = (t6 + t5)*cos4
  por    mm5, mm7         ; correct out2
  por    mm4, mm7         ; correct out6
  pmulhw mm1, mm6         ; mm1: tm65 = (t6 - t5)*cos4
  por    mm2, mm7         ; correct tp65

  movq   [%1+2*16], mm5   ; => out2
  movq   mm5, mm3         ; save t4
  movq   [%1+6*16], mm4   ; => out6
  movq   mm4, mm0         ; save t7

  psubsw mm3, mm1         ; mm3: tm465 = t4 - tm65
  psubsw mm0, mm2         ; mm0: tm765 = t7 - tp65
  paddsw mm2, mm4         ; mm2: tp765 = t7 + tp65
  paddsw mm1, mm5         ; mm1: tp465 = t4 + tm65

  ; Odd outputs 1/3/5/7; tan3 is stored as (tan3-1), hence the
  ; multiply-then-add trick x*tan3 = x*(tan3-1)+x (see header note).
  movq   mm4, [tan3]      ; tan3 - 1
  movq   mm5, [tan1]      ; tan1

  movq   mm7, mm3         ; save tm465
  pmulhw mm3, mm4         ; tm465*(tan3-1)
  movq   mm6, mm1         ; save tp465
  pmulhw mm1, mm5         ; tp465*tan1

  paddsw mm3, mm7         ; tm465*tan3
  pmulhw mm4, mm0         ; tm765*(tan3-1)
  paddsw mm4, mm0         ; tm765*tan3
  pmulhw mm5, mm2         ; tp765*tan1

  paddsw mm1, mm2         ; out1 = tp765 + tp465*tan1
  psubsw mm0, mm3         ; out3 = tm765 - tm465*tan3
  paddsw mm7, mm4         ; out5 = tm465 + tm765*tan3
  psubsw mm5, mm6         ; out7 =-tp465 + tp765*tan1

  movq   [%1+1*16], mm1   ; => out1
  movq   [%1+3*16], mm0   ; => out3
  movq   [%1+5*16], mm7   ; => out5
  movq   [%1+7*16], mm5   ; => out7

%endmacro
305 |
|
|
306 |
|
;////////////////////////////////////////////////////////////////////// |
307 |
|
;// fMTX_MULT (~20c) |
308 |
|
;////////////////////////////////////////////////////////////////////// |
309 |
|
|
310 |
|
;-----------------------------------------------------------------------
; fMTX_MULT: horizontal pass for one row (8 int16 coeffs at ecx+%1*16),
; done as a double 4x4 vector/matrix product with pmaddwd.
; The row is first folded into sums a[i]=x[i]+x[7-i] and differences
; b[i]=x[i]-x[7-i]; each pmaddwd pairs an (a..) half with a (b..) half
; against the packed coefficient table %2. Results are rounded with %3
; (low qword) / %4 (high qword) and arithmetic-shifted right by 4.
; %1=row index, %2=coefficient table, %3/%4=rounders.
; Expects ecx = block base; clobbers mm0-mm7.
;-----------------------------------------------------------------------
%macro fMTX_MULT 4  ; %1=src, %2 = Coeffs, %3/%4=rounders
  movq    mm0, [ecx+%1*16+0]            ; mm0 = [0123]

  ; the 'pshufw' below is the only SSE instruction.
  ; For MMX-only version, it should be emulated with
  ; some 'punpck' soup... (see fMTX_MULT_MMX)

  pshufw  mm1, [ecx+%1*16+8], 00011011b ; mm1 = [7654] (word-reversed)
  movq    mm7, mm0

  paddsw  mm0, mm1                      ; mm0 = [a0 a1 a2 a3]
  psubsw  mm7, mm1                      ; mm7 = [b0 b1 b2 b3]

  movq    mm1, mm0
  punpckldq mm0, mm7                    ; mm0 = [a0 a1 b0 b1]
  punpckhdq mm1, mm7                    ; mm1 = [b2 b3 a2 a3]

  ; Eight dual dot-products: each pmaddwd yields two 32-bit sums,
  ; one per output coefficient half.
  movq    mm2, qword [%2+ 0]            ; [ M00 M01 M16 M17]
  movq    mm3, qword [%2+ 8]            ; [ M02 M03 M18 M19]
  pmaddwd mm2, mm0                      ; [a0.M00+a1.M01 | b0.M16+b1.M17]
  movq    mm4, qword [%2+16]            ; [ M04 M05 M20 M21]
  pmaddwd mm3, mm1                      ; [a2.M02+a3.M03 | b2.M18+b3.M19]
  movq    mm5, qword [%2+24]            ; [ M06 M07 M22 M23]
  pmaddwd mm4, mm0                      ; [a0.M04+a1.M05 | b0.M20+b1.M21]
  movq    mm6, qword [%2+32]            ; [ M08 M09 M24 M25]
  pmaddwd mm5, mm1                      ; [a2.M06+a3.M07 | b2.M22+b3.M23]
  movq    mm7, qword [%2+40]            ; [ M10 M11 M26 M27]
  pmaddwd mm6, mm0                      ; [a0.M08+a1.M09 | b0.M24+b1.M25]
  paddd   mm2, mm3                      ; [ out0 | out1 ]
  pmaddwd mm7, mm1                      ; [a0.M10+a1.M11 | b0.M26+b1.M27]
  psrad   mm2, 16                       ; drop the 16 fractional bits
  pmaddwd mm0, qword [%2+48]            ; [a0.M12+a1.M13 | b0.M28+b1.M29]
  paddd   mm4, mm5                      ; [ out2 | out3 ]
  pmaddwd mm1, qword [%2+56]            ; [a0.M14+a1.M15 | b0.M30+b1.M31]
  psrad   mm4, 16

  paddd   mm6, mm7                      ; [ out4 | out5 ]
  psrad   mm6, 16
  paddd   mm0, mm1                      ; [ out6 | out7 ]
  psrad   mm0, 16

  ; Pack back to words, round, and undo the vertical-pass pre-shift.
  packssdw mm2, mm4                     ; [ out0|out1|out2|out3 ]
  paddsw  mm2, [%3]                     ; Round
  packssdw mm6, mm0                     ; [ out4|out5|out6|out7 ]
  paddsw  mm6, [%4]                     ; Round

  psraw   mm2, 4                        ; => [-2048, 2047]
  psraw   mm6, 4

  movq    [ecx+%1*16+0], mm2
  movq    [ecx+%1*16+8], mm6
%endmacro
362 |
|
|
363 |
|
align 16
;-----------------------------------------------------------------------
; void xvid_fdct_sse(int16_t block[64])   -- in-place 8x8 forward DCT
; cdecl: [esp+4] = block pointer (rows 16 bytes apart).
; Clobbers: ecx, mm0-mm7, flags. Uses one SSE insn (pshufw in fMTX_MULT).
; NOTE(review): no emms here -- presumably the caller restores FPU state.
; Rows 0 and 4 use fTab1/Rnd0, row 1 uses fTab2 with the special Rnd2
; rounder on its low half; rows pair tables symmetrically (1,7)->fTab2,
; (2,6)->fTab3, (3,5)->fTab4.
;-----------------------------------------------------------------------
xvid_fdct_sse:   ; ~240c
  mov ecx, [esp+4]

  fLLM_PASS ecx+0, 3              ; vertical pass, left 4 columns
  fLLM_PASS ecx+8, 3              ; vertical pass, right 4 columns
  fMTX_MULT  0, fTab1, Fdct_Rnd0, Fdct_Rnd0
  fMTX_MULT  1, fTab2, Fdct_Rnd2, Fdct_Rnd1
  fMTX_MULT  2, fTab3, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT  3, fTab4, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT  4, fTab1, Fdct_Rnd0, Fdct_Rnd0
  fMTX_MULT  5, fTab4, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT  6, fTab3, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT  7, fTab2, Fdct_Rnd1, Fdct_Rnd1

  ret
379 |
|
|
380 |
|
|
381 |
|
;////////////////////////////////////////////////////////////////////// |
382 |
|
;// fMTX_MULT_MMX (~26c) |
383 |
|
;////////////////////////////////////////////////////////////////////// |
384 |
|
|
385 |
|
;-----------------------------------------------------------------------
; fMTX_MULT_MMX: identical to fMTX_MULT except the word-reversal of the
; row's upper half is emulated with movd/punpcklwd/psrlq instead of the
; SSE-only 'pshufw' (~10% overall slower).
; %1=row index, %2=coefficient table, %3/%4=rounders.
; Expects ecx = block base; clobbers mm0-mm7.
;-----------------------------------------------------------------------
%macro fMTX_MULT_MMX 4  ; %1=src, %2 = Coeffs, %3/%4=rounders

  ; MMX-only version (no 'pshufw'. ~10% overall slower than SSE)

  ; Build [7654] from words [4567]:
  movd    mm1, [ecx+%1*16+8+4]          ; [67..]
  movq    mm0, [ecx+%1*16+0]            ; mm0 = [0123]
  movq    mm7, mm0
  punpcklwd mm1, [ecx+%1*16+8]          ; [6475]
  movq    mm2, mm1
  psrlq   mm1, 32                       ; [75..]
  punpcklwd mm1, mm2                    ; [7654]

  paddsw  mm0, mm1                      ; mm0 = [a0 a1 a2 a3]  (x[i]+x[7-i])
  psubsw  mm7, mm1                      ; mm7 = [b0 b1 b2 b3]  (x[i]-x[7-i])

  movq    mm1, mm0
  punpckldq mm0, mm7                    ; mm0 = [a0 a1 b0 b1]
  punpckhdq mm1, mm7                    ; mm1 = [b2 b3 a2 a3]

  ; Eight dual dot-products against the packed table (same as fMTX_MULT).
  movq    mm2, qword [%2+ 0]            ; [ M00 M01 M16 M17]
  movq    mm3, qword [%2+ 8]            ; [ M02 M03 M18 M19]
  pmaddwd mm2, mm0                      ; [a0.M00+a1.M01 | b0.M16+b1.M17]
  movq    mm4, qword [%2+16]            ; [ M04 M05 M20 M21]
  pmaddwd mm3, mm1                      ; [a2.M02+a3.M03 | b2.M18+b3.M19]
  movq    mm5, qword [%2+24]            ; [ M06 M07 M22 M23]
  pmaddwd mm4, mm0                      ; [a0.M04+a1.M05 | b0.M20+b1.M21]
  movq    mm6, qword [%2+32]            ; [ M08 M09 M24 M25]
  pmaddwd mm5, mm1                      ; [a2.M06+a3.M07 | b2.M22+b3.M23]
  movq    mm7, qword [%2+40]            ; [ M10 M11 M26 M27]
  pmaddwd mm6, mm0                      ; [a0.M08+a1.M09 | b0.M24+b1.M25]
  paddd   mm2, mm3                      ; [ out0 | out1 ]
  pmaddwd mm7, mm1                      ; [a0.M10+a1.M11 | b0.M26+b1.M27]
  psrad   mm2, 16                       ; drop the 16 fractional bits
  pmaddwd mm0, qword [%2+48]            ; [a0.M12+a1.M13 | b0.M28+b1.M29]
  paddd   mm4, mm5                      ; [ out2 | out3 ]
  pmaddwd mm1, qword [%2+56]            ; [a0.M14+a1.M15 | b0.M30+b1.M31]
  psrad   mm4, 16

  paddd   mm6, mm7                      ; [ out4 | out5 ]
  psrad   mm6, 16
  paddd   mm0, mm1                      ; [ out6 | out7 ]
  psrad   mm0, 16

  ; Pack back to words, round, and undo the vertical-pass pre-shift.
  packssdw mm2, mm4                     ; [ out0|out1|out2|out3 ]
  paddsw  mm2, [%3]                     ; Round
  packssdw mm6, mm0                     ; [ out4|out5|out6|out7 ]
  paddsw  mm6, [%4]                     ; Round

  psraw   mm2, 4                        ; => [-2048, 2047]
  psraw   mm6, 4

  movq    [ecx+%1*16+0], mm2
  movq    [ecx+%1*16+8], mm6
%endmacro
439 |
|
|
440 |
|
align 16
;-----------------------------------------------------------------------
; void xvid_fdct_mmx(int16_t block[64])   -- in-place 8x8 forward DCT
; cdecl: [esp+4] = block pointer (rows 16 bytes apart).
; Pure-MMX variant of xvid_fdct_sse: same vertical pass, horizontal
; pass uses fMTX_MULT_MMX (pshufw emulated), same table/rounder pairing.
; Clobbers: ecx, mm0-mm7, flags.
; NOTE(review): no emms here -- presumably the caller restores FPU state.
;-----------------------------------------------------------------------
xvid_fdct_mmx:   ; ~269c
  mov ecx, [esp+4]

  fLLM_PASS ecx+0, 3              ; vertical pass, left 4 columns
  fLLM_PASS ecx+8, 3              ; vertical pass, right 4 columns
  fMTX_MULT_MMX  0, fTab1, Fdct_Rnd0, Fdct_Rnd0
  fMTX_MULT_MMX  1, fTab2, Fdct_Rnd2, Fdct_Rnd1
  fMTX_MULT_MMX  2, fTab3, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT_MMX  3, fTab4, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT_MMX  4, fTab1, Fdct_Rnd0, Fdct_Rnd0
  fMTX_MULT_MMX  5, fTab4, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT_MMX  6, fTab3, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT_MMX  7, fTab2, Fdct_Rnd1, Fdct_Rnd1

  ret
456 |
|
|
457 |
|
;////////////////////////////////////////////////////////////////////// |