;/****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - MMX and XMM forward discrete cosine transform -
; *
; *  Copyright(C) 2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This program is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; ***************************************************************************/

BITS 32

;-----------------------------------------------------------------------------
; cglobal: export symbol %1, adding the leading underscore that some
; object formats require when PREFIX is defined (e.g. Win32 COFF).
;-----------------------------------------------------------------------------
%macro cglobal 1
  %ifdef PREFIX
    global _%1
    %define %1 _%1
  %else
    global %1
  %endif
%endmacro

;;; Define this if you want an unrolled version of the code
%define UNROLLED_LOOP

;=============================================================================
;
; Vertical pass is an implementation of the scheme:
;  Loeffler C., Ligtenberg A., and Moschytz C.S.:
;  Practical Fast 1D DCT Algorithm with Eleven Multiplications,
;  Proc. ICASSP 1989, 988-991.
;
; Horizontal pass is a double 4x4 vector/matrix multiplication,
; (see also Intel's Application Note 922:
;  http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
;  Copyright (C) 1999 Intel Corporation)
;
; Notes:
;  * tan(3pi/16) is greater than 0.5, and would use the
;    sign bit when turned into 16b fixed-point precision. So,
;    we use the trick: x*tan3 = x*(tan3-1)+x
;
;  * There's only one SSE-specific instruction (pshufw).
;    Porting to SSE2 also seems straightforward.
;
;  * There's still 1 or 2 ticks to save in fLLM_PASS, but
;    I prefer having a readable code, instead of a tightly
;    scheduled one...
;
;  * Quantization stage (as well as pre-transposition for the
;    idct way back) can be included in the fTab* constants
;    (with induced loss of precision, somehow)
;
;  * Some more details at: http://skal.planet-d.net/coding/dct.html
;
;=============================================================================
;
; idct-like IEEE errors:
;
; =========================
; Peak error:   1.0000
; Peak MSE:     0.0365
; Overall MSE:  0.0201
; Peak ME:      0.0265
; Overall ME:   0.0006
;
; == Mean square errors ==
;  0.000 0.001 0.001 0.002 0.000 0.002 0.001 0.000    [0.001]
;  0.035 0.029 0.032 0.032 0.031 0.032 0.034 0.035    [0.032]
;  0.026 0.028 0.027 0.027 0.025 0.028 0.028 0.025    [0.027]
;  0.037 0.032 0.031 0.030 0.028 0.029 0.026 0.031    [0.030]
;  0.000 0.001 0.001 0.002 0.000 0.002 0.001 0.001    [0.001]
;  0.025 0.024 0.022 0.022 0.022 0.022 0.023 0.023    [0.023]
;  0.026 0.028 0.025 0.028 0.030 0.025 0.026 0.027    [0.027]
;  0.021 0.020 0.020 0.022 0.020 0.022 0.017 0.019    [0.020]
;
; == Abs Mean errors ==
;  0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000    [0.000]
;  0.020 0.001 0.003 0.003 0.000 0.004 0.002 0.003    [0.002]
;  0.000 0.001 0.001 0.001 0.001 0.004 0.000 0.000    [0.000]
;  0.027 0.001 0.000 0.002 0.002 0.002 0.001 0.000    [0.003]
;  0.000 0.000 0.000 0.000 0.000 0.001 0.000 0.001    [-0.000]
;  0.001 0.003 0.001 0.001 0.002 0.001 0.000 0.000    [-0.000]
;  0.000 0.002 0.002 0.001 0.001 0.002 0.001 0.000    [-0.000]
;  0.000 0.002 0.001 0.002 0.001 0.002 0.001 0.001    [-0.000]
;
;=============================================================================

;=============================================================================
; Read only data
;=============================================================================

%ifdef FORMAT_COFF
SECTION .rodata data
%else
SECTION .rodata data align=16
%endif

; 16-bit fixed-point trig constants, 4 identical words each so a single
; movq broadcasts the value across one MMX register.
ALIGN 16
tan1:   dw 0x32ec,0x32ec,0x32ec,0x32ec    ; tan( pi/16)
tan2:   dw 0x6a0a,0x6a0a,0x6a0a,0x6a0a    ; tan(2pi/16) (=sqrt(2)-1)
tan3:   dw 0xab0e,0xab0e,0xab0e,0xab0e    ; tan(3pi/16)-1
sqrt2:  dw 0x5a82,0x5a82,0x5a82,0x5a82    ; 0.5/sqrt(2)

ALIGN 16
; Horizontal-pass coefficients: one 4x4x16bit matrix pair (64 bytes) per row
; of the 8x8 block. Row order is 0..7, selecting fTab1,2,3,4,1,4,3,2.
fdct_table:
; row 0 (fTab1)
  dw 0x4000, 0x4000, 0x58c5, 0x4b42
  dw 0x4000, 0x4000, 0x3249, 0x11a8
  dw 0x539f, 0x22a3, 0x4b42, 0xee58
  dw 0xdd5d, 0xac61, 0xa73b, 0xcdb7
  dw 0x4000, 0xc000, 0x3249, 0xa73b
  dw 0xc000, 0x4000, 0x11a8, 0x4b42
  dw 0x22a3, 0xac61, 0x11a8, 0xcdb7
  dw 0x539f, 0xdd5d, 0x4b42, 0xa73b

; row 1 (fTab2)
  dw 0x58c5, 0x58c5, 0x7b21, 0x6862
  dw 0x58c5, 0x58c5, 0x45bf, 0x187e
  dw 0x73fc, 0x300b, 0x6862, 0xe782
  dw 0xcff5, 0x8c04, 0x84df, 0xba41
  dw 0x58c5, 0xa73b, 0x45bf, 0x84df
  dw 0xa73b, 0x58c5, 0x187e, 0x6862
  dw 0x300b, 0x8c04, 0x187e, 0xba41
  dw 0x73fc, 0xcff5, 0x6862, 0x84df

; row 2 (fTab3)
  dw 0x539f, 0x539f, 0x73fc, 0x6254
  dw 0x539f, 0x539f, 0x41b3, 0x1712
  dw 0x6d41, 0x2d41, 0x6254, 0xe8ee
  dw 0xd2bf, 0x92bf, 0x8c04, 0xbe4d
  dw 0x539f, 0xac61, 0x41b3, 0x8c04
  dw 0xac61, 0x539f, 0x1712, 0x6254
  dw 0x2d41, 0x92bf, 0x1712, 0xbe4d
  dw 0x6d41, 0xd2bf, 0x6254, 0x8c04

; row 3 (fTab4)
  dw 0x4b42, 0x4b42, 0x6862, 0x587e
  dw 0x4b42, 0x4b42, 0x3b21, 0x14c3
  dw 0x6254, 0x28ba, 0x587e, 0xeb3d
  dw 0xd746, 0x9dac, 0x979e, 0xc4df
  dw 0x4b42, 0xb4be, 0x3b21, 0x979e
  dw 0xb4be, 0x4b42, 0x14c3, 0x587e
  dw 0x28ba, 0x9dac, 0x14c3, 0xc4df
  dw 0x6254, 0xd746, 0x587e, 0x979e

; row 4 (fTab1)
  dw 0x4000, 0x4000, 0x58c5, 0x4b42
  dw 0x4000, 0x4000, 0x3249, 0x11a8
  dw 0x539f, 0x22a3, 0x4b42, 0xee58
  dw 0xdd5d, 0xac61, 0xa73b, 0xcdb7
  dw 0x4000, 0xc000, 0x3249, 0xa73b
  dw 0xc000, 0x4000, 0x11a8, 0x4b42
  dw 0x22a3, 0xac61, 0x11a8, 0xcdb7
  dw 0x539f, 0xdd5d, 0x4b42, 0xa73b

; row 5 (fTab4)
  dw 0x4b42, 0x4b42, 0x6862, 0x587e
  dw 0x4b42, 0x4b42, 0x3b21, 0x14c3
  dw 0x6254, 0x28ba, 0x587e, 0xeb3d
  dw 0xd746, 0x9dac, 0x979e, 0xc4df
  dw 0x4b42, 0xb4be, 0x3b21, 0x979e
  dw 0xb4be, 0x4b42, 0x14c3, 0x587e
  dw 0x28ba, 0x9dac, 0x14c3, 0xc4df
  dw 0x6254, 0xd746, 0x587e, 0x979e

; row 6 (fTab3)
  dw 0x539f, 0x539f, 0x73fc, 0x6254
  dw 0x539f, 0x539f, 0x41b3, 0x1712
  dw 0x6d41, 0x2d41, 0x6254, 0xe8ee
  dw 0xd2bf, 0x92bf, 0x8c04, 0xbe4d
  dw 0x539f, 0xac61, 0x41b3, 0x8c04
  dw 0xac61, 0x539f, 0x1712, 0x6254
  dw 0x2d41, 0x92bf, 0x1712, 0xbe4d
  dw 0x6d41, 0xd2bf, 0x6254, 0x8c04

; row 7 (fTab2)
  dw 0x58c5, 0x58c5, 0x7b21, 0x6862
  dw 0x58c5, 0x58c5, 0x45bf, 0x187e
  dw 0x73fc, 0x300b, 0x6862, 0xe782
  dw 0xcff5, 0x8c04, 0x84df, 0xba41
  dw 0x58c5, 0xa73b, 0x45bf, 0x84df
  dw 0xa73b, 0x58c5, 0x187e, 0x6862
  dw 0x300b, 0x8c04, 0x187e, 0xba41
  dw 0x73fc, 0xcff5, 0x6862, 0x84df

; Per-row rounders added before the final >>4; one 4-word entry per row.
; fdct_rounding_1 is used for the low quad of a row, fdct_rounding_2 for
; the high quad (they differ only in row 1).
ALIGN 16
fdct_rounding_1:
  dw  6, 8, 8, 8
  dw 10, 8, 8, 8
  dw  8, 8, 8, 8
  dw  8, 8, 8, 8
  dw  6, 8, 8, 8
  dw  8, 8, 8, 8
  dw  8, 8, 8, 8
  dw  8, 8, 8, 8

ALIGN 16
fdct_rounding_2:
  dw  6, 8, 8, 8
  dw  8, 8, 8, 8
  dw  8, 8, 8, 8
  dw  8, 8, 8, 8
  dw  6, 8, 8, 8
  dw  8, 8, 8, 8
  dw  8, 8, 8, 8
  dw  8, 8, 8, 8

ALIGN 16
MMX_One:
  dw 1, 1, 1, 1

;=============================================================================
; Helper Macros for real code
;=============================================================================

;-----------------------------------------------------------------------------
; fLLM_PASS: FDCT Loeffler/Ligtenberg/Moschytz vertical pass (~39c)
; %1=dst, %2=src (both strided by 16 bytes per row), %3=Shift
; Processes 4 columns at a time (one quad of int16 per row).
; Clobbers mm0-mm7; reads tan1/tan2/tan3/sqrt2/MMX_One.
;-----------------------------------------------------------------------------

%macro fLLM_PASS 3
  movq    mm0, [%2+0*16]        ; In0
  movq    mm2, [%2+2*16]        ; In2
  movq    mm3, mm0
  movq    mm4, mm2
  movq    mm7, [%2+7*16]        ; In7
  movq    mm5, [%2+5*16]        ; In5

  psubsw  mm0, mm7              ; t7 = In0-In7
  paddsw  mm7, mm3              ; t0 = In0+In7
  psubsw  mm2, mm5              ; t5 = In2-In5
  paddsw  mm5, mm4              ; t2 = In2+In5

  movq    mm3, [%2+3*16]        ; In3
  movq    mm4, [%2+4*16]        ; In4
  movq    mm1, mm3
  psubsw  mm3, mm4              ; t4 = In3-In4
  paddsw  mm4, mm1              ; t3 = In3+In4
  movq    mm6, [%2+6*16]        ; In6
  movq    mm1, [%2+1*16]        ; In1
  psubsw  mm1, mm6              ; t6 = In1-In6
  paddsw  mm6, [%2+1*16]        ; t1 = In1+In6

  psubsw  mm7, mm4              ; tm03 = t0-t3
  psubsw  mm6, mm5              ; tm12 = t1-t2
  paddsw  mm4, mm4              ; 2.t3
  paddsw  mm5, mm5              ; 2.t2
  paddsw  mm4, mm7              ; tp03 = t0+t3
  paddsw  mm5, mm6              ; tp12 = t1+t2

  psllw   mm2, %3+1             ; shift t5 (shift +1 to..
  psllw   mm1, %3+1             ; shift t6  ..compensate cos4/2)
  psllw   mm4, %3               ; shift t3
  psllw   mm5, %3               ; shift t2
  psllw   mm7, %3               ; shift t0
  psllw   mm6, %3               ; shift t1
  psllw   mm3, %3               ; shift t4
  psllw   mm0, %3               ; shift t7

  psubsw  mm4, mm5              ; out4 = tp03-tp12
  psubsw  mm1, mm2              ; mm1: t6-t5
  paddsw  mm5, mm5
  paddsw  mm2, mm2
  paddsw  mm5, mm4              ; out0 = tp03+tp12
  movq    [%1+4*16], mm4        ; => out4
  paddsw  mm2, mm1              ; mm2: t6+t5
  movq    [%1+0*16], mm5        ; => out0

  movq    mm4, [tan2]           ; mm4 <= tan2
  pmulhw  mm4, mm7              ; tm03*tan2
  movq    mm5, [tan2]           ; mm5 <= tan2
  psubsw  mm4, mm6              ; out6 = tm03*tan2 - tm12
  pmulhw  mm5, mm6              ; tm12*tan2
  paddsw  mm5, mm7              ; out2 = tm12*tan2 + tm03

  movq    mm6, [sqrt2]
  movq    mm7, [MMX_One]

  pmulhw  mm2, mm6              ; mm2: tp65 = (t6 + t5)*cos4
  por     mm5, mm7              ; correct out2
  por     mm4, mm7              ; correct out6
  pmulhw  mm1, mm6              ; mm1: tm65 = (t6 - t5)*cos4
  por     mm2, mm7              ; correct tp65

  movq    [%1+2*16], mm5        ; => out2
  movq    mm5, mm3              ; save t4
  movq    [%1+6*16], mm4        ; => out6
  movq    mm4, mm0              ; save t7

  psubsw  mm3, mm1              ; mm3: tm465 = t4 - tm65
  psubsw  mm0, mm2              ; mm0: tm765 = t7 - tp65
  paddsw  mm2, mm4              ; mm2: tp765 = t7 + tp65
  paddsw  mm1, mm5              ; mm1: tp465 = t4 + tm65

  movq    mm4, [tan3]           ; tan3 - 1
  movq    mm5, [tan1]           ; tan1

  movq    mm7, mm3              ; save tm465
  pmulhw  mm3, mm4              ; tm465*(tan3-1)
  movq    mm6, mm1              ; save tp465
  pmulhw  mm1, mm5              ; tp465*tan1

  paddsw  mm3, mm7              ; tm465*tan3
  pmulhw  mm4, mm0              ; tm765*(tan3-1)
  paddsw  mm4, mm0              ; tm765*tan3
  pmulhw  mm5, mm2              ; tp765*tan1

  paddsw  mm1, mm2              ; out1 = tp765 + tp465*tan1
  psubsw  mm0, mm3              ; out3 = tm765 - tm465*tan3
  paddsw  mm7, mm4              ; out5 = tm465 + tm765*tan3
  psubsw  mm5, mm6              ; out7 =-tp465 + tp765*tan1

  movq    [%1+1*16], mm1        ; => out1
  movq    [%1+3*16], mm0        ; => out3
  movq    [%1+5*16], mm7        ; => out5
  movq    [%1+7*16], mm5        ; => out7
%endmacro

;-----------------------------------------------------------------------------
; fMTX_MULT_XMM (~20c): horizontal pass, one 8-point row via two 4x4
; vector/matrix products.
; %1=dst, %2=src (one 16-byte row), %3=Coeffs (64-byte matrix pair),
; %4/%5=rounders for low/high output quads.
; Clobbers mm0-mm7. Requires 'pshufw' (SSE/XMM integer extension).
;-----------------------------------------------------------------------------

%macro fMTX_MULT_XMM 5
  movq      mm0, [%2 + 0]             ; mm0 = [0123]
  ; the 'pshufw' below is the only SSE instruction.
  ; For MMX-only version, it should be emulated with
  ; some 'punpck' soup...
  pshufw    mm1, [%2 + 8], 00011011b  ; mm1 = [7654]
  movq      mm7, mm0

  paddsw    mm0, mm1                  ; mm0 = [a0 a1 a2 a3]
  psubsw    mm7, mm1                  ; mm7 = [b0 b1 b2 b3]

  movq      mm1, mm0
  punpckldq mm0, mm7                  ; mm0 = [a0 a1 b0 b1]
  punpckhdq mm1, mm7                  ; mm1 = [b2 b3 a2 a3]

  movq      mm2, qword [%3 +  0]      ; [ M00 M01 M16 M17]
  movq      mm3, qword [%3 +  8]      ; [ M02 M03 M18 M19]
  pmaddwd   mm2, mm0                  ; [a0.M00+a1.M01 | b0.M16+b1.M17]
  movq      mm4, qword [%3 + 16]      ; [ M04 M05 M20 M21]
  pmaddwd   mm3, mm1                  ; [a2.M02+a3.M03 | b2.M18+b3.M19]
  movq      mm5, qword [%3 + 24]      ; [ M06 M07 M22 M23]
  pmaddwd   mm4, mm0                  ; [a0.M04+a1.M05 | b0.M20+b1.M21]
  movq      mm6, qword [%3 + 32]      ; [ M08 M09 M24 M25]
  pmaddwd   mm5, mm1                  ; [a2.M06+a3.M07 | b2.M22+b3.M23]
  movq      mm7, qword [%3 + 40]      ; [ M10 M11 M26 M27]
  pmaddwd   mm6, mm0                  ; [a0.M08+a1.M09 | b0.M24+b1.M25]
  paddd     mm2, mm3                  ; [ out0 | out1 ]
  pmaddwd   mm7, mm1                  ; [a0.M10+a1.M11 | b0.M26+b1.M27]
  psrad     mm2, 16
  pmaddwd   mm0, qword [%3 + 48]      ; [a0.M12+a1.M13 | b0.M28+b1.M29]
  paddd     mm4, mm5                  ; [ out2 | out3 ]
  pmaddwd   mm1, qword [%3 + 56]      ; [a0.M14+a1.M15 | b0.M30+b1.M31]
  psrad     mm4, 16

  paddd     mm6, mm7                  ; [ out4 | out5 ]
  psrad     mm6, 16
  paddd     mm0, mm1                  ; [ out6 | out7 ]
  psrad     mm0, 16

  packssdw  mm2, mm4                  ; [ out0|out1|out2|out3 ]
  paddsw    mm2, [%4]                 ; Round
  packssdw  mm6, mm0                  ; [ out4|out5|out6|out7 ]
  paddsw    mm6, [%5]                 ; Round

  psraw     mm2, 4                    ; => [-2048, 2047]
  psraw     mm6, 4

  movq      [%1 + 0], mm2
  movq      [%1 + 8], mm6
%endmacro

;-----------------------------------------------------------------------------
; fMTX_MULT_MMX (~22c): horizontal pass, plain-MMX variant of
; fMTX_MULT_XMM (the 'pshufw' reversal is emulated with punpck/psrlq).
; %1=dst, %2=src, %3=Coeffs, %4/%5=rounders. Clobbers mm0-mm7.
;-----------------------------------------------------------------------------

%macro fMTX_MULT_MMX 5
  ; MMX-only version (no 'pshufw'. ~10% overall slower than SSE)
  movd      mm1, [%2 + 8 + 4]         ; [67..]
  movq      mm0, [%2 + 0]             ; mm0 = [0123]
  movq      mm7, mm0
  punpcklwd mm1, [%2 + 8]             ; [6475]
  movq      mm2, mm1
  psrlq     mm1, 32                   ; [75..]
  punpcklwd mm1, mm2                  ; [7654]

  paddsw    mm0, mm1                  ; mm0 = [a0 a1 a2 a3]
  psubsw    mm7, mm1                  ; mm7 = [b0 b1 b2 b3]

  movq      mm1, mm0
  punpckldq mm0, mm7                  ; mm0 = [a0 a1 b0 b1]
  punpckhdq mm1, mm7                  ; mm1 = [b2 b3 a2 a3]

  movq      mm2, qword [%3 +  0]      ; [ M00 M01 M16 M17]
  movq      mm3, qword [%3 +  8]      ; [ M02 M03 M18 M19]
  pmaddwd   mm2, mm0                  ; [a0.M00+a1.M01 | b0.M16+b1.M17]
  movq      mm4, qword [%3 + 16]      ; [ M04 M05 M20 M21]
  pmaddwd   mm3, mm1                  ; [a2.M02+a3.M03 | b2.M18+b3.M19]
  movq      mm5, qword [%3 + 24]      ; [ M06 M07 M22 M23]
  pmaddwd   mm4, mm0                  ; [a0.M04+a1.M05 | b0.M20+b1.M21]
  movq      mm6, qword [%3 + 32]      ; [ M08 M09 M24 M25]
  pmaddwd   mm5, mm1                  ; [a2.M06+a3.M07 | b2.M22+b3.M23]
  movq      mm7, qword [%3 + 40]      ; [ M10 M11 M26 M27]
  pmaddwd   mm6, mm0                  ; [a0.M08+a1.M09 | b0.M24+b1.M25]
  paddd     mm2, mm3                  ; [ out0 | out1 ]
  pmaddwd   mm7, mm1                  ; [a0.M10+a1.M11 | b0.M26+b1.M27]
  psrad     mm2, 16
  pmaddwd   mm0, qword [%3 + 48]      ; [a0.M12+a1.M13 | b0.M28+b1.M29]
  paddd     mm4, mm5                  ; [ out2 | out3 ]
  pmaddwd   mm1, qword [%3 + 56]      ; [a0.M14+a1.M15 | b0.M30+b1.M31]
  psrad     mm4, 16

  paddd     mm6, mm7                  ; [ out4 | out5 ]
  psrad     mm6, 16
  paddd     mm0, mm1                  ; [ out6 | out7 ]
  psrad     mm0, 16

  packssdw  mm2, mm4                  ; [ out0|out1|out2|out3 ]
  paddsw    mm2, [%4]                 ; Round
  packssdw  mm6, mm0                  ; [ out4|out5|out6|out7 ]
  paddsw    mm6, [%5]                 ; Round

  psraw     mm2, 4                    ; => [-2048, 2047]
  psraw     mm6, 4

  movq      [%1 + 0], mm2
  movq      [%1 + 8], mm6
%endmacro

;-----------------------------------------------------------------------------
; MAKE_FDCT_FUNC
; %1 funcname, %2 macro for row dct
;
; Emits: void %1(int16_t block[64])  (cdecl, in-place 2D forward DCT)
; Two fLLM_PASS calls cover the left/right 4-column halves vertically,
; then %2 transforms each of the 8 rows (16 bytes apart) with its own
; 64-byte coefficient matrix and 8-byte rounder entries.
; NOTE(review): no 'emms' is executed here — presumably callers manage
; the FPU/MMX state themselves; confirm at call sites.
;-----------------------------------------------------------------------------

%macro MAKE_FDCT_FUNC 2
ALIGN 16
cglobal %1
%1:
%ifdef UNROLLED_LOOP
  mov ecx, [esp + 4]
%else
  push ebx
  push edi
  mov ecx, [esp + 8 + 4]
%endif

  fLLM_PASS ecx+0, ecx+0, 3
  fLLM_PASS ecx+8, ecx+8, 3

%ifdef UNROLLED_LOOP
%assign i 0
%rep 8
  %2 ecx+i*16, ecx+i*16, fdct_table+i*64, fdct_rounding_1+i*8, fdct_rounding_2+i*8
%assign i i+1
%endrep
%else
  mov eax, 8                    ; row counter
  mov edx, fdct_table
  mov ebx, fdct_rounding_1
  mov edi, fdct_rounding_2
.loop:
  %2 ecx, ecx, edx, ebx, edi
  add ecx, 2*8                  ; next row (8 words); was 'add eax, 2*16',
                                ; which clobbered the counter and never
                                ; advanced the row pointer (endless loop)
  add edx, 2*32                 ; next 64-byte coefficient matrix
  add ebx, 2*4                  ; next rounder entries (4 words each)
  add edi, 2*4
  dec eax
  jne .loop

  pop edi
  pop ebx
%endif

  ret
%endmacro

;=============================================================================
; Code
;=============================================================================

SECTION .text

;-----------------------------------------------------------------------------
; void fdct_mmx_skal(int16_t block[64]);
;-----------------------------------------------------------------------------

MAKE_FDCT_FUNC fdct_mmx_skal, fMTX_MULT_MMX

;-----------------------------------------------------------------------------
; void fdct_xmm_skal(int16_t block[64]);
;-----------------------------------------------------------------------------

MAKE_FDCT_FUNC fdct_xmm_skal, fMTX_MULT_XMM