;/**************************************************************************
; *
; * XVID MPEG-4 VIDEO CODEC
; * - 3dne Quantization/Dequantization -
; *
; * Copyright (C) 2002-2003 Peter Ross <pross@xvid.org>
; * 2002-2003 Michael Militzer <isibaar@xvid.org>
; * 2002-2003 Pascal Massimino <skal@planet-d.net>
; *
; * This program is free software ; you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
; * the Free Software Foundation ; either version 2 of the License, or
; * (at your option) any later version.
; *
; * This program is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY ; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with this program ; if not, write to the Free Software
; * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; *
; * $Id$
; *
; *************************************************************************/

%define SATURATE

BITS 32

%macro cglobal 1
%ifdef PREFIX
global _%1
%define %1 _%1
%else
global %1
%endif
%endmacro

%macro cextern 1
%ifdef PREFIX
extern _%1
%define %1 _%1
%else
extern %1
%endif
%endmacro

;=============================================================================
; Local data (Read Only)
;=============================================================================

SECTION .rodata

mmx_one:
times 4 dw 1

;-----------------------------------------------------------------------------
; divide by 2Q table
;-----------------------------------------------------------------------------

ALIGN 16
mmx_div:
times 4 dw 65535 ; the div by 2 formula will overflow for the case
; quant=1 but we don't care much because quant=1
; is handled by a different piece of code that
; doesn't use this table.
%assign quant 2
%rep 30
times 4 dw (1<<17) / (quant*2) + 1
%assign quant quant+1
%endrep
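; Illustration: dividing by 2*quant is done with pmulhw against this table
; plus one extra right shift, i.e. x/(2*quant) ~= (x*((1<<17)/(2*quant)+1)) >> 17.
; e.g. quant=4 -> entry 16385, and x=100 gives (100*16385)>>17 = 12 = 100/8.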

;-----------------------------------------------------------------------------
; intra matrix
;-----------------------------------------------------------------------------

cextern intra_matrix
cextern intra_matrix_fix

;-----------------------------------------------------------------------------
; inter matrix
;-----------------------------------------------------------------------------

cextern inter_matrix
cextern inter_matrix_fix


%define VM18P 3
%define VM18Q 4


;-----------------------------------------------------------------------------
; quantd table
;-----------------------------------------------------------------------------

quantd:
%assign quant 1
%rep 31
times 4 dw ((VM18P*quant) + (VM18Q/2)) / VM18Q
%assign quant quant+1
%endrep
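; With VM18P=3 and VM18Q=4 each entry is (3*quant + 2)/4, e.g. quant=1 -> 1,
; quant=4 -> 3, quant=31 -> 23; this is the rounding bias added before the
; divide by 2*quant in quant_mpeg_intra_mmx below.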

;-----------------------------------------------------------------------------
; multiply by 2Q table
;-----------------------------------------------------------------------------

mmx_mul_quant:
%assign quant 1
%rep 31
times 4 dw quant
%assign quant quant+1
%endrep

;-----------------------------------------------------------------------------
; saturation limits
;-----------------------------------------------------------------------------

ALIGN 16

mmx_32767_minus_2047:
times 4 dw (32767-2047)
mmx_32768_minus_2048:
times 4 dw (32768-2048)
mmx_2047:
times 4 dw 2047
mmx_minus_2048:
times 4 dw (-2048)
zero:
times 4 dw 0

;=============================================================================
; Code
;=============================================================================

SECTION .text

cglobal quant_mpeg_intra_mmx
cglobal quant_mpeg_inter_mmx
cglobal dequant_mpeg_intra_mmx
cglobal dequant_mpeg_inter_mmx

;-----------------------------------------------------------------------------
;
; uint32_t quant_mpeg_intra_mmx(int16_t * coeff,
;                               const int16_t const * data,
;                               const uint32_t quant,
;                               const uint32_t dcscalar);
;
;-----------------------------------------------------------------------------
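; Rough scalar equivalent of the generic path below (an illustrative sketch,
; not taken from the reference code; it assumes intra_matrix_fix[i] holds
; roughly 2^16/intra_matrix[i]):
;
;   for (i = 0; i < 64; i++) {
;       level    = (16*abs(data[i]) + (intra_matrix[i]>>1)) / intra_matrix[i];
;       coeff[i] = sign(data[i]) * ((level + quantd[quant]) / (2*quant));
;   }
;   coeff[0] = (data[0] + sign(data[0])*(dcscalar/2)) / dcscalar;  ; DC, see .done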

ALIGN 16
quant_mpeg_intra_mmx:

push ecx
push esi
push edi

mov edi, [esp + 12 + 4] ; coeff
mov esi, [esp + 12 + 8] ; data
mov eax, [esp + 12 + 12] ; quant

movq mm5, [quantd + eax * 8 - 8] ; quantd -> mm5

xor ecx, ecx
cmp al, 1
jz near .q1loop

cmp al, 2
jz near .q2loop

movq mm7, [mmx_div + eax * 8 - 8] ; multipliers[quant] -> mm7

ALIGN 16
.loop
movq mm0, [esi + 8*ecx] ; mm0 = [1st]
movq mm3, [esi + 8*ecx + 8] ;
pxor mm1, mm1 ; mm1 = 0
pxor mm4, mm4
pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
pcmpgtw mm4, mm3
pxor mm0, mm1 ; mm0 = |mm0|
pxor mm3, mm4 ;
psubw mm0, mm1 ; displace
psubw mm3, mm4 ;
psllw mm0, 4 ; level << 4
psllw mm3, 4
movq mm2, [intra_matrix + 8*ecx]
psrlw mm2, 1 ; intra_matrix[i]>>1
paddw mm0, mm2
movq mm2, [intra_matrix_fix + ecx*8]
pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
movq mm2, [intra_matrix + 8*ecx + 8]
psrlw mm2, 1
paddw mm3, mm2
movq mm2, [intra_matrix_fix + ecx*8 + 8]
pmulhw mm3, mm2
paddw mm0, mm5 ; + quantd
paddw mm3, mm5
pmulhw mm0, mm7 ; mm0 = (mm0 / 2Q) >> 16
pmulhw mm3, mm7 ;
psrlw mm0, 1 ; additional shift by 1 => 16 + 1 = 17
psrlw mm3, 1
pxor mm0, mm1 ; mm0 *= sign(mm0)
pxor mm3, mm4 ;
psubw mm0, mm1 ; undisplace
psubw mm3, mm4 ;

movq [edi + 8*ecx], mm0
movq [edi + 8*ecx + 8], mm3

add ecx,2
cmp ecx,16
jnz near .loop

.done
; calculate data[0] / (int32_t)dcscalar (rounded division)
mov ecx, [esp + 12 + 16] ; dcscalar
mov edx, ecx
movsx eax, word [esi] ; data[0]
shr edx, 1 ; edx = dcscalar /2
cmp eax, 0
jg .gtzero

sub eax, edx
jmp short .mul
.gtzero
add eax, edx
.mul
cdq ; expand eax -> edx:eax
idiv ecx ; eax = edx:eax / dcscalar

mov [edi], ax ; coeff[0] = ax

pop edi
pop esi
pop ecx

xor eax, eax ; return(0);
ret

ALIGN 16
.q1loop
movq mm0, [esi + 8*ecx] ; mm0 = [1st]
movq mm3, [esi + 8*ecx + 8] ;
pxor mm1, mm1 ; mm1 = 0
pxor mm4, mm4 ;
pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
pcmpgtw mm4, mm3 ;
pxor mm0, mm1 ; mm0 = |mm0|
pxor mm3, mm4 ;
psubw mm0, mm1 ; displace
psubw mm3, mm4 ;
psllw mm0, 4
psllw mm3, 4
movq mm2, [intra_matrix + 8*ecx]
psrlw mm2, 1
paddw mm0, mm2
movq mm2, [intra_matrix_fix + ecx*8]
pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
movq mm2, [intra_matrix + 8*ecx + 8]
psrlw mm2, 1
paddw mm3, mm2
movq mm2, [intra_matrix_fix + ecx*8 + 8]
pmulhw mm3, mm2
paddw mm0, mm5
paddw mm3, mm5
psrlw mm0, 1 ; mm0 >>= 1 (/2)
psrlw mm3, 1 ;
pxor mm0, mm1 ; mm0 *= sign(mm0)
pxor mm3, mm4 ;
psubw mm0, mm1 ; undisplace
psubw mm3, mm4 ;
movq [edi + 8*ecx], mm0
movq [edi + 8*ecx + 8], mm3

add ecx, 2
cmp ecx, 16
jnz near .q1loop
jmp near .done


ALIGN 16
.q2loop
movq mm0, [esi + 8*ecx] ; mm0 = [1st]
movq mm3, [esi + 8*ecx + 8] ;
pxor mm1, mm1 ; mm1 = 0
pxor mm4, mm4 ;
pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
pcmpgtw mm4, mm3 ;
pxor mm0, mm1 ; mm0 = |mm0|
pxor mm3, mm4 ;
psubw mm0, mm1 ; displace
psubw mm3, mm4 ;
psllw mm0, 4
psllw mm3, 4
movq mm2, [intra_matrix + 8*ecx]
psrlw mm2, 1
paddw mm0, mm2
movq mm2, [intra_matrix_fix + ecx*8]
pmulhw mm0, mm2 ; (level<<4 + intra_matrix[i]>>1) / intra_matrix[i]
movq mm2, [intra_matrix + 8*ecx + 8]
psrlw mm2, 1
paddw mm3, mm2
movq mm2, [intra_matrix_fix + ecx*8 + 8]
pmulhw mm3, mm2
paddw mm0, mm5
paddw mm3, mm5
psrlw mm0, 2 ; mm0 >>= 2 (/4)
psrlw mm3, 2 ;
pxor mm0, mm1 ; mm0 *= sign(mm0)
pxor mm3, mm4 ;
psubw mm0, mm1 ; undisplace
psubw mm3, mm4 ;
movq [edi + 8*ecx], mm0
movq [edi + 8*ecx + 8], mm3

add ecx,2
cmp ecx,16
jnz near .q2loop
jmp near .done


;-----------------------------------------------------------------------------
;
; uint32_t quant_mpeg_inter_mmx(int16_t * coeff,
;                               const int16_t const * data,
;                               const uint32_t quant);
;
;-----------------------------------------------------------------------------
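; Rough scalar equivalent of the generic path below (an illustrative sketch,
; not taken from the reference code; it assumes inter_matrix_fix[i] holds
; roughly 2^16/inter_matrix[i]):
;
;   sum = 0;
;   for (i = 0; i < 64; i++) {
;       level    = ((16*abs(data[i]) + (inter_matrix[i]>>1)) / inter_matrix[i]) / (2*quant);
;       sum     += level;
;       coeff[i] = sign(data[i]) * level;
;   }
;   return sum;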

ALIGN 16
quant_mpeg_inter_mmx:

push ecx
push esi
push edi

mov edi, [esp + 12 + 4] ; coeff
mov esi, [esp + 12 + 8] ; data
mov eax, [esp + 12 + 12] ; quant

xor ecx, ecx

pxor mm5, mm5 ; sum

cmp al, 1
jz near .q1loop

cmp al, 2
jz near .q2loop

movq mm7, [mmx_div + eax * 8 - 8] ; divider

ALIGN 16
.loop
movq mm0, [esi + 8*ecx] ; mm0 = [1st]
movq mm3, [esi + 8*ecx + 8] ;
pxor mm1, mm1 ; mm1 = 0
pxor mm4, mm4 ;
pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
pcmpgtw mm4, mm3 ;
pxor mm0, mm1 ; mm0 = |mm0|
pxor mm3, mm4 ;
psubw mm0, mm1 ; displace
psubw mm3, mm4 ;
psllw mm0, 4
psllw mm3, 4
movq mm2, [inter_matrix + 8*ecx]
psrlw mm2, 1
paddw mm0, mm2
movq mm2, [inter_matrix_fix + ecx*8]
pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
movq mm2, [inter_matrix + 8*ecx + 8]
psrlw mm2, 1
paddw mm3, mm2
movq mm2, [inter_matrix_fix + ecx*8 + 8]
pmulhw mm3, mm2
pmulhw mm0, mm7 ; mm0 = (mm0 / 2Q) >> 16
pmulhw mm3, mm7 ;
psrlw mm0, 1 ; additional shift by 1 => 16 + 1 = 17
psrlw mm3, 1
paddw mm5, mm0 ; sum += mm0
pxor mm0, mm1 ; mm0 *= sign(mm0)
paddw mm5, mm3 ;
pxor mm3, mm4 ;
psubw mm0, mm1 ; undisplace
psubw mm3, mm4
movq [edi + 8*ecx], mm0
movq [edi + 8*ecx + 8], mm3

add ecx, 2
cmp ecx, 16
jnz near .loop

.done
pmaddwd mm5, [mmx_one]
movq mm0, mm5
psrlq mm5, 32
paddd mm0, mm5
movd eax, mm0 ; return sum

pop edi
pop esi
pop ecx

ret

ALIGN 16
.q1loop
movq mm0, [esi + 8*ecx] ; mm0 = [1st]
movq mm3, [esi + 8*ecx+ 8]
pxor mm1, mm1 ; mm1 = 0
pxor mm4, mm4 ;
pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
pcmpgtw mm4, mm3 ;
pxor mm0, mm1 ; mm0 = |mm0|
pxor mm3, mm4 ;
psubw mm0, mm1 ; displace
psubw mm3, mm4 ;
psllw mm0, 4
psllw mm3, 4
movq mm2, [inter_matrix + 8*ecx]
psrlw mm2, 1
paddw mm0, mm2
movq mm2, [inter_matrix_fix + ecx*8]
pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
movq mm2, [inter_matrix + 8*ecx + 8]
psrlw mm2, 1
paddw mm3, mm2
movq mm2, [inter_matrix_fix + ecx*8 + 8]
pmulhw mm3, mm2
psrlw mm0, 1 ; mm0 >>= 1 (/2)
psrlw mm3, 1 ;
paddw mm5, mm0 ; sum += mm0
pxor mm0, mm1 ; mm0 *= sign(mm0)
paddw mm5, mm3 ;
pxor mm3, mm4 ;
psubw mm0, mm1 ; undisplace
psubw mm3, mm4
movq [edi + 8*ecx], mm0
movq [edi + 8*ecx + 8], mm3

add ecx, 2
cmp ecx, 16
jnz near .q1loop

jmp .done


ALIGN 16
.q2loop
movq mm0, [esi + 8*ecx] ; mm0 = [1st]
movq mm3, [esi + 8*ecx+ 8]
pxor mm1, mm1 ; mm1 = 0
pxor mm4, mm4 ;
pcmpgtw mm1, mm0 ; mm1 = (0 > mm0)
pcmpgtw mm4, mm3 ;
pxor mm0, mm1 ; mm0 = |mm0|
pxor mm3, mm4 ;
psubw mm0, mm1 ; displace
psubw mm3, mm4 ;
psllw mm0, 4
psllw mm3, 4
movq mm2, [inter_matrix + 8*ecx]
psrlw mm2, 1
paddw mm0, mm2
movq mm2, [inter_matrix_fix + ecx*8]
pmulhw mm0, mm2 ; (level<<4 + inter_matrix[i]>>1) / inter_matrix[i]
movq mm2, [inter_matrix + 8*ecx + 8]
psrlw mm2, 1
paddw mm3, mm2
movq mm2, [inter_matrix_fix + ecx*8 + 8]
pmulhw mm3, mm2
psrlw mm0, 2 ; mm0 >>= 2 (/4)
psrlw mm3, 2 ;
paddw mm5, mm0 ; sum += mm0
pxor mm0, mm1 ; mm0 *= sign(mm0)
paddw mm5, mm3 ;
pxor mm3, mm4 ;
psubw mm0, mm1 ; undisplace
psubw mm3, mm4
movq [edi + 8*ecx], mm0
movq [edi + 8*ecx + 8], mm3

add ecx, 2
cmp ecx, 16
jnz near .q2loop

jmp .done


;-----------------------------------------------------------------------------
;
; uint32_t dequant_mpeg_intra_mmx(int16_t *data,
;                                 const int16_t const *coeff,
;                                 const uint32_t quant,
;                                 const uint32_t dcscalar);
;
;-----------------------------------------------------------------------------

; Note: in order to saturate 'easily', we pre-multiply the quantizer
; by 4 (a left shift by 2). Then, the high word of (coeff[]*matrix[i]*quant)
; is used to build a saturating mask. It is non-zero only when an overflow
; occurred. We thus avoid packing/unpacking toward double-word.
; Moreover, we perform the mult (matrix[i]*quant) first, instead of, e.g.,
; (coeff[i]*matrix[i]). This is less prone to overflow if coeff[] are not
; checked. Input ranges are: coeff in [-127,127], inter_matrix in [1..255],
; and quant in [1..31].
;
; The original loop is:
;
%if 0
movq mm0, [ecx+8*eax + 8*16] ; mm0 = coeff[i]
pxor mm1, mm1
pcmpgtw mm1, mm0
pxor mm0, mm1 ; change sign if negative
psubw mm0, mm1 ; -> mm0 = abs(coeff[i]), mm1 = sign of coeff[i]

movq mm2, mm7 ; mm2 = quant
pmullw mm2, [intra_matrix + 8*eax + 8*16 ] ; matrix[i]*quant.

movq mm6, mm2
pmulhw mm2, mm0 ; high of coeff*(matrix*quant) (should be 0 if no overflow)
pmullw mm0, mm6 ; low of coeff*(matrix*quant)

pxor mm5, mm5
pcmpgtw mm2, mm5 ; overflow?
psrlw mm2, 5 ; =0 if no clamp, 2047 otherwise
psrlw mm0, 5
paddw mm0, mm1 ; start restoring sign
por mm0, mm2 ; saturate to 2047 if needed
pxor mm0, mm1 ; finish negating back

movq [edx + 8*eax + 8*16], mm0 ; data[i]
add eax, 1
%endif

;********************************************************************
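; Net effect of the loop below, roughly:
;   data[i] = sign(coeff[i]) * ((abs(coeff[i]) * intra_matrix[i] * quant * 4) >> 5)
;           = coeff[i] * intra_matrix[i] * quant / 8
; saturated to 2047 whenever the 16-bit product overflows (the high word of
; the multiply is used as the clamp mask); the DC coefficient is fixed up
; separately after the loop.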

ALIGN 16
dequant_mpeg_intra_mmx:

mov edx, [esp+4]  ; data
mov ecx, [esp+8]  ; coeff
mov eax, [esp+12] ; quant

movq mm7, [mmx_mul_quant + eax*8 - 8]
mov eax, -16 ; to keep ALIGNed, we regularly process coeff[0]
psllw mm7, 2 ; << 2. See comment.
pxor mm6, mm6 ; this is a NOP

ALIGN 16
.loop
movq mm0, [ecx+8*eax + 8*16] ; mm0 = c = coeff[i]
movq mm3, [ecx+8*eax + 8*16 +8]; mm3 = c' = coeff[i+1]
pxor mm1, mm1
pxor mm4, mm4
pcmpgtw mm1, mm0 ; mm1 = sgn(c)
movq mm2, mm7 ; mm2 = quant

pcmpgtw mm4, mm3 ; mm4 = sgn(c')
pmullw mm2, [intra_matrix + 8*eax + 8*16 ] ; matrix[i]*quant

pxor mm0, mm1 ; negate if negative
pxor mm3, mm4 ; negate if negative

psubw mm0, mm1
psubw mm3, mm4

; we're short on registers here. Poor pairing...

movq mm5, mm2
pmullw mm2, mm0 ; low of coeff*(matrix*quant)

pmulhw mm0, mm5 ; high of coeff*(matrix*quant)
movq mm5, mm7 ; mm5 = quant

pmullw mm5, [intra_matrix + 8*eax + 8*16 +8] ; matrix[i+1]*quant

movq mm6, mm5
add eax,2 ; z-flag will be tested later

pmullw mm6, mm3 ; low of coeff*(matrix*quant)
pmulhw mm3, mm5 ; high of coeff*(matrix*quant)

pcmpgtw mm0, [zero]
paddusw mm2, mm0
psrlw mm2, 5

pcmpgtw mm3, [zero]
paddusw mm6, mm3
psrlw mm6, 5

pxor mm2, mm1 ; start negating back
pxor mm6, mm4 ; start negating back

psubusw mm1, mm0
psubusw mm4, mm3

psubw mm2, mm1 ; finish negating back
psubw mm6, mm4 ; finish negating back

movq [edx + 8*eax + 8*16 -2*8 ], mm2 ; data[i]
movq [edx + 8*eax + 8*16 -2*8 +8], mm6 ; data[i+1]

jnz near .loop

; deal with DC
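; The paddsw/psubsw pairs below clamp coeff[0]*dcscalar to [-2048, 2047] via
; signed saturation: adding then subtracting (32767-2047) caps the positive
; side at 2047, and subtracting then adding (32768-2048) caps the negative
; side at -2048.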
movd mm0, [ecx]
pmullw mm0, [esp+16]  ; dcscalar
movq mm2, [mmx_32767_minus_2047]
paddsw mm0, mm2
psubsw mm0, mm2
movq mm2, [mmx_32768_minus_2048]
psubsw mm0, mm2
paddsw mm0, mm2
movd eax, mm0
mov [edx], ax

xor eax, eax
ret

;-----------------------------------------------------------------------------
;
; uint32_t dequant_mpeg_inter_mmx(int16_t * data,
;                                 const int16_t * const coeff,
;                                 const uint32_t quant);
;
;-----------------------------------------------------------------------------

; Note: We use (2*c + sgn(c) - sgn(-c)) as multiplier
; so we handle the 3 cases: c<0, c==0, and c>0 in one shot.
; sgn(x) is the result of 'pcmpgtw 0,x': 0 if x>=0, -1 if x<0.
; It's mixed with the extraction of the absolute value.
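; Worked example, with sgn() as defined above (0 or -1):
;   c =  3:  2*3    +   0  - (-1) =  7
;   c =  0:  2*0    +   0  -   0  =  0
;   c = -3:  2*(-3) + (-1) -   0  = -7
; i.e. the multiplier is 2*c+1, 0, or 2*c-1 -- the usual (2*coeff +/- 1)
; factor of MPEG inter dequantization.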

ALIGN 16
dequant_mpeg_inter_mmx:

mov edx, [esp+ 4]  ; data
mov ecx, [esp+ 8]  ; coeff
mov eax, [esp+12]  ; quant
movq mm7, [mmx_mul_quant + eax*8 - 8]
mov eax, -16
paddw mm7, mm7 ; << 1
pxor mm6, mm6 ; mismatch sum

ALIGN 16
.loop
movq mm0, [ecx+8*eax + 8*16 ] ; mm0 = coeff[i]
movq mm2, [ecx+8*eax + 8*16 +8] ; mm2 = coeff[i+1]
add eax, 2

pxor mm1, mm1
pxor mm3, mm3
pcmpgtw mm1, mm0 ; mm1 = sgn(c) (preserved)
pcmpgtw mm3, mm2 ; mm3 = sgn(c') (preserved)
paddsw mm0, mm1 ; c += sgn(c)
paddsw mm2, mm3 ; c' += sgn(c')
paddw mm0, mm0 ; c *= 2
paddw mm2, mm2 ; c'*= 2

pxor mm4, mm4
pxor mm5, mm5
psubw mm4, mm0 ; -c
psubw mm5, mm2 ; -c'
psraw mm4, 16 ; mm4 = sgn(-c)
psraw mm5, 16 ; mm5 = sgn(-c')
psubsw mm0, mm4 ; c -= sgn(-c)
psubsw mm2, mm5 ; c' -= sgn(-c')
pxor mm0, mm1 ; finish changing sign if needed
pxor mm2, mm3 ; finish changing sign if needed

; we're short on registers here. Poor pairing...

movq mm4, mm7 ; (matrix*quant)
pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8]
movq mm5, mm4
pmulhw mm5, mm0 ; high of c*(matrix*quant)
pmullw mm0, mm4 ; low of c*(matrix*quant)

movq mm4, mm7 ; (matrix*quant)
pmullw mm4, [inter_matrix + 8*eax + 8*16 -2*8 + 8]

pcmpgtw mm5, [zero]
paddusw mm0, mm5
psrlw mm0, 5
pxor mm0, mm1 ; start restoring sign
psubusw mm1, mm5

movq mm5, mm4
pmulhw mm5, mm2 ; high of c*(matrix*quant)
pmullw mm2, mm4 ; low of c*(matrix*quant)
psubw mm0, mm1 ; finish restoring sign

pcmpgtw mm5, [zero]
paddusw mm2, mm5
psrlw mm2, 5
pxor mm2, mm3 ; start restoring sign
psubusw mm3, mm5
psubw mm2, mm3 ; finish restoring sign

pxor mm6, mm0 ; mismatch control
movq [edx + 8*eax + 8*16 -2*8 ], mm0 ; data[i]
pxor mm6, mm2 ; mismatch control
movq [edx + 8*eax + 8*16 -2*8 +8], mm2 ; data[i+1]

jnz near .loop

; mismatch control
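; mm6 holds the XOR of every output word, so folding its four words together
; and keeping bit 0 gives the parity of the coefficient sum; when that sum is
; even, the LSB of data[63] is toggled (MPEG-style mismatch control).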

movq mm0, mm6
psrlq mm0, 48
movq mm1, mm6
movq mm2, mm6
psrlq mm1, 32
pxor mm6, mm0
psrlq mm2, 16
pxor mm6, mm1
pxor mm6, mm2
movd eax, mm6
and eax, 1
xor eax, 1
xor word [edx + 2*63], ax

xor eax, eax
ret