@@ +128,20 @@
   add _EAX,TMP0                  ; + (dcscalar/2)*sgn(data[0])

   mov TMP0, prm3                 ; quant
-  cdq
-  idiv prm4d                     ; dcscalar
   lea TMP1, [mmx_div]
   movq mm7, [TMP1+TMP0 * 8 - 8]
+%ifdef ARCH_IS_X86_64
+%ifdef WINDOWS
+  mov TMP1, prm2
+%endif
+%endif
+  cdq
+  idiv prm4d                     ; dcscalar
+%ifdef ARCH_IS_X86_64
+%ifdef WINDOWS
+  mov prm2, TMP1
+%endif
+%endif
   cmp TMP0, 1
   mov TMP1, prm1                 ; coeff
   je .low
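Both hunks of this change reorder the intra-DC path so that the mmx_div table load (lea TMP1, [mmx_div] / movq ...) happens before the division: TMP1 is then free to hold prm2 across cdq/idiv, which is presumably the point of the new %ifdef WINDOWS block, since in the Microsoft x64 calling convention the second integer argument arrives in RDX and both cdq and idiv r/m32 overwrite EDX. For context, the arithmetic around the division is the rounded intra-DC quantization hinted at by the "+ (dcscalar/2)*sgn(data[0])" comment; a minimal C sketch of that rounding (the function and variable names are illustrative, not taken from the sources):

#include <stdint.h>

/* Illustrative helper, not from the patch: divide the intra DC coefficient
 * by dcscalar with rounding half away from zero, i.e. the
 * (dc +/- dcscalar/2) / dcscalar computation performed above. */
static int32_t quant_dc_rounded(int32_t dc, int32_t dcscalar)
{
    if (dc >= 0)
        return (dc + dcscalar / 2) / dcscalar;
    return (dc - dcscalar / 2) / dcscalar;   /* C division truncates toward zero */
}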
@@ +259,22 @@
   sub TMP1,TMP0
   cmovl _EAX,TMP1                ; +/- dcscalar/2
   mov TMP0, prm3                 ; quant
+  lea TMP1, [mmx_div]
+  movq xmm7, [TMP1+TMP0 * 8 - 8]
+
+%ifdef ARCH_IS_X86_64
+%ifdef WINDOWS
+  mov TMP1, prm2
+%endif
+%endif
   cdq
   idiv prm4d                     ; dcscalar
+%ifdef ARCH_IS_X86_64
+%ifdef WINDOWS
+  mov prm2, TMP1
+%endif
+%endif
   cmp TMP0, 1
-  lea TMP1, [mmx_div]
-  movq xmm7, [TMP1+TMP0 * 8 - 8]
   mov TMP1, prm1                 ; coeff
   je near .low

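The SSE2 variant above receives the same treatment: the table load moves ahead of the division and prm2 is preserved in TMP1 around cdq/idiv when building for WINDOWS on x86-64. As a quick standalone check of the rounding sketched earlier (again purely illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Compare plain truncating division with the +/- dcscalar/2 biased
 * division; the values are made up for illustration. */
int main(void)
{
    const int32_t dcscalar = 8;
    const int32_t dc_values[] = { 100, -100, 4, -4 };

    for (size_t i = 0; i < sizeof(dc_values) / sizeof(dc_values[0]); i++) {
        int32_t dc = dc_values[i];
        int32_t bias = (dc >= 0) ? dcscalar / 2 : -(dcscalar / 2);
        printf("dc=%4d  truncated=%3d  rounded=%3d\n",
               (int)dc, (int)(dc / dcscalar), (int)((dc + bias) / dcscalar));
    }
    return 0;
}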