32 |
bits 32 |
bits 32 |
33 |
|
|
34 |
%macro cglobal 1 |
%macro cglobal 1 |
35 |
|
%ifdef PREFIX |
36 |
global _%1 |
global _%1 |
37 |
%define %1 _%1 |
%define %1 _%1 |
38 |
%else |
%else |
; uint32_t sad16_mmx(const uint8_t * const cur,
;                    const uint8_t * const ref,
;                    const uint32_t stride,
;                    const uint32_t best_sad);
;
; (early termination ignored; it slows this down)
;
;===========================================================================
;===========================================================================
;
; uint32_t sad16bi_mmx(const uint8_t * const cur,
;                      const uint8_t * const ref1,
;                      const uint8_t * const ref2,
;                      const uint32_t stride);
;
;===========================================================================
%macro SADBI_16x16_MMX 2 ; SADBI_16x16_MMX( int_ptr_offset, bool_increment_ptr );
  ; Accumulate into mm6 the SAD between one 8-pixel span of the source row
  ; (eax) and the rounded average of the matching spans of the two
  ; reference rows (edx = ref1, ebx = ref2).
  ;   %1 = byte offset within the row (0 = low half, 8 = high half)
  ;   %2 != 0 -> advance eax/ebx/edx by the stride (ecx) to the next row
  ; Requires mm7 == 0 (used as the zero half for byte->word unpacking).
  ; Clobbers mm0-mm5; reads the word constant mmx_one.

  movq mm0, [edx+%1]              ; 8 bytes of ref1
  movq mm2, [ebx+%1]              ; 8 bytes of ref2
  movq mm1, mm0
  movq mm3, mm2

%if %2 != 0
  add edx, ecx                    ; ref1 += stride
%endif

  punpcklbw mm0, mm7              ; widen to words: mm0/mm1 = ref1
  punpckhbw mm1, mm7
  punpcklbw mm2, mm7              ; mm2/mm3 = ref2
  punpckhbw mm3, mm7

%if %2 != 0
  add ebx, ecx                    ; ref2 += stride
%endif

  paddusw mm0, mm2 ; mm01 = ref1 + ref2
  paddusw mm1, mm3
  paddusw mm0, [mmx_one] ; mm01 += 1
  paddusw mm1, [mmx_one]
  psrlw mm0, 1 ; mm01 >>= 1
  psrlw mm1, 1                    ; -> (ref1 + ref2 + 1) / 2, rounded average

  movq mm2, [eax+%1]
  movq mm3, mm2
  punpcklbw mm2, mm7 ; mm23 = src
  punpckhbw mm3, mm7

%if %2 != 0
  add eax, ecx                    ; src += stride
%endif

  ; |a - b| via two saturating unsigned subtractions: for each word,
  ; one direction clamps to 0 and the other yields the magnitude,
  ; so OR-ing the two results gives the absolute difference.
  movq mm4, mm0
  movq mm5, mm1
  psubusw mm0, mm2
  psubusw mm1, mm3
  psubusw mm2, mm4
  psubusw mm3, mm5
  por mm0, mm2 ; mm01 = ABS(mm01 - mm23)
  por mm1, mm3

  paddusw mm6,mm0 ; mm6 += mm01
  paddusw mm6,mm1

%endmacro
265 |
|
|
266 |
|
;-----------------------------------------------------------------------
; uint32_t sad16bi_mmx(const uint8_t * const cur,
;                      const uint8_t * const ref1,
;                      const uint8_t * const ref2,
;                      const uint32_t stride)
;
; SAD of a 16x16 source block against the rounded average of two
; reference blocks (bi-directional interpolation).  Fully unrolled:
; 16 rows, two 8-pixel halves per row.
; cdecl, stack args; returns the SAD in eax.
; Clobbers ecx, edx, mm0-mm7; preserves ebx via push/pop.
; NOTE(review): the original had a dead ".Loop" label here (no colon ->
; NASM orphan-label warning, and nothing jumps to it since the body is
; fully unrolled); it has been removed.
;-----------------------------------------------------------------------
align 16
sad16bi_mmx:
  push ebx                        ; ebx is callee-saved in cdecl
  mov eax, [esp+4+ 4] ; Src
  mov edx, [esp+4+ 8] ; Ref1
  mov ebx, [esp+4+12] ; Ref2
  mov ecx, [esp+4+16] ; Stride

  pxor mm6, mm6                   ; SAD accumulator (4 unsigned words)
  pxor mm7, mm7                   ; constant zero for byte->word unpacking

  ; 16 rows x 16 pixels: offset 0 = left half (pointers unchanged),
  ; offset 8 = right half (pointers advanced to the next row).
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1

  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1
  SADBI_16x16_MMX 0, 0
  SADBI_16x16_MMX 8, 1

  pmaddwd mm6, [mmx_one] ; collapse 4 words -> 2 dwords
  movq mm7, mm6
  psrlq mm7, 32
  paddd mm6, mm7                  ; sum the two dwords

  movd eax, mm6                   ; return total SAD
  pop ebx
  ret
;===========================================================================
;
; uint32_t sad8bi_mmx(const uint8_t * const cur,
;                     const uint8_t * const ref1,
;                     const uint8_t * const ref2,
;                     const uint32_t stride);
;
;===========================================================================
|
;-----------------------------------------------------------------------
; uint32_t sad8bi_mmx(const uint8_t * const cur,
;                     const uint8_t * const ref1,
;                     const uint8_t * const ref2,
;                     const uint32_t stride)
;
; SAD of an 8x8 source block against the rounded average of two
; reference blocks.  Reuses SADBI_16x16_MMX: each invocation with
; (offset 0, increment 1) processes one full 8-pixel row and advances
; all three pointers, so 8 invocations cover the 8x8 block.
; cdecl, stack args; returns the SAD in eax.
; Clobbers ecx, edx, mm0-mm7; preserves ebx via push/pop.
; NOTE(review): the original had a dead ".Loop" label here (no colon ->
; NASM orphan-label warning, and nothing jumps to it since the body is
; fully unrolled); it has been removed.
;-----------------------------------------------------------------------
align 16
sad8bi_mmx:
  push ebx                        ; ebx is callee-saved in cdecl
  mov eax, [esp+4+ 4] ; Src
  mov edx, [esp+4+ 8] ; Ref1
  mov ebx, [esp+4+12] ; Ref2
  mov ecx, [esp+4+16] ; Stride

  pxor mm6, mm6                   ; SAD accumulator (4 unsigned words)
  pxor mm7, mm7                   ; constant zero for byte->word unpacking

  ; 8 rows x 8 pixels, one macro invocation per row
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1
  SADBI_16x16_MMX 0, 1

  pmaddwd mm6, [mmx_one] ; collapse 4 words -> 2 dwords
  movq mm7, mm6
  psrlq mm7, 32
  paddd mm6, mm7                  ; sum the two dwords

  movd eax, mm6                   ; return total SAD
  pop ebx
  ret
;===========================================================================
;
; uint32_t dev16_mmx(const uint8_t * const cur,
;                    const uint32_t stride);
;
;===========================================================================
%macro MEAN_16x16_MMX 0 |
368 |
movq mm0, [eax] |
movq mm0, [eax] |
369 |
movq mm2, [eax+8] |
movq mm2, [eax+8] |
370 |
lea eax,[eax+ecx] |
lea eax,[eax+ecx] |