cglobal interpolate8x8_halfpel_v_xmm
cglobal interpolate8x8_halfpel_hv_xmm

cglobal interpolate8x4_halfpel_h_xmm
cglobal interpolate8x4_halfpel_v_xmm
cglobal interpolate8x4_halfpel_hv_xmm

cglobal interpolate8x8_halfpel_add_xmm
cglobal interpolate8x8_halfpel_h_add_xmm
cglobal interpolate8x8_halfpel_v_add_xmm

;===========================================================================
;
; void interpolate8x4_halfpel_h_xmm(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;===========================================================================

ALIGN 16
interpolate8x4_halfpel_h_xmm:

  mov eax, [esp+16] ; rounding
  mov ecx, [esp+ 4] ; Dst
  test eax, eax
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; stride

  jnz near .rounding1

  COPY_H_SSE_RND0
  lea ecx, [ecx+2*edx]
  COPY_H_SSE_RND0
  ret

.rounding1
  ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
  movq mm7, [mmx_one]
  COPY_H_SSE_RND1
  lea ecx, [ecx+2*edx]
  COPY_H_SSE_RND1
  ret
.endfunc
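
; For reference, a scalar C sketch of the rounding trick used above
; (illustration only, not part of the build; the helper name is hypothetical):
; 'pavgb' computes (i+j+1)/2, and subtracting the low bit of i^j turns it
; into the truncating average (i+j)/2, which is what rounding==1 needs.
;
;   static uint8_t avg_round_down(uint8_t i, uint8_t j)
;   {
;       /* (i+j)/2 == (i+j+1)/2 - ((i^j)&1) */
;       return (uint8_t)(((i + j + 1) >> 1) - ((i ^ j) & 1));
;   }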

;===========================================================================
;
; void interpolate8x4_halfpel_v_xmm(uint8_t * const dst,
;                                   const uint8_t * const src,
;                                   const uint32_t stride,
;                                   const uint32_t rounding);
;
;===========================================================================

ALIGN 16
interpolate8x4_halfpel_v_xmm:

  mov eax, [esp+16] ; rounding
  mov ecx, [esp+ 4] ; Dst
  test eax, eax
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; stride

  ; we process 2 lines at a time
  jnz near .rounding1

  COPY_V_SSE_RND0
  lea ecx, [ecx+2*edx]
  COPY_V_SSE_RND0
  ret

.rounding1
  ; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
  movq mm7, [mmx_one]
  movq mm2, [eax] ; loop invariant
  add eax, edx

  COPY_V_SSE_RND1
  lea ecx, [ecx+2*edx]
  COPY_V_SSE_RND1
  ret
.endfunc

;===========================================================================
;
; void interpolate8x4_halfpel_hv_xmm(uint8_t * const dst,
;                                    const uint8_t * const src,
;                                    const uint32_t stride,
;                                    const uint32_t rounding);
;
;===========================================================================

; The trick is to correct the result of 'pavgb' with some combination of the
; lsb's of the 4 input values i,j,k,l, and their intermediate 'pavgb' (s and t).
; The boolean relations are:
;   (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st
;   (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st
;   (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st
;   (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st
; with s=(i+j+1)/2, t=(k+l+1)/2, ij = i^j, kl = k^l, st = s^t.

; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).
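
; A brute-force C check of the four relations, keeping only the lsb of each
; correction term (illustration only, not part of the build; the exhaustive
; 256^4 loop is slow, shrink the ranges for a quick smoke test):
;
;   #include <assert.h>
;   int main(void)
;   {
;       unsigned i, j, k, l;
;       for (i = 0; i < 256; i++)
;       for (j = 0; j < 256; j++)
;       for (k = 0; k < 256; k++)
;       for (l = 0; l < 256; l++) {
;           unsigned s = (i+j+1)>>1, t = (k+l+1)>>1;   /* the two pavgb's */
;           unsigned ij = i^j, kl = k^l, st = s^t;
;           assert(((i+j+k+l+3)>>2) == ((s+t+1)>>1) - ( (ij&kl) & st & 1));
;           assert(((i+j+k+l+2)>>2) == ((s+t+1)>>1) - ( (ij|kl) & st & 1));
;           assert(((i+j+k+l+1)>>2) == ((s+t+1)>>1) - (((ij&kl) | st) & 1));
;           assert(((i+j+k+l+0)>>2) == ((s+t+1)>>1) - (((ij|kl) | st) & 1));
;       }
;       return 0;
;   }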

ALIGN 16
interpolate8x4_halfpel_hv_xmm:
  mov eax, [esp+16] ; rounding
  mov ecx, [esp+ 4] ; Dst
  test eax, eax
  mov eax, [esp+ 8] ; Src
  mov edx, [esp+12] ; stride

  movq mm7, [mmx_one]

  ; loop invariants: mm2 = (i+j+1)/2 and mm3 = i^j
  movq mm2, [eax]
  movq mm3, [eax+1]
  movq mm6, mm2
  pavgb mm2, mm3
  pxor mm3, mm6 ; mm2/mm3 ready

  jnz near .rounding1

  COPY_HV_SSE_RND0
  add ecx, edx
  COPY_HV_SSE_RND0
  ret

.rounding1
  COPY_HV_SSE_RND1
  add ecx, edx
  COPY_HV_SSE_RND1
  ret
.endfunc

;===========================================================================
;
; The next functions combine both the source halfpel interpolation step and
; the averaging (with rounding) step, to avoid wasting memory bandwidth
; computing intermediate halfpel images and then averaging them.
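;
; For example, a scalar sketch of what such a combined h-pass does per pixel
; (assumed semantics, illustration only; the function name is hypothetical):
;
;   void interpolate8x8_halfpel_h_add_c(uint8_t *dst, const uint8_t *src,
;                                       uint32_t stride, uint32_t rounding)
;   {
;       unsigned x, y;
;       for (y = 0; y < 8; y++)
;           for (x = 0; x < 8; x++) {
;               /* horizontal halfpel sample, MPEG-4 style rounding */
;               uint8_t h = (uint8_t)((src[y*stride+x] + src[y*stride+x+1]
;                                      + 1 - rounding) >> 1);
;               /* average into the existing dst pixel, +1 rounding (pavgb) */
;               dst[y*stride+x] = (uint8_t)((dst[y*stride+x] + h + 1) >> 1);
;           }
;   }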